path: root/scripts/git.orderFile
# SPDX-License-Identifier: GPL-2.0

# order file for git, to produce patches that are easier to review:
# the important stuff, like header changes, gets diffed first.
#
# one-off usage:
#   git diff -O scripts/git.orderFile ...
#
# add to git config:
#   git config diff.orderFile scripts/git.orderFile
#
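# a worked example (hypothetical paths): with either setup above, a patch
# touching Documentation/foo.rst, drivers/foo/foo.h and drivers/foo/foo.c
# is shown documentation first, then the header, then the .c file:
#
#   git diff -O scripts/git.orderFile HEAD~1..HEAD
#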

MAINTAINERS

# Documentation
Documentation/*
*.rst

# git-specific
.gitignore
scripts/git.orderFile

# build system
Kconfig*
*/Kconfig*
Kbuild*
*/Kbuild*
Makefile*
*/Makefile*
*.mak
*.mk
scripts/*

# semantic patches
*.cocci

# headers
*types.h
*.h

# code
*.c
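
# note: paths not matched by any pattern above (e.g. *.S or binary blobs)
# are emitted last, as if the file ended with an implicit match-all pattern.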
rm class='right' method='get' action='/cgit/linux-arm.git/log/tools'>
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rw-r--r--tools/Makefile81
-rw-r--r--tools/accounting/.gitignore1
-rw-r--r--tools/accounting/Makefile2
-rw-r--r--tools/accounting/getdelays.c38
-rw-r--r--tools/accounting/procacct.c412
-rw-r--r--tools/arch/arm64/include/asm/cputype.h278
-rw-r--r--tools/arch/arm64/include/asm/sysreg.h1296
-rw-r--r--tools/arch/arm64/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h95
-rw-r--r--tools/arch/arm64/include/uapi/asm/perf_regs.h7
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/h8300/include/asm/bitsperlong.h15
-rw-r--r--tools/arch/h8300/include/uapi/asm/mman.h7
-rw-r--r--tools/arch/ia64/include/asm/barrier.h3
-rw-r--r--tools/arch/loongarch/include/uapi/asm/bitsperlong.h9
-rw-r--r--tools/arch/loongarch/include/uapi/asm/perf_regs.h40
-rw-r--r--tools/arch/loongarch/include/uapi/asm/unistd.h9
-rw-r--r--tools/arch/mips/include/uapi/asm/perf_regs.h40
-rw-r--r--tools/arch/parisc/include/uapi/asm/mman.h13
-rw-r--r--tools/arch/powerpc/include/uapi/asm/errno.h1
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h7
-rw-r--r--tools/arch/powerpc/include/uapi/asm/perf_regs.h44
-rw-r--r--tools/arch/riscv/include/uapi/asm/unistd.h2
-rw-r--r--tools/arch/s390/include/uapi/asm/bpf_perf_event.h9
-rw-r--r--tools/arch/s390/include/uapi/asm/kvm.h8
-rw-r--r--tools/arch/s390/include/uapi/asm/ptrace.h457
-rw-r--r--tools/arch/s390/include/uapi/asm/sie.h2
-rw-r--r--tools/arch/sh/include/asm/barrier.h2
-rw-r--r--tools/arch/x86/include/asm/amd-ibs.h152
-rw-r--r--tools/arch/x86/include/asm/asm.h193
-rw-r--r--tools/arch/x86/include/asm/atomic.h11
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h77
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h69
-rw-r--r--tools/arch/x86/include/asm/inat.h2
-rw-r--r--tools/arch/x86/include/asm/insn.h108
-rw-r--r--tools/arch/x86/include/asm/irq_vectors.h7
-rw-r--r--tools/arch/x86/include/asm/mcsafe_test.h13
-rw-r--r--tools/arch/x86/include/asm/msr-index.h275
-rw-r--r--tools/arch/x86/include/asm/nops.h77
-rw-r--r--tools/arch/x86/include/asm/orc_types.h53
-rw-r--r--tools/arch/x86/include/asm/pvclock-abi.h48
-rw-r--r--tools/arch/x86/include/asm/pvclock.h103
-rw-r--r--tools/arch/x86/include/asm/required-features.h10
-rw-r--r--tools/arch/x86/include/asm/rmwcc.h21
-rw-r--r--tools/arch/x86/include/asm/unistd_32.h16
-rw-r--r--tools/arch/x86/include/asm/unistd_64.h19
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h158
-rw-r--r--tools/arch/x86/include/uapi/asm/prctl.h24
-rw-r--r--tools/arch/x86/include/uapi/asm/svm.h63
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd.h11
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd_32.h25
-rw-r--r--tools/arch/x86/include/uapi/asm/unistd_64.h28
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h12
-rw-r--r--tools/arch/x86/intel_sdsi/Makefile21
-rw-r--r--tools/arch/x86/intel_sdsi/intel_sdsi.c846
-rw-r--r--tools/arch/x86/kcpuid/Makefile24
-rw-r--r--tools/arch/x86/kcpuid/cpuid.csv451
-rw-r--r--tools/arch/x86/kcpuid/kcpuid.c675
-rw-r--r--tools/arch/x86/lib/inat.c2
-rw-r--r--tools/arch/x86/lib/insn.c352
-rw-r--r--tools/arch/x86/lib/memcpy_64.S146
-rw-r--r--tools/arch/x86/lib/memset_64.S21
-rw-r--r--tools/arch/x86/lib/x86-opcode-map.txt112
-rw-r--r--tools/arch/x86/tools/gen-insn-attr-x86.awk50
-rw-r--r--tools/bootconfig/Makefile4
-rw-r--r--tools/bootconfig/include/linux/bootconfig.h47
-rw-r--r--tools/bootconfig/include/linux/bug.h12
-rw-r--r--tools/bootconfig/include/linux/ctype.h7
-rw-r--r--tools/bootconfig/include/linux/errno.h7
-rw-r--r--tools/bootconfig/include/linux/kernel.h18
-rw-r--r--tools/bootconfig/include/linux/memblock.h12
-rw-r--r--tools/bootconfig/include/linux/printk.h14
-rw-r--r--tools/bootconfig/include/linux/string.h32
-rw-r--r--tools/bootconfig/main.c352
-rw-r--r--tools/bootconfig/samples/good-mixed-append.bconf4
-rw-r--r--tools/bootconfig/samples/good-mixed-kv1.bconf (renamed from tools/bootconfig/samples/bad-mixed-kv1.bconf)0
-rw-r--r--tools/bootconfig/samples/good-mixed-kv2.bconf (renamed from tools/bootconfig/samples/bad-mixed-kv2.bconf)0
-rw-r--r--tools/bootconfig/samples/good-mixed-kv3.bconf6
-rw-r--r--tools/bootconfig/samples/good-mixed-override.bconf4
-rw-r--r--tools/bootconfig/samples/good-override.bconf6
-rwxr-xr-xtools/bootconfig/scripts/bconf2ftrace.sh301
-rw-r--r--tools/bootconfig/scripts/ftrace.sh109
-rwxr-xr-xtools/bootconfig/scripts/ftrace2bconf.sh260
-rw-r--r--tools/bootconfig/scripts/xbc.sh56
-rwxr-xr-xtools/bootconfig/test-bootconfig.sh77
-rw-r--r--tools/bpf/Makefile35
-rw-r--r--tools/bpf/Makefile.helpers60
-rw-r--r--tools/bpf/bpf_asm.c2
-rw-r--r--tools/bpf/bpf_dbg.c6
-rw-r--r--tools/bpf/bpf_exp.y14
-rw-r--r--tools/bpf/bpf_jit_disasm.c5
-rw-r--r--tools/bpf/bpftool/.gitignore8
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile29
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst86
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst63
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst61
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst245
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-iter.rst76
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-link.rst112
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst90
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst104
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst38
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst137
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst52
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst59
-rw-r--r--tools/bpf/bpftool/Documentation/common_options.rst25
-rw-r--r--tools/bpf/bpftool/Documentation/substitutions.rst3
-rw-r--r--tools/bpf/bpftool/Makefile257
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool241
-rw-r--r--tools/bpf/bpftool/btf.c419
-rw-r--r--tools/bpf/bpftool/btf_dumper.c163
-rw-r--r--tools/bpf/bpftool/cfg.c33
-rw-r--r--tools/bpf/bpftool/cfg.h5
-rw-r--r--tools/bpf/bpftool/cgroup.c267
-rw-r--r--tools/bpf/bpftool/common.c674
-rw-r--r--tools/bpf/bpftool/feature.c744
-rw-r--r--tools/bpf/bpftool/gen.c2058
-rw-r--r--tools/bpf/bpftool/iter.c123
-rw-r--r--tools/bpf/bpftool/jit_disasm.c304
-rw-r--r--tools/bpf/bpftool/json_writer.c11
-rw-r--r--tools/bpf/bpftool/json_writer.h4
-rw-r--r--tools/bpf/bpftool/link.c576
-rw-r--r--tools/bpf/bpftool/main.c207
-rw-r--r--tools/bpf/bpftool/main.h192
-rw-r--r--tools/bpf/bpftool/map.c559
-rw-r--r--tools/bpf/bpftool/map_perf_ring.c26
-rw-r--r--tools/bpf/bpftool/net.c441
-rw-r--r--tools/bpf/bpftool/perf.c119
-rw-r--r--tools/bpf/bpftool/pids.c256
-rw-r--r--tools/bpf/bpftool/prog.c1109
-rw-r--r--tools/bpf/bpftool/skeleton/pid_iter.bpf.c103
-rw-r--r--tools/bpf/bpftool/skeleton/pid_iter.h14
-rw-r--r--tools/bpf/bpftool/skeleton/profiler.bpf.c11
-rw-r--r--tools/bpf/bpftool/skeleton/profiler.h46
-rw-r--r--tools/bpf/bpftool/struct_ops.c122
-rw-r--r--tools/bpf/bpftool/tracelog.c2
-rw-r--r--tools/bpf/bpftool/xlated_dumper.c67
-rw-r--r--tools/bpf/bpftool/xlated_dumper.h3
-rw-r--r--tools/bpf/resolve_btfids/.gitignore4
-rw-r--r--tools/bpf/resolve_btfids/Build12
-rw-r--r--tools/bpf/resolve_btfids/Makefile112
-rw-r--r--tools/bpf/resolve_btfids/main.c783
-rw-r--r--tools/bpf/runqslower/Makefile82
-rw-r--r--tools/bpf/runqslower/runqslower.bpf.c37
-rw-r--r--tools/bpf/runqslower/runqslower.c26
-rw-r--r--tools/bpf/runqslower/runqslower.h2
-rw-r--r--tools/build/Build.include29
-rw-r--r--tools/build/Makefile14
-rw-r--r--tools/build/Makefile.build23
-rw-r--r--tools/build/Makefile.feature93
-rw-r--r--tools/build/feature/Makefile102
-rw-r--r--tools/build/feature/test-all.c44
-rw-r--r--tools/build/feature/test-bpf.c6
-rw-r--r--tools/build/feature/test-clang-bpf-co-re.c9
-rw-r--r--tools/build/feature/test-clang-bpf-global-var.c4
-rw-r--r--tools/build/feature/test-cxa-demangle.cpp17
-rw-r--r--tools/build/feature/test-disassembler-init-styled.c13
-rw-r--r--tools/build/feature/test-libbfd-buildid.c8
-rw-r--r--tools/build/feature/test-libbpf.c4
-rw-r--r--tools/build/feature/test-libcrypto.c15
-rw-r--r--tools/build/feature/test-libdebuginfod.c8
-rw-r--r--tools/build/feature/test-libelf-mmap.c9
-rw-r--r--tools/build/feature/test-libopencsd.c4
-rw-r--r--tools/build/feature/test-libpfm4.c9
-rw-r--r--tools/build/feature/test-libpython-version.c11
-rw-r--r--tools/build/feature/test-libtraceevent.c12
-rw-r--r--tools/build/feature/test-libtracefs.c10
-rw-r--r--tools/build/feature/test-scandirat.c13
-rw-r--r--tools/build/feature/test-sync-compare-and-swap.c15
-rwxr-xr-xtools/certs/print-cert-tbs-hash.sh91
-rw-r--r--tools/cgroup/iocost_monitor.py105
-rw-r--r--tools/cgroup/memcg_shrinker.py70
-rw-r--r--tools/cgroup/memcg_slabinfo.py226
-rw-r--r--tools/counter/Build1
-rw-r--r--tools/counter/Makefile53
-rw-r--r--tools/counter/counter_example.c92
-rwxr-xr-xtools/debugging/kernel-chktaint15
-rw-r--r--tools/gpio/Makefile2
-rw-r--r--tools/gpio/gpio-event-mon.c158
-rw-r--r--tools/gpio/gpio-hammer.c56
-rw-r--r--tools/gpio/gpio-utils.c149
-rw-r--r--tools/gpio/gpio-utils.h50
-rw-r--r--tools/gpio/gpio-watch.c19
-rw-r--r--tools/gpio/lsgpio.c73
-rw-r--r--tools/hv/Makefile2
-rw-r--r--tools/hv/hv_kvp_daemon.c8
-rw-r--r--tools/iio/Makefile3
-rw-r--r--tools/iio/iio_event_monitor.c87
-rw-r--r--tools/iio/iio_generic_buffer.c157
-rw-r--r--tools/iio/iio_utils.c45
-rw-r--r--tools/iio/iio_utils.h9
-rw-r--r--tools/include/asm-generic/atomic-gcc.h23
-rw-r--r--tools/include/asm-generic/bitops.h1
-rw-r--r--tools/include/asm-generic/bitops/atomic.h15
-rw-r--r--tools/include/asm-generic/bitops/find.h78
-rw-r--r--tools/include/asm-generic/bitops/non-atomic.h34
-rw-r--r--tools/include/asm-generic/bitsperlong.h3
-rw-r--r--tools/include/asm-generic/hugetlb_encode.h23
-rw-r--r--tools/include/asm-generic/unaligned.h23
-rw-r--r--tools/include/asm/alternative.h (renamed from tools/include/asm/alternative-asm.h)0
-rw-r--r--tools/include/asm/barrier.h2
-rw-r--r--tools/include/linux/arm-smccc.h193
-rw-r--r--tools/include/linux/atomic.h2
-rw-r--r--tools/include/linux/bitfield.h176
-rw-r--r--tools/include/linux/bitmap.h93
-rw-r--r--tools/include/linux/bitops.h16
-rw-r--r--tools/include/linux/bits.h24
-rw-r--r--tools/include/linux/btf_ids.h208
-rw-r--r--tools/include/linux/build_bug.h91
-rw-r--r--tools/include/linux/cache.h10
-rw-r--r--tools/include/linux/compiler-gcc.h14
-rw-r--r--tools/include/linux/compiler.h42
-rw-r--r--tools/include/linux/compiler_types.h43
-rw-r--r--tools/include/linux/const.h11
-rw-r--r--tools/include/linux/coresight-pmu.h69
-rw-r--r--tools/include/linux/ctype.h17
-rw-r--r--tools/include/linux/debug_locks.h14
-rw-r--r--tools/include/linux/debugfs.h5
-rw-r--r--tools/include/linux/err.h2
-rw-r--r--tools/include/linux/export.h3
-rw-r--r--tools/include/linux/filter.h24
-rw-r--r--tools/include/linux/find.h177
-rw-r--r--tools/include/linux/gfp.h9
-rw-r--r--tools/include/linux/gfp_types.h1
-rw-r--r--tools/include/linux/hardirq.h12
-rw-r--r--tools/include/linux/hash.h5
-rw-r--r--tools/include/linux/interval_tree_generic.h187
-rw-r--r--tools/include/linux/io.h5
-rw-r--r--tools/include/linux/irqflags.h39
-rw-r--r--tools/include/linux/jhash.h2
-rw-r--r--tools/include/linux/kallsyms.h4
-rw-r--r--tools/include/linux/kconfig.h67
-rw-r--r--tools/include/linux/kernel.h30
-rw-r--r--tools/include/linux/list.h11
-rw-r--r--tools/include/linux/list_sort.h14
-rw-r--r--tools/include/linux/lockdep.h72
-rw-r--r--tools/include/linux/math.h25
-rw-r--r--tools/include/linux/math64.h75
-rw-r--r--tools/include/linux/mm.h42
-rw-r--r--tools/include/linux/objtool_types.h57
-rw-r--r--tools/include/linux/overflow.h140
-rw-r--r--tools/include/linux/pfn.h10
-rw-r--r--tools/include/linux/poison.h6
-rw-r--r--tools/include/linux/proc_fs.h4
-rw-r--r--tools/include/linux/rbtree.h194
-rw-r--r--tools/include/linux/rbtree_augmented.h2
-rw-r--r--tools/include/linux/sched/mm.h2
-rw-r--r--tools/include/linux/slab.h48
-rw-r--r--tools/include/linux/spinlock.h2
-rw-r--r--tools/include/linux/stacktrace.h33
-rw-r--r--tools/include/linux/static_call_types.h103
-rw-r--r--tools/include/linux/string.h1
-rw-r--r--tools/include/linux/types.h26
-rw-r--r--tools/include/nolibc/.gitignore1
-rw-r--r--tools/include/nolibc/Makefile75
-rw-r--r--tools/include/nolibc/arch-aarch64.h199
-rw-r--r--tools/include/nolibc/arch-arm.h242
-rw-r--r--tools/include/nolibc/arch-i386.h224
-rw-r--r--tools/include/nolibc/arch-loongarch.h200
-rw-r--r--tools/include/nolibc/arch-mips.h224
-rw-r--r--tools/include/nolibc/arch-riscv.h208
-rw-r--r--tools/include/nolibc/arch-s390.h226
-rw-r--r--tools/include/nolibc/arch-x86_64.h220
-rw-r--r--tools/include/nolibc/arch.h36
-rw-r--r--tools/include/nolibc/ctype.h102
-rw-r--r--tools/include/nolibc/errno.h28
-rw-r--r--tools/include/nolibc/nolibc.h2419
-rw-r--r--tools/include/nolibc/signal.h25
-rw-r--r--tools/include/nolibc/stackprotector.h53
-rw-r--r--tools/include/nolibc/std.h36
-rw-r--r--tools/include/nolibc/stdint.h99
-rw-r--r--tools/include/nolibc/stdio.h315
-rw-r--r--tools/include/nolibc/stdlib.h452
-rw-r--r--tools/include/nolibc/string.h294
-rw-r--r--tools/include/nolibc/sys.h1371
-rw-r--r--tools/include/nolibc/time.h31
-rw-r--r--tools/include/nolibc/types.h233
-rw-r--r--tools/include/nolibc/unistd.h62
-rw-r--r--tools/include/tools/dis-asm-compat.h55
-rw-r--r--tools/include/uapi/asm-generic/fcntl.h31
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h7
-rw-r--r--tools/include/uapi/asm-generic/socket.h2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h68
-rw-r--r--tools/include/uapi/asm/bitsperlong.h2
-rw-r--r--tools/include/uapi/asm/bpf_perf_event.h4
-rw-r--r--tools/include/uapi/asm/errno.h6
-rw-r--r--tools/include/uapi/drm/drm.h278
-rw-r--r--tools/include/uapi/drm/i915_drm.h1854
-rw-r--r--tools/include/uapi/linux/bpf.h3914
-rw-r--r--tools/include/uapi/linux/bpf_perf_event.h1
-rw-r--r--tools/include/uapi/linux/btf.h76
-rw-r--r--tools/include/uapi/linux/const.h5
-rw-r--r--tools/include/uapi/linux/ethtool.h53
-rw-r--r--tools/include/uapi/linux/fcntl.h11
-rw-r--r--tools/include/uapi/linux/filter.h90
-rw-r--r--tools/include/uapi/linux/fs.h6
-rw-r--r--tools/include/uapi/linux/fscrypt.h18
-rw-r--r--tools/include/uapi/linux/hw_breakpoint.h10
-rw-r--r--tools/include/uapi/linux/if_link.h307
-rw-r--r--tools/include/uapi/linux/if_tun.h2
-rw-r--r--tools/include/uapi/linux/if_xdp.h5
-rw-r--r--tools/include/uapi/linux/in.h35
-rw-r--r--tools/include/uapi/linux/kvm.h673
-rw-r--r--tools/include/uapi/linux/lirc.h229
-rw-r--r--tools/include/uapi/linux/mman.h6
-rw-r--r--tools/include/uapi/linux/mount.h21
-rw-r--r--tools/include/uapi/linux/netdev.h61
-rw-r--r--tools/include/uapi/linux/openat2.h4
-rw-r--r--tools/include/uapi/linux/perf_event.h293
-rw-r--r--tools/include/uapi/linux/pkt_cls.h4
-rw-r--r--tools/include/uapi/linux/pkt_sched.h1
-rw-r--r--tools/include/uapi/linux/prctl.h54
-rw-r--r--tools/include/uapi/linux/sched.h5
-rw-r--r--tools/include/uapi/linux/seg6.h4
-rw-r--r--tools/include/uapi/linux/stat.h28
-rw-r--r--tools/include/uapi/linux/stddef.h47
-rw-r--r--tools/include/uapi/linux/tc_act/tc_bpf.h5
-rw-r--r--tools/include/uapi/linux/tcp.h357
-rw-r--r--tools/include/uapi/linux/usbdevice_fs.h4
-rw-r--r--tools/include/uapi/linux/vhost.h78
-rw-r--r--tools/include/uapi/sound/asound.h63
-rw-r--r--tools/include/vdso/bits.h10
-rw-r--r--tools/include/vdso/const.h10
-rw-r--r--tools/io_uring/io_uring-bench.c4
-rw-r--r--tools/io_uring/io_uring-cp.c31
-rw-r--r--tools/io_uring/liburing.h6
-rwxr-xr-xtools/kvm/kvm_stat/kvm_stat193
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.service17
-rw-r--r--tools/kvm/kvm_stat/kvm_stat.txt15
-rw-r--r--tools/lib/api/Makefile68
-rw-r--r--tools/lib/api/fd/array.c40
-rw-r--r--tools/lib/api/fd/array.h18
-rw-r--r--tools/lib/api/fs/cgroup.c95
-rw-r--r--tools/lib/api/fs/fs.c17
-rw-r--r--tools/lib/api/fs/fs.h12
-rw-r--r--tools/lib/api/fs/tracing_path.c20
-rw-r--r--tools/lib/api/fs/tracing_path.h1
-rw-r--r--tools/lib/api/io.h160
-rw-r--r--tools/lib/bitmap.c34
-rw-r--r--tools/lib/bpf/.gitignore2
-rw-r--r--tools/lib/bpf/Build5
-rw-r--r--tools/lib/bpf/Makefile178
-rw-r--r--tools/lib/bpf/README.rst168
-rw-r--r--tools/lib/bpf/bpf.c1016
-rw-r--r--tools/lib/bpf/bpf.h467
-rw-r--r--tools/lib/bpf/bpf_core_read.h347
-rw-r--r--tools/lib/bpf/bpf_endian.h43
-rw-r--r--tools/lib/bpf/bpf_gen_internal.h74
-rw-r--r--tools/lib/bpf/bpf_helpers.h381
-rw-r--r--tools/lib/bpf/bpf_prog_linfo.c21
-rw-r--r--tools/lib/bpf/bpf_tracing.h991
-rw-r--r--tools/lib/bpf/btf.c3828
-rw-r--r--tools/lib/bpf/btf.h387
-rw-r--r--tools/lib/bpf/btf_dump.c1453
-rw-r--r--tools/lib/bpf/gen_loader.c1123
-rw-r--r--tools/lib/bpf/hashmap.c34
-rw-r--r--tools/lib/bpf/hashmap.h134
-rw-r--r--tools/lib/bpf/libbpf.c12250
-rw-r--r--tools/lib/bpf/libbpf.h1657
-rw-r--r--tools/lib/bpf/libbpf.map333
-rw-r--r--tools/lib/bpf/libbpf_common.h35
-rw-r--r--tools/lib/bpf/libbpf_errno.c23
-rw-r--r--tools/lib/bpf/libbpf_internal.h530
-rw-r--r--tools/lib/bpf/libbpf_legacy.h140
-rw-r--r--tools/lib/bpf/libbpf_probes.c387
-rw-r--r--tools/lib/bpf/libbpf_util.h47
-rw-r--r--tools/lib/bpf/libbpf_version.h9
-rw-r--r--tools/lib/bpf/linker.c2905
-rw-r--r--tools/lib/bpf/netlink.c885
-rw-r--r--tools/lib/bpf/nlattr.c15
-rw-r--r--tools/lib/bpf/nlattr.h72
-rw-r--r--tools/lib/bpf/relo_core.c1687
-rw-r--r--tools/lib/bpf/relo_core.h99
-rw-r--r--tools/lib/bpf/ringbuf.c587
-rw-r--r--tools/lib/bpf/skel_internal.h374
-rw-r--r--tools/lib/bpf/strset.c177
-rw-r--r--tools/lib/bpf/strset.h21
-rw-r--r--tools/lib/bpf/usdt.bpf.h250
-rw-r--r--tools/lib/bpf/usdt.c1556
-rw-r--r--tools/lib/bpf/xsk.c812
-rw-r--r--tools/lib/bpf/zip.c333
-rw-r--r--tools/lib/bpf/zip.h47
-rw-r--r--tools/lib/find_bit.c143
-rw-r--r--tools/lib/list_sort.c252
-rw-r--r--tools/lib/lockdep/Build1
-rw-r--r--tools/lib/lockdep/Makefile162
-rw-r--r--tools/lib/lockdep/common.c29
-rw-r--r--tools/lib/lockdep/include/liblockdep/common.h54
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h73
-rw-r--r--tools/lib/lockdep/include/liblockdep/rwlock.h87
-rwxr-xr-xtools/lib/lockdep/lockdep3
-rw-r--r--tools/lib/lockdep/lockdep.c33
-rw-r--r--tools/lib/lockdep/lockdep_internals.h1
-rw-r--r--tools/lib/lockdep/lockdep_states.h1
-rw-r--r--tools/lib/lockdep/preload.c443
-rw-r--r--tools/lib/lockdep/rbtree.c1
-rwxr-xr-xtools/lib/lockdep/run_tests.sh47
-rw-r--r--tools/lib/lockdep/tests/AA.c14
-rw-r--r--tools/lib/lockdep/tests/AA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABA.c14
-rw-r--r--tools/lib/lockdep/tests/ABA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBA.c26
-rw-r--r--tools/lib/lockdep/tests/ABBA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBA_2threads.c47
-rw-r--r--tools/lib/lockdep/tests/ABBA_2threads.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBCCA.c20
-rw-r--r--tools/lib/lockdep/tests/ABBCCA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABBCCDDA.c23
-rw-r--r--tools/lib/lockdep/tests/ABBCCDDA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCABC.c20
-rw-r--r--tools/lib/lockdep/tests/ABCABC.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCDBCDA.c23
-rw-r--r--tools/lib/lockdep/tests/ABCDBCDA.sh2
-rw-r--r--tools/lib/lockdep/tests/ABCDBDDA.c23
-rw-r--r--tools/lib/lockdep/tests/ABCDBDDA.sh2
-rw-r--r--tools/lib/lockdep/tests/WW.c14
-rw-r--r--tools/lib/lockdep/tests/WW.sh2
-rw-r--r--tools/lib/lockdep/tests/common.h13
-rw-r--r--tools/lib/lockdep/tests/unlock_balance.c15
-rw-r--r--tools/lib/lockdep/tests/unlock_balance.sh2
-rw-r--r--tools/lib/perf/Build2
-rw-r--r--tools/lib/perf/Documentation/libperf-counting.txt14
-rw-r--r--tools/lib/perf/Documentation/libperf-sampling.txt13
-rw-r--r--tools/lib/perf/Documentation/libperf.txt23
-rw-r--r--tools/lib/perf/Makefile67
-rw-r--r--tools/lib/perf/cpumap.c215
-rw-r--r--tools/lib/perf/evlist.c195
-rw-r--r--tools/lib/perf/evsel.c336
-rw-r--r--tools/lib/perf/include/internal/cpumap.h25
-rw-r--r--tools/lib/perf/include/internal/evlist.h19
-rw-r--r--tools/lib/perf/include/internal/evsel.h25
-rw-r--r--tools/lib/perf/include/internal/lib.h2
-rw-r--r--tools/lib/perf/include/internal/mmap.h8
-rw-r--r--tools/lib/perf/include/internal/rc_check.h102
-rw-r--r--tools/lib/perf/include/internal/tests.h36
-rw-r--r--tools/lib/perf/include/internal/xyarray.h9
-rw-r--r--tools/lib/perf/include/perf/bpf_perf.h31
-rw-r--r--tools/lib/perf/include/perf/cpumap.h14
-rw-r--r--tools/lib/perf/include/perf/event.h140
-rw-r--r--tools/lib/perf/include/perf/evlist.h2
-rw-r--r--tools/lib/perf/include/perf/evsel.h20
-rw-r--r--tools/lib/perf/include/perf/threadmap.h7
-rw-r--r--tools/lib/perf/lib.c20
-rw-r--r--tools/lib/perf/libperf.map8
-rw-r--r--tools/lib/perf/mmap.c186
-rw-r--r--tools/lib/perf/tests/Build5
-rw-r--r--tools/lib/perf/tests/Makefile38
-rw-r--r--tools/lib/perf/tests/main.c15
-rw-r--r--tools/lib/perf/tests/test-cpumap.c16
-rw-r--r--tools/lib/perf/tests/test-evlist.c212
-rw-r--r--tools/lib/perf/tests/test-evsel.c242
-rw-r--r--tools/lib/perf/tests/test-threadmap.c46
-rw-r--r--tools/lib/perf/tests/tests.h10
-rw-r--r--tools/lib/perf/threadmap.c36
-rw-r--r--tools/lib/rbtree.c2
-rw-r--r--tools/lib/slab.c38
-rw-r--r--tools/lib/string.c58
-rw-r--r--tools/lib/subcmd/Makefile54
-rw-r--r--tools/lib/subcmd/exec-cmd.c3
-rw-r--r--tools/lib/subcmd/help.c10
-rw-r--r--tools/lib/subcmd/parse-options.c20
-rw-r--r--tools/lib/subcmd/parse-options.h3
-rw-r--r--tools/lib/subcmd/subcmd-util.h11
-rw-r--r--tools/lib/symbol/Build1
-rw-r--r--tools/lib/symbol/Makefile122
-rw-r--r--tools/lib/symbol/kallsyms.c86
-rw-r--r--tools/lib/symbol/kallsyms.h4
-rw-r--r--tools/lib/thermal/.gitignore2
-rw-r--r--tools/lib/thermal/Build5
-rw-r--r--tools/lib/thermal/Makefile165
-rw-r--r--tools/lib/thermal/commands.c349
-rw-r--r--tools/lib/thermal/events.c164
-rw-r--r--tools/lib/thermal/include/thermal.h142
-rw-r--r--tools/lib/thermal/libthermal.map25
-rw-r--r--tools/lib/thermal/libthermal.pc.template12
-rw-r--r--tools/lib/thermal/sampling.c75
-rw-r--r--tools/lib/thermal/thermal.c135
-rw-r--r--tools/lib/thermal/thermal_nl.c215
-rw-r--r--tools/lib/thermal/thermal_nl.h46
-rw-r--r--tools/lib/traceevent/.gitignore4
-rw-r--r--tools/lib/traceevent/Build8
-rw-r--r--tools/lib/traceevent/Documentation/Makefile207
-rw-r--r--tools/lib/traceevent/Documentation/asciidoc.conf120
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-commands.txt153
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-cpus.txt77
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt78
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_find.txt103
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_get.txt99
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_list.txt122
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-event_print.txt130
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_find.txt118
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt122
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_print.txt126
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-field_read.txt81
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-fields.txt105
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt91
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-filter.txt209
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt183
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-func_find.txt88
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-handle.txt101
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-header_page.txt102
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt104
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-long_size.txt78
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-page_size.txt82
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt90
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt82
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-plugins.txt99
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt137
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt156
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt155
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt104
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-strerror.txt85
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent-tseq.txt158
-rw-r--r--tools/lib/traceevent/Documentation/libtraceevent.txt192
-rw-r--r--tools/lib/traceevent/Documentation/manpage-1.72.xsl14
-rw-r--r--tools/lib/traceevent/Documentation/manpage-base.xsl35
-rw-r--r--tools/lib/traceevent/Documentation/manpage-bold-literal.xsl17
-rw-r--r--tools/lib/traceevent/Documentation/manpage-normal.xsl13
-rw-r--r--tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl21
-rw-r--r--tools/lib/traceevent/Makefile300
-rw-r--r--tools/lib/traceevent/event-parse-api.c333
-rw-r--r--tools/lib/traceevent/event-parse-local.h93
-rw-r--r--tools/lib/traceevent/event-parse.c7075
-rw-r--r--tools/lib/traceevent/event-parse.h753
-rw-r--r--tools/lib/traceevent/event-plugin.c446
-rw-r--r--tools/lib/traceevent/event-utils.h67
-rw-r--r--tools/lib/traceevent/kbuffer-parse.c778
-rw-r--r--tools/lib/traceevent/kbuffer.h81
-rw-r--r--tools/lib/traceevent/libtraceevent.pc.template10
-rw-r--r--tools/lib/traceevent/parse-filter.c2274
-rw-r--r--tools/lib/traceevent/parse-utils.c71
-rw-r--r--tools/lib/traceevent/plugins/Build10
-rw-r--r--tools/lib/traceevent/plugins/Makefile223
-rw-r--r--tools/lib/traceevent/plugins/plugin_cfg80211.c43
-rw-r--r--tools/lib/traceevent/plugins/plugin_function.c195
-rw-r--r--tools/lib/traceevent/plugins/plugin_hrtimer.c89
-rw-r--r--tools/lib/traceevent/plugins/plugin_jbd2.c76
-rw-r--r--tools/lib/traceevent/plugins/plugin_kmem.c95
-rw-r--r--tools/lib/traceevent/plugins/plugin_kvm.c523
-rw-r--r--tools/lib/traceevent/plugins/plugin_mac80211.c103
-rw-r--r--tools/lib/traceevent/plugins/plugin_sched_switch.c161
-rw-r--r--tools/lib/traceevent/plugins/plugin_scsi.c434
-rw-r--r--tools/lib/traceevent/plugins/plugin_xen.c138
-rw-r--r--tools/lib/traceevent/tep_strerror.c53
-rw-r--r--tools/lib/traceevent/trace-seq.c249
-rw-r--r--tools/lib/traceevent/trace-seq.h55
-rw-r--r--tools/memory-model/Documentation/README76
-rw-r--r--tools/memory-model/Documentation/access-marking.txt598
-rw-r--r--tools/memory-model/Documentation/cheatsheet.txt33
-rw-r--r--tools/memory-model/Documentation/control-dependencies.txt258
-rw-r--r--tools/memory-model/Documentation/explanation.txt430
-rw-r--r--tools/memory-model/Documentation/glossary.txt178
-rw-r--r--tools/memory-model/Documentation/litmus-tests.txt1083
-rw-r--r--tools/memory-model/Documentation/locking.txt298
-rw-r--r--tools/memory-model/Documentation/ordering.txt556
-rw-r--r--tools/memory-model/Documentation/recipes.txt6
-rw-r--r--tools/memory-model/Documentation/references.txt23
-rw-r--r--tools/memory-model/Documentation/simple.txt270
-rw-r--r--tools/memory-model/README203
-rw-r--r--tools/memory-model/linux-kernel.bell34
-rw-r--r--tools/memory-model/linux-kernel.cat27
-rw-r--r--tools/memory-model/linux-kernel.def7
-rw-r--r--tools/memory-model/litmus-tests/.gitignore2
-rw-r--r--tools/memory-model/litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus35
-rw-r--r--tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus14
-rw-r--r--tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus13
-rw-r--r--tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus9
-rw-r--r--tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus9
-rw-r--r--tools/memory-model/litmus-tests/MP+polocks.litmus14
-rw-r--r--tools/memory-model/litmus-tests/MP+poonceonces.litmus14
-rw-r--r--tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus14
-rw-r--r--tools/memory-model/litmus-tests/MP+porevlocks.litmus14
-rw-r--r--tools/memory-model/litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus33
-rw-r--r--tools/memory-model/litmus-tests/README8
-rw-r--r--tools/memory-model/litmus-tests/dep+plain.litmus31
-rw-r--r--tools/memory-model/lock.cat6
-rw-r--r--tools/memory-model/scripts/README48
-rwxr-xr-xtools/memory-model/scripts/checkalllitmus.sh29
-rwxr-xr-xtools/memory-model/scripts/checkghlitmus.sh15
-rwxr-xr-xtools/memory-model/scripts/checklitmus.sh25
-rwxr-xr-xtools/memory-model/scripts/checklitmushist.sh2
-rwxr-xr-xtools/memory-model/scripts/checktheselitmus.sh43
-rwxr-xr-xtools/memory-model/scripts/cmplitmushist.sh49
-rwxr-xr-xtools/memory-model/scripts/hwfnseg.sh20
-rwxr-xr-xtools/memory-model/scripts/initlitmushist.sh2
-rwxr-xr-xtools/memory-model/scripts/judgelitmus.sh120
-rwxr-xr-xtools/memory-model/scripts/newlitmushist.sh4
-rwxr-xr-xtools/memory-model/scripts/parseargs.sh21
-rwxr-xr-xtools/memory-model/scripts/runlitmus.sh80
-rwxr-xr-xtools/memory-model/scripts/runlitmushist.sh29
-rwxr-xr-xtools/memory-model/scripts/simpletest.sh35
-rw-r--r--tools/mm/.gitignore (renamed from tools/vm/.gitignore)1
-rw-r--r--tools/mm/Makefile (renamed from tools/vm/Makefile)6
-rw-r--r--tools/mm/page-types.c (renamed from tools/vm/page-types.c)56
-rw-r--r--tools/mm/page_owner_sort.c897
-rw-r--r--tools/mm/slabinfo-gnuplot.sh (renamed from tools/vm/slabinfo-gnuplot.sh)4
-rw-r--r--tools/mm/slabinfo.c (renamed from tools/vm/slabinfo.c)64
-rwxr-xr-xtools/net/ynl/cli.py52
-rwxr-xr-xtools/net/ynl/ethtool.py424
-rw-r--r--tools/net/ynl/lib/.gitignore1
-rw-r--r--tools/net/ynl/lib/__init__.py8
-rw-r--r--tools/net/ynl/lib/nlspec.py484
-rw-r--r--tools/net/ynl/lib/ynl.py607
-rw-r--r--tools/net/ynl/requirements.txt2
-rwxr-xr-xtools/net/ynl/ynl-gen-c.py2305
-rwxr-xr-xtools/net/ynl/ynl-regen.sh30
-rwxr-xr-xtools/nfsd/inject_fault.sh50
-rw-r--r--tools/objtool/.gitignore3
-rw-r--r--tools/objtool/Build15
-rw-r--r--tools/objtool/Documentation/objtool.txt (renamed from tools/objtool/Documentation/stack-validation.txt)175
-rw-r--r--tools/objtool/Makefile91
-rw-r--r--tools/objtool/arch/powerpc/Build2
-rw-r--r--tools/objtool/arch/powerpc/decode.c108
-rw-r--r--tools/objtool/arch/powerpc/include/arch/cfi_regs.h11
-rw-r--r--tools/objtool/arch/powerpc/include/arch/elf.h10
-rw-r--r--tools/objtool/arch/powerpc/include/arch/special.h21
-rw-r--r--tools/objtool/arch/powerpc/special.c19
-rw-r--r--tools/objtool/arch/x86/Build1
-rw-r--r--tools/objtool/arch/x86/decode.c775
-rw-r--r--tools/objtool/arch/x86/include/arch/cfi_regs.h25
-rw-r--r--tools/objtool/arch/x86/include/arch/elf.h8
-rw-r--r--tools/objtool/arch/x86/include/arch/special.h21
-rw-r--r--tools/objtool/arch/x86/special.c145
-rw-r--r--tools/objtool/builtin-check.c237
-rw-r--r--tools/objtool/builtin-orc.c56
-rw-r--r--tools/objtool/builtin.h16
-rw-r--r--tools/objtool/check.c3808
-rw-r--r--tools/objtool/check.h72
-rw-r--r--tools/objtool/elf.c1188
-rw-r--r--tools/objtool/elf.h134
-rw-r--r--tools/objtool/include/objtool/arch.h (renamed from tools/objtool/arch.h)36
-rw-r--r--tools/objtool/include/objtool/builtin.h48
-rw-r--r--tools/objtool/include/objtool/cfi.h (renamed from tools/objtool/cfi.h)38
-rw-r--r--tools/objtool/include/objtool/check.h124
-rw-r--r--tools/objtool/include/objtool/elf.h200
-rw-r--r--tools/objtool/include/objtool/endianness.h38
-rw-r--r--tools/objtool/include/objtool/objtool.h50
-rw-r--r--tools/objtool/include/objtool/special.h (renamed from tools/objtool/special.h)13
-rw-r--r--tools/objtool/include/objtool/warn.h (renamed from tools/objtool/warn.h)40
-rw-r--r--tools/objtool/objtool.c172
-rw-r--r--tools/objtool/orc.h18
-rw-r--r--tools/objtool/orc_dump.c75
-rw-r--r--tools/objtool/orc_gen.c356
-rw-r--r--tools/objtool/special.c121
-rwxr-xr-xtools/objtool/sync-check.sh41
-rw-r--r--tools/objtool/weak.c26
-rw-r--r--tools/pci/Makefile2
-rw-r--r--tools/pci/pcitest.c2
-rw-r--r--tools/perf/.gitignore19
-rw-r--r--tools/perf/Build18
-rw-r--r--tools/perf/Documentation/Makefile80
-rw-r--r--tools/perf/Documentation/arm-coresight.txt5
-rwxr-xr-xtools/perf/Documentation/build-docdep.perl46
-rwxr-xr-xtools/perf/Documentation/cat-texi.perl46
-rw-r--r--tools/perf/Documentation/examples.txt2
-rw-r--r--tools/perf/Documentation/guest-files.txt16
-rw-r--r--tools/perf/Documentation/guestmount.txt11
-rw-r--r--tools/perf/Documentation/intel-hybrid.txt204
-rw-r--r--tools/perf/Documentation/itrace.txt38
-rw-r--r--tools/perf/Documentation/jitdump-specification.txt2
-rw-r--r--tools/perf/Documentation/perf-annotate.txt24
-rw-r--r--tools/perf/Documentation/perf-arm-spe.txt218
-rw-r--r--tools/perf/Documentation/perf-bench.txt21
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt11
-rw-r--r--tools/perf/Documentation/perf-buildid-list.txt4
-rw-r--r--tools/perf/Documentation/perf-c2c.txt110
-rw-r--r--tools/perf/Documentation/perf-config.txt80
-rw-r--r--tools/perf/Documentation/perf-daemon.txt208
-rw-r--r--tools/perf/Documentation/perf-data.txt8
-rw-r--r--tools/perf/Documentation/perf-diff.txt6
-rw-r--r--tools/perf/Documentation/perf-dlfilter.txt281
-rw-r--r--tools/perf/Documentation/perf-evlist.txt2
-rw-r--r--tools/perf/Documentation/perf-ftrace.txt129
-rw-r--r--tools/perf/Documentation/perf-inject.txt53
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt941
-rw-r--r--tools/perf/Documentation/perf-iostat.txt88
-rw-r--r--tools/perf/Documentation/perf-kallsyms.txt2
-rw-r--r--tools/perf/Documentation/perf-kmem.txt13
-rw-r--r--tools/perf/Documentation/perf-kvm.txt31
-rw-r--r--tools/perf/Documentation/perf-kwork.txt180
-rw-r--r--tools/perf/Documentation/perf-list.txt68
-rw-r--r--tools/perf/Documentation/perf-lock.txt140
-rw-r--r--tools/perf/Documentation/perf-mem.txt13
-rw-r--r--tools/perf/Documentation/perf-probe.txt23
-rw-r--r--tools/perf/Documentation/perf-record.txt272
-rw-r--r--tools/perf/Documentation/perf-report.txt43
-rw-r--r--tools/perf/Documentation/perf-script-perl.txt4
-rw-r--r--tools/perf/Documentation/perf-script-python.txt54
-rw-r--r--tools/perf/Documentation/perf-script.txt91
-rw-r--r--tools/perf/Documentation/perf-stat.txt230
-rw-r--r--tools/perf/Documentation/perf-test.txt3
-rw-r--r--tools/perf/Documentation/perf-top.txt60
-rw-r--r--tools/perf/Documentation/perf-trace.txt4
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt76
-rw-r--r--tools/perf/Documentation/perf.txt81
-rw-r--r--tools/perf/Documentation/security.txt237
-rw-r--r--tools/perf/Documentation/topdown.txt332
-rw-r--r--tools/perf/MANIFEST10
-rw-r--r--tools/perf/Makefile7
-rw-r--r--tools/perf/Makefile.config432
-rw-r--r--tools/perf/Makefile.perf413
-rw-r--r--tools/perf/arch/arm/include/arch-tests.h7
-rw-r--r--tools/perf/arch/arm/include/perf_regs.h42
-rw-r--r--tools/perf/arch/arm/tests/arch-tests.c16
-rw-r--r--tools/perf/arch/arm/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/arch/arm/tests/vectors-page.c5
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c166
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c675
-rw-r--r--tools/perf/arch/arm/util/perf_regs.c2
-rw-r--r--tools/perf/arch/arm/util/pmu.c7
-rw-r--r--tools/perf/arch/arm/util/unwind-libdw.c6
-rw-r--r--tools/perf/arch/arm/util/unwind-libunwind.c4
-rw-r--r--tools/perf/arch/arm64/Makefile1
-rw-r--r--tools/perf/arch/arm64/annotate/instructions.c2
-rwxr-xr-xtools/perf/arch/arm64/entry/syscalls/mksyscalltbl25
-rw-r--r--tools/perf/arch/arm64/include/arch-tests.h7
-rw-r--r--tools/perf/arch/arm64/include/perf_regs.h78
-rw-r--r--tools/perf/arch/arm64/tests/arch-tests.c11
-rw-r--r--tools/perf/arch/arm64/tests/dwarf-unwind.c2
-rw-r--r--tools/perf/arch/arm64/util/Build5
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c329
-rw-r--r--tools/perf/arch/arm64/util/arm64_exception_types.h92
-rw-r--r--tools/perf/arch/arm64/util/hisi-ptt.c188
-rw-r--r--tools/perf/arch/arm64/util/kvm-stat.c84
-rw-r--r--tools/perf/arch/arm64/util/machine.c24
-rw-r--r--tools/perf/arch/arm64/util/mem-events.c37
-rw-r--r--tools/perf/arch/arm64/util/perf_regs.c165
-rw-r--r--tools/perf/arch/arm64/util/pmu.c66
-rw-r--r--tools/perf/arch/arm64/util/tsc.c21
-rw-r--r--tools/perf/arch/arm64/util/unwind-libdw.c6
-rw-r--r--tools/perf/arch/arm64/util/unwind-libunwind.c77
-rw-r--r--tools/perf/arch/common.c6
-rw-r--r--tools/perf/arch/common.h2
-rw-r--r--tools/perf/arch/csky/include/perf_regs.h82
-rw-r--r--tools/perf/arch/loongarch/Build (renamed from tools/perf/arch/nds32/Build)0
-rw-r--r--tools/perf/arch/loongarch/Makefile28
-rw-r--r--tools/perf/arch/loongarch/annotate/instructions.c45
-rwxr-xr-xtools/perf/arch/loongarch/entry/syscalls/mksyscalltbl61
-rw-r--r--tools/perf/arch/loongarch/include/dwarf-regs-table.h16
-rw-r--r--tools/perf/arch/loongarch/include/perf_regs.h15
-rw-r--r--tools/perf/arch/loongarch/util/Build5
-rw-r--r--tools/perf/arch/loongarch/util/dwarf-regs.c44
-rw-r--r--tools/perf/arch/loongarch/util/perf_regs.c6
-rw-r--r--tools/perf/arch/loongarch/util/unwind-libdw.c56
-rw-r--r--tools/perf/arch/loongarch/util/unwind-libunwind.c82
-rw-r--r--tools/perf/arch/mips/Build2
-rw-r--r--tools/perf/arch/mips/Makefile22
-rw-r--r--tools/perf/arch/mips/annotate/instructions.c46
-rw-r--r--tools/perf/arch/mips/entry/syscalls/mksyscalltbl32
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl367
-rw-r--r--tools/perf/arch/mips/include/dwarf-regs-table.h31
-rw-r--r--tools/perf/arch/mips/include/perf_regs.h15
-rw-r--r--tools/perf/arch/mips/util/Build3
-rw-r--r--tools/perf/arch/mips/util/dwarf-regs.c38
-rw-r--r--tools/perf/arch/mips/util/perf_regs.c6
-rw-r--r--tools/perf/arch/mips/util/unwind-libunwind.c22
-rw-r--r--tools/perf/arch/nds32/util/Build1
-rw-r--r--tools/perf/arch/nds32/util/header.c29
-rw-r--r--tools/perf/arch/powerpc/Makefile7
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl90
-rw-r--r--tools/perf/arch/powerpc/include/arch-tests.h9
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h52
-rw-r--r--tools/perf/arch/powerpc/tests/arch-tests.c12
-rw-r--r--tools/perf/arch/powerpc/tests/dwarf-unwind.c3
-rw-r--r--tools/perf/arch/powerpc/util/Build5
-rw-r--r--tools/perf/arch/powerpc/util/book3s_hcalls.h2
-rw-r--r--tools/perf/arch/powerpc/util/event.c60
-rw-r--r--tools/perf/arch/powerpc/util/evsel.c8
-rw-r--r--tools/perf/arch/powerpc/util/header.c20
-rw-r--r--tools/perf/arch/powerpc/util/kvm-stat.c16
-rw-r--r--tools/perf/arch/powerpc/util/mem-events.c2
-rw-r--r--tools/perf/arch/powerpc/util/perf_regs.c63
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c6
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c4
-rw-r--r--tools/perf/arch/powerpc/util/unwind-libdw.c6
-rw-r--r--tools/perf/arch/powerpc/util/utils_header.h15
-rw-r--r--tools/perf/arch/riscv/Makefile1
-rw-r--r--tools/perf/arch/riscv/include/perf_regs.h74
-rw-r--r--tools/perf/arch/riscv/util/Build1
-rw-r--r--tools/perf/arch/riscv/util/header.c104
-rw-r--r--tools/perf/arch/riscv/util/unwind-libdw.c2
-rw-r--r--tools/perf/arch/riscv64/annotate/instructions.c34
-rw-r--r--tools/perf/arch/s390/Makefile4
-rw-r--r--tools/perf/arch/s390/annotate/instructions.c2
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl427
-rw-r--r--tools/perf/arch/s390/include/perf_regs.h78
-rw-r--r--tools/perf/arch/s390/util/Build3
-rw-r--r--tools/perf/arch/s390/util/auxtrace.c1
-rw-r--r--tools/perf/arch/s390/util/dwarf-regs.c3
-rw-r--r--tools/perf/arch/s390/util/kvm-stat.c9
-rw-r--r--tools/perf/arch/s390/util/machine.c17
-rw-r--r--tools/perf/arch/s390/util/pmu.c23
-rw-r--r--tools/perf/arch/s390/util/unwind-libdw.c1
-rw-r--r--tools/perf/arch/x86/Makefile11
-rw-r--r--tools/perf/arch/x86/annotate/instructions.c29
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl763
-rw-r--r--tools/perf/arch/x86/include/arch-tests.h23
-rw-r--r--tools/perf/arch/x86/include/perf_regs.h82
-rw-r--r--tools/perf/arch/x86/tests/Build5
-rw-r--r--tools/perf/arch/x86/tests/arch-tests.c56
-rw-r--r--tools/perf/arch/x86/tests/bp-modify.c4
-rw-r--r--tools/perf/arch/x86/tests/dwarf-unwind.c18
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-32.c918
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-64.c1426
-rw-r--r--tools/perf/arch/x86/tests/insn-x86-dat-src.c1189
-rw-r--r--tools/perf/arch/x86/tests/insn-x86.c18
-rw-r--r--tools/perf/arch/x86/tests/intel-cqm.c6
-rw-r--r--tools/perf/arch/x86/tests/intel-pt-test.c (renamed from tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c)177
-rw-r--r--tools/perf/arch/x86/tests/rdpmc.c182
-rw-r--r--tools/perf/arch/x86/tests/sample-parsing.c125
-rw-r--r--tools/perf/arch/x86/util/Build8
-rw-r--r--tools/perf/arch/x86/util/archinsn.c11
-rw-r--r--tools/perf/arch/x86/util/auxtrace.c4
-rw-r--r--tools/perf/arch/x86/util/cpuid.h34
-rw-r--r--tools/perf/arch/x86/util/event.c63
-rw-r--r--tools/perf/arch/x86/util/evlist.c86
-rw-r--r--tools/perf/arch/x86/util/evsel.c135
-rw-r--r--tools/perf/arch/x86/util/evsel.h7
-rw-r--r--tools/perf/arch/x86/util/group.c28
-rw-r--r--tools/perf/arch/x86/util/header.c27
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c11
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c207
-rw-r--r--tools/perf/arch/x86/util/iostat.c471
-rw-r--r--tools/perf/arch/x86/util/kvm-stat.c69
-rw-r--r--tools/perf/arch/x86/util/mem-events.c107
-rw-r--r--tools/perf/arch/x86/util/perf_regs.c16
-rw-r--r--tools/perf/arch/x86/util/pmu.c156
-rw-r--r--tools/perf/arch/x86/util/topdown.c50
-rw-r--r--tools/perf/arch/x86/util/topdown.h7
-rw-r--r--tools/perf/arch/x86/util/tsc.c131
-rw-r--r--tools/perf/arch/x86/util/unwind-libdw.c6
-rw-r--r--tools/perf/bench/Build10
-rw-r--r--tools/perf/bench/bench.h25
-rw-r--r--tools/perf/bench/breakpoint.c244
-rw-r--r--tools/perf/bench/epoll-ctl.c67
-rw-r--r--tools/perf/bench/epoll-wait.c72
-rw-r--r--tools/perf/bench/evlist-open-close.c265
-rw-r--r--tools/perf/bench/find-bit-bench.c139
-rw-r--r--tools/perf/bench/futex-hash.c125
-rw-r--r--tools/perf/bench/futex-lock-pi.c119
-rw-r--r--tools/perf/bench/futex-requeue.c215
-rw-r--r--tools/perf/bench/futex-wake-parallel.c122
-rw-r--r--tools/perf/bench/futex-wake.c120
-rw-r--r--tools/perf/bench/futex.h87
-rw-r--r--tools/perf/bench/inject-buildid.c485
-rw-r--r--tools/perf/bench/kallsyms-parse.c75
-rw-r--r--tools/perf/bench/mem-functions.c21
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-asm.S3
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-lib.c24
-rw-r--r--tools/perf/bench/mem-memset-x86-64-asm.S3
-rw-r--r--tools/perf/bench/numa.c376
-rw-r--r--tools/perf/bench/pmu-scan.c184
-rw-r--r--tools/perf/bench/sched-messaging.c10
-rw-r--r--tools/perf/bench/sched-pipe.c4
-rw-r--r--tools/perf/bench/synthesize.c262
-rw-r--r--tools/perf/bench/syscall.c184
-rw-r--r--tools/perf/builtin-annotate.c187
-rw-r--r--tools/perf/builtin-bench.c43
-rw-r--r--tools/perf/builtin-buildid-cache.c56
-rw-r--r--tools/perf/builtin-buildid-list.c49
-rw-r--r--tools/perf/builtin-c2c.c962
-rw-r--r--tools/perf/builtin-daemon.c1532
-rw-r--r--tools/perf/builtin-data.c95
-rw-r--r--tools/perf/builtin-diff.c148
-rw-r--r--tools/perf/builtin-evlist.c22
-rw-r--r--tools/perf/builtin-ftrace.c933
-rw-r--r--tools/perf/builtin-help.c1
-rw-r--r--tools/perf/builtin-inject.c1723
-rw-r--r--tools/perf/builtin-kallsyms.c6
-rw-r--r--tools/perf/builtin-kmem.c142
-rw-r--r--tools/perf/builtin-kvm.c943
-rw-r--r--tools/perf/builtin-kwork.c1839
-rw-r--r--tools/perf/builtin-list.c575
-rw-r--r--tools/perf/builtin-lock.c1764
-rw-r--r--tools/perf/builtin-mem.c301
-rw-r--r--tools/perf/builtin-probe.c53
-rw-r--r--tools/perf/builtin-record.c2191
-rw-r--r--tools/perf/builtin-report.c349
-rw-r--r--tools/perf/builtin-sched.c420
-rw-r--r--tools/perf/builtin-script.c1414
-rw-r--r--tools/perf/builtin-stat.c1690
-rw-r--r--tools/perf/builtin-timechart.c129
-rw-r--r--tools/perf/builtin-top.c272
-rw-r--r--tools/perf/builtin-trace.c576
-rw-r--r--tools/perf/builtin-version.c12
-rw-r--r--tools/perf/builtin.h5
-rwxr-xr-xtools/perf/check-headers.sh55
-rw-r--r--tools/perf/command-list.txt11
-rw-r--r--tools/perf/design.txt6
-rw-r--r--tools/perf/dlfilters/dlfilter-show-cycles.c144
-rw-r--r--tools/perf/dlfilters/dlfilter-test-api-v0.c334
-rw-r--r--tools/perf/examples/bpf/5sec.c8
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c179
-rw-r--r--tools/perf/examples/bpf/augmented_syscalls.c169
-rw-r--r--tools/perf/examples/bpf/empty.c13
-rw-r--r--tools/perf/examples/bpf/etcsnoop.c76
-rw-r--r--tools/perf/examples/bpf/hello.c24
-rw-r--r--tools/perf/include/bpf/bpf.h70
-rw-r--r--tools/perf/include/bpf/linux/socket.h24
-rw-r--r--tools/perf/include/bpf/pid_filter.h21
-rw-r--r--tools/perf/include/bpf/stdio.h16
-rw-r--r--tools/perf/include/bpf/unistd.h10
-rw-r--r--tools/perf/include/perf/perf_dlfilter.h158
-rw-r--r--tools/perf/jvmti/jvmti_agent.c4
-rw-r--r--tools/perf/jvmti/libjvmti.c92
-rw-r--r--tools/perf/perf-archive.sh3
-rw-r--r--tools/perf/perf-completion.sh11
-rw-r--r--tools/perf/perf-iostat.sh12
-rw-r--r--tools/perf/perf-sys.h22
-rw-r--r--tools/perf/perf-with-kcore.sh247
-rw-r--r--tools/perf/perf.c75
-rw-r--r--tools/perf/perf.h9
-rw-r--r--tools/perf/pmu-events/Build34
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json5
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json58
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json4
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json34
-rw-r--r--tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json11
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a34/branch.json11
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a34/bus.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a34/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a34/exception.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a34/instruction.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a34/memory.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a35/branch.json11
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a35/bus.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a35/cache.json32
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a35/exception.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a35/instruction.json44
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a35/memory.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/branch.json59
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/bus.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/cache.json182
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/exception.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/instruction.json95
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/memory.json32
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pipeline.json107
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pmu.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a510/trace.json32
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a55/branch.json59
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a55/bus.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a55/cache.json188
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a55/exception.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a55/instruction.json65
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a55/memory.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a55/pipeline.json80
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/branch.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/bus.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/cache.json80
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json179
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/exception.json (renamed from tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json)33
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/instruction.json68
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/memory.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/branch.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/bus.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/cache.json236
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/dpu.json32
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/exception.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/ifu.json122
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/instruction.json71
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/memory.json35
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/pipeline.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a710/branch.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a710/bus.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a710/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a710/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a710/instruction.json134
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a710/memory.json41
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a710/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a710/trace.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/branch.json11
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/bus.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/cache.json107
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/etm.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/exception.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/instruction.json65
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/memory.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/mmu.json44
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a73/pipeline.json38
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/branch.json11
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/bus.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/cache.json164
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/etm.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/exception.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/instruction.json74
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/memory.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/mmu.json44
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a75/pipeline.json44
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json24
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json207
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json108
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json7
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76/branch.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76/bus.json21
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76/cache.json169
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76/exception.json48
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76/instruction.json91
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76/memory.json (renamed from tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json)9
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a76/pipeline.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a77/branch.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a77/bus.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a77/cache.json143
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a77/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a77/instruction.json77
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a77/memory.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a77/pipeline.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a78/branch.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a78/bus.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a78/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a78/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a78/instruction.json80
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a78/memory.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-a78/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x1/branch.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x1/bus.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x1/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x1/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x1/instruction.json80
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x1/memory.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x1/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x2/branch.json17
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x2/bus.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x2/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x2/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x2/instruction.json134
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x2/memory.json41
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x2/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/cortex-x2/trace.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/bus.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/exception.json62
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/general.json6
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l1d_cache.json50
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l1i_cache.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l2_cache.json46
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l3_cache.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/ll_cache.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/memory.json22
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/metrics.json219
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/retired.json26
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/spe.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/spec_operation.json102
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/stall.json10
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/tlb.json66
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/instruction.json143
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json41
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json273
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json119
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/common-and-microarch.json812
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/ddrc.json39
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/metrics.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/ddrc.json37
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/metrics.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/ddrc.json37
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/metrics.json466
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/ddrc.json37
-rw-r--r--tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/metrics.json18
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/bus.json62
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cache.json128
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cycle.json5
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/exception.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/instruction.json131
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/memory.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/other.json188
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/pipeline.json194
-rw-r--r--tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/sve.json110
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json233
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json32
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json120
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json52
-rw-r--r--tools/perf/pmu-events/arch/arm64/hisilicon/hip09/sys/uncore-cpa.json81
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv23
-rw-r--r--tools/perf/pmu-events/arch/arm64/recommended.json (renamed from tools/perf/pmu-events/arch/arm64/armv8-recommended.json) 202
-rw-r--r--tools/perf/pmu-events/arch/arm64/sbsa.json30
-rw-r--r--tools/perf/pmu-events/arch/nds32/n13/atcpmu.json2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/cache.json57
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/floating_point.json7
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/frontend.json247
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/locks.json12
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/marked.json142
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/memory.json187
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/metrics.json676
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json424
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/others.json272
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pipeline.json292
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/pmc.json22
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power10/translation.json57
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/cache.json10
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/frontend.json12
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/marked.json10
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/metrics.json14
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json18
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/translation.json2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/metrics.json323
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/nest_metrics.json63
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/other.json4
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power9/pipeline.json2
-rw-r--r--tools/perf/pmu-events/arch/riscv/mapfile.csv17
-rw-r--r--tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json134
-rw-r--r--tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json68
-rw-r--r--tools/perf/pmu-events/arch/riscv/sifive/u74/instructions.json92
-rw-r--r--tools/perf/pmu-events/arch/riscv/sifive/u74/memory.json32
-rw-r--r--tools/perf/pmu-events/arch/riscv/sifive/u74/microarch.json57
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/basic.json50
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/crypto.json66
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z10/extended.json38
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/basic.json50
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/crypto.json66
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/extended.json102
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z13/transaction.json70
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/basic.json34
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/crypto.json66
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/extended.json104
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z14/transaction.json65
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/basic.json34
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/crypto.json114
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json114
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/extended.json112
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z15/transaction.json65
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/basic.json58
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json142
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/extended.json492
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json1101
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/pai_ext.json178
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z16/transaction.json72
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/basic.json50
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/crypto.json66
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_z196/extended.json46
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/basic.json50
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json66
-rw-r--r--tools/perf/pmu-events/arch/s390/cf_zec12/extended.json68
-rw-r--r--tools/perf/pmu-events/arch/s390/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/test/arch-std-events.json8
-rw-r--r--tools/perf/pmu-events/arch/test/test_cpu/uncore.json21
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/cpu/branch.json (renamed from tools/perf/pmu-events/arch/test/test_cpu/branch.json) 0
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/cpu/cache.json5
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/cpu/metrics.json64
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/cpu/other.json (renamed from tools/perf/pmu-events/arch/test/test_cpu/other.json) 0
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json58
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json2464
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/cache.json1064
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/floating-point.json151
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/frontend.json426
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/memory.json305
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/other.json174
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/pipeline.json1658
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json90
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json183
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json236
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json698
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/cache.json330
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json18
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/memory.json88
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/other.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json533
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json183
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json47
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/branch.json5
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/cache.json83
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/core.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/data-fabric.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/memory.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/other.json12
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen1/recommended.json178
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/cache.json79
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/core.json14
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/data-fabric.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/memory.json86
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen2/recommended.json178
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/branch.json53
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/cache.json402
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/core.json137
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json139
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/memory.json428
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/other.json103
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen3/recommended.json214
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/branch.json82
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/cache.json772
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/core.json122
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/data-fabric.json1090
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/floating-point.json818
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/memory.json174
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/other.json138
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/pipeline.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/amdzen4/recommended.json334
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/cache.json793
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/floating-point.json295
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/frontend.json95
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/memory.json139
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/other.json486
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/pipeline.json413
-rw-r--r--tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json129
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json1236
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/cache.json3852
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/floating-point.json232
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/frontend.json323
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/memory.json3454
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/other.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/pipeline.json1855
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json133
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json61
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/uncore.json278
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json370
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json1142
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/cache.json1024
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json221
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/frontend.json315
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/memory.json576
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/other.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json1829
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json3363
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json614
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json555
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json2824
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json455
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json358
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json1271
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/cache.json1167
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json221
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/frontend.json315
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/memory.json856
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/other.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json1829
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json3479
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json4014
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json555
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json2845
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json455
-rw-r--r--tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json358
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/cache.json12269
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json1721
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json100
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json563
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/memory.json8144
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/other.json8151
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json1210
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json10764
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json11334
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json4250
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json4008
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json1770
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json196
-rw-r--r--tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json290
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/cache.json886
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/ehl-metrics.json57
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json69
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/memory.json358
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/other.json532
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json450
-rw-r--r--tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json247
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/cache.json1382
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/floating-point.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/frontend.json82
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/memory.json40
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/other.json81
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/pipeline.json556
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json98
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/cache.json1512
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json92
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/memory.json44
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/other.json97
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json652
-rw-r--r--tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json215
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/frontend.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/pipeline.json96
-rw-r--r--tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/cache.json54
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/frontend.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/memory.json174
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/other.json29
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json102
-rw-r--r--tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/cache.json1339
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/floating-point.json121
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/frontend.json332
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json1047
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/memory.json876
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/other.json36
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/pipeline.json1747
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json202
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/uncore-other.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/uncore.json374
-rw-r--r--tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json468
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/cache.json1295
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/floating-point.json108
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/frontend.json318
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json1084
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/memory.json920
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/other.json28
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/pipeline.json1724
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json3470
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json3960
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json528
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json2838
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json495
-rw-r--r--tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json446
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/cache.json1146
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/floating-point.json149
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/frontend.json625
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json1518
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/memory.json650
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/other.json331
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/pipeline.json1319
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json74
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/uncore-other.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json239
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/cache.json876
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/floating-point.json105
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/frontend.json344
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json1568
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/memory.json414
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/other.json460
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/pipeline.json775
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json9860
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json14571
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json9270
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json1548
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json204
-rw-r--r--tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json181
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/cache.json1323
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json198
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/frontend.json356
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json1096
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/memory.json262
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/other.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json1693
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json202
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json75
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/uncore.json314
-rw-r--r--tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json186
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/cache.json1436
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/floating-point.json198
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/frontend.json356
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json1123
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/memory.json463
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/other.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/pipeline.json1693
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json3119
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json3281
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json549
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json1599
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json634
-rw-r--r--tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json208
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/cache.json1407
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/floating-point.json146
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/frontend.json351
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json583
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/memory.json401
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/other.json52
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/pipeline.json1471
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json1796
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json1781
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json324
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json405
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json352
-rw-r--r--tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json170
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/cache.json2209
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/frontend.json43
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/memory.json1027
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json459
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json3365
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json194
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json112
-rw-r--r--tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json63
-rw-r--r--tools/perf/pmu-events/arch/x86/mapfile.csv73
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/cache.json204
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/frontend.json27
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/memory.json157
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/other.json42
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json214
-rw-r--r--tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json38
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/cache.json2518
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/frontend.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/memory.json538
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/other.json156
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json737
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json81
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/cache.json2433
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/frontend.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/memory.json538
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/other.json156
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json737
-rw-r--r--tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json81
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/cache.json1975
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/frontend.json345
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/memory.json434
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/other.json62
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json1589
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json580
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json202
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json75
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/uncore.json314
-rw-r--r--tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json140
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json906
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json193
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json362
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json341
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/other.json342
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json975
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json1675
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json5644
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json450
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json6199
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json3651
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json3308
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json197
-rw-r--r--tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json165
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/frontend.json16
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/memory.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json96
-rw-r--r--tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json24
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/cache.json833
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/floating-point.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/frontend.json71
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/memory.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/other.json20
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/pipeline.json407
-rw-r--r--tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json71
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/cache.json3342
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/floating-point.json81
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/frontend.json701
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/memory.json1903
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/other.json53
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/pipeline.json1293
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json1587
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json124
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json67
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore-other.json10
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/uncore.json254
-rw-r--r--tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json296
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/cache.json1961
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/floating-point.json82
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/frontend.json701
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/memory.json1675
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/other.json150
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/pipeline.json1288
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json1677
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json10649
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json11248
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json4250
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json3247
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json1156
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json196
-rw-r--r--tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json282
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/cache.json886
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/frontend.json69
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/memory.json358
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/other.json532
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json450
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json7100
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json6016
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json8944
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json547
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json203
-rw-r--r--tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json247
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/cache.json553
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json105
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/frontend.json353
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/memory.json218
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/other.json35
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json808
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json1532
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json90
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json50
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json9
-rw-r--r--tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json165
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/cache.json111
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/frontend.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/memory.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/other.json26
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/pipeline.json111
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json73
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json431
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/uncore-power.json11
-rw-r--r--tools/perf/pmu-events/arch/x86/tremontx/virtual-memory.json86
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/cache.json2255
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/floating-point.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/frontend.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/memory.json551
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/other.json236
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/pipeline.json751
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-dp/virtual-memory.json121
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/cache.json2559
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/floating-point.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/frontend.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/memory.json538
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/other.json236
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/pipeline.json751
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereep-sp/virtual-memory.json106
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/cache.json2568
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/floating-point.json158
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/frontend.json17
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/memory.json543
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/other.json236
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/pipeline.json755
-rw-r--r--tools/perf/pmu-events/arch/x86/westmereex/virtual-memory.json121
-rw-r--r--tools/perf/pmu-events/empty-pmu-events.c422
-rw-r--r--tools/perf/pmu-events/jevents.c1227
-rw-r--r--tools/perf/pmu-events/jevents.h23
-rwxr-xr-xtools/perf/pmu-events/jevents.py997
-rw-r--r--tools/perf/pmu-events/jsmn.c313
-rw-r--r--tools/perf/pmu-events/jsmn.h68
-rw-r--r--tools/perf/pmu-events/json.c162
-rw-r--r--tools/perf/pmu-events/json.h39
-rw-r--r--tools/perf/pmu-events/metric.py579
-rwxr-xr-xtools/perf/pmu-events/metric_test.py168
-rw-r--r--tools/perf/pmu-events/pmu-events.h96
-rwxr-xr-xtools/perf/python/tracepoint.py6
-rwxr-xr-xtools/perf/python/twatch.py2
-rw-r--r--tools/perf/scripts/Build4
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Build6
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/Context.c175
-rwxr-xr-xtools/perf/scripts/python/arm-cs-trace-disasm.py274
-rwxr-xr-xtools/perf/scripts/python/bin/flamegraph-record2
-rwxr-xr-xtools/perf/scripts/python/bin/flamegraph-report3
-rw-r--r--tools/perf/scripts/python/bin/intel-pt-events-record4
-rw-r--r--tools/perf/scripts/python/bin/intel-pt-events-report4
-rwxr-xr-xtools/perf/scripts/python/bin/stackcollapse-report2
-rwxr-xr-xtools/perf/scripts/python/bin/task-analyzer-record2
-rwxr-xr-xtools/perf/scripts/python/bin/task-analyzer-report3
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py19
-rw-r--r--tools/perf/scripts/python/export-to-sqlite.py19
-rwxr-xr-xtools/perf/scripts/python/exported-sql-viewer.py112
-rwxr-xr-xtools/perf/scripts/python/flamegraph.py243
-rw-r--r--tools/perf/scripts/python/futex-contention.py51
-rw-r--r--tools/perf/scripts/python/intel-pt-events.py426
-rw-r--r--tools/perf/scripts/python/libxed.py107
-rwxr-xr-xtools/perf/scripts/python/net_dropmonitor.py4
-rw-r--r--tools/perf/scripts/python/netdev-times.py8
-rwxr-xr-xtools/perf/scripts/python/task-analyzer.py934
-rw-r--r--tools/perf/tests/Build27
-rw-r--r--tools/perf/tests/api-io.c343
-rw-r--r--tools/perf/tests/attr.c24
-rw-r--r--tools/perf/tests/attr.py71
-rw-r--r--tools/perf/tests/attr/README9
-rw-r--r--tools/perf/tests/attr/base-record6
-rw-r--r--tools/perf/tests/attr/base-record-spe40
-rw-r--r--tools/perf/tests/attr/base-stat2
-rw-r--r--tools/perf/tests/attr/system-wide-dummy50
-rw-r--r--tools/perf/tests/attr/test-record-C012
-rw-r--r--tools/perf/tests/attr/test-record-graph-default2
-rw-r--r--tools/perf/tests/attr/test-record-graph-default-aarch649
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp2
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp-aarch649
-rw-r--r--tools/perf/tests/attr/test-record-group22
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling6
-rw-r--r--tools/perf/tests/attr/test-record-group14
-rw-r--r--tools/perf/tests/attr/test-record-group229
-rw-r--r--tools/perf/tests/attr/test-record-pfm-period9
-rw-r--r--tools/perf/tests/attr/test-record-spe-period12
-rw-r--r--tools/perf/tests/attr/test-record-spe-period-term12
-rw-r--r--tools/perf/tests/attr/test-record-spe-physical-address12
-rw-r--r--tools/perf/tests/attr/test-record-user-regs-no-sve-aarch649
-rw-r--r--tools/perf/tests/attr/test-record-user-regs-old-sve-aarch6410
-rw-r--r--tools/perf/tests/attr/test-record-user-regs-sve-aarch6414
-rw-r--r--tools/perf/tests/attr/test-stat-default97
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-1113
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-2137
-rw-r--r--tools/perf/tests/attr/test-stat-detailed-3145
-rw-r--r--tools/perf/tests/attr/test-stat-group17
-rw-r--r--tools/perf/tests/backward-ring-buffer.c11
-rw-r--r--tools/perf/tests/bitmap.c10
-rw-r--r--tools/perf/tests/bp_account.c52
-rw-r--r--tools/perf/tests/bp_signal.c45
-rw-r--r--tools/perf/tests/bp_signal_overflow.c9
-rw-r--r--tools/perf/tests/bpf-script-example.c37
-rw-r--r--tools/perf/tests/bpf-script-test-prologue.c2
-rw-r--r--tools/perf/tests/bpf.c124
-rw-r--r--tools/perf/tests/builtin-test-list.c207
-rw-r--r--tools/perf/tests/builtin-test-list.h12
-rw-r--r--tools/perf/tests/builtin-test.c739
-rw-r--r--tools/perf/tests/clang.c54
-rw-r--r--tools/perf/tests/code-reading.c125
-rw-r--r--tools/perf/tests/cpumap.c95
-rw-r--r--tools/perf/tests/demangle-java-test.c44
-rw-r--r--tools/perf/tests/demangle-ocaml-test.c45
-rw-r--r--tools/perf/tests/dlfilter-test.c419
-rw-r--r--tools/perf/tests/dso-data.c24
-rw-r--r--tools/perf/tests/dwarf-unwind.c52
-rw-r--r--tools/perf/tests/event-times.c20
-rw-r--r--tools/perf/tests/event_groups.c139
-rw-r--r--tools/perf/tests/event_update.c34
-rw-r--r--tools/perf/tests/evsel-roundtrip-name.c48
-rw-r--r--tools/perf/tests/evsel-tp-sched.c40
-rw-r--r--tools/perf/tests/expand-cgroup.c224
-rw-r--r--tools/perf/tests/expr.c226
-rw-r--r--tools/perf/tests/fdarray.c29
-rw-r--r--tools/perf/tests/genelf.c6
-rw-r--r--tools/perf/tests/hists_common.c8
-rw-r--r--tools/perf/tests/hists_cumulate.c34
-rw-r--r--tools/perf/tests/hists_filter.c38
-rw-r--r--tools/perf/tests/hists_link.c30
-rw-r--r--tools/perf/tests/hists_output.c28
-rw-r--r--tools/perf/tests/is_printable_array.c4
-rw-r--r--tools/perf/tests/keep-tracking.c15
-rw-r--r--tools/perf/tests/kmod-path.c4
-rw-r--r--tools/perf/tests/llvm.c93
-rw-r--r--tools/perf/tests/make75
-rw-r--r--tools/perf/tests/maps.c91
-rw-r--r--tools/perf/tests/mem.c4
-rw-r--r--tools/perf/tests/mem2node.c15
-rw-r--r--tools/perf/tests/mmap-basic.c179
-rw-r--r--tools/perf/tests/mmap-thread-lookup.c11
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c74
-rw-r--r--tools/perf/tests/openat-syscall-tp-fields.c22
-rw-r--r--tools/perf/tests/openat-syscall.c34
-rw-r--r--tools/perf/tests/parse-events.c1076
-rw-r--r--tools/perf/tests/parse-metric.c312
-rw-r--r--tools/perf/tests/parse-no-sample-id-all.c10
-rw-r--r--tools/perf/tests/pe-file-parsing.c100
-rw-r--r--tools/perf/tests/pe-file.c14
-rw-r--r--tools/perf/tests/pe-file.exe bin 0 -> 75595 bytes
-rw-r--r--tools/perf/tests/pe-file.exe.debug bin 0 -> 141644 bytes
-rw-r--r--tools/perf/tests/perf-hooks.c4
-rw-r--r--tools/perf/tests/perf-record.c52
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c (renamed from tools/perf/arch/x86/tests/perf-time-to-tsc.c) 86
-rw-r--r--tools/perf/tests/pfm.c194
-rw-r--r--tools/perf/tests/pmu-events.c1126
-rw-r--r--tools/perf/tests/pmu.c18
-rw-r--r--tools/perf/tests/python-use.c5
-rw-r--r--tools/perf/tests/sample-parsing.c87
-rw-r--r--tools/perf/tests/sdt.c12
-rwxr-xr-xtools/perf/tests/shell/buildid.sh175
-rw-r--r--tools/perf/tests/shell/coresight/Makefile29
-rw-r--r--tools/perf/tests/shell/coresight/Makefile.miniconfig14
-rwxr-xr-xtools/perf/tests/shell/coresight/asm_pure_loop.sh18
-rw-r--r--tools/perf/tests/shell/coresight/asm_pure_loop/.gitignore1
-rw-r--r--tools/perf/tests/shell/coresight/asm_pure_loop/Makefile34
-rw-r--r--tools/perf/tests/shell/coresight/asm_pure_loop/asm_pure_loop.S28
-rw-r--r--tools/perf/tests/shell/coresight/memcpy_thread/.gitignore1
-rw-r--r--tools/perf/tests/shell/coresight/memcpy_thread/Makefile33
-rw-r--r--tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c79
-rwxr-xr-xtools/perf/tests/shell/coresight/memcpy_thread_16k_10.sh18
-rw-r--r--tools/perf/tests/shell/coresight/thread_loop/.gitignore1
-rw-r--r--tools/perf/tests/shell/coresight/thread_loop/Makefile33
-rw-r--r--tools/perf/tests/shell/coresight/thread_loop/thread_loop.c86
-rwxr-xr-xtools/perf/tests/shell/coresight/thread_loop_check_tid_10.sh19
-rwxr-xr-xtools/perf/tests/shell/coresight/thread_loop_check_tid_2.sh19
-rw-r--r--tools/perf/tests/shell/coresight/unroll_loop_thread/.gitignore1
-rw-r--r--tools/perf/tests/shell/coresight/unroll_loop_thread/Makefile33
-rw-r--r--tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c74
-rwxr-xr-xtools/perf/tests/shell/coresight/unroll_loop_thread_10.sh18
-rwxr-xr-xtools/perf/tests/shell/daemon.sh487
-rw-r--r--tools/perf/tests/shell/lib/coresight.sh132
-rw-r--r--tools/perf/tests/shell/lib/perf_json_output_lint.py94
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh12
-rw-r--r--tools/perf/tests/shell/lib/waiting.sh77
-rwxr-xr-xtools/perf/tests/shell/lock_contention.sh248
-rwxr-xr-xtools/perf/tests/shell/pipe_test.sh34
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh17
-rwxr-xr-xtools/perf/tests/shell/record+script_probe_vfs_getname.sh13
-rwxr-xr-xtools/perf/tests/shell/record+zstd_comp_decomp.sh7
-rwxr-xr-xtools/perf/tests/shell/record.sh163
-rwxr-xr-xtools/perf/tests/shell/record_offcpu.sh103
-rwxr-xr-xtools/perf/tests/shell/stat+csv_output.sh231
-rwxr-xr-xtools/perf/tests/shell/stat+csv_summary.sh31
-rwxr-xr-xtools/perf/tests/shell/stat+json_output.sh206
-rwxr-xr-xtools/perf/tests/shell/stat+shadow_stat.sh81
-rwxr-xr-xtools/perf/tests/shell/stat.sh99
-rwxr-xr-xtools/perf/tests/shell/stat_all_metricgroups.sh12
-rwxr-xr-xtools/perf/tests/shell/stat_all_metrics.sh43
-rwxr-xr-xtools/perf/tests/shell/stat_all_pmu.sh23
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters.sh45
-rwxr-xr-xtools/perf/tests/shell/stat_bpf_counters_cgrp.sh83
-rwxr-xr-xtools/perf/tests/shell/test_arm_callgraph_fp.sh40
-rwxr-xr-xtools/perf/tests/shell/test_arm_coresight.sh212
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe.sh113
-rwxr-xr-xtools/perf/tests/shell/test_arm_spe_fork.sh50
-rwxr-xr-xtools/perf/tests/shell/test_brstack.sh76
-rwxr-xr-xtools/perf/tests/shell/test_data_symbol.sh66
-rwxr-xr-xtools/perf/tests/shell/test_intel_pt.sh680
-rwxr-xr-xtools/perf/tests/shell/test_java_symbol.sh75
-rwxr-xr-xtools/perf/tests/shell/test_task_analyzer.sh151
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh8
-rw-r--r--tools/perf/tests/sigtrap.c227
-rw-r--r--tools/perf/tests/stat.c14
-rw-r--r--tools/perf/tests/sw-clock.c21
-rw-r--r--tools/perf/tests/switch-tracking.c73
-rw-r--r--tools/perf/tests/symbols.c153
-rw-r--r--tools/perf/tests/task-exit.c27
-rw-r--r--tools/perf/tests/tests.h257
-rw-r--r--tools/perf/tests/thread-map.c19
-rw-r--r--tools/perf/tests/thread-maps-share.c32
-rw-r--r--tools/perf/tests/time-utils-test.c4
-rw-r--r--tools/perf/tests/topology.c143
-rw-r--r--tools/perf/tests/unit_number__scnprintf.c4
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c180
-rw-r--r--tools/perf/tests/workloads/Build13
-rw-r--r--tools/perf/tests/workloads/brstack.c40
-rw-r--r--tools/perf/tests/workloads/datasym.c24
-rw-r--r--tools/perf/tests/workloads/leafloop.c34
-rw-r--r--tools/perf/tests/workloads/noploop.c32
-rw-r--r--tools/perf/tests/workloads/sqrtloop.c45
-rw-r--r--tools/perf/tests/workloads/thloop.c53
-rw-r--r--tools/perf/tests/wp.c133
-rw-r--r--tools/perf/trace/beauty/Build1
-rwxr-xr-xtools/perf/trace/beauty/arch_errno_names.sh27
-rw-r--r--tools/perf/trace/beauty/beauty.h8
-rw-r--r--tools/perf/trace/beauty/clone.c1
-rwxr-xr-xtools/perf/trace/beauty/fadvise.sh2
-rwxr-xr-xtools/perf/trace/beauty/fsconfig.sh7
-rwxr-xr-xtools/perf/trace/beauty/fsmount.sh2
-rwxr-xr-xtools/perf/trace/beauty/fspick.sh2
-rw-r--r--tools/perf/trace/beauty/include/linux/socket.h459
-rwxr-xr-xtools/perf/trace/beauty/kcmp_type.sh2
-rwxr-xr-xtools/perf/trace/beauty/kvm_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/madvise_behavior.sh2
-rw-r--r--tools/perf/trace/beauty/mmap.c73
-rwxr-xr-xtools/perf/trace/beauty/mmap_flags.sh38
-rwxr-xr-xtools/perf/trace/beauty/mmap_prot.sh30
-rwxr-xr-xtools/perf/trace/beauty/mount_flags.sh4
-rwxr-xr-xtools/perf/trace/beauty/move_mount_flags.sh4
-rwxr-xr-xtools/perf/trace/beauty/mremap_flags.sh18
-rw-r--r--tools/perf/trace/beauty/perf_event_open.c44
-rwxr-xr-xtools/perf/trace/beauty/perf_ioctl.sh2
-rwxr-xr-xtools/perf/trace/beauty/pkey_alloc_access_rights.sh2
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh6
-rwxr-xr-xtools/perf/trace/beauty/rename_flags.sh4
-rw-r--r--tools/perf/trace/beauty/sockaddr.c9
-rwxr-xr-xtools/perf/trace/beauty/sockaddr.sh24
-rw-r--r--tools/perf/trace/beauty/socket.c21
-rwxr-xr-xtools/perf/trace/beauty/socket.sh28
-rwxr-xr-xtools/perf/trace/beauty/socket_ipproto.sh12
-rw-r--r--tools/perf/trace/beauty/statx.c2
-rwxr-xr-xtools/perf/trace/beauty/sync_file_range.sh2
-rw-r--r--tools/perf/trace/beauty/timespec.c21
-rwxr-xr-xtools/perf/trace/beauty/tracepoints/x86_irq_vectors.sh4
-rwxr-xr-xtools/perf/trace/beauty/tracepoints/x86_msr.sh12
-rwxr-xr-xtools/perf/trace/beauty/usbdevfs_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/vhost_virtio_ioctl.sh4
-rwxr-xr-xtools/perf/trace/beauty/x86_arch_prctl.sh4
-rw-r--r--tools/perf/ui/browser.c53
-rw-r--r--tools/perf/ui/browser.h2
-rw-r--r--tools/perf/ui/browsers/annotate.c139
-rw-r--r--tools/perf/ui/browsers/hists.c157
-rw-r--r--tools/perf/ui/browsers/map.c4
-rw-r--r--tools/perf/ui/gtk/annotate.c16
-rw-r--r--tools/perf/ui/gtk/browser.c2
-rw-r--r--tools/perf/ui/gtk/gtk.h7
-rw-r--r--tools/perf/ui/gtk/helpline.c2
-rw-r--r--tools/perf/ui/gtk/hists.c14
-rw-r--r--tools/perf/ui/hist.c46
-rw-r--r--tools/perf/ui/setup.c24
-rw-r--r--tools/perf/ui/stdio/hist.c15
-rw-r--r--tools/perf/ui/tui/helpline.c5
-rw-r--r--tools/perf/ui/tui/progress.c8
-rw-r--r--tools/perf/ui/tui/setup.c13
-rw-r--r--tools/perf/ui/tui/util.c18
-rw-r--r--tools/perf/ui/ui.h7
-rw-r--r--tools/perf/ui/util.c5
-rw-r--r--tools/perf/util/Build157
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN9
-rw-r--r--tools/perf/util/affinity.c26
-rw-r--r--tools/perf/util/amd-sample-raw.c342
-rw-r--r--tools/perf/util/annotate.c321
-rw-r--r--tools/perf/util/annotate.h27
-rw-r--r--tools/perf/util/arm-spe-decoder/Build1
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.c267
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h114
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.c546
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-pkt-decoder.h159
-rw-r--r--tools/perf/util/arm-spe-pkt-decoder.c462
-rw-r--r--tools/perf/util/arm-spe-pkt-decoder.h43
-rw-r--r--tools/perf/util/arm-spe.c1237
-rw-r--r--tools/perf/util/arm64-frame-pointer-unwind-support.c63
-rw-r--r--tools/perf/util/arm64-frame-pointer-unwind-support.h12
-rw-r--r--tools/perf/util/auxtrace.c496
-rw-r--r--tools/perf/util/auxtrace.h213
-rw-r--r--tools/perf/util/block-info.c10
-rw-r--r--tools/perf/util/block-range.c6
-rw-r--r--tools/perf/util/bpf-event.c187
-rw-r--r--tools/perf/util/bpf-event.h10
-rw-r--r--tools/perf/util/bpf-filter.c197
-rw-r--r--tools/perf/util/bpf-filter.h49
-rw-r--r--tools/perf/util/bpf-filter.l159
-rw-r--r--tools/perf/util/bpf-filter.y78
-rw-r--r--tools/perf/util/bpf-loader.c568
-rw-r--r--tools/perf/util/bpf-loader.h3
-rw-r--r--tools/perf/util/bpf-prologue.c14
-rw-r--r--tools/perf/util/bpf-prologue.h6
-rw-r--r--tools/perf/util/bpf-utils.c260
-rw-r--r--tools/perf/util/bpf-utils.h76
-rw-r--r--tools/perf/util/bpf_counter.c819
-rw-r--r--tools/perf/util/bpf_counter.h137
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c295
-rw-r--r--tools/perf/util/bpf_ftrace.c154
-rw-r--r--tools/perf/util/bpf_kwork.c349
-rw-r--r--tools/perf/util/bpf_lock_contention.c375
-rw-r--r--tools/perf/util/bpf_map.c28
-rw-r--r--tools/perf/util/bpf_map.h3
-rw-r--r--tools/perf/util/bpf_off_cpu.c392
-rw-r--r--tools/perf/util/bpf_skel/.gitignore (renamed from tools/lib/lockdep/.gitignore) 3
-rw-r--r--tools/perf/util/bpf_skel/bperf_cgroup.bpf.c227
-rw-r--r--tools/perf/util/bpf_skel/bperf_follower.bpf.c78
-rw-r--r--tools/perf/util/bpf_skel/bperf_leader.bpf.c55
-rw-r--r--tools/perf/util/bpf_skel/bperf_u.h14
-rw-r--r--tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c92
-rw-r--r--tools/perf/util/bpf_skel/func_latency.bpf.c116
-rw-r--r--tools/perf/util/bpf_skel/kwork_trace.bpf.c383
-rw-r--r--tools/perf/util/bpf_skel/lock_contention.bpf.c453
-rw-r--r--tools/perf/util/bpf_skel/lock_data.h49
-rw-r--r--tools/perf/util/bpf_skel/off_cpu.bpf.c283
-rw-r--r--tools/perf/util/bpf_skel/sample-filter.h27
-rw-r--r--tools/perf/util/bpf_skel/sample_filter.bpf.c196
-rw-r--r--tools/perf/util/bpf_skel/vmlinux.h173
-rw-r--r--tools/perf/util/branch.c87
-rw-r--r--tools/perf/util/branch.h35
-rw-r--r--tools/perf/util/build-id.c313
-rw-r--r--tools/perf/util/build-id.h38
-rw-r--r--tools/perf/util/c++/clang-c.h8
-rw-r--r--tools/perf/util/c++/clang-test.cpp6
-rw-r--r--tools/perf/util/c++/clang.cpp25
-rw-r--r--tools/perf/util/cacheline.h25
-rw-r--r--tools/perf/util/call-path.h2
-rw-r--r--tools/perf/util/callchain.c185
-rw-r--r--tools/perf/util/callchain.h20
-rw-r--r--tools/perf/util/cap.h4
-rw-r--r--tools/perf/util/cgroup.c328
-rw-r--r--tools/perf/util/cgroup.h16
-rw-r--r--tools/perf/util/clockid.c119
-rw-r--r--tools/perf/util/clockid.h11
-rw-r--r--tools/perf/util/cloexec.c19
-rw-r--r--tools/perf/util/config.c180
-rw-r--r--tools/perf/util/config.h8
-rw-r--r--tools/perf/util/counts.c15
-rw-r--r--tools/perf/util/counts.h20
-rw-r--r--tools/perf/util/cpumap.c439
-rw-r--r--tools/perf/util/cpumap.h159
-rw-r--r--tools/perf/util/cputopo.c237
-rw-r--r--tools/perf/util/cputopo.h58
-rw-r--r--tools/perf/util/cs-etm-base.c193
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c336
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.h21
-rw-r--r--tools/perf/util/cs-etm.c1324
-rw-r--r--tools/perf/util/cs-etm.h128
-rw-r--r--tools/perf/util/data-convert-bt.c81
-rw-r--r--tools/perf/util/data-convert-bt.h11
-rw-r--r--tools/perf/util/data-convert-json.c416
-rw-r--r--tools/perf/util/data-convert.h11
-rw-r--r--tools/perf/util/data.c124
-rw-r--r--tools/perf/util/data.h17
-rw-r--r--tools/perf/util/db-export.c30
-rw-r--r--tools/perf/util/db-export.h2
-rw-r--r--tools/perf/util/debug.c123
-rw-r--r--tools/perf/util/debug.h17
-rw-r--r--tools/perf/util/demangle-cxx.cpp49
-rw-r--r--tools/perf/util/demangle-cxx.h16
-rw-r--r--tools/perf/util/demangle-java.c17
-rw-r--r--tools/perf/util/demangle-ocaml.c68
-rw-r--r--tools/perf/util/demangle-ocaml.h7
-rw-r--r--tools/perf/util/dlfilter.c623
-rw-r--r--tools/perf/util/dlfilter.h99
-rw-r--r--tools/perf/util/dso.c148
-rw-r--r--tools/perf/util/dso.h46
-rw-r--r--tools/perf/util/dsos.c36
-rw-r--r--tools/perf/util/dwarf-aux.c127
-rw-r--r--tools/perf/util/dwarf-aux.h7
-rw-r--r--tools/perf/util/dwarf-regs.c10
-rw-r--r--tools/perf/util/env.c221
-rw-r--r--tools/perf/util/env.h52
-rw-r--r--tools/perf/util/event.c184
-rw-r--r--tools/perf/util/event.h202
-rw-r--r--tools/perf/util/events_stats.h19
-rw-r--r--tools/perf/util/evlist-hybrid.c162
-rw-r--r--tools/perf/util/evlist-hybrid.h15
-rw-r--r--tools/perf/util/evlist.c1456
-rw-r--r--tools/perf/util/evlist.h320
-rw-r--r--tools/perf/util/evsel.c1736
-rw-r--r--tools/perf/util/evsel.h488
-rw-r--r--tools/perf/util/evsel_config.h45
-rw-r--r--tools/perf/util/evsel_fprintf.c27
-rw-r--r--tools/perf/util/evsel_fprintf.h3
-rw-r--r--tools/perf/util/evswitch.c4
-rw-r--r--tools/perf/util/evswitch.h4
-rw-r--r--tools/perf/util/expr.c470
-rw-r--r--tools/perf/util/expr.h60
-rw-r--r--tools/perf/util/expr.l80
-rw-r--r--tools/perf/util/expr.y322
-rw-r--r--tools/perf/util/ftrace.h81
-rw-r--r--tools/perf/util/genelf.c33
-rw-r--r--tools/perf/util/genelf.h16
-rw-r--r--tools/perf/util/genelf_debug.c50
-rwxr-xr-xtools/perf/util/generate-cmdlist.sh19
-rw-r--r--tools/perf/util/get_current_dir_name.c3
-rw-r--r--tools/perf/util/group.h8
-rw-r--r--tools/perf/util/hashmap.c240
-rw-r--r--tools/perf/util/hashmap.h218
-rw-r--r--tools/perf/util/header.c896
-rw-r--r--tools/perf/util/header.h28
-rw-r--r--tools/perf/util/hisi-ptt-decoder/Build1
-rw-r--r--tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.c164
-rw-r--r--tools/perf/util/hisi-ptt-decoder/hisi-ptt-pkt-decoder.h31
-rw-r--r--tools/perf/util/hisi-ptt.c192
-rw-r--r--tools/perf/util/hisi-ptt.h19
-rw-r--r--tools/perf/util/hist.c173
-rw-r--r--tools/perf/util/hist.h57
-rw-r--r--tools/perf/util/include/linux/linkage.h68
-rw-r--r--tools/perf/util/intel-bts.c18
-rw-r--r--tools/perf/util/intel-pt-decoder/Build2
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c1640
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h52
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.c54
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-insn-decoder.h4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.c125
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-log.h8
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c65
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.h5
-rw-r--r--tools/perf/util/intel-pt.c1556
-rw-r--r--tools/perf/util/intlist.c27
-rw-r--r--tools/perf/util/intlist.h10
-rw-r--r--tools/perf/util/iostat.c54
-rw-r--r--tools/perf/util/iostat.h47
-rw-r--r--tools/perf/util/jit.h2
-rw-r--r--tools/perf/util/jitdump.c158
-rw-r--r--tools/perf/util/jitdump.h6
-rw-r--r--tools/perf/util/kvm-stat.h73
-rw-r--r--tools/perf/util/kwork.h257
-rw-r--r--tools/perf/util/levenshtein.c2
-rw-r--r--tools/perf/util/libunwind/arm64.c6
-rw-r--r--tools/perf/util/libunwind/x86_32.c2
-rw-r--r--tools/perf/util/llvm-utils.c79
-rw-r--r--tools/perf/util/lock-contention.h174
-rw-r--r--tools/perf/util/lzma.c8
-rw-r--r--tools/perf/util/machine.c1142
-rw-r--r--tools/perf/util/machine.h40
-rw-r--r--tools/perf/util/map.c712
-rw-r--r--tools/perf/util/map.h181
-rw-r--r--tools/perf/util/map_symbol.h2
-rw-r--r--tools/perf/util/maps.c476
-rw-r--r--tools/perf/util/maps.h74
-rw-r--r--tools/perf/util/mem-events.c375
-rw-r--r--tools/perf/util/mem-events.h21
-rw-r--r--tools/perf/util/mem2node.c4
-rw-r--r--tools/perf/util/metricgroup.c1875
-rw-r--r--tools/perf/util/metricgroup.h64
-rw-r--r--tools/perf/util/mmap.c50
-rw-r--r--tools/perf/util/mmap.h16
-rw-r--r--tools/perf/util/mutex.c119
-rw-r--r--tools/perf/util/mutex.h108
-rw-r--r--tools/perf/util/namespaces.c226
-rw-r--r--tools/perf/util/namespaces.h18
-rw-r--r--tools/perf/util/off_cpu.h38
-rw-r--r--tools/perf/util/ordered-events.c7
-rw-r--r--tools/perf/util/ordered-events.h11
-rw-r--r--tools/perf/util/parse-branch-options.c8
-rw-r--r--tools/perf/util/parse-events-hybrid.c214
-rw-r--r--tools/perf/util/parse-events-hybrid.h25
-rw-r--r--tools/perf/util/parse-events.c1706
-rw-r--r--tools/perf/util/parse-events.h101
-rw-r--r--tools/perf/util/parse-events.l63
-rw-r--r--tools/perf/util/parse-events.y147
-rw-r--r--tools/perf/util/parse-regs-options.c2
-rw-r--r--tools/perf/util/parse-sublevel-options.c70
-rw-r--r--tools/perf/util/parse-sublevel-options.h11
-rw-r--r--tools/perf/util/path.c14
-rw-r--r--tools/perf/util/path.h1
-rw-r--r--tools/perf/util/perf_api_probe.c197
-rw-r--r--tools/perf/util/perf_api_probe.h17
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c13
-rw-r--r--tools/perf/util/perf_regs.c749
-rw-r--r--tools/perf/util/perf_regs.h10
-rw-r--r--tools/perf/util/pfm.c268
-rw-r--r--tools/perf/util/pfm.h38
-rw-r--r--tools/perf/util/pmu-hybrid.c72
-rw-r--r--tools/perf/util/pmu-hybrid.h33
-rw-r--r--tools/perf/util/pmu.c1363
-rw-r--r--tools/perf/util/pmu.h206
-rw-r--r--tools/perf/util/pmu.l19
-rw-r--r--tools/perf/util/pmu.y22
-rw-r--r--tools/perf/util/pmus.c5
-rw-r--r--tools/perf/util/pmus.h9
-rw-r--r--tools/perf/util/print-events.c418
-rw-r--r--tools/perf/util/print-events.h40
-rw-r--r--tools/perf/util/print_binary.c2
-rw-r--r--tools/perf/util/probe-event.c479
-rw-r--r--tools/perf/util/probe-event.h6
-rw-r--r--tools/perf/util/probe-file.c150
-rw-r--r--tools/perf/util/probe-finder.c146
-rw-r--r--tools/perf/util/probe-finder.h9
-rw-r--r--tools/perf/util/pstack.c2
-rw-r--r--tools/perf/util/python-ext-sources6
-rw-r--r--tools/perf/util/python.c137
-rw-r--r--tools/perf/util/record.c244
-rw-r--r--tools/perf/util/record.h20
-rw-r--r--tools/perf/util/s390-cpumcf-kernel.h1
-rw-r--r--tools/perf/util/s390-cpumsf.c37
-rw-r--r--tools/perf/util/s390-sample-raw.c58
-rw-r--r--tools/perf/util/sample-raw.c12
-rw-r--r--tools/perf/util/sample-raw.h11
-rw-r--r--tools/perf/util/sample.h133
-rw-r--r--tools/perf/util/scripting-engines/Build8
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c28
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c639
-rw-r--r--tools/perf/util/session.c965
-rw-r--r--tools/perf/util/session.h34
-rw-r--r--tools/perf/util/setup.py48
-rw-r--r--tools/perf/util/sideband_evlist.c149
-rw-r--r--tools/perf/util/smt.c60
-rw-r--r--tools/perf/util/smt.h19
-rw-r--r--tools/perf/util/sort.c737
-rw-r--r--tools/perf/util/sort.h28
-rw-r--r--tools/perf/util/srccode.c3
-rw-r--r--tools/perf/util/srcline.c393
-rw-r--r--tools/perf/util/stat-display.c1546
-rw-r--r--tools/perf/util/stat-shadow.c1304
-rw-r--r--tools/perf/util/stat.c582
-rw-r--r--tools/perf/util/stat.h182
-rw-r--r--tools/perf/util/strbuf.h2
-rw-r--r--tools/perf/util/stream.c342
-rw-r--r--tools/perf/util/stream.h41
-rw-r--r--tools/perf/util/strfilter.c2
-rw-r--r--tools/perf/util/strfilter.h4
-rw-r--r--tools/perf/util/string.c12
-rw-r--r--tools/perf/util/string2.h2
-rw-r--r--tools/perf/util/svghelper.c10
-rw-r--r--tools/perf/util/symbol-elf.c995
-rw-r--r--tools/perf/util/symbol-minimal.c36
-rw-r--r--tools/perf/util/symbol.c698
-rw-r--r--tools/perf/util/symbol.h45
-rw-r--r--tools/perf/util/symbol_conf.h10
-rw-r--r--tools/perf/util/symbol_fprintf.c4
-rw-r--r--tools/perf/util/symsrc.h1
-rw-r--r--tools/perf/util/synthetic-events.c954
-rw-r--r--tools/perf/util/synthetic-events.h33
-rw-r--r--tools/perf/util/syscalltbl.c12
-rw-r--r--tools/perf/util/syscalltbl.h14
-rw-r--r--tools/perf/util/target.c34
-rw-r--r--tools/perf/util/target.h20
-rw-r--r--tools/perf/util/thread-stack.c278
-rw-r--r--tools/perf/util/thread-stack.h12
-rw-r--r--tools/perf/util/thread.c94
-rw-r--r--tools/perf/util/thread.h19
-rw-r--r--tools/perf/util/thread_map.c1
-rw-r--r--tools/perf/util/thread_map.h2
-rw-r--r--tools/perf/util/tool.h10
-rw-r--r--tools/perf/util/top.c12
-rw-r--r--tools/perf/util/top.h13
-rw-r--r--tools/perf/util/topdown.c8
-rw-r--r--tools/perf/util/topdown.h11
-rw-r--r--tools/perf/util/trace-event-info.c122
-rw-r--r--tools/perf/util/trace-event-parse.c4
-rw-r--r--tools/perf/util/trace-event-read.c7
-rw-r--r--tools/perf/util/trace-event-scripting.c38
-rw-r--r--tools/perf/util/trace-event.c1
-rw-r--r--tools/perf/util/trace-event.h65
-rw-r--r--tools/perf/util/tracepoint.c64
-rw-r--r--tools/perf/util/tracepoint.h25
-rw-r--r--tools/perf/util/tsc.c111
-rw-r--r--tools/perf/util/tsc.h10
-rw-r--r--tools/perf/util/units.c21
-rw-r--r--tools/perf/util/units.h1
-rw-r--r--tools/perf/util/unwind-libdw.c61
-rw-r--r--tools/perf/util/unwind-libdw.h1
-rw-r--r--tools/perf/util/unwind-libunwind-local.c175
-rw-r--r--tools/perf/util/unwind-libunwind.c41
-rw-r--r--tools/perf/util/unwind.h13
-rw-r--r--tools/perf/util/usage.c6
-rw-r--r--tools/perf/util/util.c155
-rw-r--r--tools/perf/util/util.h67
-rw-r--r--tools/perf/util/vdso.c9
-rw-r--r--tools/perf/util/xyarray.c33
-rw-r--r--tools/perf/util/zstd.c2
-rw-r--r--tools/power/acpi/.gitignore1
-rw-r--r--tools/power/acpi/Makefile18
-rw-r--r--tools/power/acpi/Makefile.config2
-rw-r--r--tools/power/acpi/Makefile.rules3
-rw-r--r--tools/power/acpi/common/cmfsize.c4
-rw-r--r--tools/power/acpi/common/getopt.c2
-rw-r--r--tools/power/acpi/man/pfrut.8137
-rw-r--r--tools/power/acpi/os_specific/service_layers/oslinuxtbl.c15
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixdir.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixmap.c2
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c2
-rw-r--r--tools/power/acpi/tools/acpidump/Makefile1
-rw-r--r--tools/power/acpi/tools/acpidump/acpidump.h2
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c6
-rw-r--r--tools/power/acpi/tools/acpidump/apfiles.c2
-rw-r--r--tools/power/acpi/tools/acpidump/apmain.c2
-rw-r--r--tools/power/acpi/tools/pfrut/Makefile23
-rw-r--r--tools/power/acpi/tools/pfrut/pfrut.c436
-rw-r--r--tools/power/cpupower/Makefile25
-rw-r--r--tools/power/cpupower/TODO (renamed from tools/power/cpupower/ToDo)0
-rw-r--r--tools/power/cpupower/bench/Makefile2
-rw-r--r--tools/power/cpupower/debug/i386/dump_psb.c6
-rw-r--r--tools/power/cpupower/debug/i386/intel_gsic.c2
-rw-r--r--tools/power/cpupower/lib/acpi_cppc.c59
-rw-r--r--tools/power/cpupower/lib/acpi_cppc.h21
-rw-r--r--tools/power/cpupower/lib/cpufreq.c33
-rw-r--r--tools/power/cpupower/lib/cpufreq.h12
-rw-r--r--tools/power/cpupower/lib/cpupower.c23
-rw-r--r--tools/power/cpupower/lib/cpupower_intern.h5
-rw-r--r--tools/power/cpupower/lib/powercap.c290
-rw-r--r--tools/power/cpupower/lib/powercap.h54
-rw-r--r--tools/power/cpupower/man/cpupower-frequency-info.13
-rw-r--r--tools/power/cpupower/man/cpupower-idle-info.12
-rw-r--r--tools/power/cpupower/man/cpupower-idle-set.12
-rw-r--r--tools/power/cpupower/man/cpupower-monitor.14
-rw-r--r--tools/power/cpupower/man/cpupower-powercap-info.125
-rw-r--r--tools/power/cpupower/po/ka.po983
-rw-r--r--tools/power/cpupower/utils/builtin.h2
-rw-r--r--tools/power/cpupower/utils/cpufreq-info.c94
-rw-r--r--tools/power/cpupower/utils/cpufreq-set.c17
-rw-r--r--tools/power/cpupower/utils/cpuidle-info.c4
-rw-r--r--tools/power/cpupower/utils/cpuidle-set.c4
-rw-r--r--tools/power/cpupower/utils/cpupower-info.c8
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c4
-rw-r--r--tools/power/cpupower/utils/cpupower.c9
-rw-r--r--tools/power/cpupower/utils/helpers/amd.c142
-rw-r--r--tools/power/cpupower/utils/helpers/bitmask.c6
-rw-r--r--tools/power/cpupower/utils/helpers/cpuid.c33
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h56
-rw-r--r--tools/power/cpupower/utils/helpers/misc.c179
-rw-r--r--tools/power/cpupower/utils/helpers/msr.c28
-rw-r--r--tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c6
-rw-r--r--tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c7
-rw-r--r--tools/power/cpupower/utils/idle_monitor/hsw_ext_idle.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/idle_monitors.def1
-rw-r--r--tools/power/cpupower/utils/idle_monitor/nhm_idle.c2
-rw-r--r--tools/power/cpupower/utils/idle_monitor/rapl_monitor.c148
-rw-r--r--tools/power/cpupower/utils/idle_monitor/snb_idle.c2
-rw-r--r--tools/power/cpupower/utils/powercap-info.c117
-rw-r--r--tools/power/pm-graph/Makefile4
-rw-r--r--tools/power/pm-graph/README138
-rwxr-xr-xtools/power/pm-graph/bootgraph.py22
-rwxr-xr-xtools/power/pm-graph/install_latest_from_github.sh38
-rw-r--r--tools/power/pm-graph/sleepgraph.868
-rwxr-xr-xtools/power/pm-graph/sleepgraph.py2011
-rwxr-xr-xtools/power/x86/amd_pstate_tracer/amd_pstate_trace.py354
-rw-r--r--tools/power/x86/intel-speed-select/Build2
-rw-r--r--tools/power/x86/intel-speed-select/Makefile14
-rw-r--r--tools/power/x86/intel-speed-select/hfi-events.c308
-rw-r--r--tools/power/x86/intel-speed-select/isst-config.c1490
-rw-r--r--tools/power/x86/intel-speed-select/isst-core-mbox.c1066
-rw-r--r--tools/power/x86/intel-speed-select/isst-core-tpmi.c787
-rw-r--r--tools/power/x86/intel-speed-select/isst-core.c916
-rw-r--r--tools/power/x86/intel-speed-select/isst-daemon.c255
-rw-r--r--tools/power/x86/intel-speed-select/isst-display.c336
-rw-r--r--tools/power/x86/intel-speed-select/isst.h189
-rwxr-xr-xtools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py268
-rw-r--r--tools/power/x86/turbostat/Makefile5
-rw-r--r--tools/power/x86/turbostat/turbostat.8212
-rw-r--r--tools/power/x86/turbostat/turbostat.c2536
-rw-r--r--tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c176
-rwxr-xr-xtools/rcu/extract-stall.sh48
-rw-r--r--tools/rcu/rcu-cbs.py46
-rw-r--r--tools/scripts/Makefile.arch13
-rw-r--r--tools/scripts/Makefile.include87
-rw-r--r--tools/scripts/utilities.mak2
-rw-r--r--tools/spi/Makefile9
-rw-r--r--tools/spi/spidev_test.c42
-rw-r--r--tools/testing/crypto/chacha20-s390/Makefile12
-rw-r--r--tools/testing/crypto/chacha20-s390/run-tests.sh34
-rw-r--r--tools/testing/crypto/chacha20-s390/test-cipher.c369
-rw-r--r--tools/testing/cxl/Kbuild64
-rw-r--r--tools/testing/cxl/config_check.c17
-rw-r--r--tools/testing/cxl/cxl_acpi_test.c6
-rw-r--r--tools/testing/cxl/cxl_core_test.c6
-rw-r--r--tools/testing/cxl/cxl_mem_test.c6
-rw-r--r--tools/testing/cxl/cxl_pmem_test.c6
-rw-r--r--tools/testing/cxl/cxl_port_test.c6
-rw-r--r--tools/testing/cxl/mock_acpi.c35
-rw-r--r--tools/testing/cxl/test/Kbuild10
-rw-r--r--tools/testing/cxl/test/cxl.c1470
-rw-r--r--tools/testing/cxl/test/mem.c1338
-rw-r--r--tools/testing/cxl/test/mock.c268
-rw-r--r--tools/testing/cxl/test/mock.h36
-rw-r--r--tools/testing/cxl/watermark.h25
-rwxr-xr-xtools/testing/ktest/compare-ktest-sample.pl2
-rw-r--r--tools/testing/ktest/examples/README2
-rw-r--r--tools/testing/ktest/examples/bootconfigs/boottrace.bconf59
-rw-r--r--tools/testing/ktest/examples/bootconfigs/config-bootconfig1
-rw-r--r--tools/testing/ktest/examples/bootconfigs/functiongraph.bconf15
-rw-r--r--tools/testing/ktest/examples/bootconfigs/tracing.bconf33
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-boottrace.sh84
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-functiongraph.sh61
-rwxr-xr-xtools/testing/ktest/examples/bootconfigs/verify-tracing.sh72
-rw-r--r--tools/testing/ktest/examples/crosstests.conf2
-rw-r--r--tools/testing/ktest/examples/include/bootconfig.conf69
-rw-r--r--tools/testing/ktest/examples/kvm.conf1
-rw-r--r--tools/testing/ktest/examples/vmware.conf137
-rwxr-xr-xtools/testing/ktest/ktest.pl720
-rw-r--r--tools/testing/ktest/sample.conf31
-rw-r--r--tools/testing/kunit/.gitattributes1
-rw-r--r--tools/testing/kunit/configs/all_tests.config34
-rw-r--r--tools/testing/kunit/configs/arch_uml.config5
-rw-r--r--tools/testing/kunit/configs/broken_on_uml.config41
-rw-r--r--tools/testing/kunit/configs/coverage_uml.config11
-rw-r--r--tools/testing/kunit/configs/default.config3
-rwxr-xr-xtools/testing/kunit/kunit.py595
-rw-r--r--tools/testing/kunit/kunit_config.py133
-rw-r--r--tools/testing/kunit/kunit_json.py63
-rw-r--r--tools/testing/kunit/kunit_kernel.py391
-rw-r--r--tools/testing/kunit/kunit_parser.py1045
-rw-r--r--tools/testing/kunit/kunit_printer.py48
-rwxr-xr-xtools/testing/kunit/kunit_tool_test.py856
-rw-r--r--tools/testing/kunit/qemu_config.py20
-rw-r--r--tools/testing/kunit/qemu_configs/alpha.py10
-rw-r--r--tools/testing/kunit/qemu_configs/arm.py13
-rw-r--r--tools/testing/kunit/qemu_configs/arm64.py12
-rw-r--r--tools/testing/kunit/qemu_configs/i386.py10
-rw-r--r--tools/testing/kunit/qemu_configs/m68k.py10
-rw-r--r--tools/testing/kunit/qemu_configs/powerpc.py12
-rw-r--r--tools/testing/kunit/qemu_configs/riscv.py28
-rw-r--r--tools/testing/kunit/qemu_configs/s390.py14
-rw-r--r--tools/testing/kunit/qemu_configs/sh.py17
-rw-r--r--tools/testing/kunit/qemu_configs/sparc.py10
-rw-r--r--tools/testing/kunit/qemu_configs/x86_64.py10
-rwxr-xr-xtools/testing/kunit/run_checks.py81
-rw-r--r--tools/testing/kunit/test_data/test_config_printk_time.log3
-rw-r--r--tools/testing/kunit/test_data/test_insufficient_memory.log (renamed from tools/testing/radix-tree/linux/compiler_types.h)0
-rw-r--r--tools/testing/kunit/test_data/test_interrupted_tap_output.log3
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-all_passed.log1
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log34
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-crash.log69
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-failure.log1
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-kselftest.log14
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log31
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log7
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log (renamed from tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log)0
-rw-r--r--tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log2
-rw-r--r--tools/testing/kunit/test_data/test_kernel_panic_interrupt.log3
-rw-r--r--tools/testing/kunit/test_data/test_multiple_prefixes.log3
-rw-r--r--tools/testing/kunit/test_data/test_parse_ktap_output.log8
-rw-r--r--tools/testing/kunit/test_data/test_parse_subtest_header.log7
-rw-r--r--tools/testing/kunit/test_data/test_pound_no_prefix.log3
-rw-r--r--tools/testing/kunit/test_data/test_pound_sign.log1
-rw-r--r--tools/testing/kunit/test_data/test_skip_all_tests.log15
-rw-r--r--tools/testing/kunit/test_data/test_skip_tests.log15
-rw-r--r--tools/testing/kunit/test_data/test_strip_hyphen.log16
-rw-r--r--tools/testing/memblock/.gitignore5
-rw-r--r--tools/testing/memblock/Makefile55
-rw-r--r--tools/testing/memblock/README118
-rw-r--r--tools/testing/memblock/TODO5
-rw-r--r--tools/testing/memblock/asm/dma.h5
-rw-r--r--tools/testing/memblock/internal.h23
-rw-r--r--tools/testing/memblock/lib/slab.c9
-rw-r--r--tools/testing/memblock/linux/init.h34
-rw-r--r--tools/testing/memblock/linux/kernel.h12
-rw-r--r--tools/testing/memblock/linux/kmemleak.h18
-rw-r--r--tools/testing/memblock/linux/memory_hotplug.h17
-rw-r--r--tools/testing/memblock/linux/mmzone.h37
-rw-r--r--tools/testing/memblock/linux/printk.h25
-rw-r--r--tools/testing/memblock/main.c19
-rw-r--r--tools/testing/memblock/mmzone.c20
-rw-r--r--tools/testing/memblock/scripts/Makefile.include19
-rw-r--r--tools/testing/memblock/tests/alloc_api.c884
-rw-r--r--tools/testing/memblock/tests/alloc_api.h9
-rw-r--r--tools/testing/memblock/tests/alloc_exact_nid_api.c1113
-rw-r--r--tools/testing/memblock/tests/alloc_exact_nid_api.h25
-rw-r--r--tools/testing/memblock/tests/alloc_helpers_api.c414
-rw-r--r--tools/testing/memblock/tests/alloc_helpers_api.h9
-rw-r--r--tools/testing/memblock/tests/alloc_nid_api.c2693
-rw-r--r--tools/testing/memblock/tests/alloc_nid_api.h26
-rw-r--r--tools/testing/memblock/tests/basic_api.c2143
-rw-r--r--tools/testing/memblock/tests/basic_api.h9
-rw-r--r--tools/testing/memblock/tests/common.c209
-rw-r--r--tools/testing/memblock/tests/common.h172
-rw-r--r--tools/testing/nvdimm/Kbuild13
-rw-r--r--tools/testing/nvdimm/config_check.c4
-rw-r--r--tools/testing/nvdimm/dax-dev.c22
-rw-r--r--tools/testing/nvdimm/dax_pmem_compat_test.c8
-rw-r--r--tools/testing/nvdimm/dax_pmem_core_test.c8
-rw-r--r--tools/testing/nvdimm/dimm_devs.c30
-rw-r--r--tools/testing/nvdimm/pmem-dax.c4
-rw-r--r--tools/testing/nvdimm/test/Kbuild6
-rw-r--r--tools/testing/nvdimm/test/iomap.c59
-rw-r--r--tools/testing/nvdimm/test/ndtest.c989
-rw-r--r--tools/testing/nvdimm/test/ndtest.h109
-rw-r--r--tools/testing/nvdimm/test/nfit.c553
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h6
-rw-r--r--tools/testing/radix-tree/.gitignore3
-rw-r--r--tools/testing/radix-tree/Makefile29
-rw-r--r--tools/testing/radix-tree/generated/autoconf.h1
-rw-r--r--tools/testing/radix-tree/idr-test.c50
-rw-r--r--tools/testing/radix-tree/linux.c176
-rw-r--r--tools/testing/radix-tree/linux/gfp.h33
-rw-r--r--tools/testing/radix-tree/linux/kernel.h2
-rw-r--r--tools/testing/radix-tree/linux/local_lock.h8
-rw-r--r--tools/testing/radix-tree/linux/lockdep.h5
-rw-r--r--tools/testing/radix-tree/linux/maple_tree.h7
-rw-r--r--tools/testing/radix-tree/linux/slab.h27
-rw-r--r--tools/testing/radix-tree/maple.c35870
-rw-r--r--tools/testing/radix-tree/multiorder.c2
-rw-r--r--tools/testing/radix-tree/test.h4
-rw-r--r--tools/testing/radix-tree/trace/events/maple_tree.h5
-rw-r--r--tools/testing/radix-tree/xarray.c2
-rw-r--r--tools/testing/scatterlist/Makefile3
-rw-r--r--tools/testing/scatterlist/linux/mm.h40
-rw-r--r--tools/testing/scatterlist/main.c128
-rw-r--r--tools/testing/selftests/.gitignore1
-rw-r--r--tools/testing/selftests/Makefile154
-rw-r--r--tools/testing/selftests/alsa/.gitignore2
-rw-r--r--tools/testing/selftests/alsa/Makefile27
-rw-r--r--tools/testing/selftests/alsa/alsa-local.h27
-rw-r--r--tools/testing/selftests/alsa/conf.c470
-rw-r--r--tools/testing/selftests/alsa/conf.d/Lenovo_ThinkPad_P1_Gen2.conf84
-rw-r--r--tools/testing/selftests/alsa/mixer-test.c1117
-rw-r--r--tools/testing/selftests/alsa/pcm-test.c629
-rw-r--r--tools/testing/selftests/alsa/pcm-test.conf63
-rw-r--r--tools/testing/selftests/amd-pstate/Makefile18
-rwxr-xr-xtools/testing/selftests/amd-pstate/basic.sh38
-rw-r--r--tools/testing/selftests/amd-pstate/config1
-rwxr-xr-xtools/testing/selftests/amd-pstate/gitsource.sh354
-rwxr-xr-xtools/testing/selftests/amd-pstate/run.sh387
-rwxr-xr-xtools/testing/selftests/amd-pstate/tbench.sh339
-rw-r--r--tools/testing/selftests/android/Makefile39
-rw-r--r--tools/testing/selftests/android/config5
-rw-r--r--tools/testing/selftests/android/ion/Makefile20
-rw-r--r--tools/testing/selftests/android/ion/README101
-rw-r--r--tools/testing/selftests/android/ion/ion.h134
-rwxr-xr-xtools/testing/selftests/android/ion/ion_test.sh58
-rw-r--r--tools/testing/selftests/android/ion/ionapp_export.c127
-rw-r--r--tools/testing/selftests/android/ion/ionapp_import.c79
-rw-r--r--tools/testing/selftests/android/ion/ionmap_test.c136
-rw-r--r--tools/testing/selftests/android/ion/ionutils.c253
-rw-r--r--tools/testing/selftests/android/ion/ionutils.h55
-rw-r--r--tools/testing/selftests/android/ion/ipcsocket.c227
-rw-r--r--tools/testing/selftests/android/ion/ipcsocket.h35
-rwxr-xr-xtools/testing/selftests/android/run.sh3
-rw-r--r--tools/testing/selftests/arm64/Makefile13
-rw-r--r--tools/testing/selftests/arm64/abi/.gitignore4
-rw-r--r--tools/testing/selftests/arm64/abi/Makefile15
-rw-r--r--tools/testing/selftests/arm64/abi/hwcap.c483
-rw-r--r--tools/testing/selftests/arm64/abi/ptrace.c241
-rw-r--r--tools/testing/selftests/arm64/abi/syscall-abi-asm.S362
-rw-r--r--tools/testing/selftests/arm64/abi/syscall-abi.c557
-rw-r--r--tools/testing/selftests/arm64/abi/syscall-abi.h15
-rw-r--r--tools/testing/selftests/arm64/abi/tpidr2.c298
-rw-r--r--tools/testing/selftests/arm64/bti/.gitignore2
-rw-r--r--tools/testing/selftests/arm64/bti/Makefile61
-rw-r--r--tools/testing/selftests/arm64/bti/assembler.h80
-rw-r--r--tools/testing/selftests/arm64/bti/btitest.h23
-rw-r--r--tools/testing/selftests/arm64/bti/compiler.h21
-rw-r--r--tools/testing/selftests/arm64/bti/gen/.gitignore2
-rw-r--r--tools/testing/selftests/arm64/bti/signal.c37
-rw-r--r--tools/testing/selftests/arm64/bti/signal.h21
-rw-r--r--tools/testing/selftests/arm64/bti/start.S14
-rw-r--r--tools/testing/selftests/arm64/bti/syscall.S23
-rw-r--r--tools/testing/selftests/arm64/bti/system.c22
-rw-r--r--tools/testing/selftests/arm64/bti/system.h28
-rw-r--r--tools/testing/selftests/arm64/bti/test.c230
-rw-r--r--tools/testing/selftests/arm64/bti/teststubs.S39
-rw-r--r--tools/testing/selftests/arm64/bti/trampoline.S29
-rw-r--r--tools/testing/selftests/arm64/fp/.gitignore16
-rw-r--r--tools/testing/selftests/arm64/fp/Makefile50
-rw-r--r--tools/testing/selftests/arm64/fp/README100
-rw-r--r--tools/testing/selftests/arm64/fp/TODO7
-rw-r--r--tools/testing/selftests/arm64/fp/asm-offsets.h12
-rw-r--r--tools/testing/selftests/arm64/fp/asm-utils.S172
-rw-r--r--tools/testing/selftests/arm64/fp/assembler.h68
-rw-r--r--tools/testing/selftests/arm64/fp/fp-pidbench.S70
-rw-r--r--tools/testing/selftests/arm64/fp/fp-stress.c649
-rwxr-xr-xtools/testing/selftests/arm64/fp/fpsimd-stress60
-rw-r--r--tools/testing/selftests/arm64/fp/fpsimd-test.S332
-rw-r--r--tools/testing/selftests/arm64/fp/rdvl-sme.c14
-rw-r--r--tools/testing/selftests/arm64/fp/rdvl-sve.c14
-rw-r--r--tools/testing/selftests/arm64/fp/rdvl.S20
-rw-r--r--tools/testing/selftests/arm64/fp/rdvl.h9
-rw-r--r--tools/testing/selftests/arm64/fp/sme-inst.h71
-rw-r--r--tools/testing/selftests/arm64/fp/ssve-stress59
-rw-r--r--tools/testing/selftests/arm64/fp/sve-probe-vls.c63
-rw-r--r--tools/testing/selftests/arm64/fp/sve-ptrace.c764
-rwxr-xr-xtools/testing/selftests/arm64/fp/sve-stress59
-rw-r--r--tools/testing/selftests/arm64/fp/sve-test.S555
-rw-r--r--tools/testing/selftests/arm64/fp/vec-syscfg.c670
-rw-r--r--tools/testing/selftests/arm64/fp/vlset.c161
-rw-r--r--tools/testing/selftests/arm64/fp/za-fork-asm.S61
-rw-r--r--tools/testing/selftests/arm64/fp/za-fork.c100
-rw-r--r--tools/testing/selftests/arm64/fp/za-ptrace.c366
-rw-r--r--tools/testing/selftests/arm64/fp/za-stress59
-rw-r--r--tools/testing/selftests/arm64/fp/za-test.S397
-rw-r--r--tools/testing/selftests/arm64/fp/zt-ptrace.c365
-rw-r--r--tools/testing/selftests/arm64/fp/zt-test.S316
-rw-r--r--tools/testing/selftests/arm64/mte/.gitignore8
-rw-r--r--tools/testing/selftests/arm64/mte/Makefile41
-rw-r--r--tools/testing/selftests/arm64/mte/check_buffer_fill.c478
-rw-r--r--tools/testing/selftests/arm64/mte/check_child_memory.c198
-rw-r--r--tools/testing/selftests/arm64/mte/check_gcr_el1_cswitch.c143
-rw-r--r--tools/testing/selftests/arm64/mte/check_ksm_options.c166
-rw-r--r--tools/testing/selftests/arm64/mte/check_mmap_options.c265
-rw-r--r--tools/testing/selftests/arm64/mte/check_prctl.c119
-rw-r--r--tools/testing/selftests/arm64/mte/check_tags_inclusion.c200
-rw-r--r--tools/testing/selftests/arm64/mte/check_user_mem.c243
-rw-r--r--tools/testing/selftests/arm64/mte/mte_common_util.c368
-rw-r--r--tools/testing/selftests/arm64/mte/mte_common_util.h129
-rw-r--r--tools/testing/selftests/arm64/mte/mte_def.h60
-rw-r--r--tools/testing/selftests/arm64/mte/mte_helper.S130
-rw-r--r--tools/testing/selftests/arm64/pauth/.gitignore2
-rw-r--r--tools/testing/selftests/arm64/pauth/Makefile39
-rw-r--r--tools/testing/selftests/arm64/pauth/exec_target.c34
-rw-r--r--tools/testing/selftests/arm64/pauth/helper.c39
-rw-r--r--tools/testing/selftests/arm64/pauth/helper.h28
-rw-r--r--tools/testing/selftests/arm64/pauth/pac.c370
-rw-r--r--tools/testing/selftests/arm64/pauth/pac_corruptor.S19
-rw-r--r--tools/testing/selftests/arm64/signal/.gitignore6
-rw-r--r--tools/testing/selftests/arm64/signal/Makefile13
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals.c4
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals.h13
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.c126
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.h7
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/TODO1
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_magic.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_bad_size_for_magic0.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_duplicated_fpsimd.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_misaligned_sp.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_missing_fpsimd.c2
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sme_change_vl.c92
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/fake_sigreturn_sve_change_vl.c94
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sme_trap_no_sm.c38
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sme_trap_non_streaming.c45
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sme_trap_za.c36
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sme_vl.c68
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/ssve_regs.c132
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/ssve_za_regs.c161
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sve_regs.c121
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/sve_vl.c68
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/testcases.c171
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/testcases.h13
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/tpidr2_siginfo.c90
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/za_no_regs.c119
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/za_regs.c138
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/zt_no_regs.c51
-rw-r--r--tools/testing/selftests/arm64/signal/testcases/zt_regs.c85
-rw-r--r--tools/testing/selftests/arm64/tags/Makefile2
-rw-r--r--tools/testing/selftests/bpf/.gitignore31
-rw-r--r--tools/testing/selftests/bpf/DENYLIST7
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.aarch6485
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x28
-rw-r--r--tools/testing/selftests/bpf/Makefile533
-rw-r--r--tools/testing/selftests/bpf/Makefile.docs83
-rw-r--r--tools/testing/selftests/bpf/README.rst309
-rw-r--r--tools/testing/selftests/bpf/autoconf_helper.h9
-rw-r--r--tools/testing/selftests/bpf/bench.c689
-rw-r--r--tools/testing/selftests/bpf/bench.h105
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c487
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c95
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c283
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_bpf_loop.c106
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_count.c91
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage.c290
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage_create.c264
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c269
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_rename.c178
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_ringbufs.c566
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_strncmp.c163
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c328
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_bloom_filter_map.sh45
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_bpf_hashmap_full_update.sh11
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_bpf_loop.sh15
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_local_storage.sh24
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_local_storage_rcu_tasks_trace.sh11
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_rename.sh9
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh49
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_strncmp.sh12
-rwxr-xr-xtools/testing/selftests/bpf/benchs/run_bench_trigger.sh9
-rw-r--r--tools/testing/selftests/bpf/benchs/run_common.sh92
-rw-r--r--tools/testing/selftests/bpf/bpf_experimental.h134
-rw-r--r--tools/testing/selftests/bpf/bpf_kfuncs.h38
-rw-r--r--tools/testing/selftests/bpf/bpf_legacy.h40
-rw-r--r--tools/testing/selftests/bpf/bpf_rlimit.h28
-rw-r--r--tools/testing/selftests/bpf/bpf_sockopt_helpers.h21
-rw-r--r--tools/testing/selftests/bpf/bpf_tcp_helpers.h68
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/.gitignore6
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/Makefile20
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod-events.h57
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c332
-rw-r--r--tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h31
-rw-r--r--tools/testing/selftests/bpf/bpf_util.h19
-rw-r--r--tools/testing/selftests/bpf/btf_helpers.c292
-rw-r--r--tools/testing/selftests/bpf/btf_helpers.h19
-rw-r--r--tools/testing/selftests/bpf/cap_helpers.c67
-rw-r--r--tools/testing/selftests/bpf/cap_helpers.h19
-rw-r--r--tools/testing/selftests/bpf/cgroup_getset_retval_hooks.h25
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.c381
-rw-r--r--tools/testing/selftests/bpf/cgroup_helpers.h27
-rw-r--r--tools/testing/selftests/bpf/config102
-rw-r--r--tools/testing/selftests/bpf/config.aarch64183
-rw-r--r--tools/testing/selftests/bpf/config.s390x147
-rw-r--r--tools/testing/selftests/bpf/config.x86_64248
l---------tools/testing/selftests/bpf/disasm.c1
l---------tools/testing/selftests/bpf/disasm.h1
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.c18
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h7
-rw-r--r--tools/testing/selftests/bpf/get_cgroup_id_user.c38
-rwxr-xr-xtools/testing/selftests/bpf/ima_setup.sh156
l---------tools/testing/selftests/bpf/json_writer.c1
l---------tools/testing/selftests/bpf/json_writer.h1
-rw-r--r--tools/testing/selftests/bpf/map_tests/array_map_batch_ops.c118
-rw-r--r--tools/testing/selftests/bpf/map_tests/htab_map_batch_ops.c15
-rw-r--r--tools/testing/selftests/bpf/map_tests/lpm_trie_map_batch_ops.c155
-rw-r--r--tools/testing/selftests/bpf/map_tests/map_in_map_batch_ops.c252
-rw-r--r--tools/testing/selftests/bpf/map_tests/sk_storage_map.c88
-rw-r--r--tools/testing/selftests/bpf/map_tests/task_storage_map.c127
-rw-r--r--tools/testing/selftests/bpf/netcnt_common.h38
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c429
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h69
-rw-r--r--tools/testing/selftests/bpf/prog_tests/access_variable_array.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/align.c (renamed from tools/testing/selftests/bpf/test_align.c)345
-rw-r--r--tools/testing/selftests/bpf/prog_tests/arg_parsing.c107
-rw-r--r--tools/testing/selftests/bpf/prog_tests/atomic_bounds.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/atomics.c198
-rw-r--r--tools/testing/selftests/bpf/prog_tests/attach_probe.c354
-rw-r--r--tools/testing/selftests/bpf/prog_tests/autoattach.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/autoload.c41
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bind_perm.c95
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bloom_filter_map.c213
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_cookie.c528
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c1716
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt.c226
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter_setsockopt_unix.c100
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_loop.c207
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c230
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_nf.c181
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c128
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c389
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c268
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c (renamed from tools/testing/selftests/bpf/test_btf.c)2186
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dedup_split.c453
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c817
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_endian.c99
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c164
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_module.c34
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c221
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_split.c97
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_tag.c249
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_write.c506
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cb_refs.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cg_storage_multi.c393
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_autodetach.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_multi.c41
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_attach_override.c6
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c529
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_hierarchical_stats.c339
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_iter.c300
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_link.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_skb_sk_lookup.c91
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c79
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c109
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cgrp_local_storage.c265
-rw-r--r--tools/testing/selftests/bpf/prog_tests/check_mtu.c206
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cls_redirect.c524
-rw-r--r--tools/testing/selftests/bpf/prog_tests/connect_force_port.c167
-rw-r--r--tools/testing/selftests/bpf/prog_tests/connect_ping.c178
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_autosize.c223
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_extern.c17
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_kern.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_kern_overflow.c13
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_read_macros.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c783
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_retro.c38
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cpumask.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c917
-rw-r--r--tools/testing/selftests/bpf/prog_tests/custom_sec_handlers.c176
-rw-r--r--tools/testing/selftests/bpf/prog_tests/d_path.c191
-rw-r--r--tools/testing/selftests/bpf/prog_tests/decap_sanity.c77
-rw-r--r--tools/testing/selftests/bpf/prog_tests/deny_namespace.c102
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dummy_st_ops.c161
-rw-r--r--tools/testing/selftests/bpf/prog_tests/dynptr.c102
-rw-r--r--tools/testing/selftests/bpf/prog_tests/empty_skb.c141
-rw-r--r--tools/testing/selftests/bpf/prog_tests/enable_stats.c45
-rw-r--r--tools/testing/selftests/bpf/prog_tests/endian.c53
-rw-r--r--tools/testing/selftests/bpf/prog_tests/exhandler.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_fexit.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fentry_test.c63
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c563
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_sleep.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_stress.c79
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_test.c63
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fib_lookup.c187
-rw-r--r--tools/testing/selftests/bpf/prog_tests/find_vma.c127
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector.c252
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_load_bytes.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/flow_dissector_reattach.c626
-rw-r--r--tools/testing/selftests/bpf/prog_tests/for_each.c154
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c130
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_func_args_test.c42
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c42
-rw-r--r--tools/testing/selftests/bpf/prog_tests/get_stackid_cannot_attach.c89
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data.c56
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data_init.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_func_args.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/hash_large_key.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/hashmap.c459
-rw-r--r--tools/testing/selftests/bpf/prog_tests/helper_restricted.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/htab_reuse.c101
-rw-r--r--tools/testing/selftests/bpf/prog_tests/htab_update.c126
-rw-r--r--tools/testing/selftests/bpf/prog_tests/iters.c106
-rw-r--r--tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/jit_probe_mem.c28
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfree_skb.c93
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_call.c322
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kfunc_dynptr_param.c119
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c491
-rw-r--r--tools/testing/selftests/bpf/prog_tests/kprobe_multi_testmod_test.c89
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms.c61
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_btf.c191
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_module.c69
-rw-r--r--tools/testing/selftests/bpf/prog_tests/l4lb_all.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/legacy_printk.c65
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_get_fd_by_id_opts.c87
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_probes.c124
-rw-r--r--tools/testing/selftests/bpf/prog_tests/libbpf_str.c215
-rw-r--r--tools/testing/selftests/bpf/prog_tests/link_pinning.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_funcs.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_list.c801
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_maps.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/linked_vars.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/load_bytes_relative.c71
-rw-r--r--tools/testing/selftests/bpf/prog_tests/local_kptr_stash.c60
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_buf.c276
-rw-r--r--tools/testing/selftests/bpf/prog_tests/log_fixup.c181
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lookup_and_delete.c291
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lookup_key.c112
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lru_bug.c21
-rw-r--r--tools/testing/selftests/bpf/prog_tests/lsm_cgroup.c323
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_init.c214
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_kptr.c163
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lock.c23
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c58
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_ops.c162
-rw-r--r--tools/testing/selftests/bpf/prog_tests/map_ptr.c45
-rw-r--r--tools/testing/selftests/bpf/prog_tests/metadata.c141
-rw-r--r--tools/testing/selftests/bpf/prog_tests/migrate_reuseport.c559
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mmap.c118
-rw-r--r--tools/testing/selftests/bpf/prog_tests/modify_return.c36
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_attach.c115
-rw-r--r--tools/testing/selftests/bpf/prog_tests/module_fentry_shadow.c128
-rw-r--r--tools/testing/selftests/bpf/prog_tests/mptcp.c189
-rw-r--r--tools/testing/selftests/bpf/prog_tests/nested_trust.c12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/netcnt.c82
-rw-r--r--tools/testing/selftests/bpf/prog_tests/netns_cookie.c80
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ns_current_pid_tgid.c122
-rw-r--r--tools/testing/selftests/bpf/prog_tests/obj_name.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/parse_tcp_hdr_opt.c93
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c66
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_branches.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_buffer.c118
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c116
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_link.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pinning.c64
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_access.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pkt_md_access.c19
-rw-r--r--tools/testing/selftests/bpf/prog_tests/probe_read_user_str.c71
-rw-r--r--tools/testing/selftests/bpf/prog_tests/probe_user.c43
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_array_init.c32
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_opts.c77
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_run_xattr.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c56
-rw-r--r--tools/testing/selftests/bpf/prog_tests/queue_stack_map.c59
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c86
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_reject_nbd_invalid.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_writable_test_run.c48
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rbtree.c142
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rcu_read_lock.c148
-rw-r--r--tools/testing/selftests/bpf/prog_tests/rdonly_maps.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/recursion.c41
-rw-r--r--tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c47
-rw-r--r--tools/testing/selftests/bpf/prog_tests/resolve_btfids.c167
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf.c363
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ringbuf_multi.c132
-rw-r--r--tools/testing/selftests/bpf/prog_tests/section_names.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/select_reuseport.c81
-rw-r--r--tools/testing/selftests/bpf/prog_tests/send_signal.c89
-rw-r--r--tools/testing/selftests/bpf/prog_tests/send_signal_sched_switch.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/setget_sockopt.c198
-rw-r--r--tools/testing/selftests/bpf/prog_tests/signal_pending.c26
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_lookup.c1413
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_storage_tracing.c135
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_ctx.c79
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_helpers.c30
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c45
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skc_to_unix_sock.c54
-rw-r--r--tools/testing/selftests/bpf/prog_tests/skeleton.c97
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf.c127
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf_btf.c60
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_fields.c402
-rw-r--r--tools/testing/selftests/bpf/prog_tests/socket_cookie.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c332
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_ktls.c123
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_listen.c748
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt.c23
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_multi.c55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c70
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_sk.c159
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spin_lock.c142
-rw-r--r--tools/testing/selftests/bpf/prog_tests/spinlock.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stack_var_off.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c27
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c46
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map.c10
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c13
-rw-r--r--tools/testing/selftests/bpf/prog_tests/stacktrace_map_skip.c63
-rw-r--r--tools/testing/selftests/bpf/prog_tests/static_linked.c35
-rw-r--r--tools/testing/selftests/bpf/prog_tests/subprogs.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/subskeleton.c78
-rw-r--r--tools/testing/selftests/bpf/prog_tests/syscall.c55
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c660
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_kfunc.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_local_storage.c246
-rw-r--r--tools/testing/selftests/bpf/prog_tests/task_pt_regs.c50
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_bpf.c395
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tc_redirect.c1145
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_estats.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c563
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_rtt.c150
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcpbpf_user.c137
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpf_syscall_macro.c90
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpffs.c159
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bprm_opts.c108
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_global_funcs.c111
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_ima.c240
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_local_storage.c172
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c80
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_overhead.c36
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_profiler.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_strncmp.c148
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_tunnel.c570
-rw-r--r--tools/testing/selftests/bpf/prog_tests/time_tai.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer.c58
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_crash.c32
-rw-r--r--tools/testing/selftests/bpf/prog_tests/timer_mim.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tp_attach_query.c20
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_ext.c115
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_printk.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_vprintk.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tracing_struct.c66
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trampoline_count.c155
-rw-r--r--tools/testing/selftests/bpf/prog_tests/type_cast.c114
-rw-r--r--tools/testing/selftests/bpf/prog_tests/udp_limit.c70
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uninit_stack.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c312
-rw-r--r--tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/usdt.c420
-rw-r--r--tools/testing/selftests/bpf/prog_tests/user_ringbuf.c696
-rw-r--r--tools/testing/selftests/bpf/prog_tests/varlen.c75
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verif_stats.c29
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier.c216
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verifier_log.c450
-rw-r--r--tools/testing/selftests/bpf/prog_tests/verify_pkcs7_sig.c402
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp.c44
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_frags.c146
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c287
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_attach.c70
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bonding.c570
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_bpf2bpf.c147
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_context_test_run.c105
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_cpumap_attach.c121
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_devmap_attach.c137
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c248
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_info.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_link.c152
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_metadata.c406
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_noinline.c76
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_perf.c23
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_synproxy.c178
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdpwall.c15
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xfrm_info.c347
-rw-r--r--tools/testing/selftests/bpf/progs/atomic_bounds.c24
-rw-r--r--tools/testing/selftests/bpf/progs/atomics.c170
-rw-r--r--tools/testing/selftests/bpf/progs/bench_local_storage_create.c82
-rw-r--r--tools/testing/selftests/bpf/progs/bind4_prog.c159
-rw-r--r--tools/testing/selftests/bpf/progs/bind6_prog.c176
-rw-r--r--tools/testing/selftests/bpf/progs/bind_perm.c45
-rw-r--r--tools/testing/selftests/bpf/progs/bloom_filter_bench.c154
-rw-r--r--tools/testing/selftests/bpf/progs/bloom_filter_map.c83
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c55
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c58
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp_release.c26
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c52
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_hashmap_full_update_bench.c40
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_hashmap_lookup.c63
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter.h167
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c59
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c123
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_link.c21
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_map.c27
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c46
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_hash_map.c50
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c65
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_map.c52
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_ipv6_route.c56
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_ksym.c73
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_netlink.c63
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_setsockopt.c71
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_setsockopt_unix.c60
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c59
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task.c88
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c49
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_file.c40
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_stack.c63
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_vma.c62
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c233
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_tcp6.c249
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern1.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern2.c4
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern3.c18
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern4.c52
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern5.c35
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern6.c21
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_test_kern_common.h22
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_udp4.c70
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_udp6.c78
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_unix.c80
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_vma_offset.c37
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_loop.c225
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_loop_bench.c27
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_misc.h130
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_mod_race.c100
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_syscall_macro.c110
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tcp_nogpl.c19
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_tracing_net.h101
-rw-r--r--tools/testing/selftests/bpf/progs/bprm_opts.c34
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___diff.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___err_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enum64val___val3_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enumval.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___diff.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___err_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___val3_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_size___diff_offs.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_size___err_ambiguous.c4
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___all_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___fn_wrong_args.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___incompat.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_id.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_id___missing_targets.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf_data.c50
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_bitfields.c12
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_packing.c84
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c173
-rw-r--r--tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c57
-rw-r--r--tools/testing/selftests/bpf/progs/btf_ptr.h27
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag.c25
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag_percpu.c67
-rw-r--r--tools/testing/selftests/bpf/progs/btf_type_tag_user.c40
-rw-r--r--tools/testing/selftests/bpf/progs/cb_refs.c115
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi.h13
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_egress_only.c33
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_isolated.c57
-rw-r--r--tools/testing/selftests/bpf/progs/cg_storage_multi_shared.c57
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c45
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_hooks.c16
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c52
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_hierarchical_stats.c155
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_iter.c39
-rw-r--r--tools/testing/selftests/bpf/progs/cgroup_skb_sk_lookup_kern.c95
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h79
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c247
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c223
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_attach_cgroup.c100
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_negative.c26
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_recursion.c70
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_sleepable.c79
-rw-r--r--tools/testing/selftests/bpf/progs/cgrp_ls_tp_btf.c88
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_dropper.c26
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_prog.c156
-rw-r--r--tools/testing/selftests/bpf/progs/connect6_prog.c2
-rw-r--r--tools/testing/selftests/bpf/progs/connect_force_port4.c90
-rw-r--r--tools/testing/selftests/bpf/progs/connect_force_port6.c101
-rw-r--r--tools/testing/selftests/bpf/progs/connect_ping.c53
-rw-r--r--tools/testing/selftests/bpf/progs/core_kern.c120
-rw-r--r--tools/testing/selftests/bpf/progs/core_kern_overflow.c22
-rw-r--r--tools/testing/selftests/bpf/progs/core_reloc_types.h651
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_common.h119
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_failure.c192
-rw-r--r--tools/testing/selftests/bpf/progs/cpumask_success.c428
-rw-r--r--tools/testing/selftests/bpf/progs/decap_sanity.c68
-rw-r--r--tools/testing/selftests/bpf/progs/dev_cgroup.c1
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_fail.c27
-rw-r--r--tools/testing/selftests/bpf/progs/dummy_st_ops_success.c47
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_fail.c1380
-rw-r--r--tools/testing/selftests/bpf/progs/dynptr_success.c209
-rw-r--r--tools/testing/selftests/bpf/progs/empty_skb.c37
-rw-r--r--tools/testing/selftests/bpf/progs/err.h18
-rw-r--r--tools/testing/selftests/bpf/progs/exhandler_kern.c52
-rw-r--r--tools/testing/selftests/bpf/progs/fentry_test.c22
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c37
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_sleep.c32
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_test.c22
-rw-r--r--tools/testing/selftests/bpf/progs/fib_lookup.c22
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma.c69
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma_fail1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/find_vma_fail2.c29
-rw-r--r--tools/testing/selftests/bpf/progs/fmod_ret_freplace.c14
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_array_map_elem.c73
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_hash_map_elem.c95
-rw-r--r--tools/testing/selftests/bpf/progs/for_each_map_elem_write_key.c27
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_attach_probe.c40
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_cls_redirect.c34
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_connect4.c18
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c19
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_get_constant.c15
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_global_func.c18
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_progmap.c24
-rw-r--r--tools/testing/selftests/bpf/progs/get_branch_snapshot.c40
-rw-r--r--tools/testing/selftests/bpf/progs/get_cgroup_id_kern.c1
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_args_test.c123
-rw-r--r--tools/testing/selftests/bpf/progs/get_func_ip_test.c85
-rw-r--r--tools/testing/selftests/bpf/progs/htab_reuse.c19
-rw-r--r--tools/testing/selftests/bpf/progs/htab_update.c29
-rw-r--r--tools/testing/selftests/bpf/progs/ima.c103
-rw-r--r--tools/testing/selftests/bpf/progs/iters.c723
-rw-r--r--tools/testing/selftests/bpf/progs/iters_looping.c163
-rw-r--r--tools/testing/selftests/bpf/progs/iters_num.c242
-rw-r--r--tools/testing/selftests/bpf/progs/iters_state_safety.c426
-rw-r--r--tools/testing/selftests/bpf/progs/iters_testmod_seq.c79
-rw-r--r--tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c42
-rw-r--r--tools/testing/selftests/bpf/progs/jit_probe_mem.c61
-rw-r--r--tools/testing/selftests/bpf/progs/kfree_skb.c8
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_destructive.c14
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_fail.c160
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_race.c14
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test.c195
-rw-r--r--tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c42
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi.c162
-rw-r--r--tools/testing/selftests/bpf/progs/kprobe_multi_empty.c12
-rw-r--r--tools/testing/selftests/bpf/progs/ksym_race.c13
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs1.c89
-rw-r--r--tools/testing/selftests/bpf/progs/linked_funcs2.c89
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.c381
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list.h56
-rw-r--r--tools/testing/selftests/bpf/progs/linked_list_fail.c610
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps1.c82
-rw-r--r--tools/testing/selftests/bpf/progs/linked_maps2.c76
-rw-r--r--tools/testing/selftests/bpf/progs/linked_vars1.c54
-rw-r--r--tools/testing/selftests/bpf/progs/linked_vars2.c55
-rw-r--r--tools/testing/selftests/bpf/progs/load_bytes_relative.c48
-rw-r--r--tools/testing/selftests/bpf/progs/local_kptr_stash.c108
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage.c226
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage_bench.c104
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage_rcu_tasks_trace_bench.c67
-rw-r--r--tools/testing/selftests/bpf/progs/loop3.c4
-rw-r--r--tools/testing/selftests/bpf/progs/loop5.c1
-rw-r--r--tools/testing/selftests/bpf/progs/loop6.c102
-rw-r--r--tools/testing/selftests/bpf/progs/lru_bug.c49
-rw-r--r--tools/testing/selftests/bpf/progs/lsm.c145
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_cgroup.c188
-rw-r--r--tools/testing/selftests/bpf/progs/lsm_cgroup_nonvoid.c14
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr.c533
-rw-r--r--tools/testing/selftests/bpf/progs/map_kptr_fail.c390
-rw-r--r--tools/testing/selftests/bpf/progs/map_ptr_kern.c698
-rw-r--r--tools/testing/selftests/bpf/progs/metadata_unused.c15
-rw-r--r--tools/testing/selftests/bpf/progs/metadata_used.c15
-rw-r--r--tools/testing/selftests/bpf/progs/mptcp_sock.c88
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_common.h12
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_failure.c33
-rw-r--r--tools/testing/selftests/bpf/progs/nested_trust_success.c19
-rw-r--r--tools/testing/selftests/bpf/progs/netcnt_prog.c10
-rw-r--r--tools/testing/selftests/bpf/progs/netif_receive_skb.c255
-rw-r--r--tools/testing/selftests/bpf/progs/netns_cookie_prog.c84
-rw-r--r--tools/testing/selftests/bpf/progs/perf_event_stackmap.c59
-rw-r--r--tools/testing/selftests/bpf/progs/perfbuf_bench.c33
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.h177
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h990
-rw-r--r--tools/testing/selftests/bpf/progs/profiler1.c5
-rw-r--r--tools/testing/selftests/bpf/progs/profiler2.c6
-rw-r--r--tools/testing/selftests/bpf/progs/profiler3.c6
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h94
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600.c11
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_bpf_loop.c6
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_iter.c7
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf600_nounroll.c3
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf_subprogs.c5
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree.c246
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_btf_fail__add_wrong_type.c52
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_btf_fail__wrong_node_type.c38
-rw-r--r--tools/testing/selftests/bpf/progs/rbtree_fail.c303
-rw-r--r--tools/testing/selftests/bpf/progs/rcu_read_lock.c321
-rw-r--r--tools/testing/selftests/bpf/progs/rcu_tasks_trace_gp.c36
-rw-r--r--tools/testing/selftests/bpf/progs/read_bpf_task_storage_busy.c38
-rw-r--r--tools/testing/selftests/bpf/progs/recursion.c43
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg4_prog.c40
-rw-r--r--tools/testing/selftests/bpf/progs/recvmsg6_prog.c46
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr.c406
-rw-r--r--tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c72
-rw-r--r--tools/testing/selftests/bpf/progs/ringbuf_bench.c61
-rw-r--r--tools/testing/selftests/bpf/progs/sample_map_ret0.c24
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg4_prog.c7
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c7
-rw-r--r--tools/testing/selftests/bpf/progs/setget_sockopt.c403
-rw-r--r--tools/testing/selftests/bpf/progs/skb_load_bytes.c19
-rw-r--r--tools/testing/selftests/bpf/progs/skb_pkt_end.c53
-rw-r--r--tools/testing/selftests/bpf/progs/socket_cookie_prog.c47
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_parse_prog.c4
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_tcp_msg_prog.c14
-rw-r--r--tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c18
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_inherit.c1
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_multi.c5
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c39
-rw-r--r--tools/testing/selftests/bpf/progs/sockopt_sk.c123
-rw-r--r--tools/testing/selftests/bpf/progs/stacktrace_map_skip.c68
-rw-r--r--tools/testing/selftests/bpf/progs/strncmp_bench.c50
-rw-r--r--tools/testing/selftests/bpf/progs/strncmp_test.c54
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h112
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta_bpf_loop.c9
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta_subprogs.c10
-rw-r--r--tools/testing/selftests/bpf/progs/syscall.c121
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall1.c31
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall2.c37
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall3.c13
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall4.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall5.c9
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall6.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c37
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c40
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c71
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c78
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf6.c45
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_common.h76
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_failure.c326
-rw-r--r--tools/testing/selftests/bpf/progs/task_kfunc_success.c265
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_storage.c64
-rw-r--r--tools/testing/selftests/bpf/progs/task_local_storage_exit_creds.c35
-rw-r--r--tools/testing/selftests/bpf/progs/task_ls_recursion.c107
-rw-r--r--tools/testing/selftests/bpf/progs/task_storage_nodeadlock.c47
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_incompl_cong_ops.c35
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_unsupp_cong_op.c21
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_update.c80
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_ca_write_sk_pacing.c71
-rw-r--r--tools/testing/selftests/bpf/progs/tcp_rtt.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_access_variable_array.c19
-rw-r--r--tools/testing/selftests/bpf/progs/test_adjust_tail.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_kprobe_sleepable.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe.c119
-rw-r--r--tools/testing/selftests/bpf/progs/test_attach_probe_manual.c53
-rw-r--r--tools/testing/selftests/bpf/progs/test_autoattach.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_autoload.c40
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_cookie.c121
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf.c237
-rw-r--r--tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c148
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_decl_tag.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_haskv.c58
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_map_in_map.c150
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_newkv.c35
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_nokv.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c174
-rw-r--r--tools/testing/selftests/bpf/progs/test_cgroup_link.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_check_mtu.c290
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c1075
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.h54
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_dynptr.c979
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_autosize.c182
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_extern.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_read_macros.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_bitfields_probed.c1
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_enum64val.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_enumval.c72
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_existence.c11
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_mods.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_module.c104
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_size.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c157
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c115
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_retro.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_custom_sec_handlers.c63
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path.c65
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path_check_rdonly_mem.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path_check_types.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_deny_namespace.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_enable_stats.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_endian.c37
-rw-r--r--tools/testing/selftests/bpf/progs/test_get_stack_rawtp.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_data.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func1.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func10.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func11.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func12.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func13.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func14.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func15.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func16.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func17.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func2.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func3.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func4.c55
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func5.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func6.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func7.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func8.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func9.c134
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_args.c91
-rw-r--r--tools/testing/selftests/bpf/progs/test_global_func_ctx_args.c104
-rw-r--r--tools/testing/selftests/bpf/progs/test_hash_large_key.c44
-rw-r--r--tools/testing/selftests/bpf/progs/test_helper_restricted.c123
-rw-r--r--tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c86
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_btf.c55
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_btf_null_check.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_btf_write_check.c44
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_module.c50
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_weak.c71
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c45
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline_dynptr.c487
-rw-r--r--tools/testing/selftests/bpf/progs/test_legacy_printk.c73
-rw-r--r--tools/testing/selftests/bpf/progs/test_libbpf_get_fd_by_id_opts.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_log_buf.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_log_fixup.c74
-rw-r--r--tools/testing/selftests/bpf/progs/test_lookup_and_delete.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_lookup_key.c46
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map.c13
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_in_map_invalid.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_init.c33
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lock.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c76
-rw-r--r--tools/testing/selftests/bpf/progs/test_map_ops.c138
-rw-r--r--tools/testing/selftests/bpf/progs/test_migrate_reuseport.c135
-rw-r--r--tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c329
-rw-r--r--tools/testing/selftests/bpf/progs/test_mmap.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_module_attach.c119
-rw-r--r--tools/testing/selftests/bpf/progs/test_ns_current_pid_tgid.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_obj_id.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_overhead.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt.c118
-rw-r--r--tools/testing/selftests/bpf/progs/test_parse_tcp_hdr_opt_dynptr.c114
-rw-r--r--tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c38
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_buffer.c26
-rw-r--r--tools/testing/selftests/bpf/progs/test_perf_link.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_pinning_invalid.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_md_access.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_probe_read_user_str.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_probe_user.c43
-rw-r--r--tools/testing/selftests/bpf/progs/test_prog_array_init.c39
-rw-r--r--tools/testing/selftests/bpf/progs/test_queue_stack_map.h4
-rw-r--r--tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_rdonly_maps.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf.c77
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_map_key.c71
-rw-r--r--tools/testing/selftests/bpf/progs/test_ringbuf_multi.c88
-rw-r--r--tools/testing/selftests/bpf/progs/test_select_reuseport_kern.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_send_signal_kern.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign.c104
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup.c660
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c42
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_storage_trace_itself.c29
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_storage_tracing.c111
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_cgroup_id_kern.c14
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_ctx.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_skb_helpers.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_skc_to_unix_sock.c40
-rw-r--r--tools/testing/selftests/bpf/progs/test_skeleton.c62
-rw-r--r--tools/testing/selftests/bpf/progs/test_skmsg_load_helpers.c47
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf.c77
-rw-r--r--tools/testing/selftests/bpf/progs/test_snprintf_single.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields.c (renamed from tools/testing/selftests/bpf/progs/test_sock_fields_kern.c)227
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_invalid_update.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.h (renamed from tools/testing/selftests/bpf/test_sockmap_kern.h)263
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_listen.c29
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_progs_query.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_skb_verdict_attach.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_update.c48
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock.c7
-rw-r--r--tools/testing/selftests/bpf/progs/test_spin_lock_fail.c204
-rw-r--r--tools/testing/selftests/bpf/progs/test_stack_var_off.c51
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_build_id.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_map.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked1.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_static_linked2.c31
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs.c124
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs_unused.c21
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton.c28
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton_lib.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_subskeleton_lib2.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_task_pt_regs.c36
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_bpf.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_dtime.c397
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_edt.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh.c136
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh_fib.c158
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_peer.c63
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_tunnel.c205
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_check_syncookie_kern.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_estats.c3
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c626
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpbpf_kern.c159
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcpnotify_kern.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_time_tai.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_trace_ext.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_trace_ext_tracing.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_tracepoint.c9
-rw-r--r--tools/testing/selftests/bpf/progs/test_trampoline_count.c16
-rw-r--r--tools/testing/selftests/bpf/progs/test_tunnel_kern.c641
-rw-r--r--tools/testing/selftests/bpf/progs/test_unpriv_bpf_disabled.c83
-rw-r--r--tools/testing/selftests/bpf/progs/test_uprobe_autoattach.c117
-rw-r--r--tools/testing/selftests/bpf/progs/test_urandom_usdt.c70
-rw-r--r--tools/testing/selftests/bpf/progs/test_usdt.c96
-rw-r--r--tools/testing/selftests/bpf/progs/test_usdt_multispec.c30
-rw-r--r--tools/testing/selftests/bpf/progs/test_user_ringbuf.h35
-rw-r--r--tools/testing/selftests/bpf/progs/test_varlen.c163
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale1.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale2.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_verif_scale3.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_verify_pkcs7_sig.c94
-rw-r--r--tools/testing/selftests/bpf/progs/test_vmlinux.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_grow.c41
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_adjust_tail_shrink.c52
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_bpf2bpf.c8
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_context_test_run.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_devmap_helpers.c22
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_do_redirect.c116
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_dynptr.c255
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_link.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_loop.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c113
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_redirect.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_update_frags.c42
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_vlan.c17
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_frags_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_cpumap_helpers.c42
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_devmap_frags_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_with_devmap_helpers.c50
-rw-r--r--tools/testing/selftests/bpf/progs/timer.c331
-rw-r--r--tools/testing/selftests/bpf/progs/timer_crash.c54
-rw-r--r--tools/testing/selftests/bpf/progs/timer_mim.c88
-rw-r--r--tools/testing/selftests/bpf/progs/timer_mim_reject.c74
-rw-r--r--tools/testing/selftests/bpf/progs/trace_dummy_st_ops.c21
-rw-r--r--tools/testing/selftests/bpf/progs/trace_printk.c22
-rw-r--r--tools/testing/selftests/bpf/progs/trace_vprintk.c34
-rw-r--r--tools/testing/selftests/bpf/progs/tracing_struct.c133
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c62
-rw-r--r--tools/testing/selftests/bpf/progs/twfw.c58
-rw-r--r--tools/testing/selftests/bpf/progs/type_cast.c82
-rw-r--r--tools/testing/selftests/bpf/progs/udp_limit.c59
-rw-r--r--tools/testing/selftests/bpf/progs/uninit_stack.c87
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_fail.c223
-rw-r--r--tools/testing/selftests/bpf/progs/user_ringbuf_success.c212
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_and.c107
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_array_access.c529
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_basic_stack.c100
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds.c1076
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c171
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c639
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c554
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_bpf_get_stack.c124
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_btf_ctx_access.c32
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cfg.c100
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_inv_retcode.c89
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_skb.c227
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_cgroup_storage.c308
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_const_or.c82
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ctx.c221
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ctx_sk_msg.c228
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_d_path.c48
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_direct_packet_access.c803
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_direct_stack_access_wraparound.c56
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_div0.c213
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_div_overflow.c144
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_access_var_len.c825
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_packet_access.c550
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_restricted.c279
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_helper_value_access.c1245
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_int_ptr.c157
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c213
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ld_ind.c110
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_leak_ptr.c92
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_loops1.c259
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_lwt.c234
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_in_map.c142
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ptr.c159
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ptr_mixing.c265
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_map_ret_val.c110
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_masking.c410
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_meta_access.c284
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c121
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_netfilter_retcode.c49
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_prevent_map_lookup.c61
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_raw_stack.c371
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_raw_tp_writable.c50
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ref_tracking.c1495
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_reg_equal.c58
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_regalloc.c364
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_ringbuf.c131
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_runtime_jit.c360
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_search_pruning.c339
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_sock.c980
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spill_fill.c374
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_spin_lock.c533
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_stack_ptr.c484
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_subreg.c673
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_uninit.c61
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_unpriv.c726
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_unpriv_perf.c34
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value.c158
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_adj_spill.c78
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_illegal_alu.c149
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_or_null.c288
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c1423
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_var_off.c349
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xadd.c124
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xdp.c24
-rw-r--r--tools/testing/selftests/bpf/progs/verifier_xdp_direct_packet_access.c1722
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_dummy.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_features.c268
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_hw_metadata.c91
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata.c64
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_metadata2.c24
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_redirect_multi_kern.c94
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c843
-rw-r--r--tools/testing/selftests/bpf/progs/xdp_tx.c2
-rw-r--r--tools/testing/selftests/bpf/progs/xdping_kern.c6
-rw-r--r--tools/testing/selftests/bpf/progs/xdpwall.c364
-rw-r--r--tools/testing/selftests/bpf/progs/xfrm_info.c40
-rw-r--r--tools/testing/selftests/bpf/progs/xsk_xdp_progs.c55
-rw-r--r--tools/testing/selftests/bpf/sdt-config.h6
-rw-r--r--tools/testing/selftests/bpf/sdt.h513
-rw-r--r--tools/testing/selftests/bpf/settings (renamed from tools/testing/selftests/powerpc/dscr/settings)0
-rw-r--r--tools/testing/selftests/bpf/task_local_storage_helpers.h22
-rwxr-xr-xtools/testing/selftests/bpf/tcp_client.py50
-rwxr-xr-xtools/testing/selftests/bpf/tcp_server.py80
-rw-r--r--tools/testing/selftests/bpf/test_bpftool.py22
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool.sh6
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_build.sh6
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_metadata.sh85
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_synctypes.py618
-rw-r--r--tools/testing/selftests/bpf/test_btf.h12
-rw-r--r--tools/testing/selftests/bpf/test_cgroup_storage.c39
-rw-r--r--tools/testing/selftests/bpf/test_cpp.cpp104
-rw-r--r--tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c159
-rw-r--r--tools/testing/selftests/bpf/test_dev_cgroup.c24
-rwxr-xr-xtools/testing/selftests/bpf/test_doc_build.sh20
-rw-r--r--tools/testing/selftests/bpf/test_flow_dissector.c2
-rwxr-xr-xtools/testing/selftests/bpf/test_flow_dissector.sh22
-rwxr-xr-xtools/testing/selftests/bpf/test_ftrace.sh7
-rw-r--r--tools/testing/selftests/bpf/test_hashmap.c382
-rwxr-xr-xtools/testing/selftests/bpf/test_kmod.sh30
-rwxr-xr-xtools/testing/selftests/bpf/test_lirc_mode2.sh5
-rw-r--r--tools/testing/selftests/bpf/test_lirc_mode2_user.c7
-rw-r--r--tools/testing/selftests/bpf/test_loader.c707
-rw-r--r--tools/testing/selftests/bpf/test_lpm_map.c73
-rw-r--r--tools/testing/selftests/bpf/test_lru_map.c118
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh40
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_seg6local.sh173
-rw-r--r--tools/testing/selftests/bpf/test_maps.c570
-rw-r--r--tools/testing/selftests/bpf/test_maps.h2
-rw-r--r--tools/testing/selftests/bpf/test_netcnt.c161
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py92
-rw-r--r--tools/testing/selftests/bpf/test_progs.c1646
-rw-r--r--tools/testing/selftests/bpf/test_progs.h373
-rwxr-xr-xtools/testing/selftests/bpf/test_skb_cgroup_id.sh2
-rw-r--r--tools/testing/selftests/bpf/test_skb_cgroup_id_user.c12
-rw-r--r--tools/testing/selftests/bpf/test_sock.c405
-rw-r--r--tools/testing/selftests/bpf/test_sock_addr.c326
-rw-r--r--tools/testing/selftests/bpf/test_sock_fields.c490
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c214
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c1215
-rw-r--r--tools/testing/selftests/bpf/test_stub.c44
-rw-r--r--tools/testing/selftests/bpf/test_sysctl.c45
-rw-r--r--tools/testing/selftests/bpf/test_tag.c12
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_edt.sh3
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_tunnel.sh37
-rwxr-xr-xtools/testing/selftests/bpf/test_tcp_check_syncookie.sh11
-rw-r--r--tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c84
-rw-r--r--tools/testing/selftests/bpf/test_tcp_hdr_options.h153
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf.h4
-rw-r--r--tools/testing/selftests/bpf/test_tcpbpf_user.c171
-rw-r--r--tools/testing/selftests/bpf/test_tcpnotify_user.c38
-rwxr-xr-xtools/testing/selftests/bpf/test_tunnel.sh222
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c883
-rw-r--r--tools/testing/selftests/bpf/test_verifier_log.c174
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_features.sh107
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_meta.sh44
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect.sh90
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_redirect_multi.sh214
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_veth.sh47
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan.sh77
-rwxr-xr-xtools/testing/selftests/bpf/test_xdping.sh4
-rwxr-xr-xtools/testing/selftests/bpf/test_xsk.sh209
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.c251
-rw-r--r--tools/testing/selftests/bpf/testing_helpers.h24
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c215
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.h14
-rw-r--r--tools/testing/selftests/bpf/unpriv_helpers.c26
-rw-r--r--tools/testing/selftests/bpf/unpriv_helpers.h7
-rw-r--r--tools/testing/selftests/bpf/urandom_read.c63
-rw-r--r--tools/testing/selftests/bpf/urandom_read_aux.c9
-rw-r--r--tools/testing/selftests/bpf/urandom_read_lib1.c13
-rw-r--r--tools/testing/selftests/bpf/urandom_read_lib2.c8
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c50
-rw-r--r--tools/testing/selftests/bpf/verifier/array_access.c378
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_and.c100
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_bounds.c27
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c245
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_fetch.c151
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_fetch_add.c106
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_invalid.c25
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_or.c102
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_xchg.c46
-rw-r--r--tools/testing/selftests/bpf/verifier/atomic_xor.c77
-rw-r--r--tools/testing/selftests/bpf/verifier/basic.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/basic_stack.c64
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c541
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_deduction.c124
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds_mix_sign_unsign.c406
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_get_stack.c44
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_loop_inline.c264
-rw-r--r--tools/testing/selftests/bpf/verifier/bpf_st_mem.c67
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c408
-rw-r--r--tools/testing/selftests/bpf/verifier/cfg.c73
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_inv_retcode.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_skb.c197
-rw-r--r--tools/testing/selftests/bpf/verifier/cgroup_storage.c220
-rw-r--r--tools/testing/selftests/bpf/verifier/const_or.c60
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx.c198
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_lookup.c532
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_sk_msg.c181
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c116
-rw-r--r--tools/testing/selftests/bpf/verifier/dead_code.c24
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c656
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_stack_access_wraparound.c40
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_value_access.c7
-rw-r--r--tools/testing/selftests/bpf/verifier/div0.c184
-rw-r--r--tools/testing/selftests/bpf/verifier/div_overflow.c110
-rw-r--r--tools/testing/selftests/bpf/verifier/event_output.c25
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_access_var_len.c616
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_packet_access.c460
-rw-r--r--tools/testing/selftests/bpf/verifier/helper_value_access.c953
-rw-r--r--tools/testing/selftests/bpf/verifier/int_ptr.c160
-rw-r--r--tools/testing/selftests/bpf/verifier/jit.c117
-rw-r--r--tools/testing/selftests/bpf/verifier/jmp32.c43
-rw-r--r--tools/testing/selftests/bpf/verifier/jset.c10
-rw-r--r--tools/testing/selftests/bpf/verifier/jump.c22
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_imm64.c8
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_ind.c72
-rw-r--r--tools/testing/selftests/bpf/verifier/leak_ptr.c67
-rw-r--r--tools/testing/selftests/bpf/verifier/loops1.c206
-rw-r--r--tools/testing/selftests/bpf/verifier/lwt.c189
-rw-r--r--tools/testing/selftests/bpf/verifier/map_in_map.c62
-rw-r--r--tools/testing/selftests/bpf/verifier/map_kptr.c442
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr_mixing.c100
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ret_val.c65
-rw-r--r--tools/testing/selftests/bpf/verifier/masking.c322
-rw-r--r--tools/testing/selftests/bpf/verifier/meta_access.c235
-rw-r--r--tools/testing/selftests/bpf/verifier/perf_event_sample_period.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c37
-rw-r--r--tools/testing/selftests/bpf/verifier/prevent_map_lookup.c59
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_stack.c305
-rw-r--r--tools/testing/selftests/bpf/verifier/raw_tp_writable.c34
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c823
-rw-r--r--tools/testing/selftests/bpf/verifier/runtime_jit.c231
-rw-r--r--tools/testing/selftests/bpf/verifier/search_pruning.c156
-rw-r--r--tools/testing/selftests/bpf/verifier/sleepable.c91
-rw-r--r--tools/testing/selftests/bpf/verifier/sock.c518
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c76
-rw-r--r--tools/testing/selftests/bpf/verifier/spin_lock.c333
-rw-r--r--tools/testing/selftests/bpf/verifier/stack_ptr.c317
-rw-r--r--tools/testing/selftests/bpf/verifier/subreg.c533
-rw-r--r--tools/testing/selftests/bpf/verifier/uninit.c39
-rw-r--r--tools/testing/selftests/bpf/verifier/unpriv.c522
-rw-r--r--tools/testing/selftests/bpf/verifier/value.c104
-rw-r--r--tools/testing/selftests/bpf/verifier/value_adj_spill.c43
-rw-r--r--tools/testing/selftests/bpf/verifier/value_illegal_alu.c94
-rw-r--r--tools/testing/selftests/bpf/verifier/value_or_null.c152
-rw-r--r--tools/testing/selftests/bpf/verifier/value_ptr_arith.c838
-rw-r--r--tools/testing/selftests/bpf/verifier/var_off.c248
-rw-r--r--tools/testing/selftests/bpf/verifier/wide_access.c46
-rw-r--r--tools/testing/selftests/bpf/verifier/xadd.c97
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp.c14
-rw-r--r--tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c900
-rwxr-xr-xtools/testing/selftests/bpf/verify_sig_setup.sh104
-rw-r--r--tools/testing/selftests/bpf/veristat.c2093
-rw-r--r--tools/testing/selftests/bpf/veristat.cfg17
-rwxr-xr-xtools/testing/selftests/bpf/vmtest.sh434
-rw-r--r--tools/testing/selftests/bpf/xdp_features.c718
-rw-r--r--tools/testing/selftests/bpf/xdp_features.h20
-rw-r--r--tools/testing/selftests/bpf/xdp_hw_metadata.c451
-rw-r--r--tools/testing/selftests/bpf/xdp_metadata.h19
-rw-r--r--tools/testing/selftests/bpf/xdp_redirect_multi.c226
-rw-r--r--tools/testing/selftests/bpf/xdp_synproxy.c471
-rw-r--r--tools/testing/selftests/bpf/xdping.c24
-rw-r--r--tools/testing/selftests/bpf/xsk.c645
-rw-r--r--tools/testing/selftests/bpf/xsk.h (renamed from tools/lib/bpf/xsk.h)105
-rwxr-xr-xtools/testing/selftests/bpf/xsk_prereqs.sh84
-rw-r--r--tools/testing/selftests/bpf/xsk_xdp_metadata.h5
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.c2070
-rw-r--r--tools/testing/selftests/bpf/xskxceiver.h191
-rw-r--r--tools/testing/selftests/breakpoints/breakpoint_test_arm64.c4
-rw-r--r--tools/testing/selftests/breakpoints/step_after_suspend_test.c53
-rw-r--r--tools/testing/selftests/cgroup/.gitignore4
-rw-r--r--tools/testing/selftests/cgroup/Makefile19
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c133
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.h12
-rw-r--r--tools/testing/selftests/cgroup/config7
-rw-r--r--tools/testing/selftests/cgroup/memcg_protection.m89
-rw-r--r--tools/testing/selftests/cgroup/test_core.c165
-rw-r--r--tools/testing/selftests/cgroup/test_cpu.c726
-rwxr-xr-xtools/testing/selftests/cgroup/test_cpuset_prs.sh693
-rw-r--r--tools/testing/selftests/cgroup/test_freezer.c57
-rw-r--r--tools/testing/selftests/cgroup/test_kill.c297
-rw-r--r--tools/testing/selftests/cgroup/test_kmem.c456
-rw-r--r--tools/testing/selftests/cgroup/test_memcontrol.c498
-rwxr-xr-xtools/testing/selftests/cgroup/test_stress.sh2
-rw-r--r--tools/testing/selftests/cgroup/wait_inotify.c87
-rw-r--r--tools/testing/selftests/clone3/.gitignore1
-rw-r--r--tools/testing/selftests/clone3/Makefile6
-rw-r--r--tools/testing/selftests/clone3/clone3.c62
-rw-r--r--tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c182
-rw-r--r--tools/testing/selftests/clone3/clone3_clear_sighand.c5
-rw-r--r--tools/testing/selftests/clone3/clone3_selftests.h24
-rw-r--r--tools/testing/selftests/clone3/clone3_set_tid.c6
-rw-r--r--tools/testing/selftests/core/.gitignore1
-rw-r--r--tools/testing/selftests/core/Makefile7
-rw-r--r--tools/testing/selftests/core/close_range_test.c566
-rw-r--r--tools/testing/selftests/cpu-hotplug/Makefile2
-rw-r--r--tools/testing/selftests/cpu-hotplug/config1
-rwxr-xr-xtools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh140
-rw-r--r--tools/testing/selftests/cpufreq/config2
-rwxr-xr-xtools/testing/selftests/cpufreq/main.sh2
-rw-r--r--tools/testing/selftests/damon/.gitignore (renamed from tools/testing/selftests/android/ion/.gitignore)4
-rw-r--r--tools/testing/selftests/damon/Makefile14
-rw-r--r--tools/testing/selftests/damon/_chk_dependency.sh38
-rw-r--r--tools/testing/selftests/damon/_debugfs_common.sh52
-rw-r--r--tools/testing/selftests/damon/debugfs_attrs.sh17
-rw-r--r--tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh27
-rw-r--r--tools/testing/selftests/damon/debugfs_empty_targets.sh13
-rw-r--r--tools/testing/selftests/damon/debugfs_huge_count_read_write.sh22
-rw-r--r--tools/testing/selftests/damon/debugfs_rm_non_contexts.sh19
-rw-r--r--tools/testing/selftests/damon/debugfs_schemes.sh19
-rw-r--r--tools/testing/selftests/damon/debugfs_target_ids.sh19
-rw-r--r--tools/testing/selftests/damon/huge_count_read_write.c48
-rw-r--r--tools/testing/selftests/damon/lru_sort.sh41
-rw-r--r--tools/testing/selftests/damon/reclaim.sh42
-rw-r--r--tools/testing/selftests/damon/sysfs.sh343
-rw-r--r--tools/testing/selftests/damon/sysfs_update_removed_scheme_dir.sh58
-rw-r--r--tools/testing/selftests/dma/Makefile7
-rw-r--r--tools/testing/selftests/dma/config1
-rw-r--r--tools/testing/selftests/dma/dma_map_benchmark.c129
-rw-r--r--tools/testing/selftests/dmabuf-heaps/Makefile2
-rw-r--r--tools/testing/selftests/dmabuf-heaps/dmabuf-heap.c153
-rw-r--r--tools/testing/selftests/drivers/.gitignore1
-rw-r--r--tools/testing/selftests/drivers/dma-buf/Makefile2
-rw-r--r--tools/testing/selftests/drivers/dma-buf/udmabuf.c8
-rwxr-xr-xtools/testing/selftests/drivers/gpu/drm_mm.sh4
-rw-r--r--tools/testing/selftests/drivers/net/bonding/Makefile19
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-arp-interval-causes-panic.sh49
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh81
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh85
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond-lladdr-target.sh65
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/bond_options.sh314
-rw-r--r--tools/testing/selftests/drivers/net/bonding/bond_topo_3d1c.sh145
-rw-r--r--tools/testing/selftests/drivers/net/bonding/config2
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/dev_addr_lists.sh109
-rw-r--r--tools/testing/selftests/drivers/net/bonding/lag_lib.sh167
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/mode-1-recovery-updelay.sh45
-rwxr-xr-xtools/testing/selftests/drivers/net/bonding/mode-2-recovery-updelay.sh45
l---------tools/testing/selftests/drivers/net/bonding/net_forwarding_lib.sh1
-rw-r--r--tools/testing/selftests/drivers/net/bonding/settings1
-rw-r--r--tools/testing/selftests/drivers/net/dsa/Makefile18
l---------tools/testing/selftests/drivers/net/dsa/bridge_locked_port.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_mdb.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_mld.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_aware.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_mcast.sh1
l---------tools/testing/selftests/drivers/net/dsa/bridge_vlan_unaware.sh1
-rw-r--r--tools/testing/selftests/drivers/net/dsa/forwarding.config2
l---------tools/testing/selftests/drivers/net/dsa/lib.sh1
l---------tools/testing/selftests/drivers/net/dsa/local_termination.sh1
l---------tools/testing/selftests/drivers/net/dsa/no_forwarding.sh1
l---------tools/testing/selftests/drivers/net/dsa/tc_actions.sh1
l---------tools/testing/selftests/drivers/net/dsa/tc_common.sh1
-rwxr-xr-xtools/testing/selftests/drivers/net/dsa/test_bridge_fdb_stress.sh47
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_linecard.sh334
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_acl_drops.sh4
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_control.sh709
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l2_drops.sh138
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_drops.sh74
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_l3_exceptions.sh54
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh65
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip.sh56
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_ipip6.sh250
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_tunnel_vxlan_ipv6.sh342
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/egress_vid_classification.sh273
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ethtool_lanes.sh187
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/fib.sh14
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/hw_stats_l3.sh31
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1d.sh264
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_1q.sh264
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/ingress_rif_conf_vxlan.sh311
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh3
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/mlxsw_lib.sh64
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/port_scale.sh62
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/q_in_q_veto.sh296
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_defprio.sh68
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_bridge.sh25
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_dscp_router.sh27
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh14
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh379
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh28
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_max_descriptors.sh282
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh402
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/rif_counter_scale.sh107
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/rif_mac_profile_scale.sh72
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles.sh213
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh147
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/router_scale.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/rtnetlink.sh454
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_ets.sh24
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_offload.sh290
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh281
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_ets.sh100
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_red_root.sh25
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_tbf_ets.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_tbf_prio.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_tbf_root.sh6
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh2
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py2
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/port_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/resource_scale.sh42
l---------tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_counter_scale.sh1
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/rif_mac_profile_scale.sh16
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower_scale.sh15
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_police_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum-2/vxlan_flooding_ipv6.sh339
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/devlink_lib_spectrum.sh6
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/port_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/q_in_vni_veto.sh66
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/resource_scale.sh33
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_counter_scale.sh34
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/rif_mac_profile_scale.sh16
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/spectrum/tc_police_scale.sh16
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/spectrum/vxlan_flooding_ipv6.sh334
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_flower_restrictions.sh186
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh23
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_police_occ.sh108
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh101
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_restrictions.sh414
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/tc_sample.sh658
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan.sh242
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto.sh39
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_fdb_veto_ipv6.sh12
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_flooding.sh17
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/vxlan_ipv6.sh65
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh238
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink_trap.sh31
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-coalesce.sh132
-rw-r--r--tools/testing/selftests/drivers/net/netdevsim/ethtool-common.sh56
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-fec.sh110
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh49
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-ring.sh85
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/fib.sh65
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/fib_notifications.sh430
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/hw_stats_l3.sh421
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/nexthop.sh1058
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/psample.sh183
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/tc-mq-visibility.sh77
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh953
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/basic_qos.sh253
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/psfp.sh327
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh352
-rw-r--r--tools/testing/selftests/drivers/net/team/Makefile10
-rw-r--r--tools/testing/selftests/drivers/net/team/config3
-rwxr-xr-xtools/testing/selftests/drivers/net/team/dev_addr_lists.sh51
l---------tools/testing/selftests/drivers/net/team/lag_lib.sh1
l---------tools/testing/selftests/drivers/net/team/net_forwarding_lib.sh1
-rw-r--r--tools/testing/selftests/drivers/s390x/uvdevice/Makefile20
-rw-r--r--tools/testing/selftests/drivers/s390x/uvdevice/config1
-rw-r--r--tools/testing/selftests/drivers/s390x/uvdevice/test_uvdevice.c276
-rwxr-xr-xtools/testing/selftests/drivers/sdsi/sdsi.sh25
-rw-r--r--tools/testing/selftests/drivers/sdsi/sdsi_test.py226
-rwxr-xr-xtools/testing/selftests/efivarfs/efivarfs.sh5
-rw-r--r--tools/testing/selftests/exec/.gitignore5
-rw-r--r--tools/testing/selftests/exec/Makefile14
-rwxr-xr-xtools/testing/selftests/exec/binfmt_script.py171
-rw-r--r--tools/testing/selftests/exec/execveat.c8
-rw-r--r--tools/testing/selftests/exec/load_address.c68
-rw-r--r--tools/testing/selftests/exec/non-regular.c196
-rw-r--r--tools/testing/selftests/exec/null-argv.c78
-rw-r--r--tools/testing/selftests/filesystems/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/binderfs/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/binderfs/binderfs_test.c309
-rw-r--r--tools/testing/selftests/filesystems/binderfs/config1
-rw-r--r--tools/testing/selftests/filesystems/epoll/Makefile2
-rw-r--r--tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c357
-rw-r--r--tools/testing/selftests/filesystems/fat/.gitignore2
-rw-r--r--tools/testing/selftests/filesystems/fat/Makefile7
-rw-r--r--tools/testing/selftests/filesystems/fat/config2
-rw-r--r--tools/testing/selftests/filesystems/fat/rename_exchange.c37
-rwxr-xr-xtools/testing/selftests/filesystems/fat/run_fat_tests.sh82
-rw-r--r--tools/testing/selftests/firmware/.gitignore2
-rw-r--r--tools/testing/selftests/firmware/Makefile2
-rw-r--r--tools/testing/selftests/firmware/config1
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh249
-rwxr-xr-xtools/testing/selftests/firmware/fw_lib.sh19
-rw-r--r--tools/testing/selftests/firmware/fw_namespace.c5
-rwxr-xr-xtools/testing/selftests/firmware/fw_run_tests.sh4
-rwxr-xr-xtools/testing/selftests/firmware/fw_upload.sh214
-rw-r--r--tools/testing/selftests/firmware/settings8
-rw-r--r--tools/testing/selftests/fpu/.gitignore2
-rw-r--r--tools/testing/selftests/fpu/Makefile9
-rwxr-xr-xtools/testing/selftests/fpu/run_test_fpu.sh46
-rw-r--r--tools/testing/selftests/fpu/test_fpu.c61
-rw-r--r--tools/testing/selftests/ftrace/config1
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest45
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/trace_pipe.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/direct/kprobe-direct.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_eprobe.tc97
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc15
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/add_remove_synth.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc13
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/eprobes_syntax_errors.tc29
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc10
-rw-r--r--tools/testing/selftests/ftrace/test.d/dynevent/test_duplicates.tc38
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-enable.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-no-pid.tc16
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/event-pid.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/subsystem-enable.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/event/toplevel-enable.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc58
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter-stack.tc17
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/fgraph-filter.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc18
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc18
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_cpumask.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_event_triggers.tc27
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_mod_trace.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_profile_stat.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_profiler.tc15
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_stack_tracer.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_traceonoff_triggers.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/tracing-error-log.tc10
-rw-r--r--tools/testing/selftests/ftrace/test.d/functions77
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance-event.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/instances/instance.tc6
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_char.tc47
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc16
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc13
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc17
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc9
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc8
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_eventname.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc18
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_module.tc3
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc27
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_maxactive.tc4
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_return_suffix.tc21
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/probepoint.tc5
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/profile.tc7
-rw-r--r--tools/testing/selftests/ftrace/test.d/kprobe/uprobe_syntax_errors.tc16
-rw-r--r--tools/testing/selftests/ftrace/test.d/preemptirq/irqsoff_tracer.tc21
-rw-r--r--tools/testing/selftests/ftrace/test.d/template4
-rw-r--r--tools/testing/selftests/ftrace/test.d/tracer/wakeup.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/tracer/wakeup_rt.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-action-hist-xfail.tc13
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-field-variable-support.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-inter-event-combined-hist.tc19
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-multi-actions-accept.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onchange-action-hist.tc8
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-action-hist.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmatch-onmax-action-hist.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-onmax-action-hist.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-snapshot-action-hist.tc15
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-eprobe.tc53
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-createremove.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-dynstring.tc31
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack.tc24
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc21
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic_event_syntax_errors.tc38
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-trace-action-hist.tc13
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-eventonoff.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-filter.tc11
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-expressions.tc63
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc16
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-syntax-errors.tc18
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc36
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc16
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-snapshot.tc16
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-stacktrace.tc13
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-hist.tc23
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-snapshot.tc23
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic-kernel.tc31
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-trace-marker-synthetic.tc26
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/trigger-traceonoff.tc11
-rw-r--r--tools/testing/selftests/futex/Makefile4
-rw-r--r--tools/testing/selftests/futex/functional/.gitignore3
-rw-r--r--tools/testing/selftests/futex/functional/Makefile17
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue.c136
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c2
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait.c171
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_timeout.c145
-rw-r--r--tools/testing/selftests/futex/functional/futex_wait_wouldblock.c41
-rw-r--r--tools/testing/selftests/futex/functional/futex_waitv.c237
-rwxr-xr-xtools/testing/selftests/futex/functional/run.sh9
-rw-r--r--tools/testing/selftests/futex/include/futex2test.h22
-rwxr-xr-xtools/testing/selftests/gen_kselftest_tar.sh5
-rw-r--r--tools/testing/selftests/gpio/.gitignore4
-rw-r--r--tools/testing/selftests/gpio/Makefile30
-rw-r--r--tools/testing/selftests/gpio/config2
-rw-r--r--tools/testing/selftests/gpio/gpio-chip-info.c57
-rw-r--r--tools/testing/selftests/gpio/gpio-line-name.c55
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-cdev.c198
-rw-r--r--tools/testing/selftests/gpio/gpio-mockup-chardev.c323
-rwxr-xr-xtools/testing/selftests/gpio/gpio-mockup-sysfs.sh168
-rwxr-xr-xtools/testing/selftests/gpio/gpio-mockup.sh497
-rwxr-xr-xtools/testing/selftests/gpio/gpio-sim.sh396
-rw-r--r--tools/testing/selftests/hid/.gitignore5
-rw-r--r--tools/testing/selftests/hid/Makefile243
-rw-r--r--tools/testing/selftests/hid/config33
-rw-r--r--tools/testing/selftests/hid/config.common241
-rw-r--r--tools/testing/selftests/hid/config.x86_644
-rwxr-xr-xtools/testing/selftests/hid/hid-apple.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-core.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-gamepad.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-ite.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-keyboard.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-mouse.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-multitouch.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-sony.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-tablet.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-usb_crash.sh7
-rwxr-xr-xtools/testing/selftests/hid/hid-wacom.sh7
-rw-r--r--tools/testing/selftests/hid/hid_bpf.c869
-rw-r--r--tools/testing/selftests/hid/progs/hid.c209
-rw-r--r--tools/testing/selftests/hid/progs/hid_bpf_helpers.h21
-rwxr-xr-xtools/testing/selftests/hid/run-hid-tools-tests.sh28
-rw-r--r--tools/testing/selftests/hid/settings3
-rw-r--r--tools/testing/selftests/hid/tests/__init__.py2
-rw-r--r--tools/testing/selftests/hid/tests/base.py345
-rw-r--r--tools/testing/selftests/hid/tests/conftest.py81
-rw-r--r--tools/testing/selftests/hid/tests/descriptors_wacom.py1360
-rw-r--r--tools/testing/selftests/hid/tests/test_apple_keyboard.py440
-rw-r--r--tools/testing/selftests/hid/tests/test_gamepad.py209
-rw-r--r--tools/testing/selftests/hid/tests/test_hid_core.py154
-rw-r--r--tools/testing/selftests/hid/tests/test_ite_keyboard.py166
-rw-r--r--tools/testing/selftests/hid/tests/test_keyboard.py485
-rw-r--r--tools/testing/selftests/hid/tests/test_mouse.py977
-rw-r--r--tools/testing/selftests/hid/tests/test_multitouch.py2088
-rw-r--r--tools/testing/selftests/hid/tests/test_sony.py342
-rw-r--r--tools/testing/selftests/hid/tests/test_tablet.py872
-rw-r--r--tools/testing/selftests/hid/tests/test_usb_crash.py103
-rw-r--r--tools/testing/selftests/hid/tests/test_wacom_generic.py844
-rwxr-xr-xtools/testing/selftests/hid/vmtest.sh289
-rw-r--r--tools/testing/selftests/intel_pstate/Makefile8
-rw-r--r--tools/testing/selftests/intel_pstate/aperf.c22
-rw-r--r--tools/testing/selftests/iommu/.gitignore3
-rw-r--r--tools/testing/selftests/iommu/Makefile11
-rw-r--r--tools/testing/selftests/iommu/config2
-rw-r--r--tools/testing/selftests/iommu/iommufd.c1698
-rw-r--r--tools/testing/selftests/iommu/iommufd_fail_nth.c572
-rw-r--r--tools/testing/selftests/iommu/iommufd_utils.h278
-rw-r--r--tools/testing/selftests/ipc/Makefile2
-rw-r--r--tools/testing/selftests/ipc/msgque.c8
-rw-r--r--tools/testing/selftests/ir/ir_loopback.c11
-rwxr-xr-xtools/testing/selftests/ir/ir_loopback.sh2
-rw-r--r--tools/testing/selftests/kcmp/Makefile2
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c6
-rw-r--r--tools/testing/selftests/kexec/Makefile6
-rwxr-xr-xtools/testing/selftests/kexec/kexec_common_lib.sh67
-rwxr-xr-xtools/testing/selftests/kexec/test_kexec_file_load.sh13
-rwxr-xr-xtools/testing/selftests/kmod/kmod.sh55
-rw-r--r--tools/testing/selftests/kselftest.h139
-rwxr-xr-xtools/testing/selftests/kselftest/prefix.pl2
-rw-r--r--tools/testing/selftests/kselftest/runner.sh64
-rwxr-xr-xtools/testing/selftests/kselftest_deps.sh270
-rw-r--r--tools/testing/selftests/kselftest_harness.h665
-rw-r--r--tools/testing/selftests/kselftest_module.h22
-rw-r--r--tools/testing/selftests/kvm/.gitignore30
-rw-r--r--tools/testing/selftests/kvm/Makefile226
-rw-r--r--tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c167
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c484
-rw-r--r--tools/testing/selftests/kvm/aarch64/debug-exceptions.c607
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list.c1138
-rw-r--r--tools/testing/selftests/kvm/aarch64/hypercalls.c310
-rw-r--r--tools/testing/selftests/kvm/aarch64/page_fault_test.c1134
-rw-r--r--tools/testing/selftests/kvm/aarch64/psci_test.c198
-rw-r--r--tools/testing/selftests/kvm/aarch64/smccc_filter.c268
-rw-r--r--tools/testing/selftests/kvm/aarch64/vcpu_width_config.c121
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_init.c716
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_irq.c856
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c394
-rw-r--r--tools/testing/selftests/kvm/clear_dirty_log_test.c6
-rw-r--r--tools/testing/selftests/kvm/config2
-rw-r--r--tools/testing/selftests/kvm/demand_paging_test.c655
-rw-r--r--tools/testing/selftests/kvm/dirty_log_perf_test.c515
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c771
-rw-r--r--tools/testing/selftests/kvm/hardware_disable_test.c184
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/arch_timer.h142
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/delay.h25
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/gic.h47
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/gic_v3.h82
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h223
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/spinlock.h13
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/vgic.h36
-rw-r--r--tools/testing/selftests/kvm/include/guest_modes.h21
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h312
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_base.h911
-rw-r--r--tools/testing/selftests/kvm/include/memstress.h75
-rw-r--r--tools/testing/selftests/kvm/include/numaif.h55
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h119
-rw-r--r--tools/testing/selftests/kvm/include/s390x/diag318_test_handler.h13
-rw-r--r--tools/testing/selftests/kvm/include/s390x/processor.h8
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h125
-rw-r--r--tools/testing/selftests/kvm/include/ucall_common.h124
-rw-r--r--tools/testing/selftests/kvm/include/userfaultfd_util.h45
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/apic.h93
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/evmcs.h (renamed from tools/testing/selftests/kvm/include/evmcs.h)249
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/hyperv.h346
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/mce.h25
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h1024
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/svm.h35
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/svm_util.h39
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h126
-rw-r--r--tools/testing/selftests/kvm/kvm_binary_stats_test.c253
-rw-r--r--tools/testing/selftests/kvm/kvm_create_max_vcpus.c34
-rw-r--r--tools/testing/selftests/kvm/kvm_page_table_test.c479
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic.c161
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_private.h32
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_v3.c398
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/handlers.S126
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c414
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/spinlock.c27
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/ucall.c103
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/vgic.c170
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c24
-rw-r--r--tools/testing/selftests/kvm/lib/elf.c15
-rw-r--r--tools/testing/selftests/kvm/lib/guest_modes.c133
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c1784
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h111
-rw-r--r--tools/testing/selftests/kvm/lib/memstress.c322
-rw-r--r--tools/testing/selftests/kvm/lib/rbtree.c1
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c369
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/ucall.c68
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c80
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/processor.c82
-rw-r--r--tools/testing/selftests/kvm/lib/s390x/ucall.c38
-rw-r--r--tools/testing/selftests/kvm/lib/sparsebit.c3
-rw-r--r--tools/testing/selftests/kvm/lib/string_override.c39
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c290
-rw-r--r--tools/testing/selftests/kvm/lib/ucall_common.c115
-rw-r--r--tools/testing/selftests/kvm/lib/userfaultfd_util.c186
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/apic.c45
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/handlers.S81
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/hyperv.c46
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/memstress.c112
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c1395
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/svm.c43
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/ucall.c40
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c262
-rw-r--r--tools/testing/selftests/kvm/max_guest_memory_test.c294
-rw-r--r--tools/testing/selftests/kvm/memslot_modification_stress_test.c182
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c1132
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c268
-rw-r--r--tools/testing/selftests/kvm/s390x/memop.c1218
-rw-r--r--tools/testing/selftests/kvm/s390x/resets.c180
-rw-r--r--tools/testing/selftests/kvm/s390x/sync_regs_test.c149
-rw-r--r--tools/testing/selftests/kvm/s390x/tprot.c243
-rw-r--r--tools/testing/selftests/kvm/set_memory_region_test.c417
-rw-r--r--tools/testing/selftests/kvm/settings1
-rw-r--r--tools/testing/selftests/kvm/steal_time.c161
-rw-r--r--tools/testing/selftests/kvm/system_counter_offset_test.c127
-rw-r--r--tools/testing/selftests/kvm/x86_64/amx_test.c334
-rw-r--r--tools/testing/selftests/kvm/x86_64/cpuid_test.c187
-rw-r--r--tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c56
-rw-r--r--tools/testing/selftests/kvm/x86_64/debug_regs.c214
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c165
-rw-r--r--tools/testing/selftests/kvm/x86_64/exit_on_emulation_failure_test.c42
-rw-r--r--tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c135
-rw-r--r--tools/testing/selftests/kvm/x86_64/flds_emulation.h52
-rw-r--r--tools/testing/selftests/kvm/x86_64/get_msr_index_features.c35
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_clock.c260
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_cpuid.c154
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c309
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_extended_hypercalls.c97
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_features.c689
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_ipi.c311
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c201
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c680
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_clock_test.c192
-rw-r--r--tools/testing/selftests/kvm/x86_64/kvm_pv_test.c154
-rw-r--r--tools/testing/selftests/kvm/x86_64/max_vcpuid_cap_test.c44
-rw-r--r--tools/testing/selftests/kvm/x86_64/mmio_warning_test.c21
-rw-r--r--tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c124
-rw-r--r--tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c292
-rw-r--r--tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c271
-rwxr-xr-xtools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh59
-rw-r--r--tools/testing/selftests/kvm/x86_64/platform_info_test.c64
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c834
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_boot_cpu_id.c131
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_memory_region_test.c141
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_sregs_test.c98
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c421
-rw-r--r--tools/testing/selftests/kvm/x86_64/smaller_maxphyaddr_emulation_test.c111
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c132
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c107
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c121
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c62
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c213
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c25
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c97
-rw-r--r--tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c124
-rw-r--r--tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c161
-rw-r--r--tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c114
-rw-r--r--tools/testing/selftests/kvm/x86_64/ucna_injection_test.c302
-rw-r--r--tools/testing/selftests/kvm/x86_64/userspace_io_test.c101
-rw-r--r--tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c816
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c124
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c31
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c35
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c145
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c103
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_msrs_test.c131
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c231
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_pmu_caps_test.c255
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c246
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c126
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c30
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c491
-rw-r--r--tools/testing/selftests/kvm/x86_64/xapic_state_test.c215
-rw-r--r--tools/testing/selftests/kvm/x86_64/xcr0_cpuid_test.c132
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c1119
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c142
-rw-r--r--tools/testing/selftests/kvm/x86_64/xss_msr_test.c56
-rw-r--r--tools/testing/selftests/landlock/.gitignore2
-rw-r--r--tools/testing/selftests/landlock/Makefile23
-rw-r--r--tools/testing/selftests/landlock/base_test.c329
-rw-r--r--tools/testing/selftests/landlock/common.h258
-rw-r--r--tools/testing/selftests/landlock/config7
-rw-r--r--tools/testing/selftests/landlock/fs_test.c4426
-rw-r--r--tools/testing/selftests/landlock/ptrace_test.c438
-rw-r--r--tools/testing/selftests/landlock/true.c5
-rw-r--r--tools/testing/selftests/lib.mk111
-rw-r--r--tools/testing/selftests/lib/Makefile2
-rw-r--r--tools/testing/selftests/lib/config2
-rwxr-xr-xtools/testing/selftests/lib/scanf.sh4
-rw-r--r--tools/testing/selftests/livepatch/Makefile3
-rw-r--r--tools/testing/selftests/livepatch/README16
-rw-r--r--tools/testing/selftests/livepatch/functions.sh92
-rwxr-xr-xtools/testing/selftests/livepatch/test-callbacks.sh84
-rwxr-xr-xtools/testing/selftests/livepatch/test-ftrace.sh9
-rwxr-xr-xtools/testing/selftests/livepatch/test-livepatch.sh12
-rwxr-xr-xtools/testing/selftests/livepatch/test-shadow-vars.sh85
-rwxr-xr-xtools/testing/selftests/livepatch/test-state.sh21
-rwxr-xr-xtools/testing/selftests/livepatch/test-sysfs.sh86
-rw-r--r--tools/testing/selftests/lkdtm/.gitignore1
-rw-r--r--tools/testing/selftests/lkdtm/Makefile1
-rw-r--r--tools/testing/selftests/lkdtm/config14
-rwxr-xr-xtools/testing/selftests/lkdtm/run.sh48
-rwxr-xr-xtools/testing/selftests/lkdtm/stack-entropy.sh51
-rw-r--r--tools/testing/selftests/lkdtm/tests.txt32
-rw-r--r--tools/testing/selftests/media_tests/Makefile2
-rw-r--r--tools/testing/selftests/membarrier/Makefile2
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test_impl.h33
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test_multi_thread.c2
-rw-r--r--tools/testing/selftests/membarrier/membarrier_test_single_thread.c6
-rw-r--r--tools/testing/selftests/memfd/Makefile21
-rw-r--r--tools/testing/selftests/memfd/fuse_test.c3
-rw-r--r--tools/testing/selftests/memfd/memfd_test.c348
-rw-r--r--tools/testing/selftests/memory-hotplug/config1
-rwxr-xr-xtools/testing/selftests/memory-hotplug/mem-on-off-test.sh35
-rw-r--r--tools/testing/selftests/mincore/.gitignore2
-rw-r--r--tools/testing/selftests/mincore/Makefile6
-rw-r--r--tools/testing/selftests/mincore/mincore_selftest.c368
-rw-r--r--tools/testing/selftests/mm/.gitignore41
-rw-r--r--tools/testing/selftests/mm/Makefile185
-rw-r--r--tools/testing/selftests/mm/charge_reserved_hugetlb.sh (renamed from tools/testing/selftests/vm/charge_reserved_hugetlb.sh)39
-rw-r--r--tools/testing/selftests/mm/check_config.sh31
-rw-r--r--tools/testing/selftests/mm/compaction_test.c (renamed from tools/testing/selftests/vm/compaction_test.c)11
-rw-r--r--tools/testing/selftests/mm/config8
-rw-r--r--tools/testing/selftests/mm/cow.c1742
-rw-r--r--tools/testing/selftests/mm/gup_test.c270
-rw-r--r--tools/testing/selftests/mm/hmm-tests.c2054
-rw-r--r--tools/testing/selftests/mm/hugepage-mmap.c (renamed from tools/testing/selftests/vm/hugepage-mmap.c)10
-rw-r--r--tools/testing/selftests/mm/hugepage-mremap.c185
-rw-r--r--tools/testing/selftests/mm/hugepage-shm.c (renamed from tools/testing/selftests/vm/hugepage-shm.c)0
-rw-r--r--tools/testing/selftests/mm/hugepage-vmemmap.c144
-rw-r--r--tools/testing/selftests/mm/hugetlb-madvise.c382
-rw-r--r--tools/testing/selftests/mm/hugetlb_reparenting_test.sh (renamed from tools/testing/selftests/vm/hugetlb_reparenting_test.sh)26
-rw-r--r--tools/testing/selftests/mm/khugepaged.c1562
-rw-r--r--tools/testing/selftests/mm/ksm_functional_tests.c398
-rw-r--r--tools/testing/selftests/mm/ksm_tests.c947
-rw-r--r--tools/testing/selftests/mm/madv_populate.c296
-rw-r--r--tools/testing/selftests/mm/map_fixed_noreplace.c (renamed from tools/testing/selftests/vm/map_fixed_noreplace.c)49
-rw-r--r--tools/testing/selftests/mm/map_hugetlb.c (renamed from tools/testing/selftests/vm/map_hugetlb.c)2
-rw-r--r--tools/testing/selftests/mm/map_populate.c (renamed from tools/testing/selftests/vm/map_populate.c)0
-rw-r--r--tools/testing/selftests/mm/mdwe_test.c196
-rw-r--r--tools/testing/selftests/mm/memfd_secret.c296
-rw-r--r--tools/testing/selftests/mm/migration.c193
-rw-r--r--tools/testing/selftests/mm/mkdirty.c379
-rw-r--r--tools/testing/selftests/mm/mlock-random-test.c (renamed from tools/testing/selftests/vm/mlock-random-test.c)2
-rw-r--r--tools/testing/selftests/mm/mlock2-tests.c (renamed from tools/testing/selftests/vm/mlock2-tests.c)0
-rw-r--r--tools/testing/selftests/mm/mlock2.h (renamed from tools/testing/selftests/vm/mlock2.h)0
-rw-r--r--tools/testing/selftests/mm/mrelease_test.c205
-rw-r--r--tools/testing/selftests/mm/mremap_dontunmap.c (renamed from tools/testing/selftests/vm/mremap_dontunmap.c)53
-rw-r--r--tools/testing/selftests/mm/mremap_test.c548
-rw-r--r--tools/testing/selftests/mm/on-fault-limit.c (renamed from tools/testing/selftests/vm/on-fault-limit.c)0
-rw-r--r--tools/testing/selftests/mm/pkey-helpers.h226
-rw-r--r--tools/testing/selftests/mm/pkey-powerpc.h133
-rw-r--r--tools/testing/selftests/mm/pkey-x86.h177
-rw-r--r--tools/testing/selftests/mm/protection_keys.c (renamed from tools/testing/selftests/x86/protection_keys.c)914
-rw-r--r--tools/testing/selftests/mm/run_vmtests.sh298
-rw-r--r--tools/testing/selftests/mm/settings1
-rw-r--r--tools/testing/selftests/mm/soft-dirty.c213
-rw-r--r--tools/testing/selftests/mm/split_huge_page_test.c313
-rw-r--r--tools/testing/selftests/mm/test_hmm.sh105
-rw-r--r--[-rwxr-xr-x]tools/testing/selftests/mm/test_vmalloc.sh (renamed from tools/testing/selftests/vm/test_vmalloc.sh)21
-rw-r--r--tools/testing/selftests/mm/thuge-gen.c (renamed from tools/testing/selftests/vm/thuge-gen.c)19
-rw-r--r--tools/testing/selftests/mm/transhuge-stress.c (renamed from tools/testing/selftests/vm/transhuge-stress.c)82
-rw-r--r--tools/testing/selftests/mm/uffd-common.c618
-rw-r--r--tools/testing/selftests/mm/uffd-common.h117
-rw-r--r--tools/testing/selftests/mm/uffd-stress.c481
-rw-r--r--tools/testing/selftests/mm/uffd-unit-tests.c1228
-rw-r--r--tools/testing/selftests/mm/va_high_addr_switch.c (renamed from tools/testing/selftests/vm/va_128TBswitch.c)51
-rw-r--r--tools/testing/selftests/mm/va_high_addr_switch.sh58
-rw-r--r--tools/testing/selftests/mm/virtual_address_range.c (renamed from tools/testing/selftests/vm/virtual_address_range.c)24
-rw-r--r--tools/testing/selftests/mm/vm_util.c303
-rw-r--r--tools/testing/selftests/mm/vm_util.h65
-rw-r--r--tools/testing/selftests/mm/write_hugetlb_memory.sh (renamed from tools/testing/selftests/vm/write_hugetlb_memory.sh)2
-rw-r--r--tools/testing/selftests/mm/write_to_hugetlbfs.c (renamed from tools/testing/selftests/vm/write_to_hugetlbfs.c)2
-rw-r--r--tools/testing/selftests/mount/.gitignore1
-rw-r--r--tools/testing/selftests/mount/Makefile4
-rw-r--r--tools/testing/selftests/mount/nosymfollow-test.c218
-rwxr-xr-xtools/testing/selftests/mount/run_nosymfollow.sh4
-rwxr-xr-xtools/testing/selftests/mount/run_unprivileged_remount.sh (renamed from tools/testing/selftests/mount/run_tests.sh)0
-rw-r--r--tools/testing/selftests/mount/unprivileged-remount-test.c4
-rw-r--r--tools/testing/selftests/mount_setattr/.gitignore1
-rw-r--r--tools/testing/selftests/mount_setattr/Makefile7
-rw-r--r--tools/testing/selftests/mount_setattr/config1
-rw-r--r--tools/testing/selftests/mount_setattr/mount_setattr_test.c1500
-rw-r--r--tools/testing/selftests/move_mount_set_group/.gitignore1
-rw-r--r--tools/testing/selftests/move_mount_set_group/Makefile7
-rw-r--r--tools/testing/selftests/move_mount_set_group/config1
-rw-r--r--tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c375
-rw-r--r--tools/testing/selftests/mqueue/mq_perf_tests.c29
-rw-r--r--tools/testing/selftests/nci/.gitignore1
-rw-r--r--tools/testing/selftests/nci/Makefile6
-rw-r--r--tools/testing/selftests/nci/config3
-rw-r--r--tools/testing/selftests/nci/nci_dev.c904
-rw-r--r--tools/testing/selftests/net/.gitignore53
-rw-r--r--tools/testing/selftests/net/Makefile121
-rw-r--r--tools/testing/selftests/net/af_unix/Makefile3
-rw-r--r--tools/testing/selftests/net/af_unix/diag_uid.c178
-rw-r--r--tools/testing/selftests/net/af_unix/test_unix_oob.c438
-rw-r--r--tools/testing/selftests/net/af_unix/unix_connect.c148
-rwxr-xr-xtools/testing/selftests/net/altnames.sh2
-rwxr-xr-xtools/testing/selftests/net/amt.sh284
-rwxr-xr-xtools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh227
-rwxr-xr-xtools/testing/selftests/net/arp_ndisc_untracked_subnets.sh308
-rwxr-xr-xtools/testing/selftests/net/bareudp.sh546
-rwxr-xr-xtools/testing/selftests/net/big_tcp.sh180
-rw-r--r--tools/testing/selftests/net/bind_bhash.c144
-rwxr-xr-xtools/testing/selftests/net/bind_bhash.sh66
-rw-r--r--tools/testing/selftests/net/bind_timewait.c92
-rw-r--r--tools/testing/selftests/net/bind_wildcard.c114
-rwxr-xr-xtools/testing/selftests/net/cmsg_ipv6.sh156
-rw-r--r--tools/testing/selftests/net/cmsg_sender.c506
-rwxr-xr-xtools/testing/selftests/net/cmsg_so_mark.sh77
-rwxr-xr-xtools/testing/selftests/net/cmsg_time.sh83
-rw-r--r--tools/testing/selftests/net/config23
-rw-r--r--tools/testing/selftests/net/csum.c986
-rwxr-xr-xtools/testing/selftests/net/devlink_port_split.py309
-rwxr-xr-xtools/testing/selftests/net/drop_monitor_tests.sh215
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh799
-rwxr-xr-xtools/testing/selftests/net/fib_nexthop_multiprefix.sh2
-rwxr-xr-xtools/testing/selftests/net/fib_nexthop_nongw.sh119
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh1367
-rwxr-xr-xtools/testing/selftests/net/fib_rule_tests.sh254
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh525
-rw-r--r--tools/testing/selftests/net/forwarding/Makefile45
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_igmp.sh494
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_locked_port.sh329
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mdb.sh1215
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mdb_host.sh103
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mdb_max.sh1336
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mdb_port_down.sh118
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_mld.sh564
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_aware.sh5
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_mcast.sh546
-rwxr-xr-xtools/testing/selftests/net/forwarding/bridge_vlan_unaware.sh5
-rw-r--r--tools/testing/selftests/net/forwarding/config5
-rwxr-xr-xtools/testing/selftests/net/forwarding/custom_multipath_hash.sh372
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh168
-rwxr-xr-xtools/testing/selftests/net/forwarding/dual_vxlan_bridge.sh366
-rwxr-xr-xtools/testing/selftests/net/forwarding/ethtool.sh19
-rwxr-xr-xtools/testing/selftests/net/forwarding/ethtool_extended_state.sh115
-rw-r--r--tools/testing/selftests/net/forwarding/ethtool_lib.sh51
-rwxr-xr-xtools/testing/selftests/net/forwarding/ethtool_mm.sh288
-rw-r--r--tools/testing/selftests/net/forwarding/fib_offload_lib.sh14
-rw-r--r--tools/testing/selftests/net/forwarding/forwarding.config.sample13
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_custom_multipath_hash.sh464
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath_nh.sh356
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath_nh_res.sh361
-rwxr-xr-xtools/testing/selftests/net/forwarding/hw_stats_l3.sh337
-rwxr-xr-xtools/testing/selftests/net/forwarding/hw_stats_l3_gre.sh109
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6_forward_instats_vrf.sh172
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_custom_multipath_hash.sh466
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_flat.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_flat_key.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_flat_keys.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_hier.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_hier_key.sh65
-rwxr-xr-xtools/testing/selftests/net/forwarding/ip6gre_hier_keys.sh65
-rw-r--r--tools/testing/selftests/net/forwarding/ip6gre_lib.sh438
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/net/forwarding/lib.sh801
-rwxr-xr-xtools/testing/selftests/net/forwarding/local_termination.sh299
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh11
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh3
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh7
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_changes.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh15
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh24
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_vlan.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/no_forwarding.sh261
-rwxr-xr-xtools/testing/selftests/net/forwarding/pedit_dsfield.sh75
-rwxr-xr-xtools/testing/selftests/net/forwarding/pedit_ip.sh201
-rwxr-xr-xtools/testing/selftests/net/forwarding/pedit_l4port.sh200
-rwxr-xr-xtools/testing/selftests/net/forwarding/q_in_vni.sh347
-rwxr-xr-xtools/testing/selftests/net/forwarding/q_in_vni_ipv6.sh347
-rwxr-xr-xtools/testing/selftests/net/forwarding/router.sh18
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh.sh74
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_mpath_nh_res.sh400
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multicast.sh92
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_multipath.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_nh.sh160
-rwxr-xr-xtools/testing/selftests/net/forwarding/router_vid_1.sh27
-rwxr-xr-xtools/testing/selftests/net/forwarding/sch_red.sh493
-rw-r--r--tools/testing/selftests/net/forwarding/sch_tbf_etsprio.sh32
-rwxr-xr-xtools/testing/selftests/net/forwarding/sch_tbf_root.sh4
-rwxr-xr-xtools/testing/selftests/net/forwarding/skbedit_priority.sh2
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_actions.sh124
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_chains.sh4
-rw-r--r--tools/testing/selftests/net/forwarding/tc_common.sh10
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_flower.sh344
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_mpls_l2vpn.sh192
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_police.sh441
-rwxr-xr-xtools/testing/selftests/net/forwarding/tc_tunnel_key.sh161
-rw-r--r--tools/testing/selftests/net/forwarding/tsn_lib.sh249
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_asymmetric.sh12
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_asymmetric_ipv6.sh504
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh13
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d_ipv6.sh804
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d_port_8472_ipv6.sh11
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q.sh20
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q_ipv6.sh837
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1q_port_8472_ipv6.sh11
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_symmetric.sh10
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_symmetric_ipv6.sh563
-rwxr-xr-xtools/testing/selftests/net/gre_gso.sh239
-rw-r--r--tools/testing/selftests/net/gro.c1106
-rwxr-xr-xtools/testing/selftests/net/gro.sh99
-rw-r--r--tools/testing/selftests/net/hsr/Makefile7
-rw-r--r--tools/testing/selftests/net/hsr/config4
-rwxr-xr-xtools/testing/selftests/net/hsr/hsr_ping.sh256
-rwxr-xr-xtools/testing/selftests/net/icmp.sh74
-rwxr-xr-xtools/testing/selftests/net/icmp_redirect.sh17
-rw-r--r--tools/testing/selftests/net/io_uring_zerocopy_tx.c584
-rwxr-xr-xtools/testing/selftests/net/io_uring_zerocopy_tx.sh127
-rwxr-xr-xtools/testing/selftests/net/ioam6.sh778
-rw-r--r--tools/testing/selftests/net/ioam6_parser.c673
-rw-r--r--tools/testing/selftests/net/ip_defrag.c8
-rwxr-xr-xtools/testing/selftests/net/ip_defrag.sh2
-rw-r--r--tools/testing/selftests/net/ip_local_port_range.c447
-rwxr-xr-xtools/testing/selftests/net/ip_local_port_range.sh5
-rw-r--r--tools/testing/selftests/net/ipsec.c2341
-rw-r--r--tools/testing/selftests/net/ipv6_flowlabel.c75
-rwxr-xr-xtools/testing/selftests/net/ipv6_flowlabel.sh16
-rwxr-xr-xtools/testing/selftests/net/l2_tos_ttl_inherit.sh446
-rw-r--r--tools/testing/selftests/net/mptcp/.gitignore2
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile8
-rw-r--r--tools/testing/selftests/net/mptcp/config25
-rwxr-xr-xtools/testing/selftests/net/mptcp/diag.sh283
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c849
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh478
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_inq.c602
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh3290
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_sockopt.c746
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_sockopt.sh347
-rwxr-xr-xtools/testing/selftests/net/mptcp/pm_netlink.sh69
-rw-r--r--tools/testing/selftests/net/mptcp/pm_nl_ctl.c917
-rw-r--r--tools/testing/selftests/net/mptcp/settings2
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh304
-rwxr-xr-xtools/testing/selftests/net/mptcp/userspace_pm.sh947
-rw-r--r--tools/testing/selftests/net/msg_zerocopy.c5
-rw-r--r--tools/testing/selftests/net/nat6to4.c285
-rwxr-xr-xtools/testing/selftests/net/ndisc_unsolicited_na_test.sh254
-rw-r--r--tools/testing/selftests/net/nettest.c760
-rw-r--r--tools/testing/selftests/net/openvswitch/Makefile13
-rwxr-xr-xtools/testing/selftests/net/openvswitch/openvswitch.sh305
-rw-r--r--tools/testing/selftests/net/openvswitch/ovs-dpctl.py1597
-rwxr-xr-xtools/testing/selftests/net/pmtu.sh974
-rw-r--r--tools/testing/selftests/net/psock_fanout.c84
-rw-r--r--tools/testing/selftests/net/psock_snd.c2
-rwxr-xr-xtools/testing/selftests/net/psock_snd.sh17
-rw-r--r--tools/testing/selftests/net/reuseaddr_ports_exhausted.c32
-rw-r--r--tools/testing/selftests/net/reuseport_bpf.c6
-rw-r--r--tools/testing/selftests/net/reuseport_bpf_numa.c6
-rwxr-xr-xtools/testing/selftests/net/rps_default_mask.sh75
-rwxr-xr-xtools/testing/selftests/net/rtnetlink.sh267
-rwxr-xr-xtools/testing/selftests/net/run_afpackettests5
-rw-r--r--tools/testing/selftests/net/rxtimestamp.c128
-rwxr-xr-xtools/testing/selftests/net/rxtimestamp.sh4
-rw-r--r--tools/testing/selftests/net/sctp_hello.c137
-rwxr-xr-xtools/testing/selftests/net/sctp_vrf.sh178
-rw-r--r--tools/testing/selftests/net/settings1
-rwxr-xr-xtools/testing/selftests/net/setup_loopback.sh118
-rw-r--r--tools/testing/selftests/net/setup_veth.sh41
-rw-r--r--tools/testing/selftests/net/sk_bind_sendto_listen.c80
-rw-r--r--tools/testing/selftests/net/sk_connect_zero_addr.c62
-rw-r--r--tools/testing/selftests/net/so_incoming_cpu.c242
-rw-r--r--tools/testing/selftests/net/so_netns_cookie.c61
-rw-r--r--tools/testing/selftests/net/so_txtime.c282
-rwxr-xr-xtools/testing/selftests/net/so_txtime.sh97
-rw-r--r--tools/testing/selftests/net/socket.c3
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh576
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh497
-rwxr-xr-xtools/testing/selftests/net/srv6_end_dt6_l3vpn_test.sh505
-rwxr-xr-xtools/testing/selftests/net/srv6_end_flavors_test.sh869
-rwxr-xr-xtools/testing/selftests/net/srv6_end_next_csid_l3vpn_test.sh1145
-rwxr-xr-xtools/testing/selftests/net/srv6_hencap_red_l3vpn_test.sh879
-rwxr-xr-xtools/testing/selftests/net/srv6_hl2encap_red_l2vpn_test.sh821
-rw-r--r--tools/testing/selftests/net/stress_reuseport_listen.c105
-rwxr-xr-xtools/testing/selftests/net/stress_reuseport_listen.sh25
-rw-r--r--tools/testing/selftests/net/tap.c434
-rw-r--r--tools/testing/selftests/net/tcp_fastopen_backup_key.c6
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c158
-rwxr-xr-xtools/testing/selftests/net/test_bridge_neigh_suppress.sh862
-rw-r--r--tools/testing/selftests/net/test_ingress_egress_chaining.sh79
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_mdb.sh2318
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_under_vrf.sh12
-rwxr-xr-xtools/testing/selftests/net/test_vxlan_vnifiltering.sh571
-rw-r--r--tools/testing/selftests/net/timestamping.c112
-rw-r--r--tools/testing/selftests/net/tls.c925
-rw-r--r--tools/testing/selftests/net/toeplitz.c589
-rwxr-xr-xtools/testing/selftests/net/toeplitz.sh199
-rwxr-xr-xtools/testing/selftests/net/toeplitz_client.sh28
-rw-r--r--tools/testing/selftests/net/tun.c162
-rw-r--r--tools/testing/selftests/net/txtimestamp.c22
-rwxr-xr-xtools/testing/selftests/net/txtimestamp.sh2
-rwxr-xr-xtools/testing/selftests/net/udpgro.sh46
-rwxr-xr-xtools/testing/selftests/net/udpgro_bench.sh10
-rwxr-xr-xtools/testing/selftests/net/udpgro_frglist.sh103
-rwxr-xr-xtools/testing/selftests/net/udpgro_fwd.sh255
-rw-r--r--tools/testing/selftests/net/udpgso.c12
-rwxr-xr-xtools/testing/selftests/net/udpgso_bench.sh26
-rw-r--r--tools/testing/selftests/net/udpgso_bench_rx.c24
-rw-r--r--tools/testing/selftests/net/udpgso_bench_tx.c44
-rwxr-xr-xtools/testing/selftests/net/unicast_extensions.sh232
-rwxr-xr-xtools/testing/selftests/net/veth.sh360
-rwxr-xr-xtools/testing/selftests/net/vrf-xfrm-tests.sh436
-rwxr-xr-xtools/testing/selftests/net/vrf_route_leaking.sh626
-rwxr-xr-xtools/testing/selftests/net/vrf_strict_mode_test.sh429
-rwxr-xr-xtools/testing/selftests/net/xfrm_policy.sh45
-rw-r--r--tools/testing/selftests/netfilter/.gitignore3
-rw-r--r--tools/testing/selftests/netfilter/Makefile16
-rw-r--r--tools/testing/selftests/netfilter/connect_close.c136
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_icmp_related.sh36
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_tcp_unreplied.sh167
-rwxr-xr-xtools/testing/selftests/netfilter/conntrack_vrf.sh241
-rwxr-xr-xtools/testing/selftests/netfilter/ipip-conntrack-mtu.sh207
-rw-r--r--tools/testing/selftests/netfilter/nf-queue.c61
-rwxr-xr-xtools/testing/selftests/netfilter/nf_nat_edemux.sh99
-rwxr-xr-xtools/testing/selftests/netfilter/nft_concat_range.sh161
-rwxr-xr-xtools/testing/selftests/netfilter/nft_conntrack_helper.sh197
-rwxr-xr-xtools/testing/selftests/netfilter/nft_fib.sh273
-rwxr-xr-xtools/testing/selftests/netfilter/nft_flowtable.sh581
-rwxr-xr-xtools/testing/selftests/netfilter/nft_meta.sh142
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh355
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat_zones.sh309
-rwxr-xr-xtools/testing/selftests/netfilter/nft_queue.sh143
-rwxr-xr-xtools/testing/selftests/netfilter/nft_synproxy.sh117
-rwxr-xr-xtools/testing/selftests/netfilter/nft_trans_stress.sh91
-rwxr-xr-xtools/testing/selftests/netfilter/nft_zones_many.sh163
-rwxr-xr-xtools/testing/selftests/netfilter/rpath.sh169
-rw-r--r--tools/testing/selftests/netfilter/settings1
-rw-r--r--tools/testing/selftests/nolibc/.gitignore4
-rw-r--r--tools/testing/selftests/nolibc/Makefile167
-rw-r--r--tools/testing/selftests/nolibc/nolibc-test.c909
-rw-r--r--tools/testing/selftests/nsfs/pidns.c2
-rwxr-xr-xtools/testing/selftests/ntb/ntb_test.sh2
-rw-r--r--tools/testing/selftests/openat2/Makefile2
-rw-r--r--tools/testing/selftests/openat2/helpers.h12
-rw-r--r--tools/testing/selftests/openat2/openat2_test.c29
-rw-r--r--tools/testing/selftests/perf_events/.gitignore3
-rw-r--r--tools/testing/selftests/perf_events/Makefile6
-rw-r--r--tools/testing/selftests/perf_events/config1
-rw-r--r--tools/testing/selftests/perf_events/remove_on_exec.c260
-rw-r--r--tools/testing/selftests/perf_events/settings1
-rw-r--r--tools/testing/selftests/perf_events/sigtrap_threads.c240
-rw-r--r--tools/testing/selftests/pid_namespace/Makefile8
-rw-r--r--tools/testing/selftests/pid_namespace/regression_enomem.c1
-rw-r--r--tools/testing/selftests/pidfd/.gitignore1
-rw-r--r--tools/testing/selftests/pidfd/Makefile5
-rw-r--r--tools/testing/selftests/pidfd/config7
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h26
-rw-r--r--tools/testing/selftests/pidfd/pidfd_fdinfo_test.c22
-rw-r--r--tools/testing/selftests/pidfd/pidfd_getfd_test.c11
-rw-r--r--tools/testing/selftests/pidfd/pidfd_open_test.c1
-rw-r--r--tools/testing/selftests/pidfd/pidfd_poll_test.c1
-rw-r--r--tools/testing/selftests/pidfd/pidfd_setns_test.c559
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c67
-rw-r--r--tools/testing/selftests/pidfd/pidfd_wait.c320
-rw-r--r--tools/testing/selftests/powerpc/Makefile13
-rw-r--r--tools/testing/selftests/powerpc/alignment/alignment_handler.c166
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/context_switch.c27
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/gettimeofday.c6
-rw-r--r--tools/testing/selftests/powerpc/benchmarks/null_syscall.c3
-rw-r--r--tools/testing/selftests/powerpc/copyloops/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile15
-rw-r--r--tools/testing/selftests/powerpc/copyloops/asm/ppc_asm.h2
l---------tools/testing/selftests/powerpc/copyloops/copy_mc_64.S1
l---------tools/testing/selftests/powerpc/copyloops/mem_64.S1
l---------tools/testing/selftests/powerpc/copyloops/memcpy_mcsafe_64.S1
-rw-r--r--tools/testing/selftests/powerpc/copyloops/memcpy_stubs.S8
-rw-r--r--tools/testing/selftests/powerpc/copyloops/memmove_validate.c58
-rw-r--r--tools/testing/selftests/powerpc/dscr/Makefile5
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr.h55
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_default_test.c205
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_explicit_test.c171
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c6
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_sysfs_test.c41
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_sysfs_thread_test.c2
-rw-r--r--tools/testing/selftests/powerpc/dscr/dscr_user_test.c6
-rw-r--r--tools/testing/selftests/powerpc/eeh/Makefile2
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-basic.sh40
-rw-r--r--[-rwxr-xr-x]tools/testing/selftests/powerpc/eeh/eeh-functions.sh179
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-vf-aware.sh45
-rwxr-xr-xtools/testing/selftests/powerpc/eeh/eeh-vf-unaware.sh35
-rw-r--r--tools/testing/selftests/powerpc/eeh/settings1
-rw-r--r--tools/testing/selftests/powerpc/include/basic_asm.h63
-rw-r--r--tools/testing/selftests/powerpc/include/instructions.h77
-rw-r--r--tools/testing/selftests/powerpc/include/pkeys.h136
-rw-r--r--tools/testing/selftests/powerpc/include/reg.h83
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h76
-rw-r--r--tools/testing/selftests/powerpc/lib/reg.S107
-rw-r--r--tools/testing/selftests/powerpc/math/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/math/Makefile12
-rw-r--r--tools/testing/selftests/powerpc/math/fpu_denormal.c38
-rw-r--r--tools/testing/selftests/powerpc/math/mma.S36
-rw-r--r--tools/testing/selftests/powerpc/math/mma.c48
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_preempt.c3
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_signal.c4
-rw-r--r--tools/testing/selftests/powerpc/math/vmx_syscall.c7
-rw-r--r--tools/testing/selftests/powerpc/math/vsx_preempt.c2
-rw-r--r--tools/testing/selftests/powerpc/mce/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/mce/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/mce/inject-ra-err.c65
l---------tools/testing/selftests/powerpc/mce/vas-api.h1
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore5
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile20
-rw-r--r--tools/testing/selftests/powerpc/mm/bad_accesses.c31
-rw-r--r--tools/testing/selftests/powerpc/mm/exec_prot.c231
-rw-r--r--tools/testing/selftests/powerpc/mm/large_vm_gpr_corruption.c158
-rw-r--r--tools/testing/selftests/powerpc/mm/pkey_exec_prot.c294
-rw-r--r--tools/testing/selftests/powerpc/mm/pkey_siginfo.c333
-rw-r--r--tools/testing/selftests/powerpc/mm/prot_sao.c10
-rw-r--r--tools/testing/selftests/powerpc/mm/stack_expansion_ldst.c202
-rw-r--r--tools/testing/selftests/powerpc/mm/stack_expansion_signal.c118
-rwxr-xr-xtools/testing/selftests/powerpc/mm/stress_code_patching.sh49
-rw-r--r--tools/testing/selftests/powerpc/mm/tlbie_test.c1
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/99-nx-gzip.rules1
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/Makefile8
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/README45
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gunz_test.c1028
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gzfht_test.c398
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/gzip_vas.c316
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/copy-paste.h56
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/crb.h155
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/nx.h38
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/nx_dbg.h95
-rw-r--r--tools/testing/selftests/powerpc/nx-gzip/include/nxu.h650
l---------tools/testing/selftests/powerpc/nx-gzip/include/vas-api.h1
-rwxr-xr-xtools/testing/selftests/powerpc/nx-gzip/nx-gzip-test.sh46
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/Makefile7
-rw-r--r--tools/testing/selftests/powerpc/papr_attributes/attr_test.c113
-rw-r--r--tools/testing/selftests/powerpc/pmu/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile39
-rw-r--r--tools/testing/selftests/powerpc/pmu/branch_loops.S28
-rw-r--r--tools/testing/selftests/powerpc/pmu/count_stcx_fail.c164
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/Makefile6
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cpu_event_pinned_vs_ebb_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cpu_event_vs_ebb_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/ebb_vs_cpu_event_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/fixed_instruction_loop.S43
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c1
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c7
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c8
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/no_handler_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c2
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/regs_access_pmccext_test.c63
-rw-r--r--tools/testing/selftests/powerpc/pmu/ebb/trace.h4
-rw-r--r--tools/testing/selftests/powerpc/pmu/event.c19
-rw-r--r--tools/testing/selftests/powerpc/pmu/event.h6
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/.gitignore20
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/Makefile15
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/blacklisted_events_test.c132
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p10.c109
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/event_alternatives_tests_p9.c116
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/generic_events_valid_test.c130
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_cache_test.c60
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_l2l3_sel_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_mmcra_sample_test.c54
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc56_test.c63
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_pmc_count_test.c70
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_radix_scope_qual_test.c56
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_repeat_test.c56
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_cmp_test.c96
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_ctl_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_thresh_sel_test.c63
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_constraint_unit_test.c74
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/group_pmc56_exclude_constraints_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/hw_cache_event_type_test.c88
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/invalid_event_code_test.c67
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_sample_elig_mode_test.c77
-rw-r--r--tools/testing/selftests/powerpc/pmu/event_code_tests/reserved_bits_mmcra_thresh_ctl_test.c44
-rw-r--r--tools/testing/selftests/powerpc/pmu/l3_bank_test.c3
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.c53
-rw-r--r--tools/testing/selftests/powerpc/pmu/lib.h2
-rw-r--r--tools/testing/selftests/powerpc/pmu/loop.S35
-rw-r--r--tools/testing/selftests/powerpc/pmu/per_event_excludes.c5
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/.gitignore21
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/Makefile15
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_filter_map_test.c114
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/bhrb_no_crash_wo_pmu_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/intr_regs_no_crash_wo_pmu_test.c57
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.c537
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/misc.h234
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_cc56run_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_exceptionbits_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc1ce_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_fc56_pmc56_test.c58
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmccext_test.c59
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr0_pmcjce_test.c58
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_comb_test.c66
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr1_sel_unit_cache_test.c77
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_fcs_fch_test.c85
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr2_l2l3_test.c74
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcr3_src_test.c67
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_any_test.c65
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_cond_test.c69
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_no_branch_test.c64
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_disable_test.c66
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_bhrb_ind_call_test.c69
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_cmp_test.c74
-rw-r--r--tools/testing/selftests/powerpc/pmu/sampling_tests/mmcra_thresh_marked_sample_test.c80
l---------tools/testing/selftests/powerpc/primitives/asm/extable.h1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile40
-rw-r--r--tools/testing/selftests/powerpc/ptrace/core-pkey.c32
-rw-r--r--tools/testing/selftests/powerpc/ptrace/perf-hwbreak.c669
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.S52
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.c125
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-gpr.h14
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c133
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-perf-hwbreak.c659
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-pkey.c57
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tar.c3
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-gpr.c21
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-gpr.c23
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-tar.c1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spd-vsx.c1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-spr.c1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-tar.c1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-tm-vsx.c1
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-vsx.c2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace.h84
-rwxr-xr-xtools/testing/selftests/powerpc/scripts/hmi.sh2
-rw-r--r--tools/testing/selftests/powerpc/security/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/security/Makefile9
-rw-r--r--tools/testing/selftests/powerpc/security/entry_flush.c139
-rw-r--r--tools/testing/selftests/powerpc/security/flush_utils.c84
-rw-r--r--tools/testing/selftests/powerpc/security/flush_utils.h24
-rwxr-xr-xtools/testing/selftests/powerpc/security/mitigation-patching.sh78
-rw-r--r--tools/testing/selftests/powerpc/security/rfi_flush.c68
-rw-r--r--tools/testing/selftests/powerpc/security/spectre_v2.c29
-rw-r--r--tools/testing/selftests/powerpc/security/uaccess_flush.c158
-rw-r--r--tools/testing/selftests/powerpc/signal/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/signal/sig_sc_double_restart.c174
-rw-r--r--tools/testing/selftests/powerpc/signal/signal_tm.c1
-rw-r--r--tools/testing/selftests/powerpc/signal/sigreturn_kernel.c132
-rw-r--r--tools/testing/selftests/powerpc/signal/sigreturn_unaligned.c43
-rw-r--r--tools/testing/selftests/powerpc/stringloops/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/stringloops/asm/ppc_asm.h1
-rw-r--r--tools/testing/selftests/powerpc/stringloops/memcmp.c46
-rw-r--r--tools/testing/selftests/powerpc/switch_endian/switch_endian_test.S23
-rw-r--r--tools/testing/selftests/powerpc/syscalls/.gitignore1
-rw-r--r--tools/testing/selftests/powerpc/syscalls/Makefile6
-rw-r--r--tools/testing/selftests/powerpc/syscalls/rtas_filter.c224
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-exec.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-fork.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-poison.c13
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-fpu.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-gpr.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vmx.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-context-chk-vsx.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-pagefault.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-sigreturn-nt.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-signal-stack.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-sigreturn.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall-asm.S37
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-syscall.c38
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-tar.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-tmspr.c11
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-trap.c15
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-unavailable.c10
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmx-unavail.c2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-vmxcopy.c1
-rw-r--r--tools/testing/selftests/powerpc/tm/tm.h39
-rw-r--r--tools/testing/selftests/powerpc/utils.c523
-rw-r--r--tools/testing/selftests/prctl/.gitignore1
-rw-r--r--tools/testing/selftests/prctl/Makefile2
-rw-r--r--tools/testing/selftests/prctl/config1
-rw-r--r--tools/testing/selftests/prctl/disable-tsc-ctxt-sw-stress-test.c2
-rw-r--r--tools/testing/selftests/prctl/disable-tsc-on-off-stress-test.c2
-rw-r--r--tools/testing/selftests/prctl/set-anon-vma-name-test.c104
-rw-r--r--tools/testing/selftests/proc/.gitignore5
-rw-r--r--tools/testing/selftests/proc/Makefile6
-rw-r--r--tools/testing/selftests/proc/proc-empty-vm.c388
-rw-r--r--tools/testing/selftests/proc/proc-fsconfig-hidepid.c50
-rw-r--r--tools/testing/selftests/proc/proc-loadavg-001.c1
-rw-r--r--tools/testing/selftests/proc/proc-multiple-procfs.c48
-rw-r--r--tools/testing/selftests/proc/proc-pid-vm.c64
-rw-r--r--tools/testing/selftests/proc/proc-self-syscall.c1
-rw-r--r--tools/testing/selftests/proc/proc-subset-pid.c121
-rw-r--r--tools/testing/selftests/proc/proc-tid0.c81
-rw-r--r--tools/testing/selftests/proc/proc-uptime-001.c25
-rw-r--r--tools/testing/selftests/proc/proc-uptime-002.c31
-rw-r--r--tools/testing/selftests/proc/proc-uptime.h28
-rw-r--r--tools/testing/selftests/proc/read.c4
-rwxr-xr-xtools/testing/selftests/pstore/pstore_tests2
-rw-r--r--tools/testing/selftests/ptp/Makefile9
-rw-r--r--tools/testing/selftests/ptp/testptp.c95
-rw-r--r--tools/testing/selftests/ptrace/.gitignore2
-rw-r--r--tools/testing/selftests/ptrace/Makefile4
-rw-r--r--tools/testing/selftests/ptrace/get_set_sud.c72
-rw-r--r--tools/testing/selftests/ptrace/peeksiginfo.c14
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config2csv.sh66
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/config_override.sh3
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configcheck.sh8
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/configinit.sh7
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/console-badness.sh18
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/cpus2use.sh1
-rw-r--r--tools/testing/selftests/rcutorture/bin/functions.sh66
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/jitter.sh26
-rw-r--r--tools/testing/selftests/rcutorture/bin/jitterstart.sh37
-rw-r--r--tools/testing/selftests/rcutorture/bin/jitterstop.sh23
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kcsan-collapse.sh22
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-again.sh221
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-assign-cpus.sh105
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-build.sh19
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-check-branches.sh102
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-end-run-stats.sh39
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-find-errors.sh21
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-get-cpus-script.sh88
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-lock.sh2
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcu.sh20
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuscale-ftrace.sh (renamed from tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh)6
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-rcuscale.sh (renamed from tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh)16
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-refscale.sh71
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck-scf.sh38
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-recheck.sh42
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-remote-noreap.sh30
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-remote.sh274
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run-batch.sh90
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run-qemu.sh184
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh238
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm-transform.sh139
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/kvm.sh348
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/mkinitrd.sh4
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-build.sh8
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/parse-console.sh52
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/srcu_lockdep.sh78
-rwxr-xr-xtools/testing/selftests/rcutorture/bin/torture.sh616
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/CFLIST2
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK086
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK08.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK096
-rw-r--r--tools/testing/selftests/rcutorture/configs/lock/LOCK09.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST17
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/BUSTED-BOOST.boot8
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/CFLIST7
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/RUDE0112
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/RUDE01.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-N2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-N.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-T (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-t)4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-T.boot (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-t.boot)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-U (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-u)4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/SRCU-U.boot (renamed from tools/testing/selftests/rcutorture/configs/rcu/SRCU-u.boot)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS013
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS01.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS024
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS02.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TASKS034
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TINY011
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TINY021
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRACE0114
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRACE01.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRACE0213
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TRACE02.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE012
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE01.boot4
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE021
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE02.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE047
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE052
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE061
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE072
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE081
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE092
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE104
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/TREE10.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcu/ver_functions.sh16
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/CFLIST (renamed from tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST)0
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/CFcommon6
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TINY (renamed from tools/testing/selftests/rcutorture/configs/rcuperf/TINY)2
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TRACE0114
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TRACE01.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TREE (renamed from tools/testing/selftests/rcutorture/configs/rcuperf/TREE)5
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/TREE54 (renamed from tools/testing/selftests/rcutorture/configs/rcuperf/TREE54)3
-rw-r--r--tools/testing/selftests/rcutorture/configs/rcuscale/ver_functions.sh (renamed from tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh)4
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/CFLIST2
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/CFcommon6
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/NOPREEMPT19
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/PREEMPT17
-rw-r--r--tools/testing/selftests/rcutorture/configs/refscale/ver_functions.sh16
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/CFLIST2
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/CFcommon2
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT11
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/NOPREEMPT.boot1
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/PREEMPT10
-rw-r--r--tools/testing/selftests/rcutorture/configs/scf/ver_functions.sh29
-rw-r--r--tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt5
-rw-r--r--tools/testing/selftests/rcutorture/doc/initrd.txt36
-rw-r--r--tools/testing/selftests/rcutorture/doc/rcu-test-image.txt41
-rw-r--r--tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/locks.h2
-rw-r--r--tools/testing/selftests/resctrl/.gitignore2
-rw-r--r--tools/testing/selftests/resctrl/Makefile19
-rw-r--r--tools/testing/selftests/resctrl/README43
-rw-r--r--tools/testing/selftests/resctrl/cache.c67
-rw-r--r--tools/testing/selftests/resctrl/cat_test.c90
-rw-r--r--tools/testing/selftests/resctrl/cmt_test.c (renamed from tools/testing/selftests/resctrl/cqm_test.c)91
-rw-r--r--tools/testing/selftests/resctrl/config2
-rw-r--r--tools/testing/selftests/resctrl/fill_buf.c29
-rw-r--r--tools/testing/selftests/resctrl/mba_test.c73
-rw-r--r--tools/testing/selftests/resctrl/mbm_test.c64
-rw-r--r--tools/testing/selftests/resctrl/resctrl.h40
-rw-r--r--tools/testing/selftests/resctrl/resctrl_tests.c202
-rw-r--r--tools/testing/selftests/resctrl/resctrl_val.c180
-rw-r--r--tools/testing/selftests/resctrl/resctrlfs.c141
-rw-r--r--tools/testing/selftests/resctrl/settings3
-rw-r--r--tools/testing/selftests/riscv/Makefile58
-rw-r--r--tools/testing/selftests/riscv/hwprobe/Makefile10
-rw-r--r--tools/testing/selftests/riscv/hwprobe/hwprobe.c90
-rw-r--r--tools/testing/selftests/riscv/hwprobe/sys_hwprobe.S12
-rw-r--r--tools/testing/selftests/rlimits/.gitignore2
-rw-r--r--tools/testing/selftests/rlimits/Makefile6
-rw-r--r--tools/testing/selftests/rlimits/config1
-rw-r--r--tools/testing/selftests/rlimits/rlimits-per-userns.c161
-rw-r--r--tools/testing/selftests/rseq/.gitignore4
-rw-r--r--tools/testing/selftests/rseq/Makefile24
-rw-r--r--tools/testing/selftests/rseq/basic_percpu_ops_test.c49
-rw-r--r--tools/testing/selftests/rseq/basic_test.c4
-rw-r--r--tools/testing/selftests/rseq/compiler.h36
-rw-r--r--tools/testing/selftests/rseq/param_test.c385
-rw-r--r--tools/testing/selftests/rseq/rseq-abi.h173
-rw-r--r--tools/testing/selftests/rseq/rseq-arm-bits.h505
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h699
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64-bits.h392
-rw-r--r--tools/testing/selftests/rseq/rseq-arm64.h487
-rw-r--r--tools/testing/selftests/rseq/rseq-bits-reset.h11
-rw-r--r--tools/testing/selftests/rseq/rseq-bits-template.h41
-rw-r--r--tools/testing/selftests/rseq/rseq-generic-thread-pointer.h25
-rw-r--r--tools/testing/selftests/rseq/rseq-mips-bits.h462
-rw-r--r--tools/testing/selftests/rseq/rseq-mips.h683
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc-bits.h454
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc-thread-pointer.h30
-rw-r--r--tools/testing/selftests/rseq/rseq-ppc.h633
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv-bits.h410
-rw-r--r--tools/testing/selftests/rseq/rseq-riscv.h198
-rw-r--r--tools/testing/selftests/rseq/rseq-s390-bits.h474
-rw-r--r--tools/testing/selftests/rseq/rseq-s390.h466
-rw-r--r--tools/testing/selftests/rseq/rseq-skip.h65
-rw-r--r--tools/testing/selftests/rseq/rseq-thread-pointer.h19
-rw-r--r--tools/testing/selftests/rseq/rseq-x86-bits.h993
-rw-r--r--tools/testing/selftests/rseq/rseq-x86-thread-pointer.h40
-rw-r--r--tools/testing/selftests/rseq/rseq-x86.h1072
-rw-r--r--tools/testing/selftests/rseq/rseq.c221
-rw-r--r--tools/testing/selftests/rseq/rseq.h239
-rwxr-xr-xtools/testing/selftests/rseq/run_param_test.sh7
-rw-r--r--tools/testing/selftests/rtc/rtctest.c99
-rw-r--r--tools/testing/selftests/rtc/settings2
-rwxr-xr-xtools/testing/selftests/run_kselftest.sh93
-rw-r--r--tools/testing/selftests/safesetid/Makefile2
-rw-r--r--tools/testing/selftests/safesetid/safesetid-test.c295
-rw-r--r--tools/testing/selftests/sched/.gitignore1
-rw-r--r--tools/testing/selftests/sched/Makefile14
-rw-r--r--tools/testing/selftests/sched/config1
-rw-r--r--tools/testing/selftests/sched/cs_prctl_test.c359
-rw-r--r--tools/testing/selftests/seccomp/Makefile3
-rw-r--r--tools/testing/selftests/seccomp/config2
-rw-r--r--tools/testing/selftests/seccomp/seccomp_benchmark.c195
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c1680
-rw-r--r--tools/testing/selftests/seccomp/settings1
-rw-r--r--tools/testing/selftests/sgx/.gitignore2
-rw-r--r--tools/testing/selftests/sgx/Makefile57
-rw-r--r--tools/testing/selftests/sgx/call.S44
-rw-r--r--tools/testing/selftests/sgx/defines.h79
-rw-r--r--tools/testing/selftests/sgx/load.c367
-rw-r--r--tools/testing/selftests/sgx/main.c1993
-rw-r--r--tools/testing/selftests/sgx/main.h46
-rw-r--r--tools/testing/selftests/sgx/sign_key.S12
-rw-r--r--tools/testing/selftests/sgx/sign_key.pem39
-rw-r--r--tools/testing/selftests/sgx/sigstruct.c390
-rw-r--r--tools/testing/selftests/sgx/test_encl.c139
-rw-r--r--tools/testing/selftests/sgx/test_encl.lds41
-rw-r--r--tools/testing/selftests/sgx/test_encl_bootstrap.S102
-rw-r--r--tools/testing/selftests/sigaltstack/current_stack_pointer.h23
-rw-r--r--tools/testing/selftests/sigaltstack/sas.c31
-rw-r--r--tools/testing/selftests/sparc64/drivers/adi-test.c4
-rw-r--r--tools/testing/selftests/splice/.gitignore1
-rw-r--r--tools/testing/selftests/splice/Makefile4
-rw-r--r--tools/testing/selftests/splice/config1
-rw-r--r--tools/testing/selftests/splice/settings1
-rwxr-xr-xtools/testing/selftests/splice/short_splice_read.sh133
-rw-r--r--tools/testing/selftests/splice/splice_read.c57
-rw-r--r--tools/testing/selftests/sync/Makefile2
-rw-r--r--tools/testing/selftests/sync/config2
-rw-r--r--tools/testing/selftests/sync/sync_test.c2
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/.gitignore3
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/Makefile9
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/config1
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_benchmark.c202
-rw-r--r--tools/testing/selftests/syscall_user_dispatch/sud_test.c312
-rw-r--r--tools/testing/selftests/sysctl/config2
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh92
-rw-r--r--tools/testing/selftests/tc-testing/.gitignore1
-rw-r--r--tools/testing/selftests/tc-testing/Makefile (renamed from tools/testing/selftests/tc-testing/bpf/Makefile)13
-rw-r--r--tools/testing/selftests/tc-testing/action.c (renamed from tools/testing/selftests/tc-testing/bpf/action.c)0
-rw-r--r--tools/testing/selftests/tc-testing/config43
-rw-r--r--tools/testing/selftests/tc-testing/creating-testcases/AddingTestCases.txt2
-rw-r--r--tools/testing/selftests/tc-testing/plugin-lib/scapyPlugin.py42
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json6
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json50
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/csum.json4
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ct.json45
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ctinfo.json316
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gact.json77
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/gate.json315
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/ife.json50
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/nat.json50
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json25
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/police.json72
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/sample.json50
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/simple.json83
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json24
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json95
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json28
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/xt.json219
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/basic.json47
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/bpf.json171
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/cgroup.json1236
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/flow.json623
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/matchall.json72
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/route.json181
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/tests.json44
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/filters/u32.json46
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/infra/actions.json416
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/cake.json487
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/cbs.json234
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/choke.json188
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/codel.json211
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/drr.json71
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/etf.json117
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq.json417
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_codel.json326
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json21
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/gred.json164
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/hfsc.json167
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/hhf.json210
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/htb.json285
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/ingress.json20
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/mq.json159
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/mqprio.json114
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/multiq.json114
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/netem.json372
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/pfifo_fast.json119
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/plug.json188
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/prio.json20
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/qfq.json217
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/red.json23
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfb.json279
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/sfq.json232
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/skbprio.json95
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/taprio.json135
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/tbf.json211
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/qdiscs/teql.json97
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.py151
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc.sh6
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_batch.py8
-rw-r--r--tools/testing/selftests/tc-testing/tdc_config.py5
-rwxr-xr-xtools/testing/selftests/tc-testing/tdc_multibatch.py2
-rw-r--r--tools/testing/selftests/tdx/Makefile7
-rw-r--r--tools/testing/selftests/tdx/config1
-rw-r--r--tools/testing/selftests/tdx/tdx_guest_test.c163
-rw-r--r--tools/testing/selftests/timens/.gitignore2
-rw-r--r--tools/testing/selftests/timens/Makefile2
-rw-r--r--tools/testing/selftests/timens/clock_nanosleep.c2
-rw-r--r--tools/testing/selftests/timens/futex.c110
-rw-r--r--tools/testing/selftests/timens/gettime_perf.c8
-rw-r--r--tools/testing/selftests/timens/procfs.c60
-rw-r--r--tools/testing/selftests/timens/timens.c4
-rw-r--r--tools/testing/selftests/timens/timens.h13
-rw-r--r--tools/testing/selftests/timens/timer.c5
-rw-r--r--tools/testing/selftests/timens/timerfd.c5
-rw-r--r--tools/testing/selftests/timens/vfork_exec.c139
-rw-r--r--tools/testing/selftests/timers/Makefile1
-rw-r--r--tools/testing/selftests/timers/adjtick.c2
-rw-r--r--tools/testing/selftests/timers/alarmtimer-suspend.c4
-rw-r--r--tools/testing/selftests/timers/change_skew.c2
-rw-r--r--tools/testing/selftests/timers/clocksource-switch.c75
-rw-r--r--tools/testing/selftests/timers/inconsistency-check.c34
-rw-r--r--tools/testing/selftests/timers/leap-a-day.c2
-rw-r--r--tools/testing/selftests/timers/leapcrash.c4
-rw-r--r--tools/testing/selftests/timers/nanosleep.c18
-rw-r--r--tools/testing/selftests/timers/posix_timers.c77
-rw-r--r--tools/testing/selftests/timers/raw_skew.c2
-rw-r--r--tools/testing/selftests/timers/rtcpie.c10
-rw-r--r--tools/testing/selftests/timers/settings1
-rw-r--r--tools/testing/selftests/timers/skew_consistency.c2
-rw-r--r--tools/testing/selftests/timers/threadtest.c2
-rw-r--r--tools/testing/selftests/timers/valid-adjtimex.c2
-rw-r--r--tools/testing/selftests/tpm2/Makefile2
-rw-r--r--tools/testing/selftests/tpm2/settings1
-rwxr-xr-xtools/testing/selftests/tpm2/test_async.sh10
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh16
-rwxr-xr-xtools/testing/selftests/tpm2/test_space.sh10
-rw-r--r--tools/testing/selftests/tpm2/tpm2.py93
-rw-r--r--tools/testing/selftests/tpm2/tpm2_tests.py80
-rw-r--r--tools/testing/selftests/uevent/uevent_filtering.c1
-rw-r--r--tools/testing/selftests/user_events/Makefile17
-rw-r--r--tools/testing/selftests/user_events/abi_test.c229
-rw-r--r--tools/testing/selftests/user_events/dyn_test.c130
-rw-r--r--tools/testing/selftests/user_events/ftrace_test.c509
-rw-r--r--tools/testing/selftests/user_events/perf_test.c166
-rw-r--r--tools/testing/selftests/user_events/settings1
-rw-r--r--tools/testing/selftests/vDSO/.gitignore5
-rw-r--r--tools/testing/selftests/vDSO/Makefile21
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.c24
-rw-r--r--tools/testing/selftests/vDSO/parse_vdso.h31
-rw-r--r--tools/testing/selftests/vDSO/vdso_config.h90
-rw-r--r--tools/testing/selftests/vDSO/vdso_standalone_test_x86.c4
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_abi.c233
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_clock_getres.c124
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_correctness.c (renamed from tools/testing/selftests/x86/test_vdso.c)118
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_getcpu.c58
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_gettimeofday.c (renamed from tools/testing/selftests/vDSO/vdso_test.c)13
-rw-r--r--tools/testing/selftests/vm/.gitignore18
-rw-r--r--tools/testing/selftests/vm/Makefile38
-rw-r--r--tools/testing/selftests/vm/config3
-rw-r--r--tools/testing/selftests/vm/gup_benchmark.c137
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests310
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c1479
-rw-r--r--tools/testing/selftests/watchdog/watchdog-test.c106
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh133
-rw-r--r--tools/testing/selftests/wireguard/qemu/.gitignore1
-rw-r--r--tools/testing/selftests/wireguard/qemu/Makefile254
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/aarch64.config5
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config5
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/arm.config6
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/armeb.config6
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/i686.config7
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/m68k.config10
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mips.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mips64.config2
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mips64el.config2
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/mipsel.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/powerpc.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/powerpc64.config13
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv32.config13
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/riscv64.config12
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/s390x.config6
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/um.config3
-rw-r--r--tools/testing/selftests/wireguard/qemu/arch/x86_64.config6
-rw-r--r--tools/testing/selftests/wireguard/qemu/debug.config10
-rw-r--r--tools/testing/selftests/wireguard/qemu/init.c46
-rw-r--r--tools/testing/selftests/wireguard/qemu/kernel.config7
-rw-r--r--tools/testing/selftests/x86/.gitignore1
-rw-r--r--tools/testing/selftests/x86/Makefile23
-rw-r--r--tools/testing/selftests/x86/amx.c955
-rwxr-xr-xtools/testing/selftests/x86/check_cc.sh2
-rw-r--r--tools/testing/selftests/x86/corrupt_xstate_header.c102
-rw-r--r--tools/testing/selftests/x86/fsgsbase.c108
-rw-r--r--tools/testing/selftests/x86/fsgsbase_restore.c245
-rw-r--r--tools/testing/selftests/x86/helpers.h25
-rw-r--r--tools/testing/selftests/x86/iopl.c78
-rw-r--r--tools/testing/selftests/x86/lam.c1241
-rw-r--r--tools/testing/selftests/x86/ldt_gdt.c2
-rw-r--r--tools/testing/selftests/x86/mov_ss_trap.c4
-rw-r--r--tools/testing/selftests/x86/pkey-helpers.h219
-rw-r--r--tools/testing/selftests/x86/raw_syscall_helper_32.S2
-rw-r--r--tools/testing/selftests/x86/sigaltstack.c128
-rw-r--r--tools/testing/selftests/x86/sigreturn.c7
-rw-r--r--tools/testing/selftests/x86/single_step_syscall.c21
-rw-r--r--tools/testing/selftests/x86/syscall_arg_fault.c54
-rw-r--r--tools/testing/selftests/x86/syscall_nt.c47
-rw-r--r--tools/testing/selftests/x86/syscall_numbering.c491
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c46
-rw-r--r--tools/testing/selftests/x86/thunks.S2
-rw-r--r--tools/testing/selftests/x86/thunks_32.S2
-rw-r--r--tools/testing/selftests/x86/unwind_vdso.c23
-rwxr-xr-xtools/testing/selftests/zram/zram.sh15
-rwxr-xr-xtools/testing/selftests/zram/zram01.sh33
-rwxr-xr-xtools/testing/selftests/zram/zram02.sh1
-rwxr-xr-xtools/testing/selftests/zram/zram_lib.sh134
-rw-r--r--tools/testing/vsock/.gitignore1
-rw-r--r--tools/testing/vsock/Makefile3
-rw-r--r--tools/testing/vsock/README34
-rw-r--r--tools/testing/vsock/control.c28
-rw-r--r--tools/testing/vsock/control.h2
-rw-r--r--tools/testing/vsock/util.c45
-rw-r--r--tools/testing/vsock/util.h4
-rw-r--r--tools/testing/vsock/vsock_diag_test.c2
-rw-r--r--tools/testing/vsock/vsock_perf.c427
-rw-r--r--tools/testing/vsock/vsock_test.c820
-rw-r--r--tools/thermal/lib/Build3
-rw-r--r--tools/thermal/lib/Makefile158
-rw-r--r--tools/thermal/lib/libthermal_tools.pc.template12
-rw-r--r--tools/thermal/lib/log.c77
-rw-r--r--tools/thermal/lib/log.h31
-rw-r--r--tools/thermal/lib/mainloop.c120
-rw-r--r--tools/thermal/lib/mainloop.h15
-rw-r--r--tools/thermal/lib/thermal-tools.h10
-rw-r--r--tools/thermal/lib/uptimeofday.c40
-rw-r--r--tools/thermal/lib/uptimeofday.h12
-rw-r--r--tools/thermal/thermal-engine/Build1
-rw-r--r--tools/thermal/thermal-engine/Makefile28
-rw-r--r--tools/thermal/thermal-engine/thermal-engine.c341
-rw-r--r--tools/thermal/thermometer/Build1
-rw-r--r--tools/thermal/thermometer/Makefile26
-rw-r--r--tools/thermal/thermometer/thermometer.892
-rw-r--r--tools/thermal/thermometer/thermometer.c572
-rw-r--r--tools/thermal/thermometer/thermometer.conf5
-rw-r--r--tools/thermal/tmon/Makefile12
-rw-r--r--tools/thermal/tmon/pid.c5
-rw-r--r--tools/thermal/tmon/sysfs.c24
-rw-r--r--tools/thermal/tmon/tmon.h3
-rw-r--r--tools/tracing/Makefile29
-rw-r--r--tools/tracing/latency/.gitignore2
-rw-r--r--tools/tracing/latency/Makefile24
-rw-r--r--tools/tracing/latency/latency-collector.c2108
-rw-r--r--tools/tracing/rtla/.gitignore1
-rw-r--r--tools/tracing/rtla/Makefile152
-rw-r--r--tools/tracing/rtla/README.txt33
-rw-r--r--tools/tracing/rtla/src/osnoise.c1077
-rw-r--r--tools/tracing/rtla/src/osnoise.h106
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c883
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c711
-rw-r--r--tools/tracing/rtla/src/rtla.c89
-rw-r--r--tools/tracing/rtla/src/timerlat.c71
-rw-r--r--tools/tracing/rtla/src/timerlat.h4
-rw-r--r--tools/tracing/rtla/src/timerlat_aa.c990
-rw-r--r--tools/tracing/rtla/src/timerlat_aa.h12
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c914
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c790
-rw-r--r--tools/tracing/rtla/src/trace.c542
-rw-r--r--tools/tracing/rtla/src/trace.h50
-rw-r--r--tools/tracing/rtla/src/utils.c531
-rw-r--r--tools/tracing/rtla/src/utils.h61
-rw-r--r--tools/usb/Build2
-rw-r--r--tools/usb/Makefile53
-rw-r--r--tools/usb/ffs-aio-example/simple/device_app/aio_simple.c44
-rw-r--r--tools/usb/hcd-tests.sh2
-rw-r--r--tools/usb/testusb.c34
-rw-r--r--tools/usb/usbip/doc/usbip.846
-rw-r--r--tools/usb/usbip/doc/usbipd.828
-rw-r--r--tools/usb/usbip/libsrc/list.h10
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c2
-rw-r--r--tools/usb/usbip/src/usbip_list.c3
-rwxr-xr-xtools/usb/usbip/vudc/vudc_server_example.sh2
-rw-r--r--tools/verification/dot2/Makefile26
-rw-r--r--tools/verification/dot2/automata.py174
-rw-r--r--tools/verification/dot2/dot2c26
-rw-r--r--tools/verification/dot2/dot2c.py254
-rw-r--r--tools/verification/dot2/dot2k47
-rw-r--r--tools/verification/dot2/dot2k.py177
-rw-r--r--tools/verification/dot2/dot2k_templates/main_global.c91
-rw-r--r--tools/verification/dot2/dot2k_templates/main_per_cpu.c91
-rw-r--r--tools/verification/dot2/dot2k_templates/main_per_task.c91
-rw-r--r--tools/verification/models/wip.dot16
-rw-r--r--tools/verification/models/wwnr.dot16
-rw-r--r--tools/verification/rv/Makefile141
-rw-r--r--tools/verification/rv/README.txt38
-rw-r--r--tools/verification/rv/include/in_kernel.h3
-rw-r--r--tools/verification/rv/include/rv.h12
-rw-r--r--tools/verification/rv/include/trace.h16
-rw-r--r--tools/verification/rv/include/utils.h8
-rw-r--r--tools/verification/rv/src/in_kernel.c698
-rw-r--r--tools/verification/rv/src/rv.c188
-rw-r--r--tools/verification/rv/src/trace.c133
-rw-r--r--tools/verification/rv/src/utils.c47
-rw-r--r--tools/virtio/.gitignore1
-rw-r--r--tools/virtio/Makefile7
-rw-r--r--tools/virtio/asm/barrier.h11
-rw-r--r--tools/virtio/generated/autoconf.h0
-rw-r--r--tools/virtio/linux/bug.h10
-rw-r--r--tools/virtio/linux/build_bug.h7
-rw-r--r--tools/virtio/linux/compiler.h3
-rw-r--r--tools/virtio/linux/cpumask.h7
-rw-r--r--tools/virtio/linux/dma-mapping.h4
-rw-r--r--tools/virtio/linux/gfp.h7
-rw-r--r--tools/virtio/linux/kernel.h30
-rw-r--r--tools/virtio/linux/kmsan.h12
-rw-r--r--tools/virtio/linux/mm_types.h3
-rw-r--r--tools/virtio/linux/scatterlist.h1
-rw-r--r--tools/virtio/linux/spinlock.h56
-rw-r--r--tools/virtio/linux/topology.h7
-rw-r--r--tools/virtio/linux/uaccess.h11
-rw-r--r--tools/virtio/linux/virtio.h10
-rw-r--r--tools/virtio/linux/virtio_config.h11
-rw-r--r--tools/virtio/linux/vringh.h1
-rw-r--r--tools/virtio/ringtest/main.h37
-rw-r--r--tools/virtio/virtio-trace/README2
-rw-r--r--tools/virtio/virtio-trace/trace-agent-ctl.c2
-rw-r--r--tools/virtio/virtio_test.c154
-rw-r--r--tools/virtio/vringh_test.c4
-rw-r--r--tools/vm/page_owner_sort.c152
5218 files changed, 916891 insertions, 200788 deletions
diff --git a/tools/Makefile b/tools/Makefile
index bd778812e915..37e9f6804832 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -12,6 +12,7 @@ help:
@echo ' acpi - ACPI tools'
@echo ' bpf - misc BPF tools'
@echo ' cgroup - cgroup tools'
+ @echo ' counter - counter tools'
@echo ' cpupower - a tool for all things x86 CPU power'
@echo ' debugging - tools for debugging'
@echo ' firewire - the userspace part of nosy, an IEEE-1394 traffic sniffer'
@@ -23,7 +24,7 @@ help:
@echo ' intel-speed-select - Intel Speed Select tool'
@echo ' kvm_stat - top-like utility for displaying kvm statistics'
@echo ' leds - LEDs tools'
- @echo ' liblockdep - user-space wrapper for kernel locking-validator'
+ @echo ' nolibc - nolibc headers testing and installation'
@echo ' objtool - an ELF object analysis tool'
@echo ' pci - PCI tools'
@echo ' perf - Linux performance measurement and analysis tool'
@@ -31,10 +32,14 @@ help:
@echo ' bootconfig - boot config tool'
@echo ' spi - spi tools'
@echo ' tmon - thermal monitoring and tuning tool'
+ @echo ' thermometer - temperature capture tool'
+ @echo ' thermal-engine - thermal monitoring tool'
+ @echo ' thermal - thermal library'
+ @echo ' tracing - misc tracing tools'
@echo ' turbostat - Intel CPU idle stats and freq reporting tool'
@echo ' usb - USB testing tools'
@echo ' virtio - vhost test module'
- @echo ' vm - misc vm tools'
+ @echo ' mm - misc mm tools'
@echo ' wmi - WMI interface examples'
@echo ' x86_energy_perf_policy - Intel energy policy tool'
@echo ''
@@ -64,15 +69,21 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup firewire hv guest bootconfig spi usb virtio vm bpf iio gpio objtool leds wmi pci firmware debugging: FORCE
+cgroup counter firewire hv guest bootconfig spi usb virtio mm bpf iio gpio objtool leds wmi pci firmware debugging tracing: FORCE
$(call descend,$@)
-liblockdep: FORCE
- $(call descend,lib/lockdep)
+bpf/%: FORCE
+ $(call descend,$@)
libapi: FORCE
$(call descend,lib/api)
+nolibc: FORCE
+ $(call descend,include/nolibc)
+
+nolibc_%: FORCE
+ $(call descend,include/nolibc,$(patsubst nolibc_%,%,$@))
+
# The perf build does not follow the descend function setup,
# invoking it via it's own make rule.
PERF_O = $(if $(O),$(O)/tools/perf,)
@@ -84,23 +95,32 @@ perf: FORCE
selftests: FORCE
$(call descend,testing/$@)
+thermal: FORCE
+ $(call descend,lib/$@)
+
turbostat x86_energy_perf_policy intel-speed-select: FORCE
$(call descend,power/x86/$@)
tmon: FORCE
$(call descend,thermal/$@)
+thermometer: FORCE
+ $(call descend,thermal/$@)
+
+thermal-engine: FORCE thermal
+ $(call descend,thermal/$@)
+
freefall: FORCE
$(call descend,laptop/$@)
kvm_stat: FORCE
$(call descend,kvm/$@)
-all: acpi cgroup cpupower gpio hv firewire liblockdep \
+all: acpi cgroup counter cpupower gpio hv firewire \
perf selftests bootconfig spi turbostat usb \
- virtio vm bpf x86_energy_perf_policy \
+ virtio mm bpf x86_energy_perf_policy \
tmon freefall iio objtool kvm_stat wmi \
- pci debugging
+ pci debugging tracing thermal thermometer thermal-engine
acpi_install:
$(call descend,power/$(@:_install=),install)
@@ -108,33 +128,40 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install debugging_install:
+cgroup_install counter_install firewire_install gpio_install hv_install iio_install perf_install bootconfig_install spi_install usb_install virtio_install mm_install bpf_install objtool_install wmi_install pci_install debugging_install tracing_install:
$(call descend,$(@:_install=),install)
-liblockdep_install:
- $(call descend,lib/lockdep,install)
-
selftests_install:
$(call descend,testing/$(@:_install=),install)
+thermal_install:
+ $(call descend,lib/$(@:_install=),install)
+
turbostat_install x86_energy_perf_policy_install intel-speed-select_install:
$(call descend,power/x86/$(@:_install=),install)
tmon_install:
$(call descend,thermal/$(@:_install=),install)
+thermometer_install:
+ $(call descend,thermal/$(@:_install=),install)
+
+thermal-engine_install:
+ $(call descend,thermal/$(@:_install=),install)
+
freefall_install:
$(call descend,laptop/$(@:_install=),install)
kvm_stat_install:
$(call descend,kvm/$(@:_install=),install)
-install: acpi_install cgroup_install cpupower_install gpio_install \
- hv_install firewire_install iio_install liblockdep_install \
+install: acpi_install cgroup_install counter_install cpupower_install gpio_install \
+ hv_install firewire_install iio_install \
perf_install selftests_install turbostat_install usb_install \
- virtio_install vm_install bpf_install x86_energy_perf_policy_install \
+ virtio_install mm_install bpf_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install \
- wmi_install pci_install debugging_install intel-speed-select_install
+ wmi_install pci_install debugging_install intel-speed-select_install \
+ tracing_install thermometer_install thermal-engine_install
acpi_clean:
$(call descend,power/acpi,clean)
@@ -142,12 +169,9 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean:
+cgroup_clean counter_clean hv_clean firewire_clean bootconfig_clean spi_clean usb_clean virtio_clean mm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean firmware_clean debugging_clean tracing_clean:
$(call descend,$(@:_clean=),clean)
-liblockdep_clean:
- $(call descend,lib/lockdep,clean)
-
libapi_clean:
$(call descend,lib/api,clean)
@@ -164,9 +188,18 @@ perf_clean:
selftests_clean:
$(call descend,testing/$(@:_clean=),clean)
+thermal_clean:
+ $(call descend,lib/thermal,clean)
+
turbostat_clean x86_energy_perf_policy_clean intel-speed-select_clean:
$(call descend,power/x86/$(@:_clean=),clean)
+thermometer_clean:
+ $(call descend,thermal/thermometer,clean)
+
+thermal-engine_clean:
+ $(call descend,thermal/thermal-engine,clean)
+
tmon_clean:
$(call descend,thermal/tmon,clean)
@@ -176,11 +209,11 @@ freefall_clean:
build_clean:
$(call descend,build,clean)
-clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
+clean: acpi_clean cgroup_clean counter_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean bootconfig_clean spi_clean usb_clean virtio_clean \
- vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
- freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
+ mm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
+ freefall_clean build_clean libbpf_clean libsubcmd_clean \
gpio_clean objtool_clean leds_clean wmi_clean pci_clean firmware_clean debugging_clean \
- intel-speed-select_clean
+ intel-speed-select_clean tracing_clean thermal_clean thermometer_clean thermal-engine_clean
.PHONY: FORCE
diff --git a/tools/accounting/.gitignore b/tools/accounting/.gitignore
index c45fb4ed4309..522a690aaf3d 100644
--- a/tools/accounting/.gitignore
+++ b/tools/accounting/.gitignore
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
getdelays
+procacct
diff --git a/tools/accounting/Makefile b/tools/accounting/Makefile
index 03687f19cbb1..11def1ad046c 100644
--- a/tools/accounting/Makefile
+++ b/tools/accounting/Makefile
@@ -2,7 +2,7 @@
CC := $(CROSS_COMPILE)gcc
CFLAGS := -I../../usr/include
-PROGS := getdelays
+PROGS := getdelays procacct
all: $(PROGS)
diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
index 5ef1c15e88ad..1334214546d7 100644
--- a/tools/accounting/getdelays.c
+++ b/tools/accounting/getdelays.c
@@ -45,7 +45,6 @@
exit(code); \
} while (0)
-int done;
int rcvbufsz;
char name[100];
int dbg;
@@ -199,13 +198,19 @@ static void print_delayacct(struct taskstats *t)
printf("\n\nCPU %15s%15s%15s%15s%15s\n"
" %15llu%15llu%15llu%15llu%15.3fms\n"
"IO %15s%15s%15s\n"
- " %15llu%15llu%15llums\n"
+ " %15llu%15llu%15.3fms\n"
"SWAP %15s%15s%15s\n"
- " %15llu%15llu%15llums\n"
+ " %15llu%15llu%15.3fms\n"
"RECLAIM %12s%15s%15s\n"
- " %15llu%15llu%15llums\n"
+ " %15llu%15llu%15.3fms\n"
"THRASHING%12s%15s%15s\n"
- " %15llu%15llu%15llums\n",
+ " %15llu%15llu%15.3fms\n"
+ "COMPACT %12s%15s%15s\n"
+ " %15llu%15llu%15.3fms\n"
+ "WPCOPY %12s%15s%15s\n"
+ " %15llu%15llu%15.3fms\n"
+ "IRQ %15s%15s%15s\n"
+ " %15llu%15llu%15.3fms\n",
"count", "real total", "virtual total",
"delay total", "delay average",
(unsigned long long)t->cpu_count,
@@ -216,19 +221,31 @@ static void print_delayacct(struct taskstats *t)
"count", "delay total", "delay average",
(unsigned long long)t->blkio_count,
(unsigned long long)t->blkio_delay_total,
- average_ms(t->blkio_delay_total, t->blkio_count),
+ average_ms((double)t->blkio_delay_total, t->blkio_count),
"count", "delay total", "delay average",
(unsigned long long)t->swapin_count,
(unsigned long long)t->swapin_delay_total,
- average_ms(t->swapin_delay_total, t->swapin_count),
+ average_ms((double)t->swapin_delay_total, t->swapin_count),
"count", "delay total", "delay average",
(unsigned long long)t->freepages_count,
(unsigned long long)t->freepages_delay_total,
- average_ms(t->freepages_delay_total, t->freepages_count),
+ average_ms((double)t->freepages_delay_total, t->freepages_count),
"count", "delay total", "delay average",
(unsigned long long)t->thrashing_count,
(unsigned long long)t->thrashing_delay_total,
- average_ms(t->thrashing_delay_total, t->thrashing_count));
+ average_ms((double)t->thrashing_delay_total, t->thrashing_count),
+ "count", "delay total", "delay average",
+ (unsigned long long)t->compact_count,
+ (unsigned long long)t->compact_delay_total,
+ average_ms((double)t->compact_delay_total, t->compact_count),
+ "count", "delay total", "delay average",
+ (unsigned long long)t->wpcopy_count,
+ (unsigned long long)t->wpcopy_delay_total,
+ average_ms((double)t->wpcopy_delay_total, t->wpcopy_count),
+ "count", "delay total", "delay average",
+ (unsigned long long)t->irq_count,
+ (unsigned long long)t->irq_delay_total,
+ average_ms((double)t->irq_delay_total, t->irq_count));
}
static void task_context_switch_counts(struct taskstats *t)
@@ -273,7 +290,6 @@ int main(int argc, char *argv[])
pid_t rtid = 0;
int fd = 0;
- int count = 0;
int write_file = 0;
int maskset = 0;
char *logfile = NULL;
@@ -483,7 +499,6 @@ int main(int argc, char *argv[])
len2 = 0;
/* For nested attributes, na follows */
na = (struct nlattr *) NLA_DATA(na);
- done = 0;
while (len2 < aggr_len) {
switch (na->nla_type) {
case TASKSTATS_TYPE_PID:
@@ -497,7 +512,6 @@ int main(int argc, char *argv[])
printf("TGID\t%d\n", rtid);
break;
case TASKSTATS_TYPE_STATS:
- count++;
if (print_delays)
print_delayacct((struct taskstats *) NLA_DATA(na));
if (print_io_accounting)
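The getdelays.c hunks above make two coupled changes to the delay report: the IO, SWAP, RECLAIM and THRASHING averages move from an integer "%15llums" conversion to "%15.3fms", and each delay total is cast to double before being passed to average_ms(), so the per-event average is no longer truncated to whole milliseconds. A minimal stand-alone sketch of why the cast matters, assuming average_ms() has the same shape as the copy in procacct.c below and using made-up sample values:

#include <stdio.h>

/* same shape as the tools' macro: nanosecond total -> millisecond average */
#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))

int main(void)
{
	unsigned long long delay_total = 2500000ULL;	/* 2.5 ms, hypothetical */
	unsigned long long count = 1;

	/* old style: integer division, prints "2ms" */
	printf("%llums\n", average_ms(delay_total, count));
	/* new style: cast the total to double first, prints "2.500ms" */
	printf("%.3fms\n", average_ms((double)delay_total, count));
	return 0;
}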
diff --git a/tools/accounting/procacct.c b/tools/accounting/procacct.c
new file mode 100644
index 000000000000..90c4a37f53d9
--- /dev/null
+++ b/tools/accounting/procacct.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0
+/* procacct.c
+ *
+ * Demonstrator of fetching resource data on task exit, as a way
+ * to accumulate accurate program resource usage statistics, without
+ * prior identification of the programs. For that, the fields for
+ * device and inode of the program executable binary file are also
+ * extracted in addition to the command string.
+ *
+ * The TGID together with the PID and the AGROUP flag allow
+ * identification of threads in a process and single-threaded processes.
+ * The ac_tgetime field gives proper whole-process walltime.
+ *
+ * Written (changed) by Thomas Orgis, University of Hamburg in 2022
+ *
+ * This is a cheap derivation (inheriting the style) of getdelays.c:
+ *
+ * Utility to get per-pid and per-tgid delay accounting statistics
+ * Also illustrates usage of the taskstats interface
+ *
+ * Copyright (C) Shailabh Nagar, IBM Corp. 2005
+ * Copyright (C) Balbir Singh, IBM Corp. 2006
+ * Copyright (c) Jay Lan, SGI. 2006
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <poll.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+
+#include <linux/genetlink.h>
+#include <linux/acct.h>
+#include <linux/taskstats.h>
+#include <linux/kdev_t.h>
+
+/*
+ * Generic macros for dealing with netlink sockets. Might be duplicated
+ * elsewhere. It is recommended that commercial grade applications use
+ * libnl or libnetlink and use the interfaces provided by the library
+ */
+#define GENLMSG_DATA(glh) ((void *)(NLMSG_DATA(glh) + GENL_HDRLEN))
+#define GENLMSG_PAYLOAD(glh) (NLMSG_PAYLOAD(glh, 0) - GENL_HDRLEN)
+#define NLA_DATA(na) ((void *)((char *)(na) + NLA_HDRLEN))
+#define NLA_PAYLOAD(len) (len - NLA_HDRLEN)
+
+#define err(code, fmt, arg...) \
+ do { \
+ fprintf(stderr, fmt, ##arg); \
+ exit(code); \
+ } while (0)
+
+int rcvbufsz;
+char name[100];
+int dbg;
+int print_delays;
+int print_io_accounting;
+int print_task_context_switch_counts;
+
+#define PRINTF(fmt, arg...) { \
+ if (dbg) { \
+ printf(fmt, ##arg); \
+ } \
+ }
+
+/* Maximum size of response requested or message sent */
+#define MAX_MSG_SIZE 1024
+/* Maximum number of cpus expected to be specified in a cpumask */
+#define MAX_CPUS 32
+
+struct msgtemplate {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[MAX_MSG_SIZE];
+};
+
+char cpumask[100+6*MAX_CPUS];
+
+static void usage(void)
+{
+ fprintf(stderr, "procacct [-v] [-w logfile] [-r bufsize] [-m cpumask]\n");
+ fprintf(stderr, " -v: debug on\n");
+}
+
+/*
+ * Create a raw netlink socket and bind
+ */
+static int create_nl_socket(int protocol)
+{
+ int fd;
+ struct sockaddr_nl local;
+
+ fd = socket(AF_NETLINK, SOCK_RAW, protocol);
+ if (fd < 0)
+ return -1;
+
+ if (rcvbufsz)
+ if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF,
+ &rcvbufsz, sizeof(rcvbufsz)) < 0) {
+ fprintf(stderr, "Unable to set socket rcv buf size to %d\n",
+ rcvbufsz);
+ goto error;
+ }
+
+ memset(&local, 0, sizeof(local));
+ local.nl_family = AF_NETLINK;
+
+ if (bind(fd, (struct sockaddr *) &local, sizeof(local)) < 0)
+ goto error;
+
+ return fd;
+error:
+ close(fd);
+ return -1;
+}
+
+
+static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
+ __u8 genl_cmd, __u16 nla_type,
+ void *nla_data, int nla_len)
+{
+ struct nlattr *na;
+ struct sockaddr_nl nladdr;
+ int r, buflen;
+ char *buf;
+
+ struct msgtemplate msg;
+
+ msg.n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ msg.n.nlmsg_type = nlmsg_type;
+ msg.n.nlmsg_flags = NLM_F_REQUEST;
+ msg.n.nlmsg_seq = 0;
+ msg.n.nlmsg_pid = nlmsg_pid;
+ msg.g.cmd = genl_cmd;
+ msg.g.version = 0x1;
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+ na->nla_len = nla_len + 1 + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+
+ buf = (char *) &msg;
+ buflen = msg.n.nlmsg_len;
+ memset(&nladdr, 0, sizeof(nladdr));
+ nladdr.nl_family = AF_NETLINK;
+ while ((r = sendto(sd, buf, buflen, 0, (struct sockaddr *) &nladdr,
+ sizeof(nladdr))) < buflen) {
+ if (r > 0) {
+ buf += r;
+ buflen -= r;
+ } else if (errno != EAGAIN)
+ return -1;
+ }
+ return 0;
+}
+
+
+/*
+ * Probe the controller in genetlink to find the family id
+ * for the TASKSTATS family
+ */
+static int get_family_id(int sd)
+{
+ struct {
+ struct nlmsghdr n;
+ struct genlmsghdr g;
+ char buf[256];
+ } ans;
+
+ int id = 0, rc;
+ struct nlattr *na;
+ int rep_len;
+
+ strcpy(name, TASKSTATS_GENL_NAME);
+ rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
+ CTRL_ATTR_FAMILY_NAME, (void *)name,
+ strlen(TASKSTATS_GENL_NAME)+1);
+ if (rc < 0)
+ return 0; /* sendto() failure? */
+
+ rep_len = recv(sd, &ans, sizeof(ans), 0);
+ if (ans.n.nlmsg_type == NLMSG_ERROR ||
+ (rep_len < 0) || !NLMSG_OK((&ans.n), rep_len))
+ return 0;
+
+ na = (struct nlattr *) GENLMSG_DATA(&ans);
+ na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
+ if (na->nla_type == CTRL_ATTR_FAMILY_ID)
+ id = *(__u16 *) NLA_DATA(na);
+
+ return id;
+}
+
+#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
+
+static void print_procacct(struct taskstats *t)
+{
+ /* First letter: T is a mere thread, P the last thread of its group, ? unknown (taskstats version < 12). */
+ printf(
+ "%c pid=%lu tgid=%lu uid=%lu wall=%llu gwall=%llu cpu=%llu vmpeak=%llu rsspeak=%llu dev=%lu:%lu inode=%llu comm=%s\n"
+ , t->version >= 12 ? (t->ac_flag & AGROUP ? 'P' : 'T') : '?'
+ , (unsigned long)t->ac_pid
+ , (unsigned long)(t->version >= 12 ? t->ac_tgid : 0)
+ , (unsigned long)t->ac_uid
+ , (unsigned long long)t->ac_etime
+ , (unsigned long long)(t->version >= 12 ? t->ac_tgetime : 0)
+ , (unsigned long long)(t->ac_utime+t->ac_stime)
+ , (unsigned long long)t->hiwater_vm
+ , (unsigned long long)t->hiwater_rss
+ , (unsigned long)(t->version >= 12 ? MAJOR(t->ac_exe_dev) : 0)
+ , (unsigned long)(t->version >= 12 ? MINOR(t->ac_exe_dev) : 0)
+ , (unsigned long long)(t->version >= 12 ? t->ac_exe_inode : 0)
+ , t->ac_comm
+ );
+}
+
+void handle_aggr(int mother, struct nlattr *na, int fd)
+{
+ int aggr_len = NLA_PAYLOAD(na->nla_len);
+ int len2 = 0;
+ pid_t rtid = 0;
+
+ na = (struct nlattr *) NLA_DATA(na);
+ while (len2 < aggr_len) {
+ switch (na->nla_type) {
+ case TASKSTATS_TYPE_PID:
+ rtid = *(int *) NLA_DATA(na);
+ PRINTF("PID\t%d\n", rtid);
+ break;
+ case TASKSTATS_TYPE_TGID:
+ rtid = *(int *) NLA_DATA(na);
+ PRINTF("TGID\t%d\n", rtid);
+ break;
+ case TASKSTATS_TYPE_STATS:
+ if (mother == TASKSTATS_TYPE_AGGR_PID)
+ print_procacct((struct taskstats *) NLA_DATA(na));
+ if (fd) {
+ if (write(fd, NLA_DATA(na), na->nla_len) < 0)
+ err(1, "write error\n");
+ }
+ break;
+ case TASKSTATS_TYPE_NULL:
+ break;
+ default:
+ fprintf(stderr, "Unknown nested nla_type %d\n",
+ na->nla_type);
+ break;
+ }
+ len2 += NLA_ALIGN(na->nla_len);
+ na = (struct nlattr *)((char *)na +
+ NLA_ALIGN(na->nla_len));
+ }
+}
+
+int main(int argc, char *argv[])
+{
+ int c, rc, rep_len;
+ __u16 id;
+ __u32 mypid;
+
+ struct nlattr *na;
+ int nl_sd = -1;
+ int len = 0;
+
+ int fd = 0;
+ int write_file = 0;
+ int maskset = 0;
+ char *logfile = NULL;
+ int cfd = 0;
+ int forking = 0;
+
+ struct msgtemplate msg;
+
+ while (!forking) {
+ c = getopt(argc, argv, "w:m:vr:");
+ if (c < 0)
+ break;
+
+ switch (c) {
+ case 'w':
+ logfile = strdup(optarg);
+ printf("write to file %s\n", logfile);
+ write_file = 1;
+ break;
+ case 'r':
+ rcvbufsz = atoi(optarg);
+ printf("receive buf size %d\n", rcvbufsz);
+ if (rcvbufsz < 0)
+ err(1, "Invalid rcv buf size\n");
+ break;
+ case 'm':
+ strncpy(cpumask, optarg, sizeof(cpumask));
+ cpumask[sizeof(cpumask) - 1] = '\0';
+ maskset = 1;
+ break;
+ case 'v':
+ printf("debug on\n");
+ dbg = 1;
+ break;
+ default:
+ usage();
+ exit(-1);
+ }
+ }
+ if (!maskset) {
+ maskset = 1;
+ strncpy(cpumask, "1", sizeof(cpumask));
+ cpumask[sizeof(cpumask) - 1] = '\0';
+ }
+ printf("cpumask %s maskset %d\n", cpumask, maskset);
+
+ if (write_file) {
+ fd = open(logfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (fd == -1) {
+ perror("Cannot open output file\n");
+ exit(1);
+ }
+ }
+
+ nl_sd = create_nl_socket(NETLINK_GENERIC);
+ if (nl_sd < 0)
+ err(1, "error creating Netlink socket\n");
+
+ mypid = getpid();
+ id = get_family_id(nl_sd);
+ if (!id) {
+ fprintf(stderr, "Error getting family id, errno %d\n", errno);
+ goto err;
+ }
+ PRINTF("family id %d\n", id);
+
+ if (maskset) {
+ rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
+ TASKSTATS_CMD_ATTR_REGISTER_CPUMASK,
+ &cpumask, strlen(cpumask) + 1);
+ PRINTF("Sent register cpumask, retval %d\n", rc);
+ if (rc < 0) {
+ fprintf(stderr, "error sending register cpumask\n");
+ goto err;
+ }
+ }
+
+ do {
+ rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
+ PRINTF("received %d bytes\n", rep_len);
+
+ if (rep_len < 0) {
+ fprintf(stderr, "nonfatal reply error: errno %d\n",
+ errno);
+ continue;
+ }
+ if (msg.n.nlmsg_type == NLMSG_ERROR ||
+ !NLMSG_OK((&msg.n), rep_len)) {
+ struct nlmsgerr *err = NLMSG_DATA(&msg);
+
+ fprintf(stderr, "fatal reply error, errno %d\n",
+ err->error);
+ goto done;
+ }
+
+ PRINTF("nlmsghdr size=%zu, nlmsg_len=%d, rep_len=%d\n",
+ sizeof(struct nlmsghdr), msg.n.nlmsg_len, rep_len);
+
+
+ rep_len = GENLMSG_PAYLOAD(&msg.n);
+
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ len = 0;
+ while (len < rep_len) {
+ len += NLA_ALIGN(na->nla_len);
+ int mother = na->nla_type;
+
+ PRINTF("mother=%i\n", mother);
+ switch (na->nla_type) {
+ case TASKSTATS_TYPE_AGGR_PID:
+ case TASKSTATS_TYPE_AGGR_TGID:
+ /* For nested attributes, na follows */
+ handle_aggr(mother, na, fd);
+ break;
+ default:
+ fprintf(stderr, "Unexpected nla_type %d\n",
+ na->nla_type);
+ case TASKSTATS_TYPE_NULL:
+ break;
+ }
+ na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
+ }
+ } while (1);
+done:
+ if (maskset) {
+ rc = send_cmd(nl_sd, id, mypid, TASKSTATS_CMD_GET,
+ TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK,
+ &cpumask, strlen(cpumask) + 1);
+ printf("Sent deregister mask, retval %d\n", rc);
+ if (rc < 0)
+ err(rc, "error sending deregister cpumask\n");
+ }
+err:
+ close(nl_sd);
+ if (fd)
+ close(fd);
+ if (cfd)
+ close(cfd);
+ return 0;
+}
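Because print_procacct() emits one self-describing line per exiting task, and the leading letter separates a plain thread ('T') from the last thread of its group ('P'), whole-process figures can be derived entirely in user space. A hypothetical post-processing sketch (not part of this patch; the field names follow the printf format above, everything else is assumed for illustration) that sums the per-thread cpu= values by tgid:

#include <stdio.h>

struct proc_total {
	unsigned long tgid;
	unsigned long long cpu;
};

int main(void)
{
	static struct proc_total totals[1024];	/* arbitrary table size for the example */
	int ntotals = 0;
	char line[512];

	while (fgets(line, sizeof(line), stdin)) {
		char flag;
		unsigned long pid, tgid, uid;
		unsigned long long wall, gwall, cpu;
		int i;

		if (sscanf(line, "%c pid=%lu tgid=%lu uid=%lu wall=%llu gwall=%llu cpu=%llu",
			   &flag, &pid, &tgid, &uid, &wall, &gwall, &cpu) != 7)
			continue;	/* not a procacct record */

		/* linear lookup keeps the sketch short; a real tool would hash */
		for (i = 0; i < ntotals; i++)
			if (totals[i].tgid == tgid)
				break;
		if (i == ntotals && ntotals < 1024)
			totals[ntotals++].tgid = tgid;
		if (i >= 1024)
			continue;	/* table full; the sketch just drops the record */

		totals[i].cpu += cpu;
		/* 'P' marks the last thread of the group, so this total is complete */
		if (flag == 'P')
			printf("tgid=%lu cpu=%llu\n", tgid, totals[i].cpu);
	}
	return 0;
}

Piping procacct's standard output through such a filter yields one summary line per process once its last thread has exited.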
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
new file mode 100644
index 000000000000..683ca3af4084
--- /dev/null
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -0,0 +1,278 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+#ifndef __ASM_CPUTYPE_H
+#define __ASM_CPUTYPE_H
+
+#define INVALID_HWID ULONG_MAX
+
+#define MPIDR_UP_BITMASK (0x1 << 30)
+#define MPIDR_MT_BITMASK (0x1 << 24)
+#define MPIDR_HWID_BITMASK UL(0xff00ffffff)
+
+#define MPIDR_LEVEL_BITS_SHIFT 3
+#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_LEVEL_SHIFT(level) \
+ (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+ ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+
+#define MIDR_REVISION_MASK 0xf
+#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
+#define MIDR_PARTNUM_SHIFT 4
+#define MIDR_PARTNUM_MASK (0xfff << MIDR_PARTNUM_SHIFT)
+#define MIDR_PARTNUM(midr) \
+ (((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
+#define MIDR_ARCHITECTURE_SHIFT 16
+#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_ARCHITECTURE(midr) \
+ (((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_VARIANT_SHIFT 20
+#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
+#define MIDR_VARIANT(midr) \
+ (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
+#define MIDR_IMPLEMENTOR_SHIFT 24
+#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR(midr) \
+ (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+
+#define MIDR_CPU_MODEL(imp, partnum) \
+ ((_AT(u32, imp) << MIDR_IMPLEMENTOR_SHIFT) | \
+ (0xf << MIDR_ARCHITECTURE_SHIFT) | \
+ ((partnum) << MIDR_PARTNUM_SHIFT))
+
+#define MIDR_CPU_VAR_REV(var, rev) \
+ (((var) << MIDR_VARIANT_SHIFT) | (rev))
+
+#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+ MIDR_ARCHITECTURE_MASK)
+
+#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_APM 0x50
+#define ARM_CPU_IMP_CAVIUM 0x43
+#define ARM_CPU_IMP_BRCM 0x42
+#define ARM_CPU_IMP_QCOM 0x51
+#define ARM_CPU_IMP_NVIDIA 0x4E
+#define ARM_CPU_IMP_FUJITSU 0x46
+#define ARM_CPU_IMP_HISI 0x48
+#define ARM_CPU_IMP_APPLE 0x61
+#define ARM_CPU_IMP_AMPERE 0xC0
+
+#define ARM_CPU_PART_AEM_V8 0xD0F
+#define ARM_CPU_PART_FOUNDATION 0xD00
+#define ARM_CPU_PART_CORTEX_A57 0xD07
+#define ARM_CPU_PART_CORTEX_A72 0xD08
+#define ARM_CPU_PART_CORTEX_A53 0xD03
+#define ARM_CPU_PART_CORTEX_A73 0xD09
+#define ARM_CPU_PART_CORTEX_A75 0xD0A
+#define ARM_CPU_PART_CORTEX_A35 0xD04
+#define ARM_CPU_PART_CORTEX_A55 0xD05
+#define ARM_CPU_PART_CORTEX_A76 0xD0B
+#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
+#define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_NEOVERSE_V1 0xD40
+#define ARM_CPU_PART_CORTEX_A78 0xD41
+#define ARM_CPU_PART_CORTEX_A78AE 0xD42
+#define ARM_CPU_PART_CORTEX_X1 0xD44
+#define ARM_CPU_PART_CORTEX_A510 0xD46
+#define ARM_CPU_PART_CORTEX_A710 0xD47
+#define ARM_CPU_PART_CORTEX_A715 0xD4D
+#define ARM_CPU_PART_CORTEX_X2 0xD48
+#define ARM_CPU_PART_NEOVERSE_N2 0xD49
+#define ARM_CPU_PART_CORTEX_A78C 0xD4B
+
+#define APM_CPU_PART_POTENZA 0x000
+
+#define CAVIUM_CPU_PART_THUNDERX 0x0A1
+#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
+#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
+#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
+/* OcteonTx2 series */
+#define CAVIUM_CPU_PART_OCTX2_98XX 0x0B1
+#define CAVIUM_CPU_PART_OCTX2_96XX 0x0B2
+#define CAVIUM_CPU_PART_OCTX2_95XX 0x0B3
+#define CAVIUM_CPU_PART_OCTX2_95XXN 0x0B4
+#define CAVIUM_CPU_PART_OCTX2_95XXMM 0x0B5
+#define CAVIUM_CPU_PART_OCTX2_95XXO 0x0B6
+
+#define BRCM_CPU_PART_BRAHMA_B53 0x100
+#define BRCM_CPU_PART_VULCAN 0x516
+
+#define QCOM_CPU_PART_FALKOR_V1 0x800
+#define QCOM_CPU_PART_FALKOR 0xC00
+#define QCOM_CPU_PART_KRYO 0x200
+#define QCOM_CPU_PART_KRYO_2XX_GOLD 0x800
+#define QCOM_CPU_PART_KRYO_2XX_SILVER 0x801
+#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
+#define QCOM_CPU_PART_KRYO_4XX_GOLD 0x804
+#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
+
+#define NVIDIA_CPU_PART_DENVER 0x003
+#define NVIDIA_CPU_PART_CARMEL 0x004
+
+#define FUJITSU_CPU_PART_A64FX 0x001
+
+#define HISI_CPU_PART_TSV110 0xD01
+
+#define APPLE_CPU_PART_M1_ICESTORM 0x022
+#define APPLE_CPU_PART_M1_FIRESTORM 0x023
+#define APPLE_CPU_PART_M1_ICESTORM_PRO 0x024
+#define APPLE_CPU_PART_M1_FIRESTORM_PRO 0x025
+#define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
+#define APPLE_CPU_PART_M1_FIRESTORM_MAX 0x029
+#define APPLE_CPU_PART_M2_BLIZZARD 0x032
+#define APPLE_CPU_PART_M2_AVALANCHE 0x033
+
+#define AMPERE_CPU_PART_AMPERE1 0xAC3
+
+#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
+#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
+#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
+#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
+#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
+#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
+#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1)
+#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78)
+#define MIDR_CORTEX_A78AE MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
+#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
+#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
+#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
+#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
+#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
+#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
+#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
+#define MIDR_OCTX2_98XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_OCTX2_98XX)
+#define MIDR_OCTX2_96XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_OCTX2_96XX)
+#define MIDR_OCTX2_95XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_OCTX2_95XX)
+#define MIDR_OCTX2_95XXN MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_OCTX2_95XXN)
+#define MIDR_OCTX2_95XXMM MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_OCTX2_95XXMM)
+#define MIDR_OCTX2_95XXO MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_OCTX2_95XXO)
+#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRAHMA_B53 MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_BRAHMA_B53)
+#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
+#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
+#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
+#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
+#define MIDR_QCOM_KRYO_2XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_GOLD)
+#define MIDR_QCOM_KRYO_2XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_2XX_SILVER)
+#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
+#define MIDR_QCOM_KRYO_4XX_GOLD MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_GOLD)
+#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
+#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
+#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
+#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
+#define MIDR_HISI_TSV110 MIDR_CPU_MODEL(ARM_CPU_IMP_HISI, HISI_CPU_PART_TSV110)
+#define MIDR_APPLE_M1_ICESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM)
+#define MIDR_APPLE_M1_FIRESTORM MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM)
+#define MIDR_APPLE_M1_ICESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_PRO)
+#define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
+#define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
+#define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
+#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
+#define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
+
+/* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1 (v0r0 and v1r0) */
+#define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX
+#define MIDR_FUJITSU_ERRATUM_010001_MASK (~MIDR_CPU_VAR_REV(1, 0))
+#define TCR_CLEAR_FUJITSU_ERRATUM_010001 (TCR_NFD1 | TCR_NFD0)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/sysreg.h>
+
+#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
+
+/*
+ * Represent a range of MIDR values for a given CPU model and a
+ * range of variant/revision values.
+ *
+ * @model - CPU model as defined by MIDR_CPU_MODEL
+ * @rv_min - Minimum of the variant/revision value, as defined by
+ * MIDR_CPU_VAR_REV
+ * @rv_max - Maximum of the variant/revision value for the range.
+ */
+struct midr_range {
+ u32 model;
+ u32 rv_min;
+ u32 rv_max;
+};
+
+#define MIDR_RANGE(m, v_min, r_min, v_max, r_max) \
+ { \
+ .model = m, \
+ .rv_min = MIDR_CPU_VAR_REV(v_min, r_min), \
+ .rv_max = MIDR_CPU_VAR_REV(v_max, r_max), \
+ }
+
+#define MIDR_REV_RANGE(m, v, r_min, r_max) MIDR_RANGE(m, v, r_min, v, r_max)
+#define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r)
+#define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
+
+static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
+ u32 rv_max)
+{
+ u32 _model = midr & MIDR_CPU_MODEL_MASK;
+ u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
+
+ return _model == model && rv >= rv_min && rv <= rv_max;
+}
+
+static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
+{
+ return midr_is_cpu_model_range(midr, range->model,
+ range->rv_min, range->rv_max);
+}
+
+static inline bool
+is_midr_in_range_list(u32 midr, struct midr_range const *ranges)
+{
+ while (ranges->model)
+ if (is_midr_in_range(midr, ranges++))
+ return true;
+ return false;
+}
+
+/*
+ * The CPU ID never changes at run time, so we might as well tell the
+ * compiler that it's constant. Use this function to read the CPU ID
+ * rather than reading processor_id or calling read_cpuid() directly.
+ */
+static inline u32 __attribute_const__ read_cpuid_id(void)
+{
+ return read_cpuid(MIDR_EL1);
+}
+
+static inline u64 __attribute_const__ read_cpuid_mpidr(void)
+{
+ return read_cpuid(MPIDR_EL1);
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+ return MIDR_IMPLEMENTOR(read_cpuid_id());
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+ return MIDR_PARTNUM(read_cpuid_id());
+}
+
+static inline u32 __attribute_const__ read_cpuid_cachetype(void)
+{
+ return read_cpuid(CTR_EL0);
+}
+#endif /* __ASSEMBLY__ */
+
+#endif
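For context, a minimal sketch of how the cputype.h helpers above are typically consumed (illustrative only, not part of this patch; the CPU list below is an arbitrary example):

static const struct midr_range example_affected_cpus[] = {
	/* Every variant/revision of Cortex-A76 */
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
	/* Neoverse N1, r0p0 up to and including r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	{},	/* model == 0 terminates the list */
};

static bool example_this_cpu_affected(void)
{
	/* read_cpuid_id() returns the current CPU's MIDR_EL1 value */
	return is_midr_in_range_list(read_cpuid_id(), example_affected_cpus);
}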
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
new file mode 100644
index 000000000000..7640fa27be94
--- /dev/null
+++ b/tools/arch/arm64/include/asm/sysreg.h
@@ -0,0 +1,1296 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Macros for accessing system registers with older binutils.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+
+#ifndef __ASM_SYSREG_H
+#define __ASM_SYSREG_H
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+
+/*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+ * C5.2, version:ARM DDI 0487A.f)
+ * [20-19] : Op0
+ * [18-16] : Op1
+ * [15-12] : CRn
+ * [11-8] : CRm
+ * [7-5] : Op2
+ */
+#define Op0_shift 19
+#define Op0_mask 0x3
+#define Op1_shift 16
+#define Op1_mask 0x7
+#define CRn_shift 12
+#define CRn_mask 0xf
+#define CRm_shift 8
+#define CRm_mask 0xf
+#define Op2_shift 5
+#define Op2_mask 0x7
+
+#define sys_reg(op0, op1, crn, crm, op2) \
+ (((op0) << Op0_shift) | ((op1) << Op1_shift) | \
+ ((crn) << CRn_shift) | ((crm) << CRm_shift) | \
+ ((op2) << Op2_shift))
+
+#define sys_insn sys_reg
+
+#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask)
+#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask)
+#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask)
+#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask)
+#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask)
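As a quick illustration (not part of the patch): MIDR_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=0, op2=0, so the encode/decode macros above round-trip as follows.

/* Illustrative sketch only -- not part of the patch. */
#define EXAMPLE_MIDR_EL1	sys_reg(3, 0, 0, 0, 0)	/* same value as SYS_MIDR_EL1 below */
/* sys_reg_Op0(EXAMPLE_MIDR_EL1) == 3, sys_reg_CRn(EXAMPLE_MIDR_EL1) == 0, etc. */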
+
+#ifndef CONFIG_BROKEN_GAS_INST
+
+#ifdef __ASSEMBLY__
+// The space separator is omitted so that __emit_inst(x) can be parsed as
+// either an assembler directive or an assembler macro argument.
+#define __emit_inst(x) .inst(x)
+#else
+#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
+#endif
+
+#else /* CONFIG_BROKEN_GAS_INST */
+
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define __INSTR_BSWAP(x) (x)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+#define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \
+ (((x) << 8) & 0x00ff0000) | \
+ (((x) >> 8) & 0x0000ff00) | \
+ (((x) >> 24) & 0x000000ff))
+#endif /* CONFIG_CPU_BIG_ENDIAN */
+
+#ifdef __ASSEMBLY__
+#define __emit_inst(x) .long __INSTR_BSWAP(x)
+#else /* __ASSEMBLY__ */
+#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_BROKEN_GAS_INST */
+
+/*
+ * Instructions for modifying PSTATE fields.
+ * As per Arm ARM for v8-A, Section "C.5.1.3 op0 == 0b00, architectural hints,
+ * barriers and CLREX, and PSTATE access", ARM DDI 0487 C.a, system instructions
+ * for accessing PSTATE fields have the following encoding:
+ * Op0 = 0, CRn = 4
+ * Op1, Op2 encodes the PSTATE field modified and defines the constraints.
+ * CRm = Imm4 for the instruction.
+ * Rt = 0x1f
+ */
+#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift)
+#define PSTATE_Imm_shift CRm_shift
+
+#define PSTATE_PAN pstate_field(0, 4)
+#define PSTATE_UAO pstate_field(0, 3)
+#define PSTATE_SSBS pstate_field(3, 1)
+#define PSTATE_TCO pstate_field(3, 4)
+
+#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
+
+#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x))
+#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x))
+#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
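A hedged sketch of how the set_pstate_*() wrappers are used (illustrative only, not part of the patch):

static inline void example_pan_window(void)
{
	/* Clear PSTATE.PAN around a deliberate access to user memory ... */
	set_pstate_pan(0);
	/* ... (user access goes here) ... */
	/* ... then set it again so privileged accesses are restricted. */
	set_pstate_pan(1);
}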
+
+#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
+ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+
+#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
+
+#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
+
+/*
+ * System registers, organised loosely by encoding but grouped together
+ * where the architected name contains an index, e.g. ID_MMFR<n>_EL1.
+ */
+#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
+#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
+#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
+#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2)
+#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2)
+#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4)
+#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5)
+#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6)
+#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7)
+#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0)
+#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4)
+#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4)
+#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4)
+#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4)
+#define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6)
+#define SYS_DBGCLAIMCLR_EL1 sys_reg(2, 0, 7, 9, 6)
+#define SYS_DBGAUTHSTATUS_EL1 sys_reg(2, 0, 7, 14, 6)
+#define SYS_MDCCSR_EL0 sys_reg(2, 3, 0, 1, 0)
+#define SYS_DBGDTR_EL0 sys_reg(2, 3, 0, 4, 0)
+#define SYS_DBGDTRRX_EL0 sys_reg(2, 3, 0, 5, 0)
+#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
+#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
+
+#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
+#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
+#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
+
+#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
+#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1)
+#define SYS_ID_PFR2_EL1 sys_reg(3, 0, 0, 3, 4)
+#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2)
+#define SYS_ID_DFR1_EL1 sys_reg(3, 0, 0, 3, 5)
+#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3)
+#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4)
+#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5)
+#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6)
+#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7)
+#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
+#define SYS_ID_MMFR5_EL1 sys_reg(3, 0, 0, 3, 6)
+
+#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0)
+#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1)
+#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2)
+#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3)
+#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
+#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
+#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7)
+
+#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
+#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
+#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)
+
+#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
+#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
+#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
+
+#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
+#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
+
+#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
+#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
+
+#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
+#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
+
+#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
+#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
+#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
+
+#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
+#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
+#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
+#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
+#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
+
+#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
+#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
+
+#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
+#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
+#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
+
+#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
+#define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1)
+#define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2)
+#define SYS_APIBKEYHI_EL1 sys_reg(3, 0, 2, 1, 3)
+
+#define SYS_APDAKEYLO_EL1 sys_reg(3, 0, 2, 2, 0)
+#define SYS_APDAKEYHI_EL1 sys_reg(3, 0, 2, 2, 1)
+#define SYS_APDBKEYLO_EL1 sys_reg(3, 0, 2, 2, 2)
+#define SYS_APDBKEYHI_EL1 sys_reg(3, 0, 2, 2, 3)
+
+#define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0)
+#define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1)
+
+#define SYS_SPSR_EL1 sys_reg(3, 0, 4, 0, 0)
+#define SYS_ELR_EL1 sys_reg(3, 0, 4, 0, 1)
+
+#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
+
+#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
+#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1)
+#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0)
+
+#define SYS_ERRIDR_EL1 sys_reg(3, 0, 5, 3, 0)
+#define SYS_ERRSELR_EL1 sys_reg(3, 0, 5, 3, 1)
+#define SYS_ERXFR_EL1 sys_reg(3, 0, 5, 4, 0)
+#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
+#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
+#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
+#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
+#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
+#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
+
+#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
+#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
+
+#define SYS_PAR_EL1_F BIT(0)
+#define SYS_PAR_EL1_FST GENMASK(6, 1)
+
+/*** Statistical Profiling Extension ***/
+/* ID registers */
+#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
+#define SYS_PMSIDR_EL1_FE_SHIFT 0
+#define SYS_PMSIDR_EL1_FT_SHIFT 1
+#define SYS_PMSIDR_EL1_FL_SHIFT 2
+#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3
+#define SYS_PMSIDR_EL1_LDS_SHIFT 4
+#define SYS_PMSIDR_EL1_ERND_SHIFT 5
+#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8
+#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL
+#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12
+#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL
+#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16
+#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL
+
+#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
+#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0
+#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU
+#define SYS_PMBIDR_EL1_P_SHIFT 4
+#define SYS_PMBIDR_EL1_F_SHIFT 5
+
+/* Sampling controls */
+#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
+#define SYS_PMSCR_EL1_E0SPE_SHIFT 0
+#define SYS_PMSCR_EL1_E1SPE_SHIFT 1
+#define SYS_PMSCR_EL1_CX_SHIFT 3
+#define SYS_PMSCR_EL1_PA_SHIFT 4
+#define SYS_PMSCR_EL1_TS_SHIFT 5
+#define SYS_PMSCR_EL1_PCT_SHIFT 6
+
+#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0)
+#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0
+#define SYS_PMSCR_EL2_E2SPE_SHIFT 1
+#define SYS_PMSCR_EL2_CX_SHIFT 3
+#define SYS_PMSCR_EL2_PA_SHIFT 4
+#define SYS_PMSCR_EL2_TS_SHIFT 5
+#define SYS_PMSCR_EL2_PCT_SHIFT 6
+
+#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2)
+
+#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3)
+#define SYS_PMSIRR_EL1_RND_SHIFT 0
+#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8
+#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL
+
+/* Filtering controls */
+#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1)
+
+#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4)
+#define SYS_PMSFCR_EL1_FE_SHIFT 0
+#define SYS_PMSFCR_EL1_FT_SHIFT 1
+#define SYS_PMSFCR_EL1_FL_SHIFT 2
+#define SYS_PMSFCR_EL1_B_SHIFT 16
+#define SYS_PMSFCR_EL1_LD_SHIFT 17
+#define SYS_PMSFCR_EL1_ST_SHIFT 18
+
+#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5)
+#define SYS_PMSEVFR_EL1_RES0_8_2 \
+ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\
+ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0))
+#define SYS_PMSEVFR_EL1_RES0_8_3 \
+ (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
+
+#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6)
+#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0
+
+/* Buffer controls */
+#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
+#define SYS_PMBLIMITR_EL1_E_SHIFT 0
+#define SYS_PMBLIMITR_EL1_FM_SHIFT 1
+#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL
+#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
+
+#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1)
+
+/* Buffer error reporting */
+#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3)
+#define SYS_PMBSR_EL1_COLL_SHIFT 16
+#define SYS_PMBSR_EL1_S_SHIFT 17
+#define SYS_PMBSR_EL1_EA_SHIFT 18
+#define SYS_PMBSR_EL1_DL_SHIFT 19
+#define SYS_PMBSR_EL1_EC_SHIFT 26
+#define SYS_PMBSR_EL1_EC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
+
+#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0
+#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0
+#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
+
+/*** End of Statistical Profiling Extension ***/
+
+/*
+ * TRBE Registers
+ */
+#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0)
+#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1)
+#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2)
+#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3)
+#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4)
+#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6)
+#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7)
+
+#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0)
+#define TRBLIMITR_LIMIT_SHIFT 12
+#define TRBLIMITR_NVM BIT(5)
+#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0)
+#define TRBLIMITR_TRIG_MODE_SHIFT 3
+#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0)
+#define TRBLIMITR_FILL_MODE_SHIFT 1
+#define TRBLIMITR_ENABLE BIT(0)
+#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0)
+#define TRBPTR_PTR_SHIFT 0
+#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0)
+#define TRBBASER_BASE_SHIFT 12
+#define TRBSR_EC_MASK GENMASK(5, 0)
+#define TRBSR_EC_SHIFT 26
+#define TRBSR_IRQ BIT(22)
+#define TRBSR_TRG BIT(21)
+#define TRBSR_WRAP BIT(20)
+#define TRBSR_ABORT BIT(18)
+#define TRBSR_STOP BIT(17)
+#define TRBSR_MSS_MASK GENMASK(15, 0)
+#define TRBSR_MSS_SHIFT 0
+#define TRBSR_BSC_MASK GENMASK(5, 0)
+#define TRBSR_BSC_SHIFT 0
+#define TRBSR_FSC_MASK GENMASK(5, 0)
+#define TRBSR_FSC_SHIFT 0
+#define TRBMAR_SHARE_MASK GENMASK(1, 0)
+#define TRBMAR_SHARE_SHIFT 8
+#define TRBMAR_OUTER_MASK GENMASK(3, 0)
+#define TRBMAR_OUTER_SHIFT 4
+#define TRBMAR_INNER_MASK GENMASK(3, 0)
+#define TRBMAR_INNER_SHIFT 0
+#define TRBTRG_TRG_MASK GENMASK(31, 0)
+#define TRBTRG_TRG_SHIFT 0
+#define TRBIDR_FLAG BIT(5)
+#define TRBIDR_PROG BIT(4)
+#define TRBIDR_ALIGN_MASK GENMASK(3, 0)
+#define TRBIDR_ALIGN_SHIFT 0
+
+#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
+#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
+
+#define SYS_PMMIR_EL1 sys_reg(3, 0, 9, 14, 6)
+
+#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
+#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
+
+#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0)
+#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1)
+#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2)
+#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3)
+#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7)
+
+#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
+#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1)
+
+#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
+#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
+#define SYS_ICC_HPPIR0_EL1 sys_reg(3, 0, 12, 8, 2)
+#define SYS_ICC_BPR0_EL1 sys_reg(3, 0, 12, 8, 3)
+#define SYS_ICC_AP0Rn_EL1(n) sys_reg(3, 0, 12, 8, 4 | n)
+#define SYS_ICC_AP0R0_EL1 SYS_ICC_AP0Rn_EL1(0)
+#define SYS_ICC_AP0R1_EL1 SYS_ICC_AP0Rn_EL1(1)
+#define SYS_ICC_AP0R2_EL1 SYS_ICC_AP0Rn_EL1(2)
+#define SYS_ICC_AP0R3_EL1 SYS_ICC_AP0Rn_EL1(3)
+#define SYS_ICC_AP1Rn_EL1(n) sys_reg(3, 0, 12, 9, n)
+#define SYS_ICC_AP1R0_EL1 SYS_ICC_AP1Rn_EL1(0)
+#define SYS_ICC_AP1R1_EL1 SYS_ICC_AP1Rn_EL1(1)
+#define SYS_ICC_AP1R2_EL1 SYS_ICC_AP1Rn_EL1(2)
+#define SYS_ICC_AP1R3_EL1 SYS_ICC_AP1Rn_EL1(3)
+#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
+#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3)
+#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
+#define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6)
+#define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7)
+#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
+#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2)
+#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
+#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
+#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
+#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
+#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+
+#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
+#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
+
+#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7)
+
+#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
+
+#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
+#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
+#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4)
+#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
+
+#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
+
+#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
+#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
+
+#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
+#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
+
+#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
+#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
+#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
+#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
+#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
+#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5)
+#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
+#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
+#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
+#define SYS_PMXEVTYPER_EL0 sys_reg(3, 3, 9, 13, 1)
+#define SYS_PMXEVCNTR_EL0 sys_reg(3, 3, 9, 13, 2)
+#define SYS_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0)
+#define SYS_PMOVSSET_EL0 sys_reg(3, 3, 9, 14, 3)
+
+#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
+#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
+
+#define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7)
+
+/* Definitions for system register interface to AMU for ARMv8.4 onwards */
+#define SYS_AM_EL0(crm, op2) sys_reg(3, 3, 13, (crm), (op2))
+#define SYS_AMCR_EL0 SYS_AM_EL0(2, 0)
+#define SYS_AMCFGR_EL0 SYS_AM_EL0(2, 1)
+#define SYS_AMCGCR_EL0 SYS_AM_EL0(2, 2)
+#define SYS_AMUSERENR_EL0 SYS_AM_EL0(2, 3)
+#define SYS_AMCNTENCLR0_EL0 SYS_AM_EL0(2, 4)
+#define SYS_AMCNTENSET0_EL0 SYS_AM_EL0(2, 5)
+#define SYS_AMCNTENCLR1_EL0 SYS_AM_EL0(3, 0)
+#define SYS_AMCNTENSET1_EL0 SYS_AM_EL0(3, 1)
+
+/*
+ * Group 0 of activity monitors (architected):
+ * op0 op1 CRn CRm op2
+ * Counter: 11 011 1101 010:n<3> n<2:0>
+ * Type: 11 011 1101 011:n<3> n<2:0>
+ * n: 0-15
+ *
+ * Group 1 of activity monitors (auxiliary):
+ * op0 op1 CRn CRm op2
+ * Counter: 11 011 1101 110:n<3> n<2:0>
+ * Type: 11 011 1101 111:n<3> n<2:0>
+ * n: 0-15
+ */
+
+#define SYS_AMEVCNTR0_EL0(n) SYS_AM_EL0(4 + ((n) >> 3), (n) & 7)
+#define SYS_AMEVTYPER0_EL0(n) SYS_AM_EL0(6 + ((n) >> 3), (n) & 7)
+#define SYS_AMEVCNTR1_EL0(n) SYS_AM_EL0(12 + ((n) >> 3), (n) & 7)
+#define SYS_AMEVTYPER1_EL0(n) SYS_AM_EL0(14 + ((n) >> 3), (n) & 7)
+
+/* AMU v1: Fixed (architecturally defined) activity monitors */
+#define SYS_AMEVCNTR0_CORE_EL0 SYS_AMEVCNTR0_EL0(0)
+#define SYS_AMEVCNTR0_CONST_EL0 SYS_AMEVCNTR0_EL0(1)
+#define SYS_AMEVCNTR0_INST_RET_EL0 SYS_AMEVCNTR0_EL0(2)
+#define SYS_AMEVCNTR0_MEM_STALL SYS_AMEVCNTR0_EL0(3)
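A minimal sketch of reading one of the fixed AMU counters (illustrative only, not part of the patch; it relies on the read_sysreg_s() accessor defined later in this header):

static inline u64 example_read_core_cycles(void)
{
	/* Architected AMU counter 0 counts processor cycles. */
	return read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0);
}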
+
+#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+
+#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0)
+#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
+#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+
+#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1)
+#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2)
+
+#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
+#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
+#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
+
+#define __PMEV_op2(n) ((n) & 0x7)
+#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
+#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
+#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
+#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
+
+#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
+
+#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
+#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4)
+#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
+#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
+#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
+#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
+#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
+#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
+#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
+#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
+#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
+#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
+#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
+#define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0)
+#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
+#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0)
+#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
+
+#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
+#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
+#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0)
+#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1)
+#define SYS_ICH_AP0R2_EL2 __SYS__AP0Rx_EL2(2)
+#define SYS_ICH_AP0R3_EL2 __SYS__AP0Rx_EL2(3)
+
+#define __SYS__AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
+#define SYS_ICH_AP1R0_EL2 __SYS__AP1Rx_EL2(0)
+#define SYS_ICH_AP1R1_EL2 __SYS__AP1Rx_EL2(1)
+#define SYS_ICH_AP1R2_EL2 __SYS__AP1Rx_EL2(2)
+#define SYS_ICH_AP1R3_EL2 __SYS__AP1Rx_EL2(3)
+
+#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
+#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
+#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
+#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
+#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
+#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
+#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
+#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
+
+#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
+#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0)
+#define SYS_ICH_LR1_EL2 __SYS__LR0_EL2(1)
+#define SYS_ICH_LR2_EL2 __SYS__LR0_EL2(2)
+#define SYS_ICH_LR3_EL2 __SYS__LR0_EL2(3)
+#define SYS_ICH_LR4_EL2 __SYS__LR0_EL2(4)
+#define SYS_ICH_LR5_EL2 __SYS__LR0_EL2(5)
+#define SYS_ICH_LR6_EL2 __SYS__LR0_EL2(6)
+#define SYS_ICH_LR7_EL2 __SYS__LR0_EL2(7)
+
+#define __SYS__LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
+#define SYS_ICH_LR8_EL2 __SYS__LR8_EL2(0)
+#define SYS_ICH_LR9_EL2 __SYS__LR8_EL2(1)
+#define SYS_ICH_LR10_EL2 __SYS__LR8_EL2(2)
+#define SYS_ICH_LR11_EL2 __SYS__LR8_EL2(3)
+#define SYS_ICH_LR12_EL2 __SYS__LR8_EL2(4)
+#define SYS_ICH_LR13_EL2 __SYS__LR8_EL2(5)
+#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6)
+#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
+
+/* VHE encodings for architectural EL0/1 system registers */
+#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
+#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
+#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
+#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
+#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
+#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
+#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0)
+#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1)
+#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
+#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
+#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
+#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
+#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
+#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
+#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
+#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
+#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
+#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
+#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
+#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1)
+#define SYS_CNTP_CVAL_EL02 sys_reg(3, 5, 14, 2, 2)
+#define SYS_CNTV_TVAL_EL02 sys_reg(3, 5, 14, 3, 0)
+#define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1)
+#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2)
+
+/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_DSSBS (BIT(44))
+#define SCTLR_ELx_ATA (BIT(43))
+
+#define SCTLR_ELx_TCF_SHIFT 40
+#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
+
+#define SCTLR_ELx_ENIA_SHIFT 31
+
+#define SCTLR_ELx_ITFSB (BIT(37))
+#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
+#define SCTLR_ELx_ENIB (BIT(30))
+#define SCTLR_ELx_ENDA (BIT(27))
+#define SCTLR_ELx_EE (BIT(25))
+#define SCTLR_ELx_IESB (BIT(21))
+#define SCTLR_ELx_WXN (BIT(19))
+#define SCTLR_ELx_ENDB (BIT(13))
+#define SCTLR_ELx_I (BIT(12))
+#define SCTLR_ELx_SA (BIT(3))
+#define SCTLR_ELx_C (BIT(2))
+#define SCTLR_ELx_A (BIT(1))
+#define SCTLR_ELx_M (BIT(0))
+
+/* SCTLR_EL2 specific flags. */
+#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
+ (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
+ (BIT(29)))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ENDIAN_SET_EL2 SCTLR_ELx_EE
+#else
+#define ENDIAN_SET_EL2 0
+#endif
+
+#define INIT_SCTLR_EL2_MMU_ON \
+ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \
+ SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \
+ SCTLR_ELx_ITFSB | SCTLR_EL2_RES1)
+
+#define INIT_SCTLR_EL2_MMU_OFF \
+ (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
+
+/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_EPAN (BIT(57))
+#define SCTLR_EL1_ATA0 (BIT(42))
+
+#define SCTLR_EL1_TCF0_SHIFT 38
+#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
+
+#define SCTLR_EL1_BT1 (BIT(36))
+#define SCTLR_EL1_BT0 (BIT(35))
+#define SCTLR_EL1_UCI (BIT(26))
+#define SCTLR_EL1_E0E (BIT(24))
+#define SCTLR_EL1_SPAN (BIT(23))
+#define SCTLR_EL1_NTWE (BIT(18))
+#define SCTLR_EL1_NTWI (BIT(16))
+#define SCTLR_EL1_UCT (BIT(15))
+#define SCTLR_EL1_DZE (BIT(14))
+#define SCTLR_EL1_UMA (BIT(9))
+#define SCTLR_EL1_SED (BIT(8))
+#define SCTLR_EL1_ITD (BIT(7))
+#define SCTLR_EL1_CP15BEN (BIT(5))
+#define SCTLR_EL1_SA0 (BIT(4))
+
+#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
+ (BIT(29)))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+#else
+#define ENDIAN_SET_EL1 0
+#endif
+
+#define INIT_SCTLR_EL1_MMU_OFF \
+ (ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
+
+#define INIT_SCTLR_EL1_MMU_ON \
+ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
+ SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
+ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
+ SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
+ SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
+
+/* MAIR_ELx memory attributes (used by Linux) */
+#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
+#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
+#define MAIR_ATTR_NORMAL_NC UL(0x44)
+#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
+#define MAIR_ATTR_NORMAL UL(0xff)
+#define MAIR_ATTR_MASK UL(0xff)
+
+/* Position the attr at the correct index */
+#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
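A sketch of how the attribute encodings are placed into a MAIR_ELx value (illustrative only, not part of the patch; the index assignments are arbitrary and do not reflect the kernel's MT_* layout):

#define EXAMPLE_IDX_DEVICE	0	/* hypothetical index for device memory */
#define EXAMPLE_IDX_NORMAL	1	/* hypothetical index for normal cacheable memory */
#define EXAMPLE_IDX_NORMAL_NC	2	/* hypothetical index for normal non-cacheable memory */

#define EXAMPLE_MAIR_EL1						\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, EXAMPLE_IDX_DEVICE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, EXAMPLE_IDX_NORMAL) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, EXAMPLE_IDX_NORMAL_NC))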
+
+/* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT 60
+#define ID_AA64ISAR0_TLB_SHIFT 56
+#define ID_AA64ISAR0_TS_SHIFT 52
+#define ID_AA64ISAR0_FHM_SHIFT 48
+#define ID_AA64ISAR0_DP_SHIFT 44
+#define ID_AA64ISAR0_SM4_SHIFT 40
+#define ID_AA64ISAR0_SM3_SHIFT 36
+#define ID_AA64ISAR0_SHA3_SHIFT 32
+#define ID_AA64ISAR0_RDM_SHIFT 28
+#define ID_AA64ISAR0_ATOMICS_SHIFT 20
+#define ID_AA64ISAR0_CRC32_SHIFT 16
+#define ID_AA64ISAR0_SHA2_SHIFT 12
+#define ID_AA64ISAR0_SHA1_SHIFT 8
+#define ID_AA64ISAR0_AES_SHIFT 4
+
+#define ID_AA64ISAR0_TLB_RANGE_NI 0x0
+#define ID_AA64ISAR0_TLB_RANGE 0x2
+
+/* id_aa64isar1 */
+#define ID_AA64ISAR1_I8MM_SHIFT 52
+#define ID_AA64ISAR1_DGH_SHIFT 48
+#define ID_AA64ISAR1_BF16_SHIFT 44
+#define ID_AA64ISAR1_SPECRES_SHIFT 40
+#define ID_AA64ISAR1_SB_SHIFT 36
+#define ID_AA64ISAR1_FRINTTS_SHIFT 32
+#define ID_AA64ISAR1_GPI_SHIFT 28
+#define ID_AA64ISAR1_GPA_SHIFT 24
+#define ID_AA64ISAR1_LRCPC_SHIFT 20
+#define ID_AA64ISAR1_FCMA_SHIFT 16
+#define ID_AA64ISAR1_JSCVT_SHIFT 12
+#define ID_AA64ISAR1_API_SHIFT 8
+#define ID_AA64ISAR1_APA_SHIFT 4
+#define ID_AA64ISAR1_DPB_SHIFT 0
+
+#define ID_AA64ISAR1_APA_NI 0x0
+#define ID_AA64ISAR1_APA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2
+#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5
+#define ID_AA64ISAR1_API_NI 0x0
+#define ID_AA64ISAR1_API_IMP_DEF 0x1
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5
+#define ID_AA64ISAR1_GPA_NI 0x0
+#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_GPI_NI 0x0
+#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
+
+/* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV3_SHIFT 60
+#define ID_AA64PFR0_CSV2_SHIFT 56
+#define ID_AA64PFR0_DIT_SHIFT 48
+#define ID_AA64PFR0_AMU_SHIFT 44
+#define ID_AA64PFR0_MPAM_SHIFT 40
+#define ID_AA64PFR0_SEL2_SHIFT 36
+#define ID_AA64PFR0_SVE_SHIFT 32
+#define ID_AA64PFR0_RAS_SHIFT 28
+#define ID_AA64PFR0_GIC_SHIFT 24
+#define ID_AA64PFR0_ASIMD_SHIFT 20
+#define ID_AA64PFR0_FP_SHIFT 16
+#define ID_AA64PFR0_EL3_SHIFT 12
+#define ID_AA64PFR0_EL2_SHIFT 8
+#define ID_AA64PFR0_EL1_SHIFT 4
+#define ID_AA64PFR0_EL0_SHIFT 0
+
+#define ID_AA64PFR0_AMU 0x1
+#define ID_AA64PFR0_SVE 0x1
+#define ID_AA64PFR0_RAS_V1 0x1
+#define ID_AA64PFR0_RAS_V1P1 0x2
+#define ID_AA64PFR0_FP_NI 0xf
+#define ID_AA64PFR0_FP_SUPPORTED 0x0
+#define ID_AA64PFR0_ASIMD_NI 0xf
+#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
+#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1
+#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
+
+/* id_aa64pfr1 */
+#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
+#define ID_AA64PFR1_RASFRAC_SHIFT 12
+#define ID_AA64PFR1_MTE_SHIFT 8
+#define ID_AA64PFR1_SSBS_SHIFT 4
+#define ID_AA64PFR1_BT_SHIFT 0
+
+#define ID_AA64PFR1_SSBS_PSTATE_NI 0
+#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
+#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
+#define ID_AA64PFR1_BT_BTI 0x1
+
+#define ID_AA64PFR1_MTE_NI 0x0
+#define ID_AA64PFR1_MTE_EL0 0x1
+#define ID_AA64PFR1_MTE 0x2
+
+/* id_aa64zfr0 */
+#define ID_AA64ZFR0_F64MM_SHIFT 56
+#define ID_AA64ZFR0_F32MM_SHIFT 52
+#define ID_AA64ZFR0_I8MM_SHIFT 44
+#define ID_AA64ZFR0_SM4_SHIFT 40
+#define ID_AA64ZFR0_SHA3_SHIFT 32
+#define ID_AA64ZFR0_BF16_SHIFT 20
+#define ID_AA64ZFR0_BITPERM_SHIFT 16
+#define ID_AA64ZFR0_AES_SHIFT 4
+#define ID_AA64ZFR0_SVEVER_SHIFT 0
+
+#define ID_AA64ZFR0_F64MM 0x1
+#define ID_AA64ZFR0_F32MM 0x1
+#define ID_AA64ZFR0_I8MM 0x1
+#define ID_AA64ZFR0_BF16 0x1
+#define ID_AA64ZFR0_SM4 0x1
+#define ID_AA64ZFR0_SHA3 0x1
+#define ID_AA64ZFR0_BITPERM 0x1
+#define ID_AA64ZFR0_AES 0x1
+#define ID_AA64ZFR0_AES_PMULL 0x2
+#define ID_AA64ZFR0_SVEVER_SVE2 0x1
+
+/* id_aa64mmfr0 */
+#define ID_AA64MMFR0_ECV_SHIFT 60
+#define ID_AA64MMFR0_FGT_SHIFT 56
+#define ID_AA64MMFR0_EXS_SHIFT 44
+#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40
+#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36
+#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32
+#define ID_AA64MMFR0_TGRAN4_SHIFT 28
+#define ID_AA64MMFR0_TGRAN64_SHIFT 24
+#define ID_AA64MMFR0_TGRAN16_SHIFT 20
+#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
+#define ID_AA64MMFR0_SNSMEM_SHIFT 12
+#define ID_AA64MMFR0_BIGENDEL_SHIFT 8
+#define ID_AA64MMFR0_ASID_SHIFT 4
+#define ID_AA64MMFR0_PARANGE_SHIFT 0
+
+#define ID_AA64MMFR0_ASID_8 0x0
+#define ID_AA64MMFR0_ASID_16 0x2
+
+#define ID_AA64MMFR0_TGRAN4_NI 0xf
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_TGRAN64_NI 0xf
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_TGRAN16_NI 0x0
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf
+
+#define ID_AA64MMFR0_PARANGE_32 0x0
+#define ID_AA64MMFR0_PARANGE_36 0x1
+#define ID_AA64MMFR0_PARANGE_40 0x2
+#define ID_AA64MMFR0_PARANGE_42 0x3
+#define ID_AA64MMFR0_PARANGE_44 0x4
+#define ID_AA64MMFR0_PARANGE_48 0x5
+#define ID_AA64MMFR0_PARANGE_52 0x6
+
+#define ARM64_MIN_PARANGE_BITS 32
+
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
+
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
+#else
+#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
+#endif
+
+/* id_aa64mmfr1 */
+#define ID_AA64MMFR1_ETS_SHIFT 36
+#define ID_AA64MMFR1_TWED_SHIFT 32
+#define ID_AA64MMFR1_XNX_SHIFT 28
+#define ID_AA64MMFR1_SPECSEI_SHIFT 24
+#define ID_AA64MMFR1_PAN_SHIFT 20
+#define ID_AA64MMFR1_LOR_SHIFT 16
+#define ID_AA64MMFR1_HPD_SHIFT 12
+#define ID_AA64MMFR1_VHE_SHIFT 8
+#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
+#define ID_AA64MMFR1_HADBS_SHIFT 0
+
+#define ID_AA64MMFR1_VMIDBITS_8 0
+#define ID_AA64MMFR1_VMIDBITS_16 2
+
+/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_E0PD_SHIFT 60
+#define ID_AA64MMFR2_EVT_SHIFT 56
+#define ID_AA64MMFR2_BBM_SHIFT 52
+#define ID_AA64MMFR2_TTL_SHIFT 48
+#define ID_AA64MMFR2_FWB_SHIFT 40
+#define ID_AA64MMFR2_IDS_SHIFT 36
+#define ID_AA64MMFR2_AT_SHIFT 32
+#define ID_AA64MMFR2_ST_SHIFT 28
+#define ID_AA64MMFR2_NV_SHIFT 24
+#define ID_AA64MMFR2_CCIDX_SHIFT 20
+#define ID_AA64MMFR2_LVA_SHIFT 16
+#define ID_AA64MMFR2_IESB_SHIFT 12
+#define ID_AA64MMFR2_LSM_SHIFT 8
+#define ID_AA64MMFR2_UAO_SHIFT 4
+#define ID_AA64MMFR2_CNP_SHIFT 0
+
+/* id_aa64dfr0 */
+#define ID_AA64DFR0_MTPMU_SHIFT 48
+#define ID_AA64DFR0_TRBE_SHIFT 44
+#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
+#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
+#define ID_AA64DFR0_PMSVER_SHIFT 32
+#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
+#define ID_AA64DFR0_WRPS_SHIFT 20
+#define ID_AA64DFR0_BRPS_SHIFT 12
+#define ID_AA64DFR0_PMUVER_SHIFT 8
+#define ID_AA64DFR0_TRACEVER_SHIFT 4
+#define ID_AA64DFR0_DEBUGVER_SHIFT 0
+
+#define ID_AA64DFR0_PMUVER_8_0 0x1
+#define ID_AA64DFR0_PMUVER_8_1 0x4
+#define ID_AA64DFR0_PMUVER_8_4 0x5
+#define ID_AA64DFR0_PMUVER_8_5 0x6
+#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
+
+#define ID_AA64DFR0_PMSVER_8_2 0x1
+#define ID_AA64DFR0_PMSVER_8_3 0x2
+
+#define ID_DFR0_PERFMON_SHIFT 24
+
+#define ID_DFR0_PERFMON_8_0 0x3
+#define ID_DFR0_PERFMON_8_1 0x4
+#define ID_DFR0_PERFMON_8_4 0x5
+#define ID_DFR0_PERFMON_8_5 0x6
+
+#define ID_ISAR4_SWP_FRAC_SHIFT 28
+#define ID_ISAR4_PSR_M_SHIFT 24
+#define ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20
+#define ID_ISAR4_BARRIER_SHIFT 16
+#define ID_ISAR4_SMC_SHIFT 12
+#define ID_ISAR4_WRITEBACK_SHIFT 8
+#define ID_ISAR4_WITHSHIFTS_SHIFT 4
+#define ID_ISAR4_UNPRIV_SHIFT 0
+
+#define ID_DFR1_MTPMU_SHIFT 0
+
+#define ID_ISAR0_DIVIDE_SHIFT 24
+#define ID_ISAR0_DEBUG_SHIFT 20
+#define ID_ISAR0_COPROC_SHIFT 16
+#define ID_ISAR0_CMPBRANCH_SHIFT 12
+#define ID_ISAR0_BITFIELD_SHIFT 8
+#define ID_ISAR0_BITCOUNT_SHIFT 4
+#define ID_ISAR0_SWAP_SHIFT 0
+
+#define ID_ISAR5_RDM_SHIFT 24
+#define ID_ISAR5_CRC32_SHIFT 16
+#define ID_ISAR5_SHA2_SHIFT 12
+#define ID_ISAR5_SHA1_SHIFT 8
+#define ID_ISAR5_AES_SHIFT 4
+#define ID_ISAR5_SEVL_SHIFT 0
+
+#define ID_ISAR6_I8MM_SHIFT 24
+#define ID_ISAR6_BF16_SHIFT 20
+#define ID_ISAR6_SPECRES_SHIFT 16
+#define ID_ISAR6_SB_SHIFT 12
+#define ID_ISAR6_FHM_SHIFT 8
+#define ID_ISAR6_DP_SHIFT 4
+#define ID_ISAR6_JSCVT_SHIFT 0
+
+#define ID_MMFR0_INNERSHR_SHIFT 28
+#define ID_MMFR0_FCSE_SHIFT 24
+#define ID_MMFR0_AUXREG_SHIFT 20
+#define ID_MMFR0_TCM_SHIFT 16
+#define ID_MMFR0_SHARELVL_SHIFT 12
+#define ID_MMFR0_OUTERSHR_SHIFT 8
+#define ID_MMFR0_PMSA_SHIFT 4
+#define ID_MMFR0_VMSA_SHIFT 0
+
+#define ID_MMFR4_EVT_SHIFT 28
+#define ID_MMFR4_CCIDX_SHIFT 24
+#define ID_MMFR4_LSM_SHIFT 20
+#define ID_MMFR4_HPDS_SHIFT 16
+#define ID_MMFR4_CNP_SHIFT 12
+#define ID_MMFR4_XNX_SHIFT 8
+#define ID_MMFR4_AC2_SHIFT 4
+#define ID_MMFR4_SPECSEI_SHIFT 0
+
+#define ID_MMFR5_ETS_SHIFT 0
+
+#define ID_PFR0_DIT_SHIFT 24
+#define ID_PFR0_CSV2_SHIFT 16
+#define ID_PFR0_STATE3_SHIFT 12
+#define ID_PFR0_STATE2_SHIFT 8
+#define ID_PFR0_STATE1_SHIFT 4
+#define ID_PFR0_STATE0_SHIFT 0
+
+#define ID_DFR0_PERFMON_SHIFT 24
+#define ID_DFR0_MPROFDBG_SHIFT 20
+#define ID_DFR0_MMAPTRC_SHIFT 16
+#define ID_DFR0_COPTRC_SHIFT 12
+#define ID_DFR0_MMAPDBG_SHIFT 8
+#define ID_DFR0_COPSDBG_SHIFT 4
+#define ID_DFR0_COPDBG_SHIFT 0
+
+#define ID_PFR2_SSBS_SHIFT 4
+#define ID_PFR2_CSV3_SHIFT 0
+
+#define MVFR0_FPROUND_SHIFT 28
+#define MVFR0_FPSHVEC_SHIFT 24
+#define MVFR0_FPSQRT_SHIFT 20
+#define MVFR0_FPDIVIDE_SHIFT 16
+#define MVFR0_FPTRAP_SHIFT 12
+#define MVFR0_FPDP_SHIFT 8
+#define MVFR0_FPSP_SHIFT 4
+#define MVFR0_SIMD_SHIFT 0
+
+#define MVFR1_SIMDFMAC_SHIFT 28
+#define MVFR1_FPHP_SHIFT 24
+#define MVFR1_SIMDHP_SHIFT 20
+#define MVFR1_SIMDSP_SHIFT 16
+#define MVFR1_SIMDINT_SHIFT 12
+#define MVFR1_SIMDLS_SHIFT 8
+#define MVFR1_FPDNAN_SHIFT 4
+#define MVFR1_FPFTZ_SHIFT 0
+
+#define ID_PFR1_GIC_SHIFT 28
+#define ID_PFR1_VIRT_FRAC_SHIFT 24
+#define ID_PFR1_SEC_FRAC_SHIFT 20
+#define ID_PFR1_GENTIMER_SHIFT 16
+#define ID_PFR1_VIRTUALIZATION_SHIFT 12
+#define ID_PFR1_MPROGMOD_SHIFT 8
+#define ID_PFR1_SECURITY_SHIFT 4
+#define ID_PFR1_PROGMOD_SHIFT 0
+
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
+#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
+#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT
+#elif defined(CONFIG_ARM64_64K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
+#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT
+#endif
+
+#define MVFR2_FPMISC_SHIFT 4
+#define MVFR2_SIMDMISC_SHIFT 0
+
+#define DCZID_DZP_SHIFT 4
+#define DCZID_BS_SHIFT 0
+
+/*
+ * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
+ * are reserved by the SVE architecture for future expansion of the LEN
+ * field, with compatible semantics.
+ */
+#define ZCR_ELx_LEN_SHIFT 0
+#define ZCR_ELx_LEN_SIZE 9
+#define ZCR_ELx_LEN_MASK 0x1ff
+
+#define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */
+#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
+#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+
+/* TCR EL1 Bit Definitions */
+#define SYS_TCR_EL1_TCMA1 (BIT(58))
+#define SYS_TCR_EL1_TCMA0 (BIT(57))
+
+/* GCR_EL1 Definitions */
+#define SYS_GCR_EL1_RRND (BIT(16))
+#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
+
+/* RGSR_EL1 Definitions */
+#define SYS_RGSR_EL1_TAG_MASK 0xfUL
+#define SYS_RGSR_EL1_SEED_SHIFT 8
+#define SYS_RGSR_EL1_SEED_MASK 0xffffUL
+
+/* GMID_EL1 field definitions */
+#define SYS_GMID_EL1_BS_SHIFT 0
+#define SYS_GMID_EL1_BS_SIZE 4
+
+/* TFSR{,E0}_EL1 bit definitions */
+#define SYS_TFSR_EL1_TF0_SHIFT 0
+#define SYS_TFSR_EL1_TF1_SHIFT 1
+#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
+#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
+
+/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
+#define SYS_MPIDR_SAFE_VAL (BIT(31))
+
+#define TRFCR_ELx_TS_SHIFT 5
+#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_EL2_CX BIT(3)
+#define TRFCR_ELx_ExTRE BIT(1)
+#define TRFCR_ELx_E0TRE BIT(0)
+
+
+/* GIC Hypervisor interface registers */
+/* ICH_MISR_EL2 bit definitions */
+#define ICH_MISR_EOI (1 << 0)
+#define ICH_MISR_U (1 << 1)
+
+/* ICH_LR*_EL2 bit definitions */
+#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
+
+#define ICH_LR_EOI (1ULL << 41)
+#define ICH_LR_GROUP (1ULL << 60)
+#define ICH_LR_HW (1ULL << 61)
+#define ICH_LR_STATE (3ULL << 62)
+#define ICH_LR_PENDING_BIT (1ULL << 62)
+#define ICH_LR_ACTIVE_BIT (1ULL << 63)
+#define ICH_LR_PHYS_ID_SHIFT 32
+#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_PRIORITY_SHIFT 48
+#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
+
+/* ICH_HCR_EL2 bit definitions */
+#define ICH_HCR_EN (1 << 0)
+#define ICH_HCR_UIE (1 << 1)
+#define ICH_HCR_NPIE (1 << 3)
+#define ICH_HCR_TC (1 << 10)
+#define ICH_HCR_TALL0 (1 << 11)
+#define ICH_HCR_TALL1 (1 << 12)
+#define ICH_HCR_EOIcount_SHIFT 27
+#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
+
+/* ICH_VMCR_EL2 bit definitions */
+#define ICH_VMCR_ACK_CTL_SHIFT 2
+#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT 3
+#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
+#define ICH_VMCR_CBPR_SHIFT 4
+#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
+#define ICH_VMCR_EOIM_SHIFT 9
+#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT 18
+#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT 21
+#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT 24
+#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
+#define ICH_VMCR_ENG0_SHIFT 0
+#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
+#define ICH_VMCR_ENG1_SHIFT 1
+#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
+
+/* ICH_VTR_EL2 bit definitions */
+#define ICH_VTR_PRI_BITS_SHIFT 29
+#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
+#define ICH_VTR_ID_BITS_SHIFT 23
+#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
+#define ICH_VTR_SEIS_SHIFT 22
+#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
+#define ICH_VTR_A3V_SHIFT 21
+#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
+
+#define ARM64_FEATURE_FIELD_BITS 4
+
+/* Create a mask for the feature bits of the specified feature. */
+#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
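A sketch of how a 4-bit ID register field is typically extracted with these shifts and the mask helper (illustrative only, not part of the patch; it uses read_sysreg_s(), defined further down in this header):

static bool example_cpu_has_crc32(void)
{
	u64 isar0 = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
	u64 field = (isar0 & ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32)) >>
		    ID_AA64ISAR0_CRC32_SHIFT;

	/* A non-zero field value means the CRC32 instructions are implemented. */
	return field != 0;
}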
+
+#ifdef __ASSEMBLY__
+
+ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+ .equ .L__reg_num_x\num, \num
+ .endr
+ .equ .L__reg_num_xzr, 31
+
+ .macro mrs_s, rt, sreg
+ __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
+ .endm
+
+ .macro msr_s, sreg, rt
+ __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
+ .endm
+
+#else
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+#include <asm/alternative.h>
+
+#define __DEFINE_MRS_MSR_S_REGNUM \
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
+" .equ .L__reg_num_x\\num, \\num\n" \
+" .endr\n" \
+" .equ .L__reg_num_xzr, 31\n"
+
+#define DEFINE_MRS_S \
+ __DEFINE_MRS_MSR_S_REGNUM \
+" .macro mrs_s, rt, sreg\n" \
+ __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \
+" .endm\n"
+
+#define DEFINE_MSR_S \
+ __DEFINE_MRS_MSR_S_REGNUM \
+" .macro msr_s, sreg, rt\n" \
+ __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \
+" .endm\n"
+
+#define UNDEFINE_MRS_S \
+" .purgem mrs_s\n"
+
+#define UNDEFINE_MSR_S \
+" .purgem msr_s\n"
+
+#define __mrs_s(v, r) \
+ DEFINE_MRS_S \
+" mrs_s " v ", " __stringify(r) "\n" \
+ UNDEFINE_MRS_S
+
+#define __msr_s(r, v) \
+ DEFINE_MSR_S \
+" msr_s " __stringify(r) ", " v "\n" \
+ UNDEFINE_MSR_S
+
+/*
+ * Unlike read_cpuid, calls to read_sysreg are never expected to be
+ * optimized away or replaced with synthetic values.
+ */
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+/*
+ * The "Z" constraint normally means a zero immediate, but when combined with
+ * the "%x0" template means XZR.
+ */
+#define write_sysreg(v, r) do { \
+ u64 __val = (u64)(v); \
+ asm volatile("msr " __stringify(r) ", %x0" \
+ : : "rZ" (__val)); \
+} while (0)
+
+/*
+ * For registers without architectural names, or simply unsupported by
+ * GAS.
+ */
+#define read_sysreg_s(r) ({ \
+ u64 __val; \
+ asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
+ __val; \
+})
+
+#define write_sysreg_s(v, r) do { \
+ u64 __val = (u64)(v); \
+ asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
+} while (0)
+
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg(__scs_new, sysreg); \
+} while (0)
+
+#define sysreg_clear_set_s(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg_s(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg_s(__scs_new, sysreg); \
+} while (0)
+
+#define read_sysreg_par() ({ \
+ u64 par; \
+ asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
+ par = read_sysreg(par_el1); \
+ asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
+ par; \
+})
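A short sketch of the accessors above in use (illustrative only, not part of the patch):

static void example_sysreg_access(void)
{
	u64 tcr, pmscr;

	/* Registers GAS knows by name can be accessed directly ... */
	tcr = read_sysreg(tcr_el1);

	/* ... while encoding-only registers go through the _s accessors. */
	pmscr = read_sysreg_s(SYS_PMSCR_EL1);

	/* Clear the TCMA bits of TCR_EL1, leaving every other bit untouched. */
	sysreg_clear_set(tcr_el1, SYS_TCR_EL1_TCMA1 | SYS_TCR_EL1_TCMA0, 0);

	/* Write a value back; the "Z" constraint turns a literal zero into XZR. */
	write_sysreg(tcr, tcr_el1);

	(void)pmscr;
}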
+
+#endif
+
+#endif /* __ASM_SYSREG_H */
diff --git a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h b/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
deleted file mode 100644
index b551b741653d..000000000000
--- a/tools/arch/arm64/include/uapi/asm/bpf_perf_event.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
-#define _UAPI__ASM_BPF_PERF_EVENT_H__
-
-#include <asm/ptrace.h>
-
-typedef struct user_pt_regs bpf_user_pt_regs_t;
-
-#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index ba85bb23f060..f8129c624b07 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -43,6 +43,7 @@
#define __KVM_HAVE_VCPU_EVENTS
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define KVM_REG_SIZE(id) \
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
@@ -75,9 +76,11 @@ struct kvm_regs {
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+ KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+ KVM_ARM_DEVICE_ID_SHIFT)
/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
@@ -106,6 +109,7 @@ struct kvm_regs {
#define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */
#define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */
#define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */
+#define KVM_ARM_VCPU_HAS_EL2 7 /* Support nested virtualization */
struct kvm_vcpu_init {
__u32 target;
@@ -139,8 +143,10 @@ struct kvm_guest_debug_arch {
__u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
};
+#define KVM_DEBUG_ARCH_HSR_HIGH_VALID (1 << 0)
struct kvm_debug_exit_arch {
__u32 hsr;
+ __u32 hsr_high; /* ESR_EL2[61:32] */
__u64 far; /* used for watchpoints */
};
@@ -156,7 +162,19 @@ struct kvm_sync_regs {
__u64 device_irq_level;
};
-struct kvm_arch_memory_slot {
+/*
+ * PMU filter structure. Describes a range of events with a particular
+ * action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER.
+ */
+struct kvm_pmu_event_filter {
+ __u16 base_event;
+ __u16 nevents;
+
+#define KVM_PMU_EVENT_ALLOW 0
+#define KVM_PMU_EVENT_DENY 1
+
+ __u8 action;
+ __u8 pad[3];
};
/* for KVM_GET/SET_VCPU_EVENTS */
@@ -172,6 +190,17 @@ struct kvm_vcpu_events {
__u32 reserved[12];
};
+struct kvm_arm_copy_mte_tags {
+ __u64 guest_ipa;
+ __u64 length;
+ void __user *addr;
+ __u64 flags;
+ __u64 reserved[2];
+};
+
+#define KVM_ARM_TAGS_TO_GUEST 0
+#define KVM_ARM_TAGS_FROM_GUEST 1
+
/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT 16
@@ -242,6 +271,15 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
@@ -249,6 +287,11 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3 KVM_REG_ARM_FW_REG(3)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_AVAIL 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_3_NOT_REQUIRED 2
+
/* SVE registers */
#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
@@ -295,6 +338,40 @@ struct kvm_vcpu_events {
#define KVM_ARM64_SVE_VLS_WORDS \
((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1)
+/* Bitmap feature firmware registers */
+#define KVM_REG_ARM_FW_FEAT_BMAP (0x0016 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_FW_FEAT_BMAP_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_FW_FEAT_BMAP | \
+ ((r) & 0xffff))
+
+#define KVM_REG_ARM_STD_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(0)
+
+enum {
+ KVM_REG_ARM_STD_BIT_TRNG_V1_0 = 0,
+#ifdef __KERNEL__
+ KVM_REG_ARM_STD_BMAP_BIT_COUNT,
+#endif
+};
+
+#define KVM_REG_ARM_STD_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(1)
+
+enum {
+ KVM_REG_ARM_STD_HYP_BIT_PV_TIME = 0,
+#ifdef __KERNEL__
+ KVM_REG_ARM_STD_HYP_BMAP_BIT_COUNT,
+#endif
+};
+
+#define KVM_REG_ARM_VENDOR_HYP_BMAP KVM_REG_ARM_FW_FEAT_BMAP_REG(2)
+
+enum {
+ KVM_REG_ARM_VENDOR_HYP_BIT_FUNC_FEAT = 0,
+ KVM_REG_ARM_VENDOR_HYP_BIT_PTP = 1,
+#ifdef __KERNEL__
+ KVM_REG_ARM_VENDOR_HYP_BMAP_BIT_COUNT,
+#endif
+};
+
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
@@ -329,6 +406,8 @@ struct kvm_vcpu_events {
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
#define KVM_ARM_VCPU_PMU_V3_IRQ 0
#define KVM_ARM_VCPU_PMU_V3_INIT 1
+#define KVM_ARM_VCPU_PMU_V3_FILTER 2
+#define KVM_ARM_VCPU_PMU_V3_SET_PMU 3
#define KVM_ARM_VCPU_TIMER_CTRL 1
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
@@ -380,6 +459,16 @@ struct kvm_vcpu_events {
#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
+/* arm64-specific kvm_run::system_event flags */
+/*
+ * Reset caused by a PSCI v1.1 SYSTEM_RESET2 call.
+ * Valid only when the system event has a type of KVM_SYSTEM_EVENT_RESET.
+ */
+#define KVM_SYSTEM_EVENT_RESET_FLAG_PSCI_RESET2 (1ULL << 0)
+
+/* run->fail_entry.hardware_entry_failure_reason codes. */
+#define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED (1ULL << 0)
+
#endif
#endif /* __ARM_KVM_H__ */
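
The kvm_pmu_event_filter structure introduced above is consumed through the vCPU device-attribute interface (group KVM_ARM_VCPU_PMU_V3_CTRL, attribute KVM_ARM_VCPU_PMU_V3_FILTER). A minimal userspace sketch, assuming an already-created vCPU fd and leaving out error handling and PMU init ordering:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Hypothetical helper: deny raw PMU events 0x40..0x4f on this vCPU. */
	static int deny_pmu_events(int vcpu_fd)
	{
		struct kvm_pmu_event_filter filter = {
			.base_event = 0x40,
			.nevents    = 16,
			.action     = KVM_PMU_EVENT_DENY,
		};
		struct kvm_device_attr attr = {
			.group = KVM_ARM_VCPU_PMU_V3_CTRL,
			.attr  = KVM_ARM_VCPU_PMU_V3_FILTER,
			.addr  = (__u64)(unsigned long)&filter,
		};

		return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
	}
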
diff --git a/tools/arch/arm64/include/uapi/asm/perf_regs.h b/tools/arch/arm64/include/uapi/asm/perf_regs.h
index d54daafa89e3..fd157f46727e 100644
--- a/tools/arch/arm64/include/uapi/asm/perf_regs.h
+++ b/tools/arch/arm64/include/uapi/asm/perf_regs.h
@@ -36,6 +36,11 @@ enum perf_event_arm_regs {
PERF_REG_ARM64_LR,
PERF_REG_ARM64_SP,
PERF_REG_ARM64_PC,
- PERF_REG_ARM64_MAX,
+
+ /* Extended/pseudo registers */
+ PERF_REG_ARM64_VG = 46, // SVE Vector Granule
+
+ PERF_REG_ARM64_MAX = PERF_REG_ARM64_PC + 1,
+ PERF_REG_ARM64_EXTENDED_MAX = PERF_REG_ARM64_VG + 1
};
#endif /* _ASM_ARM64_PERF_REGS_H */
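
PERF_REG_ARM64_VG is a pseudo register, so a tool opts into it the same way as the real registers: by setting its bit in perf_event_attr.sample_regs_user. A short sketch, assuming the tool separately verifies at runtime that the kernel accepts the extended mask:

	#include <linux/perf_event.h>
	#include <asm/perf_regs.h>

	struct perf_event_attr attr = {
		.type             = PERF_TYPE_HARDWARE,
		.config           = PERF_COUNT_HW_CPU_CYCLES,
		.sample_type      = PERF_SAMPLE_REGS_USER,
		/* Program counter plus the SVE vector granule pseudo register. */
		.sample_regs_user = (1ULL << PERF_REG_ARM64_PC) |
				    (1ULL << PERF_REG_ARM64_VG),
	};
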
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index f83a70e07df8..ce2ee8f1e361 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -20,5 +20,6 @@
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
#define __ARCH_WANT_SYS_CLONE3
+#define __ARCH_WANT_MEMFD_SECRET
#include <asm-generic/unistd.h>
diff --git a/tools/arch/h8300/include/asm/bitsperlong.h b/tools/arch/h8300/include/asm/bitsperlong.h
deleted file mode 100644
index fa1508337ffc..000000000000
--- a/tools/arch/h8300/include/asm/bitsperlong.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_H8300_BITS_PER_LONG
-#define __ASM_H8300_BITS_PER_LONG
-
-#include <asm-generic/bitsperlong.h>
-
-#if !defined(__ASSEMBLY__)
-/* h8300-unknown-linux required long */
-#define __kernel_size_t __kernel_size_t
-typedef unsigned long __kernel_size_t;
-typedef long __kernel_ssize_t;
-typedef long __kernel_ptrdiff_t;
-#endif
-
-#endif /* __ASM_H8300_BITS_PER_LONG */
diff --git a/tools/arch/h8300/include/uapi/asm/mman.h b/tools/arch/h8300/include/uapi/asm/mman.h
deleted file mode 100644
index be7bbe0528d1..000000000000
--- a/tools/arch/h8300/include/uapi/asm/mman.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef TOOLS_ARCH_H8300_UAPI_ASM_MMAN_FIX_H
-#define TOOLS_ARCH_H8300_UAPI_ASM_MMAN_FIX_H
-#include <uapi/asm-generic/mman.h>
-/* MAP_32BIT is undefined on h8300, fix it for perf */
-#define MAP_32BIT 0
-#endif
diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h
index 4d471d9511a5..6fffe5682713 100644
--- a/tools/arch/ia64/include/asm/barrier.h
+++ b/tools/arch/ia64/include/asm/barrier.h
@@ -39,9 +39,6 @@
* sequential memory pages only.
*/
-/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
-#define ia64_mf() asm volatile ("mf" ::: "memory")
-
#define mb() ia64_mf()
#define rmb() mb()
#define wmb() mb()
diff --git a/tools/arch/loongarch/include/uapi/asm/bitsperlong.h b/tools/arch/loongarch/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000000..00b4ba1e5cdf
--- /dev/null
+++ b/tools/arch/loongarch/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_LOONGARCH_BITSPERLONG_H
+#define __ASM_LOONGARCH_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/tools/arch/loongarch/include/uapi/asm/perf_regs.h b/tools/arch/loongarch/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..29d69c00fc7a
--- /dev/null
+++ b/tools/arch/loongarch/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_LOONGARCH_PERF_REGS_H
+#define _ASM_LOONGARCH_PERF_REGS_H
+
+enum perf_event_loongarch_regs {
+ PERF_REG_LOONGARCH_PC,
+ PERF_REG_LOONGARCH_R1,
+ PERF_REG_LOONGARCH_R2,
+ PERF_REG_LOONGARCH_R3,
+ PERF_REG_LOONGARCH_R4,
+ PERF_REG_LOONGARCH_R5,
+ PERF_REG_LOONGARCH_R6,
+ PERF_REG_LOONGARCH_R7,
+ PERF_REG_LOONGARCH_R8,
+ PERF_REG_LOONGARCH_R9,
+ PERF_REG_LOONGARCH_R10,
+ PERF_REG_LOONGARCH_R11,
+ PERF_REG_LOONGARCH_R12,
+ PERF_REG_LOONGARCH_R13,
+ PERF_REG_LOONGARCH_R14,
+ PERF_REG_LOONGARCH_R15,
+ PERF_REG_LOONGARCH_R16,
+ PERF_REG_LOONGARCH_R17,
+ PERF_REG_LOONGARCH_R18,
+ PERF_REG_LOONGARCH_R19,
+ PERF_REG_LOONGARCH_R20,
+ PERF_REG_LOONGARCH_R21,
+ PERF_REG_LOONGARCH_R22,
+ PERF_REG_LOONGARCH_R23,
+ PERF_REG_LOONGARCH_R24,
+ PERF_REG_LOONGARCH_R25,
+ PERF_REG_LOONGARCH_R26,
+ PERF_REG_LOONGARCH_R27,
+ PERF_REG_LOONGARCH_R28,
+ PERF_REG_LOONGARCH_R29,
+ PERF_REG_LOONGARCH_R30,
+ PERF_REG_LOONGARCH_R31,
+ PERF_REG_LOONGARCH_MAX,
+};
+#endif /* _ASM_LOONGARCH_PERF_REGS_H */
diff --git a/tools/arch/loongarch/include/uapi/asm/unistd.h b/tools/arch/loongarch/include/uapi/asm/unistd.h
new file mode 100644
index 000000000000..0c743344e92d
--- /dev/null
+++ b/tools/arch/loongarch/include/uapi/asm/unistd.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
+
+#include <asm-generic/unistd.h>
diff --git a/tools/arch/mips/include/uapi/asm/perf_regs.h b/tools/arch/mips/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..d0f4ecd616cf
--- /dev/null
+++ b/tools/arch/mips/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _ASM_MIPS_PERF_REGS_H
+#define _ASM_MIPS_PERF_REGS_H
+
+enum perf_event_mips_regs {
+ PERF_REG_MIPS_PC,
+ PERF_REG_MIPS_R1,
+ PERF_REG_MIPS_R2,
+ PERF_REG_MIPS_R3,
+ PERF_REG_MIPS_R4,
+ PERF_REG_MIPS_R5,
+ PERF_REG_MIPS_R6,
+ PERF_REG_MIPS_R7,
+ PERF_REG_MIPS_R8,
+ PERF_REG_MIPS_R9,
+ PERF_REG_MIPS_R10,
+ PERF_REG_MIPS_R11,
+ PERF_REG_MIPS_R12,
+ PERF_REG_MIPS_R13,
+ PERF_REG_MIPS_R14,
+ PERF_REG_MIPS_R15,
+ PERF_REG_MIPS_R16,
+ PERF_REG_MIPS_R17,
+ PERF_REG_MIPS_R18,
+ PERF_REG_MIPS_R19,
+ PERF_REG_MIPS_R20,
+ PERF_REG_MIPS_R21,
+ PERF_REG_MIPS_R22,
+ PERF_REG_MIPS_R23,
+ PERF_REG_MIPS_R24,
+ PERF_REG_MIPS_R25,
+ PERF_REG_MIPS_R26,
+ PERF_REG_MIPS_R27,
+ PERF_REG_MIPS_R28,
+ PERF_REG_MIPS_R29,
+ PERF_REG_MIPS_R30,
+ PERF_REG_MIPS_R31,
+ PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
+};
+#endif /* _ASM_MIPS_PERF_REGS_H */
diff --git a/tools/arch/parisc/include/uapi/asm/mman.h b/tools/arch/parisc/include/uapi/asm/mman.h
index f9fd1325f5bd..4cc88a642e10 100644
--- a/tools/arch/parisc/include/uapi/asm/mman.h
+++ b/tools/arch/parisc/include/uapi/asm/mman.h
@@ -1,20 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H
#define TOOLS_ARCH_PARISC_UAPI_ASM_MMAN_FIX_H
-#define MADV_DODUMP 70
+#define MADV_DODUMP 17
#define MADV_DOFORK 11
-#define MADV_DONTDUMP 69
+#define MADV_DONTDUMP 16
#define MADV_DONTFORK 10
#define MADV_DONTNEED 4
#define MADV_FREE 8
-#define MADV_HUGEPAGE 67
-#define MADV_MERGEABLE 65
-#define MADV_NOHUGEPAGE 68
+#define MADV_HUGEPAGE 14
+#define MADV_MERGEABLE 12
+#define MADV_NOHUGEPAGE 15
#define MADV_NORMAL 0
#define MADV_RANDOM 1
#define MADV_REMOVE 9
#define MADV_SEQUENTIAL 2
-#define MADV_UNMERGEABLE 66
+#define MADV_UNMERGEABLE 13
#define MADV_WILLNEED 3
#define MAP_ANONYMOUS 0x10
#define MAP_DENYWRITE 0x0800
@@ -39,6 +39,5 @@
#define MADV_SOFT_OFFLINE 101
/* MAP_32BIT is undefined on parisc, fix it for perf */
#define MAP_32BIT 0
-/* MAP_UNINITIALIZED is undefined on parisc, fix it for perf */
#define MAP_UNINITIALIZED 0
#endif
diff --git a/tools/arch/powerpc/include/uapi/asm/errno.h b/tools/arch/powerpc/include/uapi/asm/errno.h
index cc79856896a1..4ba87de32be0 100644
--- a/tools/arch/powerpc/include/uapi/asm/errno.h
+++ b/tools/arch/powerpc/include/uapi/asm/errno.h
@@ -2,6 +2,7 @@
#ifndef _ASM_POWERPC_ERRNO_H
#define _ASM_POWERPC_ERRNO_H
+#undef EDEADLOCK
#include <asm-generic/errno.h>
#undef EDEADLOCK
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 264e266a85bf..9f18fa090f1f 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -640,6 +640,13 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0)
+/* POWER10 registers */
+#define KVM_REG_PPC_MMCR3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc1)
+#define KVM_REG_PPC_SIER2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc2)
+#define KVM_REG_PPC_SIER3 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc3)
+#define KVM_REG_PPC_DAWR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc4)
+#define KVM_REG_PPC_DAWRX1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc5)
+
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
*/
diff --git a/tools/arch/powerpc/include/uapi/asm/perf_regs.h b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
index f599064dd8dc..749a2e3af89e 100644
--- a/tools/arch/powerpc/include/uapi/asm/perf_regs.h
+++ b/tools/arch/powerpc/include/uapi/asm/perf_regs.h
@@ -48,6 +48,48 @@ enum perf_event_powerpc_regs {
PERF_REG_POWERPC_DSISR,
PERF_REG_POWERPC_SIER,
PERF_REG_POWERPC_MMCRA,
- PERF_REG_POWERPC_MAX,
+ /* Extended registers */
+ PERF_REG_POWERPC_MMCR0,
+ PERF_REG_POWERPC_MMCR1,
+ PERF_REG_POWERPC_MMCR2,
+ PERF_REG_POWERPC_MMCR3,
+ PERF_REG_POWERPC_SIER2,
+ PERF_REG_POWERPC_SIER3,
+ PERF_REG_POWERPC_PMC1,
+ PERF_REG_POWERPC_PMC2,
+ PERF_REG_POWERPC_PMC3,
+ PERF_REG_POWERPC_PMC4,
+ PERF_REG_POWERPC_PMC5,
+ PERF_REG_POWERPC_PMC6,
+ PERF_REG_POWERPC_SDAR,
+ PERF_REG_POWERPC_SIAR,
+ /* Max mask value for interrupt regs w/o extended regs */
+ PERF_REG_POWERPC_MAX = PERF_REG_POWERPC_MMCRA + 1,
+ /* Max mask value for interrupt regs including extended regs */
+ PERF_REG_EXTENDED_MAX = PERF_REG_POWERPC_SIAR + 1,
};
+
+#define PERF_REG_PMU_MASK ((1ULL << PERF_REG_POWERPC_MAX) - 1)
+
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_300
+ * includes 11 SPRS from MMCR0 to SIAR excluding the
+ * unsupported SPRS MMCR3, SIER2 and SIER3.
+ */
+#define PERF_REG_PMU_MASK_300 \
+ ((1ULL << PERF_REG_POWERPC_MMCR0) | (1ULL << PERF_REG_POWERPC_MMCR1) | \
+ (1ULL << PERF_REG_POWERPC_MMCR2) | (1ULL << PERF_REG_POWERPC_PMC1) | \
+ (1ULL << PERF_REG_POWERPC_PMC2) | (1ULL << PERF_REG_POWERPC_PMC3) | \
+ (1ULL << PERF_REG_POWERPC_PMC4) | (1ULL << PERF_REG_POWERPC_PMC5) | \
+ (1ULL << PERF_REG_POWERPC_PMC6) | (1ULL << PERF_REG_POWERPC_SDAR) | \
+ (1ULL << PERF_REG_POWERPC_SIAR))
+
+/*
+ * PERF_REG_EXTENDED_MASK value for CPU_FTR_ARCH_31
+ * includes 14 SPRs from MMCR0 to SIAR.
+ */
+#define PERF_REG_PMU_MASK_31 \
+ (PERF_REG_PMU_MASK_300 | (1ULL << PERF_REG_POWERPC_MMCR3) | \
+ (1ULL << PERF_REG_POWERPC_SIER2) | (1ULL << PERF_REG_POWERPC_SIER3))
+
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
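
The two masks differ only in the three ISA v3.1 SPRs, so deciding whether an extended register is valid on a given platform is a plain bit test. A hedged sketch (the helper name is illustrative, not part of the patch):

	/* Does a given platform mask advertise a particular extended register? */
	static inline int reg_in_mask(unsigned long long mask, int reg)
	{
		return (mask & (1ULL << reg)) != 0;
	}

	/*
	 * reg_in_mask(PERF_REG_PMU_MASK_31, PERF_REG_POWERPC_MMCR3) is true,
	 * reg_in_mask(PERF_REG_PMU_MASK_300, PERF_REG_POWERPC_MMCR3) is false.
	 */
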
diff --git a/tools/arch/riscv/include/uapi/asm/unistd.h b/tools/arch/riscv/include/uapi/asm/unistd.h
index 0e2eeeb1fd27..f506cca520b0 100644
--- a/tools/arch/riscv/include/uapi/asm/unistd.h
+++ b/tools/arch/riscv/include/uapi/asm/unistd.h
@@ -12,7 +12,7 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifdef __LP64__
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
deleted file mode 100644
index 0a8e37a519f2..000000000000
--- a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
-#define _UAPI__ASM_BPF_PERF_EVENT_H__
-
-#include "ptrace.h"
-
-typedef user_pt_regs bpf_user_pt_regs_t;
-
-#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h
index 436ec7636927..a73cf01a1606 100644
--- a/tools/arch/s390/include/uapi/asm/kvm.h
+++ b/tools/arch/s390/include/uapi/asm/kvm.h
@@ -74,6 +74,7 @@ struct kvm_s390_io_adapter_req {
#define KVM_S390_VM_CRYPTO 2
#define KVM_S390_VM_CPU_MODEL 3
#define KVM_S390_VM_MIGRATION 4
+#define KVM_S390_VM_CPU_TOPOLOGY 5
/* kvm attributes for mem_ctrl */
#define KVM_S390_VM_MEM_ENABLE_CMMA 0
@@ -231,11 +232,13 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_GSCB (1UL << 9)
#define KVM_SYNC_BPBC (1UL << 10)
#define KVM_SYNC_ETOKEN (1UL << 11)
+#define KVM_SYNC_DIAG318 (1UL << 12)
#define KVM_SYNC_S390_VALID_FIELDS \
(KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \
KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \
- KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN)
+ KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN | \
+ KVM_SYNC_DIAG318)
/* length and alignment of the sdnx as a power of two */
#define SDNXC 8
@@ -264,7 +267,8 @@ struct kvm_sync_regs {
__u8 reserved2 : 7;
__u8 padding1[51]; /* riccb needs to be 64byte aligned */
__u8 riccb[64]; /* runtime instrumentation controls block */
- __u8 padding2[192]; /* sdnx needs to be 256byte aligned */
+ __u64 diag318; /* diagnose 0x318 info */
+ __u8 padding2[184]; /* sdnx needs to be 256byte aligned */
union {
__u8 sdnx[SDNXL]; /* state description annex */
struct {
diff --git a/tools/arch/s390/include/uapi/asm/ptrace.h b/tools/arch/s390/include/uapi/asm/ptrace.h
deleted file mode 100644
index 543dd70e12c8..000000000000
--- a/tools/arch/s390/include/uapi/asm/ptrace.h
+++ /dev/null
@@ -1,457 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * S390 version
- * Copyright IBM Corp. 1999, 2000
- * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
- */
-
-#ifndef _UAPI_S390_PTRACE_H
-#define _UAPI_S390_PTRACE_H
-
-/*
- * Offsets in the user_regs_struct. They are used for the ptrace
- * system call and in entry.S
- */
-#ifndef __s390x__
-
-#define PT_PSWMASK 0x00
-#define PT_PSWADDR 0x04
-#define PT_GPR0 0x08
-#define PT_GPR1 0x0C
-#define PT_GPR2 0x10
-#define PT_GPR3 0x14
-#define PT_GPR4 0x18
-#define PT_GPR5 0x1C
-#define PT_GPR6 0x20
-#define PT_GPR7 0x24
-#define PT_GPR8 0x28
-#define PT_GPR9 0x2C
-#define PT_GPR10 0x30
-#define PT_GPR11 0x34
-#define PT_GPR12 0x38
-#define PT_GPR13 0x3C
-#define PT_GPR14 0x40
-#define PT_GPR15 0x44
-#define PT_ACR0 0x48
-#define PT_ACR1 0x4C
-#define PT_ACR2 0x50
-#define PT_ACR3 0x54
-#define PT_ACR4 0x58
-#define PT_ACR5 0x5C
-#define PT_ACR6 0x60
-#define PT_ACR7 0x64
-#define PT_ACR8 0x68
-#define PT_ACR9 0x6C
-#define PT_ACR10 0x70
-#define PT_ACR11 0x74
-#define PT_ACR12 0x78
-#define PT_ACR13 0x7C
-#define PT_ACR14 0x80
-#define PT_ACR15 0x84
-#define PT_ORIGGPR2 0x88
-#define PT_FPC 0x90
-/*
- * A nasty fact of life that the ptrace api
- * only supports passing of longs.
- */
-#define PT_FPR0_HI 0x98
-#define PT_FPR0_LO 0x9C
-#define PT_FPR1_HI 0xA0
-#define PT_FPR1_LO 0xA4
-#define PT_FPR2_HI 0xA8
-#define PT_FPR2_LO 0xAC
-#define PT_FPR3_HI 0xB0
-#define PT_FPR3_LO 0xB4
-#define PT_FPR4_HI 0xB8
-#define PT_FPR4_LO 0xBC
-#define PT_FPR5_HI 0xC0
-#define PT_FPR5_LO 0xC4
-#define PT_FPR6_HI 0xC8
-#define PT_FPR6_LO 0xCC
-#define PT_FPR7_HI 0xD0
-#define PT_FPR7_LO 0xD4
-#define PT_FPR8_HI 0xD8
-#define PT_FPR8_LO 0XDC
-#define PT_FPR9_HI 0xE0
-#define PT_FPR9_LO 0xE4
-#define PT_FPR10_HI 0xE8
-#define PT_FPR10_LO 0xEC
-#define PT_FPR11_HI 0xF0
-#define PT_FPR11_LO 0xF4
-#define PT_FPR12_HI 0xF8
-#define PT_FPR12_LO 0xFC
-#define PT_FPR13_HI 0x100
-#define PT_FPR13_LO 0x104
-#define PT_FPR14_HI 0x108
-#define PT_FPR14_LO 0x10C
-#define PT_FPR15_HI 0x110
-#define PT_FPR15_LO 0x114
-#define PT_CR_9 0x118
-#define PT_CR_10 0x11C
-#define PT_CR_11 0x120
-#define PT_IEEE_IP 0x13C
-#define PT_LASTOFF PT_IEEE_IP
-#define PT_ENDREGS 0x140-1
-
-#define GPR_SIZE 4
-#define CR_SIZE 4
-
-#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
-
-#else /* __s390x__ */
-
-#define PT_PSWMASK 0x00
-#define PT_PSWADDR 0x08
-#define PT_GPR0 0x10
-#define PT_GPR1 0x18
-#define PT_GPR2 0x20
-#define PT_GPR3 0x28
-#define PT_GPR4 0x30
-#define PT_GPR5 0x38
-#define PT_GPR6 0x40
-#define PT_GPR7 0x48
-#define PT_GPR8 0x50
-#define PT_GPR9 0x58
-#define PT_GPR10 0x60
-#define PT_GPR11 0x68
-#define PT_GPR12 0x70
-#define PT_GPR13 0x78
-#define PT_GPR14 0x80
-#define PT_GPR15 0x88
-#define PT_ACR0 0x90
-#define PT_ACR1 0x94
-#define PT_ACR2 0x98
-#define PT_ACR3 0x9C
-#define PT_ACR4 0xA0
-#define PT_ACR5 0xA4
-#define PT_ACR6 0xA8
-#define PT_ACR7 0xAC
-#define PT_ACR8 0xB0
-#define PT_ACR9 0xB4
-#define PT_ACR10 0xB8
-#define PT_ACR11 0xBC
-#define PT_ACR12 0xC0
-#define PT_ACR13 0xC4
-#define PT_ACR14 0xC8
-#define PT_ACR15 0xCC
-#define PT_ORIGGPR2 0xD0
-#define PT_FPC 0xD8
-#define PT_FPR0 0xE0
-#define PT_FPR1 0xE8
-#define PT_FPR2 0xF0
-#define PT_FPR3 0xF8
-#define PT_FPR4 0x100
-#define PT_FPR5 0x108
-#define PT_FPR6 0x110
-#define PT_FPR7 0x118
-#define PT_FPR8 0x120
-#define PT_FPR9 0x128
-#define PT_FPR10 0x130
-#define PT_FPR11 0x138
-#define PT_FPR12 0x140
-#define PT_FPR13 0x148
-#define PT_FPR14 0x150
-#define PT_FPR15 0x158
-#define PT_CR_9 0x160
-#define PT_CR_10 0x168
-#define PT_CR_11 0x170
-#define PT_IEEE_IP 0x1A8
-#define PT_LASTOFF PT_IEEE_IP
-#define PT_ENDREGS 0x1B0-1
-
-#define GPR_SIZE 8
-#define CR_SIZE 8
-
-#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
-
-#endif /* __s390x__ */
-
-#define NUM_GPRS 16
-#define NUM_FPRS 16
-#define NUM_CRS 16
-#define NUM_ACRS 16
-
-#define NUM_CR_WORDS 3
-
-#define FPR_SIZE 8
-#define FPC_SIZE 4
-#define FPC_PAD_SIZE 4 /* gcc insists on aligning the fpregs */
-#define ACR_SIZE 4
-
-
-#define PTRACE_OLDSETOPTIONS 21
-
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <linux/types.h>
-
-typedef union {
- float f;
- double d;
- __u64 ui;
- struct
- {
- __u32 hi;
- __u32 lo;
- } fp;
-} freg_t;
-
-typedef struct {
- __u32 fpc;
- __u32 pad;
- freg_t fprs[NUM_FPRS];
-} s390_fp_regs;
-
-#define FPC_EXCEPTION_MASK 0xF8000000
-#define FPC_FLAGS_MASK 0x00F80000
-#define FPC_DXC_MASK 0x0000FF00
-#define FPC_RM_MASK 0x00000003
-
-/* this typedef defines how a Program Status Word looks like */
-typedef struct {
- unsigned long mask;
- unsigned long addr;
-} __attribute__ ((aligned(8))) psw_t;
-
-#ifndef __s390x__
-
-#define PSW_MASK_PER 0x40000000UL
-#define PSW_MASK_DAT 0x04000000UL
-#define PSW_MASK_IO 0x02000000UL
-#define PSW_MASK_EXT 0x01000000UL
-#define PSW_MASK_KEY 0x00F00000UL
-#define PSW_MASK_BASE 0x00080000UL /* always one */
-#define PSW_MASK_MCHECK 0x00040000UL
-#define PSW_MASK_WAIT 0x00020000UL
-#define PSW_MASK_PSTATE 0x00010000UL
-#define PSW_MASK_ASC 0x0000C000UL
-#define PSW_MASK_CC 0x00003000UL
-#define PSW_MASK_PM 0x00000F00UL
-#define PSW_MASK_RI 0x00000000UL
-#define PSW_MASK_EA 0x00000000UL
-#define PSW_MASK_BA 0x00000000UL
-
-#define PSW_MASK_USER 0x0000FF00UL
-
-#define PSW_ADDR_AMODE 0x80000000UL
-#define PSW_ADDR_INSN 0x7FFFFFFFUL
-
-#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20)
-
-#define PSW_ASC_PRIMARY 0x00000000UL
-#define PSW_ASC_ACCREG 0x00004000UL
-#define PSW_ASC_SECONDARY 0x00008000UL
-#define PSW_ASC_HOME 0x0000C000UL
-
-#else /* __s390x__ */
-
-#define PSW_MASK_PER 0x4000000000000000UL
-#define PSW_MASK_DAT 0x0400000000000000UL
-#define PSW_MASK_IO 0x0200000000000000UL
-#define PSW_MASK_EXT 0x0100000000000000UL
-#define PSW_MASK_BASE 0x0000000000000000UL
-#define PSW_MASK_KEY 0x00F0000000000000UL
-#define PSW_MASK_MCHECK 0x0004000000000000UL
-#define PSW_MASK_WAIT 0x0002000000000000UL
-#define PSW_MASK_PSTATE 0x0001000000000000UL
-#define PSW_MASK_ASC 0x0000C00000000000UL
-#define PSW_MASK_CC 0x0000300000000000UL
-#define PSW_MASK_PM 0x00000F0000000000UL
-#define PSW_MASK_RI 0x0000008000000000UL
-#define PSW_MASK_EA 0x0000000100000000UL
-#define PSW_MASK_BA 0x0000000080000000UL
-
-#define PSW_MASK_USER 0x0000FF0180000000UL
-
-#define PSW_ADDR_AMODE 0x0000000000000000UL
-#define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL
-
-#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52)
-
-#define PSW_ASC_PRIMARY 0x0000000000000000UL
-#define PSW_ASC_ACCREG 0x0000400000000000UL
-#define PSW_ASC_SECONDARY 0x0000800000000000UL
-#define PSW_ASC_HOME 0x0000C00000000000UL
-
-#endif /* __s390x__ */
-
-
-/*
- * The s390_regs structure is used to define the elf_gregset_t.
- */
-typedef struct {
- psw_t psw;
- unsigned long gprs[NUM_GPRS];
- unsigned int acrs[NUM_ACRS];
- unsigned long orig_gpr2;
-} s390_regs;
-
-/*
- * The user_pt_regs structure exports the beginning of
- * the in-kernel pt_regs structure to user space.
- */
-typedef struct {
- unsigned long args[1];
- psw_t psw;
- unsigned long gprs[NUM_GPRS];
-} user_pt_regs;
-
-/*
- * Now for the user space program event recording (trace) definitions.
- * The following structures are used only for the ptrace interface, don't
- * touch or even look at it if you don't want to modify the user-space
- * ptrace interface. In particular stay away from it for in-kernel PER.
- */
-typedef struct {
- unsigned long cr[NUM_CR_WORDS];
-} per_cr_words;
-
-#define PER_EM_MASK 0xE8000000UL
-
-typedef struct {
-#ifdef __s390x__
- unsigned : 32;
-#endif /* __s390x__ */
- unsigned em_branching : 1;
- unsigned em_instruction_fetch : 1;
- /*
- * Switching on storage alteration automatically fixes
- * the storage alteration event bit in the users std.
- */
- unsigned em_storage_alteration : 1;
- unsigned em_gpr_alt_unused : 1;
- unsigned em_store_real_address : 1;
- unsigned : 3;
- unsigned branch_addr_ctl : 1;
- unsigned : 1;
- unsigned storage_alt_space_ctl : 1;
- unsigned : 21;
- unsigned long starting_addr;
- unsigned long ending_addr;
-} per_cr_bits;
-
-typedef struct {
- unsigned short perc_atmid;
- unsigned long address;
- unsigned char access_id;
-} per_lowcore_words;
-
-typedef struct {
- unsigned perc_branching : 1;
- unsigned perc_instruction_fetch : 1;
- unsigned perc_storage_alteration : 1;
- unsigned perc_gpr_alt_unused : 1;
- unsigned perc_store_real_address : 1;
- unsigned : 3;
- unsigned atmid_psw_bit_31 : 1;
- unsigned atmid_validity_bit : 1;
- unsigned atmid_psw_bit_32 : 1;
- unsigned atmid_psw_bit_5 : 1;
- unsigned atmid_psw_bit_16 : 1;
- unsigned atmid_psw_bit_17 : 1;
- unsigned si : 2;
- unsigned long address;
- unsigned : 4;
- unsigned access_id : 4;
-} per_lowcore_bits;
-
-typedef struct {
- union {
- per_cr_words words;
- per_cr_bits bits;
- } control_regs;
- /*
- * The single_step and instruction_fetch bits are obsolete,
- * the kernel always sets them to zero. To enable single
- * stepping use ptrace(PTRACE_SINGLESTEP) instead.
- */
- unsigned single_step : 1;
- unsigned instruction_fetch : 1;
- unsigned : 30;
- /*
- * These addresses are copied into cr10 & cr11 if single
- * stepping is switched off
- */
- unsigned long starting_addr;
- unsigned long ending_addr;
- union {
- per_lowcore_words words;
- per_lowcore_bits bits;
- } lowcore;
-} per_struct;
-
-typedef struct {
- unsigned int len;
- unsigned long kernel_addr;
- unsigned long process_addr;
-} ptrace_area;
-
-/*
- * S/390 specific non posix ptrace requests. I chose unusual values so
- * they are unlikely to clash with future ptrace definitions.
- */
-#define PTRACE_PEEKUSR_AREA 0x5000
-#define PTRACE_POKEUSR_AREA 0x5001
-#define PTRACE_PEEKTEXT_AREA 0x5002
-#define PTRACE_PEEKDATA_AREA 0x5003
-#define PTRACE_POKETEXT_AREA 0x5004
-#define PTRACE_POKEDATA_AREA 0x5005
-#define PTRACE_GET_LAST_BREAK 0x5006
-#define PTRACE_PEEK_SYSTEM_CALL 0x5007
-#define PTRACE_POKE_SYSTEM_CALL 0x5008
-#define PTRACE_ENABLE_TE 0x5009
-#define PTRACE_DISABLE_TE 0x5010
-#define PTRACE_TE_ABORT_RAND 0x5011
-
-/*
- * The numbers chosen here are somewhat arbitrary but absolutely MUST
- * not overlap with any of the number assigned in <linux/ptrace.h>.
- */
-#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
-
-/*
- * PT_PROT definition is loosely based on hppa bsd definition in
- * gdb/hppab-nat.c
- */
-#define PTRACE_PROT 21
-
-typedef enum {
- ptprot_set_access_watchpoint,
- ptprot_set_write_watchpoint,
- ptprot_disable_watchpoint
-} ptprot_flags;
-
-typedef struct {
- unsigned long lowaddr;
- unsigned long hiaddr;
- ptprot_flags prot;
-} ptprot_area;
-
-/* Sequence of bytes for breakpoint illegal instruction. */
-#define S390_BREAKPOINT {0x0,0x1}
-#define S390_BREAKPOINT_U16 ((__u16)0x0001)
-#define S390_SYSCALL_OPCODE ((__u16)0x0a00)
-#define S390_SYSCALL_SIZE 2
-
-/*
- * The user_regs_struct defines the way the user registers are
- * store on the stack for signal handling.
- */
-struct user_regs_struct {
- psw_t psw;
- unsigned long gprs[NUM_GPRS];
- unsigned int acrs[NUM_ACRS];
- unsigned long orig_gpr2;
- s390_fp_regs fp_regs;
- /*
- * These per registers are in here so that gdb can modify them
- * itself as there is no "official" ptrace interface for hardware
- * watchpoints. This is the way intel does it.
- */
- per_struct per_info;
- unsigned long ieee_instruction_pointer; /* obsolete, always 0 */
-};
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _UAPI_S390_PTRACE_H */
diff --git a/tools/arch/s390/include/uapi/asm/sie.h b/tools/arch/s390/include/uapi/asm/sie.h
index 6ca1e68d7103..ede318653c87 100644
--- a/tools/arch/s390/include/uapi/asm/sie.h
+++ b/tools/arch/s390/include/uapi/asm/sie.h
@@ -29,7 +29,7 @@
{ 0x13, "SIGP conditional emergency signal" }, \
{ 0x15, "SIGP sense running" }, \
{ 0x16, "SIGP set multithreading"}, \
- { 0x17, "SIGP store additional status ait address"}
+ { 0x17, "SIGP store additional status at address"}
#define icpt_prog_codes \
{ 0x0001, "Prog Operation" }, \
diff --git a/tools/arch/sh/include/asm/barrier.h b/tools/arch/sh/include/asm/barrier.h
index bde5221af282..7eaea27cdd67 100644
--- a/tools/arch/sh/include/asm/barrier.h
+++ b/tools/arch/sh/include/asm/barrier.h
@@ -22,7 +22,7 @@
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
-#if defined(__SH4A__) || defined(__SH5__)
+#if defined(__SH4A__)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
diff --git a/tools/arch/x86/include/asm/amd-ibs.h b/tools/arch/x86/include/asm/amd-ibs.h
new file mode 100644
index 000000000000..93807b437e4d
--- /dev/null
+++ b/tools/arch/x86/include/asm/amd-ibs.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * From PPR Vol 1 for AMD Family 19h Model 01h B1
+ * 55898 Rev 0.35 - Feb 5, 2021
+ */
+
+#include "msr-index.h"
+
+/* IBS_OP_DATA2 DataSrc */
+#define IBS_DATA_SRC_LOC_CACHE 2
+#define IBS_DATA_SRC_DRAM 3
+#define IBS_DATA_SRC_REM_CACHE 4
+#define IBS_DATA_SRC_IO 7
+
+/* IBS_OP_DATA2 DataSrc Extension */
+#define IBS_DATA_SRC_EXT_LOC_CACHE 1
+#define IBS_DATA_SRC_EXT_NEAR_CCX_CACHE 2
+#define IBS_DATA_SRC_EXT_DRAM 3
+#define IBS_DATA_SRC_EXT_FAR_CCX_CACHE 5
+#define IBS_DATA_SRC_EXT_PMEM 6
+#define IBS_DATA_SRC_EXT_IO 7
+#define IBS_DATA_SRC_EXT_EXT_MEM 8
+#define IBS_DATA_SRC_EXT_PEER_AGENT_MEM 12
+
+/*
+ * IBS Hardware MSRs
+ */
+
+/* MSR 0xc0011030: IBS Fetch Control */
+union ibs_fetch_ctl {
+ __u64 val;
+ struct {
+ __u64 fetch_maxcnt:16,/* 0-15: instruction fetch max. count */
+ fetch_cnt:16, /* 16-31: instruction fetch count */
+ fetch_lat:16, /* 32-47: instruction fetch latency */
+ fetch_en:1, /* 48: instruction fetch enable */
+ fetch_val:1, /* 49: instruction fetch valid */
+ fetch_comp:1, /* 50: instruction fetch complete */
+ ic_miss:1, /* 51: i-cache miss */
+ phy_addr_valid:1,/* 52: physical address valid */
+ l1tlb_pgsz:2, /* 53-54: i-cache L1TLB page size
+ * (needs IbsPhyAddrValid) */
+ l1tlb_miss:1, /* 55: i-cache fetch missed in L1TLB */
+ l2tlb_miss:1, /* 56: i-cache fetch missed in L2TLB */
+ rand_en:1, /* 57: random tagging enable */
+ fetch_l2_miss:1,/* 58: L2 miss for sampled fetch
+ * (needs IbsFetchComp) */
+ l3_miss_only:1, /* 59: Collect L3 miss samples only */
+ fetch_oc_miss:1,/* 60: Op cache miss for the sampled fetch */
+ fetch_l3_miss:1,/* 61: L3 cache miss for the sampled fetch */
+ reserved:2; /* 62-63: reserved */
+ };
+};
+
+/* MSR 0xc0011033: IBS Execution Control */
+union ibs_op_ctl {
+ __u64 val;
+ struct {
+ __u64 opmaxcnt:16, /* 0-15: periodic op max. count */
+ l3_miss_only:1, /* 16: Collect L3 miss samples only */
+ op_en:1, /* 17: op sampling enable */
+ op_val:1, /* 18: op sample valid */
+ cnt_ctl:1, /* 19: periodic op counter control */
+ opmaxcnt_ext:7, /* 20-26: upper 7 bits of periodic op maximum count */
+ reserved0:5, /* 27-31: reserved */
+ opcurcnt:27, /* 32-58: periodic op counter current count */
+ reserved1:5; /* 59-63: reserved */
+ };
+};
+
+/* MSR 0xc0011035: IBS Op Data 1 */
+union ibs_op_data {
+ __u64 val;
+ struct {
+ __u64 comp_to_ret_ctr:16, /* 0-15: op completion to retire count */
+ tag_to_ret_ctr:16, /* 16-31: op tag to retire count */

+ reserved1:2, /* 32-33: reserved */
+ op_return:1, /* 34: return op */
+ op_brn_taken:1, /* 35: taken branch op */
+ op_brn_misp:1, /* 36: mispredicted branch op */
+ op_brn_ret:1, /* 37: branch op retired */
+ op_rip_invalid:1, /* 38: RIP is invalid */
+ op_brn_fuse:1, /* 39: fused branch op */
+ op_microcode:1, /* 40: microcode op */
+ reserved2:23; /* 41-63: reserved */
+ };
+};
+
+/* MSR 0xc0011036: IBS Op Data 2 */
+union ibs_op_data2 {
+ __u64 val;
+ struct {
+ __u64 data_src_lo:3, /* 0-2: data source low */
+ reserved0:1, /* 3: reserved */
+ rmt_node:1, /* 4: destination node */
+ cache_hit_st:1, /* 5: cache hit state */
+ data_src_hi:2, /* 6-7: data source high */
+ reserved1:56; /* 8-63: reserved */
+ };
+};
+
+/* MSR 0xc0011037: IBS Op Data 3 */
+union ibs_op_data3 {
+ __u64 val;
+ struct {
+ __u64 ld_op:1, /* 0: load op */
+ st_op:1, /* 1: store op */
+ dc_l1tlb_miss:1, /* 2: data cache L1TLB miss */
+ dc_l2tlb_miss:1, /* 3: data cache L2TLB miss */
+ dc_l1tlb_hit_2m:1, /* 4: data cache L1TLB hit in 2M page */
+ dc_l1tlb_hit_1g:1, /* 5: data cache L1TLB hit in 1G page */
+ dc_l2tlb_hit_2m:1, /* 6: data cache L2TLB hit in 2M page */
+ dc_miss:1, /* 7: data cache miss */
+ dc_mis_acc:1, /* 8: misaligned access */
+ reserved:4, /* 9-12: reserved */
+ dc_wc_mem_acc:1, /* 13: write combining memory access */
+ dc_uc_mem_acc:1, /* 14: uncacheable memory access */
+ dc_locked_op:1, /* 15: locked operation */
+ dc_miss_no_mab_alloc:1, /* 16: DC miss with no MAB allocated */
+ dc_lin_addr_valid:1, /* 17: data cache linear address valid */
+ dc_phy_addr_valid:1, /* 18: data cache physical address valid */
+ dc_l2_tlb_hit_1g:1, /* 19: data cache L2 hit in 1GB page */
+ l2_miss:1, /* 20: L2 cache miss */
+ sw_pf:1, /* 21: software prefetch */
+ op_mem_width:4, /* 22-25: load/store size in bytes */
+ op_dc_miss_open_mem_reqs:6, /* 26-31: outstanding mem reqs on DC fill */
+ dc_miss_lat:16, /* 32-47: data cache miss latency */
+ tlb_refill_lat:16; /* 48-63: L1 TLB refill latency */
+ };
+};
+
+/* MSR 0xc001103c: IBS Fetch Control Extended */
+union ic_ibs_extd_ctl {
+ __u64 val;
+ struct {
+ __u64 itlb_refill_lat:16, /* 0-15: ITLB Refill latency for sampled fetch */
+ reserved:48; /* 16-63: reserved */
+ };
+};
+
+/*
+ * IBS driver related
+ */
+
+struct perf_ibs_data {
+ u32 size;
+ union {
+ u32 data[0]; /* data buffer starts here */
+ u32 caps;
+ };
+ u64 regs[MSR_AMD64_IBS_REG_COUNT_MAX];
+};
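
Because the unions above simply overlay bit-fields on the raw MSR image, decoding an IBS sample is one assignment to .val followed by field reads. A small sketch, assuming the raw IBS_OP_DATA3 value has already been pulled out of a perf sample (types such as __u64 come from the header itself):

	#include <stdio.h>

	static void print_op_data3(__u64 raw)
	{
		union ibs_op_data3 op_data3 = { .val = raw };

		printf("ld=%u st=%u dc-miss=%u dc-miss-lat=%u\n",
		       (unsigned int)op_data3.ld_op,
		       (unsigned int)op_data3.st_op,
		       (unsigned int)op_data3.dc_miss,
		       (unsigned int)op_data3.dc_miss_lat);
	}
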
diff --git a/tools/arch/x86/include/asm/asm.h b/tools/arch/x86/include/asm/asm.h
new file mode 100644
index 000000000000..3ad3da9a7d97
--- /dev/null
+++ b/tools/arch/x86/include/asm/asm.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_ASM_H
+#define _ASM_X86_ASM_H
+
+#ifdef __ASSEMBLY__
+# define __ASM_FORM(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_RAW(x, ...) x,## __VA_ARGS__
+# define __ASM_FORM_COMMA(x, ...) x,## __VA_ARGS__,
+#else
+#include <linux/stringify.h>
+# define __ASM_FORM(x, ...) " " __stringify(x,##__VA_ARGS__) " "
+# define __ASM_FORM_RAW(x, ...) __stringify(x,##__VA_ARGS__)
+# define __ASM_FORM_COMMA(x, ...) " " __stringify(x,##__VA_ARGS__) ","
+#endif
+
+#define _ASM_BYTES(x, ...) __ASM_FORM(.byte x,##__VA_ARGS__ ;)
+
+#ifndef __x86_64__
+/* 32 bit */
+# define __ASM_SEL(a,b) __ASM_FORM(a)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
+#else
+/* 64 bit */
+# define __ASM_SEL(a,b) __ASM_FORM(b)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
+#endif
+
+#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
+ inst##q##__VA_ARGS__)
+#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
+
+#define _ASM_PTR __ASM_SEL(.long, .quad)
+#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
+
+#define _ASM_MOV __ASM_SIZE(mov)
+#define _ASM_INC __ASM_SIZE(inc)
+#define _ASM_DEC __ASM_SIZE(dec)
+#define _ASM_ADD __ASM_SIZE(add)
+#define _ASM_SUB __ASM_SIZE(sub)
+#define _ASM_XADD __ASM_SIZE(xadd)
+#define _ASM_MUL __ASM_SIZE(mul)
+
+#define _ASM_AX __ASM_REG(ax)
+#define _ASM_BX __ASM_REG(bx)
+#define _ASM_CX __ASM_REG(cx)
+#define _ASM_DX __ASM_REG(dx)
+#define _ASM_SP __ASM_REG(sp)
+#define _ASM_BP __ASM_REG(bp)
+#define _ASM_SI __ASM_REG(si)
+#define _ASM_DI __ASM_REG(di)
+
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1 _ASM_AX
+#define _ASM_ARG2 _ASM_DX
+#define _ASM_ARG3 _ASM_CX
+
+#define _ASM_ARG1L eax
+#define _ASM_ARG2L edx
+#define _ASM_ARG3L ecx
+
+#define _ASM_ARG1W ax
+#define _ASM_ARG2W dx
+#define _ASM_ARG3W cx
+
+#define _ASM_ARG1B al
+#define _ASM_ARG2B dl
+#define _ASM_ARG3B cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1 _ASM_DI
+#define _ASM_ARG2 _ASM_SI
+#define _ASM_ARG3 _ASM_DX
+#define _ASM_ARG4 _ASM_CX
+#define _ASM_ARG5 r8
+#define _ASM_ARG6 r9
+
+#define _ASM_ARG1Q rdi
+#define _ASM_ARG2Q rsi
+#define _ASM_ARG3Q rdx
+#define _ASM_ARG4Q rcx
+#define _ASM_ARG5Q r8
+#define _ASM_ARG6Q r9
+
+#define _ASM_ARG1L edi
+#define _ASM_ARG2L esi
+#define _ASM_ARG3L edx
+#define _ASM_ARG4L ecx
+#define _ASM_ARG5L r8d
+#define _ASM_ARG6L r9d
+
+#define _ASM_ARG1W di
+#define _ASM_ARG2W si
+#define _ASM_ARG3W dx
+#define _ASM_ARG4W cx
+#define _ASM_ARG5W r8w
+#define _ASM_ARG6W r9w
+
+#define _ASM_ARG1B dil
+#define _ASM_ARG2B sil
+#define _ASM_ARG3B dl
+#define _ASM_ARG4B cl
+#define _ASM_ARG5B r8b
+#define _ASM_ARG6B r9b
+
+#endif
+
+/*
+ * Macros to generate condition code outputs from inline assembly,
+ * The output operand must be type "bool".
+ */
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+# define CC_SET(c) "\n\t/* output condition code " #c "*/\n"
+# define CC_OUT(c) "=@cc" #c
+#else
+# define CC_SET(c) "\n\tset" #c " %[_cc_" #c "]\n"
+# define CC_OUT(c) [_cc_ ## c] "=qm"
+#endif
+
+#ifdef __KERNEL__
+
+/* Exception table entry */
+#ifdef __ASSEMBLY__
+# define _ASM_EXTABLE_HANDLE(from, to, handler) \
+ .pushsection "__ex_table","a" ; \
+ .balign 4 ; \
+ .long (from) - . ; \
+ .long (to) - . ; \
+ .long (handler) - . ; \
+ .popsection
+
+# define _ASM_EXTABLE(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+
+# define _ASM_EXTABLE_UA(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+
+# define _ASM_EXTABLE_CPY(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
+
+# define _ASM_EXTABLE_FAULT(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+
+# ifdef CONFIG_KPROBES
+# define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist","aw" ; \
+ _ASM_ALIGN ; \
+ _ASM_PTR (entry); \
+ .popsection
+# else
+# define _ASM_NOKPROBE(entry)
+# endif
+
+#else /* ! __ASSEMBLY__ */
+# define _EXPAND_EXTABLE_HANDLE(x) #x
+# define _ASM_EXTABLE_HANDLE(from, to, handler) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 4\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - .\n" \
+ " .long (" _EXPAND_EXTABLE_HANDLE(handler) ") - .\n" \
+ " .popsection\n"
+
+# define _ASM_EXTABLE(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
+
+# define _ASM_EXTABLE_UA(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_uaccess)
+
+# define _ASM_EXTABLE_CPY(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_copy)
+
+# define _ASM_EXTABLE_FAULT(from, to) \
+ _ASM_EXTABLE_HANDLE(from, to, ex_handler_fault)
+
+/* For C file, we already have NOKPROBE_SYMBOL macro */
+
+/*
+ * This output constraint should be used for any inline asm which has a "call"
+ * instruction. Otherwise the asm may be inserted before the frame pointer
+ * gets set up by the containing function. If you forget to do this, objtool
+ * may print a "call without frame pointer save/setup" warning.
+ */
+register unsigned long current_stack_pointer asm(_ASM_SP);
+#define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer)
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_X86_ASM_H */
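
CC_SET()/CC_OUT() let inline asm hand a condition flag straight to a bool output when the compiler supports flag outputs, falling back to an explicit setcc otherwise. A sketch of the usage pattern they are designed for, modelled on the kernel's bit-test helpers rather than code carried by this patch:

	#include <stdbool.h>

	static inline bool bit_is_set(const unsigned long *addr, long nr)
	{
		bool oldbit;

		asm volatile(__ASM_SIZE(bt) " %2, %1"
			     CC_SET(c)
			     : CC_OUT(c) (oldbit)
			     : "m" (*addr), "Ir" (nr) : "memory");
		return oldbit;
	}
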
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h
index 1f5e26aae9fc..365cf182df12 100644
--- a/tools/arch/x86/include/asm/atomic.h
+++ b/tools/arch/x86/include/asm/atomic.h
@@ -8,6 +8,7 @@
#define LOCK_PREFIX "\n\tlock; "
+#include <asm/asm.h>
#include <asm/cmpxchg.h>
/*
@@ -70,4 +71,14 @@ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return cmpxchg(&v->counter, old, new);
}
+static inline int test_and_set_bit(long nr, unsigned long *addr)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), *addr, "Ir", nr, "%0", "c");
+}
+
+static inline int test_and_clear_bit(long nr, unsigned long *addr)
+{
+ GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), *addr, "Ir", nr, "%0", "c");
+}
+
#endif /* _TOOLS_LINUX_ASM_X86_ATOMIC_H */
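
Both helpers return the previous value of the bit, so tools code can use them as a cheap run-once guard. A minimal usage sketch (nothing below is part of the patch):

	static unsigned long init_done[1];

	static void setup_once(void)
	{
		if (test_and_set_bit(0, init_done))
			return;	/* somebody else already ran the setup */

		/* ... one-time initialization ... */
	}
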
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index f3327cb56edf..b89005819cd5 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
/*
* Defines x86 CPU feature bits
*/
-#define NCAPINTS 19 /* N 32-bit words worth of info */
+#define NCAPINTS 21 /* N 32-bit words worth of info */
#define NBUGINTS 1 /* N 32-bit bug flags */
/*
@@ -84,7 +84,7 @@
/* CPU types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
+/* FREE, was #define X86_FEATURE_K7 ( 3*32+ 5) "" Athlon */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
@@ -96,6 +96,7 @@
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
@@ -107,6 +108,7 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
@@ -199,17 +201,17 @@
#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 7) /* Effectively INVPCID && CR4.PCIDE=1 */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_SME ( 7*32+10) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
-#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
-#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
+#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* AMD Performance Monitoring Version 2 */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
@@ -217,7 +219,7 @@
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_ZEN (7*32+28) /* "" CPU based on Zen microarchitecture */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
@@ -234,10 +236,14 @@
#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */
+#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */
+#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* Intel Trust Domain Extensions Guest */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
+#define X86_FEATURE_SGX ( 9*32+ 2) /* Software Guard Extensions */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
@@ -272,6 +278,7 @@
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
+#define X86_FEATURE_XFD (10*32+ 4) /* "" eXtended Feature Disabling */
/*
* Extended auxiliary flags: Linux defined - for features scattered in various
@@ -285,9 +292,29 @@
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
+#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
+#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
+#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* "" SGX EDECCSSA user leaf function */
+#define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */
+#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
+#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* "" CMPccXADD instructions */
+#define X86_FEATURE_LKGS (12*32+18) /* "" Load "kernel" (userspace) GS */
+#define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */
+#define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
@@ -299,9 +326,13 @@
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
+#define X86_FEATURE_AMD_PPIN (13*32+23) /* Protected Processor Inventory Number */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
@@ -314,6 +345,7 @@
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+#define X86_FEATURE_HFI (14*32+19) /* Hardware Feedback Interface */
/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
@@ -329,6 +361,9 @@
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
+#define X86_FEATURE_X2AVIC (15*32+18) /* Virtual x2apic */
+#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */
+#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
@@ -346,9 +381,12 @@
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
+#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* Bus Lock detect */
#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
+#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */
+#define X86_FEATURE_SGX_LC (16*32+30) /* Software Guard Extensions Launch Control */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
@@ -360,15 +398,35 @@
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
+#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
+#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
+#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
+#define X86_FEATURE_IBT (18*32+20) /* Indirect Branch Tracking */
+#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */
+#define X86_FEATURE_AVX512_FP16 (18*32+23) /* AVX512 FP16 */
+#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */
+#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* "" IA32_CORE_CAPABILITIES MSR */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
+#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
+#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
+#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
+#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+
/*
* BUG word(s)
*/
@@ -404,5 +462,10 @@
#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 4ea8584682f9..5dfa4fb76f4b 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -10,12 +10,6 @@
* cpu_feature_enabled().
*/
-#ifdef CONFIG_X86_SMAP
-# define DISABLE_SMAP 0
-#else
-# define DISABLE_SMAP (1<<(X86_FEATURE_SMAP & 31))
-#endif
-
#ifdef CONFIG_X86_UMIP
# define DISABLE_UMIP 0
#else
@@ -56,6 +50,55 @@
# define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31))
#endif
+#ifdef CONFIG_RETPOLINE
+# define DISABLE_RETPOLINE 0
+#else
+# define DISABLE_RETPOLINE ((1 << (X86_FEATURE_RETPOLINE & 31)) | \
+ (1 << (X86_FEATURE_RETPOLINE_LFENCE & 31)))
+#endif
+
+#ifdef CONFIG_RETHUNK
+# define DISABLE_RETHUNK 0
+#else
+# define DISABLE_RETHUNK (1 << (X86_FEATURE_RETHUNK & 31))
+#endif
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+# define DISABLE_UNRET 0
+#else
+# define DISABLE_UNRET (1 << (X86_FEATURE_UNRET & 31))
+#endif
+
+#ifdef CONFIG_CALL_DEPTH_TRACKING
+# define DISABLE_CALL_DEPTH_TRACKING 0
+#else
+# define DISABLE_CALL_DEPTH_TRACKING (1 << (X86_FEATURE_CALL_DEPTH & 31))
+#endif
+
+#ifdef CONFIG_INTEL_IOMMU_SVM
+# define DISABLE_ENQCMD 0
+#else
+# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31))
+#endif
+
+#ifdef CONFIG_X86_SGX
+# define DISABLE_SGX 0
+#else
+# define DISABLE_SGX (1 << (X86_FEATURE_SGX & 31))
+#endif
+
+#ifdef CONFIG_XEN_PV
+# define DISABLE_XENPV 0
+#else
+# define DISABLE_XENPV (1 << (X86_FEATURE_XENPV & 31))
+#endif
+
+#ifdef CONFIG_INTEL_TDX_GUEST
+# define DISABLE_TDX_GUEST 0
+#else
+# define DISABLE_TDX_GUEST (1 << (X86_FEATURE_TDX_GUEST & 31))
+#endif
+
/*
* Make sure to add features to the correct mask
*/
@@ -67,17 +110,21 @@
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
-#define DISABLED_MASK8 0
-#define DISABLED_MASK9 (DISABLE_SMAP)
+#define DISABLED_MASK8 (DISABLE_XENPV|DISABLE_TDX_GUEST)
+#define DISABLED_MASK9 (DISABLE_SGX)
#define DISABLED_MASK10 0
-#define DISABLED_MASK11 0
+#define DISABLED_MASK11 (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
+ DISABLE_CALL_DEPTH_TRACKING)
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
-#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP)
+#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \
+ DISABLE_ENQCMD)
#define DISABLED_MASK17 0
#define DISABLED_MASK18 0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+#define DISABLED_MASK19 0
+#define DISABLED_MASK20 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
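
Every block added here follows the same shape: when the Kconfig option is enabled the DISABLE_* constant is 0, otherwise it contributes the feature's bit (taken modulo 32) to the DISABLED_MASKn word for that feature's 32-bit leaf, which cpu_feature_enabled() can then fold in at compile time. Sketched for a hypothetical feature/option pair, purely to show the pattern:

	/* Hypothetical: compile X86_FEATURE_FOO out when CONFIG_X86_FOO is not set. */
	#ifdef CONFIG_X86_FOO
	# define DISABLE_FOO	0
	#else
	# define DISABLE_FOO	(1 << (X86_FEATURE_FOO & 31))
	#endif

	/* ...then OR DISABLE_FOO into the DISABLED_MASKn that covers FOO's word. */
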
diff --git a/tools/arch/x86/include/asm/inat.h b/tools/arch/x86/include/asm/inat.h
index 877827b7c2c3..a61051400311 100644
--- a/tools/arch/x86/include/asm/inat.h
+++ b/tools/arch/x86/include/asm/inat.h
@@ -6,7 +6,7 @@
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*/
-#include "inat_types.h"
+#include "inat_types.h" /* __ignore_sync_check__ */
/*
* Internal bits. Don't use bitmasks directly, because these bits are
diff --git a/tools/arch/x86/include/asm/insn.h b/tools/arch/x86/include/asm/insn.h
index 568854b14d0a..65c0d9ce1e29 100644
--- a/tools/arch/x86/include/asm/insn.h
+++ b/tools/arch/x86/include/asm/insn.h
@@ -7,8 +7,11 @@
* Copyright (C) IBM Corporation, 2009
*/
+#include <asm/byteorder.h>
/* insn_attr_t is defined in inat.h */
-#include "inat.h"
+#include "inat.h" /* __ignore_sync_check__ */
+
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
struct insn_field {
union {
@@ -20,6 +23,48 @@ struct insn_field {
unsigned char nbytes;
};
+static inline void insn_field_set(struct insn_field *p, insn_value_t v,
+ unsigned char n)
+{
+ p->value = v;
+ p->nbytes = n;
+}
+
+static inline void insn_set_byte(struct insn_field *p, unsigned char n,
+ insn_byte_t v)
+{
+ p->bytes[n] = v;
+}
+
+#else
+
+struct insn_field {
+ insn_value_t value;
+ union {
+ insn_value_t little;
+ insn_byte_t bytes[4];
+ };
+ /* !0 if we've run insn_get_xxx() for this field */
+ unsigned char got;
+ unsigned char nbytes;
+};
+
+static inline void insn_field_set(struct insn_field *p, insn_value_t v,
+ unsigned char n)
+{
+ p->value = v;
+ p->little = __cpu_to_le32(v);
+ p->nbytes = n;
+}
+
+static inline void insn_set_byte(struct insn_field *p, unsigned char n,
+ insn_byte_t v)
+{
+ p->bytes[n] = v;
+ p->value = __le32_to_cpu(p->little);
+}
+#endif
+
struct insn {
struct insn_field prefixes; /*
* Prefixes
@@ -79,7 +124,7 @@ struct insn {
#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
/* VEX bit fields */
-#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
+#define X86_EVEX_M(vex) ((vex) & 0x07) /* EVEX Byte1 */
#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
#define X86_VEX2_M 1 /* VEX2.M always 1 */
#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
@@ -87,13 +132,25 @@ struct insn {
#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
+extern int insn_get_prefixes(struct insn *insn);
+extern int insn_get_opcode(struct insn *insn);
+extern int insn_get_modrm(struct insn *insn);
+extern int insn_get_sib(struct insn *insn);
+extern int insn_get_displacement(struct insn *insn);
+extern int insn_get_immediate(struct insn *insn);
+extern int insn_get_length(struct insn *insn);
+
+enum insn_mode {
+ INSN_MODE_32,
+ INSN_MODE_64,
+ /* Mode is determined by the current kernel build. */
+ INSN_MODE_KERN,
+ INSN_NUM_MODES,
+};
+
+extern int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m);
+
+#define insn_decode_kernel(_insn, _ptr) insn_decode((_insn), (_ptr), MAX_INSN_SIZE, INSN_MODE_KERN)
/* Attribute will be determined after getting ModRM (for opcode groups) */
static inline void insn_get_attribute(struct insn *insn)
@@ -104,17 +161,6 @@ static inline void insn_get_attribute(struct insn *insn)
/* Instruction uses RIP-relative addressing */
extern int insn_rip_relative(struct insn *insn);
-/* Init insn for kernel text */
-static inline void kernel_insn_init(struct insn *insn,
- const void *kaddr, int buf_len)
-{
-#ifdef CONFIG_X86_64
- insn_init(insn, kaddr, buf_len, 1);
-#else /* CONFIG_X86_32 */
- insn_init(insn, kaddr, buf_len, 0);
-#endif
-}
-
static inline int insn_is_avx(struct insn *insn)
{
if (!insn->prefixes.got)
@@ -134,13 +180,6 @@ static inline int insn_has_emulate_prefix(struct insn *insn)
return !!insn->emulate_prefix_size;
}
-/* Ensure this instruction is decoded completely */
-static inline int insn_complete(struct insn *insn)
-{
- return insn->opcode.got && insn->modrm.got && insn->sib.got &&
- insn->displacement.got && insn->immediate.got;
-}
-
static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
{
if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
@@ -201,6 +240,21 @@ static inline int insn_offset_immediate(struct insn *insn)
return insn_offset_displacement(insn) + insn->displacement.nbytes;
}
+/**
+ * for_each_insn_prefix() -- Iterate prefixes in the instruction
+ * @insn: Pointer to struct insn.
+ * @idx: Index storage.
+ * @prefix: Prefix byte.
+ *
+ * Iterate prefix bytes of given @insn. Each prefix byte is stored in @prefix
+ * and the index is stored in @idx (note that this @idx is just for a cursor,
+ * do not change it.)
+ * Since prefixes.nbytes can be bigger than 4 if some prefixes
+ * are repeated, it cannot be used for looping over the prefixes.
+ */
+#define for_each_insn_prefix(insn, idx, prefix) \
+ for (idx = 0; idx < ARRAY_SIZE(insn->prefixes.bytes) && (prefix = insn->prefixes.bytes[idx]) != 0; idx++)
+
#define POP_SS_OPCODE 0x1f
#define MOV_SREG_OPCODE 0x8e
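
Together these hunks change the decoder's contract: the insn_get_*() helpers now return an error code, kernel_insn_init() and insn_complete() are gone, insn_decode() is the single entry point, and for_each_insn_prefix() walks the (possibly repeated) prefix bytes. A usage sketch against this header, not taken from the patch; it assumes the usual tools include paths and the ARRAY_SIZE definition from the tools linux/kernel.h:

#include <stdio.h>
#include <linux/kernel.h>	/* ARRAY_SIZE, used by for_each_insn_prefix() */
#include <asm/insn.h>		/* the header patched above */

static void dump_one(const unsigned char *buf, int len)
{
	struct insn s, *insn = &s;
	insn_byte_t prefix;
	int i;

	if (insn_decode(insn, buf, len, INSN_MODE_64))
		return;		/* non-zero return means the decode failed */

	printf("length %d, opcode 0x%02x\n", insn->length,
	       (unsigned int)insn->opcode.bytes[0]);
	for_each_insn_prefix(insn, i, prefix)
		printf("  prefix[%d] = 0x%02x\n", i, (unsigned int)prefix);
}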
diff --git a/tools/arch/x86/include/asm/irq_vectors.h b/tools/arch/x86/include/asm/irq_vectors.h
index 889f8b1b5b7f..43dcb9284208 100644
--- a/tools/arch/x86/include/asm/irq_vectors.h
+++ b/tools/arch/x86/include/asm/irq_vectors.h
@@ -26,8 +26,8 @@
* This file enumerates the exact layout of them:
*/
+/* This is used as an interrupt vector when programming the APIC. */
#define NMI_VECTOR 0x02
-#define MCE_VECTOR 0x12
/*
* IDT vectors usable for external interrupt sources start at 0x20.
@@ -84,7 +84,7 @@
*/
#define IRQ_WORK_VECTOR 0xf6
-#define UV_BAU_MESSAGE 0xf5
+/* 0xf5 - unused, was UV_BAU_MESSAGE */
#define DEFERRED_ERROR_VECTOR 0xf4
/* Vector on which hypervisor callbacks will be delivered */
@@ -114,6 +114,9 @@
#define FIRST_SYSTEM_VECTOR NR_VECTORS
#endif
+#define NR_EXTERNAL_VECTORS (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+#define NR_SYSTEM_VECTORS (NR_VECTORS - FIRST_SYSTEM_VECTOR)
+
/*
* Size the maximum number of interrupts.
*
diff --git a/tools/arch/x86/include/asm/mcsafe_test.h b/tools/arch/x86/include/asm/mcsafe_test.h
deleted file mode 100644
index 2ccd588fbad4..000000000000
--- a/tools/arch/x86/include/asm/mcsafe_test.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MCSAFE_TEST_H_
-#define _MCSAFE_TEST_H_
-
-.macro MCSAFE_TEST_CTL
-.endm
-
-.macro MCSAFE_TEST_SRC reg count target
-.endm
-
-.macro MCSAFE_TEST_DST reg count target
-.endm
-#endif /* _MCSAFE_TEST_H_ */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index d5e517d1c3dd..ad35355ee43e 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -4,12 +4,7 @@
#include <linux/bits.h>
-/*
- * CPU model specific register (MSR) numbers.
- *
- * Do not add new entries to this file unless the definitions are shared
- * between multiple compilation units.
- */
+/* CPU model specific register (MSR) numbers. */
/* x86-64 specific MSRs */
#define MSR_EFER 0xc0000080 /* extended feature register */
@@ -30,6 +25,7 @@
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
@@ -38,15 +34,26 @@
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
+#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
/* Intel MSRs. Some also available on other CPUs */
+#define MSR_TEST_CTRL 0x00000033
+#define MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT 29
+#define MSR_TEST_CTRL_SPLIT_LOCK_DETECT BIT(MSR_TEST_CTRL_SPLIT_LOCK_DETECT_BIT)
+
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
#define SPEC_CTRL_IBRS BIT(0) /* Indirect Branch Restricted Speculation */
#define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */
#define SPEC_CTRL_STIBP BIT(SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */
#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
#define SPEC_CTRL_SSBD BIT(SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+#define SPEC_CTRL_RRSBA_DIS_S_SHIFT 6 /* Disable RRSBA behavior */
+#define SPEC_CTRL_RRSBA_DIS_S BIT(SPEC_CTRL_RRSBA_DIS_S_SHIFT)
+
+/* A mask for bits which the kernel toggles when controlling mitigations */
+#define SPEC_CTRL_MITIGATIONS_MASK (SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD \
+ | SPEC_CTRL_RRSBA_DIS_S)
#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
#define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */
@@ -70,6 +77,13 @@
*/
#define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U)
+/* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */
+#define MSR_IA32_CORE_CAPS 0x000000cf
+#define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2
+#define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS BIT(MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT)
+#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT 5
+#define MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT BIT(MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT_BIT)
+
#define MSR_PKG_CST_CONFIG_CONTROL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)
#define NHM_C1_AUTO_DEMOTE (1UL << 26)
@@ -82,6 +96,7 @@
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
#define ARCH_CAP_RDCL_NO BIT(0) /* Not susceptible to Meltdown */
#define ARCH_CAP_IBRS_ALL BIT(1) /* Enhanced IBRS support */
+#define ARCH_CAP_RSBA BIT(2) /* RET may use alternative branch predictors */
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH BIT(3) /* Skip L1D flush on vmentry */
#define ARCH_CAP_SSB_NO BIT(4) /*
* Not susceptible to Speculative Store Bypass
@@ -105,6 +120,46 @@
* Not susceptible to
* TSX Async Abort (TAA) vulnerabilities.
*/
+#define ARCH_CAP_SBDR_SSDP_NO BIT(13) /*
+ * Not susceptible to SBDR and SSDP
+ * variants of Processor MMIO stale data
+ * vulnerabilities.
+ */
+#define ARCH_CAP_FBSDP_NO BIT(14) /*
+ * Not susceptible to FBSDP variant of
+ * Processor MMIO stale data
+ * vulnerabilities.
+ */
+#define ARCH_CAP_PSDP_NO BIT(15) /*
+ * Not susceptible to PSDP variant of
+ * Processor MMIO stale data
+ * vulnerabilities.
+ */
+#define ARCH_CAP_FB_CLEAR BIT(17) /*
+ * VERW clears CPU fill buffer
+ * even on MDS_NO CPUs.
+ */
+#define ARCH_CAP_FB_CLEAR_CTRL BIT(18) /*
+ * MSR_IA32_MCU_OPT_CTRL[FB_CLEAR_DIS]
+ * bit available to control VERW
+ * behavior.
+ */
+#define ARCH_CAP_RRSBA BIT(19) /*
+ * Indicates RET may use predictors
+ * other than the RSB. With eIBRS
+ * enabled predictions in kernel mode
+ * are restricted to targets in
+ * kernel.
+ */
+#define ARCH_CAP_PBRSB_NO BIT(24) /*
+ * Not susceptible to Post-Barrier
+ * Return Stack Buffer Predictions.
+ */
+
+#define ARCH_CAP_XAPIC_DISABLE BIT(21) /*
+ * IA32_XAPIC_DISABLE_STATUS MSR
+ * supported
+ */
#define MSR_IA32_FLUSH_CMD 0x0000010b
#define L1D_FLUSH BIT(0) /*
@@ -119,6 +174,11 @@
#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
+#define MSR_IA32_MCU_OPT_CTRL 0x00000123
+#define RNGDS_MITG_DIS BIT(0) /* SRBDS support */
+#define RTM_ALLOW BIT(1) /* TSX development mode */
+#define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */
+
#define MSR_IA32_SYSENTER_CS 0x00000174
#define MSR_IA32_SYSENTER_ESP 0x00000175
#define MSR_IA32_SYSENTER_EIP 0x00000176
@@ -126,6 +186,7 @@
#define MSR_IA32_MCG_CAP 0x00000179
#define MSR_IA32_MCG_STATUS 0x0000017a
#define MSR_IA32_MCG_CTL 0x0000017b
+#define MSR_ERROR_CONTROL 0x0000017f
#define MSR_IA32_MCG_EXT_CTL 0x000004d0
#define MSR_OFFCORE_RSP_0 0x000001a6
@@ -134,8 +195,20 @@
#define MSR_TURBO_RATIO_LIMIT1 0x000001ae
#define MSR_TURBO_RATIO_LIMIT2 0x000001af
+#define MSR_SNOOP_RSP_0 0x00001328
+#define MSR_SNOOP_RSP_1 0x00001329
+
#define MSR_LBR_SELECT 0x000001c8
#define MSR_LBR_TOS 0x000001c9
+
+#define MSR_IA32_POWER_CTL 0x000001fc
+#define MSR_IA32_POWER_CTL_BIT_EE 19
+
+/* Abbreviated from Intel SDM name IA32_INTEGRITY_CAPABILITIES */
+#define MSR_INTEGRITY_CAPS 0x000002d9
+#define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4
+#define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
+
#define MSR_LBR_NHM_FROM 0x00000680
#define MSR_LBR_NHM_TO 0x000006c0
#define MSR_LBR_CORE_FROM 0x00000040
@@ -145,13 +218,38 @@
#define LBR_INFO_MISPRED BIT_ULL(63)
#define LBR_INFO_IN_TX BIT_ULL(62)
#define LBR_INFO_ABORT BIT_ULL(61)
+#define LBR_INFO_CYC_CNT_VALID BIT_ULL(60)
#define LBR_INFO_CYCLES 0xffff
+#define LBR_INFO_BR_TYPE_OFFSET 56
+#define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET)
+
+#define MSR_ARCH_LBR_CTL 0x000014ce
+#define ARCH_LBR_CTL_LBREN BIT(0)
+#define ARCH_LBR_CTL_CPL_OFFSET 1
+#define ARCH_LBR_CTL_CPL (0x3ull << ARCH_LBR_CTL_CPL_OFFSET)
+#define ARCH_LBR_CTL_STACK_OFFSET 3
+#define ARCH_LBR_CTL_STACK (0x1ull << ARCH_LBR_CTL_STACK_OFFSET)
+#define ARCH_LBR_CTL_FILTER_OFFSET 16
+#define ARCH_LBR_CTL_FILTER (0x7full << ARCH_LBR_CTL_FILTER_OFFSET)
+#define MSR_ARCH_LBR_DEPTH 0x000014cf
+#define MSR_ARCH_LBR_FROM_0 0x00001500
+#define MSR_ARCH_LBR_TO_0 0x00001600
+#define MSR_ARCH_LBR_INFO_0 0x00001200
#define MSR_IA32_PEBS_ENABLE 0x000003f1
#define MSR_PEBS_DATA_CFG 0x000003f2
#define MSR_IA32_DS_AREA 0x00000600
#define MSR_IA32_PERF_CAPABILITIES 0x00000345
+#define PERF_CAP_METRICS_IDX 15
+#define PERF_CAP_PT_IDX 16
+
#define MSR_PEBS_LD_LAT_THRESHOLD 0x000003f6
+#define PERF_CAP_PEBS_TRAP BIT_ULL(6)
+#define PERF_CAP_ARCH_REG BIT_ULL(7)
+#define PERF_CAP_PEBS_FORMAT 0xf00
+#define PERF_CAP_PEBS_BASELINE BIT_ULL(14)
+#define PERF_CAP_PEBS_MASK (PERF_CAP_PEBS_TRAP | PERF_CAP_ARCH_REG | \
+ PERF_CAP_PEBS_FORMAT | PERF_CAP_PEBS_BASELINE)
#define MSR_IA32_RTIT_CTL 0x00000570
#define RTIT_CTL_TRACEEN BIT(0)
@@ -168,6 +266,8 @@
#define RTIT_CTL_DISRETC BIT(11)
#define RTIT_CTL_PTW_EN BIT(12)
#define RTIT_CTL_BRANCH_EN BIT(13)
+#define RTIT_CTL_EVENT_EN BIT(31)
+#define RTIT_CTL_NOTNT BIT_ULL(55)
#define RTIT_CTL_MTC_RANGE_OFFSET 14
#define RTIT_CTL_MTC_RANGE (0x0full << RTIT_CTL_MTC_RANGE_OFFSET)
#define RTIT_CTL_CYC_THRESH_OFFSET 19
@@ -224,10 +324,14 @@
#define MSR_IA32_LASTINTFROMIP 0x000001dd
#define MSR_IA32_LASTINTTOIP 0x000001de
+#define MSR_IA32_PASID 0x00000d93
+#define MSR_IA32_PASID_VALID BIT_ULL(31)
+
/* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
#define DEBUGCTLMSR_BTF_SHIFT 1
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
+#define DEBUGCTLMSR_BUS_LOCK_DETECT (1UL << 2)
#define DEBUGCTLMSR_TR (1UL << 6)
#define DEBUGCTLMSR_BTS (1UL << 7)
#define DEBUGCTLMSR_BTINT (1UL << 8)
@@ -240,8 +344,6 @@
#define MSR_PEBS_FRONTEND 0x000003f7
-#define MSR_IA32_POWER_CTL 0x000001fc
-
#define MSR_IA32_MC0_CTL 0x00000400
#define MSR_IA32_MC0_STATUS 0x00000401
#define MSR_IA32_MC0_ADDR 0x00000402
@@ -271,6 +373,7 @@
/* Run Time Average Power Limiting (RAPL) Interface */
+#define MSR_VR_CURRENT_CONFIG 0x00000601
#define MSR_RAPL_POWER_UNIT 0x00000606
#define MSR_PKG_POWER_LIMIT 0x00000610
@@ -292,6 +395,10 @@
#define MSR_PP1_ENERGY_STATUS 0x00000641
#define MSR_PP1_POLICY 0x00000642
+#define MSR_AMD_RAPL_POWER_UNIT 0xc0010299
+#define MSR_AMD_CORE_ENERGY_STATUS 0xc001029a
+#define MSR_AMD_PKG_ENERGY_STATUS 0xc001029b
+
/* Config TDP MSRs */
#define MSR_CONFIG_TDP_NOMINAL 0x00000648
#define MSR_CONFIG_TDP_LEVEL_1 0x00000649
@@ -300,6 +407,7 @@
#define MSR_TURBO_ACTIVATION_RATIO 0x0000064C
#define MSR_PLATFORM_ENERGY_STATUS 0x0000064D
+#define MSR_SECONDARY_TURBO_RATIO_LIMIT 0x00000650
#define MSR_PKG_WEIGHTED_CORE_C0_RES 0x00000658
#define MSR_PKG_ANY_CORE_C0_RES 0x00000659
@@ -317,11 +425,29 @@
#define MSR_ATOM_CORE_TURBO_RATIOS 0x0000066c
#define MSR_ATOM_CORE_TURBO_VIDS 0x0000066d
-
#define MSR_CORE_PERF_LIMIT_REASONS 0x00000690
#define MSR_GFX_PERF_LIMIT_REASONS 0x000006B0
#define MSR_RING_PERF_LIMIT_REASONS 0x000006B1
+/* Control-flow Enforcement Technology MSRs */
+#define MSR_IA32_U_CET 0x000006a0 /* user mode cet */
+#define MSR_IA32_S_CET 0x000006a2 /* kernel mode cet */
+#define CET_SHSTK_EN BIT_ULL(0)
+#define CET_WRSS_EN BIT_ULL(1)
+#define CET_ENDBR_EN BIT_ULL(2)
+#define CET_LEG_IW_EN BIT_ULL(3)
+#define CET_NO_TRACK_EN BIT_ULL(4)
+#define CET_SUPPRESS_DISABLE BIT_ULL(5)
+#define CET_RESERVED (BIT_ULL(6) | BIT_ULL(7) | BIT_ULL(8) | BIT_ULL(9))
+#define CET_SUPPRESS BIT_ULL(10)
+#define CET_WAIT_ENDBR BIT_ULL(11)
+
+#define MSR_IA32_PL0_SSP 0x000006a4 /* ring-0 shadow stack pointer */
+#define MSR_IA32_PL1_SSP 0x000006a5 /* ring-1 shadow stack pointer */
+#define MSR_IA32_PL2_SSP 0x000006a6 /* ring-2 shadow stack pointer */
+#define MSR_IA32_PL3_SSP 0x000006a7 /* ring-3 shadow stack pointer */
+#define MSR_IA32_INT_SSP_TAB 0x000006a8 /* exception shadow stack table */
+
/* Hardware P state interface */
#define MSR_PPERF 0x0000064e
#define MSR_PERF_LIMIT_REASONS 0x0000064f
@@ -402,7 +528,6 @@
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_TSC_RATIO 0xc0000104
#define MSR_AMD64_NB_CFG 0xc001001f
-#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD_PERF_CTL 0xc0010062
#define MSR_AMD_PERF_STATUS 0xc0010063
@@ -411,8 +536,14 @@
#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD_PPIN_CTL 0xc00102f0
#define MSR_AMD_PPIN 0xc00102f1
+#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
+
+#define MSR_AMD64_DE_CFG 0xc0011029
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT 1
+#define MSR_AMD64_DE_CFG_LFENCE_SERIALIZE BIT_ULL(MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT)
+
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
@@ -430,17 +561,73 @@
#define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
#define MSR_AMD64_IBSCTL 0xc001103a
#define MSR_AMD64_IBSBRTARGET 0xc001103b
+#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
+#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
+#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
#define MSR_AMD64_SEV_ENABLED_BIT 0
+#define MSR_AMD64_SEV_ES_ENABLED_BIT 1
+#define MSR_AMD64_SEV_SNP_ENABLED_BIT 2
#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
+#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT)
+#define MSR_AMD64_SEV_SNP_ENABLED BIT_ULL(MSR_AMD64_SEV_SNP_ENABLED_BIT)
+
+/* SNP feature bits enabled by the hypervisor */
+#define MSR_AMD64_SNP_VTOM BIT_ULL(3)
+#define MSR_AMD64_SNP_REFLECT_VC BIT_ULL(4)
+#define MSR_AMD64_SNP_RESTRICTED_INJ BIT_ULL(5)
+#define MSR_AMD64_SNP_ALT_INJ BIT_ULL(6)
+#define MSR_AMD64_SNP_DEBUG_SWAP BIT_ULL(7)
+#define MSR_AMD64_SNP_PREVENT_HOST_IBS BIT_ULL(8)
+#define MSR_AMD64_SNP_BTB_ISOLATION BIT_ULL(9)
+#define MSR_AMD64_SNP_VMPL_SSS BIT_ULL(10)
+#define MSR_AMD64_SNP_SECURE_TSC BIT_ULL(11)
+#define MSR_AMD64_SNP_VMGEXIT_PARAM BIT_ULL(12)
+#define MSR_AMD64_SNP_IBS_VIRT BIT_ULL(14)
+#define MSR_AMD64_SNP_VMSA_REG_PROTECTION BIT_ULL(16)
+#define MSR_AMD64_SNP_SMT_PROTECTION BIT_ULL(17)
+
+/* SNP feature bits reserved for future use. */
+#define MSR_AMD64_SNP_RESERVED_BIT13 BIT_ULL(13)
+#define MSR_AMD64_SNP_RESERVED_BIT15 BIT_ULL(15)
+#define MSR_AMD64_SNP_RESERVED_MASK GENMASK_ULL(63, 18)
#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+/* AMD Collaborative Processor Performance Control MSRs */
+#define MSR_AMD_CPPC_CAP1 0xc00102b0
+#define MSR_AMD_CPPC_ENABLE 0xc00102b1
+#define MSR_AMD_CPPC_CAP2 0xc00102b2
+#define MSR_AMD_CPPC_REQ 0xc00102b3
+#define MSR_AMD_CPPC_STATUS 0xc00102b4
+
+#define AMD_CPPC_LOWEST_PERF(x) (((x) >> 0) & 0xff)
+#define AMD_CPPC_LOWNONLIN_PERF(x) (((x) >> 8) & 0xff)
+#define AMD_CPPC_NOMINAL_PERF(x) (((x) >> 16) & 0xff)
+#define AMD_CPPC_HIGHEST_PERF(x) (((x) >> 24) & 0xff)
+
+#define AMD_CPPC_MAX_PERF(x) (((x) & 0xff) << 0)
+#define AMD_CPPC_MIN_PERF(x) (((x) & 0xff) << 8)
+#define AMD_CPPC_DES_PERF(x) (((x) & 0xff) << 16)
+#define AMD_CPPC_ENERGY_PERF_PREF(x) (((x) & 0xff) << 24)
+
+/* AMD Performance Counter Global Status and Control MSRs */
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS 0xc0000300
+#define MSR_AMD64_PERF_CNTR_GLOBAL_CTL 0xc0000301
+#define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR 0xc0000302
+
+/* AMD Last Branch Record MSRs */
+#define MSR_AMD64_LBR_SELECT 0xc000010e
+
/* Fam 17h MSRs */
#define MSR_F17H_IRPERF 0xc00000e9
+#define MSR_ZEN2_SPECTRAL_CHICKEN 0xc00110e3
+#define MSR_ZEN2_SPECTRAL_CHICKEN_BIT BIT_ULL(1)
+
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
@@ -450,6 +637,8 @@
#define MSR_F16H_DR0_ADDR_MASK 0xc0011027
/* Fam 15h MSRs */
+#define MSR_F15H_CU_PWR_ACCUMULATOR 0xc001007a
+#define MSR_F15H_CU_MAX_PWR_ACCUMULATOR 0xc001007b
#define MSR_F15H_PERF_CTL 0xc0010200
#define MSR_F15H_PERF_CTL0 MSR_F15H_PERF_CTL
#define MSR_F15H_PERF_CTL1 (MSR_F15H_PERF_CTL + 2)
@@ -480,16 +669,13 @@
#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
#define FAM10H_MMIO_CONF_BASE_SHIFT 20
#define MSR_FAM10H_NODE_ID 0xc001100c
-#define MSR_F10H_DECFG 0xc0011029
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT 1
-#define MSR_F10H_DECFG_LFENCE_SERIALIZE BIT_ULL(MSR_F10H_DECFG_LFENCE_SERIALIZE_BIT)
/* K8 MSRs */
#define MSR_K8_TOP_MEM1 0xc001001a
#define MSR_K8_TOP_MEM2 0xc001001d
-#define MSR_K8_SYSCFG 0xc0010010
-#define MSR_K8_SYSCFG_MEM_ENCRYPT_BIT 23
-#define MSR_K8_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_K8_SYSCFG_MEM_ENCRYPT_BIT)
+#define MSR_AMD64_SYSCFG 0xc0010010
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT 23
+#define MSR_AMD64_SYSCFG_MEM_ENCRYPT BIT_ULL(MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT)
#define MSR_K8_INT_PENDING_MSG 0xc0010055
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
@@ -566,6 +752,8 @@
#define FEAT_CTL_LOCKED BIT(0)
#define FEAT_CTL_VMX_ENABLED_INSIDE_SMX BIT(1)
#define FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX BIT(2)
+#define FEAT_CTL_SGX_LC_ENABLED BIT(17)
+#define FEAT_CTL_SGX_ENABLED BIT(18)
#define FEAT_CTL_LMCE_ENABLED BIT(20)
#define MSR_IA32_TSC_ADJUST 0x0000003b
@@ -573,6 +761,8 @@
#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
+#define MSR_IA32_XFD 0x000001c4
+#define MSR_IA32_XFD_ERR 0x000001c5
#define MSR_IA32_XSS 0x00000da0
#define MSR_IA32_APICBASE 0x0000001b
@@ -580,11 +770,15 @@
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-#define MSR_IA32_TSCDEADLINE 0x000006e0
-
#define MSR_IA32_UCODE_WRITE 0x00000079
#define MSR_IA32_UCODE_REV 0x0000008b
+/* Intel SGX Launch Enclave Public Key Hash MSRs */
+#define MSR_IA32_SGXLEPUBKEYHASH0 0x0000008C
+#define MSR_IA32_SGXLEPUBKEYHASH1 0x0000008D
+#define MSR_IA32_SGXLEPUBKEYHASH2 0x0000008E
+#define MSR_IA32_SGXLEPUBKEYHASH3 0x0000008F
+
#define MSR_IA32_SMM_MONITOR_CTL 0x0000009b
#define MSR_IA32_SMBASE 0x0000009e
@@ -592,6 +786,12 @@
#define MSR_IA32_PERF_CTL 0x00000199
#define INTEL_PERF_CTL_MASK 0xffff
+/* AMD Branch Sampling configuration */
+#define MSR_AMD_DBG_EXTN_CFG 0xc000010f
+#define MSR_AMD_SAMP_BR_FROM 0xc0010300
+
+#define DBG_EXTN_CFG_LBRV2EN BIT_ULL(6)
+
#define MSR_IA32_MPERF 0x000000e7
#define MSR_IA32_APERF 0x000000e8
@@ -622,6 +822,7 @@
#define ENERGY_PERF_BIAS_PERFORMANCE 0
#define ENERGY_PERF_BIAS_BALANCE_PERFORMANCE 4
#define ENERGY_PERF_BIAS_NORMAL 6
+#define ENERGY_PERF_BIAS_NORMAL_POWERSAVE 7
#define ENERGY_PERF_BIAS_BALANCE_POWERSAVE 8
#define ENERGY_PERF_BIAS_POWERSAVE 15
@@ -629,12 +830,14 @@
#define PACKAGE_THERM_STATUS_PROCHOT (1 << 0)
#define PACKAGE_THERM_STATUS_POWER_LIMIT (1 << 10)
+#define PACKAGE_THERM_STATUS_HFI_UPDATED (1 << 26)
#define MSR_IA32_PACKAGE_THERM_INTERRUPT 0x000001b2
#define PACKAGE_THERM_INT_HIGH_ENABLE (1 << 0)
#define PACKAGE_THERM_INT_LOW_ENABLE (1 << 1)
#define PACKAGE_THERM_INT_PLN_ENABLE (1 << 24)
+#define PACKAGE_THERM_INT_HFI_ENABLE (1 << 25)
/* Thermal Thresholds Support */
#define THERM_INT_THRESHOLD0_ENABLE (1 << 15)
@@ -716,6 +919,10 @@
#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+#define MSR_TFA_TSX_CPUID_CLEAR_BIT 1
+#define MSR_TFA_TSX_CPUID_CLEAR BIT_ULL(MSR_TFA_TSX_CPUID_CLEAR_BIT)
+#define MSR_TFA_SDV_ENABLE_RTM_BIT 2
+#define MSR_TFA_SDV_ENABLE_RTM BIT_ULL(MSR_TFA_SDV_ENABLE_RTM_BIT)
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
@@ -821,11 +1028,14 @@
#define MSR_CORE_PERF_FIXED_CTR0 0x00000309
#define MSR_CORE_PERF_FIXED_CTR1 0x0000030a
#define MSR_CORE_PERF_FIXED_CTR2 0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR3 0x0000030c
#define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+#define MSR_PERF_METRICS 0x00000329
+
/* PERF_GLOBAL_OVF_CTL bits */
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT)
@@ -856,6 +1066,7 @@
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
#define MSR_IA32_VMX_VMFUNC 0x00000491
+#define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492
/* VMX_BASIC bits and bitmasks */
#define VMX_BASIC_VMCS_SIZE_SHIFT 32
@@ -866,6 +1077,22 @@
#define VMX_BASIC_MEM_TYPE_WB 6LLU
#define VMX_BASIC_INOUT 0x0040000000000000LLU
+/* Resctrl MSRs: */
+/* - Intel: */
+#define MSR_IA32_L3_QOS_CFG 0xc81
+#define MSR_IA32_L2_QOS_CFG 0xc82
+#define MSR_IA32_QM_EVTSEL 0xc8d
+#define MSR_IA32_QM_CTR 0xc8e
+#define MSR_IA32_PQR_ASSOC 0xc8f
+#define MSR_IA32_L3_CBM_BASE 0xc90
+#define MSR_IA32_L2_CBM_BASE 0xd10
+#define MSR_IA32_MBA_THRTL_BASE 0xd50
+
+/* - AMD: */
+#define MSR_IA32_MBA_BW_BASE 0xc0000200
+#define MSR_IA32_SMBA_BW_BASE 0xc0000280
+#define MSR_IA32_EVT_CFG_BASE 0xc0000400
+
/* MSR_IA32_VMX_MISC bits */
#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
@@ -876,4 +1103,16 @@
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
+/* Hardware Feedback Interface */
+#define MSR_IA32_HW_FEEDBACK_PTR 0x17d0
+#define MSR_IA32_HW_FEEDBACK_CONFIG 0x17d1
+
+/* x2APIC locked status */
+#define MSR_IA32_XAPIC_DISABLE_STATUS 0xBD
+#define LEGACY_XAPIC_DISABLED BIT(0) /*
+ * x2APIC mode is locked and
+ * disabling x2APIC will cause
+ * a #GP
+ */
+
#endif /* _ASM_X86_MSR_INDEX_H */
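
Most of the new MSR definitions are plain register numbers, but the AMD CPPC accessors are shift/mask helpers over one 64-bit value. A small stand-alone illustration of how they split a capability word; the register contents here are invented, and actually reading MSR_AMD_CPPC_CAP1 is out of scope:

#include <stdio.h>
#include <stdint.h>
#include <asm/msr-index.h>	/* this header; assumes the tools include paths */

int main(void)
{
	uint64_t cap1 = 0xd7b09a28;	/* made-up MSR_AMD_CPPC_CAP1 contents */

	printf("lowest=%u lowest_nonlinear=%u nominal=%u highest=%u\n",
	       (unsigned int)AMD_CPPC_LOWEST_PERF(cap1),
	       (unsigned int)AMD_CPPC_LOWNONLIN_PERF(cap1),
	       (unsigned int)AMD_CPPC_NOMINAL_PERF(cap1),
	       (unsigned int)AMD_CPPC_HIGHEST_PERF(cap1));
	return 0;	/* prints lowest=40 lowest_nonlinear=154 nominal=176 highest=215 */
}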
diff --git a/tools/arch/x86/include/asm/nops.h b/tools/arch/x86/include/asm/nops.h
new file mode 100644
index 000000000000..c5573eaa5bb9
--- /dev/null
+++ b/tools/arch/x86/include/asm/nops.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_NOPS_H
+#define _ASM_X86_NOPS_H
+
+#include <asm/asm.h>
+
+/*
+ * Define nops for use with alternative() and for tracing.
+ */
+
+#ifndef CONFIG_64BIT
+
+/*
+ * Generic 32bit nops from GAS:
+ *
+ * 1: nop
+ * 2: movl %esi,%esi
+ * 3: leal 0x0(%esi),%esi
+ * 4: leal 0x0(%esi,%eiz,1),%esi
+ * 5: leal %ds:0x0(%esi,%eiz,1),%esi
+ * 6: leal 0x0(%esi),%esi
+ * 7: leal 0x0(%esi,%eiz,1),%esi
+ * 8: leal %ds:0x0(%esi,%eiz,1),%esi
+ *
+ * Except 5 and 8, which are DS prefixed 4 and 7 resp, where GAS would emit 2
+ * nop instructions.
+ */
+#define BYTES_NOP1 0x90
+#define BYTES_NOP2 0x89,0xf6
+#define BYTES_NOP3 0x8d,0x76,0x00
+#define BYTES_NOP4 0x8d,0x74,0x26,0x00
+#define BYTES_NOP5 0x3e,BYTES_NOP4
+#define BYTES_NOP6 0x8d,0xb6,0x00,0x00,0x00,0x00
+#define BYTES_NOP7 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00
+#define BYTES_NOP8 0x3e,BYTES_NOP7
+
+#else
+
+/*
+ * Generic 64bit nops from GAS:
+ *
+ * 1: nop
+ * 2: osp nop
+ * 3: nopl (%eax)
+ * 4: nopl 0x00(%eax)
+ * 5: nopl 0x00(%eax,%eax,1)
+ * 6: osp nopl 0x00(%eax,%eax,1)
+ * 7: nopl 0x00000000(%eax)
+ * 8: nopl 0x00000000(%eax,%eax,1)
+ */
+#define BYTES_NOP1 0x90
+#define BYTES_NOP2 0x66,BYTES_NOP1
+#define BYTES_NOP3 0x0f,0x1f,0x00
+#define BYTES_NOP4 0x0f,0x1f,0x40,0x00
+#define BYTES_NOP5 0x0f,0x1f,0x44,0x00,0x00
+#define BYTES_NOP6 0x66,BYTES_NOP5
+#define BYTES_NOP7 0x0f,0x1f,0x80,0x00,0x00,0x00,0x00
+#define BYTES_NOP8 0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
+
+#endif /* CONFIG_64BIT */
+
+#define ASM_NOP1 _ASM_BYTES(BYTES_NOP1)
+#define ASM_NOP2 _ASM_BYTES(BYTES_NOP2)
+#define ASM_NOP3 _ASM_BYTES(BYTES_NOP3)
+#define ASM_NOP4 _ASM_BYTES(BYTES_NOP4)
+#define ASM_NOP5 _ASM_BYTES(BYTES_NOP5)
+#define ASM_NOP6 _ASM_BYTES(BYTES_NOP6)
+#define ASM_NOP7 _ASM_BYTES(BYTES_NOP7)
+#define ASM_NOP8 _ASM_BYTES(BYTES_NOP8)
+
+#define ASM_NOP_MAX 8
+
+#ifndef __ASSEMBLY__
+extern const unsigned char * const x86_nops[];
+#endif
+
+#endif /* _ASM_X86_NOPS_H */
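
A note on usage, not part of the patch: because each BYTES_NOPn is a bare comma-separated byte list, it can seed an array initializer directly, while ASM_NOPn wraps the same bytes in a .byte directive for inline assembly. For example:

static const unsigned char nop5[] = { BYTES_NOP5 };	/* 64-bit build: 0f 1f 44 00 00 */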
diff --git a/tools/arch/x86/include/asm/orc_types.h b/tools/arch/x86/include/asm/orc_types.h
index 6e060907c163..46d7e06763c9 100644
--- a/tools/arch/x86/include/asm/orc_types.h
+++ b/tools/arch/x86/include/asm/orc_types.h
@@ -39,29 +39,15 @@
#define ORC_REG_SP_INDIRECT 9
#define ORC_REG_MAX 15
-/*
- * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
- * caller's SP right before it made the call). Used for all callable
- * functions, i.e. all C code and all callable asm functions.
- *
- * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
- * to a fully populated pt_regs from a syscall, interrupt, or exception.
- *
- * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
- * points to the iret return frame.
- *
- * The UNWIND_HINT macros are used only for the unwind_hint struct. They
- * aren't used in struct orc_entry due to size and complexity constraints.
- * Objtool converts them to real types when it converts the hints to orc
- * entries.
- */
-#define ORC_TYPE_CALL 0
-#define ORC_TYPE_REGS 1
-#define ORC_TYPE_REGS_IRET 2
-#define UNWIND_HINT_TYPE_SAVE 3
-#define UNWIND_HINT_TYPE_RESTORE 4
+#define ORC_TYPE_UNDEFINED 0
+#define ORC_TYPE_END_OF_STACK 1
+#define ORC_TYPE_CALL 2
+#define ORC_TYPE_REGS 3
+#define ORC_TYPE_REGS_PARTIAL 4
#ifndef __ASSEMBLY__
+#include <asm/byteorder.h>
+
/*
* This struct is more or less a vastly simplified version of the DWARF Call
* Frame Information standard. It contains only the necessary parts of DWARF
@@ -73,25 +59,20 @@
struct orc_entry {
s16 sp_offset;
s16 bp_offset;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned sp_reg:4;
unsigned bp_reg:4;
- unsigned type:2;
- unsigned end:1;
+ unsigned type:3;
+ unsigned signal:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ unsigned bp_reg:4;
+ unsigned sp_reg:4;
+ unsigned unused:4;
+ unsigned signal:1;
+ unsigned type:3;
+#endif
} __packed;
-/*
- * This struct is used by asm and inline asm code to manually annotate the
- * location of registers on the stack for the ORC unwinder.
- *
- * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
- */
-struct unwind_hint {
- u32 ip;
- s16 sp_offset;
- u8 sp_reg;
- u8 type;
- u8 end;
-};
#endif /* __ASSEMBLY__ */
#endif /* _ORC_TYPES_H */
diff --git a/tools/arch/x86/include/asm/pvclock-abi.h b/tools/arch/x86/include/asm/pvclock-abi.h
new file mode 100644
index 000000000000..1436226efe3e
--- /dev/null
+++ b/tools/arch/x86/include/asm/pvclock-abi.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_PVCLOCK_ABI_H
+#define _ASM_X86_PVCLOCK_ABI_H
+#ifndef __ASSEMBLY__
+
+/*
+ * These structs MUST NOT be changed.
+ * They are the ABI between hypervisor and guest OS.
+ * Both Xen and KVM are using this.
+ *
+ * pvclock_vcpu_time_info holds the system time and the tsc timestamp
+ * of the last update. So the guest can use the tsc delta to get a
+ * more precise system time. There is one per virtual cpu.
+ *
+ * pvclock_wall_clock references the point in time when the system
+ * time was zero (usually boot time), thus the guest calculates the
+ * current wall clock by adding the system time.
+ *
+ * Protocol for the "version" fields is: hypervisor raises it (making
+ * it uneven) before it starts updating the fields and raises it again
+ * (making it even) when it is done. Thus the guest can make sure the
+ * time values it got are consistent by checking the version before
+ * and after reading them.
+ */
+
+struct pvclock_vcpu_time_info {
+ u32 version;
+ u32 pad0;
+ u64 tsc_timestamp;
+ u64 system_time;
+ u32 tsc_to_system_mul;
+ s8 tsc_shift;
+ u8 flags;
+ u8 pad[2];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct pvclock_wall_clock {
+ u32 version;
+ u32 sec;
+ u32 nsec;
+} __attribute__((__packed__));
+
+#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
+#define PVCLOCK_GUEST_STOPPED (1 << 1)
+/* PVCLOCK_COUNTS_FROM_ZERO broke ABI and can't be used anymore. */
+#define PVCLOCK_COUNTS_FROM_ZERO (1 << 2)
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/tools/arch/x86/include/asm/pvclock.h b/tools/arch/x86/include/asm/pvclock.h
new file mode 100644
index 000000000000..2628f9a6330b
--- /dev/null
+++ b/tools/arch/x86/include/asm/pvclock.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_PVCLOCK_H
+#define _ASM_X86_PVCLOCK_H
+
+#include <asm/barrier.h>
+#include <asm/pvclock-abi.h>
+
+/* some helper functions for xen and kvm pv clock sources */
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
+void pvclock_set_flags(u8 flags);
+unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
+void pvclock_resume(void);
+
+void pvclock_touch_watchdogs(void);
+
+static __always_inline
+unsigned pvclock_read_begin(const struct pvclock_vcpu_time_info *src)
+{
+ unsigned version = src->version & ~1;
+ /* Make sure that the version is read before the data. */
+ rmb();
+ return version;
+}
+
+static __always_inline
+bool pvclock_read_retry(const struct pvclock_vcpu_time_info *src,
+ unsigned version)
+{
+ /* Make sure that the version is re-read after the data. */
+ rmb();
+ return version != src->version;
+}
+
+/*
+ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
+ * yielding a 64-bit result.
+ */
+static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
+{
+ u64 product;
+#ifdef __i386__
+ u32 tmp1, tmp2;
+#else
+ unsigned long tmp;
+#endif
+
+ if (shift < 0)
+ delta >>= -shift;
+ else
+ delta <<= shift;
+
+#ifdef __i386__
+ __asm__ (
+ "mul %5 ; "
+ "mov %4,%%eax ; "
+ "mov %%edx,%4 ; "
+ "mul %5 ; "
+ "xor %5,%5 ; "
+ "add %4,%%eax ; "
+ "adc %5,%%edx ; "
+ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
+ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
+#elif defined(__x86_64__)
+ __asm__ (
+ "mulq %[mul_frac] ; shrd $32, %[hi], %[lo]"
+ : [lo]"=a"(product),
+ [hi]"=d"(tmp)
+ : "0"(delta),
+ [mul_frac]"rm"((u64)mul_frac));
+#else
+#error implement me!
+#endif
+
+ return product;
+}
+
+static __always_inline
+u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc)
+{
+ u64 delta = tsc - src->tsc_timestamp;
+ u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+ src->tsc_shift);
+ return src->system_time + offset;
+}
+
+struct pvclock_vsyscall_time_info {
+ struct pvclock_vcpu_time_info pvti;
+} __attribute__((__aligned__(64)));
+
+#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
+
+#ifdef CONFIG_PARAVIRT_CLOCK
+void pvclock_set_pvti_cpu0_va(struct pvclock_vsyscall_time_info *pvti);
+struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void);
+#else
+static inline struct pvclock_vsyscall_time_info *pvclock_get_pvti_cpu0_va(void)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _ASM_X86_PVCLOCK_H */
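
pvclock_read_begin()/pvclock_read_retry() implement the even/odd version protocol described in pvclock-abi.h: the reader keeps sampling until it sees the same even version before and after reading the data. A sketch of that loop, not from the patch; read_tsc() is a hypothetical stand-in for whatever TSC read the caller uses:

#include <asm/pvclock.h>	/* the header added above; assumes tools include paths */

extern u64 read_tsc(void);	/* hypothetical, e.g. an RDTSC wrapper */

static u64 sample_pvclock(const struct pvclock_vcpu_time_info *src)
{
	unsigned int version;
	u64 t;

	do {
		version = pvclock_read_begin(src);	/* even version, then rmb() */
		t = __pvclock_read_cycles(src, read_tsc());
	} while (pvclock_read_retry(src, version));	/* raced with an update: retry */

	return t;
}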
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
index 6847d85400a8..7ba1726b71c7 100644
--- a/tools/arch/x86/include/asm/required-features.h
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -35,11 +35,7 @@
# define NEED_CMOV 0
#endif
-#ifdef CONFIG_X86_USE_3DNOW
-# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
-#else
# define NEED_3DNOW 0
-#endif
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
@@ -54,7 +50,7 @@
#endif
#ifdef CONFIG_X86_64
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0
#define NEED_PGE 0
@@ -101,6 +97,8 @@
#define REQUIRED_MASK16 0
#define REQUIRED_MASK17 0
#define REQUIRED_MASK18 0
-#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+#define REQUIRED_MASK19 0
+#define REQUIRED_MASK20 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h
index fee7983a90b4..11ff975242ca 100644
--- a/tools/arch/x86/include/asm/rmwcc.h
+++ b/tools/arch/x86/include/asm/rmwcc.h
@@ -2,8 +2,6 @@
#ifndef _TOOLS_LINUX_ASM_X86_RMWcc
#define _TOOLS_LINUX_ASM_X86_RMWcc
-#ifdef CONFIG_CC_HAS_ASM_GOTO
-
#define __GEN_RMWcc(fullop, var, cc, ...) \
do { \
asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \
@@ -20,23 +18,4 @@ cc_label: \
#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
__GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
-#else /* !CONFIG_CC_HAS_ASM_GOTO */
-
-#define __GEN_RMWcc(fullop, var, cc, ...) \
-do { \
- char c; \
- asm volatile (fullop "; set" cc " %1" \
- : "+m" (var), "=qm" (c) \
- : __VA_ARGS__ : "memory"); \
- return c != 0; \
-} while (0)
-
-#define GEN_UNARY_RMWcc(op, var, arg0, cc) \
- __GEN_RMWcc(op " " arg0, var, cc)
-
-#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc) \
- __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
-
-#endif /* CONFIG_CC_HAS_ASM_GOTO */
-
#endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
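
With the setcc fallback removed, the asm-goto form is the only implementation left, and the macro still supplies both return paths itself, so it has to be the entire body of a small predicate function. A hypothetical example, not from the patch; it assumes the tools compiler header that provides asm_volatile_goto():

#include <linux/compiler.h>	/* asm_volatile_goto() */
#include <asm/rmwcc.h>

static inline int local_dec_and_test(int *counter)
{
	/* roughly: decl on *counter, jump-if-equal to the "return 1" label, else return 0 */
	GEN_UNARY_RMWcc("decl", *counter, "%0", "e");
}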
diff --git a/tools/arch/x86/include/asm/unistd_32.h b/tools/arch/x86/include/asm/unistd_32.h
deleted file mode 100644
index 60a89dba01b6..000000000000
--- a/tools/arch/x86/include/asm/unistd_32.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NR_perf_event_open
-# define __NR_perf_event_open 336
-#endif
-#ifndef __NR_futex
-# define __NR_futex 240
-#endif
-#ifndef __NR_gettid
-# define __NR_gettid 224
-#endif
-#ifndef __NR_getcpu
-# define __NR_getcpu 318
-#endif
-#ifndef __NR_setns
-# define __NR_setns 346
-#endif
diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
deleted file mode 100644
index 4205ed4158bf..000000000000
--- a/tools/arch/x86/include/asm/unistd_64.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __NR_userfaultfd
-#define __NR_userfaultfd 282
-#endif
-#ifndef __NR_perf_event_open
-# define __NR_perf_event_open 298
-#endif
-#ifndef __NR_futex
-# define __NR_futex 202
-#endif
-#ifndef __NR_gettid
-# define __NR_gettid 186
-#endif
-#ifndef __NR_getcpu
-# define __NR_getcpu 309
-#endif
-#ifndef __NR_setns
-#define __NR_setns 308
-#endif
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 3f3f780c8c65..7f467fe05d42 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -9,9 +9,11 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#include <linux/stddef.h>
#define KVM_PIO_PAGE_OFFSET 1
#define KVM_COALESCED_MMIO_PAGE_OFFSET 2
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
#define DE_VECTOR 0
#define DB_VECTOR 1
@@ -52,14 +54,6 @@
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256
-struct kvm_memory_alias {
- __u32 slot; /* this has a different namespace than memory slots */
- __u32 flags;
- __u64 guest_phys_addr;
- __u64 memory_size;
- __u64 target_phys_addr;
-};
-
/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
struct kvm_pic_state {
__u8 last_irr; /* edge detection */
@@ -111,6 +105,7 @@ struct kvm_ioapic_state {
#define KVM_NR_IRQCHIPS 3
#define KVM_RUN_X86_SMM (1 << 0)
+#define KVM_RUN_X86_BUS_LOCK (1 << 1)
/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
@@ -157,6 +152,19 @@ struct kvm_sregs {
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};
+struct kvm_sregs2 {
+ /* out (KVM_GET_SREGS2) / in (KVM_SET_SREGS2) */
+ struct kvm_segment cs, ds, es, fs, gs, ss;
+ struct kvm_segment tr, ldt;
+ struct kvm_dtable gdt, idt;
+ __u64 cr0, cr2, cr3, cr4, cr8;
+ __u64 efer;
+ __u64 apic_base;
+ __u64 flags;
+ __u64 pdptrs[4];
+};
+#define KVM_SREGS2_FLAGS_PDPTRS_VALID 1
+
/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
__u8 fpr[8][16];
@@ -183,15 +191,40 @@ struct kvm_msrs {
__u32 nmsrs; /* number of msrs in entries */
__u32 pad;
- struct kvm_msr_entry entries[0];
+ struct kvm_msr_entry entries[];
};
/* for KVM_GET_MSR_INDEX_LIST */
struct kvm_msr_list {
__u32 nmsrs; /* number of msrs in entries */
- __u32 indices[0];
+ __u32 indices[];
};
+/* Maximum size of any access bitmap in bytes */
+#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600
+
+/* for KVM_X86_SET_MSR_FILTER */
+struct kvm_msr_filter_range {
+#define KVM_MSR_FILTER_READ (1 << 0)
+#define KVM_MSR_FILTER_WRITE (1 << 1)
+#define KVM_MSR_FILTER_RANGE_VALID_MASK (KVM_MSR_FILTER_READ | \
+ KVM_MSR_FILTER_WRITE)
+ __u32 flags;
+ __u32 nmsrs; /* number of msrs in bitmap */
+ __u32 base; /* MSR index the bitmap starts at */
+ __u8 *bitmap; /* a 1 bit allows the operations in flags, 0 denies */
+};
+
+#define KVM_MSR_FILTER_MAX_RANGES 16
+struct kvm_msr_filter {
+#ifndef __KERNEL__
+#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
+#endif
+#define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0)
+#define KVM_MSR_FILTER_VALID_MASK (KVM_MSR_FILTER_DEFAULT_DENY)
+ __u32 flags;
+ struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
+};
struct kvm_cpuid_entry {
__u32 function;
@@ -206,7 +239,7 @@ struct kvm_cpuid_entry {
struct kvm_cpuid {
__u32 nent;
__u32 padding;
- struct kvm_cpuid_entry entries[0];
+ struct kvm_cpuid_entry entries[];
};
struct kvm_cpuid_entry2 {
@@ -228,7 +261,7 @@ struct kvm_cpuid_entry2 {
struct kvm_cpuid2 {
__u32 nent;
__u32 padding;
- struct kvm_cpuid_entry2 entries[0];
+ struct kvm_cpuid_entry2 entries[];
};
/* for KVM_GET_PIT and KVM_SET_PIT */
@@ -260,6 +293,7 @@ struct kvm_debug_exit_arch {
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000
+#define KVM_GUESTDBG_BLOCKIRQ 0x00100000
/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
@@ -270,7 +304,8 @@ struct kvm_pit_state {
struct kvm_pit_channel_state channels[3];
};
-#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+#define KVM_PIT_FLAGS_SPEAKER_DATA_ON 0x00000002
struct kvm_pit_state2 {
struct kvm_pit_channel_state channels[3];
@@ -289,6 +324,7 @@ struct kvm_reinject_control {
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
+#define KVM_VCPUEVENT_VALID_TRIPLE_FAULT 0x00000020
/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
@@ -323,7 +359,10 @@ struct kvm_vcpu_events {
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
- __u8 reserved[27];
+ struct {
+ __u8 pending;
+ } triple_fault;
+ __u8 reserved[26];
__u8 exception_has_payload;
__u64 exception_payload;
};
@@ -337,9 +376,23 @@ struct kvm_debugregs {
__u64 reserved[9];
};
-/* for KVM_CAP_XSAVE */
+/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */
struct kvm_xsave {
+ /*
+ * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes
+ * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
+ * respectively, when invoked on the vm file descriptor.
+ *
+ * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
+ * will always be at least 4096. Currently, it is only greater
+ * than 4096 if a dynamic feature has been enabled with
+ * ``arch_prctl()``, but this may change in the future.
+ *
+ * The offsets of the state save areas in struct kvm_xsave follow
+ * the contents of CPUID leaf 0xD on the host.
+ */
__u32 region[1024];
+ __u32 extra[];
};
#define KVM_MAX_XCRS 16
@@ -378,25 +431,35 @@ struct kvm_sync_regs {
struct kvm_vcpu_events events;
};
-#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
-#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
-#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
-#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
-#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
+#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
+#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
+#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
+#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
#define KVM_STATE_NESTED_FORMAT_VMX 0
-#define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */
+#define KVM_STATE_NESTED_FORMAT_SVM 1
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
+#define KVM_STATE_NESTED_GIF_SET 0x00000100
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
+#define KVM_STATE_NESTED_SVM_VMCB_SIZE 0x1000
+
+#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
+
+/* attributes for system fd (group 0) */
+#define KVM_X86_XCOMP_GUEST_SUPP 0
+
struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
@@ -409,6 +472,20 @@ struct kvm_vmx_nested_state_hdr {
struct {
__u16 flags;
} smm;
+
+ __u16 pad;
+
+ __u32 flags;
+ __u64 preemption_timer_deadline;
+};
+
+struct kvm_svm_nested_state_data {
+ /* Save area only used if KVM_STATE_NESTED_RUN_PENDING. */
+ __u8 vmcb12[KVM_STATE_NESTED_SVM_VMCB_SIZE];
+};
+
+struct kvm_svm_nested_state_hdr {
+ __u64 vmcb_pa;
};
/* for KVM_CAP_NESTED_STATE */
@@ -419,6 +496,7 @@ struct kvm_nested_state {
union {
struct kvm_vmx_nested_state_hdr vmx;
+ struct kvm_svm_nested_state_hdr svm;
/* Pad the header to 128 bytes. */
__u8 pad[120];
@@ -430,7 +508,8 @@ struct kvm_nested_state {
* KVM_{GET,PUT}_NESTED_STATE ioctl values.
*/
union {
- struct kvm_vmx_nested_state_data vmx[0];
+ __DECLARE_FLEX_ARRAY(struct kvm_vmx_nested_state_data, vmx);
+ __DECLARE_FLEX_ARRAY(struct kvm_svm_nested_state_data, svm);
} data;
};
@@ -441,10 +520,43 @@ struct kvm_pmu_event_filter {
__u32 fixed_counter_bitmap;
__u32 flags;
__u32 pad[4];
- __u64 events[0];
+ __u64 events[];
};
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1
+#define KVM_PMU_EVENT_FLAG_MASKED_EVENTS BIT(0)
+#define KVM_PMU_EVENT_FLAGS_VALID_MASK (KVM_PMU_EVENT_FLAG_MASKED_EVENTS)
+
+/*
+ * Masked event layout.
+ * Bits Description
+ * ---- -----------
+ * 7:0 event select (low bits)
+ * 15:8 umask match
+ * 31:16 unused
+ * 35:32 event select (high bits)
+ * 36:54 unused
+ * 55 exclude bit
+ * 63:56 umask mask
+ */
+
+#define KVM_PMU_ENCODE_MASKED_ENTRY(event_select, mask, match, exclude) \
+ (((event_select) & 0xFFULL) | (((event_select) & 0XF00ULL) << 24) | \
+ (((mask) & 0xFFULL) << 56) | \
+ (((match) & 0xFFULL) << 8) | \
+ ((__u64)(!!(exclude)) << 55))
+
+#define KVM_PMU_MASKED_ENTRY_EVENT_SELECT \
+ (GENMASK_ULL(7, 0) | GENMASK_ULL(35, 32))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MASK (GENMASK_ULL(63, 56))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MATCH (GENMASK_ULL(15, 8))
+#define KVM_PMU_MASKED_ENTRY_EXCLUDE (BIT_ULL(55))
+#define KVM_PMU_MASKED_ENTRY_UMASK_MASK_SHIFT (56)
+
+/* for KVM_{GET,SET,HAS}_DEVICE_ATTR */
+#define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
+#define KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
+
#endif /* _ASM_X86_KVM_H */
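
The masked-event layout comment and KVM_PMU_ENCODE_MASKED_ENTRY() are easiest to see in use. A sketch of building a one-entry filter, not from the patch: the event numbers are illustrative, the ioctl plumbing is omitted, and KVM_PMU_EVENT_FLAG_MASKED_EVENTS expands to BIT(0), so a BIT() definition (e.g. the tools linux/bits.h one) is assumed to be in scope:

#include <stdlib.h>
#include <linux/kvm.h>		/* pulls in this asm/kvm.h; assumes uapi include paths */

/* Allow only event select 0xC2 with (umask & 0x0F) == 0x01; no exclude bit. */
static struct kvm_pmu_event_filter *build_filter(void)
{
	struct kvm_pmu_event_filter *f = calloc(1, sizeof(*f) + sizeof(__u64));

	if (!f)
		return NULL;
	f->action    = KVM_PMU_EVENT_ALLOW;
	f->flags     = KVM_PMU_EVENT_FLAG_MASKED_EVENTS;
	f->nevents   = 1;
	f->events[0] = KVM_PMU_ENCODE_MASKED_ENTRY(0xC2, 0x0F, 0x01, 0);
	return f;	/* caller would hand this to the KVM_SET_PMU_EVENT_FILTER vm ioctl */
}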
diff --git a/tools/arch/x86/include/uapi/asm/prctl.h b/tools/arch/x86/include/uapi/asm/prctl.h
index 5a6aac9fa41f..500b96e71f18 100644
--- a/tools/arch/x86/include/uapi/asm/prctl.h
+++ b/tools/arch/x86/include/uapi/asm/prctl.h
@@ -2,16 +2,22 @@
#ifndef _ASM_X86_PRCTL_H
#define _ASM_X86_PRCTL_H
-#define ARCH_SET_GS 0x1001
-#define ARCH_SET_FS 0x1002
-#define ARCH_GET_FS 0x1003
-#define ARCH_GET_GS 0x1004
+#define ARCH_SET_GS 0x1001
+#define ARCH_SET_FS 0x1002
+#define ARCH_GET_FS 0x1003
+#define ARCH_GET_GS 0x1004
-#define ARCH_GET_CPUID 0x1011
-#define ARCH_SET_CPUID 0x1012
+#define ARCH_GET_CPUID 0x1011
+#define ARCH_SET_CPUID 0x1012
-#define ARCH_MAP_VDSO_X32 0x2001
-#define ARCH_MAP_VDSO_32 0x2002
-#define ARCH_MAP_VDSO_64 0x2003
+#define ARCH_GET_XCOMP_SUPP 0x1021
+#define ARCH_GET_XCOMP_PERM 0x1022
+#define ARCH_REQ_XCOMP_PERM 0x1023
+#define ARCH_GET_XCOMP_GUEST_PERM 0x1024
+#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
+
+#define ARCH_MAP_VDSO_X32 0x2001
+#define ARCH_MAP_VDSO_32 0x2002
+#define ARCH_MAP_VDSO_64 0x2003
#endif /* _ASM_X86_PRCTL_H */
diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h
index 2e8a30f06c74..80e1df482337 100644
--- a/tools/arch/x86/include/uapi/asm/svm.h
+++ b/tools/arch/x86/include/uapi/asm/svm.h
@@ -29,6 +29,7 @@
#define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040
+#define SVM_EXIT_LAST_EXCP 0x05f
#define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062
@@ -76,9 +77,55 @@
#define SVM_EXIT_MWAIT_COND 0x08c
#define SVM_EXIT_XSETBV 0x08d
#define SVM_EXIT_RDPRU 0x08e
+#define SVM_EXIT_EFER_WRITE_TRAP 0x08f
+#define SVM_EXIT_CR0_WRITE_TRAP 0x090
+#define SVM_EXIT_CR1_WRITE_TRAP 0x091
+#define SVM_EXIT_CR2_WRITE_TRAP 0x092
+#define SVM_EXIT_CR3_WRITE_TRAP 0x093
+#define SVM_EXIT_CR4_WRITE_TRAP 0x094
+#define SVM_EXIT_CR5_WRITE_TRAP 0x095
+#define SVM_EXIT_CR6_WRITE_TRAP 0x096
+#define SVM_EXIT_CR7_WRITE_TRAP 0x097
+#define SVM_EXIT_CR8_WRITE_TRAP 0x098
+#define SVM_EXIT_CR9_WRITE_TRAP 0x099
+#define SVM_EXIT_CR10_WRITE_TRAP 0x09a
+#define SVM_EXIT_CR11_WRITE_TRAP 0x09b
+#define SVM_EXIT_CR12_WRITE_TRAP 0x09c
+#define SVM_EXIT_CR13_WRITE_TRAP 0x09d
+#define SVM_EXIT_CR14_WRITE_TRAP 0x09e
+#define SVM_EXIT_CR15_WRITE_TRAP 0x09f
+#define SVM_EXIT_INVPCID 0x0a2
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402
+#define SVM_EXIT_VMGEXIT 0x403
+
+/* SEV-ES software-defined VMGEXIT events */
+#define SVM_VMGEXIT_MMIO_READ 0x80000001
+#define SVM_VMGEXIT_MMIO_WRITE 0x80000002
+#define SVM_VMGEXIT_NMI_COMPLETE 0x80000003
+#define SVM_VMGEXIT_AP_HLT_LOOP 0x80000004
+#define SVM_VMGEXIT_AP_JUMP_TABLE 0x80000005
+#define SVM_VMGEXIT_SET_AP_JUMP_TABLE 0
+#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1
+#define SVM_VMGEXIT_PSC 0x80000010
+#define SVM_VMGEXIT_GUEST_REQUEST 0x80000011
+#define SVM_VMGEXIT_EXT_GUEST_REQUEST 0x80000012
+#define SVM_VMGEXIT_AP_CREATION 0x80000013
+#define SVM_VMGEXIT_AP_CREATE_ON_INIT 0
+#define SVM_VMGEXIT_AP_CREATE 1
+#define SVM_VMGEXIT_AP_DESTROY 2
+#define SVM_VMGEXIT_HV_FEATURES 0x8000fffd
+#define SVM_VMGEXIT_TERM_REQUEST 0x8000fffe
+#define SVM_VMGEXIT_TERM_REASON(reason_set, reason_code) \
+ /* SW_EXITINFO1[3:0] */ \
+ (((((u64)reason_set) & 0xf)) | \
+ /* SW_EXITINFO1[11:4] */ \
+ ((((u64)reason_code) & 0xff) << 4))
+#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff
+
+/* Exit code reserved for hypervisor/software use */
+#define SVM_EXIT_SW 0xf0000000
#define SVM_EXIT_ERR -1
@@ -171,9 +218,25 @@
{ SVM_EXIT_MONITOR, "monitor" }, \
{ SVM_EXIT_MWAIT, "mwait" }, \
{ SVM_EXIT_XSETBV, "xsetbv" }, \
+ { SVM_EXIT_EFER_WRITE_TRAP, "write_efer_trap" }, \
+ { SVM_EXIT_CR0_WRITE_TRAP, "write_cr0_trap" }, \
+ { SVM_EXIT_CR4_WRITE_TRAP, "write_cr4_trap" }, \
+ { SVM_EXIT_CR8_WRITE_TRAP, "write_cr8_trap" }, \
+ { SVM_EXIT_INVPCID, "invpcid" }, \
{ SVM_EXIT_NPF, "npf" }, \
{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \
+ { SVM_EXIT_VMGEXIT, "vmgexit" }, \
+ { SVM_VMGEXIT_MMIO_READ, "vmgexit_mmio_read" }, \
+ { SVM_VMGEXIT_MMIO_WRITE, "vmgexit_mmio_write" }, \
+ { SVM_VMGEXIT_NMI_COMPLETE, "vmgexit_nmi_complete" }, \
+ { SVM_VMGEXIT_AP_HLT_LOOP, "vmgexit_ap_hlt_loop" }, \
+ { SVM_VMGEXIT_AP_JUMP_TABLE, "vmgexit_ap_jump_table" }, \
+ { SVM_VMGEXIT_PSC, "vmgexit_page_state_change" }, \
+ { SVM_VMGEXIT_GUEST_REQUEST, "vmgexit_guest_request" }, \
+ { SVM_VMGEXIT_EXT_GUEST_REQUEST, "vmgexit_ext_guest_request" }, \
+ { SVM_VMGEXIT_AP_CREATION, "vmgexit_ap_creation" }, \
+ { SVM_VMGEXIT_HV_FEATURES, "vmgexit_hypervisor_feature" }, \
{ SVM_EXIT_ERR, "invalid_guest_state" }
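
One non-obvious addition here is SVM_VMGEXIT_TERM_REASON(), which packs a termination reason into SW_EXITINFO1 exactly as its inline comments say: the reason set in bits 3:0 and the reason code in bits 11:4. For illustration only (the values are made up, and u64 is assumed to be typedef'd as in the kernel):

u64 info1 = SVM_VMGEXIT_TERM_REASON(0, 2);	/* set 0, code 2 -> 0x20 */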
diff --git a/tools/arch/x86/include/uapi/asm/unistd.h b/tools/arch/x86/include/uapi/asm/unistd.h
index 196fdd02b8b1..be5e2e747f50 100644
--- a/tools/arch/x86/include/uapi/asm/unistd.h
+++ b/tools/arch/x86/include/uapi/asm/unistd.h
@@ -2,8 +2,15 @@
#ifndef _UAPI_ASM_X86_UNISTD_H
#define _UAPI_ASM_X86_UNISTD_H
-/* x32 syscall flag bit */
-#define __X32_SYSCALL_BIT 0x40000000UL
+/*
+ * x32 syscall flag bit. Some user programs expect syscall NR macros
+ * and __X32_SYSCALL_BIT to have type int, even though syscall numbers
+ * are, for practical purposes, unsigned long.
+ *
+ * Fortunately, expressions like (nr & ~__X32_SYSCALL_BIT) do the right
+ * thing regardless.
+ */
+#define __X32_SYSCALL_BIT 0x40000000
#ifndef __KERNEL__
# ifdef __i386__
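
The new comment's point about (nr & ~__X32_SYSCALL_BIT) is easy to check: even though ~__X32_SYSCALL_BIT is now a negative int, it sign-extends to the intended unsigned long mask, so bit 30 is cleared either way. A two-line illustration, not from the patch, with a made-up x32 syscall number:

unsigned long nr     = 520 | __X32_SYSCALL_BIT;	/* hypothetical x32 syscall number */
unsigned long native = nr & ~__X32_SYSCALL_BIT;	/* bit 30 cleared: back to 520 */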
diff --git a/tools/arch/x86/include/uapi/asm/unistd_32.h b/tools/arch/x86/include/uapi/asm/unistd_32.h
new file mode 100644
index 000000000000..b8ddfc4c4ab0
--- /dev/null
+++ b/tools/arch/x86/include/uapi/asm/unistd_32.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NR_fork
+#define __NR_fork 2
+#endif
+#ifndef __NR_getppid
+#define __NR_getppid 64
+#endif
+#ifndef __NR_getpgid
+#define __NR_getpgid 132
+#endif
+#ifndef __NR_gettid
+#define __NR_gettid 224
+#endif
+#ifndef __NR_futex
+#define __NR_futex 240
+#endif
+#ifndef __NR_getcpu
+#define __NR_getcpu 318
+#endif
+#ifndef __NR_perf_event_open
+#define __NR_perf_event_open 336
+#endif
+#ifndef __NR_setns
+#define __NR_setns 346
+#endif
diff --git a/tools/arch/x86/include/uapi/asm/unistd_64.h b/tools/arch/x86/include/uapi/asm/unistd_64.h
new file mode 100644
index 000000000000..f70d2cada256
--- /dev/null
+++ b/tools/arch/x86/include/uapi/asm/unistd_64.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NR_fork
+#define __NR_fork 57
+#endif
+#ifndef __NR_execve
+#define __NR_execve 59
+#endif
+#ifndef __NR_getppid
+#define __NR_getppid 110
+#endif
+#ifndef __NR_getpgid
+#define __NR_getpgid 121
+#endif
+#ifndef __NR_gettid
+#define __NR_gettid 186
+#endif
+#ifndef __NR_futex
+#define __NR_futex 202
+#endif
+#ifndef __NR_perf_event_open
+#define __NR_perf_event_open 298
+#endif
+#ifndef __NR_setns
+#define __NR_setns 308
+#endif
+#ifndef __NR_getcpu
+#define __NR_getcpu 309
+#endif
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index e95b72ec19bc..a5faf6d88f1b 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -27,11 +27,13 @@
#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000
+#define VMX_EXIT_REASONS_SGX_ENCLAVE_MODE 0x08000000
#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
#define EXIT_REASON_TRIPLE_FAULT 2
#define EXIT_REASON_INIT_SIGNAL 3
+#define EXIT_REASON_SIPI_SIGNAL 4
#define EXIT_REASON_INTERRUPT_WINDOW 7
#define EXIT_REASON_NMI_WINDOW 8
@@ -88,12 +90,15 @@
#define EXIT_REASON_XRSTORS 64
#define EXIT_REASON_UMWAIT 67
#define EXIT_REASON_TPAUSE 68
+#define EXIT_REASON_BUS_LOCK 74
+#define EXIT_REASON_NOTIFY 75
#define VMX_EXIT_REASONS \
{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
{ EXIT_REASON_INIT_SIGNAL, "INIT_SIGNAL" }, \
+ { EXIT_REASON_SIPI_SIGNAL, "SIPI_SIGNAL" }, \
{ EXIT_REASON_INTERRUPT_WINDOW, "INTERRUPT_WINDOW" }, \
{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
@@ -148,7 +153,12 @@
{ EXIT_REASON_XSAVES, "XSAVES" }, \
{ EXIT_REASON_XRSTORS, "XRSTORS" }, \
{ EXIT_REASON_UMWAIT, "UMWAIT" }, \
- { EXIT_REASON_TPAUSE, "TPAUSE" }
+ { EXIT_REASON_TPAUSE, "TPAUSE" }, \
+ { EXIT_REASON_BUS_LOCK, "BUS_LOCK" }, \
+ { EXIT_REASON_NOTIFY, "NOTIFY" }
+
+#define VMX_EXIT_REASON_FLAGS \
+ { VMX_EXIT_REASONS_FAILED_VMENTRY, "FAILED_VMENTRY" }
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
diff --git a/tools/arch/x86/intel_sdsi/Makefile b/tools/arch/x86/intel_sdsi/Makefile
new file mode 100644
index 000000000000..5de2288cda79
--- /dev/null
+++ b/tools/arch/x86/intel_sdsi/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Intel Software Defined Silicon provisioning tool
+
+intel_sdsi: intel_sdsi.c
+
+CFLAGS = -Wextra
+
+BINDIR ?= /usr/sbin
+
+override CFLAGS += -O2 -Wall
+
+%: %.c
+ $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
+
+.PHONY : clean
+clean :
+ @rm -f intel_sdsi
+
+install : intel_sdsi
+ install -d $(DESTDIR)$(BINDIR)
+ install -m 755 -p intel_sdsi $(DESTDIR)$(BINDIR)/intel_sdsi
diff --git a/tools/arch/x86/intel_sdsi/intel_sdsi.c b/tools/arch/x86/intel_sdsi/intel_sdsi.c
new file mode 100644
index 000000000000..2cd92761f171
--- /dev/null
+++ b/tools/arch/x86/intel_sdsi/intel_sdsi.c
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * sdsi: Intel On Demand (formerly Software Defined Silicon) tool for
+ * provisioning certificates and activation payloads on supported cpus.
+ *
+ * See https://github.com/intel/intel-sdsi/blob/master/os-interface.rst
+ * for register descriptions.
+ *
+ * Copyright (C) 2022 Intel Corporation. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/types.h>
+
+#ifndef __packed
+#define __packed __attribute__((packed))
+#endif
+
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+
+#define SDSI_DEV "intel_vsec.sdsi"
+#define AUX_DEV_PATH "/sys/bus/auxiliary/devices/"
+#define SDSI_PATH (AUX_DEV_PATH SDSI_DEV)
+#define GUID_V1 0x6dd191
+#define REGS_SIZE_GUID_V1 72
+#define GUID_V2 0xF210D9EF
+#define REGS_SIZE_GUID_V2 80
+#define STATE_CERT_MAX_SIZE 4096
+#define METER_CERT_MAX_SIZE 4096
+#define STATE_MAX_NUM_LICENSES 16
+#define STATE_MAX_NUM_IN_BUNDLE (uint32_t)8
+#define METER_MAX_NUM_BUNDLES 8
+
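+/* Round x up to the next multiple of y; y must be a power of two */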
+#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
+#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
+
+struct nvram_content_auth_err_sts {
+ uint64_t reserved:3;
+ uint64_t sdsi_content_auth_err:1;
+ uint64_t reserved1:1;
+ uint64_t sdsi_metering_auth_err:1;
+ uint64_t reserved2:58;
+};
+
+struct enabled_features {
+ uint64_t reserved:3;
+ uint64_t sdsi:1;
+ uint64_t reserved1:8;
+ uint64_t attestation:1;
+ uint64_t reserved2:13;
+ uint64_t metering:1;
+ uint64_t reserved3:37;
+};
+
+struct key_provision_status {
+ uint64_t reserved:1;
+ uint64_t license_key_provisioned:1;
+ uint64_t reserved2:62;
+};
+
+struct auth_fail_count {
+ uint64_t key_failure_count:3;
+ uint64_t key_failure_threshold:3;
+ uint64_t auth_failure_count:3;
+ uint64_t auth_failure_threshold:3;
+ uint64_t reserved:52;
+};
+
+struct availability {
+ uint64_t reserved:48;
+ uint64_t available:3;
+ uint64_t threshold:3;
+ uint64_t reserved2:10;
+};
+
+struct nvram_update_limit {
+ uint64_t reserved:12;
+ uint64_t sdsi_50_pct:1;
+ uint64_t sdsi_75_pct:1;
+ uint64_t sdsi_90_pct:1;
+ uint64_t reserved2:49;
+};
+
+struct sdsi_regs {
+ uint64_t ppin;
+ struct nvram_content_auth_err_sts auth_err_sts;
+ struct enabled_features en_features;
+ struct key_provision_status key_prov_sts;
+ struct auth_fail_count auth_fail_count;
+ struct availability prov_avail;
+ struct nvram_update_limit limits;
+ uint64_t pcu_cr3_capid_cfg;
+ union {
+ struct {
+ uint64_t socket_id;
+ } v1;
+ struct {
+ uint64_t reserved;
+ uint64_t socket_id;
+ uint64_t reserved2;
+ } v2;
+ } extra;
+};
+#define CONTENT_TYPE_LK_ENC 0xD
+#define CONTENT_TYPE_LK_BLOB_ENC 0xE
+
+struct state_certificate {
+ uint32_t content_type;
+ uint32_t region_rev_id;
+ uint32_t header_size;
+ uint32_t total_size;
+ uint32_t key_size;
+ uint32_t num_licenses;
+};
+
+struct license_key_info {
+ uint32_t key_rev_id;
+ uint64_t key_image_content[6];
+} __packed;
+
+#define LICENSE_BLOB_SIZE(l) (((l) & 0x7fffffff) * 4)
+#define LICENSE_VALID(l) (!!((l) & 0x80000000))
+
+// License Group Types
+#define LBT_ONE_TIME_UPGRADE 1
+#define LBT_METERED_UPGRADE 2
+
+struct license_blob_content {
+ uint32_t type;
+ uint64_t id;
+ uint64_t ppin;
+ uint64_t previous_ppin;
+ uint32_t rev_id;
+ uint32_t num_bundles;
+} __packed;
+
+struct bundle_encoding {
+ uint32_t encoding;
+ uint32_t encoding_rsvd[7];
+};
+
+struct meter_certificate {
+ uint32_t block_signature;
+ uint32_t counter_unit;
+ uint64_t ppin;
+ uint32_t bundle_length;
+ uint32_t reserved;
+ uint32_t mmrc_encoding;
+ uint32_t mmrc_counter;
+};
+
+struct bundle_encoding_counter {
+ uint32_t encoding;
+ uint32_t counter;
+};
+
+struct sdsi_dev {
+ struct sdsi_regs regs;
+ struct state_certificate sc;
+ char *dev_name;
+ char *dev_path;
+ uint32_t guid;
+};
+
+enum command {
+ CMD_SOCKET_INFO,
+ CMD_METER_CERT,
+ CMD_STATE_CERT,
+ CMD_PROV_AKC,
+ CMD_PROV_CAP,
+};
+
+static void sdsi_list_devices(void)
+{
+ struct dirent *entry;
+ DIR *aux_dir;
+ bool found = false;
+
+ aux_dir = opendir(AUX_DEV_PATH);
+ if (!aux_dir) {
+ fprintf(stderr, "Cannot open directory %s\n", AUX_DEV_PATH);
+ return;
+ }
+
+ while ((entry = readdir(aux_dir))) {
+ if (!strncmp(SDSI_DEV, entry->d_name, strlen(SDSI_DEV))) {
+ found = true;
+ printf("%s\n", entry->d_name);
+ }
+ }
+
+ if (!found)
+ fprintf(stderr, "No On Demand devices found.\n");
+}
+
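+/*
+ * Re-read the 'registers' sysfs file into s->regs; the expected size
+ * depends on the device GUID.
+ */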
+static int sdsi_update_registers(struct sdsi_dev *s)
+{
+ FILE *regs_ptr;
+ int ret;
+
+ memset(&s->regs, 0, sizeof(s->regs));
+
+ /* Open the registers file */
+ ret = chdir(s->dev_path);
+ if (ret == -1) {
+ perror("chdir");
+ return ret;
+ }
+
+ regs_ptr = fopen("registers", "r");
+ if (!regs_ptr) {
+ perror("Could not open 'registers' file");
+ return -1;
+ }
+
+ if (s->guid != GUID_V1 && s->guid != GUID_V2) {
+ fprintf(stderr, "Unrecognized guid, 0x%x\n", s->guid);
+ fclose(regs_ptr);
+ return -1;
+ }
+
+ /* Update register info for this guid */
+ ret = fread(&s->regs, sizeof(uint8_t), sizeof(s->regs), regs_ptr);
+ if ((s->guid == GUID_V1 && ret != REGS_SIZE_GUID_V1) ||
+ (s->guid == GUID_V2 && ret != REGS_SIZE_GUID_V2)) {
+ fprintf(stderr, "Could not read 'registers' file\n");
+ fclose(regs_ptr);
+ return -1;
+ }
+
+ fclose(regs_ptr);
+
+ return 0;
+}
+
+static int sdsi_read_reg(struct sdsi_dev *s)
+{
+ int ret;
+
+ ret = sdsi_update_registers(s);
+ if (ret)
+ return ret;
+
+ /* Print register info for this guid */
+ printf("\n");
+ printf("Socket information for device %s\n", s->dev_name);
+ printf("\n");
+ printf("PPIN: 0x%lx\n", s->regs.ppin);
+ printf("NVRAM Content Authorization Error Status\n");
+ printf(" SDSi Auth Err Sts: %s\n", !!s->regs.auth_err_sts.sdsi_content_auth_err ? "Error" : "Okay");
+
+ if (!!s->regs.en_features.metering)
+ printf(" Metering Auth Err Sts: %s\n", !!s->regs.auth_err_sts.sdsi_metering_auth_err ? "Error" : "Okay");
+
+ printf("Enabled Features\n");
+ printf(" On Demand: %s\n", !!s->regs.en_features.sdsi ? "Enabled" : "Disabled");
+ printf(" Attestation: %s\n", !!s->regs.en_features.attestation ? "Enabled" : "Disabled");
+ printf(" Metering: %s\n", !!s->regs.en_features.metering ? "Enabled" : "Disabled");
+ printf("License Key (AKC) Provisioned: %s\n", !!s->regs.key_prov_sts.license_key_provisioned ? "Yes" : "No");
+ printf("Authorization Failure Count\n");
+ printf(" AKC Failure Count: %d\n", s->regs.auth_fail_count.key_failure_count);
+ printf(" AKC Failure Threshold: %d\n", s->regs.auth_fail_count.key_failure_threshold);
+ printf(" CAP Failure Count: %d\n", s->regs.auth_fail_count.auth_failure_count);
+ printf(" CAP Failure Threshold: %d\n", s->regs.auth_fail_count.auth_failure_threshold);
+ printf("Provisioning Availability\n");
+ printf(" Updates Available: %d\n", s->regs.prov_avail.available);
+ printf(" Updates Threshold: %d\n", s->regs.prov_avail.threshold);
+ printf("NVRAM Update Limit\n");
+ printf(" 50%% Limit Reached: %s\n", !!s->regs.limits.sdsi_50_pct ? "Yes" : "No");
+ printf(" 75%% Limit Reached: %s\n", !!s->regs.limits.sdsi_75_pct ? "Yes" : "No");
+ printf(" 90%% Limit Reached: %s\n", !!s->regs.limits.sdsi_90_pct ? "Yes" : "No");
+ if (s->guid == GUID_V1)
+ printf("Socket ID: %ld\n", s->regs.extra.v1.socket_id & 0xF);
+ else
+ printf("Socket ID: %ld\n", s->regs.extra.v2.socket_id & 0xF);
+
+ return 0;
+}
+
+static char *license_blob_type(uint32_t type)
+{
+ switch (type) {
+ case LBT_ONE_TIME_UPGRADE:
+ return "One time upgrade";
+ case LBT_METERED_UPGRADE:
+ return "Metered upgrade";
+ default:
+ return "Unknown license blob type";
+ }
+}
+
+static char *content_type(uint32_t type)
+{
+ switch (type) {
+ case CONTENT_TYPE_LK_ENC:
+ return "License key encoding";
+ case CONTENT_TYPE_LK_BLOB_ENC:
+ return "License key + Blob encoding";
+ default:
+ return "Unknown content type";
+ }
+}
+
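+/*
+ * Decode a 32-bit feature encoding into its 4-character name by reversing
+ * the byte order; the caller supplies the 5-byte buffer and terminating NUL.
+ */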
+static void get_feature(uint32_t encoding, char *feature)
+{
+ char *name = (char *)&encoding;
+
+ feature[3] = name[0];
+ feature[2] = name[1];
+ feature[1] = name[2];
+ feature[0] = name[3];
+}
+
+static int sdsi_meter_cert_show(struct sdsi_dev *s)
+{
+ char buf[METER_CERT_MAX_SIZE] = {0};
+ struct bundle_encoding_counter *bec;
+ struct meter_certificate *mc;
+ uint32_t count = 0;
+ FILE *cert_ptr;
+ int ret, size;
+
+ ret = sdsi_update_registers(s);
+ if (ret)
+ return ret;
+
+ if (!s->regs.en_features.sdsi) {
+ fprintf(stderr, "SDSi feature is present but not enabled.\n");
+ fprintf(stderr, " Unable to read meter certificate\n");
+ return -1;
+ }
+
+ if (!s->regs.en_features.metering) {
+ fprintf(stderr, "Metering not supported on this socket.\n");
+ return -1;
+ }
+
+ ret = chdir(s->dev_path);
+ if (ret == -1) {
+ perror("chdir");
+ return ret;
+ }
+
+ cert_ptr = fopen("meter_certificate", "r");
+ if (!cert_ptr) {
+ perror("Could not open 'meter_certificate' file");
+ return -1;
+ }
+
+ size = fread(buf, 1, sizeof(buf), cert_ptr);
+ if (!size) {
+ fprintf(stderr, "Could not read 'meter_certificate' file\n");
+ fclose(cert_ptr);
+ return -1;
+ }
+ fclose(cert_ptr);
+
+ mc = (struct meter_certificate *)buf;
+
+ printf("\n");
+ printf("Meter certificate for device %s\n", s->dev_name);
+ printf("\n");
+ printf("Block Signature: 0x%x\n", mc->block_signature);
+ printf("Count Unit: %dms\n", mc->counter_unit);
+ printf("PPIN: 0x%lx\n", mc->ppin);
+ printf("Feature Bundle Length: %d\n", mc->bundle_length);
+ printf("MMRC encoding: %d\n", mc->mmrc_encoding);
+ printf("MMRC counter: %d\n", mc->mmrc_counter);
+ if (mc->bundle_length % 8) {
+ fprintf(stderr, "Invalid bundle length\n");
+ return -1;
+ }
+
+ if (mc->bundle_length > METER_MAX_NUM_BUNDLES * 8) {
+ fprintf(stderr, "More than %d bundles: %d\n",
+ METER_MAX_NUM_BUNDLES, mc->bundle_length / 8);
+ return -1;
+ }
+
+ bec = (void *)mc + sizeof(*mc);
+
+ printf("Number of Feature Counters: %d\n", mc->bundle_length / 8);
+ while (count < mc->bundle_length / 8) {
+ char feature[5];
+
+ feature[4] = '\0';
+ get_feature(bec[count].encoding, feature);
+ printf(" %s: %d\n", feature, bec[count].counter);
+ count++;
+ }
+
+ return 0;
+}
+
+static int sdsi_state_cert_show(struct sdsi_dev *s)
+{
+ char buf[STATE_CERT_MAX_SIZE] = {0};
+ struct state_certificate *sc;
+ struct license_key_info *lki;
+ uint32_t offset = 0;
+ uint32_t count = 0;
+ FILE *cert_ptr;
+ int ret, size;
+
+ ret = sdsi_update_registers(s);
+ if (ret)
+ return ret;
+
+ if (!s->regs.en_features.sdsi) {
+ fprintf(stderr, "On Demand feature is present but not enabled.");
+ fprintf(stderr, " Unable to read state certificate\n");
+ return -1;
+ }
+
+ ret = chdir(s->dev_path);
+ if (ret == -1) {
+ perror("chdir");
+ return ret;
+ }
+
+ cert_ptr = fopen("state_certificate", "r");
+ if (!cert_ptr) {
+ perror("Could not open 'state_certificate' file");
+ return -1;
+ }
+
+ size = fread(buf, 1, sizeof(buf), cert_ptr);
+ if (!size) {
+ fprintf(stderr, "Could not read 'state_certificate' file\n");
+ fclose(cert_ptr);
+ return -1;
+ }
+ fclose(cert_ptr);
+
+ sc = (struct state_certificate *)buf;
+
+ /* Print register info for this guid */
+ printf("\n");
+ printf("State certificate for device %s\n", s->dev_name);
+ printf("\n");
+ printf("Content Type: %s\n", content_type(sc->content_type));
+ printf("Region Revision ID: %d\n", sc->region_rev_id);
+ printf("Header Size: %d\n", sc->header_size * 4);
+ printf("Total Size: %d\n", sc->total_size);
+ printf("OEM Key Size: %d\n", sc->key_size * 4);
+ printf("Number of Licenses: %d\n", sc->num_licenses);
+
+ /* Skip over the license sizes (4 bytes per license) to get the license key info */
+ lki = (void *)sc + sizeof(*sc) + (4 * sc->num_licenses);
+
+ printf("License blob Info:\n");
+ printf(" License Key Revision ID: 0x%x\n", lki->key_rev_id);
+ printf(" License Key Image Content: 0x%lx%lx%lx%lx%lx%lx\n",
+ lki->key_image_content[5], lki->key_image_content[4],
+ lki->key_image_content[3], lki->key_image_content[2],
+ lki->key_image_content[1], lki->key_image_content[0]);
+
+ while (count++ < sc->num_licenses) {
+ uint32_t blob_size_field = *(uint32_t *)(buf + 0x14 + count * 4);
+ uint32_t blob_size = LICENSE_BLOB_SIZE(blob_size_field);
+ bool license_valid = LICENSE_VALID(blob_size_field);
+ struct license_blob_content *lbc =
+ (void *)(sc) + // start of the state certificate
+ sizeof(*sc) + // size of the state certificate
+ (4 * sc->num_licenses) + // total size of the blob size blocks
+ sizeof(*lki) + // size of the license key info
+ offset; // offset to this blob content
+ struct bundle_encoding *bundle = (void *)(lbc) + sizeof(*lbc);
+ char feature[5];
+ uint32_t i;
+
+ printf(" Blob %d:\n", count - 1);
+ printf(" License blob size: %u\n", blob_size);
+ printf(" License is valid: %s\n", license_valid ? "Yes" : "No");
+ printf(" License blob type: %s\n", license_blob_type(lbc->type));
+ printf(" License blob ID: 0x%lx\n", lbc->id);
+ printf(" PPIN: 0x%lx\n", lbc->ppin);
+ printf(" Previous PPIN: 0x%lx\n", lbc->previous_ppin);
+ printf(" Blob revision ID: %u\n", lbc->rev_id);
+ printf(" Number of Features: %u\n", lbc->num_bundles);
+
+ feature[4] = '\0';
+
+ for (i = 0; i < min(lbc->num_bundles, STATE_MAX_NUM_IN_BUNDLE); i++) {
+ get_feature(bundle[i].encoding, feature);
+ printf(" Feature %d: %s\n", i, feature);
+ }
+
+ if (lbc->num_bundles > STATE_MAX_NUM_IN_BUNDLE)
+ fprintf(stderr, " Warning: %d > %d licenses in bundle reported.\n",
+ lbc->num_bundles, STATE_MAX_NUM_IN_BUNDLE);
+
+ offset += blob_size;
+ }
+
+ return 0;
+}
+
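+/*
+ * Write an AKC or CAP binary blob to the matching provision_akc or
+ * provision_cap sysfs attribute of the device.
+ */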
+static int sdsi_provision(struct sdsi_dev *s, char *bin_file, enum command command)
+{
+ int bin_fd, prov_fd, size, ret;
+ char buf[STATE_CERT_MAX_SIZE] = { 0 };
+ char cap[] = "provision_cap";
+ char akc[] = "provision_akc";
+ char *prov_file;
+
+ if (!bin_file) {
+ fprintf(stderr, "No binary file provided\n");
+ return -1;
+ }
+
+ /* Open the binary */
+ bin_fd = open(bin_file, O_RDONLY);
+ if (bin_fd == -1) {
+ fprintf(stderr, "Could not open file %s: %s\n", bin_file, strerror(errno));
+ return bin_fd;
+ }
+
+ prov_file = (command == CMD_PROV_AKC) ? akc : cap;
+
+ ret = chdir(s->dev_path);
+ if (ret == -1) {
+ perror("chdir");
+ close(bin_fd);
+ return ret;
+ }
+
+ /* Open the provision file */
+ prov_fd = open(prov_file, O_WRONLY);
+ if (prov_fd == -1) {
+ fprintf(stderr, "Could not open file %s: %s\n", prov_file, strerror(errno));
+ close(bin_fd);
+ return prov_fd;
+ }
+
+ /* Read the binary file into the buffer */
+ size = read(bin_fd, buf, STATE_CERT_MAX_SIZE);
+ if (size == -1) {
+ close(bin_fd);
+ close(prov_fd);
+ return -1;
+ }
+
+ ret = write(prov_fd, buf, size);
+ if (ret == -1) {
+ close(bin_fd);
+ close(prov_fd);
+ perror("Provisioning failed");
+ return ret;
+ }
+
+ printf("Provisioned %s file %s successfully\n", prov_file, bin_file);
+
+ close(bin_fd);
+ close(prov_fd);
+
+ return 0;
+}
+
+static int sdsi_provision_akc(struct sdsi_dev *s, char *bin_file)
+{
+ int ret;
+
+ ret = sdsi_update_registers(s);
+ if (ret)
+ return ret;
+
+ if (!s->regs.en_features.sdsi) {
+ fprintf(stderr, "On Demand feature is present but not enabled. Unable to provision\n");
+ return -1;
+ }
+
+ if (!s->regs.prov_avail.available) {
+ fprintf(stderr, "Maximum number of updates (%d) has been reached.\n",
+ s->regs.prov_avail.threshold);
+ return -1;
+ }
+
+ if (s->regs.auth_fail_count.key_failure_count ==
+ s->regs.auth_fail_count.key_failure_threshold) {
+ fprintf(stderr, "Maximum number of AKC provision failures (%d) has been reached.\n",
+ s->regs.auth_fail_count.key_failure_threshold);
+ fprintf(stderr, "Power cycle the system to reset the counter\n");
+ return -1;
+ }
+
+ return sdsi_provision(s, bin_file, CMD_PROV_AKC);
+}
+
+static int sdsi_provision_cap(struct sdsi_dev *s, char *bin_file)
+{
+ int ret;
+
+ ret = sdsi_update_registers(s);
+ if (ret)
+ return ret;
+
+ if (!s->regs.en_features.sdsi) {
+ fprintf(stderr, "On Demand feature is present but not enabled. Unable to provision\n");
+ return -1;
+ }
+
+ if (!s->regs.prov_avail.available) {
+ fprintf(stderr, "Maximum number of updates (%d) has been reached.\n",
+ s->regs.prov_avail.threshold);
+ return -1;
+ }
+
+ if (s->regs.auth_fail_count.auth_failure_count ==
+ s->regs.auth_fail_count.auth_failure_threshold) {
+ fprintf(stderr, "Maximum number of CAP provision failures (%d) has been reached.\n",
+ s->regs.auth_fail_count.auth_failure_threshold);
+ fprintf(stderr, "Power cycle the system to reset the counter\n");
+ return -1;
+ }
+
+ return sdsi_provision(s, bin_file, CMD_PROV_CAP);
+}
+
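+/* Read a single integer value from a sysfs attribute file */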
+static int read_sysfs_data(const char *file, int *value)
+{
+ char buff[16];
+ FILE *fp;
+
+ fp = fopen(file, "r");
+ if (!fp) {
+ perror(file);
+ return -1;
+ }
+
+ if (!fgets(buff, 16, fp)) {
+ fprintf(stderr, "Failed to read file '%s'", file);
+ fclose(fp);
+ return -1;
+ }
+
+ fclose(fp);
+ *value = strtol(buff, NULL, 0);
+
+ return 0;
+}
+
+static struct sdsi_dev *sdsi_create_dev(char *dev_no)
+{
+ int dev_name_len = sizeof(SDSI_DEV) + strlen(dev_no) + 1;
+ struct sdsi_dev *s;
+ int guid;
+ DIR *dir;
+
+ s = (struct sdsi_dev *)malloc(sizeof(*s));
+ if (!s) {
+ perror("malloc");
+ return NULL;
+ }
+
+ s->dev_name = (char *)malloc(sizeof(SDSI_DEV) + strlen(dev_no) + 1);
+ if (!s->dev_name) {
+ perror("malloc");
+ free(s);
+ return NULL;
+ }
+
+ snprintf(s->dev_name, dev_name_len, "%s.%s", SDSI_DEV, dev_no);
+
+ s->dev_path = (char *)malloc(sizeof(AUX_DEV_PATH) + dev_name_len);
+ if (!s->dev_path) {
+ perror("malloc");
+ free(s->dev_name);
+ free(s);
+ return NULL;
+ }
+
+ snprintf(s->dev_path, sizeof(AUX_DEV_PATH) + dev_name_len, "%s%s", AUX_DEV_PATH,
+ s->dev_name);
+ dir = opendir(s->dev_path);
+ if (!dir) {
+ fprintf(stderr, "Could not open directory '%s': %s\n", s->dev_path,
+ strerror(errno));
+ free(s->dev_path);
+ free(s->dev_name);
+ free(s);
+ return NULL;
+ }
+
+ if (chdir(s->dev_path) == -1) {
+ perror("chdir");
+ free(s->dev_path);
+ free(s->dev_name);
+ free(s);
+ return NULL;
+ }
+
+ if (read_sysfs_data("guid", &guid)) {
+ free(s->dev_path);
+ free(s->dev_name);
+ free(s);
+ return NULL;
+ }
+
+ s->guid = guid;
+
+ return s;
+}
+
+static void sdsi_free_dev(struct sdsi_dev *s)
+{
+ free(s->dev_path);
+ free(s->dev_name);
+ free(s);
+}
+
+static void usage(char *prog)
+{
+ printf("Usage: %s [-l] [-d DEVNO [-i] [-s] [-m] [-a FILE] [-c FILE]]\n", prog);
+}
+
+static void show_help(void)
+{
+ printf("Commands:\n");
+ printf(" %-18s\t%s\n", "-l, --list", "list available On Demand devices");
+ printf(" %-18s\t%s\n", "-d, --devno DEVNO", "On Demand device number");
+ printf(" %-18s\t%s\n", "-i, --info", "show socket information");
+ printf(" %-18s\t%s\n", "-s, --state", "show state certificate");
+ printf(" %-18s\t%s\n", "-m, --meter", "show meter certificate");
+ printf(" %-18s\t%s\n", "-a, --akc FILE", "provision socket with AKC FILE");
+ printf(" %-18s\t%s\n", "-c, --cap FILE", "provision socket with CAP FILE");
+}
+
+int main(int argc, char *argv[])
+{
+ char bin_file[PATH_MAX], *dev_no = NULL;
+ bool device_selected = false;
+ char *progname;
+ enum command command = -1;
+ struct sdsi_dev *s;
+ int ret = 0, opt;
+ int option_index = 0;
+
+ static struct option long_options[] = {
+ {"akc", required_argument, 0, 'a'},
+ {"cap", required_argument, 0, 'c'},
+ {"devno", required_argument, 0, 'd'},
+ {"help", no_argument, 0, 'h'},
+ {"info", no_argument, 0, 'i'},
+ {"list", no_argument, 0, 'l'},
+ {"meter", no_argument, 0, 'm'},
+ {"state", no_argument, 0, 's'},
+ {0, 0, 0, 0 }
+ };
+
+
+ progname = argv[0];
+
+ while ((opt = getopt_long_only(argc, argv, "+a:c:d:hilms", long_options,
+ &option_index)) != -1) {
+ switch (opt) {
+ case 'd':
+ dev_no = optarg;
+ device_selected = true;
+ break;
+ case 'l':
+ sdsi_list_devices();
+ return 0;
+ case 'i':
+ command = CMD_SOCKET_INFO;
+ break;
+ case 'm':
+ command = CMD_METER_CERT;
+ break;
+ case 's':
+ command = CMD_STATE_CERT;
+ break;
+ case 'a':
+ case 'c':
+ if (access(optarg, F_OK) != 0) {
+ fprintf(stderr, "Could not open file '%s': %s\n", optarg,
+ strerror(errno));
+ return -1;
+ }
+
+ if (!realpath(optarg, bin_file)) {
+ perror("realpath");
+ return -1;
+ }
+
+ command = (opt == 'a') ? CMD_PROV_AKC : CMD_PROV_CAP;
+ break;
+ case 'h':
+ usage(progname);
+ show_help();
+ return 0;
+ default:
+ usage(progname);
+ return -1;
+ }
+ }
+
+ if (device_selected) {
+ s = sdsi_create_dev(dev_no);
+ if (!s)
+ return -1;
+
+ switch (command) {
+ case CMD_SOCKET_INFO:
+ ret = sdsi_read_reg(s);
+ break;
+ case CMD_METER_CERT:
+ ret = sdsi_meter_cert_show(s);
+ break;
+ case CMD_STATE_CERT:
+ ret = sdsi_state_cert_show(s);
+ break;
+ case CMD_PROV_AKC:
+ ret = sdsi_provision_akc(s, bin_file);
+ break;
+ case CMD_PROV_CAP:
+ ret = sdsi_provision_cap(s, bin_file);
+ break;
+ default:
+ fprintf(stderr, "No command specified\n");
+ return -1;
+ }
+
+ sdsi_free_dev(s);
+
+ } else {
+ fprintf(stderr, "No device specified\n");
+ return -1;
+ }
+
+ return ret;
+}
diff --git a/tools/arch/x86/kcpuid/Makefile b/tools/arch/x86/kcpuid/Makefile
new file mode 100644
index 000000000000..87b554fab14b
--- /dev/null
+++ b/tools/arch/x86/kcpuid/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for x86/kcpuid tool
+
+kcpuid : kcpuid.c
+
+CFLAGS = -Wextra
+
+BINDIR ?= /usr/sbin
+
+HWDATADIR ?= /usr/share/misc/
+
+override CFLAGS += -O2 -Wall -I../../../include
+
+%: %.c
+ $(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
+
+.PHONY : clean
+clean :
+ @rm -f kcpuid
+
+install : kcpuid
+ install -d $(DESTDIR)$(BINDIR)
+ install -m 755 -p kcpuid $(DESTDIR)$(BINDIR)/kcpuid
+ install -m 444 -p cpuid.csv $(HWDATADIR)/cpuid.csv
diff --git a/tools/arch/x86/kcpuid/cpuid.csv b/tools/arch/x86/kcpuid/cpuid.csv
new file mode 100644
index 000000000000..e0c25b75327e
--- /dev/null
+++ b/tools/arch/x86/kcpuid/cpuid.csv
@@ -0,0 +1,451 @@
+# The basic row format is:
+# LEAF, SUBLEAF, register_name, bits, short_name, long_description
+
+# Leaf 00H
+ 0, 0, EAX, 31:0, max_basic_leafs, Max input value for supported subleafs
+
+# Leaf 01H
+ 1, 0, EAX, 3:0, stepping, Stepping ID
+ 1, 0, EAX, 7:4, model, Model
+ 1, 0, EAX, 11:8, family, Family ID
+ 1, 0, EAX, 13:12, processor, Processor Type
+ 1, 0, EAX, 19:16, model_ext, Extended Model ID
+ 1, 0, EAX, 27:20, family_ext, Extended Family ID
+
+ 1, 0, EBX, 7:0, brand, Brand Index
+ 1, 0, EBX, 15:8, clflush_size, CLFLUSH line size (value * 8) in bytes
+ 1, 0, EBX, 23:16, max_cpu_id, Maximum number of addressable logical CPUs in this package
+ 1, 0, EBX, 31:24, apic_id, Initial APIC ID
+
+ 1, 0, ECX, 0, sse3, Streaming SIMD Extensions 3(SSE3)
+ 1, 0, ECX, 1, pclmulqdq, PCLMULQDQ instruction supported
+ 1, 0, ECX, 2, dtes64, DS area uses 64-bit layout
+ 1, 0, ECX, 3, mwait, MONITOR/MWAIT supported
+ 1, 0, ECX, 4, ds_cpl, CPL Qualified Debug Store which allows for branch message storage qualified by CPL
+ 1, 0, ECX, 5, vmx, Virtual Machine Extensions supported
+ 1, 0, ECX, 6, smx, Safer Mode Extension supported
+ 1, 0, ECX, 7, eist, Enhanced Intel SpeedStep Technology
+ 1, 0, ECX, 8, tm2, Thermal Monitor 2
+ 1, 0, ECX, 9, ssse3, Supplemental Streaming SIMD Extensions 3 (SSSE3)
+ 1, 0, ECX, 10, l1_ctx_id, L1 data cache could be set to either adaptive mode or shared mode (check IA32_MISC_ENABLE bit 24 definition)
+ 1, 0, ECX, 11, sdbg, IA32_DEBUG_INTERFACE MSR for silicon debug supported
+ 1, 0, ECX, 12, fma, FMA extensions using YMM state supported
+ 1, 0, ECX, 13, cmpxchg16b, 'CMPXCHG16B - Compare and Exchange Bytes' supported
+ 1, 0, ECX, 14, xtpr_update, xTPR Update Control supported
+ 1, 0, ECX, 15, pdcm, Perfmon and Debug Capability present
+ 1, 0, ECX, 17, pcid, Process-Context Identifiers feature present
+ 1, 0, ECX, 18, dca, Prefetching data from a memory mapped device supported
+ 1, 0, ECX, 19, sse4_1, SSE4.1 feature present
+ 1, 0, ECX, 20, sse4_2, SSE4.2 feature present
+ 1, 0, ECX, 21, x2apic, x2APIC supported
+ 1, 0, ECX, 22, movbe, MOVBE instruction supported
+ 1, 0, ECX, 23, popcnt, POPCNT instruction supported
+ 1, 0, ECX, 24, tsc_deadline_timer, LAPIC supports one-shot operation using a TSC deadline value
+ 1, 0, ECX, 25, aesni, AESNI instruction supported
+ 1, 0, ECX, 26, xsave, XSAVE/XRSTOR processor extended states (XSETBV/XGETBV/XCR0)
+ 1, 0, ECX, 27, osxsave, OS has set CR4.OSXSAVE bit to enable XSETBV/XGETBV/XCR0
+ 1, 0, ECX, 28, avx, AVX instruction supported
+ 1, 0, ECX, 29, f16c, 16-bit floating-point conversion instruction supported
+ 1, 0, ECX, 30, rdrand, RDRAND instruction supported
+
+ 1, 0, EDX, 0, fpu, x87 FPU on chip
+ 1, 0, EDX, 1, vme, Virtual-8086 Mode Enhancement
+ 1, 0, EDX, 2, de, Debugging Extensions
+ 1, 0, EDX, 3, pse, Page Size Extensions
+ 1, 0, EDX, 4, tsc, Time Stamp Counter
+ 1, 0, EDX, 5, msr, RDMSR and WRMSR Support
+ 1, 0, EDX, 6, pae, Physical Address Extensions
+ 1, 0, EDX, 7, mce, Machine Check Exception
+ 1, 0, EDX, 8, cx8, CMPXCHG8B instr
+ 1, 0, EDX, 9, apic, APIC on Chip
+ 1, 0, EDX, 11, sep, SYSENTER and SYSEXIT instrs
+ 1, 0, EDX, 12, mtrr, Memory Type Range Registers
+ 1, 0, EDX, 13, pge, Page Global Bit
+ 1, 0, EDX, 14, mca, Machine Check Architecture
+ 1, 0, EDX, 15, cmov, Conditional Move Instrs
+ 1, 0, EDX, 16, pat, Page Attribute Table
+ 1, 0, EDX, 17, pse36, 36-Bit Page Size Extension
+ 1, 0, EDX, 18, psn, Processor Serial Number
+ 1, 0, EDX, 19, clflush, CLFLUSH instr
+# 1, 0, EDX, 20,
+ 1, 0, EDX, 21, ds, Debug Store
+ 1, 0, EDX, 22, acpi, Thermal Monitor and Software Controlled Clock Facilities
+ 1, 0, EDX, 23, mmx, Intel MMX Technology
+ 1, 0, EDX, 24, fxsr, XSAVE and FXRSTOR Instrs
+ 1, 0, EDX, 25, sse, SSE
+ 1, 0, EDX, 26, sse2, SSE2
+ 1, 0, EDX, 27, ss, Self Snoop
+ 1, 0, EDX, 28, htt, Max APIC IDs reserved field is valid
+ 1, 0, EDX, 29, tm, Thermal Monitor
+# 1, 0, EDX, 30,
+ 1, 0, EDX, 31, pbe, Pending Break Enable
+
+# Leaf 02H
+# cache and TLB descriptor info
+
+# Leaf 03H
+# Processor Serial Number, introduced on Pentium III, not valid for
+# latest models
+
+# Leaf 04H
+# thread/core and cache topology
+ 4, 0, EAX, 4:0, cache_type, Cache type like instr/data or unified
+ 4, 0, EAX, 7:5, cache_level, Cache Level (starts at 1)
+ 4, 0, EAX, 8, cache_self_init, Cache Self Initialization
+ 4, 0, EAX, 9, fully_associate, Fully Associative cache
+# 4, 0, EAX, 13:10, resvd, resvd
+ 4, 0, EAX, 25:14, max_logical_id, Max number of addressable IDs for logical processors sharing the cache
+ 4, 0, EAX, 31:26, max_phy_id, Max number of addressable IDs for processors in phy package
+
+ 4, 0, EBX, 11:0, cache_linesize, Size of a cache line in bytes
+ 4, 0, EBX, 21:12, cache_partition, Physical Line partitions
+ 4, 0, EBX, 31:22, cache_ways, Ways of associativity
+ 4, 0, ECX, 31:0, cache_sets, Number of Sets - 1
+ 4, 0, EDX, 0, c_wbinvd, 1 means WBINVD/INVD is not guaranteed to act upon lower level caches of non-originating threads sharing this cache
+ 4, 0, EDX, 1, c_incl, Whether cache is inclusive of lower cache level
+ 4, 0, EDX, 2, c_comp_index, Complex Cache Indexing
+
+# Leaf 05H
+# MONITOR/MWAIT
+ 5, 0, EAX, 15:0, min_mon_size, Smallest monitor line size in bytes
+ 5, 0, EBX, 15:0, max_mon_size, Largest monitor line size in bytes
+ 5, 0, ECX, 0, mwait_ext, Enum of Monitor-Mwait extensions supported
+ 5, 0, ECX, 1, mwait_irq_break, Interrupts as break-events for MWAIT supported
+ 5, 0, EDX, 3:0, c0_sub_stats, Number of C0* sub C-states supported using MWAIT
+ 5, 0, EDX, 7:4, c1_sub_stats, Number of C1* sub C-states supported using MWAIT
+ 5, 0, EDX, 11:8, c2_sub_stats, Number of C2* sub C-states supported using MWAIT
+ 5, 0, EDX, 15:12, c3_sub_stats, Number of C3* sub C-states supported using MWAIT
+ 5, 0, EDX, 19:16, c4_sub_stats, Number of C4* sub C-states supported using MWAIT
+ 5, 0, EDX, 23:20, c5_sub_stats, Number of C5* sub C-states supported using MWAIT
+ 5, 0, EDX, 27:24, c6_sub_stats, Number of C6* sub C-states supported using MWAIT
+ 5, 0, EDX, 31:28, c7_sub_stats, Number of C7* sub C-states supported using MWAIT
+
+# Leaf 06H
+# Thermal & Power Management
+
+ 6, 0, EAX, 0, dig_temp, Digital temperature sensor supported
+ 6, 0, EAX, 1, turbo, Intel Turbo Boost
+ 6, 0, EAX, 2, arat, Always running APIC timer
+# 6, 0, EAX, 3, resv, Reserved
+ 6, 0, EAX, 4, pln, Power limit notifications supported
+ 6, 0, EAX, 5, ecmd, Clock modulation duty cycle extension supported
+ 6, 0, EAX, 6, ptm, Package thermal management supported
+ 6, 0, EAX, 7, hwp, HWP base register
+ 6, 0, EAX, 8, hwp_notify, HWP notification
+ 6, 0, EAX, 9, hwp_act_window, HWP activity window
+ 6, 0, EAX, 10, hwp_energy, HWP energy performance preference
+ 6, 0, EAX, 11, hwp_pkg_req, HWP package level request
+# 6, 0, EAX, 12, resv, Reserved
+ 6, 0, EAX, 13, hdc, HDC base registers supported
+ 6, 0, EAX, 14, turbo3, Turbo Boost Max 3.0
+ 6, 0, EAX, 15, hwp_cap, Highest Performance change supported
+ 6, 0, EAX, 16, hwp_peci, HWP PECI override is supported
+ 6, 0, EAX, 17, hwp_flex, Flexible HWP is supported
+ 6, 0, EAX, 18, hwp_fast, Fast access mode for the IA32_HWP_REQUEST MSR is supported
+# 6, 0, EAX, 19, resv, Reserved
+ 6, 0, EAX, 20, hwp_ignr, Ignoring Idle Logical Processor HWP request is supported
+
+ 6, 0, EBX, 3:0, therm_irq_thresh, Number of Interrupt Thresholds in Digital Thermal Sensor
+ 6, 0, ECX, 0, aperfmperf, Presence of IA32_MPERF and IA32_APERF
+ 6, 0, ECX, 3, energ_bias, Performance-energy bias preference supported
+
+# Leaf 07H
+# ECX == 0
+# AVX512 refers to https://en.wikipedia.org/wiki/AVX-512
+# XXX: Do we really need to enumerate each and every AVX512 sub-feature?
+
+ 7, 0, EBX, 0, fsgsbase, RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE supported
+ 7, 0, EBX, 1, tsc_adjust, TSC_ADJUST MSR supported
+ 7, 0, EBX, 2, sgx, Software Guard Extensions
+ 7, 0, EBX, 3, bmi1, BMI1
+ 7, 0, EBX, 4, hle, Hardware Lock Elision
+ 7, 0, EBX, 5, avx2, AVX2
+# 7, 0, EBX, 6, fdp_excp_only, x87 FPU Data Pointer updated only on x87 exceptions
+ 7, 0, EBX, 7, smep, Supervisor-Mode Execution Prevention
+ 7, 0, EBX, 8, bmi2, BMI2
+ 7, 0, EBX, 9, rep_movsb, Enhanced REP MOVSB/STOSB
+ 7, 0, EBX, 10, invpcid, INVPCID instruction
+ 7, 0, EBX, 11, rtm, Restricted Transactional Memory
+ 7, 0, EBX, 12, rdt_m, Intel RDT Monitoring capability
+ 7, 0, EBX, 13, depc_fpu_cs_ds, Deprecates FPU CS and FPU DS
+ 7, 0, EBX, 14, mpx, Memory Protection Extensions
+ 7, 0, EBX, 15, rdt_a, Intel RDT Allocation capability
+ 7, 0, EBX, 16, avx512f, AVX512 Foundation instr
+ 7, 0, EBX, 17, avx512dq, AVX512 Double and Quadword AVX512 instr
+ 7, 0, EBX, 18, rdseed, RDSEED instr
+ 7, 0, EBX, 19, adx, ADX instr
+ 7, 0, EBX, 20, smap, Supervisor Mode Access Prevention
+ 7, 0, EBX, 21, avx512ifma, AVX512 Integer Fused Multiply Add
+# 7, 0, EBX, 22, resvd, resvd
+ 7, 0, EBX, 23, clflushopt, CLFLUSHOPT instr
+ 7, 0, EBX, 24, clwb, CLWB instr
+ 7, 0, EBX, 25, intel_pt, Intel Processor Trace instr
+ 7, 0, EBX, 26, avx512pf, AVX512 Prefetch instr
+ 7, 0, EBX, 27, avx512er, AVX512 Exponential and Reciprocal instr
+ 7, 0, EBX, 28, avx512cd, AVX512 Conflict Detection instr
+ 7, 0, EBX, 29, sha, Intel Secure Hash Algorithm Extensions instr
+ 7, 0, EBX, 30, avx512bw, AVX512 Byte & Word instr
+ 7, 0, EBX, 31, avx512vl, AVX512 Vector Length Extensions (VL)
+ 7, 0, ECX, 0, prefetchwt1, PREFETCHWT1 instruction
+ 7, 0, ECX, 1, avx512vbmi, AVX512 Vector Byte Manipulation Instructions
+ 7, 0, ECX, 2, umip, User-mode Instruction Prevention
+
+ 7, 0, ECX, 3, pku, Protection Keys for User-mode pages
+ 7, 0, ECX, 4, ospke, CR4 PKE set to enable protection keys
+# 7, 0, ECX, 16:5, resvd, resvd
+ 7, 0, ECX, 21:17, mawau, The value of MAWAU used by the BNDLDX and BNDSTX instructions in 64-bit mode
+ 7, 0, ECX, 22, rdpid, RDPID and IA32_TSC_AUX
+# 7, 0, ECX, 29:23, resvd, resvd
+ 7, 0, ECX, 30, sgx_lc, SGX Launch Configuration
+# 7, 0, ECX, 31, resvd, resvd
+
+# Leaf 08H
+#
+
+
+# Leaf 09H
+# Direct Cache Access (DCA) information
+ 9, 0, ECX, 31:0, dca_cap, The value of IA32_PLATFORM_DCA_CAP
+
+# Leaf 0AH
+# Architectural Performance Monitoring
+#
+# Do we really need to print out the PMU related stuff?
+# Does normal user really care about it?
+#
+ 0xA, 0, EAX, 7:0, pmu_ver, Performance Monitoring Unit version
+ 0xA, 0, EAX, 15:8, pmu_gp_cnt_num, Number of general-purpose PMU counters per logical CPU
+ 0xA, 0, EAX, 23:16, pmu_cnt_bits, Bit width of PMU counters
+ 0xA, 0, EAX, 31:24, pmu_ebx_bits, Length of EBX bit vector to enumerate PMU events
+
+ 0xA, 0, EBX, 0, pmu_no_core_cycle_evt, Core cycle event not available
+ 0xA, 0, EBX, 1, pmu_no_instr_ret_evt, Instruction retired event not available
+ 0xA, 0, EBX, 2, pmu_no_ref_cycle_evt, Reference cycles event not available
+ 0xA, 0, EBX, 3, pmu_no_llc_ref_evt, Last-level cache reference event not available
+ 0xA, 0, EBX, 4, pmu_no_llc_mis_evt, Last-level cache misses event not available
+ 0xA, 0, EBX, 5, pmu_no_br_instr_ret_evt, Branch instruction retired event not available
+ 0xA, 0, EBX, 6, pmu_no_br_mispredict_evt, Branch mispredict retired event not available
+
+ 0xA, 0, ECX, 4:0, pmu_fixed_cnt_num, Number of fixed-function PMU counters
+ 0xA, 0, ECX, 12:5, pmu_fixed_cnt_bits, Bit width of fixed-function PMU counters
+
+# Leaf 0BH
+# Extended Topology Enumeration Leaf
+#
+
+ 0xB, 0, EAX, 4:0, id_shift, Number of bits to shift right on x2APIC ID to get a unique topology ID of the next level type
+ 0xB, 0, EBX, 15:0, cpu_nr, Number of logical processors at this level type
+ 0xB, 0, ECX, 15:8, lvl_type, 0-Invalid 1-SMT 2-Core
+ 0xB, 0, EDX, 31:0, x2apic_id, x2APIC ID the current logical processor
+
+
+# Leaf 0DH
+# Processor Extended State
+
+ 0xD, 0, EAX, 0, x87, X87 state
+ 0xD, 0, EAX, 1, sse, SSE state
+ 0xD, 0, EAX, 2, avx, AVX state
+ 0xD, 0, EAX, 4:3, mpx, MPX state
+ 0xD, 0, EAX, 7:5, avx512, AVX-512 state
+ 0xD, 0, EAX, 9, pkru, PKRU state
+
+ 0xD, 0, EBX, 31:0, max_sz_xcr0, Maximum size (bytes) required by enabled features in XCR0
+ 0xD, 0, ECX, 31:0, max_sz_xsave, Maximum size (bytes) of the XSAVE/XRSTOR save area
+
+ 0xD, 1, EAX, 0, xsaveopt, XSAVEOPT available
+ 0xD, 1, EAX, 1, xsavec, XSAVEC and compacted form supported
+ 0xD, 1, EAX, 2, xgetbv, XGETBV supported
+ 0xD, 1, EAX, 3, xsaves, XSAVES/XRSTORS and IA32_XSS supported
+
+ 0xD, 1, EBX, 31:0, max_sz_xcr0, Maximum size (bytes) required by enabled features in XCR0
+ 0xD, 1, ECX, 8, pt, PT state
+ 0xD, 1, ECX, 11, cet_usr, CET user state
+ 0xD, 1, ECX, 12, cet_supv, CET supervisor state
+ 0xD, 1, ECX, 13, hdc, HDC state
+ 0xD, 1, ECX, 16, hwp, HWP state
+
+# Leaf 0FH
+# Intel RDT Monitoring
+
+ 0xF, 0, EBX, 31:0, rmid_range, Maximum range (zero-based) of RMID within this physical processor of all types
+ 0xF, 0, EDX, 1, l3c_rdt_mon, L3 Cache RDT Monitoring supported
+
+ 0xF, 1, ECX, 31:0, rmid_range, Maximum range (zero-based) of RMID of this types
+ 0xF, 1, EDX, 0, l3c_ocp_mon, L3 Cache occupancy Monitoring supported
+ 0xF, 1, EDX, 1, l3c_tbw_mon, L3 Cache Total Bandwidth Monitoring supported
+ 0xF, 1, EDX, 2, l3c_lbw_mon, L3 Cache Local Bandwidth Monitoring supported
+
+# Leaf 10H
+# Intel RDT Allocation
+
+ 0x10, 0, EBX, 1, l3c_rdt_alloc, L3 Cache Allocation supported
+ 0x10, 0, EBX, 2, l2c_rdt_alloc, L2 Cache Allocation supported
+ 0x10, 0, EBX, 3, mem_bw_alloc, Memory Bandwidth Allocation supported
+
+
+# Leaf 12H
+# SGX Capability
+#
+# Some detailed SGX features not added yet
+
+ 0x12, 0, EAX, 0, sgx1, SGX1 function supported
+ 0x12, 1, EAX, 0, sgx2, SGX2 function supported
+
+
+# Leaf 14H
+# Intel Processor Tracer
+#
+
+# Leaf 15H
+# Time Stamp Counter and Nominal Core Crystal Clock Information
+
+ 0x15, 0, EAX, 31:0, tsc_denominator, The denominator of the TSC/"core crystal clock" ratio
+ 0x15, 0, EBX, 31:0, tsc_numerator, The numerator of the TSC/"core crystal clock" ratio
+ 0x15, 0, ECX, 31:0, nom_freq, Nominal frequency of the core crystal clock in Hz
+
+# Leaf 16H
+# Processor Frequency Information
+
+ 0x16, 0, EAX, 15:0, cpu_base_freq, Processor Base Frequency in MHz
+ 0x16, 0, EBX, 15:0, cpu_max_freq, Maximum Frequency in MHz
+ 0x16, 0, ECX, 15:0, bus_freq, Bus (Reference) Frequency in MHz
+
+# Leaf 17H
+# System-On-Chip Vendor Attribute
+
+ 0x17, 0, EAX, 31:0, max_socid, Maximum input value of supported sub-leaf
+ 0x17, 0, EBX, 15:0, soc_vid, SOC Vendor ID
+ 0x17, 0, EBX, 16, std_vid, SOC Vendor ID is assigned via an industry standard scheme
+ 0x17, 0, ECX, 31:0, soc_pid, SOC Project ID assigned by vendor
+ 0x17, 0, EDX, 31:0, soc_sid, SOC Stepping ID
+
+# Leaf 18H
+# Deterministic Address Translation Parameters
+
+
+# Leaf 19H
+# Key Locker Leaf
+
+
+# Leaf 1AH
+# Hybrid Information
+
+ 0x1A, 0, EAX, 31:24, core_type, 20H-Intel_Atom 40H-Intel_Core
+
+
+# Leaf 1FH
+# V2 Extended Topology - A preferred superset to leaf 0BH
+
+
+# According to SDM
+# 40000000H - 4FFFFFFFH is invalid range
+
+# Leaf 80000001H
+# Extended Processor Signature and Feature Bits
+
+0x80000001, 0, EAX, 27:20, extfamily, Extended family
+0x80000001, 0, EAX, 19:16, extmodel, Extended model
+0x80000001, 0, EAX, 11:8, basefamily, Description of Family
+0x80000001, 0, EAX, 7:4, basemodel, Model numbers vary with product
+0x80000001, 0, EAX, 3:0, stepping, Processor stepping (revision) for a specific model
+
+0x80000001, 0, EBX, 31:28, pkgtype, Specifies the package type
+
+0x80000001, 0, ECX, 0, lahf_lm, LAHF/SAHF available in 64-bit mode
+0x80000001, 0, ECX, 1, cmplegacy, Core multi-processing legacy mode
+0x80000001, 0, ECX, 2, svm, Indicates support for: VMRUN, VMLOAD, VMSAVE, CLGI, VMMCALL, and INVLPGA
+0x80000001, 0, ECX, 3, extapicspace, Extended APIC register space
+0x80000001, 0, ECX, 4, altmovecr8, Indicates support for LOCK MOV CR0 means MOV CR8
+0x80000001, 0, ECX, 5, lzcnt, LZCNT
+0x80000001, 0, ECX, 6, sse4a, EXTRQ, INSERTQ, MOVNTSS, and MOVNTSD instruction support
+0x80000001, 0, ECX, 7, misalignsse, Misaligned SSE Mode
+0x80000001, 0, ECX, 8, prefetchw, PREFETCHW
+0x80000001, 0, ECX, 9, osvw, OS Visible Work-around support
+0x80000001, 0, ECX, 10, ibs, Instruction Based Sampling
+0x80000001, 0, ECX, 11, xop, Extended operation support
+0x80000001, 0, ECX, 12, skinit, SKINIT and STGI support
+0x80000001, 0, ECX, 13, wdt, Watchdog timer support
+0x80000001, 0, ECX, 15, lwp, Lightweight profiling support
+0x80000001, 0, ECX, 16, fma4, Four-operand FMA instruction support
+0x80000001, 0, ECX, 17, tce, Translation cache extension
+0x80000001, 0, ECX, 22, TopologyExtensions, Indicates support for Core::X86::Cpuid::CachePropEax0 and Core::X86::Cpuid::ExtApicId
+0x80000001, 0, ECX, 23, perfctrextcore, Indicates support for Core::X86::Msr::PERF_CTL0 - 5 and Core::X86::Msr::PERF_CTR
+0x80000001, 0, ECX, 24, perfctrextdf, Indicates support for Core::X86::Msr::DF_PERF_CTL and Core::X86::Msr::DF_PERF_CTR
+0x80000001, 0, ECX, 26, databreakpointextension, Indicates data breakpoint support for Core::X86::Msr::DR0_ADDR_MASK, Core::X86::Msr::DR1_ADDR_MASK, Core::X86::Msr::DR2_ADDR_MASK and Core::X86::Msr::DR3_ADDR_MASK
+0x80000001, 0, ECX, 27, perftsc, Performance time-stamp counter supported
+0x80000001, 0, ECX, 28, perfctrextllc, Indicates support for L3 performance counter extensions
+0x80000001, 0, ECX, 29, mwaitextended, MWAITX and MONITORX capability is supported
+0x80000001, 0, ECX, 30, admskextn, Indicates support for address mask extension (to 32 bits and to all 4 DRs) for instruction breakpoints
+
+0x80000001, 0, EDX, 0, fpu, x87 floating point unit on-chip
+0x80000001, 0, EDX, 1, vme, Virtual-mode enhancements
+0x80000001, 0, EDX, 2, de, Debugging extensions, IO breakpoints, CR4.DE
+0x80000001, 0, EDX, 3, pse, Page-size extensions (4 MB pages)
+0x80000001, 0, EDX, 4, tsc, Time stamp counter, RDTSC/RDTSCP instructions, CR4.TSD
+0x80000001, 0, EDX, 5, msr, Model-specific registers (MSRs), with RDMSR and WRMSR instructions
+0x80000001, 0, EDX, 6, pae, Physical-address extensions (PAE)
+0x80000001, 0, EDX, 7, mce, Machine Check Exception, CR4.MCE
+0x80000001, 0, EDX, 8, cmpxchg8b, CMPXCHG8B instruction
+0x80000001, 0, EDX, 9, apic, advanced programmable interrupt controller (APIC) exists and is enabled
+0x80000001, 0, EDX, 11, sysret, SYSCALL/SYSRET supported
+0x80000001, 0, EDX, 12, mtrr, Memory-type range registers
+0x80000001, 0, EDX, 13, pge, Page global extension, CR4.PGE
+0x80000001, 0, EDX, 14, mca, Machine check architecture, MCG_CAP
+0x80000001, 0, EDX, 15, cmov, Conditional move instructions, CMOV, FCOMI, FCMOV
+0x80000001, 0, EDX, 16, pat, Page attribute table
+0x80000001, 0, EDX, 17, pse36, Page-size extensions
+0x80000001, 0, EDX, 20, exec_dis, Execute Disable Bit available
+0x80000001, 0, EDX, 22, mmxext, AMD extensions to MMX instructions
+0x80000001, 0, EDX, 23, mmx, MMX instructions
+0x80000001, 0, EDX, 24, fxsr, FXSAVE and FXRSTOR instructions
+0x80000001, 0, EDX, 25, ffxsr, FXSAVE and FXRSTOR instruction optimizations
+0x80000001, 0, EDX, 26, 1gb_page, 1GB page supported
+0x80000001, 0, EDX, 27, rdtscp, RDTSCP and IA32_TSC_AUX are available
+0x80000001, 0, EDX, 29, lm, 64b Architecture supported
+0x80000001, 0, EDX, 30, threednowext, AMD extensions to 3DNow! instructions
+0x80000001, 0, EDX, 31, threednow, 3DNow! instructions
+
+# Leaf 80000002H/80000003H/80000004H
+# Processor Brand String
+
+# Leaf 80000005H
+# Reserved
+
+# Leaf 80000006H
+# Extended L2 Cache Features
+
+0x80000006, 0, ECX, 7:0, clsize, Cache Line size in bytes
+0x80000006, 0, ECX, 15:12, l2c_assoc, L2 Associativity
+0x80000006, 0, ECX, 31:16, csize, Cache size in 1K units
+
+
+# Leaf 80000007H
+
+0x80000007, 0, EDX, 8, nonstop_tsc, Invariant TSC available
+
+
+# Leaf 80000008H
+
+0x80000008, 0, EAX, 7:0, phy_adr_bits, Physical Address Bits
+0x80000008, 0, EAX, 15:8, lnr_adr_bits, Linear Address Bits
+0x80000008, 0, EBX, 9, wbnoinvd, WBNOINVD instruction supported
+
+# 0x8000001E
+# EAX: Extended APIC ID
+0x8000001E, 0, EAX, 31:0, extended_apic_id, Extended APIC ID
+# EBX: Core Identifiers
+0x8000001E, 0, EBX, 7:0, core_id, Identifies the logical core ID
+0x8000001E, 0, EBX, 15:8, threads_per_core, The number of threads per core is threads_per_core + 1
+# ECX: Node Identifiers
+0x8000001E, 0, ECX, 7:0, node_id, Node ID
+0x8000001E, 0, ECX, 10:8, nodes_per_processor, Nodes per processor { 0: 1 node, else reserved }
+
+# 8000001F: AMD Secure Encryption
+0x8000001F, 0, EAX, 0, sme, Secure Memory Encryption
+0x8000001F, 0, EAX, 1, sev, Secure Encrypted Virtualization
+0x8000001F, 0, EAX, 2, vmpgflush, VM Page Flush MSR
+0x8000001F, 0, EAX, 3, seves, SEV Encrypted State
+0x8000001F, 0, EBX, 5:0, c-bit, Page table bit number used to enable memory encryption
+0x8000001F, 0, EBX, 11:6, mem_encrypt_physaddr_width, Reduction of physical address space in bits with SME enabled
+0x8000001F, 0, ECX, 31:0, num_encrypted_guests, Maximum ASID value that may be used for an SEV-enabled guest
+0x8000001F, 0, EDX, 31:0, minimum_sev_asid, Minimum ASID value that must be used for an SEV-enabled, SEV-ES-disabled guest
diff --git a/tools/arch/x86/kcpuid/kcpuid.c b/tools/arch/x86/kcpuid/kcpuid.c
new file mode 100644
index 000000000000..416f5b35dd8f
--- /dev/null
+++ b/tools/arch/x86/kcpuid/kcpuid.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+typedef unsigned int u32;
+typedef unsigned long long u64;
+
+char *def_csv = "/usr/share/misc/cpuid.csv";
+char *user_csv;
+
+
+/* Cover both single-bit flag and multiple-bits fields */
+struct bits_desc {
+ /* start and end bits */
+ int start, end;
+ /* 0 or 1 for 1-bit flag */
+ int value;
+ char simp[32];
+ char detail[256];
+};
+
+/* descriptor info for eax/ebx/ecx/edx */
+struct reg_desc {
+ /* number of valid entries */
+ int nr;
+ struct bits_desc descs[32];
+};
+
+enum cpuid_reg {
+ R_EAX = 0,
+ R_EBX,
+ R_ECX,
+ R_EDX,
+ NR_REGS
+};
+
+static const char * const reg_names[] = {
+ "EAX", "EBX", "ECX", "EDX",
+};
+
+struct subleaf {
+ u32 index;
+ u32 sub;
+ u32 eax, ebx, ecx, edx;
+ struct reg_desc info[NR_REGS];
+};
+
+/* Represent one leaf (basic or extended) */
+struct cpuid_func {
+ /*
+ * Array of subleafs for this func; if there are no subleafs,
+ * then leafs[0] is the main leaf
+ */
+ struct subleaf *leafs;
+ int nr;
+};
+
+struct cpuid_range {
+ /* array of main leafs */
+ struct cpuid_func *funcs;
+ /* number of valid leafs */
+ int nr;
+ bool is_ext;
+};
+
+/*
+ * basic: basic functions range: [0... ]
+ * ext: extended functions range: [0x80000000... ]
+ */
+struct cpuid_range *leafs_basic, *leafs_ext;
+
+static int num_leafs;
+static bool is_amd;
+static bool show_details;
+static bool show_raw;
+static bool show_flags_only = true;
+static u32 user_index = 0xFFFFFFFF;
+static u32 user_sub = 0xFFFFFFFF;
+static int flines;
+
+static inline void cpuid(u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+{
+ /* ecx is often an input as well as an output. */
+ asm volatile("cpuid"
+ : "=a" (*eax),
+ "=b" (*ebx),
+ "=c" (*ecx),
+ "=d" (*edx)
+ : "0" (*eax), "2" (*ecx));
+}
+
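+/* Leaves that enumerate subleafs; the set differs between Intel and AMD */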
+static inline bool has_subleafs(u32 f)
+{
+ if (f == 0x7 || f == 0xd)
+ return true;
+
+ if (is_amd) {
+ if (f == 0x8000001d)
+ return true;
+ return false;
+ }
+
+ switch (f) {
+ case 0x4:
+ case 0xb:
+ case 0xf:
+ case 0x10:
+ case 0x14:
+ case 0x18:
+ case 0x1f:
+ return true;
+ default:
+ return false;
+ }
+}
+
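+/* Dump one (sub)leaf's raw register values, grouping subleafs under their leaf */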
+static void leaf_print_raw(struct subleaf *leaf)
+{
+ if (has_subleafs(leaf->index)) {
+ if (leaf->sub == 0)
+ printf("0x%08x: subleafs:\n", leaf->index);
+
+ printf(" %2d: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n",
+ leaf->sub, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx);
+ } else {
+ printf("0x%08x: EAX=0x%08x, EBX=0x%08x, ECX=0x%08x, EDX=0x%08x\n",
+ leaf->index, leaf->eax, leaf->ebx, leaf->ecx, leaf->edx);
+ }
+}
+
+/* Return true if the input eax/ebx/ecx/edx are all zero */
+static bool cpuid_store(struct cpuid_range *range, u32 f, int subleaf,
+ u32 a, u32 b, u32 c, u32 d)
+{
+ struct cpuid_func *func;
+ struct subleaf *leaf;
+ int s = 0;
+
+ if (a == 0 && b == 0 && c == 0 && d == 0)
+ return true;
+
+ /*
+ * Cut off vendor-prefix from CPUID function as we're using it as an
+ * index into ->funcs.
+ */
+ func = &range->funcs[f & 0xffff];
+
+ if (!func->leafs) {
+ func->leafs = malloc(sizeof(struct subleaf));
+ if (!func->leafs)
+ perror("malloc func leaf");
+
+ func->nr = 1;
+ } else {
+ s = func->nr;
+ func->leafs = realloc(func->leafs, (s + 1) * sizeof(*leaf));
+ if (!func->leafs)
+ perror("realloc f->leafs");
+
+ func->nr++;
+ }
+
+ leaf = &func->leafs[s];
+
+ leaf->index = f;
+ leaf->sub = subleaf;
+ leaf->eax = a;
+ leaf->ebx = b;
+ leaf->ecx = c;
+ leaf->edx = d;
+
+ return false;
+}
+
+static void raw_dump_range(struct cpuid_range *range)
+{
+ u32 f;
+ int i;
+
+ printf("%s Leafs :\n", range->is_ext ? "Extended" : "Basic");
+ printf("================\n");
+
+ for (f = 0; (int)f < range->nr; f++) {
+ struct cpuid_func *func = &range->funcs[f];
+ u32 index = f;
+
+ if (range->is_ext)
+ index += 0x80000000;
+
+ /* Skip leaf without valid items */
+ if (!func->nr)
+ continue;
+
+ /* First item is the main leaf, followed by all subleafs */
+ for (i = 0; i < func->nr; i++)
+ leaf_print_raw(&func->leafs[i]);
+ }
+}
+
+#define MAX_SUBLEAF_NUM 32
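+/* Probe every leaf and subleaf of one range (basic or extended) and store the raw values */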
+struct cpuid_range *setup_cpuid_range(u32 input_eax)
+{
+ u32 max_func, idx_func;
+ int subleaf;
+ struct cpuid_range *range;
+ u32 eax, ebx, ecx, edx;
+ u32 f = input_eax;
+ int max_subleaf;
+ bool allzero;
+
+ eax = input_eax;
+ ebx = ecx = edx = 0;
+
+ cpuid(&eax, &ebx, &ecx, &edx);
+ max_func = eax;
+ idx_func = (max_func & 0xffff) + 1;
+
+ range = malloc(sizeof(struct cpuid_range));
+ if (!range)
+ perror("malloc range");
+
+ if (input_eax & 0x80000000)
+ range->is_ext = true;
+ else
+ range->is_ext = false;
+
+ range->funcs = malloc(sizeof(struct cpuid_func) * idx_func);
+ if (!range->funcs)
+ perror("malloc range->funcs");
+
+ range->nr = idx_func;
+ memset(range->funcs, 0, sizeof(struct cpuid_func) * idx_func);
+
+ for (; f <= max_func; f++) {
+ eax = f;
+ subleaf = ecx = 0;
+
+ cpuid(&eax, &ebx, &ecx, &edx);
+ allzero = cpuid_store(range, f, subleaf, eax, ebx, ecx, edx);
+ if (allzero)
+ continue;
+ num_leafs++;
+
+ if (!has_subleafs(f))
+ continue;
+
+ max_subleaf = MAX_SUBLEAF_NUM;
+
+ /*
+ * Some can provide the exact number of subleafs,
+ * others have to be tried (0xf)
+ */
+ if (f == 0x7 || f == 0x14 || f == 0x17 || f == 0x18)
+ max_subleaf = (eax & 0xff) + 1;
+
+ if (f == 0xb)
+ max_subleaf = 2;
+
+ for (subleaf = 1; subleaf < max_subleaf; subleaf++) {
+ eax = f;
+ ecx = subleaf;
+
+ cpuid(&eax, &ebx, &ecx, &edx);
+ allzero = cpuid_store(range, f, subleaf,
+ eax, ebx, ecx, edx);
+ if (allzero)
+ continue;
+ num_leafs++;
+ }
+
+ }
+
+ return range;
+}
+
+/*
+ * The basic row format for cpuid.csv is
+ * LEAF,SUBLEAF,register_name,bits,short name,long description
+ *
+ * like:
+ * 0, 0, EAX, 31:0, max_basic_leafs, Max input value for supported subleafs
+ * 1, 0, ECX, 0, sse3, Streaming SIMD Extensions 3(SSE3)
+ */
+static int parse_line(char *line)
+{
+ char *str;
+ int i;
+ struct cpuid_range *range;
+ struct cpuid_func *func;
+ struct subleaf *leaf;
+ u32 index;
+ u32 sub;
+ char buffer[512];
+ char *buf;
+ /*
+ * Tokens:
+ * 1. leaf
+ * 2. subleaf
+ * 3. register
+ * 4. bits
+ * 5. short name
+ * 6. long detail
+ */
+ char *tokens[6];
+ struct reg_desc *reg;
+ struct bits_desc *bdesc;
+ int reg_index;
+ char *start, *end;
+
+ /* Skip comments and NULL line */
+ if (line[0] == '#' || line[0] == '\n')
+ return 0;
+
+ strncpy(buffer, line, 511);
+ buffer[511] = 0;
+ str = buffer;
+ for (i = 0; i < 5; i++) {
+ tokens[i] = strtok(str, ",");
+ if (!tokens[i])
+ goto err_exit;
+ str = NULL;
+ }
+ tokens[5] = strtok(str, "\n");
+ if (!tokens[5])
+ goto err_exit;
+
+ /* index/main-leaf */
+ index = strtoull(tokens[0], NULL, 0);
+
+ if (index & 0x80000000)
+ range = leafs_ext;
+ else
+ range = leafs_basic;
+
+ index &= 0x7FFFFFFF;
+ /* Skip line parsing for non-existing indexes */
+ if ((int)index >= range->nr)
+ return -1;
+
+ func = &range->funcs[index];
+
+ /* Return if the index has no valid item on this platform */
+ if (!func->nr)
+ return 0;
+
+ /* subleaf */
+ sub = strtoul(tokens[1], NULL, 0);
+ if ((int)sub >= func->nr)
+ return -1;
+
+ leaf = &func->leafs[sub];
+ buf = tokens[2];
+
+ if (strcasestr(buf, "EAX"))
+ reg_index = R_EAX;
+ else if (strcasestr(buf, "EBX"))
+ reg_index = R_EBX;
+ else if (strcasestr(buf, "ECX"))
+ reg_index = R_ECX;
+ else if (strcasestr(buf, "EDX"))
+ reg_index = R_EDX;
+ else
+ goto err_exit;
+
+ reg = &leaf->info[reg_index];
+ bdesc = &reg->descs[reg->nr++];
+
+ /* bit flag or bits field */
+ buf = tokens[3];
+
+ end = strtok(buf, ":");
+ bdesc->end = strtoul(end, NULL, 0);
+ bdesc->start = bdesc->end;
+
+ /* start != NULL means it is bit fields */
+ start = strtok(NULL, ":");
+ if (start)
+ bdesc->start = strtoul(start, NULL, 0);
+
+ strcpy(bdesc->simp, tokens[4]);
+ strcpy(bdesc->detail, tokens[5]);
+ return 0;
+
+err_exit:
+ printf("Warning: wrong line format:\n");
+ printf("\tline[%d]: %s\n", flines, line);
+ return -1;
+}
+
+/* Parse csv file, and construct the array of all leafs and subleafs */
+static void parse_text(void)
+{
+ FILE *file;
+ char *filename, *line = NULL;
+ size_t len = 0;
+ int ret;
+
+ if (show_raw)
+ return;
+
+ filename = user_csv ? user_csv : def_csv;
+ file = fopen(filename, "r");
+ if (!file) {
+ /* Fallback to a csv in the same dir */
+ file = fopen("./cpuid.csv", "r");
+ }
+
+ if (!file) {
+ printf("Failed to open '%s'\n", filename);
+ return;
+ }
+
+ while (1) {
+ ret = getline(&line, &len, file);
+ flines++;
+ if (ret > 0)
+ parse_line(line);
+
+ if (feof(file))
+ break;
+ }
+
+ fclose(file);
+}
+
+
+/* Decode every eax/ebx/ecx/edx */
+static void decode_bits(u32 value, struct reg_desc *rdesc, enum cpuid_reg reg)
+{
+ struct bits_desc *bdesc;
+ int start, end, i;
+ u32 mask;
+
+ if (!rdesc->nr) {
+ if (show_details)
+ printf("\t %s: 0x%08x\n", reg_names[reg], value);
+ return;
+ }
+
+ for (i = 0; i < rdesc->nr; i++) {
+ bdesc = &rdesc->descs[i];
+
+ start = bdesc->start;
+ end = bdesc->end;
+ if (start == end) {
+ /* single bit flag */
+ if (value & (1 << start))
+ printf("\t%-20s %s%s\n",
+ bdesc->simp,
+ show_details ? "-" : "",
+ show_details ? bdesc->detail : ""
+ );
+ } else {
+ /* bit fields */
+ if (show_flags_only)
+ continue;
+
+ mask = ((u64)1 << (end - start + 1)) - 1;
+ printf("\t%-20s\t: 0x%-8x\t%s%s\n",
+ bdesc->simp,
+ (value >> start) & mask,
+ show_details ? "-" : "",
+ show_details ? bdesc->detail : ""
+ );
+ }
+ }
+}
+
+static void show_leaf(struct subleaf *leaf)
+{
+ if (!leaf)
+ return;
+
+ if (show_raw) {
+ leaf_print_raw(leaf);
+ } else {
+ if (show_details)
+ printf("CPUID_0x%x_ECX[0x%x]:\n",
+ leaf->index, leaf->sub);
+ }
+
+ decode_bits(leaf->eax, &leaf->info[R_EAX], R_EAX);
+ decode_bits(leaf->ebx, &leaf->info[R_EBX], R_EBX);
+ decode_bits(leaf->ecx, &leaf->info[R_ECX], R_ECX);
+ decode_bits(leaf->edx, &leaf->info[R_EDX], R_EDX);
+
+ if (!show_raw && show_details)
+ printf("\n");
+}
+
+static void show_func(struct cpuid_func *func)
+{
+ int i;
+
+ if (!func)
+ return;
+
+ for (i = 0; i < func->nr; i++)
+ show_leaf(&func->leafs[i]);
+}
+
+static void show_range(struct cpuid_range *range)
+{
+ int i;
+
+ for (i = 0; i < range->nr; i++)
+ show_func(&range->funcs[i]);
+}
+
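+/* Look up the stored cpuid_func for a user-supplied leaf index (basic or extended range) */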
+static inline struct cpuid_func *index_to_func(u32 index)
+{
+ struct cpuid_range *range;
+
+ range = (index & 0x80000000) ? leafs_ext : leafs_basic;
+ index &= 0x7FFFFFFF;
+
+ if (((index & 0xFFFF) + 1) > (u32)range->nr) {
+ printf("ERR: invalid input index (0x%x)\n", index);
+ return NULL;
+ }
+ return &range->funcs[index];
+}
+
+static void show_info(void)
+{
+ struct cpuid_func *func;
+
+ if (show_raw) {
+ /* Show all of the raw output of 'cpuid' instr */
+ raw_dump_range(leafs_basic);
+ raw_dump_range(leafs_ext);
+ return;
+ }
+
+ if (user_index != 0xFFFFFFFF) {
+ /* Only show specific leaf/subleaf info */
+ func = index_to_func(user_index);
+ if (!func)
+ return;
+
+ /* Dump the raw data also */
+ show_raw = true;
+
+ if (user_sub != 0xFFFFFFFF) {
+ if (user_sub + 1 <= (u32)func->nr) {
+ show_leaf(&func->leafs[user_sub]);
+ return;
+ }
+
+ printf("ERR: invalid input subleaf (0x%x)\n", user_sub);
+ }
+
+ show_func(func);
+ return;
+ }
+
+ printf("CPU features:\n=============\n\n");
+ show_range(leafs_basic);
+ show_range(leafs_ext);
+}
+
+static void setup_platform_cpuid(void)
+{
+ u32 eax, ebx, ecx, edx;
+
+ /* Check vendor */
+ eax = ebx = ecx = edx = 0;
+ cpuid(&eax, &ebx, &ecx, &edx);
+
+ /* "htuA" */
+ if (ebx == 0x68747541)
+ is_amd = true;
+
+ /* Setup leafs for the basic and extended range */
+ leafs_basic = setup_cpuid_range(0x0);
+ leafs_ext = setup_cpuid_range(0x80000000);
+}
+
+static void usage(void)
+{
+ printf("kcpuid [-abdhr] [-f file] [-l leaf] [-s subleaf]\n"
+ "\t-a|--all Show both bit flags and complex bit fields info\n"
+ "\t-b|--bitflags Show boolean flags only\n"
+ "\t-d|--detail Show details of the flag/fields (default)\n"
+ "\t-f|--file=csv Specify the cpuid csv file\n"
+ "\t-h|--help Show usage info\n"
+ "\t-l|--leaf=index Specify the leaf you want to check\n"
+ "\t-r|--raw Show raw cpuid data\n"
+ "\t-s|--subleaf=sub Specify the subleaf you want to check\n"
+ );
+}
+
+static struct option opts[] = {
+ { "all", no_argument, NULL, 'a' }, /* show both bit flags and fields */
+ { "bitflags", no_argument, NULL, 'b' }, /* only show bit flags, default on */
+ { "detail", no_argument, NULL, 'd' }, /* show detail descriptions */
+ { "file", required_argument, NULL, 'f' }, /* use user's cpuid file */
+ { "help", no_argument, NULL, 'h'}, /* show usage */
+ { "leaf", required_argument, NULL, 'l'}, /* only check a specific leaf */
+ { "raw", no_argument, NULL, 'r'}, /* show raw CPUID leaf data */
+ { "subleaf", required_argument, NULL, 's'}, /* check a specific subleaf */
+ { NULL, 0, NULL, 0 }
+};
+
+static int parse_options(int argc, char *argv[])
+{
+ int c;
+
+ while ((c = getopt_long(argc, argv, "abdf:hl:rs:",
+ opts, NULL)) != -1)
+ switch (c) {
+ case 'a':
+ show_flags_only = false;
+ break;
+ case 'b':
+ show_flags_only = true;
+ break;
+ case 'd':
+ show_details = true;
+ break;
+ case 'f':
+ user_csv = optarg;
+ break;
+ case 'h':
+ usage();
+ exit(1);
+ break;
+ case 'l':
+ /* main leaf */
+ user_index = strtoul(optarg, NULL, 0);
+ break;
+ case 'r':
+ show_raw = true;
+ break;
+ case 's':
+ /* subleaf */
+ user_sub = strtoul(optarg, NULL, 0);
+ break;
+ default:
+ printf("%s: Invalid option '%c'\n", argv[0], optopt);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Do 4 things in turn:
+ * 1. Parse user options
+ * 2. Parse and store all the CPUID leaf data supported on this platform
+ * 3. Parse the csv file, while skipping leafs which are not available
+ * on this platform
+ * 4. Print leafs info based on user options
+ */
+int main(int argc, char *argv[])
+{
+ if (parse_options(argc, argv))
+ return -1;
+
+ /* Setup the cpuid leafs of current platform */
+ setup_platform_cpuid();
+
+ /* Read and parse the 'cpuid.csv' */
+ parse_text();
+
+ show_info();
+ return 0;
+}
diff --git a/tools/arch/x86/lib/inat.c b/tools/arch/x86/lib/inat.c
index 4f5ed49e1b4e..dfbcc6405941 100644
--- a/tools/arch/x86/lib/inat.c
+++ b/tools/arch/x86/lib/inat.c
@@ -4,7 +4,7 @@
*
* Written by Masami Hiramatsu <mhiramat@redhat.com>
*/
-#include "../include/asm/insn.h"
+#include "../include/asm/insn.h" /* __ignore_sync_check__ */
/* Attribute tables are generated from opcode map */
#include "inat-tables.c"
diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c
index 0151dfc6da61..8fd63a067308 100644
--- a/tools/arch/x86/lib/insn.c
+++ b/tools/arch/x86/lib/insn.c
@@ -5,25 +5,43 @@
* Copyright (C) IBM Corporation, 2002, 2004, 2009
*/
+#include <linux/kernel.h>
#ifdef __KERNEL__
#include <linux/string.h>
#else
#include <string.h>
#endif
-#include "../include/asm/inat.h"
-#include "../include/asm/insn.h"
-
-#include "../include/asm/emulate_prefix.h"
+#include "../include/asm/inat.h" /* __ignore_sync_check__ */
+#include "../include/asm/insn.h" /* __ignore_sync_check__ */
+#include "../include/asm-generic/unaligned.h" /* __ignore_sync_check__ */
+
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+
+#include "../include/asm/emulate_prefix.h" /* __ignore_sync_check__ */
+
+#define leXX_to_cpu(t, r) \
+({ \
+ __typeof__(t) v; \
+ switch (sizeof(t)) { \
+ case 4: v = le32_to_cpu(r); break; \
+ case 2: v = le16_to_cpu(r); break; \
+ case 1: v = r; break; \
+ default: \
+ BUILD_BUG(); break; \
+ } \
+ v; \
+})
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+ ({ t r = get_unaligned((t *)(insn)->next_byte); (insn)->next_byte += sizeof(t); leXX_to_cpu(t, r); })
#define __peek_nbyte_next(t, insn, n) \
- ({ t r = *(t*)((insn)->next_byte + n); r; })
+ ({ t r = get_unaligned((t *)(insn)->next_byte + n); leXX_to_cpu(t, r); })
#define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
@@ -37,6 +55,7 @@
* insn_init() - initialize struct insn
* @insn: &struct insn to be initialized
* @kaddr: address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len: length of the insn buffer at @kaddr
* @x86_64: !0 for 64-bit kernel or 64-bit app
*/
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
@@ -97,8 +116,12 @@ static void insn_get_emulate_prefix(struct insn *insn)
* Populates the @insn->prefixes bitmap, and updates @insn->next_byte
* to point to the (first) opcode. No effect if @insn->prefixes.got
* is already set.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_prefixes(struct insn *insn)
+int insn_get_prefixes(struct insn *insn)
{
struct insn_field *prefixes = &insn->prefixes;
insn_attr_t attr;
@@ -106,7 +129,7 @@ void insn_get_prefixes(struct insn *insn)
int i, nb;
if (prefixes->got)
- return;
+ return 0;
insn_get_emulate_prefix(insn);
@@ -147,9 +170,9 @@ found:
b = insn->prefixes.bytes[3];
for (i = 0; i < nb; i++)
if (prefixes->bytes[i] == lb)
- prefixes->bytes[i] = b;
+ insn_set_byte(prefixes, i, b);
}
- insn->prefixes.bytes[3] = lb;
+ insn_set_byte(&insn->prefixes, 3, lb);
}
/* Decode REX prefix */
@@ -157,8 +180,7 @@ found:
b = peek_next(insn_byte_t, insn);
attr = inat_get_opcode_attribute(b);
if (inat_is_rex_prefix(attr)) {
- insn->rex_prefix.value = b;
- insn->rex_prefix.nbytes = 1;
+ insn_field_set(&insn->rex_prefix, b, 1);
insn->next_byte++;
if (X86_REX_W(b))
/* REX.W overrides opnd_size */
@@ -181,13 +203,13 @@ found:
if (X86_MODRM_MOD(b2) != 3)
goto vex_end;
}
- insn->vex_prefix.bytes[0] = b;
- insn->vex_prefix.bytes[1] = b2;
+ insn_set_byte(&insn->vex_prefix, 0, b);
+ insn_set_byte(&insn->vex_prefix, 1, b2);
if (inat_is_evex_prefix(attr)) {
b2 = peek_nbyte_next(insn_byte_t, insn, 2);
- insn->vex_prefix.bytes[2] = b2;
+ insn_set_byte(&insn->vex_prefix, 2, b2);
b2 = peek_nbyte_next(insn_byte_t, insn, 3);
- insn->vex_prefix.bytes[3] = b2;
+ insn_set_byte(&insn->vex_prefix, 3, b2);
insn->vex_prefix.nbytes = 4;
insn->next_byte += 4;
if (insn->x86_64 && X86_VEX_W(b2))
@@ -195,7 +217,7 @@ found:
insn->opnd_bytes = 8;
} else if (inat_is_vex3_prefix(attr)) {
b2 = peek_nbyte_next(insn_byte_t, insn, 2);
- insn->vex_prefix.bytes[2] = b2;
+ insn_set_byte(&insn->vex_prefix, 2, b2);
insn->vex_prefix.nbytes = 3;
insn->next_byte += 3;
if (insn->x86_64 && X86_VEX_W(b2))
@@ -207,7 +229,7 @@ found:
* Makes it easier to decode vex.W, vex.vvvv,
* vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
*/
- insn->vex_prefix.bytes[2] = b2 & 0x7f;
+ insn_set_byte(&insn->vex_prefix, 2, b2 & 0x7f);
insn->vex_prefix.nbytes = 2;
insn->next_byte += 2;
}
@@ -217,8 +239,10 @@ vex_end:
prefixes->got = 1;
+ return 0;
+
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -230,20 +254,29 @@ err_out:
* If necessary, first collects any preceding (prefix) bytes.
* Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
* is already 1.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_opcode(struct insn *insn)
+int insn_get_opcode(struct insn *insn)
{
struct insn_field *opcode = &insn->opcode;
+ int pfx_id, ret;
insn_byte_t op;
- int pfx_id;
+
if (opcode->got)
- return;
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
+ return 0;
+
+ if (!insn->prefixes.got) {
+ ret = insn_get_prefixes(insn);
+ if (ret)
+ return ret;
+ }
/* Get first opcode */
op = get_next(insn_byte_t, insn);
- opcode->bytes[0] = op;
+ insn_set_byte(opcode, 0, op);
opcode->nbytes = 1;
/* Check if there is VEX prefix or not */
@@ -254,9 +287,13 @@ void insn_get_opcode(struct insn *insn)
insn->attr = inat_get_avx_attribute(op, m, p);
if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
(!inat_accept_vex(insn->attr) &&
- !inat_is_group(insn->attr)))
- insn->attr = 0; /* This instruction is bad */
- goto end; /* VEX has only 1 byte for opcode */
+ !inat_is_group(insn->attr))) {
+ /* This instruction is bad */
+ insn->attr = 0;
+ return -EINVAL;
+ }
+ /* VEX has only 1 byte for opcode */
+ goto end;
}
insn->attr = inat_get_opcode_attribute(op);
@@ -267,13 +304,18 @@ void insn_get_opcode(struct insn *insn)
pfx_id = insn_last_prefix_id(insn);
insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
}
- if (inat_must_vex(insn->attr))
- insn->attr = 0; /* This instruction is bad */
+
+ if (inat_must_vex(insn->attr)) {
+ /* This instruction is bad */
+ insn->attr = 0;
+ return -EINVAL;
+ }
end:
opcode->got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -283,35 +325,49 @@ err_out:
* Populates @insn->modrm and updates @insn->next_byte to point past the
* ModRM byte, if any. If necessary, first collects the preceding bytes
* (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_modrm(struct insn *insn)
+int insn_get_modrm(struct insn *insn)
{
struct insn_field *modrm = &insn->modrm;
insn_byte_t pfx_id, mod;
+ int ret;
+
if (modrm->got)
- return;
- if (!insn->opcode.got)
- insn_get_opcode(insn);
+ return 0;
+
+ if (!insn->opcode.got) {
+ ret = insn_get_opcode(insn);
+ if (ret)
+ return ret;
+ }
if (inat_has_modrm(insn->attr)) {
mod = get_next(insn_byte_t, insn);
- modrm->value = mod;
- modrm->nbytes = 1;
+ insn_field_set(modrm, mod, 1);
if (inat_is_group(insn->attr)) {
pfx_id = insn_last_prefix_id(insn);
insn->attr = inat_get_group_attribute(mod, pfx_id,
insn->attr);
- if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
- insn->attr = 0; /* This is bad */
+ if (insn_is_avx(insn) && !inat_accept_vex(insn->attr)) {
+ /* Bad insn */
+ insn->attr = 0;
+ return -EINVAL;
+ }
}
}
if (insn->x86_64 && inat_is_force64(insn->attr))
insn->opnd_bytes = 8;
+
modrm->got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
@@ -325,16 +381,21 @@ err_out:
int insn_rip_relative(struct insn *insn)
{
struct insn_field *modrm = &insn->modrm;
+ int ret;
if (!insn->x86_64)
return 0;
- if (!modrm->got)
- insn_get_modrm(insn);
+
+ if (!modrm->got) {
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return 0;
+ }
/*
* For rip-relative instructions, the mod field (top 2 bits)
* is zero and the r/m field (bottom 3 bits) is 0x5.
*/
- return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
+ return (modrm->nbytes && (modrm->bytes[0] & 0xc7) == 0x5);
}
/**
@@ -343,27 +404,39 @@ int insn_rip_relative(struct insn *insn)
*
* If necessary, first collects the instruction up to and including the
* ModRM byte.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
*/
-void insn_get_sib(struct insn *insn)
+int insn_get_sib(struct insn *insn)
{
insn_byte_t modrm;
+ int ret;
if (insn->sib.got)
- return;
- if (!insn->modrm.got)
- insn_get_modrm(insn);
+ return 0;
+
+ if (!insn->modrm.got) {
+ ret = insn_get_modrm(insn);
+ if (ret)
+ return ret;
+ }
+
if (insn->modrm.nbytes) {
- modrm = (insn_byte_t)insn->modrm.value;
+ modrm = insn->modrm.bytes[0];
if (insn->addr_bytes != 2 &&
X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
- insn->sib.value = get_next(insn_byte_t, insn);
- insn->sib.nbytes = 1;
+ insn_field_set(&insn->sib,
+ get_next(insn_byte_t, insn), 1);
}
}
insn->sib.got = 1;
+ return 0;
+
err_out:
- return;
+ return -ENODATA;
}
@@ -374,15 +447,25 @@ err_out:
* If necessary, first collects the instruction up to and including the
* SIB byte.
* Displacement value is sign-expanded.
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
*/
-void insn_get_displacement(struct insn *insn)
+int insn_get_displacement(struct insn *insn)
{
insn_byte_t mod, rm, base;
+ int ret;
if (insn->displacement.got)
- return;
- if (!insn->sib.got)
- insn_get_sib(insn);
+ return 0;
+
+ if (!insn->sib.got) {
+ ret = insn_get_sib(insn);
+ if (ret)
+ return ret;
+ }
+
if (insn->modrm.nbytes) {
/*
* Interpreting the modrm byte:
@@ -407,27 +490,27 @@ void insn_get_displacement(struct insn *insn)
if (mod == 3)
goto out;
if (mod == 1) {
- insn->displacement.value = get_next(signed char, insn);
- insn->displacement.nbytes = 1;
+ insn_field_set(&insn->displacement,
+ get_next(signed char, insn), 1);
} else if (insn->addr_bytes == 2) {
if ((mod == 0 && rm == 6) || mod == 2) {
- insn->displacement.value =
- get_next(short, insn);
- insn->displacement.nbytes = 2;
+ insn_field_set(&insn->displacement,
+ get_next(short, insn), 2);
}
} else {
if ((mod == 0 && rm == 5) || mod == 2 ||
(mod == 0 && base == 5)) {
- insn->displacement.value = get_next(int, insn);
- insn->displacement.nbytes = 4;
+ insn_field_set(&insn->displacement,
+ get_next(int, insn), 4);
}
}
}
out:
insn->displacement.got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/* Decode moffset16/32/64. Return 0 if failed */
@@ -435,18 +518,14 @@ static int __get_moffset(struct insn *insn)
{
switch (insn->addr_bytes) {
case 2:
- insn->moffset1.value = get_next(short, insn);
- insn->moffset1.nbytes = 2;
+ insn_field_set(&insn->moffset1, get_next(short, insn), 2);
break;
case 4:
- insn->moffset1.value = get_next(int, insn);
- insn->moffset1.nbytes = 4;
+ insn_field_set(&insn->moffset1, get_next(int, insn), 4);
break;
case 8:
- insn->moffset1.value = get_next(int, insn);
- insn->moffset1.nbytes = 4;
- insn->moffset2.value = get_next(int, insn);
- insn->moffset2.nbytes = 4;
+ insn_field_set(&insn->moffset1, get_next(int, insn), 4);
+ insn_field_set(&insn->moffset2, get_next(int, insn), 4);
break;
default: /* opnd_bytes must be modified manually */
goto err_out;
@@ -464,13 +543,11 @@ static int __get_immv32(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
- insn->immediate.value = get_next(short, insn);
- insn->immediate.nbytes = 2;
+ insn_field_set(&insn->immediate, get_next(short, insn), 2);
break;
case 4:
case 8:
- insn->immediate.value = get_next(int, insn);
- insn->immediate.nbytes = 4;
+ insn_field_set(&insn->immediate, get_next(int, insn), 4);
break;
default: /* opnd_bytes must be modified manually */
goto err_out;
@@ -487,18 +564,15 @@ static int __get_immv(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
- insn->immediate1.value = get_next(short, insn);
- insn->immediate1.nbytes = 2;
+ insn_field_set(&insn->immediate1, get_next(short, insn), 2);
break;
case 4:
- insn->immediate1.value = get_next(int, insn);
+ insn_field_set(&insn->immediate1, get_next(int, insn), 4);
insn->immediate1.nbytes = 4;
break;
case 8:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- insn->immediate2.value = get_next(int, insn);
- insn->immediate2.nbytes = 4;
+ insn_field_set(&insn->immediate1, get_next(int, insn), 4);
+ insn_field_set(&insn->immediate2, get_next(int, insn), 4);
break;
default: /* opnd_bytes must be modified manually */
goto err_out;
@@ -515,12 +589,10 @@ static int __get_immptr(struct insn *insn)
{
switch (insn->opnd_bytes) {
case 2:
- insn->immediate1.value = get_next(short, insn);
- insn->immediate1.nbytes = 2;
+ insn_field_set(&insn->immediate1, get_next(short, insn), 2);
break;
case 4:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
+ insn_field_set(&insn->immediate1, get_next(int, insn), 4);
break;
case 8:
/* ptr16:64 is not exist (no segment) */
@@ -528,8 +600,7 @@ static int __get_immptr(struct insn *insn)
default: /* opnd_bytes must be modified manually */
goto err_out;
}
- insn->immediate2.value = get_next(unsigned short, insn);
- insn->immediate2.nbytes = 2;
+ insn_field_set(&insn->immediate2, get_next(unsigned short, insn), 2);
insn->immediate1.got = insn->immediate2.got = 1;
return 1;
@@ -538,20 +609,30 @@ err_out:
}
/**
- * insn_get_immediate() - Get the immediates of instruction
+ * insn_get_immediate() - Get the immediate in an instruction
* @insn: &struct insn containing instruction
*
* If necessary, first collects the instruction up to and including the
* displacement bytes.
* Basically, most of immediates are sign-expanded. Unsigned-value can be
- * get by bit masking with ((1 << (nbytes * 8)) - 1)
+ * computed by bit masking with ((1 << (nbytes * 8)) - 1)
+ *
+ * Returns:
+ * 0: on success
+ * < 0: on error
*/
-void insn_get_immediate(struct insn *insn)
+int insn_get_immediate(struct insn *insn)
{
+ int ret;
+
if (insn->immediate.got)
- return;
- if (!insn->displacement.got)
- insn_get_displacement(insn);
+ return 0;
+
+ if (!insn->displacement.got) {
+ ret = insn_get_displacement(insn);
+ if (ret)
+ return ret;
+ }
if (inat_has_moffset(insn->attr)) {
if (!__get_moffset(insn))
@@ -565,22 +646,17 @@ void insn_get_immediate(struct insn *insn)
switch (inat_immediate_size(insn->attr)) {
case INAT_IMM_BYTE:
- insn->immediate.value = get_next(signed char, insn);
- insn->immediate.nbytes = 1;
+ insn_field_set(&insn->immediate, get_next(signed char, insn), 1);
break;
case INAT_IMM_WORD:
- insn->immediate.value = get_next(short, insn);
- insn->immediate.nbytes = 2;
+ insn_field_set(&insn->immediate, get_next(short, insn), 2);
break;
case INAT_IMM_DWORD:
- insn->immediate.value = get_next(int, insn);
- insn->immediate.nbytes = 4;
+ insn_field_set(&insn->immediate, get_next(int, insn), 4);
break;
case INAT_IMM_QWORD:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- insn->immediate2.value = get_next(int, insn);
- insn->immediate2.nbytes = 4;
+ insn_field_set(&insn->immediate1, get_next(int, insn), 4);
+ insn_field_set(&insn->immediate2, get_next(int, insn), 4);
break;
case INAT_IMM_PTR:
if (!__get_immptr(insn))
@@ -599,14 +675,14 @@ void insn_get_immediate(struct insn *insn)
goto err_out;
}
if (inat_has_second_immediate(insn->attr)) {
- insn->immediate2.value = get_next(signed char, insn);
- insn->immediate2.nbytes = 1;
+ insn_field_set(&insn->immediate2, get_next(signed char, insn), 1);
}
done:
insn->immediate.got = 1;
+ return 0;
err_out:
- return;
+ return -ENODATA;
}
/**
@@ -615,13 +691,65 @@ err_out:
*
* If necessary, first collects the instruction up to and including the
* immediates bytes.
- */
-void insn_get_length(struct insn *insn)
+ *
+ * Returns:
+ * - 0 on success
+ * - < 0 on error
+ */
+int insn_get_length(struct insn *insn)
{
+ int ret;
+
if (insn->length)
- return;
- if (!insn->immediate.got)
- insn_get_immediate(insn);
+ return 0;
+
+ if (!insn->immediate.got) {
+ ret = insn_get_immediate(insn);
+ if (ret)
+ return ret;
+ }
+
insn->length = (unsigned char)((unsigned long)insn->next_byte
- (unsigned long)insn->kaddr);
+
+ return 0;
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+ insn->displacement.got && insn->immediate.got;
+}
+
+/**
+ * insn_decode() - Decode an x86 instruction
+ * @insn: &struct insn to be initialized
+ * @kaddr: address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len: length of the insn buffer at @kaddr
+ * @m: insn mode, see enum insn_mode
+ *
+ * Returns:
+ * 0: if decoding succeeded
+ * < 0: otherwise.
+ */
+int insn_decode(struct insn *insn, const void *kaddr, int buf_len, enum insn_mode m)
+{
+ int ret;
+
+#define INSN_MODE_KERN (enum insn_mode)-1 /* __ignore_sync_check__ mode is only valid in the kernel */
+
+ if (m == INSN_MODE_KERN)
+ insn_init(insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));
+ else
+ insn_init(insn, kaddr, buf_len, m == INSN_MODE_64);
+
+ ret = insn_get_length(insn);
+ if (ret)
+ return ret;
+
+ if (insn_complete(insn))
+ return 0;
+
+ return -EINVAL;
}
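
As context for the hunks above: the insn_get_*() helpers now return 0 or a negative error instead of being void, and insn_decode() becomes the single entry point for decoding. A minimal caller sketch based only on the kerneldoc shown above; the include path and the buffer handling are illustrative assumptions, not part of this patch:

	#include <asm/insn.h>	/* struct insn, insn_decode(), enum insn_mode */

	static int decoded_length(const void *buf, int buf_len)
	{
		struct insn insn;
		int ret;

		/* Decode one 64-bit instruction from buf; failures come back as -ENODATA/-EINVAL. */
		ret = insn_decode(&insn, buf, buf_len, INSN_MODE_64);
		if (ret < 0)
			return ret;

		return insn.length;	/* length of the fully decoded instruction */
	}
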
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
index df767afc690f..a91ac666f758 100644
--- a/tools/arch/x86/lib/memcpy_64.S
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -4,10 +4,11 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
-#include <asm/mcsafe_test.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
#include <asm/export.h>
+.section .noinstr.text, "ax"
+
/*
* We build a jump to memcpy_orig by default which gets NOPped out on
* the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
@@ -15,8 +16,6 @@
* to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
*/
-.weak memcpy
-
/*
* memcpy - Copy a memory block.
*
@@ -28,8 +27,7 @@
* Output:
* rax original destination
*/
-SYM_FUNC_START_ALIAS(__memcpy)
-SYM_FUNC_START_LOCAL(memcpy)
+SYM_TYPED_FUNC_START(__memcpy)
ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
"jmp memcpy_erms", X86_FEATURE_ERMS
@@ -40,24 +38,25 @@ SYM_FUNC_START_LOCAL(memcpy)
rep movsq
movl %edx, %ecx
rep movsb
- ret
-SYM_FUNC_END(memcpy)
-SYM_FUNC_END_ALIAS(__memcpy)
-EXPORT_SYMBOL(memcpy)
+ RET
+SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)
+SYM_FUNC_ALIAS(memcpy, __memcpy)
+EXPORT_SYMBOL(memcpy)
+
/*
* memcpy_erms() - enhanced fast string memcpy. This is faster and
* simpler than memcpy. Use memcpy_erms when possible.
*/
-SYM_FUNC_START(memcpy_erms)
+SYM_FUNC_START_LOCAL(memcpy_erms)
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
- ret
+ RET
SYM_FUNC_END(memcpy_erms)
-SYM_FUNC_START(memcpy_orig)
+SYM_FUNC_START_LOCAL(memcpy_orig)
movq %rdi, %rax
cmpq $0x20, %rdx
@@ -138,7 +137,7 @@ SYM_FUNC_START(memcpy_orig)
movq %r9, 1*8(%rdi)
movq %r10, -2*8(%rdi, %rdx)
movq %r11, -1*8(%rdi, %rdx)
- retq
+ RET
.p2align 4
.Lless_16bytes:
cmpl $8, %edx
@@ -150,7 +149,7 @@ SYM_FUNC_START(memcpy_orig)
movq -1*8(%rsi, %rdx), %r9
movq %r8, 0*8(%rdi)
movq %r9, -1*8(%rdi, %rdx)
- retq
+ RET
.p2align 4
.Lless_8bytes:
cmpl $4, %edx
@@ -163,7 +162,7 @@ SYM_FUNC_START(memcpy_orig)
movl -4(%rsi, %rdx), %r8d
movl %ecx, (%rdi)
movl %r8d, -4(%rdi, %rdx)
- retq
+ RET
.p2align 4
.Lless_3bytes:
subl $1, %edx
@@ -181,119 +180,6 @@ SYM_FUNC_START(memcpy_orig)
movb %cl, (%rdi)
.Lend:
- retq
+ RET
SYM_FUNC_END(memcpy_orig)
-#ifndef CONFIG_UML
-
-MCSAFE_TEST_CTL
-
-/*
- * __memcpy_mcsafe - memory copy with machine check exception handling
- * Note that we only catch machine checks when reading the source addresses.
- * Writes to target are posted and don't generate machine checks.
- */
-SYM_FUNC_START(__memcpy_mcsafe)
- cmpl $8, %edx
- /* Less than 8 bytes? Go to byte copy loop */
- jb .L_no_whole_words
-
- /* Check for bad alignment of source */
- testl $7, %esi
- /* Already aligned */
- jz .L_8byte_aligned
-
- /* Copy one byte at a time until source is 8-byte aligned */
- movl %esi, %ecx
- andl $7, %ecx
- subl $8, %ecx
- negl %ecx
- subl %ecx, %edx
-.L_read_leading_bytes:
- movb (%rsi), %al
- MCSAFE_TEST_SRC %rsi 1 .E_leading_bytes
- MCSAFE_TEST_DST %rdi 1 .E_leading_bytes
-.L_write_leading_bytes:
- movb %al, (%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .L_read_leading_bytes
-
-.L_8byte_aligned:
- movl %edx, %ecx
- andl $7, %edx
- shrl $3, %ecx
- jz .L_no_whole_words
-
-.L_read_words:
- movq (%rsi), %r8
- MCSAFE_TEST_SRC %rsi 8 .E_read_words
- MCSAFE_TEST_DST %rdi 8 .E_write_words
-.L_write_words:
- movq %r8, (%rdi)
- addq $8, %rsi
- addq $8, %rdi
- decl %ecx
- jnz .L_read_words
-
- /* Any trailing bytes? */
-.L_no_whole_words:
- andl %edx, %edx
- jz .L_done_memcpy_trap
-
- /* Copy trailing bytes */
- movl %edx, %ecx
-.L_read_trailing_bytes:
- movb (%rsi), %al
- MCSAFE_TEST_SRC %rsi 1 .E_trailing_bytes
- MCSAFE_TEST_DST %rdi 1 .E_trailing_bytes
-.L_write_trailing_bytes:
- movb %al, (%rdi)
- incq %rsi
- incq %rdi
- decl %ecx
- jnz .L_read_trailing_bytes
-
- /* Copy successful. Return zero */
-.L_done_memcpy_trap:
- xorl %eax, %eax
-.L_done:
- ret
-SYM_FUNC_END(__memcpy_mcsafe)
-EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
-
- .section .fixup, "ax"
- /*
- * Return number of bytes not copied for any failure. Note that
- * there is no "tail" handling since the source buffer is 8-byte
- * aligned and poison is cacheline aligned.
- */
-.E_read_words:
- shll $3, %ecx
-.E_leading_bytes:
- addl %edx, %ecx
-.E_trailing_bytes:
- mov %ecx, %eax
- jmp .L_done
-
- /*
- * For write fault handling, given the destination is unaligned,
- * we handle faults on multi-byte writes with a byte-by-byte
- * copy up to the write-protected page.
- */
-.E_write_words:
- shll $3, %ecx
- addl %edx, %ecx
- movl %ecx, %edx
- jmp mcsafe_handle_tail
-
- .previous
-
- _ASM_EXTABLE_FAULT(.L_read_leading_bytes, .E_leading_bytes)
- _ASM_EXTABLE_FAULT(.L_read_words, .E_read_words)
- _ASM_EXTABLE_FAULT(.L_read_trailing_bytes, .E_trailing_bytes)
- _ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
- _ASM_EXTABLE(.L_write_words, .E_write_words)
- _ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
-#endif
diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S
index fd5d25a474b7..6143b1a6fa2c 100644
--- a/tools/arch/x86/lib/memset_64.S
+++ b/tools/arch/x86/lib/memset_64.S
@@ -3,9 +3,10 @@
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
-#include <asm/alternative-asm.h>
+#include <asm/alternative.h>
+#include <asm/export.h>
-.weak memset
+.section .noinstr.text, "ax"
/*
* ISO C memset - set a memory block to a byte value. This function uses fast
@@ -18,7 +19,6 @@
*
* rax original destination
*/
-SYM_FUNC_START_ALIAS(memset)
SYM_FUNC_START(__memset)
/*
* Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
@@ -41,9 +41,12 @@ SYM_FUNC_START(__memset)
movl %edx,%ecx
rep stosb
movq %r9,%rax
- ret
+ RET
SYM_FUNC_END(__memset)
-SYM_FUNC_END_ALIAS(memset)
+EXPORT_SYMBOL(__memset)
+
+SYM_FUNC_ALIAS(memset, __memset)
+EXPORT_SYMBOL(memset)
/*
* ISO C memset - set a memory block to a byte value. This function uses
@@ -56,16 +59,16 @@ SYM_FUNC_END_ALIAS(memset)
*
* rax original destination
*/
-SYM_FUNC_START(memset_erms)
+SYM_FUNC_START_LOCAL(memset_erms)
movq %rdi,%r9
movb %sil,%al
movq %rdx,%rcx
rep stosb
movq %r9,%rax
- ret
+ RET
SYM_FUNC_END(memset_erms)
-SYM_FUNC_START(memset_orig)
+SYM_FUNC_START_LOCAL(memset_orig)
movq %rdi,%r10
/* expand byte value */
@@ -124,7 +127,7 @@ SYM_FUNC_START(memset_orig)
.Lende:
movq %r10,%rax
- ret
+ RET
.Lbad_alignment:
cmpq $7,%rdx
diff --git a/tools/arch/x86/lib/x86-opcode-map.txt b/tools/arch/x86/lib/x86-opcode-map.txt
index ec31f5b60323..5168ee0360b2 100644
--- a/tools/arch/x86/lib/x86-opcode-map.txt
+++ b/tools/arch/x86/lib/x86-opcode-map.txt
@@ -690,7 +690,10 @@ AVXcode: 2
45: vpsrlvd/q Vx,Hx,Wx (66),(v)
46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
47: vpsllvd/q Vx,Hx,Wx (66),(v)
-# Skip 0x48-0x4b
+# Skip 0x48
+49: TILERELEASE (v1),(000),(11B) | LDTILECFG Mtc (v1)(000) | STTILECFG Mtc (66),(v1),(000) | TILEZERO Vt (F2),(v1),(11B)
+# Skip 0x4a
+4b: TILELOADD Vt,Wsm (F2),(v1) | TILELOADDT1 Vt,Wsm (66),(v1) | TILESTORED Wsm,Vt (F3),(v)
4c: vrcp14ps/d Vpd,Wpd (66),(ev)
4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
@@ -705,7 +708,10 @@ AVXcode: 2
59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
-# Skip 0x5c-0x61
+5c: TDPBF16PS Vt,Wt,Ht (F3),(v1)
+# Skip 0x5d
+5e: TDPBSSD Vt,Wt,Ht (F2),(v1) | TDPBSUD Vt,Wt,Ht (F3),(v1) | TDPBUSD Vt,Wt,Ht (66),(v1) | TDPBUUD Vt,Wt,Ht (v1)
+# Skip 0x5f-0x61
62: vpexpandb/w Vx,Wx (66),(ev)
63: vpcompressb/w Wx,Vx (66),(ev)
64: vpblendmd/q Vx,Hx,Wx (66),(ev)
@@ -822,9 +828,9 @@ AVXcode: 3
05: vpermilpd Vx,Wx,Ib (66),(v)
06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
07:
-08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
+08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo) | vrndscaleph Vx,Wx,Ib (evo)
09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
-0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
+0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo) | vrndscalesh Vx,Hx,Wx,Ib (evo)
0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
0c: vblendps Vx,Hx,Wx,Ib (66)
0d: vblendpd Vx,Hx,Wx,Ib (66)
@@ -846,8 +852,8 @@ AVXcode: 3
22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
-26: vgetmantps/d Vx,Wx,Ib (66),(ev)
-27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
+26: vgetmantps/d Vx,Wx,Ib (66),(ev) | vgetmantph Vx,Wx,Ib (ev)
+27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev) | vgetmantsh Vx,Hx,Wx,Ib (ev)
30: kshiftrb/w Vk,Uk,Ib (66),(v)
31: kshiftrd/q Vk,Uk,Ib (66),(v)
32: kshiftlb/w Vk,Uk,Ib (66),(v)
@@ -871,23 +877,102 @@ AVXcode: 3
51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
-56: vreduceps/d Vx,Wx,Ib (66),(ev)
-57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
+56: vreduceps/d Vx,Wx,Ib (66),(ev) | vreduceph Vx,Wx,Ib (ev)
+57: vreducess/d Vx,Hx,Wx,Ib (66),(ev) | vreducesh Vx,Hx,Wx,Ib (ev)
60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
-66: vfpclassps/d Vk,Wx,Ib (66),(ev)
-67: vfpclassss/d Vk,Wx,Ib (66),(ev)
+66: vfpclassps/d Vk,Wx,Ib (66),(ev) | vfpclassph Vx,Wx,Ib (ev)
+67: vfpclassss/d Vk,Wx,Ib (66),(ev) | vfpclasssh Vx,Wx,Ib (ev)
70: vpshldw Vx,Hx,Wx,Ib (66),(ev)
71: vpshldd/q Vx,Hx,Wx,Ib (66),(ev)
72: vpshrdw Vx,Hx,Wx,Ib (66),(ev)
73: vpshrdd/q Vx,Hx,Wx,Ib (66),(ev)
+c2: vcmpph Vx,Hx,Wx,Ib (ev) | vcmpsh Vx,Hx,Wx,Ib (F3),(ev)
cc: sha1rnds4 Vdq,Wdq,Ib
ce: vgf2p8affineqb Vx,Wx,Ib (66)
cf: vgf2p8affineinvqb Vx,Wx,Ib (66)
df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
-f0: RORX Gy,Ey,Ib (F2),(v)
+f0: RORX Gy,Ey,Ib (F2),(v) | HRESET Gv,Ib (F3),(000),(11B)
+EndTable
+
+Table: EVEX map 5
+Referrer:
+AVXcode: 5
+10: vmovsh Vx,Hx,Wx (F3),(ev) | vmovsh Vx,Wx (F3),(ev)
+11: vmovsh Wx,Hx,Vx (F3),(ev) | vmovsh Wx,Vx (F3),(ev)
+1d: vcvtps2phx Vx,Wx (66),(ev) | vcvtss2sh Vx,Hx,Wx (ev)
+2a: vcvtsi2sh Vx,Hx,Wx (F3),(ev)
+2c: vcvttsh2si Vx,Wx (F3),(ev)
+2d: vcvtsh2si Vx,Wx (F3),(ev)
+2e: vucomish Vx,Wx (ev)
+2f: vcomish Vx,Wx (ev)
+51: vsqrtph Vx,Wx (ev) | vsqrtsh Vx,Hx,Wx (F3),(ev)
+58: vaddph Vx,Hx,Wx (ev) | vaddsh Vx,Hx,Wx (F3),(ev)
+59: vmulph Vx,Hx,Wx (ev) | vmulsh Vx,Hx,Wx (F3),(ev)
+5a: vcvtpd2ph Vx,Wx (66),(ev) | vcvtph2pd Vx,Wx (ev) | vcvtsd2sh Vx,Hx,Wx (F2),(ev) | vcvtsh2sd Vx,Hx,Wx (F3),(ev)
+5b: vcvtdq2ph Vx,Wx (ev) | vcvtph2dq Vx,Wx (66),(ev) | vcvtqq2ph Vx,Wx (ev) | vcvttph2dq Vx,Wx (F3),(ev)
+5c: vsubph Vx,Hx,Wx (ev) | vsubsh Vx,Hx,Wx (F3),(ev)
+5d: vminph Vx,Hx,Wx (ev) | vminsh Vx,Hx,Wx (F3),(ev)
+5e: vdivph Vx,Hx,Wx (ev) | vdivsh Vx,Hx,Wx (F3),(ev)
+5f: vmaxph Vx,Hx,Wx (ev) | vmaxsh Vx,Hx,Wx (F3),(ev)
+6e: vmovw Vx,Wx (66),(ev)
+78: vcvttph2udq Vx,Wx (ev) | vcvttph2uqq Vx,Wx (66),(ev) | vcvttsh2usi Vx,Wx (F3),(ev)
+79: vcvtph2udq Vx,Wx (ev) | vcvtph2uqq Vx,Wx (66),(ev) | vcvtsh2usi Vx,Wx (F3),(ev)
+7a: vcvttph2qq Vx,Wx (66),(ev) | vcvtudq2ph Vx,Wx (F2),(ev) | vcvtuqq2ph Vx,Wx (F2),(ev)
+7b: vcvtph2qq Vx,Wx (66),(ev) | vcvtusi2sh Vx,Hx,Wx (F3),(ev)
+7c: vcvttph2uw Vx,Wx (ev) | vcvttph2w Vx,Wx (66),(ev)
+7d: vcvtph2uw Vx,Wx (ev) | vcvtph2w Vx,Wx (66),(ev) | vcvtuw2ph Vx,Wx (F2),(ev) | vcvtw2ph Vx,Wx (F3),(ev)
+7e: vmovw Wx,Vx (66),(ev)
+EndTable
+
+Table: EVEX map 6
+Referrer:
+AVXcode: 6
+13: vcvtph2psx Vx,Wx (66),(ev) | vcvtsh2ss Vx,Hx,Wx (ev)
+2c: vscalefph Vx,Hx,Wx (66),(ev)
+2d: vscalefsh Vx,Hx,Wx (66),(ev)
+42: vgetexpph Vx,Wx (66),(ev)
+43: vgetexpsh Vx,Hx,Wx (66),(ev)
+4c: vrcpph Vx,Wx (66),(ev)
+4d: vrcpsh Vx,Hx,Wx (66),(ev)
+4e: vrsqrtph Vx,Wx (66),(ev)
+4f: vrsqrtsh Vx,Hx,Wx (66),(ev)
+56: vfcmaddcph Vx,Hx,Wx (F2),(ev) | vfmaddcph Vx,Hx,Wx (F3),(ev)
+57: vfcmaddcsh Vx,Hx,Wx (F2),(ev) | vfmaddcsh Vx,Hx,Wx (F3),(ev)
+96: vfmaddsub132ph Vx,Hx,Wx (66),(ev)
+97: vfmsubadd132ph Vx,Hx,Wx (66),(ev)
+98: vfmadd132ph Vx,Hx,Wx (66),(ev)
+99: vfmadd132sh Vx,Hx,Wx (66),(ev)
+9a: vfmsub132ph Vx,Hx,Wx (66),(ev)
+9b: vfmsub132sh Vx,Hx,Wx (66),(ev)
+9c: vfnmadd132ph Vx,Hx,Wx (66),(ev)
+9d: vfnmadd132sh Vx,Hx,Wx (66),(ev)
+9e: vfnmsub132ph Vx,Hx,Wx (66),(ev)
+9f: vfnmsub132sh Vx,Hx,Wx (66),(ev)
+a6: vfmaddsub213ph Vx,Hx,Wx (66),(ev)
+a7: vfmsubadd213ph Vx,Hx,Wx (66),(ev)
+a8: vfmadd213ph Vx,Hx,Wx (66),(ev)
+a9: vfmadd213sh Vx,Hx,Wx (66),(ev)
+aa: vfmsub213ph Vx,Hx,Wx (66),(ev)
+ab: vfmsub213sh Vx,Hx,Wx (66),(ev)
+ac: vfnmadd213ph Vx,Hx,Wx (66),(ev)
+ad: vfnmadd213sh Vx,Hx,Wx (66),(ev)
+ae: vfnmsub213ph Vx,Hx,Wx (66),(ev)
+af: vfnmsub213sh Vx,Hx,Wx (66),(ev)
+b6: vfmaddsub231ph Vx,Hx,Wx (66),(ev)
+b7: vfmsubadd231ph Vx,Hx,Wx (66),(ev)
+b8: vfmadd231ph Vx,Hx,Wx (66),(ev)
+b9: vfmadd231sh Vx,Hx,Wx (66),(ev)
+ba: vfmsub231ph Vx,Hx,Wx (66),(ev)
+bb: vfmsub231sh Vx,Hx,Wx (66),(ev)
+bc: vfnmadd231ph Vx,Hx,Wx (66),(ev)
+bd: vfnmadd231sh Vx,Hx,Wx (66),(ev)
+be: vfnmsub231ph Vx,Hx,Wx (66),(ev)
+bf: vfnmsub231sh Vx,Hx,Wx (66),(ev)
+d6: vfcmulcph Vx,Hx,Wx (F2),(ev) | vfmulcph Vx,Hx,Wx (F3),(ev)
+d7: vfcmulcsh Vx,Hx,Wx (F2),(ev) | vfmulcsh Vx,Hx,Wx (F3),(ev)
EndTable
GrpTable: Grp1
@@ -962,6 +1047,7 @@ GrpTable: Grp6
3: LTR Ew
4: VERR Ew
5: VERW Ew
+6: LKGS Ew (F2)
EndTable
GrpTable: Grp7
@@ -970,7 +1056,7 @@ GrpTable: Grp7
2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
3: LIDT Ms
4: SMSW Mw/Rv
-5: rdpkru (110),(11B) | wrpkru (111),(11B) | SAVEPREVSSP (F3),(010),(11B) | RSTORSSP Mq (F3) | SETSSBSY (F3),(000),(11B)
+5: rdpkru (110),(11B) | wrpkru (111),(11B) | SAVEPREVSSP (F3),(010),(11B) | RSTORSSP Mq (F3) | SETSSBSY (F3),(000),(11B) | CLUI (F3),(110),(11B) | SERIALIZE (000),(11B) | STUI (F3),(111),(11B) | TESTUI (F3)(101)(11B) | UIRET (F3),(100),(11B) | XRESLDTRK (F2),(000),(11B) | XSUSLDTRK (F2),(001),(11B)
6: LMSW Ew
7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
EndTable
@@ -987,7 +1073,7 @@ GrpTable: Grp9
3: xrstors
4: xsavec
5: xsaves
-6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) | SENDUIPI Gq (F3)
7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
EndTable
diff --git a/tools/arch/x86/tools/gen-insn-attr-x86.awk b/tools/arch/x86/tools/gen-insn-attr-x86.awk
index a42015b305f4..af38469afd14 100644
--- a/tools/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/tools/arch/x86/tools/gen-insn-attr-x86.awk
@@ -362,6 +362,9 @@ function convert_operands(count,opnd, i,j,imm,mod)
END {
if (awkchecked != "")
exit 1
+
+ print "#ifndef __BOOT_COMPRESSED\n"
+
# print escape opcode map's array
print "/* Escape opcode map array */"
print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
@@ -388,6 +391,51 @@ END {
for (j = 0; j < max_lprefix; j++)
if (atable[i,j])
print " ["i"]["j"] = "atable[i,j]","
- print "};"
+ print "};\n"
+
+ print "#else /* !__BOOT_COMPRESSED */\n"
+
+ print "/* Escape opcode map array */"
+ print "static const insn_attr_t *inat_escape_tables[INAT_ESC_MAX + 1]" \
+ "[INAT_LSTPFX_MAX + 1];"
+ print ""
+
+ print "/* Group opcode map array */"
+ print "static const insn_attr_t *inat_group_tables[INAT_GRP_MAX + 1]"\
+ "[INAT_LSTPFX_MAX + 1];"
+ print ""
+
+ print "/* AVX opcode map array */"
+ print "static const insn_attr_t *inat_avx_tables[X86_VEX_M_MAX + 1]"\
+ "[INAT_LSTPFX_MAX + 1];"
+ print ""
+
+ print "static void inat_init_tables(void)"
+ print "{"
+
+ # print escape opcode map's array
+ print "\t/* Print Escape opcode map array */"
+ for (i = 0; i < geid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (etable[i,j])
+ print "\tinat_escape_tables["i"]["j"] = "etable[i,j]";"
+ print ""
+
+ # print group opcode map's array
+ print "\t/* Print Group opcode map array */"
+ for (i = 0; i < ggid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (gtable[i,j])
+ print "\tinat_group_tables["i"]["j"] = "gtable[i,j]";"
+ print ""
+ # print AVX opcode map's array
+ print "\t/* Print AVX opcode map array */"
+ for (i = 0; i < gaid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (atable[i,j])
+ print "\tinat_avx_tables["i"]["j"] = "atable[i,j]";"
+
+ print "}"
+ print "#endif"
}
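
To make the awk change easier to read: the script now emits two variants of the generated inat-tables.c. Outside __BOOT_COMPRESSED the lookup tables stay const and are filled at build time; when __BOOT_COMPRESSED is defined they become writable pointers that inat_init_tables() populates at runtime. A rough sketch of the shape of the generated C, with the actual table entries elided (identifiers are taken from the print statements above; group and AVX tables follow the same pattern):

	#ifndef __BOOT_COMPRESSED

	/* Escape opcode map array */
	const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1][INAT_LSTPFX_MAX + 1] = {
		/* [i][j] = table, entries emitted at generation time ... */
	};

	#else /* !__BOOT_COMPRESSED */

	/* Escape opcode map array */
	static const insn_attr_t *inat_escape_tables[INAT_ESC_MAX + 1][INAT_LSTPFX_MAX + 1];

	static void inat_init_tables(void)
	{
		/* inat_escape_tables[i][j] = table; assignments emitted by the script */
	}

	#endif
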
diff --git a/tools/bootconfig/Makefile b/tools/bootconfig/Makefile
index da5975775337..566c3e0ee561 100644
--- a/tools/bootconfig/Makefile
+++ b/tools/bootconfig/Makefile
@@ -15,9 +15,9 @@ CFLAGS = -Wall -g -I$(CURDIR)/include
ALL_TARGETS := bootconfig
ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
-all: $(ALL_PROGRAMS)
+all: $(ALL_PROGRAMS) test
-$(OUTPUT)bootconfig: main.c $(LIBSRC)
+$(OUTPUT)bootconfig: main.c include/linux/bootconfig.h $(LIBSRC)
$(CC) $(filter %.c,$^) $(CFLAGS) -o $@
test: $(ALL_PROGRAMS) test-bootconfig.sh
diff --git a/tools/bootconfig/include/linux/bootconfig.h b/tools/bootconfig/include/linux/bootconfig.h
index 078cbd2ba651..6784296a0692 100644
--- a/tools/bootconfig/include/linux/bootconfig.h
+++ b/tools/bootconfig/include/linux/bootconfig.h
@@ -2,6 +2,53 @@
#ifndef _BOOTCONFIG_LINUX_BOOTCONFIG_H
#define _BOOTCONFIG_LINUX_BOOTCONFIG_H
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <ctype.h>
+#include <errno.h>
+#include <string.h>
+
+
+#ifndef fallthrough
+# define fallthrough
+#endif
+
+#define WARN_ON(cond) \
+ ((cond) ? printf("Internal warning(%s:%d, %s): %s\n", \
+ __FILE__, __LINE__, __func__, #cond) : 0)
+
+#define unlikely(cond) (cond)
+
+/* Copied from lib/string.c */
+static inline char *skip_spaces(const char *str)
+{
+ while (isspace(*str))
+ ++str;
+ return (char *)str;
+}
+
+static inline char *strim(char *s)
+{
+ size_t size;
+ char *end;
+
+ size = strlen(s);
+ if (!size)
+ return s;
+
+ end = s + size - 1;
+ while (end >= s && isspace(*end))
+ end--;
+ *(end + 1) = '\0';
+
+ return skip_spaces(s);
+}
+
+#define __init
+#define __initdata
+
#include "../../../../include/linux/bootconfig.h"
#endif
diff --git a/tools/bootconfig/include/linux/bug.h b/tools/bootconfig/include/linux/bug.h
deleted file mode 100644
index 7b65a389c0dd..000000000000
--- a/tools/bootconfig/include/linux/bug.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SKC_LINUX_BUG_H
-#define _SKC_LINUX_BUG_H
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#define WARN_ON(cond) \
- ((cond) ? printf("Internal warning(%s:%d, %s): %s\n", \
- __FILE__, __LINE__, __func__, #cond) : 0)
-
-#endif
diff --git a/tools/bootconfig/include/linux/ctype.h b/tools/bootconfig/include/linux/ctype.h
deleted file mode 100644
index c56ecc136448..000000000000
--- a/tools/bootconfig/include/linux/ctype.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SKC_LINUX_CTYPE_H
-#define _SKC_LINUX_CTYPE_H
-
-#include <ctype.h>
-
-#endif
diff --git a/tools/bootconfig/include/linux/errno.h b/tools/bootconfig/include/linux/errno.h
deleted file mode 100644
index 5d9f91ec2fda..000000000000
--- a/tools/bootconfig/include/linux/errno.h
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SKC_LINUX_ERRNO_H
-#define _SKC_LINUX_ERRNO_H
-
-#include <asm/errno.h>
-
-#endif
diff --git a/tools/bootconfig/include/linux/kernel.h b/tools/bootconfig/include/linux/kernel.h
deleted file mode 100644
index 2d93320aa374..000000000000
--- a/tools/bootconfig/include/linux/kernel.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SKC_LINUX_KERNEL_H
-#define _SKC_LINUX_KERNEL_H
-
-#include <stdlib.h>
-#include <stdbool.h>
-
-#include <linux/printk.h>
-
-typedef unsigned short u16;
-typedef unsigned int u32;
-
-#define unlikely(cond) (cond)
-
-#define __init
-#define __initdata
-
-#endif
diff --git a/tools/bootconfig/include/linux/memblock.h b/tools/bootconfig/include/linux/memblock.h
deleted file mode 100644
index 7862f217d85d..000000000000
--- a/tools/bootconfig/include/linux/memblock.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XBC_LINUX_MEMBLOCK_H
-#define _XBC_LINUX_MEMBLOCK_H
-
-#include <stdlib.h>
-
-#define __pa(addr) (addr)
-#define SMP_CACHE_BYTES 0
-#define memblock_alloc(size, align) malloc(size)
-#define memblock_free(paddr, size) free(paddr)
-
-#endif
diff --git a/tools/bootconfig/include/linux/printk.h b/tools/bootconfig/include/linux/printk.h
deleted file mode 100644
index 036e667596eb..000000000000
--- a/tools/bootconfig/include/linux/printk.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SKC_LINUX_PRINTK_H
-#define _SKC_LINUX_PRINTK_H
-
-#include <stdio.h>
-
-#define printk(fmt, ...) printf(fmt, ##__VA_ARGS__)
-
-#define pr_err printk
-#define pr_warn printk
-#define pr_info printk
-#define pr_debug printk
-
-#endif
diff --git a/tools/bootconfig/include/linux/string.h b/tools/bootconfig/include/linux/string.h
deleted file mode 100644
index 8267af75153a..000000000000
--- a/tools/bootconfig/include/linux/string.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _SKC_LINUX_STRING_H
-#define _SKC_LINUX_STRING_H
-
-#include <string.h>
-
-/* Copied from lib/string.c */
-static inline char *skip_spaces(const char *str)
-{
- while (isspace(*str))
- ++str;
- return (char *)str;
-}
-
-static inline char *strim(char *s)
-{
- size_t size;
- char *end;
-
- size = strlen(s);
- if (!size)
- return s;
-
- end = s + size - 1;
- while (end >= s && isspace(*end))
- end--;
- *(end + 1) = '\0';
-
- return skip_spaces(s);
-}
-
-#endif
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 16b9a420e6fd..156b62a163c5 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -10,17 +10,25 @@
#include <unistd.h>
#include <string.h>
#include <errno.h>
+#include <endian.h>
-#include <linux/kernel.h>
#include <linux/bootconfig.h>
-static int xbc_show_array(struct xbc_node *node)
+#define pr_err(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
+
+static int xbc_show_value(struct xbc_node *node, bool semicolon)
{
- const char *val;
+ const char *val, *eol;
+ char q;
int i = 0;
+ eol = semicolon ? ";\n" : "\n";
xbc_array_for_each_value(node, val) {
- printf("\"%s\"%s", val, node->next ? ", " : ";\n");
+ if (strchr(val, '"'))
+ q = '\'';
+ else
+ q = '"';
+ printf("%c%s%c%s", q, val, q, xbc_node_is_array(node) ? ", " : eol);
i++;
}
return i;
@@ -28,33 +36,55 @@ static int xbc_show_array(struct xbc_node *node)
static void xbc_show_compact_tree(void)
{
- struct xbc_node *node, *cnode;
+ struct xbc_node *node, *cnode = NULL, *vnode;
int depth = 0, i;
node = xbc_root_node();
while (node && xbc_node_is_key(node)) {
for (i = 0; i < depth; i++)
printf("\t");
- cnode = xbc_node_get_child(node);
+ if (!cnode)
+ cnode = xbc_node_get_child(node);
while (cnode && xbc_node_is_key(cnode) && !cnode->next) {
+ vnode = xbc_node_get_child(cnode);
+ /*
+ * If @cnode has value and subkeys, this
+ * should show it as below.
+ *
+ * key(@node) {
+ * key(@cnode) = value;
+ * key(@cnode) {
+ * subkeys;
+ * }
+ * }
+ */
+ if (vnode && xbc_node_is_value(vnode) && vnode->next)
+ break;
printf("%s.", xbc_node_get_data(node));
node = cnode;
- cnode = xbc_node_get_child(node);
+ cnode = vnode;
}
if (cnode && xbc_node_is_key(cnode)) {
printf("%s {\n", xbc_node_get_data(node));
depth++;
node = cnode;
+ cnode = NULL;
continue;
} else if (cnode && xbc_node_is_value(cnode)) {
printf("%s = ", xbc_node_get_data(node));
- if (cnode->next)
- xbc_show_array(cnode);
- else
- printf("\"%s\";\n", xbc_node_get_data(cnode));
+ xbc_show_value(cnode, true);
+ /*
+ * If @node has value and subkeys, continue
+ * looping on subkeys with same node.
+ */
+ if (cnode->next) {
+ cnode = xbc_node_get_next(cnode);
+ continue;
+ }
} else {
printf("%s;\n", xbc_node_get_data(node));
}
+ cnode = NULL;
if (node->next) {
node = xbc_node_get_next(node);
@@ -66,29 +96,42 @@ static void xbc_show_compact_tree(void)
return;
if (!xbc_node_get_child(node)->next)
continue;
- depth--;
- for (i = 0; i < depth; i++)
- printf("\t");
- printf("}\n");
+ if (depth) {
+ depth--;
+ for (i = 0; i < depth; i++)
+ printf("\t");
+ printf("}\n");
+ }
}
node = xbc_node_get_next(node);
}
}
-/* Simple real checksum */
-int checksum(unsigned char *buf, int len)
+static void xbc_show_list(void)
{
- int i, sum = 0;
-
- for (i = 0; i < len; i++)
- sum += buf[i];
+ char key[XBC_KEYLEN_MAX];
+ struct xbc_node *leaf;
+ const char *val;
+ int ret;
- return sum;
+ xbc_for_each_key_value(leaf, val) {
+ ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
+ if (ret < 0) {
+ fprintf(stderr, "Failed to compose key %d\n", ret);
+ break;
+ }
+ printf("%s = ", key);
+ if (!val || val[0] == '\0') {
+ printf("\"\"\n");
+ continue;
+ }
+ xbc_show_value(xbc_node_get_child(leaf), false);
+ }
}
#define PAGE_SIZE 4096
-int load_xbc_fd(int fd, char **buf, int size)
+static int load_xbc_fd(int fd, char **buf, int size)
{
int ret;
@@ -105,7 +148,7 @@ int load_xbc_fd(int fd, char **buf, int size)
}
/* Return the read size or -errno */
-int load_xbc_file(const char *path, char **buf)
+static int load_xbc_file(const char *path, char **buf)
{
struct stat stat;
int fd, ret;
@@ -124,11 +167,17 @@ int load_xbc_file(const char *path, char **buf)
return ret;
}
-int load_xbc_from_initrd(int fd, char **buf)
+static int pr_errno(const char *msg, int err)
+{
+ pr_err("%s: %d\n", msg, err);
+ return err;
+}
+
+static int load_xbc_from_initrd(int fd, char **buf)
{
struct stat stat;
int ret;
- u32 size = 0, csum = 0, rcsum;
+ uint32_t size = 0, csum = 0, rcsum;
char magic[BOOTCONFIG_MAGIC_LEN];
const char *msg;
@@ -139,26 +188,26 @@ int load_xbc_from_initrd(int fd, char **buf)
if (stat.st_size < 8 + BOOTCONFIG_MAGIC_LEN)
return 0;
- if (lseek(fd, -BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0) {
- pr_err("Failed to lseek: %d\n", -errno);
- return -errno;
- }
+ if (lseek(fd, -BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0)
+ return pr_errno("Failed to lseek for magic", -errno);
+
if (read(fd, magic, BOOTCONFIG_MAGIC_LEN) < 0)
- return -errno;
+ return pr_errno("Failed to read", -errno);
+
/* Check the bootconfig magic bytes */
if (memcmp(magic, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN) != 0)
return 0;
- if (lseek(fd, -(8 + BOOTCONFIG_MAGIC_LEN), SEEK_END) < 0) {
- pr_err("Failed to lseek: %d\n", -errno);
- return -errno;
- }
+ if (lseek(fd, -(8 + BOOTCONFIG_MAGIC_LEN), SEEK_END) < 0)
+ return pr_errno("Failed to lseek for size", -errno);
- if (read(fd, &size, sizeof(u32)) < 0)
- return -errno;
+ if (read(fd, &size, sizeof(uint32_t)) < 0)
+ return pr_errno("Failed to read size", -errno);
+ size = le32toh(size);
- if (read(fd, &csum, sizeof(u32)) < 0)
- return -errno;
+ if (read(fd, &csum, sizeof(uint32_t)) < 0)
+ return pr_errno("Failed to read checksum", -errno);
+ csum = le32toh(csum);
/* Wrong size error */
if (stat.st_size < size + 8 + BOOTCONFIG_MAGIC_LEN) {
@@ -167,23 +216,21 @@ int load_xbc_from_initrd(int fd, char **buf)
}
if (lseek(fd, stat.st_size - (size + 8 + BOOTCONFIG_MAGIC_LEN),
- SEEK_SET) < 0) {
- pr_err("Failed to lseek: %d\n", -errno);
- return -errno;
- }
+ SEEK_SET) < 0)
+ return pr_errno("Failed to lseek", -errno);
ret = load_xbc_fd(fd, buf, size);
if (ret < 0)
return ret;
/* Wrong Checksum */
- rcsum = checksum((unsigned char *)*buf, size);
+ rcsum = xbc_calc_checksum(*buf, size);
if (csum != rcsum) {
pr_err("checksum error: %d != %d\n", csum, rcsum);
return -EINVAL;
}
- ret = xbc_init(*buf, &msg, NULL);
+ ret = xbc_init(*buf, size, &msg, NULL);
/* Wrong data */
if (ret < 0) {
pr_err("parse error: %s.\n", msg);
@@ -193,30 +240,92 @@ int load_xbc_from_initrd(int fd, char **buf)
return size;
}
-int show_xbc(const char *path)
+static void show_xbc_error(const char *data, const char *msg, int pos)
+{
+ int lin = 1, col, i;
+
+ if (pos < 0) {
+ pr_err("Error: %s.\n", msg);
+ return;
+ }
+
+ /* Note that pos starts from 0 but lin and col should start from 1. */
+ col = pos + 1;
+ for (i = 0; i < pos; i++) {
+ if (data[i] == '\n') {
+ lin++;
+ col = pos - i;
+ }
+ }
+ pr_err("Parse Error: %s at %d:%d\n", msg, lin, col);
+
+}
+
+static int init_xbc_with_error(char *buf, int len)
+{
+ char *copy = strdup(buf);
+ const char *msg;
+ int ret, pos;
+
+ if (!copy)
+ return -ENOMEM;
+
+ ret = xbc_init(buf, len, &msg, &pos);
+ if (ret < 0)
+ show_xbc_error(copy, msg, pos);
+ free(copy);
+
+ return ret;
+}
+
+static int show_xbc(const char *path, bool list)
{
int ret, fd;
char *buf = NULL;
+ struct stat st;
+
+ ret = stat(path, &st);
+ if (ret < 0) {
+ ret = -errno;
+ pr_err("Failed to stat %s: %d\n", path, ret);
+ return ret;
+ }
fd = open(path, O_RDONLY);
if (fd < 0) {
- pr_err("Failed to open initrd %s: %d\n", path, fd);
- return -errno;
+ ret = -errno;
+ pr_err("Failed to open initrd %s: %d\n", path, ret);
+ return ret;
}
ret = load_xbc_from_initrd(fd, &buf);
- if (ret < 0)
+ close(fd);
+ if (ret < 0) {
pr_err("Failed to load a boot config from initrd: %d\n", ret);
+ goto out;
+ }
+ /* Assume a bootconfig file if it is small enough */
+ if (ret == 0 && st.st_size <= XBC_DATA_MAX) {
+ ret = load_xbc_file(path, &buf);
+ if (ret < 0) {
+ pr_err("Failed to load a boot config: %d\n", ret);
+ goto out;
+ }
+ if (init_xbc_with_error(buf, ret) < 0)
+ goto out;
+ }
+ if (list)
+ xbc_show_list();
else
xbc_show_compact_tree();
-
- close(fd);
+ ret = 0;
+out:
free(buf);
return ret;
}
-int delete_xbc(const char *path)
+static int delete_xbc(const char *path)
{
struct stat stat;
int ret = 0, fd, size;
@@ -224,8 +333,9 @@ int delete_xbc(const char *path)
fd = open(path, O_RDWR);
if (fd < 0) {
- pr_err("Failed to open initrd %s: %d\n", path, fd);
- return -errno;
+ ret = -errno;
+ pr_err("Failed to open initrd %s: %d\n", path, ret);
+ return ret;
}
size = load_xbc_from_initrd(fd, &buf);
@@ -247,34 +357,15 @@ int delete_xbc(const char *path)
return ret;
}
-static void show_xbc_error(const char *data, const char *msg, int pos)
-{
- int lin = 1, col, i;
-
- if (pos < 0) {
- pr_err("Error: %s.\n", msg);
- return;
- }
-
- /* Note that pos starts from 0 but lin and col should start from 1. */
- col = pos + 1;
- for (i = 0; i < pos; i++) {
- if (data[i] == '\n') {
- lin++;
- col = pos - i;
- }
- }
- pr_err("Parse Error: %s at %d:%d\n", msg, lin, col);
-
-}
-
-int apply_xbc(const char *path, const char *xbc_path)
+static int apply_xbc(const char *path, const char *xbc_path)
{
- u32 size, csum;
- char *buf, *data;
- int ret, fd;
+ char *buf, *data, *p;
+ size_t total_size;
+ struct stat stat;
const char *msg;
- int pos;
+ uint32_t size, csum;
+ int pos, pad;
+ int ret, fd;
ret = load_xbc_file(xbc_path, &buf);
if (ret < 0) {
@@ -282,18 +373,17 @@ int apply_xbc(const char *path, const char *xbc_path)
return ret;
}
size = strlen(buf) + 1;
- csum = checksum((unsigned char *)buf, size);
+ csum = xbc_calc_checksum(buf, size);
- /* Prepare xbc_path data */
- data = malloc(size + 8);
+ /* Backup the bootconfig data */
+ data = calloc(size + BOOTCONFIG_ALIGN +
+ sizeof(uint32_t) + sizeof(uint32_t) + BOOTCONFIG_MAGIC_LEN, 1);
if (!data)
return -ENOMEM;
- strcpy(data, buf);
- *(u32 *)(data + size) = size;
- *(u32 *)(data + size + 4) = csum;
+ memcpy(data, buf, size);
/* Check the data format */
- ret = xbc_init(buf, &msg, &pos);
+ ret = xbc_init(buf, size, &msg, &pos);
if (ret < 0) {
show_xbc_error(data, msg, pos);
free(data);
@@ -302,53 +392,94 @@ int apply_xbc(const char *path, const char *xbc_path)
return ret;
}
printf("Apply %s to %s\n", xbc_path, path);
+ xbc_get_info(&ret, NULL);
printf("\tNumber of nodes: %d\n", ret);
printf("\tSize: %u bytes\n", (unsigned int)size);
printf("\tChecksum: %d\n", (unsigned int)csum);
/* TODO: Check the options by schema */
- xbc_destroy_all();
+ xbc_exit();
free(buf);
/* Remove old boot config if exists */
ret = delete_xbc(path);
if (ret < 0) {
pr_err("Failed to delete previous boot config: %d\n", ret);
+ free(data);
return ret;
}
/* Apply new one */
fd = open(path, O_RDWR | O_APPEND);
if (fd < 0) {
- pr_err("Failed to open %s: %d\n", path, fd);
- return fd;
- }
- /* TODO: Ensure the @path is initramfs/initrd image */
- ret = write(fd, data, size + 8);
- if (ret < 0) {
- pr_err("Failed to apply a boot config: %d\n", ret);
+ ret = -errno;
+ pr_err("Failed to open %s: %d\n", path, ret);
+ free(data);
return ret;
}
- /* Write a magic word of the bootconfig */
- ret = write(fd, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
- if (ret < 0) {
- pr_err("Failed to apply a boot config magic: %d\n", ret);
- return ret;
+ /* TODO: Ensure the @path is initramfs/initrd image */
+ if (fstat(fd, &stat) < 0) {
+ ret = -errno;
+ pr_err("Failed to get the size of %s\n", path);
+ goto out;
}
+
+ /* To align up the total size to BOOTCONFIG_ALIGN, get padding size */
+ total_size = stat.st_size + size + sizeof(uint32_t) * 2 + BOOTCONFIG_MAGIC_LEN;
+ pad = ((total_size + BOOTCONFIG_ALIGN - 1) & (~BOOTCONFIG_ALIGN_MASK)) - total_size;
+ size += pad;
+
+ /* Add a footer */
+ p = data + size;
+ *(uint32_t *)p = htole32(size);
+ p += sizeof(uint32_t);
+
+ *(uint32_t *)p = htole32(csum);
+ p += sizeof(uint32_t);
+
+ memcpy(p, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
+ p += BOOTCONFIG_MAGIC_LEN;
+
+ total_size = p - data;
+
+ ret = write(fd, data, total_size);
+ if (ret < total_size) {
+ if (ret < 0)
+ ret = -errno;
+ pr_err("Failed to apply a boot config: %d\n", ret);
+ if (ret >= 0)
+ goto out_rollback;
+ } else
+ ret = 0;
+
+out:
close(fd);
free(data);
- return 0;
+ return ret;
+
+out_rollback:
+ /* Map the partial write to -ENOSPC */
+ if (ret >= 0)
+ ret = -ENOSPC;
+ if (ftruncate(fd, stat.st_size) < 0) {
+ ret = -errno;
+ pr_err("Failed to rollback the write error: %d\n", ret);
+ pr_err("The initrd %s may be corrupted. Recommend to rebuild.\n", path);
+ }
+ goto out;
}
-int usage(void)
+static int usage(void)
{
printf("Usage: bootconfig [OPTIONS] <INITRD>\n"
+ "Or bootconfig <CONFIG>\n"
" Apply, delete or show boot config to initrd.\n"
" Options:\n"
" -a <config>: Apply boot config to initrd\n"
- " -d : Delete boot config file from initrd\n\n"
- " If no option is given, show current applied boot config.\n");
+ " -d : Delete boot config file from initrd\n"
+ " -l : list boot config in initrd or file\n\n"
+ " If no option is given, show the bootconfig in the given file.\n");
return -1;
}
@@ -356,10 +487,10 @@ int main(int argc, char **argv)
{
char *path = NULL;
char *apply = NULL;
- bool delete = false;
+ bool delete = false, list = false;
int opt;
- while ((opt = getopt(argc, argv, "hda:")) != -1) {
+ while ((opt = getopt(argc, argv, "hda:l")) != -1) {
switch (opt) {
case 'd':
delete = true;
@@ -367,14 +498,17 @@ int main(int argc, char **argv)
case 'a':
apply = optarg;
break;
+ case 'l':
+ list = true;
+ break;
case 'h':
default:
return usage();
}
}
- if (apply && delete) {
- pr_err("Error: You can not specify both -a and -d at once.\n");
+ if ((apply && delete) || (delete && list) || (apply && list)) {
+ pr_err("Error: You can give one of -a, -d or -l at once.\n");
return usage();
}
@@ -390,5 +524,5 @@ int main(int argc, char **argv)
else if (delete)
return delete_xbc(path);
- return show_xbc(path);
+ return show_xbc(path, list);
}
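
A worked example of the footer and padding arithmetic introduced in apply_xbc() above: the tool appends the bootconfig text, pads it so that initrd + data + footer ends on a BOOTCONFIG_ALIGN boundary, then writes a footer of little-endian 32-bit size, 32-bit checksum and the magic string. The sketch below is stand-alone and assumes BOOTCONFIG_ALIGN_MASK == BOOTCONFIG_ALIGN - 1; the concrete values (alignment 4, magic length 12, sizes 1000 and 123) are assumptions for illustration only:

	#include <stdio.h>
	#include <stdint.h>

	#define BOOTCONFIG_ALIGN	4			/* assumed for the sketch */
	#define BOOTCONFIG_ALIGN_MASK	(BOOTCONFIG_ALIGN - 1)
	#define BOOTCONFIG_MAGIC_LEN	12			/* assumed length of the magic */

	int main(void)
	{
		size_t initrd_size = 1000;			/* hypothetical initrd size */
		uint32_t size = 123;				/* hypothetical bootconfig text size */
		size_t total, pad;

		total = initrd_size + size + 2 * sizeof(uint32_t) + BOOTCONFIG_MAGIC_LEN;
		pad = ((total + BOOTCONFIG_ALIGN - 1) & ~(size_t)BOOTCONFIG_ALIGN_MASK) - total;

		/* 1143 bytes round up to 1144, so one padding byte is added to the data */
		printf("total=%zu pad=%zu aligned=%zu\n", total, pad, total + pad);
		return 0;
	}
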
diff --git a/tools/bootconfig/samples/good-mixed-append.bconf b/tools/bootconfig/samples/good-mixed-append.bconf
new file mode 100644
index 000000000000..b99a089a05f5
--- /dev/null
+++ b/tools/bootconfig/samples/good-mixed-append.bconf
@@ -0,0 +1,4 @@
+key = foo
+keyx.subkey = value
+key += bar
+
diff --git a/tools/bootconfig/samples/bad-mixed-kv1.bconf b/tools/bootconfig/samples/good-mixed-kv1.bconf
index 1761547dd05c..1761547dd05c 100644
--- a/tools/bootconfig/samples/bad-mixed-kv1.bconf
+++ b/tools/bootconfig/samples/good-mixed-kv1.bconf
diff --git a/tools/bootconfig/samples/bad-mixed-kv2.bconf b/tools/bootconfig/samples/good-mixed-kv2.bconf
index 6b32e0c3878c..6b32e0c3878c 100644
--- a/tools/bootconfig/samples/bad-mixed-kv2.bconf
+++ b/tools/bootconfig/samples/good-mixed-kv2.bconf
diff --git a/tools/bootconfig/samples/good-mixed-kv3.bconf b/tools/bootconfig/samples/good-mixed-kv3.bconf
new file mode 100644
index 000000000000..2ce2b02224b8
--- /dev/null
+++ b/tools/bootconfig/samples/good-mixed-kv3.bconf
@@ -0,0 +1,6 @@
+# mixed key and subkeys with braces
+key = value
+key {
+ subkey1
+ subkey2 = foo
+}
diff --git a/tools/bootconfig/samples/good-mixed-override.bconf b/tools/bootconfig/samples/good-mixed-override.bconf
new file mode 100644
index 000000000000..18195b2873b6
--- /dev/null
+++ b/tools/bootconfig/samples/good-mixed-override.bconf
@@ -0,0 +1,4 @@
+key.foo = bar
+key = value
+# mixed key value can be overridden
+key := value2
diff --git a/tools/bootconfig/samples/good-override.bconf b/tools/bootconfig/samples/good-override.bconf
new file mode 100644
index 000000000000..7d31d5f8fbd8
--- /dev/null
+++ b/tools/bootconfig/samples/good-override.bconf
@@ -0,0 +1,6 @@
+# Override the value
+key.word = 1,2,4
+key.word := 2,3
+
+# No pre-defined key
+key.new.word := "new"
diff --git a/tools/bootconfig/scripts/bconf2ftrace.sh b/tools/bootconfig/scripts/bconf2ftrace.sh
new file mode 100755
index 000000000000..850c2073433e
--- /dev/null
+++ b/tools/bootconfig/scripts/bconf2ftrace.sh
@@ -0,0 +1,301 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+usage() {
+ echo "Ftrace boottime trace test tool"
+ echo "Usage: $0 [--apply|--init] [--debug] BOOTCONFIG-FILE"
+ echo " --apply: Test actual apply to tracefs (need sudo)"
+ echo " --init: Initialize ftrace before applying (imply --apply)"
+ exit 1
+}
+
+[ $# -eq 0 ] && usage
+
+BCONF=
+DEBUG=
+APPLY=
+INIT=
+while [ x"$1" != x ]; do
+ case "$1" in
+ "--debug")
+ DEBUG=$1;;
+ "--apply")
+ APPLY=$1;;
+ "--init")
+ APPLY=$1
+ INIT=$1;;
+ *)
+ [ ! -f $1 ] && usage
+ BCONF=$1;;
+ esac
+ shift 1
+done
+
+if [ x"$APPLY" != x ]; then
+ if [ `id -u` -ne 0 ]; then
+ echo "This must be run by root user. Try sudo." 1>&2
+ exec sudo $0 $DEBUG $APPLY $BCONF
+ fi
+fi
+
+run_cmd() { # command
+ echo "$*"
+ if [ x"$APPLY" != x ]; then # apply command
+ eval $*
+ fi
+}
+
+if [ x"$DEBUG" != x ]; then
+ set -x
+fi
+
+TRACEFS=`grep -m 1 -w tracefs /proc/mounts | cut -f 2 -d " "`
+if [ -z "$TRACEFS" ]; then
+ if ! grep -wq debugfs /proc/mounts; then
+ echo "Error: No tracefs/debugfs was mounted." 1>&2
+ exit 1
+ fi
+ TRACEFS=`grep -m 1 -w debugfs /proc/mounts | cut -f 2 -d " "`/tracing
+ if [ ! -d $TRACEFS ]; then
+ echo "Error: ftrace is not enabled on this kernel." 1>&2
+ exit 1
+ fi
+fi
+
+if [ x"$INIT" != x ]; then
+ . `dirname $0`/ftrace.sh
+ (cd $TRACEFS; initialize_ftrace)
+fi
+
+. `dirname $0`/xbc.sh
+
+######## main #########
+set -e
+
+xbc_init $BCONF
+
+set_value_of() { # key file
+ if xbc_has_key $1; then
+ val=`xbc_get_val $1 1`
+ run_cmd "echo '$val' >> $2"
+ fi
+}
+
+set_array_of() { # key file
+ if xbc_has_key $1; then
+ xbc_get_val $1 | while read line; do
+ run_cmd "echo '$line' >> $2"
+ done
+ fi
+}
+
+compose_synth() { # event_name branch
+ echo -n "$1 "
+ xbc_get_val $2 | while read field; do echo -n "$field; "; done
+}
+
+print_hist_array() { # prefix key
+ __sep="="
+ if xbc_has_key ${1}.${2}; then
+ echo -n ":$2"
+ xbc_get_val ${1}.${2} | while read field; do
+ echo -n "$__sep$field"; __sep=","
+ done
+ fi
+}
+
+print_hist_action_array() { # prefix key
+ __sep="("
+ echo -n ".$2"
+ xbc_get_val ${1}.${2} | while read field; do
+ echo -n "$__sep$field"; __sep=","
+ done
+ echo -n ")"
+}
+
+print_hist_one_action() { # prefix handler param
+ echo -n ":${2}("`xbc_get_val ${1}.${3}`")"
+ if xbc_has_key "${1}.trace"; then
+ print_hist_action_array ${1} "trace"
+ elif xbc_has_key "${1}.save"; then
+ print_hist_action_array ${1} "save"
+ elif xbc_has_key "${1}.snapshot"; then
+ echo -n ".snapshot()"
+ fi
+}
+
+print_hist_actions() { # prefix handler param
+ for __hdr in `xbc_subkeys ${1}.${2} 1 ".[0-9]"`; do
+ print_hist_one_action ${1}.${2}.$__hdr ${2} ${3}
+ done
+ if xbc_has_key ${1}.${2}.${3} ; then
+ print_hist_one_action ${1}.${2} ${2} ${3}
+ fi
+}
+
+print_hist_var() { # prefix varname
+ echo -n ":${2}="`xbc_get_val ${1}.var.${2} | tr -d [:space:]`
+}
+
+print_one_histogram() { # prefix
+ echo -n "hist"
+ print_hist_array $1 "keys"
+ print_hist_array $1 "values"
+ print_hist_array $1 "sort"
+ if xbc_has_key "${1}.size"; then
+ echo -n ":size="`xbc_get_val ${1}.size`
+ fi
+ if xbc_has_key "${1}.name"; then
+ echo -n ":name="`xbc_get_val ${1}.name`
+ fi
+ for __var in `xbc_subkeys "${1}.var" 1`; do
+ print_hist_var ${1} ${__var}
+ done
+ if xbc_has_key "${1}.pause"; then
+ echo -n ":pause"
+ elif xbc_has_key "${1}.continue"; then
+ echo -n ":continue"
+ elif xbc_has_key "${1}.clear"; then
+ echo -n ":clear"
+ fi
+ print_hist_actions ${1} "onmax" "var"
+ print_hist_actions ${1} "onchange" "var"
+ print_hist_actions ${1} "onmatch" "event"
+
+ if xbc_has_key "${1}.filter"; then
+ echo -n " if "`xbc_get_val ${1}.filter`
+ fi
+}
+
+setup_one_histogram() { # prefix trigger-file
+ run_cmd "echo '`print_one_histogram ${1}`' >> ${2}"
+}
+
+setup_histograms() { # prefix trigger-file
+ for __hist in `xbc_subkeys ${1} 1 ".[0-9]"`; do
+ setup_one_histogram ${1}.$__hist ${2}
+ done
+ if xbc_has_key ${1}.keys; then
+ setup_one_histogram ${1} ${2}
+ fi
+}
+
+setup_event() { # prefix group event [instance]
+ branch=$1.$2.$3
+ if [ "$4" ]; then
+ eventdir="$TRACEFS/instances/$4/events/$2/$3"
+ else
+ eventdir="$TRACEFS/events/$2/$3"
+ fi
+ # group enable
+ if [ "$3" = "enable" ]; then
+ run_cmd "echo 1 > ${eventdir}"
+ return
+ fi
+
+ case $2 in
+ kprobes)
+ xbc_get_val ${branch}.probes | while read line; do
+ run_cmd "echo 'p:kprobes/$3 $line' >> $TRACEFS/kprobe_events"
+ done
+ ;;
+ synthetic)
+ run_cmd "echo '`compose_synth $3 ${branch}.fields`' >> $TRACEFS/synthetic_events"
+ ;;
+ esac
+
+ set_value_of ${branch}.filter ${eventdir}/filter
+ set_array_of ${branch}.actions ${eventdir}/trigger
+
+ setup_histograms ${branch}.hist ${eventdir}/trigger
+
+ if xbc_has_key ${branch}.enable; then
+ run_cmd "echo 1 > ${eventdir}/enable"
+ fi
+}
+
+setup_events() { # prefix("ftrace" or "ftrace.instance.INSTANCE") [instance]
+ prefix="${1}.event"
+ if xbc_has_branch ${1}.event; then
+ for grpev in `xbc_subkeys ${1}.event 2`; do
+ setup_event $prefix ${grpev%.*} ${grpev#*.} $2
+ done
+ fi
+ if xbc_has_branch ${1}.event.enable; then
+ if [ "$2" ]; then
+ run_cmd "echo 1 > $TRACEFS/instances/$2/events/enable"
+ else
+ run_cmd "echo 1 > $TRACEFS/events/enable"
+ fi
+ fi
+}
+
+size2kb() { # size[KB|MB]
+ case $1 in
+ *KB)
+ echo ${1%KB};;
+ *MB)
+ expr ${1%MB} \* 1024;;
+ *)
+ expr $1 / 1024 ;;
+ esac
+}
+
+setup_instance() { # [instance]
+ if [ "$1" ]; then
+ instance="ftrace.instance.${1}"
+ instancedir=$TRACEFS/instances/$1
+ else
+ instance="ftrace"
+ instancedir=$TRACEFS
+ fi
+
+ set_array_of ${instance}.options ${instancedir}/trace_options
+ set_value_of ${instance}.trace_clock ${instancedir}/trace_clock
+ set_value_of ${instance}.cpumask ${instancedir}/tracing_cpumask
+ set_value_of ${instance}.tracing_on ${instancedir}/tracing_on
+ set_value_of ${instance}.tracer ${instancedir}/current_tracer
+ set_array_of ${instance}.ftrace.filters \
+ ${instancedir}/set_ftrace_filter
+ set_array_of ${instance}.ftrace.notrace \
+ ${instancedir}/set_ftrace_notrace
+
+ if xbc_has_key ${instance}.alloc_snapshot; then
+ run_cmd "echo 1 > ${instancedir}/snapshot"
+ fi
+
+ if xbc_has_key ${instance}.buffer_size; then
+ size=`xbc_get_val ${instance}.buffer_size 1`
+ size=`eval size2kb $size`
+ run_cmd "echo $size >> ${instancedir}/buffer_size_kb"
+ fi
+
+ setup_events ${instance} $1
+ set_array_of ${instance}.events ${instancedir}/set_event
+}
+
+# ftrace global configs (kernel.*)
+if xbc_has_key "kernel.dump_on_oops"; then
+ dump_mode=`xbc_get_val "kernel.dump_on_oops" 1`
+ [ "$dump_mode" ] && dump_mode=`eval echo $dump_mode` || dump_mode=1
+ run_cmd "echo \"$dump_mode\" > /proc/sys/kernel/ftrace_dump_on_oops"
+fi
+
+set_value_of kernel.fgraph_max_depth $TRACEFS/max_graph_depth
+set_array_of kernel.fgraph_filters $TRACEFS/set_graph_function
+set_array_of kernel.fgraph_notraces $TRACEFS/set_graph_notrace
+
+# Per-instance/per-event configs
+if ! xbc_has_branch "ftrace" ; then
+ exit 0
+fi
+
+setup_instance # root instance
+
+if xbc_has_branch "ftrace.instance"; then
+ for i in `xbc_subkeys "ftrace.instance" 1`; do
+ run_cmd "mkdir -p $TRACEFS/instances/$i"
+ setup_instance $i
+ done
+fi
+
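A quick usage sketch for the script above, with boottrace.bconf standing in for any bootconfig file: without --apply the script only prints the tracefs echo commands it would run, and --init resets ftrace via ftrace.sh before applying.

    # dry run: print the commands that would configure ftrace
    ./bconf2ftrace.sh boottrace.bconf

    # reset ftrace first, then really apply the settings (needs root)
    sudo ./bconf2ftrace.sh --init boottrace.bconf
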
diff --git a/tools/bootconfig/scripts/ftrace.sh b/tools/bootconfig/scripts/ftrace.sh
new file mode 100644
index 000000000000..186eed923041
--- /dev/null
+++ b/tools/bootconfig/scripts/ftrace.sh
@@ -0,0 +1,109 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+clear_trace() { # reset trace output
+ echo > trace
+}
+
+disable_tracing() { # stop trace recording
+ echo 0 > tracing_on
+}
+
+enable_tracing() { # start trace recording
+ echo 1 > tracing_on
+}
+
+reset_tracer() { # reset the current tracer
+ echo nop > current_tracer
+}
+
+reset_trigger_file() {
+ # remove action triggers first
+ grep -H ':on[^:]*(' $@ |
+ while read line; do
+ cmd=`echo $line | cut -f2- -d: | cut -f1 -d"["`
+ file=`echo $line | cut -f1 -d:`
+ echo "!$cmd" >> $file
+ done
+ grep -Hv ^# $@ |
+ while read line; do
+ cmd=`echo $line | cut -f2- -d: | cut -f1 -d"["`
+ file=`echo $line | cut -f1 -d:`
+ echo "!$cmd" > $file
+ done
+}
+
+reset_trigger() { # reset all current setting triggers
+ if [ -d events/synthetic ]; then
+ reset_trigger_file events/synthetic/*/trigger
+ fi
+ reset_trigger_file events/*/*/trigger
+}
+
+reset_events_filter() { # reset all current setting filters
+ grep -v ^none events/*/*/filter |
+ while read line; do
+ echo 0 > `echo $line | cut -f1 -d:`
+ done
+}
+
+reset_ftrace_filter() { # reset all triggers in set_ftrace_filter
+ if [ ! -f set_ftrace_filter ]; then
+ return 0
+ fi
+ echo > set_ftrace_filter
+ grep -v '^#' set_ftrace_filter | while read t; do
+ tr=`echo $t | cut -d: -f2`
+ if [ "$tr" = "" ]; then
+ continue
+ fi
+ if ! grep -q "$t" set_ftrace_filter; then
+ continue;
+ fi
+ name=`echo $t | cut -d: -f1 | cut -d' ' -f1`
+ if [ $tr = "enable_event" -o $tr = "disable_event" ]; then
+ tr=`echo $t | cut -d: -f2-4`
+ limit=`echo $t | cut -d: -f5`
+ else
+ tr=`echo $t | cut -d: -f2`
+ limit=`echo $t | cut -d: -f3`
+ fi
+ if [ "$limit" != "unlimited" ]; then
+ tr="$tr:$limit"
+ fi
+ echo "!$name:$tr" > set_ftrace_filter
+ done
+}
+
+disable_events() {
+ echo 0 > events/enable
+}
+
+clear_synthetic_events() { # reset all current synthetic events
+ grep -v ^# synthetic_events |
+ while read line; do
+ echo "!$line" >> synthetic_events
+ done
+}
+
+initialize_ftrace() { # Reset ftrace to initial-state
+# As the initial state, ftrace will be set to nop tracer,
+# no events, no triggers, no filters, no function filters,
+# no probes, and tracing on.
+ disable_tracing
+ reset_tracer
+ reset_trigger
+ reset_events_filter
+ reset_ftrace_filter
+ disable_events
+ [ -f set_event_pid ] && echo > set_event_pid
+ [ -f set_ftrace_pid ] && echo > set_ftrace_pid
+ [ -f set_ftrace_notrace ] && echo > set_ftrace_notrace
+ [ -f set_graph_function ] && echo | tee set_graph_*
+ [ -f stack_trace_filter ] && echo > stack_trace_filter
+ [ -f kprobe_events ] && echo > kprobe_events
+ [ -f uprobe_events ] && echo > uprobe_events
+ [ -f synthetic_events ] && echo > synthetic_events
+ [ -f snapshot ] && echo 0 > snapshot
+ clear_trace
+ enable_tracing
+}
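ftrace.sh is a helper library rather than a standalone tool: it is meant to be sourced and its functions run from inside the tracefs directory, which is how bconf2ftrace.sh uses it for --init. A minimal sketch, assuming tracefs is mounted at /sys/kernel/tracing:

    . ./ftrace.sh
    # run from inside tracefs because the helpers use relative paths (trace, tracing_on, ...)
    (cd /sys/kernel/tracing && initialize_ftrace)
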
diff --git a/tools/bootconfig/scripts/ftrace2bconf.sh b/tools/bootconfig/scripts/ftrace2bconf.sh
new file mode 100755
index 000000000000..1603801cf126
--- /dev/null
+++ b/tools/bootconfig/scripts/ftrace2bconf.sh
@@ -0,0 +1,260 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+usage() {
+ echo "Dump boot-time tracing bootconfig from ftrace"
+ echo "Usage: $0 [--debug] [ > BOOTCONFIG-FILE]"
+ exit 1
+}
+
+DEBUG=
+while [ x"$1" != x ]; do
+ case "$1" in
+ "--debug")
+ DEBUG=$1;;
+ -*)
+ usage
+ ;;
+ esac
+ shift 1
+done
+
+if [ x"$DEBUG" != x ]; then
+ set -x
+fi
+
+TRACEFS=`grep -m 1 -w tracefs /proc/mounts | cut -f 2 -d " "`
+if [ -z "$TRACEFS" ]; then
+ if ! grep -wq debugfs /proc/mounts; then
+ echo "Error: No tracefs/debugfs was mounted."
+ exit 1
+ fi
+ TRACEFS=`grep -m 1 -w debugfs /proc/mounts | cut -f 2 -d " "`/tracing
+ if [ ! -d $TRACEFS ]; then
+ echo "Error: ftrace is not enabled on this kernel." 1>&2
+ exit 1
+ fi
+fi
+
+######## main #########
+
+set -e
+
+emit_kv() { # key =|+= value
+ echo "$@"
+}
+
+global_options() {
+ val=`cat $TRACEFS/max_graph_depth`
+ [ $val != 0 ] && emit_kv kernel.fgraph_max_depth = $val
+ if grep -qv "^#" $TRACEFS/set_graph_function $TRACEFS/set_graph_notrace ; then
+ cat 1>&2 << EOF
+# WARN: kernel.fgraph_filters and kernel.fgraph_notrace are not supported, since the wild card expression was expanded and lost from memory.
+EOF
+ fi
+}
+
+kprobe_event_options() {
+ cat $TRACEFS/kprobe_events | while read p args; do
+ case $p in
+ r*)
+ cat 1>&2 << EOF
+# WARN: A return probe was found, but it is not supported by bootconfig. Skipping it.
+EOF
+ continue;;
+ esac
+ p=${p#*:}
+ event=${p#*/}
+ group=${p%/*}
+ if [ $group != "kprobes" ]; then
+ cat 1>&2 << EOF
+# WARN: kprobes group name $group is changed to "kprobes" for bootconfig.
+EOF
+ fi
+ emit_kv $PREFIX.event.kprobes.$event.probes += $args
+ done
+}
+
+synth_event_options() {
+ cat $TRACEFS/synthetic_events | while read event fields; do
+ emit_kv $PREFIX.event.synthetic.$event.fields = `echo $fields | sed "s/;/,/g"`
+ done
+}
+
+# Variables resolver
+DEFINED_VARS=
+UNRESOLVED_EVENTS=
+
+defined_vars() { # event-dir
+ grep "^hist" $1/trigger | grep -o ':[a-zA-Z0-9]*='
+}
+referred_vars() {
+ grep "^hist" $1/trigger | grep -o '$[a-zA-Z0-9]*'
+}
+
+event_is_enabled() { # enable-file
+ test -f $1 && grep -q "1" $1
+}
+
+per_event_options() { # event-dir
+ evdir=$1
+ # Check the special event which has no filter and no trigger
+ [ ! -f $evdir/filter ] && return
+
+ if grep -q "^hist:" $evdir/trigger; then
+ # hist action can refer the undefined variables
+ __vars=`defined_vars $evdir`
+ for v in `referred_vars $evdir`; do
+ if echo $DEFINED_VARS $__vars | grep -vqw ${v#$}; then
+ # $v is not defined yet, defer it
+ UNRESOLVED_EVENTS="$UNRESOLVED_EVENTS $evdir"
+ return;
+ fi
+ done
+ DEFINED_VARS="$DEFINED_VARS "`defined_vars $evdir`
+ fi
+ grep -v "^#" $evdir/trigger | while read action active; do
+ emit_kv $PREFIX.event.$group.$event.actions += \'$action\'
+ done
+
+ if [ $GROUP_ENABLED -eq 0 ] && event_is_enabled $evdir/enable; then
+ emit_kv $PREFIX.event.$group.$event.enable
+ fi
+ val=`cat $evdir/filter`
+ if [ "$val" != "none" ]; then
+ emit_kv $PREFIX.event.$group.$event.filter = "$val"
+ fi
+}
+
+retry_unresolved() {
+ unresolved=$UNRESOLVED_EVENTS
+ UNRESOLVED_EVENTS=
+ for evdir in $unresolved; do
+ event=${evdir##*/}
+ group=${evdir%/*}; group=${group##*/}
+ per_event_options $evdir
+ done
+}
+
+event_options() {
+ # PREFIX and INSTANCE must be set
+ if [ $PREFIX = "ftrace" ]; then
+ # define the dynamic events
+ kprobe_event_options
+ synth_event_options
+ fi
+ ALL_ENABLED=0
+ if event_is_enabled $INSTANCE/events/enable; then
+ emit_kv $PREFIX.event.enable
+ ALL_ENABLED=1
+ fi
+ for group in `ls $INSTANCE/events/` ; do
+ [ ! -d $INSTANCE/events/$group ] && continue
+ GROUP_ENABLED=$ALL_ENABLED
+ if [ $ALL_ENABLED -eq 0 ] && \
+ event_is_enabled $INSTANCE/events/$group/enable ;then
+ emit_kv $PREFIX.event.$group.enable
+ GROUP_ENABLED=1
+ fi
+ for event in `ls $INSTANCE/events/$group/` ;do
+ [ ! -d $INSTANCE/events/$group/$event ] && continue
+ per_event_options $INSTANCE/events/$group/$event
+ done
+ done
+ retry=0
+ while [ $retry -lt 3 ]; do
+ retry_unresolved
+ retry=$((retry + 1))
+ done
+ if [ "$UNRESOLVED_EVENTS" ]; then
+ cat 1>&2 << EOF
+! ERROR: hist triggers in $UNRESOLVED_EVENTS use some undefined variables.
+EOF
+ fi
+}
+
+is_default_trace_option() { # option
+grep -qw $1 << EOF
+print-parent
+nosym-offset
+nosym-addr
+noverbose
+noraw
+nohex
+nobin
+noblock
+trace_printk
+annotate
+nouserstacktrace
+nosym-userobj
+noprintk-msg-only
+context-info
+nolatency-format
+record-cmd
+norecord-tgid
+overwrite
+nodisable_on_free
+irq-info
+markers
+noevent-fork
+nopause-on-trace
+function-trace
+nofunction-fork
+nodisplay-graph
+nostacktrace
+notest_nop_accept
+notest_nop_refuse
+EOF
+}
+
+instance_options() { # [instance-name]
+ if [ $# -eq 0 ]; then
+ PREFIX="ftrace"
+ INSTANCE=$TRACEFS
+ else
+ PREFIX="ftrace.instance.$1"
+ INSTANCE=$TRACEFS/instances/$1
+ fi
+ val=
+ for i in `cat $INSTANCE/trace_options`; do
+ is_default_trace_option $i && continue
+ val="$val, $i"
+ done
+ [ "$val" ] && emit_kv $PREFIX.options = "${val#,}"
+ val="local"
+ for i in `cat $INSTANCE/trace_clock` ; do
+ [ "${i#*]}" ] && continue
+ i=${i%]}; val=${i#[}
+ done
+ [ $val != "local" ] && emit_kv $PREFIX.trace_clock = $val
+ val=`cat $INSTANCE/buffer_size_kb`
+ if echo $val | grep -vq "expanded" ; then
+ emit_kv $PREFIX.buffer_size = $val"KB"
+ fi
+ if grep -q "is allocated" $INSTANCE/snapshot ; then
+ emit_kv $PREFIX.alloc_snapshot
+ fi
+ val=`cat $INSTANCE/tracing_cpumask`
+ if [ `echo $val | sed -e s/f//g`x != x ]; then
+ emit_kv $PREFIX.cpumask = $val
+ fi
+ val=`cat $INSTANCE/tracing_on`
+ if [ "$val" = "0" ]; then
+ emit_kv $PREFIX.tracing_on = 0
+ fi
+
+ val=`cat $INSTANCE/current_tracer`
+ [ $val != nop ] && emit_kv $PREFIX.tracer = $val
+ if grep -qv "^#" $INSTANCE/set_ftrace_filter $INSTANCE/set_ftrace_notrace; then
+ cat 1>&2 << EOF
+# WARN: kernel.ftrace.filters and kernel.ftrace.notrace are not supported, since the wild card expression was expanded and lost from memory.
+EOF
+ fi
+ event_options
+}
+
+global_options
+instance_options
+for i in `ls $TRACEFS/instances` ; do
+ instance_options $i
+done
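ftrace2bconf.sh is the reverse direction of bconf2ftrace.sh: it dumps the current ftrace state as bootconfig key-values on stdout. A minimal sketch (file names are placeholders); the second step reuses the bootconfig tool's -a option that the test script changes further down also exercise:

    # dump the live ftrace configuration (reading tracefs usually needs root)
    sudo ./ftrace2bconf.sh > boottrace.bconf

    # attach the result to an initrd so it is applied at the next boot
    bootconfig -a boottrace.bconf initrd.img
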
diff --git a/tools/bootconfig/scripts/xbc.sh b/tools/bootconfig/scripts/xbc.sh
new file mode 100644
index 000000000000..1f0ebf50dd2d
--- /dev/null
+++ b/tools/bootconfig/scripts/xbc.sh
@@ -0,0 +1,56 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0-only
+
+# bootconfig utility functions
+
+XBC_TMPFILE=
+XBC_BASEDIR=`dirname $0`
+BOOTCONFIG=${BOOTCONFIG:=$XBC_BASEDIR/../bootconfig}
+if [ ! -x "$BOOTCONFIG" ]; then
+ BOOTCONFIG=`which bootconfig`
+ if [ -z "$BOOTCONFIG" ]; then
+ echo "Erorr: bootconfig command is not found" 1>&2
+ exit 1
+ fi
+fi
+
+xbc_cleanup() {
+ if [ "$XBC_TMPFILE" ]; then
+ rm -f "$XBC_TMPFILE"
+ fi
+}
+
+xbc_init() { # bootconfig-file
+ xbc_cleanup
+ XBC_TMPFILE=`mktemp bconf-XXXX`
+ trap xbc_cleanup EXIT TERM
+
+ $BOOTCONFIG -l $1 > $XBC_TMPFILE || exit 1
+}
+
+nr_args() { # args
+ echo $#
+}
+
+xbc_get_val() { # key [maxnum]
+ if [ "$2" ]; then
+ MAXOPT="-L $2"
+ fi
+ grep "^$1 =" $XBC_TMPFILE | cut -d= -f2- | \
+ sed -e 's/", /" /g' -e "s/',/' /g" | \
+ xargs $MAXOPT -n 1 echo
+}
+
+xbc_has_key() { # key
+ grep -q "^$1 =" $XBC_TMPFILE
+}
+
+xbc_has_branch() { # prefix-key
+ grep -q "^$1" $XBC_TMPFILE
+}
+
+xbc_subkeys() { # prefix-key depth [subkey-pattern]
+ __keys=`echo $1 | sed "s/\./ /g"`
+ __s=`nr_args $__keys`
+ grep "^$1$3" $XBC_TMPFILE | cut -d= -f1| cut -d. -f$((__s + 1))-$((__s + $2)) | uniq
+}
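A short sketch of how a sibling script is expected to use these helpers (the ftrace.* keys are just examples taken from bconf2ftrace.sh, and boottrace.bconf is a placeholder):

    . ./xbc.sh
    xbc_init boottrace.bconf                 # runs 'bootconfig -l' into a temp file

    xbc_has_key ftrace.tracer && xbc_get_val ftrace.tracer 1
    xbc_subkeys ftrace.instance 1            # instance names, one key level below the prefix
    xbc_has_branch ftrace.event && echo "per-event settings present"
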
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index 81b350ffd03f..a2c484c243f5 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -9,6 +9,7 @@ else
TESTDIR=.
fi
BOOTCONF=${TESTDIR}/bootconfig
+ALIGN=4
INITRD=`mktemp ${TESTDIR}/initrd-XXXX`
TEMPCONF=`mktemp ${TESTDIR}/temp-XXXX.bconf`
@@ -25,7 +26,7 @@ trap cleanup EXIT TERM
NO=1
xpass() { # pass test command
- echo "test case $NO ($3)... "
+ echo "test case $NO ($*)... "
if ! ($@ && echo "\t\t[OK]"); then
echo "\t\t[NG]"; NG=$((NG + 1))
fi
@@ -33,7 +34,7 @@ xpass() { # pass test command
}
xfail() { # fail test command
- echo "test case $NO ($3)... "
+ echo "test case $NO ($*)... "
if ! (! $@ && echo "\t\t[OK]"); then
echo "\t\t[NG]"; NG=$((NG + 1))
fi
@@ -55,8 +56,14 @@ echo "Apply command test"
xpass $BOOTCONF -a $TEMPCONF $INITRD
new_size=$(stat -c %s $INITRD)
+echo "Show command test"
+xpass $BOOTCONF $INITRD
+
echo "File size check"
-xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9 + 12)
+total_size=$(expr $bconf_size + $initrd_size + 9 + 12 + $ALIGN - 1 )
+total_size=$(expr $total_size / $ALIGN)
+total_size=$(expr $total_size \* $ALIGN)
+xpass test $new_size -eq $total_size
echo "Apply command repeat test"
xpass $BOOTCONF -a $TEMPCONF $INITRD
@@ -80,10 +87,14 @@ xfail grep -i "error" $OUTFILE
echo "Max node number check"
-echo -n > $TEMPCONF
-for i in `seq 1 1024` ; do
- echo "node$i" >> $TEMPCONF
-done
+awk '
+BEGIN {
+ for (i = 0; i < 26; i += 1)
+ printf("%c\n", 65 + i % 26)
+ for (i = 26; i < 8192; i += 1)
+ printf("%c%c%c\n", 65 + i % 26, 65 + (i / 26) % 26, 65 + (i / 26 / 26))
+}
+' > $TEMPCONF
xpass $BOOTCONF -a $TEMPCONF $INITRD
echo "badnode" >> $TEMPCONF
@@ -114,6 +125,51 @@ xpass grep -q "bar" $OUTFILE
xpass grep -q "baz" $OUTFILE
xpass grep -q "qux" $OUTFILE
+echo "Override same-key values"
+cat > $TEMPCONF << EOF
+key = bar, baz
+key := qux
+EOF
+echo > $INITRD
+
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $OUTFILE
+xfail grep -q "bar" $OUTFILE
+xfail grep -q "baz" $OUTFILE
+xpass grep -q "qux" $OUTFILE
+
+echo "Double/single quotes test"
+echo "key = '\"string\"';" > $TEMPCONF
+$BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $TEMPCONF
+cat $TEMPCONF
+xpass grep \'\"string\"\' $TEMPCONF
+
+echo "Repeat same-key tree"
+cat > $TEMPCONF << EOF
+foo
+bar
+foo { buz }
+EOF
+echo > $INITRD
+
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $OUTFILE
+xpass grep -q "bar" $OUTFILE
+
+
+echo "Remove/keep tailing spaces"
+cat > $TEMPCONF << EOF
+foo = val # comment
+bar = "val2 " # comment
+EOF
+echo > $INITRD
+
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $OUTFILE
+xfail grep -q val[[:space:]] $OUTFILE
+xpass grep -q val2[[:space:]] $OUTFILE
+
echo "=== expected failure cases ==="
for i in samples/bad-* ; do
xfail $BOOTCONF -a $i $INITRD
@@ -124,9 +180,16 @@ for i in samples/good-* ; do
xpass $BOOTCONF -a $i $INITRD
done
+
+echo
+echo "=== Summary ==="
+echo "# of Passed: $(expr $NO - $NG - 1)"
+echo "# of Failed: $NG"
+
echo
if [ $NG -eq 0 ]; then
echo "All tests passed"
else
echo "$NG tests failed"
+ exit 1
fi
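One note on the new size check above: the test now expects the applied initrd size to land on a 4-byte boundary (ALIGN=4), so the expected size is rounded up with the usual (x + ALIGN - 1) / ALIGN * ALIGN idiom. A worked example with made-up sizes, bconf_size=154 and initrd_size=0:

    ALIGN=4
    total_size=$(expr 154 + 0 + 9 + 12 + $ALIGN - 1)   # 175 + 3 = 178
    total_size=$(expr $total_size / $ALIGN)            # 44 (integer division)
    total_size=$(expr $total_size \* $ALIGN)           # 176, the smallest multiple of 4 >= 175
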
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index f897eeeb0b4f..243b79f2b451 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -3,14 +3,14 @@ include ../scripts/Makefile.include
prefix ?= /usr/local
-CC = gcc
LEX = flex
YACC = bison
MAKE = make
INSTALL ?= install
CFLAGS += -Wall -O2
-CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
+CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/tools/include/uapi \
+ -I$(srctree)/tools/include
# This will work when bpf is built in tools env. where srctree
# isn't set and when invoked from selftests build, where srctree
@@ -34,11 +34,11 @@ else
endif
FEATURE_USER = .bpf
-FEATURE_TESTS = libbfd disassembler-four-args
-FEATURE_DISPLAY = libbfd disassembler-four-args
+FEATURE_TESTS = libbfd disassembler-four-args disassembler-init-styled
+FEATURE_DISPLAY = libbfd
check_feat := 1
-NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean resolve_btfids_clean
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
check_feat := 0
@@ -56,6 +56,9 @@ endif
ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
+ifeq ($(feature-disassembler-init-styled), 1)
+CFLAGS += -DDISASM_INIT_STYLED
+endif
$(OUTPUT)%.yacc.c: $(srctree)/tools/bpf/%.y
$(QUIET_BISON)$(YACC) -o $@ -d $<
@@ -64,12 +67,12 @@ $(OUTPUT)%.lex.c: $(srctree)/tools/bpf/%.l
$(QUIET_FLEX)$(LEX) -o $@ $<
$(OUTPUT)%.o: $(srctree)/tools/bpf/%.c
- $(QUIET_CC)$(COMPILE.c) -o $@ $<
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
$(OUTPUT)%.yacc.o: $(OUTPUT)%.yacc.c
- $(QUIET_CC)$(COMPILE.c) -o $@ $<
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
$(OUTPUT)%.lex.o: $(OUTPUT)%.lex.c
- $(QUIET_CC)$(COMPILE.c) -o $@ $<
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -o $@ $<
PROGS = $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg $(OUTPUT)bpf_asm
@@ -89,7 +92,7 @@ $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c
$(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c
$(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c
-clean: bpftool_clean runqslower_clean
+clean: bpftool_clean runqslower_clean resolve_btfids_clean
$(call QUIET_CLEAN, bpf-progs)
$(Q)$(RM) -r -- $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \
$(OUTPUT)bpf_asm $(OUTPUT)bpf_exp.yacc.* $(OUTPUT)bpf_exp.lex.*
@@ -97,7 +100,7 @@ clean: bpftool_clean runqslower_clean
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf
$(Q)$(RM) -r -- $(OUTPUT)feature
-install: $(PROGS) bpftool_install runqslower_install
+install: $(PROGS) bpftool_install
$(call QUIET_INSTALL, bpf_jit_disasm)
$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/bin
$(Q)$(INSTALL) $(OUTPUT)bpf_jit_disasm $(DESTDIR)$(prefix)/bin/bpf_jit_disasm
@@ -118,11 +121,15 @@ bpftool_clean:
runqslower:
$(call descend,runqslower)
-runqslower_install:
- $(call descend,runqslower,install)
-
runqslower_clean:
$(call descend,runqslower,clean)
+resolve_btfids:
+ $(call descend,resolve_btfids)
+
+resolve_btfids_clean:
+ $(call descend,resolve_btfids,clean)
+
.PHONY: all install clean bpftool bpftool_install bpftool_clean \
- runqslower runqslower_install runqslower_clean
+ runqslower runqslower_clean \
+ resolve_btfids resolve_btfids_clean
diff --git a/tools/bpf/Makefile.helpers b/tools/bpf/Makefile.helpers
deleted file mode 100644
index 854d084026dd..000000000000
--- a/tools/bpf/Makefile.helpers
+++ /dev/null
@@ -1,60 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-ifndef allow-override
- include ../scripts/Makefile.include
- include ../scripts/utilities.mak
-else
- # Assume Makefile.helpers is being run from bpftool/Documentation
- # subdirectory. Go up two more directories to fetch bpf.h header and
- # associated script.
- UP2DIR := ../../
-endif
-
-INSTALL ?= install
-RM ?= rm -f
-RMDIR ?= rmdir --ignore-fail-on-non-empty
-
-ifeq ($(V),1)
- Q =
-else
- Q = @
-endif
-
-prefix ?= /usr/local
-mandir ?= $(prefix)/man
-man7dir = $(mandir)/man7
-
-HELPERS_RST = bpf-helpers.rst
-MAN7_RST = $(HELPERS_RST)
-
-_DOC_MAN7 = $(patsubst %.rst,%.7,$(MAN7_RST))
-DOC_MAN7 = $(addprefix $(OUTPUT),$(_DOC_MAN7))
-
-helpers: man7
-man7: $(DOC_MAN7)
-
-RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
-
-$(OUTPUT)$(HELPERS_RST): $(UP2DIR)../../include/uapi/linux/bpf.h
- $(QUIET_GEN)$(UP2DIR)../../scripts/bpf_helpers_doc.py --filename $< > $@
-
-$(OUTPUT)%.7: $(OUTPUT)%.rst
-ifndef RST2MAN_DEP
- $(error "rst2man not found, but required to generate man pages")
-endif
- $(QUIET_GEN)rst2man $< > $@
-
-helpers-clean:
- $(call QUIET_CLEAN, eBPF_helpers-manpage)
- $(Q)$(RM) $(DOC_MAN7) $(OUTPUT)$(HELPERS_RST)
-
-helpers-install: helpers
- $(call QUIET_INSTALL, eBPF_helpers-manpage)
- $(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man7dir)
- $(Q)$(INSTALL) -m 644 $(DOC_MAN7) $(DESTDIR)$(man7dir)
-
-helpers-uninstall:
- $(call QUIET_UNINST, eBPF_helpers-manpage)
- $(Q)$(RM) $(addprefix $(DESTDIR)$(man7dir)/,$(_DOC_MAN7))
- $(Q)$(RMDIR) $(DESTDIR)$(man7dir)
-
-.PHONY: helpers helpers-clean helpers-install helpers-uninstall
diff --git a/tools/bpf/bpf_asm.c b/tools/bpf/bpf_asm.c
index e5f95e3eede3..0063c3c029e7 100644
--- a/tools/bpf/bpf_asm.c
+++ b/tools/bpf/bpf_asm.c
@@ -11,7 +11,7 @@
*
* How to get into it:
*
- * 1) read Documentation/networking/filter.txt
+ * 1) read Documentation/networking/filter.rst
* 2) Run `bpf_asm [-c] <filter-prog file>` to translate into binary
* blob that is loadable with xt_bpf, cls_bpf et al. Note: -c will
* pretty print a C-like construct.
diff --git a/tools/bpf/bpf_dbg.c b/tools/bpf/bpf_dbg.c
index 9d3766e653a9..00e560a17baf 100644
--- a/tools/bpf/bpf_dbg.c
+++ b/tools/bpf/bpf_dbg.c
@@ -13,7 +13,7 @@
* for making a verdict when multiple simple BPF programs are combined
* into one in order to prevent parsing same headers multiple times.
*
- * More on how to debug BPF opcodes see Documentation/networking/filter.txt
+ * More on how to debug BPF opcodes see Documentation/networking/filter.rst
* which is the main document on BPF. Mini howto for getting started:
*
* 1) `./bpf_dbg` to enter the shell (shell cmds denoted with '>'):
@@ -890,7 +890,7 @@ static int bpf_run_stepping(struct sock_filter *f, uint16_t bpf_len,
bool stop = false;
int i = 1;
- while (bpf_curr.Rs == false && stop == false) {
+ while (!bpf_curr.Rs && !stop) {
bpf_safe_regs();
if (i++ == next)
@@ -1198,7 +1198,7 @@ static int cmd_run(char *num)
else
return CMD_OK;
bpf_reset();
- } while (pcap_next_pkt() && (!has_limit || (has_limit && ++i < pkts)));
+ } while (pcap_next_pkt() && (!has_limit || (++i < pkts)));
rl_printf("bpf passes:%u fails:%u\n", pass, fail);
diff --git a/tools/bpf/bpf_exp.y b/tools/bpf/bpf_exp.y
index 8d48e896be50..dfb7254a24e8 100644
--- a/tools/bpf/bpf_exp.y
+++ b/tools/bpf/bpf_exp.y
@@ -185,13 +185,13 @@ ldx
| OP_LDXB number '*' '(' '[' number ']' '&' number ')' {
if ($2 != 4 || $9 != 0xf) {
fprintf(stderr, "ldxb offset not supported!\n");
- exit(0);
+ exit(1);
} else {
bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } }
| OP_LDX number '*' '(' '[' number ']' '&' number ')' {
if ($2 != 4 || $9 != 0xf) {
fprintf(stderr, "ldxb offset not supported!\n");
- exit(0);
+ exit(1);
} else {
bpf_set_curr_instr(BPF_LDX | BPF_MSH | BPF_B, 0, 0, $6); } }
;
@@ -472,7 +472,7 @@ static void bpf_assert_max(void)
{
if (curr_instr >= BPF_MAXINSNS) {
fprintf(stderr, "only max %u insns allowed!\n", BPF_MAXINSNS);
- exit(0);
+ exit(1);
}
}
@@ -522,7 +522,7 @@ static int bpf_find_insns_offset(const char *label)
if (ret == -ENOENT) {
fprintf(stderr, "no such label \'%s\'!\n", label);
- exit(0);
+ exit(1);
}
return ret;
@@ -549,9 +549,11 @@ static uint8_t bpf_encode_jt_jf_offset(int off, int i)
{
int delta = off - i - 1;
- if (delta < 0 || delta > 255)
- fprintf(stderr, "warning: insn #%d jumps to insn #%d, "
+ if (delta < 0 || delta > 255) {
+ fprintf(stderr, "error: insn #%d jumps to insn #%d, "
"which is out of range\n", i, off);
+ exit(1);
+ }
return (uint8_t) delta;
}
diff --git a/tools/bpf/bpf_jit_disasm.c b/tools/bpf/bpf_jit_disasm.c
index c8ae95804728..a90a5d110f92 100644
--- a/tools/bpf/bpf_jit_disasm.c
+++ b/tools/bpf/bpf_jit_disasm.c
@@ -28,6 +28,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <limits.h>
+#include <tools/dis-asm-compat.h>
#define CMD_ACTION_SIZE_BUFFER 10
#define CMD_ACTION_READ_ALL 3
@@ -64,7 +65,9 @@ static void get_asm_insns(uint8_t *image, size_t len, int opcodes)
assert(bfdf);
assert(bfd_check_format(bfdf, bfd_object));
- init_disassemble_info(&info, stdout, (fprintf_ftype) fprintf);
+ init_disassemble_info_compat(&info, stdout,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
info.arch = bfd_get_arch(bfdf);
info.mach = bfd_get_mach(bfdf);
info.buffer = image;
diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
index 26cde83e1ca3..a736f64dc5dc 100644
--- a/tools/bpf/bpftool/.gitignore
+++ b/tools/bpf/bpftool/.gitignore
@@ -1,10 +1,10 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
*.d
-/_bpftool
+/bootstrap/
/bpftool
bpftool*.8
-bpf-helpers.*
FEATURE-DUMP.bpftool
feature
libbpf
-profiler.skel.h
+/*.skel.h
+/vmlinux.h
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index 815ac9804aee..ac8487dcff1d 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -1,6 +1,5 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
include ../../../scripts/Makefile.include
-include ../../../scripts/utilities.mak
INSTALL ?= install
RM ?= rm -f
@@ -16,35 +15,43 @@ prefix ?= /usr/local
mandir ?= $(prefix)/man
man8dir = $(mandir)/man8
-# Load targets for building eBPF helpers man page.
-include ../../Makefile.helpers
-
-MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst))
+MAN8_RST = $(wildcard bpftool*.rst)
_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
-man: man8 helpers
+man: man8
man8: $(DOC_MAN8)
RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+RST2MAN_OPTS += --verbose --strip-comments
+
+list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST))))
+see_also = $(subst " ",, \
+ "\n" \
+ "SEE ALSO\n" \
+ "========\n" \
+ "\t**bpf**\ (2),\n" \
+ "\t**bpf-helpers**\\ (7)" \
+ $(foreach page,$(call list_pages,$(1)),",\n\t**$(page)**\\ (8)") \
+ "\n")
$(OUTPUT)%.8: %.rst
ifndef RST2MAN_DEP
$(error "rst2man not found, but required to generate man pages")
endif
- $(QUIET_GEN)rst2man $< > $@
+ $(QUIET_GEN)( cat $< ; printf "%b" $(call see_also,$<) ) | rst2man $(RST2MAN_OPTS) > $@
-clean: helpers-clean
+clean:
$(call QUIET_CLEAN, Documentation)
$(Q)$(RM) $(DOC_MAN8)
-install: man helpers-install
+install: man
$(call QUIET_INSTALL, Documentation-man)
$(Q)$(INSTALL) -d -m 755 $(DESTDIR)$(man8dir)
$(Q)$(INSTALL) -m 644 $(DOC_MAN8) $(DESTDIR)$(man8dir)
-uninstall: helpers-uninstall
+uninstall:
$(call QUIET_UNINST, Documentation-man)
$(Q)$(RM) $(addprefix $(DESTDIR)$(man8dir)/,$(_DOC_MAN8))
$(Q)$(RMDIR) $(DESTDIR)$(man8dir)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
index 39615f8e145b..342716f74ec4 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
================
bpftool-btf
================
@@ -7,12 +9,14 @@ tool for inspection of BTF data
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **btf** *COMMAND*
- *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-B** | **--base-btf** } }
*COMMANDS* := { **dump** | **help** }
@@ -36,6 +40,11 @@ DESCRIPTION
otherwise list all BTF objects currently loaded on the
system.
+ Since Linux 5.8 bpftool is able to discover information about
+ processes that hold open file descriptors (FDs) against BTF
+ objects. On such kernels bpftool will automatically emit this
+ information as well.
+
**bpftool btf dump** *BTF_SRC*
Dump BTF entries from a given *BTF_SRC*.
@@ -66,26 +75,26 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
+
+ -B, --base-btf *FILE*
+ Pass a base BTF object. Base BTF objects are typically used
+ with BTF objects for kernel modules. To avoid duplicating
+ all kernel symbols required by modules, BTF objects for
+ modules are "split", they are built incrementally on top of
+ the kernel (vmlinux) BTF object. So the base BTF reference
+ should usually point to the kernel BTF.
+
+ When the main BTF object to process (for example, the
+ module BTF to dump) is passed as a *FILE*, bpftool attempts
+ to autodetect the path for the base object, and passing
+ this option is optional. When the main BTF object is passed
+ through other handles, this option becomes necessary.
EXAMPLES
========
**# bpftool btf dump id 1226**
+
::
[1] PTR '(anon)' type_id=2
@@ -99,6 +108,7 @@ EXAMPLES
This gives an example of default output for all supported BTF kinds.
**$ cat prog.c**
+
::
struct fwd_struct;
@@ -139,6 +149,7 @@ This gives an example of default output for all supported BTF kinds.
}
**$ bpftool btf dump file prog.o**
+
::
[1] PTR '(anon)' type_id=2
@@ -225,14 +236,33 @@ All the standard ways to specify map or program are supported:
**# bpftool btf dump prog pinned /sys/fs/bpf/prog_name**
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-map**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8)
+|
+| **# bpftool btf dump file /sys/kernel/btf/i2c_smbus**
+| (or)
+| **# I2C_SMBUS_ID=$(bpftool btf show -p | jq '.[] | select(.name=="i2c_smbus").id')**
+| **# bpftool btf dump id ${I2C_SMBUS_ID} -B /sys/kernel/btf/vmlinux**
+
+::
+
+ [104848] STRUCT 'i2c_smbus_alert' size=40 vlen=2
+ 'alert' type_id=393 bits_offset=0
+ 'ara' type_id=56050 bits_offset=256
+ [104849] STRUCT 'alert_data' size=12 vlen=3
+ 'addr' type_id=16 bits_offset=0
+ 'type' type_id=56053 bits_offset=32
+ 'data' type_id=7 bits_offset=64
+ [104850] PTR '(anon)' type_id=104848
+ [104851] PTR '(anon)' type_id=104849
+ [104852] FUNC 'i2c_register_spd' type_id=84745 linkage=static
+ [104853] FUNC 'smbalert_driver_init' type_id=1213 linkage=static
+ [104854] FUNC_PROTO '(anon)' ret_type_id=18 vlen=1
+ 'ara' type_id=56050
+ [104855] FUNC 'i2c_handle_smbus_alert' type_id=104854 linkage=static
+ [104856] FUNC 'smbalert_remove' type_id=104854 linkage=static
+ [104857] FUNC_PROTO '(anon)' ret_type_id=18 vlen=2
+ 'ara' type_id=56050
+ 'id' type_id=56056
+ [104858] FUNC 'smbalert_probe' type_id=104857 linkage=static
+ [104859] FUNC 'smbalert_work' type_id=9695 linkage=static
+ [104860] FUNC 'smbus_alert' type_id=71367 linkage=static
+ [104861] FUNC 'smbus_do_alert' type_id=84827 linkage=static
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index 06a28b07787d..bd015ec9847b 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
================
bpftool-cgroup
================
@@ -7,12 +9,14 @@ tool for inspection and simple manipulation of eBPF progs
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **cgroup** *COMMAND*
- *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } }
*COMMANDS* :=
{ **show** | **list** | **tree** | **attach** | **detach** | **help** }
@@ -20,17 +24,24 @@ SYNOPSIS
CGROUP COMMANDS
===============
-| **bpftool** **cgroup { show | list }** *CGROUP* [**effective**]
+| **bpftool** **cgroup** { **show** | **list** } *CGROUP* [**effective**]
| **bpftool** **cgroup tree** [*CGROUP_ROOT*] [**effective**]
| **bpftool** **cgroup attach** *CGROUP* *ATTACH_TYPE* *PROG* [*ATTACH_FLAGS*]
| **bpftool** **cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
| **bpftool** **cgroup help**
|
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
-| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
-| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
-| **sendmsg4** | **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** |
-| **getsockopt** | **setsockopt** }
+| *ATTACH_TYPE* := { **cgroup_inet_ingress** | **cgroup_inet_egress** |
+| **cgroup_inet_sock_create** | **cgroup_sock_ops** |
+| **cgroup_device** | **cgroup_inet4_bind** | **cgroup_inet6_bind** |
+| **cgroup_inet4_post_bind** | **cgroup_inet6_post_bind** |
+| **cgroup_inet4_connect** | **cgroup_inet6_connect** |
+| **cgroup_inet4_getpeername** | **cgroup_inet6_getpeername** |
+| **cgroup_inet4_getsockname** | **cgroup_inet6_getsockname** |
+| **cgroup_udp4_sendmsg** | **cgroup_udp6_sendmsg** |
+| **cgroup_udp4_recvmsg** | **cgroup_udp6_recvmsg** |
+| **cgroup_sysctl** | **cgroup_getsockopt** | **cgroup_setsockopt** |
+| **cgroup_inet_sock_release** }
| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
@@ -96,12 +107,17 @@ DESCRIPTION
**sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an
unconnected udp6 socket (since 4.18);
**recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
- an unconnected udp4 socket (since 5.2);
+ an unconnected udp4 socket (since 5.2);
**recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
- an unconnected udp6 socket (since 5.2);
+ an unconnected udp6 socket (since 5.2);
**sysctl** sysctl access (since 5.2);
**getsockopt** call to getsockopt (since 5.3);
- **setsockopt** call to setsockopt (since 5.3).
+ **setsockopt** call to setsockopt (since 5.3);
+ **getpeername4** call to getpeername(2) for an inet4 socket (since 5.8);
+ **getpeername6** call to getpeername(2) for an inet6 socket (since 5.8);
+ **getsockname4** call to getsockname(2) for an inet4 socket (since 5.8);
+ **getsockname6** call to getsockname(2) for an inet6 socket (since 5.8);
+ **sock_release** closing a userspace inet socket (since 5.9).
**bpftool cgroup detach** *CGROUP* *ATTACH_TYPE* *PROG*
Detach *PROG* from the cgroup *CGROUP* and attach type
@@ -112,26 +128,11 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
Show file names of pinned programs.
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
EXAMPLES
========
|
@@ -154,15 +155,3 @@ EXAMPLES
::
ID AttachType AttachFlags Name
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-map**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-btf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
index b04156cfd7a3..e44039f89be7 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
===============
bpftool-feature
===============
@@ -7,28 +9,32 @@ tool for inspection of eBPF-related parameters for Linux kernel or net device
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **feature** *COMMAND*
- *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+ *OPTIONS* := { |COMMON_OPTIONS| }
*COMMANDS* := { **probe** | **help** }
FEATURE COMMANDS
================
-| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature probe** [*COMPONENT*] [**full**] [**unprivileged**] [**macros** [**prefix** *PREFIX*]]
+| **bpftool** **feature list_builtins** *GROUP*
| **bpftool** **feature help**
|
| *COMPONENT* := { **kernel** | **dev** *NAME* }
+| *GROUP* := { **prog_types** | **map_types** | **attach_types** | **link_types** | **helpers** }
DESCRIPTION
===========
**bpftool feature probe** [**kernel**] [**full**] [**macros** [**prefix** *PREFIX*]]
Probe the running kernel and dump a number of eBPF-related
- parameters, such as availability of the **bpf()** system call,
+ parameters, such as availability of the **bpf**\ () system call,
JIT status, eBPF program types availability, eBPF helper
functions availability, and more.
@@ -49,6 +55,16 @@ DESCRIPTION
Keyword **kernel** can be omitted. If no probe target is
specified, probing the kernel is the default behaviour.
+ When the **unprivileged** keyword is used, bpftool will dump
+ only the features available to a user who does not have the
+ **CAP_SYS_ADMIN** capability set. The features available in
+ that case usually represent a small subset of the parameters
+ supported by the system. Unprivileged users MUST use the
+ **unprivileged** keyword: This is to avoid misdetection if
+ bpftool is inadvertently run as non-root, for example. This
+ keyword is unavailable if bpftool was compiled without
+ libcap.
+
**bpftool feature probe dev** *NAME* [**full**] [**macros** [**prefix** *PREFIX*]]
Probe network device for supported eBPF features and dump
results to the console.
@@ -56,36 +72,19 @@ DESCRIPTION
The keywords **full**, **macros** and **prefix** have the
same role as when probing the kernel.
+ **bpftool feature list_builtins** *GROUP*
+ List items known to bpftool. These can be BPF program types
+ (**prog_types**), BPF map types (**map_types**), attach types
+ (**attach_types**), link types (**link_types**), or BPF helper
+ functions (**helpers**). The command does not probe the system, but
+ simply lists the elements that bpftool knows from compilation time,
+ as provided from libbpf (for all object types) or from the BPF UAPI
+ header (list of helpers). This can be used in scripts to iterate over
+ BPF types or helpers.
+
**bpftool feature help**
Print short help message.
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-map**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-btf**\ (8)
+ .. include:: common_options.rst
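As the list_builtins description above notes, the output is meant for scripting; a minimal sketch of that usage (assuming the default plain, one-item-per-line output):

    # iterate over every BPF program type this bpftool build knows about
    for t in $(bpftool feature list_builtins prog_types); do
        echo "known prog type: $t"
    done
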
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index 94d91322895a..68454ef28f58 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
================
bpftool-gen
================
@@ -7,23 +9,48 @@ tool for BPF code-generation
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **gen** *COMMAND*
- *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-L** | **--use-loader** } }
- *COMMAND* := { **skeleton | **help** }
+ *COMMAND* := { **object** | **skeleton** | **help** }
GEN COMMANDS
=============
-| **bpftool** **gen skeleton** *FILE*
+| **bpftool** **gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
+| **bpftool** **gen skeleton** *FILE* [**name** *OBJECT_NAME*]
+| **bpftool** **gen subskeleton** *FILE* [**name** *OBJECT_NAME*]
+| **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
| **bpftool** **gen help**
DESCRIPTION
===========
+ **bpftool gen object** *OUTPUT_FILE* *INPUT_FILE* [*INPUT_FILE*...]
+ Statically link (combine) together one or more *INPUT_FILE*'s
+ into a single resulting *OUTPUT_FILE*. All the files involved
+ are BPF ELF object files.
+
+ The rules of BPF static linking are mostly the same as for
+ user-space object files, but in addition to combining data
+ and instruction sections, .BTF and .BTF.ext (if present in
+ any of the input files) data are combined together. .BTF
+ data is deduplicated, so all the common types across
+ *INPUT_FILE*'s will only be represented once in the resulting
+ BTF information.
+
+ BPF static linking allows partitioning BPF source code into
+ individually compiled files that are then linked into
+ a single resulting BPF object file, which can be used to
+ generate a BPF skeleton (with the **gen skeleton** command) or
+ passed directly into **libbpf** (using **bpf_object__open()**
+ family of APIs).
+
**bpftool gen skeleton** *FILE*
Generate BPF skeleton C header file for a given *FILE*.
@@ -36,12 +63,12 @@ DESCRIPTION
etc. Skeleton eliminates the need to lookup mentioned
components by name. Instead, if skeleton instantiation
succeeds, they are populated in skeleton structure as valid
- libbpf types (e.g., struct bpf_map pointer) and can be
+ libbpf types (e.g., **struct bpf_map** pointer) and can be
passed to existing generic libbpf APIs.
In addition to simple and reliable access to maps and
- programs, skeleton provides a storage for BPF links (struct
- bpf_link) for each BPF program within BPF object. When
+ programs, skeleton provides a storage for BPF links (**struct
+ bpf_link**) for each BPF program within BPF object. When
requested, supported BPF programs will be automatically
attached and resulting BPF links stored for further use by
user in pre-allocated fields in skeleton struct. For BPF
@@ -75,21 +102,24 @@ DESCRIPTION
specific maps, programs, etc.
As part of skeleton, few custom functions are generated.
- Each of them is prefixed with object name, derived from
- object file name. I.e., if BPF object file name is
- **example.o**, BPF object name will be **example**. The
- following custom functions are provided in such case:
+ Each of them is prefixed with object name. Object name can
+ either be derived from object file name, i.e., if BPF object
+ file name is **example.o**, BPF object name will be
+ **example**. Object name can be also specified explicitly
+ through **name** *OBJECT_NAME* parameter. The following
+ custom functions are provided (assuming **example** as
+ the object name):
- **example__open** and **example__open_opts**.
These functions are used to instantiate skeleton. It
- corresponds to libbpf's **bpf_object__open()** API.
+ corresponds to libbpf's **bpf_object__open**\ () API.
+ **_opts** variants accept extra **bpf_object_open_opts**
options.
- **example__load**.
This function creates maps, loads and verifies BPF
programs, initializes global data maps. It corresponds to
- libppf's **bpf_object__load** API.
+ libbpf's **bpf_object__load**\ () API.
- **example__open_and_load** combines **example__open** and
**example__load** invocations in one commonly used
@@ -121,49 +151,78 @@ DESCRIPTION
(non-read-only) data from userspace, with same simplicity
as for BPF side.
+ **bpftool gen subskeleton** *FILE*
+ Generate BPF subskeleton C header file for a given *FILE*.
+
+ Subskeletons are similar to skeletons, except they do not own
+ the corresponding maps, programs, or global variables. They
+ require that the object file used to generate them is already
+ loaded into a *bpf_object* by some other means.
+
+ This functionality is useful when a library is included into a
+ larger BPF program. A subskeleton for the library would have
+ access to all objects and globals defined in it, without
+ having to know about the larger program.
+
+ Consequently, there are only two functions defined
+ for subskeletons:
+
+ - **example__open(bpf_object\*)**
+ Instantiates a subskeleton from an already opened (but not
+ necessarily loaded) **bpf_object**.
+
+ - **example__destroy()**
+ Frees the storage for the subskeleton but *does not* unload
+ any BPF programs or maps.
+
+ **bpftool** **gen min_core_btf** *INPUT* *OUTPUT* *OBJECT* [*OBJECT*...]
+ Generate a minimum BTF file as *OUTPUT*, derived from a given
+ *INPUT* BTF file, containing all the BTF types needed so that
+ the CO-RE relocations of one or more given eBPF objects may be satisfied.
+
+ When kernels aren't compiled with CONFIG_DEBUG_INFO_BTF,
+ libbpf, when loading an eBPF object, has to rely on external
+ BTF files to be able to calculate CO-RE relocations.
+
+ Usually, an external BTF file is built from existing kernel
+ DWARF data using pahole. It contains all the types used by
+ its respective kernel image and, because of that, is big.
+
+ The min_core_btf feature builds smaller BTF files, customized
+ to one or multiple eBPF objects, so they can be distributed
+ together with an eBPF CO-RE based application, making the
+ application portable across different kernel versions.
+
+ Check the examples below for more information on how to use it.
+
**bpftool gen help**
Print short help message.
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON,
- this option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ -L, --use-loader
+ For skeletons, generate a "light" skeleton (also known as "loader"
+ skeleton). A light skeleton contains a loader eBPF program. It does
+ not use the majority of the libbpf infrastructure, and does not need
+ libelf.
EXAMPLES
========
-**$ cat example.c**
+**$ cat example1.bpf.c**
+
::
#include <stdbool.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
- #include "bpf_helpers.h"
+ #include <bpf/bpf_helpers.h>
const volatile int param1 = 42;
bool global_flag = true;
struct { int x; } data = {};
- struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __uint(max_entries, 128);
- __type(key, int);
- __type(value, long);
- } my_map SEC(".maps");
-
SEC("raw_tp/sys_enter")
int handle_sys_enter(struct pt_regs *ctx)
{
@@ -175,6 +234,21 @@ EXAMPLES
return 0;
}
+**$ cat example2.bpf.c**
+
+::
+
+ #include <linux/ptrace.h>
+ #include <linux/bpf.h>
+ #include <bpf/bpf_helpers.h>
+
+ struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 128);
+ __type(key, int);
+ __type(value, long);
+ } my_map SEC(".maps");
+
SEC("raw_tp/sys_exit")
int handle_sys_exit(struct pt_regs *ctx)
{
@@ -184,9 +258,20 @@ EXAMPLES
}
This is example BPF application with two BPF programs and a mix of BPF maps
-and global variables.
+and global variables. Source code is split across two source code files.
+
+**$ clang -target bpf -g example1.bpf.c -o example1.bpf.o**
+
+**$ clang -target bpf -g example2.bpf.c -o example2.bpf.o**
+
+**$ bpftool gen object example.bpf.o example1.bpf.o example2.bpf.o**
+
+This set of commands compiles *example1.bpf.c* and *example2.bpf.c*
+individually and then statically links respective object files into the final
+BPF ELF object file *example.bpf.o*.
+
+**$ bpftool gen skeleton example.bpf.o name example | tee example.skel.h**
-**$ bpftool gen skeleton example.o**
::
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
@@ -240,7 +325,8 @@ and global variables.
#endif /* __EXAMPLE_SKEL_H__ */
-**$ cat example_user.c**
+**$ cat example.c**
+
::
#include "example.skel.h"
@@ -282,7 +368,8 @@ and global variables.
return err;
}
-**# ./example_user**
+**# ./example**
+
::
my_map name: my_map
@@ -291,15 +378,69 @@ and global variables.
This is a stripped-out version of skeleton generated for above example code.
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-map**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-btf**\ (8)
+min_core_btf
+------------
+
+**$ bpftool btf dump file 5.4.0-example.btf format raw**
+
+::
+
+ [1] INT 'long unsigned int' size=8 bits_offset=0 nr_bits=64 encoding=(none)
+ [2] CONST '(anon)' type_id=1
+ [3] VOLATILE '(anon)' type_id=1
+ [4] ARRAY '(anon)' type_id=1 index_type_id=21 nr_elems=2
+ [5] PTR '(anon)' type_id=8
+ [6] CONST '(anon)' type_id=5
+ [7] INT 'char' size=1 bits_offset=0 nr_bits=8 encoding=(none)
+ [8] CONST '(anon)' type_id=7
+ [9] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)
+ <long output>
+
+**$ bpftool btf dump file one.bpf.o format raw**
+
+::
+
+ [1] PTR '(anon)' type_id=2
+ [2] STRUCT 'trace_event_raw_sys_enter' size=64 vlen=4
+ 'ent' type_id=3 bits_offset=0
+ 'id' type_id=7 bits_offset=64
+ 'args' type_id=9 bits_offset=128
+ '__data' type_id=12 bits_offset=512
+ [3] STRUCT 'trace_entry' size=8 vlen=4
+ 'type' type_id=4 bits_offset=0
+ 'flags' type_id=5 bits_offset=16
+ 'preempt_count' type_id=5 bits_offset=24
+ <long output>
+
+**$ bpftool gen min_core_btf 5.4.0-example.btf 5.4.0-smaller.btf one.bpf.o**
+
+**$ bpftool btf dump file 5.4.0-smaller.btf format raw**
+
+::
+
+ [1] TYPEDEF 'pid_t' type_id=6
+ [2] STRUCT 'trace_event_raw_sys_enter' size=64 vlen=1
+ 'args' type_id=4 bits_offset=128
+ [3] STRUCT 'task_struct' size=9216 vlen=2
+ 'pid' type_id=1 bits_offset=17920
+ 'real_parent' type_id=7 bits_offset=18048
+ [4] ARRAY '(anon)' type_id=5 index_type_id=8 nr_elems=6
+ [5] INT 'long unsigned int' size=8 bits_offset=0 nr_bits=64 encoding=(none)
+ [6] TYPEDEF '__kernel_pid_t' type_id=8
+ [7] PTR '(anon)' type_id=3
+ [8] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED
+ <end>
+
+Now, the "5.4.0-smaller.btf" file may be used by libbpf as an external BTF file
+when loading the "one.bpf.o" object into the "5.4.0-example" kernel. Note that
+the generated BTF file won't allow other eBPF objects to be loaded, just the
+ones given to min_core_btf.
+
+::
+
+ LIBBPF_OPTS(bpf_object_open_opts, opts, .btf_custom_path = "5.4.0-smaller.btf");
+ struct bpf_object *obj;
+
+ obj = bpf_object__open_file("one.bpf.o", &opts);
+
+ ...
diff --git a/tools/bpf/bpftool/Documentation/bpftool-iter.rst b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
new file mode 100644
index 000000000000..84839d488621
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
@@ -0,0 +1,76 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+============
+bpftool-iter
+============
+-------------------------------------------------------------------------------
+tool to create BPF iterators
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **iter** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| }
+
+ *COMMANDS* := { **pin** | **help** }
+
+ITER COMMANDS
+===================
+
+| **bpftool** **iter pin** *OBJ* *PATH* [**map** *MAP*]
+| **bpftool** **iter help**
+|
+| *OBJ* := /a/file/of/bpf_iter_target.o
+| *MAP* := { **id** *MAP_ID* | **pinned** *FILE* }
+
+DESCRIPTION
+===========
+ **bpftool iter pin** *OBJ* *PATH* [**map** *MAP*]
+ A bpf iterator combines kernel-side iteration over
+ particular kernel data (e.g., tasks, bpf_maps, etc.)
+ with a bpf program called for each kernel data object
+ (e.g., one task, one bpf_map, etc.). User space can
+ *read* the kernel iterator output through the *read()* syscall.
+
+ The *pin* command creates a bpf iterator from *OBJ*,
+ and pins it to *PATH*. The *PATH* should be located
+ in a *bpffs* mount. It must not contain a dot
+ character ('.'), which is reserved for future extensions
+ of *bpffs*.
+
+ A map element bpf iterator requires an additional parameter
+ *MAP* so the bpf program can iterate over the elements of
+ that map. The user can have a bpf program in the kernel run
+ for each map element, doing checking, filtering, aggregation,
+ etc. without copying data to user space.
+
+ The user can then *cat PATH* to see the bpf iterator output.
+
+ **bpftool iter help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+EXAMPLES
+========
+**# bpftool iter pin bpf_iter_netlink.o /sys/fs/bpf/my_netlink**
+
+::
+
+ Create a file-based bpf iterator from bpf_iter_netlink.o and pin it
+ to /sys/fs/bpf/my_netlink
+
+**# bpftool iter pin bpf_iter_hashmap.o /sys/fs/bpf/my_hashmap map id 20**
+
+::
+
+ Create a file-based bpf iterator from bpf_iter_hashmap.o and map with
+ id 20, and pin it to /sys/fs/bpf/my_hashmap
diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst
new file mode 100644
index 000000000000..52a4eee4af54
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst
@@ -0,0 +1,112 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+================
+bpftool-link
+================
+-------------------------------------------------------------------------------
+tool for inspection and simple manipulation of eBPF links
+-------------------------------------------------------------------------------
+
+:Manual section: 8
+
+.. include:: substitutions.rst
+
+SYNOPSIS
+========
+
+ **bpftool** [*OPTIONS*] **link** *COMMAND*
+
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } }
+
+ *COMMANDS* := { **show** | **list** | **pin** | **help** }
+
+LINK COMMANDS
+=============
+
+| **bpftool** **link { show | list }** [*LINK*]
+| **bpftool** **link pin** *LINK* *FILE*
+| **bpftool** **link detach** *LINK*
+| **bpftool** **link help**
+|
+| *LINK* := { **id** *LINK_ID* | **pinned** *FILE* }
+
+
+DESCRIPTION
+===========
+ **bpftool link { show | list }** [*LINK*]
+ Show information about active links. If *LINK* is
+	  specified, show information only about the given link,
+ otherwise list all links currently active on the system.
+
+ Output will start with link ID followed by link type and
+	  zero or more named attributes, some of which depend on the type
+ of link.
+
+ Since Linux 5.8 bpftool is able to discover information about
+ processes that hold open file descriptors (FDs) against BPF
+ links. On such kernels bpftool will automatically emit this
+ information as well.
+
+ **bpftool link pin** *LINK* *FILE*
+ Pin link *LINK* as *FILE*.
+
+ Note: *FILE* must be located in *bpffs* mount. It must not
+ contain a dot character ('.'), which is reserved for future
+ extensions of *bpffs*.
+
+ **bpftool link detach** *LINK*
+	  Force-detach link *LINK*. The BPF link and its underlying BPF
+	  program will stay valid, but they will be detached from the
+	  respective BPF hook and the BPF link will transition into
+	  a defunct state until the last open file descriptor for that
+	  link is closed.
+
+ **bpftool link help**
+ Print short help message.
+
+OPTIONS
+=======
+ .. include:: common_options.rst
+
+ -f, --bpffs
+ When showing BPF links, show file names of pinned
+ links.
+
+ -n, --nomount
+ Do not automatically attempt to mount any virtual file system
+ (such as tracefs or BPF virtual file system) when necessary.
+
+EXAMPLES
+========
+**# bpftool link show**
+
+::
+
+ 10: cgroup prog 25
+ cgroup_id 614 attach_type egress
+ pids test_progs(223)
+
+**# bpftool --json --pretty link show**
+
+::
+
+ [{
+ "type": "cgroup",
+ "prog_id": 25,
+ "cgroup_id": 614,
+ "attach_type": "egress",
+ "pids": [{
+ "pid": 223,
+ "comm": "test_progs"
+ }
+ ]
+ }
+ ]
+
+|
+| **# bpftool link pin id 10 /sys/fs/bpf/link**
+| **# ls -l /sys/fs/bpf/**
+
+::
+
+ -rw------- 1 root root 0 Apr 23 21:39 link
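
As background for the manual page above: the links listed by **bpftool link show** are usually created from user space through libbpf when a program is attached. A minimal sketch, assuming libbpf 1.0 error conventions and a hypothetical object prog.bpf.o containing a program named handle_exec:

    /* Illustrative loader: attach one program and pin the resulting BPF
     * link in bpffs so that "bpftool link show -f" can display it.
     */
    #include <stdio.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
            struct bpf_object *obj;
            struct bpf_program *prog;
            struct bpf_link *link;

            obj = bpf_object__open_file("prog.bpf.o", NULL);
            if (!obj || bpf_object__load(obj))
                    return 1;

            prog = bpf_object__find_program_by_name(obj, "handle_exec");
            if (!prog)
                    return 1;

            /* Attaching creates a kernel bpf_link object... */
            link = bpf_program__attach(prog);
            if (!link)
                    return 1;

            /* ...which can then be pinned, like "bpftool link pin id N FILE" */
            if (bpf_link__pin(link, "/sys/fs/bpf/link"))
                    return 1;

            printf("link pinned at /sys/fs/bpf/link\n");
            return 0;
    }
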
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index cdeae8ae90ba..11250c4734fe 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
================
bpftool-map
================
@@ -7,23 +9,26 @@ tool for inspection and simple manipulation of eBPF maps
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **map** *COMMAND*
- *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
+ *OPTIONS* := { |COMMON_OPTIONS| | { **-f** | **--bpffs** } | { **-n** | **--nomount** } }
*COMMANDS* :=
- { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext**
- | **delete** | **pin** | **help** }
+ { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
+ **delete** | **pin** | **help** }
MAP COMMANDS
=============
-| **bpftool** **map { show | list }** [*MAP*]
+| **bpftool** **map** { **show** | **list** } [*MAP*]
| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
-| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
+| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] \
+| [**dev** *NAME*]
| **bpftool** **map dump** *MAP*
| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
| **bpftool** **map lookup** *MAP* [**key** *DATA*]
@@ -49,7 +54,8 @@ MAP COMMANDS
| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
-| | **queue** | **stack** }
+| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
+| | **task_storage** | **bloom_filter** | **user_ringbuf** | **cgrp_storage** }
DESCRIPTION
===========
@@ -62,10 +68,27 @@ DESCRIPTION
Output will start with map ID followed by map type and
zero or more named attributes (depending on kernel version).
- **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
+ Since Linux 5.8 bpftool is able to discover information about
+ processes that hold open file descriptors (FDs) against BPF
+ maps. On such kernels bpftool will automatically emit this
+ information as well.
+
+ **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**dev** *NAME*]
Create a new map with given parameters and pin it to *bpffs*
as *FILE*.
+ *FLAGS* should be an integer which is the combination of
+ desired flags, e.g. 1024 for **BPF_F_MMAPABLE** (see bpf.h
+ UAPI header for existing flags).
+
+ To create maps of type array-of-maps or hash-of-maps, the
+ **inner_map** keyword must be used to pass an inner map. The
+ kernel needs it to collect metadata related to the inner maps
+ that the new map will work with.
+
+ Keyword **dev** expects a network interface name, and is used
+ to request hardware offload for the map.
+
**bpftool map dump** *MAP*
Dump all entries in a given *MAP*. In case of **name**,
*MAP* may match several maps which will all be dumped.
@@ -78,7 +101,7 @@ DESCRIPTION
exists; **noexist** update only if entry doesn't exist.
If the **hex** keyword is provided in front of the bytes
- sequence, the bytes are parsed as hexadeximal values, even if
+ sequence, the bytes are parsed as hexadecimal values, even if
no "0x" prefix is added. If the keyword is not provided, then
the bytes are parsed as decimal values, unless a "0x" prefix
(for hexadecimal) or a "0" prefix (for octal) is provided.
@@ -100,10 +123,10 @@ DESCRIPTION
extensions of *bpffs*.
**bpftool** **map event_pipe** *MAP* [**cpu** *N* **index** *M*]
- Read events from a BPF_MAP_TYPE_PERF_EVENT_ARRAY map.
+ Read events from a **BPF_MAP_TYPE_PERF_EVENT_ARRAY** map.
Install perf rings into a perf event array map and dump
- output of any bpf_perf_event_output() call in the kernel.
+ output of any **bpf_perf_event_output**\ () call in the kernel.
By default read the number of CPUs on the system and
install perf ring for each CPU in the corresponding index
in the array.
@@ -116,24 +139,24 @@ DESCRIPTION
receiving events if it installed its rings earlier.
**bpftool map peek** *MAP*
- Peek next **value** in the queue or stack.
+ Peek next value in the queue or stack.
**bpftool map push** *MAP* **value** *VALUE*
- Push **value** onto the stack.
+ Push *VALUE* onto the stack.
**bpftool map pop** *MAP*
- Pop and print **value** from the stack.
+ Pop and print value from the stack.
**bpftool map enqueue** *MAP* **value** *VALUE*
- Enqueue **value** into the queue.
+ Enqueue *VALUE* into the queue.
**bpftool map dequeue** *MAP*
- Dequeue and print **value** from the queue.
+ Dequeue and print value from the queue.
**bpftool map freeze** *MAP*
Freeze the map as read-only from user space. Entries from a
	frozen map can no longer be updated or deleted with the
- **bpf\ ()** system call. This operation is not reversible,
+ **bpf**\ () system call. This operation is not reversible,
and the map remains immutable from user space until its
destruction. However, read and write permissions for BPF
programs to the map remain unchanged.
@@ -143,18 +166,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
Show file names of pinned maps.
@@ -163,17 +175,15 @@ OPTIONS
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
EXAMPLES
========
**# bpftool map show**
+
::
10: hash name some_map flags 0x0
- key 4B value 8B max_entries 2048 memlock 167936B
+ key 4B value 8B max_entries 2048 memlock 167936B
+ pids systemd(1)
The following three commands are equivalent:
@@ -190,6 +200,7 @@ The following three commands are equivalent:
**# bpftool map dump id 10**
+
::
key: 00 01 02 03 value: 00 01 02 03 04 05 06 07
@@ -197,6 +208,7 @@ The following three commands are equivalent:
Found 2 elements
**# bpftool map getnext id 10 key 0 1 2 3**
+
::
key:
@@ -263,15 +275,3 @@ would be lost as soon as bpftool exits).
key: 00 00 00 00 value: 22 02 00 00
Found 1 element
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-btf**\ (8)
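
The **inner_map** keyword documented above mirrors what the bpf() syscall needs when a map-in-map is created programmatically: the kernel records the key/value metadata of a template inner map when the outer map is created. A rough sketch using libbpf's bpf_map_create() (available since libbpf 0.7; names are illustrative):

    #include <unistd.h>
    #include <bpf/bpf.h>

    /* Create a hash_of_maps whose values are array maps. The template
     * inner map only supplies metadata and can be closed afterwards.
     */
    int create_hash_of_maps(void)
    {
            LIBBPF_OPTS(bpf_map_create_opts, opts);
            int inner_fd, outer_fd;

            /* Template inner map: 4-byte keys and values, one entry */
            inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner_tmpl",
                                      sizeof(int), sizeof(int), 1, NULL);
            if (inner_fd < 0)
                    return -1;

            /* The kernel copies the inner map's metadata from this fd */
            opts.inner_map_fd = inner_fd;
            outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer_map",
                                      sizeof(int), sizeof(int), 128, &opts);

            close(inner_fd); /* no longer needed once the outer map exists */
            return outer_fd;
    }

This is the same reason the bpftool command requires an existing map to be passed with **inner_map**.
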
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index 8651b00b81ea..f4e0a516335a 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
================
bpftool-net
================
@@ -7,12 +9,14 @@ tool for inspection of netdev/tc related bpf prog attachments
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **net** *COMMAND*
- *OPTIONS* := { [{ **-j** | **--json** }] [{ **-p** | **--pretty** }] }
+ *OPTIONS* := { |COMMON_OPTIONS| }
*COMMANDS* :=
{ **show** | **list** | **attach** | **detach** | **help** }
@@ -20,7 +24,7 @@ SYNOPSIS
NET COMMANDS
============
-| **bpftool** **net { show | list }** [ **dev** *NAME* ]
+| **bpftool** **net** { **show** | **list** } [ **dev** *NAME* ]
| **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ]
| **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME*
| **bpftool** **net help**
@@ -31,66 +35,51 @@ NET COMMANDS
DESCRIPTION
===========
**bpftool net { show | list }** [ **dev** *NAME* ]
- List bpf program attachments in the kernel networking subsystem.
-
- Currently, only device driver xdp attachments and tc filter
- classification/action attachments are implemented, i.e., for
- program types **BPF_PROG_TYPE_SCHED_CLS**,
- **BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
- For programs attached to a particular cgroup, e.g.,
- **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
- **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
- users can use **bpftool cgroup** to dump cgroup attachments.
- For sk_{filter, skb, msg, reuseport} and lwt/seg6
- bpf programs, users should consult other tools, e.g., iproute2.
-
- The current output will start with all xdp program attachments, followed by
- all tc class/qdisc bpf program attachments. Both xdp programs and
- tc programs are ordered based on ifindex number. If multiple bpf
- programs attached to the same networking device through **tc filter**,
- the order will be first all bpf programs attached to tc classes, then
- all bpf programs attached to non clsact qdiscs, and finally all
- bpf programs attached to root and clsact qdisc.
+ List bpf program attachments in the kernel networking subsystem.
+
+ Currently, only device driver xdp attachments and tc filter
+ classification/action attachments are implemented, i.e., for
+ program types **BPF_PROG_TYPE_SCHED_CLS**,
+ **BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
+ For programs attached to a particular cgroup, e.g.,
+ **BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
+ **BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ users can use **bpftool cgroup** to dump cgroup attachments.
+ For sk_{filter, skb, msg, reuseport} and lwt/seg6
+ bpf programs, users should consult other tools, e.g., iproute2.
+
+ The current output will start with all xdp program attachments, followed by
+ all tc class/qdisc bpf program attachments. Both xdp programs and
+ tc programs are ordered based on ifindex number. If multiple bpf
+	  programs are attached to the same networking device through **tc filter**,
+ the order will be first all bpf programs attached to tc classes, then
+ all bpf programs attached to non clsact qdiscs, and finally all
+ bpf programs attached to root and clsact qdisc.
**bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ]
- Attach bpf program *PROG* to network interface *NAME* with
- type specified by *ATTACH_TYPE*. Previously attached bpf program
- can be replaced by the command used with **overwrite** option.
- Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
+ Attach bpf program *PROG* to network interface *NAME* with
+	  type specified by *ATTACH_TYPE*. A previously attached bpf program
+	  can be replaced by running the command with the **overwrite** option.
+ Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
- *ATTACH_TYPE* can be of:
- **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
- **xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb;
- **xdpdrv** - Native XDP. runs earliest point in driver's receive path;
- **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
+	  *ATTACH_TYPE* can be one of:
+	  **xdp** - try native XDP and fall back to generic XDP if the NIC driver does not support it;
+	  **xdpgeneric** - Generic XDP; runs at the generic XDP hook when the packet has already entered the receive path as an skb;
+	  **xdpdrv** - Native XDP; runs at the earliest point in the driver's receive path;
+	  **xdpoffload** - Offload XDP; runs directly on the NIC for each received packet;
**bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME*
- Detach bpf program attached to network interface *NAME* with
- type specified by *ATTACH_TYPE*. To detach bpf program, same
- *ATTACH_TYPE* previously used for attach must be specified.
- Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
+ Detach bpf program attached to network interface *NAME* with
+	  type specified by *ATTACH_TYPE*. To detach a bpf program, the same
+	  *ATTACH_TYPE* previously used to attach it must be specified.
+ Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
**bpftool net help**
Print short help message.
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -187,16 +176,3 @@ EXAMPLES
::
xdp:
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-map**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-btf**\ (8)
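
The XDP attach modes listed for **bpftool net attach** map onto flags of the underlying attach operation. A hedged sketch using libbpf's bpf_xdp_attach() (libbpf 0.8+); the interface index and program fd are assumed to be obtained elsewhere:

    #include <linux/if_link.h>      /* XDP_FLAGS_* */
    #include <bpf/libbpf.h>

    /* Roughly what "bpftool net attach xdpgeneric ... dev ..." requests:
     * attach prog_fd to ifindex in generic (skb) mode. Using
     * XDP_FLAGS_DRV_MODE or XDP_FLAGS_HW_MODE instead corresponds to the
     * xdpdrv and xdpoffload keywords; passing no mode flag lets the
     * kernel try native XDP and fall back to generic, like plain "xdp".
     */
    int attach_xdp_generic(int ifindex, int prog_fd)
    {
            return bpf_xdp_attach(ifindex, prog_fd, XDP_FLAGS_SKB_MODE, NULL);
    }
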
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index e252bd0bc434..5fea633a82f1 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
================
bpftool-perf
================
@@ -7,12 +9,14 @@ tool for inspection of perf related bpf prog attachments
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **perf** *COMMAND*
- *OPTIONS* := { [{ **-j** | **--json** }] [{ **-p** | **--pretty** }] }
+ *OPTIONS* := { |COMMON_OPTIONS| }
*COMMANDS* :=
{ **show** | **list** | **help** }
@@ -20,7 +24,7 @@ SYNOPSIS
PERF COMMANDS
=============
-| **bpftool** **perf { show | list }**
+| **bpftool** **perf** { **show** | **list** }
| **bpftool** **perf help**
DESCRIPTION
@@ -40,22 +44,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -78,16 +67,3 @@ EXAMPLES
{"pid":21765,"fd":5,"prog_id":7,"fd_type":"kretprobe","func":"__x64_sys_nanosleep","offset":0}, \
{"pid":21767,"fd":5,"prog_id":8,"fd_type":"tracepoint","tracepoint":"sys_enter_nanosleep"}, \
{"pid":21800,"fd":5,"prog_id":9,"fd_type":"uprobe","filename":"/home/yhs/a.out","offset":1159}]
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-map**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-net**\ (8),
- **bpftool-btf**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 9f19404f470e..9443c524bb76 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
================
bpftool-prog
================
@@ -7,25 +9,29 @@ tool for inspection and simple manipulation of eBPF progs
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **prog** *COMMAND*
- *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-f** | **--bpffs** } }
+ *OPTIONS* := { |COMMON_OPTIONS| |
+ { **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } |
+ { **-L** | **--use-loader** } }
*COMMANDS* :=
- { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load**
- | **loadall** | **help** }
+ { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** |
+ **loadall** | **help** }
PROG COMMANDS
=============
-| **bpftool** **prog { show | list }** [*PROG*]
-| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual** | **linum**}]
-| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes** | **linum**}]
+| **bpftool** **prog** { **show** | **list** } [*PROG*]
+| **bpftool** **prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
+| **bpftool** **prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
| **bpftool** **prog pin** *PROG* *FILE*
-| **bpftool** **prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*]
+| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog detach** *PROG* *ATTACH_TYPE* [*MAP*]
| **bpftool** **prog tracelog**
@@ -41,16 +47,19 @@ PROG COMMANDS
| **cgroup/sock** | **cgroup/dev** | **lwt_in** | **lwt_out** | **lwt_xmit** |
| **lwt_seg6local** | **sockops** | **sk_skb** | **sk_msg** | **lirc_mode2** |
| **cgroup/bind4** | **cgroup/bind6** | **cgroup/post_bind4** | **cgroup/post_bind6** |
-| **cgroup/connect4** | **cgroup/connect6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
+| **cgroup/connect4** | **cgroup/connect6** | **cgroup/getpeername4** | **cgroup/getpeername6** |
+| **cgroup/getsockname4** | **cgroup/getsockname6** | **cgroup/sendmsg4** | **cgroup/sendmsg6** |
| **cgroup/recvmsg4** | **cgroup/recvmsg6** | **cgroup/sysctl** |
-| **cgroup/getsockopt** | **cgroup/setsockopt** |
-| **struct_ops** | **fentry** | **fexit** | **freplace**
+| **cgroup/getsockopt** | **cgroup/setsockopt** | **cgroup/sock_release** |
+| **struct_ops** | **fentry** | **fexit** | **freplace** | **sk_lookup**
| }
-| *ATTACH_TYPE* := {
-| **msg_verdict** | **stream_verdict** | **stream_parser** | **flow_dissector**
+| *ATTACH_TYPE* := {
+| **sk_msg_verdict** | **sk_skb_verdict** | **sk_skb_stream_verdict** |
+| **sk_skb_stream_parser** | **flow_dissector**
| }
-| *METRIC* := {
-| **cycles** | **instructions** | **l1d_loads** | **llc_misses**
+| *METRICs* := {
+| **cycles** | **instructions** | **l1d_loads** | **llc_misses** |
+| **itlb_misses** | **dtlb_misses**
| }
@@ -74,7 +83,12 @@ DESCRIPTION
program run. Activation or deactivation of the feature is
performed via the **kernel.bpf_stats_enabled** sysctl knob.
- **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }]
+ Since Linux 5.8 bpftool is able to discover information about
+ processes that hold open file descriptors (FDs) against BPF
+ programs. On such kernels bpftool will automatically emit this
+ information as well.
+
+ **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
Dump eBPF instructions of the programs from the kernel. By
default, eBPF will be disassembled and printed to standard
output in human-readable format. In this case, **opcodes**
@@ -92,11 +106,10 @@ DESCRIPTION
CFG in DOT format, on standard output.
If the programs have line_info available, the source line will
- be displayed by default. If **linum** is specified,
- the filename, line number and line column will also be
- displayed on top of the source line.
+ be displayed. If **linum** is specified, the filename, line
+ number and line column will also be displayed.
- **bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** | **linum** }]
+ **bpftool prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
Dump jited image (host machine code) of the program.
If *FILE* is specified image will be written to a file,
@@ -106,9 +119,8 @@ DESCRIPTION
**opcodes** controls if raw opcodes will be printed.
If the prog has line_info available, the source line will
- be displayed by default. If **linum** is specified,
- the filename, line number and line column will also be
- displayed on top of the source line.
+ be displayed. If **linum** is specified, the filename, line
+ number and line column will also be displayed.
**bpftool prog pin** *PROG* *FILE*
Pin program *PROG* as *FILE*.
@@ -117,7 +129,7 @@ DESCRIPTION
contain a dot character ('.'), which is reserved for future
extensions of *bpffs*.
- **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*]
+ **bpftool prog { load | loadall }** *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
Load bpf program(s) from binary *OBJ* and pin as *PATH*.
**bpftool prog load** pins only the first program from the
*OBJ* as *PATH*. **bpftool prog loadall** pins all programs
@@ -136,6 +148,17 @@ DESCRIPTION
Optional **pinmaps** argument can be provided to pin all
maps under *MAP_DIR* directory.
+	  If **autoattach** is specified, the program will be attached
+	  before pinning. In that case, only the link (representing the
+	  program attached to its hook) is pinned, not the program as
+	  such, so the path will show up in **bpftool link show -f**
+	  rather than in **bpftool prog show -f**. Also, this only works
+	  when bpftool (libbpf) is able to infer all necessary
+	  information from the object file; in particular, it is not
+	  supported for all program types. If a program does not
+	  support autoattach, bpftool falls back to regular pinning
+	  for that program instead.
+
Note: *PATH* must be located in *bpffs* mount. It must not
contain a dot character ('.'), which is reserved for future
extensions of *bpffs*.
@@ -155,7 +178,7 @@ DESCRIPTION
**bpftool prog tracelog**
Dump the trace pipe of the system to the console (stdout).
Hit <Ctrl+C> to stop printing. BPF programs can write to this
- trace pipe at runtime with the **bpf_trace_printk()** helper.
+ trace pipe at runtime with the **bpf_trace_printk**\ () helper.
This should be used only for debugging purposes. For
streaming data from BPF programs to user space, one can use
perf events (see also **bpftool-map**\ (8)).
@@ -195,27 +218,16 @@ DESCRIPTION
**bpftool prog profile** *PROG* [**duration** *DURATION*] *METRICs*
Profile *METRICs* for bpf program *PROG* for *DURATION*
- seconds or until user hits Ctrl-C. *DURATION* is optional.
+ seconds or until user hits <Ctrl+C>. *DURATION* is optional.
If *DURATION* is not specified, the profiling will run up to
- UINT_MAX seconds.
+ **UINT_MAX** seconds.
**bpftool prog help**
Print short help message.
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
When showing BPF programs, show file names of pinned
@@ -228,10 +240,19 @@ OPTIONS
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
+ -L, --use-loader
+ Load program as a "loader" program. This is useful to debug
+ the generation of such programs. When this option is in
+ use, bpftool attempts to load the programs from the object
+ file into the kernel, but does not pin them (therefore, the
+ *PATH* must not be provided).
+
+ When combined with the **-d**\ \|\ **--debug** option,
+ additional debug messages are generated, and the execution
+ of the loader program will use the **bpf_trace_printk**\ ()
+ helper to log each step of loading BTF, creating the maps,
+ and loading the programs (see **bpftool prog tracelog** as
+ a way to dump those messages).
EXAMPLES
========
@@ -242,6 +263,7 @@ EXAMPLES
10: xdp name some_prog tag 005a3d2123620c8b gpl run_time_ns 81632 run_cnt 10
loaded_at 2017-09-29T20:11:00+0000 uid 0
xlated 528B jited 370B memlock 4096B map_ids 10
+ pids systemd(1)
**# bpftool --json --pretty prog show**
@@ -261,13 +283,18 @@ EXAMPLES
"bytes_jited": 370,
"bytes_memlock": 4096,
"map_ids": [10
+ ],
+ "pids": [{
+ "pid": 1,
+ "comm": "systemd"
+ }
]
}
]
|
| **# bpftool prog dump xlated id 10 file /tmp/t**
-| **# ls -l /tmp/t**
+| **$ ls -l /tmp/t**
::
@@ -325,19 +352,21 @@ EXAMPLES
| **# bpftool prog profile id 337 duration 10 cycles instructions llc_misses**
::
+
51397 run_cnt
40176203 cycles (83.05%)
42518139 instructions # 1.06 insns per cycle (83.39%)
123 llc_misses # 2.89 LLC misses per million insns (83.15%)
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-map**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-btf**\ (8)
+|
+| Output below is for the trace logs.
+| Run in separate terminals:
+| **# bpftool prog tracelog**
+| **# bpftool prog load -L -d file.o**
+
+::
+
+ bpftool-620059 [004] d... 2634685.517903: bpf_trace_printk: btf_load size 665 r=5
+ bpftool-620059 [004] d... 2634685.517912: bpf_trace_printk: map_create sample_map idx 0 type 2 value_size 4 value_btf_id 0 r=6
+ bpftool-620059 [004] d... 2634685.517997: bpf_trace_printk: prog_load sample insn_cnt 13 r=7
+ bpftool-620059 [004] d... 2634685.517999: bpf_trace_printk: close(5) = 0
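
The **autoattach** option described in this page relies on libbpf inferring the attach point from each program's section name; this is the same mechanism used by the skeletons that **bpftool gen skeleton** produces. A sketch with hypothetical names (myprog.bpf.o and its generated myprog.skel.h):

    /* Illustrative user-space counterpart of loading with autoattach:
     * open, load and auto-attach every program in the object.
     */
    #include <stdio.h>
    #include "myprog.skel.h"   /* from: bpftool gen skeleton myprog.bpf.o */

    int main(void)
    {
            struct myprog_bpf *skel;

            skel = myprog_bpf__open_and_load();
            if (!skel)
                    return 1;

            /* Attaches each program based on its SEC() name, where libbpf
             * can infer the target -- the same inference autoattach uses.
             */
            if (myprog_bpf__attach(skel)) {
                    myprog_bpf__destroy(skel);
                    return 1;
            }

            printf("programs attached; links live until destroy()\n");
            myprog_bpf__destroy(skel);
            return 0;
    }
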
diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
index f045cc89dd6d..8022b5321dbe 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
==================
bpftool-struct_ops
==================
@@ -7,12 +9,14 @@ tool to register/unregister/introspect BPF struct_ops
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
**bpftool** [*OPTIONS*] **struct_ops** *COMMAND*
- *OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+ *OPTIONS* := { |COMMON_OPTIONS| }
*COMMANDS* :=
{ **show** | **list** | **dump** | **register** | **unregister** | **help** }
@@ -22,7 +26,7 @@ STRUCT_OPS COMMANDS
| **bpftool** **struct_ops { show | list }** [*STRUCT_OPS_MAP*]
| **bpftool** **struct_ops dump** [*STRUCT_OPS_MAP*]
-| **bpftool** **struct_ops register** *OBJ*
+| **bpftool** **struct_ops register** *OBJ* [*LINK_DIR*]
| **bpftool** **struct_ops unregister** *STRUCT_OPS_MAP*
| **bpftool** **struct_ops help**
|
@@ -47,10 +51,14 @@ DESCRIPTION
for the given struct_ops. Otherwise, it dumps all struct_ops
currently existing in the system.
- **bpftool struct_ops register** *OBJ*
+ **bpftool struct_ops register** *OBJ* [*LINK_DIR*]
Register bpf struct_ops from *OBJ*. All struct_ops under
- the ELF section ".struct_ops" will be registered to
- its kernel subsystem.
+	  the ELF sections ".struct_ops" and ".struct_ops.link" will
+	  be registered to their kernel subsystems. For each
+	  struct_ops in the ".struct_ops.link" section, a link
+	  will be created. You can give *LINK_DIR* to provide a
+	  directory path where these links will be pinned under the
+	  same name as their corresponding map.
**bpftool struct_ops unregister** *STRUCT_OPS_MAP*
Unregister the *STRUCT_OPS_MAP* from the kernel subsystem.
@@ -60,23 +68,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -98,19 +90,3 @@ EXAMPLES
::
Registered tcp_congestion_ops cubic id 110
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-map**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-btf**\ (8)
- **bpftool-gen**\ (8)
-
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 34239fda69ed..6965c94dfdaf 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
================
BPFTOOL
================
@@ -7,6 +9,8 @@ tool for inspection and simple manipulation of eBPF programs and maps
:Manual section: 8
+.. include:: substitutions.rst
+
SYNOPSIS
========
@@ -16,17 +20,19 @@ SYNOPSIS
**bpftool** **version**
- *OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }
+ *OBJECT* := { **map** | **program** | **link** | **cgroup** | **perf** | **net** | **feature** |
+ **btf** | **gen** | **struct_ops** | **iter** }
- *OPTIONS* := { { **-V** | **--version** } | { **-h** | **--help** }
- | { **-j** | **--json** } [{ **-p** | **--pretty** }] }
+ *OPTIONS* := { { **-V** | **--version** } | |COMMON_OPTIONS| }
*MAP-COMMANDS* :=
- { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext**
- | **delete** | **pin** | **event_pipe** | **help** }
+ { **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
+ **delete** | **pin** | **event_pipe** | **help** }
- *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin**
- | **load** | **attach** | **detach** | **help** }
+ *PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** |
+ **load** | **attach** | **detach** | **help** }
+
+ *LINK-COMMANDS* := { **show** | **list** | **pin** | **detach** | **help** }
*CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }
@@ -36,6 +42,14 @@ SYNOPSIS
*FEATURE-COMMANDS* := { **probe** | **help** }
+ *BTF-COMMANDS* := { **show** | **list** | **dump** | **help** }
+
+ *GEN-COMMANDS* := { **object** | **skeleton** | **min_core_btf** | **help** }
+
+ *STRUCT-OPS-COMMANDS* := { **show** | **list** | **dump** | **register** | **unregister** | **help** }
+
+ *ITER-COMMANDS* := { **pin** | **help** }
+
DESCRIPTION
===========
*bpftool* allows for inspection and simple modification of BPF objects
@@ -46,18 +60,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-m, --mapcompat
Allow loading maps with unknown map definitions.
@@ -65,21 +68,3 @@ OPTIONS
-n, --nomount
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
-
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool-prog**\ (8),
- **bpftool-map**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-gen**\ (8),
diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst
new file mode 100644
index 000000000000..30df7a707f02
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/common_options.rst
@@ -0,0 +1,25 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+-h, --help
+ Print short help message (similar to **bpftool help**).
+
+-V, --version
+ Print bpftool's version number (similar to **bpftool version**), the
+ number of the libbpf version in use, and optional features that were
+ included when bpftool was compiled. Optional features include linking
+ against LLVM or libbfd to provide the disassembler for JIT-ted
+ programs (**bpftool prog dump jited**) and usage of BPF skeletons
+ (some features like **bpftool prog profile** or showing pids
+    associated with BPF objects may rely on it).
+
+-j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+-p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+-d, --debug
+ Print all logs available, even debug-level information. This includes
+ logs from libbpf as well as from the verifier, when attempting to
+ load programs.
diff --git a/tools/bpf/bpftool/Documentation/substitutions.rst b/tools/bpf/bpftool/Documentation/substitutions.rst
new file mode 100644
index 000000000000..827e3ffb1766
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/substitutions.rst
@@ -0,0 +1,3 @@
+.. SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+
+.. |COMMON_OPTIONS| replace:: { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** }
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index f584d1fdfc64..681fbcc5ed50 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -1,6 +1,5 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
include ../../scripts/Makefile.include
-include ../../scripts/utilities.mak
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -14,40 +13,75 @@ else
Q = @
endif
-BPF_DIR = $(srctree)/tools/lib/bpf/
+BPF_DIR = $(srctree)/tools/lib/bpf
ifneq ($(OUTPUT),)
- LIBBPF_OUTPUT = $(OUTPUT)/libbpf/
- LIBBPF_PATH = $(LIBBPF_OUTPUT)
+ _OUTPUT := $(OUTPUT)
else
- LIBBPF_PATH = $(BPF_DIR)
+ _OUTPUT := $(CURDIR)/
endif
-
-LIBBPF = $(LIBBPF_PATH)libbpf.a
-
-BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion)
-
-$(LIBBPF): FORCE
- $(if $(LIBBPF_OUTPUT),@mkdir -p $(LIBBPF_OUTPUT))
- $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) $(LIBBPF_OUTPUT)libbpf.a
-
-$(LIBBPF)-clean:
+BOOTSTRAP_OUTPUT := $(_OUTPUT)bootstrap/
+
+LIBBPF_OUTPUT := $(_OUTPUT)libbpf/
+LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
+LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include
+LIBBPF_HDRS_DIR := $(LIBBPF_INCLUDE)/bpf
+LIBBPF := $(LIBBPF_OUTPUT)libbpf.a
+
+LIBBPF_BOOTSTRAP_OUTPUT := $(BOOTSTRAP_OUTPUT)libbpf/
+LIBBPF_BOOTSTRAP_DESTDIR := $(LIBBPF_BOOTSTRAP_OUTPUT)
+LIBBPF_BOOTSTRAP_INCLUDE := $(LIBBPF_BOOTSTRAP_DESTDIR)include
+LIBBPF_BOOTSTRAP_HDRS_DIR := $(LIBBPF_BOOTSTRAP_INCLUDE)/bpf
+LIBBPF_BOOTSTRAP := $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a
+
+# We need to copy hashmap.h, nlattr.h, relo_core.h and libbpf_internal.h
+# which are not otherwise exported by libbpf, but still required by bpftool.
+LIBBPF_INTERNAL_HDRS := $(addprefix $(LIBBPF_HDRS_DIR)/,hashmap.h nlattr.h relo_core.h libbpf_internal.h)
+LIBBPF_BOOTSTRAP_INTERNAL_HDRS := $(addprefix $(LIBBPF_BOOTSTRAP_HDRS_DIR)/,hashmap.h relo_core.h libbpf_internal.h)
+
+$(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DIR) $(LIBBPF_BOOTSTRAP_HDRS_DIR):
+ $(QUIET_MKDIR)mkdir -p $@
+
+$(LIBBPF): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_OUTPUT)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) \
+ DESTDIR=$(LIBBPF_DESTDIR:/=) prefix= $(LIBBPF) install_headers
+
+$(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_DIR)
+ $(call QUIET_INSTALL, $@)
+ $(Q)install -m 644 -t $(LIBBPF_HDRS_DIR) $<
+
+$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
+ DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR:/=) prefix= \
+ ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" $@ install_headers
+
+$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
+ $(call QUIET_INSTALL, $@)
+ $(Q)install -m 644 -t $(LIBBPF_BOOTSTRAP_HDRS_DIR) $<
+
+$(LIBBPF)-clean: FORCE | $(LIBBPF_OUTPUT)
$(call QUIET_CLEAN, libbpf)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) clean >/dev/null
+$(LIBBPF_BOOTSTRAP)-clean: FORCE | $(LIBBPF_BOOTSTRAP_OUTPUT)
+ $(call QUIET_CLEAN, libbpf-bootstrap)
+ $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) clean >/dev/null
+
prefix ?= /usr/local
bash_compdir ?= /usr/share/bash-completion/completions
CFLAGS += -O2
CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers
-CFLAGS += $(filter-out -Wswitch-enum,$(EXTRA_WARNINGS))
+CFLAGS += $(filter-out -Wswitch-enum -Wnested-externs,$(EXTRA_WARNINGS))
CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \
+ -I$(or $(OUTPUT),.) \
+ -I$(LIBBPF_INCLUDE) \
-I$(srctree)/kernel/bpf/ \
-I$(srctree)/tools/include \
- -I$(srctree)/tools/include/uapi \
- -I$(srctree)/tools/lib \
- -I$(srctree)/tools/perf
+ -I$(srctree)/tools/include/uapi
+ifneq ($(BPFTOOL_VERSION),)
CFLAGS += -DBPFTOOL_VERSION='"$(BPFTOOL_VERSION)"'
+endif
ifneq ($(EXTRA_CFLAGS),)
CFLAGS += $(EXTRA_CFLAGS)
endif
@@ -55,16 +89,26 @@ ifneq ($(EXTRA_LDFLAGS),)
LDFLAGS += $(EXTRA_LDFLAGS)
endif
-LIBS = $(LIBBPF) -lelf -lz
-
INSTALL ?= install
RM ?= rm -f
-CLANG ?= clang
FEATURE_USER = .bpftool
-FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib \
- clang-bpf-global-var
-FEATURE_DISPLAY = libbfd disassembler-four-args zlib clang-bpf-global-var
+
+FEATURE_TESTS := clang-bpf-co-re
+FEATURE_TESTS += llvm
+FEATURE_TESTS += libcap
+FEATURE_TESTS += libbfd
+FEATURE_TESTS += libbfd-liberty
+FEATURE_TESTS += libbfd-liberty-z
+FEATURE_TESTS += disassembler-four-args
+FEATURE_TESTS += disassembler-init-styled
+
+FEATURE_DISPLAY := clang-bpf-co-re
+FEATURE_DISPLAY += llvm
+FEATURE_DISPLAY += libcap
+FEATURE_DISPLAY += libbfd
+FEATURE_DISPLAY += libbfd-liberty
+FEATURE_DISPLAY += libbfd-liberty-z
check_feat := 1
NON_CHECK_FEAT_TARGETS := clean uninstall doc doc-clean doc-install doc-uninstall
@@ -82,86 +126,148 @@ include $(FEATURES_DUMP)
endif
endif
-ifeq ($(feature-disassembler-four-args), 1)
-CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
-endif
-
-ifeq ($(feature-reallocarray), 0)
-CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+LIBS = $(LIBBPF) -lelf -lz
+LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz
+ifeq ($(feature-libcap), 1)
+CFLAGS += -DUSE_LIBCAP
+LIBS += -lcap
endif
include $(wildcard $(OUTPUT)*.d)
all: $(OUTPUT)bpftool
-BFD_SRCS = jit_disasm.c
+SRCS := $(wildcard *.c)
+
+ifeq ($(feature-llvm),1)
+ # If LLVM is available, use it for JIT disassembly
+ CFLAGS += -DHAVE_LLVM_SUPPORT
+ LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets
+ CFLAGS += $(shell $(LLVM_CONFIG) --cflags --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ LIBS += $(shell $(LLVM_CONFIG) --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ ifeq ($(shell $(LLVM_CONFIG) --shared-mode),static)
+ LIBS += $(shell $(LLVM_CONFIG) --system-libs $(LLVM_CONFIG_LIB_COMPONENTS))
+ LIBS += -lstdc++
+ endif
+ LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags)
+else
+ # Fall back on libbfd
+ ifeq ($(feature-libbfd),1)
+ LIBS += -lbfd -ldl -lopcodes
+ else ifeq ($(feature-libbfd-liberty),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty
+ else ifeq ($(feature-libbfd-liberty-z),1)
+ LIBS += -lbfd -ldl -lopcodes -liberty -lz
+ endif
+
+ # If one of the above feature combinations is set, we support libbfd
+ ifneq ($(filter -lbfd,$(LIBS)),)
+ CFLAGS += -DHAVE_LIBBFD_SUPPORT
+
+ # Libbfd interface changed over time, figure out what we need
+ ifeq ($(feature-disassembler-four-args), 1)
+ CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
+ endif
+ ifeq ($(feature-disassembler-init-styled), 1)
+ CFLAGS += -DDISASM_INIT_STYLED
+ endif
+ endif
+endif
+ifeq ($(filter -DHAVE_LLVM_SUPPORT -DHAVE_LIBBFD_SUPPORT,$(CFLAGS)),)
+ # No support for JIT disassembly
+ SRCS := $(filter-out jit_disasm.c,$(SRCS))
+endif
-SRCS = $(filter-out $(BFD_SRCS),$(wildcard *.c))
+HOST_CFLAGS = $(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),\
+ $(subst $(CLANG_CROSS_FLAGS),,$(CFLAGS)))
-ifeq ($(feature-libbfd),1)
- LIBS += -lbfd -ldl -lopcodes
-else ifeq ($(feature-libbfd-liberty),1)
- LIBS += -lbfd -ldl -lopcodes -liberty
-else ifeq ($(feature-libbfd-liberty-z),1)
- LIBS += -lbfd -ldl -lopcodes -liberty -lz
-endif
+BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
-ifneq ($(filter -lbfd,$(LIBS)),)
-CFLAGS += -DHAVE_LIBBFD_SUPPORT
-SRCS += $(BFD_SRCS)
-endif
+BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o)
+$(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP)
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
-_OBJS = $(filter-out $(OUTPUT)prog.o,$(OBJS)) $(OUTPUT)_prog.o
+$(OBJS): $(LIBBPF) $(LIBBPF_INTERNAL_HDRS)
-ifeq ($(feature-clang-bpf-global-var),1)
- __OBJS = $(OBJS)
-else
- __OBJS = $(_OBJS)
-endif
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+
+bootstrap: $(BPFTOOL_BOOTSTRAP)
-$(OUTPUT)_prog.o: prog.c
- $(QUIET_CC)$(COMPILE.c) -MMD -DBPFTOOL_WITHOUT_SKELETONS -o $@ $<
+ifneq ($(VMLINUX_BTF)$(VMLINUX_H),)
+ifeq ($(feature-clang-bpf-co-re),1)
-$(OUTPUT)_bpftool: $(_OBJS) $(LIBBPF)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(_OBJS) $(LIBS)
+BUILD_BPF_SKELS := 1
-skeleton/profiler.bpf.o: skeleton/profiler.bpf.c $(LIBBPF)
+$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP)
+ifeq ($(VMLINUX_H),)
+ $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@
+else
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
$(QUIET_CLANG)$(CLANG) \
+ -I$(or $(OUTPUT),.) \
-I$(srctree)/tools/include/uapi/ \
- -I$(LIBBPF_PATH) -I$(srctree)/tools/lib \
- -g -O2 -target bpf -c $< -o $@
+ -I$(LIBBPF_BOOTSTRAP_INCLUDE) \
+ -g -O2 -Wall -fno-stack-protector \
+ -target bpf -c $< -o $@
+ $(Q)$(LLVM_STRIP) -g $@
+
+$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
+ $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@
-profiler.skel.h: $(OUTPUT)_bpftool skeleton/profiler.bpf.o
- $(QUIET_GEN)$(OUTPUT)./_bpftool gen skeleton skeleton/profiler.bpf.o > $@
+$(OUTPUT)prog.o: $(OUTPUT)profiler.skel.h
-$(OUTPUT)prog.o: prog.c profiler.skel.h
- $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
+$(OUTPUT)pids.o: $(OUTPUT)pid_iter.skel.h
+
+endif
+endif
+
+CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)
+
+$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
+ $(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
- $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
+
+$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
+ $(QUIET_LINK)$(HOSTCC) $(HOST_CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@
-$(OUTPUT)feature.o: | zdep
+$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@
-$(OUTPUT)bpftool: $(__OBJS) $(LIBBPF)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(__OBJS) $(LIBS)
+$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_BOOTSTRAP_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
+ $(QUIET_CC)$(HOSTCC) $(HOST_CFLAGS) -c -MMD $< -o $@
$(OUTPUT)%.o: %.c
- $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $<
+ $(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
-clean: $(LIBBPF)-clean
+feature-detect-clean:
+ $(call QUIET_CLEAN, feature-detect)
+ $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
+
+clean: $(LIBBPF)-clean $(LIBBPF_BOOTSTRAP)-clean feature-detect-clean
$(call QUIET_CLEAN, bpftool)
$(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
- $(Q)$(RM) -- $(OUTPUT)_bpftool profiler.skel.h skeleton/profiler.bpf.o
- $(Q)$(RM) -r -- $(OUTPUT)libbpf/
+ $(Q)$(RM) -- $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
+ $(Q)$(RM) -r -- $(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT)
$(call QUIET_CLEAN, core-gen)
$(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool
$(Q)$(RM) -r -- $(OUTPUT)feature/
-install: $(OUTPUT)bpftool
+install-bin: $(OUTPUT)bpftool
$(call QUIET_INSTALL, bpftool)
$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(prefix)/sbin
$(Q)$(INSTALL) $(OUTPUT)bpftool $(DESTDIR)$(prefix)/sbin/bpftool
+
+install: install-bin
$(Q)$(INSTALL) -m 0755 -d $(DESTDIR)$(bash_compdir)
$(Q)$(INSTALL) -m 0644 bash-completion/bpftool $(DESTDIR)$(bash_compdir)
@@ -184,9 +290,10 @@ doc-uninstall:
FORCE:
-zdep:
- @if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi
-
-.PHONY: all FORCE clean install uninstall zdep
+.SECONDARY:
+.PHONY: all FORCE bootstrap clean install-bin install uninstall
.PHONY: doc doc-clean doc-install doc-uninstall
.DEFAULT_GOAL := all
+
+# Delete partially updated (corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index 45ee99b159e2..e7234d1a5306 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -98,6 +98,12 @@ _bpftool_get_btf_ids()
command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
}
+_bpftool_get_link_ids()
+{
+ COMPREPLY+=( $( compgen -W "$( bpftool -jp link 2>&1 | \
+ command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) )
+}
+
_bpftool_get_obj_map_names()
{
local obj
@@ -249,19 +255,23 @@ _bpftool_map_update_get_name()
_bpftool()
{
- local cur prev words objword
+ local cur prev words objword json=0
_init_completion || return
# Deal with options
if [[ ${words[cword]} == -* ]]; then
- local c='--version --json --pretty --bpffs --mapcompat --debug'
+ local c='--version --json --pretty --bpffs --mapcompat --debug \
+ --use-loader --base-btf'
COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
return 0
fi
+ if _bpftool_search_list -j --json -p --pretty; then
+ json=1
+ fi
# Deal with simplest keywords
case $prev in
- help|hex|opcodes|visual|linum)
+ help|hex)
return 0
;;
tag)
@@ -272,7 +282,7 @@ _bpftool()
_sysfs_get_netdevs
return 0
;;
- file|pinned)
+ file|pinned|-B|--base-btf)
_filedir
return 0
;;
@@ -285,7 +295,8 @@ _bpftool()
# Remove all options so completions don't have to deal with them.
local i
for (( i=1; i < ${#words[@]}; )); do
- if [[ ${words[i]::1} == - ]]; then
+ if [[ ${words[i]::1} == - ]] &&
+ [[ ${words[i]} != "-B" ]] && [[ ${words[i]} != "--base-btf" ]]; then
words=( "${words[@]:0:i}" "${words[@]:i+1}" )
[[ $i -le $cword ]] && cword=$(( cword - 1 ))
else
@@ -337,7 +348,8 @@ _bpftool()
local PROG_TYPE='id pinned tag name'
local MAP_TYPE='id pinned name'
- local METRIC_TYPE='cycles instructions l1d_loads llc_misses'
+ local METRIC_TYPE='cycles instructions l1d_loads llc_misses \
+ itlb_misses dtlb_misses'
case $command in
show|list)
[[ $prev != "$command" ]] && return 0
@@ -357,13 +369,16 @@ _bpftool()
return 0
;;
*)
- _bpftool_once_attr 'file'
- if _bpftool_search_list 'xlated'; then
- COMPREPLY+=( $( compgen -W 'opcodes visual linum' -- \
- "$cur" ) )
- else
- COMPREPLY+=( $( compgen -W 'opcodes linum' -- \
- "$cur" ) )
+ # "file" is not compatible with other keywords here
+ if _bpftool_search_list 'file'; then
+ return 0
+ fi
+ if ! _bpftool_search_list 'linum opcodes visual'; then
+ _bpftool_once_attr 'file'
+ fi
+ _bpftool_once_attr 'linum opcodes'
+ if _bpftool_search_list 'xlated' && [[ "$json" == 0 ]]; then
+ _bpftool_once_attr 'visual'
fi
return 0
;;
@@ -398,8 +413,10 @@ _bpftool()
return 0
;;
5)
- COMPREPLY=( $( compgen -W 'msg_verdict stream_verdict \
- stream_parser flow_dissector' -- "$cur" ) )
+ local BPFTOOL_PROG_ATTACH_TYPES='sk_msg_verdict \
+ sk_skb_verdict sk_skb_stream_verdict sk_skb_stream_parser \
+ flow_dissector'
+ COMPREPLY=( $( compgen -W "$BPFTOOL_PROG_ATTACH_TYPES" -- "$cur" ) )
return 0
;;
6)
@@ -458,7 +475,7 @@ _bpftool()
case $prev in
type)
- COMPREPLY=( $( compgen -W "socket kprobe \
+ local BPFTOOL_PROG_LOAD_TYPES='socket kprobe \
kretprobe classifier flow_dissector \
action tracepoint raw_tracepoint \
xdp perf_event cgroup/skb cgroup/sock \
@@ -466,13 +483,15 @@ _bpftool()
lwt_seg6local sockops sk_skb sk_msg \
lirc_mode2 cgroup/bind4 cgroup/bind6 \
cgroup/connect4 cgroup/connect6 \
+ cgroup/getpeername4 cgroup/getpeername6 \
+ cgroup/getsockname4 cgroup/getsockname6 \
cgroup/sendmsg4 cgroup/sendmsg6 \
cgroup/recvmsg4 cgroup/recvmsg6 \
cgroup/post_bind4 cgroup/post_bind6 \
cgroup/sysctl cgroup/getsockopt \
- cgroup/setsockopt struct_ops \
- fentry fexit freplace" -- \
- "$cur" ) )
+ cgroup/setsockopt cgroup/sock_release struct_ops \
+ fentry fexit freplace sk_lookup'
+ COMPREPLY=( $( compgen -W "$BPFTOOL_PROG_LOAD_TYPES" -- "$cur" ) )
return 0
;;
id)
@@ -489,9 +508,7 @@ _bpftool()
;;
*)
COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
- _bpftool_once_attr 'type'
- _bpftool_once_attr 'dev'
- _bpftool_once_attr 'pinmaps'
+ _bpftool_once_attr 'type dev pinmaps autoattach'
return 0
;;
esac
@@ -604,6 +621,35 @@ _bpftool()
;;
esac
;;
+ iter)
+ case $command in
+ pin)
+ case $prev in
+ $command)
+ _filedir
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ _bpftool_get_map_names
+ ;;
+ pinned)
+ _filedir
+ ;;
+ *)
+ _bpftool_one_of_list $MAP_TYPE
+ ;;
+ esac
+ return 0
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'pin help' \
+ -- "$cur" ) )
+ ;;
+ esac
+ ;;
map)
local MAP_TYPE='id pinned name'
case $command in
@@ -661,27 +707,36 @@ _bpftool()
return 0
;;
type)
- COMPREPLY=( $( compgen -W 'hash array prog_array \
- perf_event_array percpu_hash percpu_array \
- stack_trace cgroup_array lru_hash \
- lru_percpu_hash lpm_trie array_of_maps \
- hash_of_maps devmap devmap_hash sockmap cpumap \
- xskmap sockhash cgroup_storage reuseport_sockarray \
- percpu_cgroup_storage queue stack' -- \
- "$cur" ) )
+ local BPFTOOL_MAP_CREATE_TYPES="$(bpftool feature list_builtins map_types 2>/dev/null | \
+ grep -v '^unspec$')"
+ COMPREPLY=( $( compgen -W "$BPFTOOL_MAP_CREATE_TYPES" -- "$cur" ) )
return 0
;;
- key|value|flags|name|entries)
+ key|value|flags|entries)
return 0
;;
+ inner_map)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ case $pprev in
+ inner_map)
+ _bpftool_get_map_names
+ ;;
+ *)
+ return 0
+ ;;
+ esac
+ ;;
*)
- _bpftool_once_attr 'type'
- _bpftool_once_attr 'key'
- _bpftool_once_attr 'value'
- _bpftool_once_attr 'entries'
- _bpftool_once_attr 'name'
- _bpftool_once_attr 'flags'
- _bpftool_once_attr 'dev'
+ _bpftool_once_attr 'type key value entries name flags dev'
+ if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then
+ _bpftool_once_attr 'inner_map'
+ fi
return 0
;;
esac
@@ -822,8 +877,7 @@ _bpftool()
return 0
;;
*)
- _bpftool_once_attr 'cpu'
- _bpftool_once_attr 'index'
+ _bpftool_once_attr 'cpu index'
return 0
;;
esac
@@ -923,12 +977,41 @@ _bpftool()
;;
gen)
case $command in
+ object)
+ _filedir
+ return 0
+ ;;
skeleton)
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'name'
+ return 0
+ ;;
+ esac
+ ;;
+ subskeleton)
+ case $prev in
+ $command)
+ _filedir
+ return 0
+ ;;
+ *)
+ _bpftool_once_attr 'name'
+ return 0
+ ;;
+ esac
+ ;;
+ min_core_btf)
_filedir
+ return 0
;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'skeleton help' -- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'object skeleton subskeleton help min_core_btf' -- "$cur" ) )
;;
esac
;;
@@ -946,33 +1029,33 @@ _bpftool()
return 0
;;
attach|detach)
- local ATTACH_TYPES='ingress egress sock_create sock_ops \
- device bind4 bind6 post_bind4 post_bind6 connect4 \
- connect6 sendmsg4 sendmsg6 recvmsg4 recvmsg6 sysctl \
- getsockopt setsockopt'
+ local BPFTOOL_CGROUP_ATTACH_TYPES="$(bpftool feature list_builtins attach_types 2>/dev/null | \
+ grep '^cgroup_')"
local ATTACH_FLAGS='multi override'
local PROG_TYPE='id pinned tag name'
- case $prev in
- $command)
- _filedir
- return 0
- ;;
- ingress|egress|sock_create|sock_ops|device|bind4|bind6|\
- post_bind4|post_bind6|connect4|connect6|sendmsg4|\
- sendmsg6|recvmsg4|recvmsg6|sysctl|getsockopt|\
- setsockopt)
+ # Check for $prev = $command first
+ if [ $prev = $command ]; then
+ _filedir
+ return 0
+ # Then check for attach type. This is done outside of the
+ # "case $prev in" to avoid writing the whole list of attach
+ # types again as pattern to match (where we cannot reuse
+ # our variable).
+ elif [[ $BPFTOOL_CGROUP_ATTACH_TYPES =~ $prev ]]; then
COMPREPLY=( $( compgen -W "$PROG_TYPE" -- \
"$cur" ) )
return 0
- ;;
+ fi
+ # case/esac for the other cases
+ case $prev in
id)
_bpftool_get_prog_ids
return 0
;;
*)
- if ! _bpftool_search_list "$ATTACH_TYPES"; then
- COMPREPLY=( $( compgen -W "$ATTACH_TYPES" -- \
- "$cur" ) )
+ if ! _bpftool_search_list "$BPFTOOL_CGROUP_ATTACH_TYPES"; then
+ COMPREPLY=( $( compgen -W \
+ "$BPFTOOL_CGROUP_ATTACH_TYPES" -- "$cur" ) )
elif [[ "$command" == "attach" ]]; then
# We have an attach type on the command line,
# but it is not the previous word, or
@@ -1073,12 +1156,50 @@ _bpftool()
COMPREPLY+=( $( compgen -W 'macros' -- "$cur" ) )
fi
_bpftool_one_of_list 'kernel dev'
- _bpftool_once_attr 'full'
+ _bpftool_once_attr 'full unprivileged'
+ return 0
+ ;;
+ list_builtins)
+ [[ $prev != "$command" ]] && return 0
+ COMPREPLY=( $( compgen -W 'prog_types map_types \
+ attach_types link_types helpers' -- "$cur" ) )
+ ;;
+ *)
+ [[ $prev == $object ]] && \
+ COMPREPLY=( $( compgen -W 'help list_builtins probe' -- "$cur" ) )
+ ;;
+ esac
+ ;;
+ link)
+ case $command in
+ show|list|pin|detach)
+ case $prev in
+ id)
+ _bpftool_get_link_ids
+ return 0
+ ;;
+ esac
+ ;;
+ esac
+
+ local LINK_TYPE='id pinned'
+ case $command in
+ show|list)
+ [[ $prev != "$command" ]] && return 0
+ COMPREPLY=( $( compgen -W "$LINK_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ pin|detach)
+ if [[ $prev == "$command" ]]; then
+ COMPREPLY=( $( compgen -W "$LINK_TYPE" -- "$cur" ) )
+ else
+ _filedir
+ fi
return 0
;;
*)
[[ $prev == $object ]] && \
- COMPREPLY=( $( compgen -W 'help probe' -- "$cur" ) )
+ COMPREPLY=( $( compgen -W 'help pin show list' -- "$cur" ) )
;;
esac
;;
diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
index bcaf55b59498..91fcb75babe3 100644
--- a/tools/bpf/bpftool/btf.c
+++ b/tools/bpf/bpftool/btf.c
@@ -8,14 +8,14 @@
#include <stdio.h>
#include <string.h>
#include <unistd.h>
-#include <bpf/bpf.h>
-#include <bpf/btf.h>
-#include <bpf/libbpf.h>
#include <linux/btf.h>
-#include <linux/hashtable.h>
#include <sys/types.h>
#include <sys/stat.h>
-#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/hashmap.h>
+#include <bpf/libbpf.h>
#include "json_writer.h"
#include "main.h"
@@ -37,16 +37,10 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_FUNC_PROTO] = "FUNC_PROTO",
[BTF_KIND_VAR] = "VAR",
[BTF_KIND_DATASEC] = "DATASEC",
-};
-
-struct btf_attach_table {
- DECLARE_HASHTABLE(table, 16);
-};
-
-struct btf_attach_point {
- __u32 obj_id;
- __u32 btf_id;
- struct hlist_node hash;
+ [BTF_KIND_FLOAT] = "FLOAT",
+ [BTF_KIND_DECL_TAG] = "DECL_TAG",
+ [BTF_KIND_TYPE_TAG] = "TYPE_TAG",
+ [BTF_KIND_ENUM64] = "ENUM64",
};
static const char *btf_int_enc_str(__u8 encoding)
@@ -71,7 +65,9 @@ static const char *btf_var_linkage_str(__u32 linkage)
case BTF_VAR_STATIC:
return "static";
case BTF_VAR_GLOBAL_ALLOCATED:
- return "global-alloc";
+ return "global";
+ case BTF_VAR_GLOBAL_EXTERN:
+ return "extern";
default:
return "(unknown)";
}
@@ -98,26 +94,28 @@ static const char *btf_str(const struct btf *btf, __u32 off)
return btf__name_by_offset(btf, off) ? : "(invalid)";
}
+static int btf_kind_safe(int kind)
+{
+ return kind <= BTF_KIND_MAX ? kind : BTF_KIND_UNKN;
+}
+
static int dump_btf_type(const struct btf *btf, __u32 id,
const struct btf_type *t)
{
json_writer_t *w = json_wtr;
- int kind, safe_kind;
-
- kind = BTF_INFO_KIND(t->info);
- safe_kind = kind <= BTF_KIND_MAX ? kind : BTF_KIND_UNKN;
+ int kind = btf_kind(t);
if (json_output) {
jsonw_start_object(w);
jsonw_uint_field(w, "id", id);
- jsonw_string_field(w, "kind", btf_kind_str[safe_kind]);
+ jsonw_string_field(w, "kind", btf_kind_str[btf_kind_safe(kind)]);
jsonw_string_field(w, "name", btf_str(btf, t->name_off));
} else {
- printf("[%u] %s '%s'", id, btf_kind_str[safe_kind],
+ printf("[%u] %s '%s'", id, btf_kind_str[btf_kind_safe(kind)],
btf_str(btf, t->name_off));
}
- switch (BTF_INFO_KIND(t->info)) {
+ switch (kind) {
case BTF_KIND_INT: {
__u32 v = *(__u32 *)(t + 1);
const char *enc;
@@ -141,6 +139,7 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPEDEF:
+ case BTF_KIND_TYPE_TAG:
if (json_output)
jsonw_uint_field(w, "type_id", t->type);
else
@@ -209,26 +208,76 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
case BTF_KIND_ENUM: {
const struct btf_enum *v = (const void *)(t + 1);
__u16 vlen = BTF_INFO_VLEN(t->info);
+ const char *encoding;
int i;
+ encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
if (json_output) {
+ jsonw_string_field(w, "encoding", encoding);
jsonw_uint_field(w, "size", t->size);
jsonw_uint_field(w, "vlen", vlen);
jsonw_name(w, "values");
jsonw_start_array(w);
} else {
- printf(" size=%u vlen=%u", t->size, vlen);
+ printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
+ }
+ for (i = 0; i < vlen; i++, v++) {
+ const char *name = btf_str(btf, v->name_off);
+
+ if (json_output) {
+ jsonw_start_object(w);
+ jsonw_string_field(w, "name", name);
+ if (btf_kflag(t))
+ jsonw_int_field(w, "val", v->val);
+ else
+ jsonw_uint_field(w, "val", v->val);
+ jsonw_end_object(w);
+ } else {
+ if (btf_kflag(t))
+ printf("\n\t'%s' val=%d", name, v->val);
+ else
+ printf("\n\t'%s' val=%u", name, v->val);
+ }
+ }
+ if (json_output)
+ jsonw_end_array(w);
+ break;
+ }
+ case BTF_KIND_ENUM64: {
+ const struct btf_enum64 *v = btf_enum64(t);
+ __u16 vlen = btf_vlen(t);
+ const char *encoding;
+ int i;
+
+ encoding = btf_kflag(t) ? "SIGNED" : "UNSIGNED";
+ if (json_output) {
+ jsonw_string_field(w, "encoding", encoding);
+ jsonw_uint_field(w, "size", t->size);
+ jsonw_uint_field(w, "vlen", vlen);
+ jsonw_name(w, "values");
+ jsonw_start_array(w);
+ } else {
+ printf(" encoding=%s size=%u vlen=%u", encoding, t->size, vlen);
}
for (i = 0; i < vlen; i++, v++) {
const char *name = btf_str(btf, v->name_off);
+ __u64 val = ((__u64)v->val_hi32 << 32) | v->val_lo32;
if (json_output) {
jsonw_start_object(w);
jsonw_string_field(w, "name", name);
- jsonw_uint_field(w, "val", v->val);
+ if (btf_kflag(t))
+ jsonw_int_field(w, "val", val);
+ else
+ jsonw_uint_field(w, "val", val);
jsonw_end_object(w);
} else {
- printf("\n\t'%s' val=%u", name, v->val);
+ if (btf_kflag(t))
+ printf("\n\t'%s' val=%lldLL", name,
+ (unsigned long long)val);
+ else
+ printf("\n\t'%s' val=%lluULL", name,
+ (unsigned long long)val);
}
}
if (json_output)
@@ -300,7 +349,8 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
break;
}
case BTF_KIND_DATASEC: {
- const struct btf_var_secinfo *v = (const void *)(t+1);
+ const struct btf_var_secinfo *v = (const void *)(t + 1);
+ const struct btf_type *vt;
__u16 vlen = BTF_INFO_VLEN(t->info);
int i;
@@ -322,12 +372,37 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
} else {
printf("\n\ttype_id=%u offset=%u size=%u",
v->type, v->offset, v->size);
+
+ if (v->type < btf__type_cnt(btf)) {
+ vt = btf__type_by_id(btf, v->type);
+ printf(" (%s '%s')",
+ btf_kind_str[btf_kind_safe(btf_kind(vt))],
+ btf_str(btf, vt->name_off));
+ }
}
}
if (json_output)
jsonw_end_array(w);
break;
}
+ case BTF_KIND_FLOAT: {
+ if (json_output)
+ jsonw_uint_field(w, "size", t->size);
+ else
+ printf(" size=%u", t->size);
+ break;
+ }
+ case BTF_KIND_DECL_TAG: {
+ const struct btf_decl_tag *tag = (const void *)(t + 1);
+
+ if (json_output) {
+ jsonw_uint_field(w, "type_id", t->type);
+ jsonw_int_field(w, "component_idx", tag->component_idx);
+ } else {
+ printf(" type_id=%u component_idx=%d", t->type, tag->component_idx);
+ }
+ break;
+ }
default:
break;
}
@@ -358,9 +433,15 @@ static int dump_btf_raw(const struct btf *btf,
dump_btf_type(btf, root_type_ids[i], t);
}
} else {
- int cnt = btf__get_nr_types(btf);
+ const struct btf *base;
+ int cnt = btf__type_cnt(btf);
+ int start_id = 1;
- for (i = 1; i <= cnt; i++) {
+ base = btf__base_btf(btf);
+ if (base)
+ start_id = btf__type_cnt(base);
+
+ for (i = start_id; i < cnt; i++) {
t = btf__type_by_id(btf, i);
dump_btf_type(btf, i, t);
}
@@ -385,9 +466,9 @@ static int dump_btf_c(const struct btf *btf,
struct btf_dump *d;
int err = 0, i;
- d = btf_dump__new(btf, NULL, NULL, btf_dump_printf);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ d = btf_dump__new(btf, btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
printf("#ifndef __VMLINUX_H__\n");
printf("#define __VMLINUX_H__\n");
@@ -403,9 +484,9 @@ static int dump_btf_c(const struct btf *btf,
goto done;
}
} else {
- int cnt = btf__get_nr_types(btf);
+ int cnt = btf__type_cnt(btf);
- for (i = 1; i <= cnt; i++) {
+ for (i = 1; i < cnt; i++) {
err = btf_dump__dump_type(d, i);
if (err)
goto done;
@@ -423,71 +504,65 @@ done:
return err;
}
-static struct btf *btf__parse_raw(const char *file)
-{
- struct btf *btf;
- struct stat st;
- __u8 *buf;
- FILE *f;
-
- if (stat(file, &st))
- return NULL;
-
- f = fopen(file, "rb");
- if (!f)
- return NULL;
-
- buf = malloc(st.st_size);
- if (!buf) {
- btf = ERR_PTR(-ENOMEM);
- goto exit_close;
- }
+static const char sysfs_vmlinux[] = "/sys/kernel/btf/vmlinux";
- if ((size_t) st.st_size != fread(buf, 1, st.st_size, f)) {
- btf = ERR_PTR(-EINVAL);
- goto exit_free;
- }
+static struct btf *get_vmlinux_btf_from_sysfs(void)
+{
+ struct btf *base;
- btf = btf__new(buf, st.st_size);
+ base = btf__parse(sysfs_vmlinux, NULL);
+ if (!base)
+ p_err("failed to parse vmlinux BTF at '%s': %d\n",
+ sysfs_vmlinux, -errno);
-exit_free:
- free(buf);
-exit_close:
- fclose(f);
- return btf;
+ return base;
}
-static bool is_btf_raw(const char *file)
+#define BTF_NAME_BUFF_LEN 64
+
+static bool btf_is_kernel_module(__u32 btf_id)
{
- __u16 magic = 0;
- int fd, nb_read;
+ struct bpf_btf_info btf_info = {};
+ char btf_name[BTF_NAME_BUFF_LEN];
+ int btf_fd;
+ __u32 len;
+ int err;
- fd = open(file, O_RDONLY);
- if (fd < 0)
+ btf_fd = bpf_btf_get_fd_by_id(btf_id);
+ if (btf_fd < 0) {
+ p_err("can't get BTF object by id (%u): %s", btf_id, strerror(errno));
return false;
+ }
- nb_read = read(fd, &magic, sizeof(magic));
- close(fd);
- return nb_read == sizeof(magic) && magic == BTF_MAGIC;
+ len = sizeof(btf_info);
+ btf_info.name = ptr_to_u64(btf_name);
+ btf_info.name_len = sizeof(btf_name);
+ err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
+ close(btf_fd);
+ if (err) {
+ p_err("can't get BTF (ID %u) object info: %s", btf_id, strerror(errno));
+ return false;
+ }
+
+ return btf_info.kernel_btf && strncmp(btf_name, "vmlinux", sizeof(btf_name)) != 0;
}
static int do_dump(int argc, char **argv)
{
- struct btf *btf = NULL;
+ struct btf *btf = NULL, *base = NULL;
__u32 root_type_ids[2];
int root_type_cnt = 0;
bool dump_c = false;
__u32 btf_id = -1;
const char *src;
int fd = -1;
- int err;
+ int err = 0;
if (!REQ_ARGS(2)) {
usage();
return -1;
}
src = GET_ARG();
-
if (is_prefix(src, "map")) {
struct bpf_map_info info = {};
__u32 len = sizeof(info);
@@ -531,7 +606,7 @@ static int do_dump(int argc, char **argv)
if (fd < 0)
return -1;
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
goto done;
@@ -548,16 +623,18 @@ static int do_dump(int argc, char **argv)
}
NEXT_ARG();
} else if (is_prefix(src, "file")) {
- if (is_btf_raw(*argv))
- btf = btf__parse_raw(*argv);
- else
- btf = btf__parse_elf(*argv, NULL);
+ const char sysfs_prefix[] = "/sys/kernel/btf/";
+
+ if (!base_btf &&
+ strncmp(*argv, sysfs_prefix, sizeof(sysfs_prefix) - 1) == 0 &&
+ strcmp(*argv, sysfs_vmlinux) != 0)
+ base = get_vmlinux_btf_from_sysfs();
- if (IS_ERR(btf)) {
- err = PTR_ERR(btf);
- btf = NULL;
+ btf = btf__parse_split(*argv, base ?: base_btf);
+ if (!btf) {
+ err = -errno;
p_err("failed to load BTF from %s: %s",
- *argv, strerror(err));
+ *argv, strerror(errno));
goto done;
}
NEXT_ARG();
@@ -572,6 +649,7 @@ static int do_dump(int argc, char **argv)
NEXT_ARG();
if (argc < 1) {
p_err("expecting value for 'format' option\n");
+ err = -EINVAL;
goto done;
}
if (strcmp(*argv, "c") == 0) {
@@ -581,24 +659,28 @@ static int do_dump(int argc, char **argv)
} else {
p_err("unrecognized format specifier: '%s', possible values: raw, c",
*argv);
+ err = -EINVAL;
goto done;
}
NEXT_ARG();
} else {
p_err("unrecognized option: '%s'", *argv);
+ err = -EINVAL;
goto done;
}
}
if (!btf) {
- err = btf__get_from_id(btf_id, &btf);
- if (err) {
- p_err("get btf by id (%u): %s", btf_id, strerror(err));
- goto done;
+ if (!base_btf && btf_is_kernel_module(btf_id)) {
+ p_info("Warning: valid base BTF was not specified with -B option, falling back to standard base BTF (%s)",
+ sysfs_vmlinux);
+ base_btf = get_vmlinux_btf_from_sysfs();
}
+
+ btf = btf__load_from_kernel_by_id_split(btf_id, base_btf);
if (!btf) {
- err = ENOENT;
- p_err("can't find btf with ID (%u)", btf_id);
+ err = -errno;
+ p_err("get btf by id (%u): %s", btf_id, strerror(errno));
goto done;
}
}
@@ -617,6 +699,7 @@ static int do_dump(int argc, char **argv)
done:
close(fd);
btf__free(btf);
+ btf__free(base);
return err;
}
@@ -647,21 +730,8 @@ static int btf_parse_fd(int *argc, char ***argv)
return fd;
}
-static void delete_btf_table(struct btf_attach_table *tab)
-{
- struct btf_attach_point *obj;
- struct hlist_node *tmp;
-
- unsigned int bkt;
-
- hash_for_each_safe(tab->table, bkt, tmp, obj, hash) {
- hash_del(&obj->hash);
- free(obj);
- }
-}
-
static int
-build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
+build_btf_type_table(struct hashmap *tab, enum bpf_obj_type type,
void *info, __u32 *len)
{
static const char * const names[] = {
@@ -669,7 +739,6 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
[BPF_OBJ_PROG] = "prog",
[BPF_OBJ_MAP] = "map",
};
- struct btf_attach_point *obj_node;
__u32 btf_id, id = 0;
int err;
int fd;
@@ -720,7 +789,10 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
}
memset(info, 0, *len);
- err = bpf_obj_get_info_by_fd(fd, info, len);
+ if (type == BPF_OBJ_PROG)
+ err = bpf_prog_get_info_by_fd(fd, info, len);
+ else
+ err = bpf_map_get_info_by_fd(fd, info, len);
close(fd);
if (err) {
p_err("can't get %s info: %s", names[type],
@@ -743,27 +815,24 @@ build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type,
if (!btf_id)
continue;
- obj_node = calloc(1, sizeof(*obj_node));
- if (!obj_node) {
- p_err("failed to allocate memory: %s", strerror(errno));
+ err = hashmap__append(tab, btf_id, id);
+ if (err) {
+ p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s",
+ btf_id, id, strerror(-err));
goto err_free;
}
-
- obj_node->obj_id = id;
- obj_node->btf_id = btf_id;
- hash_add(tab->table, &obj_node->hash, obj_node->btf_id);
}
return 0;
err_free:
- delete_btf_table(tab);
+ hashmap__free(tab);
return err;
}
static int
-build_btf_tables(struct btf_attach_table *btf_prog_table,
- struct btf_attach_table *btf_map_table)
+build_btf_tables(struct hashmap *btf_prog_table,
+ struct hashmap *btf_map_table)
{
struct bpf_prog_info prog_info;
__u32 prog_len = sizeof(prog_info);
@@ -779,7 +848,7 @@ build_btf_tables(struct btf_attach_table *btf_prog_table,
err = build_btf_type_table(btf_map_table, BPF_OBJ_MAP, &map_info,
&map_len);
if (err) {
- delete_btf_table(btf_prog_table);
+ hashmap__free(btf_prog_table);
return err;
}
@@ -788,38 +857,44 @@ build_btf_tables(struct btf_attach_table *btf_prog_table,
static void
show_btf_plain(struct bpf_btf_info *info, int fd,
- struct btf_attach_table *btf_prog_table,
- struct btf_attach_table *btf_map_table)
+ struct hashmap *btf_prog_table,
+ struct hashmap *btf_map_table)
{
- struct btf_attach_point *obj;
+ struct hashmap_entry *entry;
+ const char *name = u64_to_ptr(info->name);
int n;
printf("%u: ", info->id);
+ if (info->kernel_btf)
+ printf("name [%s] ", name);
+ else if (name && name[0])
+ printf("name %s ", name);
+ else
+ printf("name <anon> ");
printf("size %uB", info->btf_size);
n = 0;
- hash_for_each_possible(btf_prog_table->table, obj, hash, info->id) {
- if (obj->btf_id == info->id)
- printf("%s%u", n++ == 0 ? " prog_ids " : ",",
- obj->obj_id);
+ hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
+ printf("%s%lu", n++ == 0 ? " prog_ids " : ",", entry->value);
}
n = 0;
- hash_for_each_possible(btf_map_table->table, obj, hash, info->id) {
- if (obj->btf_id == info->id)
- printf("%s%u", n++ == 0 ? " map_ids " : ",",
- obj->obj_id);
+ hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
+ printf("%s%lu", n++ == 0 ? " map_ids " : ",", entry->value);
}
+ emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
+
printf("\n");
}
static void
show_btf_json(struct bpf_btf_info *info, int fd,
- struct btf_attach_table *btf_prog_table,
- struct btf_attach_table *btf_map_table)
+ struct hashmap *btf_prog_table,
+ struct hashmap *btf_map_table)
{
- struct btf_attach_point *obj;
+ struct hashmap_entry *entry;
+ const char *name = u64_to_ptr(info->name);
jsonw_start_object(json_wtr); /* btf object */
jsonw_uint_field(json_wtr, "id", info->id);
@@ -827,37 +902,56 @@ show_btf_json(struct bpf_btf_info *info, int fd,
jsonw_name(json_wtr, "prog_ids");
jsonw_start_array(json_wtr); /* prog_ids */
- hash_for_each_possible(btf_prog_table->table, obj, hash,
- info->id) {
- if (obj->btf_id == info->id)
- jsonw_uint(json_wtr, obj->obj_id);
+ hashmap__for_each_key_entry(btf_prog_table, entry, info->id) {
+ jsonw_uint(json_wtr, entry->value);
}
jsonw_end_array(json_wtr); /* prog_ids */
jsonw_name(json_wtr, "map_ids");
jsonw_start_array(json_wtr); /* map_ids */
- hash_for_each_possible(btf_map_table->table, obj, hash,
- info->id) {
- if (obj->btf_id == info->id)
- jsonw_uint(json_wtr, obj->obj_id);
+ hashmap__for_each_key_entry(btf_map_table, entry, info->id) {
+ jsonw_uint(json_wtr, entry->value);
}
jsonw_end_array(json_wtr); /* map_ids */
+
+ emit_obj_refs_json(refs_table, info->id, json_wtr); /* pids */
+
+ jsonw_bool_field(json_wtr, "kernel", info->kernel_btf);
+
+ if (name && name[0])
+ jsonw_string_field(json_wtr, "name", name);
+
jsonw_end_object(json_wtr); /* btf object */
}
static int
-show_btf(int fd, struct btf_attach_table *btf_prog_table,
- struct btf_attach_table *btf_map_table)
+show_btf(int fd, struct hashmap *btf_prog_table,
+ struct hashmap *btf_map_table)
{
- struct bpf_btf_info info = {};
+ struct bpf_btf_info info;
__u32 len = sizeof(info);
+ char name[64];
int err;
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ memset(&info, 0, sizeof(info));
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get BTF object info: %s", strerror(errno));
return -1;
}
+ /* if the kernel supports emitting the BTF object name, pass a name pointer */
+ if (info.name_len) {
+ memset(&info, 0, sizeof(info));
+ info.name_len = sizeof(name);
+ info.name = ptr_to_u64(name);
+ len = sizeof(info);
+
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get BTF object info: %s", strerror(errno));
+ return -1;
+ }
+ }
if (json_output)
show_btf_json(&info, fd, btf_prog_table, btf_map_table);
@@ -869,8 +963,8 @@ show_btf(int fd, struct btf_attach_table *btf_prog_table,
static int do_show(int argc, char **argv)
{
- struct btf_attach_table btf_prog_table;
- struct btf_attach_table btf_map_table;
+ struct hashmap *btf_prog_table;
+ struct hashmap *btf_map_table;
int err, fd = -1;
__u32 id = 0;
@@ -886,17 +980,28 @@ static int do_show(int argc, char **argv)
return BAD_ARG();
}
- hash_init(btf_prog_table.table);
- hash_init(btf_map_table.table);
- err = build_btf_tables(&btf_prog_table, &btf_map_table);
+ btf_prog_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ btf_map_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(btf_prog_table) || IS_ERR(btf_map_table)) {
+ hashmap__free(btf_prog_table);
+ hashmap__free(btf_map_table);
+ if (fd >= 0)
+ close(fd);
+ p_err("failed to create hashmap for object references");
+ return -1;
+ }
+ err = build_btf_tables(btf_prog_table, btf_map_table);
if (err) {
if (fd >= 0)
close(fd);
return err;
}
+ build_obj_refs_table(&refs_table, BPF_OBJ_BTF);
if (fd >= 0) {
- err = show_btf(fd, &btf_prog_table, &btf_map_table);
+ err = show_btf(fd, btf_prog_table, btf_map_table);
close(fd);
goto exit_free;
}
@@ -928,7 +1033,7 @@ static int do_show(int argc, char **argv)
break;
}
- err = show_btf(fd, &btf_prog_table, &btf_map_table);
+ err = show_btf(fd, btf_prog_table, btf_map_table);
close(fd);
if (err)
break;
@@ -938,8 +1043,9 @@ static int do_show(int argc, char **argv)
jsonw_end_array(json_wtr); /* root array */
exit_free:
- delete_btf_table(&btf_prog_table);
- delete_btf_table(&btf_map_table);
+ hashmap__free(btf_prog_table);
+ hashmap__free(btf_map_table);
+ delete_obj_refs_table(refs_table);
return err;
}
@@ -952,17 +1058,18 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s btf { show | list } [id BTF_ID]\n"
- " %s btf dump BTF_SRC [format FORMAT]\n"
- " %s btf help\n"
+ "Usage: %1$s %2$s { show | list } [id BTF_ID]\n"
+ " %1$s %2$s dump BTF_SRC [format FORMAT]\n"
+ " %1$s %2$s help\n"
"\n"
" BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n"
" FORMAT := { raw | c }\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
- " " HELP_SPEC_OPTIONS "\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-B|--base-btf} }\n"
"",
- bin_name, bin_name, bin_name);
+ bin_name, "btf");
return 0;
}
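
The btf.c changes above replace the kernel-style DECLARE_HASHTABLE with libbpf's hashmap (bpf/hashmap.h), keyed directly by object IDs. Below is a minimal standalone sketch of that API, mirroring the key-as-ID usage from the patch; the BTF/object IDs and the hash/equal helpers are illustrative stand-ins, not part of the patch.

/* Illustrative sketch only: store (BTF ID -> object ID) pairs the way the
 * patch does, using libbpf's hashmap with IDs as their own hash keys.
 * Build against libbpf; IS_ERR() comes from tools/include/linux/err.h,
 * matching how the patch checks hashmap__new()'s ERR_PTR-encoded failure.
 */
#include <stdio.h>
#include <linux/err.h>
#include <bpf/hashmap.h>

static size_t hash_id(long key, void *ctx)
{
	return key;		/* IDs are unique, use them directly */
}

static bool equal_id(long k1, long k2, void *ctx)
{
	return k1 == k2;
}

int main(void)
{
	struct hashmap_entry *entry;
	struct hashmap *tab;

	tab = hashmap__new(hash_id, equal_id, NULL);
	if (IS_ERR(tab))
		return 1;

	/* hypothetical: BTF ID 42 is referenced by object IDs 100 and 101 */
	hashmap__append(tab, 42, 100);
	hashmap__append(tab, 42, 101);

	hashmap__for_each_key_entry(tab, entry, 42)
		printf("btf 42 -> obj %ld\n", entry->value);

	hashmap__free(tab);
	return 0;
}
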
diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
index 497807bec675..294de231db99 100644
--- a/tools/bpf/bpftool/btf_dumper.c
+++ b/tools/bpf/bpftool/btf_dumper.c
@@ -32,14 +32,16 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
const struct btf_type *func_proto,
__u32 prog_id)
{
- struct bpf_prog_info_linear *prog_info = NULL;
const struct btf_type *func_type;
+ int prog_fd = -1, func_sig_len;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
const char *prog_name = NULL;
- struct bpf_func_info *finfo;
struct btf *prog_btf = NULL;
- struct bpf_prog_info *info;
- int prog_fd, func_sig_len;
+ struct bpf_func_info finfo;
+ __u32 finfo_rec_size;
char prog_str[1024];
+ int err;
/* Get the ptr's func_proto */
func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
@@ -52,23 +54,30 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
/* Get the bpf_prog's name. Obtain from func_info. */
prog_fd = bpf_prog_get_fd_by_id(prog_id);
- if (prog_fd == -1)
+ if (prog_fd < 0)
goto print;
- prog_info = bpf_program__get_prog_info_linear(prog_fd,
- 1UL << BPF_PROG_INFO_FUNC_INFO);
- close(prog_fd);
- if (IS_ERR(prog_info)) {
- prog_info = NULL;
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err)
+ goto print;
+
+ if (!info.btf_id || !info.nr_func_info)
goto print;
- }
- info = &prog_info->info;
- if (!info->btf_id || !info->nr_func_info ||
- btf__get_from_id(info->btf_id, &prog_btf))
+ finfo_rec_size = info.func_info_rec_size;
+ memset(&info, 0, sizeof(info));
+ info.nr_func_info = 1;
+ info.func_info_rec_size = finfo_rec_size;
+ info.func_info = ptr_to_u64(&finfo);
+
+ err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
+ if (err)
+ goto print;
+
+ prog_btf = btf__load_from_kernel_by_id(info.btf_id);
+ if (!prog_btf)
goto print;
- finfo = (struct bpf_func_info *)info->func_info;
- func_type = btf__type_by_id(prog_btf, finfo->type_id);
+ func_type = btf__type_by_id(prog_btf, finfo.type_id);
if (!func_type || !btf_is_func(func_type))
goto print;
@@ -90,7 +99,8 @@ print:
prog_str[sizeof(prog_str) - 1] = '\0';
jsonw_string(d->jw, prog_str);
btf__free(prog_btf);
- free(prog_info);
+ if (prog_fd >= 0)
+ close(prog_fd);
return 0;
}
@@ -172,6 +182,32 @@ static int btf_dumper_enum(const struct btf_dumper *d,
return 0;
}
+static int btf_dumper_enum64(const struct btf_dumper *d,
+ const struct btf_type *t,
+ const void *data)
+{
+ const struct btf_enum64 *enums = btf_enum64(t);
+ __u32 val_lo32, val_hi32;
+ __u64 value;
+ __u16 i;
+
+ value = *(__u64 *)data;
+ val_lo32 = (__u32)value;
+ val_hi32 = value >> 32;
+
+ for (i = 0; i < btf_vlen(t); i++) {
+ if (val_lo32 == enums[i].val_lo32 && val_hi32 == enums[i].val_hi32) {
+ jsonw_string(d->jw,
+ btf__name_by_offset(d->btf,
+ enums[i].name_off));
+ return 0;
+ }
+ }
+
+ jsonw_int(d->jw, value);
+ return 0;
+}
+
static bool is_str_array(const struct btf *btf, const struct btf_array *arr,
const char *s)
{
@@ -271,8 +307,8 @@ static void btf_int128_print(json_writer_t *jw, const void *data,
}
}
-static void btf_int128_shift(__u64 *print_num, u16 left_shift_bits,
- u16 right_shift_bits)
+static void btf_int128_shift(__u64 *print_num, __u16 left_shift_bits,
+ __u16 right_shift_bits)
{
__u64 upper_num, lower_num;
@@ -416,7 +452,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
*(char *)data);
break;
case BTF_INT_BOOL:
- jsonw_bool(jw, *(int *)data);
+ jsonw_bool(jw, *(bool *)data);
break;
default:
/* shouldn't happen */
@@ -532,6 +568,8 @@ static int btf_dumper_do_type(const struct btf_dumper *d, __u32 type_id,
return btf_dumper_array(d, type_id, data);
case BTF_KIND_ENUM:
return btf_dumper_enum(d, t, data);
+ case BTF_KIND_ENUM64:
+ return btf_dumper_enum64(d, t, data);
case BTF_KIND_PTR:
btf_dumper_ptr(d, t, data);
return 0;
@@ -596,6 +634,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
switch (BTF_INFO_KIND(t->info)) {
case BTF_KIND_INT:
case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FLOAT:
BTF_PRINT_ARG("%s ", btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_STRUCT:
@@ -607,6 +646,7 @@ static int __btf_dumper_type_only(const struct btf *btf, __u32 type_id,
btf__name_by_offset(btf, t->name_off));
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
BTF_PRINT_ARG("enum %s ",
btf__name_by_offset(btf, t->name_off));
break;
@@ -781,3 +821,86 @@ void btf_dump_linfo_json(const struct btf *btf,
BPF_LINE_INFO_LINE_COL(linfo->line_col));
}
}
+
+static void dotlabel_puts(const char *s)
+{
+ for (; *s; ++s) {
+ switch (*s) {
+ case '\\':
+ case '"':
+ case '{':
+ case '}':
+ case '<':
+ case '>':
+ case '|':
+ case ' ':
+ putchar('\\');
+ /* fallthrough */
+ default:
+ putchar(*s);
+ }
+ }
+}
+
+static const char *shorten_path(const char *path)
+{
+ const unsigned int MAX_PATH_LEN = 32;
+ size_t len = strlen(path);
+ const char *shortpath;
+
+ if (len <= MAX_PATH_LEN)
+ return path;
+
+ /* Search for last '/' under the MAX_PATH_LEN limit */
+ shortpath = strchr(path + len - MAX_PATH_LEN, '/');
+ if (shortpath) {
+ if (shortpath < path + strlen("..."))
+ /* We removed a very short prefix, e.g. "/w", and we'll
+ * make the path longer by prefixing with the ellipsis.
+ * Not worth it, keep initial path.
+ */
+ return path;
+ return shortpath;
+ }
+
+ /* File base name length is > MAX_PATH_LEN, search for last '/' */
+ shortpath = strrchr(path, '/');
+ if (shortpath)
+ return shortpath;
+
+ return path;
+}
+
+void btf_dump_linfo_dotlabel(const struct btf *btf,
+ const struct bpf_line_info *linfo, bool linum)
+{
+ const char *line = btf__name_by_offset(btf, linfo->line_off);
+
+ if (!line || !strlen(line))
+ return;
+ line = ltrim(line);
+
+ if (linum) {
+ const char *file = btf__name_by_offset(btf, linfo->file_name_off);
+ const char *shortfile;
+
+ /* More forgiving on file because linum option is
+ * expected to provide more info than the already
+ * available src line.
+ */
+ if (!file)
+ shortfile = "";
+ else
+ shortfile = shorten_path(file);
+
+ printf("; [%s", shortfile > file ? "..." : "");
+ dotlabel_puts(shortfile);
+ printf(" line:%u col:%u]\\l\\\n",
+ BPF_LINE_INFO_LINE_NUM(linfo->line_col),
+ BPF_LINE_INFO_LINE_COL(linfo->line_col));
+ }
+
+ printf("; ");
+ dotlabel_puts(line);
+ printf("\\l\\\n");
+}
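
The ENUM64 handling above carries each 64-bit enumerator value as two 32-bit halves (val_lo32/val_hi32). A tiny standalone illustration of the split and reconstruction used by the dumper follows; the value itself is an arbitrary example.

/* Illustrative only: split and rebuild a 64-bit enumerator value the same
 * way the btf_enum64 handling above does.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t value = 0x1122334455667788ULL;	/* arbitrary example value */
	uint32_t val_lo32 = (uint32_t)value;
	uint32_t val_hi32 = value >> 32;
	uint64_t rebuilt = ((uint64_t)val_hi32 << 32) | val_lo32;

	printf("lo32=0x%x hi32=0x%x rebuilt=0x%llx\n",
	       val_lo32, val_hi32, (unsigned long long)rebuilt);
	return 0;
}
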
diff --git a/tools/bpf/bpftool/cfg.c b/tools/bpf/bpftool/cfg.c
index 3e21f994f262..eec437cca2ea 100644
--- a/tools/bpf/bpftool/cfg.c
+++ b/tools/bpf/bpftool/cfg.c
@@ -157,7 +157,7 @@ static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur,
return false;
}
-static bool is_jmp_insn(u8 code)
+static bool is_jmp_insn(__u8 code)
{
return BPF_CLASS(code) == BPF_JMP || BPF_CLASS(code) == BPF_JMP32;
}
@@ -176,7 +176,7 @@ static bool func_partition_bb_head(struct func_node *func)
for (; cur <= end; cur++) {
if (is_jmp_insn(cur->code)) {
- u8 opcode = BPF_OP(cur->code);
+ __u8 opcode = BPF_OP(cur->code);
if (opcode == BPF_EXIT || opcode == BPF_CALL)
continue;
@@ -380,7 +380,9 @@ static void cfg_destroy(struct cfg *cfg)
}
}
-static void draw_bb_node(struct func_node *func, struct bb_node *bb)
+static void
+draw_bb_node(struct func_node *func, struct bb_node *bb, struct dump_data *dd,
+ bool opcodes, bool linum)
{
const char *shape;
@@ -398,13 +400,10 @@ static void draw_bb_node(struct func_node *func, struct bb_node *bb)
printf("EXIT");
} else {
unsigned int start_idx;
- struct dump_data dd = {};
-
- printf("{");
- kernel_syms_load(&dd);
+ printf("{\\\n");
start_idx = bb->head - func->start;
- dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
- kernel_syms_destroy(&dd);
+ dump_xlated_for_graph(dd, bb->head, bb->tail, start_idx,
+ opcodes, linum);
printf("}");
}
@@ -430,12 +429,14 @@ static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
}
}
-static void func_output_bb_def(struct func_node *func)
+static void
+func_output_bb_def(struct func_node *func, struct dump_data *dd,
+ bool opcodes, bool linum)
{
struct bb_node *bb;
list_for_each_entry(bb, &func->bbs, l) {
- draw_bb_node(func, bb);
+ draw_bb_node(func, bb, dd, opcodes, linum);
}
}
@@ -455,7 +456,8 @@ static void func_output_edges(struct func_node *func)
func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
}
-static void cfg_dump(struct cfg *cfg)
+static void
+cfg_dump(struct cfg *cfg, struct dump_data *dd, bool opcodes, bool linum)
{
struct func_node *func;
@@ -463,14 +465,15 @@ static void cfg_dump(struct cfg *cfg)
list_for_each_entry(func, &cfg->funcs, l) {
printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
func->idx, func->idx);
- func_output_bb_def(func);
+ func_output_bb_def(func, dd, opcodes, linum);
func_output_edges(func);
printf("}\n");
}
printf("}\n");
}
-void dump_xlated_cfg(void *buf, unsigned int len)
+void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes, bool linum)
{
struct bpf_insn *insn = buf;
struct cfg cfg;
@@ -479,7 +482,7 @@ void dump_xlated_cfg(void *buf, unsigned int len)
if (cfg_build(&cfg, insn, len))
return;
- cfg_dump(&cfg);
+ cfg_dump(&cfg, dd, opcodes, linum);
cfg_destroy(&cfg);
}
diff --git a/tools/bpf/bpftool/cfg.h b/tools/bpf/bpftool/cfg.h
index e144257ea6d2..b3793f4e1783 100644
--- a/tools/bpf/bpftool/cfg.h
+++ b/tools/bpf/bpftool/cfg.h
@@ -4,6 +4,9 @@
#ifndef __BPF_TOOL_CFG_H
#define __BPF_TOOL_CFG_H
-void dump_xlated_cfg(void *buf, unsigned int len);
+#include "xlated_dumper.h"
+
+void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
+ bool opcodes, bool linum);
#endif /* __BPF_TOOL_CFG_H */
diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
index 62c6a1d7cd18..ac846b0805b4 100644
--- a/tools/bpf/bpftool/cgroup.c
+++ b/tools/bpf/bpftool/cgroup.c
@@ -15,62 +15,92 @@
#include <unistd.h>
#include <bpf/bpf.h>
+#include <bpf/btf.h>
#include "main.h"
#define HELP_SPEC_ATTACH_FLAGS \
"ATTACH_FLAGS := { multi | override }"
-#define HELP_SPEC_ATTACH_TYPES \
- " ATTACH_TYPE := { ingress | egress | sock_create |\n" \
- " sock_ops | device | bind4 | bind6 |\n" \
- " post_bind4 | post_bind6 | connect4 |\n" \
- " connect6 | sendmsg4 | sendmsg6 |\n" \
- " recvmsg4 | recvmsg6 | sysctl |\n" \
- " getsockopt | setsockopt }"
+#define HELP_SPEC_ATTACH_TYPES \
+ " ATTACH_TYPE := { cgroup_inet_ingress | cgroup_inet_egress |\n" \
+ " cgroup_inet_sock_create | cgroup_sock_ops |\n" \
+ " cgroup_device | cgroup_inet4_bind |\n" \
+ " cgroup_inet6_bind | cgroup_inet4_post_bind |\n" \
+ " cgroup_inet6_post_bind | cgroup_inet4_connect |\n" \
+ " cgroup_inet6_connect | cgroup_inet4_getpeername |\n" \
+ " cgroup_inet6_getpeername | cgroup_inet4_getsockname |\n" \
+ " cgroup_inet6_getsockname | cgroup_udp4_sendmsg |\n" \
+ " cgroup_udp6_sendmsg | cgroup_udp4_recvmsg |\n" \
+ " cgroup_udp6_recvmsg | cgroup_sysctl |\n" \
+ " cgroup_getsockopt | cgroup_setsockopt |\n" \
+ " cgroup_inet_sock_release }"
static unsigned int query_flags;
-
-static const char * const attach_type_strings[] = {
- [BPF_CGROUP_INET_INGRESS] = "ingress",
- [BPF_CGROUP_INET_EGRESS] = "egress",
- [BPF_CGROUP_INET_SOCK_CREATE] = "sock_create",
- [BPF_CGROUP_SOCK_OPS] = "sock_ops",
- [BPF_CGROUP_DEVICE] = "device",
- [BPF_CGROUP_INET4_BIND] = "bind4",
- [BPF_CGROUP_INET6_BIND] = "bind6",
- [BPF_CGROUP_INET4_CONNECT] = "connect4",
- [BPF_CGROUP_INET6_CONNECT] = "connect6",
- [BPF_CGROUP_INET4_POST_BIND] = "post_bind4",
- [BPF_CGROUP_INET6_POST_BIND] = "post_bind6",
- [BPF_CGROUP_UDP4_SENDMSG] = "sendmsg4",
- [BPF_CGROUP_UDP6_SENDMSG] = "sendmsg6",
- [BPF_CGROUP_SYSCTL] = "sysctl",
- [BPF_CGROUP_UDP4_RECVMSG] = "recvmsg4",
- [BPF_CGROUP_UDP6_RECVMSG] = "recvmsg6",
- [BPF_CGROUP_GETSOCKOPT] = "getsockopt",
- [BPF_CGROUP_SETSOCKOPT] = "setsockopt",
- [__MAX_BPF_ATTACH_TYPE] = NULL,
-};
+static struct btf *btf_vmlinux;
+static __u32 btf_vmlinux_id;
static enum bpf_attach_type parse_attach_type(const char *str)
{
+ const char *attach_type_str;
enum bpf_attach_type type;
- for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
- if (attach_type_strings[type] &&
- is_prefix(str, attach_type_strings[type]))
+ for (type = 0; ; type++) {
+ attach_type_str = libbpf_bpf_attach_type_str(type);
+ if (!attach_type_str)
+ break;
+ if (!strcmp(str, attach_type_str))
+ return type;
+ }
+
+ /* Also check traditionally used attach type strings. For these we keep
+ * allowing prefixed usage.
+ */
+ for (type = 0; ; type++) {
+ attach_type_str = bpf_attach_type_input_str(type);
+ if (!attach_type_str)
+ break;
+ if (is_prefix(str, attach_type_str))
return type;
}
return __MAX_BPF_ATTACH_TYPE;
}
-static int show_bpf_prog(int id, const char *attach_type_str,
+static void guess_vmlinux_btf_id(__u32 attach_btf_obj_id)
+{
+ struct bpf_btf_info btf_info = {};
+ __u32 btf_len = sizeof(btf_info);
+ char name[16] = {};
+ int err;
+ int fd;
+
+ btf_info.name = ptr_to_u64(name);
+ btf_info.name_len = sizeof(name);
+
+ fd = bpf_btf_get_fd_by_id(attach_btf_obj_id);
+ if (fd < 0)
+ return;
+
+ err = bpf_btf_get_info_by_fd(fd, &btf_info, &btf_len);
+ if (err)
+ goto out;
+
+ if (btf_info.kernel_btf && strncmp(name, "vmlinux", sizeof(name)) == 0)
+ btf_vmlinux_id = btf_info.id;
+
+out:
+ close(fd);
+}
+
+static int show_bpf_prog(int id, enum bpf_attach_type attach_type,
const char *attach_flags_str,
int level)
{
+ char prog_name[MAX_PROG_FULL_NAME];
+ const char *attach_btf_name = NULL;
struct bpf_prog_info info = {};
+ const char *attach_type_str;
__u32 info_len = sizeof(info);
int prog_fd;
@@ -78,26 +108,58 @@ static int show_bpf_prog(int id, const char *attach_type_str,
if (prog_fd < 0)
return -1;
- if (bpf_obj_get_info_by_fd(prog_fd, &info, &info_len)) {
+ if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len)) {
close(prog_fd);
return -1;
}
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+
+ if (btf_vmlinux) {
+ if (!btf_vmlinux_id)
+ guess_vmlinux_btf_id(info.attach_btf_obj_id);
+
+ if (btf_vmlinux_id == info.attach_btf_obj_id &&
+ info.attach_btf_id < btf__type_cnt(btf_vmlinux)) {
+ const struct btf_type *t =
+ btf__type_by_id(btf_vmlinux, info.attach_btf_id);
+ attach_btf_name =
+ btf__name_by_offset(btf_vmlinux, t->name_off);
+ }
+ }
+
+ get_prog_full_name(&info, prog_fd, prog_name, sizeof(prog_name));
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_uint_field(json_wtr, "id", info.id);
- jsonw_string_field(json_wtr, "attach_type",
- attach_type_str);
- jsonw_string_field(json_wtr, "attach_flags",
- attach_flags_str);
- jsonw_string_field(json_wtr, "name", info.name);
+ if (attach_type_str)
+ jsonw_string_field(json_wtr, "attach_type", attach_type_str);
+ else
+ jsonw_uint_field(json_wtr, "attach_type", attach_type);
+ if (!(query_flags & BPF_F_QUERY_EFFECTIVE))
+ jsonw_string_field(json_wtr, "attach_flags", attach_flags_str);
+ jsonw_string_field(json_wtr, "name", prog_name);
+ if (attach_btf_name)
+ jsonw_string_field(json_wtr, "attach_btf_name", attach_btf_name);
+ jsonw_uint_field(json_wtr, "attach_btf_obj_id", info.attach_btf_obj_id);
+ jsonw_uint_field(json_wtr, "attach_btf_id", info.attach_btf_id);
jsonw_end_object(json_wtr);
} else {
- printf("%s%-8u %-15s %-15s %-15s\n", level ? " " : "",
- info.id,
- attach_type_str,
- attach_flags_str,
- info.name);
+ printf("%s%-8u ", level ? " " : "", info.id);
+ if (attach_type_str)
+ printf("%-15s", attach_type_str);
+ else
+ printf("type %-10u", attach_type);
+ if (query_flags & BPF_F_QUERY_EFFECTIVE)
+ printf(" %-15s", prog_name);
+ else
+ printf(" %-15s %-15s", attach_flags_str, prog_name);
+ if (attach_btf_name)
+ printf(" %-15s", attach_btf_name);
+ else if (info.attach_btf_id)
+ printf(" attach_btf_obj_id=%d attach_btf_id=%d",
+ info.attach_btf_obj_id, info.attach_btf_id);
+ printf("\n");
}
close(prog_fd);
@@ -136,47 +198,90 @@ static int cgroup_has_attached_progs(int cgroup_fd)
return no_prog ? 0 : 1;
}
+
+static int show_effective_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ int level)
+{
+ LIBBPF_OPTS(bpf_prog_query_opts, p);
+ __u32 prog_ids[1024] = {0};
+ __u32 iter;
+ int ret;
+
+ p.query_flags = query_flags;
+ p.prog_cnt = ARRAY_SIZE(prog_ids);
+ p.prog_ids = prog_ids;
+
+ ret = bpf_prog_query_opts(cgroup_fd, type, &p);
+ if (ret)
+ return ret;
+
+ if (p.prog_cnt == 0)
+ return 0;
+
+ for (iter = 0; iter < p.prog_cnt; iter++)
+ show_bpf_prog(prog_ids[iter], type, NULL, level);
+
+ return 0;
+}
+
static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
int level)
{
+ LIBBPF_OPTS(bpf_prog_query_opts, p);
+ __u32 prog_attach_flags[1024] = {0};
const char *attach_flags_str;
__u32 prog_ids[1024] = {0};
- __u32 prog_cnt, iter;
- __u32 attach_flags;
char buf[32];
+ __u32 iter;
int ret;
- prog_cnt = ARRAY_SIZE(prog_ids);
- ret = bpf_prog_query(cgroup_fd, type, query_flags, &attach_flags,
- prog_ids, &prog_cnt);
+ p.query_flags = query_flags;
+ p.prog_cnt = ARRAY_SIZE(prog_ids);
+ p.prog_ids = prog_ids;
+ p.prog_attach_flags = prog_attach_flags;
+
+ ret = bpf_prog_query_opts(cgroup_fd, type, &p);
if (ret)
return ret;
- if (prog_cnt == 0)
+ if (p.prog_cnt == 0)
return 0;
- switch (attach_flags) {
- case BPF_F_ALLOW_MULTI:
- attach_flags_str = "multi";
- break;
- case BPF_F_ALLOW_OVERRIDE:
- attach_flags_str = "override";
- break;
- case 0:
- attach_flags_str = "";
- break;
- default:
- snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
- attach_flags_str = buf;
- }
+ for (iter = 0; iter < p.prog_cnt; iter++) {
+ __u32 attach_flags;
+
+ attach_flags = prog_attach_flags[iter] ?: p.attach_flags;
+
+ switch (attach_flags) {
+ case BPF_F_ALLOW_MULTI:
+ attach_flags_str = "multi";
+ break;
+ case BPF_F_ALLOW_OVERRIDE:
+ attach_flags_str = "override";
+ break;
+ case 0:
+ attach_flags_str = "";
+ break;
+ default:
+ snprintf(buf, sizeof(buf), "unknown(%x)", attach_flags);
+ attach_flags_str = buf;
+ }
- for (iter = 0; iter < prog_cnt; iter++)
- show_bpf_prog(prog_ids[iter], attach_type_strings[type],
+ show_bpf_prog(prog_ids[iter], type,
attach_flags_str, level);
+ }
return 0;
}
+static int show_bpf_progs(int cgroup_fd, enum bpf_attach_type type,
+ int level)
+{
+ return query_flags & BPF_F_QUERY_EFFECTIVE ?
+ show_effective_bpf_progs(cgroup_fd, type, level) :
+ show_attached_bpf_progs(cgroup_fd, type, level);
+}
+
static int do_show(int argc, char **argv)
{
enum bpf_attach_type type;
@@ -224,10 +329,13 @@ static int do_show(int argc, char **argv)
if (json_output)
jsonw_start_array(json_wtr);
+ else if (query_flags & BPF_F_QUERY_EFFECTIVE)
+ printf("%-8s %-15s %-15s\n", "ID", "AttachType", "Name");
else
printf("%-8s %-15s %-15s %-15s\n", "ID", "AttachType",
"AttachFlags", "Name");
+ btf_vmlinux = libbpf_find_kernel_btf();
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
/*
* Not all attach types may be supported, so it's expected,
@@ -235,7 +343,7 @@ static int do_show(int argc, char **argv)
* If we were able to get the show for at least one
* attach type, let's return 0.
*/
- if (show_attached_bpf_progs(cgroup_fd, type, 0) == 0)
+ if (show_bpf_progs(cgroup_fd, type, 0) == 0)
ret = 0;
}
@@ -291,8 +399,9 @@ static int do_show_tree_fn(const char *fpath, const struct stat *sb,
printf("%s\n", fpath);
}
+ btf_vmlinux = libbpf_find_kernel_btf();
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++)
- show_attached_bpf_progs(cgroup_fd, type, ftw->level);
+ show_bpf_progs(cgroup_fd, type, ftw->level);
if (errno == EINVAL)
/* Last attach type does not support query.
@@ -366,6 +475,11 @@ static int do_show_tree(int argc, char **argv)
if (json_output)
jsonw_start_array(json_wtr);
+ else if (query_flags & BPF_F_QUERY_EFFECTIVE)
+ printf("%s\n"
+ "%-8s %-15s %-15s\n",
+ "CgroupPath",
+ "ID", "AttachType", "Name");
else
printf("%s\n"
"%-8s %-15s %-15s %-15s\n",
@@ -508,20 +622,19 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s { show | list } CGROUP [**effective**]\n"
- " %s %s tree [CGROUP_ROOT] [**effective**]\n"
- " %s %s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
- " %s %s detach CGROUP ATTACH_TYPE PROG\n"
- " %s %s help\n"
+ "Usage: %1$s %2$s { show | list } CGROUP [**effective**]\n"
+ " %1$s %2$s tree [CGROUP_ROOT] [**effective**]\n"
+ " %1$s %2$s attach CGROUP ATTACH_TYPE PROG [ATTACH_FLAGS]\n"
+ " %1$s %2$s detach CGROUP ATTACH_TYPE PROG\n"
+ " %1$s %2$s help\n"
"\n"
HELP_SPEC_ATTACH_TYPES "\n"
" " HELP_SPEC_ATTACH_FLAGS "\n"
" " HELP_SPEC_PROGRAM "\n"
- " " HELP_SPEC_OPTIONS "\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-f|--bpffs} }\n"
"",
- bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2]);
+ bin_name, argv[-2]);
return 0;
}
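
The cgroup.c rework above moves from bpf_prog_query() to the extensible, opts-based bpf_prog_query_opts(). A minimal sketch of that call pattern follows; the cgroup path, attach type and array size are arbitrary examples, not taken from the patch.

/* Illustrative only: query programs attached to a cgroup with the
 * opts-based libbpf API used by the patch above.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>

static int count_cgroup_progs(const char *cgroup_path, enum bpf_attach_type type)
{
	LIBBPF_OPTS(bpf_prog_query_opts, p);
	__u32 prog_ids[64] = {};
	int fd, err;

	fd = open(cgroup_path, O_RDONLY);
	if (fd < 0)
		return -errno;

	p.prog_ids = prog_ids;
	p.prog_cnt = 64;	/* in: array capacity, out: number of programs */

	err = bpf_prog_query_opts(fd, type, &p);
	close(fd);
	if (err)
		return err;

	printf("%u program(s) attached to %s\n", p.prog_cnt, cgroup_path);
	return 0;
}
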
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index f2223dbdfb0a..1360c82ae732 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
-#include <fts.h>
+#include <ftw.h>
#include <libgen.h>
#include <mntent.h>
#include <stdbool.h>
@@ -12,16 +15,21 @@
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
-#include <linux/limits.h>
-#include <linux/magic.h>
#include <net/if.h>
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/vfs.h>
+#include <linux/filter.h>
+#include <linux/limits.h>
+#include <linux/magic.h>
+#include <linux/unistd.h>
+
#include <bpf/bpf.h>
+#include <bpf/hashmap.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
+#include <bpf/btf.h>
#include "main.h"
@@ -70,11 +78,73 @@ static bool is_bpffs(char *path)
return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
}
+/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
+ * memcg-based memory accounting for BPF maps and programs. This was done in
+ * commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
+ * accounting'"), in Linux 5.11.
+ *
+ * Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
+ * so by checking for the availability of a given BPF helper and this has
+ * failed on some kernels with backports in the past, see commit 6b4384ff1088
+ * ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
+ * Instead, we can probe by lowering the process-based rlimit to 0, trying to
+ * load a BPF object, and resetting the rlimit. If the load succeeds then
+ * memcg-based accounting is supported.
+ *
+ * This would be too dangerous to do in the library, because multithreaded
+ * applications might attempt to load items while the rlimit is at 0. Given
+ * that bpftool is single-threaded, this is fine to do here.
+ */
+static bool known_to_need_rlimit(void)
+{
+ struct rlimit rlim_init, rlim_cur_zero = {};
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ size_t insn_cnt = ARRAY_SIZE(insns);
+ union bpf_attr attr;
+ int prog_fd, err;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = insn_cnt;
+ attr.license = ptr_to_u64("GPL");
+
+ if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
+ return false;
+
+ /* Drop the soft limit to zero. We keep the hard limit at its
+ * current value, because lowering it would be a permanent operation
+ * for unprivileged users.
+ */
+ rlim_cur_zero.rlim_max = rlim_init.rlim_max;
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
+ return false;
+
+ /* Do not use bpf_prog_load() from libbpf here, because it calls
+ * bump_rlimit_memlock(), interfering with the current probe.
+ */
+ prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+ err = errno;
+
+ /* reset soft rlimit to its initial value */
+ setrlimit(RLIMIT_MEMLOCK, &rlim_init);
+
+ if (prog_fd < 0)
+ return err == EPERM;
+
+ close(prog_fd);
+ return false;
+}
+
void set_max_rlimit(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
- setrlimit(RLIMIT_MEMLOCK, &rinf);
+ if (known_to_need_rlimit())
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
}
static int
@@ -123,24 +193,35 @@ int mount_tracefs(const char *target)
return err;
}
-int open_obj_pinned(char *path, bool quiet)
+int open_obj_pinned(const char *path, bool quiet)
{
- int fd;
+ char *pname;
+ int fd = -1;
+
+ pname = strdup(path);
+ if (!pname) {
+ if (!quiet)
+ p_err("mem alloc failed");
+ goto out_ret;
+ }
- fd = bpf_obj_get(path);
+ fd = bpf_obj_get(pname);
if (fd < 0) {
if (!quiet)
- p_err("bpf obj get (%s): %s", path,
- errno == EACCES && !is_bpffs(dirname(path)) ?
+ p_err("bpf obj get (%s): %s", pname,
+ errno == EACCES && !is_bpffs(dirname(pname)) ?
"directory not in bpf file system (bpffs)" :
strerror(errno));
- return -1;
+ goto out_free;
}
+out_free:
+ free(pname);
+out_ret:
return fd;
}
-int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type)
+int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
{
enum bpf_obj_type type;
int fd;
@@ -171,6 +252,11 @@ int mount_bpffs_for_pin(const char *name)
int err = 0;
file = malloc(strlen(name) + 1);
+ if (!file) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+
strcpy(file, name);
dir = dirname(file);
@@ -216,6 +302,9 @@ int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
int err;
int fd;
+ if (!REQ_ARGS(3))
+ return -EINVAL;
+
fd = get_fd(&argc, &argv);
if (fd < 0)
return fd;
@@ -232,6 +321,7 @@ const char *get_fd_type_name(enum bpf_obj_type type)
[BPF_OBJ_UNKNOWN] = "unknown",
[BPF_OBJ_PROG] = "prog",
[BPF_OBJ_MAP] = "map",
+ [BPF_OBJ_LINK] = "link",
};
if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
@@ -240,6 +330,49 @@ const char *get_fd_type_name(enum bpf_obj_type type)
return names[type];
}
+void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
+ char *name_buff, size_t buff_len)
+{
+ const char *prog_name = prog_info->name;
+ const struct btf_type *func_type;
+ const struct bpf_func_info finfo = {};
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
+ struct btf *prog_btf = NULL;
+
+ if (buff_len <= BPF_OBJ_NAME_LEN ||
+ strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
+ goto copy_name;
+
+ if (!prog_info->btf_id || prog_info->nr_func_info == 0)
+ goto copy_name;
+
+ info.nr_func_info = 1;
+ info.func_info_rec_size = prog_info->func_info_rec_size;
+ if (info.func_info_rec_size > sizeof(finfo))
+ info.func_info_rec_size = sizeof(finfo);
+ info.func_info = ptr_to_u64(&finfo);
+
+ if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
+ goto copy_name;
+
+ prog_btf = btf__load_from_kernel_by_id(info.btf_id);
+ if (!prog_btf)
+ goto copy_name;
+
+ func_type = btf__type_by_id(prog_btf, finfo.type_id);
+ if (!func_type || !btf_is_func(func_type))
+ goto copy_name;
+
+ prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
+
+copy_name:
+ snprintf(name_buff, buff_len, "%s", prog_name);
+
+ if (prog_btf)
+ btf__free(prog_btf);
+}
+
int get_fd_type(int fd)
{
char path[PATH_MAX];
@@ -262,6 +395,8 @@ int get_fd_type(int fd)
return BPF_OBJ_MAP;
else if (strstr(buf, "bpf-prog"))
return BPF_OBJ_PROG;
+ else if (strstr(buf, "bpf-link"))
+ return BPF_OBJ_LINK;
return BPF_OBJ_UNKNOWN;
}
@@ -328,84 +463,95 @@ void print_hex_data_json(uint8_t *data, size_t len)
jsonw_end_array(json_wtr);
}
-int build_pinned_obj_table(struct pinned_obj_table *tab,
- enum bpf_obj_type type)
+/* extra params for nftw cb */
+static struct hashmap *build_fn_table;
+static enum bpf_obj_type build_fn_type;
+
+static int do_build_table_cb(const char *fpath, const struct stat *sb,
+ int typeflag, struct FTW *ftwbuf)
{
- struct bpf_prog_info pinned_info = {};
- struct pinned_obj *obj_node = NULL;
+ struct bpf_prog_info pinned_info;
__u32 len = sizeof(pinned_info);
- struct mntent *mntent = NULL;
enum bpf_obj_type objtype;
+ int fd, err = 0;
+ char *path;
+
+ if (typeflag != FTW_F)
+ goto out_ret;
+
+ fd = open_obj_pinned(fpath, true);
+ if (fd < 0)
+ goto out_ret;
+
+ objtype = get_fd_type(fd);
+ if (objtype != build_fn_type)
+ goto out_close;
+
+ memset(&pinned_info, 0, sizeof(pinned_info));
+ if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
+ goto out_close;
+
+ path = strdup(fpath);
+ if (!path) {
+ err = -1;
+ goto out_close;
+ }
+
+ err = hashmap__append(build_fn_table, pinned_info.id, path);
+ if (err) {
+ p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
+ pinned_info.id, path, strerror(errno));
+ free(path);
+ goto out_close;
+ }
+
+out_close:
+ close(fd);
+out_ret:
+ return err;
+}
+
+int build_pinned_obj_table(struct hashmap *tab,
+ enum bpf_obj_type type)
+{
+ struct mntent *mntent = NULL;
FILE *mntfile = NULL;
- FTSENT *ftse = NULL;
- FTS *fts = NULL;
- int fd, err;
+ int flags = FTW_PHYS;
+ int nopenfd = 16;
+ int err = 0;
mntfile = setmntent("/proc/mounts", "r");
if (!mntfile)
return -1;
+ build_fn_table = tab;
+ build_fn_type = type;
+
while ((mntent = getmntent(mntfile))) {
- char *path[] = { mntent->mnt_dir, NULL };
+ char *path = mntent->mnt_dir;
if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
continue;
-
- fts = fts_open(path, 0, NULL);
- if (!fts)
- continue;
-
- while ((ftse = fts_read(fts))) {
- if (!(ftse->fts_info & FTS_F))
- continue;
- fd = open_obj_pinned(ftse->fts_path, true);
- if (fd < 0)
- continue;
-
- objtype = get_fd_type(fd);
- if (objtype != type) {
- close(fd);
- continue;
- }
- memset(&pinned_info, 0, sizeof(pinned_info));
- err = bpf_obj_get_info_by_fd(fd, &pinned_info, &len);
- if (err) {
- close(fd);
- continue;
- }
-
- obj_node = malloc(sizeof(*obj_node));
- if (!obj_node) {
- close(fd);
- fts_close(fts);
- fclose(mntfile);
- return -1;
- }
-
- memset(obj_node, 0, sizeof(*obj_node));
- obj_node->id = pinned_info.id;
- obj_node->path = strdup(ftse->fts_path);
- hash_add(tab->table, &obj_node->hash, obj_node->id);
-
- close(fd);
- }
- fts_close(fts);
+ err = nftw(path, do_build_table_cb, nopenfd, flags);
+ if (err)
+ break;
}
fclose(mntfile);
- return 0;
+ return err;
}
-void delete_pinned_obj_table(struct pinned_obj_table *tab)
+void delete_pinned_obj_table(struct hashmap *map)
{
- struct pinned_obj *obj;
- struct hlist_node *tmp;
- unsigned int bkt;
+ struct hashmap_entry *entry;
+ size_t bkt;
- hash_for_each_safe(tab->table, bkt, tmp, obj, hash) {
- hash_del(&obj->hash);
- free(obj->path);
- free(obj);
- }
+ if (!map)
+ return;
+
+ hashmap__for_each_entry(map, entry, bkt)
+ free(entry->pvalue);
+
+ hashmap__free(map);
}
unsigned int get_page_size(void)
@@ -485,12 +631,11 @@ static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
}
const char *
-ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
- const char **opt)
+ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
{
+ __maybe_unused int device_id;
char devname[IF_NAMESIZE];
int vendor_id;
- int device_id;
if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
p_err("Can't get net device name for ifindex %d: %s", ifindex,
@@ -505,6 +650,7 @@ ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
}
switch (vendor_id) {
+#ifdef HAVE_LIBBFD_SUPPORT
case 0x19ee:
device_id = read_sysfs_netdev_hex_int(devname, "device");
if (device_id != 0x4000 &&
@@ -513,8 +659,10 @@ ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
*opt = "ctx4";
return "NFP-6xxx";
+#endif /* HAVE_LIBBFD_SUPPORT */
+ /* No NFP support in LLVM, we have no valid triple to return. */
default:
- p_err("Can't get bfd arch name for device vendor id 0x%04x",
+ p_err("Can't get arch name for device vendor id 0x%04x",
vendor_id);
return NULL;
}
@@ -579,3 +727,381 @@ print_all_levels(__maybe_unused enum libbpf_print_level level,
{
return vfprintf(stderr, format, args);
}
+
+static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
+{
+ char prog_name[MAX_PROG_FULL_NAME];
+ unsigned int id = 0;
+ int fd, nb_fds = 0;
+ void *tmp;
+ int err;
+
+ while (true) {
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+
+ err = bpf_prog_get_next_id(id, &id);
+ if (err) {
+ if (errno != ENOENT) {
+ p_err("%s", strerror(errno));
+ goto err_close_fds;
+ }
+ return nb_fds;
+ }
+
+ fd = bpf_prog_get_fd_by_id(id);
+ if (fd < 0) {
+ p_err("can't get prog by id (%u): %s",
+ id, strerror(errno));
+ goto err_close_fds;
+ }
+
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get prog info (%u): %s",
+ id, strerror(errno));
+ goto err_close_fd;
+ }
+
+ if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
+ close(fd);
+ continue;
+ }
+
+ if (!tag) {
+ get_prog_full_name(&info, fd, prog_name,
+ sizeof(prog_name));
+ if (strncmp(nametag, prog_name, sizeof(prog_name))) {
+ close(fd);
+ continue;
+ }
+ }
+
+ if (nb_fds > 0) {
+ tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
+ if (!tmp) {
+ p_err("failed to realloc");
+ goto err_close_fd;
+ }
+ *fds = tmp;
+ }
+ (*fds)[nb_fds++] = fd;
+ }
+
+err_close_fd:
+ close(fd);
+err_close_fds:
+ while (--nb_fds >= 0)
+ close((*fds)[nb_fds]);
+ return -1;
+}
+
+int prog_parse_fds(int *argc, char ***argv, int **fds)
+{
+ if (is_prefix(**argv, "id")) {
+ unsigned int id;
+ char *endptr;
+
+ NEXT_ARGP();
+
+ id = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", **argv);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ (*fds)[0] = bpf_prog_get_fd_by_id(id);
+ if ((*fds)[0] < 0) {
+ p_err("get by id (%u): %s", id, strerror(errno));
+ return -1;
+ }
+ return 1;
+ } else if (is_prefix(**argv, "tag")) {
+ unsigned char tag[BPF_TAG_SIZE];
+
+ NEXT_ARGP();
+
+ if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
+ tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
+ != BPF_TAG_SIZE) {
+ p_err("can't parse tag");
+ return -1;
+ }
+ NEXT_ARGP();
+
+ return prog_fd_by_nametag(tag, fds, true);
+ } else if (is_prefix(**argv, "name")) {
+ char *name;
+
+ NEXT_ARGP();
+
+ name = **argv;
+ if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
+ p_err("can't parse name");
+ return -1;
+ }
+ NEXT_ARGP();
+
+ return prog_fd_by_nametag(name, fds, false);
+ } else if (is_prefix(**argv, "pinned")) {
+ char *path;
+
+ NEXT_ARGP();
+
+ path = **argv;
+ NEXT_ARGP();
+
+ (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
+ if ((*fds)[0] < 0)
+ return -1;
+ return 1;
+ }
+
+ p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
+ return -1;
+}
+
+int prog_parse_fd(int *argc, char ***argv)
+{
+ int *fds = NULL;
+ int nb_fds, fd;
+
+ fds = malloc(sizeof(int));
+ if (!fds) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+ nb_fds = prog_parse_fds(argc, argv, &fds);
+ if (nb_fds != 1) {
+ if (nb_fds > 1) {
+ p_err("several programs match this handle");
+ while (nb_fds--)
+ close(fds[nb_fds]);
+ }
+ fd = -1;
+ goto exit_free;
+ }
+
+ fd = fds[0];
+exit_free:
+ free(fds);
+ return fd;
+}
+
+static int map_fd_by_name(char *name, int **fds)
+{
+ unsigned int id = 0;
+ int fd, nb_fds = 0;
+ void *tmp;
+ int err;
+
+ while (true) {
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+
+ err = bpf_map_get_next_id(id, &id);
+ if (err) {
+ if (errno != ENOENT) {
+ p_err("%s", strerror(errno));
+ goto err_close_fds;
+ }
+ return nb_fds;
+ }
+
+ fd = bpf_map_get_fd_by_id(id);
+ if (fd < 0) {
+ p_err("can't get map by id (%u): %s",
+ id, strerror(errno));
+ goto err_close_fds;
+ }
+
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get map info (%u): %s",
+ id, strerror(errno));
+ goto err_close_fd;
+ }
+
+ if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
+ close(fd);
+ continue;
+ }
+
+ if (nb_fds > 0) {
+ tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
+ if (!tmp) {
+ p_err("failed to realloc");
+ goto err_close_fd;
+ }
+ *fds = tmp;
+ }
+ (*fds)[nb_fds++] = fd;
+ }
+
+err_close_fd:
+ close(fd);
+err_close_fds:
+ while (--nb_fds >= 0)
+ close((*fds)[nb_fds]);
+ return -1;
+}
+
+int map_parse_fds(int *argc, char ***argv, int **fds)
+{
+ if (is_prefix(**argv, "id")) {
+ unsigned int id;
+ char *endptr;
+
+ NEXT_ARGP();
+
+ id = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", **argv);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ (*fds)[0] = bpf_map_get_fd_by_id(id);
+ if ((*fds)[0] < 0) {
+ p_err("get map by id (%u): %s", id, strerror(errno));
+ return -1;
+ }
+ return 1;
+ } else if (is_prefix(**argv, "name")) {
+ char *name;
+
+ NEXT_ARGP();
+
+ name = **argv;
+ if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
+ p_err("can't parse name");
+ return -1;
+ }
+ NEXT_ARGP();
+
+ return map_fd_by_name(name, fds);
+ } else if (is_prefix(**argv, "pinned")) {
+ char *path;
+
+ NEXT_ARGP();
+
+ path = **argv;
+ NEXT_ARGP();
+
+ (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
+ if ((*fds)[0] < 0)
+ return -1;
+ return 1;
+ }
+
+ p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
+ return -1;
+}
+
+int map_parse_fd(int *argc, char ***argv)
+{
+ int *fds = NULL;
+ int nb_fds, fd;
+
+ fds = malloc(sizeof(int));
+ if (!fds) {
+ p_err("mem alloc failed");
+ return -1;
+ }
+ nb_fds = map_parse_fds(argc, argv, &fds);
+ if (nb_fds != 1) {
+ if (nb_fds > 1) {
+ p_err("several maps match this handle");
+ while (nb_fds--)
+ close(fds[nb_fds]);
+ }
+ fd = -1;
+ goto exit_free;
+ }
+
+ fd = fds[0];
+exit_free:
+ free(fds);
+ return fd;
+}
+
+int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
+ __u32 *info_len)
+{
+ int err;
+ int fd;
+
+ fd = map_parse_fd(argc, argv);
+ if (fd < 0)
+ return -1;
+
+ err = bpf_map_get_info_by_fd(fd, info, info_len);
+ if (err) {
+ p_err("can't get map info: %s", strerror(errno));
+ close(fd);
+ return err;
+ }
+
+ return fd;
+}
+
+size_t hash_fn_for_key_as_id(long key, void *ctx)
+{
+ return key;
+}
+
+bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
+{
+ return k1 == k2;
+}
+
+const char *bpf_attach_type_input_str(enum bpf_attach_type t)
+{
+ switch (t) {
+ case BPF_CGROUP_INET_INGRESS: return "ingress";
+ case BPF_CGROUP_INET_EGRESS: return "egress";
+ case BPF_CGROUP_INET_SOCK_CREATE: return "sock_create";
+ case BPF_CGROUP_INET_SOCK_RELEASE: return "sock_release";
+ case BPF_CGROUP_SOCK_OPS: return "sock_ops";
+ case BPF_CGROUP_DEVICE: return "device";
+ case BPF_CGROUP_INET4_BIND: return "bind4";
+ case BPF_CGROUP_INET6_BIND: return "bind6";
+ case BPF_CGROUP_INET4_CONNECT: return "connect4";
+ case BPF_CGROUP_INET6_CONNECT: return "connect6";
+ case BPF_CGROUP_INET4_POST_BIND: return "post_bind4";
+ case BPF_CGROUP_INET6_POST_BIND: return "post_bind6";
+ case BPF_CGROUP_INET4_GETPEERNAME: return "getpeername4";
+ case BPF_CGROUP_INET6_GETPEERNAME: return "getpeername6";
+ case BPF_CGROUP_INET4_GETSOCKNAME: return "getsockname4";
+ case BPF_CGROUP_INET6_GETSOCKNAME: return "getsockname6";
+ case BPF_CGROUP_UDP4_SENDMSG: return "sendmsg4";
+ case BPF_CGROUP_UDP6_SENDMSG: return "sendmsg6";
+ case BPF_CGROUP_SYSCTL: return "sysctl";
+ case BPF_CGROUP_UDP4_RECVMSG: return "recvmsg4";
+ case BPF_CGROUP_UDP6_RECVMSG: return "recvmsg6";
+ case BPF_CGROUP_GETSOCKOPT: return "getsockopt";
+ case BPF_CGROUP_SETSOCKOPT: return "setsockopt";
+ case BPF_TRACE_RAW_TP: return "raw_tp";
+ case BPF_TRACE_FENTRY: return "fentry";
+ case BPF_TRACE_FEXIT: return "fexit";
+ case BPF_MODIFY_RETURN: return "mod_ret";
+ case BPF_SK_REUSEPORT_SELECT: return "sk_skb_reuseport_select";
+ case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: return "sk_skb_reuseport_select_or_migrate";
+ default: return libbpf_bpf_attach_type_str(t);
+ }
+}
+
+int pathname_concat(char *buf, int buf_sz, const char *path,
+ const char *name)
+{
+ int len;
+
+ len = snprintf(buf, buf_sz, "%s/%s", path, name);
+ if (len < 0)
+ return -EINVAL;
+ if (len >= buf_sz)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
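/*
 * Minimal caller-side sketch for pathname_concat() (hypothetical helper name
 * pin_under_dir, assumes <limits.h> for PATH_MAX): the wrapper reports
 * truncation with -ENAMETOOLONG instead of silently clipping the path.
 */
static int pin_under_dir(int fd, const char *dir, const char *name)
{
	char path[PATH_MAX];
	int err;

	err = pathname_concat(path, sizeof(path), dir, name);
	if (err)
		return err;	/* -EINVAL or -ENAMETOOLONG */

	return bpf_obj_pin(fd, path);
}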
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index 88718ee6a438..da16e6a27ccc 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -3,9 +3,13 @@
#include <ctype.h>
#include <errno.h>
+#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
+#ifdef USE_LIBCAP
+#include <sys/capability.h>
+#endif
#include <sys/utsname.h>
#include <sys/vfs.h>
@@ -35,8 +39,18 @@ static const char * const helper_name[] = {
#undef BPF_HELPER_MAKE_ENTRY
+static bool full_mode;
+#ifdef USE_LIBCAP
+static bool run_as_unprivileged;
+#endif
+
/* Miscellaneous utility functions */
+static bool grep(const char *buffer, const char *pattern)
+{
+ return !!strstr(buffer, pattern);
+}
+
static bool check_procfs(void)
{
struct statfs st_fs;
@@ -72,13 +86,12 @@ print_bool_feature(const char *feat_name, const char *plain_name,
printf("%s is %savailable\n", plain_name, res ? "" : "NOT ");
}
-static void print_kernel_option(const char *name, const char *value)
+static void print_kernel_option(const char *name, const char *value,
+ const char *define_prefix)
{
char *endptr;
int res;
- /* No support for C-style ouptut */
-
if (json_output) {
if (!value) {
jsonw_null_field(json_wtr, name);
@@ -90,6 +103,12 @@ static void print_kernel_option(const char *name, const char *value)
jsonw_int_field(json_wtr, name, res);
else
jsonw_string_field(json_wtr, name, value);
+ } else if (define_prefix) {
+ if (value)
+ printf("#define %s%s %s\n", define_prefix,
+ name, value);
+ else
+ printf("/* %s%s is not set */\n", define_prefix, name);
} else {
if (value)
printf("%s is set to %s\n", name, value);
@@ -122,6 +141,32 @@ static void print_end_section(void)
/* Probing functions */
+static int get_vendor_id(int ifindex)
+{
+ char ifname[IF_NAMESIZE], path[64], buf[8];
+ ssize_t len;
+ int fd;
+
+ if (!if_indextoname(ifindex, ifname))
+ return -1;
+
+ snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+
+ fd = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ return -1;
+
+ len = read(fd, buf, sizeof(buf));
+ close(fd);
+ if (len < 0)
+ return -1;
+ if (len >= (ssize_t)sizeof(buf))
+ return -1;
+ buf[len] = '\0';
+
+ return strtol(buf, NULL, 0);
+}
+
static int read_procfs(const char *path)
{
char *endptr, *line = NULL;
@@ -162,7 +207,10 @@ static void probe_unprivileged_disabled(void)
printf("bpf() syscall for unprivileged users is enabled\n");
break;
case 1:
- printf("bpf() syscall restricted to privileged users\n");
+ printf("bpf() syscall restricted to privileged users (without recovery)\n");
+ break;
+ case 2:
+ printf("bpf() syscall restricted to privileged users (admin can change)\n");
break;
case -1:
printf("Unable to retrieve required privileges for bpf() syscall\n");
@@ -307,77 +355,88 @@ static bool read_next_kernel_config_option(gzFile file, char *buf, size_t n,
return false;
}
-static void probe_kernel_image_config(void)
+static void probe_kernel_image_config(const char *define_prefix)
{
- static const char * const options[] = {
+ static const struct {
+ const char * const name;
+ bool macro_dump;
+ } options[] = {
/* Enable BPF */
- "CONFIG_BPF",
+ { "CONFIG_BPF", },
/* Enable bpf() syscall */
- "CONFIG_BPF_SYSCALL",
+ { "CONFIG_BPF_SYSCALL", },
/* Does selected architecture support eBPF JIT compiler */
- "CONFIG_HAVE_EBPF_JIT",
+ { "CONFIG_HAVE_EBPF_JIT", },
/* Compile eBPF JIT compiler */
- "CONFIG_BPF_JIT",
+ { "CONFIG_BPF_JIT", },
/* Avoid compiling eBPF interpreter (use JIT only) */
- "CONFIG_BPF_JIT_ALWAYS_ON",
+ { "CONFIG_BPF_JIT_ALWAYS_ON", },
+ /* Kernel BTF debug information available */
+ { "CONFIG_DEBUG_INFO_BTF", },
+ /* Kernel module BTF debug information available */
+ { "CONFIG_DEBUG_INFO_BTF_MODULES", },
/* cgroups */
- "CONFIG_CGROUPS",
+ { "CONFIG_CGROUPS", },
/* BPF programs attached to cgroups */
- "CONFIG_CGROUP_BPF",
+ { "CONFIG_CGROUP_BPF", },
/* bpf_get_cgroup_classid() helper */
- "CONFIG_CGROUP_NET_CLASSID",
+ { "CONFIG_CGROUP_NET_CLASSID", },
/* bpf_skb_{,ancestor_}cgroup_id() helpers */
- "CONFIG_SOCK_CGROUP_DATA",
+ { "CONFIG_SOCK_CGROUP_DATA", },
/* Tracing: attach BPF to kprobes, tracepoints, etc. */
- "CONFIG_BPF_EVENTS",
+ { "CONFIG_BPF_EVENTS", },
/* Kprobes */
- "CONFIG_KPROBE_EVENTS",
+ { "CONFIG_KPROBE_EVENTS", },
/* Uprobes */
- "CONFIG_UPROBE_EVENTS",
+ { "CONFIG_UPROBE_EVENTS", },
/* Tracepoints */
- "CONFIG_TRACING",
+ { "CONFIG_TRACING", },
/* Syscall tracepoints */
- "CONFIG_FTRACE_SYSCALLS",
+ { "CONFIG_FTRACE_SYSCALLS", },
/* bpf_override_return() helper support for selected arch */
- "CONFIG_FUNCTION_ERROR_INJECTION",
+ { "CONFIG_FUNCTION_ERROR_INJECTION", },
/* bpf_override_return() helper */
- "CONFIG_BPF_KPROBE_OVERRIDE",
+ { "CONFIG_BPF_KPROBE_OVERRIDE", },
/* Network */
- "CONFIG_NET",
+ { "CONFIG_NET", },
/* AF_XDP sockets */
- "CONFIG_XDP_SOCKETS",
+ { "CONFIG_XDP_SOCKETS", },
/* BPF_PROG_TYPE_LWT_* and related helpers */
- "CONFIG_LWTUNNEL_BPF",
+ { "CONFIG_LWTUNNEL_BPF", },
/* BPF_PROG_TYPE_SCHED_ACT, TC (traffic control) actions */
- "CONFIG_NET_ACT_BPF",
+ { "CONFIG_NET_ACT_BPF", },
/* BPF_PROG_TYPE_SCHED_CLS, TC filters */
- "CONFIG_NET_CLS_BPF",
+ { "CONFIG_NET_CLS_BPF", },
/* TC clsact qdisc */
- "CONFIG_NET_CLS_ACT",
+ { "CONFIG_NET_CLS_ACT", },
/* Ingress filtering with TC */
- "CONFIG_NET_SCH_INGRESS",
+ { "CONFIG_NET_SCH_INGRESS", },
/* bpf_skb_get_xfrm_state() helper */
- "CONFIG_XFRM",
+ { "CONFIG_XFRM", },
/* bpf_get_route_realm() helper */
- "CONFIG_IP_ROUTE_CLASSID",
+ { "CONFIG_IP_ROUTE_CLASSID", },
/* BPF_PROG_TYPE_LWT_SEG6_LOCAL and related helpers */
- "CONFIG_IPV6_SEG6_BPF",
+ { "CONFIG_IPV6_SEG6_BPF", },
/* BPF_PROG_TYPE_LIRC_MODE2 and related helpers */
- "CONFIG_BPF_LIRC_MODE2",
+ { "CONFIG_BPF_LIRC_MODE2", },
/* BPF stream parser and BPF socket maps */
- "CONFIG_BPF_STREAM_PARSER",
+ { "CONFIG_BPF_STREAM_PARSER", },
/* xt_bpf module for passing BPF programs to netfilter */
- "CONFIG_NETFILTER_XT_MATCH_BPF",
+ { "CONFIG_NETFILTER_XT_MATCH_BPF", },
/* bpfilter back-end for iptables */
- "CONFIG_BPFILTER",
+ { "CONFIG_BPFILTER", },
/* bpftilter module with "user mode helper" */
- "CONFIG_BPFILTER_UMH",
+ { "CONFIG_BPFILTER_UMH", },
/* test_bpf module for BPF tests */
- "CONFIG_TEST_BPF",
+ { "CONFIG_TEST_BPF", },
+
+ /* Misc configs useful in BPF C programs */
+ /* jiffies <-> sec conversion for bpf_jiffies64() helper */
+ { "CONFIG_HZ", true, }
};
char *values[ARRAY_SIZE(options)] = { };
struct utsname utsn;
@@ -419,28 +478,31 @@ static void probe_kernel_image_config(void)
while (read_next_kernel_config_option(file, buf, sizeof(buf), &value)) {
for (i = 0; i < ARRAY_SIZE(options); i++) {
- if (values[i] || strcmp(buf, options[i]))
+ if ((define_prefix && !options[i].macro_dump) ||
+ values[i] || strcmp(buf, options[i].name))
continue;
values[i] = strdup(value);
}
}
-end_parse:
- if (file)
- gzclose(file);
-
for (i = 0; i < ARRAY_SIZE(options); i++) {
- print_kernel_option(options[i], values[i]);
+ if (define_prefix && !options[i].macro_dump)
+ continue;
+ print_kernel_option(options[i].name, values[i], define_prefix);
free(values[i]);
}
+
+end_parse:
+ if (file)
+ gzclose(file);
}
static bool probe_bpf_syscall(const char *define_prefix)
{
bool res;
- bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
+ bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL);
res = (errno != ENOSYS);
print_bool_feature("have_bpf_syscall",
@@ -451,17 +513,50 @@ static bool probe_bpf_syscall(const char *define_prefix)
return res;
}
+static bool
+probe_prog_load_ifindex(enum bpf_prog_type prog_type,
+ const struct bpf_insn *insns, size_t insns_cnt,
+ char *log_buf, size_t log_buf_sz,
+ __u32 ifindex)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = log_buf,
+ .log_size = log_buf_sz,
+ .log_level = log_buf ? 1 : 0,
+ .prog_ifindex = ifindex,
+ );
+ int fd;
+
+ errno = 0;
+ fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0 && errno != EINVAL && errno != EOPNOTSUPP;
+}
+
+static bool probe_prog_type_ifindex(enum bpf_prog_type prog_type, __u32 ifindex)
+{
+ /* nfp returns -EINVAL on exit(0) with TC offload */
+ struct bpf_insn insns[2] = {
+ BPF_MOV64_IMM(BPF_REG_0, 2),
+ BPF_EXIT_INSN()
+ };
+
+ return probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns),
+ NULL, 0, ifindex);
+}
+
static void
-probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
- const char *define_prefix, __u32 ifindex)
+probe_prog_type(enum bpf_prog_type prog_type, const char *prog_type_str,
+ bool *supported_types, const char *define_prefix, __u32 ifindex)
{
char feat_name[128], plain_desc[128], define_name[128];
const char *plain_comment = "eBPF program_type ";
size_t maxlen;
bool res;
- if (ifindex)
- /* Only test offload-able program types */
+ if (ifindex) {
switch (prog_type) {
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_XDP:
@@ -470,60 +565,144 @@ probe_prog_type(enum bpf_prog_type prog_type, bool *supported_types,
return;
}
- res = bpf_probe_prog_type(prog_type, ifindex);
+ res = probe_prog_type_ifindex(prog_type, ifindex);
+ } else {
+ res = libbpf_probe_bpf_prog_type(prog_type, NULL) > 0;
+ }
+
+#ifdef USE_LIBCAP
+ /* Probe may succeed even if program load fails, for unprivileged users
+ * check that we did not fail because of insufficient permissions
+ */
+ if (run_as_unprivileged && errno == EPERM)
+ res = false;
+#endif
supported_types[prog_type] |= res;
maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
- if (strlen(prog_type_name[prog_type]) > maxlen) {
+ if (strlen(prog_type_str) > maxlen) {
p_info("program type name too long");
return;
}
- sprintf(feat_name, "have_%s_prog_type", prog_type_name[prog_type]);
- sprintf(define_name, "%s_prog_type", prog_type_name[prog_type]);
+ sprintf(feat_name, "have_%s_prog_type", prog_type_str);
+ sprintf(define_name, "%s_prog_type", prog_type_str);
uppercase(define_name, sizeof(define_name));
- sprintf(plain_desc, "%s%s", plain_comment, prog_type_name[prog_type]);
+ sprintf(plain_desc, "%s%s", plain_comment, prog_type_str);
print_bool_feature(feat_name, plain_desc, define_name, res,
define_prefix);
}
+static bool probe_map_type_ifindex(enum bpf_map_type map_type, __u32 ifindex)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+ int key_size, value_size, max_entries;
+ int fd;
+
+ opts.map_ifindex = ifindex;
+
+ key_size = sizeof(__u32);
+ value_size = sizeof(__u32);
+ max_entries = 1;
+
+ fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries,
+ &opts);
+ if (fd >= 0)
+ close(fd);
+
+ return fd >= 0;
+}
+
static void
-probe_map_type(enum bpf_map_type map_type, const char *define_prefix,
- __u32 ifindex)
+probe_map_type(enum bpf_map_type map_type, char const *map_type_str,
+ const char *define_prefix, __u32 ifindex)
{
char feat_name[128], plain_desc[128], define_name[128];
const char *plain_comment = "eBPF map_type ";
size_t maxlen;
bool res;
- res = bpf_probe_map_type(map_type, ifindex);
+ if (ifindex) {
+ switch (map_type) {
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ break;
+ default:
+ return;
+ }
+
+ res = probe_map_type_ifindex(map_type, ifindex);
+ } else {
+ res = libbpf_probe_bpf_map_type(map_type, NULL) > 0;
+ }
+
+ /* Probe result depends on the success of map creation, no additional
+ * check required for unprivileged users
+ */
maxlen = sizeof(plain_desc) - strlen(plain_comment) - 1;
- if (strlen(map_type_name[map_type]) > maxlen) {
+ if (strlen(map_type_str) > maxlen) {
p_info("map type name too long");
return;
}
- sprintf(feat_name, "have_%s_map_type", map_type_name[map_type]);
- sprintf(define_name, "%s_map_type", map_type_name[map_type]);
+ sprintf(feat_name, "have_%s_map_type", map_type_str);
+ sprintf(define_name, "%s_map_type", map_type_str);
uppercase(define_name, sizeof(define_name));
- sprintf(plain_desc, "%s%s", plain_comment, map_type_name[map_type]);
+ sprintf(plain_desc, "%s%s", plain_comment, map_type_str);
print_bool_feature(feat_name, plain_desc, define_name, res,
define_prefix);
}
-static void
+static bool
+probe_helper_ifindex(enum bpf_func_id id, enum bpf_prog_type prog_type,
+ __u32 ifindex)
+{
+ struct bpf_insn insns[2] = {
+ BPF_EMIT_CALL(id),
+ BPF_EXIT_INSN()
+ };
+ char buf[4096] = {};
+ bool res;
+
+ probe_prog_load_ifindex(prog_type, insns, ARRAY_SIZE(insns), buf,
+ sizeof(buf), ifindex);
+ res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
+
+ switch (get_vendor_id(ifindex)) {
+ case 0x19ee: /* Netronome specific */
+ res = res && !grep(buf, "not supported by FW") &&
+ !grep(buf, "unsupported function id");
+ break;
+ default:
+ break;
+ }
+
+ return res;
+}
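/*
 * For reference, a standalone sketch of the libbpf probing API that the
 * non-offload paths in this file rely on (libbpf >= 0.7).  Each probe returns
 * 1 if the running kernel supports the feature, 0 if it does not, and a
 * negative error code otherwise.  The program/helper choice is illustrative.
 */
#include <bpf/libbpf.h>
#include <linux/bpf.h>
#include <stdio.h>

int probe_demo(void)
{
	int ret;

	ret = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL);
	printf("XDP programs: %s\n", ret > 0 ? "yes" : "no");

	ret = libbpf_probe_bpf_helper(BPF_PROG_TYPE_XDP, BPF_FUNC_redirect, NULL);
	printf("bpf_redirect() from XDP: %s\n", ret > 0 ? "yes" : "no");

	return 0;
}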
+
+static bool
probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
const char *define_prefix, unsigned int id,
const char *ptype_name, __u32 ifindex)
{
- bool res;
+ bool res = false;
- if (!supported_type)
- res = false;
- else
- res = bpf_probe_helper(id, prog_type, ifindex);
+ if (supported_type) {
+ if (ifindex)
+ res = probe_helper_ifindex(id, prog_type, ifindex);
+ else
+ res = libbpf_probe_bpf_helper(prog_type, id, NULL) > 0;
+#ifdef USE_LIBCAP
+ /* Probe may succeed even if program load fails, for
+ * unprivileged users check that we did not fail because of
+ * insufficient permissions
+ */
+ if (run_as_unprivileged && errno == EPERM)
+ res = false;
+#endif
+ }
if (json_output) {
if (res)
@@ -536,16 +715,18 @@ probe_helper_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
if (res)
printf("\n\t- %s", helper_name[id]);
}
+
+ return res;
}
static void
-probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
- const char *define_prefix, bool full_mode,
- __u32 ifindex)
+probe_helpers_for_progtype(enum bpf_prog_type prog_type,
+ const char *prog_type_str, bool supported_type,
+ const char *define_prefix, __u32 ifindex)
{
- const char *ptype_name = prog_type_name[prog_type];
char feat_name[128];
unsigned int id;
+ bool probe_res = false;
if (ifindex)
/* Only test helpers for offload-able program types */
@@ -558,12 +739,12 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
}
if (json_output) {
- sprintf(feat_name, "%s_available_helpers", ptype_name);
+ sprintf(feat_name, "%s_available_helpers", prog_type_str);
jsonw_name(json_wtr, feat_name);
jsonw_start_array(json_wtr);
} else if (!define_prefix) {
printf("eBPF helpers supported for program type %s:",
- ptype_name);
+ prog_type_str);
}
for (id = 1; id < ARRAY_SIZE(helper_name); id++) {
@@ -572,33 +753,139 @@ probe_helpers_for_progtype(enum bpf_prog_type prog_type, bool supported_type,
*/
switch (id) {
case BPF_FUNC_trace_printk:
+ case BPF_FUNC_trace_vprintk:
case BPF_FUNC_probe_write_user:
if (!full_mode)
continue;
/* fallthrough */
default:
- probe_helper_for_progtype(prog_type, supported_type,
- define_prefix, id, ptype_name,
+ probe_res |= probe_helper_for_progtype(prog_type, supported_type,
+ define_prefix, id, prog_type_str,
ifindex);
}
}
if (json_output)
jsonw_end_array(json_wtr);
- else if (!define_prefix)
+ else if (!define_prefix) {
printf("\n");
+ if (!probe_res) {
+ if (!supported_type)
+ printf("\tProgram type not supported\n");
+ else
+ printf("\tCould not determine which helpers are available\n");
+ }
+ }
+
+
}
static void
-probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
+probe_misc_feature(struct bpf_insn *insns, size_t len,
+ const char *define_prefix, __u32 ifindex,
+ const char *feat_name, const char *plain_name,
+ const char *define_name)
{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .prog_ifindex = ifindex,
+ );
bool res;
+ int fd;
+
+ errno = 0;
+ fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+ insns, len, &opts);
+ res = fd >= 0 || !errno;
+
+ if (fd >= 0)
+ close(fd);
+
+ print_bool_feature(feat_name, plain_name, define_name, res,
+ define_prefix);
+}
+
+/*
+ * Probe for availability of kernel commit (5.3):
+ *
+ * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
+ */
+static void probe_large_insn_limit(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[BPF_MAXINSNS + 1];
+ int i;
+
+ for (i = 0; i < BPF_MAXINSNS; i++)
+ insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
+ insns[BPF_MAXINSNS] = BPF_EXIT_INSN();
- res = bpf_probe_large_insn_limit(ifindex);
- print_bool_feature("have_large_insn_limit",
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_large_insn_limit",
"Large program size limit",
- "LARGE_INSN_LIMIT",
- res, define_prefix);
+ "LARGE_INSN_LIMIT");
+}
+
+/*
+ * Probe for bounded loop support introduced in commit 2589726d12a1
+ * ("bpf: introduce bounded loops").
+ */
+static void
+probe_bounded_loops(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[4] = {
+ BPF_MOV64_IMM(BPF_REG_0, 10),
+ BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, -2),
+ BPF_EXIT_INSN()
+ };
+
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_bounded_loops",
+ "Bounded loop support",
+ "BOUNDED_LOOPS");
+}
+
+/*
+ * Probe for the v2 instruction set extension introduced in commit 92b31a9af73b
+ * ("bpf: add BPF_J{LT,LE,SLT,SLE} instructions").
+ */
+static void
+probe_v2_isa_extension(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[4] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLT, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN()
+ };
+
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_v2_isa_extension",
+ "ISA extension v2",
+ "V2_ISA_EXTENSION");
+}
+
+/*
+ * Probe for the v3 instruction set extension introduced in commit 092ed0968bb6
+ * ("bpf: verifier support JMP32").
+ */
+static void
+probe_v3_isa_extension(const char *define_prefix, __u32 ifindex)
+{
+ struct bpf_insn insns[4] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JLT, BPF_REG_0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN()
+ };
+
+ probe_misc_feature(insns, ARRAY_SIZE(insns),
+ define_prefix, ifindex,
+ "have_v3_isa_extension",
+ "ISA extension v3",
+ "V3_ISA_EXTENSION");
}
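/*
 * Sketch (not from the patch) of how a BPF C program might consume the
 * C-macro output of "bpftool feature probe macros [prefix PREFIX]".  The
 * exact macro spelling below is an assumption, built from the define_name
 * strings above ("BOUNDED_LOOPS", "V2_ISA_EXTENSION", ...) plus a prefix.
 */
#ifdef BPF_FEATURE_BOUNDED_LOOPS	/* assumed macro name */
#define MAX_ITERATIONS 255
#else
#define MAX_ITERATIONS 16		/* keep loops small on older kernels */
#endif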
static void
@@ -607,23 +894,22 @@ section_system_config(enum probe_component target, const char *define_prefix)
switch (target) {
case COMPONENT_KERNEL:
case COMPONENT_UNSPEC:
- if (define_prefix)
- break;
-
print_start_section("system_config",
"Scanning system configuration...",
- NULL, /* define_comment never used here */
- NULL); /* define_prefix always NULL here */
- if (check_procfs()) {
- probe_unprivileged_disabled();
- probe_jit_enable();
- probe_jit_harden();
- probe_jit_kallsyms();
- probe_jit_limit();
- } else {
- p_info("/* procfs not mounted, skipping related probes */");
+ "/*** Misc kernel config items ***/",
+ define_prefix);
+ if (!define_prefix) {
+ if (check_procfs()) {
+ probe_unprivileged_disabled();
+ probe_jit_enable();
+ probe_jit_harden();
+ probe_jit_kallsyms();
+ probe_jit_limit();
+ } else {
+ p_info("/* procfs not mounted, skipping related probes */");
+ }
}
- probe_kernel_image_config();
+ probe_kernel_image_config(define_prefix);
print_end_section();
break;
default:
@@ -649,39 +935,56 @@ static void
section_program_types(bool *supported_types, const char *define_prefix,
__u32 ifindex)
{
- unsigned int i;
+ unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
+ const char *prog_type_str;
print_start_section("program_types",
"Scanning eBPF program types...",
"/*** eBPF program types ***/",
define_prefix);
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
- probe_prog_type(i, supported_types, define_prefix, ifindex);
+ while (true) {
+ prog_type++;
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!prog_type_str)
+ break;
+
+ probe_prog_type(prog_type, prog_type_str, supported_types, define_prefix,
+ ifindex);
+ }
print_end_section();
}
static void section_map_types(const char *define_prefix, __u32 ifindex)
{
- unsigned int i;
+ unsigned int map_type = BPF_MAP_TYPE_UNSPEC;
+ const char *map_type_str;
print_start_section("map_types",
"Scanning eBPF map types...",
"/*** eBPF map types ***/",
define_prefix);
- for (i = BPF_MAP_TYPE_UNSPEC + 1; i < map_type_name_size; i++)
- probe_map_type(i, define_prefix, ifindex);
+ while (true) {
+ map_type++;
+ map_type_str = libbpf_bpf_map_type_str(map_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!map_type_str)
+ break;
+
+ probe_map_type(map_type, map_type_str, define_prefix, ifindex);
+ }
print_end_section();
}
static void
-section_helpers(bool *supported_types, const char *define_prefix,
- bool full_mode, __u32 ifindex)
+section_helpers(bool *supported_types, const char *define_prefix, __u32 ifindex)
{
- unsigned int i;
+ unsigned int prog_type = BPF_PROG_TYPE_UNSPEC;
+ const char *prog_type_str;
print_start_section("helpers",
"Scanning eBPF helper functions...",
@@ -703,9 +1006,18 @@ section_helpers(bool *supported_types, const char *define_prefix,
" %sBPF__PROG_TYPE_ ## prog_type ## __HELPER_ ## helper\n",
define_prefix, define_prefix, define_prefix,
define_prefix);
- for (i = BPF_PROG_TYPE_UNSPEC + 1; i < ARRAY_SIZE(prog_type_name); i++)
- probe_helpers_for_progtype(i, supported_types[i],
- define_prefix, full_mode, ifindex);
+ while (true) {
+ prog_type++;
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (!prog_type_str)
+ break;
+
+ probe_helpers_for_progtype(prog_type, prog_type_str,
+ supported_types[prog_type],
+ define_prefix,
+ ifindex);
+ }
print_end_section();
}
@@ -717,26 +1029,144 @@ static void section_misc(const char *define_prefix, __u32 ifindex)
"/*** eBPF misc features ***/",
define_prefix);
probe_large_insn_limit(define_prefix, ifindex);
+ probe_bounded_loops(define_prefix, ifindex);
+ probe_v2_isa_extension(define_prefix, ifindex);
+ probe_v3_isa_extension(define_prefix, ifindex);
print_end_section();
}
+#ifdef USE_LIBCAP
+#define capability(c) { c, false, #c }
+#define capability_msg(a, i) a[i].set ? "" : a[i].name, a[i].set ? "" : ", "
+#endif
+
+static int handle_perms(void)
+{
+#ifdef USE_LIBCAP
+ struct {
+ cap_value_t cap;
+ bool set;
+ char name[14]; /* strlen("CAP_SYS_ADMIN") */
+ } bpf_caps[] = {
+ capability(CAP_SYS_ADMIN),
+#ifdef CAP_BPF
+ capability(CAP_BPF),
+ capability(CAP_NET_ADMIN),
+ capability(CAP_PERFMON),
+#endif
+ };
+ cap_value_t cap_list[ARRAY_SIZE(bpf_caps)];
+ unsigned int i, nb_bpf_caps = 0;
+ bool cap_sys_admin_only = true;
+ cap_flag_value_t val;
+ int res = -1;
+ cap_t caps;
+
+ caps = cap_get_proc();
+ if (!caps) {
+ p_err("failed to get capabilities for process: %s",
+ strerror(errno));
+ return -1;
+ }
+
+#ifdef CAP_BPF
+ if (CAP_IS_SUPPORTED(CAP_BPF))
+ cap_sys_admin_only = false;
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(bpf_caps); i++) {
+ const char *cap_name = bpf_caps[i].name;
+ cap_value_t cap = bpf_caps[i].cap;
+
+ if (cap_get_flag(caps, cap, CAP_EFFECTIVE, &val)) {
+ p_err("bug: failed to retrieve %s status: %s", cap_name,
+ strerror(errno));
+ goto exit_free;
+ }
+
+ if (val == CAP_SET) {
+ bpf_caps[i].set = true;
+ cap_list[nb_bpf_caps++] = cap;
+ }
+
+ if (cap_sys_admin_only)
+ /* System does not know about CAP_BPF, meaning that
+ * CAP_SYS_ADMIN is the only capability required. We
+ * just checked it, break.
+ */
+ break;
+ }
+
+ if ((run_as_unprivileged && !nb_bpf_caps) ||
+ (!run_as_unprivileged && nb_bpf_caps == ARRAY_SIZE(bpf_caps)) ||
+ (!run_as_unprivileged && cap_sys_admin_only && nb_bpf_caps)) {
+ /* We are all good, exit now */
+ res = 0;
+ goto exit_free;
+ }
+
+ if (!run_as_unprivileged) {
+ if (cap_sys_admin_only)
+ p_err("missing %s, required for full feature probing; run as root or use 'unprivileged'",
+ bpf_caps[0].name);
+ else
+ p_err("missing %s%s%s%s%s%s%s%srequired for full feature probing; run as root or use 'unprivileged'",
+ capability_msg(bpf_caps, 0),
+#ifdef CAP_BPF
+ capability_msg(bpf_caps, 1),
+ capability_msg(bpf_caps, 2),
+ capability_msg(bpf_caps, 3)
+#else
+ "", "", "", "", "", ""
+#endif /* CAP_BPF */
+ );
+ goto exit_free;
+ }
+
+ /* if (run_as_unprivileged && nb_bpf_caps > 0), drop capabilities. */
+ if (cap_set_flag(caps, CAP_EFFECTIVE, nb_bpf_caps, cap_list,
+ CAP_CLEAR)) {
+ p_err("bug: failed to clear capabilities: %s", strerror(errno));
+ goto exit_free;
+ }
+
+ if (cap_set_proc(caps)) {
+ p_err("failed to drop capabilities: %s", strerror(errno));
+ goto exit_free;
+ }
+
+ res = 0;
+
+exit_free:
+ if (cap_free(caps) && !res) {
+ p_err("failed to clear storage object for capabilities: %s",
+ strerror(errno));
+ res = -1;
+ }
+
+ return res;
+#else
+ /* Detection assumes user has specific privileges.
+ * We do not use libcap so let's approximate, and restrict usage to
+ * root user only.
+ */
+ if (geteuid()) {
+ p_err("full feature probing requires root privileges");
+ return -1;
+ }
+
+ return 0;
+#endif /* USE_LIBCAP */
+}
+
static int do_probe(int argc, char **argv)
{
enum probe_component target = COMPONENT_UNSPEC;
const char *define_prefix = NULL;
bool supported_types[128] = {};
- bool full_mode = false;
__u32 ifindex = 0;
char *ifname;
- /* Detection assumes user has sufficient privileges (CAP_SYS_ADMIN).
- * Let's approximate, and restrict usage to root user only.
- */
- if (geteuid()) {
- p_err("please run this command as root user");
- return -1;
- }
-
set_max_rlimit();
while (argc) {
@@ -785,6 +1215,14 @@ static int do_probe(int argc, char **argv)
if (!REQ_ARGS(1))
return -1;
define_prefix = GET_ARG();
+ } else if (is_prefix(*argv, "unprivileged")) {
+#ifdef USE_LIBCAP
+ run_as_unprivileged = true;
+ NEXT_ARG();
+#else
+ p_err("unprivileged run not supported, recompile bpftool with libcap");
+ return -1;
+#endif
} else {
p_err("expected no more arguments, 'kernel', 'dev', 'macros' or 'prefix', got: '%s'?",
*argv);
@@ -792,6 +1230,12 @@ static int do_probe(int argc, char **argv)
}
}
+ /* Full feature detection requires specific privileges.
+ * Let's approximate, and warn if user is not root.
+ */
+ if (handle_perms())
+ return -1;
+
if (json_output) {
define_prefix = NULL;
jsonw_start_object(json_wtr);
@@ -803,7 +1247,7 @@ static int do_probe(int argc, char **argv)
goto exit_close_json;
section_program_types(supported_types, define_prefix, ifindex);
section_map_types(define_prefix, ifindex);
- section_helpers(supported_types, define_prefix, full_mode, ifindex);
+ section_helpers(supported_types, define_prefix, ifindex);
section_misc(define_prefix, ifindex);
exit_close_json:
@@ -814,6 +1258,58 @@ exit_close_json:
return 0;
}
+static const char *get_helper_name(unsigned int id)
+{
+ if (id >= ARRAY_SIZE(helper_name))
+ return NULL;
+
+ return helper_name[id];
+}
+
+static int do_list_builtins(int argc, char **argv)
+{
+ const char *(*get_name)(unsigned int id);
+ unsigned int id = 0;
+
+ if (argc < 1)
+ usage();
+
+ if (is_prefix(*argv, "prog_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_prog_type_str;
+ } else if (is_prefix(*argv, "map_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_map_type_str;
+ } else if (is_prefix(*argv, "attach_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_attach_type_str;
+ } else if (is_prefix(*argv, "link_types")) {
+ get_name = (const char *(*)(unsigned int))libbpf_bpf_link_type_str;
+ } else if (is_prefix(*argv, "helpers")) {
+ get_name = get_helper_name;
+ } else {
+ p_err("expected 'prog_types', 'map_types', 'attach_types', 'link_types' or 'helpers', got: %s", *argv);
+ return -1;
+ }
+
+ if (json_output)
+ jsonw_start_array(json_wtr); /* root array */
+
+ while (true) {
+ const char *name;
+
+ name = get_name(id++);
+ if (!name)
+ break;
+ if (json_output)
+ jsonw_string(json_wtr, name);
+ else
+ printf("%s\n", name);
+ }
+
+ if (json_output)
+ jsonw_end_array(json_wtr); /* root array */
+
+ return 0;
+}
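/*
 * The list_builtins loop above leans on libbpf's *_str() name helpers.  A
 * standalone sketch of the same idea, without the function-pointer
 * indirection: walk ids upward until libbpf no longer knows the enum value
 * and returns NULL.
 */
#include <bpf/libbpf.h>
#include <stdio.h>

void list_map_types(void)
{
	const char *name;
	unsigned int id;

	for (id = 0; (name = libbpf_bpf_map_type_str(id)); id++)
		printf("%s\n", name);
}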
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -822,19 +1318,23 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s probe [COMPONENT] [full] [macros [prefix PREFIX]]\n"
- " %s %s help\n"
+ "Usage: %1$s %2$s probe [COMPONENT] [full] [unprivileged] [macros [prefix PREFIX]]\n"
+ " %1$s %2$s list_builtins GROUP\n"
+ " %1$s %2$s help\n"
"\n"
" COMPONENT := { kernel | dev NAME }\n"
+ " GROUP := { prog_types | map_types | attach_types | link_types | helpers }\n"
+ " " HELP_SPEC_OPTIONS " }\n"
"",
- bin_name, argv[-2], bin_name, argv[-2]);
+ bin_name, argv[-2]);
return 0;
}
static const struct cmd cmds[] = {
- { "probe", do_probe },
- { "help", do_help },
+ { "probe", do_probe },
+ { "list_builtins", do_list_builtins },
+ { "help", do_help },
{ 0 }
};
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index f8113b3646f5..2883660d6b67 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -14,17 +14,15 @@
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include <bpf/libbpf_internal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
-#include <unistd.h>
#include <bpf/btf.h>
-#include "bpf/libbpf_internal.h"
#include "json_writer.h"
#include "main.h"
-
#define MAX_OBJ_NAME_LEN 64
static void sanitize_identifier(char *name)
@@ -36,6 +34,11 @@ static void sanitize_identifier(char *name)
name[i] = '_';
}
+static bool str_has_prefix(const char *str, const char *prefix)
+{
+ return strncmp(str, prefix, strlen(prefix)) == 0;
+}
+
static bool str_has_suffix(const char *str, const char *suffix)
{
size_t i, n1 = strlen(str), n2 = strlen(suffix);
@@ -61,35 +64,59 @@ static void get_obj_name(char *name, const char *file)
sanitize_identifier(name);
}
-static void get_header_guard(char *guard, const char *obj_name)
+static void get_header_guard(char *guard, const char *obj_name, const char *suffix)
{
int i;
- sprintf(guard, "__%s_SKEL_H__", obj_name);
+ sprintf(guard, "__%s_%s__", obj_name, suffix);
for (i = 0; guard[i]; i++)
guard[i] = toupper(guard[i]);
}
-static const char *get_map_ident(const struct bpf_map *map)
+static bool get_map_ident(const struct bpf_map *map, char *buf, size_t buf_sz)
{
+ static const char *sfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
const char *name = bpf_map__name(map);
+ int i, n;
- if (!bpf_map__is_internal(map))
- return name;
-
- if (str_has_suffix(name, ".data"))
- return "data";
- else if (str_has_suffix(name, ".rodata"))
- return "rodata";
- else if (str_has_suffix(name, ".bss"))
- return "bss";
- else if (str_has_suffix(name, ".kconfig"))
- return "kconfig";
- else
- return NULL;
+ if (!bpf_map__is_internal(map)) {
+ snprintf(buf, buf_sz, "%s", name);
+ return true;
+ }
+
+ for (i = 0, n = ARRAY_SIZE(sfxs); i < n; i++) {
+ const char *sfx = sfxs[i], *p;
+
+ p = strstr(name, sfx);
+ if (p) {
+ snprintf(buf, buf_sz, "%s", p + 1);
+ sanitize_identifier(buf);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool get_datasec_ident(const char *sec_name, char *buf, size_t buf_sz)
+{
+ static const char *pfxs[] = { ".data", ".rodata", ".bss", ".kconfig" };
+ int i, n;
+
+ for (i = 0, n = ARRAY_SIZE(pfxs); i < n; i++) {
+ const char *pfx = pfxs[i];
+
+ if (str_has_prefix(sec_name, pfx)) {
+ snprintf(buf, buf_sz, "%s", sec_name + 1);
+ sanitize_identifier(buf);
+ return true;
+ }
+ }
+
+ return false;
}
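/*
 * Name-to-identifier mapping performed by the two helpers above, for a few
 * representative inputs (illustrative, not exhaustive):
 *
 *   ".bss"             -> "bss"
 *   ".rodata"          -> "rodata"
 *   ".rodata.str1.1"   -> "rodata_str1_1"   (sanitize_identifier() turns
 *                                            non-alphanumerics into '_')
 *   ".data.my_custom"  -> "data_my_custom"
 *   "my_hash_map"      -> "my_hash_map"     (non-internal maps keep their name)
 */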
-static void codegen_btf_dump_printf(void *ct, const char *fmt, va_list args)
+static void codegen_btf_dump_printf(void *ctx, const char *fmt, va_list args)
{
vprintf(fmt, args);
}
@@ -103,20 +130,15 @@ static int codegen_datasec_def(struct bpf_object *obj,
const char *sec_name = btf__name_by_offset(btf, sec->name_off);
const struct btf_var_secinfo *sec_var = btf_var_secinfos(sec);
int i, err, off = 0, pad_cnt = 0, vlen = btf_vlen(sec);
- const char *sec_ident;
- char var_ident[256];
-
- if (strcmp(sec_name, ".data") == 0)
- sec_ident = "data";
- else if (strcmp(sec_name, ".bss") == 0)
- sec_ident = "bss";
- else if (strcmp(sec_name, ".rodata") == 0)
- sec_ident = "rodata";
- else if (strcmp(sec_name, ".kconfig") == 0)
- sec_ident = "kconfig";
- else
+ char var_ident[256], sec_ident[256];
+ bool strip_mods = false;
+
+ if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
return 0;
+ if (strcmp(sec_name, ".kconfig") != 0)
+ strip_mods = true;
+
printf(" struct %s__%s {\n", obj_name, sec_ident);
for (i = 0; i < vlen; i++, sec_var++) {
const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
@@ -124,16 +146,14 @@ static int codegen_datasec_def(struct bpf_object *obj,
DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
.field_name = var_ident,
.indent_level = 2,
+ .strip_mods = strip_mods,
);
int need_off = sec_var->offset, align_off, align;
__u32 var_type_id = var->type;
- const struct btf_type *t;
- t = btf__type_by_id(btf, var_type_id);
- while (btf_is_mod(t)) {
- var_type_id = t->type;
- t = btf__type_by_id(btf, var_type_id);
- }
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
if (off > need_off) {
p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
@@ -147,6 +167,20 @@ static int codegen_datasec_def(struct bpf_object *obj,
var_name, align);
return -EINVAL;
}
+ /* Assume 32-bit architectures when generating data section
+ * struct memory layout. Given bpftool can't know which target
+ * host architecture it's emitting skeleton for, we need to be
+ * conservative and assume 32-bit one to ensure enough padding
+ * bytes are generated for pointer and long types. This will
+ * still work correctly for 64-bit architectures, because in
+ * the worst case we'll generate unnecessary padding field,
+ * which on 64-bit architectures is not strictly necessary and
+ * would be handled by natural 8-byte alignment. But it still
+ * will be a correct memory layout, based on recorded offsets
+ * in BTF.
+ */
+ if (align > 4)
+ align = 4;
align_off = (off + align - 1) / align * align;
if (align_off != need_off) {
@@ -175,33 +209,173 @@ static int codegen_datasec_def(struct bpf_object *obj,
return 0;
}
+static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
+{
+ int n = btf__type_cnt(btf), i;
+ char sec_ident[256];
+
+ for (i = 1; i < n; i++) {
+ const struct btf_type *t = btf__type_by_id(btf, i);
+ const char *name;
+
+ if (!btf_is_datasec(t))
+ continue;
+
+ name = btf__str_by_offset(btf, t->name_off);
+ if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
+ continue;
+
+ if (strcmp(sec_ident, map_ident) == 0)
+ return t;
+ }
+ return NULL;
+}
+
+static bool is_internal_mmapable_map(const struct bpf_map *map, char *buf, size_t sz)
+{
+ if (!bpf_map__is_internal(map) || !(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
+ return false;
+
+ if (!get_map_ident(map, buf, sz))
+ return false;
+
+ return true;
+}
+
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
- int n = btf__get_nr_types(btf);
struct btf_dump *d;
- int i, err = 0;
+ struct bpf_map *map;
+ const struct btf_type *sec;
+ char map_ident[256];
+ int err = 0;
- d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf);
- if (IS_ERR(d))
- return PTR_ERR(d);
+ d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
- for (i = 1; i <= n; i++) {
- const struct btf_type *t = btf__type_by_id(btf, i);
+ bpf_object__for_each_map(map, obj) {
+ /* only generate definitions for memory-mapped internal maps */
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ continue;
- if (!btf_is_datasec(t))
+ sec = find_type_for_map(btf, map_ident);
+
+ /* In some cases (e.g., sections like .rodata.cst16 containing
+ * compiler allocated string constants only) there will be
+ * special internal maps with no corresponding DATASEC BTF
+ * type. In such case, generate empty structs for each such
+ * map. It will still be memory-mapped and its contents
+ * accessible from user-space through BPF skeleton.
+ */
+ if (!sec) {
+ printf(" struct %s__%s {\n", obj_name, map_ident);
+ printf(" } *%s;\n", map_ident);
+ } else {
+ err = codegen_datasec_def(obj, btf, d, sec, obj_name);
+ if (err)
+ goto out;
+ }
+ }
+
+
+out:
+ btf_dump__free(d);
+ return err;
+}
+
+static bool btf_is_ptr_to_func_proto(const struct btf *btf,
+ const struct btf_type *v)
+{
+ return btf_is_ptr(v) && btf_is_func_proto(btf__type_by_id(btf, v->type));
+}
+
+static int codegen_subskel_datasecs(struct bpf_object *obj, const char *obj_name)
+{
+ struct btf *btf = bpf_object__btf(obj);
+ struct btf_dump *d;
+ struct bpf_map *map;
+ const struct btf_type *sec, *var;
+ const struct btf_var_secinfo *sec_var;
+ int i, err = 0, vlen;
+ char map_ident[256], sec_ident[256];
+ bool strip_mods = false, needs_typeof = false;
+ const char *sec_name, *var_name;
+ __u32 var_type_id;
+
+ d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
+ if (!d)
+ return -errno;
+
+ bpf_object__for_each_map(map, obj) {
+ /* only generate definitions for memory-mapped internal maps */
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
continue;
- err = codegen_datasec_def(obj, btf, d, t, obj_name);
- if (err)
- goto out;
+ sec = find_type_for_map(btf, map_ident);
+ if (!sec)
+ continue;
+
+ sec_name = btf__name_by_offset(btf, sec->name_off);
+ if (!get_datasec_ident(sec_name, sec_ident, sizeof(sec_ident)))
+ continue;
+
+ strip_mods = strcmp(sec_name, ".kconfig") != 0;
+ printf(" struct %s__%s {\n", obj_name, sec_ident);
+
+ sec_var = btf_var_secinfos(sec);
+ vlen = btf_vlen(sec);
+ for (i = 0; i < vlen; i++, sec_var++) {
+ DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
+ .indent_level = 2,
+ .strip_mods = strip_mods,
+ /* we'll print the name separately */
+ .field_name = "",
+ );
+
+ var = btf__type_by_id(btf, sec_var->type);
+ var_name = btf__name_by_offset(btf, var->name_off);
+ var_type_id = var->type;
+
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ /* The datasec member has KIND_VAR but we want the
+ * underlying type of the variable (e.g. KIND_INT).
+ */
+ var = skip_mods_and_typedefs(btf, var->type, NULL);
+
+ printf("\t\t");
+ /* Func and array members require special handling.
+ * Instead of producing `typename *var`, they produce
+ * `typeof(typename) *var`. This allows us to keep a
+ * similar syntax where the identifier is just prefixed
+ * by *, allowing us to ignore C declaration minutiae.
+ */
+ needs_typeof = btf_is_array(var) || btf_is_ptr_to_func_proto(btf, var);
+ if (needs_typeof)
+ printf("typeof(");
+
+ err = btf_dump__emit_type_decl(d, var_type_id, &opts);
+ if (err)
+ goto out;
+
+ if (needs_typeof)
+ printf(")");
+
+ printf(" *%s;\n", var_name);
+ }
+ printf(" } %s;\n", sec_ident);
}
+
out:
btf_dump__free(d);
return err;
}
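/*
 * Shape of the output emitted by codegen_subskel_datasecs() for a
 * hypothetical object "myobj" whose .bss holds "int counter;".  Every member
 * is a pointer, filled in at open time to point into the mmap-ed map data;
 * array and function-prototype members additionally get the typeof() wrapping
 * generated above.
 *
 *	struct myobj__bss {
 *		int *counter;
 *	} bss;
 */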
-static int codegen(const char *template, ...)
+static void codegen(const char *template, ...)
{
const char *src, *end;
int skip_tabs = 0, n;
@@ -212,7 +386,7 @@ static int codegen(const char *template, ...)
n = strlen(template);
s = malloc(n + 1);
if (!s)
- return -ENOMEM;
+ exit(-1);
src = template;
dst = s;
@@ -223,9 +397,10 @@ static int codegen(const char *template, ...)
} else if (c == '\n') {
break;
} else {
- p_err("unrecognized character at pos %td in template '%s'",
- src - template - 1, template);
- return -EINVAL;
+ p_err("unrecognized character at pos %td in template '%s': '%c'",
+ src - template - 1, template, c);
+ free(s);
+ exit(-1);
}
}
@@ -235,7 +410,8 @@ static int codegen(const char *template, ...)
if (*src != '\t') {
p_err("not enough tabs at pos %td in template '%s'",
src - template - 1, template);
- return -EINVAL;
+ free(s);
+ exit(-1);
}
}
/* trim trailing whitespace */
@@ -256,19 +432,488 @@ static int codegen(const char *template, ...)
va_end(args);
free(s);
- return n;
+}
+
+static void print_hex(const char *data, int data_sz)
+{
+ int i, len;
+
+ for (i = 0, len = 0; i < data_sz; i++) {
+ int w = data[i] ? 4 : 2;
+
+ len += w;
+ if (len > 78) {
+ printf("\\\n");
+ len = w;
+ }
+ if (!data[i])
+ printf("\\0");
+ else
+ printf("\\x%02x", (unsigned char)data[i]);
+ }
+}
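/*
 * print_hex() example: the bytes { 'a', 'b', 0, 0x7f } are emitted as
 *
 *	\x61\x62\0\x7f
 *
 * NUL bytes are shortened to "\0" (2 characters), every other byte becomes
 * "\xNN" (4 characters), and a backslash-newline is inserted once a line
 * would exceed 78 characters, so the embedded data stays a valid C string
 * literal.
 */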
+
+static size_t bpf_map_mmap_sz(const struct bpf_map *map)
+{
+ long page_sz = sysconf(_SC_PAGE_SIZE);
+ size_t map_sz;
+
+ map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
+ map_sz = roundup(map_sz, page_sz);
+ return map_sz;
+}
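/*
 * Worked example for bpf_map_mmap_sz(), assuming a 4096-byte page size and a
 * map with value_size = 4, max_entries = 1000:
 *
 *	roundup(4, 8) * 1000 = 8 * 1000 = 8000 bytes
 *	roundup(8000, 4096)  = 8192 bytes  (two pages mmap-ed for the data)
 */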
+
+/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
+static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
+{
+ struct btf *btf = bpf_object__btf(obj);
+ struct bpf_map *map;
+ struct btf_var_secinfo *sec_var;
+ int i, vlen;
+ const struct btf_type *sec;
+ char map_ident[256], var_ident[256];
+
+ if (!btf)
+ return;
+
+ codegen("\
+ \n\
+ __attribute__((unused)) static void \n\
+ %1$s__assert(struct %1$s *s __attribute__((unused))) \n\
+ { \n\
+ #ifdef __cplusplus \n\
+ #define _Static_assert static_assert \n\
+ #endif \n\
+ ", obj_name);
+
+ bpf_object__for_each_map(map, obj) {
+ if (!is_internal_mmapable_map(map, map_ident, sizeof(map_ident)))
+ continue;
+
+ sec = find_type_for_map(btf, map_ident);
+ if (!sec) {
+ /* best effort, couldn't find the type for this map */
+ continue;
+ }
+
+ sec_var = btf_var_secinfos(sec);
+ vlen = btf_vlen(sec);
+
+ for (i = 0; i < vlen; i++, sec_var++) {
+ const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
+ const char *var_name = btf__name_by_offset(btf, var->name_off);
+ long var_size;
+
+ /* static variables are not exposed through BPF skeleton */
+ if (btf_var(var)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ var_size = btf__resolve_size(btf, var->type);
+ if (var_size < 0)
+ continue;
+
+ var_ident[0] = '\0';
+ strncat(var_ident, var_name, sizeof(var_ident) - 1);
+ sanitize_identifier(var_ident);
+
+ printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
+ map_ident, var_ident, var_size, var_ident);
+ }
+ }
+ codegen("\
+ \n\
+ #ifdef __cplusplus \n\
+ #undef _Static_assert \n\
+ #endif \n\
+ } \n\
+ ");
+}
+
+static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+
+ bpf_object__for_each_program(prog, obj) {
+ const char *tp_name;
+
+ codegen("\
+ \n\
+ \n\
+ static inline int \n\
+ %1$s__%2$s__attach(struct %1$s *skel) \n\
+ { \n\
+ int prog_fd = skel->progs.%2$s.prog_fd; \n\
+ ", obj_name, bpf_program__name(prog));
+
+ switch (bpf_program__type(prog)) {
+ case BPF_PROG_TYPE_RAW_TRACEPOINT:
+ tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
+ printf("\tint fd = skel_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
+ break;
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+ if (bpf_program__expected_attach_type(prog) == BPF_TRACE_ITER)
+ printf("\tint fd = skel_link_create(prog_fd, 0, BPF_TRACE_ITER);\n");
+ else
+ printf("\tint fd = skel_raw_tracepoint_open(NULL, prog_fd);\n");
+ break;
+ default:
+ printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
+ break;
+ }
+ codegen("\
+ \n\
+ \n\
+ if (fd > 0) \n\
+ skel->links.%1$s_fd = fd; \n\
+ return fd; \n\
+ } \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ \n\
+ static inline int \n\
+ %1$s__attach(struct %1$s *skel) \n\
+ { \n\
+ int ret = 0; \n\
+ \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
+ ", obj_name, bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ return ret < 0 ? ret : 0; \n\
+ } \n\
+ \n\
+ static inline void \n\
+ %1$s__detach(struct %1$s *skel) \n\
+ { \n\
+ ", obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ skel_closenz(skel->links.%1$s_fd); \n\
+ ", bpf_program__name(prog));
+ }
+
+ codegen("\
+ \n\
+ } \n\
+ ");
+}
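/*
 * Shape of the per-program attach function produced by the codegen above for
 * a raw-tracepoint program.  Object name "myobj", program "handle_exec" and
 * section "raw_tp/sched_process_exec" are hypothetical.
 *
 *	static inline int
 *	myobj__handle_exec__attach(struct myobj *skel)
 *	{
 *		int prog_fd = skel->progs.handle_exec.prog_fd;
 *		int fd = skel_raw_tracepoint_open("sched_process_exec", prog_fd);
 *
 *		if (fd > 0)
 *			skel->links.handle_exec_fd = fd;
 *		return fd;
 *	}
 */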
+
+static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
+{
+ struct bpf_program *prog;
+ struct bpf_map *map;
+ char ident[256];
+
+ codegen("\
+ \n\
+ static void \n\
+ %1$s__destroy(struct %1$s *skel) \n\
+ { \n\
+ if (!skel) \n\
+ return; \n\
+ %1$s__detach(skel); \n\
+ ",
+ obj_name);
+
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ skel_closenz(skel->progs.%1$s.prog_fd); \n\
+ ", bpf_program__name(prog));
+ }
+
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ if (bpf_map__is_internal(map) &&
+ (bpf_map__map_flags(map) & BPF_F_MMAPABLE))
+ printf("\tskel_free_map_data(skel->%1$s, skel->maps.%1$s.initial_value, %2$zd);\n",
+ ident, bpf_map_mmap_sz(map));
+ codegen("\
+ \n\
+ skel_closenz(skel->maps.%1$s.map_fd); \n\
+ ", ident);
+ }
+ codegen("\
+ \n\
+ skel_free(skel); \n\
+ } \n\
+ ",
+ obj_name);
+}
+
+static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
+{
+ DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
+ struct bpf_map *map;
+ char ident[256];
+ int err = 0;
+
+ err = bpf_object__gen_loader(obj, &opts);
+ if (err)
+ return err;
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("failed to load object file");
+ goto out;
+ }
+ /* If there was no error during load then gen_loader_opts
+ * are populated with the loader program.
+ */
+
+ /* finish generating 'struct skel' */
+ codegen("\
+ \n\
+ }; \n\
+ ", obj_name);
+
+
+ codegen_attach_detach(obj, obj_name);
+
+ codegen_destroy(obj, obj_name);
+
+ codegen("\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open(void) \n\
+ { \n\
+ struct %1$s *skel; \n\
+ \n\
+ skel = skel_alloc(sizeof(*skel)); \n\
+ if (!skel) \n\
+ goto cleanup; \n\
+ skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
+ ",
+ obj_name, opts.data_sz);
+ bpf_object__for_each_map(map, obj) {
+ const void *mmap_data = NULL;
+ size_t mmap_size = 0;
+
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ codegen("\
+ \n\
+ skel->%1$s = skel_prep_map_data((void *)\"\\ \n\
+ ", ident);
+ mmap_data = bpf_map__initial_value(map, &mmap_size);
+ print_hex(mmap_data, mmap_size);
+ codegen("\
+ \n\
+ \", %1$zd, %2$zd); \n\
+ if (!skel->%3$s) \n\
+ goto cleanup; \n\
+ skel->maps.%3$s.initial_value = (__u64) (long) skel->%3$s;\n\
+ ", bpf_map_mmap_sz(map), mmap_size, ident);
+ }
+ codegen("\
+ \n\
+ return skel; \n\
+ cleanup: \n\
+ %1$s__destroy(skel); \n\
+ return NULL; \n\
+ } \n\
+ \n\
+ static inline int \n\
+ %1$s__load(struct %1$s *skel) \n\
+ { \n\
+ struct bpf_load_and_run_opts opts = {}; \n\
+ int err; \n\
+ \n\
+ opts.ctx = (struct bpf_loader_ctx *)skel; \n\
+ opts.data_sz = %2$d; \n\
+ opts.data = (void *)\"\\ \n\
+ ",
+ obj_name, opts.data_sz);
+ print_hex(opts.data, opts.data_sz);
+ codegen("\
+ \n\
+ \"; \n\
+ ");
+
+ codegen("\
+ \n\
+ opts.insns_sz = %d; \n\
+ opts.insns = (void *)\"\\ \n\
+ ",
+ opts.insns_sz);
+ print_hex(opts.insns, opts.insns_sz);
+ codegen("\
+ \n\
+ \"; \n\
+ err = bpf_load_and_run(&opts); \n\
+ if (err < 0) \n\
+ return err; \n\
+ ", obj_name);
+ bpf_object__for_each_map(map, obj) {
+ const char *mmap_flags;
+
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ if (bpf_map__map_flags(map) & BPF_F_RDONLY_PROG)
+ mmap_flags = "PROT_READ";
+ else
+ mmap_flags = "PROT_READ | PROT_WRITE";
+
+ codegen("\
+ \n\
+ skel->%1$s = skel_finalize_map_data(&skel->maps.%1$s.initial_value, \n\
+ %2$zd, %3$s, skel->maps.%1$s.map_fd);\n\
+ if (!skel->%1$s) \n\
+ return -ENOMEM; \n\
+ ",
+ ident, bpf_map_mmap_sz(map), mmap_flags);
+ }
+ codegen("\
+ \n\
+ return 0; \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open_and_load(void) \n\
+ { \n\
+ struct %1$s *skel; \n\
+ \n\
+ skel = %1$s__open(); \n\
+ if (!skel) \n\
+ return NULL; \n\
+ if (%1$s__load(skel)) { \n\
+ %1$s__destroy(skel); \n\
+ return NULL; \n\
+ } \n\
+ return skel; \n\
+ } \n\
+ \n\
+ ", obj_name);
+
+ codegen_asserts(obj, obj_name);
+
+ codegen("\
+ \n\
+ \n\
+ #endif /* %s */ \n\
+ ",
+ header_guard);
+ err = 0;
+out:
+ return err;
+}
+
+static void
+codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped)
+{
+ struct bpf_map *map;
+ char ident[256];
+ size_t i;
+
+ if (!map_cnt)
+ return;
+
+ codegen("\
+ \n\
+ \n\
+ /* maps */ \n\
+ s->map_cnt = %zu; \n\
+ s->map_skel_sz = sizeof(*s->maps); \n\
+ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
+ if (!s->maps) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ ",
+ map_cnt
+ );
+ i = 0;
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+
+ codegen("\
+ \n\
+ \n\
+ s->maps[%zu].name = \"%s\"; \n\
+ s->maps[%zu].map = &obj->maps.%s; \n\
+ ",
+ i, bpf_map__name(map), i, ident);
+ /* memory-mapped internal maps */
+ if (mmaped && is_internal_mmapable_map(map, ident, sizeof(ident))) {
+ printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
+ i, ident);
+ }
+ i++;
+ }
+}
+
+static void
+codegen_progs_skeleton(struct bpf_object *obj, size_t prog_cnt, bool populate_links)
+{
+ struct bpf_program *prog;
+ int i;
+
+ if (!prog_cnt)
+ return;
+
+ codegen("\
+ \n\
+ \n\
+ /* programs */ \n\
+ s->prog_cnt = %zu; \n\
+ s->prog_skel_sz = sizeof(*s->progs); \n\
+ s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);\n\
+ if (!s->progs) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ ",
+ prog_cnt
+ );
+ i = 0;
+ bpf_object__for_each_program(prog, obj) {
+ codegen("\
+ \n\
+ \n\
+ s->progs[%1$zu].name = \"%2$s\"; \n\
+ s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
+ ",
+ i, bpf_program__name(prog));
+
+ if (populate_links) {
+ codegen("\
+ \n\
+ s->progs[%1$zu].link = &obj->links.%2$s;\n\
+ ",
+ i, bpf_program__name(prog));
+ }
+ i++;
+ }
}
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
- size_t i, map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
+ size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
- char obj_name[MAX_OBJ_NAME_LEN], *obj_data;
+ char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
struct bpf_object *obj = NULL;
- const char *file, *ident;
+ const char *file;
+ char ident[256];
struct bpf_program *prog;
- int fd, len, err = -1;
+ int fd, err = -1;
struct bpf_map *map;
struct btf *btf;
struct stat st;
@@ -279,6 +924,28 @@ static int do_skeleton(int argc, char **argv)
}
file = GET_ARG();
+ while (argc) {
+ if (!REQ_ARGS(2))
+ return -1;
+
+ if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ if (obj_name[0] != '\0') {
+ p_err("object name already specified");
+ return -1;
+ }
+
+ strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
+ obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ } else {
+ p_err("unknown arg %s", *argv);
+ return -1;
+ }
+
+ NEXT_ARG();
+ }
+
if (argc) {
p_err("extra unknown arguments");
return -1;
@@ -301,18 +968,24 @@ static int do_skeleton(int argc, char **argv)
p_err("failed to mmap() %s: %s", file, strerror(errno));
goto out;
}
- get_obj_name(obj_name, file);
+ if (obj_name[0] == '\0')
+ get_obj_name(obj_name, file);
opts.object_name = obj_name;
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ opts.kernel_log_level = 1 + 2 + 4;
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
- if (IS_ERR(obj)) {
- obj = NULL;
- p_err("failed to open BPF object file: %ld", PTR_ERR(obj));
+ if (!obj) {
+ char err_buf[256];
+
+ err = -errno;
+ libbpf_strerror(err, err_buf, sizeof(err_buf));
+ p_err("failed to open BPF object file: %s", err_buf);
goto out;
}
bpf_object__for_each_map(map, obj) {
- ident = get_map_ident(map);
- if (!ident) {
+ if (!get_map_ident(map, ident, sizeof(ident))) {
p_err("ignoring unrecognized internal map '%s'...",
bpf_map__name(map));
continue;
@@ -323,15 +996,32 @@ static int do_skeleton(int argc, char **argv)
prog_cnt++;
}
- get_header_guard(header_guard, obj_name);
- codegen("\
+ get_header_guard(header_guard, obj_name, "SKEL_H");
+ if (use_loader) {
+ codegen("\
+ \n\
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
+ /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
+ #ifndef %2$s \n\
+ #define %2$s \n\
+ \n\
+ #include <bpf/skel_internal.h> \n\
+ \n\
+ struct %1$s { \n\
+ struct bpf_loader_ctx ctx; \n\
+ ",
+ obj_name, header_guard
+ );
+ } else {
+ codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
\n\
- /* THIS FILE IS AUTOGENERATED! */ \n\
+ /* THIS FILE IS AUTOGENERATED BY BPFTOOL! */ \n\
#ifndef %2$s \n\
#define %2$s \n\
\n\
+ #include <errno.h> \n\
#include <stdlib.h> \n\
#include <bpf/libbpf.h> \n\
\n\
@@ -340,15 +1030,18 @@ static int do_skeleton(int argc, char **argv)
struct bpf_object *obj; \n\
",
obj_name, header_guard
- );
+ );
+ }
if (map_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_map(map, obj) {
- ident = get_map_ident(map);
- if (!ident)
+ if (!get_map_ident(map, ident, sizeof(ident)))
continue;
- printf("\t\tstruct bpf_map *%s;\n", ident);
+ if (use_loader)
+ printf("\t\tstruct bpf_map_desc %s;\n", ident);
+ else
+ printf("\t\tstruct bpf_map *%s;\n", ident);
}
printf("\t} maps;\n");
}
@@ -356,14 +1049,22 @@ static int do_skeleton(int argc, char **argv)
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
- printf("\t\tstruct bpf_program *%s;\n",
- bpf_program__name(prog));
+ if (use_loader)
+ printf("\t\tstruct bpf_prog_desc %s;\n",
+ bpf_program__name(prog));
+ else
+ printf("\t\tstruct bpf_program *%s;\n",
+ bpf_program__name(prog));
}
printf("\t} progs;\n");
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
- printf("\t\tstruct bpf_link *%s;\n",
- bpf_program__name(prog));
+ if (use_loader)
+ printf("\t\tint %s_fd;\n",
+ bpf_program__name(prog));
+ else
+ printf("\t\tstruct bpf_link *%s;\n",
+ bpf_program__name(prog));
}
printf("\t} links;\n");
}
@@ -374,9 +1075,23 @@ static int do_skeleton(int argc, char **argv)
if (err)
goto out;
}
+ if (use_loader) {
+ err = gen_trace(obj, obj_name, header_guard);
+ goto out;
+ }
codegen("\
\n\
+ \n\
+ #ifdef __cplusplus \n\
+ static inline struct %1$s *open(const struct bpf_object_open_opts *opts = nullptr);\n\
+ static inline struct %1$s *open_and_load(); \n\
+ static inline int load(struct %1$s *skel); \n\
+ static inline int attach(struct %1$s *skel); \n\
+ static inline void detach(struct %1$s *skel); \n\
+ static inline void destroy(struct %1$s *skel); \n\
+ static inline const void *elf_bytes(size_t *sz); \n\
+ #endif /* __cplusplus */ \n\
}; \n\
\n\
static void \n\
@@ -396,18 +1111,26 @@ static int do_skeleton(int argc, char **argv)
%1$s__open_opts(const struct bpf_object_open_opts *opts) \n\
{ \n\
struct %1$s *obj; \n\
+ int err; \n\
\n\
- obj = (typeof(obj))calloc(1, sizeof(*obj)); \n\
- if (!obj) \n\
+ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
+ if (!obj) { \n\
+ errno = ENOMEM; \n\
return NULL; \n\
- if (%1$s__create_skeleton(obj)) \n\
- goto err; \n\
- if (bpf_object__open_skeleton(obj->skeleton, opts)) \n\
- goto err; \n\
+ } \n\
+ \n\
+ err = %1$s__create_skeleton(obj); \n\
+ if (err) \n\
+ goto err_out; \n\
+ \n\
+ err = bpf_object__open_skeleton(obj->skeleton, opts);\n\
+ if (err) \n\
+ goto err_out; \n\
\n\
return obj; \n\
- err: \n\
+ err_out: \n\
%1$s__destroy(obj); \n\
+ errno = -err; \n\
return NULL; \n\
} \n\
\n\
@@ -427,12 +1150,15 @@ static int do_skeleton(int argc, char **argv)
%1$s__open_and_load(void) \n\
{ \n\
struct %1$s *obj; \n\
+ int err; \n\
\n\
obj = %1$s__open(); \n\
if (!obj) \n\
return NULL; \n\
- if (%1$s__load(obj)) { \n\
+ err = %1$s__load(obj); \n\
+ if (err) { \n\
%1$s__destroy(obj); \n\
+ errno = -err; \n\
return NULL; \n\
} \n\
return obj; \n\
@@ -447,7 +1173,7 @@ static int do_skeleton(int argc, char **argv)
static inline void \n\
%1$s__detach(struct %1$s *obj) \n\
{ \n\
- return bpf_object__detach_skeleton(obj->skeleton); \n\
+ bpf_object__detach_skeleton(obj->skeleton); \n\
} \n\
",
obj_name
@@ -456,15 +1182,19 @@ static int do_skeleton(int argc, char **argv)
codegen("\
\n\
\n\
+ static inline const void *%1$s__elf_bytes(size_t *sz); \n\
+ \n\
static inline int \n\
%1$s__create_skeleton(struct %1$s *obj) \n\
{ \n\
struct bpf_object_skeleton *s; \n\
+ int err; \n\
\n\
- s = (typeof(s))calloc(1, sizeof(*s)); \n\
- if (!s) \n\
- return -1; \n\
- obj->skeleton = s; \n\
+ s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
+ if (!s) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
\n\
s->sz = sizeof(*s); \n\
s->name = \"%1$s\"; \n\
@@ -472,104 +1202,363 @@ static int do_skeleton(int argc, char **argv)
",
obj_name
);
- if (map_cnt) {
- codegen("\
- \n\
+
+ codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/);
+ codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/);
+
+ codegen("\
+ \n\
\n\
- /* maps */ \n\
- s->map_cnt = %zu; \n\
- s->map_skel_sz = sizeof(*s->maps); \n\
- s->maps = (typeof(s->maps))calloc(s->map_cnt, s->map_skel_sz);\n\
- if (!s->maps) \n\
- goto err; \n\
- ",
- map_cnt
- );
- i = 0;
- bpf_object__for_each_map(map, obj) {
- ident = get_map_ident(map);
+ s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\
+ \n\
+ obj->skeleton = s; \n\
+ return 0; \n\
+ err: \n\
+ bpf_object__destroy_skeleton(s); \n\
+ return err; \n\
+ } \n\
+ \n\
+ static inline const void *%2$s__elf_bytes(size_t *sz) \n\
+ { \n\
+ *sz = %1$d; \n\
+ return (const void *)\"\\ \n\
+ "
+ , file_sz, obj_name);
- if (!ident)
- continue;
+ /* embed contents of BPF object file */
+ print_hex(obj_data, file_sz);
- codegen("\
- \n\
+ codegen("\
+ \n\
+ \"; \n\
+ } \n\
\n\
- s->maps[%zu].name = \"%s\"; \n\
- s->maps[%zu].map = &obj->maps.%s; \n\
- ",
- i, bpf_map__name(map), i, ident);
- /* memory-mapped internal maps */
- if (bpf_map__is_internal(map) &&
- (bpf_map__def(map)->map_flags & BPF_F_MMAPABLE)) {
- printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
- i, ident);
+ #ifdef __cplusplus \n\
+ struct %1$s *%1$s::open(const struct bpf_object_open_opts *opts) { return %1$s__open_opts(opts); }\n\
+ struct %1$s *%1$s::open_and_load() { return %1$s__open_and_load(); } \n\
+ int %1$s::load(struct %1$s *skel) { return %1$s__load(skel); } \n\
+ int %1$s::attach(struct %1$s *skel) { return %1$s__attach(skel); } \n\
+ void %1$s::detach(struct %1$s *skel) { %1$s__detach(skel); } \n\
+ void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); } \n\
+ const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
+ #endif /* __cplusplus */ \n\
+ \n\
+ ",
+ obj_name);
+
+ codegen_asserts(obj, obj_name);
+
+ codegen("\
+ \n\
+ \n\
+ #endif /* %1$s */ \n\
+ ",
+ header_guard);
+ err = 0;
+out:
+ bpf_object__close(obj);
+ if (obj_data)
+ munmap(obj_data, mmap_sz);
+ close(fd);
+ return err;
+}
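
For context, a minimal sketch of how the header emitted by "gen skeleton" is typically consumed; the object name mytracer and the mytracer__* calls below simply follow the %1$s__* naming pattern generated above and are illustrative, not part of this patch:

#include <errno.h>
#include <stdio.h>
#include <string.h>

#include "mytracer.skel.h"	/* hypothetical "bpftool gen skeleton" output */

int main(void)
{
	struct mytracer *skel;
	int err;

	skel = mytracer__open_and_load();	/* open + load; errno is set on failure */
	if (!skel) {
		fprintf(stderr, "skeleton open/load failed: %s\n", strerror(errno));
		return 1;
	}

	err = mytracer__attach(skel);		/* attach all auto-attachable programs */
	if (err)
		fprintf(stderr, "attach failed: %d\n", err);

	/* ... run; mmapable globals are typically reachable via skel->bss / skel->data ... */

	mytracer__destroy(skel);		/* detaches and frees everything */
	return err ? 1 : 0;
}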
+
+/* Subskeletons are like skeletons, except they don't own the bpf_object,
+ * associated maps, links, etc. Instead, they know about the existence of
+ * variables, maps, programs and are able to find their locations
+ * _at runtime_ from an already loaded bpf_object.
+ *
+ * This allows for library-like BPF objects to have userspace counterparts
+ * with access to their own items without having to know anything about the
+ * final BPF object that the library was linked into.
+ */
+static int do_subskeleton(int argc, char **argv)
+{
+ char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SUBSKEL_H__")];
+ size_t i, len, file_sz, map_cnt = 0, prog_cnt = 0, mmap_sz, var_cnt = 0, var_idx = 0;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data;
+ struct bpf_object *obj = NULL;
+ const char *file, *var_name;
+ char ident[256];
+ int fd, err = -1, map_type_id;
+ const struct bpf_map *map;
+ struct bpf_program *prog;
+ struct btf *btf;
+ const struct btf_type *map_type, *var_type;
+ const struct btf_var_secinfo *var;
+ struct stat st;
+
+ if (!REQ_ARGS(1)) {
+ usage();
+ return -1;
+ }
+ file = GET_ARG();
+
+ while (argc) {
+ if (!REQ_ARGS(2))
+ return -1;
+
+ if (is_prefix(*argv, "name")) {
+ NEXT_ARG();
+
+ if (obj_name[0] != '\0') {
+ p_err("object name already specified");
+ return -1;
}
- i++;
+
+ strncpy(obj_name, *argv, MAX_OBJ_NAME_LEN - 1);
+ obj_name[MAX_OBJ_NAME_LEN - 1] = '\0';
+ } else {
+ p_err("unknown arg %s", *argv);
+ return -1;
}
+
+ NEXT_ARG();
}
- if (prog_cnt) {
- codegen("\
- \n\
+
+ if (argc) {
+ p_err("extra unknown arguments");
+ return -1;
+ }
+
+ if (use_loader) {
+ p_err("cannot use loader for subskeletons");
+ return -1;
+ }
+
+ if (stat(file, &st)) {
+ p_err("failed to stat() %s: %s", file, strerror(errno));
+ return -1;
+ }
+ file_sz = st.st_size;
+ mmap_sz = roundup(file_sz, sysconf(_SC_PAGE_SIZE));
+ fd = open(file, O_RDONLY);
+ if (fd < 0) {
+ p_err("failed to open() %s: %s", file, strerror(errno));
+ return -1;
+ }
+ obj_data = mmap(NULL, mmap_sz, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (obj_data == MAP_FAILED) {
+ obj_data = NULL;
+ p_err("failed to mmap() %s: %s", file, strerror(errno));
+ goto out;
+ }
+ if (obj_name[0] == '\0')
+ get_obj_name(obj_name, file);
+
+ /* The empty object name allows us to use bpf_map__name and produce
+ * ELF section names out of it. (".data" instead of "obj.data")
+ */
+ opts.object_name = "";
+ obj = bpf_object__open_mem(obj_data, file_sz, &opts);
+ if (!obj) {
+ char err_buf[256];
+
+ libbpf_strerror(errno, err_buf, sizeof(err_buf));
+ p_err("failed to open BPF object file: %s", err_buf);
+ obj = NULL;
+ goto out;
+ }
+
+ btf = bpf_object__btf(obj);
+ if (!btf) {
+ err = -1;
+ p_err("need btf type information for %s", obj_name);
+ goto out;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ prog_cnt++;
+ }
+
+ /* First, count how many variables we have to find.
+ * We need this in advance so the subskel can allocate the right
+ * amount of storage.
+ */
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+
+ /* Also count all maps that have a name */
+ map_cnt++;
+
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
+
+ map_type_id = bpf_map__btf_value_type_id(map);
+ if (map_type_id <= 0) {
+ err = map_type_id;
+ goto out;
+ }
+ map_type = btf__type_by_id(btf, map_type_id);
+
+ var = btf_var_secinfos(map_type);
+ len = btf_vlen(map_type);
+ for (i = 0; i < len; i++, var++) {
+ var_type = btf__type_by_id(btf, var->type);
+
+ if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ var_cnt++;
+ }
+ }
+
+ get_header_guard(header_guard, obj_name, "SUBSKEL_H");
+ codegen("\
+ \n\
+ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
\n\
- /* programs */ \n\
- s->prog_cnt = %zu; \n\
- s->prog_skel_sz = sizeof(*s->progs); \n\
- s->progs = (typeof(s->progs))calloc(s->prog_cnt, s->prog_skel_sz);\n\
- if (!s->progs) \n\
- goto err; \n\
- ",
- prog_cnt
- );
- i = 0;
- bpf_object__for_each_program(prog, obj) {
- codegen("\
- \n\
+ /* THIS FILE IS AUTOGENERATED! */ \n\
+ #ifndef %2$s \n\
+ #define %2$s \n\
\n\
- s->progs[%1$zu].name = \"%2$s\"; \n\
- s->progs[%1$zu].prog = &obj->progs.%2$s;\n\
- s->progs[%1$zu].link = &obj->links.%2$s;\n\
- ",
- i, bpf_program__name(prog));
- i++;
+ #include <errno.h> \n\
+ #include <stdlib.h> \n\
+ #include <bpf/libbpf.h> \n\
+ \n\
+ struct %1$s { \n\
+ struct bpf_object *obj; \n\
+ struct bpf_object_subskeleton *subskel; \n\
+ ", obj_name, header_guard);
+
+ if (map_cnt) {
+ printf("\tstruct {\n");
+ bpf_object__for_each_map(map, obj) {
+ if (!get_map_ident(map, ident, sizeof(ident)))
+ continue;
+ printf("\t\tstruct bpf_map *%s;\n", ident);
}
+ printf("\t} maps;\n");
}
+
+ if (prog_cnt) {
+ printf("\tstruct {\n");
+ bpf_object__for_each_program(prog, obj) {
+ printf("\t\tstruct bpf_program *%s;\n",
+ bpf_program__name(prog));
+ }
+ printf("\t} progs;\n");
+ }
+
+ err = codegen_subskel_datasecs(obj, obj_name);
+ if (err)
+ goto out;
+
+ /* emit code that will allocate enough storage for all symbols */
codegen("\
\n\
\n\
- s->data_sz = %d; \n\
- s->data = (void *)\"\\ \n\
+ #ifdef __cplusplus \n\
+ static inline struct %1$s *open(const struct bpf_object *src);\n\
+ static inline void destroy(struct %1$s *skel); \n\
+ #endif /* __cplusplus */ \n\
+ }; \n\
+ \n\
+ static inline void \n\
+ %1$s__destroy(struct %1$s *skel) \n\
+ { \n\
+ if (!skel) \n\
+ return; \n\
+ if (skel->subskel) \n\
+ bpf_object__destroy_subskeleton(skel->subskel);\n\
+ free(skel); \n\
+ } \n\
+ \n\
+ static inline struct %1$s * \n\
+ %1$s__open(const struct bpf_object *src) \n\
+ { \n\
+ struct %1$s *obj; \n\
+ struct bpf_object_subskeleton *s; \n\
+ int err; \n\
+ \n\
+ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\
+ if (!obj) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ s = (struct bpf_object_subskeleton *)calloc(1, sizeof(*s));\n\
+ if (!s) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
+ s->sz = sizeof(*s); \n\
+ s->obj = src; \n\
+ s->var_skel_sz = sizeof(*s->vars); \n\
+ obj->subskel = s; \n\
+ \n\
+ /* vars */ \n\
+ s->var_cnt = %2$d; \n\
+ s->vars = (struct bpf_var_skeleton *)calloc(%2$d, sizeof(*s->vars));\n\
+ if (!s->vars) { \n\
+ err = -ENOMEM; \n\
+ goto err; \n\
+ } \n\
",
- file_sz);
+ obj_name, var_cnt
+ );
- /* embed contents of BPF object file */
- for (i = 0, len = 0; i < file_sz; i++) {
- int w = obj_data[i] ? 4 : 2;
+ /* walk through each symbol and emit the runtime representation */
+ bpf_object__for_each_map(map, obj) {
+ if (!is_internal_mmapable_map(map, ident, sizeof(ident)))
+ continue;
- len += w;
- if (len > 78) {
- printf("\\\n");
- len = w;
+ map_type_id = bpf_map__btf_value_type_id(map);
+ if (map_type_id <= 0)
+ /* skip over internal maps with no type */
+ continue;
+
+ map_type = btf__type_by_id(btf, map_type_id);
+ var = btf_var_secinfos(map_type);
+ len = btf_vlen(map_type);
+ for (i = 0; i < len; i++, var++) {
+ var_type = btf__type_by_id(btf, var->type);
+ var_name = btf__name_by_offset(btf, var_type->name_off);
+
+ if (btf_var(var_type)->linkage == BTF_VAR_STATIC)
+ continue;
+
+ /* Note that we use the dot prefix in .data as the
+ * field access operator i.e. maps%s becomes maps.data
+ */
+ codegen("\
+ \n\
+ \n\
+ s->vars[%3$d].name = \"%1$s\"; \n\
+ s->vars[%3$d].map = &obj->maps.%2$s; \n\
+ s->vars[%3$d].addr = (void **) &obj->%2$s.%1$s;\n\
+ ", var_name, ident, var_idx);
+
+ var_idx++;
}
- if (!obj_data[i])
- printf("\\0");
- else
- printf("\\x%02x", (unsigned char)obj_data[i]);
}
+ codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/);
+ codegen_progs_skeleton(obj, prog_cnt, false /*links*/);
+
codegen("\
\n\
- \"; \n\
\n\
- return 0; \n\
+ err = bpf_object__open_subskeleton(s); \n\
+ if (err) \n\
+ goto err; \n\
+ \n\
+ return obj; \n\
err: \n\
- bpf_object__destroy_skeleton(s); \n\
- return -1; \n\
+ %1$s__destroy(obj); \n\
+ errno = -err; \n\
+ return NULL; \n\
} \n\
\n\
- #endif /* %s */ \n\
+ #ifdef __cplusplus \n\
+ struct %1$s *%1$s::open(const struct bpf_object *src) { return %1$s__open(src); }\n\
+ void %1$s::destroy(struct %1$s *skel) { %1$s__destroy(skel); }\n\
+ #endif /* __cplusplus */ \n\
+ \n\
+ #endif /* %2$s */ \n\
",
- header_guard);
+ obj_name, header_guard);
err = 0;
out:
bpf_object__close(obj);
@@ -579,6 +1568,47 @@ out:
return err;
}
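
A rough sketch of how a generated subskeleton is meant to be used, assuming a hypothetical library object mylib whose header came from "gen subskeleton"; unlike a skeleton, it is opened against an already loaded struct bpf_object that it does not own:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <bpf/libbpf.h>

#include "mylib.subskel.h"	/* hypothetical "bpftool gen subskeleton" output */

/* 'obj' is the final bpf_object that the mylib code was linked into. */
static int inspect_library_state(struct bpf_object *obj)
{
	struct mylib *sub;

	sub = mylib__open(obj);		/* resolves maps, progs and vars at runtime */
	if (!sub) {
		fprintf(stderr, "subskeleton open failed: %s\n", strerror(errno));
		return -1;
	}

	/* Each global variable is exposed as a pointer filled in by
	 * bpf_object__open_subskeleton(), e.g. *sub->data.some_counter
	 * (illustrative name). */

	mylib__destroy(sub);		/* frees the subskeleton only, not obj */
	return 0;
}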
+static int do_object(int argc, char **argv)
+{
+ struct bpf_linker *linker;
+ const char *output_file, *file;
+ int err = 0;
+
+ if (!REQ_ARGS(2)) {
+ usage();
+ return -1;
+ }
+
+ output_file = GET_ARG();
+
+ linker = bpf_linker__new(output_file, NULL);
+ if (!linker) {
+ p_err("failed to create BPF linker instance");
+ return -1;
+ }
+
+ while (argc) {
+ file = GET_ARG();
+
+ err = bpf_linker__add_file(linker, file, NULL);
+ if (err) {
+ p_err("failed to link '%s': %s (%d)", file, strerror(errno), errno);
+ goto out;
+ }
+ }
+
+ err = bpf_linker__finalize(linker);
+ if (err) {
+ p_err("failed to finalize ELF file: %s (%d)", strerror(errno), errno);
+ goto out;
+ }
+
+ err = 0;
+out:
+ bpf_linker__free(linker);
+ return err;
+}
+
static int do_help(int argc, char **argv)
{
if (json_output) {
@@ -587,19 +1617,713 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %1$s gen skeleton FILE\n"
- " %1$s gen help\n"
+ "Usage: %1$s %2$s object OUTPUT_FILE INPUT_FILE [INPUT_FILE...]\n"
+ " %1$s %2$s skeleton FILE [name OBJECT_NAME]\n"
+ " %1$s %2$s subskeleton FILE [name OBJECT_NAME]\n"
+ " %1$s %2$s min_core_btf INPUT OUTPUT OBJECT [OBJECT...]\n"
+ " %1$s %2$s help\n"
"\n"
- " " HELP_SPEC_OPTIONS "\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-L|--use-loader} }\n"
"",
- bin_name);
+ bin_name, "gen");
+
+ return 0;
+}
+
+static int btf_save_raw(const struct btf *btf, const char *path)
+{
+ const void *data;
+ FILE *f = NULL;
+ __u32 data_sz;
+ int err = 0;
+
+ data = btf__raw_data(btf, &data_sz);
+ if (!data)
+ return -ENOMEM;
+
+ f = fopen(path, "wb");
+ if (!f)
+ return -errno;
+
+ if (fwrite(data, 1, data_sz, f) != data_sz)
+ err = -errno;
+
+ fclose(f);
+ return err;
+}
+
+struct btfgen_info {
+ struct btf *src_btf;
+ struct btf *marked_btf; /* btf structure used to mark used types */
+};
+
+static size_t btfgen_hash_fn(long key, void *ctx)
+{
+ return key;
+}
+
+static bool btfgen_equal_fn(long k1, long k2, void *ctx)
+{
+ return k1 == k2;
+}
+
+static void btfgen_free_info(struct btfgen_info *info)
+{
+ if (!info)
+ return;
+
+ btf__free(info->src_btf);
+ btf__free(info->marked_btf);
+
+ free(info);
+}
+
+static struct btfgen_info *
+btfgen_new_info(const char *targ_btf_path)
+{
+ struct btfgen_info *info;
+ int err;
+
+ info = calloc(1, sizeof(*info));
+ if (!info)
+ return NULL;
+
+ info->src_btf = btf__parse(targ_btf_path, NULL);
+ if (!info->src_btf) {
+ err = -errno;
+ p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
+ goto err_out;
+ }
+
+ info->marked_btf = btf__parse(targ_btf_path, NULL);
+ if (!info->marked_btf) {
+ err = -errno;
+ p_err("failed parsing '%s' BTF file: %s", targ_btf_path, strerror(errno));
+ goto err_out;
+ }
+
+ return info;
+
+err_out:
+ btfgen_free_info(info);
+ errno = -err;
+ return NULL;
+}
+
+#define MARKED UINT32_MAX
+
+static void btfgen_mark_member(struct btfgen_info *info, int type_id, int idx)
+{
+ const struct btf_type *t = btf__type_by_id(info->marked_btf, type_id);
+ struct btf_member *m = btf_members(t) + idx;
+
+ m->name_off = MARKED;
+}
+
+static int
+btfgen_mark_type(struct btfgen_info *info, unsigned int type_id, bool follow_pointers)
+{
+ const struct btf_type *btf_type = btf__type_by_id(info->src_btf, type_id);
+ struct btf_type *cloned_type;
+ struct btf_param *param;
+ struct btf_array *array;
+ int err, i;
+
+ if (type_id == 0)
+ return 0;
+
+ /* mark type on cloned BTF as used */
+ cloned_type = (struct btf_type *) btf__type_by_id(info->marked_btf, type_id);
+ cloned_type->name_off = MARKED;
+
+ /* recursively mark other types needed by it */
+ switch (btf_kind(btf_type)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ break;
+ case BTF_KIND_PTR:
+ if (follow_pointers) {
+ err = btfgen_mark_type(info, btf_type->type, follow_pointers);
+ if (err)
+ return err;
+ }
+ break;
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_TYPEDEF:
+ err = btfgen_mark_type(info, btf_type->type, follow_pointers);
+ if (err)
+ return err;
+ break;
+ case BTF_KIND_ARRAY:
+ array = btf_array(btf_type);
+
+ /* mark array type */
+ err = btfgen_mark_type(info, array->type, follow_pointers);
+ /* mark array's index type */
+ err = err ? : btfgen_mark_type(info, array->index_type, follow_pointers);
+ if (err)
+ return err;
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ /* mark ret type */
+ err = btfgen_mark_type(info, btf_type->type, follow_pointers);
+ if (err)
+ return err;
+
+ /* mark parameters types */
+ param = btf_params(btf_type);
+ for (i = 0; i < btf_vlen(btf_type); i++) {
+ err = btfgen_mark_type(info, param->type, follow_pointers);
+ if (err)
+ return err;
+ param++;
+ }
+ break;
+ /* report any other kind, so we know it still needs to be handled */
+ default:
+ p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int btfgen_record_field_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ struct btf *btf = info->src_btf;
+ const struct btf_type *btf_type;
+ struct btf_member *btf_member;
+ struct btf_array *array;
+ unsigned int type_id = targ_spec->root_type_id;
+ int idx, err;
+
+ /* mark root type */
+ btf_type = btf__type_by_id(btf, type_id);
+ err = btfgen_mark_type(info, type_id, false);
+ if (err)
+ return err;
+
+ /* mark types for complex types (arrays, unions, structures) */
+ for (int i = 1; i < targ_spec->raw_len; i++) {
+ /* skip typedefs and mods */
+ while (btf_is_mod(btf_type) || btf_is_typedef(btf_type)) {
+ type_id = btf_type->type;
+ btf_type = btf__type_by_id(btf, type_id);
+ }
+
+ switch (btf_kind(btf_type)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ idx = targ_spec->raw_spec[i];
+ btf_member = btf_members(btf_type) + idx;
+
+ /* mark member */
+ btfgen_mark_member(info, type_id, idx);
+
+ /* mark member's type */
+ type_id = btf_member->type;
+ btf_type = btf__type_by_id(btf, type_id);
+ err = btfgen_mark_type(info, type_id, false);
+ if (err)
+ return err;
+ break;
+ case BTF_KIND_ARRAY:
+ array = btf_array(btf_type);
+ type_id = array->type;
+ btf_type = btf__type_by_id(btf, type_id);
+ break;
+ default:
+ p_err("unsupported kind: %s (%d)",
+ btf_kind_str(btf_type), btf_type->type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ *
+ * The `behind_ptr` argument is used to stop marking of composite types reached
+ * through a pointer. This way, we can keep BTF size in check while providing
+ * reasonable match semantics.
+ */
+static int btfgen_mark_type_match(struct btfgen_info *info, __u32 type_id, bool behind_ptr)
+{
+ const struct btf_type *btf_type;
+ struct btf *btf = info->src_btf;
+ struct btf_type *cloned_type;
+ int i, err;
+
+ if (type_id == 0)
+ return 0;
+
+ btf_type = btf__type_by_id(btf, type_id);
+ /* mark type on cloned BTF as used */
+ cloned_type = (struct btf_type *)btf__type_by_id(info->marked_btf, type_id);
+ cloned_type->name_off = MARKED;
+
+ switch (btf_kind(btf_type)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = btf_members(btf_type);
+ __u16 vlen = btf_vlen(btf_type);
+
+ if (behind_ptr)
+ break;
+
+ for (i = 0; i < vlen; i++, m++) {
+ /* mark member */
+ btfgen_mark_member(info, type_id, i);
+
+ /* mark member's type */
+ err = btfgen_mark_type_match(info, m->type, false);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_CONST:
+ case BTF_KIND_FWD:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ return btfgen_mark_type_match(info, btf_type->type, behind_ptr);
+ case BTF_KIND_PTR:
+ return btfgen_mark_type_match(info, btf_type->type, true);
+ case BTF_KIND_ARRAY: {
+ struct btf_array *array;
+
+ array = btf_array(btf_type);
+ /* mark array type */
+ err = btfgen_mark_type_match(info, array->type, false);
+ /* mark array's index type */
+ err = err ? : btfgen_mark_type_match(info, array->index_type, false);
+ if (err)
+ return err;
+ break;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ __u16 vlen = btf_vlen(btf_type);
+ struct btf_param *param;
+
+ /* mark ret type */
+ err = btfgen_mark_type_match(info, btf_type->type, false);
+ if (err)
+ return err;
+
+ /* mark parameters types */
+ param = btf_params(btf_type);
+ for (i = 0; i < vlen; i++) {
+ err = btfgen_mark_type_match(info, param->type, false);
+ if (err)
+ return err;
+ param++;
+ }
+ break;
+ }
+ /* report any other kind, so we know it still needs to be handled */
+ default:
+ p_err("unsupported kind: %s (%d)", btf_kind_str(btf_type), type_id);
+ return -EINVAL;
+ }
return 0;
}
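
To make the behind_ptr rule concrete, a small illustrative pair of types (not taken from this patch) and what a match-style marking of the outer struct ends up recording:

/* Illustration only. */
struct pointee { int a; };
struct embedded { int b; };
struct outer {
	struct pointee *p;	/* composite reached only through a pointer */
	struct embedded e;	/* composite reached directly */
};

/* btfgen_mark_type_match(info, <id of outer>, false) marks 'outer' and both
 * of its members; 'embedded' is marked together with its member 'b'; but
 * 'pointee' gets only its shell marked, because the recursion flips
 * behind_ptr to true at the BTF_KIND_PTR step and stops before walking the
 * members of composites reached that way. In the minimized BTF, 'pointee'
 * therefore ends up as a struct with no members. */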
+/* Mark types, members, and member types. Compared to btfgen_record_field_relo,
+ * this function does not rely on the target spec for inferring members, but
+ * uses the associated BTF.
+ */
+static int btfgen_record_type_match_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ return btfgen_mark_type_match(info, targ_spec->root_type_id, false);
+}
+
+static int btfgen_record_type_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ return btfgen_mark_type(info, targ_spec->root_type_id, true);
+}
+
+static int btfgen_record_enumval_relo(struct btfgen_info *info, struct bpf_core_spec *targ_spec)
+{
+ return btfgen_mark_type(info, targ_spec->root_type_id, false);
+}
+
+static int btfgen_record_reloc(struct btfgen_info *info, struct bpf_core_spec *res)
+{
+ switch (res->relo_kind) {
+ case BPF_CORE_FIELD_BYTE_OFFSET:
+ case BPF_CORE_FIELD_BYTE_SIZE:
+ case BPF_CORE_FIELD_EXISTS:
+ case BPF_CORE_FIELD_SIGNED:
+ case BPF_CORE_FIELD_LSHIFT_U64:
+ case BPF_CORE_FIELD_RSHIFT_U64:
+ return btfgen_record_field_relo(info, res);
+ case BPF_CORE_TYPE_ID_LOCAL: /* BPF_CORE_TYPE_ID_LOCAL doesn't require kernel BTF */
+ return 0;
+ case BPF_CORE_TYPE_ID_TARGET:
+ case BPF_CORE_TYPE_EXISTS:
+ case BPF_CORE_TYPE_SIZE:
+ return btfgen_record_type_relo(info, res);
+ case BPF_CORE_TYPE_MATCHES:
+ return btfgen_record_type_match_relo(info, res);
+ case BPF_CORE_ENUMVAL_EXISTS:
+ case BPF_CORE_ENUMVAL_VALUE:
+ return btfgen_record_enumval_relo(info, res);
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct bpf_core_cand_list *
+btfgen_find_cands(const struct btf *local_btf, const struct btf *targ_btf, __u32 local_id)
+{
+ const struct btf_type *local_type;
+ struct bpf_core_cand_list *cands = NULL;
+ struct bpf_core_cand local_cand = {};
+ size_t local_essent_len;
+ const char *local_name;
+ int err;
+
+ local_cand.btf = local_btf;
+ local_cand.id = local_id;
+
+ local_type = btf__type_by_id(local_btf, local_id);
+ if (!local_type) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ local_name = btf__name_by_offset(local_btf, local_type->name_off);
+ if (!local_name) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ local_essent_len = bpf_core_essential_name_len(local_name);
+
+ cands = calloc(1, sizeof(*cands));
+ if (!cands)
+ return NULL;
+
+ err = bpf_core_add_cands(&local_cand, local_essent_len, targ_btf, "vmlinux", 1, cands);
+ if (err)
+ goto err_out;
+
+ return cands;
+
+err_out:
+ bpf_core_free_cands(cands);
+ errno = -err;
+ return NULL;
+}
+
+/* Record relocation information for a single BPF object */
+static int btfgen_record_obj(struct btfgen_info *info, const char *obj_path)
+{
+ const struct btf_ext_info_sec *sec;
+ const struct bpf_core_relo *relo;
+ const struct btf_ext_info *seg;
+ struct hashmap_entry *entry;
+ struct hashmap *cand_cache = NULL;
+ struct btf_ext *btf_ext = NULL;
+ unsigned int relo_idx;
+ struct btf *btf = NULL;
+ size_t i;
+ int err;
+
+ btf = btf__parse(obj_path, &btf_ext);
+ if (!btf) {
+ err = -errno;
+ p_err("failed to parse BPF object '%s': %s", obj_path, strerror(errno));
+ return err;
+ }
+
+ if (!btf_ext) {
+ p_err("failed to parse BPF object '%s': section %s not found",
+ obj_path, BTF_EXT_ELF_SEC);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (btf_ext->core_relo_info.len == 0) {
+ err = 0;
+ goto out;
+ }
+
+ cand_cache = hashmap__new(btfgen_hash_fn, btfgen_equal_fn, NULL);
+ if (IS_ERR(cand_cache)) {
+ err = PTR_ERR(cand_cache);
+ goto out;
+ }
+
+ seg = &btf_ext->core_relo_info;
+ for_each_btf_ext_sec(seg, sec) {
+ for_each_btf_ext_rec(seg, sec, relo_idx, relo) {
+ struct bpf_core_spec specs_scratch[3] = {};
+ struct bpf_core_relo_res targ_res = {};
+ struct bpf_core_cand_list *cands = NULL;
+ const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off);
+
+ if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
+ !hashmap__find(cand_cache, relo->type_id, &cands)) {
+ cands = btfgen_find_cands(btf, info->src_btf, relo->type_id);
+ if (!cands) {
+ err = -errno;
+ goto out;
+ }
+
+ err = hashmap__set(cand_cache, relo->type_id, cands,
+ NULL, NULL);
+ if (err)
+ goto out;
+ }
+
+ err = bpf_core_calc_relo_insn(sec_name, relo, relo_idx, btf, cands,
+ specs_scratch, &targ_res);
+ if (err)
+ goto out;
+
+ /* specs_scratch[2] is the target spec */
+ err = btfgen_record_reloc(info, &specs_scratch[2]);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ btf__free(btf);
+ btf_ext__free(btf_ext);
+
+ if (!IS_ERR_OR_NULL(cand_cache)) {
+ hashmap__for_each_entry(cand_cache, entry, i) {
+ bpf_core_free_cands(entry->pvalue);
+ }
+ hashmap__free(cand_cache);
+ }
+
+ return err;
+}
+
+static int btfgen_remap_id(__u32 *type_id, void *ctx)
+{
+ unsigned int *ids = ctx;
+
+ *type_id = ids[*type_id];
+
+ return 0;
+}
+
+/* Generate BTF from relocation information previously recorded */
+static struct btf *btfgen_get_btf(struct btfgen_info *info)
+{
+ struct btf *btf_new = NULL;
+ unsigned int *ids = NULL;
+ unsigned int i, n = btf__type_cnt(info->marked_btf);
+ int err = 0;
+
+ btf_new = btf__new_empty();
+ if (!btf_new) {
+ err = -errno;
+ goto err_out;
+ }
+
+ ids = calloc(n, sizeof(*ids));
+ if (!ids) {
+ err = -errno;
+ goto err_out;
+ }
+
+ /* first pass: add all marked types to btf_new and add their new ids to the ids map */
+ for (i = 1; i < n; i++) {
+ const struct btf_type *cloned_type, *type;
+ const char *name;
+ int new_id;
+
+ cloned_type = btf__type_by_id(info->marked_btf, i);
+
+ if (cloned_type->name_off != MARKED)
+ continue;
+
+ type = btf__type_by_id(info->src_btf, i);
+
+ /* add members for struct and union */
+ if (btf_is_composite(type)) {
+ struct btf_member *cloned_m, *m;
+ unsigned short vlen;
+ int idx_src;
+
+ name = btf__str_by_offset(info->src_btf, type->name_off);
+
+ if (btf_is_struct(type))
+ err = btf__add_struct(btf_new, name, type->size);
+ else
+ err = btf__add_union(btf_new, name, type->size);
+
+ if (err < 0)
+ goto err_out;
+ new_id = err;
+
+ cloned_m = btf_members(cloned_type);
+ m = btf_members(type);
+ vlen = btf_vlen(cloned_type);
+ for (idx_src = 0; idx_src < vlen; idx_src++, cloned_m++, m++) {
+ /* add only members that are marked as used */
+ if (cloned_m->name_off != MARKED)
+ continue;
+
+ name = btf__str_by_offset(info->src_btf, m->name_off);
+ err = btf__add_field(btf_new, name, m->type,
+ btf_member_bit_offset(cloned_type, idx_src),
+ btf_member_bitfield_size(cloned_type, idx_src));
+ if (err < 0)
+ goto err_out;
+ }
+ } else {
+ err = btf__add_type(btf_new, info->src_btf, type);
+ if (err < 0)
+ goto err_out;
+ new_id = err;
+ }
+
+ /* add ID mapping */
+ ids[i] = new_id;
+ }
+
+ /* second pass: fix up type ids */
+ for (i = 1; i < btf__type_cnt(btf_new); i++) {
+ struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i);
+
+ err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids);
+ if (err)
+ goto err_out;
+ }
+
+ free(ids);
+ return btf_new;
+
+err_out:
+ btf__free(btf_new);
+ free(ids);
+ errno = -err;
+ return NULL;
+}
+
+/* Create minimized BTF file for a set of BPF objects.
+ *
+ * The BTFGen algorithm is divided into two main parts: (1) collect the
+ * BTF types that are involved in relocations and (2) generate the BTF
+ * object using the collected types.
+ *
+ * In order to collect the types involved in the relocations, we parse
+ * the BTF and BTF.ext sections of the BPF objects and use
+ * bpf_core_calc_relo_insn() to get the target specification, which
+ * indicates how the types and fields are used in a relocation.
+ *
+ * Types are recorded in different ways according to the kind of the
+ * relocation. For field-based relocations only the members that are
+ * actually used are saved in order to reduce the size of the generated
+ * BTF file. For type-based relocations empty struct / unions are
+ * generated and for enum-based relocations the whole type is saved.
+ *
+ * The second part of the algorithm generates the BTF object. It creates
+ * an empty BTF object and fills it with the types recorded in the
+ * previous step. This function takes care of only adding the structure
+ * and union members that were marked as used and it also fixes up the
+ * type IDs on the generated BTF object.
+ */
+static int minimize_btf(const char *src_btf, const char *dst_btf, const char *objspaths[])
+{
+ struct btfgen_info *info;
+ struct btf *btf_new = NULL;
+ int err, i;
+
+ info = btfgen_new_info(src_btf);
+ if (!info) {
+ err = -errno;
+ p_err("failed to allocate info structure: %s", strerror(errno));
+ goto out;
+ }
+
+ for (i = 0; objspaths[i] != NULL; i++) {
+ err = btfgen_record_obj(info, objspaths[i]);
+ if (err) {
+ p_err("error recording relocations for %s: %s", objspaths[i],
+ strerror(errno));
+ goto out;
+ }
+ }
+
+ btf_new = btfgen_get_btf(info);
+ if (!btf_new) {
+ err = -errno;
+ p_err("error generating BTF: %s", strerror(errno));
+ goto out;
+ }
+
+ err = btf_save_raw(btf_new, dst_btf);
+ if (err) {
+ p_err("error saving btf file: %s", strerror(errno));
+ goto out;
+ }
+
+out:
+ btf__free(btf_new);
+ btfgen_free_info(info);
+
+ return err;
+}
+
+static int do_min_core_btf(int argc, char **argv)
+{
+ const char *input, *output, **objs;
+ int i, err;
+
+ if (!REQ_ARGS(3)) {
+ usage();
+ return -1;
+ }
+
+ input = GET_ARG();
+ output = GET_ARG();
+
+ objs = (const char **) calloc(argc + 1, sizeof(*objs));
+ if (!objs) {
+ p_err("failed to allocate array for object names");
+ return -ENOMEM;
+ }
+
+ i = 0;
+ while (argc)
+ objs[i++] = GET_ARG();
+
+ err = minimize_btf(input, output, objs);
+ free(objs);
+ return err;
+}
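
For reference, a sketch of how the minimized BTF produced by min_core_btf is typically consumed on a target kernel without /sys/kernel/btf/vmlinux: libbpf's btf_custom_path open option points CO-RE relocation at the generated file. File names and paths below are illustrative, and libbpf 1.0 error conventions (NULL plus errno) are assumed:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <bpf/libbpf.h>

/* e.g. generated beforehand with:
 *   bpftool gen min_core_btf vmlinux-5.4.btf min_core.btf myprog.bpf.o
 */
static struct bpf_object *open_with_min_btf(void)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
			    .btf_custom_path = "/etc/myprog/min_core.btf");
	struct bpf_object *obj;

	obj = bpf_object__open_file("myprog.bpf.o", &opts);
	if (!obj)
		fprintf(stderr, "open failed: %s\n", strerror(errno));
	return obj;
}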
+
static const struct cmd cmds[] = {
- { "skeleton", do_skeleton },
- { "help", do_help },
+ { "object", do_object },
+ { "skeleton", do_skeleton },
+ { "subskeleton", do_subskeleton },
+ { "min_core_btf", do_min_core_btf},
+ { "help", do_help },
{ 0 }
};
diff --git a/tools/bpf/bpftool/iter.c b/tools/bpf/bpftool/iter.c
new file mode 100644
index 000000000000..9a1d2365a297
--- /dev/null
+++ b/tools/bpf/bpftool/iter.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (C) 2020 Facebook
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <errno.h>
+#include <unistd.h>
+#include <linux/err.h>
+#include <bpf/libbpf.h>
+
+#include "main.h"
+
+static int do_pin(int argc, char **argv)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts);
+ union bpf_iter_link_info linfo;
+ const char *objfile, *path;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ struct bpf_link *link;
+ int err = -1, map_fd = -1;
+
+ if (!REQ_ARGS(2))
+ usage();
+
+ objfile = GET_ARG();
+ path = GET_ARG();
+
+ /* optional arguments */
+ if (argc) {
+ if (is_prefix(*argv, "map")) {
+ NEXT_ARG();
+
+ if (!REQ_ARGS(2)) {
+ p_err("incorrect map spec");
+ return -1;
+ }
+
+ map_fd = map_parse_fd(&argc, &argv);
+ if (map_fd < 0)
+ return -1;
+
+ memset(&linfo, 0, sizeof(linfo));
+ linfo.map.map_fd = map_fd;
+ iter_opts.link_info = &linfo;
+ iter_opts.link_info_len = sizeof(linfo);
+ }
+ }
+
+ obj = bpf_object__open(objfile);
+ if (!obj) {
+ err = -errno;
+ p_err("can't open objfile %s", objfile);
+ goto close_map_fd;
+ }
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("can't load objfile %s", objfile);
+ goto close_obj;
+ }
+
+ prog = bpf_object__next_program(obj, NULL);
+ if (!prog) {
+ err = -errno;
+ p_err("can't find bpf program in objfile %s", objfile);
+ goto close_obj;
+ }
+
+ link = bpf_program__attach_iter(prog, &iter_opts);
+ if (!link) {
+ err = -errno;
+ p_err("attach_iter failed for program %s",
+ bpf_program__name(prog));
+ goto close_obj;
+ }
+
+ err = mount_bpffs_for_pin(path);
+ if (err)
+ goto close_link;
+
+ err = bpf_link__pin(link, path);
+ if (err) {
+ p_err("pin_iter failed for program %s to path %s",
+ bpf_program__name(prog), path);
+ goto close_link;
+ }
+
+close_link:
+ bpf_link__destroy(link);
+close_obj:
+ bpf_object__close(obj);
+close_map_fd:
+ if (map_fd >= 0)
+ close(map_fd);
+ return err;
+}
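
A brief sketch of what the pin buys you: once the iterator link is pinned, each open of the pinned path starts a fresh run of the BPF iterator program and read() streams its output. The path below is simply whatever was passed to "iter pin" and is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void dump_pinned_iter(const char *path)	/* e.g. "/sys/fs/bpf/my_iter" */
{
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);	/* each open begins a new iteration */
	if (fd < 0) {
		perror("open");
		return;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
}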
+
+static int do_help(int argc, char **argv)
+{
+ fprintf(stderr,
+ "Usage: %1$s %2$s pin OBJ PATH [map MAP]\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_MAP "\n"
+ " " HELP_SPEC_OPTIONS " }\n"
+ "",
+ bin_name, "iter");
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "help", do_help },
+ { "pin", do_pin },
+ { 0 }
+};
+
+int do_iter(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/jit_disasm.c b/tools/bpf/bpftool/jit_disasm.c
index f7f5885aa3ba..7b8d9ec89ebd 100644
--- a/tools/bpf/bpftool/jit_disasm.c
+++ b/tools/bpf/bpftool/jit_disasm.c
@@ -11,44 +11,159 @@
* Licensed under the GNU General Public License, version 2.0 (GPLv2)
*/
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>
-#include <stdio.h>
#include <stdlib.h>
-#include <assert.h>
#include <unistd.h>
#include <string.h>
-#include <bfd.h>
-#include <dis-asm.h>
#include <sys/stat.h>
#include <limits.h>
#include <bpf/libbpf.h>
+#ifdef HAVE_LLVM_SUPPORT
+#include <llvm-c/Core.h>
+#include <llvm-c/Disassembler.h>
+#include <llvm-c/Target.h>
+#include <llvm-c/TargetMachine.h>
+#endif
+
+#ifdef HAVE_LIBBFD_SUPPORT
+#include <bfd.h>
+#include <dis-asm.h>
+#include <tools/dis-asm-compat.h>
+#endif
+
#include "json_writer.h"
#include "main.h"
-static void get_exec_path(char *tpath, size_t size)
+static int oper_count;
+
+#ifdef HAVE_LLVM_SUPPORT
+#define DISASM_SPACER
+
+typedef LLVMDisasmContextRef disasm_ctx_t;
+
+static int printf_json(char *s)
+{
+ s = strtok(s, " \t");
+ jsonw_string_field(json_wtr, "operation", s);
+
+ jsonw_name(json_wtr, "operands");
+ jsonw_start_array(json_wtr);
+ oper_count = 1;
+
+ while ((s = strtok(NULL, " \t,()")) != 0) {
+ jsonw_string(json_wtr, s);
+ oper_count++;
+ }
+ return 0;
+}
+
+/* This callback to set the ref_type is necessary to have the LLVM disassembler
+ * print PC-relative addresses instead of byte offsets for branch instruction
+ * targets.
+ */
+static const char *
+symbol_lookup_callback(__maybe_unused void *disasm_info,
+ __maybe_unused uint64_t ref_value,
+ uint64_t *ref_type, __maybe_unused uint64_t ref_PC,
+ __maybe_unused const char **ref_name)
+{
+ *ref_type = LLVMDisassembler_ReferenceType_InOut_None;
+ return NULL;
+}
+
+static int
+init_context(disasm_ctx_t *ctx, const char *arch,
+ __maybe_unused const char *disassembler_options,
+ __maybe_unused unsigned char *image, __maybe_unused ssize_t len)
+{
+ char *triple;
+
+ if (arch)
+ triple = LLVMNormalizeTargetTriple(arch);
+ else
+ triple = LLVMGetDefaultTargetTriple();
+ if (!triple) {
+ p_err("Failed to retrieve triple");
+ return -1;
+ }
+ *ctx = LLVMCreateDisasm(triple, NULL, 0, NULL, symbol_lookup_callback);
+ LLVMDisposeMessage(triple);
+
+ if (!*ctx) {
+ p_err("Failed to create disassembler");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void destroy_context(disasm_ctx_t *ctx)
+{
+ LLVMDisposeMessage(*ctx);
+}
+
+static int
+disassemble_insn(disasm_ctx_t *ctx, unsigned char *image, ssize_t len, int pc)
+{
+ char buf[256];
+ int count;
+
+ count = LLVMDisasmInstruction(*ctx, image + pc, len - pc, pc,
+ buf, sizeof(buf));
+ if (json_output)
+ printf_json(buf);
+ else
+ printf("%s", buf);
+
+ return count;
+}
+
+int disasm_init(void)
+{
+ LLVMInitializeAllTargetInfos();
+ LLVMInitializeAllTargetMCs();
+ LLVMInitializeAllDisassemblers();
+ return 0;
+}
+#endif /* HAVE_LLVM_SUPPORT */
+
+#ifdef HAVE_LIBBFD_SUPPORT
+#define DISASM_SPACER "\t"
+
+typedef struct {
+ struct disassemble_info *info;
+ disassembler_ftype disassemble;
+ bfd *bfdf;
+} disasm_ctx_t;
+
+static int get_exec_path(char *tpath, size_t size)
{
const char *path = "/proc/self/exe";
ssize_t len;
len = readlink(path, tpath, size - 1);
- assert(len > 0);
+ if (len <= 0)
+ return -1;
+
tpath[len] = 0;
+
+ return 0;
}
-static int oper_count;
-static int fprintf_json(void *out, const char *fmt, ...)
+static int printf_json(void *out, const char *fmt, va_list ap)
{
- va_list ap;
char *s;
+ int err;
- va_start(ap, fmt);
- if (vasprintf(&s, fmt, ap) < 0)
+ err = vasprintf(&s, fmt, ap);
+ if (err < 0)
return -1;
- va_end(ap);
if (!oper_count) {
int i;
@@ -72,37 +187,72 @@ static int fprintf_json(void *out, const char *fmt, ...)
return 0;
}
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch, const char *disassembler_options,
- const struct btf *btf,
- const struct bpf_prog_linfo *prog_linfo,
- __u64 func_ksym, unsigned int func_idx,
- bool linum)
+static int fprintf_json(void *out, const char *fmt, ...)
{
- const struct bpf_line_info *linfo = NULL;
- disassembler_ftype disassemble;
- struct disassemble_info info;
- unsigned int nr_skip = 0;
- int count, i, pc = 0;
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = printf_json(out, fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
+static int fprintf_json_styled(void *out,
+ enum disassembler_style style __maybe_unused,
+ const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = printf_json(out, fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
+static int init_context(disasm_ctx_t *ctx, const char *arch,
+ const char *disassembler_options,
+ unsigned char *image, ssize_t len)
+{
+ struct disassemble_info *info;
char tpath[PATH_MAX];
bfd *bfdf;
- if (!len)
- return;
-
memset(tpath, 0, sizeof(tpath));
- get_exec_path(tpath, sizeof(tpath));
+ if (get_exec_path(tpath, sizeof(tpath))) {
+ p_err("failed to create disassembler (get_exec_path)");
+ return -1;
+ }
+
+ ctx->bfdf = bfd_openr(tpath, NULL);
+ if (!ctx->bfdf) {
+ p_err("failed to create disassembler (bfd_openr)");
+ return -1;
+ }
+ if (!bfd_check_format(ctx->bfdf, bfd_object)) {
+ p_err("failed to create disassembler (bfd_check_format)");
+ goto err_close;
+ }
+ bfdf = ctx->bfdf;
- bfdf = bfd_openr(tpath, NULL);
- assert(bfdf);
- assert(bfd_check_format(bfdf, bfd_object));
+ ctx->info = malloc(sizeof(struct disassemble_info));
+ if (!ctx->info) {
+ p_err("mem alloc failed");
+ goto err_close;
+ }
+ info = ctx->info;
if (json_output)
- init_disassemble_info(&info, stdout,
- (fprintf_ftype) fprintf_json);
+ init_disassemble_info_compat(info, stdout,
+ (fprintf_ftype) fprintf_json,
+ fprintf_json_styled);
else
- init_disassemble_info(&info, stdout,
- (fprintf_ftype) fprintf);
+ init_disassemble_info_compat(info, stdout,
+ (fprintf_ftype) fprintf,
+ fprintf_styled);
/* Update architecture info for offload. */
if (arch) {
@@ -112,28 +262,77 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
bfdf->arch_info = inf;
} else {
p_err("No libbfd support for %s", arch);
- return;
+ goto err_free;
}
}
- info.arch = bfd_get_arch(bfdf);
- info.mach = bfd_get_mach(bfdf);
+ info->arch = bfd_get_arch(bfdf);
+ info->mach = bfd_get_mach(bfdf);
if (disassembler_options)
- info.disassembler_options = disassembler_options;
- info.buffer = image;
- info.buffer_length = len;
+ info->disassembler_options = disassembler_options;
+ info->buffer = image;
+ info->buffer_length = len;
- disassemble_init_for_target(&info);
+ disassemble_init_for_target(info);
#ifdef DISASM_FOUR_ARGS_SIGNATURE
- disassemble = disassembler(info.arch,
- bfd_big_endian(bfdf),
- info.mach,
- bfdf);
+ ctx->disassemble = disassembler(info->arch,
+ bfd_big_endian(bfdf),
+ info->mach,
+ bfdf);
#else
- disassemble = disassembler(bfdf);
+ ctx->disassemble = disassembler(bfdf);
#endif
- assert(disassemble);
+ if (!ctx->disassemble) {
+ p_err("failed to create disassembler");
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ free(info);
+err_close:
+ bfd_close(ctx->bfdf);
+ return -1;
+}
+
+static void destroy_context(disasm_ctx_t *ctx)
+{
+ free(ctx->info);
+ bfd_close(ctx->bfdf);
+}
+
+static int
+disassemble_insn(disasm_ctx_t *ctx, __maybe_unused unsigned char *image,
+ __maybe_unused ssize_t len, int pc)
+{
+ return ctx->disassemble(pc, ctx->info);
+}
+
+int disasm_init(void)
+{
+ bfd_init();
+ return 0;
+}
+#endif /* HAVE_LIBBFD_SUPPORT */
+
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum)
+{
+ const struct bpf_line_info *linfo = NULL;
+ unsigned int nr_skip = 0;
+ int count, i, pc = 0;
+ disasm_ctx_t ctx;
+
+ if (!len)
+ return -1;
+
+ if (init_context(&ctx, arch, disassembler_options, image, len))
+ return -1;
if (json_output)
jsonw_start_array(json_wtr);
@@ -158,10 +357,11 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
if (linfo)
btf_dump_linfo_plain(btf, linfo, "; ",
linum);
- printf("%4x:\t", pc);
+ printf("%4x:" DISASM_SPACER, pc);
}
- count = disassemble(pc, &info);
+ count = disassemble_insn(&ctx, image, len, pc);
+
if (json_output) {
/* Operand array, was started in fprintf_json. Before
* that, make sure we have a _null_ value if no operand
@@ -197,11 +397,7 @@ void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
if (json_output)
jsonw_end_array(json_wtr);
- bfd_close(bfdf);
-}
+ destroy_context(&ctx);
-int disasm_init(void)
-{
- bfd_init();
return 0;
}
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index 86501cd3c763..be379613d118 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -75,14 +75,11 @@ static void jsonw_puts(json_writer_t *self, const char *str)
fputs("\\b", self->out);
break;
case '\\':
- fputs("\\n", self->out);
+ fputs("\\\\", self->out);
break;
case '"':
fputs("\\\"", self->out);
break;
- case '\'':
- fputs("\\\'", self->out);
- break;
default:
putc(*str, self->out);
}
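
The effect of the escaping fix above, sketched with the writer API (illustration only, not part of the patch): a literal backslash in a string used to be written out as the escape sequence \n, silently turning it into a newline, and single quotes were escaped as \', which is not a valid JSON escape at all. Both are now handled correctly:

#include <stdio.h>
#include "json_writer.h"

int main(void)
{
	json_writer_t *w = jsonw_new(stdout);

	jsonw_start_object(w);
	jsonw_string_field(w, "path", "C:\\temp");	/* was "C:\ntemp", now "C:\\temp" */
	jsonw_string_field(w, "quote", "it's fine");	/* no longer escaped as \' */
	jsonw_end_object(w);
	jsonw_destroy(&w);
	return 0;
}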
@@ -119,6 +116,12 @@ void jsonw_pretty(json_writer_t *self, bool on)
self->pretty = on;
}
+void jsonw_reset(json_writer_t *self)
+{
+ assert(self->depth == 0);
+ self->sep = '\0';
+}
+
/* Basic blocks */
static void jsonw_begin(json_writer_t *self, int c)
{
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index 35cf1f00f96c..5aaffd3b837b 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -14,6 +14,7 @@
#include <stdbool.h>
#include <stdint.h>
#include <stdarg.h>
+#include <stdio.h>
#include <linux/compiler.h>
/* Opaque class structure */
@@ -27,6 +28,9 @@ void jsonw_destroy(json_writer_t **self_p);
/* Cause output to have pretty whitespace */
void jsonw_pretty(json_writer_t *self, bool on);
+/* Reset separator to create new JSON */
+void jsonw_reset(json_writer_t *self);
+
/* Add property name */
void jsonw_name(json_writer_t *self, const char *name);
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
new file mode 100644
index 000000000000..d98dbc50cf4c
--- /dev/null
+++ b/tools/bpf/bpftool/link.c
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2020 Facebook */
+
+#include <errno.h>
+#include <linux/err.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_arp.h>
+#include <net/if.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/hashmap.h>
+
+#include "json_writer.h"
+#include "main.h"
+
+static struct hashmap *link_table;
+
+static int link_parse_fd(int *argc, char ***argv)
+{
+ int fd;
+
+ if (is_prefix(**argv, "id")) {
+ unsigned int id;
+ char *endptr;
+
+ NEXT_ARGP();
+
+ id = strtoul(**argv, &endptr, 0);
+ if (*endptr) {
+ p_err("can't parse %s as ID", **argv);
+ return -1;
+ }
+ NEXT_ARGP();
+
+ fd = bpf_link_get_fd_by_id(id);
+ if (fd < 0)
+ p_err("failed to get link with ID %d: %s", id, strerror(errno));
+ return fd;
+ } else if (is_prefix(**argv, "pinned")) {
+ char *path;
+
+ NEXT_ARGP();
+
+ path = **argv;
+ NEXT_ARGP();
+
+ return open_obj_pinned_any(path, BPF_OBJ_LINK);
+ }
+
+ p_err("expected 'id' or 'pinned', got: '%s'?", **argv);
+ return -1;
+}
+
+static void
+show_link_header_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ const char *link_type_str;
+
+ jsonw_uint_field(wtr, "id", info->id);
+ link_type_str = libbpf_bpf_link_type_str(info->type);
+ if (link_type_str)
+ jsonw_string_field(wtr, "type", link_type_str);
+ else
+ jsonw_uint_field(wtr, "type", info->type);
+
+ jsonw_uint_field(json_wtr, "prog_id", info->prog_id);
+}
+
+static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
+{
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+ if (attach_type_str)
+ jsonw_string_field(wtr, "attach_type", attach_type_str);
+ else
+ jsonw_uint_field(wtr, "attach_type", attach_type);
+}
+
+static bool is_iter_map_target(const char *target_name)
+{
+ return strcmp(target_name, "bpf_map_elem") == 0 ||
+ strcmp(target_name, "bpf_sk_storage_map") == 0;
+}
+
+static bool is_iter_cgroup_target(const char *target_name)
+{
+ return strcmp(target_name, "cgroup") == 0;
+}
+
+static const char *cgroup_order_string(__u32 order)
+{
+ switch (order) {
+ case BPF_CGROUP_ITER_ORDER_UNSPEC:
+ return "order_unspec";
+ case BPF_CGROUP_ITER_SELF_ONLY:
+ return "self_only";
+ case BPF_CGROUP_ITER_DESCENDANTS_PRE:
+ return "descendants_pre";
+ case BPF_CGROUP_ITER_DESCENDANTS_POST:
+ return "descendants_post";
+ case BPF_CGROUP_ITER_ANCESTORS_UP:
+ return "ancestors_up";
+ default: /* won't happen */
+ return "unknown";
+ }
+}
+
+static bool is_iter_task_target(const char *target_name)
+{
+ return strcmp(target_name, "task") == 0 ||
+ strcmp(target_name, "task_file") == 0 ||
+ strcmp(target_name, "task_vma") == 0;
+}
+
+static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ const char *target_name = u64_to_ptr(info->iter.target_name);
+
+ jsonw_string_field(wtr, "target_name", target_name);
+
+ if (is_iter_map_target(target_name))
+ jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
+ else if (is_iter_task_target(target_name)) {
+ if (info->iter.task.tid)
+ jsonw_uint_field(wtr, "tid", info->iter.task.tid);
+ else if (info->iter.task.pid)
+ jsonw_uint_field(wtr, "pid", info->iter.task.pid);
+ }
+
+ if (is_iter_cgroup_target(target_name)) {
+ jsonw_lluint_field(wtr, "cgroup_id", info->iter.cgroup.cgroup_id);
+ jsonw_string_field(wtr, "order",
+ cgroup_order_string(info->iter.cgroup.order));
+ }
+}
+
+void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr)
+{
+ jsonw_uint_field(json_wtr, "pf",
+ info->netfilter.pf);
+ jsonw_uint_field(json_wtr, "hook",
+ info->netfilter.hooknum);
+ jsonw_int_field(json_wtr, "prio",
+ info->netfilter.priority);
+ jsonw_uint_field(json_wtr, "flags",
+ info->netfilter.flags);
+}
+
+static int get_prog_info(int prog_id, struct bpf_prog_info *info)
+{
+ __u32 len = sizeof(*info);
+ int err, prog_fd;
+
+ prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (prog_fd < 0)
+ return prog_fd;
+
+ memset(info, 0, sizeof(*info));
+ err = bpf_prog_get_info_by_fd(prog_fd, info, &len);
+ if (err)
+ p_err("can't get prog info: %s", strerror(errno));
+ close(prog_fd);
+ return err;
+}
+
+static int show_link_close_json(int fd, struct bpf_link_info *info)
+{
+ struct bpf_prog_info prog_info;
+ const char *prog_type_str;
+ int err;
+
+ jsonw_start_object(json_wtr);
+
+ show_link_header_json(info, json_wtr);
+
+ switch (info->type) {
+ case BPF_LINK_TYPE_RAW_TRACEPOINT:
+ jsonw_string_field(json_wtr, "tp_name",
+ u64_to_ptr(info->raw_tracepoint.tp_name));
+ break;
+ case BPF_LINK_TYPE_TRACING:
+ err = get_prog_info(info->prog_id, &prog_info);
+ if (err)
+ return err;
+
+ prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (prog_type_str)
+ jsonw_string_field(json_wtr, "prog_type", prog_type_str);
+ else
+ jsonw_uint_field(json_wtr, "prog_type", prog_info.type);
+
+ show_link_attach_type_json(info->tracing.attach_type,
+ json_wtr);
+ break;
+ case BPF_LINK_TYPE_CGROUP:
+ jsonw_lluint_field(json_wtr, "cgroup_id",
+ info->cgroup.cgroup_id);
+ show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
+ break;
+ case BPF_LINK_TYPE_ITER:
+ show_iter_json(info, json_wtr);
+ break;
+ case BPF_LINK_TYPE_NETNS:
+ jsonw_uint_field(json_wtr, "netns_ino",
+ info->netns.netns_ino);
+ show_link_attach_type_json(info->netns.attach_type, json_wtr);
+ break;
+ case BPF_LINK_TYPE_NETFILTER:
+ netfilter_dump_json(info, json_wtr);
+ break;
+
+ default:
+ break;
+ }
+
+ if (!hashmap__empty(link_table)) {
+ struct hashmap_entry *entry;
+
+ jsonw_name(json_wtr, "pinned");
+ jsonw_start_array(json_wtr);
+ hashmap__for_each_key_entry(link_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
+ jsonw_end_array(json_wtr);
+ }
+
+ emit_obj_refs_json(refs_table, info->id, json_wtr);
+
+ jsonw_end_object(json_wtr);
+
+ return 0;
+}
+
+static void show_link_header_plain(struct bpf_link_info *info)
+{
+ const char *link_type_str;
+
+ printf("%u: ", info->id);
+ link_type_str = libbpf_bpf_link_type_str(info->type);
+ if (link_type_str)
+ printf("%s ", link_type_str);
+ else
+ printf("type %u ", info->type);
+
+ printf("prog %u ", info->prog_id);
+}
+
+static void show_link_attach_type_plain(__u32 attach_type)
+{
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(attach_type);
+ if (attach_type_str)
+ printf("attach_type %s ", attach_type_str);
+ else
+ printf("attach_type %u ", attach_type);
+}
+
+static void show_iter_plain(struct bpf_link_info *info)
+{
+ const char *target_name = u64_to_ptr(info->iter.target_name);
+
+ printf("target_name %s ", target_name);
+
+ if (is_iter_map_target(target_name))
+ printf("map_id %u ", info->iter.map.map_id);
+ else if (is_iter_task_target(target_name)) {
+ if (info->iter.task.tid)
+ printf("tid %u ", info->iter.task.tid);
+ else if (info->iter.task.pid)
+ printf("pid %u ", info->iter.task.pid);
+ }
+
+ if (is_iter_cgroup_target(target_name)) {
+ printf("cgroup_id %llu ", info->iter.cgroup.cgroup_id);
+ printf("order %s ",
+ cgroup_order_string(info->iter.cgroup.order));
+ }
+}
+
+static const char * const pf2name[] = {
+ [NFPROTO_INET] = "inet",
+ [NFPROTO_IPV4] = "ip",
+ [NFPROTO_ARP] = "arp",
+ [NFPROTO_NETDEV] = "netdev",
+ [NFPROTO_BRIDGE] = "bridge",
+ [NFPROTO_IPV6] = "ip6",
+};
+
+static const char * const inethook2name[] = {
+ [NF_INET_PRE_ROUTING] = "prerouting",
+ [NF_INET_LOCAL_IN] = "input",
+ [NF_INET_FORWARD] = "forward",
+ [NF_INET_LOCAL_OUT] = "output",
+ [NF_INET_POST_ROUTING] = "postrouting",
+};
+
+static const char * const arphook2name[] = {
+ [NF_ARP_IN] = "input",
+ [NF_ARP_OUT] = "output",
+};
+
+void netfilter_dump_plain(const struct bpf_link_info *info)
+{
+ const char *hookname = NULL, *pfname = NULL;
+ unsigned int hook = info->netfilter.hooknum;
+ unsigned int pf = info->netfilter.pf;
+
+ if (pf < ARRAY_SIZE(pf2name))
+ pfname = pf2name[pf];
+
+ switch (pf) {
+ case NFPROTO_BRIDGE: /* bridge shares numbers with enum nf_inet_hooks */
+ case NFPROTO_IPV4:
+ case NFPROTO_IPV6:
+ case NFPROTO_INET:
+ if (hook < ARRAY_SIZE(inethook2name))
+ hookname = inethook2name[hook];
+ break;
+ case NFPROTO_ARP:
+ if (hook < ARRAY_SIZE(arphook2name))
+ hookname = arphook2name[hook];
+ break;
+ default:
+ break;
+ }
+
+ if (pfname)
+ printf("\n\t%s", pfname);
+ else
+ printf("\n\tpf: %d", pf);
+
+ if (hookname)
+ printf(" %s", hookname);
+ else
+ printf(", hook %u,", hook);
+
+ printf(" prio %d", info->netfilter.priority);
+
+ if (info->netfilter.flags)
+ printf(" flags 0x%x", info->netfilter.flags);
+}
+
+static int show_link_close_plain(int fd, struct bpf_link_info *info)
+{
+ struct bpf_prog_info prog_info;
+ const char *prog_type_str;
+ int err;
+
+ show_link_header_plain(info);
+
+ switch (info->type) {
+ case BPF_LINK_TYPE_RAW_TRACEPOINT:
+ printf("\n\ttp '%s' ",
+ (const char *)u64_to_ptr(info->raw_tracepoint.tp_name));
+ break;
+ case BPF_LINK_TYPE_TRACING:
+ err = get_prog_info(info->prog_id, &prog_info);
+ if (err)
+ return err;
+
+ prog_type_str = libbpf_bpf_prog_type_str(prog_info.type);
+ /* libbpf will return NULL for variants unknown to it. */
+ if (prog_type_str)
+ printf("\n\tprog_type %s ", prog_type_str);
+ else
+ printf("\n\tprog_type %u ", prog_info.type);
+
+ show_link_attach_type_plain(info->tracing.attach_type);
+ break;
+ case BPF_LINK_TYPE_CGROUP:
+ printf("\n\tcgroup_id %zu ", (size_t)info->cgroup.cgroup_id);
+ show_link_attach_type_plain(info->cgroup.attach_type);
+ break;
+ case BPF_LINK_TYPE_ITER:
+ show_iter_plain(info);
+ break;
+ case BPF_LINK_TYPE_NETNS:
+ printf("\n\tnetns_ino %u ", info->netns.netns_ino);
+ show_link_attach_type_plain(info->netns.attach_type);
+ break;
+ case BPF_LINK_TYPE_NETFILTER:
+ netfilter_dump_plain(info);
+ break;
+ default:
+ break;
+ }
+
+ if (!hashmap__empty(link_table)) {
+ struct hashmap_entry *entry;
+
+ hashmap__for_each_key_entry(link_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
+ }
+ emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
+
+ printf("\n");
+
+ return 0;
+}
+
+static int do_show_link(int fd)
+{
+ struct bpf_link_info info;
+ __u32 len = sizeof(info);
+ char buf[256];
+ int err;
+
+ memset(&info, 0, sizeof(info));
+again:
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ p_err("can't get link info: %s",
+ strerror(errno));
+ close(fd);
+ return err;
+ }
+ if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
+ !info.raw_tracepoint.tp_name) {
+ info.raw_tracepoint.tp_name = (unsigned long)&buf;
+ info.raw_tracepoint.tp_name_len = sizeof(buf);
+ goto again;
+ }
+ if (info.type == BPF_LINK_TYPE_ITER &&
+ !info.iter.target_name) {
+ info.iter.target_name = (unsigned long)&buf;
+ info.iter.target_name_len = sizeof(buf);
+ goto again;
+ }
+
+ if (json_output)
+ show_link_close_json(fd, &info);
+ else
+ show_link_close_plain(fd, &info);
+
+ close(fd);
+ return 0;
+}
+
+static int do_show(int argc, char **argv)
+{
+ __u32 id = 0;
+ int err, fd;
+
+ if (show_pinned) {
+ link_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(link_table)) {
+ p_err("failed to create hashmap for pinned paths");
+ return -1;
+ }
+ build_pinned_obj_table(link_table, BPF_OBJ_LINK);
+ }
+ build_obj_refs_table(&refs_table, BPF_OBJ_LINK);
+
+ if (argc == 2) {
+ fd = link_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return fd;
+ return do_show_link(fd);
+ }
+
+ if (argc)
+ return BAD_ARG();
+
+ if (json_output)
+ jsonw_start_array(json_wtr);
+ while (true) {
+ err = bpf_link_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ break;
+ p_err("can't get next link: %s%s", strerror(errno),
+ errno == EINVAL ? " -- kernel too old?" : "");
+ break;
+ }
+
+ fd = bpf_link_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue;
+ p_err("can't get link by id (%u): %s",
+ id, strerror(errno));
+ break;
+ }
+
+ err = do_show_link(fd);
+ if (err)
+ break;
+ }
+ if (json_output)
+ jsonw_end_array(json_wtr);
+
+ delete_obj_refs_table(refs_table);
+
+ if (show_pinned)
+ delete_pinned_obj_table(link_table);
+
+ return errno == ENOENT ? 0 : -1;
+}
+
+static int do_pin(int argc, char **argv)
+{
+ int err;
+
+ err = do_pin_any(argc, argv, link_parse_fd);
+ if (!err && json_output)
+ jsonw_null(json_wtr);
+ return err;
+}
+
+static int do_detach(int argc, char **argv)
+{
+ int err, fd;
+
+ if (argc != 2) {
+ p_err("link specifier is invalid or missing\n");
+ return 1;
+ }
+
+ fd = link_parse_fd(&argc, &argv);
+ if (fd < 0)
+ return 1;
+
+ err = bpf_link_detach(fd);
+ if (err)
+ err = -errno;
+ close(fd);
+ if (err) {
+ p_err("failed link detach: %s", strerror(-err));
+ return 1;
+ }
+
+ if (json_output)
+ jsonw_null(json_wtr);
+
+ return 0;
+}
+
+static int do_help(int argc, char **argv)
+{
+ if (json_output) {
+ jsonw_null(json_wtr);
+ return 0;
+ }
+
+ fprintf(stderr,
+ "Usage: %1$s %2$s { show | list } [LINK]\n"
+ " %1$s %2$s pin LINK FILE\n"
+ " %1$s %2$s detach LINK\n"
+ " %1$s %2$s help\n"
+ "\n"
+ " " HELP_SPEC_LINK "\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-f|--bpffs} | {-n|--nomount} }\n"
+ "",
+ bin_name, argv[-2]);
+
+ return 0;
+}
+
+static const struct cmd cmds[] = {
+ { "show", do_show },
+ { "list", do_show },
+ { "help", do_help },
+ { "pin", do_pin },
+ { "detach", do_detach },
+ { 0 }
+};
+
+int do_link(int argc, char **argv)
+{
+ return cmd_select(cmds, argc, argv, do_help);
+}
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 466c269eabdd..08d0ac543c67 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -10,6 +10,8 @@
#include <string.h>
#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
#include "main.h"
@@ -28,8 +30,9 @@ bool show_pinned;
bool block_mount;
bool verifier_logs;
bool relaxed_maps;
-struct pinned_obj_table prog_table;
-struct pinned_obj_table map_table;
+bool use_loader;
+struct btf *base_btf;
+struct hashmap *refs_table;
static void __noreturn clean_and_exit(int i)
{
@@ -58,23 +61,125 @@ static int do_help(int argc, char **argv)
" %s batch file FILE\n"
" %s version\n"
"\n"
- " OBJECT := { prog | map | cgroup | perf | net | feature | btf | gen | struct_ops }\n"
- " " HELP_SPEC_OPTIONS "\n"
+ " OBJECT := { prog | map | link | cgroup | perf | net | feature | btf | gen | struct_ops | iter }\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-V|--version} }\n"
"",
bin_name, bin_name, bin_name);
return 0;
}
+static int do_batch(int argc, char **argv);
+static int do_version(int argc, char **argv);
+
+static const struct cmd commands[] = {
+ { "help", do_help },
+ { "batch", do_batch },
+ { "prog", do_prog },
+ { "map", do_map },
+ { "link", do_link },
+ { "cgroup", do_cgroup },
+ { "perf", do_perf },
+ { "net", do_net },
+ { "feature", do_feature },
+ { "btf", do_btf },
+ { "gen", do_gen },
+ { "struct_ops", do_struct_ops },
+ { "iter", do_iter },
+ { "version", do_version },
+ { 0 }
+};
+
+#ifndef BPFTOOL_VERSION
+/* bpftool's major and minor version numbers are aligned with libbpf's. There is
+ * an offset of 6 for the major version number, because bpftool's version was
+ * higher than libbpf's when we adopted this scheme. The patch number remains
+ * at 0 for now. Set BPFTOOL_VERSION to override.
+ */
+#define BPFTOOL_MAJOR_VERSION (LIBBPF_MAJOR_VERSION + 6)
+#define BPFTOOL_MINOR_VERSION LIBBPF_MINOR_VERSION
+#define BPFTOOL_PATCH_VERSION 0
+#endif
+
+static void
+print_feature(const char *feature, bool state, unsigned int *nb_features)
+{
+ if (state) {
+ printf("%s %s", *nb_features ? "," : "", feature);
+ *nb_features = *nb_features + 1;
+ }
+}
+
static int do_version(int argc, char **argv)
{
+#ifdef HAVE_LIBBFD_SUPPORT
+ const bool has_libbfd = true;
+#else
+ const bool has_libbfd = false;
+#endif
+#ifdef HAVE_LLVM_SUPPORT
+ const bool has_llvm = true;
+#else
+ const bool has_llvm = false;
+#endif
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+ const bool has_skeletons = false;
+#else
+ const bool has_skeletons = true;
+#endif
+ bool bootstrap = false;
+ int i;
+
+ for (i = 0; commands[i].cmd; i++) {
+ if (!strcmp(commands[i].cmd, "prog")) {
+ /* Assume we run a bootstrap version if "bpftool prog"
+ * is not available.
+ */
+ bootstrap = !commands[i].func;
+ break;
+ }
+ }
+
if (json_output) {
- jsonw_start_object(json_wtr);
+ jsonw_start_object(json_wtr); /* root object */
+
jsonw_name(json_wtr, "version");
+#ifdef BPFTOOL_VERSION
jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
- jsonw_end_object(json_wtr);
+#else
+ jsonw_printf(json_wtr, "\"%d.%d.%d\"", BPFTOOL_MAJOR_VERSION,
+ BPFTOOL_MINOR_VERSION, BPFTOOL_PATCH_VERSION);
+#endif
+ jsonw_name(json_wtr, "libbpf_version");
+ jsonw_printf(json_wtr, "\"%d.%d\"",
+ libbpf_major_version(), libbpf_minor_version());
+
+ jsonw_name(json_wtr, "features");
+ jsonw_start_object(json_wtr); /* features */
+ jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
+ jsonw_bool_field(json_wtr, "llvm", has_llvm);
+ jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
+ jsonw_bool_field(json_wtr, "bootstrap", bootstrap);
+ jsonw_end_object(json_wtr); /* features */
+
+ jsonw_end_object(json_wtr); /* root object */
} else {
+ unsigned int nb_features = 0;
+
+#ifdef BPFTOOL_VERSION
printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
+#else
+ printf("%s v%d.%d.%d\n", bin_name, BPFTOOL_MAJOR_VERSION,
+ BPFTOOL_MINOR_VERSION, BPFTOOL_PATCH_VERSION);
+#endif
+ printf("using libbpf %s\n", libbpf_version_string());
+ printf("features:");
+ print_feature("libbfd", has_libbfd, &nb_features);
+ print_feature("llvm", has_llvm, &nb_features);
+ print_feature("skeletons", has_skeletons, &nb_features);
+ print_feature("bootstrap", bootstrap, &nb_features);
+ printf("\n");
}
return 0;
}
@@ -91,9 +196,16 @@ int cmd_select(const struct cmd *cmds, int argc, char **argv,
if (argc < 1 && cmds[0].func)
return cmds[0].func(argc, argv);
- for (i = 0; cmds[i].func; i++)
- if (is_prefix(*argv, cmds[i].cmd))
+ for (i = 0; cmds[i].cmd; i++) {
+ if (is_prefix(*argv, cmds[i].cmd)) {
+ if (!cmds[i].func) {
+ p_err("command '%s' is not supported in bootstrap mode",
+ cmds[i].cmd);
+ return -1;
+ }
return cmds[i].func(argc - 1, argv + 1);
+ }
+ }
help(argc - 1, argv + 1);
@@ -208,24 +320,6 @@ static int make_args(char *line, char *n_argv[], int maxargs, int cmd_nb)
return n_argc;
}
-static int do_batch(int argc, char **argv);
-
-static const struct cmd cmds[] = {
- { "help", do_help },
- { "batch", do_batch },
- { "prog", do_prog },
- { "map", do_map },
- { "cgroup", do_cgroup },
- { "perf", do_perf },
- { "net", do_net },
- { "feature", do_feature },
- { "btf", do_btf },
- { "gen", do_gen },
- { "struct_ops", do_struct_ops },
- { "version", do_version },
- { 0 }
-};
-
static int do_batch(int argc, char **argv)
{
char buf[BATCH_LINE_LEN_MAX], contline[BATCH_LINE_LEN_MAX];
@@ -234,18 +328,18 @@ static int do_batch(int argc, char **argv)
int n_argc;
FILE *fp;
char *cp;
- int err;
+ int err = 0;
int i;
if (argc < 2) {
p_err("too few parameters for batch");
return -1;
- } else if (!is_prefix(*argv, "file")) {
- p_err("expected 'file', got: %s", *argv);
- return -1;
} else if (argc > 2) {
p_err("too many parameters for batch");
return -1;
+ } else if (!is_prefix(*argv, "file")) {
+ p_err("expected 'file', got: %s", *argv);
+ return -1;
}
NEXT_ARG();
@@ -298,8 +392,10 @@ static int do_batch(int argc, char **argv)
n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines);
if (!n_argc)
continue;
- if (n_argc < 0)
+ if (n_argc < 0) {
+ err = n_argc;
goto err_close;
+ }
if (json_output) {
jsonw_start_object(json_wtr);
@@ -311,7 +407,7 @@ static int do_batch(int argc, char **argv)
jsonw_name(json_wtr, "output");
}
- err = cmd_select(cmds, n_argc, n_argv, do_help);
+ err = cmd_select(commands, n_argc, n_argv, do_help);
if (json_output)
jsonw_end_object(json_wtr);
@@ -328,7 +424,6 @@ static int do_batch(int argc, char **argv)
} else {
if (!json_output)
printf("processed %d commands\n", lines);
- err = 0;
}
err_close:
if (fp != stdin)
@@ -351,26 +446,39 @@ int main(int argc, char **argv)
{ "mapcompat", no_argument, NULL, 'm' },
{ "nomount", no_argument, NULL, 'n' },
{ "debug", no_argument, NULL, 'd' },
+ { "use-loader", no_argument, NULL, 'L' },
+ { "base-btf", required_argument, NULL, 'B' },
{ 0 }
};
+ bool version_requested = false;
int opt, ret;
+ setlinebuf(stdout);
+
+#ifdef USE_LIBCAP
+ /* Libcap < 2.63 hooks before main() to compute the number of
+ * capabilities of the running kernel, and in doing so it calls prctl(),
+ * which may fail and set errno to a non-zero value.
+ * Let's reset errno to make sure this does not interfere with the
+ * batch mode.
+ */
+ errno = 0;
+#endif
+
last_do_help = do_help;
pretty_output = false;
json_output = false;
show_pinned = false;
block_mount = false;
- bin_name = argv[0];
-
- hash_init(prog_table.table);
- hash_init(map_table.table);
+ bin_name = "bpftool";
opterr = 0;
- while ((opt = getopt_long(argc, argv, "Vhpjfmnd",
+ while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
- return do_version(argc, argv);
+ version_requested = true;
+ break;
case 'h':
return do_help(argc, argv);
case 'p':
@@ -400,6 +508,17 @@ int main(int argc, char **argv)
libbpf_set_print(print_all_levels);
verifier_logs = true;
break;
+ case 'B':
+ base_btf = btf__parse(optarg, NULL);
+ if (!base_btf) {
+ p_err("failed to parse base BTF at '%s': %d\n",
+ optarg, -errno);
+ return -1;
+ }
+ break;
+ case 'L':
+ use_loader = true;
+ break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
@@ -414,15 +533,15 @@ int main(int argc, char **argv)
if (argc < 0)
usage();
- ret = cmd_select(cmds, argc, argv, do_help);
+ if (version_requested)
+ return do_version(argc, argv);
+
+ ret = cmd_select(commands, argc, argv, do_help);
if (json_output)
jsonw_destroy(&json_wtr);
- if (show_pinned) {
- delete_pinned_obj_table(&prog_table);
- delete_pinned_obj_table(&map_table);
- }
+ btf__free(base_btf);
return ret;
}
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 86f14ce26fd7..a49534d7eafa 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -8,17 +8,28 @@
#undef GCC_VERSION
#include <stdbool.h>
#include <stdio.h>
+#include <stdlib.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
-#include <linux/hashtable.h>
-#include <tools/libc_compat.h>
+#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
#include "json_writer.h"
-#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
+/* Make sure we do not use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+static inline __u64 ptr_to_u64(const void *ptr)
+{
+ return (__u64)(unsigned long)ptr;
+}
+
+static inline void *u64_to_ptr(__u64 ptr)
+{
+ return (void *)(unsigned long)ptr;
+}
#define NEXT_ARG() ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
@@ -46,50 +57,19 @@
#define HELP_SPEC_PROGRAM \
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }"
#define HELP_SPEC_OPTIONS \
- "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} |\n" \
- "\t {-m|--mapcompat} | {-n|--nomount} }"
+ "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug}"
#define HELP_SPEC_MAP \
"MAP := { id MAP_ID | pinned FILE | name MAP_NAME }"
+#define HELP_SPEC_LINK \
+ "LINK := { id LINK_ID | pinned FILE }"
-static const char * const prog_type_name[] = {
- [BPF_PROG_TYPE_UNSPEC] = "unspec",
- [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
- [BPF_PROG_TYPE_KPROBE] = "kprobe",
- [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
- [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
- [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
- [BPF_PROG_TYPE_XDP] = "xdp",
- [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
- [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
- [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
- [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
- [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
- [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
- [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
- [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
- [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
- [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
- [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
- [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
- [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
- [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
- [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
- [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
- [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
- [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
- [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
- [BPF_PROG_TYPE_TRACING] = "tracing",
- [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
- [BPF_PROG_TYPE_EXT] = "ext",
-};
-
-extern const char * const map_type_name[];
-extern const size_t map_type_name_size;
-
+/* keep in sync with the definition in skeleton/pid_iter.bpf.c */
enum bpf_obj_type {
BPF_OBJ_UNKNOWN,
BPF_OBJ_PROG,
BPF_OBJ_MAP,
+ BPF_OBJ_LINK,
+ BPF_OBJ_BTF,
};
extern const char *bin_name;
@@ -97,11 +77,13 @@ extern const char *bin_name;
extern json_writer_t *json_wtr;
extern bool json_output;
extern bool show_pinned;
+extern bool show_pids;
extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
-extern struct pinned_obj_table prog_table;
-extern struct pinned_obj_table map_table;
+extern bool use_loader;
+extern struct btf *base_btf;
+extern struct hashmap *refs_table;
void __printf(1, 2) p_err(const char *fmt, ...);
void __printf(1, 2) p_info(const char *fmt, ...);
@@ -115,22 +97,31 @@ void set_max_rlimit(void);
int mount_tracefs(const char *target);
-struct pinned_obj_table {
- DECLARE_HASHTABLE(table, 16);
+struct obj_ref {
+ int pid;
+ char comm[16];
};
-struct pinned_obj {
- __u32 id;
- char *path;
- struct hlist_node hash;
+struct obj_refs {
+ int ref_cnt;
+ bool has_bpf_cookie;
+ struct obj_ref *refs;
+ __u64 bpf_cookie;
};
struct btf;
struct bpf_line_info;
-int build_pinned_obj_table(struct pinned_obj_table *table,
+int build_pinned_obj_table(struct hashmap *table,
enum bpf_obj_type type);
-void delete_pinned_obj_table(struct pinned_obj_table *tab);
+void delete_pinned_obj_table(struct hashmap *table);
+__weak int build_obj_refs_table(struct hashmap **table,
+ enum bpf_obj_type type);
+__weak void delete_obj_refs_table(struct hashmap *table);
+__weak void emit_obj_refs_json(struct hashmap *table, __u32 id,
+ json_writer_t *json_wtr);
+__weak void emit_obj_refs_plain(struct hashmap *table, __u32 id,
+ const char *prefix);
void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
@@ -142,54 +133,67 @@ struct cmd {
int cmd_select(const struct cmd *cmds, int argc, char **argv,
int (*help)(int argc, char **argv));
+#define MAX_PROG_FULL_NAME 128
+void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
+ char *name_buff, size_t buff_len);
+
int get_fd_type(int fd);
const char *get_fd_type_name(enum bpf_obj_type type);
char *get_fdinfo(int fd, const char *key);
-int open_obj_pinned(char *path, bool quiet);
-int open_obj_pinned_any(char *path, enum bpf_obj_type exp_type);
+int open_obj_pinned(const char *path, bool quiet);
+int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
int mount_bpffs_for_pin(const char *name);
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
int do_pin_fd(int fd, const char *name);
-int do_prog(int argc, char **arg);
-int do_map(int argc, char **arg);
-int do_event_pipe(int argc, char **argv);
-int do_cgroup(int argc, char **arg);
-int do_perf(int argc, char **arg);
-int do_net(int argc, char **arg);
-int do_tracelog(int argc, char **arg);
-int do_feature(int argc, char **argv);
-int do_btf(int argc, char **argv);
+/* commands available in bootstrap mode */
int do_gen(int argc, char **argv);
-int do_struct_ops(int argc, char **argv);
+int do_btf(int argc, char **argv);
+
+/* non-bootstrap only commands */
+int do_prog(int argc, char **arg) __weak;
+int do_map(int argc, char **arg) __weak;
+int do_link(int argc, char **arg) __weak;
+int do_event_pipe(int argc, char **argv) __weak;
+int do_cgroup(int argc, char **arg) __weak;
+int do_perf(int argc, char **arg) __weak;
+int do_net(int argc, char **arg) __weak;
+int do_tracelog(int argc, char **arg) __weak;
+int do_feature(int argc, char **argv) __weak;
+int do_struct_ops(int argc, char **argv) __weak;
+int do_iter(int argc, char **argv) __weak;
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
+int prog_parse_fds(int *argc, char ***argv, int **fds);
int map_parse_fd(int *argc, char ***argv);
-int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
+int map_parse_fds(int *argc, char ***argv, int **fds);
+int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
+ __u32 *info_len);
struct bpf_prog_linfo;
-#ifdef HAVE_LIBBFD_SUPPORT
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch, const char *disassembler_options,
- const struct btf *btf,
- const struct bpf_prog_linfo *prog_linfo,
- __u64 func_ksym, unsigned int func_idx,
- bool linum);
+#if defined(HAVE_LLVM_SUPPORT) || defined(HAVE_LIBBFD_SUPPORT)
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum);
int disasm_init(void);
#else
static inline
-void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
- const char *arch, const char *disassembler_options,
- const struct btf *btf,
- const struct bpf_prog_linfo *prog_linfo,
- __u64 func_ksym, unsigned int func_idx,
- bool linum)
+int disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
+ const char *arch, const char *disassembler_options,
+ const struct btf *btf,
+ const struct bpf_prog_linfo *prog_linfo,
+ __u64 func_ksym, unsigned int func_idx,
+ bool linum)
{
+ return 0;
}
static inline int disasm_init(void)
{
- p_err("No libbfd support");
+ p_err("No JIT disassembly support");
return -1;
}
#endif
@@ -199,8 +203,7 @@ void print_hex_data_json(uint8_t *data, size_t len);
unsigned int get_page_size(void);
unsigned int get_possible_cpus(void);
const char *
-ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
- const char **opt);
+ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt);
struct btf_dumper {
const struct btf *btf;
@@ -226,6 +229,8 @@ void btf_dump_linfo_plain(const struct btf *btf,
const char *prefix, bool linum);
void btf_dump_linfo_json(const struct btf *btf,
const struct bpf_line_info *linfo, bool linum);
+void btf_dump_linfo_dotlabel(const struct btf *btf,
+ const struct bpf_line_info *linfo, bool linum);
struct nlattr;
struct ifinfomsg;
@@ -236,4 +241,33 @@ int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
int print_all_levels(__maybe_unused enum libbpf_print_level level,
const char *format, va_list args);
+
+size_t hash_fn_for_key_as_id(long key, void *ctx);
+bool equal_fn_for_key_as_id(long k1, long k2, void *ctx);
+
+/* bpf_attach_type_input_str - convert the provided attach type value into a
+ * textual representation that we accept for input purposes.
+ *
+ * This function is similar in nature to libbpf_bpf_attach_type_str, but
+ * recognizes some attach type names that have been used by the program in the
+ * past and which do not follow the string inference scheme that libbpf uses.
+ * These textual representations should only be used for user input.
+ *
+ * @t: The attach type
+ * Returns a pointer to a static string identifying the attach type. NULL is
+ * returned for unknown bpf_attach_type values.
+ */
+const char *bpf_attach_type_input_str(enum bpf_attach_type t);
+
+static inline bool hashmap__empty(struct hashmap *map)
+{
+ return map ? hashmap__size(map) == 0 : true;
+}
+
+int pathname_concat(char *buf, int buf_sz, const char *path,
+ const char *name);
+
+/* print netfilter bpf_link info */
+void netfilter_dump_plain(const struct bpf_link_info *info);
+void netfilter_dump_json(const struct bpf_link_info *info, json_writer_t *wtr);
#endif
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 693a632f6813..aaeb8939e137 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
-#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/err.h>
@@ -17,41 +16,12 @@
#include <bpf/bpf.h>
#include <bpf/btf.h>
+#include <bpf/hashmap.h>
#include "json_writer.h"
#include "main.h"
-const char * const map_type_name[] = {
- [BPF_MAP_TYPE_UNSPEC] = "unspec",
- [BPF_MAP_TYPE_HASH] = "hash",
- [BPF_MAP_TYPE_ARRAY] = "array",
- [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
- [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
- [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
- [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
- [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
- [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
- [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
- [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
- [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
- [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
- [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
- [BPF_MAP_TYPE_DEVMAP] = "devmap",
- [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
- [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
- [BPF_MAP_TYPE_CPUMAP] = "cpumap",
- [BPF_MAP_TYPE_XSKMAP] = "xskmap",
- [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
- [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
- [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
- [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
- [BPF_MAP_TYPE_QUEUE] = "queue",
- [BPF_MAP_TYPE_STACK] = "stack",
- [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
- [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
-};
-
-const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
+static struct hashmap *map_table;
static bool map_is_per_cpu(__u32 type)
{
@@ -74,12 +44,18 @@ static bool map_is_map_of_progs(__u32 type)
static int map_type_from_str(const char *type)
{
+ const char *map_type_str;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(map_type_name); i++)
+ for (i = 0; ; i++) {
+ map_type_str = libbpf_bpf_map_type_str(i);
+ if (!map_type_str)
+ break;
+
/* Don't allow prefixing in case of possible future shadowing */
- if (map_type_name[i] && !strcmp(map_type_name[i], type))
+ if (!strcmp(map_type_str, type))
return i;
+ }
return -1;
}
@@ -92,168 +68,12 @@ static void *alloc_value(struct bpf_map_info *info)
return malloc(info->value_size);
}
-static int map_fd_by_name(char *name, int **fds)
-{
- unsigned int id = 0;
- int fd, nb_fds = 0;
- void *tmp;
- int err;
-
- while (true) {
- struct bpf_map_info info = {};
- __u32 len = sizeof(info);
-
- err = bpf_map_get_next_id(id, &id);
- if (err) {
- if (errno != ENOENT) {
- p_err("%s", strerror(errno));
- goto err_close_fds;
- }
- return nb_fds;
- }
-
- fd = bpf_map_get_fd_by_id(id);
- if (fd < 0) {
- p_err("can't get map by id (%u): %s",
- id, strerror(errno));
- goto err_close_fds;
- }
-
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
- if (err) {
- p_err("can't get map info (%u): %s",
- id, strerror(errno));
- goto err_close_fd;
- }
-
- if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
- close(fd);
- continue;
- }
-
- if (nb_fds > 0) {
- tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
- if (!tmp) {
- p_err("failed to realloc");
- goto err_close_fd;
- }
- *fds = tmp;
- }
- (*fds)[nb_fds++] = fd;
- }
-
-err_close_fd:
- close(fd);
-err_close_fds:
- while (--nb_fds >= 0)
- close((*fds)[nb_fds]);
- return -1;
-}
-
-static int map_parse_fds(int *argc, char ***argv, int **fds)
-{
- if (is_prefix(**argv, "id")) {
- unsigned int id;
- char *endptr;
-
- NEXT_ARGP();
-
- id = strtoul(**argv, &endptr, 0);
- if (*endptr) {
- p_err("can't parse %s as ID", **argv);
- return -1;
- }
- NEXT_ARGP();
-
- (*fds)[0] = bpf_map_get_fd_by_id(id);
- if ((*fds)[0] < 0) {
- p_err("get map by id (%u): %s", id, strerror(errno));
- return -1;
- }
- return 1;
- } else if (is_prefix(**argv, "name")) {
- char *name;
-
- NEXT_ARGP();
-
- name = **argv;
- if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
- p_err("can't parse name");
- return -1;
- }
- NEXT_ARGP();
-
- return map_fd_by_name(name, fds);
- } else if (is_prefix(**argv, "pinned")) {
- char *path;
-
- NEXT_ARGP();
-
- path = **argv;
- NEXT_ARGP();
-
- (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
- if ((*fds)[0] < 0)
- return -1;
- return 1;
- }
-
- p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
- return -1;
-}
-
-int map_parse_fd(int *argc, char ***argv)
-{
- int *fds = NULL;
- int nb_fds, fd;
-
- fds = malloc(sizeof(int));
- if (!fds) {
- p_err("mem alloc failed");
- return -1;
- }
- nb_fds = map_parse_fds(argc, argv, &fds);
- if (nb_fds != 1) {
- if (nb_fds > 1) {
- p_err("several maps match this handle");
- while (nb_fds--)
- close(fds[nb_fds]);
- }
- fd = -1;
- goto exit_free;
- }
-
- fd = fds[0];
-exit_free:
- free(fds);
- return fd;
-}
-
-int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len)
-{
- int err;
- int fd;
-
- fd = map_parse_fd(argc, argv);
- if (fd < 0)
- return -1;
-
- err = bpf_obj_get_info_by_fd(fd, info, info_len);
- if (err) {
- p_err("can't get map info: %s", strerror(errno));
- close(fd);
- return err;
- }
-
- return fd;
-}
-
static int do_dump_btf(const struct btf_dumper *d,
struct bpf_map_info *map_info, void *key,
void *value)
{
__u32 value_id;
- int ret;
+ int ret = 0;
/* start of key-value pair */
jsonw_start_object(d->jw);
@@ -367,8 +187,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
jsonw_end_object(json_wtr);
}
-static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
- const char *error_msg)
+static void
+print_entry_error_msg(struct bpf_map_info *info, unsigned char *key,
+ const char *error_msg)
{
int msg_size = strlen(error_msg);
bool single_line, break_names;
@@ -386,6 +207,40 @@ static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
printf("\n");
}
+static void
+print_entry_error(struct bpf_map_info *map_info, void *key, int lookup_errno)
+{
+ /* For prog_array maps or arrays of maps, failure to look up the value
+ * means there is no entry for that key. Do not print an error message
+ * in that case.
+ */
+ if ((map_is_map_of_maps(map_info->type) ||
+ map_is_map_of_progs(map_info->type)) && lookup_errno == ENOENT)
+ return;
+
+ if (json_output) {
+ jsonw_start_object(json_wtr); /* entry */
+ jsonw_name(json_wtr, "key");
+ print_hex_data_json(key, map_info->key_size);
+ jsonw_name(json_wtr, "value");
+ jsonw_start_object(json_wtr); /* error */
+ jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
+ jsonw_end_object(json_wtr); /* error */
+ jsonw_end_object(json_wtr); /* entry */
+ } else {
+ const char *msg = NULL;
+
+ if (lookup_errno == ENOENT)
+ msg = "<no entry>";
+ else if (lookup_errno == ENOSPC &&
+ map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
+ msg = "<cannot read>";
+
+ print_entry_error_msg(map_info, key,
+ msg ? : strerror(lookup_errno));
+ }
+}
+
static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
unsigned char *value)
{
@@ -586,9 +441,12 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
static void show_map_header_json(struct bpf_map_info *info, json_writer_t *wtr)
{
+ const char *map_type_str;
+
jsonw_uint_field(wtr, "id", info->id);
- if (info->type < ARRAY_SIZE(map_type_name))
- jsonw_string_field(wtr, "type", map_type_name[info->type]);
+ map_type_str = libbpf_bpf_map_type_str(info->type);
+ if (map_type_str)
+ jsonw_string_field(wtr, "type", map_type_str);
else
jsonw_uint_field(wtr, "type", info->type);
@@ -618,7 +476,7 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
jsonw_uint_field(json_wtr, "max_entries", info->max_entries);
if (memlock)
- jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
+ jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
free(memlock);
if (info->type == BPF_MAP_TYPE_PROG_ARRAY) {
@@ -627,10 +485,12 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
+ const char *prog_type_str;
- if (prog_type < ARRAY_SIZE(prog_type_name))
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ if (prog_type_str)
jsonw_string_field(json_wtr, "owner_prog_type",
- prog_type_name[prog_type]);
+ prog_type_str);
else
jsonw_uint_field(json_wtr, "owner_prog_type",
prog_type);
@@ -653,18 +513,18 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
if (info->btf_id)
jsonw_int_field(json_wtr, "btf_id", info->btf_id);
- if (!hash_empty(map_table.table)) {
- struct pinned_obj *obj;
+ if (!hashmap__empty(map_table)) {
+ struct hashmap_entry *entry;
jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
- hash_for_each_possible(map_table.table, obj, hash, info->id) {
- if (obj->id == info->id)
- jsonw_string(json_wtr, obj->path);
- }
+ hashmap__for_each_key_entry(map_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
jsonw_end_array(json_wtr);
}
+ emit_obj_refs_json(refs_table, info->id, json_wtr);
+
jsonw_end_object(json_wtr);
return 0;
@@ -672,9 +532,13 @@ static int show_map_close_json(int fd, struct bpf_map_info *info)
static void show_map_header_plain(struct bpf_map_info *info)
{
+ const char *map_type_str;
+
printf("%u: ", info->id);
- if (info->type < ARRAY_SIZE(map_type_name))
- printf("%s ", map_type_name[info->type]);
+
+ map_type_str = libbpf_bpf_map_type_str(info->type);
+ if (map_type_str)
+ printf("%s ", map_type_str);
else
printf("type %u ", info->type);
@@ -710,10 +574,11 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
printf("\n\t");
if (owner_prog_type) {
unsigned int prog_type = atoi(owner_prog_type);
+ const char *prog_type_str;
- if (prog_type < ARRAY_SIZE(prog_type_name))
- printf("owner_prog_type %s ",
- prog_type_name[prog_type]);
+ prog_type_str = libbpf_bpf_prog_type_str(prog_type);
+ if (prog_type_str)
+ printf("owner_prog_type %s ", prog_type_str);
else
printf("owner_prog_type %d ", prog_type);
}
@@ -726,25 +591,20 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
}
close(fd);
- if (!hash_empty(map_table.table)) {
- struct pinned_obj *obj;
+ if (!hashmap__empty(map_table)) {
+ struct hashmap_entry *entry;
- hash_for_each_possible(map_table.table, obj, hash, info->id) {
- if (obj->id == info->id)
- printf("\n\tpinned %s", obj->path);
- }
+ hashmap__for_each_key_entry(map_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
}
- printf("\n");
if (frozen_str) {
frozen = atoi(frozen_str);
free(frozen_str);
}
- if (!info->btf_id && !frozen)
- return 0;
-
- printf("\t");
+ if (info->btf_id || frozen)
+ printf("\n\t");
if (info->btf_id)
printf("btf_id %d", info->btf_id);
@@ -752,6 +612,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info)
if (frozen)
printf("%sfrozen", info->btf_id ? " " : "");
+ emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
+
printf("\n");
return 0;
}
@@ -776,7 +638,7 @@ static int do_show_subset(int argc, char **argv)
if (json_output && nb_fds > 1)
jsonw_start_array(json_wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
- err = bpf_obj_get_info_by_fd(fds[i], &info, &len);
+ err = bpf_map_get_info_by_fd(fds[i], &info, &len);
if (err) {
p_err("can't get map info: %s",
strerror(errno));
@@ -808,8 +670,16 @@ static int do_show(int argc, char **argv)
int err;
int fd;
- if (show_pinned)
- build_pinned_obj_table(&map_table, BPF_OBJ_MAP);
+ if (show_pinned) {
+ map_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(map_table)) {
+ p_err("failed to create hashmap for pinned paths");
+ return -1;
+ }
+ build_pinned_obj_table(map_table, BPF_OBJ_MAP);
+ }
+ build_obj_refs_table(&refs_table, BPF_OBJ_MAP);
if (argc == 2)
return do_show_subset(argc, argv);
@@ -838,7 +708,7 @@ static int do_show(int argc, char **argv)
break;
}
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
@@ -853,6 +723,11 @@ static int do_show(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);
+ delete_obj_refs_table(refs_table);
+
+ if (show_pinned)
+ delete_pinned_obj_table(map_table);
+
return errno == ENOENT ? 0 : -1;
}
@@ -860,56 +735,23 @@ static int dump_map_elem(int fd, void *key, void *value,
struct bpf_map_info *map_info, struct btf *btf,
json_writer_t *btf_wtr)
{
- int num_elems = 0;
- int lookup_errno;
-
- if (!bpf_map_lookup_elem(fd, key, value)) {
- if (json_output) {
- print_entry_json(map_info, key, value, btf);
- } else {
- if (btf) {
- struct btf_dumper d = {
- .btf = btf,
- .jw = btf_wtr,
- .is_plain_text = true,
- };
-
- do_dump_btf(&d, map_info, key, value);
- } else {
- print_entry_plain(map_info, key, value);
- }
- num_elems++;
- }
- return num_elems;
+ if (bpf_map_lookup_elem(fd, key, value)) {
+ print_entry_error(map_info, key, errno);
+ return -1;
}
- /* lookup error handling */
- lookup_errno = errno;
-
- if (map_is_map_of_maps(map_info->type) ||
- map_is_map_of_progs(map_info->type))
- return 0;
-
if (json_output) {
- jsonw_start_object(json_wtr);
- jsonw_name(json_wtr, "key");
- print_hex_data_json(key, map_info->key_size);
- jsonw_name(json_wtr, "value");
- jsonw_start_object(json_wtr);
- jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
- jsonw_end_object(json_wtr);
- jsonw_end_object(json_wtr);
- } else {
- const char *msg = NULL;
-
- if (lookup_errno == ENOENT)
- msg = "<no entry>";
- else if (lookup_errno == ENOSPC &&
- map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
- msg = "<cannot read>";
+ print_entry_json(map_info, key, value, btf);
+ } else if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
- print_entry_error(map_info, key,
- msg ? : strerror(lookup_errno));
+ do_dump_btf(&d, map_info, key, value);
+ } else {
+ print_entry_plain(map_info, key, value);
}
return 0;
@@ -922,7 +764,7 @@ static int maps_have_btf(int *fds, int nb_fds)
int err, i;
for (i = 0; i < nb_fds; i++) {
- err = bpf_obj_get_info_by_fd(fds[i], &info, &len);
+ err = bpf_map_get_info_by_fd(fds[i], &info, &len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
return -1;
@@ -937,42 +779,38 @@ static int maps_have_btf(int *fds, int nb_fds)
static struct btf *btf_vmlinux;
-static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
+static int get_map_kv_btf(const struct bpf_map_info *info, struct btf **btf)
{
- struct btf *btf = NULL;
+ int err = 0;
if (info->btf_vmlinux_value_type_id) {
if (!btf_vmlinux) {
btf_vmlinux = libbpf_find_kernel_btf();
- if (IS_ERR(btf_vmlinux))
+ if (!btf_vmlinux) {
p_err("failed to get kernel btf");
+ return -errno;
+ }
}
- return btf_vmlinux;
+ *btf = btf_vmlinux;
} else if (info->btf_value_type_id) {
- int err;
-
- err = btf__get_from_id(info->btf_id, &btf);
- if (err || !btf) {
+ *btf = btf__load_from_kernel_by_id(info->btf_id);
+ if (!*btf) {
+ err = -errno;
p_err("failed to get btf");
- btf = err ? ERR_PTR(err) : ERR_PTR(-ESRCH);
}
+ } else {
+ *btf = NULL;
}
- return btf;
+ return err;
}
static void free_map_kv_btf(struct btf *btf)
{
- if (!IS_ERR(btf) && btf != btf_vmlinux)
+ if (btf != btf_vmlinux)
btf__free(btf);
}
-static void free_btf_vmlinux(void)
-{
- if (!IS_ERR(btf_vmlinux))
- btf__free(btf_vmlinux);
-}
-
static int
map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
bool show_header)
@@ -993,9 +831,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
prev_key = NULL;
if (wtr) {
- btf = get_map_kv_btf(info);
- if (IS_ERR(btf)) {
- err = PTR_ERR(btf);
+ err = get_map_kv_btf(info, &btf);
+ if (err) {
goto exit_free;
}
@@ -1010,9 +847,13 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
}
if (info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY &&
- info->value_size != 8)
+ info->value_size != 8) {
+ const char *map_type_str;
+
+ map_type_str = libbpf_bpf_map_type_str(info->type);
p_info("Warning: cannot read values from %s map with value_size != 8",
- map_type_name[info->type]);
+ map_type_str);
+ }
while (true) {
err = bpf_map_get_next_key(fd, prev_key, key);
if (err) {
@@ -1020,7 +861,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
err = 0;
break;
}
- num_elems += dump_map_elem(fd, key, value, info, btf, wtr);
+ if (!dump_map_elem(fd, key, value, info, btf, wtr))
+ num_elems++;
prev_key = key;
}
@@ -1083,7 +925,7 @@ static int do_dump(int argc, char **argv)
if (wtr && nb_fds > 1)
jsonw_start_array(wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
- if (bpf_obj_get_info_by_fd(fds[i], &info, &len)) {
+ if (bpf_map_get_info_by_fd(fds[i], &info, &len)) {
p_err("can't get map info: %s", strerror(errno));
break;
}
@@ -1105,7 +947,7 @@ exit_close:
close(fds[i]);
exit_free:
free(fds);
- free_btf_vmlinux();
+ btf__free(btf_vmlinux);
return err;
}
@@ -1182,14 +1024,10 @@ static void print_key_value(struct bpf_map_info *info, void *key,
void *value)
{
json_writer_t *btf_wtr;
- struct btf *btf = NULL;
- int err;
+ struct btf *btf;
- err = btf__get_from_id(info->btf_id, &btf);
- if (err) {
- p_err("failed to get btf");
+ if (get_map_kv_btf(info, &btf))
return;
- }
if (json_output) {
print_entry_json(info, key, value, btf);
@@ -1392,9 +1230,12 @@ static int do_pin(int argc, char **argv)
static int do_create(int argc, char **argv)
{
- struct bpf_create_map_attr attr = { NULL, };
+ LIBBPF_OPTS(bpf_map_create_opts, attr);
+ enum bpf_map_type map_type = BPF_MAP_TYPE_UNSPEC;
+ __u32 key_size = 0, value_size = 0, max_entries = 0;
+ const char *map_name = NULL;
const char *pinfile;
- int err, fd;
+ int err = -1, fd;
if (!REQ_ARGS(7))
return -1;
@@ -1407,78 +1248,96 @@ static int do_create(int argc, char **argv)
if (is_prefix(*argv, "type")) {
NEXT_ARG();
- if (attr.map_type) {
+ if (map_type) {
p_err("map type already specified");
- return -1;
+ goto exit;
}
- attr.map_type = map_type_from_str(*argv);
- if ((int)attr.map_type < 0) {
+ map_type = map_type_from_str(*argv);
+ if ((int)map_type < 0) {
p_err("unrecognized map type: %s", *argv);
- return -1;
+ goto exit;
}
NEXT_ARG();
} else if (is_prefix(*argv, "name")) {
NEXT_ARG();
- attr.name = GET_ARG();
+ map_name = GET_ARG();
} else if (is_prefix(*argv, "key")) {
- if (parse_u32_arg(&argc, &argv, &attr.key_size,
+ if (parse_u32_arg(&argc, &argv, &key_size,
"key size"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "value")) {
- if (parse_u32_arg(&argc, &argv, &attr.value_size,
+ if (parse_u32_arg(&argc, &argv, &value_size,
"value size"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "entries")) {
- if (parse_u32_arg(&argc, &argv, &attr.max_entries,
+ if (parse_u32_arg(&argc, &argv, &max_entries,
"max entries"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "flags")) {
if (parse_u32_arg(&argc, &argv, &attr.map_flags,
"flags"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "dev")) {
NEXT_ARG();
if (attr.map_ifindex) {
p_err("offload device already specified");
- return -1;
+ goto exit;
}
attr.map_ifindex = if_nametoindex(*argv);
if (!attr.map_ifindex) {
p_err("unrecognized netdevice '%s': %s",
*argv, strerror(errno));
- return -1;
+ goto exit;
}
NEXT_ARG();
+ } else if (is_prefix(*argv, "inner_map")) {
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int inner_map_fd;
+
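+ /* Map-in-map types (array_of_maps, hash_of_maps) need a
+ * template map: parse the reference and pass its fd as
+ * inner_map_fd at creation time.
+ */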
+ NEXT_ARG();
+ if (!REQ_ARGS(2))
+ usage();
+ inner_map_fd = map_parse_fd_and_info(&argc, &argv,
+ &info, &len);
+ if (inner_map_fd < 0)
+ return -1;
+ attr.inner_map_fd = inner_map_fd;
} else {
p_err("unknown arg %s", *argv);
- return -1;
+ goto exit;
}
}
- if (!attr.name) {
+ if (!map_name) {
p_err("map name not specified");
- return -1;
+ goto exit;
}
set_max_rlimit();
- fd = bpf_create_map_xattr(&attr);
+ fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr);
if (fd < 0) {
p_err("map create failed: %s", strerror(errno));
- return -1;
+ goto exit;
}
err = do_pin_fd(fd, pinfile);
close(fd);
if (err)
- return err;
+ goto exit;
if (json_output)
jsonw_null(json_wtr);
- return 0;
+
+exit:
+ if (attr.inner_map_fd > 0)
+ close(attr.inner_map_fd);
+
+ return err;
}
static int do_pop_dequeue(int argc, char **argv)
@@ -1561,24 +1420,24 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s { show | list } [MAP]\n"
- " %s %s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
- " entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
- " [dev NAME]\n"
- " %s %s dump MAP\n"
- " %s %s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
- " %s %s lookup MAP [key DATA]\n"
- " %s %s getnext MAP [key DATA]\n"
- " %s %s delete MAP key DATA\n"
- " %s %s pin MAP FILE\n"
- " %s %s event_pipe MAP [cpu N index M]\n"
- " %s %s peek MAP\n"
- " %s %s push MAP value VALUE\n"
- " %s %s pop MAP\n"
- " %s %s enqueue MAP value VALUE\n"
- " %s %s dequeue MAP\n"
- " %s %s freeze MAP\n"
- " %s %s help\n"
+ "Usage: %1$s %2$s { show | list } [MAP]\n"
+ " %1$s %2$s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
+ " entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
+ " [inner_map MAP] [dev NAME]\n"
+ " %1$s %2$s dump MAP\n"
+ " %1$s %2$s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
+ " %1$s %2$s lookup MAP [key DATA]\n"
+ " %1$s %2$s getnext MAP [key DATA]\n"
+ " %1$s %2$s delete MAP key DATA\n"
+ " %1$s %2$s pin MAP FILE\n"
+ " %1$s %2$s event_pipe MAP [cpu N index M]\n"
+ " %1$s %2$s peek MAP\n"
+ " %1$s %2$s push MAP value VALUE\n"
+ " %1$s %2$s pop MAP\n"
+ " %1$s %2$s enqueue MAP value VALUE\n"
+ " %1$s %2$s dequeue MAP\n"
+ " %1$s %2$s freeze MAP\n"
+ " %1$s %2$s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
" DATA := { [hex] BYTES }\n"
@@ -1589,14 +1448,12 @@ static int do_help(int argc, char **argv)
" percpu_array | stack_trace | cgroup_array | lru_hash |\n"
" lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
- " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage }\n"
- " " HELP_SPEC_OPTIONS "\n"
+ " cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
+ " queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
+ " task_storage | bloom_filter | user_ringbuf | cgrp_storage }\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-f|--bpffs} | {-n|--nomount} }\n"
"",
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
bin_name, argv[-2]);
return 0;
diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
index d9b29c17fbb8..21d7d447e1f3 100644
--- a/tools/bpf/bpftool/map_perf_ring.c
+++ b/tools/bpf/bpftool/map_perf_ring.c
@@ -22,7 +22,6 @@
#include <sys/syscall.h>
#include <bpf/bpf.h>
-#include <perf-sys.h>
#include "main.h"
@@ -30,16 +29,9 @@
static volatile bool stop;
-struct event_ring_info {
- int fd;
- int key;
- unsigned int cpu;
- void *mem;
-};
-
struct perf_event_sample {
struct perf_event_header header;
- u64 time;
+ __u64 time;
__u32 size;
unsigned char data[];
};
@@ -125,7 +117,7 @@ int do_event_pipe(int argc, char **argv)
.wakeup_events = 1,
};
struct bpf_map_info map_info = {};
- struct perf_buffer_raw_opts opts = {};
+ LIBBPF_OPTS(perf_buffer_raw_opts, opts);
struct event_pipe_ctx ctx = {
.all_cpus = true,
.cpu = -1,
@@ -191,18 +183,14 @@ int do_event_pipe(int argc, char **argv)
ctx.idx = 0;
}
- opts.attr = &perf_attr;
- opts.event_cb = print_bpf_output;
- opts.ctx = &ctx;
opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
opts.cpus = &ctx.cpu;
opts.map_keys = &ctx.idx;
-
- pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &opts);
- err = libbpf_get_error(pb);
- if (err) {
+ pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr,
+ print_bpf_output, &ctx, &opts);
+ if (!pb) {
p_err("failed to create perf buffer: %s (%d)",
- strerror(err), err);
+ strerror(errno), errno);
goto err_close_map;
}
@@ -217,7 +205,7 @@ int do_event_pipe(int argc, char **argv)
err = perf_buffer__poll(pb, 200);
if (err < 0 && err != -EINTR) {
p_err("perf buffer polling failed: %s (%d)",
- strerror(err), err);
+ strerror(errno), errno);
goto err_close_pb;
}
}
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index c5e3895b7c8b..26a49965bf71 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -1,27 +1,33 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (C) 2018 Facebook
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
+#include <time.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <net/if.h>
-#include <linux/if.h>
#include <linux/rtnetlink.h>
+#include <linux/socket.h>
#include <linux/tc_act/tc_bpf.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "bpf/nlattr.h"
-#include "bpf/libbpf_internal.h"
#include "main.h"
#include "netlink_dumper.h"
+#ifndef SOL_NETLINK
+#define SOL_NETLINK 270
+#endif
+
struct ip_devname_ifindex {
char devname[64];
int ifindex;
@@ -85,6 +91,266 @@ static enum net_attach_type parse_attach_type(const char *str)
return net_attach_type_size;
}
+typedef int (*dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
+
+typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, dump_nlmsg_t, void *cookie);
+
+static int netlink_open(__u32 *nl_pid)
+{
+ struct sockaddr_nl sa;
+ socklen_t addrlen;
+ int one = 1, ret;
+ int sock;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock < 0)
+ return -errno;
+
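+ /* Extended ack error reporting is optional: warn if the kernel
+ * does not support NETLINK_EXT_ACK, but keep the socket usable.
+ */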
+ if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one)) < 0) {
+ p_err("Netlink error reporting not supported");
+ }
+
+ if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ addrlen = sizeof(sa);
+ if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ if (addrlen != sizeof(sa)) {
+ ret = -LIBBPF_ERRNO__INTERNAL;
+ goto cleanup;
+ }
+
+ *nl_pid = sa.nl_pid;
+ return sock;
+
+cleanup:
+ close(sock);
+ return ret;
+}
+
+static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
+ __dump_nlmsg_t _fn, dump_nlmsg_t fn,
+ void *cookie)
+{
+ bool multipart = true;
+ struct nlmsgerr *err;
+ struct nlmsghdr *nh;
+ char buf[4096];
+ int len, ret;
+
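+ /* A dump reply may span several messages when NLM_F_MULTI is set;
+ * keep receiving until NLMSG_DONE (or an error) terminates it.
+ */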
+ while (multipart) {
+ multipart = false;
+ len = recv(sock, buf, sizeof(buf), 0);
+ if (len < 0) {
+ ret = -errno;
+ goto done;
+ }
+
+ if (len == 0)
+ break;
+
+ for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, (unsigned int)len);
+ nh = NLMSG_NEXT(nh, len)) {
+ if (nh->nlmsg_pid != nl_pid) {
+ ret = -LIBBPF_ERRNO__WRNGPID;
+ goto done;
+ }
+ if (nh->nlmsg_seq != seq) {
+ ret = -LIBBPF_ERRNO__INVSEQ;
+ goto done;
+ }
+ if (nh->nlmsg_flags & NLM_F_MULTI)
+ multipart = true;
+ switch (nh->nlmsg_type) {
+ case NLMSG_ERROR:
+ err = (struct nlmsgerr *)NLMSG_DATA(nh);
+ if (!err->error)
+ continue;
+ ret = err->error;
+ libbpf_nla_dump_errormsg(nh);
+ goto done;
+ case NLMSG_DONE:
+ return 0;
+ default:
+ break;
+ }
+ if (_fn) {
+ ret = _fn(nh, fn, cookie);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+static int __dump_class_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_class_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_class_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_class(int sock, unsigned int nl_pid, int ifindex,
+ dump_nlmsg_t dump_class_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETTCLASS,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg,
+ dump_class_nlmsg, cookie);
+}
+
+static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_qdisc_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_qdisc_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
+ dump_nlmsg_t dump_qdisc_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETQDISC,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg,
+ dump_qdisc_nlmsg, cookie);
+}
+
+static int __dump_filter_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_filter_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_filter_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
+ dump_nlmsg_t dump_filter_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETTFILTER,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ .t.tcm_parent = handle,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg,
+ dump_filter_nlmsg, cookie);
+}
+
+static int __dump_link_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_link_nlmsg, void *cookie)
+{
+ struct nlattr *tb[IFLA_MAX + 1], *attr;
+ struct ifinfomsg *ifi = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
+ attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
+ if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_link_nlmsg(cookie, ifi, tb);
+}
+
+static int netlink_get_link(int sock, unsigned int nl_pid,
+ dump_nlmsg_t dump_link_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct ifinfomsg ifm;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlh.nlmsg_type = RTM_GETLINK,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .ifm.ifi_family = AF_PACKET,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
+ dump_link_nlmsg, cookie);
+}
+
static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_netdev_t *netinfo = cookie;
@@ -168,14 +434,14 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
tcinfo.array_len = 0;
tcinfo.is_qdisc = false;
- ret = libbpf_nl_get_class(sock, nl_pid, dev->ifindex,
- dump_class_qdisc_nlmsg, &tcinfo);
+ ret = netlink_get_class(sock, nl_pid, dev->ifindex,
+ dump_class_qdisc_nlmsg, &tcinfo);
if (ret)
goto out;
tcinfo.is_qdisc = true;
- ret = libbpf_nl_get_qdisc(sock, nl_pid, dev->ifindex,
- dump_class_qdisc_nlmsg, &tcinfo);
+ ret = netlink_get_qdisc(sock, nl_pid, dev->ifindex,
+ dump_class_qdisc_nlmsg, &tcinfo);
if (ret)
goto out;
@@ -183,9 +449,9 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
filter_info.ifindex = dev->ifindex;
for (i = 0; i < tcinfo.used_len; i++) {
filter_info.kind = tcinfo.handle_array[i].kind;
- ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex,
- tcinfo.handle_array[i].handle,
- dump_filter_nlmsg, &filter_info);
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex,
+ tcinfo.handle_array[i].handle,
+ dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
}
@@ -193,22 +459,22 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
/* root, ingress and egress handle */
handle = TC_H_ROOT;
filter_info.kind = "root";
- ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
- dump_filter_nlmsg, &filter_info);
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
filter_info.kind = "clsact/ingress";
- ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
- dump_filter_nlmsg, &filter_info);
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);
filter_info.kind = "clsact/egress";
- ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
- dump_filter_nlmsg, &filter_info);
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
@@ -287,7 +553,7 @@ static int do_attach_detach_xdp(int progfd, enum net_attach_type attach_type,
if (attach_type == NET_ATTACH_TYPE_XDP_OFFLOAD)
flags |= XDP_FLAGS_HW_MODE;
- return bpf_set_link_xdp_fd(ifindex, progfd, flags);
+ return bpf_xdp_attach(ifindex, progfd, flags, NULL);
}
static int do_attach(int argc, char **argv)
@@ -313,8 +579,8 @@ static int do_attach(int argc, char **argv)
ifindex = net_parse_dev(&argc, &argv);
if (ifindex < 1) {
- close(progfd);
- return -EINVAL;
+ err = -EINVAL;
+ goto cleanup;
}
if (argc) {
@@ -322,8 +588,8 @@ static int do_attach(int argc, char **argv)
overwrite = true;
} else {
p_err("expected 'overwrite', got: '%s'?", *argv);
- close(progfd);
- return -EINVAL;
+ err = -EINVAL;
+ goto cleanup;
}
}
@@ -331,17 +597,17 @@ static int do_attach(int argc, char **argv)
if (is_prefix("xdp", attach_type_strings[attach_type]))
err = do_attach_detach_xdp(progfd, attach_type, ifindex,
overwrite);
-
- if (err < 0) {
+ if (err) {
p_err("interface %s attach failed: %s",
attach_type_strings[attach_type], strerror(-err));
- return err;
+ goto cleanup;
}
if (json_output)
jsonw_null(json_wtr);
-
- return 0;
+cleanup:
+ close(progfd);
+ return err;
}
static int do_detach(int argc, char **argv)
@@ -381,12 +647,114 @@ static int do_detach(int argc, char **argv)
return 0;
}
+static int netfilter_link_compar(const void *a, const void *b)
+{
+ const struct bpf_link_info *nfa = a;
+ const struct bpf_link_info *nfb = b;
+ int delta;
+
+ delta = nfa->netfilter.pf - nfb->netfilter.pf;
+ if (delta)
+ return delta;
+
+ delta = nfa->netfilter.hooknum - nfb->netfilter.hooknum;
+ if (delta)
+ return delta;
+
+ if (nfa->netfilter.priority < nfb->netfilter.priority)
+ return -1;
+ if (nfa->netfilter.priority > nfb->netfilter.priority)
+ return 1;
+
+ return nfa->netfilter.flags - nfb->netfilter.flags;
+}
+
+static void show_link_netfilter(void)
+{
+ unsigned int nf_link_len = 0, nf_link_count = 0;
+ struct bpf_link_info *nf_link_info = NULL;
+ __u32 id = 0;
+
+ while (true) {
+ struct bpf_link_info info;
+ int fd, err;
+ __u32 len;
+
+ err = bpf_link_get_next_id(id, &id);
+ if (err) {
+ if (errno == ENOENT)
+ break;
+ p_err("can't get next link: %s (id %d)", strerror(errno), id);
+ break;
+ }
+
+ fd = bpf_link_get_fd_by_id(id);
+ if (fd < 0) {
+ p_err("can't get link by id (%u): %s", id, strerror(errno));
+ continue;
+ }
+
+ memset(&info, 0, sizeof(info));
+ len = sizeof(info);
+
+ err = bpf_link_get_info_by_fd(fd, &info, &len);
+
+ close(fd);
+
+ if (err) {
+ p_err("can't get link info for fd %d: %s", fd, strerror(errno));
+ continue;
+ }
+
+ if (info.type != BPF_LINK_TYPE_NETFILTER)
+ continue;
+
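+ /* Grow the snapshot array in chunks of 16 links, bounding the
+ * link count so the allocation size cannot overflow.
+ */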
+ if (nf_link_count >= nf_link_len) {
+ static const unsigned int max_link_count = INT_MAX / sizeof(info);
+ struct bpf_link_info *expand;
+
+ if (nf_link_count > max_link_count) {
+ p_err("cannot handle more than %u links\n", max_link_count);
+ break;
+ }
+
+ nf_link_len += 16;
+
+ expand = realloc(nf_link_info, nf_link_len * sizeof(info));
+ if (!expand) {
+ p_err("realloc: %s", strerror(errno));
+ break;
+ }
+
+ nf_link_info = expand;
+ }
+
+ nf_link_info[nf_link_count] = info;
+ nf_link_count++;
+ }
+
+ qsort(nf_link_info, nf_link_count, sizeof(*nf_link_info), netfilter_link_compar);
+
+ for (id = 0; id < nf_link_count; id++) {
+ NET_START_OBJECT;
+ if (json_output)
+ netfilter_dump_json(&nf_link_info[id], json_wtr);
+ else
+ netfilter_dump_plain(&nf_link_info[id]);
+
+ NET_DUMP_UINT("id", " prog_id %u", nf_link_info[id].prog_id);
+ NET_END_OBJECT;
+ }
+
+ free(nf_link_info);
+}
+
static int do_show(int argc, char **argv)
{
struct bpf_attach_info attach_info = {};
int i, sock, ret, filter_idx = -1;
struct bpf_netdev_t dev_array;
- unsigned int nl_pid;
+ unsigned int nl_pid = 0;
char err_buf[256];
if (argc == 2) {
@@ -401,7 +769,7 @@ static int do_show(int argc, char **argv)
if (ret)
return -1;
- sock = libbpf_netlink_open(&nl_pid);
+ sock = netlink_open(&nl_pid);
if (sock < 0) {
fprintf(stderr, "failed to open netlink sock\n");
return -1;
@@ -416,7 +784,7 @@ static int do_show(int argc, char **argv)
jsonw_start_array(json_wtr);
NET_START_OBJECT;
NET_START_ARRAY("xdp", "%s:\n");
- ret = libbpf_nl_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
+ ret = netlink_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
NET_END_ARRAY("\n");
if (!ret) {
@@ -435,6 +803,10 @@ static int do_show(int argc, char **argv)
NET_DUMP_UINT("id", "id %u", attach_info.flow_dissector_id);
NET_END_ARRAY("\n");
+ NET_START_ARRAY("netfilter", "%s:\n");
+ show_link_netfilter();
+ NET_END_ARRAY("\n");
+
NET_END_OBJECT;
if (json_output)
jsonw_end_array(json_wtr);
@@ -458,20 +830,21 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s { show | list } [dev <devname>]\n"
- " %s %s attach ATTACH_TYPE PROG dev <devname> [ overwrite ]\n"
- " %s %s detach ATTACH_TYPE dev <devname>\n"
- " %s %s help\n"
+ "Usage: %1$s %2$s { show | list } [dev <devname>]\n"
+ " %1$s %2$s attach ATTACH_TYPE PROG dev <devname> [ overwrite ]\n"
+ " %1$s %2$s detach ATTACH_TYPE dev <devname>\n"
+ " %1$s %2$s help\n"
"\n"
" " HELP_SPEC_PROGRAM "\n"
" ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n"
+ " " HELP_SPEC_OPTIONS " }\n"
"\n"
"Note: Only xdp and tc attachments are supported now.\n"
" For progs attached to cgroups, use \"bpftool cgroup\"\n"
" to dump program attachments. For program types\n"
" sk_{filter,skb,msg,reuseport} and lwt/seg6, please\n"
- " consult iproute2.\n",
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
+ " consult iproute2.\n"
+ "",
bin_name, argv[-2]);
return 0;
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
index 3341aa14acda..91743445e4c7 100644
--- a/tools/bpf/bpftool/perf.c
+++ b/tools/bpf/bpftool/perf.c
@@ -2,7 +2,9 @@
// Copyright (C) 2018 Facebook
// Author: Yonghong Song <yhs@fb.com>
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
@@ -11,7 +13,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
-#include <ftw.h>
+#include <dirent.h>
#include <bpf/bpf.h>
@@ -147,81 +149,83 @@ static void print_perf_plain(int pid, int fd, __u32 prog_id, __u32 fd_type,
}
}
-static int show_proc(const char *fpath, const struct stat *sb,
- int tflag, struct FTW *ftwbuf)
+static int show_proc(void)
{
+ struct dirent *proc_de, *pid_fd_de;
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
- int err, pid = 0, fd = 0;
+ DIR *proc, *pid_fd;
+ int err, pid, fd;
const char *pch;
char buf[4096];
- /* prefix always /proc */
- pch = fpath + 5;
- if (*pch == '\0')
- return 0;
+ proc = opendir("/proc");
+ if (!proc)
+ return -1;
- /* pid should be all numbers */
- pch++;
- while (isdigit(*pch)) {
- pid = pid * 10 + *pch - '0';
- pch++;
- }
- if (*pch == '\0')
- return 0;
- if (*pch != '/')
- return FTW_SKIP_SUBTREE;
-
- /* check /proc/<pid>/fd directory */
- pch++;
- if (strncmp(pch, "fd", 2))
- return FTW_SKIP_SUBTREE;
- pch += 2;
- if (*pch == '\0')
- return 0;
- if (*pch != '/')
- return FTW_SKIP_SUBTREE;
-
- /* check /proc/<pid>/fd/<fd_num> */
- pch++;
- while (isdigit(*pch)) {
- fd = fd * 10 + *pch - '0';
- pch++;
- }
- if (*pch != '\0')
- return FTW_SKIP_SUBTREE;
+ while ((proc_de = readdir(proc))) {
+ pid = 0;
+ pch = proc_de->d_name;
- /* query (pid, fd) for potential perf events */
- len = sizeof(buf);
- err = bpf_task_fd_query(pid, fd, 0, buf, &len, &prog_id, &fd_type,
- &probe_offset, &probe_addr);
- if (err < 0)
- return 0;
+ /* pid should be all numbers */
+ while (isdigit(*pch)) {
+ pid = pid * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
- if (json_output)
- print_perf_json(pid, fd, prog_id, fd_type, buf, probe_offset,
- probe_addr);
- else
- print_perf_plain(pid, fd, prog_id, fd_type, buf, probe_offset,
- probe_addr);
+ err = snprintf(buf, sizeof(buf), "/proc/%s/fd", proc_de->d_name);
+ if (err < 0 || err >= (int)sizeof(buf))
+ continue;
+
+ pid_fd = opendir(buf);
+ if (!pid_fd)
+ continue;
+ while ((pid_fd_de = readdir(pid_fd))) {
+ fd = 0;
+ pch = pid_fd_de->d_name;
+
+ /* fd should be all numbers */
+ while (isdigit(*pch)) {
+ fd = fd * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
+
+ /* query (pid, fd) for potential perf events */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(pid, fd, 0, buf, &len,
+ &prog_id, &fd_type,
+ &probe_offset, &probe_addr);
+ if (err < 0)
+ continue;
+
+ if (json_output)
+ print_perf_json(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ else
+ print_perf_plain(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ }
+ closedir(pid_fd);
+ }
+ closedir(proc);
return 0;
}
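
The rewritten show_proc() above is built around bpf_task_fd_query(). As a reference point only (not part of the patch), a minimal sketch of querying a single (pid, fd) pair with that documented libbpf wrapper:

/* Illustrative sketch: ask the kernel whether this (pid, fd) pair is a
 * perf event with a BPF program attached, and print it if so.
 */
#include <stdio.h>
#include <bpf/bpf.h>

static void query_one(int pid, int fd)
{
	char buf[4096];
	__u32 len = sizeof(buf), prog_id, fd_type;
	__u64 probe_offset, probe_addr;

	if (bpf_task_fd_query(pid, fd, 0, buf, &len, &prog_id, &fd_type,
			      &probe_offset, &probe_addr))
		return;		/* not a perf event fd, or nothing attached */

	printf("pid %d fd %d runs prog %u (fd_type %u, name '%s')\n",
	       pid, fd, prog_id, fd_type, buf);
}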
static int do_show(int argc, char **argv)
{
- int flags = FTW_ACTIONRETVAL | FTW_PHYS;
- int err = 0, nopenfd = 16;
+ int err;
if (!has_perf_query_support())
return -1;
if (json_output)
jsonw_start_array(json_wtr);
- if (nftw("/proc", show_proc, nopenfd, flags) == -1) {
- p_err("%s", strerror(errno));
- err = -1;
- }
+ err = show_proc();
if (json_output)
jsonw_end_array(json_wtr);
@@ -231,7 +235,10 @@ static int do_show(int argc, char **argv)
static int do_help(int argc, char **argv)
{
fprintf(stderr,
- "Usage: %s %s { show | list | help }\n"
+ "Usage: %1$s %2$s { show | list }\n"
+ " %1$s %2$s help }\n"
+ "\n"
+ " " HELP_SPEC_OPTIONS " }\n"
"",
bin_name, argv[-2]);
diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
new file mode 100644
index 000000000000..00c77edb6331
--- /dev/null
+++ b/tools/bpf/bpftool/pids.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2020 Facebook */
+#include <errno.h>
+#include <linux/err.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <bpf/bpf.h>
+#include <bpf/hashmap.h>
+
+#include "main.h"
+#include "skeleton/pid_iter.h"
+
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+
+int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
+{
+ return -ENOTSUP;
+}
+void delete_obj_refs_table(struct hashmap *map) {}
+void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix) {}
+void emit_obj_refs_json(struct hashmap *map, __u32 id, json_writer_t *json_writer) {}
+
+#else /* BPFTOOL_WITHOUT_SKELETONS */
+
+#include "pid_iter.skel.h"
+
+static void add_ref(struct hashmap *map, struct pid_iter_entry *e)
+{
+ struct hashmap_entry *entry;
+ struct obj_refs *refs;
+ struct obj_ref *ref;
+ int err, i;
+ void *tmp;
+
+ hashmap__for_each_key_entry(map, entry, e->id) {
+ refs = entry->pvalue;
+
+ for (i = 0; i < refs->ref_cnt; i++) {
+ if (refs->refs[i].pid == e->pid)
+ return;
+ }
+
+ tmp = realloc(refs->refs, (refs->ref_cnt + 1) * sizeof(*ref));
+ if (!tmp) {
+ p_err("failed to re-alloc memory for ID %u, PID %d, COMM %s...",
+ e->id, e->pid, e->comm);
+ return;
+ }
+ refs->refs = tmp;
+ ref = &refs->refs[refs->ref_cnt];
+ ref->pid = e->pid;
+ memcpy(ref->comm, e->comm, sizeof(ref->comm));
+ refs->ref_cnt++;
+
+ return;
+ }
+
+ /* new ref */
+ refs = calloc(1, sizeof(*refs));
+ if (!refs) {
+ p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
+ e->id, e->pid, e->comm);
+ return;
+ }
+
+ refs->refs = malloc(sizeof(*refs->refs));
+ if (!refs->refs) {
+ free(refs);
+ p_err("failed to alloc memory for ID %u, PID %d, COMM %s...",
+ e->id, e->pid, e->comm);
+ return;
+ }
+ ref = &refs->refs[0];
+ ref->pid = e->pid;
+ memcpy(ref->comm, e->comm, sizeof(ref->comm));
+ refs->ref_cnt = 1;
+ refs->has_bpf_cookie = e->has_bpf_cookie;
+ refs->bpf_cookie = e->bpf_cookie;
+
+ err = hashmap__append(map, e->id, refs);
+ if (err)
+ p_err("failed to append entry to hashmap for ID %u: %s",
+ e->id, strerror(errno));
+}
+
+static int __printf(2, 0)
+libbpf_print_none(__maybe_unused enum libbpf_print_level level,
+ __maybe_unused const char *format,
+ __maybe_unused va_list args)
+{
+ return 0;
+}
+
+int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
+{
+ struct pid_iter_entry *e;
+ char buf[4096 / sizeof(*e) * sizeof(*e)];
+ struct pid_iter_bpf *skel;
+ int err, ret, fd = -1, i;
+ libbpf_print_fn_t default_print;
+
+ *map = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(*map)) {
+ p_err("failed to create hashmap for PID references");
+ return -1;
+ }
+ set_max_rlimit();
+
+ skel = pid_iter_bpf__open();
+ if (!skel) {
+ p_err("failed to open PID iterator skeleton");
+ return -1;
+ }
+
+ skel->rodata->obj_type = type;
+
+ /* we don't want output polluted with libbpf errors if bpf_iter is not
+ * supported
+ */
+ default_print = libbpf_set_print(libbpf_print_none);
+ err = pid_iter_bpf__load(skel);
+ libbpf_set_print(default_print);
+ if (err) {
+ /* too bad, kernel doesn't support BPF iterators yet */
+ err = 0;
+ goto out;
+ }
+ err = pid_iter_bpf__attach(skel);
+ if (err) {
+ /* if we loaded above successfully, attach has to succeed */
+ p_err("failed to attach PID iterator: %d", err);
+ goto out;
+ }
+
+ fd = bpf_iter_create(bpf_link__fd(skel->links.iter));
+ if (fd < 0) {
+ err = -errno;
+ p_err("failed to create PID iterator session: %d", err);
+ goto out;
+ }
+
+ while (true) {
+ ret = read(fd, buf, sizeof(buf));
+ if (ret < 0) {
+ if (errno == EAGAIN)
+ continue;
+ err = -errno;
+ p_err("failed to read PID iterator output: %d", err);
+ goto out;
+ }
+ if (ret == 0)
+ break;
+ if (ret % sizeof(*e)) {
+ err = -EINVAL;
+ p_err("invalid PID iterator output format");
+ goto out;
+ }
+ ret /= sizeof(*e);
+
+ e = (void *)buf;
+ for (i = 0; i < ret; i++, e++) {
+ add_ref(*map, e);
+ }
+ }
+ err = 0;
+out:
+ if (fd >= 0)
+ close(fd);
+ pid_iter_bpf__destroy(skel);
+ return err;
+}
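
The hunks in prog.c further down show how this table is meant to be driven; a condensed usage sketch (not part of the patch, relying on the refs_table global declared in bpftool's main.h) of the three entry points added here:

/* Illustrative usage sketch: how a "show" command consumes the PID table.
 * When bpftool is built with BPFTOOL_WITHOUT_SKELETONS, build_obj_refs_table()
 * returns -ENOTSUP and the emit helpers simply print nothing.
 */
#include "main.h"

static void example_show_with_pids(__u32 prog_id)
{
	build_obj_refs_table(&refs_table, BPF_OBJ_PROG);

	/* ... fetch and print the program itself, then append its holders ... */
	emit_obj_refs_plain(refs_table, prog_id, "\n\tpids ");

	delete_obj_refs_table(refs_table);
}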
+
+void delete_obj_refs_table(struct hashmap *map)
+{
+ struct hashmap_entry *entry;
+ size_t bkt;
+
+ if (!map)
+ return;
+
+ hashmap__for_each_entry(map, entry, bkt) {
+ struct obj_refs *refs = entry->pvalue;
+
+ free(refs->refs);
+ free(refs);
+ }
+
+ hashmap__free(map);
+}
+
+void emit_obj_refs_json(struct hashmap *map, __u32 id,
+ json_writer_t *json_writer)
+{
+ struct hashmap_entry *entry;
+
+ if (hashmap__empty(map))
+ return;
+
+ hashmap__for_each_key_entry(map, entry, id) {
+ struct obj_refs *refs = entry->pvalue;
+ int i;
+
+ if (refs->ref_cnt == 0)
+ break;
+
+ if (refs->has_bpf_cookie)
+ jsonw_lluint_field(json_writer, "bpf_cookie", refs->bpf_cookie);
+
+ jsonw_name(json_writer, "pids");
+ jsonw_start_array(json_writer);
+ for (i = 0; i < refs->ref_cnt; i++) {
+ struct obj_ref *ref = &refs->refs[i];
+
+ jsonw_start_object(json_writer);
+ jsonw_int_field(json_writer, "pid", ref->pid);
+ jsonw_string_field(json_writer, "comm", ref->comm);
+ jsonw_end_object(json_writer);
+ }
+ jsonw_end_array(json_writer);
+ break;
+ }
+}
+
+void emit_obj_refs_plain(struct hashmap *map, __u32 id, const char *prefix)
+{
+ struct hashmap_entry *entry;
+
+ if (hashmap__empty(map))
+ return;
+
+ hashmap__for_each_key_entry(map, entry, id) {
+ struct obj_refs *refs = entry->pvalue;
+ int i;
+
+ if (refs->ref_cnt == 0)
+ break;
+
+ if (refs->has_bpf_cookie)
+ printf("\n\tbpf_cookie %llu", (unsigned long long) refs->bpf_cookie);
+
+ printf("%s", prefix);
+ for (i = 0; i < refs->ref_cnt; i++) {
+ struct obj_ref *ref = &refs->refs[i];
+
+ printf("%s%s(%d)", i == 0 ? "" : ", ", ref->comm, ref->pid);
+ }
+ break;
+ }
+}
+
+
+#endif
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index f6a5974a7b0a..91b6075b2db3 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
@@ -16,6 +18,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
+#include <dirent.h>
#include <linux/err.h>
#include <linux/perf_event.h>
@@ -23,30 +26,58 @@
#include <bpf/bpf.h>
#include <bpf/btf.h>
+#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
+#include <bpf/libbpf_internal.h>
+#include <bpf/skel_internal.h>
#include "cfg.h"
#include "main.h"
#include "xlated_dumper.h"
+#define BPF_METADATA_PREFIX "bpf_metadata_"
+#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
+
enum dump_mode {
DUMP_JITED,
DUMP_XLATED,
};
+static const bool attach_types[] = {
+ [BPF_SK_SKB_STREAM_PARSER] = true,
+ [BPF_SK_SKB_STREAM_VERDICT] = true,
+ [BPF_SK_SKB_VERDICT] = true,
+ [BPF_SK_MSG_VERDICT] = true,
+ [BPF_FLOW_DISSECTOR] = true,
+ [__MAX_BPF_ATTACH_TYPE] = false,
+};
+
+/* Textual representations traditionally used by the program and kept around
+ * for the sake of backwards compatibility.
+ */
static const char * const attach_type_strings[] = {
[BPF_SK_SKB_STREAM_PARSER] = "stream_parser",
[BPF_SK_SKB_STREAM_VERDICT] = "stream_verdict",
+ [BPF_SK_SKB_VERDICT] = "skb_verdict",
[BPF_SK_MSG_VERDICT] = "msg_verdict",
- [BPF_FLOW_DISSECTOR] = "flow_dissector",
[__MAX_BPF_ATTACH_TYPE] = NULL,
};
+static struct hashmap *prog_table;
+
static enum bpf_attach_type parse_attach_type(const char *str)
{
enum bpf_attach_type type;
for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) {
+ if (attach_types[type]) {
+ const char *attach_type_str;
+
+ attach_type_str = libbpf_bpf_attach_type_str(type);
+ if (!strcmp(str, attach_type_str))
+ return type;
+ }
+
if (attach_type_strings[type] &&
is_prefix(str, attach_type_strings[type]))
return type;
@@ -55,6 +86,76 @@ static enum bpf_attach_type parse_attach_type(const char *str)
return __MAX_BPF_ATTACH_TYPE;
}
+static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
+ void **info_data, size_t *const info_data_sz)
+{
+ struct bpf_prog_info holder = {};
+ size_t needed = 0;
+ void *ptr;
+
+ if (mode == DUMP_JITED) {
+ holder.jited_prog_len = info->jited_prog_len;
+ needed += info->jited_prog_len;
+ } else {
+ holder.xlated_prog_len = info->xlated_prog_len;
+ needed += info->xlated_prog_len;
+ }
+
+ holder.nr_jited_ksyms = info->nr_jited_ksyms;
+ needed += info->nr_jited_ksyms * sizeof(__u64);
+
+ holder.nr_jited_func_lens = info->nr_jited_func_lens;
+ needed += info->nr_jited_func_lens * sizeof(__u32);
+
+ holder.nr_func_info = info->nr_func_info;
+ holder.func_info_rec_size = info->func_info_rec_size;
+ needed += info->nr_func_info * info->func_info_rec_size;
+
+ holder.nr_line_info = info->nr_line_info;
+ holder.line_info_rec_size = info->line_info_rec_size;
+ needed += info->nr_line_info * info->line_info_rec_size;
+
+ holder.nr_jited_line_info = info->nr_jited_line_info;
+ holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
+ needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
+
+ if (needed > *info_data_sz) {
+ ptr = realloc(*info_data, needed);
+ if (!ptr)
+ return -1;
+
+ *info_data = ptr;
+ *info_data_sz = needed;
+ }
+ ptr = *info_data;
+
+ if (mode == DUMP_JITED) {
+ holder.jited_prog_insns = ptr_to_u64(ptr);
+ ptr += holder.jited_prog_len;
+ } else {
+ holder.xlated_prog_insns = ptr_to_u64(ptr);
+ ptr += holder.xlated_prog_len;
+ }
+
+ holder.jited_ksyms = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_ksyms * sizeof(__u64);
+
+ holder.jited_func_lens = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_func_lens * sizeof(__u32);
+
+ holder.func_info = ptr_to_u64(ptr);
+ ptr += holder.nr_func_info * holder.func_info_rec_size;
+
+ holder.line_info = ptr_to_u64(ptr);
+ ptr += holder.nr_line_info * holder.line_info_rec_size;
+
+ holder.jited_line_info = ptr_to_u64(ptr);
+ ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
+
+ *info = holder;
+ return 0;
+}
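
prep_prog_info() exists because bpf_prog_get_info_by_fd() has to be called twice: once with zeroed array pointers so the kernel reports the element counts, and again with user buffers sized from those counts. A reduced sketch of that two-pass pattern for just the translated instructions (not part of the patch; it assumes bpftool's ptr_to_u64() helper from main.h):

/* Illustrative two-pass sketch: fetch only the xlated instructions of a
 * program. prep_prog_info() above generalizes this to every variable-length
 * array in struct bpf_prog_info.
 */
#include <stdlib.h>
#include <string.h>
#include <bpf/bpf.h>
#include "main.h"

static void *fetch_xlated(int prog_fd, __u32 *len_out)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	void *insns;

	/* pass 1: kernel fills in xlated_prog_len, copies no data yet */
	if (bpf_prog_get_info_by_fd(prog_fd, &info, &len))
		return NULL;

	insns = calloc(1, info.xlated_prog_len);
	if (!insns)
		return NULL;

	/* pass 2: point the kernel at our buffer and ask again */
	*len_out = info.xlated_prog_len;
	memset(&info, 0, sizeof(info));
	info.xlated_prog_len = *len_out;
	info.xlated_prog_insns = ptr_to_u64(insns);
	len = sizeof(info);
	if (bpf_prog_get_info_by_fd(prog_fd, &info, &len)) {
		free(insns);
		return NULL;
	}
	return insns;
}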
+
static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
{
struct timespec real_time_ts, boot_time_ts;
@@ -86,198 +187,246 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
strftime(buf, size, "%FT%T%z", &load_tm);
}
-static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
+static void show_prog_maps(int fd, __u32 num_maps)
{
- unsigned int id = 0;
- int fd, nb_fds = 0;
- void *tmp;
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+ __u32 map_ids[num_maps];
+ unsigned int i;
int err;
- while (true) {
- struct bpf_prog_info info = {};
- __u32 len = sizeof(info);
+ info.nr_map_ids = num_maps;
+ info.map_ids = ptr_to_u64(map_ids);
- err = bpf_prog_get_next_id(id, &id);
- if (err) {
- if (errno != ENOENT) {
- p_err("%s", strerror(errno));
- goto err_close_fds;
- }
- return nb_fds;
- }
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
+ if (err || !info.nr_map_ids)
+ return;
- fd = bpf_prog_get_fd_by_id(id);
- if (fd < 0) {
- p_err("can't get prog by id (%u): %s",
- id, strerror(errno));
- goto err_close_fds;
- }
+ if (json_output) {
+ jsonw_name(json_wtr, "map_ids");
+ jsonw_start_array(json_wtr);
+ for (i = 0; i < info.nr_map_ids; i++)
+ jsonw_uint(json_wtr, map_ids[i]);
+ jsonw_end_array(json_wtr);
+ } else {
+ printf(" map_ids ");
+ for (i = 0; i < info.nr_map_ids; i++)
+ printf("%u%s", map_ids[i],
+ i == info.nr_map_ids - 1 ? "" : ",");
+ }
+}
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
- if (err) {
- p_err("can't get prog info (%u): %s",
- id, strerror(errno));
- goto err_close_fd;
+static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
+{
+ struct bpf_prog_info prog_info;
+ __u32 prog_info_len;
+ __u32 map_info_len;
+ void *value = NULL;
+ __u32 *map_ids;
+ int nr_maps;
+ int key = 0;
+ int map_fd;
+ int ret;
+ __u32 i;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info_len = sizeof(prog_info);
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ return NULL;
+
+ if (!prog_info.nr_map_ids)
+ return NULL;
+
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
+ if (!map_ids)
+ return NULL;
+
+ nr_maps = prog_info.nr_map_ids;
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info.nr_map_ids = nr_maps;
+ prog_info.map_ids = ptr_to_u64(map_ids);
+ prog_info_len = sizeof(prog_info);
+
+ ret = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ goto free_map_ids;
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ map_fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (map_fd < 0)
+ goto free_map_ids;
+
+ memset(map_info, 0, sizeof(*map_info));
+ map_info_len = sizeof(*map_info);
+ ret = bpf_map_get_info_by_fd(map_fd, map_info, &map_info_len);
+ if (ret < 0) {
+ close(map_fd);
+ goto free_map_ids;
}
- if ((tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) ||
- (!tag && strncmp(nametag, info.name, BPF_OBJ_NAME_LEN))) {
- close(fd);
+ if (map_info->type != BPF_MAP_TYPE_ARRAY ||
+ map_info->key_size != sizeof(int) ||
+ map_info->max_entries != 1 ||
+ !map_info->btf_value_type_id ||
+ !strstr(map_info->name, ".rodata")) {
+ close(map_fd);
continue;
}
- if (nb_fds > 0) {
- tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
- if (!tmp) {
- p_err("failed to realloc");
- goto err_close_fd;
- }
- *fds = tmp;
+ value = malloc(map_info->value_size);
+ if (!value) {
+ close(map_fd);
+ goto free_map_ids;
}
- (*fds)[nb_fds++] = fd;
+
+ if (bpf_map_lookup_elem(map_fd, &key, value)) {
+ close(map_fd);
+ free(value);
+ value = NULL;
+ goto free_map_ids;
+ }
+
+ close(map_fd);
+ break;
}
-err_close_fd:
- close(fd);
-err_close_fds:
- while (--nb_fds >= 0)
- close((*fds)[nb_fds]);
- return -1;
+free_map_ids:
+ free(map_ids);
+ return value;
}
-static int prog_parse_fds(int *argc, char ***argv, int **fds)
+static bool has_metadata_prefix(const char *s)
{
- if (is_prefix(**argv, "id")) {
- unsigned int id;
- char *endptr;
-
- NEXT_ARGP();
+ return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
+}
- id = strtoul(**argv, &endptr, 0);
- if (*endptr) {
- p_err("can't parse %s as ID", **argv);
- return -1;
- }
- NEXT_ARGP();
+static void show_prog_metadata(int fd, __u32 num_maps)
+{
+ const struct btf_type *t_datasec, *t_var;
+ struct bpf_map_info map_info;
+ struct btf_var_secinfo *vsi;
+ bool printed_header = false;
+ unsigned int i, vlen;
+ void *value = NULL;
+ const char *name;
+ struct btf *btf;
+ int err;
- (*fds)[0] = bpf_prog_get_fd_by_id(id);
- if ((*fds)[0] < 0) {
- p_err("get by id (%u): %s", id, strerror(errno));
- return -1;
- }
- return 1;
- } else if (is_prefix(**argv, "tag")) {
- unsigned char tag[BPF_TAG_SIZE];
+ if (!num_maps)
+ return;
- NEXT_ARGP();
+ memset(&map_info, 0, sizeof(map_info));
+ value = find_metadata(fd, &map_info);
+ if (!value)
+ return;
- if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
- tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
- != BPF_TAG_SIZE) {
- p_err("can't parse tag");
- return -1;
- }
- NEXT_ARGP();
+ btf = btf__load_from_kernel_by_id(map_info.btf_id);
+ if (!btf)
+ goto out_free;
- return prog_fd_by_nametag(tag, fds, true);
- } else if (is_prefix(**argv, "name")) {
- char *name;
+ t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
+ if (!btf_is_datasec(t_datasec))
+ goto out_free;
- NEXT_ARGP();
+ vlen = btf_vlen(t_datasec);
+ vsi = btf_var_secinfos(t_datasec);
- name = **argv;
- if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
- p_err("can't parse name");
- return -1;
- }
- NEXT_ARGP();
+ /* We don't proceed to check the kinds of the elements of the DATASEC.
+ * The verifier enforces them to be BTF_KIND_VAR.
+ */
- return prog_fd_by_nametag(name, fds, false);
- } else if (is_prefix(**argv, "pinned")) {
- char *path;
+ if (json_output) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
- NEXT_ARGP();
+ for (i = 0; i < vlen; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ name = btf__name_by_offset(btf, t_var->name_off);
- path = **argv;
- NEXT_ARGP();
+ if (!has_metadata_prefix(name))
+ continue;
- (*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
- if ((*fds)[0] < 0)
- return -1;
- return 1;
- }
+ if (!printed_header) {
+ jsonw_name(json_wtr, "metadata");
+ jsonw_start_object(json_wtr);
+ printed_header = true;
+ }
- p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
- return -1;
-}
+ jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
+ err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
+ if (err) {
+ p_err("btf dump failed: %d", err);
+ break;
+ }
+ }
+ if (printed_header)
+ jsonw_end_object(json_wtr);
+ } else {
+ json_writer_t *btf_wtr;
+ struct btf_dumper d = {
+ .btf = btf,
+ .is_plain_text = true,
+ };
-int prog_parse_fd(int *argc, char ***argv)
-{
- int *fds = NULL;
- int nb_fds, fd;
+ for (i = 0; i < vlen; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ name = btf__name_by_offset(btf, t_var->name_off);
- fds = malloc(sizeof(int));
- if (!fds) {
- p_err("mem alloc failed");
- return -1;
- }
- nb_fds = prog_parse_fds(argc, argv, &fds);
- if (nb_fds != 1) {
- if (nb_fds > 1) {
- p_err("several programs match this handle");
- while (nb_fds--)
- close(fds[nb_fds]);
- }
- fd = -1;
- goto exit_free;
- }
+ if (!has_metadata_prefix(name))
+ continue;
- fd = fds[0];
-exit_free:
- free(fds);
- return fd;
-}
+ if (!printed_header) {
+ printf("\tmetadata:");
-static void show_prog_maps(int fd, u32 num_maps)
-{
- struct bpf_prog_info info = {};
- __u32 len = sizeof(info);
- __u32 map_ids[num_maps];
- unsigned int i;
- int err;
+ btf_wtr = jsonw_new(stdout);
+ if (!btf_wtr) {
+ p_err("jsonw alloc failed");
+ goto out_free;
+ }
+ d.jw = btf_wtr,
- info.nr_map_ids = num_maps;
- info.map_ids = ptr_to_u64(map_ids);
+ printed_header = true;
+ }
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
- if (err || !info.nr_map_ids)
- return;
+ printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
- if (json_output) {
- jsonw_name(json_wtr, "map_ids");
- jsonw_start_array(json_wtr);
- for (i = 0; i < info.nr_map_ids; i++)
- jsonw_uint(json_wtr, map_ids[i]);
- jsonw_end_array(json_wtr);
- } else {
- printf(" map_ids ");
- for (i = 0; i < info.nr_map_ids; i++)
- printf("%u%s", map_ids[i],
- i == info.nr_map_ids - 1 ? "" : ",");
+ jsonw_reset(btf_wtr);
+ err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
+ if (err) {
+ p_err("btf dump failed: %d", err);
+ break;
+ }
+ }
+ if (printed_header)
+ jsonw_destroy(&btf_wtr);
}
+
+out_free:
+ btf__free(btf);
+ free(value);
}
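
show_prog_metadata() above picks up read-only map variables whose names start with "bpf_metadata_". On the BPF program side this is just a const global; the sketch below is modeled on how such metadata is typically declared in the selftests and is an assumption, not part of the patch (the names and values are invented):

/* Illustrative BPF-side sketch: declaring metadata that
 * "bpftool prog show" will render. Names and values are examples only.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

volatile const char bpf_metadata_example_version[] SEC(".rodata") = "0.1";
volatile const int  bpf_metadata_example_build    SEC(".rodata") = 42;

SEC("xdp")
int pass(struct xdp_md *ctx)
{
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";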
-static void print_prog_header_json(struct bpf_prog_info *info)
+static void print_prog_header_json(struct bpf_prog_info *info, int fd)
{
+ const char *prog_type_str;
+ char prog_name[MAX_PROG_FULL_NAME];
+
jsonw_uint_field(json_wtr, "id", info->id);
- if (info->type < ARRAY_SIZE(prog_type_name))
- jsonw_string_field(json_wtr, "type",
- prog_type_name[info->type]);
+ prog_type_str = libbpf_bpf_prog_type_str(info->type);
+
+ if (prog_type_str)
+ jsonw_string_field(json_wtr, "type", prog_type_str);
else
jsonw_uint_field(json_wtr, "type", info->type);
- if (*info->name)
- jsonw_string_field(json_wtr, "name", info->name);
+ if (*info->name) {
+ get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
+ jsonw_string_field(json_wtr, "name", prog_name);
+ }
jsonw_name(json_wtr, "tag");
jsonw_printf(json_wtr, "\"" BPF_TAG_FMT "\"",
@@ -289,6 +438,8 @@ static void print_prog_header_json(struct bpf_prog_info *info)
jsonw_uint_field(json_wtr, "run_time_ns", info->run_time_ns);
jsonw_uint_field(json_wtr, "run_cnt", info->run_cnt);
}
+ if (info->recursion_misses)
+ jsonw_uint_field(json_wtr, "recursion_misses", info->recursion_misses);
}
static void print_prog_json(struct bpf_prog_info *info, int fd)
@@ -296,7 +447,7 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
char *memlock;
jsonw_start_object(json_wtr);
- print_prog_header_json(info);
+ print_prog_header_json(info, fd);
print_dev_json(info->ifindex, info->netns_dev, info->netns_ino);
if (info->load_time) {
@@ -321,7 +472,7 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
memlock = get_fdinfo(fd, "memlock");
if (memlock)
- jsonw_int_field(json_wtr, "bytes_memlock", atoi(memlock));
+ jsonw_int_field(json_wtr, "bytes_memlock", atoll(memlock));
free(memlock);
if (info->nr_map_ids)
@@ -330,31 +481,39 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
if (info->btf_id)
jsonw_int_field(json_wtr, "btf_id", info->btf_id);
- if (!hash_empty(prog_table.table)) {
- struct pinned_obj *obj;
+ if (!hashmap__empty(prog_table)) {
+ struct hashmap_entry *entry;
jsonw_name(json_wtr, "pinned");
jsonw_start_array(json_wtr);
- hash_for_each_possible(prog_table.table, obj, hash, info->id) {
- if (obj->id == info->id)
- jsonw_string(json_wtr, obj->path);
- }
+ hashmap__for_each_key_entry(prog_table, entry, info->id)
+ jsonw_string(json_wtr, entry->pvalue);
jsonw_end_array(json_wtr);
}
+ emit_obj_refs_json(refs_table, info->id, json_wtr);
+
+ show_prog_metadata(fd, info->nr_map_ids);
+
jsonw_end_object(json_wtr);
}
-static void print_prog_header_plain(struct bpf_prog_info *info)
+static void print_prog_header_plain(struct bpf_prog_info *info, int fd)
{
+ const char *prog_type_str;
+ char prog_name[MAX_PROG_FULL_NAME];
+
printf("%u: ", info->id);
- if (info->type < ARRAY_SIZE(prog_type_name))
- printf("%s ", prog_type_name[info->type]);
+ prog_type_str = libbpf_bpf_prog_type_str(info->type);
+ if (prog_type_str)
+ printf("%s ", prog_type_str);
else
printf("type %u ", info->type);
- if (*info->name)
- printf("name %s ", info->name);
+ if (*info->name) {
+ get_prog_full_name(info, fd, prog_name, sizeof(prog_name));
+ printf("name %s ", prog_name);
+ }
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
@@ -363,6 +522,8 @@ static void print_prog_header_plain(struct bpf_prog_info *info)
if (info->run_time_ns)
printf(" run_time_ns %lld run_cnt %lld",
info->run_time_ns, info->run_cnt);
+ if (info->recursion_misses)
+ printf(" recursion_misses %lld", info->recursion_misses);
printf("\n");
}
@@ -370,7 +531,7 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
{
char *memlock;
- print_prog_header_plain(info);
+ print_prog_header_plain(info, fd);
if (info->load_time) {
char buf[32];
@@ -396,19 +557,21 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
if (info->nr_map_ids)
show_prog_maps(fd, info->nr_map_ids);
- if (!hash_empty(prog_table.table)) {
- struct pinned_obj *obj;
+ if (!hashmap__empty(prog_table)) {
+ struct hashmap_entry *entry;
- hash_for_each_possible(prog_table.table, obj, hash, info->id) {
- if (obj->id == info->id)
- printf("\n\tpinned %s", obj->path);
- }
+ hashmap__for_each_key_entry(prog_table, entry, info->id)
+ printf("\n\tpinned %s", (char *)entry->pvalue);
}
if (info->btf_id)
printf("\n\tbtf_id %d", info->btf_id);
+ emit_obj_refs_plain(refs_table, info->id, "\n\tpids ");
+
printf("\n");
+
+ show_prog_metadata(fd, info->nr_map_ids);
}
static int show_prog(int fd)
@@ -417,7 +580,7 @@ static int show_prog(int fd)
__u32 len = sizeof(info);
int err;
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
return -1;
@@ -471,8 +634,16 @@ static int do_show(int argc, char **argv)
int err;
int fd;
- if (show_pinned)
- build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
+ if (show_pinned) {
+ prog_table = hashmap__new(hash_fn_for_key_as_id,
+ equal_fn_for_key_as_id, NULL);
+ if (IS_ERR(prog_table)) {
+ p_err("failed to create hashmap for pinned paths");
+ return -1;
+ }
+ build_pinned_obj_table(prog_table, BPF_OBJ_PROG);
+ }
+ build_obj_refs_table(&refs_table, BPF_OBJ_PROG);
if (argc == 2)
return do_show_subset(argc, argv);
@@ -514,6 +685,11 @@ static int do_show(int argc, char **argv)
if (json_output)
jsonw_end_array(json_wtr);
+ delete_obj_refs_table(refs_table);
+
+ if (show_pinned)
+ delete_pinned_obj_table(prog_table);
+
return err;
}
@@ -529,31 +705,34 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
char func_sig[1024];
unsigned char *buf;
__u32 member_len;
+ int fd, err = -1;
ssize_t n;
- int fd;
if (mode == DUMP_JITED) {
if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
p_info("no instructions returned");
return -1;
}
- buf = (unsigned char *)(info->jited_prog_insns);
+ buf = u64_to_ptr(info->jited_prog_insns);
member_len = info->jited_prog_len;
} else { /* DUMP_XLATED */
if (info->xlated_prog_len == 0 || !info->xlated_prog_insns) {
p_err("error retrieving insn dump: kernel.kptr_restrict set?");
return -1;
}
- buf = (unsigned char *)info->xlated_prog_insns;
+ buf = u64_to_ptr(info->xlated_prog_insns);
member_len = info->xlated_prog_len;
}
- if (info->btf_id && btf__get_from_id(info->btf_id, &btf)) {
- p_err("failed to get btf");
- return -1;
+ if (info->btf_id) {
+ btf = btf__load_from_kernel_by_id(info->btf_id);
+ if (!btf) {
+ p_err("failed to get btf");
+ return -1;
+ }
}
- func_info = (void *)info->func_info;
+ func_info = u64_to_ptr(info->func_info);
if (info->nr_line_info) {
prog_linfo = bpf_prog_linfo__new(info);
@@ -566,15 +745,15 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (fd < 0) {
p_err("can't open file %s: %s", filepath,
strerror(errno));
- return -1;
+ goto exit_free;
}
n = write(fd, buf, member_len);
close(fd);
- if (n != member_len) {
+ if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
- return -1;
+ goto exit_free;
}
if (json_output)
@@ -583,12 +762,10 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
const char *name = NULL;
if (info->ifindex) {
- name = ifindex_to_bfd_params(info->ifindex,
- info->netns_dev,
- info->netns_ino,
- &disasm_opt);
+ name = ifindex_to_arch(info->ifindex, info->netns_dev,
+ info->netns_ino, &disasm_opt);
if (!name)
- return -1;
+ goto exit_free;
}
if (info->nr_jited_func_lens && info->jited_func_lens) {
@@ -601,13 +778,13 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
__u32 i;
if (info->nr_jited_ksyms) {
kernel_syms_load(&dd);
- ksyms = (__u64 *) info->jited_ksyms;
+ ksyms = u64_to_ptr(info->jited_ksyms);
}
if (json_output)
jsonw_start_array(json_wtr);
- lens = (__u32 *) info->jited_func_lens;
+ lens = u64_to_ptr(info->jited_func_lens);
for (i = 0; i < info->nr_jited_func_lens; i++) {
if (ksyms) {
sym = kernel_syms_search(&dd, ksyms[i]);
@@ -641,10 +818,11 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
printf("%s:\n", sym_name);
}
- disasm_print_insn(img, lens[i], opcodes,
- name, disasm_opt, btf,
- prog_linfo, ksyms[i], i,
- linum);
+ if (disasm_print_insn(img, lens[i], opcodes,
+ name, disasm_opt, btf,
+ prog_linfo, ksyms[i], i,
+ linum))
+ goto exit_free;
img += lens[i];
@@ -657,47 +835,51 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (json_output)
jsonw_end_array(json_wtr);
} else {
- disasm_print_insn(buf, member_len, opcodes, name,
- disasm_opt, btf, NULL, 0, 0, false);
+ if (disasm_print_insn(buf, member_len, opcodes, name,
+ disasm_opt, btf, NULL, 0, 0,
+ false))
+ goto exit_free;
}
- } else if (visual) {
- if (json_output)
- jsonw_null(json_wtr);
- else
- dump_xlated_cfg(buf, member_len);
} else {
kernel_syms_load(&dd);
dd.nr_jited_ksyms = info->nr_jited_ksyms;
- dd.jited_ksyms = (__u64 *) info->jited_ksyms;
+ dd.jited_ksyms = u64_to_ptr(info->jited_ksyms);
dd.btf = btf;
dd.func_info = func_info;
dd.finfo_rec_size = info->func_info_rec_size;
dd.prog_linfo = prog_linfo;
if (json_output)
- dump_xlated_json(&dd, buf, member_len, opcodes,
- linum);
+ dump_xlated_json(&dd, buf, member_len, opcodes, linum);
+ else if (visual)
+ dump_xlated_cfg(&dd, buf, member_len, opcodes, linum);
else
- dump_xlated_plain(&dd, buf, member_len, opcodes,
- linum);
+ dump_xlated_plain(&dd, buf, member_len, opcodes, linum);
kernel_syms_destroy(&dd);
}
- return 0;
+ err = 0;
+
+exit_free:
+ btf__free(btf);
+ bpf_prog_linfo__free(prog_linfo);
+ return err;
}
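
The buf/ksyms/lens assignments in the hunk above switch from plain casts to u64_to_ptr() because struct bpf_prog_info carries userspace pointers as __u64 fields. For reference, the pair of conversion helpers this relies on lives in bpftool's main.h; the exact bodies below are a sketch of the usual form, not a quote of the header:

/* Hedged reference sketch of the conversion helpers used above. */
#include <linux/types.h>

static inline void *u64_to_ptr(__u64 ptr)
{
	return (void *)(unsigned long)ptr;
}

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}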
static int do_dump(int argc, char **argv)
{
- struct bpf_prog_info_linear *info_linear;
+ struct bpf_prog_info info;
+ __u32 info_len = sizeof(info);
+ size_t info_data_sz = 0;
+ void *info_data = NULL;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
enum dump_mode mode;
bool linum = false;
- int *fds = NULL;
int nb_fds, i = 0;
+ int *fds = NULL;
int err = -1;
- __u64 arrays;
if (is_prefix(*argv, "jited")) {
if (disasm_init())
@@ -723,77 +905,87 @@ static int do_dump(int argc, char **argv)
if (nb_fds < 1)
goto exit_free;
- if (is_prefix(*argv, "file")) {
- NEXT_ARG();
- if (!argc) {
- p_err("expected file path");
- goto exit_close;
- }
- if (nb_fds > 1) {
- p_err("several programs matched");
- goto exit_close;
- }
+ while (argc) {
+ if (is_prefix(*argv, "file")) {
+ NEXT_ARG();
+ if (!argc) {
+ p_err("expected file path");
+ goto exit_close;
+ }
+ if (nb_fds > 1) {
+ p_err("several programs matched");
+ goto exit_close;
+ }
- filepath = *argv;
- NEXT_ARG();
- } else if (is_prefix(*argv, "opcodes")) {
- opcodes = true;
- NEXT_ARG();
- } else if (is_prefix(*argv, "visual")) {
- if (nb_fds > 1) {
- p_err("several programs matched");
+ filepath = *argv;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "opcodes")) {
+ opcodes = true;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "visual")) {
+ if (nb_fds > 1) {
+ p_err("several programs matched");
+ goto exit_close;
+ }
+
+ visual = true;
+ NEXT_ARG();
+ } else if (is_prefix(*argv, "linum")) {
+ linum = true;
+ NEXT_ARG();
+ } else {
+ usage();
goto exit_close;
}
-
- visual = true;
- NEXT_ARG();
- } else if (is_prefix(*argv, "linum")) {
- linum = true;
- NEXT_ARG();
}
- if (argc) {
- usage();
+ if (filepath && (opcodes || visual || linum)) {
+ p_err("'file' is not compatible with 'opcodes', 'visual', or 'linum'");
+ goto exit_close;
+ }
+ if (json_output && visual) {
+ p_err("'visual' is not compatible with JSON output");
goto exit_close;
}
-
- if (mode == DUMP_JITED)
- arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
- else
- arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
-
- arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
- arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
- arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
- arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
- arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
if (json_output && nb_fds > 1)
jsonw_start_array(json_wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
- info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
- if (IS_ERR_OR_NULL(info_linear)) {
+ memset(&info, 0, sizeof(info));
+
+ err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
+ if (err) {
+ p_err("can't get prog info: %s", strerror(errno));
+ break;
+ }
+
+ err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
+ if (err) {
+ p_err("can't grow prog info_data");
+ break;
+ }
+
+ err = bpf_prog_get_info_by_fd(fds[i], &info, &info_len);
+ if (err) {
p_err("can't get prog info: %s", strerror(errno));
break;
}
if (json_output && nb_fds > 1) {
jsonw_start_object(json_wtr); /* prog object */
- print_prog_header_json(&info_linear->info);
+ print_prog_header_json(&info, fds[i]);
jsonw_name(json_wtr, "insns");
} else if (nb_fds > 1) {
- print_prog_header_plain(&info_linear->info);
+ print_prog_header_plain(&info, fds[i]);
}
- err = prog_dump(&info_linear->info, mode, filepath, opcodes,
- visual, linum);
+ err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
if (json_output && nb_fds > 1)
jsonw_end_object(json_wtr); /* prog object */
else if (i != nb_fds - 1 && nb_fds > 1)
printf("\n");
- free(info_linear);
if (err)
break;
close(fds[i]);
@@ -805,6 +997,7 @@ exit_close:
for (; i < nb_fds; i++)
close(fds[i]);
exit_free:
+ free(info_data);
free(fds);
return err;
}
@@ -850,7 +1043,7 @@ static int parse_attach_detach_args(int argc, char **argv, int *progfd,
}
if (*attach_type == BPF_FLOW_DISSECTOR) {
- *mapfd = -1;
+ *mapfd = 0;
return 0;
}
@@ -1072,12 +1265,12 @@ static int do_run(int argc, char **argv)
{
char *data_fname_in = NULL, *data_fname_out = NULL;
char *ctx_fname_in = NULL, *ctx_fname_out = NULL;
- struct bpf_prog_test_run_attr test_attr = {0};
const unsigned int default_size = SZ_32K;
void *data_in = NULL, *data_out = NULL;
void *ctx_in = NULL, *ctx_out = NULL;
unsigned int repeat = 1;
int fd, err;
+ LIBBPF_OPTS(bpf_test_run_opts, test_attr);
if (!REQ_ARGS(4))
return -1;
@@ -1195,14 +1388,13 @@ static int do_run(int argc, char **argv)
goto free_ctx_in;
}
- test_attr.prog_fd = fd;
test_attr.repeat = repeat;
test_attr.data_in = data_in;
test_attr.data_out = data_out;
test_attr.ctx_in = ctx_in;
test_attr.ctx_out = ctx_out;
- err = bpf_prog_test_run_xattr(&test_attr);
+ err = bpf_prog_test_run_opts(fd, &test_attr);
if (err) {
p_err("failed to run program: %s", strerror(errno));
goto free_ctx_out;
@@ -1266,18 +1458,66 @@ get_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
return ret;
}
+static int
+auto_attach_program(struct bpf_program *prog, const char *path)
+{
+ struct bpf_link *link;
+ int err;
+
+ link = bpf_program__attach(prog);
+ if (!link) {
+ p_info("Program %s does not support autoattach, falling back to pinning",
+ bpf_program__name(prog));
+ return bpf_obj_pin(bpf_program__fd(prog), path);
+ }
+
+ err = bpf_link__pin(link, path);
+ bpf_link__destroy(link);
+ return err;
+}
+
+static int
+auto_attach_programs(struct bpf_object *obj, const char *path)
+{
+ struct bpf_program *prog;
+ char buf[PATH_MAX];
+ int err;
+
+ bpf_object__for_each_program(prog, obj) {
+ err = pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog));
+ if (err)
+ goto err_unpin_programs;
+
+ err = auto_attach_program(prog, buf);
+ if (err)
+ goto err_unpin_programs;
+ }
+
+ return 0;
+
+err_unpin_programs:
+ while ((prog = bpf_object__prev_program(obj, prog))) {
+ if (pathname_concat(buf, sizeof(buf), path, bpf_program__name(prog)))
+ continue;
+
+ bpf_program__unpin(prog, buf);
+ }
+
+ return err;
+}
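
auto_attach_program() leans on bpf_program__attach() returning NULL (with errno set) when a program's section has no auto-attach handler, and falls back to a plain pin in that case. A caller-side sketch (not part of the patch; the object file name and pin path are hypothetical) of driving the helpers above for a whole object:

/* Illustrative caller sketch: load an object and let the helpers above
 * attach (or, failing that, pin) every program under /sys/fs/bpf/myobj.
 * Pinned links keep their programs alive after the object is closed.
 */
#include <bpf/libbpf.h>

static int load_and_autoattach(void)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("myprog.bpf.o", NULL);
	if (!obj)
		return -1;

	err = bpf_object__load(obj);
	if (!err)
		err = auto_attach_programs(obj, "/sys/fs/bpf/myobj");

	bpf_object__close(obj);
	return err;
}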
+
static int load_with_options(int argc, char **argv, bool first_prog_only)
{
enum bpf_prog_type common_prog_type = BPF_PROG_TYPE_UNSPEC;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts,
.relaxed_maps = relaxed_maps,
);
- struct bpf_object_load_attr load_attr = { 0 };
enum bpf_attach_type expected_attach_type;
struct map_replace *map_replace = NULL;
struct bpf_program *prog = NULL, *pos;
unsigned int old_map_fds = 0;
const char *pinmaps = NULL;
+ bool auto_attach = false;
struct bpf_object *obj;
struct bpf_map *map;
const char *pinfile;
@@ -1294,8 +1534,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
while (argc) {
if (is_prefix(*argv, "type")) {
- char *type;
-
NEXT_ARG();
if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
@@ -1305,21 +1543,26 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
if (!REQ_ARGS(1))
goto err_free_reuse_maps;
- /* Put a '/' at the end of type to appease libbpf */
- type = malloc(strlen(*argv) + 2);
- if (!type) {
- p_err("mem alloc failed");
- goto err_free_reuse_maps;
- }
- *type = 0;
- strcat(type, *argv);
- strcat(type, "/");
+ err = libbpf_prog_type_by_name(*argv, &common_prog_type,
+ &expected_attach_type);
+ if (err < 0) {
+ /* Put a '/' at the end of type to appease libbpf */
+ char *type = malloc(strlen(*argv) + 2);
- err = get_prog_type_by_name(type, &common_prog_type,
- &expected_attach_type);
- free(type);
- if (err < 0)
- goto err_free_reuse_maps;
+ if (!type) {
+ p_err("mem alloc failed");
+ goto err_free_reuse_maps;
+ }
+ *type = 0;
+ strcat(type, *argv);
+ strcat(type, "/");
+
+ err = get_prog_type_by_name(type, &common_prog_type,
+ &expected_attach_type);
+ free(type);
+ if (err < 0)
+ goto err_free_reuse_maps;
+ }
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
@@ -1357,9 +1600,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
if (fd < 0)
goto err_free_reuse_maps;
- new_map_replace = reallocarray(map_replace,
- old_map_fds + 1,
- sizeof(*map_replace));
+ new_map_replace = libbpf_reallocarray(map_replace,
+ old_map_fds + 1,
+ sizeof(*map_replace));
if (!new_map_replace) {
p_err("mem alloc failed");
goto err_free_reuse_maps;
@@ -1394,6 +1637,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_free_reuse_maps;
pinmaps = GET_ARG();
+ } else if (is_prefix(*argv, "autoattach")) {
+ auto_attach = true;
+ NEXT_ARG();
} else {
p_err("expected no more arguments, 'type', 'map' or 'dev', got: '%s'?",
*argv);
@@ -1403,8 +1649,12 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
set_max_rlimit();
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
+
obj = bpf_object__open_file(file, &open_opts);
- if (IS_ERR_OR_NULL(obj)) {
+ if (!obj) {
p_err("failed to open object file");
goto err_free_reuse_maps;
}
@@ -1413,7 +1663,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
enum bpf_prog_type prog_type = common_prog_type;
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
- const char *sec_name = bpf_program__title(pos, false);
+ const char *sec_name = bpf_program__section_name(pos);
err = get_prog_type_by_name(sec_name, &prog_type,
&expected_attach_type);
@@ -1422,7 +1672,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
}
bpf_program__set_ifindex(pos, ifindex);
- bpf_program__set_type(pos, prog_type);
+ if (bpf_program__type(pos) != prog_type)
+ bpf_program__set_type(pos, prog_type);
bpf_program__set_expected_attach_type(pos, expected_attach_type);
}
@@ -1457,7 +1708,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
j = 0;
idx = 0;
bpf_object__for_each_map(map, obj) {
- if (!bpf_map__is_offload_neutral(map))
+ if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
bpf_map__set_ifindex(map, ifindex);
if (j < old_map_fds && idx == map_replace[j].idx) {
@@ -1482,12 +1733,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;
}
- load_attr.obj = obj;
- if (verifier_logs)
- /* log_level1 + log_level2 + stats, but not stable UAPI */
- load_attr.log_level = 1 + 2 + 4;
-
- err = bpf_object__load_xattr(&load_attr);
+ err = bpf_object__load(obj);
if (err) {
p_err("failed to load object file");
goto err_close_obj;
@@ -1498,20 +1744,26 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
goto err_close_obj;
if (first_prog_only) {
- prog = bpf_program__next(NULL, obj);
+ prog = bpf_object__next_program(obj, NULL);
if (!prog) {
p_err("object file doesn't contain any bpf program");
goto err_close_obj;
}
- err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
+ if (auto_attach)
+ err = auto_attach_program(prog, pinfile);
+ else
+ err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
if (err) {
p_err("failed to pin program %s",
- bpf_program__title(prog, false));
+ bpf_program__section_name(prog));
goto err_close_obj;
}
} else {
- err = bpf_object__pin_programs(obj, pinfile);
+ if (auto_attach)
+ err = auto_attach_programs(obj, pinfile);
+ else
+ err = bpf_object__pin_programs(obj, pinfile);
if (err) {
p_err("failed to pin all programs");
goto err_close_obj;
@@ -1550,8 +1802,110 @@ err_free_reuse_maps:
return -1;
}
+static int count_open_fds(void)
+{
+ DIR *dp = opendir("/proc/self/fd");
+ struct dirent *de;
+ int cnt = -3;
+
+ if (!dp)
+ return -1;
+
+ while ((de = readdir(dp)))
+ cnt++;
+
+ closedir(dp);
+ return cnt;
+}
+
+static int try_loader(struct gen_loader_opts *gen)
+{
+ struct bpf_load_and_run_opts opts = {};
+ struct bpf_loader_ctx *ctx;
+ int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
+ sizeof(struct bpf_prog_desc));
+ int log_buf_sz = (1u << 24) - 1;
+ int err, fds_before, fd_delta;
+ char *log_buf = NULL;
+
+ ctx = alloca(ctx_sz);
+ memset(ctx, 0, ctx_sz);
+ ctx->sz = ctx_sz;
+ if (verifier_logs) {
+ ctx->log_level = 1 + 2 + 4;
+ ctx->log_size = log_buf_sz;
+ log_buf = malloc(log_buf_sz);
+ if (!log_buf)
+ return -ENOMEM;
+ ctx->log_buf = (long) log_buf;
+ }
+ opts.ctx = ctx;
+ opts.data = gen->data;
+ opts.data_sz = gen->data_sz;
+ opts.insns = gen->insns;
+ opts.insns_sz = gen->insns_sz;
+ fds_before = count_open_fds();
+ err = bpf_load_and_run(&opts);
+ fd_delta = count_open_fds() - fds_before;
+ if (err < 0 || verifier_logs) {
+ fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
+ if (fd_delta && err < 0)
+ fprintf(stderr, "loader prog leaked %d FDs\n",
+ fd_delta);
+ }
+ free(log_buf);
+ return err;
+}
+
+static int do_loader(int argc, char **argv)
+{
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
+ DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
+ struct bpf_object *obj;
+ const char *file;
+ int err = 0;
+
+ if (!REQ_ARGS(1))
+ return -1;
+ file = GET_ARG();
+
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
+
+ obj = bpf_object__open_file(file, &open_opts);
+ if (!obj) {
+ p_err("failed to open object file");
+ goto err_close_obj;
+ }
+
+ err = bpf_object__gen_loader(obj, &gen);
+ if (err)
+ goto err_close_obj;
+
+ err = bpf_object__load(obj);
+ if (err) {
+ p_err("failed to load object file");
+ goto err_close_obj;
+ }
+
+ if (verifier_logs) {
+ struct dump_data dd = {};
+
+ kernel_syms_load(&dd);
+ dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
+ kernel_syms_destroy(&dd);
+ }
+ err = try_loader(&gen);
+err_close_obj:
+ bpf_object__close(obj);
+ return err;
+}
+
static int do_load(int argc, char **argv)
{
+ if (use_loader)
+ return do_loader(argc, argv);
return load_with_options(argc, argv, true);
}
@@ -1627,6 +1981,34 @@ struct profile_metric {
.ratio_desc = "LLC misses per million insns",
.ratio_mul = 1e6,
},
+ {
+ .name = "itlb_misses",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_ITLB |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .exclude_user = 1
+ },
+ .ratio_metric = 2,
+ .ratio_desc = "itlb misses per million insns",
+ .ratio_mul = 1e6,
+ },
+ {
+ .name = "dtlb_misses",
+ .attr = {
+ .type = PERF_TYPE_HW_CACHE,
+ .config =
+ PERF_COUNT_HW_CACHE_DTLB |
+ (PERF_COUNT_HW_CACHE_OP_READ << 8) |
+ (PERF_COUNT_HW_CACHE_RESULT_MISS << 16),
+ .exclude_user = 1
+ },
+ .ratio_metric = 2,
+ .ratio_desc = "dtlb misses per million insns",
+ .ratio_mul = 1e6,
+ },
};
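
The two new metrics use the standard perf hardware-cache event encoding, where the cache id, the operation and the result are packed into attr.config in 8-bit fields (as documented in include/uapi/linux/perf_event.h). A short sketch of that composition, purely for illustration (the helper name is made up):

/* Illustrative sketch: how the .config values of the itlb_misses and
 * dtlb_misses entries above are composed: id | (op << 8) | (result << 16).
 */
#include <linux/perf_event.h>

static __u64 hw_cache_config(__u64 cache_id, __u64 op, __u64 result)
{
	return cache_id | (op << 8) | (result << 16);
}

/* e.g. hw_cache_config(PERF_COUNT_HW_CACHE_DTLB,
 *                      PERF_COUNT_HW_CACHE_OP_READ,
 *                      PERF_COUNT_HW_CACHE_RESULT_MISS)
 * reproduces the dtlb_misses .config value above.
 */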
static __u64 profile_total_count;
@@ -1639,7 +2021,7 @@ static int profile_parse_metrics(int argc, char **argv)
int selected_cnt = 0;
unsigned int i;
- metric_cnt = sizeof(metrics) / sizeof(struct profile_metric);
+ metric_cnt = ARRAY_SIZE(metrics);
while (argc > 0) {
for (i = 0; i < metric_cnt; i++) {
@@ -1771,35 +2153,58 @@ static void profile_print_readings(void)
static char *profile_target_name(int tgt_fd)
{
- struct bpf_prog_info_linear *info_linear;
- struct bpf_func_info *func_info;
+ struct bpf_func_info func_info;
+ struct bpf_prog_info info = {};
+ __u32 info_len = sizeof(info);
const struct btf_type *t;
+ __u32 func_info_rec_size;
+ struct btf *btf = NULL;
char *name = NULL;
- struct btf *btf;
+ int err;
- info_linear = bpf_program__get_prog_info_linear(
- tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
- if (IS_ERR_OR_NULL(info_linear)) {
- p_err("failed to get info_linear for prog FD %d", tgt_fd);
- return NULL;
+ err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
+ if (err) {
+ p_err("failed to get info for prog FD %d", tgt_fd);
+ goto out;
}
- if (info_linear->info.btf_id == 0 ||
- btf__get_from_id(info_linear->info.btf_id, &btf)) {
+ if (info.btf_id == 0) {
p_err("prog FD %d doesn't have valid btf", tgt_fd);
goto out;
}
- func_info = (struct bpf_func_info *)(info_linear->info.func_info);
- t = btf__type_by_id(btf, func_info[0].type_id);
+ func_info_rec_size = info.func_info_rec_size;
+ if (info.nr_func_info == 0) {
+ p_err("found 0 func_info for prog FD %d", tgt_fd);
+ goto out;
+ }
+
+ memset(&info, 0, sizeof(info));
+ info.nr_func_info = 1;
+ info.func_info_rec_size = func_info_rec_size;
+ info.func_info = ptr_to_u64(&func_info);
+
+ err = bpf_prog_get_info_by_fd(tgt_fd, &info, &info_len);
+ if (err) {
+ p_err("failed to get func_info for prog FD %d", tgt_fd);
+ goto out;
+ }
+
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ if (!btf) {
+ p_err("failed to load btf for prog FD %d", tgt_fd);
+ goto out;
+ }
+
+ t = btf__type_by_id(btf, func_info.type_id);
if (!t) {
p_err("btf %d doesn't have type %d",
- info_linear->info.btf_id, func_info[0].type_id);
+ info.btf_id, func_info.type_id);
goto out;
}
name = strdup(btf__name_by_offset(btf, t->name_off));
out:
- free(info_linear);
+ btf__free(btf);
return name;
}
@@ -1820,10 +2225,38 @@ static void profile_close_perf_events(struct profiler_bpf *obj)
profile_perf_event_cnt = 0;
}
+static int profile_open_perf_event(int mid, int cpu, int map_fd)
+{
+ int pmu_fd;
+
+ pmu_fd = syscall(__NR_perf_event_open, &metrics[mid].attr,
+ -1 /*pid*/, cpu, -1 /*group_fd*/, 0);
+ if (pmu_fd < 0) {
+ if (errno == ENODEV) {
+ p_info("cpu %d may be offline, skip %s profiling.",
+ cpu, metrics[mid].name);
+ profile_perf_event_cnt++;
+ return 0;
+ }
+ return -1;
+ }
+
+ if (bpf_map_update_elem(map_fd,
+ &profile_perf_event_cnt,
+ &pmu_fd, BPF_ANY) ||
+ ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ close(pmu_fd);
+ return -1;
+ }
+
+ profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
+ return 0;
+}
+
static int profile_open_perf_events(struct profiler_bpf *obj)
{
unsigned int cpu, m;
- int map_fd, pmu_fd;
+ int map_fd;
profile_perf_events = calloc(
sizeof(int), obj->rodata->num_cpu * obj->rodata->num_metric);
@@ -1842,17 +2275,11 @@ static int profile_open_perf_events(struct profiler_bpf *obj)
if (!metrics[m].selected)
continue;
for (cpu = 0; cpu < obj->rodata->num_cpu; cpu++) {
- pmu_fd = syscall(__NR_perf_event_open, &metrics[m].attr,
- -1/*pid*/, cpu, -1/*group_fd*/, 0);
- if (pmu_fd < 0 ||
- bpf_map_update_elem(map_fd, &profile_perf_event_cnt,
- &pmu_fd, BPF_ANY) ||
- ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0)) {
+ if (profile_open_perf_event(m, cpu, map_fd)) {
p_err("failed to create event %s on cpu %d",
metrics[m].name, cpu);
return -1;
}
- profile_perf_events[profile_perf_event_cnt++] = pmu_fd;
}
}
return 0;
@@ -1924,10 +2351,10 @@ static int do_profile(int argc, char **argv)
profile_obj->rodata->num_metric = num_metric;
/* adjust map sizes */
- bpf_map__resize(profile_obj->maps.events, num_metric * num_cpu);
- bpf_map__resize(profile_obj->maps.fentry_readings, num_metric);
- bpf_map__resize(profile_obj->maps.accum_readings, num_metric);
- bpf_map__resize(profile_obj->maps.counts, 1);
+ bpf_map__set_max_entries(profile_obj->maps.events, num_metric * num_cpu);
+ bpf_map__set_max_entries(profile_obj->maps.fentry_readings, num_metric);
+ bpf_map__set_max_entries(profile_obj->maps.accum_readings, num_metric);
+ bpf_map__set_max_entries(profile_obj->maps.counts, 1);
/* change target name */
profile_tgt_name = profile_target_name(profile_tgt_fd);
@@ -1984,24 +2411,25 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s { show | list } [PROG]\n"
- " %s %s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
- " %s %s dump jited PROG [{ file FILE | opcodes | linum }]\n"
- " %s %s pin PROG FILE\n"
- " %s %s { load | loadall } OBJ PATH \\\n"
+ "Usage: %1$s %2$s { show | list } [PROG]\n"
+ " %1$s %2$s dump xlated PROG [{ file FILE | [opcodes] [linum] [visual] }]\n"
+ " %1$s %2$s dump jited PROG [{ file FILE | [opcodes] [linum] }]\n"
+ " %1$s %2$s pin PROG FILE\n"
+ " %1$s %2$s { load | loadall } OBJ PATH \\\n"
" [type TYPE] [dev NAME] \\\n"
" [map { idx IDX | name NAME } MAP]\\\n"
" [pinmaps MAP_DIR]\n"
- " %s %s attach PROG ATTACH_TYPE [MAP]\n"
- " %s %s detach PROG ATTACH_TYPE [MAP]\n"
- " %s %s run PROG \\\n"
+ " [autoattach]\n"
+ " %1$s %2$s attach PROG ATTACH_TYPE [MAP]\n"
+ " %1$s %2$s detach PROG ATTACH_TYPE [MAP]\n"
+ " %1$s %2$s run PROG \\\n"
" data_in FILE \\\n"
" [data_out FILE [data_size_out L]] \\\n"
" [ctx_in FILE [ctx_out FILE [ctx_size_out M]]] \\\n"
" [repeat N]\n"
- " %s %s profile PROG [duration DURATION] METRICs\n"
- " %s %s tracelog\n"
- " %s %s help\n"
+ " %1$s %2$s profile PROG [duration DURATION] METRICs\n"
+ " %1$s %2$s tracelog\n"
+ " %1$s %2$s help\n"
"\n"
" " HELP_SPEC_MAP "\n"
" " HELP_SPEC_PROGRAM "\n"
@@ -2012,18 +2440,19 @@ static int do_help(int argc, char **argv)
" sk_reuseport | flow_dissector | cgroup/sysctl |\n"
" cgroup/bind4 | cgroup/bind6 | cgroup/post_bind4 |\n"
" cgroup/post_bind6 | cgroup/connect4 | cgroup/connect6 |\n"
- " cgroup/sendmsg4 | cgroup/sendmsg6 | cgroup/recvmsg4 |\n"
- " cgroup/recvmsg6 | cgroup/getsockopt | cgroup/setsockopt |\n"
- " struct_ops | fentry | fexit | freplace }\n"
- " ATTACH_TYPE := { msg_verdict | stream_verdict | stream_parser |\n"
- " flow_dissector }\n"
- " METRIC := { cycles | instructions | l1d_loads | llc_misses }\n"
- " " HELP_SPEC_OPTIONS "\n"
+ " cgroup/getpeername4 | cgroup/getpeername6 |\n"
+ " cgroup/getsockname4 | cgroup/getsockname6 | cgroup/sendmsg4 |\n"
+ " cgroup/sendmsg6 | cgroup/recvmsg4 | cgroup/recvmsg6 |\n"
+ " cgroup/getsockopt | cgroup/setsockopt | cgroup/sock_release |\n"
+ " struct_ops | fentry | fexit | freplace | sk_lookup }\n"
+ " ATTACH_TYPE := { sk_msg_verdict | sk_skb_verdict | sk_skb_stream_verdict |\n"
+ " sk_skb_stream_parser | flow_dissector }\n"
+ " METRIC := { cycles | instructions | l1d_loads | llc_misses | itlb_misses | dtlb_misses }\n"
+ " " HELP_SPEC_OPTIONS " |\n"
+ " {-f|--bpffs} | {-m|--mapcompat} | {-n|--nomount} |\n"
+ " {-L|--use-loader} }\n"
"",
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2]);
+ bin_name, argv[-2]);
return 0;
}
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
new file mode 100644
index 000000000000..eb05ea53afb1
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (c) 2020 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_tracing.h>
+#include "pid_iter.h"
+
+/* keep in sync with the definition in main.h */
+enum bpf_obj_type {
+ BPF_OBJ_UNKNOWN,
+ BPF_OBJ_PROG,
+ BPF_OBJ_MAP,
+ BPF_OBJ_LINK,
+ BPF_OBJ_BTF,
+};
+
+extern const void bpf_link_fops __ksym;
+extern const void bpf_map_fops __ksym;
+extern const void bpf_prog_fops __ksym;
+extern const void btf_fops __ksym;
+
+const volatile enum bpf_obj_type obj_type = BPF_OBJ_UNKNOWN;
+
+static __always_inline __u32 get_obj_id(void *ent, enum bpf_obj_type type)
+{
+ switch (type) {
+ case BPF_OBJ_PROG:
+ return BPF_CORE_READ((struct bpf_prog *)ent, aux, id);
+ case BPF_OBJ_MAP:
+ return BPF_CORE_READ((struct bpf_map *)ent, id);
+ case BPF_OBJ_BTF:
+ return BPF_CORE_READ((struct btf *)ent, id);
+ case BPF_OBJ_LINK:
+ return BPF_CORE_READ((struct bpf_link *)ent, id);
+ default:
+ return 0;
+ }
+}
+
+/* could be used only with BPF_LINK_TYPE_PERF_EVENT links */
+static __u64 get_bpf_cookie(struct bpf_link *link)
+{
+ struct bpf_perf_link *perf_link;
+ struct perf_event *event;
+
+ perf_link = container_of(link, struct bpf_perf_link, link);
+ event = BPF_CORE_READ(perf_link, perf_file, private_data);
+ return BPF_CORE_READ(event, bpf_cookie);
+}
+
+SEC("iter/task_file")
+int iter(struct bpf_iter__task_file *ctx)
+{
+ struct file *file = ctx->file;
+ struct task_struct *task = ctx->task;
+ struct pid_iter_entry e;
+ const void *fops;
+
+ if (!file || !task)
+ return 0;
+
+ switch (obj_type) {
+ case BPF_OBJ_PROG:
+ fops = &bpf_prog_fops;
+ break;
+ case BPF_OBJ_MAP:
+ fops = &bpf_map_fops;
+ break;
+ case BPF_OBJ_BTF:
+ fops = &btf_fops;
+ break;
+ case BPF_OBJ_LINK:
+ fops = &bpf_link_fops;
+ break;
+ default:
+ return 0;
+ }
+
+ if (file->f_op != fops)
+ return 0;
+
+ __builtin_memset(&e, 0, sizeof(e));
+ e.pid = task->tgid;
+ e.id = get_obj_id(file->private_data, obj_type);
+
+ if (obj_type == BPF_OBJ_LINK) {
+ struct bpf_link *link = (struct bpf_link *) file->private_data;
+
+ if (BPF_CORE_READ(link, type) == BPF_LINK_TYPE_PERF_EVENT) {
+ e.has_bpf_cookie = true;
+ e.bpf_cookie = get_bpf_cookie(link);
+ }
+ }
+
+ bpf_probe_read_kernel_str(&e.comm, sizeof(e.comm),
+ task->group_leader->comm);
+ bpf_seq_write(ctx->meta->seq, &e, sizeof(e));
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/bpf/bpftool/skeleton/pid_iter.h b/tools/bpf/bpftool/skeleton/pid_iter.h
new file mode 100644
index 000000000000..bbb570d4cca6
--- /dev/null
+++ b/tools/bpf/bpftool/skeleton/pid_iter.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (c) 2020 Facebook */
+#ifndef __PID_ITER_H
+#define __PID_ITER_H
+
+struct pid_iter_entry {
+ __u32 id;
+ int pid;
+ __u64 bpf_cookie;
+ bool has_bpf_cookie;
+ char comm[16];
+};
+
+#endif
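
The pid_iter program above is a task_file BPF iterator: bpftool loads it through a generated skeleton (setting obj_type in the program's read-only data before load) and then reads pid_iter_entry records back to map BPF object IDs to the processes holding them. The sketch below shows only the generic attach-and-read pattern for such an iterator with plain libbpf calls; it is an illustration, assumes libbpf 1.0 error conventions (NULL return plus errno), assumes pid_iter.h is on the include path, and elides the rodata setup for obj_type that the real tool performs.

#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "pid_iter.h"

static int dump_pids(const char *bpf_obj_path)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	struct pid_iter_entry e;
	int iter_fd, err = -1;
	ssize_t n;

	obj = bpf_object__open_file(bpf_obj_path, NULL);
	if (!obj)
		return -1;
	if (bpf_object__load(obj))
		goto out_obj;

	prog = bpf_object__find_program_by_name(obj, "iter");
	if (!prog)
		goto out_obj;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		goto out_obj;

	/* An iterator instance is a plain fd; read() returns the bytes
	 * the program emitted with bpf_seq_write().
	 */
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		goto out_link;

	/* One fixed-size record per read() call, for simplicity. */
	while ((n = read(iter_fd, &e, sizeof(e))) == sizeof(e))
		printf("id %u held by pid %d (%s)\n", e.id, e.pid, e.comm);

	err = n < 0 ? -1 : 0;
	close(iter_fd);
out_link:
	bpf_link__destroy(link);
out_obj:
	bpf_object__close(obj);
	return err;
}
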
diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c
index 20034c12f7c5..ce5b65e07ab1 100644
--- a/tools/bpf/bpftool/skeleton/profiler.bpf.c
+++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
-#include "profiler.h"
-#include <linux/bpf.h>
+#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -71,7 +70,7 @@ int BPF_PROG(fentry_XXX)
static inline void
fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
{
- struct bpf_perf_event_value *before, diff, *accum;
+ struct bpf_perf_event_value *before, diff;
before = bpf_map_lookup_elem(&fentry_readings, &id);
/* only account samples with a valid fentry_reading */
@@ -96,7 +95,7 @@ int BPF_PROG(fexit_XXX)
{
struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
u32 cpu = bpf_get_smp_processor_id();
- u32 i, one = 1, zero = 0;
+ u32 i, zero = 0;
int err;
u64 *count;
@@ -116,4 +115,4 @@ int BPF_PROG(fexit_XXX)
return 0;
}
-char LICENSE[] SEC("license") = "GPL";
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/bpf/bpftool/skeleton/profiler.h b/tools/bpf/bpftool/skeleton/profiler.h
deleted file mode 100644
index 1f767e9510f7..000000000000
--- a/tools/bpf/bpftool/skeleton/profiler.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-#ifndef __PROFILER_H
-#define __PROFILER_H
-
-/* useful typedefs from vmlinux.h */
-
-typedef signed char __s8;
-typedef unsigned char __u8;
-typedef short int __s16;
-typedef short unsigned int __u16;
-typedef int __s32;
-typedef unsigned int __u32;
-typedef long long int __s64;
-typedef long long unsigned int __u64;
-
-typedef __s8 s8;
-typedef __u8 u8;
-typedef __s16 s16;
-typedef __u16 u16;
-typedef __s32 s32;
-typedef __u32 u32;
-typedef __s64 s64;
-typedef __u64 u64;
-
-enum {
- false = 0,
- true = 1,
-};
-
-#ifdef __CHECKER__
-#define __bitwise__ __attribute__((bitwise))
-#else
-#define __bitwise__
-#endif
-
-typedef __u16 __bitwise__ __le16;
-typedef __u16 __bitwise__ __be16;
-typedef __u32 __bitwise__ __le32;
-typedef __u32 __bitwise__ __be32;
-typedef __u64 __bitwise__ __le64;
-typedef __u64 __bitwise__ __be64;
-
-typedef __u16 __bitwise__ __sum16;
-typedef __u32 __bitwise__ __wsum;
-
-#endif /* __PROFILER_H */
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index 2a7befbd11ad..57c3da70aa31 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -32,7 +32,7 @@ static const struct btf *get_btf_vmlinux(void)
return btf_vmlinux;
btf_vmlinux = libbpf_find_kernel_btf();
- if (IS_ERR(btf_vmlinux))
+ if (!btf_vmlinux)
p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
return btf_vmlinux;
@@ -45,7 +45,7 @@ static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
const char *st_ops_name;
kern_btf = get_btf_vmlinux();
- if (IS_ERR(kern_btf))
+ if (!kern_btf)
return "<btf_vmlinux_not_found>";
t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
@@ -63,10 +63,8 @@ static __s32 get_map_info_type_id(void)
return map_info_type_id;
kern_btf = get_btf_vmlinux();
- if (IS_ERR(kern_btf)) {
- map_info_type_id = PTR_ERR(kern_btf);
- return map_info_type_id;
- }
+ if (!kern_btf)
+ return 0;
map_info_type_id = btf__find_by_name_kind(kern_btf, "bpf_map_info",
BTF_KIND_STRUCT);
@@ -153,7 +151,7 @@ static int get_next_struct_ops_map(const char *name, int *res_fd,
return -1;
}
- err = bpf_obj_get_info_by_fd(fd, info, &info_len);
+ err = bpf_map_get_info_by_fd(fd, info, &info_len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
@@ -252,7 +250,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data,
}
fd = bpf_map_get_fd_by_id(id);
- if (fd == -1) {
+ if (fd < 0) {
p_err("can't get map by id (%lu): %s", id, strerror(errno));
res.nr_errs++;
return res;
@@ -264,7 +262,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data,
goto done;
}
- if (bpf_obj_get_info_by_fd(fd, info, &info_len)) {
+ if (bpf_map_get_info_by_fd(fd, info, &info_len)) {
p_err("can't get map info: %s", strerror(errno));
res.nr_errs++;
goto done;
@@ -415,7 +413,7 @@ static int do_dump(int argc, char **argv)
}
kern_btf = get_btf_vmlinux();
- if (IS_ERR(kern_btf))
+ if (!kern_btf)
return -1;
if (!json_output) {
@@ -477,24 +475,51 @@ static int do_unregister(int argc, char **argv)
return cmd_retval(&res, true);
}
+static int pin_link(struct bpf_link *link, const char *pindir,
+ const char *name)
+{
+ char pinfile[PATH_MAX];
+ int err;
+
+ err = pathname_concat(pinfile, sizeof(pinfile), pindir, name);
+ if (err)
+ return -1;
+
+ return bpf_link__pin(link, pinfile);
+}
+
static int do_register(int argc, char **argv)
{
- const struct bpf_map_def *def;
+ LIBBPF_OPTS(bpf_object_open_opts, open_opts);
+ __u32 link_info_len = sizeof(struct bpf_link_info);
+ struct bpf_link_info link_info = {};
struct bpf_map_info info = {};
__u32 info_len = sizeof(info);
int nr_errs = 0, nr_maps = 0;
+ const char *linkdir = NULL;
struct bpf_object *obj;
struct bpf_link *link;
struct bpf_map *map;
const char *file;
- if (argc != 1)
+ if (argc != 1 && argc != 2)
usage();
file = GET_ARG();
+ if (argc == 1)
+ linkdir = GET_ARG();
+
+ if (linkdir && mount_bpffs_for_pin(linkdir)) {
+ p_err("can't mount bpffs for pinning");
+ return -1;
+ }
+
+ if (verifier_logs)
+ /* log_level1 + log_level2 + stats, but not stable UAPI */
+ open_opts.kernel_log_level = 1 + 2 + 4;
- obj = bpf_object__open(file);
- if (IS_ERR_OR_NULL(obj))
+ obj = bpf_object__open_file(file, &open_opts);
+ if (!obj)
return -1;
set_max_rlimit();
@@ -505,35 +530,56 @@ static int do_register(int argc, char **argv)
}
bpf_object__for_each_map(map, obj) {
- def = bpf_map__def(map);
- if (def->type != BPF_MAP_TYPE_STRUCT_OPS)
+ if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS)
continue;
link = bpf_map__attach_struct_ops(map);
- if (IS_ERR(link)) {
+ if (!link) {
p_err("can't register struct_ops %s: %s",
- bpf_map__name(map),
- strerror(-PTR_ERR(link)));
+ bpf_map__name(map), strerror(errno));
nr_errs++;
continue;
}
nr_maps++;
- bpf_link__disconnect(link);
- bpf_link__destroy(link);
-
- if (!bpf_obj_get_info_by_fd(bpf_map__fd(map), &info,
- &info_len))
- p_info("Registered %s %s id %u",
- get_kern_struct_ops_name(&info),
- bpf_map__name(map),
- info.id);
- else
+ if (bpf_map_get_info_by_fd(bpf_map__fd(map), &info,
+ &info_len)) {
/* Not p_err. The struct_ops was attached
* successfully.
*/
p_info("Registered %s but can't find id: %s",
bpf_map__name(map), strerror(errno));
+ goto clean_link;
+ }
+ if (!(bpf_map__map_flags(map) & BPF_F_LINK)) {
+ p_info("Registered %s %s id %u",
+ get_kern_struct_ops_name(&info),
+ info.name,
+ info.id);
+ goto clean_link;
+ }
+ if (bpf_link_get_info_by_fd(bpf_link__fd(link),
+ &link_info,
+ &link_info_len)) {
+ p_err("Registered %s but can't find link id: %s",
+ bpf_map__name(map), strerror(errno));
+ nr_errs++;
+ goto clean_link;
+ }
+ if (linkdir && pin_link(link, linkdir, info.name)) {
+ p_err("can't pin link %u for %s: %s",
+ link_info.id, info.name,
+ strerror(errno));
+ nr_errs++;
+ goto clean_link;
+ }
+ p_info("Registered %s %s map id %u link id %u",
+ get_kern_struct_ops_name(&info),
+ info.name, info.id, link_info.id);
+
+clean_link:
+ bpf_link__disconnect(link);
+ bpf_link__destroy(link);
}
bpf_object__close(obj);
@@ -560,16 +606,15 @@ static int do_help(int argc, char **argv)
}
fprintf(stderr,
- "Usage: %s %s { show | list } [STRUCT_OPS_MAP]\n"
- " %s %s dump [STRUCT_OPS_MAP]\n"
- " %s %s register OBJ\n"
- " %s %s unregister STRUCT_OPS_MAP\n"
- " %s %s help\n"
+ "Usage: %1$s %2$s { show | list } [STRUCT_OPS_MAP]\n"
+ " %1$s %2$s dump [STRUCT_OPS_MAP]\n"
+ " %1$s %2$s register OBJ [LINK_DIR]\n"
+ " %1$s %2$s unregister STRUCT_OPS_MAP\n"
+ " %1$s %2$s help\n"
"\n"
- " OPTIONS := { {-j|--json} [{-p|--pretty}] }\n"
- " STRUCT_OPS_MAP := [ id STRUCT_OPS_MAP_ID | name STRUCT_OPS_MAP_NAME ]\n",
- bin_name, argv[-2], bin_name, argv[-2],
- bin_name, argv[-2], bin_name, argv[-2],
+ " STRUCT_OPS_MAP := [ id STRUCT_OPS_MAP_ID | name STRUCT_OPS_MAP_NAME ]\n"
+ " " HELP_SPEC_OPTIONS " }\n"
+ "",
bin_name, argv[-2]);
return 0;
@@ -592,5 +637,6 @@ int do_struct_ops(int argc, char **argv)
err = cmd_select(cmds, argc, argv, do_help);
btf__free(btf_vmlinux);
+
return err;
}
diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c
index e80a5c79b38f..bf1f02212797 100644
--- a/tools/bpf/bpftool/tracelog.c
+++ b/tools/bpf/bpftool/tracelog.c
@@ -9,7 +9,7 @@
#include <string.h>
#include <unistd.h>
#include <linux/magic.h>
-#include <sys/fcntl.h>
+#include <fcntl.h>
#include <sys/vfs.h>
#include "main.h"
diff --git a/tools/bpf/bpftool/xlated_dumper.c b/tools/bpf/bpftool/xlated_dumper.c
index 8608cd68cdd0..da608e10c843 100644
--- a/tools/bpf/bpftool/xlated_dumper.c
+++ b/tools/bpf/bpftool/xlated_dumper.c
@@ -1,13 +1,16 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */
+#ifndef _GNU_SOURCE
#define _GNU_SOURCE
+#endif
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <bpf/libbpf.h>
+#include <bpf/libbpf_internal.h>
#include "disasm.h"
#include "json_writer.h"
@@ -32,8 +35,8 @@ void kernel_syms_load(struct dump_data *dd)
return;
while (fgets(buff, sizeof(buff), fp)) {
- tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1,
- sizeof(*dd->sym_mapping));
+ tmp = libbpf_reallocarray(dd->sym_mapping, dd->sym_count + 1,
+ sizeof(*dd->sym_mapping));
if (!tmp) {
out:
free(dd->sym_mapping);
@@ -196,6 +199,12 @@ static const char *print_imm(void *private_data,
else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
+ else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "map[idx:%u]+%u", insn->imm, (insn + 1)->imm);
+ else if (insn->src_reg == BPF_PSEUDO_FUNC)
+ snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+ "subprog[%+d]", insn->imm);
else
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"0x%llx", (unsigned long long)full_imm);
@@ -352,7 +361,8 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
}
void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
- unsigned int start_idx)
+ unsigned int start_idx,
+ bool opcodes, bool linum)
{
const struct bpf_insn_cbs cbs = {
.cb_print = print_insn_for_graph,
@@ -360,14 +370,61 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
.cb_imm = print_imm,
.private_data = dd,
};
+ const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
+ const struct bpf_line_info *last_linfo = NULL;
+ struct bpf_func_info *record = dd->func_info;
struct bpf_insn *insn_start = buf_start;
struct bpf_insn *insn_end = buf_end;
struct bpf_insn *cur = insn_start;
+ struct btf *btf = dd->btf;
+ bool double_insn = false;
+ char func_sig[1024];
for (; cur <= insn_end; cur++) {
- printf("% 4d: ", (int)(cur - insn_start + start_idx));
+ unsigned int insn_off;
+
+ if (double_insn) {
+ double_insn = false;
+ continue;
+ }
+ double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
+
+ insn_off = (unsigned int)(cur - insn_start + start_idx);
+ if (btf && record) {
+ if (record->insn_off == insn_off) {
+ btf_dumper_type_only(btf, record->type_id,
+ func_sig,
+ sizeof(func_sig));
+ if (func_sig[0] != '\0')
+ printf("; %s:\\l\\\n", func_sig);
+ record = (void *)record + dd->finfo_rec_size;
+ }
+ }
+
+ if (prog_linfo) {
+ const struct bpf_line_info *linfo;
+
+ linfo = bpf_prog_linfo__lfind(prog_linfo, insn_off, 0);
+ if (linfo && linfo != last_linfo) {
+ btf_dump_linfo_dotlabel(btf, linfo, linum);
+ last_linfo = linfo;
+ }
+ }
+
+ printf("%d: ", insn_off);
print_bpf_insn(&cbs, cur, true);
+
+ if (opcodes) {
+ printf("\\ \\ \\ \\ ");
+ fprint_hex(stdout, cur, 8, " ");
+ if (double_insn && cur <= insn_end - 1) {
+ printf(" ");
+ fprint_hex(stdout, cur + 1, 8, " ");
+ }
+ printf("\\l\\\n");
+ }
+
if (cur != insn_end)
- printf(" | ");
+ printf("| ");
}
}
diff --git a/tools/bpf/bpftool/xlated_dumper.h b/tools/bpf/bpftool/xlated_dumper.h
index 54847e174273..9a946377b0e6 100644
--- a/tools/bpf/bpftool/xlated_dumper.h
+++ b/tools/bpf/bpftool/xlated_dumper.h
@@ -34,6 +34,7 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
bool opcodes, bool linum);
void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end,
- unsigned int start_index);
+ unsigned int start_index,
+ bool opcodes, bool linum);
#endif
diff --git a/tools/bpf/resolve_btfids/.gitignore b/tools/bpf/resolve_btfids/.gitignore
new file mode 100644
index 000000000000..52d5e9721d92
--- /dev/null
+++ b/tools/bpf/resolve_btfids/.gitignore
@@ -0,0 +1,4 @@
+/fixdep
+/resolve_btfids
+/libbpf/
+/libsubcmd/
diff --git a/tools/bpf/resolve_btfids/Build b/tools/bpf/resolve_btfids/Build
new file mode 100644
index 000000000000..077de3829c72
--- /dev/null
+++ b/tools/bpf/resolve_btfids/Build
@@ -0,0 +1,12 @@
+hostprogs := resolve_btfids
+
+resolve_btfids-y += main.o
+resolve_btfids-y += rbtree.o
+resolve_btfids-y += zalloc.o
+resolve_btfids-y += string.o
+resolve_btfids-y += ctype.o
+resolve_btfids-y += str_error_r.o
+
+$(OUTPUT)%.o: ../../lib/%.c FORCE
+ $(call rule_mkdir)
+ $(call if_changed_dep,host_cc_o_c)
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
new file mode 100644
index 000000000000..ac548a7baa73
--- /dev/null
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: GPL-2.0-only
+include ../../scripts/Makefile.include
+include ../../scripts/Makefile.arch
+
+srctree := $(abspath $(CURDIR)/../../../)
+
+ifeq ($(V),1)
+ Q =
+ msg =
+else
+ Q = @
+ ifeq ($(silent),1)
+ msg =
+ else
+ msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
+ endif
+ MAKEFLAGS=--no-print-directory
+endif
+
+# Overrides for the prepare step libraries.
+HOST_OVERRIDES := AR="$(HOSTAR)" CC="$(HOSTCC)" LD="$(HOSTLD)" ARCH="$(HOSTARCH)" \
+ CROSS_COMPILE="" EXTRA_CFLAGS="$(HOSTCFLAGS)"
+
+RM ?= rm
+HOSTCC ?= gcc
+HOSTLD ?= ld
+HOSTAR ?= ar
+CROSS_COMPILE =
+
+OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
+
+LIBBPF_SRC := $(srctree)/tools/lib/bpf/
+SUBCMD_SRC := $(srctree)/tools/lib/subcmd/
+
+BPFOBJ := $(OUTPUT)/libbpf/libbpf.a
+LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/
+SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a
+SUBCMD_OUT := $(abspath $(dir $(SUBCMDOBJ)))/
+
+LIBBPF_DESTDIR := $(LIBBPF_OUT)
+LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include
+
+SUBCMD_DESTDIR := $(SUBCMD_OUT)
+SUBCMD_INCLUDE := $(SUBCMD_DESTDIR)include
+
+BINARY := $(OUTPUT)/resolve_btfids
+BINARY_IN := $(BINARY)-in.o
+
+all: $(BINARY)
+
+prepare: $(BPFOBJ) $(SUBCMDOBJ)
+
+$(OUTPUT) $(OUTPUT)/libsubcmd $(LIBBPF_OUT):
+ $(call msg,MKDIR,,$@)
+ $(Q)mkdir -p $(@)
+
+$(SUBCMDOBJ): fixdep FORCE | $(OUTPUT)/libsubcmd
+ $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(SUBCMD_OUT) \
+ DESTDIR=$(SUBCMD_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \
+ $(abspath $@) install_headers
+
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) \
+ DESTDIR=$(LIBBPF_DESTDIR) $(HOST_OVERRIDES) prefix= subdir= \
+ $(abspath $@) install_headers
+
+LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
+LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
+
+HOSTCFLAGS += -g \
+ -I$(srctree)/tools/include \
+ -I$(srctree)/tools/include/uapi \
+ -I$(LIBBPF_INCLUDE) \
+ -I$(SUBCMD_INCLUDE) \
+ $(LIBELF_FLAGS)
+
+LIBS = $(LIBELF_LIBS) -lz
+
+export srctree OUTPUT HOSTCFLAGS Q HOSTCC HOSTLD HOSTAR
+include $(srctree)/tools/build/Makefile.include
+
+$(BINARY_IN): fixdep FORCE prepare | $(OUTPUT)
+ $(Q)$(MAKE) $(build)=resolve_btfids
+
+$(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
+ $(call msg,LINK,$@)
+ $(Q)$(HOSTCC) $(BINARY_IN) $(KBUILD_HOSTLDFLAGS) -o $@ $(BPFOBJ) $(SUBCMDOBJ) $(LIBS)
+
+clean_objects := $(wildcard $(OUTPUT)/*.o \
+ $(OUTPUT)/.*.o.cmd \
+ $(OUTPUT)/.*.o.d \
+ $(LIBBPF_OUT) \
+ $(LIBBPF_DESTDIR) \
+ $(SUBCMD_OUT) \
+ $(SUBCMD_DESTDIR) \
+ $(OUTPUT)/resolve_btfids)
+
+ifneq ($(clean_objects),)
+clean: fixdep-clean
+ $(call msg,CLEAN,$(BINARY))
+ $(Q)$(RM) -rf $(clean_objects)
+else
+clean:
+endif
+
+tags:
+ $(call msg,GEN,,tags)
+ $(Q)ctags -R . $(LIBBPF_SRC) $(SUBCMD_SRC)
+
+FORCE:
+
+.PHONY: all FORCE clean tags prepare
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
new file mode 100644
index 000000000000..27a23196d58e
--- /dev/null
+++ b/tools/bpf/resolve_btfids/main.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * resolve_btfids scans ELF object for .BTF_ids section and resolves
+ * its symbols with BTF ID values.
+ *
+ * Each symbol points to 4 bytes of data and is expected to have
+ * the following name syntax:
+ *
+ * __BTF_ID__<type>__<symbol>[__<id>]
+ *
+ * type is:
+ *
+ * func - lookup BTF_KIND_FUNC symbol with <symbol> name
+ * and store its ID into the data:
+ *
+ * __BTF_ID__func__vfs_close__1:
+ * .zero 4
+ *
+ * struct - lookup BTF_KIND_STRUCT symbol with <symbol> name
+ * and store its ID into the data:
+ *
+ * __BTF_ID__struct__sk_buff__1:
+ * .zero 4
+ *
+ * union - lookup BTF_KIND_UNION symbol with <symbol> name
+ * and store its ID into the data:
+ *
+ * __BTF_ID__union__thread_union__1:
+ * .zero 4
+ *
+ * typedef - lookup BTF_KIND_TYPEDEF symbol with <symbol> name
+ * and store its ID into the data:
+ *
+ * __BTF_ID__typedef__pid_t__1:
+ * .zero 4
+ *
+ * set - store symbol size into first 4 bytes and sort following
+ * ID list
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__func__vfs_getattr__3:
+ * .zero 4
+ * __BTF_ID__func__vfs_fallocate__4:
+ * .zero 4
+ *
+ * set8 - store symbol size into first 4 bytes and sort following
+ * ID list
+ *
+ * __BTF_ID__set8__list:
+ * .zero 8
+ * list:
+ * __BTF_ID__func__vfs_getattr__3:
+ * .zero 4
+ * .word (1 << 0) | (1 << 2)
+ * __BTF_ID__func__vfs_fallocate__5:
+ * .zero 4
+ * .word (1 << 3) | (1 << 1) | (1 << 2)
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <libelf.h>
+#include <gelf.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <linux/rbtree.h>
+#include <linux/zalloc.h>
+#include <linux/err.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
+#include <subcmd/parse-options.h>
+
+#define BTF_IDS_SECTION ".BTF_ids"
+#define BTF_ID "__BTF_ID__"
+
+#define BTF_STRUCT "struct"
+#define BTF_UNION "union"
+#define BTF_TYPEDEF "typedef"
+#define BTF_FUNC "func"
+#define BTF_SET "set"
+#define BTF_SET8 "set8"
+
+#define ADDR_CNT 100
+
+struct btf_id {
+ struct rb_node rb_node;
+ char *name;
+ union {
+ int id;
+ int cnt;
+ };
+ int addr_cnt;
+ bool is_set;
+ bool is_set8;
+ Elf64_Addr addr[ADDR_CNT];
+};
+
+struct object {
+ const char *path;
+ const char *btf;
+ const char *base_btf_path;
+
+ struct {
+ int fd;
+ Elf *elf;
+ Elf_Data *symbols;
+ Elf_Data *idlist;
+ int symbols_shndx;
+ int idlist_shndx;
+ size_t strtabidx;
+ unsigned long idlist_addr;
+ } efile;
+
+ struct rb_root sets;
+ struct rb_root structs;
+ struct rb_root unions;
+ struct rb_root typedefs;
+ struct rb_root funcs;
+
+ int nr_funcs;
+ int nr_structs;
+ int nr_unions;
+ int nr_typedefs;
+};
+
+static int verbose;
+
+static int eprintf(int level, int var, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (var >= level) {
+ va_start(args, fmt);
+ ret = vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+ return ret;
+}
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_debug(fmt, ...) \
+ eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+ eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+ eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+
+static bool is_btf_id(const char *name)
+{
+ return name && !strncmp(name, BTF_ID, sizeof(BTF_ID) - 1);
+}
+
+static struct btf_id *btf_id__find(struct rb_root *root, const char *name)
+{
+ struct rb_node *p = root->rb_node;
+ struct btf_id *id;
+ int cmp;
+
+ while (p) {
+ id = rb_entry(p, struct btf_id, rb_node);
+ cmp = strcmp(id->name, name);
+ if (cmp < 0)
+ p = p->rb_left;
+ else if (cmp > 0)
+ p = p->rb_right;
+ else
+ return id;
+ }
+ return NULL;
+}
+
+static struct btf_id *
+btf_id__add(struct rb_root *root, char *name, bool unique)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct btf_id *id;
+ int cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ id = rb_entry(parent, struct btf_id, rb_node);
+ cmp = strcmp(id->name, name);
+ if (cmp < 0)
+ p = &(*p)->rb_left;
+ else if (cmp > 0)
+ p = &(*p)->rb_right;
+ else
+ return unique ? NULL : id;
+ }
+
+ id = zalloc(sizeof(*id));
+ if (id) {
+ pr_debug("adding symbol %s\n", name);
+ id->name = name;
+ rb_link_node(&id->rb_node, parent, p);
+ rb_insert_color(&id->rb_node, root);
+ }
+ return id;
+}
+
+static char *get_id(const char *prefix_end)
+{
+ /*
+ * __BTF_ID__func__vfs_truncate__0
+ * prefix_end = ^
+ * pos = ^
+ */
+ int len = strlen(prefix_end);
+ int pos = sizeof("__") - 1;
+ char *p, *id;
+
+ if (pos >= len)
+ return NULL;
+
+ id = strdup(prefix_end + pos);
+ if (id) {
+ /*
+ * __BTF_ID__func__vfs_truncate__0
+ * id = ^
+ *
+ * cut the unique id part
+ */
+ p = strrchr(id, '_');
+ p--;
+ if (*p != '_') {
+ free(id);
+ return NULL;
+ }
+ *p = '\0';
+ }
+ return id;
+}
+
+static struct btf_id *add_set(struct object *obj, char *name, bool is_set8)
+{
+ /*
+ * __BTF_ID__set__name
+ * name = ^
+ * id = ^
+ */
+ char *id = name + (is_set8 ? sizeof(BTF_SET8 "__") : sizeof(BTF_SET "__")) - 1;
+ int len = strlen(name);
+
+ if (id >= name + len) {
+ pr_err("FAILED to parse set name: %s\n", name);
+ return NULL;
+ }
+
+ return btf_id__add(&obj->sets, id, true);
+}
+
+static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
+{
+ char *id;
+
+ id = get_id(name + size);
+ if (!id) {
+ pr_err("FAILED to parse symbol name: %s\n", name);
+ return NULL;
+ }
+
+ return btf_id__add(root, id, false);
+}
+
+/* Older libelf.h and glibc elf.h might not yet define the ELF compression types. */
+#ifndef SHF_COMPRESSED
+#define SHF_COMPRESSED (1 << 11) /* Section with compressed data. */
+#endif
+
+/*
+ * The data of a compressed section should be aligned to 4
+ * (for 32-bit) or 8 (for 64-bit) bytes. The binutils ld
+ * sets sh_addralign to 1, which makes libelf fail with a
+ * misaligned-section error during the update:
+ * FAILED elf_update(WRITE): invalid section alignment
+ *
+ * While waiting for an ld fix, we fix the compressed sections'
+ * sh_addralign value manually.
+ */
+static int compressed_section_fix(Elf *elf, Elf_Scn *scn, GElf_Shdr *sh)
+{
+ int expected = gelf_getclass(elf) == ELFCLASS32 ? 4 : 8;
+
+ if (!(sh->sh_flags & SHF_COMPRESSED))
+ return 0;
+
+ if (sh->sh_addralign == expected)
+ return 0;
+
+ pr_debug2(" - fixing wrong alignment sh_addralign %u, expected %u\n",
+ sh->sh_addralign, expected);
+
+ sh->sh_addralign = expected;
+
+ if (gelf_update_shdr(scn, sh) == 0) {
+ pr_err("FAILED cannot update section header: %s\n",
+ elf_errmsg(-1));
+ return -1;
+ }
+ return 0;
+}
+
+static int elf_collect(struct object *obj)
+{
+ Elf_Scn *scn = NULL;
+ size_t shdrstrndx;
+ int idx = 0;
+ Elf *elf;
+ int fd;
+
+ fd = open(obj->path, O_RDWR, 0666);
+ if (fd == -1) {
+ pr_err("FAILED cannot open %s: %s\n",
+ obj->path, strerror(errno));
+ return -1;
+ }
+
+ elf_version(EV_CURRENT);
+
+ elf = elf_begin(fd, ELF_C_RDWR_MMAP, NULL);
+ if (!elf) {
+ close(fd);
+ pr_err("FAILED cannot create ELF descriptor: %s\n",
+ elf_errmsg(-1));
+ return -1;
+ }
+
+ obj->efile.fd = fd;
+ obj->efile.elf = elf;
+
+ elf_flagelf(elf, ELF_C_SET, ELF_F_LAYOUT);
+
+ if (elf_getshdrstrndx(elf, &shdrstrndx) != 0) {
+ pr_err("FAILED cannot get shdr str ndx\n");
+ return -1;
+ }
+
+ /*
+ * Scan all the ELF sections, looking for the .BTF_ids section
+ * data and the symbol table.
+ */
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ Elf_Data *data;
+ GElf_Shdr sh;
+ char *name;
+
+ idx++;
+ if (gelf_getshdr(scn, &sh) != &sh) {
+ pr_err("FAILED get section(%d) header\n", idx);
+ return -1;
+ }
+
+ name = elf_strptr(elf, shdrstrndx, sh.sh_name);
+ if (!name) {
+ pr_err("FAILED get section(%d) name\n", idx);
+ return -1;
+ }
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ pr_err("FAILED to get section(%d) data from %s\n",
+ idx, name);
+ return -1;
+ }
+
+ pr_debug2("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
+ idx, name, (unsigned long) data->d_size,
+ (int) sh.sh_link, (unsigned long) sh.sh_flags,
+ (int) sh.sh_type);
+
+ if (sh.sh_type == SHT_SYMTAB) {
+ obj->efile.symbols = data;
+ obj->efile.symbols_shndx = idx;
+ obj->efile.strtabidx = sh.sh_link;
+ } else if (!strcmp(name, BTF_IDS_SECTION)) {
+ obj->efile.idlist = data;
+ obj->efile.idlist_shndx = idx;
+ obj->efile.idlist_addr = sh.sh_addr;
+ }
+
+ if (compressed_section_fix(elf, scn, &sh))
+ return -1;
+ }
+
+ return 0;
+}
+
+static int symbols_collect(struct object *obj)
+{
+ Elf_Scn *scn = NULL;
+ int n, i;
+ GElf_Shdr sh;
+ char *name;
+
+ scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
+ if (!scn)
+ return -1;
+
+ if (gelf_getshdr(scn, &sh) != &sh)
+ return -1;
+
+ n = sh.sh_size / sh.sh_entsize;
+
+ /*
+ * Scan symbols and look for the ones starting with
+ * __BTF_ID__* over .BTF_ids section.
+ */
+ for (i = 0; i < n; i++) {
+ char *prefix;
+ struct btf_id *id;
+ GElf_Sym sym;
+
+ if (!gelf_getsym(obj->efile.symbols, i, &sym))
+ return -1;
+
+ if (sym.st_shndx != obj->efile.idlist_shndx)
+ continue;
+
+ name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
+ sym.st_name);
+
+ if (!is_btf_id(name))
+ continue;
+
+ /*
+ * __BTF_ID__TYPE__vfs_truncate__0
+ * prefix = ^
+ */
+ prefix = name + sizeof(BTF_ID) - 1;
+
+ /* struct */
+ if (!strncmp(prefix, BTF_STRUCT, sizeof(BTF_STRUCT) - 1)) {
+ obj->nr_structs++;
+ id = add_symbol(&obj->structs, prefix, sizeof(BTF_STRUCT) - 1);
+ /* union */
+ } else if (!strncmp(prefix, BTF_UNION, sizeof(BTF_UNION) - 1)) {
+ obj->nr_unions++;
+ id = add_symbol(&obj->unions, prefix, sizeof(BTF_UNION) - 1);
+ /* typedef */
+ } else if (!strncmp(prefix, BTF_TYPEDEF, sizeof(BTF_TYPEDEF) - 1)) {
+ obj->nr_typedefs++;
+ id = add_symbol(&obj->typedefs, prefix, sizeof(BTF_TYPEDEF) - 1);
+ /* func */
+ } else if (!strncmp(prefix, BTF_FUNC, sizeof(BTF_FUNC) - 1)) {
+ obj->nr_funcs++;
+ id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1);
+ /* set8 */
+ } else if (!strncmp(prefix, BTF_SET8, sizeof(BTF_SET8) - 1)) {
+ id = add_set(obj, prefix, true);
+ /*
+ * SET8 objects store the list's count (encoded in the
+ * symbol's size) together with the 'cnt' field itself,
+ * hence the - 1.
+ */
+ if (id) {
+ id->cnt = sym.st_size / sizeof(uint64_t) - 1;
+ id->is_set8 = true;
+ }
+ /* set */
+ } else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) {
+ id = add_set(obj, prefix, false);
+ /*
+ * SET objects store the list's count (encoded in the
+ * symbol's size) together with the 'cnt' field itself,
+ * hence the - 1.
+ */
+ if (id) {
+ id->cnt = sym.st_size / sizeof(int) - 1;
+ id->is_set = true;
+ }
+ } else {
+ pr_err("FAILED unsupported prefix %s\n", prefix);
+ return -1;
+ }
+
+ if (!id)
+ return -ENOMEM;
+
+ if (id->addr_cnt >= ADDR_CNT) {
+ pr_err("FAILED symbol %s crossed the number of allowed lists\n",
+ id->name);
+ return -1;
+ }
+ id->addr[id->addr_cnt++] = sym.st_value;
+ }
+
+ return 0;
+}
+
+static int symbols_resolve(struct object *obj)
+{
+ int nr_typedefs = obj->nr_typedefs;
+ int nr_structs = obj->nr_structs;
+ int nr_unions = obj->nr_unions;
+ int nr_funcs = obj->nr_funcs;
+ struct btf *base_btf = NULL;
+ int err, type_id;
+ struct btf *btf;
+ __u32 nr_types;
+
+ if (obj->base_btf_path) {
+ base_btf = btf__parse(obj->base_btf_path, NULL);
+ err = libbpf_get_error(base_btf);
+ if (err) {
+ pr_err("FAILED: load base BTF from %s: %s\n",
+ obj->base_btf_path, strerror(-err));
+ return -1;
+ }
+ }
+
+ btf = btf__parse_split(obj->btf ?: obj->path, base_btf);
+ err = libbpf_get_error(btf);
+ if (err) {
+ pr_err("FAILED: load BTF from %s: %s\n",
+ obj->btf ?: obj->path, strerror(-err));
+ goto out;
+ }
+
+ err = -1;
+ nr_types = btf__type_cnt(btf);
+
+ /*
+ * Iterate all the BTF types and search for collected symbol IDs.
+ */
+ for (type_id = 1; type_id < nr_types; type_id++) {
+ const struct btf_type *type;
+ struct rb_root *root;
+ struct btf_id *id;
+ const char *str;
+ int *nr;
+
+ type = btf__type_by_id(btf, type_id);
+ if (!type) {
+ pr_err("FAILED: malformed BTF, can't resolve type for ID %d\n",
+ type_id);
+ goto out;
+ }
+
+ if (btf_is_func(type) && nr_funcs) {
+ nr = &nr_funcs;
+ root = &obj->funcs;
+ } else if (btf_is_struct(type) && nr_structs) {
+ nr = &nr_structs;
+ root = &obj->structs;
+ } else if (btf_is_union(type) && nr_unions) {
+ nr = &nr_unions;
+ root = &obj->unions;
+ } else if (btf_is_typedef(type) && nr_typedefs) {
+ nr = &nr_typedefs;
+ root = &obj->typedefs;
+ } else
+ continue;
+
+ str = btf__name_by_offset(btf, type->name_off);
+ if (!str) {
+ pr_err("FAILED: malformed BTF, can't resolve name for ID %d\n",
+ type_id);
+ goto out;
+ }
+
+ id = btf_id__find(root, str);
+ if (id) {
+ if (id->id) {
+ pr_info("WARN: multiple IDs found for '%s': %d, %d - using %d\n",
+ str, id->id, type_id, id->id);
+ } else {
+ id->id = type_id;
+ (*nr)--;
+ }
+ }
+ }
+
+ err = 0;
+out:
+ btf__free(base_btf);
+ btf__free(btf);
+ return err;
+}
+
+static int id_patch(struct object *obj, struct btf_id *id)
+{
+ Elf_Data *data = obj->efile.idlist;
+ int *ptr = data->d_buf;
+ int i;
+
+ /* For set, set8, id->id may be 0 */
+ if (!id->id && !id->is_set && !id->is_set8)
+ pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name);
+
+ for (i = 0; i < id->addr_cnt; i++) {
+ unsigned long addr = id->addr[i];
+ unsigned long idx = addr - obj->efile.idlist_addr;
+
+ pr_debug("patching addr %5lu: ID %7d [%s]\n",
+ idx, id->id, id->name);
+
+ if (idx >= data->d_size) {
+ pr_err("FAILED patching index %lu out of bounds %lu\n",
+ idx, data->d_size);
+ return -1;
+ }
+
+ idx = idx / sizeof(int);
+ ptr[idx] = id->id;
+ }
+
+ return 0;
+}
+
+static int __symbols_patch(struct object *obj, struct rb_root *root)
+{
+ struct rb_node *next;
+ struct btf_id *id;
+
+ next = rb_first(root);
+ while (next) {
+ id = rb_entry(next, struct btf_id, rb_node);
+
+ if (id_patch(obj, id))
+ return -1;
+
+ next = rb_next(next);
+ }
+ return 0;
+}
+
+static int cmp_id(const void *pa, const void *pb)
+{
+ const int *a = pa, *b = pb;
+
+ return *a - *b;
+}
+
+static int sets_patch(struct object *obj)
+{
+ Elf_Data *data = obj->efile.idlist;
+ int *ptr = data->d_buf;
+ struct rb_node *next;
+
+ next = rb_first(&obj->sets);
+ while (next) {
+ unsigned long addr, idx;
+ struct btf_id *id;
+ int *base;
+ int cnt;
+
+ id = rb_entry(next, struct btf_id, rb_node);
+ addr = id->addr[0];
+ idx = addr - obj->efile.idlist_addr;
+
+ /* sets are unique */
+ if (id->addr_cnt != 1) {
+ pr_err("FAILED malformed data for set '%s'\n",
+ id->name);
+ return -1;
+ }
+
+ idx = idx / sizeof(int);
+ base = &ptr[idx] + (id->is_set8 ? 2 : 1);
+ cnt = ptr[idx];
+
+ pr_debug("sorting addr %5lu: cnt %6d [%s]\n",
+ (idx + 1) * sizeof(int), cnt, id->name);
+
+ qsort(base, cnt, id->is_set8 ? sizeof(uint64_t) : sizeof(int), cmp_id);
+
+ next = rb_next(next);
+ }
+ return 0;
+}
+
+static int symbols_patch(struct object *obj)
+{
+ int err;
+
+ if (__symbols_patch(obj, &obj->structs) ||
+ __symbols_patch(obj, &obj->unions) ||
+ __symbols_patch(obj, &obj->typedefs) ||
+ __symbols_patch(obj, &obj->funcs) ||
+ __symbols_patch(obj, &obj->sets))
+ return -1;
+
+ if (sets_patch(obj))
+ return -1;
+
+ /* Set type to ensure endian translation occurs. */
+ obj->efile.idlist->d_type = ELF_T_WORD;
+
+ elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY);
+
+ err = elf_update(obj->efile.elf, ELF_C_WRITE);
+ if (err < 0) {
+ pr_err("FAILED elf_update(WRITE): %s\n",
+ elf_errmsg(-1));
+ }
+
+ pr_debug("update %s for %s\n",
+ err >= 0 ? "ok" : "failed", obj->path);
+ return err < 0 ? -1 : 0;
+}
+
+static const char * const resolve_btfids_usage[] = {
+ "resolve_btfids [<options>] <ELF object>",
+ NULL
+};
+
+int main(int argc, const char **argv)
+{
+ struct object obj = {
+ .efile = {
+ .idlist_shndx = -1,
+ .symbols_shndx = -1,
+ },
+ .structs = RB_ROOT,
+ .unions = RB_ROOT,
+ .typedefs = RB_ROOT,
+ .funcs = RB_ROOT,
+ .sets = RB_ROOT,
+ };
+ struct option btfid_options[] = {
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show errors, etc)"),
+ OPT_STRING(0, "btf", &obj.btf, "BTF data",
+ "BTF data"),
+ OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
+ "path of file providing base BTF"),
+ OPT_END()
+ };
+ int err = -1;
+
+ argc = parse_options(argc, argv, btfid_options, resolve_btfids_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (argc != 1)
+ usage_with_options(resolve_btfids_usage, btfid_options);
+
+ obj.path = argv[0];
+
+ if (elf_collect(&obj))
+ goto out;
+
+ /*
+ * We did not find the .BTF_ids section or the symbol table;
+ * nothing to do.
+ */
+ if (obj.efile.idlist_shndx == -1 ||
+ obj.efile.symbols_shndx == -1) {
+ pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n");
+ err = 0;
+ goto out;
+ }
+
+ if (symbols_collect(&obj))
+ goto out;
+
+ if (symbols_resolve(&obj))
+ goto out;
+
+ if (symbols_patch(&obj))
+ goto out;
+
+ err = 0;
+out:
+ if (obj.efile.elf) {
+ elf_end(obj.efile.elf);
+ close(obj.efile.fd);
+ }
+ return err;
+}
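
resolve_btfids only rewrites data that something else has already placed into the .BTF_ids section; in the kernel that is done by the BTF_ID()/BTF_ID_LIST()/BTF_SET_START() macros in include/linux/btf_ids.h. The snippet below is a simplified, hypothetical illustration of how one such placeholder symbol could be emitted from C; the macro body here is an assumption and does not reproduce the kernel's exact macros.

/* Emit a 4-byte, zero-filled placeholder named according to the
 * __BTF_ID__<type>__<symbol>__<id> convention that resolve_btfids
 * expects; the tool later patches the zeros with the BTF type ID.
 */
#define BTF_ID_PLACEHOLDER(type, name)				\
	asm(".pushsection .BTF_ids,\"a\"\n"			\
	    ".global __BTF_ID__" #type "__" #name "__1\n"	\
	    "__BTF_ID__" #type "__" #name "__1:\n"		\
	    ".zero 4\n"						\
	    ".popsection\n");

BTF_ID_PLACEHOLDER(func, vfs_truncate)
BTF_ID_PLACEHOLDER(struct, sk_buff)

For set/set8 symbols, the element count is additionally derived from the symbol size (st_size), which is what the division in symbols_collect() above computes.
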
diff --git a/tools/bpf/runqslower/Makefile b/tools/bpf/runqslower/Makefile
index 39edd68afa8e..47acf6936516 100644
--- a/tools/bpf/runqslower/Makefile
+++ b/tools/bpf/runqslower/Makefile
@@ -1,84 +1,92 @@
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-OUTPUT := .output
-CLANG ?= clang
-LLC ?= llc
-LLVM_STRIP ?= llvm-strip
-DEFAULT_BPFTOOL := $(OUTPUT)/sbin/bpftool
+include ../../scripts/Makefile.include
+
+OUTPUT ?= $(abspath .output)/
+
+BPFTOOL_OUTPUT := $(OUTPUT)bpftool/
+DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)bootstrap/bpftool
BPFTOOL ?= $(DEFAULT_BPFTOOL)
LIBBPF_SRC := $(abspath ../../lib/bpf)
-BPFOBJ := $(OUTPUT)/libbpf.a
-BPF_INCLUDE := $(OUTPUT)
-INCLUDES := -I$(BPF_INCLUDE) -I$(OUTPUT) -I$(abspath ../../lib)
-CFLAGS := -g -Wall
+BPFOBJ_OUTPUT := $(OUTPUT)libbpf/
+BPFOBJ := $(BPFOBJ_OUTPUT)libbpf.a
+BPF_DESTDIR := $(BPFOBJ_OUTPUT)
+BPF_INCLUDE := $(BPF_DESTDIR)/include
+INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../include/uapi)
+CFLAGS := -g -Wall $(CLANG_CROSS_FLAGS)
+CFLAGS += $(EXTRA_CFLAGS)
+LDFLAGS += $(EXTRA_LDFLAGS)
# Try to detect best kernel BTF source
KERNEL_REL := $(shell uname -r)
-VMLINUX_BTF_PATHS := /sys/kernel/btf/vmlinux /boot/vmlinux-$(KERNEL_REL)
+VMLINUX_BTF_PATHS := $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../vmlinux /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(KERNEL_REL)
VMLINUX_BTF_PATH := $(or $(VMLINUX_BTF),$(firstword \
$(wildcard $(VMLINUX_BTF_PATHS))))
-abs_out := $(abspath $(OUTPUT))
ifeq ($(V),1)
Q =
-msg =
else
Q = @
-msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
MAKEFLAGS += --no-print-directory
submake_extras := feature_display=0
endif
.DELETE_ON_ERROR:
-.PHONY: all clean runqslower
+.PHONY: all clean runqslower libbpf_hdrs
all: runqslower
runqslower: $(OUTPUT)/runqslower
clean:
- $(call msg,CLEAN)
- $(Q)rm -rf $(OUTPUT) runqslower
+ $(call QUIET_CLEAN, runqslower)
+ $(Q)$(RM) -r $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT)
+ $(Q)$(RM) $(OUTPUT)*.o $(OUTPUT)*.d
+ $(Q)$(RM) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
+ $(Q)$(RM) $(OUTPUT)runqslower
+ $(Q)$(RM) -r .output
+
+libbpf_hdrs: $(BPFOBJ)
$(OUTPUT)/runqslower: $(OUTPUT)/runqslower.o $(BPFOBJ)
- $(call msg,BINARY,$@)
- $(Q)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
+ $(QUIET_LINK)$(CC) $(CFLAGS) $^ -lelf -lz -o $@
$(OUTPUT)/runqslower.o: runqslower.h $(OUTPUT)/runqslower.skel.h \
- $(OUTPUT)/runqslower.bpf.o
+ $(OUTPUT)/runqslower.bpf.o | libbpf_hdrs
-$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h
+$(OUTPUT)/runqslower.bpf.o: $(OUTPUT)/vmlinux.h runqslower.h | libbpf_hdrs
$(OUTPUT)/%.skel.h: $(OUTPUT)/%.bpf.o | $(BPFTOOL)
- $(call msg,GEN-SKEL,$@)
- $(Q)$(BPFTOOL) gen skeleton $< > $@
+ $(QUIET_GEN)$(BPFTOOL) gen skeleton $< > $@
$(OUTPUT)/%.bpf.o: %.bpf.c $(BPFOBJ) | $(OUTPUT)
- $(call msg,BPF,$@)
- $(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
+ $(QUIET_GEN)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
-c $(filter %.c,$^) -o $@ && \
$(LLVM_STRIP) -g $@
$(OUTPUT)/%.o: %.c | $(OUTPUT)
- $(call msg,CC,$@)
- $(Q)$(CC) $(CFLAGS) $(INCLUDES) -c $(filter %.c,$^) -o $@
+ $(QUIET_CC)$(CC) $(CFLAGS) $(INCLUDES) -c $(filter %.c,$^) -o $@
-$(OUTPUT):
- $(call msg,MKDIR,$@)
- $(Q)mkdir -p $(OUTPUT)
+$(OUTPUT) $(BPFOBJ_OUTPUT) $(BPFTOOL_OUTPUT):
+ $(QUIET_MKDIR)mkdir -p $@
$(OUTPUT)/vmlinux.h: $(VMLINUX_BTF_PATH) | $(OUTPUT) $(BPFTOOL)
- $(call msg,GEN,$@)
+ifeq ($(VMLINUX_H),)
$(Q)if [ ! -e "$(VMLINUX_BTF_PATH)" ] ; then \
echo "Couldn't find kernel BTF; set VMLINUX_BTF to" \
"specify its location." >&2; \
exit 1;\
fi
- $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@
+ $(QUIET_GEN)$(BPFTOOL) btf dump file $(VMLINUX_BTF_PATH) format c > $@
+else
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
-$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
- $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \
- OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(BPFOBJ_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(BPFOBJ_OUTPUT) \
+ DESTDIR=$(BPFOBJ_OUTPUT) prefix= $(abspath $@) install_headers
-$(DEFAULT_BPFTOOL):
- $(Q)$(MAKE) $(submake_extras) -C ../bpftool \
- prefix= OUTPUT=$(abs_out)/ DESTDIR=$(abs_out) install
+$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C ../bpftool OUTPUT=$(BPFTOOL_OUTPUT) bootstrap
diff --git a/tools/bpf/runqslower/runqslower.bpf.c b/tools/bpf/runqslower/runqslower.bpf.c
index 1f18a409f044..9a5c1f008fe6 100644
--- a/tools/bpf/runqslower/runqslower.bpf.c
+++ b/tools/bpf/runqslower/runqslower.bpf.c
@@ -11,9 +11,9 @@ const volatile __u64 min_us = 0;
const volatile pid_t targ_pid = 0;
struct {
- __uint(type, BPF_MAP_TYPE_HASH);
- __uint(max_entries, 10240);
- __type(key, u32);
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
__type(value, u64);
} start SEC(".maps");
@@ -25,15 +25,20 @@ struct {
/* record enqueue timestamp */
__always_inline
-static int trace_enqueue(u32 tgid, u32 pid)
+static int trace_enqueue(struct task_struct *t)
{
- u64 ts;
+ u32 pid = t->pid;
+ u64 *ptr;
if (!pid || (targ_pid && targ_pid != pid))
return 0;
- ts = bpf_ktime_get_ns();
- bpf_map_update_elem(&start, &pid, &ts, 0);
+ ptr = bpf_task_storage_get(&start, t, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!ptr)
+ return 0;
+
+ *ptr = bpf_ktime_get_ns();
return 0;
}
@@ -43,7 +48,7 @@ int handle__sched_wakeup(u64 *ctx)
/* TP_PROTO(struct task_struct *p) */
struct task_struct *p = (void *)ctx[0];
- return trace_enqueue(p->tgid, p->pid);
+ return trace_enqueue(p);
}
SEC("tp_btf/sched_wakeup_new")
@@ -52,7 +57,7 @@ int handle__sched_wakeup_new(u64 *ctx)
/* TP_PROTO(struct task_struct *p) */
struct task_struct *p = (void *)ctx[0];
- return trace_enqueue(p->tgid, p->pid);
+ return trace_enqueue(p);
}
SEC("tp_btf/sched_switch")
@@ -63,19 +68,23 @@ int handle__sched_switch(u64 *ctx)
*/
struct task_struct *prev = (struct task_struct *)ctx[1];
struct task_struct *next = (struct task_struct *)ctx[2];
- struct event event = {};
+ struct runq_event event = {};
u64 *tsp, delta_us;
long state;
u32 pid;
/* ivcsw: treat like an enqueue event and store timestamp */
- if (prev->state == TASK_RUNNING)
- trace_enqueue(prev->tgid, prev->pid);
+ if (prev->__state == TASK_RUNNING)
+ trace_enqueue(prev);
pid = next->pid;
+ /* On a pid mismatch, skip the bpf_task_storage_get() lookup */
+ if (!pid || (targ_pid && targ_pid != pid))
+ return 0;
+
/* fetch timestamp and calculate delta */
- tsp = bpf_map_lookup_elem(&start, &pid);
+ tsp = bpf_task_storage_get(&start, next, 0, 0);
if (!tsp)
return 0; /* missed enqueue */
@@ -91,7 +100,7 @@ int handle__sched_switch(u64 *ctx)
bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
&event, sizeof(event));
- bpf_map_delete_elem(&start, &pid);
+ bpf_task_storage_delete(&start, next);
return 0;
}
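
The runqslower.bpf.c change above replaces a global hash map keyed by pid with BPF task local storage, so the timestamp lives on the task itself and is reclaimed automatically when the task exits. A minimal, self-contained sketch of that pattern follows (not the runqslower program itself; the tracepoint and program name are placeholders):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* One __u64 slot per task, allocated on demand. */
struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} ts_map SEC(".maps");

SEC("tp_btf/sched_wakeup")
int stamp_wakeup(__u64 *ctx)
{
	/* TP_PROTO(struct task_struct *p) */
	struct task_struct *p = (void *)ctx[0];
	__u64 *slot;

	/* Create the per-task slot on first use and store a timestamp;
	 * bpf_task_storage_delete() would free it again explicitly.
	 */
	slot = bpf_task_storage_get(&ts_map, p, 0,
				    BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (slot)
		*slot = bpf_ktime_get_ns();
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
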
diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c
index d89715844952..83c5993a139a 100644
--- a/tools/bpf/runqslower/runqslower.c
+++ b/tools/bpf/runqslower/runqslower.c
@@ -4,7 +4,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/resource.h>
#include <time.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
@@ -88,19 +87,9 @@ int libbpf_print_fn(enum libbpf_print_level level,
return vfprintf(stderr, format, args);
}
-static int bump_memlock_rlimit(void)
-{
- struct rlimit rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- return setrlimit(RLIMIT_MEMLOCK, &rlim_new);
-}
-
void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
- const struct event *e = data;
+ const struct runq_event *e = data;
struct tm *tm;
char ts[32];
time_t t;
@@ -123,7 +112,6 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
- struct perf_buffer_opts pb_opts;
struct perf_buffer *pb = NULL;
struct runqslower_bpf *obj;
int err;
@@ -134,11 +122,8 @@ int main(int argc, char **argv)
libbpf_set_print(libbpf_print_fn);
- err = bump_memlock_rlimit();
- if (err) {
- fprintf(stderr, "failed to increase rlimit: %d", err);
- return 1;
- }
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
obj = runqslower_bpf__open();
if (!obj) {
@@ -165,9 +150,8 @@ int main(int argc, char **argv)
printf("Tracing run queue latency higher than %llu us\n", env.min_us);
printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)");
- pb_opts.sample_cb = handle_event;
- pb_opts.lost_cb = handle_lost_events;
- pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64, &pb_opts);
+ pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64,
+ handle_event, handle_lost_events, NULL, NULL);
err = libbpf_get_error(pb);
if (err) {
pb = NULL;
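
The runqslower.c hunk above also moves from the old perf_buffer_opts callback fields to the newer libbpf perf_buffer__new() signature (libbpf >= 0.6), where the sample and lost callbacks are passed as arguments. A sketch of the resulting polling loop, with placeholder callback bodies and an assumed events map fd:

#include <stdio.h>
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* data points at one record written with bpf_perf_event_output() */
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "lost %llu events on CPU %d\n",
		(unsigned long long)cnt, cpu);
}

static int poll_events(int events_map_fd)
{
	struct perf_buffer *pb;
	int err;

	/* 64 pages per CPU; ctx and opts are left NULL here. */
	pb = perf_buffer__new(events_map_fd, 64, on_sample, on_lost,
			      NULL, NULL);
	err = libbpf_get_error(pb);
	if (err)
		return err;

	/* Callbacks fire from inside poll(); 100 ms timeout per round. */
	while ((err = perf_buffer__poll(pb, 100)) >= 0)
		;

	perf_buffer__free(pb);
	return err;
}
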
diff --git a/tools/bpf/runqslower/runqslower.h b/tools/bpf/runqslower/runqslower.h
index 9db225425e5f..4f70f07200c2 100644
--- a/tools/bpf/runqslower/runqslower.h
+++ b/tools/bpf/runqslower/runqslower.h
@@ -4,7 +4,7 @@
#define TASK_COMM_LEN 16
-struct event {
+struct runq_event {
char task[TASK_COMM_LEN];
__u64 delta_us;
pid_t pid;
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 9ec01f4454f9..c2a95ab47379 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -74,7 +74,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
# dependencies in the cmd file
if_changed_dep = $(if $(strip $(any-prereq) $(arg-check)), \
@set -e; \
- $(echo-cmd) $(cmd_$(1)) && $(dep-cmd))
+ $(echo-cmd) $(cmd_$(1)); \
+ $(dep-cmd))
# if_changed - execute command if any prerequisite is newer than
# target, or command line has changed
@@ -98,4 +99,28 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX
###
## HOSTCC C flags
-host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(KBUILD_HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
+host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
+
+# output directory for tests below
+TMPOUT = .tmp_$$$$
+
+# try-run
+# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
+# Exit code chooses option. "$$TMP" serves as a temporary file and is
+# automatically cleaned up.
+try-run = $(shell set -e; \
+ TMP=$(TMPOUT)/tmp; \
+ mkdir -p $(TMPOUT); \
+ trap "rm -rf $(TMPOUT)" EXIT; \
+ if ($(1)) >/dev/null 2>&1; \
+ then echo "$(2)"; \
+ else echo "$(3)"; \
+ fi)
+
+# cc-option
+# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
+cc-option = $(call try-run, \
+ $(CC) -Werror $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+
+# delete partially updated (i.e. corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 727050c40f09..17cdf01e29a0 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -15,10 +15,6 @@ endef
$(call allow-override,CC,$(CROSS_COMPILE)gcc)
$(call allow-override,LD,$(CROSS_COMPILE)ld)
-HOSTCC ?= gcc
-HOSTLD ?= ld
-HOSTAR ?= ar
-
export HOSTCC HOSTLD HOSTAR
ifeq ($(V),1)
@@ -34,10 +30,18 @@ build := -f $(srctree)/tools/build/Makefile.build dir=. obj
all: $(OUTPUT)fixdep
+# Make sure there is something to clean;
+# the feature clean below checks for an existing OUTPUT
+TMP_O := $(if $(OUTPUT),$(OUTPUT)feature/,./)
+
clean:
$(call QUIET_CLEAN, fixdep)
- $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+ $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)rm -f $(OUTPUT)fixdep
+ $(call QUIET_CLEAN, feature-detect)
+ifneq ($(wildcard $(TMP_O)),)
+ $(Q)$(MAKE) -C feature OUTPUT=$(TMP_O) clean >/dev/null
+endif
$(OUTPUT)fixdep-in.o: FORCE
$(Q)$(MAKE) $(build)=fixdep
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index cd72016c3cfa..89430338a3d9 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -51,39 +51,40 @@ subdir-obj-y :=
build-file := $(dir)/Build
-include $(build-file)
-quiet_cmd_flex = FLEX $@
-quiet_cmd_bison = BISON $@
+quiet_cmd_flex = FLEX $@
+quiet_cmd_bison = BISON $@
+quiet_cmd_test = TEST $@
# Create directory unless it exists
-quiet_cmd_mkdir = MKDIR $(dir $@)
+quiet_cmd_mkdir = MKDIR $(dir $@)
cmd_mkdir = mkdir -p $(dir $@)
rule_mkdir = $(if $(wildcard $(dir $@)),,@$(call echo-cmd,mkdir) $(cmd_mkdir))
# Compile command
-quiet_cmd_cc_o_c = CC $@
+quiet_cmd_cc_o_c = CC $@
cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
-quiet_cmd_host_cc_o_c = HOSTCC $@
+quiet_cmd_host_cc_o_c = HOSTCC $@
cmd_host_cc_o_c = $(HOSTCC) $(host_c_flags) -c -o $@ $<
-quiet_cmd_cxx_o_c = CXX $@
+quiet_cmd_cxx_o_c = CXX $@
cmd_cxx_o_c = $(CXX) $(cxx_flags) -c -o $@ $<
-quiet_cmd_cpp_i_c = CPP $@
+quiet_cmd_cpp_i_c = CPP $@
cmd_cpp_i_c = $(CC) $(c_flags) -E -o $@ $<
-quiet_cmd_cc_s_c = AS $@
+quiet_cmd_cc_s_c = AS $@
cmd_cc_s_c = $(CC) $(c_flags) -S -o $@ $<
-quiet_cmd_gen = GEN $@
+quiet_cmd_gen = GEN $@
# Link aggregate command
# If there's nothing to link, create empty $@ object.
-quiet_cmd_ld_multi = LD $@
+quiet_cmd_ld_multi = LD $@
cmd_ld_multi = $(if $(strip $(obj-y)),\
$(LD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@)
-quiet_cmd_host_ld_multi = HOSTLD $@
+quiet_cmd_host_ld_multi = HOSTLD $@
cmd_host_ld_multi = $(if $(strip $(obj-y)),\
$(HOSTLD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(HOSTAR) rcs $@)
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index 3e0c019ef297..934e2777a2db 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -8,7 +8,7 @@ endif
feature_check = $(eval $(feature_check_code))
define feature_check_code
- feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
+ feature-$(1) := $(shell $(MAKE) OUTPUT=$(OUTPUT_FEATURES) CC="$(CC)" CXX="$(CXX)" CFLAGS="$(EXTRA_CFLAGS) $(FEATURE_CHECK_CFLAGS-$(1))" CXXFLAGS="$(EXTRA_CXXFLAGS) $(FEATURE_CHECK_CXXFLAGS-$(1))" LDFLAGS="$(LDFLAGS) $(FEATURE_CHECK_LDFLAGS-$(1))" -C $(feature_dir) $(OUTPUT_FEATURES)test-$1.bin >/dev/null 2>/dev/null && echo 1 || echo 0)
endef
feature_set = $(eval $(feature_set_code))
@@ -34,27 +34,24 @@ FEATURE_TESTS_BASIC := \
dwarf_getlocations \
eventfd \
fortify-source \
- sync-compare-and-swap \
get_current_dir_name \
gettid \
glibc \
- gtk2 \
- gtk2-infobar \
- libaudit \
libbfd \
+ libbfd-buildid \
libcap \
libelf \
libelf-getphdrnum \
libelf-gelf_getnote \
libelf-getshdrstrndx \
- libelf-mmap \
libnuma \
numa_num_possible_cpus \
libperl \
libpython \
- libpython-version \
libslang \
libslang-include-subdir \
+ libtraceevent \
+ libtracefs \
libcrypto \
libunwind \
pthread-attr-setaffinity-np \
@@ -67,12 +64,14 @@ FEATURE_TESTS_BASIC := \
lzma \
get_cpuid \
bpf \
+ scandirat \
sched_getcpu \
sdt \
setns \
libaio \
libzstd \
disassembler-four-args \
+ disassembler-init-styled \
file-handle
# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
@@ -82,6 +81,9 @@ FEATURE_TESTS_EXTRA := \
compile-32 \
compile-x32 \
cplus-demangle \
+ cxa-demangle \
+ gtk2 \
+ gtk2-infobar \
hello \
libbabeltrace \
libbfd-liberty \
@@ -98,7 +100,17 @@ FEATURE_TESTS_EXTRA := \
llvm \
llvm-version \
clang \
- libbpf
+ libbpf \
+ libbpf-btf__load_from_kernel_by_id \
+ libbpf-bpf_prog_load \
+ libbpf-bpf_object__next_program \
+ libbpf-bpf_object__next_map \
+ libbpf-bpf_program__set_insns \
+ libbpf-bpf_create_map \
+ libpfm4 \
+ libdebuginfod \
+ clang-bpf-co-re
+
FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
@@ -110,9 +122,8 @@ FEATURE_DISPLAY ?= \
dwarf \
dwarf_getlocations \
glibc \
- gtk2 \
- libaudit \
libbfd \
+ libbfd-buildid \
libcap \
libelf \
libnuma \
@@ -127,8 +138,13 @@ FEATURE_DISPLAY ?= \
get_cpuid \
bpf \
libaio \
- libzstd \
- disassembler-four-args
+ libzstd
+
+#
+# Declare group members of a feature to display the logical OR of the detection
+# result instead of each member result.
+#
+FEATURE_GROUP_MEMBERS-libbfd = libbfd-liberty libbfd-liberty-z
# Set FEATURE_CHECK_(C|LD)FLAGS-all for all FEATURE_TESTS features.
# If in the future we need per-feature checks/flags for features not
@@ -170,19 +186,28 @@ endif
#
# Print the result of the feature test:
#
-feature_print_status = $(eval $(feature_print_status_code)) $(info $(MSG))
+feature_print_status = $(eval $(feature_print_status_code))
+
+feature_group = $(eval $(feature_gen_group)) $(GROUP)
+
+define feature_gen_group
+ GROUP := $(1)
+ ifneq ($(feature_verbose),1)
+ GROUP += $(FEATURE_GROUP_MEMBERS-$(1))
+ endif
+endef
define feature_print_status_code
- ifeq ($(feature-$(1)), 1)
- MSG = $(shell printf '...%30s: [ \033[32mon\033[m ]' $(1))
+ ifneq (,$(filter 1,$(foreach feat,$(call feature_group,$(feat)),$(feature-$(feat)))))
+ MSG = $(shell printf '...%40s: [ \033[32mon\033[m ]' $(1))
else
- MSG = $(shell printf '...%30s: [ \033[31mOFF\033[m ]' $(1))
+ MSG = $(shell printf '...%40s: [ \033[31mOFF\033[m ]' $(1))
endif
endef
-feature_print_text = $(eval $(feature_print_text_code)) $(info $(MSG))
+feature_print_text = $(eval $(feature_print_text_code))
define feature_print_text_code
- MSG = $(shell printf '...%30s: %s' $(1) $(2))
+ MSG = $(shell printf '...%40s: %s' $(1) $(2))
endef
#
@@ -237,17 +262,29 @@ ifeq ($(VF),1)
feature_verbose := 1
endif
-ifeq ($(feature_display),1)
- $(info )
- $(info Auto-detecting system features:)
- $(foreach feat,$(FEATURE_DISPLAY),$(call feature_print_status,$(feat),))
- ifneq ($(feature_verbose),1)
- $(info )
- endif
+ifneq ($(feature_verbose),1)
+ #
+ # Determine the features to omit from the displayed message, as only the
+ # logical OR of the detection result will be shown.
+ #
+ FEATURE_OMIT := $(foreach feat,$(FEATURE_DISPLAY),$(FEATURE_GROUP_MEMBERS-$(feat)))
endif
-ifeq ($(feature_verbose),1)
- TMP := $(filter-out $(FEATURE_DISPLAY),$(FEATURE_TESTS))
- $(foreach feat,$(TMP),$(call feature_print_status,$(feat),))
+feature_display_entries = $(eval $(feature_display_entries_code))
+define feature_display_entries_code
+ ifeq ($(feature_display),1)
+ $$(info )
+ $$(info Auto-detecting system features:)
+ $(foreach feat,$(filter-out $(FEATURE_OMIT),$(FEATURE_DISPLAY)),$(call feature_print_status,$(feat),) $$(info $(MSG)))
+ endif
+
+ ifeq ($(feature_verbose),1)
+ $(eval TMP := $(filter-out $(FEATURE_DISPLAY),$(FEATURE_TESTS)))
+ $(foreach feat,$(TMP),$(call feature_print_status,$(feat),) $$(info $(MSG)))
+ endif
+endef
+
+ifeq ($(FEATURE_DISPLAY_DEFERRED),)
+ $(call feature_display_entries)
$(info )
endif
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 92012381393a..0f0aa9b7d7b5 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -1,4 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+include ../../scripts/Makefile.include
+
FILES= \
test-all.bin \
test-backtrace.bin \
@@ -7,7 +9,6 @@ FILES= \
test-dwarf_getlocations.bin \
test-eventfd.bin \
test-fortify-source.bin \
- test-sync-compare-and-swap.bin \
test-get_current_dir_name.bin \
test-glibc.bin \
test-gtk2.bin \
@@ -15,24 +16,28 @@ FILES= \
test-hello.bin \
test-libaudit.bin \
test-libbfd.bin \
+ test-libbfd-buildid.bin \
test-disassembler-four-args.bin \
+ test-disassembler-init-styled.bin \
test-reallocarray.bin \
test-libbfd-liberty.bin \
test-libbfd-liberty-z.bin \
test-cplus-demangle.bin \
+ test-cxa-demangle.bin \
test-libcap.bin \
test-libelf.bin \
test-libelf-getphdrnum.bin \
test-libelf-gelf_getnote.bin \
test-libelf-getshdrstrndx.bin \
- test-libelf-mmap.bin \
+ test-libdebuginfod.bin \
test-libnuma.bin \
test-numa_num_possible_cpus.bin \
test-libperl.bin \
test-libpython.bin \
- test-libpython-version.bin \
test-libslang.bin \
test-libslang-include-subdir.bin \
+ test-libtraceevent.bin \
+ test-libtracefs.bin \
test-libcrypto.bin \
test-libunwind.bin \
test-libunwind-debug-frame.bin \
@@ -60,6 +65,7 @@ FILES= \
test-gettid.bin \
test-jvmti.bin \
test-jvmti-cmlr.bin \
+ test-scandirat.bin \
test-sched_getcpu.bin \
test-setns.bin \
test-libopencsd.bin \
@@ -68,21 +74,20 @@ FILES= \
test-llvm-version.bin \
test-libaio.bin \
test-libzstd.bin \
- test-clang-bpf-global-var.bin \
- test-file-handle.bin
+ test-clang-bpf-co-re.bin \
+ test-file-handle.bin \
+ test-libpfm4.bin
FILES := $(addprefix $(OUTPUT),$(FILES))
-CC ?= $(CROSS_COMPILE)gcc
-CXX ?= $(CROSS_COMPILE)g++
PKG_CONFIG ?= $(CROSS_COMPILE)pkg-config
-LLVM_CONFIG ?= llvm-config
-CLANG ?= clang
all: $(FILES)
__BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS)
BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1
+ BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+ BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap
__BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(@F)) $(LDFLAGS)
BUILDXX = $(__BUILDXX) > $(@:.bin=.make.output) 2>&1
@@ -90,7 +95,7 @@ __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.cpp,$(
###############################
$(OUTPUT)test-all.bin:
- $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -laudit -I/usr/include/slang -lslang $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null) $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma
+ $(BUILD_ALL) || $(BUILD_ALL) -lopcodes -liberty
$(OUTPUT)test-hello.bin:
$(BUILD)
@@ -125,6 +130,9 @@ $(OUTPUT)test-get_current_dir_name.bin:
$(OUTPUT)test-glibc.bin:
$(BUILD)
+$(OUTPUT)test-scandirat.bin:
+ $(BUILD)
+
$(OUTPUT)test-sched_getcpu.bin:
$(BUILD)
@@ -146,9 +154,6 @@ $(OUTPUT)test-dwarf.bin:
$(OUTPUT)test-dwarf_getlocations.bin:
$(BUILD) $(DWARFLIBS)
-$(OUTPUT)test-libelf-mmap.bin:
- $(BUILD) -lelf
-
$(OUTPUT)test-libelf-getphdrnum.bin:
$(BUILD) -lelf
@@ -158,6 +163,9 @@ $(OUTPUT)test-libelf-gelf_getnote.bin:
$(OUTPUT)test-libelf-getshdrstrndx.bin:
$(BUILD) -lelf
+$(OUTPUT)test-libdebuginfod.bin:
+ $(BUILD) -ldebuginfod
+
$(OUTPUT)test-libnuma.bin:
$(BUILD) -lnuma
@@ -196,6 +204,12 @@ $(OUTPUT)test-libslang.bin:
$(OUTPUT)test-libslang-include-subdir.bin:
$(BUILD) -lslang
+$(OUTPUT)test-libtraceevent.bin:
+ $(BUILD) -ltraceevent
+
+$(OUTPUT)test-libtracefs.bin:
+ $(BUILD) -ltracefs
+
$(OUTPUT)test-libcrypto.bin:
$(BUILD) -lcrypto
@@ -211,23 +225,35 @@ strip-libs = $(filter-out -l%,$(1))
PERL_EMBED_LDOPTS = $(shell perl -MExtUtils::Embed -e ldopts 2>/dev/null)
PERL_EMBED_LDFLAGS = $(call strip-libs,$(PERL_EMBED_LDOPTS))
PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
-PERL_EMBED_CCOPTS = `perl -MExtUtils::Embed -e ccopts 2>/dev/null`
+PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
+ifeq ($(CC_NO_CLANG), 0)
+ PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
+ PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
+ PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+ FLAGS_PERL_EMBED += -Wno-compound-token-split-by-macro
+endif
+
$(OUTPUT)test-libperl.bin:
$(BUILD) $(FLAGS_PERL_EMBED)
$(OUTPUT)test-libpython.bin:
$(BUILD) $(FLAGS_PYTHON_EMBED)
-$(OUTPUT)test-libpython-version.bin:
- $(BUILD)
-
$(OUTPUT)test-libbfd.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+ $(BUILD_BFD)
+
+$(OUTPUT)test-libbfd-buildid.bin:
+ $(BUILD_BFD) || $(BUILD_BFD) -liberty || $(BUILD_BFD) -liberty -lz
$(OUTPUT)test-disassembler-four-args.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
+ $(BUILD_BFD) -lopcodes || $(BUILD_BFD) -lopcodes -liberty || \
+ $(BUILD_BFD) -lopcodes -liberty -lz
+
+$(OUTPUT)test-disassembler-init-styled.bin:
+ $(BUILD_BFD) -lopcodes || $(BUILD_BFD) -lopcodes -liberty || \
+ $(BUILD_BFD) -lopcodes -liberty -lz
$(OUTPUT)test-reallocarray.bin:
$(BUILD)
@@ -241,6 +267,9 @@ $(OUTPUT)test-libbfd-liberty-z.bin:
$(OUTPUT)test-cplus-demangle.bin:
$(BUILD) -liberty
+$(OUTPUT)test-cxa-demangle.bin:
+ $(BUILDXX)
+
$(OUTPUT)test-backtrace.bin:
$(BUILD)
@@ -253,9 +282,6 @@ $(OUTPUT)test-libdw-dwarf-unwind.bin:
$(OUTPUT)test-libbabeltrace.bin:
$(BUILD) # -lbabeltrace provided by $(FEATURE_CHECK_LDFLAGS-libbabeltrace)
-$(OUTPUT)test-sync-compare-and-swap.bin:
- $(BUILD)
-
$(OUTPUT)test-compile-32.bin:
$(CC) -m32 -o $@ test-compile.c
@@ -277,6 +303,27 @@ $(OUTPUT)test-bpf.bin:
$(OUTPUT)test-libbpf.bin:
$(BUILD) -lbpf
+$(OUTPUT)test-libbpf-btf__load_from_kernel_by_id.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_prog_load.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_map_create.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_object__next_program.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_object__next_map.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-bpf_program__set_insns.bin:
+ $(BUILD) -lbpf
+
+$(OUTPUT)test-libbpf-btf__raw_data.bin:
+ $(BUILD) -lbpf
+
$(OUTPUT)test-sdt.bin:
$(BUILD)
@@ -293,7 +340,7 @@ $(OUTPUT)test-jvmti-cmlr.bin:
$(BUILD)
$(OUTPUT)test-llvm.bin:
- $(BUILDXX) -std=gnu++11 \
+ $(BUILDXX) -std=gnu++14 \
-I$(shell $(LLVM_CONFIG) --includedir) \
-L$(shell $(LLVM_CONFIG) --libdir) \
$(shell $(LLVM_CONFIG) --libs Core BPF) \
@@ -301,12 +348,12 @@ $(OUTPUT)test-llvm.bin:
> $(@:.bin=.make.output) 2>&1
$(OUTPUT)test-llvm-version.bin:
- $(BUILDXX) -std=gnu++11 \
+ $(BUILDXX) -std=gnu++14 \
-I$(shell $(LLVM_CONFIG) --includedir) \
> $(@:.bin=.make.output) 2>&1
$(OUTPUT)test-clang.bin:
- $(BUILDXX) -std=gnu++11 \
+ $(BUILDXX) -std=gnu++14 \
-I$(shell $(LLVM_CONFIG) --includedir) \
-L$(shell $(LLVM_CONFIG) --libdir) \
-Wl,--start-group -lclangBasic -lclangDriver \
@@ -324,13 +371,16 @@ $(OUTPUT)test-libaio.bin:
$(OUTPUT)test-libzstd.bin:
$(BUILD) -lzstd
-$(OUTPUT)test-clang-bpf-global-var.bin:
+$(OUTPUT)test-clang-bpf-co-re.bin:
$(CLANG) -S -g -target bpf -o - $(patsubst %.bin,%.c,$(@F)) | \
grep BTF_KIND_VAR
$(OUTPUT)test-file-handle.bin:
$(BUILD)
+$(OUTPUT)test-libpfm4.bin:
+ $(BUILD) -lpfm
+
###############################
clean:
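
Every test-<feature>.bin rule above compiles a matching test-<feature>.c; a feature is reported as present only when that probe compiles and links, and the "|| ... -liberty" and "-lz" chains simply retry the link with the extra libraries that some distributions require for libbfd and libopcodes. A new probe follows the same minimal shape; the sketch below is a hypothetical test-foo.c, with header and symbol names that are illustrative only and not part of this patch.

// SPDX-License-Identifier: GPL-2.0
// Hypothetical feature probe: the binary is never run; the build system
// only cares whether this compiles and links against the probed library.
#include <foo.h>		/* assumed header of the probed library */

int main(void)
{
	return foo_initialize();	/* assumed symbol; a link failure marks the feature absent */
}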
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 88145e8cde1a..6f4bf386a3b5 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -14,10 +14,6 @@
# include "test-libpython.c"
#undef main
-#define main main_test_libpython_version
-# include "test-libpython-version.c"
-#undef main
-
#define main main_test_libperl
# include "test-libperl.c"
#undef main
@@ -30,10 +26,6 @@
# include "test-libelf.c"
#undef main
-#define main main_test_libelf_mmap
-# include "test-libelf-mmap.c"
-#undef main
-
#define main main_test_get_current_dir_name
# include "test-get_current_dir_name.c"
#undef main
@@ -74,26 +66,18 @@
# include "test-libunwind.c"
#undef main
-#define main main_test_libaudit
-# include "test-libaudit.c"
-#undef main
-
#define main main_test_libslang
# include "test-libslang.c"
#undef main
-#define main main_test_gtk2
-# include "test-gtk2.c"
-#undef main
-
-#define main main_test_gtk2_infobar
-# include "test-gtk2-infobar.c"
-#undef main
-
#define main main_test_libbfd
# include "test-libbfd.c"
#undef main
+#define main main_test_libbfd_buildid
+# include "test-libbfd-buildid.c"
+#undef main
+
#define main main_test_backtrace
# include "test-backtrace.c"
#undef main
@@ -118,10 +102,6 @@
# include "test-libdw-dwarf-unwind.c"
#undef main
-#define main main_test_sync_compare_and_swap
-# include "test-sync-compare-and-swap.c"
-#undef main
-
#define main main_test_zlib
# include "test-zlib.c"
#undef main
@@ -134,6 +114,10 @@
# include "test-pthread-barrier.c"
#undef main
+#define main main_test_scandirat
+# include "test-scandirat.c"
+#undef main
+
#define main main_test_sched_getcpu
# include "test-sched_getcpu.c"
#undef main
@@ -186,6 +170,10 @@
# include "test-disassembler-four-args.c"
#undef main
+#define main main_test_disassembler_init_styled
+# include "test-disassembler-init-styled.c"
+#undef main
+
#define main main_test_libzstd
# include "test-libzstd.c"
#undef main
@@ -193,11 +181,9 @@
int main(int argc, char *argv[])
{
main_test_libpython();
- main_test_libpython_version();
main_test_libperl();
main_test_hello();
main_test_libelf();
- main_test_libelf_mmap();
main_test_get_current_dir_name();
main_test_gettid();
main_test_glibc();
@@ -208,18 +194,15 @@ int main(int argc, char *argv[])
main_test_libelf_gelf_getnote();
main_test_libelf_getshdrstrndx();
main_test_libunwind();
- main_test_libaudit();
main_test_libslang();
- main_test_gtk2(argc, argv);
- main_test_gtk2_infobar(argc, argv);
main_test_libbfd();
+ main_test_libbfd_buildid();
main_test_backtrace();
main_test_libnuma();
main_test_numa_num_possible_cpus();
main_test_timerfd();
main_test_stackprotector_all();
main_test_libdw_dwarf_unwind();
- main_test_sync_compare_and_swap(argc, argv);
main_test_zlib();
main_test_pthread_attr_setaffinity_np();
main_test_pthread_barrier();
@@ -227,6 +210,7 @@ int main(int argc, char *argv[])
main_test_get_cpuid();
main_test_bpf();
main_test_libcrypto();
+ main_test_scandirat();
main_test_sched_getcpu();
main_test_sdt();
main_test_setns();
diff --git a/tools/build/feature/test-bpf.c b/tools/build/feature/test-bpf.c
index 82070eadfc07..727d22e34a6e 100644
--- a/tools/build/feature/test-bpf.c
+++ b/tools/build/feature/test-bpf.c
@@ -14,6 +14,12 @@
# define __NR_bpf 349
# elif defined(__s390__)
# define __NR_bpf 351
+# elif defined(__mips__) && defined(_ABIO32)
+# define __NR_bpf 4355
+# elif defined(__mips__) && defined(_ABIN32)
+# define __NR_bpf 6319
+# elif defined(__mips__) && defined(_ABI64)
+# define __NR_bpf 5315
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
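
The additions above only supply the raw syscall numbers for the three MIPS ABIs; the probe exercises the number directly rather than relying on a libc or libbpf wrapper. A minimal sketch of that style of check, assuming only <linux/bpf.h> and a resolved __NR_bpf:

// SPDX-License-Identifier: GPL-2.0
// Raw bpf(2) probe: the call is expected to fail at run time; the
// feature test only needs it to compile and link.
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}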
diff --git a/tools/build/feature/test-clang-bpf-co-re.c b/tools/build/feature/test-clang-bpf-co-re.c
new file mode 100644
index 000000000000..cb5265bfdd83
--- /dev/null
+++ b/tools/build/feature/test-clang-bpf-co-re.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+struct test {
+ int a;
+ int b;
+} __attribute__((preserve_access_index));
+
+volatile struct test global_value_for_test = {};
diff --git a/tools/build/feature/test-clang-bpf-global-var.c b/tools/build/feature/test-clang-bpf-global-var.c
deleted file mode 100644
index 221f1481d52e..000000000000
--- a/tools/build/feature/test-clang-bpf-global-var.c
+++ /dev/null
@@ -1,4 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (c) 2020 Facebook
-
-volatile int global_value_for_test = 1;
diff --git a/tools/build/feature/test-cxa-demangle.cpp b/tools/build/feature/test-cxa-demangle.cpp
new file mode 100644
index 000000000000..a3e712f65c37
--- /dev/null
+++ b/tools/build/feature/test-cxa-demangle.cpp
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include <cxxabi.h>
+
+int main(void)
+{
+ size_t len = 256;
+ char *output = (char*)malloc(len);
+ int status;
+
+ output = abi::__cxa_demangle("FieldName__9ClassNameFd", output, &len, &status);
+
+ printf("demangled symbol: {%s}\n", output);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-disassembler-init-styled.c b/tools/build/feature/test-disassembler-init-styled.c
new file mode 100644
index 000000000000..f1ce0ec3bee9
--- /dev/null
+++ b/tools/build/feature/test-disassembler-init-styled.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <dis-asm.h>
+
+int main(void)
+{
+ struct disassemble_info info;
+
+ init_disassemble_info(&info, stdout,
+ NULL, NULL);
+
+ return 0;
+}
diff --git a/tools/build/feature/test-libbfd-buildid.c b/tools/build/feature/test-libbfd-buildid.c
new file mode 100644
index 000000000000..157644b04c05
--- /dev/null
+++ b/tools/build/feature/test-libbfd-buildid.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <bfd.h>
+
+int main(void)
+{
+ bfd *abfd = bfd_openr("Pedro", 0);
+ return abfd && (!abfd->build_id || abfd->build_id->size > 0x506564726f);
+}
diff --git a/tools/build/feature/test-libbpf.c b/tools/build/feature/test-libbpf.c
index a508756cf4cc..cd9989f52119 100644
--- a/tools/build/feature/test-libbpf.c
+++ b/tools/build/feature/test-libbpf.c
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
#include <bpf/libbpf.h>
+#if !defined(LIBBPF_MAJOR_VERSION) || (LIBBPF_MAJOR_VERSION < 1)
+#error At least libbpf 1.0 is required for Linux tools.
+#endif
+
int main(void)
{
return bpf_object__open("test") ? 0 : -1;
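
The #error above turns a pre-1.0 libbpf into a hard build failure. Where a run-time report is preferred instead, libbpf (0.6 and later) also exposes version query helpers; a small sketch, not part of this patch:

// SPDX-License-Identifier: GPL-2.0
// Print the libbpf version the binary is actually linked against.
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	printf("libbpf %u.%u (%s)\n",
	       libbpf_major_version(), libbpf_minor_version(),
	       libbpf_version_string());
	return libbpf_major_version() >= 1 ? 0 : 1;
}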
diff --git a/tools/build/feature/test-libcrypto.c b/tools/build/feature/test-libcrypto.c
index a98174e0569c..bc34a5bbb504 100644
--- a/tools/build/feature/test-libcrypto.c
+++ b/tools/build/feature/test-libcrypto.c
@@ -1,16 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
+#include <openssl/evp.h>
#include <openssl/sha.h>
#include <openssl/md5.h>
int main(void)
{
- MD5_CTX context;
+ EVP_MD_CTX *mdctx;
unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
unsigned char dat[] = "12345";
+ unsigned int digest_len;
- MD5_Init(&context);
- MD5_Update(&context, &dat[0], sizeof(dat));
- MD5_Final(&md[0], &context);
+ mdctx = EVP_MD_CTX_new();
+ if (!mdctx)
+ return 0;
+
+ EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
+ EVP_DigestUpdate(mdctx, &dat[0], sizeof(dat));
+ EVP_DigestFinal_ex(mdctx, &md[0], &digest_len);
+ EVP_MD_CTX_free(mdctx);
SHA1(&dat[0], sizeof(dat), &md[0]);
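
The rewrite above moves the probe off the MD5_* one-shot routines, which OpenSSL 3.0 deprecates, and onto the EVP interface. For hashing a single buffer, the init/update/final sequence can also be collapsed into the one-shot EVP_Digest() helper; a minimal sketch, not part of this patch:

// SPDX-License-Identifier: GPL-2.0
// One-shot MD5 through the EVP interface; no deprecated MD5_* calls.
#include <stdio.h>
#include <openssl/evp.h>

int main(void)
{
	unsigned char md[EVP_MAX_MD_SIZE];
	unsigned char dat[] = "12345";
	unsigned int len = 0;

	if (!EVP_Digest(dat, sizeof(dat), md, &len, EVP_md5(), NULL))
		return 1;

	printf("MD5 digest length: %u\n", len);
	return 0;
}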
diff --git a/tools/build/feature/test-libdebuginfod.c b/tools/build/feature/test-libdebuginfod.c
new file mode 100644
index 000000000000..da22548b8413
--- /dev/null
+++ b/tools/build/feature/test-libdebuginfod.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <elfutils/debuginfod.h>
+
+int main(void)
+{
+ debuginfod_client* c = debuginfod_begin();
+ return (long)c;
+}
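
The probe above only constructs a client. In real use the client is asked to fetch debug info by build-id and is then torn down again; a rough sketch of that flow, assuming the elfutils debuginfod client API (debuginfod_find_debuginfo() treats the id as a hex string when the length argument is 0) and a placeholder build-id:

// SPDX-License-Identifier: GPL-2.0
// Sketch: fetch debuginfo for a build-id via the debuginfod client.
// "deadbeef" is a placeholder build-id, not a real one.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <elfutils/debuginfod.h>

int main(void)
{
	debuginfod_client *c = debuginfod_begin();
	char *path = NULL;
	int fd;

	if (!c)
		return 1;

	/* build_id_len == 0: the id is passed as a hex string */
	fd = debuginfod_find_debuginfo(c, (const unsigned char *)"deadbeef",
				       0, &path);
	if (fd >= 0) {
		printf("debuginfo cached at %s\n", path);
		free(path);
		close(fd);
	}

	debuginfod_end(c);
	return fd < 0;
}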
diff --git a/tools/build/feature/test-libelf-mmap.c b/tools/build/feature/test-libelf-mmap.c
deleted file mode 100644
index 2c3ef81affe2..000000000000
--- a/tools/build/feature/test-libelf-mmap.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <libelf.h>
-
-int main(void)
-{
- Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0);
-
- return (long)elf;
-}
diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c
index 2b0e02c38870..eb6303ff446e 100644
--- a/tools/build/feature/test-libopencsd.c
+++ b/tools/build/feature/test-libopencsd.c
@@ -4,9 +4,9 @@
/*
* Check OpenCSD library version is sufficient to provide required features
*/
-#define OCSD_MIN_VER ((0 << 16) | (11 << 8) | (0))
+#define OCSD_MIN_VER ((1 << 16) | (1 << 8) | (1))
#if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER)
-#error "OpenCSD >= 0.11.0 is required"
+#error "OpenCSD >= 1.1.1 is required"
#endif
int main(void)
diff --git a/tools/build/feature/test-libpfm4.c b/tools/build/feature/test-libpfm4.c
new file mode 100644
index 000000000000..af49b259459e
--- /dev/null
+++ b/tools/build/feature/test-libpfm4.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <sys/types.h>
+#include <perfmon/pfmlib.h>
+
+int main(void)
+{
+ pfm_initialize();
+ return 0;
+}
diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c
deleted file mode 100644
index 47714b942d4d..000000000000
--- a/tools/build/feature/test-libpython-version.c
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <Python.h>
-
-#if PY_VERSION_HEX >= 0x03000000
- #error
-#endif
-
-int main(void)
-{
- return 0;
-}
diff --git a/tools/build/feature/test-libtraceevent.c b/tools/build/feature/test-libtraceevent.c
new file mode 100644
index 000000000000..416b11ffd4b4
--- /dev/null
+++ b/tools/build/feature/test-libtraceevent.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <traceevent/trace-seq.h>
+
+int main(void)
+{
+ int rv = 0;
+ struct trace_seq s;
+ trace_seq_init(&s);
+ rv += !(s.state == TRACE_SEQ__GOOD);
+ trace_seq_destroy(&s);
+ return rv;
+}
diff --git a/tools/build/feature/test-libtracefs.c b/tools/build/feature/test-libtracefs.c
new file mode 100644
index 000000000000..8eff16c0c10b
--- /dev/null
+++ b/tools/build/feature/test-libtracefs.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <tracefs/tracefs.h>
+
+int main(void)
+{
+ struct tracefs_instance *inst = tracefs_instance_create("dummy");
+
+ tracefs_instance_destroy(inst);
+ return 0;
+}
diff --git a/tools/build/feature/test-scandirat.c b/tools/build/feature/test-scandirat.c
new file mode 100644
index 000000000000..d7e19e1858a5
--- /dev/null
+++ b/tools/build/feature/test-scandirat.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <dirent.h>
+
+int main(void)
+{
+ // expects non-NULL arguments; the pointer parameters are 'restrict'-qualified, so the dummy "pointers" all have to differ
+ return scandirat(/*dirfd=*/ 0, /*dirp=*/ (void *)1, /*namelist=*/ (void *)2, /*filter=*/ (void *)3, /*compar=*/ (void *)4);
+}
+
+#undef _GNU_SOURCE
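
The probe above only needs scandirat() to resolve at link time; a short sketch of actually using it, scanning the current directory relative to AT_FDCWD with the usual alphasort comparator:

// SPDX-License-Identifier: GPL-2.0
// List the current directory with scandirat(3) (GNU extension).
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <dirent.h>
#include <fcntl.h>	/* AT_FDCWD */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	struct dirent **namelist;
	int i, n;

	n = scandirat(AT_FDCWD, ".", &namelist, NULL, alphasort);
	if (n < 0) {
		perror("scandirat");
		return 1;
	}

	for (i = 0; i < n; i++) {
		puts(namelist[i]->d_name);
		free(namelist[i]);
	}
	free(namelist);
	return 0;
}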
diff --git a/tools/build/feature/test-sync-compare-and-swap.c b/tools/build/feature/test-sync-compare-and-swap.c
deleted file mode 100644
index 1e38d1930a97..000000000000
--- a/tools/build/feature/test-sync-compare-and-swap.c
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdint.h>
-
-volatile uint64_t x;
-
-int main(int argc, char *argv[])
-{
- uint64_t old, new = argc;
-
- argv = argv;
- do {
- old = __sync_val_compare_and_swap(&x, 0, 0);
- } while (!__sync_bool_compare_and_swap(&x, old, new));
- return old == new;
-}
diff --git a/tools/certs/print-cert-tbs-hash.sh b/tools/certs/print-cert-tbs-hash.sh
new file mode 100755
index 000000000000..c93df5387ec9
--- /dev/null
+++ b/tools/certs/print-cert-tbs-hash.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright © 2020, Microsoft Corporation. All rights reserved.
+#
+# Author: Mickaël Salaün <mic@linux.microsoft.com>
+#
+# Compute and print the To Be Signed (TBS) hash of a certificate. This is used
+# as the description of keys in the blacklist keyring to identify certificates.
+# This output should be redirected, without a newline, into a file (hash0.txt) and
+# signed to create a PKCS#7 file (hash0.p7s). Both of these files can then be
+# loaded into the kernel with:
+#
+# Example on a workstation:
+# ./print-cert-tbs-hash.sh certificate-to-invalidate.pem > hash0.txt
+# openssl smime -sign -in hash0.txt -inkey builtin-private-key.pem \
+# -signer builtin-certificate.pem -certfile certificate-chain.pem \
+# -noattr -binary -outform DER -out hash0.p7s
+#
+# Example on a managed system:
+# keyctl padd blacklist "$(< hash0.txt)" %:.blacklist < hash0.p7s
+
+set -u -e -o pipefail
+
+CERT="${1:-}"
+BASENAME="$(basename -- "${BASH_SOURCE[0]}")"
+
+if [ $# -ne 1 ] || [ ! -f "${CERT}" ]; then
+ echo "usage: ${BASENAME} <certificate>" >&2
+ exit 1
+fi
+
+# Check that it is indeed a certificate (PEM or DER encoded) and exclude the
+# optional PEM text header.
+if ! PEM="$(openssl x509 -inform DER -in "${CERT}" 2>/dev/null || openssl x509 -in "${CERT}")"; then
+ echo "ERROR: Failed to parse certificate" >&2
+ exit 1
+fi
+
+# TBSCertificate starts at the second entry.
+# Cf. https://tools.ietf.org/html/rfc3280#section-4.1
+#
+# Example of the first lines printed by openssl asn1parse:
+# 0:d=0 hl=4 l= 763 cons: SEQUENCE
+# 4:d=1 hl=4 l= 483 cons: SEQUENCE
+# 8:d=2 hl=2 l= 3 cons: cont [ 0 ]
+# 10:d=3 hl=2 l= 1 prim: INTEGER :02
+# 13:d=2 hl=2 l= 20 prim: INTEGER :3CEB2CB8818D968AC00EEFE195F0DF9665328B7B
+# 35:d=2 hl=2 l= 13 cons: SEQUENCE
+# 37:d=3 hl=2 l= 9 prim: OBJECT :sha256WithRSAEncryption
+RANGE_AND_DIGEST_RE='
+2s/^\s*\([0-9]\+\):d=\s*[0-9]\+\s\+hl=\s*[0-9]\+\s\+l=\s*\([0-9]\+\)\s\+cons:\s*SEQUENCE\s*$/\1 \2/p;
+7s/^\s*[0-9]\+:d=\s*[0-9]\+\s\+hl=\s*[0-9]\+\s\+l=\s*[0-9]\+\s\+prim:\s*OBJECT\s*:\(.*\)$/\1/p;
+'
+
+RANGE_AND_DIGEST=($(echo "${PEM}" | \
+ openssl asn1parse -in - | \
+ sed -n -e "${RANGE_AND_DIGEST_RE}"))
+
+if [ "${#RANGE_AND_DIGEST[@]}" != 3 ]; then
+ echo "ERROR: Failed to parse TBSCertificate." >&2
+ exit 1
+fi
+
+OFFSET="${RANGE_AND_DIGEST[0]}"
+END="$(( OFFSET + RANGE_AND_DIGEST[1] ))"
+DIGEST="${RANGE_AND_DIGEST[2]}"
+
+# The signature hash algorithm is used by Linux to blacklist certificates.
+# Cf. crypto/asymmetric_keys/x509_cert_parser.c:x509_note_pkey_algo()
+DIGEST_MATCH=""
+while read -r DIGEST_ITEM; do
+ if [ -z "${DIGEST_ITEM}" ]; then
+ break
+ fi
+ if echo "${DIGEST}" | grep -qiF "${DIGEST_ITEM}"; then
+ DIGEST_MATCH="${DIGEST_ITEM}"
+ break
+ fi
+done < <(openssl list -digest-commands | tr ' ' '\n' | sort -ur)
+
+if [ -z "${DIGEST_MATCH}" ]; then
+ echo "ERROR: Unknown digest algorithm: ${DIGEST}" >&2
+ exit 1
+fi
+
+echo "${PEM}" | \
+ openssl x509 -in - -outform DER | \
+ dd "bs=1" "skip=${OFFSET}" "count=${END}" "status=none" | \
+ openssl dgst "-${DIGEST_MATCH}" - | \
+ awk '{printf "tbs:" $2}'
diff --git a/tools/cgroup/iocost_monitor.py b/tools/cgroup/iocost_monitor.py
index 7427a5ee761b..0dbbc67400fc 100644
--- a/tools/cgroup/iocost_monitor.py
+++ b/tools/cgroup/iocost_monitor.py
@@ -28,7 +28,8 @@ parser.add_argument('devname', metavar='DEV',
parser.add_argument('--cgroup', action='append', metavar='REGEX',
help='Regex for target cgroups, ')
parser.add_argument('--interval', '-i', metavar='SECONDS', type=float, default=1,
- help='Monitoring interval in seconds')
+ help='Monitoring interval in seconds (0 exits immediately '
+ 'after checking requirements)')
parser.add_argument('--json', action='store_true',
help='Output in json')
args = parser.parse_args()
@@ -44,8 +45,7 @@ except:
err('The kernel does not have iocost enabled')
IOC_RUNNING = prog['IOC_RUNNING'].value_()
-NR_USAGE_SLOTS = prog['NR_USAGE_SLOTS'].value_()
-HWEIGHT_WHOLE = prog['HWEIGHT_WHOLE'].value_()
+WEIGHT_ONE = prog['WEIGHT_ONE'].value_()
VTIME_PER_SEC = prog['VTIME_PER_SEC'].value_()
VTIME_PER_USEC = prog['VTIME_PER_USEC'].value_()
AUTOP_SSD_FAST = prog['AUTOP_SSD_FAST'].value_()
@@ -61,6 +61,11 @@ autop_names = {
}
class BlkgIterator:
+ def __init__(self, root_blkcg, q_id, include_dying=False):
+ self.include_dying = include_dying
+ self.blkgs = []
+ self.walk(root_blkcg, q_id, '')
+
def blkcg_name(blkcg):
return blkcg.css.cgroup.kn.name.string_().decode('utf-8')
@@ -82,11 +87,6 @@ class BlkgIterator:
blkcg.css.children.address_of_(), 'css.sibling'):
self.walk(c, q_id, path)
- def __init__(self, root_blkcg, q_id, include_dying=False):
- self.include_dying = include_dying
- self.blkgs = []
- self.walk(root_blkcg, q_id, '')
-
def __iter__(self):
return iter(self.blkgs)
@@ -99,7 +99,7 @@ class IocStat:
self.period_ms = ioc.period_us.value_() / 1_000
self.period_at = ioc.period_at.value_() / 1_000_000
self.vperiod_at = ioc.period_at_vtime.value_() / VTIME_PER_SEC
- self.vrate_pct = ioc.vtime_rate.counter.value_() * 100 / VTIME_PER_USEC
+ self.vrate_pct = ioc.vtime_base_rate.value_() * 100 / VTIME_PER_USEC
self.busy_level = ioc.busy_level.value_()
self.autop_idx = ioc.autop_idx.value_()
self.user_cost_model = ioc.user_cost_model.value_()
@@ -112,14 +112,14 @@ class IocStat:
def dict(self, now):
return { 'device' : devname,
- 'timestamp' : str(now),
- 'enabled' : str(int(self.enabled)),
- 'running' : str(int(self.running)),
- 'period_ms' : str(self.period_ms),
- 'period_at' : str(self.period_at),
- 'period_vtime_at' : str(self.vperiod_at),
- 'busy_level' : str(self.busy_level),
- 'vrate_pct' : str(self.vrate_pct), }
+ 'timestamp' : now,
+ 'enabled' : self.enabled,
+ 'running' : self.running,
+ 'period_ms' : self.period_ms,
+ 'period_at' : self.period_at,
+ 'period_vtime_at' : self.vperiod_at,
+ 'busy_level' : self.busy_level,
+ 'vrate_pct' : self.vrate_pct, }
def table_preamble_str(self):
state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF'
@@ -135,7 +135,7 @@ class IocStat:
def table_header_str(self):
return f'{"":25} active {"weight":>9} {"hweight%":>13} {"inflt%":>6} ' \
- f'{"dbt":>3} {"delay":>6} {"usages%"}'
+ f'{"debt":>7} {"delay":>7} {"usage%"}'
class IocgStat:
def __init__(self, iocg):
@@ -143,11 +143,11 @@ class IocgStat:
blkg = iocg.pd.blkg
self.is_active = not list_empty(iocg.active_list.address_of_())
- self.weight = iocg.weight.value_()
- self.active = iocg.active.value_()
- self.inuse = iocg.inuse.value_()
- self.hwa_pct = iocg.hweight_active.value_() * 100 / HWEIGHT_WHOLE
- self.hwi_pct = iocg.hweight_inuse.value_() * 100 / HWEIGHT_WHOLE
+ self.weight = iocg.weight.value_() / WEIGHT_ONE
+ self.active = iocg.active.value_() / WEIGHT_ONE
+ self.inuse = iocg.inuse.value_() / WEIGHT_ONE
+ self.hwa_pct = iocg.hweight_active.value_() * 100 / WEIGHT_ONE
+ self.hwi_pct = iocg.hweight_inuse.value_() * 100 / WEIGHT_ONE
self.address = iocg.value_()
vdone = iocg.done_vtime.counter.value_()
@@ -159,49 +159,39 @@ class IocgStat:
else:
self.inflight_pct = 0
- self.debt_ms = iocg.abs_vdebt.counter.value_() / VTIME_PER_USEC / 1000
- self.use_delay = blkg.use_delay.counter.value_()
- self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
-
- usage_idx = iocg.usage_idx.value_()
- self.usages = []
- self.usage = 0
- for i in range(NR_USAGE_SLOTS):
- usage = iocg.usages[(usage_idx + i) % NR_USAGE_SLOTS].value_()
- upct = usage * 100 / HWEIGHT_WHOLE
- self.usages.append(upct)
- self.usage = max(self.usage, upct)
+ self.usage = (100 * iocg.usage_delta_us.value_() /
+ ioc.period_us.value_()) if self.active else 0
+ self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
+ if blkg.use_delay.counter.value_() != 0:
+ self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
+ else:
+ self.delay_ms = 0
def dict(self, now, path):
out = { 'cgroup' : path,
- 'timestamp' : str(now),
- 'is_active' : str(int(self.is_active)),
- 'weight' : str(self.weight),
- 'weight_active' : str(self.active),
- 'weight_inuse' : str(self.inuse),
- 'hweight_active_pct' : str(self.hwa_pct),
- 'hweight_inuse_pct' : str(self.hwi_pct),
- 'inflight_pct' : str(self.inflight_pct),
- 'debt_ms' : str(self.debt_ms),
- 'use_delay' : str(self.use_delay),
- 'delay_ms' : str(self.delay_ms),
- 'usage_pct' : str(self.usage),
- 'address' : str(hex(self.address)) }
- for i in range(len(self.usages)):
- out[f'usage_pct_{i}'] = str(self.usages[i])
+ 'timestamp' : now,
+ 'is_active' : self.is_active,
+ 'weight' : self.weight,
+ 'weight_active' : self.active,
+ 'weight_inuse' : self.inuse,
+ 'hweight_active_pct' : self.hwa_pct,
+ 'hweight_inuse_pct' : self.hwi_pct,
+ 'inflight_pct' : self.inflight_pct,
+ 'debt_ms' : self.debt_ms,
+ 'delay_ms' : self.delay_ms,
+ 'usage_pct' : self.usage,
+ 'address' : self.address }
return out
def table_row_str(self, path):
out = f'{path[-28:]:28} ' \
f'{"*" if self.is_active else " "} ' \
- f'{self.inuse:5}/{self.active:5} ' \
+ f'{round(self.inuse):5}/{round(self.active):5} ' \
f'{self.hwi_pct:6.2f}/{self.hwa_pct:6.2f} ' \
f'{self.inflight_pct:6.2f} ' \
- f'{min(math.ceil(self.debt_ms), 999):3} ' \
- f'{min(self.use_delay, 99):2}*'\
- f'{min(math.ceil(self.delay_ms), 999):03} '
- for u in self.usages:
- out += f'{min(round(u), 999):03d}:'
+ f'{self.debt_ms:7.2f} ' \
+ f'{self.delay_ms:7.2f} '\
+ f'{min(self.usage, 999):6.2f}'
out = out.rstrip(':')
return out
@@ -243,6 +233,9 @@ for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree.address_of_()):
if ioc is None:
err(f'Could not find ioc for {devname}');
+if interval == 0:
+ sys.exit(0)
+
# Keep printing
while True:
now = time.time()
diff --git a/tools/cgroup/memcg_shrinker.py b/tools/cgroup/memcg_shrinker.py
new file mode 100644
index 000000000000..e81c3017ada9
--- /dev/null
+++ b/tools/cgroup/memcg_shrinker.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+#
+# Copyright (C) 2022 Roman Gushchin <roman.gushchin@linux.dev>
+# Copyright (C) 2022 Meta
+
+import os
+import argparse
+
+
+def scan_cgroups(cgroup_root):
+ cgroups = {}
+
+ for root, subdirs, _ in os.walk(cgroup_root):
+ for cgroup in subdirs:
+ path = os.path.join(root, cgroup)
+ ino = os.stat(path).st_ino
+ cgroups[ino] = path
+
+ # (memcg ino, path)
+ return cgroups
+
+
+def scan_shrinkers(shrinker_debugfs):
+ shrinkers = []
+
+ for root, subdirs, _ in os.walk(shrinker_debugfs):
+ for shrinker in subdirs:
+ count_path = os.path.join(root, shrinker, "count")
+ with open(count_path) as f:
+ for line in f.readlines():
+ items = line.split(' ')
+ ino = int(items[0])
+ # (count, shrinker, memcg ino)
+ shrinkers.append((int(items[1]), shrinker, ino))
+ return shrinkers
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Display biggest shrinkers')
+ parser.add_argument('-n', '--lines', type=int, help='Number of lines to print')
+
+ args = parser.parse_args()
+
+ cgroups = scan_cgroups("/sys/fs/cgroup/")
+ shrinkers = scan_shrinkers("/sys/kernel/debug/shrinker/")
+ shrinkers.sort(reverse = True, key = lambda x: x[0])
+
+ n = 0
+ for s in shrinkers:
+ count, name, ino = (s[0], s[1], s[2])
+ if count == 0:
+ break
+
+ if ino == 0 or ino == 1:
+ cg = "/"
+ else:
+ try:
+ cg = cgroups[ino]
+ except KeyError:
+ cg = "unknown (%d)" % ino
+
+ print("%-8s %-20s %s" % (count, name, cg))
+
+ n += 1
+ if args.lines and n >= args.lines:
+ break
+
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py
new file mode 100644
index 000000000000..1d3a90d93fe2
--- /dev/null
+++ b/tools/cgroup/memcg_slabinfo.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env drgn
+#
+# Copyright (C) 2020 Roman Gushchin <guro@fb.com>
+# Copyright (C) 2020 Facebook
+
+from os import stat
+import argparse
+import sys
+
+from drgn.helpers.linux import list_for_each_entry, list_empty
+from drgn.helpers.linux import for_each_page
+from drgn.helpers.linux.cpumask import for_each_online_cpu
+from drgn.helpers.linux.percpu import per_cpu_ptr
+from drgn import container_of, FaultError, Object, cast
+
+
+DESC = """
+This is a drgn script to provide slab statistics for memory cgroups.
+It supports cgroup v2 and v1 and can emulate the memory.kmem.slabinfo
+interface of cgroup v1.
+For drgn, visit https://github.com/osandov/drgn.
+"""
+
+
+MEMCGS = {}
+
+OO_SHIFT = 16
+OO_MASK = ((1 << OO_SHIFT) - 1)
+
+
+def err(s):
+ print('slabinfo.py: error: %s' % s, file=sys.stderr, flush=True)
+ sys.exit(1)
+
+
+def find_memcg_ids(css=prog['root_mem_cgroup'].css, prefix=''):
+ if not list_empty(css.children.address_of_()):
+ for css in list_for_each_entry('struct cgroup_subsys_state',
+ css.children.address_of_(),
+ 'sibling'):
+ name = prefix + '/' + css.cgroup.kn.name.string_().decode('utf-8')
+ memcg = container_of(css, 'struct mem_cgroup', 'css')
+ MEMCGS[css.cgroup.kn.id.value_()] = memcg
+ find_memcg_ids(css, name)
+
+
+def is_root_cache(s):
+ try:
+ return False if s.memcg_params.root_cache else True
+ except AttributeError:
+ return True
+
+
+def cache_name(s):
+ if is_root_cache(s):
+ return s.name.string_().decode('utf-8')
+ else:
+ return s.memcg_params.root_cache.name.string_().decode('utf-8')
+
+
+# SLUB
+
+def oo_order(s):
+ return s.oo.x >> OO_SHIFT
+
+
+def oo_objects(s):
+ return s.oo.x & OO_MASK
+
+
+def count_partial(n, fn):
+ nr_objs = 0
+ for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
+ 'slab_list'):
+ nr_objs += fn(slab)
+ return nr_objs
+
+
+def count_free(slab):
+ return slab.objects - slab.inuse
+
+
+def slub_get_slabinfo(s, cfg):
+ nr_slabs = 0
+ nr_objs = 0
+ nr_free = 0
+
+ for node in range(cfg['nr_nodes']):
+ n = s.node[node]
+ nr_slabs += n.nr_slabs.counter.value_()
+ nr_objs += n.total_objects.counter.value_()
+ nr_free += count_partial(n, count_free)
+
+ return {'active_objs': nr_objs - nr_free,
+ 'num_objs': nr_objs,
+ 'active_slabs': nr_slabs,
+ 'num_slabs': nr_slabs,
+ 'objects_per_slab': oo_objects(s),
+ 'cache_order': oo_order(s),
+ 'limit': 0,
+ 'batchcount': 0,
+ 'shared': 0,
+ 'shared_avail': 0}
+
+
+def cache_show(s, cfg, objs):
+ if cfg['allocator'] == 'SLUB':
+ sinfo = slub_get_slabinfo(s, cfg)
+ else:
+ err('SLAB isn\'t supported yet')
+
+ if cfg['shared_slab_pages']:
+ sinfo['active_objs'] = objs
+ sinfo['num_objs'] = objs
+
+ print('%-17s %6lu %6lu %6u %4u %4d'
+ ' : tunables %4u %4u %4u'
+ ' : slabdata %6lu %6lu %6lu' % (
+ cache_name(s), sinfo['active_objs'], sinfo['num_objs'],
+ s.size, sinfo['objects_per_slab'], 1 << sinfo['cache_order'],
+ sinfo['limit'], sinfo['batchcount'], sinfo['shared'],
+ sinfo['active_slabs'], sinfo['num_slabs'],
+ sinfo['shared_avail']))
+
+
+def detect_kernel_config():
+ cfg = {}
+
+ cfg['nr_nodes'] = prog['nr_online_nodes'].value_()
+
+ if prog.type('struct kmem_cache').members[1].name == 'flags':
+ cfg['allocator'] = 'SLUB'
+ elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
+ cfg['allocator'] = 'SLAB'
+ else:
+ err('Can\'t determine the slab allocator')
+
+ cfg['shared_slab_pages'] = False
+ try:
+ if prog.type('struct obj_cgroup'):
+ cfg['shared_slab_pages'] = True
+ except:
+ pass
+
+ return cfg
+
+
+def for_each_slab(prog):
+ PGSlab = 1 << prog.constant('PG_slab')
+ PGHead = 1 << prog.constant('PG_head')
+
+ for page in for_each_page(prog):
+ try:
+ if page.flags.value_() & PGSlab:
+ yield cast('struct slab *', page)
+ except FaultError:
+ pass
+
+
+def main():
+ parser = argparse.ArgumentParser(description=DESC,
+ formatter_class=
+ argparse.RawTextHelpFormatter)
+ parser.add_argument('cgroup', metavar='CGROUP',
+ help='Target memory cgroup')
+ args = parser.parse_args()
+
+ try:
+ cgroup_id = stat(args.cgroup).st_ino
+ find_memcg_ids()
+ memcg = MEMCGS[cgroup_id]
+ except KeyError:
+ err('Can\'t find the memory cgroup')
+
+ cfg = detect_kernel_config()
+
+ print('# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>'
+ ' : tunables <limit> <batchcount> <sharedfactor>'
+ ' : slabdata <active_slabs> <num_slabs> <sharedavail>')
+
+ if cfg['shared_slab_pages']:
+ obj_cgroups = set()
+ stats = {}
+ caches = {}
+
+ # find memcg pointers belonging to the specified cgroup
+ obj_cgroups.add(memcg.objcg.value_())
+ for ptr in list_for_each_entry('struct obj_cgroup',
+ memcg.objcg_list.address_of_(),
+ 'list'):
+ obj_cgroups.add(ptr.value_())
+
+ # look over all slab folios and look for objects belonging
+ # to the given memory cgroup
+ for slab in for_each_slab(prog):
+ objcg_vec_raw = slab.memcg_data.value_()
+ if objcg_vec_raw == 0:
+ continue
+ cache = slab.slab_cache
+ if not cache:
+ continue
+ addr = cache.value_()
+ caches[addr] = cache
+ # clear the lowest bit to get the true obj_cgroups
+ objcg_vec = Object(prog, 'struct obj_cgroup **',
+ value=objcg_vec_raw & ~1)
+
+ if addr not in stats:
+ stats[addr] = 0
+
+ for i in range(oo_objects(cache)):
+ if objcg_vec[i].value_() in obj_cgroups:
+ stats[addr] += 1
+
+ for addr in caches:
+ if stats[addr] > 0:
+ cache_show(caches[addr], cfg, stats[addr])
+
+ else:
+ for s in list_for_each_entry('struct kmem_cache',
+ memcg.kmem_caches.address_of_(),
+ 'memcg_params.kmem_caches_node'):
+ cache_show(s, cfg, None)
+
+
+main()
diff --git a/tools/counter/Build b/tools/counter/Build
new file mode 100644
index 000000000000..33f4a51d715e
--- /dev/null
+++ b/tools/counter/Build
@@ -0,0 +1 @@
+counter_example-y += counter_example.o
diff --git a/tools/counter/Makefile b/tools/counter/Makefile
new file mode 100644
index 000000000000..8843f0fa6119
--- /dev/null
+++ b/tools/counter/Makefile
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+include ../scripts/Makefile.include
+
+bindir ?= /usr/bin
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour);
+MAKEFLAGS += -r
+
+override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ALL_TARGETS := counter_example
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
+
+all: $(ALL_PROGRAMS)
+
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+#
+# We need the following to be outside of kernel tree
+#
+$(OUTPUT)include/linux/counter.h: ../../include/uapi/linux/counter.h
+ mkdir -p $(OUTPUT)include/linux 2>&1 || true
+ ln -sf $(CURDIR)/../../include/uapi/linux/counter.h $@
+
+prepare: $(OUTPUT)include/linux/counter.h
+
+COUNTER_EXAMPLE := $(OUTPUT)counter_example.o
+$(COUNTER_EXAMPLE): prepare FORCE
+ $(Q)$(MAKE) $(build)=counter_example
+$(OUTPUT)counter_example: $(COUNTER_EXAMPLE)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
+clean:
+ rm -f $(ALL_PROGRAMS)
+ rm -rf $(OUTPUT)include/linux/counter.h
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+
+install: $(ALL_PROGRAMS)
+ install -d -m 755 $(DESTDIR)$(bindir); \
+ for program in $(ALL_PROGRAMS); do \
+ install $$program $(DESTDIR)$(bindir); \
+ done
+
+FORCE:
+
+.PHONY: all install clean FORCE prepare
diff --git a/tools/counter/counter_example.c b/tools/counter/counter_example.c
new file mode 100644
index 000000000000..be55287b950f
--- /dev/null
+++ b/tools/counter/counter_example.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Counter - example userspace application
+ *
+ * The userspace application opens /dev/counter0, configures the
+ * COUNTER_EVENT_INDEX event channel 0 to gather Count 0 count and Count
+ * 1 count, and prints out the data as it becomes available on the
+ * character device node.
+ *
+ * Copyright (C) 2021 William Breathitt Gray
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/counter.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+static struct counter_watch watches[2] = {
+ {
+ /* Component data: Count 0 count */
+ .component.type = COUNTER_COMPONENT_COUNT,
+ .component.scope = COUNTER_SCOPE_COUNT,
+ .component.parent = 0,
+ /* Event type: Index */
+ .event = COUNTER_EVENT_INDEX,
+ /* Device event channel 0 */
+ .channel = 0,
+ },
+ {
+ /* Component data: Count 1 count */
+ .component.type = COUNTER_COMPONENT_COUNT,
+ .component.scope = COUNTER_SCOPE_COUNT,
+ .component.parent = 1,
+ /* Event type: Index */
+ .event = COUNTER_EVENT_INDEX,
+ /* Device event channel 0 */
+ .channel = 0,
+ },
+};
+
+int main(void)
+{
+ int fd;
+ int ret;
+ int i;
+ struct counter_event event_data[2];
+
+ fd = open("/dev/counter0", O_RDWR);
+ if (fd == -1) {
+ perror("Unable to open /dev/counter0");
+ return 1;
+ }
+
+ for (i = 0; i < 2; i++) {
+ ret = ioctl(fd, COUNTER_ADD_WATCH_IOCTL, watches + i);
+ if (ret == -1) {
+ fprintf(stderr, "Error adding watches[%d]: %s\n", i,
+ strerror(errno));
+ return 1;
+ }
+ }
+ ret = ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL);
+ if (ret == -1) {
+ perror("Error enabling events");
+ return 1;
+ }
+
+ for (;;) {
+ ret = read(fd, event_data, sizeof(event_data));
+ if (ret == -1) {
+ perror("Failed to read event data");
+ return 1;
+ }
+
+ if (ret != sizeof(event_data)) {
+ fprintf(stderr, "Failed to read event data\n");
+ return -EIO;
+ }
+
+ printf("Timestamp 0: %llu\tCount 0: %llu\n"
+ "Error Message 0: %s\n"
+ "Timestamp 1: %llu\tCount 1: %llu\n"
+ "Error Message 1: %s\n",
+ event_data[0].timestamp, event_data[0].value,
+ strerror(event_data[0].status),
+ event_data[1].timestamp, event_data[1].value,
+ strerror(event_data[1].status));
+ }
+
+ return 0;
+}
diff --git a/tools/debugging/kernel-chktaint b/tools/debugging/kernel-chktaint
index 2240cb56e6e5..279be06332be 100755
--- a/tools/debugging/kernel-chktaint
+++ b/tools/debugging/kernel-chktaint
@@ -25,7 +25,7 @@ if [ "$1"x != "x" ]; then
elif [ $1 -ge 0 ] 2>/dev/null ; then
taint=$1
else
- echo "Error: Parameter '$1' not a positive interger. Aborting." >&2
+ echo "Error: Parameter '$1' not a positive integer. Aborting." >&2
exit 1
fi
else
@@ -72,7 +72,7 @@ if [ `expr $T % 2` -eq 0 ]; then
addout " "
else
addout "S"
- echo " * SMP kernel oops on an officially SMP incapable processor (#2)"
+ echo " * kernel running on an out of specification system (#2)"
fi
T=`expr $T / 2`
@@ -187,6 +187,7 @@ else
echo " * auxiliary taint, defined for and used by distros (#16)"
fi
+
T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
addout " "
@@ -195,8 +196,16 @@ else
echo " * kernel was built with the struct randomization plugin (#17)"
fi
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "N"
+ echo " * an in-kernel test (such as a KUnit test) has been run (#18)"
+fi
+
echo "For a more detailed explanation of the various taint flags see"
-echo " Documentation/admin-guide/tainted-kernels.rst in the the Linux kernel sources"
+echo " Documentation/admin-guide/tainted-kernels.rst in the Linux kernel sources"
echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html"
echo "Raw taint value as int/string: $taint/'$out'"
#EOF#
diff --git a/tools/gpio/Makefile b/tools/gpio/Makefile
index 440434027557..d29c9c49e251 100644
--- a/tools/gpio/Makefile
+++ b/tools/gpio/Makefile
@@ -78,7 +78,7 @@ $(OUTPUT)gpio-watch: $(GPIO_WATCH_IN)
clean:
rm -f $(ALL_PROGRAMS)
rm -f $(OUTPUT)include/linux/gpio.h
- find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(bindir); \
diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
index 30ed0e06f52a..5dee2b98ab60 100644
--- a/tools/gpio/gpio-event-mon.c
+++ b/tools/gpio/gpio-event-mon.c
@@ -23,17 +23,17 @@
#include <sys/ioctl.h>
#include <sys/types.h>
#include <linux/gpio.h>
+#include "gpio-utils.h"
int monitor_device(const char *device_name,
- unsigned int line,
- uint32_t handleflags,
- uint32_t eventflags,
+ unsigned int *lines,
+ unsigned int num_lines,
+ struct gpio_v2_line_config *config,
unsigned int loops)
{
- struct gpioevent_request req;
- struct gpiohandle_data data;
+ struct gpio_v2_line_values values;
char *chrdev_name;
- int fd;
+ int cfd, lfd;
int ret;
int i = 0;
@@ -41,44 +41,56 @@ int monitor_device(const char *device_name,
if (ret < 0)
return -ENOMEM;
- fd = open(chrdev_name, 0);
- if (fd == -1) {
+ cfd = open(chrdev_name, 0);
+ if (cfd == -1) {
ret = -errno;
fprintf(stderr, "Failed to open %s\n", chrdev_name);
- goto exit_close_error;
+ goto exit_free_name;
}
- req.lineoffset = line;
- req.handleflags = handleflags;
- req.eventflags = eventflags;
- strcpy(req.consumer_label, "gpio-event-mon");
-
- ret = ioctl(fd, GPIO_GET_LINEEVENT_IOCTL, &req);
- if (ret == -1) {
- ret = -errno;
- fprintf(stderr, "Failed to issue GET EVENT "
- "IOCTL (%d)\n",
- ret);
- goto exit_close_error;
- }
+ ret = gpiotools_request_line(device_name, lines, num_lines, config,
+ "gpio-event-mon");
+ if (ret < 0)
+ goto exit_device_close;
+ else
+ lfd = ret;
/* Read initial states */
- ret = ioctl(req.fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, &data);
- if (ret == -1) {
- ret = -errno;
- fprintf(stderr, "Failed to issue GPIOHANDLE GET LINE "
- "VALUES IOCTL (%d)\n",
+ values.mask = 0;
+ values.bits = 0;
+ for (i = 0; i < num_lines; i++)
+ gpiotools_set_bit(&values.mask, i);
+ ret = gpiotools_get_values(lfd, &values);
+ if (ret < 0) {
+ fprintf(stderr,
+ "Failed to issue GPIO LINE GET VALUES IOCTL (%d)\n",
ret);
- goto exit_close_error;
+ goto exit_line_close;
}
- fprintf(stdout, "Monitoring line %d on %s\n", line, device_name);
- fprintf(stdout, "Initial line value: %d\n", data.values[0]);
+ if (num_lines == 1) {
+ fprintf(stdout, "Monitoring line %d on %s\n", lines[0], device_name);
+ fprintf(stdout, "Initial line value: %d\n",
+ gpiotools_test_bit(values.bits, 0));
+ } else {
+ fprintf(stdout, "Monitoring lines %d", lines[0]);
+ for (i = 1; i < num_lines - 1; i++)
+ fprintf(stdout, ", %d", lines[i]);
+ fprintf(stdout, " and %d on %s\n", lines[i], device_name);
+ fprintf(stdout, "Initial line values: %d",
+ gpiotools_test_bit(values.bits, 0));
+ for (i = 1; i < num_lines - 1; i++)
+ fprintf(stdout, ", %d",
+ gpiotools_test_bit(values.bits, i));
+ fprintf(stdout, " and %d\n",
+ gpiotools_test_bit(values.bits, i));
+ }
+ i = 0;
while (1) {
- struct gpioevent_data event;
+ struct gpio_v2_line_event event;
- ret = read(req.fd, &event, sizeof(event));
+ ret = read(lfd, &event, sizeof(event));
if (ret == -1) {
if (errno == -EAGAIN) {
fprintf(stderr, "nothing available\n");
@@ -96,12 +108,14 @@ int monitor_device(const char *device_name,
ret = -EIO;
break;
}
- fprintf(stdout, "GPIO EVENT %llu: ", event.timestamp);
+ fprintf(stdout, "GPIO EVENT at %" PRIu64 " on line %d (%d|%d) ",
+ (uint64_t)event.timestamp_ns, event.offset, event.line_seqno,
+ event.seqno);
switch (event.id) {
- case GPIOEVENT_EVENT_RISING_EDGE:
+ case GPIO_V2_LINE_EVENT_RISING_EDGE:
fprintf(stdout, "rising edge");
break;
- case GPIOEVENT_EVENT_FALLING_EDGE:
+ case GPIO_V2_LINE_EVENT_FALLING_EDGE:
fprintf(stdout, "falling edge");
break;
default:
@@ -114,9 +128,13 @@ int monitor_device(const char *device_name,
break;
}
-exit_close_error:
- if (close(fd) == -1)
+exit_line_close:
+ if (close(lfd) == -1)
+ perror("Failed to close line file");
+exit_device_close:
+ if (close(cfd) == -1)
perror("Failed to close GPIO character device file");
+exit_free_name:
free(chrdev_name);
return ret;
}
@@ -126,29 +144,39 @@ void print_usage(void)
fprintf(stderr, "Usage: gpio-event-mon [options]...\n"
"Listen to events on GPIO lines, 0->1 1->0\n"
" -n <name> Listen on GPIOs on a named device (must be stated)\n"
- " -o <n> Offset to monitor\n"
+ " -o <n> Offset of line to monitor (may be repeated)\n"
" -d Set line as open drain\n"
" -s Set line as open source\n"
" -r Listen for rising edges\n"
" -f Listen for falling edges\n"
+ " -w Report the wall-clock time for events\n"
+ " -t Report the hardware timestamp for events\n"
+ " -b <n> Debounce the line with period n microseconds\n"
" [-c <n>] Do <n> loops (optional, infinite loop if not stated)\n"
" -? This helptext\n"
"\n"
"Example:\n"
- "gpio-event-mon -n gpiochip0 -o 4 -r -f\n"
+ "gpio-event-mon -n gpiochip0 -o 4 -r -f -b 10000\n"
);
}
+#define EDGE_FLAGS \
+ (GPIO_V2_LINE_FLAG_EDGE_RISING | \
+ GPIO_V2_LINE_FLAG_EDGE_FALLING)
+
int main(int argc, char **argv)
{
const char *device_name = NULL;
- unsigned int line = -1;
+ unsigned int lines[GPIO_V2_LINES_MAX];
+ unsigned int num_lines = 0;
unsigned int loops = 0;
- uint32_t handleflags = GPIOHANDLE_REQUEST_INPUT;
- uint32_t eventflags = 0;
- int c;
+ struct gpio_v2_line_config config;
+ int c, attr, i;
+ unsigned long debounce_period_us = 0;
- while ((c = getopt(argc, argv, "c:n:o:dsrf?")) != -1) {
+ memset(&config, 0, sizeof(config));
+ config.flags = GPIO_V2_LINE_FLAG_INPUT;
+ while ((c = getopt(argc, argv, "c:n:o:b:dsrfwt?")) != -1) {
switch (c) {
case 'c':
loops = strtoul(optarg, NULL, 10);
@@ -157,19 +185,33 @@ int main(int argc, char **argv)
device_name = optarg;
break;
case 'o':
- line = strtoul(optarg, NULL, 10);
+ if (num_lines >= GPIO_V2_LINES_MAX) {
+ print_usage();
+ return -1;
+ }
+ lines[num_lines] = strtoul(optarg, NULL, 10);
+ num_lines++;
+ break;
+ case 'b':
+ debounce_period_us = strtoul(optarg, NULL, 10);
break;
case 'd':
- handleflags |= GPIOHANDLE_REQUEST_OPEN_DRAIN;
+ config.flags |= GPIO_V2_LINE_FLAG_OPEN_DRAIN;
break;
case 's':
- handleflags |= GPIOHANDLE_REQUEST_OPEN_SOURCE;
+ config.flags |= GPIO_V2_LINE_FLAG_OPEN_SOURCE;
break;
case 'r':
- eventflags |= GPIOEVENT_REQUEST_RISING_EDGE;
+ config.flags |= GPIO_V2_LINE_FLAG_EDGE_RISING;
break;
case 'f':
- eventflags |= GPIOEVENT_REQUEST_FALLING_EDGE;
+ config.flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
+ break;
+ case 'w':
+ config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
+ break;
+ case 't':
+ config.flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_HTE;
break;
case '?':
print_usage();
@@ -177,15 +219,23 @@ int main(int argc, char **argv)
}
}
- if (!device_name || line == -1) {
+ if (debounce_period_us) {
+ attr = config.num_attrs;
+ config.num_attrs++;
+ for (i = 0; i < num_lines; i++)
+ gpiotools_set_bit(&config.attrs[attr].mask, i);
+ config.attrs[attr].attr.id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
+ config.attrs[attr].attr.debounce_period_us = debounce_period_us;
+ }
+
+ if (!device_name || num_lines == 0) {
print_usage();
return -1;
}
- if (!eventflags) {
+ if (!(config.flags & EDGE_FLAGS)) {
printf("No flags specified, listening on both rising and "
"falling edges\n");
- eventflags = GPIOEVENT_REQUEST_BOTH_EDGES;
+ config.flags |= EDGE_FLAGS;
}
- return monitor_device(device_name, line, handleflags,
- eventflags, loops);
+ return monitor_device(device_name, lines, num_lines, &config, loops);
}
diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
index 9fd926e8cb52..54fdf59dd320 100644
--- a/tools/gpio/gpio-hammer.c
+++ b/tools/gpio/gpio-hammer.c
@@ -22,39 +22,46 @@
#include <linux/gpio.h>
#include "gpio-utils.h"
-int hammer_device(const char *device_name, unsigned int *lines, int nlines,
+int hammer_device(const char *device_name, unsigned int *lines, int num_lines,
unsigned int loops)
{
- struct gpiohandle_data data;
+ struct gpio_v2_line_values values;
+ struct gpio_v2_line_config config;
char swirr[] = "-\\|/";
int fd;
int ret;
int i, j;
unsigned int iteration = 0;
- memset(&data.values, 0, sizeof(data.values));
- ret = gpiotools_request_linehandle(device_name, lines, nlines,
- GPIOHANDLE_REQUEST_OUTPUT, &data,
- "gpio-hammer");
+ memset(&config, 0, sizeof(config));
+ config.flags = GPIO_V2_LINE_FLAG_OUTPUT;
+
+ ret = gpiotools_request_line(device_name, lines, num_lines,
+ &config, "gpio-hammer");
if (ret < 0)
goto exit_error;
else
fd = ret;
- ret = gpiotools_get_values(fd, &data);
+ values.mask = 0;
+ values.bits = 0;
+ for (i = 0; i < num_lines; i++)
+ gpiotools_set_bit(&values.mask, i);
+
+ ret = gpiotools_get_values(fd, &values);
if (ret < 0)
goto exit_close_error;
fprintf(stdout, "Hammer lines [");
- for (i = 0; i < nlines; i++) {
+ for (i = 0; i < num_lines; i++) {
fprintf(stdout, "%d", lines[i]);
- if (i != (nlines - 1))
+ if (i != (num_lines - 1))
fprintf(stdout, ", ");
}
fprintf(stdout, "] on %s, initial states: [", device_name);
- for (i = 0; i < nlines; i++) {
- fprintf(stdout, "%d", data.values[i]);
- if (i != (nlines - 1))
+ for (i = 0; i < num_lines; i++) {
+ fprintf(stdout, "%d", gpiotools_test_bit(values.bits, i));
+ if (i != (num_lines - 1))
fprintf(stdout, ", ");
}
fprintf(stdout, "]\n");
@@ -63,15 +70,15 @@ int hammer_device(const char *device_name, unsigned int *lines, int nlines,
j = 0;
while (1) {
/* Invert all lines so we blink */
- for (i = 0; i < nlines; i++)
- data.values[i] = !data.values[i];
+ for (i = 0; i < num_lines; i++)
+ gpiotools_change_bit(&values.bits, i);
- ret = gpiotools_set_values(fd, &data);
+ ret = gpiotools_set_values(fd, &values);
if (ret < 0)
goto exit_close_error;
/* Re-read values to get status */
- ret = gpiotools_get_values(fd, &data);
+ ret = gpiotools_get_values(fd, &values);
if (ret < 0)
goto exit_close_error;
@@ -81,9 +88,10 @@ int hammer_device(const char *device_name, unsigned int *lines, int nlines,
j = 0;
fprintf(stdout, "[");
- for (i = 0; i < nlines; i++) {
- fprintf(stdout, "%d: %d", lines[i], data.values[i]);
- if (i != (nlines - 1))
+ for (i = 0; i < num_lines; i++) {
+ fprintf(stdout, "%d: %d", lines[i],
+ gpiotools_test_bit(values.bits, i));
+ if (i != (num_lines - 1))
fprintf(stdout, ", ");
}
fprintf(stdout, "]\r");
@@ -97,7 +105,7 @@ int hammer_device(const char *device_name, unsigned int *lines, int nlines,
ret = 0;
exit_close_error:
- gpiotools_release_linehandle(fd);
+ gpiotools_release_line(fd);
exit_error:
return ret;
}
@@ -121,7 +129,7 @@ int main(int argc, char **argv)
const char *device_name = NULL;
unsigned int lines[GPIOHANDLES_MAX];
unsigned int loops = 0;
- int nlines;
+ int num_lines;
int c;
int i;
@@ -158,11 +166,11 @@ int main(int argc, char **argv)
return -1;
}
- nlines = i;
+ num_lines = i;
- if (!device_name || !nlines) {
+ if (!device_name || !num_lines) {
print_usage();
return -1;
}
- return hammer_device(device_name, lines, nlines, loops);
+ return hammer_device(device_name, lines, num_lines, loops);
}
diff --git a/tools/gpio/gpio-utils.c b/tools/gpio/gpio-utils.c
index 06003789e7c7..4096bcd511d1 100644
--- a/tools/gpio/gpio-utils.c
+++ b/tools/gpio/gpio-utils.c
@@ -20,7 +20,7 @@
#define CONSUMER "gpio-utils"
/**
- * doc: Operation of gpio
+ * DOC: Operation of gpio
*
* Provide the api of gpiochip for chardev interface. There are two
* types of api. The first one provide as same function as each
@@ -32,35 +32,34 @@
* following api will request gpio lines, do the operation and then
* release these lines.
*/
+
/**
- * gpiotools_request_linehandle() - request gpio lines in a gpiochip
+ * gpiotools_request_line() - request gpio lines in a gpiochip
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0"
* @lines: An array desired lines, specified by offset
* index for the associated GPIO device.
- * @nline: The number of lines to request.
- * @flag: The new flag for requsted gpio. Reference
- * "linux/gpio.h" for the meaning of flag.
- * @data: Default value will be set to gpio when flag is
- * GPIOHANDLE_REQUEST_OUTPUT.
- * @consumer_label: The name of consumer, such as "sysfs",
+ * @num_lines: The number of lines to request.
+ * @config: The new config for requested gpio. Reference
+ * "linux/gpio.h" for config details.
+ * @consumer: The name of consumer, such as "sysfs",
* "powerkey". This is useful for other users to
* know who is using.
*
* Request gpio lines through the ioctl provided by chardev. User
* could call gpiotools_set_values() and gpiotools_get_values() to
* read and write respectively through the returned fd. Call
- * gpiotools_release_linehandle() to release these lines after that.
+ * gpiotools_release_line() to release these lines after that.
*
* Return: On success return the fd;
* On failure return the errno.
*/
-int gpiotools_request_linehandle(const char *device_name, unsigned int *lines,
- unsigned int nlines, unsigned int flag,
- struct gpiohandle_data *data,
- const char *consumer_label)
+int gpiotools_request_line(const char *device_name, unsigned int *lines,
+ unsigned int num_lines,
+ struct gpio_v2_line_config *config,
+ const char *consumer)
{
- struct gpiohandle_request req;
+ struct gpio_v2_line_request req;
char *chrdev_name;
int fd;
int i;
@@ -75,45 +74,45 @@ int gpiotools_request_linehandle(const char *device_name, unsigned int *lines,
ret = -errno;
fprintf(stderr, "Failed to open %s, %s\n",
chrdev_name, strerror(errno));
- goto exit_close_error;
+ goto exit_free_name;
}
- for (i = 0; i < nlines; i++)
- req.lineoffsets[i] = lines[i];
+ memset(&req, 0, sizeof(req));
+ for (i = 0; i < num_lines; i++)
+ req.offsets[i] = lines[i];
- req.flags = flag;
- strcpy(req.consumer_label, consumer_label);
- req.lines = nlines;
- if (flag & GPIOHANDLE_REQUEST_OUTPUT)
- memcpy(req.default_values, data, sizeof(req.default_values));
+ req.config = *config;
+ strcpy(req.consumer, consumer);
+ req.num_lines = num_lines;
- ret = ioctl(fd, GPIO_GET_LINEHANDLE_IOCTL, &req);
+ ret = ioctl(fd, GPIO_V2_GET_LINE_IOCTL, &req);
if (ret == -1) {
ret = -errno;
fprintf(stderr, "Failed to issue %s (%d), %s\n",
- "GPIO_GET_LINEHANDLE_IOCTL", ret, strerror(errno));
+ "GPIO_GET_LINE_IOCTL", ret, strerror(errno));
}
-exit_close_error:
if (close(fd) == -1)
perror("Failed to close GPIO character device file");
+exit_free_name:
free(chrdev_name);
return ret < 0 ? ret : req.fd;
}
+
/**
- * gpiotools_set_values(): Set the value of gpio(s)
+ * gpiotools_set_values() - Set the value of gpio(s)
* @fd: The fd returned by
- * gpiotools_request_linehandle().
- * @data: The array of values want to set.
+ * gpiotools_request_line().
+ * @values: The array of values want to set.
*
* Return: On success return 0;
* On failure return the errno.
*/
-int gpiotools_set_values(const int fd, struct gpiohandle_data *data)
+int gpiotools_set_values(const int fd, struct gpio_v2_line_values *values)
{
int ret;
- ret = ioctl(fd, GPIOHANDLE_SET_LINE_VALUES_IOCTL, data);
+ ret = ioctl(fd, GPIO_V2_LINE_SET_VALUES_IOCTL, values);
if (ret == -1) {
ret = -errno;
fprintf(stderr, "Failed to issue %s (%d), %s\n",
@@ -125,19 +124,19 @@ int gpiotools_set_values(const int fd, struct gpiohandle_data *data)
}
/**
- * gpiotools_get_values(): Get the value of gpio(s)
+ * gpiotools_get_values() - Get the value of gpio(s)
* @fd: The fd returned by
- * gpiotools_request_linehandle().
- * @data: The array of values get from hardware.
+ * gpiotools_request_line().
+ * @values: The array of values get from hardware.
*
* Return: On success return 0;
* On failure return the errno.
*/
-int gpiotools_get_values(const int fd, struct gpiohandle_data *data)
+int gpiotools_get_values(const int fd, struct gpio_v2_line_values *values)
{
int ret;
- ret = ioctl(fd, GPIOHANDLE_GET_LINE_VALUES_IOCTL, data);
+ ret = ioctl(fd, GPIO_V2_LINE_GET_VALUES_IOCTL, values);
if (ret == -1) {
ret = -errno;
fprintf(stderr, "Failed to issue %s (%d), %s\n",
@@ -149,20 +148,20 @@ int gpiotools_get_values(const int fd, struct gpiohandle_data *data)
}
/**
- * gpiotools_release_linehandle(): Release the line(s) of gpiochip
+ * gpiotools_release_line() - Release the line(s) of gpiochip
* @fd: The fd returned by
- * gpiotools_request_linehandle().
+ * gpiotools_request_line().
*
* Return: On success return 0;
* On failure return the errno.
*/
-int gpiotools_release_linehandle(const int fd)
+int gpiotools_release_line(const int fd)
{
int ret;
ret = close(fd);
if (ret == -1) {
- perror("Failed to close GPIO LINEHANDLE device file");
+ perror("Failed to close GPIO LINE device file");
ret = -errno;
}
@@ -170,7 +169,7 @@ int gpiotools_release_linehandle(const int fd)
}
/**
- * gpiotools_get(): Get value from specific line
+ * gpiotools_get() - Get value from specific line
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0"
* @line: number of line, such as 2.
@@ -180,47 +179,58 @@ int gpiotools_release_linehandle(const int fd)
*/
int gpiotools_get(const char *device_name, unsigned int line)
{
- struct gpiohandle_data data;
+ int ret;
+ unsigned int value;
unsigned int lines[] = {line};
- gpiotools_gets(device_name, lines, 1, &data);
- return data.values[0];
+ ret = gpiotools_gets(device_name, lines, 1, &value);
+ if (ret)
+ return ret;
+ return value;
}
/**
- * gpiotools_gets(): Get values from specific lines.
+ * gpiotools_gets() - Get values from specific lines.
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0".
* @lines: An array desired lines, specified by offset
* index for the associated GPIO device.
- * @nline: The number of lines to request.
- * @data: The array of values get from gpiochip.
+ * @num_lines: The number of lines to request.
+ * @values: The array of values get from gpiochip.
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_gets(const char *device_name, unsigned int *lines,
- unsigned int nlines, struct gpiohandle_data *data)
+ unsigned int num_lines, unsigned int *values)
{
- int fd;
+ int fd, i;
int ret;
int ret_close;
+ struct gpio_v2_line_config config;
+ struct gpio_v2_line_values lv;
- ret = gpiotools_request_linehandle(device_name, lines, nlines,
- GPIOHANDLE_REQUEST_INPUT, data,
- CONSUMER);
+ memset(&config, 0, sizeof(config));
+ config.flags = GPIO_V2_LINE_FLAG_INPUT;
+ ret = gpiotools_request_line(device_name, lines, num_lines,
+ &config, CONSUMER);
if (ret < 0)
return ret;
fd = ret;
- ret = gpiotools_get_values(fd, data);
- ret_close = gpiotools_release_linehandle(fd);
+ for (i = 0; i < num_lines; i++)
+ gpiotools_set_bit(&lv.mask, i);
+ ret = gpiotools_get_values(fd, &lv);
+ if (!ret)
+ for (i = 0; i < num_lines; i++)
+ values[i] = gpiotools_test_bit(lv.bits, i);
+ ret_close = gpiotools_release_line(fd);
return ret < 0 ? ret : ret_close;
}
/**
- * gpiotools_set(): Set value to specific line
+ * gpiotools_set() - Set value to specific line
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0"
* @line: number of line, such as 2.
@@ -232,36 +242,43 @@ int gpiotools_gets(const char *device_name, unsigned int *lines,
int gpiotools_set(const char *device_name, unsigned int line,
unsigned int value)
{
- struct gpiohandle_data data;
unsigned int lines[] = {line};
- data.values[0] = value;
- return gpiotools_sets(device_name, lines, 1, &data);
+ return gpiotools_sets(device_name, lines, 1, &value);
}
/**
- * gpiotools_sets(): Set values to specific lines.
+ * gpiotools_sets() - Set values to specific lines.
* @device_name: The name of gpiochip without prefix "/dev/",
* such as "gpiochip0".
 * @lines: An array of desired lines, specified by offset
* index for the associated GPIO device.
- * @nline: The number of lines to request.
- * @data: The array of values set to gpiochip, must be
+ * @num_lines: The number of lines to request.
+ * @values: The array of values to set on the gpiochip, must be
* 0(low) or 1(high).
*
* Return: On success return 0;
* On failure return the errno.
*/
int gpiotools_sets(const char *device_name, unsigned int *lines,
- unsigned int nlines, struct gpiohandle_data *data)
+ unsigned int num_lines, unsigned int *values)
{
- int ret;
+ int ret, i;
+ struct gpio_v2_line_config config;
- ret = gpiotools_request_linehandle(device_name, lines, nlines,
- GPIOHANDLE_REQUEST_OUTPUT, data,
- CONSUMER);
+ memset(&config, 0, sizeof(config));
+ config.flags = GPIO_V2_LINE_FLAG_OUTPUT;
+ config.num_attrs = 1;
+ config.attrs[0].attr.id = GPIO_V2_LINE_ATTR_ID_OUTPUT_VALUES;
+ for (i = 0; i < num_lines; i++) {
+ gpiotools_set_bit(&config.attrs[0].mask, i);
+ gpiotools_assign_bit(&config.attrs[0].attr.values,
+ i, values[i]);
+ }
+ ret = gpiotools_request_line(device_name, lines, num_lines,
+ &config, CONSUMER);
if (ret < 0)
return ret;
- return gpiotools_release_linehandle(ret);
+ return gpiotools_release_line(ret);
}
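
For context, a minimal caller-side sketch of the reworked helpers above. The chip name ("gpiochip0"), the line offsets and the example_toggle() wrapper are illustrative only, not taken from the patch:

/* Illustrative caller of the uAPI-v2 helpers; error handling kept minimal. */
#include <stdio.h>
#include "gpio-utils.h"

int example_toggle(void)
{
	unsigned int lines[] = {2, 3};
	unsigned int vals[] = {1, 0};
	int ret;

	/* Drive line 2 high and line 3 low, then release the request. */
	ret = gpiotools_sets("gpiochip0", lines, 2, vals);
	if (ret)
		return ret;

	/* Read both lines back; vals[] is overwritten with the 0/1 states. */
	ret = gpiotools_gets("gpiochip0", lines, 2, vals);
	if (ret)
		return ret;

	printf("line 2 = %u, line 3 = %u\n", vals[0], vals[1]);
	return 0;
}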
diff --git a/tools/gpio/gpio-utils.h b/tools/gpio/gpio-utils.h
index cf37f13f3dcb..8af7c8ee19ce 100644
--- a/tools/gpio/gpio-utils.h
+++ b/tools/gpio/gpio-utils.h
@@ -12,7 +12,9 @@
#ifndef _GPIO_UTILS_H_
#define _GPIO_UTILS_H_
+#include <stdbool.h>
#include <string.h>
+#include <linux/types.h>
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
@@ -22,20 +24,50 @@ static inline int check_prefix(const char *str, const char *prefix)
strncmp(str, prefix, strlen(prefix)) == 0;
}
-int gpiotools_request_linehandle(const char *device_name, unsigned int *lines,
- unsigned int nlines, unsigned int flag,
- struct gpiohandle_data *data,
- const char *consumer_label);
-int gpiotools_set_values(const int fd, struct gpiohandle_data *data);
-int gpiotools_get_values(const int fd, struct gpiohandle_data *data);
-int gpiotools_release_linehandle(const int fd);
+int gpiotools_request_line(const char *device_name,
+ unsigned int *lines,
+ unsigned int num_lines,
+ struct gpio_v2_line_config *config,
+ const char *consumer);
+int gpiotools_set_values(const int fd, struct gpio_v2_line_values *values);
+int gpiotools_get_values(const int fd, struct gpio_v2_line_values *values);
+int gpiotools_release_line(const int fd);
int gpiotools_get(const char *device_name, unsigned int line);
int gpiotools_gets(const char *device_name, unsigned int *lines,
- unsigned int nlines, struct gpiohandle_data *data);
+ unsigned int num_lines, unsigned int *values);
int gpiotools_set(const char *device_name, unsigned int line,
unsigned int value);
int gpiotools_sets(const char *device_name, unsigned int *lines,
- unsigned int nlines, struct gpiohandle_data *data);
+ unsigned int num_lines, unsigned int *values);
+
+/* helper functions for gpio_v2_line_values bits */
+static inline void gpiotools_set_bit(__u64 *b, int n)
+{
+ *b |= _BITULL(n);
+}
+
+static inline void gpiotools_change_bit(__u64 *b, int n)
+{
+ *b ^= _BITULL(n);
+}
+
+static inline void gpiotools_clear_bit(__u64 *b, int n)
+{
+ *b &= ~_BITULL(n);
+}
+
+static inline int gpiotools_test_bit(__u64 b, int n)
+{
+ return !!(b & _BITULL(n));
+}
+
+static inline void gpiotools_assign_bit(__u64 *b, int n, bool value)
+{
+ if (value)
+ gpiotools_set_bit(b, n);
+ else
+ gpiotools_clear_bit(b, n);
+}
#endif /* _GPIO_UTILS_H_ */
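
The new __u64 bit helpers are meant to be paired with struct gpio_v2_line_values; a short sketch against an already-requested line fd (the fd and the two requested lines are assumed for illustration):

/* Illustrative: read back the first two lines of an existing request fd. */
#include <string.h>
#include <linux/gpio.h>
#include "gpio-utils.h"

int example_read(int fd)
{
	struct gpio_v2_line_values lv;
	int ret;

	memset(&lv, 0, sizeof(lv));
	/* Bit n selects the n-th line of the request, not the chip offset. */
	gpiotools_set_bit(&lv.mask, 0);
	gpiotools_set_bit(&lv.mask, 1);

	ret = gpiotools_get_values(fd, &lv);
	if (ret)
		return ret;

	/* State of the first requested line. */
	return gpiotools_test_bit(lv.bits, 0);
}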
diff --git a/tools/gpio/gpio-watch.c b/tools/gpio/gpio-watch.c
index 5cea24fddfa7..41e76d244192 100644
--- a/tools/gpio/gpio-watch.c
+++ b/tools/gpio/gpio-watch.c
@@ -10,6 +10,7 @@
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
+#include <inttypes.h>
#include <linux/gpio.h>
#include <poll.h>
#include <stdbool.h>
@@ -21,8 +22,8 @@
int main(int argc, char **argv)
{
- struct gpioline_info_changed chg;
- struct gpioline_info req;
+ struct gpio_v2_line_info_changed chg;
+ struct gpio_v2_line_info req;
struct pollfd pfd;
int fd, i, j, ret;
char *event, *end;
@@ -40,11 +41,11 @@ int main(int argc, char **argv)
for (i = 0, j = 2; i < argc - 2; i++, j++) {
memset(&req, 0, sizeof(req));
- req.line_offset = strtoul(argv[j], &end, 0);
+ req.offset = strtoul(argv[j], &end, 0);
if (*end != '\0')
goto err_usage;
- ret = ioctl(fd, GPIO_GET_LINEINFO_WATCH_IOCTL, &req);
+ ret = ioctl(fd, GPIO_V2_GET_LINEINFO_WATCH_IOCTL, &req);
if (ret) {
perror("unable to set up line watch");
return EXIT_FAILURE;
@@ -71,13 +72,13 @@ int main(int argc, char **argv)
}
switch (chg.event_type) {
- case GPIOLINE_CHANGED_REQUESTED:
+ case GPIO_V2_LINE_CHANGED_REQUESTED:
event = "requested";
break;
- case GPIOLINE_CHANGED_RELEASED:
+ case GPIO_V2_LINE_CHANGED_RELEASED:
event = "released";
break;
- case GPIOLINE_CHANGED_CONFIG:
+ case GPIO_V2_LINE_CHANGED_CONFIG:
event = "config changed";
break;
default:
@@ -86,8 +87,8 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
- printf("line %u: %s at %llu\n",
- chg.info.line_offset, event, chg.timestamp);
+ printf("line %u: %s at %" PRIu64 "\n",
+ chg.info.offset, event, (uint64_t)chg.timestamp_ns);
}
}
diff --git a/tools/gpio/lsgpio.c b/tools/gpio/lsgpio.c
index e1430f504c13..c61d061247e1 100644
--- a/tools/gpio/lsgpio.c
+++ b/tools/gpio/lsgpio.c
@@ -25,45 +25,77 @@
struct gpio_flag {
char *name;
- unsigned long mask;
+ unsigned long long mask;
};
struct gpio_flag flagnames[] = {
{
- .name = "kernel",
- .mask = GPIOLINE_FLAG_KERNEL,
+ .name = "used",
+ .mask = GPIO_V2_LINE_FLAG_USED,
+ },
+ {
+ .name = "input",
+ .mask = GPIO_V2_LINE_FLAG_INPUT,
},
{
.name = "output",
- .mask = GPIOLINE_FLAG_IS_OUT,
+ .mask = GPIO_V2_LINE_FLAG_OUTPUT,
},
{
.name = "active-low",
- .mask = GPIOLINE_FLAG_ACTIVE_LOW,
+ .mask = GPIO_V2_LINE_FLAG_ACTIVE_LOW,
},
{
.name = "open-drain",
- .mask = GPIOLINE_FLAG_OPEN_DRAIN,
+ .mask = GPIO_V2_LINE_FLAG_OPEN_DRAIN,
},
{
.name = "open-source",
- .mask = GPIOLINE_FLAG_OPEN_SOURCE,
+ .mask = GPIO_V2_LINE_FLAG_OPEN_SOURCE,
+ },
+ {
+ .name = "pull-up",
+ .mask = GPIO_V2_LINE_FLAG_BIAS_PULL_UP,
+ },
+ {
+ .name = "pull-down",
+ .mask = GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN,
+ },
+ {
+ .name = "bias-disabled",
+ .mask = GPIO_V2_LINE_FLAG_BIAS_DISABLED,
+ },
+ {
+ .name = "clock-realtime",
+ .mask = GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME,
},
};
-void print_flags(unsigned long flags)
+static void print_attributes(struct gpio_v2_line_info *info)
{
int i;
- int printed = 0;
+ const char *field_format = "%s";
for (i = 0; i < ARRAY_SIZE(flagnames); i++) {
- if (flags & flagnames[i].mask) {
- if (printed)
- fprintf(stdout, " ");
- fprintf(stdout, "%s", flagnames[i].name);
- printed++;
+ if (info->flags & flagnames[i].mask) {
+ fprintf(stdout, field_format, flagnames[i].name);
+ field_format = ", %s";
}
}
+
+ if ((info->flags & GPIO_V2_LINE_FLAG_EDGE_RISING) &&
+ (info->flags & GPIO_V2_LINE_FLAG_EDGE_FALLING))
+ fprintf(stdout, field_format, "both-edges");
+ else if (info->flags & GPIO_V2_LINE_FLAG_EDGE_RISING)
+ fprintf(stdout, field_format, "rising-edge");
+ else if (info->flags & GPIO_V2_LINE_FLAG_EDGE_FALLING)
+ fprintf(stdout, field_format, "falling-edge");
+
+ for (i = 0; i < info->num_attrs; i++) {
+ if (info->attrs[i].id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE)
+ fprintf(stdout, ", debounce_period=%dusec",
+ info->attrs[0].debounce_period_us);
+ }
}
int list_device(const char *device_name)
@@ -82,7 +114,7 @@ int list_device(const char *device_name)
if (fd == -1) {
ret = -errno;
fprintf(stderr, "Failed to open %s\n", chrdev_name);
- goto exit_close_error;
+ goto exit_free_name;
}
/* Inspect this GPIO chip */
@@ -97,18 +129,18 @@ int list_device(const char *device_name)
/* Loop over the lines and print info */
for (i = 0; i < cinfo.lines; i++) {
- struct gpioline_info linfo;
+ struct gpio_v2_line_info linfo;
memset(&linfo, 0, sizeof(linfo));
- linfo.line_offset = i;
+ linfo.offset = i;
- ret = ioctl(fd, GPIO_GET_LINEINFO_IOCTL, &linfo);
+ ret = ioctl(fd, GPIO_V2_GET_LINEINFO_IOCTL, &linfo);
if (ret == -1) {
ret = -errno;
perror("Failed to issue LINEINFO IOCTL\n");
goto exit_close_error;
}
- fprintf(stdout, "\tline %2d:", linfo.line_offset);
+ fprintf(stdout, "\tline %2d:", linfo.offset);
if (linfo.name[0])
fprintf(stdout, " \"%s\"", linfo.name);
else
@@ -119,7 +151,7 @@ int list_device(const char *device_name)
fprintf(stdout, " unused");
if (linfo.flags) {
fprintf(stdout, " [");
- print_flags(linfo.flags);
+ print_attributes(&linfo);
fprintf(stdout, "]");
}
fprintf(stdout, "\n");
@@ -129,6 +161,7 @@ int list_device(const char *device_name)
exit_close_error:
if (close(fd) == -1)
perror("Failed to close GPIO character device file");
+exit_free_name:
free(chrdev_name);
return ret;
}
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index b57143d9459c..fe770e679ae8 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -47,7 +47,7 @@ $(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
clean:
rm -f $(ALL_PROGRAMS)
- find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(sbindir); \
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index ee9c1bb2293e..27f5e7dfc2f7 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -44,7 +44,7 @@
/*
* KVP protocol: The user mode component first registers with the
- * the kernel component. Subsequently, the kernel component requests, data
+ * kernel component. Subsequently, the kernel component requests data
* for the specified keys. In response to this message the user mode component
* fills in the value corresponding to the specified key. We overload the
* sequence field in the cn_msg header to define our KVP message types.
@@ -437,7 +437,7 @@ void kvp_get_os_info(void)
/*
* Parse the /etc/os-release file if present:
- * http://www.freedesktop.org/software/systemd/man/os-release.html
+ * https://www.freedesktop.org/software/systemd/man/os-release.html
*/
file = fopen("/etc/os-release", "r");
if (file != NULL) {
@@ -772,11 +772,11 @@ static int kvp_process_ip_address(void *addrp,
const char *str;
if (family == AF_INET) {
- addr = (struct sockaddr_in *)addrp;
+ addr = addrp;
str = inet_ntop(family, &addr->sin_addr, tmp, 50);
addr_length = INET_ADDRSTRLEN;
} else {
- addr6 = (struct sockaddr_in6 *)addrp;
+ addr6 = addrp;
str = inet_ntop(family, &addr6->sin6_addr.s6_addr, tmp, 50);
addr_length = INET6_ADDRSTRLEN;
}
diff --git a/tools/iio/Makefile b/tools/iio/Makefile
index 3de763d9ab70..fa720f062229 100644
--- a/tools/iio/Makefile
+++ b/tools/iio/Makefile
@@ -27,6 +27,7 @@ include $(srctree)/tools/build/Makefile.include
#
$(OUTPUT)include/linux/iio: ../../include/uapi/linux/iio
mkdir -p $(OUTPUT)include/linux/iio 2>&1 || true
+ ln -sf $(CURDIR)/../../include/uapi/linux/iio/buffer.h $@
ln -sf $(CURDIR)/../../include/uapi/linux/iio/events.h $@
ln -sf $(CURDIR)/../../include/uapi/linux/iio/types.h $@
@@ -57,7 +58,7 @@ $(OUTPUT)iio_generic_buffer: $(IIO_GENERIC_BUFFER_IN)
clean:
rm -f $(ALL_PROGRAMS)
rm -rf $(OUTPUT)include/linux/iio
- find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(bindir); \
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index f115d166c985..0a5c2bb60030 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -14,6 +14,7 @@
#include <unistd.h>
#include <stdlib.h>
+#include <dirent.h>
#include <stdbool.h>
#include <stdio.h>
#include <errno.h>
@@ -67,12 +68,16 @@ static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_THRESH_ADAPTIVE] = "thresh_adaptive",
[IIO_EV_TYPE_MAG_ADAPTIVE] = "mag_adaptive",
[IIO_EV_TYPE_CHANGE] = "change",
+ [IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
+ [IIO_EV_TYPE_GESTURE] = "gesture",
};
static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_EITHER] = "either",
[IIO_EV_DIR_RISING] = "rising",
- [IIO_EV_DIR_FALLING] = "falling"
+ [IIO_EV_DIR_FALLING] = "falling",
+ [IIO_EV_DIR_SINGLETAP] = "singletap",
+ [IIO_EV_DIR_DOUBLETAP] = "doubletap",
};
static const char * const iio_modifier_names[] = {
@@ -119,6 +124,13 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_PM2P5] = "pm2p5",
[IIO_MOD_PM4] = "pm4",
[IIO_MOD_PM10] = "pm10",
+ [IIO_MOD_O2] = "o2",
+ [IIO_MOD_LINEAR_X] = "linear_x",
+ [IIO_MOD_LINEAR_Y] = "linear_y",
+ [IIO_MOD_LINEAR_Z] = "linear_z",
+ [IIO_MOD_PITCH] = "pitch",
+ [IIO_MOD_YAW] = "yaw",
+ [IIO_MOD_ROLL] = "roll",
};
static bool event_is_known(struct iio_event_data *event)
@@ -211,6 +223,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_PM2P5:
case IIO_MOD_PM4:
case IIO_MOD_PM10:
+ case IIO_MOD_O2:
break;
default:
return false;
@@ -223,6 +236,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_EV_TYPE_THRESH_ADAPTIVE:
case IIO_EV_TYPE_MAG_ADAPTIVE:
case IIO_EV_TYPE_CHANGE:
+ case IIO_EV_TYPE_GESTURE:
break;
default:
return false;
@@ -232,6 +246,8 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_EV_DIR_EITHER:
case IIO_EV_DIR_RISING:
case IIO_EV_DIR_FALLING:
+ case IIO_EV_DIR_SINGLETAP:
+ case IIO_EV_DIR_DOUBLETAP:
case IIO_EV_DIR_NONE:
break;
default:
@@ -276,24 +292,72 @@ static void print_event(struct iio_event_data *event)
printf(", direction: %s", iio_ev_dir_text[dir]);
printf("\n");
+ fflush(stdout);
+}
+
+/* Enable or disable events in sysfs if the knob is available */
+static void enable_events(char *dev_dir, int enable)
+{
+ const struct dirent *ent;
+ char evdir[256];
+ int ret;
+ DIR *dp;
+
+ snprintf(evdir, sizeof(evdir), FORMAT_EVENTS_DIR, dev_dir);
+ evdir[sizeof(evdir)-1] = '\0';
+
+ dp = opendir(evdir);
+ if (!dp) {
+ fprintf(stderr, "Enabling/disabling events: can't open %s\n",
+ evdir);
+ return;
+ }
+
+ while (ent = readdir(dp), ent) {
+ if (iioutils_check_suffix(ent->d_name, "_en")) {
+ printf("%sabling: %s\n",
+ enable ? "En" : "Dis",
+ ent->d_name);
+ ret = write_sysfs_int(ent->d_name, evdir,
+ enable);
+ if (ret < 0)
+ fprintf(stderr, "Failed to enable/disable %s\n",
+ ent->d_name);
+ }
+ }
+
+ if (closedir(dp) == -1) {
+ perror("Enabling/disabling channels: "
+ "Failed to close directory");
+ return;
+ }
}
int main(int argc, char **argv)
{
struct iio_event_data event;
const char *device_name;
+ char *dev_dir_name = NULL;
char *chrdev_name;
int ret;
int dev_num;
int fd, event_fd;
-
- if (argc <= 1) {
- fprintf(stderr, "Usage: %s <device_name>\n", argv[0]);
+ bool all_events = false;
+
+ if (argc == 2) {
+ device_name = argv[1];
+ } else if (argc == 3) {
+ device_name = argv[2];
+ if (!strcmp(argv[1], "-a"))
+ all_events = true;
+ } else {
+ fprintf(stderr,
+ "Usage: iio_event_monitor [options] <device_name>\n"
+ "Listen and display events from IIO devices\n"
+ " -a Auto-activate all available events\n");
return -1;
}
- device_name = argv[1];
-
dev_num = find_type_by_name(device_name, "iio:device");
if (dev_num >= 0) {
printf("Found IIO device with name %s with device number %d\n",
@@ -301,6 +365,10 @@ int main(int argc, char **argv)
ret = asprintf(&chrdev_name, "/dev/iio:device%d", dev_num);
if (ret < 0)
return -ENOMEM;
+ /* Look up sysfs dir as well if we can */
+ ret = asprintf(&dev_dir_name, "%siio:device%d", iio_dir, dev_num);
+ if (ret < 0)
+ return -ENOMEM;
} else {
/*
* If we can't find an IIO device by name assume device_name is
@@ -311,6 +379,9 @@ int main(int argc, char **argv)
return -ENOMEM;
}
+ if (all_events && dev_dir_name)
+ enable_events(dev_dir_name, 1);
+
fd = open(chrdev_name, 0);
if (fd == -1) {
ret = -errno;
@@ -363,6 +434,10 @@ int main(int argc, char **argv)
perror("Failed to close event file");
error_free_chrdev_name:
+ /* Disable events after use */
+ if (all_events && dev_dir_name)
+ enable_events(dev_dir_name, 0);
+
free(chrdev_name);
return ret;
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 34d63bcebcd2..f8deae4e26a1 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -30,6 +30,8 @@
#include <inttypes.h>
#include <stdbool.h>
#include <signal.h>
+#include <sys/ioctl.h>
+#include <linux/iio/buffer.h>
#include "iio_utils.h"
/**
@@ -49,7 +51,7 @@ enum autochan {
* Has the side effect of filling the channels[i].location values used
* in processing the buffer output.
**/
-int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
+static int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
{
int bytes = 0;
int i = 0;
@@ -68,7 +70,7 @@ int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
return bytes;
}
-void print1byte(uint8_t input, struct iio_channel_info *info)
+static void print1byte(uint8_t input, struct iio_channel_info *info)
{
/*
* Shift before conversion to avoid sign extension
@@ -85,7 +87,7 @@ void print1byte(uint8_t input, struct iio_channel_info *info)
}
}
-void print2byte(uint16_t input, struct iio_channel_info *info)
+static void print2byte(uint16_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -108,7 +110,7 @@ void print2byte(uint16_t input, struct iio_channel_info *info)
}
}
-void print4byte(uint32_t input, struct iio_channel_info *info)
+static void print4byte(uint32_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -131,7 +133,7 @@ void print4byte(uint32_t input, struct iio_channel_info *info)
}
}
-void print8byte(uint64_t input, struct iio_channel_info *info)
+static void print8byte(uint64_t input, struct iio_channel_info *info)
{
/* First swap if incorrect endian */
if (info->be)
@@ -167,9 +169,8 @@ void print8byte(uint64_t input, struct iio_channel_info *info)
* to fill the location offsets.
* @num_channels: number of channels
**/
-void process_scan(char *data,
- struct iio_channel_info *channels,
- int num_channels)
+static void process_scan(char *data, struct iio_channel_info *channels,
+ int num_channels)
{
int k;
@@ -198,7 +199,7 @@ void process_scan(char *data,
printf("\n");
}
-static int enable_disable_all_channels(char *dev_dir_name, int enable)
+static int enable_disable_all_channels(char *dev_dir_name, int buffer_idx, int enable)
{
const struct dirent *ent;
char scanelemdir[256];
@@ -206,7 +207,7 @@ static int enable_disable_all_channels(char *dev_dir_name, int enable)
int ret;
snprintf(scanelemdir, sizeof(scanelemdir),
- FORMAT_SCAN_ELEMENTS_DIR, dev_dir_name);
+ FORMAT_SCAN_ELEMENTS_DIR, dev_dir_name, buffer_idx);
scanelemdir[sizeof(scanelemdir)-1] = '\0';
dp = opendir(scanelemdir);
@@ -238,12 +239,13 @@ static int enable_disable_all_channels(char *dev_dir_name, int enable)
return 0;
}
-void print_usage(void)
+static void print_usage(void)
{
fprintf(stderr, "Usage: generic_buffer [options]...\n"
"Capture, convert and output data from IIO device buffer\n"
" -a Auto-activate all available channels\n"
" -A Force-activate ALL channels\n"
+ " -b <n> The buffer which to open (by index), default 0\n"
" -c <n> Do n conversions, or loop forever if n < 0\n"
" -e Disable wait for event (new data)\n"
" -g Use trigger-less mode\n"
@@ -257,12 +259,13 @@ void print_usage(void)
" -w <n> Set delay between reads in us (event-less mode)\n");
}
-enum autochan autochannels = AUTOCHANNELS_DISABLED;
-char *dev_dir_name = NULL;
-char *buf_dir_name = NULL;
-bool current_trigger_set = false;
+static enum autochan autochannels = AUTOCHANNELS_DISABLED;
+static char *dev_dir_name = NULL;
+static char *buf_dir_name = NULL;
+static int buffer_idx = 0;
+static bool current_trigger_set = false;
-void cleanup(void)
+static void cleanup(void)
{
int ret;
@@ -287,21 +290,21 @@ void cleanup(void)
/* Disable channels if auto-enabled */
if (dev_dir_name && autochannels == AUTOCHANNELS_ACTIVE) {
- ret = enable_disable_all_channels(dev_dir_name, 0);
+ ret = enable_disable_all_channels(dev_dir_name, buffer_idx, 0);
if (ret)
fprintf(stderr, "Failed to disable all channels\n");
autochannels = AUTOCHANNELS_DISABLED;
}
}
-void sig_handler(int signum)
+static void sig_handler(int signum)
{
fprintf(stderr, "Caught signal %d\n", signum);
cleanup();
exit(-signum);
}
-void register_cleanup(void)
+static void register_cleanup(void)
{
struct sigaction sa = { .sa_handler = sig_handler };
const int signums[] = { SIGINT, SIGTERM, SIGABRT };
@@ -334,7 +337,9 @@ int main(int argc, char **argv)
unsigned long long j;
unsigned long toread;
int ret, c;
- int fp = -1;
+ struct stat st;
+ int fd = -1;
+ int buf_fd = -1;
int num_channels = 0;
char *trigger_name = NULL, *device_name = NULL;
@@ -353,7 +358,7 @@ int main(int argc, char **argv)
register_cleanup();
- while ((c = getopt_long(argc, argv, "aAc:egl:n:N:t:T:w:?", longopts,
+ while ((c = getopt_long(argc, argv, "aAb:c:egl:n:N:t:T:w:?", longopts,
NULL)) != -1) {
switch (c) {
case 'a':
@@ -362,7 +367,20 @@ int main(int argc, char **argv)
case 'A':
autochannels = AUTOCHANNELS_ENABLED;
force_autochannels = true;
- break;
+ break;
+ case 'b':
+ errno = 0;
+ buffer_idx = strtoll(optarg, &dummy, 10);
+ if (errno) {
+ ret = -errno;
+ goto error;
+ }
+ if (buffer_idx < 0) {
+ ret = -ERANGE;
+ goto error;
+ }
+
+ break;
case 'c':
errno = 0;
num_loops = strtoll(optarg, &dummy, 10);
@@ -519,7 +537,7 @@ int main(int argc, char **argv)
* Parse the files in scan_elements to identify what channels are
* present
*/
- ret = build_channel_array(dev_dir_name, &channels, &num_channels);
+ ret = build_channel_array(dev_dir_name, buffer_idx, &channels, &num_channels);
if (ret) {
fprintf(stderr, "Problem reading scan element information\n"
"diag %s\n", dev_dir_name);
@@ -536,7 +554,7 @@ int main(int argc, char **argv)
(autochannels == AUTOCHANNELS_ENABLED && force_autochannels)) {
fprintf(stderr, "Enabling all channels\n");
- ret = enable_disable_all_channels(dev_dir_name, 1);
+ ret = enable_disable_all_channels(dev_dir_name, buffer_idx, 1);
if (ret) {
fprintf(stderr, "Failed to enable all channels\n");
goto error;
@@ -545,7 +563,7 @@ int main(int argc, char **argv)
/* This flags that we need to disable the channels again */
autochannels = AUTOCHANNELS_ACTIVE;
- ret = build_channel_array(dev_dir_name, &channels,
+ ret = build_channel_array(dev_dir_name, buffer_idx, &channels,
&num_channels);
if (ret) {
fprintf(stderr, "Problem reading scan element "
@@ -566,7 +584,7 @@ int main(int argc, char **argv)
fprintf(stderr, "Enable channels manually in "
FORMAT_SCAN_ELEMENTS_DIR
"/*_en or pass -a to autoenable channels and "
- "try again.\n", dev_dir_name);
+ "try again.\n", dev_dir_name, buffer_idx);
ret = -ENOENT;
goto error;
}
@@ -577,12 +595,25 @@ int main(int argc, char **argv)
* be built rather than found.
*/
ret = asprintf(&buf_dir_name,
- "%siio:device%d/buffer", iio_dir, dev_num);
+ "%siio:device%d/buffer%d", iio_dir, dev_num, buffer_idx);
if (ret < 0) {
ret = -ENOMEM;
goto error;
}
+ if (stat(buf_dir_name, &st)) {
+ fprintf(stderr, "Could not stat() '%s', got error %d: %s\n",
+ buf_dir_name, errno, strerror(errno));
+ ret = -errno;
+ goto error;
+ }
+
+ if (!S_ISDIR(st.st_mode)) {
+ fprintf(stderr, "File '%s' is not a directory\n", buf_dir_name);
+ ret = -EFAULT;
+ goto error;
+ }
+
if (!notrigger) {
printf("%s %s\n", dev_dir_name, trigger_name);
/*
@@ -599,6 +630,35 @@ int main(int argc, char **argv)
}
}
+ ret = asprintf(&buffer_access, "/dev/iio:device%d", dev_num);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* Attempt to open the access dev in non-blocking mode */
+ fd = open(buffer_access, O_RDONLY | O_NONBLOCK);
+ if (fd == -1) { /* TODO: If it isn't there make the node */
+ ret = -errno;
+ fprintf(stderr, "Failed to open %s\n", buffer_access);
+ goto error;
+ }
+
+ /* specify for which buffer index we want an FD */
+ buf_fd = buffer_idx;
+
+ ret = ioctl(fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd);
+ if (ret == -1 || buf_fd == -1) {
+ ret = -errno;
+ if (ret == -ENODEV || ret == -EINVAL)
+ fprintf(stderr,
+ "Device does not have this many buffers\n");
+ else
+ fprintf(stderr, "Failed to retrieve buffer fd\n");
+
+ goto error;
+ }
+
/* Setup ring buffer parameters */
ret = write_sysfs_int("length", buf_dir_name, buf_len);
if (ret < 0)
@@ -608,7 +668,8 @@ int main(int argc, char **argv)
ret = write_sysfs_int("enable", buf_dir_name, 1);
if (ret < 0) {
fprintf(stderr,
- "Failed to enable buffer: %s\n", strerror(-ret));
+ "Failed to enable buffer '%s': %s\n",
+ buf_dir_name, strerror(-ret));
goto error;
}
@@ -619,24 +680,30 @@ int main(int argc, char **argv)
goto error;
}
- ret = asprintf(&buffer_access, "/dev/iio:device%d", dev_num);
- if (ret < 0) {
- ret = -ENOMEM;
- goto error;
+ /*
+ * This check is done purely for sanity and can be omitted in
+ * normal operation. If this is buffer0, reads on the chardev
+ * must fail with EBUSY from here on.
+ */
+ if (buffer_idx == 0) {
+ errno = 0;
+ read_size = read(fd, data, 1);
+ if (read_size > -1 || errno != EBUSY) {
+ ret = -EFAULT;
+ perror("Reading from '%s' should not be possible after ioctl()");
+ goto error;
+ }
}
- /* Attempt to open non blocking the access dev */
- fp = open(buffer_access, O_RDONLY | O_NONBLOCK);
- if (fp == -1) { /* TODO: If it isn't there make the node */
- ret = -errno;
- fprintf(stderr, "Failed to open %s\n", buffer_access);
- goto error;
- }
+ /* Now close the main chardev FD and let the buffer FD take over */
+ if (close(fd) == -1)
+ perror("Failed to close character device file");
+ fd = -1;
for (j = 0; j < num_loops || num_loops < 0; j++) {
if (!noevents) {
struct pollfd pfd = {
- .fd = fp,
+ .fd = buf_fd,
.events = POLLIN,
};
@@ -648,13 +715,13 @@ int main(int argc, char **argv)
continue;
}
- toread = buf_len;
} else {
usleep(timedelay);
- toread = 64;
}
- read_size = read(fp, data, toread * scan_size);
+ toread = buf_len;
+
+ read_size = read(buf_fd, data, toread * scan_size);
if (read_size < 0) {
if (errno == EAGAIN) {
fprintf(stderr, "nothing available\n");
@@ -671,7 +738,9 @@ int main(int argc, char **argv)
error:
cleanup();
- if (fp >= 0 && close(fp) == -1)
+ if (fd >= 0 && close(fd) == -1)
+ perror("Failed to close character device");
+ if (buf_fd >= 0 && close(buf_fd) == -1)
perror("Failed to close buffer");
free(buffer_access);
free(data);
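
The buffer-fd handshake added above boils down to one extra ioctl(); a stripped-down sketch of that sequence, with the device number and buffer index as parameters and error handling simplified relative to the tool:

/* Sketch of the per-buffer fd lookup used by iio_generic_buffer above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/iio/buffer.h>

int open_iio_buffer(int dev_num, int buffer_idx)
{
	char path[64];
	int fd, buf_fd = buffer_idx;

	snprintf(path, sizeof(path), "/dev/iio:device%d", dev_num);
	fd = open(path, O_RDONLY | O_NONBLOCK);
	if (fd == -1)
		return -1;

	/* Hand in the buffer index, get back a dedicated fd for that buffer. */
	if (ioctl(fd, IIO_BUFFER_GET_FD_IOCTL, &buf_fd) == -1)
		buf_fd = -1;

	/* The main chardev fd is no longer needed once buf_fd exists. */
	close(fd);
	return buf_fd;	/* read() scan data from this fd, or -1 on failure */
}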
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index 7399eb7f1378..6a00a6eecaef 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -77,15 +77,17 @@ int iioutils_break_up_name(const char *full_name, char **generic_name)
* @mask: output a bit mask for the raw data
* @be: output if data in big endian
* @device_dir: the IIO device directory
+ * @buffer_idx: the IIO buffer index
* @name: the channel name
* @generic_name: the channel type name
*
* Returns a value >= 0 on success, otherwise a negative error code.
**/
-int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
- unsigned *shift, uint64_t *mask, unsigned *be,
- const char *device_dir, const char *name,
- const char *generic_name)
+static int iioutils_get_type(unsigned int *is_signed, unsigned int *bytes,
+ unsigned int *bits_used, unsigned int *shift,
+ uint64_t *mask, unsigned int *be,
+ const char *device_dir, int buffer_idx,
+ const char *name, const char *generic_name)
{
FILE *sysfsfp;
int ret;
@@ -95,7 +97,7 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
unsigned padint;
const struct dirent *ent;
- ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir);
+ ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir, buffer_idx);
if (ret < 0)
return -ENOMEM;
@@ -262,6 +264,7 @@ int iioutils_get_param_float(float *output, const char *param_name,
if (fscanf(sysfsfp, "%f", output) != 1)
ret = errno ? -errno : -ENODATA;
+ fclose(sysfsfp);
break;
}
error_free_filename:
@@ -303,12 +306,13 @@ void bsort_channel_array_by_index(struct iio_channel_info *ci_array, int cnt)
/**
* build_channel_array() - function to figure out what channels are present
* @device_dir: the IIO device directory in sysfs
+ * @buffer_idx: the IIO buffer for this channel array
* @ci_array: output the resulting array of iio_channel_info
* @counter: output the amount of array elements
*
* Returns 0 on success, otherwise a negative error code.
**/
-int build_channel_array(const char *device_dir,
+int build_channel_array(const char *device_dir, int buffer_idx,
struct iio_channel_info **ci_array, int *counter)
{
DIR *dp;
@@ -321,7 +325,7 @@ int build_channel_array(const char *device_dir,
char *filename;
*counter = 0;
- ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir);
+ ret = asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir, buffer_idx);
if (ret < 0)
return -ENOMEM;
@@ -342,9 +346,9 @@ int build_channel_array(const char *device_dir,
}
sysfsfp = fopen(filename, "r");
+ free(filename);
if (!sysfsfp) {
ret = -errno;
- free(filename);
goto error_close_dir;
}
@@ -354,7 +358,6 @@ int build_channel_array(const char *device_dir,
if (fclose(sysfsfp))
perror("build_channel_array(): Failed to close file");
- free(filename);
goto error_close_dir;
}
if (ret == 1)
@@ -362,11 +365,9 @@ int build_channel_array(const char *device_dir,
if (fclose(sysfsfp)) {
ret = -errno;
- free(filename);
goto error_close_dir;
}
- free(filename);
}
*ci_array = malloc(sizeof(**ci_array) * (*counter));
@@ -392,9 +393,9 @@ int build_channel_array(const char *device_dir,
}
sysfsfp = fopen(filename, "r");
+ free(filename);
if (!sysfsfp) {
ret = -errno;
- free(filename);
count--;
goto error_cleanup_array;
}
@@ -402,20 +403,17 @@ int build_channel_array(const char *device_dir,
errno = 0;
if (fscanf(sysfsfp, "%i", &current_enabled) != 1) {
ret = errno ? -errno : -ENODATA;
- free(filename);
count--;
goto error_cleanup_array;
}
if (fclose(sysfsfp)) {
ret = -errno;
- free(filename);
count--;
goto error_cleanup_array;
}
if (!current_enabled) {
- free(filename);
count--;
continue;
}
@@ -426,7 +424,6 @@ int build_channel_array(const char *device_dir,
strlen(ent->d_name) -
strlen("_en"));
if (!current->name) {
- free(filename);
ret = -ENOMEM;
count--;
goto error_cleanup_array;
@@ -436,7 +433,6 @@ int build_channel_array(const char *device_dir,
ret = iioutils_break_up_name(current->name,
&current->generic_name);
if (ret) {
- free(filename);
free(current->name);
count--;
goto error_cleanup_array;
@@ -447,17 +443,16 @@ int build_channel_array(const char *device_dir,
scan_el_dir,
current->name);
if (ret < 0) {
- free(filename);
ret = -ENOMEM;
goto error_cleanup_array;
}
sysfsfp = fopen(filename, "r");
+ free(filename);
if (!sysfsfp) {
ret = -errno;
- fprintf(stderr, "failed to open %s\n",
- filename);
- free(filename);
+ fprintf(stderr, "failed to open %s/%s_index\n",
+ scan_el_dir, current->name);
goto error_cleanup_array;
}
@@ -467,17 +462,14 @@ int build_channel_array(const char *device_dir,
if (fclose(sysfsfp))
perror("build_channel_array(): Failed to close file");
- free(filename);
goto error_cleanup_array;
}
if (fclose(sysfsfp)) {
ret = -errno;
- free(filename);
goto error_cleanup_array;
}
- free(filename);
/* Find the scale */
ret = iioutils_get_param_float(&current->scale,
"scale",
@@ -502,6 +494,7 @@ int build_channel_array(const char *device_dir,
&current->mask,
&current->be,
device_dir,
+ buffer_idx,
current->name,
current->generic_name);
if (ret < 0)
@@ -543,6 +536,10 @@ static int calc_digits(int num)
{
int count = 0;
+ /* It takes a digit to represent zero */
+ if (!num)
+ return 1;
+
while (num != 0) {
num /= 10;
count++;
diff --git a/tools/iio/iio_utils.h b/tools/iio/iio_utils.h
index 74bde4fde2c8..663c94a6c705 100644
--- a/tools/iio/iio_utils.h
+++ b/tools/iio/iio_utils.h
@@ -12,7 +12,8 @@
/* Made up value to limit allocation sizes */
#define IIO_MAX_NAME_LENGTH 64
-#define FORMAT_SCAN_ELEMENTS_DIR "%s/scan_elements"
+#define FORMAT_SCAN_ELEMENTS_DIR "%s/buffer%d"
+#define FORMAT_EVENTS_DIR "%s/events"
#define FORMAT_TYPE_FILE "%s_type"
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
@@ -57,15 +58,11 @@ static inline int iioutils_check_suffix(const char *str, const char *suffix)
}
int iioutils_break_up_name(const char *full_name, char **generic_name);
-int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
- unsigned *shift, uint64_t *mask, unsigned *be,
- const char *device_dir, const char *name,
- const char *generic_name);
int iioutils_get_param_float(float *output, const char *param_name,
const char *device_dir, const char *name,
const char *generic_name);
void bsort_channel_array_by_index(struct iio_channel_info *ci_array, int cnt);
-int build_channel_array(const char *device_dir,
+int build_channel_array(const char *device_dir, int buffer_idx,
struct iio_channel_info **ci_array, int *counter);
int find_type_by_name(const char *name, const char *type);
int write_sysfs_int(const char *filename, const char *basedir, int val);
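
With FORMAT_SCAN_ELEMENTS_DIR now taking a buffer index, per-buffer sysfs paths are built the same way iio_utils.c does internally; a small hedged example (the device directory string and print_scan_dir() wrapper are illustrative):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "iio_utils.h"

/* Illustrative: expand the scan-elements directory of buffer 0. */
static int print_scan_dir(const char *device_dir)
{
	char *scan_el_dir;

	if (asprintf(&scan_el_dir, FORMAT_SCAN_ELEMENTS_DIR, device_dir, 0) < 0)
		return -ENOMEM;
	printf("%s\n", scan_el_dir);	/* e.g. ".../iio:device0/buffer0" */
	free(scan_el_dir);
	return 0;
}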
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h
index 4c1966f7c77a..9b3c528bab92 100644
--- a/tools/include/asm-generic/atomic-gcc.h
+++ b/tools/include/asm-generic/atomic-gcc.h
@@ -4,6 +4,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/bitops.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -69,4 +70,26 @@ static inline int atomic_cmpxchg(atomic_t *v, int oldval, int newval)
return cmpxchg(&(v)->counter, oldval, newval);
}
+static inline int test_and_set_bit(long nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ long old;
+
+ addr += BIT_WORD(nr);
+
+ old = __sync_fetch_and_or(addr, mask);
+ return !!(old & mask);
+}
+
+static inline int test_and_clear_bit(long nr, unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ long old;
+
+ addr += BIT_WORD(nr);
+
+ old = __sync_fetch_and_and(addr, ~mask);
+ return !!(old & mask);
+}
+
#endif /* __TOOLS_ASM_GENERIC_ATOMIC_H */
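
A hedged sketch of what the __sync-based helpers enable, assuming the tools include paths and the matching arch fallback; the claim_flags word and claim_slot0() are invented for illustration:

#include <linux/atomic.h>	/* pulls in the tools <asm/atomic.h> fallback above */

static unsigned long claim_flags[1];	/* illustrative flag word */

/* The first caller sees the old bit as 0 and wins; later callers see 1. */
static int claim_slot0(void)
{
	return test_and_set_bit(0, claim_flags) == 0;
}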
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 5d2ab38965cc..9ab313e93555 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -18,7 +18,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#ifndef _TOOLS_LINUX_BITOPS_H_
#error only <linux/bitops.h> can be included directly
diff --git a/tools/include/asm-generic/bitops/atomic.h b/tools/include/asm-generic/bitops/atomic.h
index 2f6ea28764a7..ab37a221b41a 100644
--- a/tools/include/asm-generic/bitops/atomic.h
+++ b/tools/include/asm-generic/bitops/atomic.h
@@ -5,14 +5,11 @@
#include <asm/types.h>
#include <asm/bitsperlong.h>
-static inline void set_bit(int nr, unsigned long *addr)
-{
- addr[nr / __BITS_PER_LONG] |= 1UL << (nr % __BITS_PER_LONG);
-}
-
-static inline void clear_bit(int nr, unsigned long *addr)
-{
- addr[nr / __BITS_PER_LONG] &= ~(1UL << (nr % __BITS_PER_LONG));
-}
+/*
+ * Just alias the test versions: the compiler built-in atomics all "fetch" the
+ * old value anyway, and optimizing compile-time constants on x86 isn't worth the complexity.
+ */
+#define set_bit test_and_set_bit
+#define clear_bit test_and_clear_bit
#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h
deleted file mode 100644
index 16ed1982cb34..000000000000
--- a/tools/include/asm-generic/bitops/find.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_
-#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_
-
-#ifndef find_next_bit
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
- size, unsigned long offset);
-#endif
-
-#ifndef find_next_and_bit
-/**
- * find_next_and_bit - find the next set bit in both memory regions
- * @addr1: The first address to base the search on
- * @addr2: The second address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number for the next set bit
- * If no bits are set, returns @size.
- */
-extern unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
- unsigned long offset);
-#endif
-
-#ifndef find_next_zero_bit
-
-/**
- * find_next_zero_bit - find the next cleared bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The bitmap size in bits
- *
- * Returns the bit number of the next zero bit
- * If no bits are zero, returns @size.
- */
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset);
-#endif
-
-#ifndef find_first_bit
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first set bit.
- * If no bits are set, returns @size.
- */
-extern unsigned long find_first_bit(const unsigned long *addr,
- unsigned long size);
-
-#endif /* find_first_bit */
-
-#ifndef find_first_zero_bit
-
-/**
- * find_first_zero_bit - find the first cleared bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum number of bits to search
- *
- * Returns the bit number of the first cleared bit.
- * If no bits are zero, returns @size.
- */
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size);
-#endif
-
-#endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/include/asm-generic/bitops/non-atomic.h b/tools/include/asm-generic/bitops/non-atomic.h
index 7e10c4b50c5d..0c472a833408 100644
--- a/tools/include/asm-generic/bitops/non-atomic.h
+++ b/tools/include/asm-generic/bitops/non-atomic.h
@@ -2,10 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
-#include <asm/types.h>
+#include <linux/bits.h>
/**
- * __set_bit - Set a bit in memory
+ * ___set_bit - Set a bit in memory
* @nr: the bit to set
* @addr: the address to start counting from
*
@@ -13,7 +13,8 @@
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+___set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -21,7 +22,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
*p |= mask;
}
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -30,7 +32,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
}
/**
- * __change_bit - Toggle a bit in memory
+ * ___change_bit - Toggle a bit in memory
* @nr: the bit to change
* @addr: the address to start counting from
*
@@ -38,7 +40,8 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+___change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -47,7 +50,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
}
/**
- * __test_and_set_bit - Set a bit and return its old value
+ * ___test_and_set_bit - Set a bit and return its old value
* @nr: Bit to set
* @addr: Address to count from
*
@@ -55,7 +58,8 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -66,7 +70,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
}
/**
- * __test_and_clear_bit - Clear a bit and return its old value
+ * ___test_and_clear_bit - Clear a bit and return its old value
* @nr: Bit to clear
* @addr: Address to count from
*
@@ -74,7 +78,8 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline bool
+___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -85,8 +90,8 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
}
/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr,
- volatile unsigned long *addr)
+static __always_inline bool
+___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
@@ -97,11 +102,12 @@ static inline int __test_and_change_bit(int nr,
}
/**
- * test_bit - Determine whether a bit is set
+ * _test_bit - Determine whether a bit is set
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static __always_inline bool
+_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
diff --git a/tools/include/asm-generic/bitsperlong.h b/tools/include/asm-generic/bitsperlong.h
index 8f2283052333..2093d56ddd11 100644
--- a/tools/include/asm-generic/bitsperlong.h
+++ b/tools/include/asm-generic/bitsperlong.h
@@ -18,4 +18,7 @@
#define BITS_PER_LONG_LONG 64
#endif
+#define small_const_nbits(nbits) \
+ (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)
+
#endif /* __ASM_GENERIC_BITS_PER_LONG */
diff --git a/tools/include/asm-generic/hugetlb_encode.h b/tools/include/asm-generic/hugetlb_encode.h
index e4732d3c2998..de687009bfe5 100644
--- a/tools/include/asm-generic/hugetlb_encode.h
+++ b/tools/include/asm-generic/hugetlb_encode.h
@@ -20,15 +20,18 @@
#define HUGETLB_FLAG_ENCODE_SHIFT 26
#define HUGETLB_FLAG_ENCODE_MASK 0x3f
-#define HUGETLB_FLAG_ENCODE_64KB (16 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_512KB (19 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1MB (20 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2MB (21 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_8MB (23 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16MB (24 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_256MB (28 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_1GB (30 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_2GB (31 << HUGETLB_FLAG_ENCODE_SHIFT)
-#define HUGETLB_FLAG_ENCODE_16GB (34 << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16KB (14U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_64KB (16U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512KB (19U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1MB (20U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2MB (21U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_8MB (23U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16MB (24U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_32MB (25U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_256MB (28U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_512MB (29U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_1GB (30U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_2GB (31U << HUGETLB_FLAG_ENCODE_SHIFT)
+#define HUGETLB_FLAG_ENCODE_16GB (34U << HUGETLB_FLAG_ENCODE_SHIFT)
#endif /* _ASM_GENERIC_HUGETLB_ENCODE_H_ */
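
Each encoding is just log2(page size) shifted into the upper mmap-flag bits (2 MB = 2^21, hence 21U << 26). A minimal sketch of passing one to mmap(), assuming a libc that exposes MAP_HUGETLB and that this header is reachable on the include path:

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>
#include <asm-generic/hugetlb_encode.h>	/* the tools copy above; path assumed */

/* Illustrative: request an anonymous 2 MB huge-page backed mapping.
 * MAP_HUGE_2MB in <linux/mman.h> carries exactly this encoding.
 */
static void *map_2mb_huge(size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
		    HUGETLB_FLAG_ENCODE_2MB,
		    -1, 0);
}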
diff --git a/tools/include/asm-generic/unaligned.h b/tools/include/asm-generic/unaligned.h
new file mode 100644
index 000000000000..47387c607035
--- /dev/null
+++ b/tools/include/asm-generic/unaligned.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copied from the kernel sources to tools/perf/:
+ */
+
+#ifndef __TOOLS_LINUX_ASM_GENERIC_UNALIGNED_H
+#define __TOOLS_LINUX_ASM_GENERIC_UNALIGNED_H
+
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+#endif /* __TOOLS_LINUX_ASM_GENERIC_UNALIGNED_H */
+
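
A small usage sketch of the accessors; __packed is spelled out locally because in-tree it normally comes from the tools compiler headers, and the include path is assumed:

#include <stddef.h>
#include <stdint.h>

#ifndef __packed
#define __packed __attribute__((__packed__))	/* normally from linux/compiler.h */
#endif
#include <asm-generic/unaligned.h>	/* the tools copy added above */

/* Read a native-endian u32 that may sit at any byte offset in a buffer. */
static uint32_t read_u32_at(const unsigned char *buf, size_t off)
{
	return get_unaligned((const uint32_t *)(buf + off));
}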
diff --git a/tools/include/asm/alternative-asm.h b/tools/include/asm/alternative.h
index b54bd860dff6..b54bd860dff6 100644
--- a/tools/include/asm/alternative-asm.h
+++ b/tools/include/asm/alternative.h
diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
index dc696c151e1c..8d378c57cb01 100644
--- a/tools/include/asm/barrier.h
+++ b/tools/include/asm/barrier.h
@@ -24,8 +24,6 @@
#include "../../arch/ia64/include/asm/barrier.h"
#elif defined(__xtensa__)
#include "../../arch/xtensa/include/asm/barrier.h"
-#elif defined(__nds32__)
-#include "../../arch/nds32/include/asm/barrier.h"
#else
#include <asm-generic/barrier.h>
#endif
diff --git a/tools/include/linux/arm-smccc.h b/tools/include/linux/arm-smccc.h
new file mode 100644
index 000000000000..63ce9bebccd3
--- /dev/null
+++ b/tools/include/linux/arm-smccc.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015, Linaro Limited
+ */
+#ifndef __LINUX_ARM_SMCCC_H
+#define __LINUX_ARM_SMCCC_H
+
+#include <linux/const.h>
+
+/*
+ * This file provides common defines for ARM SMC Calling Convention as
+ * specified in
+ * https://developer.arm.com/docs/den0028/latest
+ *
+ * This code is up-to-date with version DEN 0028 C
+ */
+
+#define ARM_SMCCC_STD_CALL _AC(0,U)
+#define ARM_SMCCC_FAST_CALL _AC(1,U)
+#define ARM_SMCCC_TYPE_SHIFT 31
+
+#define ARM_SMCCC_SMC_32 0
+#define ARM_SMCCC_SMC_64 1
+#define ARM_SMCCC_CALL_CONV_SHIFT 30
+
+#define ARM_SMCCC_OWNER_MASK 0x3F
+#define ARM_SMCCC_OWNER_SHIFT 24
+
+#define ARM_SMCCC_FUNC_MASK 0xFFFF
+
+#define ARM_SMCCC_IS_FAST_CALL(smc_val) \
+ ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT))
+#define ARM_SMCCC_IS_64(smc_val) \
+ ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT))
+#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK)
+#define ARM_SMCCC_OWNER_NUM(smc_val) \
+ (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK)
+
+#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
+ (((type) << ARM_SMCCC_TYPE_SHIFT) | \
+ ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
+ (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
+ ((func_num) & ARM_SMCCC_FUNC_MASK))
+
+#define ARM_SMCCC_OWNER_ARCH 0
+#define ARM_SMCCC_OWNER_CPU 1
+#define ARM_SMCCC_OWNER_SIP 2
+#define ARM_SMCCC_OWNER_OEM 3
+#define ARM_SMCCC_OWNER_STANDARD 4
+#define ARM_SMCCC_OWNER_STANDARD_HYP 5
+#define ARM_SMCCC_OWNER_VENDOR_HYP 6
+#define ARM_SMCCC_OWNER_TRUSTED_APP 48
+#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
+#define ARM_SMCCC_OWNER_TRUSTED_OS 50
+#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+
+#define ARM_SMCCC_FUNC_QUERY_CALL_UID 0xff01
+
+#define ARM_SMCCC_QUIRK_NONE 0
+#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */
+
+#define ARM_SMCCC_VERSION_1_0 0x10000
+#define ARM_SMCCC_VERSION_1_1 0x10001
+#define ARM_SMCCC_VERSION_1_2 0x10002
+#define ARM_SMCCC_VERSION_1_3 0x10003
+
+#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+
+#define ARM_SMCCC_VERSION_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0)
+
+#define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 1)
+
+#define ARM_SMCCC_ARCH_SOC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 2)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_1 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x8000)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_2 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x7fff)
+
+#define ARM_SMCCC_ARCH_WORKAROUND_3 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ 0, 0x3fff)
+
+#define ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_FUNC_QUERY_CALL_UID)
+
+/* KVM UID value: 28b46fb6-2ec5-11e9-a9ca-4b564d003a74 */
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_0 0xb66fb428U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_1 0xe911c52eU
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_2 0x564bcaa9U
+#define ARM_SMCCC_VENDOR_HYP_UID_KVM_REG_3 0x743a004dU
+
+/* KVM "vendor specific" services */
+#define ARM_SMCCC_KVM_FUNC_FEATURES 0
+#define ARM_SMCCC_KVM_FUNC_PTP 1
+#define ARM_SMCCC_KVM_FUNC_FEATURES_2 127
+#define ARM_SMCCC_KVM_NUM_FUNCS 128
+
+#define ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_FEATURES)
+
+#define SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED 1
+
+/*
+ * ptp_kvm is a feature used for time sync between vm and host.
+ * ptp_kvm module in guest kernel will get service from host using
+ * this hypercall ID.
+ */
+#define ARM_SMCCC_VENDOR_HYP_KVM_PTP_FUNC_ID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_VENDOR_HYP, \
+ ARM_SMCCC_KVM_FUNC_PTP)
+
+/* ptp_kvm counter type ID */
+#define KVM_PTP_VIRT_COUNTER 0
+#define KVM_PTP_PHYS_COUNTER 1
+
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x21)
+
+/* TRNG entropy source calls (defined by ARM DEN0098) */
+#define ARM_SMCCC_TRNG_VERSION \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x50)
+
+#define ARM_SMCCC_TRNG_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x51)
+
+#define ARM_SMCCC_TRNG_GET_UUID \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x52)
+
+#define ARM_SMCCC_TRNG_RND32 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+#define ARM_SMCCC_TRNG_RND64 \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD, \
+ 0x53)
+
+/*
+ * Return codes defined in ARM DEN 0070A
+ * ARM DEN 0070A is now merged/consolidated into ARM DEN 0028 C
+ */
+#define SMCCC_RET_SUCCESS 0
+#define SMCCC_RET_NOT_SUPPORTED -1
+#define SMCCC_RET_NOT_REQUIRED -2
+#define SMCCC_RET_INVALID_PARAMETER -3
+
+#endif /*__LINUX_ARM_SMCCC_H*/
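
SMCCC function IDs are packed bitfields, so the helper macros round-trip cleanly; a quick hedged sketch, assuming the tools include paths (the printed breakdown matches ARM_SMCCC_ARCH_FEATURES_FUNC_ID = 0x80000001):

#include <stdio.h>
#include <linux/arm-smccc.h>	/* the tools copy added above */

int main(void)
{
	unsigned int id = ARM_SMCCC_ARCH_FEATURES_FUNC_ID;

	/* fast call, SMC32 convention, owner 0 (arch), function number 1 */
	printf("id=0x%08x fast=%d owner=%u func=0x%x\n",
	       id, !!ARM_SMCCC_IS_FAST_CALL(id),
	       (unsigned int)ARM_SMCCC_OWNER_NUM(id),
	       (unsigned int)ARM_SMCCC_FUNC_NUM(id));
	return 0;
}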
diff --git a/tools/include/linux/atomic.h b/tools/include/linux/atomic.h
index 00a6c4ca562b..01907b33537e 100644
--- a/tools/include/linux/atomic.h
+++ b/tools/include/linux/atomic.h
@@ -4,6 +4,8 @@
#include <asm/atomic.h>
+void atomic_long_set(atomic_long_t *v, long i);
+
/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed atomic_cmpxchg
diff --git a/tools/include/linux/bitfield.h b/tools/include/linux/bitfield.h
new file mode 100644
index 000000000000..6093fa6db260
--- /dev/null
+++ b/tools/include/linux/bitfield.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ */
+
+#ifndef _LINUX_BITFIELD_H
+#define _LINUX_BITFIELD_H
+
+#include <linux/build_bug.h>
+#include <asm/byteorder.h>
+
+/*
+ * Bitfield access macros
+ *
+ * FIELD_{GET,PREP} macros take as first parameter shifted mask
+ * from which they extract the base mask and shift amount.
+ * Mask must be a compilation time constant.
+ *
+ * Example:
+ *
+ * #define REG_FIELD_A GENMASK(6, 0)
+ * #define REG_FIELD_B BIT(7)
+ * #define REG_FIELD_C GENMASK(15, 8)
+ * #define REG_FIELD_D GENMASK(31, 16)
+ *
+ * Get:
+ * a = FIELD_GET(REG_FIELD_A, reg);
+ * b = FIELD_GET(REG_FIELD_B, reg);
+ *
+ * Set:
+ * reg = FIELD_PREP(REG_FIELD_A, 1) |
+ * FIELD_PREP(REG_FIELD_B, 0) |
+ * FIELD_PREP(REG_FIELD_C, c) |
+ * FIELD_PREP(REG_FIELD_D, 0x40);
+ *
+ * Modify:
+ * reg &= ~REG_FIELD_C;
+ * reg |= FIELD_PREP(REG_FIELD_C, c);
+ */
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define __scalar_type_to_unsigned_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (unsigned type)0
+
+#define __unsigned_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (unsigned char)0, \
+ __scalar_type_to_unsigned_cases(char), \
+ __scalar_type_to_unsigned_cases(short), \
+ __scalar_type_to_unsigned_cases(int), \
+ __scalar_type_to_unsigned_cases(long), \
+ __scalar_type_to_unsigned_cases(long long), \
+ default: (x)))
+
+#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
+
+#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+ ({ \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
+ _pfx "mask is not constant"); \
+ BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
+ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ _pfx "value too large for the field"); \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
+ __bf_cast_unsigned(_reg, ~0ull), \
+ _pfx "type of reg too small for mask"); \
+ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
+ (1ULL << __bf_shf(_mask))); \
+ })
+
+/**
+ * FIELD_MAX() - produce the maximum value representable by a field
+ * @_mask: shifted mask defining the field's length and position
+ *
+ * FIELD_MAX() returns the maximum value that can be held in the field
+ * specified by @_mask.
+ */
+#define FIELD_MAX(_mask) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_MAX: "); \
+ (typeof(_mask))((_mask) >> __bf_shf(_mask)); \
+ })
+
+/**
+ * FIELD_FIT() - check if value fits in the field
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to test against the field
+ *
+ * Return: true if @_val can fit inside @_mask, false if @_val is too big.
+ */
+#define FIELD_FIT(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, 0ULL, "FIELD_FIT: "); \
+ !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \
+ })
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
+ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ })
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg: value of entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
+ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ })
+
+extern void __compiletime_error("value doesn't fit into mask")
+__field_overflow(void);
+extern void __compiletime_error("bad bitfield mask")
+__bad_mask(void);
+static __always_inline u64 field_multiplier(u64 field)
+{
+ if ((field | (field - 1)) & ((field | (field - 1)) + 1))
+ __bad_mask();
+ return field & -field;
+}
+static __always_inline u64 field_mask(u64 field)
+{
+ return field / field_multiplier(field);
+}
+#define field_max(field) ((typeof(field))field_mask(field))
+#define ____MAKE_OP(type,base,to,from) \
+static __always_inline __##type type##_encode_bits(base v, base field) \
+{ \
+ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
+ __field_overflow(); \
+ return to((v & field_mask(field)) * field_multiplier(field)); \
+} \
+static __always_inline __##type type##_replace_bits(__##type old, \
+ base val, base field) \
+{ \
+ return (old & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline void type##p_replace_bits(__##type *p, \
+ base val, base field) \
+{ \
+ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \
+} \
+static __always_inline base type##_get_bits(__##type v, base field) \
+{ \
+ return (from(v) & field)/field_multiplier(field); \
+}
+#define __MAKE_OP(size) \
+ ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
+ ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
+ ____MAKE_OP(u##size,u##size,,)
+____MAKE_OP(u8,u8,,)
+__MAKE_OP(16)
+__MAKE_OP(32)
+__MAKE_OP(64)
+#undef __MAKE_OP
+#undef ____MAKE_OP
+
+#endif
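For illustration (not part of the patch): a minimal sketch of how the FIELD_PREP()/FIELD_GET() macros above are typically used; the register layout and names below are hypothetical.

    /* Hypothetical 32-bit control register layout, for illustration only. */
    #define CTRL_MODE    GENMASK(2, 0)    /* bits 2:0  */
    #define CTRL_ENABLE  BIT(3)           /* bit 3     */
    #define CTRL_DIV     GENMASK(15, 8)   /* bits 15:8 */

    static inline u32 ctrl_pack(u32 mode, u32 div)
    {
            /* Compose the register value field by field and OR the parts together. */
            return FIELD_PREP(CTRL_MODE, mode) |
                   FIELD_PREP(CTRL_ENABLE, 1) |
                   FIELD_PREP(CTRL_DIV, div);
    }

    static inline u32 ctrl_get_div(u32 reg)
    {
            /* Mask out the divider field and shift it down to bit 0. */
            return FIELD_GET(CTRL_DIV, reg);
    }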
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 477a1cae513f..f3566ea0f932 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -1,36 +1,31 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PERF_BITOPS_H
-#define _PERF_BITOPS_H
+#ifndef _TOOLS_LINUX_BITMAP_H
+#define _TOOLS_LINUX_BITMAP_H
#include <string.h>
#include <linux/bitops.h>
+#include <linux/find.h>
#include <stdlib.h>
#include <linux/kernel.h>
#define DECLARE_BITMAP(name,bits) \
unsigned long name[BITS_TO_LONGS(bits)]
-int __bitmap_weight(const unsigned long *bitmap, int bits);
+unsigned int __bitmap_weight(const unsigned long *bitmap, int bits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits);
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits);
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
void bitmap_clear(unsigned long *map, unsigned int start, int len);
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
+#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
-#define BITMAP_LAST_WORD_MASK(nbits) \
-( \
- ((nbits) % BITS_PER_LONG) ? \
- (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL \
-)
-
-#define small_const_nbits(nbits) \
- (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG)
-
-static inline void bitmap_zero(unsigned long *dst, int nbits)
+static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = 0UL;
@@ -50,7 +45,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}
-static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
+static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -58,7 +53,7 @@ static inline int bitmap_empty(const unsigned long *src, unsigned nbits)
return find_first_bit(src, nbits) == nbits;
}
-static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
+static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits));
@@ -66,7 +61,7 @@ static inline int bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
-static inline int bitmap_weight(const unsigned long *src, int nbits)
+static inline unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
@@ -74,7 +69,7 @@ static inline int bitmap_weight(const unsigned long *src, int nbits)
}
static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
- const unsigned long *src2, int nbits)
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
*dst = *src1 | *src2;
@@ -83,44 +78,10 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
}
/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- */
-static inline int test_and_set_bit(int nr, unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
-
- old = *p;
- *p = old | mask;
-
- return (old & mask) != 0;
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- */
-static inline int test_and_clear_bit(int nr, unsigned long *addr)
-{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
-
- old = *p;
- *p = old & ~mask;
-
- return (old & mask) != 0;
-}
-
-/**
- * bitmap_alloc - Allocate bitmap
+ * bitmap_zalloc - Allocate bitmap
* @nbits: Number of bits
*/
-static inline unsigned long *bitmap_alloc(int nbits)
+static inline unsigned long *bitmap_zalloc(int nbits)
{
return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}
@@ -141,7 +102,7 @@ static inline void bitmap_free(unsigned long *bitmap)
* @buf: buffer to store output
* @size: size of @buf
*/
-size_t bitmap_scnprintf(unsigned long *bitmap, int nbits,
+size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
char *buf, size_t size);
/**
@@ -151,7 +112,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, int nbits,
* @src2: operand 2
* @nbits: size of bitmap
*/
-static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
+static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
@@ -167,8 +128,8 @@ static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
-static inline int bitmap_equal(const unsigned long *src1,
- const unsigned long *src2, unsigned int nbits)
+static inline bool bitmap_equal(const unsigned long *src1,
+ const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
@@ -178,4 +139,14 @@ static inline int bitmap_equal(const unsigned long *src1,
return __bitmap_equal(src1, src2, nbits);
}
-#endif /* _PERF_BITOPS_H */
+static inline bool bitmap_intersects(const unsigned long *src1,
+ const unsigned long *src2,
+ unsigned int nbits)
+{
+ if (small_const_nbits(nbits))
+ return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
+ else
+ return __bitmap_intersects(src1, src2, nbits);
+}
+
+#endif /* _TOOLS_LINUX_BITMAP_H */
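For illustration (not part of the patch): a short sketch of the updated tools bitmap API, assuming the usual tools/include environment (linux/bitmap.h plus the __set_bit() wrapper from linux/bitops.h).

    unsigned long *a = bitmap_zalloc(128);        /* zero-filled, 128 bits */
    unsigned long *b = bitmap_zalloc(128);

    __set_bit(5, a);
    __set_bit(5, b);
    __set_bit(64, b);

    bool overlap = bitmap_intersects(a, b, 128);  /* true: bit 5 is set in both */
    unsigned int nset = bitmap_weight(a, 128);    /* 1: only bit 5 is set in a */

    bitmap_free(a);
    bitmap_free(b);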
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 5fca38fe1ba8..f18683b95ea6 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -26,6 +26,22 @@ extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);
/*
+ * Defined here because those may be needed by architecture-specific static
+ * inlines.
+ */
+
+#define bitop(op, nr, addr) \
+ op(nr, addr)
+
+#define __set_bit(nr, addr) bitop(___set_bit, nr, addr)
+#define __clear_bit(nr, addr) bitop(___clear_bit, nr, addr)
+#define __change_bit(nr, addr) bitop(___change_bit, nr, addr)
+#define __test_and_set_bit(nr, addr) bitop(___test_and_set_bit, nr, addr)
+#define __test_and_clear_bit(nr, addr) bitop(___test_and_clear_bit, nr, addr)
+#define __test_and_change_bit(nr, addr) bitop(___test_and_change_bit, nr, addr)
+#define test_bit(nr, addr) bitop(_test_bit, nr, addr)
+
+/*
* Include this here because some architectures need generic_ffs/fls in
* scope
*
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
index 669d69441a62..7c0cf5031abe 100644
--- a/tools/include/linux/bits.h
+++ b/tools/include/linux/bits.h
@@ -3,10 +3,9 @@
#define __LINUX_BITS_H
#include <linux/const.h>
+#include <vdso/bits.h>
#include <asm/bitsperlong.h>
-#define BIT(nr) (UL(1) << (nr))
-#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
#define BIT_ULL_MASK(nr) (ULL(1) << ((nr) % BITS_PER_LONG_LONG))
@@ -18,12 +17,29 @@
* position @h. For example
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
*/
-#define GENMASK(h, l) \
+#if !defined(__ASSEMBLY__)
+#include <linux/build_bug.h>
+#define GENMASK_INPUT_CHECK(h, l) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
+ __is_constexpr((l) > (h)), (l) > (h), 0)))
+#else
+/*
+ * BUILD_BUG_ON_ZERO is not available in h files included from asm files,
+ * disable the input check if that is the case.
+ */
+#define GENMASK_INPUT_CHECK(h, l) 0
+#endif
+
+#define __GENMASK(h, l) \
(((~UL(0)) - (UL(1) << (l)) + 1) & \
(~UL(0) >> (BITS_PER_LONG - 1 - (h))))
+#define GENMASK(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
+#define __GENMASK_ULL(h, l) \
(((~ULL(0)) - (ULL(1) << (l)) + 1) & \
(~ULL(0) >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define GENMASK_ULL(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
#endif /* __LINUX_BITS_H */
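For illustration (not part of the patch): a few worked values for the checked GENMASK() forms above.

    u32 m32 = GENMASK(11, 4);       /* 0x00000ff0: bits 11..4 set */
    u64 m64 = GENMASK_ULL(39, 21);  /* 0x000000ffffe00000, as in the comment */
    /* GENMASK(4, 11) now fails to build: l > h trips GENMASK_INPUT_CHECK(). */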
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
new file mode 100644
index 000000000000..71e54b1e3796
--- /dev/null
+++ b/tools/include/linux/btf_ids.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_BTF_IDS_H
+#define _LINUX_BTF_IDS_H
+
+struct btf_id_set {
+ u32 cnt;
+ u32 ids[];
+};
+
+#ifdef CONFIG_DEBUG_INFO_BTF
+
+#include <linux/compiler.h> /* for __PASTE */
+
+/*
+ * The following macros help to define lists of BTF IDs placed
+ * in the .BTF_ids section. They are initially filled with zeros
+ * (during compilation) and resolved later during the
+ * linking phase by resolve_btfids tool.
+ *
+ * Any change in list layout must be reflected in resolve_btfids
+ * tool logic.
+ */
+
+#define BTF_IDS_SECTION ".BTF_ids"
+
+#define ____BTF_ID(symbol) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".local " #symbol " ; \n" \
+".type " #symbol ", STT_OBJECT; \n" \
+".size " #symbol ", 4; \n" \
+#symbol ": \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define __BTF_ID(symbol) \
+ ____BTF_ID(symbol)
+
+#define __ID(prefix) \
+ __PASTE(prefix, __COUNTER__)
+
+/*
+ * The BTF_ID macro defines a unique symbol for each ID, pointing
+ * to 4 zero bytes.
+ */
+#define BTF_ID(prefix, name) \
+ __BTF_ID(__ID(__BTF_ID__##prefix##__##name##__))
+
+/*
+ * The BTF_ID_LIST macro defines a pure (unsorted) list
+ * of BTF IDs, with the following layout:
+ *
+ * BTF_ID_LIST(list1)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ *
+ * list1:
+ * __BTF_ID__type1__name1__1:
+ * .zero 4
+ * __BTF_ID__type2__name2__2:
+ * .zero 4
+ *
+ */
+#define __BTF_ID_LIST(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " " #name "; \n" \
+#name ":; \n" \
+".popsection; \n");
+
+#define BTF_ID_LIST(name) \
+__BTF_ID_LIST(name, local) \
+extern u32 name[];
+
+#define BTF_ID_LIST_GLOBAL(name, n) \
+__BTF_ID_LIST(name, globl)
+
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST_GLOBAL(name, 1) \
+ BTF_ID(prefix, typename)
+
+/*
+ * The BTF_ID_UNUSED macro defines 4 zero bytes.
+ * It's used when we want to define an 'unused' entry
+ * in BTF_ID_LIST, like:
+ *
+ * BTF_ID_LIST(bpf_skb_output_btf_ids)
+ * BTF_ID(struct, sk_buff)
+ * BTF_ID_UNUSED
+ * BTF_ID(struct, task_struct)
+ */
+
+#define BTF_ID_UNUSED \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+/*
+ * The BTF_SET_START/END macro pair defines a sorted list of
+ * BTF IDs plus its member count, with the following layout:
+ *
+ * BTF_SET_START(list)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ * BTF_SET_END(list)
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * __BTF_ID__type2__name2__4:
+ * .zero 4
+ *
+ */
+#define __BTF_SET_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set__" #name "; \n" \
+"__BTF_ID__set__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET_START(name, local)
+
+#define BTF_SET_START_GLOBAL(name) \
+__BTF_ID_LIST(name, globl) \
+__BTF_SET_START(name, globl)
+
+#define BTF_SET_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set name;
+
+#else
+
+#define BTF_ID_LIST(name) static u32 __maybe_unused name[5];
+#define BTF_ID(prefix, name)
+#define BTF_ID_UNUSED
+#define BTF_ID_LIST_GLOBAL(name, n) u32 __maybe_unused name[n];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 __maybe_unused name[1];
+#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 __maybe_unused name[1];
+#define BTF_SET_START(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set __maybe_unused name = { 0 };
+#define BTF_SET_END(name)
+
+#endif /* CONFIG_DEBUG_INFO_BTF */
+
+#ifdef CONFIG_NET
+/* Define a list of socket types which can be the argument for
+ * skc_to_*_sock() helpers. All these sockets should have
+ * sock_common as the first argument in its memory layout.
+ */
+#define BTF_SOCK_TYPE_xxx \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET, inet_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_CONN, inet_connection_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_REQ, inet_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_INET_TW, inet_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_REQ, request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK, sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCK_COMMON, sock_common) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP, tcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_REQ, tcp_request_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_MPTCP, mptcp_sock) \
+ BTF_SOCK_TYPE(BTF_SOCK_TYPE_SOCKET, socket)
+
+enum {
+#define BTF_SOCK_TYPE(name, str) name,
+BTF_SOCK_TYPE_xxx
+#undef BTF_SOCK_TYPE
+MAX_BTF_SOCK_TYPE,
+};
+
+extern u32 btf_sock_ids[];
+#endif
+
+#define BTF_TRACING_TYPE_xxx \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
+ BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
+
+enum {
+#define BTF_TRACING_TYPE(name, type) name,
+BTF_TRACING_TYPE_xxx
+#undef BTF_TRACING_TYPE
+MAX_BTF_TRACING_TYPE,
+};
+
+extern u32 btf_tracing_ids[];
+
+#endif
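For illustration (not part of the patch): a sketch of declaring a sorted BTF ID set with the macros above; the set and function names are illustrative only.

    /* Resolved to real BTF IDs at link time by the resolve_btfids tool. */
    BTF_SET_START(allowed_task_fns)
    BTF_ID(func, bpf_get_current_task)
    BTF_SET_END(allowed_task_fns)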
diff --git a/tools/include/linux/build_bug.h b/tools/include/linux/build_bug.h
new file mode 100644
index 000000000000..b4898ff085de
--- /dev/null
+++ b/tools/include/linux/build_bug.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_BUILD_BUG_H
+#define _LINUX_BUILD_BUG_H
+
+#include <linux/compiler.h>
+
+#ifdef __CHECKER__
+#define BUILD_BUG_ON_ZERO(e) (0)
+#else /* __CHECKER__ */
+/*
+ * Force a compilation error if condition is true, but also produce a
+ * result (of value 0 and type int), so the expression can be used
+ * e.g. in a structure initializer (or where-ever else comma expressions
+ * aren't permitted).
+ */
+#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
+#endif /* __CHECKER__ */
+
+/* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
+#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
+
+/*
+ * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
+ * expression but avoids the generation of any code, even if that expression
+ * has side-effects.
+ */
+#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
+
+/**
+ * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied
+ * error message.
+ * @condition: the condition which the compiler should know is false.
+ *
+ * See BUILD_BUG_ON for description.
+ */
+#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg)
+
+/**
+ * BUILD_BUG_ON - break compile if a condition is true.
+ * @condition: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+ * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ */
+#define BUILD_BUG_ON(condition) \
+ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition)
+
+/**
+ * BUILD_BUG - break compile if used.
+ *
+ * If you have some code that you expect the compiler to eliminate at
+ * build time, you should use BUILD_BUG to detect if it is
+ * unexpectedly used.
+ */
+#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed")
+
+/**
+ * static_assert - check integer constant expression at build time
+ *
+ * static_assert() is a wrapper for the C11 _Static_assert, with a
+ * little macro magic to make the message optional (defaulting to the
+ * stringification of the tested expression).
+ *
+ * Contrary to BUILD_BUG_ON(), static_assert() can be used at global
+ * scope, but requires the expression to be an integer constant
+ * expression (i.e., it is not enough that __builtin_constant_p() is
+ * true for expr).
+ *
+ * Also note that BUILD_BUG_ON() fails the build if the condition is
+ * true, while static_assert() fails the build if the expression is
+ * false.
+ */
+#ifndef static_assert
+#define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+#define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+#endif // static_assert
+
+
+/*
+ * Compile time check that field has an expected offset
+ */
+#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \
+ BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \
+ "Offset of " #field " in " #type " has changed.")
+
+
+#endif /* _LINUX_BUILD_BUG_H */
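For illustration (not part of the patch): a sketch of the two build-time checks above, assuming <stddef.h> for offsetof() and the tools' linux/types.h for u8/u16.

    struct pkt_header {
            u8  type;
            u8  flags;
            u16 len;
    };

    /* Usable at global scope; fails the build if the size ever changes. */
    static_assert(sizeof(struct pkt_header) == 4);

    static inline void pkt_header_checks(void)
    {
            /* Usable in function scope; fails the build if the condition is true. */
            BUILD_BUG_ON(offsetof(struct pkt_header, len) != 2);
    }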
diff --git a/tools/include/linux/cache.h b/tools/include/linux/cache.h
new file mode 100644
index 000000000000..9e9d585f0b9d
--- /dev/null
+++ b/tools/include/linux/cache.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_CACHE_H
+#define _TOOLS_LINUX_CACHE_H
+
+#define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+
+#endif
diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h
index 95c072b70d0e..62e7c901ac28 100644
--- a/tools/include/linux/compiler-gcc.h
+++ b/tools/include/linux/compiler-gcc.h
@@ -12,13 +12,15 @@
+ __GNUC_PATCHLEVEL__)
#endif
-#if GCC_VERSION >= 70000 && !defined(__CHECKER__)
-# define __fallthrough __attribute__ ((fallthrough))
+#if __has_attribute(__fallthrough__)
+# define fallthrough __attribute__((__fallthrough__))
+#else
+# define fallthrough do {} while (0) /* fallthrough */
#endif
-#if GCC_VERSION >= 40300
+#if __has_attribute(__error__)
# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* GCC_VERSION >= 40300 */
+#endif
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
@@ -38,7 +40,3 @@
#endif
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
-#endif
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index 1827c2f973f9..9d36c8ce1fe7 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -2,14 +2,38 @@
#ifndef _TOOLS_LINUX_COMPILER_H_
#define _TOOLS_LINUX_COMPILER_H_
-#ifdef __GNUC__
-#include <linux/compiler-gcc.h>
-#endif
+#include <linux/compiler_types.h>
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix) \
+ do { \
+ extern void prefix ## suffix(void) __compiletime_error(msg); \
+ if (!(condition)) \
+ prefix ## suffix(); \
+ } while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+ __compiletime_assert(condition, msg, prefix, suffix)
+
+/**
+ * compiletime_assert - break build and emit msg if condition is false
+ * @condition: a compile-time constant condition to check
+ * @msg: a message to emit if condition is false
+ *
+ * In the tradition of POSIX assert, this macro will break the build if the
+ * supplied condition is *false*, emitting the supplied error message if the
+ * compiler has support to do so.
+ */
+#define compiletime_assert(condition, msg) \
+ _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
@@ -78,12 +102,6 @@
# define __init
#endif
-#ifndef noinline
-# define noinline
-#endif
-
-#define uninitialized_var(x) x = *(&(x))
-
#include <linux/types.h>
/*
@@ -168,8 +186,8 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
})
-#ifndef __fallthrough
-# define __fallthrough
-#endif
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a, b) a##b
+#define __PASTE(a, b) ___PASTE(a, b)
#endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/include/linux/compiler_types.h b/tools/include/linux/compiler_types.h
new file mode 100644
index 000000000000..1bdd834bdd57
--- /dev/null
+++ b/tools/include/linux/compiler_types.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
+#define __LINUX_COMPILER_TYPES_H
+
+/* Builtins */
+
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
+#ifdef __CHECKER__
+/* context/locking */
+# define __must_hold(x) __attribute__((context(x,1,1)))
+# define __acquires(x) __attribute__((context(x,0,1)))
+# define __releases(x) __attribute__((context(x,1,0)))
+# define __acquire(x) __context__(x,1)
+# define __release(x) __context__(x,-1)
+# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+#else /* __CHECKER__ */
+/* context/locking */
+# define __must_hold(x)
+# define __acquires(x)
+# define __releases(x)
+# define __acquire(x) (void)0
+# define __release(x) (void)0
+# define __cond_lock(x,c) (c)
+#endif /* __CHECKER__ */
+
+/* Compiler specific macros. */
+#ifdef __GNUC__
+#include <linux/compiler-gcc.h>
+#endif
+
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
+#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/tools/include/linux/const.h b/tools/include/linux/const.h
index 7b55a55f5911..435ddd72d2c4 100644
--- a/tools/include/linux/const.h
+++ b/tools/include/linux/const.h
@@ -1,9 +1,14 @@
#ifndef _LINUX_CONST_H
#define _LINUX_CONST_H
-#include <uapi/linux/const.h>
+#include <vdso/const.h>
-#define UL(x) (_UL(x))
-#define ULL(x) (_ULL(x))
+/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
#endif /* _LINUX_CONST_H */
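For illustration (not part of the patch): what __is_constexpr() evaluates to for a few inputs.

    int n = 4;

    int a = __is_constexpr(8);          /* 1: integer constant expression */
    int b = __is_constexpr(n);          /* 0: n is a variable */
    int c = __is_constexpr(sizeof(n));  /* 1: sizeof yields a constant */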
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h
index b0e35eec6499..cef3b1c25335 100644
--- a/tools/include/linux/coresight-pmu.h
+++ b/tools/include/linux/coresight-pmu.h
@@ -7,30 +7,67 @@
#ifndef _LINUX_CORESIGHT_PMU_H
#define _LINUX_CORESIGHT_PMU_H
+#include <linux/bits.h>
+
#define CORESIGHT_ETM_PMU_NAME "cs_etm"
-#define CORESIGHT_ETM_PMU_SEED 0x10
-/* ETMv3.5/PTM's ETMCR config bit */
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_CTXTID 14
-#define ETM_OPT_TS 28
-#define ETM_OPT_RETSTK 29
+/*
+ * The legacy Trace ID system is based on a fixed calculation from the cpu
+ * number. This has been replaced by drivers using a dynamic allocation
+ * system - but we need to retain the legacy algorithm for backward compatibility
+ * in certain situations:
+ * a) new perf running on older systems that generate the legacy mapping
+ * b) older tools that may not update at the same time as the kernel.
+ */
+#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
+
+/* CoreSight trace ID is currently the bottom 7 bits of the value */
+#define CORESIGHT_TRACE_ID_VAL_MASK GENMASK(6, 0)
+
+/*
+ * perf record will set the legacy meta data values as unused initially.
+ * This allows perf report to manage the decoders created when dynamic
+ * allocation is in operation.
+ */
+#define CORESIGHT_TRACE_ID_UNUSED_FLAG BIT(31)
+
+/* Value to set for unused trace ID values */
+#define CORESIGHT_TRACE_ID_UNUSED_VAL 0x7F
+
+/*
+ * Below are the definitions of the bit offsets for the perf options; they work
+ * as arbitrary values for all ETM versions.
+ *
+ * Most of them are originally from ETMv3.5/PTM's ETMCR config; therefore,
+ * ETMv3.5/PTM doesn't define ETMCR config bits with an "ETM3_" prefix and
+ * directly uses the macros below as config bits.
+ */
+#define ETM_OPT_BRANCH_BROADCAST 8
+#define ETM_OPT_CYCACC 12
+#define ETM_OPT_CTXTID 14
+#define ETM_OPT_CTXTID2 15
+#define ETM_OPT_TS 28
+#define ETM_OPT_RETSTK 29
/* ETMv4 CONFIGR programming bits for the ETM OPTs */
+#define ETM4_CFG_BIT_BB 3
#define ETM4_CFG_BIT_CYCACC 4
#define ETM4_CFG_BIT_CTXTID 6
+#define ETM4_CFG_BIT_VMID 7
#define ETM4_CFG_BIT_TS 11
#define ETM4_CFG_BIT_RETSTK 12
+#define ETM4_CFG_BIT_VMID_OPT 15
+
+/*
+ * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
+ * Used to associate a CPU with the CoreSight Trace ID.
+ * [07:00] - Trace ID - uses 8 bits to make the value easy to read in a file.
+ * [59:08] - Unused (SBZ)
+ * [63:60] - Version
+ */
+#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
+#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
-static inline int coresight_get_trace_id(int cpu)
-{
- /*
- * A trace ID of value 0 is invalid, so let's start at some
- * random value that fits in 7 bits and go from there. Since
- * the common convention is to have data trace IDs be I(N) + 1,
- * set instruction trace IDs as a function of the CPU number.
- */
- return (CORESIGHT_ETM_PMU_SEED + (cpu * 2));
-}
+#define CS_AUX_HW_ID_CURR_VERSION 0
#endif
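For illustration (not part of the patch): a sketch of decoding a PERF_RECORD_AUX_OUTPUT_HW_ID payload with the masks above, using FIELD_GET() from the bitfield header added earlier in this series; the helper name is hypothetical.

    static inline bool cs_hw_id_parse(u64 payload, u8 *trace_id)
    {
            /* Reject payloads produced by a newer, unknown format version. */
            if (FIELD_GET(CS_AUX_HW_ID_VERSION_MASK, payload) !=
                CS_AUX_HW_ID_CURR_VERSION)
                    return false;

            /* The trace ID itself only occupies the bottom 7 bits. */
            *trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, payload) &
                        CORESIGHT_TRACE_ID_VAL_MASK;
            return true;
    }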
diff --git a/tools/include/linux/ctype.h b/tools/include/linux/ctype.h
index 310090b4c474..29ed3fe94404 100644
--- a/tools/include/linux/ctype.h
+++ b/tools/include/linux/ctype.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_CTYPE_H
#define _LINUX_CTYPE_H
+#include <linux/compiler.h>
+
/*
* NOTE! This ctype does not handle EOF like the standard C
* library is required to.
@@ -23,11 +25,6 @@ extern const unsigned char _ctype[];
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
-static inline int __isdigit(int c)
-{
- return '0' <= c && c <= '9';
-}
-#define isdigit(c) __isdigit(c)
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
@@ -40,6 +37,16 @@ static inline int __isdigit(int c)
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
+#if __has_builtin(__builtin_isdigit)
+#define isdigit(c) __builtin_isdigit(c)
+#else
+static inline int __isdigit(int c)
+{
+ return '0' <= c && c <= '9';
+}
+#define isdigit(c) __isdigit(c)
+#endif
+
static inline unsigned char __tolower(unsigned char c)
{
if (isupper(c))
diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h
deleted file mode 100644
index 72d595ce764a..000000000000
--- a/tools/include/linux/debug_locks.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_
-#define _LIBLOCKDEP_DEBUG_LOCKS_H_
-
-#include <stddef.h>
-#include <linux/compiler.h>
-#include <asm/bug.h>
-
-#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x)
-
-extern bool debug_locks;
-extern bool debug_locks_silent;
-
-#endif
diff --git a/tools/include/linux/debugfs.h b/tools/include/linux/debugfs.h
new file mode 100644
index 000000000000..4ba06140b1be
--- /dev/null
+++ b/tools/include/linux/debugfs.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_DEBUGFS_H
+#define _TOOLS_DEBUGFS_H
+
+#endif
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
index 25f2bb3a991d..332b983ead1e 100644
--- a/tools/include/linux/err.h
+++ b/tools/include/linux/err.h
@@ -20,7 +20,7 @@
* Userspace note:
* The same principle works for userspace, because 'error' pointers
* fall down to the unused hole far from user space, as described
- * in Documentation/x86/x86_64/mm.rst for x86_64 arch:
+ * in Documentation/arch/x86/x86_64/mm.rst for x86_64 arch:
*
* 0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm hole caused by [48:63] sign extension
* ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
diff --git a/tools/include/linux/export.h b/tools/include/linux/export.h
index d07e586b9ba0..acb6f4daa2f0 100644
--- a/tools/include/linux/export.h
+++ b/tools/include/linux/export.h
@@ -3,8 +3,5 @@
#define EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym)
-#define EXPORT_SYMBOL_GPL_FUTURE(sym)
-#define EXPORT_UNUSED_SYMBOL(sym)
-#define EXPORT_UNUSED_SYMBOL_GPL(sym)
#endif
diff --git a/tools/include/linux/filter.h b/tools/include/linux/filter.h
index ca28b6ab8db7..736bdeccdfe4 100644
--- a/tools/include/linux/filter.h
+++ b/tools/include/linux/filter.h
@@ -169,15 +169,31 @@
.off = OFF, \
.imm = 0 })
-/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */
+/*
+ * Atomic operations:
+ *
+ * BPF_ADD *(uint *) (dst_reg + off16) += src_reg
+ * BPF_AND *(uint *) (dst_reg + off16) &= src_reg
+ * BPF_OR *(uint *) (dst_reg + off16) |= src_reg
+ * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg
+ * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
+ * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
+ * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
+ * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
+ * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg)
+ * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
+ */
-#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
+#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \
((struct bpf_insn) { \
- .code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
+ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
- .imm = 0 })
+ .imm = OP })
+
+/* Legacy alias */
+#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */
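For illustration (not part of the patch): emitting one of the new atomic instructions with BPF_ATOMIC_OP(); the register choices are arbitrary.

    /* r0 = atomic_fetch_add((u64 *)(r1 + 0), r0) */
    struct bpf_insn fetch_add =
            BPF_ATOMIC_OP(BPF_DW, BPF_ADD | BPF_FETCH, BPF_REG_1, BPF_REG_0, 0);

    /* The legacy alias still builds and maps to a plain BPF_ADD: */
    struct bpf_insn xadd = BPF_STX_XADD(BPF_W, BPF_REG_1, BPF_REG_0, 0);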
diff --git a/tools/include/linux/find.h b/tools/include/linux/find.h
new file mode 100644
index 000000000000..38c0a542b0e2
--- /dev/null
+++ b/tools/include/linux/find.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_FIND_H_
+#define _TOOLS_LINUX_FIND_H_
+
+#ifndef _TOOLS_LINUX_BITMAP_H
+#error tools: only <linux/bitmap.h> can be included directly
+#endif
+
+#include <linux/bitops.h>
+
+unsigned long _find_next_bit(const unsigned long *addr1, unsigned long nbits,
+ unsigned long start);
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start);
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start);
+extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
+extern unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size);
+extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_and_bit(addr1, addr2, size, offset);
+}
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @size: The bitmap size in bits
+ * @offset: The bitnumber to start searching at
+ *
+ * Returns the bit number of the next zero bit
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_zero_bit(addr, size, offset);
+}
+#endif
+
+#ifndef find_first_bit
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_bit(addr, size);
+}
+#endif
+
+#ifndef find_first_and_bit
+/**
+ * find_first_and_bit - find the first set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_bit(addr1, addr2, size);
+}
+#endif
+
+#ifndef find_first_zero_bit
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit(addr, size);
+}
+#endif
+
+#endif /*__LINUX_FIND_H_ */
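For illustration (not part of the patch): iterating over set bits with the helpers above, assuming linux/bitmap.h for DECLARE_BITMAP()/bitmap_zero() and linux/bitops.h for __set_bit().

    DECLARE_BITMAP(map, 64);
    unsigned long bit;
    unsigned int nset = 0;

    bitmap_zero(map, 64);
    __set_bit(3, map);
    __set_bit(42, map);

    /* Visits bit 3, then bit 42, then stops once 64 is returned. */
    for (bit = find_first_bit(map, 64); bit < 64;
         bit = find_next_bit(map, 64, bit + 1))
            nset++;                         /* ends up as 2 */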
diff --git a/tools/include/linux/gfp.h b/tools/include/linux/gfp.h
index 22030756fbc0..6a10ff5f5be9 100644
--- a/tools/include/linux/gfp.h
+++ b/tools/include/linux/gfp.h
@@ -1,4 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_INCLUDE_LINUX_GFP_H
#define _TOOLS_INCLUDE_LINUX_GFP_H
+#include <linux/types.h>
+#include <linux/gfp_types.h>
+
+static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
+{
+ return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
+}
+
#endif /* _TOOLS_INCLUDE_LINUX_GFP_H */
diff --git a/tools/include/linux/gfp_types.h b/tools/include/linux/gfp_types.h
new file mode 100644
index 000000000000..5f9f1ed190a0
--- /dev/null
+++ b/tools/include/linux/gfp_types.h
@@ -0,0 +1 @@
+#include "../../../include/linux/gfp_types.h"
diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h
deleted file mode 100644
index b25580b6a9be..000000000000
--- a/tools/include/linux/hardirq.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_
-#define _LIBLOCKDEP_LINUX_HARDIRQ_H_
-
-#define SOFTIRQ_BITS 0UL
-#define HARDIRQ_BITS 0UL
-#define SOFTIRQ_SHIFT 0UL
-#define HARDIRQ_SHIFT 0UL
-#define hardirq_count() 0UL
-#define softirq_count() 0UL
-
-#endif
diff --git a/tools/include/linux/hash.h b/tools/include/linux/hash.h
index ad6fa21d977b..38edaa08f862 100644
--- a/tools/include/linux/hash.h
+++ b/tools/include/linux/hash.h
@@ -62,10 +62,7 @@ static inline u32 __hash_32_generic(u32 val)
return val * GOLDEN_RATIO_32;
}
-#ifndef HAVE_ARCH_HASH_32
-#define hash_32 hash_32_generic
-#endif
-static inline u32 hash_32_generic(u32 val, unsigned int bits)
+static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
diff --git a/tools/include/linux/interval_tree_generic.h b/tools/include/linux/interval_tree_generic.h
new file mode 100644
index 000000000000..aaa8a0767aa3
--- /dev/null
+++ b/tools/include/linux/interval_tree_generic.h
@@ -0,0 +1,187 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ Interval Trees
+ (C) 2012 Michel Lespinasse <walken@google.com>
+
+
+ include/linux/interval_tree_generic.h
+*/
+
+#include <linux/rbtree_augmented.h>
+
+/*
+ * Template for implementing interval trees
+ *
+ * ITSTRUCT: struct type of the interval tree nodes
+ * ITRB: name of struct rb_node field within ITSTRUCT
+ * ITTYPE: type of the interval endpoints
+ * ITSUBTREE: name of ITTYPE field within ITSTRUCT holding last-in-subtree
+ * ITSTART(n): start endpoint of ITSTRUCT node n
+ * ITLAST(n): last endpoint of ITSTRUCT node n
+ * ITSTATIC: 'static' or empty
+ * ITPREFIX: prefix to use for the inline tree definitions
+ *
+ * Note - before using this, please consider if the generic version
+ * (interval_tree.h) would work for you...
+ */
+
+#define INTERVAL_TREE_DEFINE(ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, \
+ ITSTART, ITLAST, ITSTATIC, ITPREFIX) \
+ \
+/* Callbacks for augmented rbtree insert and remove */ \
+ \
+RB_DECLARE_CALLBACKS_MAX(static, ITPREFIX ## _augment, \
+ ITSTRUCT, ITRB, ITTYPE, ITSUBTREE, ITLAST) \
+ \
+/* Insert / remove interval nodes from the tree */ \
+ \
+ITSTATIC void ITPREFIX ## _insert(ITSTRUCT *node, \
+ struct rb_root_cached *root) \
+{ \
+ struct rb_node **link = &root->rb_root.rb_node, *rb_parent = NULL; \
+ ITTYPE start = ITSTART(node), last = ITLAST(node); \
+ ITSTRUCT *parent; \
+ bool leftmost = true; \
+ \
+ while (*link) { \
+ rb_parent = *link; \
+ parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
+ if (parent->ITSUBTREE < last) \
+ parent->ITSUBTREE = last; \
+ if (start < ITSTART(parent)) \
+ link = &parent->ITRB.rb_left; \
+ else { \
+ link = &parent->ITRB.rb_right; \
+ leftmost = false; \
+ } \
+ } \
+ \
+ node->ITSUBTREE = last; \
+ rb_link_node(&node->ITRB, rb_parent, link); \
+ rb_insert_augmented_cached(&node->ITRB, root, \
+ leftmost, &ITPREFIX ## _augment); \
+} \
+ \
+ITSTATIC void ITPREFIX ## _remove(ITSTRUCT *node, \
+ struct rb_root_cached *root) \
+{ \
+ rb_erase_augmented_cached(&node->ITRB, root, &ITPREFIX ## _augment); \
+} \
+ \
+/* \
+ * Iterate over intervals intersecting [start;last] \
+ * \
+ * Note that a node's interval intersects [start;last] iff: \
+ * Cond1: ITSTART(node) <= last \
+ * and \
+ * Cond2: start <= ITLAST(node) \
+ */ \
+ \
+static ITSTRUCT * \
+ITPREFIX ## _subtree_search(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
+{ \
+ while (true) { \
+ /* \
+ * Loop invariant: start <= node->ITSUBTREE \
+ * (Cond2 is satisfied by one of the subtree nodes) \
+ */ \
+ if (node->ITRB.rb_left) { \
+ ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
+ ITSTRUCT, ITRB); \
+ if (start <= left->ITSUBTREE) { \
+ /* \
+ * Some nodes in left subtree satisfy Cond2. \
+ * Iterate to find the leftmost such node N. \
+ * If it also satisfies Cond1, that's the \
+ * match we are looking for. Otherwise, there \
+ * is no matching interval as nodes to the \
+ * right of N can't satisfy Cond1 either. \
+ */ \
+ node = left; \
+ continue; \
+ } \
+ } \
+ if (ITSTART(node) <= last) { /* Cond1 */ \
+ if (start <= ITLAST(node)) /* Cond2 */ \
+ return node; /* node is leftmost match */ \
+ if (node->ITRB.rb_right) { \
+ node = rb_entry(node->ITRB.rb_right, \
+ ITSTRUCT, ITRB); \
+ if (start <= node->ITSUBTREE) \
+ continue; \
+ } \
+ } \
+ return NULL; /* No match */ \
+ } \
+} \
+ \
+ITSTATIC ITSTRUCT * \
+ITPREFIX ## _iter_first(struct rb_root_cached *root, \
+ ITTYPE start, ITTYPE last) \
+{ \
+ ITSTRUCT *node, *leftmost; \
+ \
+ if (!root->rb_root.rb_node) \
+ return NULL; \
+ \
+ /* \
+ * Fastpath range intersection/overlap between A: [a0, a1] and \
+ * B: [b0, b1] is given by: \
+ * \
+ * a0 <= b1 && b0 <= a1 \
+ * \
+ * ... where A holds the lock range and B holds the smallest \
+ * 'start' and largest 'last' in the tree. For the latter, we \
+ * rely on the root node, which by augmented interval tree \
+ * property, holds the largest value in its last-in-subtree. \
+ * This allows mitigating some of the tree walk overhead for \
+ * non-intersecting ranges, maintained and consulted in O(1). \
+ */ \
+ node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \
+ if (node->ITSUBTREE < start) \
+ return NULL; \
+ \
+ leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \
+ if (ITSTART(leftmost) > last) \
+ return NULL; \
+ \
+ return ITPREFIX ## _subtree_search(node, start, last); \
+} \
+ \
+ITSTATIC ITSTRUCT * \
+ITPREFIX ## _iter_next(ITSTRUCT *node, ITTYPE start, ITTYPE last) \
+{ \
+ struct rb_node *rb = node->ITRB.rb_right, *prev; \
+ \
+ while (true) { \
+ /* \
+ * Loop invariants: \
+ * Cond1: ITSTART(node) <= last \
+ * rb == node->ITRB.rb_right \
+ * \
+ * First, search right subtree if suitable \
+ */ \
+ if (rb) { \
+ ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
+ if (start <= right->ITSUBTREE) \
+ return ITPREFIX ## _subtree_search(right, \
+ start, last); \
+ } \
+ \
+ /* Move up the tree until we come from a node's left child */ \
+ do { \
+ rb = rb_parent(&node->ITRB); \
+ if (!rb) \
+ return NULL; \
+ prev = &node->ITRB; \
+ node = rb_entry(rb, ITSTRUCT, ITRB); \
+ rb = node->ITRB.rb_right; \
+ } while (prev == rb); \
+ \
+ /* Check if the node intersects [start;last] */ \
+ if (last < ITSTART(node)) /* !Cond1 */ \
+ return NULL; \
+ else if (start <= ITLAST(node)) /* Cond2 */ \
+ return node; \
+ } \
+}
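For illustration (not part of the patch): a minimal instantiation sketch of the template above, mirroring how lib/interval_tree.c uses it; the node type and prefix are illustrative.

    struct itnode {
            struct rb_node rb;
            unsigned long start, last;      /* closed interval [start, last] */
            unsigned long __subtree_last;   /* maintained by the generated code */
    };

    #define ITNODE_START(n) ((n)->start)
    #define ITNODE_LAST(n)  ((n)->last)

    INTERVAL_TREE_DEFINE(struct itnode, rb, unsigned long, __subtree_last,
                         ITNODE_START, ITNODE_LAST, static, itnode)

    /* Generates itnode_insert(), itnode_remove(), itnode_iter_first() and
     * itnode_iter_next(), all operating on a struct rb_root_cached. */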
diff --git a/tools/include/linux/io.h b/tools/include/linux/io.h
new file mode 100644
index 000000000000..e129871fe661
--- /dev/null
+++ b/tools/include/linux/io.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_IO_H
+#define _TOOLS_IO_H
+
+#endif
diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h
deleted file mode 100644
index 67e01bbadbfe..000000000000
--- a/tools/include/linux/irqflags.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_
-
-# define lockdep_hardirq_context(p) 0
-# define lockdep_softirq_context(p) 0
-# define lockdep_hardirqs_enabled(p) 0
-# define lockdep_softirqs_enabled(p) 0
-# define lockdep_hardirq_enter() do { } while (0)
-# define lockdep_hardirq_exit() do { } while (0)
-# define lockdep_softirq_enter() do { } while (0)
-# define lockdep_softirq_exit() do { } while (0)
-# define INIT_TRACE_IRQFLAGS
-
-# define stop_critical_timings() do { } while (0)
-# define start_critical_timings() do { } while (0)
-
-#define raw_local_irq_disable() do { } while (0)
-#define raw_local_irq_enable() do { } while (0)
-#define raw_local_irq_save(flags) ((flags) = 0)
-#define raw_local_irq_restore(flags) ((void)(flags))
-#define raw_local_save_flags(flags) ((flags) = 0)
-#define raw_irqs_disabled_flags(flags) ((void)(flags))
-#define raw_irqs_disabled() 0
-#define raw_safe_halt()
-
-#define local_irq_enable() do { } while (0)
-#define local_irq_disable() do { } while (0)
-#define local_irq_save(flags) ((flags) = 0)
-#define local_irq_restore(flags) ((void)(flags))
-#define local_save_flags(flags) ((flags) = 0)
-#define irqs_disabled() (1)
-#define irqs_disabled_flags(flags) ((void)(flags), 0)
-#define safe_halt() do { } while (0)
-
-#define trace_lock_release(x, y)
-#define trace_lock_acquire(a, b, c, d, e, f, g)
-
-#endif
diff --git a/tools/include/linux/jhash.h b/tools/include/linux/jhash.h
index 348c6f47e4cc..af8d0fe1c6ce 100644
--- a/tools/include/linux/jhash.h
+++ b/tools/include/linux/jhash.h
@@ -5,7 +5,7 @@
*
* Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
*
- * http://burtleburtle.net/bob/hash/
+ * https://burtleburtle.net/bob/hash/
*
* These are the credits from Bob's sources:
*
diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
index 89ca6fe257cc..5a37ccbec54f 100644
--- a/tools/include/linux/kallsyms.h
+++ b/tools/include/linux/kallsyms.h
@@ -6,7 +6,7 @@
#include <stdio.h>
#include <unistd.h>
-#define KSYM_NAME_LEN 128
+#define KSYM_NAME_LEN 512
struct module;
@@ -20,7 +20,7 @@ static inline const char *kallsyms_lookup(unsigned long addr,
#include <execinfo.h>
#include <stdlib.h>
-static inline void print_ip_sym(unsigned long ip)
+static inline void print_ip_sym(const char *loglvl, unsigned long ip)
{
char **name;
diff --git a/tools/include/linux/kconfig.h b/tools/include/linux/kconfig.h
new file mode 100644
index 000000000000..13b86bd3b746
--- /dev/null
+++ b/tools/include/linux/kconfig.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_KCONFIG_H
+#define _TOOLS_LINUX_KCONFIG_H
+
+/* CONFIG_CC_VERSION_TEXT (Do not delete this comment. See help in Kconfig) */
+
+#define __ARG_PLACEHOLDER_1 0,
+#define __take_second_arg(__ignored, val, ...) val
+
+/*
+ * The use of "&&" / "||" is limited in certain expressions.
+ * The following enable calculating "and" / "or" with macro expansion only.
+ */
+#define __and(x, y) ___and(x, y)
+#define ___and(x, y) ____and(__ARG_PLACEHOLDER_##x, y)
+#define ____and(arg1_or_junk, y) __take_second_arg(arg1_or_junk y, 0)
+
+#define __or(x, y) ___or(x, y)
+#define ___or(x, y) ____or(__ARG_PLACEHOLDER_##x, y)
+#define ____or(arg1_or_junk, y) __take_second_arg(arg1_or_junk 1, y)
+
+/*
+ * Helper macros to use CONFIG_ options in C/CPP expressions. Note that
+ * these only work with boolean and tristate options.
+ */
+
+/*
+ * Getting something that works in C and CPP for an arg that may or may
+ * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1"
+ * we match on the placeholder define, insert the "0," for arg1 and generate
+ * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one).
+ * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
+ * the last step cherry picks the 2nd arg, we get a zero.
+ */
+#define __is_defined(x) ___is_defined(x)
+#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
+#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
+
+/*
+ * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
+ * otherwise. For boolean options, this is equivalent to
+ * IS_ENABLED(CONFIG_FOO).
+ */
+#define IS_BUILTIN(option) __is_defined(option)
+
+/*
+ * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
+ * otherwise.
+ */
+#define IS_MODULE(option) __is_defined(option##_MODULE)
+
+/*
+ * IS_REACHABLE(CONFIG_FOO) evaluates to 1 if the currently compiled
+ * code can call a function defined in code compiled based on CONFIG_FOO.
+ * This is similar to IS_ENABLED(), but returns false when invoked from
+ * built-in code when CONFIG_FOO is set to 'm'.
+ */
+#define IS_REACHABLE(option) __or(IS_BUILTIN(option), \
+ __and(IS_MODULE(option), __is_defined(MODULE)))
+
+/*
+ * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
+ * 0 otherwise.
+ */
+#define IS_ENABLED(option) __or(IS_BUILTIN(option), IS_MODULE(option))
+
+#endif /* _TOOLS_LINUX_KCONFIG_H */
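For illustration (not part of the patch): how the IS_*() helpers evaluate, assuming the build defines CONFIG_FOO as 1 (built-in), CONFIG_BAR_MODULE as 1 (modular) and leaves CONFIG_BAZ undefined; the option names are purely illustrative.

    int foo_builtin = IS_BUILTIN(CONFIG_FOO);   /* 1 */
    int bar_module  = IS_MODULE(CONFIG_BAR);    /* 1 */
    int bar_enabled = IS_ENABLED(CONFIG_BAR);   /* 1 */
    int baz_enabled = IS_ENABLED(CONFIG_BAZ);   /* 0 */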
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
index cba226948a0c..4b0673bf52c2 100644
--- a/tools/include/linux/kernel.h
+++ b/tools/include/linux/kernel.h
@@ -5,7 +5,9 @@
#include <stdarg.h>
#include <stddef.h>
#include <assert.h>
+#include <linux/build_bug.h>
#include <linux/compiler.h>
+#include <linux/math.h>
#include <endian.h>
#include <byteswap.h>
@@ -13,7 +15,7 @@
#define UINT_MAX (~0U)
#endif
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define _RET_IP_ ((unsigned long)__builtin_return_address(0))
#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
@@ -35,9 +37,6 @@
(type *)((char *)__mptr - offsetof(type, member)); })
#endif
-#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
-
#ifndef max
#define max(x, y) ({ \
typeof(x) _max1 = (x); \
@@ -54,14 +53,9 @@
_min1 < _min2 ? _min1 : _min2; })
#endif
-#ifndef roundup
-#define roundup(x, y) ( \
-{ \
- const typeof(y) __y = y; \
- (((x) + (__y - 1)) / __y) * __y; \
-} \
-)
-#endif
+#define max_t(type, x, y) max((type)x, (type)y)
+#define min_t(type, x, y) min((type)x, (type)y)
+#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
#ifndef BUG_ON
#ifdef NDEBUG
@@ -104,17 +98,9 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
int scnprintf(char * buf, size_t size, const char * fmt, ...);
int scnprintf_pad(char * buf, size_t size, const char * fmt, ...);
+#ifndef ARRAY_SIZE
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
-
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
+#endif
#define current_gfp_context(k) 0
#define synchronize_rcu()
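For illustration (not part of the patch): the typed min/max and clamp helpers added above.

    size_t len = 10000;

    int v  = clamp(130, 0, 127);     /* 127: value limited to [0, 127] */
    u64 sz = max_t(u64, 4096, len);  /* 10000, compared as u64 */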
diff --git a/tools/include/linux/list.h b/tools/include/linux/list.h
index b2fc48d5478c..a4dfb6a7cc6a 100644
--- a/tools/include/linux/list.h
+++ b/tools/include/linux/list.h
@@ -385,6 +385,17 @@ static inline void list_splice_tail_init(struct list_head *list,
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
/**
+ * list_last_entry_or_null - get the last element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_head within the struct.
+ *
+ * Note that if the list is empty, it returns NULL.
+ */
+#define list_last_entry_or_null(ptr, type, member) \
+ (!list_empty(ptr) ? list_last_entry(ptr, type, member) : NULL)
+
+/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
diff --git a/tools/include/linux/list_sort.h b/tools/include/linux/list_sort.h
new file mode 100644
index 000000000000..453105f74e05
--- /dev/null
+++ b/tools/include/linux/list_sort.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_LIST_SORT_H
+#define _LINUX_LIST_SORT_H
+
+#include <linux/types.h>
+
+struct list_head;
+
+typedef int __attribute__((nonnull(2,3))) (*list_cmp_func_t)(void *,
+ const struct list_head *, const struct list_head *);
+
+__attribute__((nonnull(2,3)))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp);
+#endif
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h
deleted file mode 100644
index e56997288f2b..000000000000
--- a/tools/include/linux/lockdep.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LOCKDEP_H_
-#define _LIBLOCKDEP_LOCKDEP_H_
-
-#include <sys/prctl.h>
-#include <sys/syscall.h>
-#include <string.h>
-#include <limits.h>
-#include <linux/utsname.h>
-#include <linux/compiler.h>
-#include <linux/export.h>
-#include <linux/kern_levels.h>
-#include <linux/err.h>
-#include <linux/rcu.h>
-#include <linux/list.h>
-#include <linux/hardirq.h>
-#include <unistd.h>
-
-#define MAX_LOCK_DEPTH 63UL
-
-#define asmlinkage
-#define __visible
-
-#include "../../../include/linux/lockdep.h"
-
-struct task_struct {
- u64 curr_chain_key;
- int lockdep_depth;
- unsigned int lockdep_recursion;
- struct held_lock held_locks[MAX_LOCK_DEPTH];
- gfp_t lockdep_reclaim_gfp;
- int pid;
- int state;
- char comm[17];
-};
-
-#define TASK_RUNNING 0
-
-extern struct task_struct *__curr(void);
-
-#define current (__curr())
-
-static inline int debug_locks_off(void)
-{
- return 1;
-}
-
-#define task_pid_nr(tsk) ((tsk)->pid)
-
-#define KSYM_NAME_LEN 128
-#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__)
-#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define pr_warn pr_err
-#define pr_cont pr_err
-
-#define list_del_rcu list_del
-
-#define atomic_t unsigned long
-#define atomic_inc(x) ((*(x))++)
-
-#define print_tainted() ""
-#define static_obj(x) 1
-
-#define debug_show_all_locks()
-extern void debug_check_no_locks_held(void);
-
-static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr)
-{
- return false;
-}
-
-#endif
diff --git a/tools/include/linux/math.h b/tools/include/linux/math.h
new file mode 100644
index 000000000000..4e7af99ec9eb
--- /dev/null
+++ b/tools/include/linux/math.h
@@ -0,0 +1,25 @@
+#ifndef _TOOLS_MATH_H
+#define _TOOLS_MATH_H
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#ifndef roundup
+#define roundup(x, y) ( \
+{ \
+ const typeof(y) __y = y; \
+ (((x) + (__y - 1)) / __y) * __y; \
+} \
+)
+#endif
+
+#endif
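To make the rounding semantics concrete, a small hedged example (values chosen arbitrarily; round_up()/round_down() rely on the mask trick and therefore need a power-of-two alignment, while roundup() and DIV_ROUND_UP() accept any positive divisor):

#include <linux/math.h>
#include <assert.h>

static void math_examples(void)
{
	/* power-of-two rounding via the mask trick */
	assert(round_up(13, 8)   == 16);
	assert(round_down(13, 8) == 8);

	/* generic rounding, any divisor (here 6) */
	assert(roundup(13, 6)      == 18);
	assert(DIV_ROUND_UP(13, 6) == 3);
}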
diff --git a/tools/include/linux/math64.h b/tools/include/linux/math64.h
new file mode 100644
index 000000000000..4ad45d5943dc
--- /dev/null
+++ b/tools/include/linux/math64.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MATH64_H
+#define _LINUX_MATH64_H
+
+#include <linux/types.h>
+
+#ifdef __x86_64__
+static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
+{
+ u64 q;
+
+ asm ("mulq %2; divq %3" : "=a" (q)
+ : "a" (a), "rm" (b), "rm" (c)
+ : "rdx");
+
+ return q;
+}
+#define mul_u64_u64_div64 mul_u64_u64_div64
+#endif
+
+#ifdef __SIZEOF_INT128__
+static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * b) >> shift);
+}
+
+#else
+
+#ifdef __i386__
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+ u32 high, low;
+
+ asm ("mull %[b]" : "=a" (low), "=d" (high)
+ : [a] "a" (a), [b] "rm" (b) );
+
+ return low | ((u64)high) << 32;
+}
+#else
+static inline u64 mul_u32_u32(u32 a, u32 b)
+{
+ return (u64)a * b;
+}
+#endif
+
+static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
+{
+ u32 ah, al;
+ u64 ret;
+
+ al = a;
+ ah = a >> 32;
+
+ ret = mul_u32_u32(al, b) >> shift;
+ if (ah)
+ ret += mul_u32_u32(ah, b) << (32 - shift);
+
+ return ret;
+}
+
+#endif /* __SIZEOF_INT128__ */
+
+#ifndef mul_u64_u64_div64
+static inline u64 mul_u64_u64_div64(u64 a, u64 b, u64 c)
+{
+ u64 quot, rem;
+
+ quot = a / c;
+ rem = a % c;
+
+ return quot * b + (rem * b) / c;
+}
+#endif
+
+#endif /* _LINUX_MATH64_H */
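A hedged sketch of the intended use of these helpers (the wrapper names are illustrative; on compilers without __int128 the split mul_u32_u32() path above is used transparently):

#include <linux/types.h>
#include <linux/math64.h>

/* (cycles * mult) >> shift without losing the high bits of the
 * intermediate product, the usual clocksource-style scaling.
 */
static u64 cycles_to_ns(u64 cycles, u32 mult, unsigned int shift)
{
	return mul_u64_u32_shr(cycles, mult, shift);
}

/* a * b / c with a wide intermediate, useful for 64-bit ratios */
static u64 scale(u64 value, u64 numerator, u64 denominator)
{
	return mul_u64_u64_div64(value, numerator, denominator);
}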
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
new file mode 100644
index 000000000000..a03d9bba5151
--- /dev/null
+++ b/tools/include/linux/mm.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_MM_H
+#define _TOOLS_LINUX_MM_H
+
+#include <linux/mmzone.h>
+#include <uapi/linux/const.h>
+
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define PHYS_ADDR_MAX (~(phys_addr_t)0)
+
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
+
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+#define __va(x) ((void *)((unsigned long)(x)))
+#define __pa(x) ((unsigned long)(x))
+
+#define pfn_to_page(pfn) ((void *)((pfn) * PAGE_SIZE))
+
+#define phys_to_virt phys_to_virt
+static inline void *phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
+
+static inline void totalram_pages_inc(void)
+{
+}
+
+static inline void totalram_pages_add(long count)
+{
+}
+
+#endif
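The alignment helpers behave like their kernel counterparts; a hedged example with the 4 KiB PAGE_SIZE defined above (it assumes the accompanying tools headers such as <linux/mmzone.h> are on the include path):

#include <linux/mm.h>
#include <assert.h>

static void align_examples(void)
{
	/* ALIGN rounds up, ALIGN_DOWN rounds down, both to a power of two */
	assert(ALIGN(0x1234, 0x1000)      == 0x2000);
	assert(ALIGN_DOWN(0x1234, 0x1000) == 0x1000);

	/* PAGE_ALIGN() is just ALIGN(addr, PAGE_SIZE) with PAGE_SIZE = 4096 */
	assert(PAGE_ALIGN(1)      == PAGE_SIZE);
	assert(PAGE_ALIGN(0x1000) == 0x1000);
}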
diff --git a/tools/include/linux/objtool_types.h b/tools/include/linux/objtool_types.h
new file mode 100644
index 000000000000..453a4f4ef39d
--- /dev/null
+++ b/tools/include/linux/objtool_types.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_OBJTOOL_TYPES_H
+#define _LINUX_OBJTOOL_TYPES_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+ u8 signal;
+};
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * UNWIND_HINT_TYPE_UNDEFINED: A blind spot in ORC coverage which can result in
+ * a truncated and unreliable stack unwind.
+ *
+ * UNWIND_HINT_TYPE_END_OF_STACK: The end of the kernel stack unwind before
+ * hitting user entry, boot code, or fork entry (when there are no pt_regs
+ * available).
+ *
+ * UNWIND_HINT_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP
+ * (the caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * UNWIND_HINT_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset
+ * points to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * UNWIND_HINT_TYPE_REGS_PARTIAL: Used in entry code to indicate that
+ * sp_reg+sp_offset points to the iret return frame.
+ *
+ * UNWIND_HINT_TYPE_FUNC: Generate the unwind metadata of a callable function.
+ * Useful for code which doesn't have an ELF function annotation.
+ *
+ * UNWIND_HINT_TYPE_{SAVE,RESTORE}: Save the unwind metadata at a certain
+ * location so that it can be restored later.
+ */
+#define UNWIND_HINT_TYPE_UNDEFINED 0
+#define UNWIND_HINT_TYPE_END_OF_STACK 1
+#define UNWIND_HINT_TYPE_CALL 2
+#define UNWIND_HINT_TYPE_REGS 3
+#define UNWIND_HINT_TYPE_REGS_PARTIAL 4
+/* The below hint types don't have corresponding ORC types */
+#define UNWIND_HINT_TYPE_FUNC 5
+#define UNWIND_HINT_TYPE_SAVE 6
+#define UNWIND_HINT_TYPE_RESTORE 7
+
+#endif /* _LINUX_OBJTOOL_TYPES_H */
diff --git a/tools/include/linux/overflow.h b/tools/include/linux/overflow.h
index 8712ff70995f..dcb0c1bf6866 100644
--- a/tools/include/linux/overflow.h
+++ b/tools/include/linux/overflow.h
@@ -5,12 +5,9 @@
#include <linux/compiler.h>
/*
- * In the fallback code below, we need to compute the minimum and
- * maximum values representable in a given type. These macros may also
- * be useful elsewhere, so we provide them outside the
- * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
- *
- * It would seem more obvious to do something like
+ * We need to compute the minimum and maximum values representable in a given
+ * type. These macros may also be useful elsewhere. It would seem more obvious
+ * to do something like:
*
* #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
* #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
@@ -36,8 +33,6 @@
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
-
-#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
/*
* For simplicity and code hygiene, the fallback code below insists on
* a, b and *d having the same type (similar to the min() and max()
@@ -73,135 +68,6 @@
__builtin_mul_overflow(__a, __b, __d); \
})
-#else
-
-
-/* Checking for unsigned overflow is relatively easy without causing UB. */
-#define __unsigned_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a + __b; \
- *__d < __a; \
-})
-#define __unsigned_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a - __b; \
- __a < __b; \
-})
-/*
- * If one of a or b is a compile-time constant, this avoids a division.
- */
-#define __unsigned_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = __a * __b; \
- __builtin_constant_p(__b) ? \
- __b > 0 && __a > type_max(typeof(__a)) / __b : \
- __a > 0 && __b > type_max(typeof(__b)) / __a; \
-})
-
-/*
- * For signed types, detecting overflow is much harder, especially if
- * we want to avoid UB. But the interface of these macros is such that
- * we must provide a result in *d, and in fact we must produce the
- * result promised by gcc's builtins, which is simply the possibly
- * wrapped-around value. Fortunately, we can just formally do the
- * operations in the widest relevant unsigned type (u64) and then
- * truncate the result - gcc is smart enough to generate the same code
- * with and without the (u64) casts.
- */
-
-/*
- * Adding two signed integers can overflow only if they have the same
- * sign, and overflow has happened iff the result has the opposite
- * sign.
- */
-#define __signed_add_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a + (u64)__b; \
- (((~(__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
-
-/*
- * Subtraction is similar, except that overflow can now happen only
- * when the signs are opposite. In this case, overflow has happened if
- * the result has the opposite sign of a.
- */
-#define __signed_sub_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a - (u64)__b; \
- ((((__a ^ __b)) & (*__d ^ __a)) \
- & type_min(typeof(__a))) != 0; \
-})
-
-/*
- * Signed multiplication is rather hard. gcc always follows C99, so
- * division is truncated towards 0. This means that we can write the
- * overflow check like this:
- *
- * (a > 0 && (b > MAX/a || b < MIN/a)) ||
- * (a < -1 && (b > MIN/a || b < MAX/a) ||
- * (a == -1 && b == MIN)
- *
- * The redundant casts of -1 are to silence an annoying -Wtype-limits
- * (included in -Wextra) warning: When the type is u8 or u16, the
- * __b_c_e in check_mul_overflow obviously selects
- * __unsigned_mul_overflow, but unfortunately gcc still parses this
- * code and warns about the limited range of __b.
- */
-
-#define __signed_mul_overflow(a, b, d) ({ \
- typeof(a) __a = (a); \
- typeof(b) __b = (b); \
- typeof(d) __d = (d); \
- typeof(a) __tmax = type_max(typeof(a)); \
- typeof(a) __tmin = type_min(typeof(a)); \
- (void) (&__a == &__b); \
- (void) (&__a == __d); \
- *__d = (u64)__a * (u64)__b; \
- (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
- (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
- (__b == (typeof(__b))-1 && __a == __tmin); \
-})
-
-
-#define check_add_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_add_overflow(a, b, d), \
- __unsigned_add_overflow(a, b, d))
-
-#define check_sub_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_sub_overflow(a, b, d), \
- __unsigned_sub_overflow(a, b, d))
-
-#define check_mul_overflow(a, b, d) \
- __builtin_choose_expr(is_signed_type(typeof(a)), \
- __signed_mul_overflow(a, b, d), \
- __unsigned_mul_overflow(a, b, d))
-
-
-#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
-
/**
* array_size() - Calculate size of 2-dimensional array.
*
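With the fallback implementations removed, the check_*_overflow() wrappers always map to the compiler builtins; a hedged usage sketch (allocation-size checking is the typical caller pattern, and all three operands must have the same type):

#include <linux/overflow.h>
#include <stdlib.h>

/* Allocate nmemb * size bytes, refusing the allocation on overflow. */
static void *alloc_array(size_t nmemb, size_t size)
{
	size_t bytes;

	if (check_mul_overflow(nmemb, size, &bytes))
		return NULL;	/* the product would not fit in size_t */

	return malloc(bytes);
}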
diff --git a/tools/include/linux/pfn.h b/tools/include/linux/pfn.h
new file mode 100644
index 000000000000..7512a58189eb
--- /dev/null
+++ b/tools/include/linux/pfn.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_PFN_H_
+#define _TOOLS_LINUX_PFN_H_
+
+#include <linux/mm.h>
+
+#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
+#endif
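A hedged sketch of the pfn helpers together with the page definitions from <linux/mm.h> (PAGE_SHIFT is 12 here, so one frame is 4096 bytes):

#include <linux/pfn.h>
#include <assert.h>

static void pfn_examples(void)
{
	/* PFN_DOWN: frame containing the address; PFN_UP: round up */
	assert(PFN_DOWN(0x1fff) == 1);
	assert(PFN_UP(0x1001)   == 2);

	/* and back from a frame number to its physical base address */
	assert(PFN_PHYS(3) == 0x3000);
}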
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
index d29725769107..2e6338ac5eed 100644
--- a/tools/include/linux/poison.h
+++ b/tools/include/linux/poison.h
@@ -35,12 +35,8 @@
*/
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
-/********** mm/debug-pagealloc.c **********/
-#ifdef CONFIG_PAGE_POISONING_ZERO
-#define PAGE_POISON 0x00
-#else
+/********** mm/page_poison.c **********/
#define PAGE_POISON 0xaa
-#endif
/********** mm/page_alloc.c ************/
diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h
deleted file mode 100644
index 8b3b03b64fda..000000000000
--- a/tools/include/linux/proc_fs.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H
-#define _TOOLS_INCLUDE_LINUX_PROC_FS_H
-
-#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */
diff --git a/tools/include/linux/rbtree.h b/tools/include/linux/rbtree.h
index e03b1ea23e0e..2680f2edb837 100644
--- a/tools/include/linux/rbtree.h
+++ b/tools/include/linux/rbtree.h
@@ -11,7 +11,7 @@
  I know it's not the cleanest way, but in C (not in C++) it is needed to get
  performance and genericity...
- See Documentation/rbtree.txt for documentation and samples.
+ See Documentation/core-api/rbtree.rst for documentation and samples.
*/
#ifndef __TOOLS_LINUX_PERF_RBTREE_H
@@ -152,4 +152,194 @@ static inline void rb_replace_node_cached(struct rb_node *victim,
rb_replace_node(victim, new, &root->rb_root);
}
-#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
+/*
+ * The below helper functions use 2 operators with 3 different
+ * calling conventions. The operators are related like:
+ *
+ * comp(a->key,b) < 0 := less(a,b)
+ * comp(a->key,b) > 0 := less(b,a)
+ * comp(a->key,b) == 0 := !less(a,b) && !less(b,a)
+ *
+ * If these operators define a partial order on the elements we make no
+ * guarantee on which of the elements matching the key is found. See
+ * rb_find().
+ *
+ * The reason for this is to allow the find() interface without requiring an
+ * on-stack dummy object, which might not be feasible due to object size.
+ */
+
+/**
+ * rb_add_cached() - insert @node into the leftmost cached tree @tree
+ * @node: node to insert
+ * @tree: leftmost cached tree to insert @node into
+ * @less: operator defining the (partial) node order
+ */
+static __always_inline void
+rb_add_cached(struct rb_node *node, struct rb_root_cached *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ bool leftmost = true;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent)) {
+ link = &parent->rb_left;
+ } else {
+ link = &parent->rb_right;
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color_cached(node, tree, leftmost);
+}
+
+/**
+ * rb_add() - insert @node into @tree
+ * @node: node to insert
+ * @tree: tree to insert @node into
+ * @less: operator defining the (partial) node order
+ */
+static __always_inline void
+rb_add(struct rb_node *node, struct rb_root *tree,
+ bool (*less)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*link) {
+ parent = *link;
+ if (less(node, parent))
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+}
+
+/**
+ * rb_find_add() - find equivalent @node in @tree, or add @node
+ * @node: node to look-for / insert
+ * @tree: tree to search / modify
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @node, or NULL when no match is found and @node
+ * is inserted.
+ */
+static __always_inline struct rb_node *
+rb_find_add(struct rb_node *node, struct rb_root *tree,
+ int (*cmp)(struct rb_node *, const struct rb_node *))
+{
+ struct rb_node **link = &tree->rb_node;
+ struct rb_node *parent = NULL;
+ int c;
+
+ while (*link) {
+ parent = *link;
+ c = cmp(node, parent);
+
+ if (c < 0)
+ link = &parent->rb_left;
+ else if (c > 0)
+ link = &parent->rb_right;
+ else
+ return parent;
+ }
+
+ rb_link_node(node, parent, link);
+ rb_insert_color(node, tree);
+ return NULL;
+}
+
+/**
+ * rb_find() - find @key in tree @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining the node order
+ *
+ * Returns the rb_node matching @key or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c < 0)
+ node = node->rb_left;
+ else if (c > 0)
+ node = node->rb_right;
+ else
+ return node;
+ }
+
+ return NULL;
+}
+
+/**
+ * rb_find_first() - find the first @key in @tree
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ *
+ * Returns the leftmost node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_find_first(const void *key, const struct rb_root *tree,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ struct rb_node *node = tree->rb_node;
+ struct rb_node *match = NULL;
+
+ while (node) {
+ int c = cmp(key, node);
+
+ if (c <= 0) {
+ if (!c)
+ match = node;
+ node = node->rb_left;
+ } else if (c > 0) {
+ node = node->rb_right;
+ }
+ }
+
+ return match;
+}
+
+/**
+ * rb_next_match() - find the next node matching @key after @node
+ * @key: key to match
+ * @node: node to resume the search from
+ * @cmp: operator defining node order
+ *
+ * Returns the next node matching @key, or NULL.
+ */
+static __always_inline struct rb_node *
+rb_next_match(const void *key, struct rb_node *node,
+ int (*cmp)(const void *key, const struct rb_node *))
+{
+ node = rb_next(node);
+ if (node && cmp(key, node))
+ node = NULL;
+ return node;
+}
+
+/**
+ * rb_for_each() - iterates a subtree matching @key
+ * @node: iterator
+ * @key: key to match
+ * @tree: tree to search
+ * @cmp: operator defining node order
+ */
+#define rb_for_each(node, key, tree, cmp) \
+ for ((node) = rb_find_first((key), (tree), (cmp)); \
+ (node); (node) = rb_next_match((key), (node), (cmp)))
+
+#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
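A hedged sketch of the new helpers in use, following the calling conventions described in the comment above (the struct, key type and callbacks are invented for illustration):

#include <linux/rbtree.h>
#include <stdbool.h>

struct entry {
	unsigned long key;
	struct rb_node rb;
};

/* less(a, b): insertion order for rb_add() */
static bool entry_less(struct rb_node *a, const struct rb_node *b)
{
	return rb_entry(a, struct entry, rb)->key <
	       rb_entry(b, struct entry, rb)->key;
}

/* cmp(key, node): lookups via rb_find() without an on-stack dummy object */
static int entry_cmp(const void *key, const struct rb_node *node)
{
	unsigned long k = *(const unsigned long *)key;
	const struct entry *e = rb_entry(node, struct entry, rb);

	return k < e->key ? -1 : k > e->key ? 1 : 0;
}

static void entry_insert(struct rb_root *root, struct entry *e)
{
	rb_add(&e->rb, root, entry_less);
}

static struct entry *entry_lookup(struct rb_root *root, unsigned long key)
{
	struct rb_node *node = rb_find(&key, root, entry_cmp);

	return node ? rb_entry(node, struct entry, rb) : NULL;
}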
diff --git a/tools/include/linux/rbtree_augmented.h b/tools/include/linux/rbtree_augmented.h
index 381aa948610d..570bb9794421 100644
--- a/tools/include/linux/rbtree_augmented.h
+++ b/tools/include/linux/rbtree_augmented.h
@@ -23,7 +23,7 @@
* rb_insert_augmented() and rb_erase_augmented() are intended to be public.
* The rest are implementation details you are not expected to depend on.
*
- * See Documentation/rbtree.txt for documentation and samples.
+ * See Documentation/core-api/rbtree.rst for documentation and samples.
*/
struct rb_augment_callbacks {
diff --git a/tools/include/linux/sched/mm.h b/tools/include/linux/sched/mm.h
index c8d9f19c1f35..967294b8edcf 100644
--- a/tools/include/linux/sched/mm.h
+++ b/tools/include/linux/sched/mm.h
@@ -1,4 +1,6 @@
#ifndef _TOOLS_PERF_LINUX_SCHED_MM_H
#define _TOOLS_PERF_LINUX_SCHED_MM_H
+#define might_alloc(gfp) do { } while (0)
+
#endif /* _TOOLS_PERF_LINUX_SCHED_MM_H */
diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
new file mode 100644
index 000000000000..311759ea25e9
--- /dev/null
+++ b/tools/include/linux/slab.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_SLAB_H
+#define _TOOLS_SLAB_H
+
+#include <linux/types.h>
+#include <linux/gfp.h>
+
+#define SLAB_PANIC 2
+#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
+
+#define kzalloc_node(size, flags, node) kmalloc(size, flags)
+
+void *kmalloc(size_t size, gfp_t gfp);
+void kfree(void *p);
+
+bool slab_is_available(void);
+
+enum slab_state {
+ DOWN,
+ PARTIAL,
+ PARTIAL_NODE,
+ UP,
+ FULL
+};
+
+static inline void *kzalloc(size_t size, gfp_t gfp)
+{
+ return kmalloc(size, gfp | __GFP_ZERO);
+}
+
+struct list_lru;
+
+void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *, int flags);
+static inline void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
+{
+ return kmem_cache_alloc_lru(cachep, NULL, flags);
+}
+void kmem_cache_free(struct kmem_cache *cachep, void *objp);
+
+struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
+ unsigned int align, unsigned int flags,
+ void (*ctor)(void *));
+
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+ void **list);
+
+#endif /* _TOOLS_SLAB_H */
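A hedged sketch of the shim in use; it mirrors the kernel-side calling pattern, while the backing kmalloc()/kmem_cache implementations are expected to come from the test harness that includes this header (the widget type below is an invented example):

#include <linux/slab.h>

struct widget {
	int id;
};

static struct kmem_cache *widget_cache;

static int widget_cache_init(void)
{
	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
					 0, SLAB_PANIC, NULL);
	return widget_cache ? 0 : -1;
}

static struct widget *widget_new(int id, gfp_t gfp)
{
	/* kzalloc() is kmalloc() with __GFP_ZERO, as defined above */
	struct widget *w = kmem_cache_alloc(widget_cache, gfp);

	if (w)
		w->id = id;
	return w;
}

static void widget_free(struct widget *w)
{
	kmem_cache_free(widget_cache, w);
}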
diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h
index c934572d935c..622266b197d0 100644
--- a/tools/include/linux/spinlock.h
+++ b/tools/include/linux/spinlock.h
@@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex)
return true;
}
-#include <linux/lockdep.h>
-
#endif
diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h
deleted file mode 100644
index ae343ac35bfa..000000000000
--- a/tools/include/linux/stacktrace.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_
-#define _LIBLOCKDEP_LINUX_STACKTRACE_H_
-
-#include <execinfo.h>
-
-struct stack_trace {
- unsigned int nr_entries, max_entries;
- unsigned long *entries;
- int skip;
-};
-
-static inline void print_stack_trace(struct stack_trace *trace, int spaces)
-{
- backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1);
-}
-
-#define save_stack_trace(trace) \
- ((trace)->nr_entries = \
- backtrace((void **)(trace)->entries, (trace)->max_entries))
-
-static inline int dump_stack(void)
-{
- void *array[64];
- size_t size;
-
- size = backtrace(array, 64);
- backtrace_symbols_fd(array, size, 1);
-
- return 0;
-}
-
-#endif
diff --git a/tools/include/linux/static_call_types.h b/tools/include/linux/static_call_types.h
new file mode 100644
index 000000000000..5a00b8b2cf9f
--- /dev/null
+++ b/tools/include/linux/static_call_types.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _STATIC_CALL_TYPES_H
+#define _STATIC_CALL_TYPES_H
+
+#include <linux/types.h>
+#include <linux/stringify.h>
+#include <linux/compiler.h>
+
+#define STATIC_CALL_KEY_PREFIX __SCK__
+#define STATIC_CALL_KEY_PREFIX_STR __stringify(STATIC_CALL_KEY_PREFIX)
+#define STATIC_CALL_KEY_PREFIX_LEN (sizeof(STATIC_CALL_KEY_PREFIX_STR) - 1)
+#define STATIC_CALL_KEY(name) __PASTE(STATIC_CALL_KEY_PREFIX, name)
+#define STATIC_CALL_KEY_STR(name) __stringify(STATIC_CALL_KEY(name))
+
+#define STATIC_CALL_TRAMP_PREFIX __SCT__
+#define STATIC_CALL_TRAMP_PREFIX_STR __stringify(STATIC_CALL_TRAMP_PREFIX)
+#define STATIC_CALL_TRAMP_PREFIX_LEN (sizeof(STATIC_CALL_TRAMP_PREFIX_STR) - 1)
+#define STATIC_CALL_TRAMP(name) __PASTE(STATIC_CALL_TRAMP_PREFIX, name)
+#define STATIC_CALL_TRAMP_STR(name) __stringify(STATIC_CALL_TRAMP(name))
+
+/*
+ * Flags in the low bits of static_call_site::key.
+ */
+#define STATIC_CALL_SITE_TAIL 1UL /* tail call */
+#define STATIC_CALL_SITE_INIT 2UL /* init section */
+#define STATIC_CALL_SITE_FLAGS 3UL
+
+/*
+ * The static call site table needs to be created by external tooling (objtool
+ * or a compiler plugin).
+ */
+struct static_call_site {
+ s32 addr;
+ s32 key;
+};
+
+#define DECLARE_STATIC_CALL(name, func) \
+ extern struct static_call_key STATIC_CALL_KEY(name); \
+ extern typeof(func) STATIC_CALL_TRAMP(name);
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+
+#define __raw_static_call(name) (&STATIC_CALL_TRAMP(name))
+
+#ifdef CONFIG_HAVE_STATIC_CALL_INLINE
+
+/*
+ * __ADDRESSABLE() is used to ensure the key symbol doesn't get stripped from
+ * the symbol table so that objtool can reference it when it generates the
+ * .static_call_sites section.
+ */
+#define __STATIC_CALL_ADDRESSABLE(name) \
+ __ADDRESSABLE(STATIC_CALL_KEY(name))
+
+#define __static_call(name) \
+({ \
+ __STATIC_CALL_ADDRESSABLE(name); \
+ __raw_static_call(name); \
+})
+
+struct static_call_key {
+ void *func;
+ union {
+ /* bit 0: 0 = mods, 1 = sites */
+ unsigned long type;
+ struct static_call_mod *mods;
+ struct static_call_site *sites;
+ };
+};
+
+#else /* !CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#define __STATIC_CALL_ADDRESSABLE(name)
+#define __static_call(name) __raw_static_call(name)
+
+struct static_call_key {
+ void *func;
+};
+
+#endif /* CONFIG_HAVE_STATIC_CALL_INLINE */
+
+#ifdef MODULE
+#define __STATIC_CALL_MOD_ADDRESSABLE(name)
+#define static_call_mod(name) __raw_static_call(name)
+#else
+#define __STATIC_CALL_MOD_ADDRESSABLE(name) __STATIC_CALL_ADDRESSABLE(name)
+#define static_call_mod(name) __static_call(name)
+#endif
+
+#define static_call(name) __static_call(name)
+
+#else
+
+struct static_call_key {
+ void *func;
+};
+
+#define static_call(name) \
+ ((typeof(STATIC_CALL_TRAMP(name))*)(STATIC_CALL_KEY(name).func))
+
+#endif /* CONFIG_HAVE_STATIC_CALL */
+
+#endif /* _STATIC_CALL_TYPES_H */
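For reviewers unfamiliar with static calls, a hedged sketch of the declare/call pattern these types support (the names are invented; the DEFINE_STATIC_CALL() machinery that actually provides the key and trampoline lives in the full <linux/static_call.h>, not in this types-only header):

#include <linux/static_call_types.h>

static int my_default_op(int x)
{
	return x + 1;
}

/* declares STATIC_CALL_KEY(my_op) and STATIC_CALL_TRAMP(my_op) */
DECLARE_STATIC_CALL(my_op, my_default_op);

static int use_it(int x)
{
	/* expands to a call through the trampoline, or through the key's
	 * func pointer when CONFIG_HAVE_STATIC_CALL is not set
	 */
	return static_call(my_op)(x);
}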
diff --git a/tools/include/linux/string.h b/tools/include/linux/string.h
index 5e9e781905ed..db5c99318c79 100644
--- a/tools/include/linux/string.h
+++ b/tools/include/linux/string.h
@@ -46,4 +46,5 @@ extern char * __must_check skip_spaces(const char *);
extern char *strim(char *);
+extern void *memchr_inv(const void *start, int c, size_t bytes);
#endif /* _TOOLS_LINUX_STRING_H_ */
diff --git a/tools/include/linux/types.h b/tools/include/linux/types.h
index 154eb4e3ca7c..8519386acd23 100644
--- a/tools/include/linux/types.h
+++ b/tools/include/linux/types.h
@@ -6,7 +6,10 @@
#include <stddef.h>
#include <stdint.h>
+#ifndef __SANE_USERSPACE_TYPES__
#define __SANE_USERSPACE_TYPES__ /* For PPC64, to get LL64 types */
+#endif
+
#include <asm/types.h>
#include <asm/posix_types.h>
@@ -40,14 +43,18 @@ typedef __u8 u8;
typedef __s8 s8;
#ifdef __CHECKER__
-#define __bitwise__ __attribute__((bitwise))
+#define __bitwise __attribute__((bitwise))
#else
-#define __bitwise__
+#define __bitwise
#endif
-#define __bitwise __bitwise__
#define __force
+/* This is defined in linux/compiler_types.h and is left for backward
+ * compatibility.
+ */
+#ifndef __user
#define __user
+#endif
#define __must_check
#define __cold
@@ -58,10 +65,23 @@ typedef __u32 __bitwise __be32;
typedef __u64 __bitwise __le64;
typedef __u64 __bitwise __be64;
+typedef __u16 __bitwise __sum16;
+typedef __u32 __bitwise __wsum;
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+typedef u64 phys_addr_t;
+#else
+typedef u32 phys_addr_t;
+#endif
+
typedef struct {
int counter;
} atomic_t;
+typedef struct {
+ long counter;
+} atomic_long_t;
+
#ifndef __aligned_u64
# define __aligned_u64 __u64 __attribute__((aligned(8)))
#endif
diff --git a/tools/include/nolibc/.gitignore b/tools/include/nolibc/.gitignore
new file mode 100644
index 000000000000..dea22eaaed2b
--- /dev/null
+++ b/tools/include/nolibc/.gitignore
@@ -0,0 +1 @@
+sysroot
diff --git a/tools/include/nolibc/Makefile b/tools/include/nolibc/Makefile
new file mode 100644
index 000000000000..9839feafd38a
--- /dev/null
+++ b/tools/include/nolibc/Makefile
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for nolibc installation and tests
+include ../../scripts/Makefile.include
+
+# we're in ".../tools/include/nolibc"
+ifeq ($(srctree),)
+srctree := $(patsubst %/tools/include/,%,$(dir $(CURDIR)))
+endif
+
+# when run as make -C tools/ nolibc_<foo> the arch is not set
+ifeq ($(ARCH),)
+include $(srctree)/scripts/subarch.include
+ARCH = $(SUBARCH)
+endif
+
+# OUTPUT is only set when run from the main makefile, otherwise
+# it defaults to this nolibc directory.
+OUTPUT ?= $(CURDIR)/
+
+ifeq ($(V),1)
+Q=
+else
+Q=@
+endif
+
+nolibc_arch := $(patsubst arm64,aarch64,$(ARCH))
+arch_file := arch-$(nolibc_arch).h
+all_files := ctype.h errno.h nolibc.h signal.h stackprotector.h std.h stdint.h \
+ stdio.h stdlib.h string.h sys.h time.h types.h unistd.h
+
+# install all headers needed to support a bare-metal compiler
+all: headers
+
+install: help
+
+help:
+ @echo "Supported targets under nolibc:"
+ @echo " all call \"headers\""
+ @echo " clean clean the sysroot"
+ @echo " headers prepare a sysroot in tools/include/nolibc/sysroot"
+ @echo " headers_standalone like \"headers\", and also install kernel headers"
+ @echo " help this help"
+ @echo ""
+ @echo "These targets may also be called from tools as \"make nolibc_<target>\"."
+ @echo ""
+ @echo "Currently using the following variables:"
+ @echo " ARCH = $(ARCH)"
+ @echo " OUTPUT = $(OUTPUT)"
+ @echo ""
+
+# Note: when ARCH is "x86" we concatenate both x86_64 and i386
+headers:
+ $(Q)mkdir -p $(OUTPUT)sysroot
+ $(Q)mkdir -p $(OUTPUT)sysroot/include
+ $(Q)cp $(all_files) $(OUTPUT)sysroot/include/
+ $(Q)if [ "$(ARCH)" = "x86" ]; then \
+ sed -e \
+ 's,^#ifndef _NOLIBC_ARCH_X86_64_H,#if !defined(_NOLIBC_ARCH_X86_64_H) \&\& defined(__x86_64__),' \
+ arch-x86_64.h; \
+ sed -e \
+ 's,^#ifndef _NOLIBC_ARCH_I386_H,#if !defined(_NOLIBC_ARCH_I386_H) \&\& !defined(__x86_64__),' \
+ arch-i386.h; \
+ elif [ -e "$(arch_file)" ]; then \
+ cat $(arch_file); \
+ else \
+ echo "Fatal: architecture $(ARCH) not yet supported by nolibc." >&2; \
+ exit 1; \
+ fi > $(OUTPUT)sysroot/include/arch.h
+
+headers_standalone: headers
+ $(Q)$(MAKE) -C $(srctree) headers
+ $(Q)$(MAKE) -C $(srctree) headers_install INSTALL_HDR_PATH=$(OUTPUT)sysroot
+
+clean:
+ $(call QUIET_CLEAN, nolibc) rm -rf "$(OUTPUT)sysroot"
diff --git a/tools/include/nolibc/arch-aarch64.h b/tools/include/nolibc/arch-aarch64.h
new file mode 100644
index 000000000000..383baddef701
--- /dev/null
+++ b/tools/include/nolibc/arch-aarch64.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * AARCH64 specific definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_ARCH_AARCH64_H
+#define _NOLIBC_ARCH_AARCH64_H
+
+/* The struct returned by the newfstatat() syscall. Differs slightly from the
+ * x86_64's stat one by field ordering, so be careful.
+ */
+struct sys_stat_struct {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+
+ unsigned long st_rdev;
+ unsigned long __pad1;
+ long st_size;
+ int st_blksize;
+ int __pad2;
+
+ long st_blocks;
+ long st_atime;
+ unsigned long st_atime_nsec;
+ long st_mtime;
+
+ unsigned long st_mtime_nsec;
+ long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned int __unused[2];
+};
+
+/* Syscalls for AARCH64 :
+ * - registers are 64-bit
+ * - stack is 16-byte aligned
+ * - syscall number is passed in x8
+ * - arguments are in x0, x1, x2, x3, x4, x5
+ * - the system call is performed by calling svc 0
+ * - syscall return comes in x0.
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ *
+ * On aarch64, select() is not implemented so we have to use pselect6().
+ */
+#define __ARCH_WANT_SYS_PSELECT6
+
+#define my_syscall0(num) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0"); \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0") = (long)(arg1); \
+ register long _arg2 __asm__ ("x1") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0") = (long)(arg1); \
+ register long _arg2 __asm__ ("x1") = (long)(arg2); \
+ register long _arg3 __asm__ ("x2") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0") = (long)(arg1); \
+ register long _arg2 __asm__ ("x1") = (long)(arg2); \
+ register long _arg3 __asm__ ("x2") = (long)(arg3); \
+ register long _arg4 __asm__ ("x3") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r"(_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0") = (long)(arg1); \
+ register long _arg2 __asm__ ("x1") = (long)(arg2); \
+ register long _arg3 __asm__ ("x2") = (long)(arg3); \
+ register long _arg4 __asm__ ("x3") = (long)(arg4); \
+ register long _arg5 __asm__ ("x4") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r" (_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num __asm__ ("x8") = (num); \
+ register long _arg1 __asm__ ("x0") = (long)(arg1); \
+ register long _arg2 __asm__ ("x1") = (long)(arg2); \
+ register long _arg3 __asm__ ("x2") = (long)(arg3); \
+ register long _arg4 __asm__ ("x3") = (long)(arg4); \
+ register long _arg5 __asm__ ("x4") = (long)(arg5); \
+ register long _arg6 __asm__ ("x5") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "svc #0\n" \
+ : "=r" (_arg1) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_arg6), "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+/* startup code */
+void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) _start(void)
+{
+ __asm__ volatile (
+ "ldr x0, [sp]\n" // argc (x0) was in the stack
+ "add x1, sp, 8\n" // argv (x1) = sp
+ "lsl x2, x0, 3\n" // envp (x2) = 8*argc ...
+ "add x2, x2, 8\n" // + 8 (skip null)
+ "add x2, x2, x1\n" // + argv
+ "adrp x3, environ\n" // x3 = &environ (high bits)
+ "str x2, [x3, #:lo12:environ]\n" // store envp into environ
+ "mov x4, x2\n" // search for auxv (follows NULL after last env)
+ "0:\n"
+ "ldr x5, [x4], 8\n" // x5 = *x4; x4 += 8
+ "cbnz x5, 0b\n" // and stop at NULL after last env
+ "adrp x3, _auxv\n" // x3 = &_auxv (high bits)
+ "str x4, [x3, #:lo12:_auxv]\n" // store x4 into _auxv
+ "and sp, x1, -16\n" // sp must be 16-byte aligned in the callee
+ "bl main\n" // main() returns the status code, we'll exit with it.
+ "mov x8, 93\n" // NR_exit == 93
+ "svc #0\n"
+ );
+ __builtin_unreachable();
+}
+#endif // _NOLIBC_ARCH_AARCH64_H
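To illustrate the convention described above, a hedged example of wrapping one of these macros into a libc-style function; write(2) is used purely as an example, and the real wrappers live in nolibc's sys.h (__NR_write comes from the kernel headers installed in the sysroot):

#include <asm/unistd.h>		/* __NR_write */

static long sys_write_example(int fd, const void *buf, unsigned long count)
{
	long ret = my_syscall3(__NR_write, fd, buf, count);

	if (ret < 0) {
		/* nolibc's wrappers turn this into errno = -ret; return -1 */
		return -1;
	}
	return ret;
}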
diff --git a/tools/include/nolibc/arch-arm.h b/tools/include/nolibc/arch-arm.h
new file mode 100644
index 000000000000..42499f23e73c
--- /dev/null
+++ b/tools/include/nolibc/arch-arm.h
@@ -0,0 +1,242 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * ARM specific definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_ARCH_ARM_H
+#define _NOLIBC_ARCH_ARM_H
+
+/* The struct returned by the stat() syscall (32-bit only); the syscall
+ * returns exactly 56 bytes (it stops before the unused array). In big
+ * endian, the format differs because device numbers are returned as shorts.
+ */
+struct sys_stat_struct {
+#if defined(__ARMEB__)
+ unsigned short st_dev;
+ unsigned short __pad1;
+#else
+ unsigned long st_dev;
+#endif
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+
+#if defined(__ARMEB__)
+ unsigned short st_rdev;
+ unsigned short __pad2;
+#else
+ unsigned long st_rdev;
+#endif
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused[2];
+};
+
+/* Syscalls for ARM in ARM or Thumb modes :
+ * - registers are 32-bit
+ * - stack is 8-byte aligned
+ * ( http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html)
+ * - syscall number is passed in r7
+ * - arguments are in r0, r1, r2, r3, r4, r5
+ * - the system call is performed by calling svc #0
+ * - syscall return comes in r0.
+ * - only lr is clobbered.
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ * - the syscall number is always specified last in order to allow forcing
+ *   some registers before it (gcc refuses a %-register in the last position).
+ * - in thumb mode without -fomit-frame-pointer, r7 is also used to store the
+ * frame pointer, and we cannot directly assign it as a register variable,
+ * nor can we clobber it. Instead we assign the r6 register and swap it
+ * with r7 before calling svc, and r6 is marked as clobbered.
+ * We're just using any regular register which we assign to r7 after saving
+ * it.
+ *
+ * Also, ARM supports the old_select syscall if newselect is not available
+ */
+#define __ARCH_WANT_SYS_OLD_SELECT
+
+#if (defined(__THUMBEB__) || defined(__THUMBEL__)) && \
+ !defined(NOLIBC_OMIT_FRAME_POINTER)
+/* swap r6,r7 needed in Thumb mode since we can neither use nor clobber r7 */
+#define _NOLIBC_SYSCALL_REG "r6"
+#define _NOLIBC_THUMB_SET_R7 "eor r7, r6\neor r6, r7\neor r7, r6\n"
+#define _NOLIBC_THUMB_RESTORE_R7 "mov r7, r6\n"
+
+#else /* we're in ARM mode */
+/* in Arm mode we can directly use r7 */
+#define _NOLIBC_SYSCALL_REG "r7"
+#define _NOLIBC_THUMB_SET_R7 ""
+#define _NOLIBC_THUMB_RESTORE_R7 ""
+
+#endif /* end THUMB */
+
+#define my_syscall0(num) \
+({ \
+ register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
+ register long _arg1 __asm__ ("r0"); \
+ \
+ __asm__ volatile ( \
+ _NOLIBC_THUMB_SET_R7 \
+ "svc #0\n" \
+ _NOLIBC_THUMB_RESTORE_R7 \
+ : "=r"(_arg1), "=r"(_num) \
+ : "r"(_arg1), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
+ register long _arg1 __asm__ ("r0") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ _NOLIBC_THUMB_SET_R7 \
+ "svc #0\n" \
+ _NOLIBC_THUMB_RESTORE_R7 \
+ : "=r"(_arg1), "=r" (_num) \
+ : "r"(_arg1), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
+ register long _arg1 __asm__ ("r0") = (long)(arg1); \
+ register long _arg2 __asm__ ("r1") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ _NOLIBC_THUMB_SET_R7 \
+ "svc #0\n" \
+ _NOLIBC_THUMB_RESTORE_R7 \
+ : "=r"(_arg1), "=r" (_num) \
+ : "r"(_arg1), "r"(_arg2), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
+ register long _arg1 __asm__ ("r0") = (long)(arg1); \
+ register long _arg2 __asm__ ("r1") = (long)(arg2); \
+ register long _arg3 __asm__ ("r2") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ _NOLIBC_THUMB_SET_R7 \
+ "svc #0\n" \
+ _NOLIBC_THUMB_RESTORE_R7 \
+ : "=r"(_arg1), "=r" (_num) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
+ register long _arg1 __asm__ ("r0") = (long)(arg1); \
+ register long _arg2 __asm__ ("r1") = (long)(arg2); \
+ register long _arg3 __asm__ ("r2") = (long)(arg3); \
+ register long _arg4 __asm__ ("r3") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ _NOLIBC_THUMB_SET_R7 \
+ "svc #0\n" \
+ _NOLIBC_THUMB_RESTORE_R7 \
+ : "=r"(_arg1), "=r" (_num) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__(_NOLIBC_SYSCALL_REG) = (num); \
+ register long _arg1 __asm__ ("r0") = (long)(arg1); \
+ register long _arg2 __asm__ ("r1") = (long)(arg2); \
+ register long _arg3 __asm__ ("r2") = (long)(arg3); \
+ register long _arg4 __asm__ ("r3") = (long)(arg4); \
+ register long _arg5 __asm__ ("r4") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ _NOLIBC_THUMB_SET_R7 \
+ "svc #0\n" \
+ _NOLIBC_THUMB_RESTORE_R7 \
+ : "=r"(_arg1), "=r" (_num) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_num) \
+ : "memory", "cc", "lr" \
+ ); \
+ _arg1; \
+})
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+/* startup code */
+void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) _start(void)
+{
+ __asm__ volatile (
+ "pop {%r0}\n" // argc was in the stack
+ "mov %r1, %sp\n" // argv = sp
+
+ "add %r2, %r0, $1\n" // envp = (argc + 1) ...
+ "lsl %r2, %r2, $2\n" // * 4 ...
+ "add %r2, %r2, %r1\n" // + argv
+ "ldr %r3, 1f\n" // r3 = &environ (see below)
+ "str %r2, [r3]\n" // store envp into environ
+
+ "mov r4, r2\n" // search for auxv (follows NULL after last env)
+ "0:\n"
+ "mov r5, r4\n" // r5 = r4
+ "add r4, r4, #4\n" // r4 += 4
+ "ldr r5,[r5]\n" // r5 = *r5 = *(r4-4)
+ "cmp r5, #0\n" // and stop at NULL after last env
+ "bne 0b\n"
+ "ldr %r3, 2f\n" // r3 = &_auxv (low bits)
+ "str r4, [r3]\n" // store r4 into _auxv
+
+ "mov %r3, $8\n" // AAPCS : sp must be 8-byte aligned in the
+ "neg %r3, %r3\n" // callee, and bl doesn't push (lr=pc)
+ "and %r3, %r3, %r1\n" // so we do sp = r1(=sp) & r3(=-8);
+ "mov %sp, %r3\n" //
+
+ "bl main\n" // main() returns the status code, we'll exit with it.
+ "movs r7, $1\n" // NR_exit == 1
+ "svc $0x00\n"
+ ".align 2\n" // below are the pointers to a few variables
+ "1:\n"
+ ".word environ\n"
+ "2:\n"
+ ".word _auxv\n"
+ );
+ __builtin_unreachable();
+}
+
+#endif // _NOLIBC_ARCH_ARM_H
diff --git a/tools/include/nolibc/arch-i386.h b/tools/include/nolibc/arch-i386.h
new file mode 100644
index 000000000000..2d98d78fd3f3
--- /dev/null
+++ b/tools/include/nolibc/arch-i386.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * i386 specific definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_ARCH_I386_H
+#define _NOLIBC_ARCH_I386_H
+
+/* The struct returned by the stat() syscall (32-bit only); the syscall
+ * returns exactly 56 bytes (it stops before the unused array).
+ */
+struct sys_stat_struct {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned short st_mode;
+ unsigned short st_nlink;
+ unsigned short st_uid;
+ unsigned short st_gid;
+
+ unsigned long st_rdev;
+ unsigned long st_size;
+ unsigned long st_blksize;
+ unsigned long st_blocks;
+
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long __unused[2];
+};
+
+/* Syscalls for i386 :
+ * - mostly similar to x86_64
+ * - registers are 32-bit
+ * - syscall number is passed in eax
+ * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
+ * - all registers are preserved (except eax of course)
+ * - the system call is performed by calling int $0x80
+ * - syscall return comes in eax
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ * - the syscall number is always specified last in order to allow forcing
+ *   some registers before it (gcc refuses a %-register in the last position).
+ *
+ * Also, i386 supports the old_select syscall if newselect is not available
+ */
+#define __ARCH_WANT_SYS_OLD_SELECT
+
+#define my_syscall0(num) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ register long _arg2 __asm__ ("ecx") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ register long _arg2 __asm__ ("ecx") = (long)(arg2); \
+ register long _arg3 __asm__ ("edx") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ register long _arg2 __asm__ ("ecx") = (long)(arg2); \
+ register long _arg3 __asm__ ("edx") = (long)(arg3); \
+ register long _arg4 __asm__ ("esi") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("eax") = (num); \
+ register long _arg1 __asm__ ("ebx") = (long)(arg1); \
+ register long _arg2 __asm__ ("ecx") = (long)(arg2); \
+ register long _arg3 __asm__ ("edx") = (long)(arg3); \
+ register long _arg4 __asm__ ("esi") = (long)(arg4); \
+ register long _arg5 __asm__ ("edi") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "int $0x80\n" \
+ : "=a" (_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "0"(_num) \
+ : "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ long _eax = (long)(num); \
+ long _arg6 = (long)(arg6); /* Always in memory */ \
+ __asm__ volatile ( \
+ "pushl %[_arg6]\n\t" \
+ "pushl %%ebp\n\t" \
+ "movl 4(%%esp),%%ebp\n\t" \
+ "int $0x80\n\t" \
+ "popl %%ebp\n\t" \
+ "addl $4,%%esp\n\t" \
+ : "+a"(_eax) /* %eax */ \
+ : "b"(arg1), /* %ebx */ \
+ "c"(arg2), /* %ecx */ \
+ "d"(arg3), /* %edx */ \
+ "S"(arg4), /* %esi */ \
+ "D"(arg5), /* %edi */ \
+ [_arg6]"m"(_arg6) /* memory */ \
+ : "memory", "cc" \
+ ); \
+ _eax; \
+})
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+#define __ARCH_SUPPORTS_STACK_PROTECTOR
+
+/* startup code */
+/*
+ * The i386 System V ABI mandates that:
+ * 1) the last pushed argument must be 16-byte aligned;
+ * 2) the deepest stack frame should be set to zero.
+ */
+void __attribute__((weak,noreturn,optimize("omit-frame-pointer"),no_stack_protector)) _start(void)
+{
+ __asm__ volatile (
+#ifdef NOLIBC_STACKPROTECTOR
+ "call __stack_chk_init\n" // initialize stack protector
+#endif
+ "pop %eax\n" // argc (first arg, %eax)
+ "mov %esp, %ebx\n" // argv[] (second arg, %ebx)
+ "lea 4(%ebx,%eax,4),%ecx\n" // then a NULL then envp (third arg, %ecx)
+ "mov %ecx, environ\n" // save environ
+ "xor %ebp, %ebp\n" // zero the stack frame
+ "mov %ecx, %edx\n" // search for auxv (follows NULL after last env)
+ "0:\n"
+ "add $4, %edx\n" // search for auxv using edx, it follows the
+ "cmp -4(%edx), %ebp\n" // ... NULL after last env (ebp is zero here)
+ "jnz 0b\n"
+ "mov %edx, _auxv\n" // save it into _auxv
+ "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned before
+ "sub $4, %esp\n" // the call instruction (args are aligned)
+ "push %ecx\n" // push all registers on the stack so that we
+ "push %ebx\n" // support both regparm and plain stack modes
+ "push %eax\n"
+ "call main\n" // main() returns the status code in %eax
+ "mov %eax, %ebx\n" // retrieve exit code (32-bit int)
+ "movl $1, %eax\n" // NR_exit == 1
+ "int $0x80\n" // exit now
+ "hlt\n" // ensure it does not
+ );
+ __builtin_unreachable();
+}
+
+#endif // _NOLIBC_ARCH_I386_H
diff --git a/tools/include/nolibc/arch-loongarch.h b/tools/include/nolibc/arch-loongarch.h
new file mode 100644
index 000000000000..029ee3cd6baf
--- /dev/null
+++ b/tools/include/nolibc/arch-loongarch.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * LoongArch specific definitions for NOLIBC
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef _NOLIBC_ARCH_LOONGARCH_H
+#define _NOLIBC_ARCH_LOONGARCH_H
+
+/* Syscalls for LoongArch :
+ * - stack is 16-byte aligned
+ * - syscall number is passed in a7
+ * - arguments are in a0, a1, a2, a3, a4, a5
+ * - the system call is performed by calling "syscall 0"
+ * - syscall return comes in a0
+ * - the arguments are cast to long and assigned into the target
+ * registers which are then simply passed as registers to the asm code,
+ * so that we don't have to experience issues with register constraints.
+ *
+ * On LoongArch, select() is not implemented so we have to use pselect6().
+ */
+#define __ARCH_WANT_SYS_PSELECT6
+
+#define my_syscall0(num) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0"); \
+ \
+ __asm__ volatile ( \
+ "syscall 0\n" \
+ : "=r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "$t0", "$t1", "$t2", "$t3", \
+ "$t4", "$t5", "$t6", "$t7", "$t8" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ "syscall 0\n" \
+ : "+r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "$t0", "$t1", "$t2", "$t3", \
+ "$t4", "$t5", "$t6", "$t7", "$t8" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ "syscall 0\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), \
+ "r"(_num) \
+ : "memory", "$t0", "$t1", "$t2", "$t3", \
+ "$t4", "$t5", "$t6", "$t7", "$t8" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ "syscall 0\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), \
+ "r"(_num) \
+ : "memory", "$t0", "$t1", "$t2", "$t3", \
+ "$t4", "$t5", "$t6", "$t7", "$t8" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "syscall 0\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_num) \
+ : "memory", "$t0", "$t1", "$t2", "$t3", \
+ "$t4", "$t5", "$t6", "$t7", "$t8" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ register long _arg5 __asm__ ("a4") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "syscall 0\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_num) \
+ : "memory", "$t0", "$t1", "$t2", "$t3", \
+ "$t4", "$t5", "$t6", "$t7", "$t8" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ register long _arg5 __asm__ ("a4") = (long)(arg5); \
+ register long _arg6 __asm__ ("a5") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "syscall 0\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), "r"(_arg6), \
+ "r"(_num) \
+ : "memory", "$t0", "$t1", "$t2", "$t3", \
+ "$t4", "$t5", "$t6", "$t7", "$t8" \
+ ); \
+ _arg1; \
+})
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+#if __loongarch_grlen == 32
+#define LONGLOG "2"
+#define SZREG "4"
+#define REG_L "ld.w"
+#define LONG_S "st.w"
+#define LONG_ADD "add.w"
+#define LONG_ADDI "addi.w"
+#define LONG_SLL "slli.w"
+#define LONG_BSTRINS "bstrins.w"
+#else // __loongarch_grlen == 64
+#define LONGLOG "3"
+#define SZREG "8"
+#define REG_L "ld.d"
+#define LONG_S "st.d"
+#define LONG_ADD "add.d"
+#define LONG_ADDI "addi.d"
+#define LONG_SLL "slli.d"
+#define LONG_BSTRINS "bstrins.d"
+#endif
+
+/* startup code */
+void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) _start(void)
+{
+ __asm__ volatile (
+ REG_L " $a0, $sp, 0\n" // argc (a0) was in the stack
+ LONG_ADDI " $a1, $sp, "SZREG"\n" // argv (a1) = sp + SZREG
+ LONG_SLL " $a2, $a0, "LONGLOG"\n" // envp (a2) = SZREG*argc ...
+ LONG_ADDI " $a2, $a2, "SZREG"\n" // + SZREG (skip null)
+ LONG_ADD " $a2, $a2, $a1\n" // + argv
+
+ "move $a3, $a2\n" // iterate a3 over envp to find auxv (after NULL)
+ "0:\n" // do {
+ REG_L " $a4, $a3, 0\n" // a4 = *a3;
+ LONG_ADDI " $a3, $a3, "SZREG"\n" // a3 += sizeof(void*);
+ "bne $a4, $zero, 0b\n" // } while (a4);
+ "la.pcrel $a4, _auxv\n" // a4 = &_auxv
+ LONG_S " $a3, $a4, 0\n" // store a3 into _auxv
+
+ "la.pcrel $a3, environ\n" // a3 = &environ
+ LONG_S " $a2, $a3, 0\n" // store envp(a2) into environ
+ LONG_BSTRINS " $sp, $zero, 3, 0\n" // sp must be 16-byte aligned
+ "bl main\n" // main() returns the status code, we'll exit with it.
+ "li.w $a7, 93\n" // NR_exit == 93
+ "syscall 0\n"
+ );
+ __builtin_unreachable();
+}
+
+#endif // _NOLIBC_ARCH_LOONGARCH_H
diff --git a/tools/include/nolibc/arch-mips.h b/tools/include/nolibc/arch-mips.h
new file mode 100644
index 000000000000..bf83432d23ed
--- /dev/null
+++ b/tools/include/nolibc/arch-mips.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * MIPS specific definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_ARCH_MIPS_H
+#define _NOLIBC_ARCH_MIPS_H
+
+/* The struct returned by the stat() syscall. 88 bytes are returned by the
+ * syscall.
+ */
+struct sys_stat_struct {
+ unsigned int st_dev;
+ long st_pad1[3];
+ unsigned long st_ino;
+ unsigned int st_mode;
+ unsigned int st_nlink;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int st_rdev;
+ long st_pad2[2];
+ long st_size;
+ long st_pad3;
+
+ long st_atime;
+ long st_atime_nsec;
+ long st_mtime;
+ long st_mtime_nsec;
+
+ long st_ctime;
+ long st_ctime_nsec;
+ long st_blksize;
+ long st_blocks;
+ long st_pad4[14];
+};
+
+/* Syscalls for MIPS ABI O32 :
+ * - WARNING! there's always a delayed slot!
+ * - WARNING again, the syntax is different, registers take a '$' and numbers
+ * do not.
+ * - registers are 32-bit
+ * - stack is 8-byte aligned
+ * - syscall number is passed in v0 (starts at 0xfa0).
+ * - arguments are in a0, a1, a2, a3, then the stack. The caller needs to
+ * leave some room in the stack for the callee to save a0..a3 if needed.
+ * - Many registers are clobbered, in fact only a0..a2 and s0..s8 are
+ * preserved. See: https://www.linux-mips.org/wiki/Syscall as well as
+ * scall32-o32.S in the kernel sources.
+ * - the system call is performed by calling "syscall"
+ * - syscall return comes in v0, and register a3 needs to be checked to know
+ * if an error occurred, in which case errno is in v0.
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ */
+
+#define my_syscall0(num) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg4 __asm__ ("a3"); \
+ \
+ __asm__ volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r"(_num), "=r"(_arg4) \
+ : "r"(_num) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg4 __asm__ ("a3"); \
+ \
+ __asm__ volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r"(_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg4 __asm__ ("a3"); \
+ \
+ __asm__ volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r"(_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3"); \
+ \
+ __asm__ volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r"(_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "syscall\n" \
+ "addiu $sp, $sp, 32\n" \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__ ("v0") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ register long _arg5 = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "addiu $sp, $sp, -32\n" \
+ "sw %7, 16($sp)\n" \
+ "syscall\n " \
+ "addiu $sp, $sp, 32\n" \
+ : "=r" (_num), "=r"(_arg4) \
+ : "0"(_num), \
+ "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
+ : "memory", "cc", "at", "v1", "hi", "lo", \
+ "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
+ ); \
+ _arg4 ? -_num : _num; \
+})
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+/* startup code, note that it's called __start on MIPS */
+void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) __start(void)
+{
+ __asm__ volatile (
+ //".set nomips16\n"
+ ".set push\n"
+ ".set noreorder\n"
+ ".option pic0\n"
+ //".ent __start\n"
+ //"__start:\n"
+ "lw $a0,($sp)\n" // argc was in the stack
+ "addiu $a1, $sp, 4\n" // argv = sp + 4
+ "sll $a2, $a0, 2\n" // a2 = argc * 4
+ "add $a2, $a2, $a1\n" // envp = argv + 4*argc ...
+ "addiu $a2, $a2, 4\n" // ... + 4
+ "lui $a3, %hi(environ)\n" // load environ into a3 (hi)
+ "addiu $a3, %lo(environ)\n" // load environ into a3 (lo)
+ "sw $a2,($a3)\n" // store envp(a2) into environ
+
+ "move $t0, $a2\n" // iterate t0 over envp, look for NULL
+ "0:" // do {
+ "lw $a3, ($t0)\n" // a3=*(t0);
+ "bne $a3, $0, 0b\n" // } while (a3);
+ "addiu $t0, $t0, 4\n" // delayed slot: t0+=4;
+ "lui $a3, %hi(_auxv)\n" // load _auxv into a3 (hi)
+ "addiu $a3, %lo(_auxv)\n" // load _auxv into a3 (lo)
+ "sw $t0, ($a3)\n" // store t0 into _auxv
+
+ "li $t0, -8\n"
+ "and $sp, $sp, $t0\n" // sp must be 8-byte aligned
+ "addiu $sp,$sp,-16\n" // the callee expects to save a0..a3 there!
+ "jal main\n" // main() returns the status code, we'll exit with it.
+ "nop\n" // delayed slot
+ "move $a0, $v0\n" // retrieve 32-bit exit code from v0
+ "li $v0, 4001\n" // NR_exit == 4001
+ "syscall\n"
+ //".end __start\n"
+ ".set pop\n"
+ );
+ __builtin_unreachable();
+}
+
+#endif // _NOLIBC_ARCH_MIPS_H
diff --git a/tools/include/nolibc/arch-riscv.h b/tools/include/nolibc/arch-riscv.h
new file mode 100644
index 000000000000..e197fcb10ac0
--- /dev/null
+++ b/tools/include/nolibc/arch-riscv.h
@@ -0,0 +1,208 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * RISCV (32 and 64) specific definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_ARCH_RISCV_H
+#define _NOLIBC_ARCH_RISCV_H
+
+struct sys_stat_struct {
+ unsigned long st_dev; /* Device. */
+ unsigned long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long st_rdev; /* Device number, if device. */
+ unsigned long __pad1;
+ long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+ int __pad2;
+ long st_blocks; /* Number 512-byte blocks allocated. */
+ long st_atime; /* Time of last access. */
+ unsigned long st_atime_nsec;
+ long st_mtime; /* Time of last modification. */
+ unsigned long st_mtime_nsec;
+ long st_ctime; /* Time of last status change. */
+ unsigned long st_ctime_nsec;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
+#if __riscv_xlen == 64
+#define PTRLOG "3"
+#define SZREG "8"
+#elif __riscv_xlen == 32
+#define PTRLOG "2"
+#define SZREG "4"
+#endif
+
+/* Syscalls for RISCV :
+ * - stack is 16-byte aligned
+ * - syscall number is passed in a7
+ * - arguments are in a0, a1, a2, a3, a4, a5
+ * - the system call is performed by calling ecall
+ * - syscall return comes in a0
+ * - the arguments are cast to long and assigned into the target
+ * registers which are then simply passed as registers to the asm code,
+ * so that we don't have to experience issues with register constraints.
+ *
+ * On riscv, select() is not implemented so we have to use pselect6().
+ */
+#define __ARCH_WANT_SYS_PSELECT6
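+
+/* For illustration only (a sketch assuming the fd_set, struct timeval and
+ * struct timespec definitions provided elsewhere in nolibc), a select()
+ * emulation built on the my_syscall6() macro defined below looks roughly
+ * like this:
+ *
+ *   static int select_sketch(int nfds, fd_set *rfds, fd_set *wfds,
+ *                            fd_set *efds, struct timeval *timeout)
+ *   {
+ *           struct timespec t;
+ *
+ *           if (timeout) {
+ *                   t.tv_sec  = timeout->tv_sec;
+ *                   t.tv_nsec = timeout->tv_usec * 1000;
+ *           }
+ *           // the last argument is the sigmask descriptor, left NULL here
+ *           return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds,
+ *                              timeout ? &t : NULL, NULL);
+ *   }
+ */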
+
+#define my_syscall0(num) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0"); \
+ \
+ __asm__ volatile ( \
+ "ecall\n\t" \
+ : "=r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ "ecall\n\t" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ register long _arg5 __asm__ ("a4") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num __asm__ ("a7") = (num); \
+ register long _arg1 __asm__ ("a0") = (long)(arg1); \
+ register long _arg2 __asm__ ("a1") = (long)(arg2); \
+ register long _arg3 __asm__ ("a2") = (long)(arg3); \
+ register long _arg4 __asm__ ("a3") = (long)(arg4); \
+ register long _arg5 __asm__ ("a4") = (long)(arg5); \
+ register long _arg6 __asm__ ("a5") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "ecall\n" \
+ : "+r"(_arg1) \
+ : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), "r"(_arg6), \
+ "r"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+/* startup code */
+void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) _start(void)
+{
+ __asm__ volatile (
+ ".option push\n"
+ ".option norelax\n"
+ "lla gp, __global_pointer$\n"
+ ".option pop\n"
+ "lw a0, 0(sp)\n" // argc (a0) was in the stack
+ "add a1, sp, "SZREG"\n" // argv (a1) = sp
+ "slli a2, a0, "PTRLOG"\n" // envp (a2) = SZREG*argc ...
+ "add a2, a2, "SZREG"\n" // + SZREG (skip null)
+ "add a2,a2,a1\n" // + argv
+
+ "add a3, a2, zero\n" // iterate a3 over envp to find auxv (after NULL)
+ "0:\n" // do {
+ "ld a4, 0(a3)\n" // a4 = *a3;
+ "add a3, a3, "SZREG"\n" // a3 += sizeof(void*);
+ "bne a4, zero, 0b\n" // } while (a4);
+ "lui a4, %hi(_auxv)\n" // a4 = &_auxv (high bits)
+ "sd a3, %lo(_auxv)(a4)\n" // store a3 into _auxv
+
+ "lui a3, %hi(environ)\n" // a3 = &environ (high bits)
+ "sd a2,%lo(environ)(a3)\n" // store envp(a2) into environ
+ "andi sp,a1,-16\n" // sp must be 16-byte aligned
+ "call main\n" // main() returns the status code, we'll exit with it.
+ "li a7, 93\n" // NR_exit == 93
+ "ecall\n"
+ );
+ __builtin_unreachable();
+}
+
+#endif // _NOLIBC_ARCH_RISCV_H
diff --git a/tools/include/nolibc/arch-s390.h b/tools/include/nolibc/arch-s390.h
new file mode 100644
index 000000000000..6b0e54ed543d
--- /dev/null
+++ b/tools/include/nolibc/arch-s390.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * s390 specific definitions for NOLIBC
+ */
+
+#ifndef _NOLIBC_ARCH_S390_H
+#define _NOLIBC_ARCH_S390_H
+#include <asm/unistd.h>
+
+/* The struct returned by the stat() syscall, equivalent to stat64(). The
+ * syscall returns 116 bytes and stops in the middle of __unused.
+ */
+
+struct sys_stat_struct {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned long st_nlink;
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad1;
+ unsigned long st_rdev;
+ unsigned long st_size;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ unsigned long st_blksize;
+ long st_blocks;
+ unsigned long __unused[3];
+};
+
+/* Syscalls for s390:
+ * - registers are 64-bit
+ * - syscall number is passed in r1
+ * - arguments are in r2-r7
+ * - the system call is performed by calling the svc instruction
+ * - syscall return value is in r2
+ * - r1 and r2 are clobbered, others are preserved.
+ *
+ * Link s390 ABI: https://github.com/IBM/s390x-abi
+ *
+ */
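+
+/* A minimal sketch of this convention (not part of the patch): a getpid()
+ * wrapper built on the macros below is simply:
+ *
+ *   static pid_t getpid_sketch(void)
+ *   {
+ *           // number in r1, result (or negative errno) back in r2
+ *           return my_syscall0(__NR_getpid);
+ *   }
+ */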
+
+#define my_syscall0(num) \
+({ \
+ register long _num __asm__ ("1") = (num); \
+ register long _rc __asm__ ("2"); \
+ \
+ __asm__ volatile ( \
+ "svc 0\n" \
+ : "=d"(_rc) \
+ : "d"(_num) \
+ : "memory", "cc" \
+ ); \
+ _rc; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ register long _num __asm__ ("1") = (num); \
+ register long _arg1 __asm__ ("2") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ "svc 0\n" \
+ : "+d"(_arg1) \
+ : "d"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ register long _num __asm__ ("1") = (num); \
+ register long _arg1 __asm__ ("2") = (long)(arg1); \
+ register long _arg2 __asm__ ("3") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ "svc 0\n" \
+ : "+d"(_arg1) \
+ : "d"(_arg2), "d"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ register long _num __asm__ ("1") = (num); \
+ register long _arg1 __asm__ ("2") = (long)(arg1); \
+ register long _arg2 __asm__ ("3") = (long)(arg2); \
+ register long _arg3 __asm__ ("4") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ "svc 0\n" \
+ : "+d"(_arg1) \
+ : "d"(_arg2), "d"(_arg3), "d"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ register long _num __asm__ ("1") = (num); \
+ register long _arg1 __asm__ ("2") = (long)(arg1); \
+ register long _arg2 __asm__ ("3") = (long)(arg2); \
+ register long _arg3 __asm__ ("4") = (long)(arg3); \
+ register long _arg4 __asm__ ("5") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "svc 0\n" \
+ : "+d"(_arg1) \
+ : "d"(_arg2), "d"(_arg3), "d"(_arg4), "d"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ register long _num __asm__ ("1") = (num); \
+ register long _arg1 __asm__ ("2") = (long)(arg1); \
+ register long _arg2 __asm__ ("3") = (long)(arg2); \
+ register long _arg3 __asm__ ("4") = (long)(arg3); \
+ register long _arg4 __asm__ ("5") = (long)(arg4); \
+ register long _arg5 __asm__ ("6") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "svc 0\n" \
+ : "+d"(_arg1) \
+ : "d"(_arg2), "d"(_arg3), "d"(_arg4), "d"(_arg5), \
+ "d"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ register long _num __asm__ ("1") = (num); \
+ register long _arg1 __asm__ ("2") = (long)(arg1); \
+ register long _arg2 __asm__ ("3") = (long)(arg2); \
+ register long _arg3 __asm__ ("4") = (long)(arg3); \
+ register long _arg4 __asm__ ("5") = (long)(arg4); \
+ register long _arg5 __asm__ ("6") = (long)(arg5); \
+ register long _arg6 __asm__ ("7") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "svc 0\n" \
+ : "+d"(_arg1) \
+ : "d"(_arg2), "d"(_arg3), "d"(_arg4), "d"(_arg5), \
+ "d"(_arg6), "d"(_num) \
+ : "memory", "cc" \
+ ); \
+ _arg1; \
+})
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+/* startup code */
+void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) _start(void)
+{
+ __asm__ volatile (
+ "lg %r2,0(%r15)\n" /* argument count */
+ "la %r3,8(%r15)\n" /* argument pointers */
+
+ "xgr %r0,%r0\n" /* r0 will be our NULL value */
+ /* search for envp */
+ "lgr %r4,%r3\n" /* start at argv */
+ "0:\n"
+ "clg %r0,0(%r4)\n" /* entry zero? */
+ "la %r4,8(%r4)\n" /* advance pointer */
+ "jnz 0b\n" /* no -> test next pointer */
+ /* yes -> r4 now contains start of envp */
+ "larl %r1,environ\n"
+ "stg %r4,0(%r1)\n"
+
+ /* search for auxv */
+ "lgr %r5,%r4\n" /* start at envp */
+ "1:\n"
+ "clg %r0,0(%r5)\n" /* entry zero? */
+ "la %r5,8(%r5)\n" /* advance pointer */
+ "jnz 1b\n" /* no -> test next pointer */
+ "larl %r1,_auxv\n" /* yes -> store value in _auxv */
+ "stg %r5,0(%r1)\n"
+
+ "aghi %r15,-160\n" /* allocate new stackframe */
+ "xc 0(8,%r15),0(%r15)\n" /* clear backchain */
+ "brasl %r14,main\n" /* ret value of main is arg to exit */
+ "lghi %r1,1\n" /* __NR_exit */
+ "svc 0\n"
+ );
+ __builtin_unreachable();
+}
+
+struct s390_mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
+
+static __attribute__((unused))
+void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd,
+ off_t offset)
+{
+ struct s390_mmap_arg_struct args = {
+ .addr = (unsigned long)addr,
+ .len = (unsigned long)length,
+ .prot = prot,
+ .flags = flags,
+ .fd = fd,
+ .offset = (unsigned long)offset
+ };
+
+ return (void *)my_syscall1(__NR_mmap, &args);
+}
+#define sys_mmap sys_mmap
+#endif // _NOLIBC_ARCH_S390_H
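
Unlike the other architectures, s390 passes all six mmap() arguments through a
single structure, which is why the wrapper above only needs my_syscall1(). A
hedged usage sketch, assuming the usual PROT_xxx and MAP_xxx flag values are
visible (e.g. via <linux/mman.h>) and using the MAX_ERRNO limit from nolibc's
errno.h:

	static int mmap_sketch(void)
	{
		/* map one anonymous read/write page through the wrapper above */
		void *p = sys_mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		/* the raw return encodes -errno in the pointer value on failure */
		return (unsigned long)p >= (unsigned long)-MAX_ERRNO ? -1 : 0;
	}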
diff --git a/tools/include/nolibc/arch-x86_64.h b/tools/include/nolibc/arch-x86_64.h
new file mode 100644
index 000000000000..f7f2a11d4c3b
--- /dev/null
+++ b/tools/include/nolibc/arch-x86_64.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * x86_64 specific definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_ARCH_X86_64_H
+#define _NOLIBC_ARCH_X86_64_H
+
+/* The struct returned by the stat() syscall, equivalent to stat64(). The
+ * syscall returns 116 bytes and stops in the middle of __unused.
+ */
+struct sys_stat_struct {
+ unsigned long st_dev;
+ unsigned long st_ino;
+ unsigned long st_nlink;
+ unsigned int st_mode;
+ unsigned int st_uid;
+
+ unsigned int st_gid;
+ unsigned int __pad0;
+ unsigned long st_rdev;
+ long st_size;
+ long st_blksize;
+
+ long st_blocks;
+ unsigned long st_atime;
+ unsigned long st_atime_nsec;
+ unsigned long st_mtime;
+
+ unsigned long st_mtime_nsec;
+ unsigned long st_ctime;
+ unsigned long st_ctime_nsec;
+ long __unused[3];
+};
+
+/* Syscalls for x86_64 :
+ * - registers are 64-bit
+ * - syscall number is passed in rax
+ * - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively
+ * - the system call is performed by calling the syscall instruction
+ * - syscall return comes in rax
+ * - rcx and r11 are clobbered, others are preserved.
+ * - the arguments are cast to long and assigned into the target registers
+ * which are then simply passed as registers to the asm code, so that we
+ * don't have to experience issues with register constraints.
+ * - the syscall number is always specified last in order to allow to force
+ * some registers before (gcc refuses a %-register at the last position).
+ * - see also x86-64 ABI section A.2 AMD64 Linux Kernel Conventions, A.2.1
+ * Calling Conventions.
+ *
+ * Link x86-64 ABI: https://gitlab.com/x86-psABIs/x86-64-ABI/-/wikis/home
+ *
+ */
+
+#define my_syscall0(num) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("rax") = (num); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall1(num, arg1) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("rax") = (num); \
+ register long _arg1 __asm__ ("rdi") = (long)(arg1); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall2(num, arg1, arg2) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("rax") = (num); \
+ register long _arg1 __asm__ ("rdi") = (long)(arg1); \
+ register long _arg2 __asm__ ("rsi") = (long)(arg2); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall3(num, arg1, arg2, arg3) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("rax") = (num); \
+ register long _arg1 __asm__ ("rdi") = (long)(arg1); \
+ register long _arg2 __asm__ ("rsi") = (long)(arg2); \
+ register long _arg3 __asm__ ("rdx") = (long)(arg3); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall4(num, arg1, arg2, arg3, arg4) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("rax") = (num); \
+ register long _arg1 __asm__ ("rdi") = (long)(arg1); \
+ register long _arg2 __asm__ ("rsi") = (long)(arg2); \
+ register long _arg3 __asm__ ("rdx") = (long)(arg3); \
+ register long _arg4 __asm__ ("r10") = (long)(arg4); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("rax") = (num); \
+ register long _arg1 __asm__ ("rdi") = (long)(arg1); \
+ register long _arg2 __asm__ ("rsi") = (long)(arg2); \
+ register long _arg3 __asm__ ("rdx") = (long)(arg3); \
+ register long _arg4 __asm__ ("r10") = (long)(arg4); \
+ register long _arg5 __asm__ ("r8") = (long)(arg5); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
+({ \
+ long _ret; \
+ register long _num __asm__ ("rax") = (num); \
+ register long _arg1 __asm__ ("rdi") = (long)(arg1); \
+ register long _arg2 __asm__ ("rsi") = (long)(arg2); \
+ register long _arg3 __asm__ ("rdx") = (long)(arg3); \
+ register long _arg4 __asm__ ("r10") = (long)(arg4); \
+ register long _arg5 __asm__ ("r8") = (long)(arg5); \
+ register long _arg6 __asm__ ("r9") = (long)(arg6); \
+ \
+ __asm__ volatile ( \
+ "syscall\n" \
+ : "=a"(_ret) \
+ : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
+ "r"(_arg6), "0"(_num) \
+ : "rcx", "r11", "memory", "cc" \
+ ); \
+ _ret; \
+})
+
+char **environ __attribute__((weak));
+const unsigned long *_auxv __attribute__((weak));
+
+#define __ARCH_SUPPORTS_STACK_PROTECTOR
+
+/* startup code */
+/*
+ * x86-64 System V ABI mandates:
+ * 1) %rsp must be 16-byte aligned right before the function call.
+ * 2) The deepest stack frame should be zero (the %rbp).
+ *
+ */
+void __attribute__((weak,noreturn,optimize("omit-frame-pointer"))) _start(void)
+{
+ __asm__ volatile (
+#ifdef NOLIBC_STACKPROTECTOR
+ "call __stack_chk_init\n" // initialize stack protector
+#endif
+ "pop %rdi\n" // argc (first arg, %rdi)
+ "mov %rsp, %rsi\n" // argv[] (second arg, %rsi)
+ "lea 8(%rsi,%rdi,8),%rdx\n" // then a NULL then envp (third arg, %rdx)
+ "mov %rdx, environ\n" // save environ
+ "xor %ebp, %ebp\n" // zero the stack frame
+ "mov %rdx, %rax\n" // search for auxv (follows NULL after last env)
+ "0:\n"
+ "add $8, %rax\n" // search for auxv using rax, it follows the
+ "cmp -8(%rax), %rbp\n" // ... NULL after last env (rbp is zero here)
+ "jnz 0b\n"
+ "mov %rax, _auxv\n" // save it into _auxv
+ "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned before call
+ "call main\n" // main() returns the status code, we'll exit with it.
+ "mov %eax, %edi\n" // retrieve exit code (32 bit)
+ "mov $60, %eax\n" // NR_exit == 60
+ "syscall\n" // really exit
+ "hlt\n" // ensure it does not return
+ );
+ __builtin_unreachable();
+}
+
+#endif // _NOLIBC_ARCH_X86_64_H
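
The NOLIBC_STACKPROTECTOR hook above expects the usual gcc stack-protector
symbols to exist. A minimal sketch of such hooks, for illustration only and
not the implementation shipped in nolibc's stackprotector.h:

	__attribute__((weak)) uintptr_t __stack_chk_guard;

	void __attribute__((weak)) __stack_chk_init(void)
	{
		/* derive some canary value; a real implementation would rather
		 * use AT_RANDOM from _auxv or the getrandom() syscall */
		__stack_chk_guard = (uintptr_t)&__stack_chk_guard ^ 0x5aa55aa5UL;
	}

	void __attribute__((weak,noreturn)) __stack_chk_fail(void)
	{
		my_syscall1(__NR_exit, 127);	/* abort without returning */
		for (;;);
	}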
diff --git a/tools/include/nolibc/arch.h b/tools/include/nolibc/arch.h
new file mode 100644
index 000000000000..2d5386a8d6aa
--- /dev/null
+++ b/tools/include/nolibc/arch.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+/* Below comes the architecture-specific code. For each architecture, we have
+ * the syscall declarations and the _start code definition. This is the only
+ * global part. On all architectures the kernel puts everything in the stack
+ * before jumping to _start just above us, without any return address (_start
+ * is not a function but an entry point). So at the stack pointer we find argc.
+ * Then argv[] begins, and ends at the first NULL. Then we have envp which
+ * starts and ends with a NULL as well. So envp=argv+argc+1.
+ */
+
+#ifndef _NOLIBC_ARCH_H
+#define _NOLIBC_ARCH_H
+
+#if defined(__x86_64__)
+#include "arch-x86_64.h"
+#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
+#include "arch-i386.h"
+#elif defined(__ARM_EABI__)
+#include "arch-arm.h"
+#elif defined(__aarch64__)
+#include "arch-aarch64.h"
+#elif defined(__mips__) && defined(_ABIO32)
+#include "arch-mips.h"
+#elif defined(__riscv)
+#include "arch-riscv.h"
+#elif defined(__s390x__)
+#include "arch-s390.h"
+#elif defined(__loongarch__)
+#include "arch-loongarch.h"
+#endif
+
+#endif /* _NOLIBC_ARCH_H */
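
The layout described in the comment above can be summarized in C, for
illustration only; the real code must do this in assembly because no stack
frame exists yet when _start runs:

	static void layout_sketch(long *sp)
	{
		long           argc = sp[0];
		char         **argv = (char **)(sp + 1);
		char         **envp = argv + argc + 1;	/* skip argv[] and its NULL */
		char         **p    = envp;
		unsigned long *auxv;

		while (*p)				/* envp ends with a NULL... */
			p++;
		auxv = (unsigned long *)(p + 1);	/* ...and auxv follows it */
		(void)argc; (void)auxv;
	}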
diff --git a/tools/include/nolibc/ctype.h b/tools/include/nolibc/ctype.h
new file mode 100644
index 000000000000..6f90706d0644
--- /dev/null
+++ b/tools/include/nolibc/ctype.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * ctype function definitions for NOLIBC
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_CTYPE_H
+#define _NOLIBC_CTYPE_H
+
+#include "std.h"
+
+/*
+ * As much as possible, please keep functions alphabetically sorted.
+ */
+
+static __attribute__((unused))
+int isascii(int c)
+{
+ /* 0x00..0x7f */
+ return (unsigned int)c <= 0x7f;
+}
+
+static __attribute__((unused))
+int isblank(int c)
+{
+ return c == '\t' || c == ' ';
+}
+
+static __attribute__((unused))
+int iscntrl(int c)
+{
+ /* 0x00..0x1f, 0x7f */
+ return (unsigned int)c < 0x20 || c == 0x7f;
+}
+
+static __attribute__((unused))
+int isdigit(int c)
+{
+ return (unsigned int)(c - '0') < 10;
+}
+
+static __attribute__((unused))
+int isgraph(int c)
+{
+ /* 0x21..0x7e */
+ return (unsigned int)(c - 0x21) < 0x5e;
+}
+
+static __attribute__((unused))
+int islower(int c)
+{
+ return (unsigned int)(c - 'a') < 26;
+}
+
+static __attribute__((unused))
+int isprint(int c)
+{
+ /* 0x20..0x7e */
+ return (unsigned int)(c - 0x20) < 0x5f;
+}
+
+static __attribute__((unused))
+int isspace(int c)
+{
+ /* \t is 0x9, \n is 0xA, \v is 0xB, \f is 0xC, \r is 0xD */
+ return ((unsigned int)c == ' ') || (unsigned int)(c - 0x09) < 5;
+}
+
+static __attribute__((unused))
+int isupper(int c)
+{
+ return (unsigned int)(c - 'A') < 26;
+}
+
+static __attribute__((unused))
+int isxdigit(int c)
+{
+ return isdigit(c) || (unsigned int)(c - 'A') < 6 || (unsigned int)(c - 'a') < 6;
+}
+
+static __attribute__((unused))
+int isalpha(int c)
+{
+ return islower(c) || isupper(c);
+}
+
+static __attribute__((unused))
+int isalnum(int c)
+{
+ return isalpha(c) || isdigit(c);
+}
+
+static __attribute__((unused))
+int ispunct(int c)
+{
+ return isgraph(c) && !isalnum(c);
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_CTYPE_H */
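
These ctype helpers all rely on the same unsigned subtract-and-compare idiom
to fold a two-sided range check into a single comparison; for example,
isdigit() above is equivalent to the more verbose form shown here only for
illustration:

	static int isdigit_verbose(int c)
	{
		return c >= '0' && c <= '9';
	}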
diff --git a/tools/include/nolibc/errno.h b/tools/include/nolibc/errno.h
new file mode 100644
index 000000000000..a44486ff0477
--- /dev/null
+++ b/tools/include/nolibc/errno.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Minimal errno definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_ERRNO_H
+#define _NOLIBC_ERRNO_H
+
+#include <asm/errno.h>
+
+#ifndef NOLIBC_IGNORE_ERRNO
+#define SET_ERRNO(v) do { errno = (v); } while (0)
+int errno __attribute__((weak));
+#else
+#define SET_ERRNO(v) do { } while (0)
+#endif
+
+
+/* errno codes all ensure that they will not conflict with a valid pointer
+ * because they all correspond to the highest addressable memory page.
+ */
+#define MAX_ERRNO 4095
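+
+/* This property is what lets callers detect errors in raw syscall returns,
+ * including pointer-returning calls like mmap(). A sketch of the usual
+ * conversion pattern (not a helper defined in this file):
+ *
+ *   static long sysret_sketch(long ret)
+ *   {
+ *           if ((unsigned long)ret >= (unsigned long)-MAX_ERRNO) {
+ *                   SET_ERRNO(-ret);        // ret holds -errno
+ *                   ret = -1;
+ *           }
+ *           return ret;
+ *   }
+ */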
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_ERRNO_H */
diff --git a/tools/include/nolibc/nolibc.h b/tools/include/nolibc/nolibc.h
index 2551e9b71167..04739a6293c4 100644
--- a/tools/include/nolibc/nolibc.h
+++ b/tools/include/nolibc/nolibc.h
@@ -57,21 +57,31 @@
* having to specify anything.
*
* Finally some very common libc-level functions are provided. It is the case
- * for a few functions usually found in string.h, ctype.h, or stdlib.h. Nothing
- * is currently provided regarding stdio emulation.
+ * for a few functions usually found in string.h, ctype.h, or stdlib.h.
*
- * The macro NOLIBC is always defined, so that it is possible for a program to
- * check this macro to know if it is being built against and decide to disable
- * some features or simply not to include some standard libc files.
- *
- * Ideally this file should be split in multiple files for easier long term
- * maintenance, but provided as a single file as it is now, it's quite
- * convenient to use. Maybe some variations involving a set of includes at the
- * top could work.
+ * The nolibc.h file is only a convenient entry point which includes all other
+ * files. It also defines the NOLIBC macro, so that a program can check this
+ * macro to know whether it is being built against nolibc and decide to
+ * disable some features or simply not to include some standard libc files.
*
* A simple static executable may be built this way :
* $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
- * -static -include nolibc.h -lgcc -o hello hello.c
+ * -static -include nolibc.h -o hello hello.c -lgcc
+ *
+ * Simple programs meant to be reasonably portable to various libc and using
+ * only a few common includes, may also be built by simply making the include
+ * path point to the nolibc directory:
+ * $ gcc -fno-asynchronous-unwind-tables -fno-ident -s -Os -nostdlib \
+ * -I../nolibc -o hello hello.c -lgcc
+ *
+ * The available standard (but limited) include files are:
+ * ctype.h, errno.h, signal.h, stdio.h, stdlib.h, string.h, time.h
+ *
+ * In addition, the following ones are expected to be provided by the compiler:
+ * float.h, stdarg.h, stddef.h
+ *
+ * The following ones, which are part of the C standard, are not provided:
+ * assert.h, locale.h, math.h, setjmp.h, limits.h
*
* A very useful calling convention table may be found here :
* http://man7.org/linux/man-pages/man2/syscall.2.html
@@ -80,2378 +90,23 @@
* https://w3challs.com/syscalls/
*
*/
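
For reference, a hello.c matching the build commands above can be as small as
the following (printf() here is nolibc's limited stdio implementation; the
include also keeps the file buildable against a regular libc):

	#include <stdio.h>

	int main(void)
	{
		printf("hello, world\n");
		return 0;
	}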
+#ifndef _NOLIBC_H
+#define _NOLIBC_H
-/* Some archs (at least aarch64) don't expose the regular syscalls anymore by
- * default, either because they have an "_at" replacement, or because there are
- * more modern alternatives. For now we'd rather still use them.
- */
-#define __ARCH_WANT_SYSCALL_NO_AT
-#define __ARCH_WANT_SYSCALL_NO_FLAGS
-#define __ARCH_WANT_SYSCALL_DEPRECATED
-
-#include <asm/unistd.h>
-#include <asm/ioctls.h>
-#include <asm/errno.h>
-#include <linux/fs.h>
-#include <linux/loop.h>
+#include "std.h"
+#include "arch.h"
+#include "types.h"
+#include "sys.h"
+#include "ctype.h"
+#include "signal.h"
+#include "stdio.h"
+#include "stdlib.h"
+#include "string.h"
+#include "time.h"
+#include "unistd.h"
+#include "stackprotector.h"
+/* Used by programs to avoid std includes */
#define NOLIBC
-/* this way it will be removed if unused */
-static int errno;
-
-#ifndef NOLIBC_IGNORE_ERRNO
-#define SET_ERRNO(v) do { errno = (v); } while (0)
-#else
-#define SET_ERRNO(v) do { } while (0)
-#endif
-
-/* errno codes all ensure that they will not conflict with a valid pointer
- * because they all correspond to the highest addressable memry page.
- */
-#define MAX_ERRNO 4095
-
-/* Declare a few quite common macros and types that usually are in stdlib.h,
- * stdint.h, ctype.h, unistd.h and a few other common locations.
- */
-
-#define NULL ((void *)0)
-
-/* stdint types */
-typedef unsigned char uint8_t;
-typedef signed char int8_t;
-typedef unsigned short uint16_t;
-typedef signed short int16_t;
-typedef unsigned int uint32_t;
-typedef signed int int32_t;
-typedef unsigned long long uint64_t;
-typedef signed long long int64_t;
-typedef unsigned long size_t;
-typedef signed long ssize_t;
-typedef unsigned long uintptr_t;
-typedef signed long intptr_t;
-typedef signed long ptrdiff_t;
-
-/* for stat() */
-typedef unsigned int dev_t;
-typedef unsigned long ino_t;
-typedef unsigned int mode_t;
-typedef signed int pid_t;
-typedef unsigned int uid_t;
-typedef unsigned int gid_t;
-typedef unsigned long nlink_t;
-typedef signed long off_t;
-typedef signed long blksize_t;
-typedef signed long blkcnt_t;
-typedef signed long time_t;
-
-/* for poll() */
-struct pollfd {
- int fd;
- short int events;
- short int revents;
-};
-
-/* for select() */
-struct timeval {
- long tv_sec;
- long tv_usec;
-};
-
-/* for pselect() */
-struct timespec {
- long tv_sec;
- long tv_nsec;
-};
-
-/* for gettimeofday() */
-struct timezone {
- int tz_minuteswest;
- int tz_dsttime;
-};
-
-/* for getdents64() */
-struct linux_dirent64 {
- uint64_t d_ino;
- int64_t d_off;
- unsigned short d_reclen;
- unsigned char d_type;
- char d_name[];
-};
-
-/* commonly an fd_set represents 256 FDs */
-#define FD_SETSIZE 256
-typedef struct { uint32_t fd32[FD_SETSIZE/32]; } fd_set;
-
-/* needed by wait4() */
-struct rusage {
- struct timeval ru_utime;
- struct timeval ru_stime;
- long ru_maxrss;
- long ru_ixrss;
- long ru_idrss;
- long ru_isrss;
- long ru_minflt;
- long ru_majflt;
- long ru_nswap;
- long ru_inblock;
- long ru_oublock;
- long ru_msgsnd;
- long ru_msgrcv;
- long ru_nsignals;
- long ru_nvcsw;
- long ru_nivcsw;
-};
-
-/* stat flags (WARNING, octal here) */
-#define S_IFDIR 0040000
-#define S_IFCHR 0020000
-#define S_IFBLK 0060000
-#define S_IFREG 0100000
-#define S_IFIFO 0010000
-#define S_IFLNK 0120000
-#define S_IFSOCK 0140000
-#define S_IFMT 0170000
-
-#define S_ISDIR(mode) (((mode) & S_IFDIR) == S_IFDIR)
-#define S_ISCHR(mode) (((mode) & S_IFCHR) == S_IFCHR)
-#define S_ISBLK(mode) (((mode) & S_IFBLK) == S_IFBLK)
-#define S_ISREG(mode) (((mode) & S_IFREG) == S_IFREG)
-#define S_ISFIFO(mode) (((mode) & S_IFIFO) == S_IFIFO)
-#define S_ISLNK(mode) (((mode) & S_IFLNK) == S_IFLNK)
-#define S_ISSOCK(mode) (((mode) & S_IFSOCK) == S_IFSOCK)
-
-#define DT_UNKNOWN 0
-#define DT_FIFO 1
-#define DT_CHR 2
-#define DT_DIR 4
-#define DT_BLK 6
-#define DT_REG 8
-#define DT_LNK 10
-#define DT_SOCK 12
-
-/* all the *at functions */
-#ifndef AT_FDWCD
-#define AT_FDCWD -100
-#endif
-
-/* lseek */
-#define SEEK_SET 0
-#define SEEK_CUR 1
-#define SEEK_END 2
-
-/* reboot */
-#define LINUX_REBOOT_MAGIC1 0xfee1dead
-#define LINUX_REBOOT_MAGIC2 0x28121969
-#define LINUX_REBOOT_CMD_HALT 0xcdef0123
-#define LINUX_REBOOT_CMD_POWER_OFF 0x4321fedc
-#define LINUX_REBOOT_CMD_RESTART 0x01234567
-#define LINUX_REBOOT_CMD_SW_SUSPEND 0xd000fce2
-
-
-/* The format of the struct as returned by the libc to the application, which
- * significantly differs from the format returned by the stat() syscall flavours.
- */
-struct stat {
- dev_t st_dev; /* ID of device containing file */
- ino_t st_ino; /* inode number */
- mode_t st_mode; /* protection */
- nlink_t st_nlink; /* number of hard links */
- uid_t st_uid; /* user ID of owner */
- gid_t st_gid; /* group ID of owner */
- dev_t st_rdev; /* device ID (if special file) */
- off_t st_size; /* total size, in bytes */
- blksize_t st_blksize; /* blocksize for file system I/O */
- blkcnt_t st_blocks; /* number of 512B blocks allocated */
- time_t st_atime; /* time of last access */
- time_t st_mtime; /* time of last modification */
- time_t st_ctime; /* time of last status change */
-};
-
-#define WEXITSTATUS(status) (((status) & 0xff00) >> 8)
-#define WIFEXITED(status) (((status) & 0x7f) == 0)
-
-
-/* Below comes the architecture-specific code. For each architecture, we have
- * the syscall declarations and the _start code definition. This is the only
- * global part. On all architectures the kernel puts everything in the stack
- * before jumping to _start just above us, without any return address (_start
- * is not a function but an entry pint). So at the stack pointer we find argc.
- * Then argv[] begins, and ends at the first NULL. Then we have envp which
- * starts and ends with a NULL as well. So envp=argv+argc+1.
- */
-
-#if defined(__x86_64__)
-/* Syscalls for x86_64 :
- * - registers are 64-bit
- * - syscall number is passed in rax
- * - arguments are in rdi, rsi, rdx, r10, r8, r9 respectively
- * - the system call is performed by calling the syscall instruction
- * - syscall return comes in rax
- * - rcx and r8..r11 may be clobbered, others are preserved.
- * - the arguments are cast to long and assigned into the target registers
- * which are then simply passed as registers to the asm code, so that we
- * don't have to experience issues with register constraints.
- * - the syscall number is always specified last in order to allow to force
- * some registers before (gcc refuses a %-register at the last position).
- */
-
-#define my_syscall0(num) \
-({ \
- long _ret; \
- register long _num asm("rax") = (num); \
- \
- asm volatile ( \
- "syscall\n" \
- : "=a" (_ret) \
- : "0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall1(num, arg1) \
-({ \
- long _ret; \
- register long _num asm("rax") = (num); \
- register long _arg1 asm("rdi") = (long)(arg1); \
- \
- asm volatile ( \
- "syscall\n" \
- : "=a" (_ret) \
- : "r"(_arg1), \
- "0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall2(num, arg1, arg2) \
-({ \
- long _ret; \
- register long _num asm("rax") = (num); \
- register long _arg1 asm("rdi") = (long)(arg1); \
- register long _arg2 asm("rsi") = (long)(arg2); \
- \
- asm volatile ( \
- "syscall\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), \
- "0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall3(num, arg1, arg2, arg3) \
-({ \
- long _ret; \
- register long _num asm("rax") = (num); \
- register long _arg1 asm("rdi") = (long)(arg1); \
- register long _arg2 asm("rsi") = (long)(arg2); \
- register long _arg3 asm("rdx") = (long)(arg3); \
- \
- asm volatile ( \
- "syscall\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
- "0"(_num) \
- : "rcx", "r8", "r9", "r10", "r11", "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall4(num, arg1, arg2, arg3, arg4) \
-({ \
- long _ret; \
- register long _num asm("rax") = (num); \
- register long _arg1 asm("rdi") = (long)(arg1); \
- register long _arg2 asm("rsi") = (long)(arg2); \
- register long _arg3 asm("rdx") = (long)(arg3); \
- register long _arg4 asm("r10") = (long)(arg4); \
- \
- asm volatile ( \
- "syscall\n" \
- : "=a" (_ret), "=r"(_arg4) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
- "0"(_num) \
- : "rcx", "r8", "r9", "r11", "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
-({ \
- long _ret; \
- register long _num asm("rax") = (num); \
- register long _arg1 asm("rdi") = (long)(arg1); \
- register long _arg2 asm("rsi") = (long)(arg2); \
- register long _arg3 asm("rdx") = (long)(arg3); \
- register long _arg4 asm("r10") = (long)(arg4); \
- register long _arg5 asm("r8") = (long)(arg5); \
- \
- asm volatile ( \
- "syscall\n" \
- : "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
- "0"(_num) \
- : "rcx", "r9", "r11", "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
-({ \
- long _ret; \
- register long _num asm("rax") = (num); \
- register long _arg1 asm("rdi") = (long)(arg1); \
- register long _arg2 asm("rsi") = (long)(arg2); \
- register long _arg3 asm("rdx") = (long)(arg3); \
- register long _arg4 asm("r10") = (long)(arg4); \
- register long _arg5 asm("r8") = (long)(arg5); \
- register long _arg6 asm("r9") = (long)(arg6); \
- \
- asm volatile ( \
- "syscall\n" \
- : "=a" (_ret), "=r"(_arg4), "=r"(_arg5) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
- "r"(_arg6), "0"(_num) \
- : "rcx", "r11", "memory", "cc" \
- ); \
- _ret; \
-})
-
-/* startup code */
-asm(".section .text\n"
- ".global _start\n"
- "_start:\n"
- "pop %rdi\n" // argc (first arg, %rdi)
- "mov %rsp, %rsi\n" // argv[] (second arg, %rsi)
- "lea 8(%rsi,%rdi,8),%rdx\n" // then a NULL then envp (third arg, %rdx)
- "and $-16, %rsp\n" // x86 ABI : esp must be 16-byte aligned when
- "sub $8, %rsp\n" // entering the callee
- "call main\n" // main() returns the status code, we'll exit with it.
- "movzb %al, %rdi\n" // retrieve exit code from 8 lower bits
- "mov $60, %rax\n" // NR_exit == 60
- "syscall\n" // really exit
- "hlt\n" // ensure it does not return
- "");
-
-/* fcntl / open */
-#define O_RDONLY 0
-#define O_WRONLY 1
-#define O_RDWR 2
-#define O_CREAT 0x40
-#define O_EXCL 0x80
-#define O_NOCTTY 0x100
-#define O_TRUNC 0x200
-#define O_APPEND 0x400
-#define O_NONBLOCK 0x800
-#define O_DIRECTORY 0x10000
-
-/* The struct returned by the stat() syscall, equivalent to stat64(). The
- * syscall returns 116 bytes and stops in the middle of __unused.
- */
-struct sys_stat_struct {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned long st_nlink;
- unsigned int st_mode;
- unsigned int st_uid;
-
- unsigned int st_gid;
- unsigned int __pad0;
- unsigned long st_rdev;
- long st_size;
- long st_blksize;
-
- long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
-
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- long __unused[3];
-};
-
-#elif defined(__i386__) || defined(__i486__) || defined(__i586__) || defined(__i686__)
-/* Syscalls for i386 :
- * - mostly similar to x86_64
- * - registers are 32-bit
- * - syscall number is passed in eax
- * - arguments are in ebx, ecx, edx, esi, edi, ebp respectively
- * - all registers are preserved (except eax of course)
- * - the system call is performed by calling int $0x80
- * - syscall return comes in eax
- * - the arguments are cast to long and assigned into the target registers
- * which are then simply passed as registers to the asm code, so that we
- * don't have to experience issues with register constraints.
- * - the syscall number is always specified last in order to allow to force
- * some registers before (gcc refuses a %-register at the last position).
- *
- * Also, i386 supports the old_select syscall if newselect is not available
- */
-#define __ARCH_WANT_SYS_OLD_SELECT
-
-#define my_syscall0(num) \
-({ \
- long _ret; \
- register long _num asm("eax") = (num); \
- \
- asm volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall1(num, arg1) \
-({ \
- long _ret; \
- register long _num asm("eax") = (num); \
- register long _arg1 asm("ebx") = (long)(arg1); \
- \
- asm volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall2(num, arg1, arg2) \
-({ \
- long _ret; \
- register long _num asm("eax") = (num); \
- register long _arg1 asm("ebx") = (long)(arg1); \
- register long _arg2 asm("ecx") = (long)(arg2); \
- \
- asm volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall3(num, arg1, arg2, arg3) \
-({ \
- long _ret; \
- register long _num asm("eax") = (num); \
- register long _arg1 asm("ebx") = (long)(arg1); \
- register long _arg2 asm("ecx") = (long)(arg2); \
- register long _arg3 asm("edx") = (long)(arg3); \
- \
- asm volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall4(num, arg1, arg2, arg3, arg4) \
-({ \
- long _ret; \
- register long _num asm("eax") = (num); \
- register long _arg1 asm("ebx") = (long)(arg1); \
- register long _arg2 asm("ecx") = (long)(arg2); \
- register long _arg3 asm("edx") = (long)(arg3); \
- register long _arg4 asm("esi") = (long)(arg4); \
- \
- asm volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
-({ \
- long _ret; \
- register long _num asm("eax") = (num); \
- register long _arg1 asm("ebx") = (long)(arg1); \
- register long _arg2 asm("ecx") = (long)(arg2); \
- register long _arg3 asm("edx") = (long)(arg3); \
- register long _arg4 asm("esi") = (long)(arg4); \
- register long _arg5 asm("edi") = (long)(arg5); \
- \
- asm volatile ( \
- "int $0x80\n" \
- : "=a" (_ret) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
- "0"(_num) \
- : "memory", "cc" \
- ); \
- _ret; \
-})
-
-/* startup code */
-asm(".section .text\n"
- ".global _start\n"
- "_start:\n"
- "pop %eax\n" // argc (first arg, %eax)
- "mov %esp, %ebx\n" // argv[] (second arg, %ebx)
- "lea 4(%ebx,%eax,4),%ecx\n" // then a NULL then envp (third arg, %ecx)
- "and $-16, %esp\n" // x86 ABI : esp must be 16-byte aligned when
- "push %ecx\n" // push all registers on the stack so that we
- "push %ebx\n" // support both regparm and plain stack modes
- "push %eax\n"
- "call main\n" // main() returns the status code in %eax
- "movzbl %al, %ebx\n" // retrieve exit code from lower 8 bits
- "movl $1, %eax\n" // NR_exit == 1
- "int $0x80\n" // exit now
- "hlt\n" // ensure it does not
- "");
-
-/* fcntl / open */
-#define O_RDONLY 0
-#define O_WRONLY 1
-#define O_RDWR 2
-#define O_CREAT 0x40
-#define O_EXCL 0x80
-#define O_NOCTTY 0x100
-#define O_TRUNC 0x200
-#define O_APPEND 0x400
-#define O_NONBLOCK 0x800
-#define O_DIRECTORY 0x10000
-
-/* The struct returned by the stat() syscall, 32-bit only, the syscall returns
- * exactly 56 bytes (stops before the unused array).
- */
-struct sys_stat_struct {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
-
- unsigned long st_rdev;
- unsigned long st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
-
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused[2];
-};
-
-#elif defined(__ARM_EABI__)
-/* Syscalls for ARM in ARM or Thumb modes :
- * - registers are 32-bit
- * - stack is 8-byte aligned
- * ( http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka4127.html)
- * - syscall number is passed in r7
- * - arguments are in r0, r1, r2, r3, r4, r5
- * - the system call is performed by calling svc #0
- * - syscall return comes in r0.
- * - only lr is clobbered.
- * - the arguments are cast to long and assigned into the target registers
- * which are then simply passed as registers to the asm code, so that we
- * don't have to experience issues with register constraints.
- * - the syscall number is always specified last in order to allow to force
- * some registers before (gcc refuses a %-register at the last position).
- *
- * Also, ARM supports the old_select syscall if newselect is not available
- */
-#define __ARCH_WANT_SYS_OLD_SELECT
-
-#define my_syscall0(num) \
-({ \
- register long _num asm("r7") = (num); \
- register long _arg1 asm("r0"); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_num) \
- : "memory", "cc", "lr" \
- ); \
- _arg1; \
-})
-
-#define my_syscall1(num, arg1) \
-({ \
- register long _num asm("r7") = (num); \
- register long _arg1 asm("r0") = (long)(arg1); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_arg1), \
- "r"(_num) \
- : "memory", "cc", "lr" \
- ); \
- _arg1; \
-})
-
-#define my_syscall2(num, arg1, arg2) \
-({ \
- register long _num asm("r7") = (num); \
- register long _arg1 asm("r0") = (long)(arg1); \
- register long _arg2 asm("r1") = (long)(arg2); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_arg1), "r"(_arg2), \
- "r"(_num) \
- : "memory", "cc", "lr" \
- ); \
- _arg1; \
-})
-
-#define my_syscall3(num, arg1, arg2, arg3) \
-({ \
- register long _num asm("r7") = (num); \
- register long _arg1 asm("r0") = (long)(arg1); \
- register long _arg2 asm("r1") = (long)(arg2); \
- register long _arg3 asm("r2") = (long)(arg3); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
- "r"(_num) \
- : "memory", "cc", "lr" \
- ); \
- _arg1; \
-})
-
-#define my_syscall4(num, arg1, arg2, arg3, arg4) \
-({ \
- register long _num asm("r7") = (num); \
- register long _arg1 asm("r0") = (long)(arg1); \
- register long _arg2 asm("r1") = (long)(arg2); \
- register long _arg3 asm("r2") = (long)(arg3); \
- register long _arg4 asm("r3") = (long)(arg4); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
- "r"(_num) \
- : "memory", "cc", "lr" \
- ); \
- _arg1; \
-})
-
-#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
-({ \
- register long _num asm("r7") = (num); \
- register long _arg1 asm("r0") = (long)(arg1); \
- register long _arg2 asm("r1") = (long)(arg2); \
- register long _arg3 asm("r2") = (long)(arg3); \
- register long _arg4 asm("r3") = (long)(arg4); \
- register long _arg5 asm("r4") = (long)(arg5); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r" (_arg1) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
- "r"(_num) \
- : "memory", "cc", "lr" \
- ); \
- _arg1; \
-})
-
-/* startup code */
-asm(".section .text\n"
- ".global _start\n"
- "_start:\n"
-#if defined(__THUMBEB__) || defined(__THUMBEL__)
- /* We enter here in 32-bit mode but if some previous functions were in
- * 16-bit mode, the assembler cannot know, so we need to tell it we're in
- * 32-bit now, then switch to 16-bit (is there a better way to do it than
- * adding 1 by hand ?) and tell the asm we're now in 16-bit mode so that
- * it generates correct instructions. Note that we do not support thumb1.
- */
- ".code 32\n"
- "add r0, pc, #1\n"
- "bx r0\n"
- ".code 16\n"
-#endif
- "pop {%r0}\n" // argc was in the stack
- "mov %r1, %sp\n" // argv = sp
- "add %r2, %r1, %r0, lsl #2\n" // envp = argv + 4*argc ...
- "add %r2, %r2, $4\n" // ... + 4
- "and %r3, %r1, $-8\n" // AAPCS : sp must be 8-byte aligned in the
- "mov %sp, %r3\n" // callee, an bl doesn't push (lr=pc)
- "bl main\n" // main() returns the status code, we'll exit with it.
- "and %r0, %r0, $0xff\n" // limit exit code to 8 bits
- "movs r7, $1\n" // NR_exit == 1
- "svc $0x00\n"
- "");
-
-/* fcntl / open */
-#define O_RDONLY 0
-#define O_WRONLY 1
-#define O_RDWR 2
-#define O_CREAT 0x40
-#define O_EXCL 0x80
-#define O_NOCTTY 0x100
-#define O_TRUNC 0x200
-#define O_APPEND 0x400
-#define O_NONBLOCK 0x800
-#define O_DIRECTORY 0x4000
-
-/* The struct returned by the stat() syscall, 32-bit only, the syscall returns
- * exactly 56 bytes (stops before the unused array). In big endian, the format
- * differs as devices are returned as short only.
- */
-struct sys_stat_struct {
-#if defined(__ARMEB__)
- unsigned short st_dev;
- unsigned short __pad1;
-#else
- unsigned long st_dev;
-#endif
- unsigned long st_ino;
- unsigned short st_mode;
- unsigned short st_nlink;
- unsigned short st_uid;
- unsigned short st_gid;
-#if defined(__ARMEB__)
- unsigned short st_rdev;
- unsigned short __pad2;
-#else
- unsigned long st_rdev;
-#endif
- unsigned long st_size;
- unsigned long st_blksize;
- unsigned long st_blocks;
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned long __unused[2];
-};
-
-#elif defined(__aarch64__)
-/* Syscalls for AARCH64 :
- * - registers are 64-bit
- * - stack is 16-byte aligned
- * - syscall number is passed in x8
- * - arguments are in x0, x1, x2, x3, x4, x5
- * - the system call is performed by calling svc 0
- * - syscall return comes in x0.
- * - the arguments are cast to long and assigned into the target registers
- * which are then simply passed as registers to the asm code, so that we
- * don't have to experience issues with register constraints.
- *
- * On aarch64, select() is not implemented so we have to use pselect6().
- */
-#define __ARCH_WANT_SYS_PSELECT6
-
-#define my_syscall0(num) \
-({ \
- register long _num asm("x8") = (num); \
- register long _arg1 asm("x0"); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall1(num, arg1) \
-({ \
- register long _num asm("x8") = (num); \
- register long _arg1 asm("x0") = (long)(arg1); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_arg1), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall2(num, arg1, arg2) \
-({ \
- register long _num asm("x8") = (num); \
- register long _arg1 asm("x0") = (long)(arg1); \
- register long _arg2 asm("x1") = (long)(arg2); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_arg1), "r"(_arg2), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall3(num, arg1, arg2, arg3) \
-({ \
- register long _num asm("x8") = (num); \
- register long _arg1 asm("x0") = (long)(arg1); \
- register long _arg2 asm("x1") = (long)(arg2); \
- register long _arg3 asm("x2") = (long)(arg3); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall4(num, arg1, arg2, arg3, arg4) \
-({ \
- register long _num asm("x8") = (num); \
- register long _arg1 asm("x0") = (long)(arg1); \
- register long _arg2 asm("x1") = (long)(arg2); \
- register long _arg3 asm("x2") = (long)(arg3); \
- register long _arg4 asm("x3") = (long)(arg4); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r"(_arg1) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
-({ \
- register long _num asm("x8") = (num); \
- register long _arg1 asm("x0") = (long)(arg1); \
- register long _arg2 asm("x1") = (long)(arg2); \
- register long _arg3 asm("x2") = (long)(arg3); \
- register long _arg4 asm("x3") = (long)(arg4); \
- register long _arg5 asm("x4") = (long)(arg5); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r" (_arg1) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
-({ \
- register long _num asm("x8") = (num); \
- register long _arg1 asm("x0") = (long)(arg1); \
- register long _arg2 asm("x1") = (long)(arg2); \
- register long _arg3 asm("x2") = (long)(arg3); \
- register long _arg4 asm("x3") = (long)(arg4); \
- register long _arg5 asm("x4") = (long)(arg5); \
- register long _arg6 asm("x5") = (long)(arg6); \
- \
- asm volatile ( \
- "svc #0\n" \
- : "=r" (_arg1) \
- : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
- "r"(_arg6), "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-/* startup code */
-asm(".section .text\n"
- ".global _start\n"
- "_start:\n"
- "ldr x0, [sp]\n" // argc (x0) was in the stack
- "add x1, sp, 8\n" // argv (x1) = sp
- "lsl x2, x0, 3\n" // envp (x2) = 8*argc ...
- "add x2, x2, 8\n" // + 8 (skip null)
- "add x2, x2, x1\n" // + argv
- "and sp, x1, -16\n" // sp must be 16-byte aligned in the callee
- "bl main\n" // main() returns the status code, we'll exit with it.
- "and x0, x0, 0xff\n" // limit exit code to 8 bits
- "mov x8, 93\n" // NR_exit == 93
- "svc #0\n"
- "");
-
-/* fcntl / open */
-#define O_RDONLY 0
-#define O_WRONLY 1
-#define O_RDWR 2
-#define O_CREAT 0x40
-#define O_EXCL 0x80
-#define O_NOCTTY 0x100
-#define O_TRUNC 0x200
-#define O_APPEND 0x400
-#define O_NONBLOCK 0x800
-#define O_DIRECTORY 0x4000
-
-/* The struct returned by the newfstatat() syscall. Differs slightly from the
- * x86_64's stat one by field ordering, so be careful.
- */
-struct sys_stat_struct {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
-
- unsigned long st_rdev;
- unsigned long __pad1;
- long st_size;
- int st_blksize;
- int __pad2;
-
- long st_blocks;
- long st_atime;
- unsigned long st_atime_nsec;
- long st_mtime;
-
- unsigned long st_mtime_nsec;
- long st_ctime;
- unsigned long st_ctime_nsec;
- unsigned int __unused[2];
-};
-
-#elif defined(__mips__) && defined(_ABIO32)
-/* Syscalls for MIPS ABI O32 :
- * - WARNING! there's always a delayed slot!
- * - WARNING again, the syntax is different, registers take a '$' and numbers
- * do not.
- * - registers are 32-bit
- * - stack is 8-byte aligned
- * - syscall number is passed in v0 (starts at 0xfa0).
- * - arguments are in a0, a1, a2, a3, then the stack. The caller needs to
- * leave some room in the stack for the callee to save a0..a3 if needed.
- * - Many registers are clobbered, in fact only a0..a2 and s0..s8 are
- * preserved. See: https://www.linux-mips.org/wiki/Syscall as well as
- * scall32-o32.S in the kernel sources.
- * - the system call is performed by calling "syscall"
- * - syscall return comes in v0, and register a3 needs to be checked to know
- * if an error occured, in which case errno is in v0.
- * - the arguments are cast to long and assigned into the target registers
- * which are then simply passed as registers to the asm code, so that we
- * don't have to experience issues with register constraints.
- */
-
-#define my_syscall0(num) \
-({ \
- register long _num asm("v0") = (num); \
- register long _arg4 asm("a3"); \
- \
- asm volatile ( \
- "addiu $sp, $sp, -32\n" \
- "syscall\n" \
- "addiu $sp, $sp, 32\n" \
- : "=r"(_num), "=r"(_arg4) \
- : "r"(_num) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
- ); \
- _arg4 ? -_num : _num; \
-})
-
-#define my_syscall1(num, arg1) \
-({ \
- register long _num asm("v0") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg4 asm("a3"); \
- \
- asm volatile ( \
- "addiu $sp, $sp, -32\n" \
- "syscall\n" \
- "addiu $sp, $sp, 32\n" \
- : "=r"(_num), "=r"(_arg4) \
- : "0"(_num), \
- "r"(_arg1) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
- ); \
- _arg4 ? -_num : _num; \
-})
-
-#define my_syscall2(num, arg1, arg2) \
-({ \
- register long _num asm("v0") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- register long _arg4 asm("a3"); \
- \
- asm volatile ( \
- "addiu $sp, $sp, -32\n" \
- "syscall\n" \
- "addiu $sp, $sp, 32\n" \
- : "=r"(_num), "=r"(_arg4) \
- : "0"(_num), \
- "r"(_arg1), "r"(_arg2) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
- ); \
- _arg4 ? -_num : _num; \
-})
-
-#define my_syscall3(num, arg1, arg2, arg3) \
-({ \
- register long _num asm("v0") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- register long _arg3 asm("a2") = (long)(arg3); \
- register long _arg4 asm("a3"); \
- \
- asm volatile ( \
- "addiu $sp, $sp, -32\n" \
- "syscall\n" \
- "addiu $sp, $sp, 32\n" \
- : "=r"(_num), "=r"(_arg4) \
- : "0"(_num), \
- "r"(_arg1), "r"(_arg2), "r"(_arg3) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
- ); \
- _arg4 ? -_num : _num; \
-})
-
-#define my_syscall4(num, arg1, arg2, arg3, arg4) \
-({ \
- register long _num asm("v0") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- register long _arg3 asm("a2") = (long)(arg3); \
- register long _arg4 asm("a3") = (long)(arg4); \
- \
- asm volatile ( \
- "addiu $sp, $sp, -32\n" \
- "syscall\n" \
- "addiu $sp, $sp, 32\n" \
- : "=r" (_num), "=r"(_arg4) \
- : "0"(_num), \
- "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
- ); \
- _arg4 ? -_num : _num; \
-})
-
-#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
-({ \
- register long _num asm("v0") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- register long _arg3 asm("a2") = (long)(arg3); \
- register long _arg4 asm("a3") = (long)(arg4); \
- register long _arg5 = (long)(arg5); \
- \
- asm volatile ( \
- "addiu $sp, $sp, -32\n" \
- "sw %7, 16($sp)\n" \
- "syscall\n " \
- "addiu $sp, $sp, 32\n" \
- : "=r" (_num), "=r"(_arg4) \
- : "0"(_num), \
- "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5) \
- : "memory", "cc", "at", "v1", "hi", "lo", \
- "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9" \
- ); \
- _arg4 ? -_num : _num; \
-})
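
As a quick illustration of the o32 convention described above, a minimal sketch (hypothetical helper, assuming these macros and the o32 syscall numbers, e.g. __NR_write == 4004) of a write to stdout; the a3 error flag has already been folded into a negative return value by the macro:

static long o32_write_hello(void)
{
	static const char msg[] = "hello\n";

	/* v0 <- __NR_write, a0..a2 <- fd/buf/count, result or -errno returned */
	return my_syscall3(__NR_write, 1, msg, sizeof(msg) - 1);
}
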
-
-/* startup code, note that it's called __start on MIPS */
-asm(".section .text\n"
- ".set nomips16\n"
- ".global __start\n"
- ".set noreorder\n"
- ".option pic0\n"
- ".ent __start\n"
- "__start:\n"
- "lw $a0,($sp)\n" // argc was in the stack
- "addiu $a1, $sp, 4\n" // argv = sp + 4
- "sll $a2, $a0, 2\n" // a2 = argc * 4
- "add $a2, $a2, $a1\n" // envp = argv + 4*argc ...
- "addiu $a2, $a2, 4\n" // ... + 4
- "li $t0, -8\n"
- "and $sp, $sp, $t0\n" // sp must be 8-byte aligned
- "addiu $sp,$sp,-16\n" // the callee expects to save a0..a3 there!
- "jal main\n" // main() returns the status code, we'll exit with it.
- "nop\n" // delayed slot
- "and $a0, $v0, 0xff\n" // limit exit code to 8 bits
- "li $v0, 4001\n" // NR_exit == 4001
- "syscall\n"
- ".end __start\n"
- "");
-
-/* fcntl / open */
-#define O_RDONLY 0
-#define O_WRONLY 1
-#define O_RDWR 2
-#define O_APPEND 0x0008
-#define O_NONBLOCK 0x0080
-#define O_CREAT 0x0100
-#define O_TRUNC 0x0200
-#define O_EXCL 0x0400
-#define O_NOCTTY 0x0800
-#define O_DIRECTORY 0x10000
-
-/* The struct returned by the stat() syscall on MIPS o32. The syscall returns
- * 88 bytes.
- */
-struct sys_stat_struct {
- unsigned int st_dev;
- long st_pad1[3];
- unsigned long st_ino;
- unsigned int st_mode;
- unsigned int st_nlink;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int st_rdev;
- long st_pad2[2];
- long st_size;
- long st_pad3;
- long st_atime;
- long st_atime_nsec;
- long st_mtime;
- long st_mtime_nsec;
- long st_ctime;
- long st_ctime_nsec;
- long st_blksize;
- long st_blocks;
- long st_pad4[14];
-};
-
-#elif defined(__riscv)
-
-#if __riscv_xlen == 64
-#define PTRLOG "3"
-#define SZREG "8"
-#elif __riscv_xlen == 32
-#define PTRLOG "2"
-#define SZREG "4"
-#endif
-
-/* Syscalls for RISC-V:
- * - stack is 16-byte aligned
- * - syscall number is passed in a7
- * - arguments are in a0, a1, a2, a3, a4, a5
- * - the system call is performed by calling ecall
- * - syscall return comes in a0
- * - the arguments are cast to long and assigned into the target
- * registers which are then simply passed as registers to the asm code,
- * so that we don't have to experience issues with register constraints.
- */
-
-#define my_syscall0(num) \
-({ \
- register long _num asm("a7") = (num); \
- register long _arg1 asm("a0"); \
- \
- asm volatile ( \
- "ecall\n\t" \
- : "=r"(_arg1) \
- : "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall1(num, arg1) \
-({ \
- register long _num asm("a7") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- \
- asm volatile ( \
- "ecall\n" \
- : "+r"(_arg1) \
- : "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall2(num, arg1, arg2) \
-({ \
- register long _num asm("a7") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- \
- asm volatile ( \
- "ecall\n" \
- : "+r"(_arg1) \
- : "r"(_arg2), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall3(num, arg1, arg2, arg3) \
-({ \
- register long _num asm("a7") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- register long _arg3 asm("a2") = (long)(arg3); \
- \
- asm volatile ( \
- "ecall\n\t" \
- : "+r"(_arg1) \
- : "r"(_arg2), "r"(_arg3), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall4(num, arg1, arg2, arg3, arg4) \
-({ \
- register long _num asm("a7") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- register long _arg3 asm("a2") = (long)(arg3); \
- register long _arg4 asm("a3") = (long)(arg4); \
- \
- asm volatile ( \
- "ecall\n" \
- : "+r"(_arg1) \
- : "r"(_arg2), "r"(_arg3), "r"(_arg4), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
-({ \
- register long _num asm("a7") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- register long _arg3 asm("a2") = (long)(arg3); \
- register long _arg4 asm("a3") = (long)(arg4); \
- register long _arg5 asm("a4") = (long)(arg5); \
- \
- asm volatile ( \
- "ecall\n" \
- : "+r"(_arg1) \
- : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
-
-#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
-({ \
- register long _num asm("a7") = (num); \
- register long _arg1 asm("a0") = (long)(arg1); \
- register long _arg2 asm("a1") = (long)(arg2); \
- register long _arg3 asm("a2") = (long)(arg3); \
- register long _arg4 asm("a3") = (long)(arg4); \
- register long _arg5 asm("a4") = (long)(arg5); \
- register long _arg6 asm("a5") = (long)(arg6); \
- \
- asm volatile ( \
- "ecall\n" \
- : "+r"(_arg1) \
- : "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), "r"(_arg6), \
- "r"(_num) \
- : "memory", "cc" \
- ); \
- _arg1; \
-})
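
For comparison with the MIPS variant above, a minimal sketch (hypothetical helper, assuming the generic syscall table used on riscv, e.g. __NR_getpid == 172) showing that on RISC-V the value coming back in a0 is already either the result or -errno, with no separate error flag to test:

static long riscv_getpid(void)
{
	/* a7 <- __NR_getpid, ecall, a0 holds the pid (or -errno) on return */
	return my_syscall0(__NR_getpid);
}
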
-
-/* startup code */
-asm(".section .text\n"
- ".global _start\n"
- "_start:\n"
- ".option push\n"
- ".option norelax\n"
- "lla gp, __global_pointer$\n"
- ".option pop\n"
- "ld a0, 0(sp)\n" // argc (a0) was in the stack
- "add a1, sp, "SZREG"\n" // argv (a1) = sp
- "slli a2, a0, "PTRLOG"\n" // envp (a2) = SZREG*argc ...
- "add a2, a2, "SZREG"\n" // + SZREG (skip null)
- "add a2,a2,a1\n" // + argv
- "andi sp,a1,-16\n" // sp must be 16-byte aligned
- "call main\n" // main() returns the status code, we'll exit with it.
- "andi a0, a0, 0xff\n" // limit exit code to 8 bits
- "li a7, 93\n" // NR_exit == 93
- "ecall\n"
- "");
-
-/* fcntl / open */
-#define O_RDONLY 0
-#define O_WRONLY 1
-#define O_RDWR 2
-#define O_CREAT 0x100
-#define O_EXCL 0x200
-#define O_NOCTTY 0x400
-#define O_TRUNC 0x1000
-#define O_APPEND 0x2000
-#define O_NONBLOCK 0x4000
-#define O_DIRECTORY 0x200000
-
-struct sys_stat_struct {
- unsigned long st_dev; /* Device. */
- unsigned long st_ino; /* File serial number. */
- unsigned int st_mode; /* File mode. */
- unsigned int st_nlink; /* Link count. */
- unsigned int st_uid; /* User ID of the file's owner. */
- unsigned int st_gid; /* Group ID of the file's group. */
- unsigned long st_rdev; /* Device number, if device. */
- unsigned long __pad1;
- long st_size; /* Size of file, in bytes. */
- int st_blksize; /* Optimal block size for I/O. */
- int __pad2;
- long st_blocks; /* Number of 512-byte blocks allocated. */
- long st_atime; /* Time of last access. */
- unsigned long st_atime_nsec;
- long st_mtime; /* Time of last modification. */
- unsigned long st_mtime_nsec;
- long st_ctime; /* Time of last status change. */
- unsigned long st_ctime_nsec;
- unsigned int __unused4;
- unsigned int __unused5;
-};
-
-#endif
-
-
-/* Below are the C functions used to declare the raw syscalls. They try to be
- * architecture-agnostic, and return either a success value or -errno.
- * Declaring them static leads to them being inlined in most cases, but it is
- * still possible to reference them by a pointer if needed.
- */
-static __attribute__((unused))
-void *sys_brk(void *addr)
-{
- return (void *)my_syscall1(__NR_brk, addr);
-}
-
-static __attribute__((noreturn,unused))
-void sys_exit(int status)
-{
- my_syscall1(__NR_exit, status & 255);
- while(1); // silence the "noreturn" warnings.
-}
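
A short usage sketch of the convention stated above (success value or -errno, errno untouched), using sys_chdir() as declared just below; the helper name is only illustrative:

static int try_chdir_tmp(void)
{
	int ret = sys_chdir("/tmp");

	if (ret < 0)		/* ret is -errno here, e.g. -ENOENT */
		return -ret;	/* hand the positive error back to the caller */
	return 0;
}
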
-
-static __attribute__((unused))
-int sys_chdir(const char *path)
-{
- return my_syscall1(__NR_chdir, path);
-}
-
-static __attribute__((unused))
-int sys_chmod(const char *path, mode_t mode)
-{
-#ifdef __NR_fchmodat
- return my_syscall4(__NR_fchmodat, AT_FDCWD, path, mode, 0);
-#else
- return my_syscall2(__NR_chmod, path, mode);
-#endif
-}
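
The #ifdef pattern above (prefer the *at syscall with AT_FDCWD, fall back to the legacy one) recurs in most of the wrappers below, because recent ABIs such as arm64 and riscv only define the *at variants. A hedged sketch of what the preprocessor keeps on such an ABI:

/* on an ABI that only provides fchmodat (e.g. arm64), sys_chmod() reduces to: */
static int sys_chmod_newabi_sketch(const char *path, mode_t mode)
{
	return my_syscall4(__NR_fchmodat, AT_FDCWD, path, mode, 0);
}
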
-
-static __attribute__((unused))
-int sys_chown(const char *path, uid_t owner, gid_t group)
-{
-#ifdef __NR_fchownat
- return my_syscall5(__NR_fchownat, AT_FDCWD, path, owner, group, 0);
-#else
- return my_syscall3(__NR_chown, path, owner, group);
-#endif
-}
-
-static __attribute__((unused))
-int sys_chroot(const char *path)
-{
- return my_syscall1(__NR_chroot, path);
-}
-
-static __attribute__((unused))
-int sys_close(int fd)
-{
- return my_syscall1(__NR_close, fd);
-}
-
-static __attribute__((unused))
-int sys_dup(int fd)
-{
- return my_syscall1(__NR_dup, fd);
-}
-
-static __attribute__((unused))
-int sys_dup2(int old, int new)
-{
- return my_syscall2(__NR_dup2, old, new);
-}
-
-static __attribute__((unused))
-int sys_execve(const char *filename, char *const argv[], char *const envp[])
-{
- return my_syscall3(__NR_execve, filename, argv, envp);
-}
-
-static __attribute__((unused))
-pid_t sys_fork(void)
-{
- return my_syscall0(__NR_fork);
-}
-
-static __attribute__((unused))
-int sys_fsync(int fd)
-{
- return my_syscall1(__NR_fsync, fd);
-}
-
-static __attribute__((unused))
-int sys_getdents64(int fd, struct linux_dirent64 *dirp, int count)
-{
- return my_syscall3(__NR_getdents64, fd, dirp, count);
-}
-
-static __attribute__((unused))
-pid_t sys_getpgrp(void)
-{
- return my_syscall0(__NR_getpgrp);
-}
-
-static __attribute__((unused))
-pid_t sys_getpid(void)
-{
- return my_syscall0(__NR_getpid);
-}
-
-static __attribute__((unused))
-int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
-{
- return my_syscall2(__NR_gettimeofday, tv, tz);
-}
-
-static __attribute__((unused))
-int sys_ioctl(int fd, unsigned long req, void *value)
-{
- return my_syscall3(__NR_ioctl, fd, req, value);
-}
-
-static __attribute__((unused))
-int sys_kill(pid_t pid, int signal)
-{
- return my_syscall2(__NR_kill, pid, signal);
-}
-
-static __attribute__((unused))
-int sys_link(const char *old, const char *new)
-{
-#ifdef __NR_linkat
- return my_syscall5(__NR_linkat, AT_FDCWD, old, AT_FDCWD, new, 0);
-#else
- return my_syscall2(__NR_link, old, new);
-#endif
-}
-
-static __attribute__((unused))
-off_t sys_lseek(int fd, off_t offset, int whence)
-{
- return my_syscall3(__NR_lseek, fd, offset, whence);
-}
-
-static __attribute__((unused))
-int sys_mkdir(const char *path, mode_t mode)
-{
-#ifdef __NR_mkdirat
- return my_syscall3(__NR_mkdirat, AT_FDCWD, path, mode);
-#else
- return my_syscall2(__NR_mkdir, path, mode);
-#endif
-}
-
-static __attribute__((unused))
-long sys_mknod(const char *path, mode_t mode, dev_t dev)
-{
-#ifdef __NR_mknodat
- return my_syscall4(__NR_mknodat, AT_FDCWD, path, mode, dev);
-#else
- return my_syscall3(__NR_mknod, path, mode, dev);
-#endif
-}
-
-static __attribute__((unused))
-int sys_mount(const char *src, const char *tgt, const char *fst,
- unsigned long flags, const void *data)
-{
- return my_syscall5(__NR_mount, src, tgt, fst, flags, data);
-}
-
-static __attribute__((unused))
-int sys_open(const char *path, int flags, mode_t mode)
-{
-#ifdef __NR_openat
- return my_syscall4(__NR_openat, AT_FDCWD, path, flags, mode);
-#else
- return my_syscall3(__NR_open, path, flags, mode);
-#endif
-}
-
-static __attribute__((unused))
-int sys_pivot_root(const char *new, const char *old)
-{
- return my_syscall2(__NR_pivot_root, new, old);
-}
-
-static __attribute__((unused))
-int sys_poll(struct pollfd *fds, int nfds, int timeout)
-{
- return my_syscall3(__NR_poll, fds, nfds, timeout);
-}
-
-static __attribute__((unused))
-ssize_t sys_read(int fd, void *buf, size_t count)
-{
- return my_syscall3(__NR_read, fd, buf, count);
-}
-
-static __attribute__((unused))
-ssize_t sys_reboot(int magic1, int magic2, int cmd, void *arg)
-{
- return my_syscall4(__NR_reboot, magic1, magic2, cmd, arg);
-}
-
-static __attribute__((unused))
-int sys_sched_yield(void)
-{
- return my_syscall0(__NR_sched_yield);
-}
-
-static __attribute__((unused))
-int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
-{
-#if defined(__ARCH_WANT_SYS_OLD_SELECT) && !defined(__NR__newselect)
- struct sel_arg_struct {
- unsigned long n;
- fd_set *r, *w, *e;
- struct timeval *t;
- } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
- return my_syscall1(__NR_select, &arg);
-#elif defined(__ARCH_WANT_SYS_PSELECT6) && defined(__NR_pselect6)
- struct timespec t;
-
- if (timeout) {
- t.tv_sec = timeout->tv_sec;
- t.tv_nsec = timeout->tv_usec * 1000;
- }
- return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
-#else
-#ifndef __NR__newselect
-#define __NR__newselect __NR_select
-#endif
- return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout);
-#endif
-}
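
Whichever of the three variants above ends up being compiled in, the caller-visible behaviour is the classic select() one. A small usage sketch (hypothetical helper, relying on the fd_set helpers defined further down) waiting up to one second for stdin to become readable:

static int stdin_readable_within_1s(void)
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(0, &rfds);
	/* > 0: fd 0 is readable, 0: timeout, < 0: -errno */
	return sys_select(1, &rfds, NULL, NULL, &tv);
}
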
-
-static __attribute__((unused))
-int sys_setpgid(pid_t pid, pid_t pgid)
-{
- return my_syscall2(__NR_setpgid, pid, pgid);
-}
-
-static __attribute__((unused))
-pid_t sys_setsid(void)
-{
- return my_syscall0(__NR_setsid);
-}
-
-static __attribute__((unused))
-int sys_stat(const char *path, struct stat *buf)
-{
- struct sys_stat_struct stat;
- long ret;
-
-#ifdef __NR_newfstatat
- /* only solution for arm64 */
- ret = my_syscall4(__NR_newfstatat, AT_FDCWD, path, &stat, 0);
-#else
- ret = my_syscall2(__NR_stat, path, &stat);
-#endif
- buf->st_dev = stat.st_dev;
- buf->st_ino = stat.st_ino;
- buf->st_mode = stat.st_mode;
- buf->st_nlink = stat.st_nlink;
- buf->st_uid = stat.st_uid;
- buf->st_gid = stat.st_gid;
- buf->st_rdev = stat.st_rdev;
- buf->st_size = stat.st_size;
- buf->st_blksize = stat.st_blksize;
- buf->st_blocks = stat.st_blocks;
- buf->st_atime = stat.st_atime;
- buf->st_mtime = stat.st_mtime;
- buf->st_ctime = stat.st_ctime;
- return ret;
-}
-
-
-static __attribute__((unused))
-int sys_symlink(const char *old, const char *new)
-{
-#ifdef __NR_symlinkat
- return my_syscall3(__NR_symlinkat, old, AT_FDCWD, new);
-#else
- return my_syscall2(__NR_symlink, old, new);
-#endif
-}
-
-static __attribute__((unused))
-mode_t sys_umask(mode_t mode)
-{
- return my_syscall1(__NR_umask, mode);
-}
-
-static __attribute__((unused))
-int sys_umount2(const char *path, int flags)
-{
- return my_syscall2(__NR_umount2, path, flags);
-}
-
-static __attribute__((unused))
-int sys_unlink(const char *path)
-{
-#ifdef __NR_unlinkat
- return my_syscall3(__NR_unlinkat, AT_FDCWD, path, 0);
-#else
- return my_syscall1(__NR_unlink, path);
-#endif
-}
-
-static __attribute__((unused))
-pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage)
-{
- return my_syscall4(__NR_wait4, pid, status, options, rusage);
-}
-
-static __attribute__((unused))
-pid_t sys_waitpid(pid_t pid, int *status, int options)
-{
- return sys_wait4(pid, status, options, 0);
-}
-
-static __attribute__((unused))
-pid_t sys_wait(int *status)
-{
- return sys_waitpid(-1, status, 0);
-}
-
-static __attribute__((unused))
-ssize_t sys_write(int fd, const void *buf, size_t count)
-{
- return my_syscall3(__NR_write, fd, buf, count);
-}
-
-
-/* Below are the libc-compatible syscall wrappers, which return the expected
- * value or -1 and set errno on error. They rely on the sys_* functions above.
- * They are likewise marked static so that it is still possible to assign
- * pointers to them if needed.
- */
-
-static __attribute__((unused))
-int brk(void *addr)
-{
- void *ret = sys_brk(addr);
-
- if (!ret) {
- SET_ERRNO(ENOMEM);
- return -1;
- }
- return 0;
-}
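
All the wrappers in this block follow the shape of brk() above: call the sys_* variant and, on a negative return, store the positive error via SET_ERRNO() and return -1. A brief usage sketch under that convention (illustrative helper, assuming errno is available as usual in nolibc):

static int enter_tmp(void)
{
	if (chdir("/tmp") == -1)
		return errno;	/* e.g. ENOENT, only meaningful after -1 */
	return 0;
}
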
-
-static __attribute__((noreturn,unused))
-void exit(int status)
-{
- sys_exit(status);
-}
-
-static __attribute__((unused))
-int chdir(const char *path)
-{
- int ret = sys_chdir(path);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int chmod(const char *path, mode_t mode)
-{
- int ret = sys_chmod(path, mode);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int chown(const char *path, uid_t owner, gid_t group)
-{
- int ret = sys_chown(path, owner, group);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int chroot(const char *path)
-{
- int ret = sys_chroot(path);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int close(int fd)
-{
- int ret = sys_close(fd);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int dup2(int old, int new)
-{
- int ret = sys_dup2(old, new);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int execve(const char *filename, char *const argv[], char *const envp[])
-{
- int ret = sys_execve(filename, argv, envp);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-pid_t fork(void)
-{
- pid_t ret = sys_fork();
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int fsync(int fd)
-{
- int ret = sys_fsync(fd);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int getdents64(int fd, struct linux_dirent64 *dirp, int count)
-{
- int ret = sys_getdents64(fd, dirp, count);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-pid_t getpgrp(void)
-{
- pid_t ret = sys_getpgrp();
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-pid_t getpid(void)
-{
- pid_t ret = sys_getpid();
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int gettimeofday(struct timeval *tv, struct timezone *tz)
-{
- int ret = sys_gettimeofday(tv, tz);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int ioctl(int fd, unsigned long req, void *value)
-{
- int ret = sys_ioctl(fd, req, value);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int kill(pid_t pid, int signal)
-{
- int ret = sys_kill(pid, signal);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int link(const char *old, const char *new)
-{
- int ret = sys_link(old, new);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-off_t lseek(int fd, off_t offset, int whence)
-{
- off_t ret = sys_lseek(fd, offset, whence);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int mkdir(const char *path, mode_t mode)
-{
- int ret = sys_mkdir(path, mode);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int mknod(const char *path, mode_t mode, dev_t dev)
-{
- int ret = sys_mknod(path, mode, dev);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int mount(const char *src, const char *tgt,
- const char *fst, unsigned long flags,
- const void *data)
-{
- int ret = sys_mount(src, tgt, fst, flags, data);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int open(const char *path, int flags, mode_t mode)
-{
- int ret = sys_open(path, flags, mode);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int pivot_root(const char *new, const char *old)
-{
- int ret = sys_pivot_root(new, old);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int poll(struct pollfd *fds, int nfds, int timeout)
-{
- int ret = sys_poll(fds, nfds, timeout);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-ssize_t read(int fd, void *buf, size_t count)
-{
- ssize_t ret = sys_read(fd, buf, count);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int reboot(int cmd)
-{
- int ret = sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, 0);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-void *sbrk(intptr_t inc)
-{
- void *ret;
-
- /* first call to find current end */
- if ((ret = sys_brk(0)) && (sys_brk(ret + inc) == ret + inc))
- return ret + inc;
-
- SET_ERRNO(ENOMEM);
- return (void *)-1;
-}
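
sbrk() above works by probing the current break with sys_brk(0) and then attempting to move it; a short usage sketch (hypothetical helper) growing the heap by one page:

static void *grab_one_page(void)
{
	void *p = sbrk(4096);	/* returns the old break on success */

	return (p == (void *)-1) ? NULL : p;
}
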
-
-static __attribute__((unused))
-int sched_yield(void)
-{
- int ret = sys_sched_yield();
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
-{
- int ret = sys_select(nfds, rfds, wfds, efds, timeout);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int setpgid(pid_t pid, pid_t pgid)
-{
- int ret = sys_setpgid(pid, pgid);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-pid_t setsid(void)
-{
- pid_t ret = sys_setsid();
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-unsigned int sleep(unsigned int seconds)
-{
- struct timeval my_timeval = { seconds, 0 };
-
- if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
- return my_timeval.tv_sec + !!my_timeval.tv_usec;
- else
- return 0;
-}
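
sleep() above is simply select() with no file descriptors and a timeout; when the wait is interrupted, the time left in the (kernel-updated) timeval is reported back. A usage sketch (hypothetical helper) that keeps retrying with the remainder:

static void sleep_fully(unsigned int secs)
{
	while ((secs = sleep(secs)))
		;	/* interrupted: go back to sleep for what is left */
}
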
-
-static __attribute__((unused))
-int stat(const char *path, struct stat *buf)
-{
- int ret = sys_stat(path, buf);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int symlink(const char *old, const char *new)
-{
- int ret = sys_symlink(old, new);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int tcsetpgrp(int fd, pid_t pid)
-{
- return ioctl(fd, TIOCSPGRP, &pid);
-}
-
-static __attribute__((unused))
-mode_t umask(mode_t mode)
-{
- return sys_umask(mode);
-}
-
-static __attribute__((unused))
-int umount2(const char *path, int flags)
-{
- int ret = sys_umount2(path, flags);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-int unlink(const char *path)
-{
- int ret = sys_unlink(path);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage)
-{
- pid_t ret = sys_wait4(pid, status, options, rusage);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-pid_t waitpid(pid_t pid, int *status, int options)
-{
- pid_t ret = sys_waitpid(pid, status, options);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-pid_t wait(int *status)
-{
- pid_t ret = sys_wait(status);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-static __attribute__((unused))
-ssize_t write(int fd, const void *buf, size_t count)
-{
- ssize_t ret = sys_write(fd, buf, count);
-
- if (ret < 0) {
- SET_ERRNO(-ret);
- ret = -1;
- }
- return ret;
-}
-
-/* Below are some size-optimized reimplementations of a few common str* and
- * mem* functions. They are marked static, except for memcpy() and raise(),
- * which are used by libgcc on ARM; those are marked weak instead, in order
- * not to cause an error when building a program made of multiple files (not
- * recommended).
- */
-
-static __attribute__((unused))
-void *memmove(void *dst, const void *src, size_t len)
-{
- ssize_t pos = (dst <= src) ? -1 : (long)len;
- void *ret = dst;
-
- while (len--) {
- pos += (dst <= src) ? 1 : -1;
- ((char *)dst)[pos] = ((char *)src)[pos];
- }
- return ret;
-}
-
-static __attribute__((unused))
-void *memset(void *dst, int b, size_t len)
-{
- char *p = dst;
-
- while (len--)
- *(p++) = b;
- return dst;
-}
-
-static __attribute__((unused))
-int memcmp(const void *s1, const void *s2, size_t n)
-{
- size_t ofs = 0;
- char c1 = 0;
-
- while (ofs < n && !(c1 = ((char *)s1)[ofs] - ((char *)s2)[ofs])) {
- ofs++;
- }
- return c1;
-}
-
-static __attribute__((unused))
-char *strcpy(char *dst, const char *src)
-{
- char *ret = dst;
-
- while ((*dst++ = *src++));
- return ret;
-}
-
-static __attribute__((unused))
-char *strchr(const char *s, int c)
-{
- while (*s) {
- if (*s == (char)c)
- return (char *)s;
- s++;
- }
- return NULL;
-}
-
-static __attribute__((unused))
-char *strrchr(const char *s, int c)
-{
- const char *ret = NULL;
-
- while (*s) {
- if (*s == (char)c)
- ret = s;
- s++;
- }
- return (char *)ret;
-}
-
-static __attribute__((unused))
-size_t nolibc_strlen(const char *str)
-{
- size_t len;
-
- for (len = 0; str[len]; len++);
- return len;
-}
-
-#define strlen(str) ({ \
- __builtin_constant_p((str)) ? \
- __builtin_strlen((str)) : \
- nolibc_strlen((str)); \
-})
-
-static __attribute__((unused))
-int isdigit(int c)
-{
- return (unsigned int)(c - '0') <= 9;
-}
-
-static __attribute__((unused))
-long atol(const char *s)
-{
- unsigned long ret = 0;
- unsigned long d;
- int neg = 0;
-
- if (*s == '-') {
- neg = 1;
- s++;
- }
-
- while (1) {
- d = (*s++) - '0';
- if (d > 9)
- break;
- ret *= 10;
- ret += d;
- }
-
- return neg ? -ret : ret;
-}
-
-static __attribute__((unused))
-int atoi(const char *s)
-{
- return atol(s);
-}
-
-static __attribute__((unused))
-const char *ltoa(long in)
-{
- /* large enough for -9223372036854775808 */
- static char buffer[21];
- char *pos = buffer + sizeof(buffer) - 1;
- int neg = in < 0;
- unsigned long n = neg ? -in : in;
-
- *pos-- = '\0';
- do {
- *pos-- = '0' + n % 10;
- n /= 10;
- if (pos < buffer)
- return pos + 1;
- } while (n);
-
- if (neg)
- *pos-- = '-';
- return pos + 1;
-}
-
-__attribute__((weak,unused))
-void *memcpy(void *dst, const void *src, size_t len)
-{
- return memmove(dst, src, len);
-}
-
-/* needed by libgcc for divide by zero */
-__attribute__((weak,unused))
-int raise(int signal)
-{
- return kill(getpid(), signal);
-}
-
-/* Here come a few helper functions */
-
-static __attribute__((unused))
-void FD_ZERO(fd_set *set)
-{
- memset(set, 0, sizeof(*set));
-}
-
-static __attribute__((unused))
-void FD_SET(int fd, fd_set *set)
-{
- if (fd < 0 || fd >= FD_SETSIZE)
- return;
- set->fd32[fd / 32] |= 1 << (fd & 31);
-}
-
-/* WARNING: it only handles the first 4096 majors and the first 256 minors */
-static __attribute__((unused))
-dev_t makedev(unsigned int major, unsigned int minor)
-{
- return ((major & 0xfff) << 8) | (minor & 0xff);
-}
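
With the legacy 16-bit encoding above, the major number lands in bits 8..19 and the minor in bits 0..7, hence the 4096/256 limits. A quick worked example (hypothetical helper):

static dev_t null_device(void)
{
	/* the usual /dev/null numbers: major 1, minor 3 */
	return makedev(1, 3);	/* (1 << 8) | 3 == 0x103 */
}
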
+#endif /* _NOLIBC_H */
diff --git a/tools/include/nolibc/signal.h b/tools/include/nolibc/signal.h
new file mode 100644
index 000000000000..137552216e46
--- /dev/null
+++ b/tools/include/nolibc/signal.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * signal function definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_SIGNAL_H
+#define _NOLIBC_SIGNAL_H
+
+#include "std.h"
+#include "arch.h"
+#include "types.h"
+#include "sys.h"
+
+/* This one is not marked static as it's needed by libgcc for divide by zero */
+__attribute__((weak,unused,section(".text.nolibc_raise")))
+int raise(int signal)
+{
+ return sys_kill(sys_getpid(), signal);
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_SIGNAL_H */
diff --git a/tools/include/nolibc/stackprotector.h b/tools/include/nolibc/stackprotector.h
new file mode 100644
index 000000000000..d119cbbbc256
--- /dev/null
+++ b/tools/include/nolibc/stackprotector.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Stack protector support for NOLIBC
+ * Copyright (C) 2023 Thomas Weißschuh <linux@weissschuh.net>
+ */
+
+#ifndef _NOLIBC_STACKPROTECTOR_H
+#define _NOLIBC_STACKPROTECTOR_H
+
+#include "arch.h"
+
+#if defined(NOLIBC_STACKPROTECTOR)
+
+#if !defined(__ARCH_SUPPORTS_STACK_PROTECTOR)
+#error "nolibc does not support stack protectors on this arch"
+#endif
+
+#include "sys.h"
+#include "stdlib.h"
+
+/* The functions in this header use raw syscall macros so that they do not
+ * themselves trigger stack protector errors.
+ */
+
+__attribute__((weak,noreturn,section(".text.nolibc_stack_chk")))
+void __stack_chk_fail(void)
+{
+ pid_t pid;
+ my_syscall3(__NR_write, STDERR_FILENO, "!!Stack smashing detected!!\n", 28);
+ pid = my_syscall0(__NR_getpid);
+ my_syscall2(__NR_kill, pid, SIGABRT);
+ for (;;);
+}
+
+__attribute__((weak,noreturn,section(".text.nolibc_stack_chk")))
+void __stack_chk_fail_local(void)
+{
+ __stack_chk_fail();
+}
+
+__attribute__((weak,section(".data.nolibc_stack_chk")))
+uintptr_t __stack_chk_guard;
+
+__attribute__((weak,no_stack_protector,section(".text.nolibc_stack_chk")))
+void __stack_chk_init(void)
+{
+ my_syscall3(__NR_getrandom, &__stack_chk_guard, sizeof(__stack_chk_guard), 0);
+ /* a bit more randomness in case getrandom() fails */
+ __stack_chk_guard ^= (uintptr_t) &__stack_chk_guard;
+}
+#endif // defined(NOLIBC_STACKPROTECTOR)
+
+#endif // _NOLIBC_STACKPROTECTOR_H
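
The symbols above are only the runtime side of -fstack-protector; the canary load and compare are emitted by the compiler in every instrumented function. A hedged sketch of roughly how such a function behaves (illustrative only, not actual generated code):

void greet(void)
{
	uintptr_t canary = __stack_chk_guard;	/* loaded at function entry */
	char buf[64];				/* the array that triggers instrumentation */

	my_syscall3(__NR_write, 1, "hi\n", 3);	/* ordinary function body */
	(void)buf;

	if (canary != __stack_chk_guard)	/* compared just before returning */
		__stack_chk_fail();		/* never returns */
}
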
diff --git a/tools/include/nolibc/std.h b/tools/include/nolibc/std.h
new file mode 100644
index 000000000000..933bc0be7e1c
--- /dev/null
+++ b/tools/include/nolibc/std.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Standard definitions and types for NOLIBC
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_STD_H
+#define _NOLIBC_STD_H
+
+/* Declare a few quite common macros and types that usually are in stdlib.h,
+ * stdint.h, ctype.h, unistd.h and a few other common locations. Please place
+ * integer type definitions and generic macros here, but avoid OS-specific and
+ * syscall-specific stuff, as this file is expected to be included very early.
+ */
+
+/* note: may already be defined */
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+#include "stdint.h"
+
+/* those are commonly provided by sys/types.h */
+typedef unsigned int dev_t;
+typedef unsigned long ino_t;
+typedef unsigned int mode_t;
+typedef signed int pid_t;
+typedef unsigned int uid_t;
+typedef unsigned int gid_t;
+typedef unsigned long nlink_t;
+typedef signed long off_t;
+typedef signed long blksize_t;
+typedef signed long blkcnt_t;
+typedef signed long time_t;
+
+#endif /* _NOLIBC_STD_H */
diff --git a/tools/include/nolibc/stdint.h b/tools/include/nolibc/stdint.h
new file mode 100644
index 000000000000..c1ce4f5e0603
--- /dev/null
+++ b/tools/include/nolibc/stdint.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Standard definitions and types for NOLIBC
+ * Copyright (C) 2023 Vincent Dagonneau <v@vda.io>
+ */
+
+#ifndef _NOLIBC_STDINT_H
+#define _NOLIBC_STDINT_H
+
+typedef unsigned char uint8_t;
+typedef signed char int8_t;
+typedef unsigned short uint16_t;
+typedef signed short int16_t;
+typedef unsigned int uint32_t;
+typedef signed int int32_t;
+typedef unsigned long long uint64_t;
+typedef signed long long int64_t;
+typedef unsigned long size_t;
+typedef signed long ssize_t;
+typedef unsigned long uintptr_t;
+typedef signed long intptr_t;
+typedef signed long ptrdiff_t;
+
+typedef int8_t int_least8_t;
+typedef uint8_t uint_least8_t;
+typedef int16_t int_least16_t;
+typedef uint16_t uint_least16_t;
+typedef int32_t int_least32_t;
+typedef uint32_t uint_least32_t;
+typedef int64_t int_least64_t;
+typedef uint64_t uint_least64_t;
+
+typedef int8_t int_fast8_t;
+typedef uint8_t uint_fast8_t;
+typedef ssize_t int_fast16_t;
+typedef size_t uint_fast16_t;
+typedef ssize_t int_fast32_t;
+typedef size_t uint_fast32_t;
+typedef ssize_t int_fast64_t;
+typedef size_t uint_fast64_t;
+
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+/* limits of integral types */
+
+#define INT8_MIN (-128)
+#define INT16_MIN (-32767-1)
+#define INT32_MIN (-2147483647-1)
+#define INT64_MIN (-9223372036854775807LL-1)
+
+#define INT8_MAX (127)
+#define INT16_MAX (32767)
+#define INT32_MAX (2147483647)
+#define INT64_MAX (9223372036854775807LL)
+
+#define UINT8_MAX (255)
+#define UINT16_MAX (65535)
+#define UINT32_MAX (4294967295U)
+#define UINT64_MAX (18446744073709551615ULL)
+
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST64_MIN INT64_MIN
+
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MAX INT64_MAX
+
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+#define SIZE_MAX ((size_t)(__LONG_MAX__) * 2 + 1)
+#define INTPTR_MIN (-__LONG_MAX__ - 1)
+#define INTPTR_MAX __LONG_MAX__
+#define PTRDIFF_MIN INTPTR_MIN
+#define PTRDIFF_MAX INTPTR_MAX
+#define UINTPTR_MAX SIZE_MAX
+
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST16_MIN INTPTR_MIN
+#define INT_FAST32_MIN INTPTR_MIN
+#define INT_FAST64_MIN INTPTR_MIN
+
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MAX INTPTR_MAX
+#define INT_FAST32_MAX INTPTR_MAX
+#define INT_FAST64_MAX INTPTR_MAX
+
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX SIZE_MAX
+#define UINT_FAST32_MAX SIZE_MAX
+#define UINT_FAST64_MAX SIZE_MAX
+
+#endif /* _NOLIBC_STDINT_H */
diff --git a/tools/include/nolibc/stdio.h b/tools/include/nolibc/stdio.h
new file mode 100644
index 000000000000..6cbbb52836a0
--- /dev/null
+++ b/tools/include/nolibc/stdio.h
@@ -0,0 +1,315 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * minimal stdio function definitions for NOLIBC
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_STDIO_H
+#define _NOLIBC_STDIO_H
+
+#include <stdarg.h>
+
+#include "std.h"
+#include "arch.h"
+#include "errno.h"
+#include "types.h"
+#include "sys.h"
+#include "stdlib.h"
+#include "string.h"
+
+#ifndef EOF
+#define EOF (-1)
+#endif
+
+/* just define FILE as a non-empty type */
+typedef struct FILE {
+ char dummy[1];
+} FILE;
+
+/* We define the 3 common stdio files as constant invalid pointers that
+ * are easily recognized.
+ */
+static __attribute__((unused)) FILE* const stdin = (FILE*)-3;
+static __attribute__((unused)) FILE* const stdout = (FILE*)-2;
+static __attribute__((unused)) FILE* const stderr = (FILE*)-1;
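
The (FILE*)-3/-2/-1 trick lets every function below recover the underlying file descriptor with the same arithmetic, fd = 3 + (long)stream, so stdin maps to 0, stdout to 1 and stderr to 2 (see fgetc()/fputc()). A tiny sketch of that mapping (hypothetical helper):

static int fd_of(FILE *stream)
{
	if (stream < stdin || stream > stderr)
		return -1;		/* not one of the three pseudo-streams */
	return 3 + (long)stream;	/* 3 + (-3) == 0, 3 + (-2) == 1, 3 + (-1) == 2 */
}
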
+
+/* getc(), fgetc(), getchar() */
+
+#define getc(stream) fgetc(stream)
+
+static __attribute__((unused))
+int fgetc(FILE* stream)
+{
+ unsigned char ch;
+ int fd;
+
+ if (stream < stdin || stream > stderr)
+ return EOF;
+
+ fd = 3 + (long)stream;
+
+ if (read(fd, &ch, 1) <= 0)
+ return EOF;
+ return ch;
+}
+
+static __attribute__((unused))
+int getchar(void)
+{
+ return fgetc(stdin);
+}
+
+
+/* putc(), fputc(), putchar() */
+
+#define putc(c, stream) fputc(c, stream)
+
+static __attribute__((unused))
+int fputc(int c, FILE* stream)
+{
+ unsigned char ch = c;
+ int fd;
+
+ if (stream < stdin || stream > stderr)
+ return EOF;
+
+ fd = 3 + (long)stream;
+
+ if (write(fd, &ch, 1) <= 0)
+ return EOF;
+ return ch;
+}
+
+static __attribute__((unused))
+int putchar(int c)
+{
+ return fputc(c, stdout);
+}
+
+
+/* fwrite(), puts(), fputs(). Note that puts() appends a '\n' while fputs() does not. */
+
+/* internal fwrite()-like function which only takes a size and returns 0 on
+ * success or EOF on error. It automatically retries on short writes.
+ */
+static __attribute__((unused))
+int _fwrite(const void *buf, size_t size, FILE *stream)
+{
+ ssize_t ret;
+ int fd;
+
+ if (stream < stdin || stream > stderr)
+ return EOF;
+
+ fd = 3 + (long)stream;
+
+ while (size) {
+ ret = write(fd, buf, size);
+ if (ret <= 0)
+ return EOF;
+ size -= ret;
+ buf += ret;
+ }
+ return 0;
+}
+
+static __attribute__((unused))
+size_t fwrite(const void *s, size_t size, size_t nmemb, FILE *stream)
+{
+ size_t written;
+
+ for (written = 0; written < nmemb; written++) {
+ if (_fwrite(s, size, stream) != 0)
+ break;
+ s += size;
+ }
+ return written;
+}
+
+static __attribute__((unused))
+int fputs(const char *s, FILE *stream)
+{
+ return _fwrite(s, strlen(s), stream);
+}
+
+static __attribute__((unused))
+int puts(const char *s)
+{
+ if (fputs(s, stdout) == EOF)
+ return EOF;
+ return putchar('\n');
+}
+
+
+/* fgets() */
+static __attribute__((unused))
+char *fgets(char *s, int size, FILE *stream)
+{
+ int ofs;
+ int c;
+
+ for (ofs = 0; ofs + 1 < size;) {
+ c = fgetc(stream);
+ if (c == EOF)
+ break;
+ s[ofs++] = c;
+ if (c == '\n')
+ break;
+ }
+ if (ofs < size)
+ s[ofs] = 0;
+ return ofs ? s : NULL;
+}
+
+
+/* minimal vfprintf(). It supports the following formats:
+ * - %[l*]{d,u,c,x,p}
+ * - %s
+ * - unknown modifiers are ignored.
+ */
+static __attribute__((unused))
+int vfprintf(FILE *stream, const char *fmt, va_list args)
+{
+ char escape, lpref, c;
+ unsigned long long v;
+ unsigned int written;
+ size_t len, ofs;
+ char tmpbuf[21];
+ const char *outstr;
+
+ written = ofs = escape = lpref = 0;
+ while (1) {
+ c = fmt[ofs++];
+
+ if (escape) {
+ /* we're in an escape sequence, ofs == 1 */
+ escape = 0;
+ if (c == 'c' || c == 'd' || c == 'u' || c == 'x' || c == 'p') {
+ char *out = tmpbuf;
+
+ if (c == 'p')
+ v = va_arg(args, unsigned long);
+ else if (lpref) {
+ if (lpref > 1)
+ v = va_arg(args, unsigned long long);
+ else
+ v = va_arg(args, unsigned long);
+ } else
+ v = va_arg(args, unsigned int);
+
+ if (c == 'd') {
+ /* sign-extend the value */
+ if (lpref == 0)
+ v = (long long)(int)v;
+ else if (lpref == 1)
+ v = (long long)(long)v;
+ }
+
+ switch (c) {
+ case 'c':
+ out[0] = v;
+ out[1] = 0;
+ break;
+ case 'd':
+ i64toa_r(v, out);
+ break;
+ case 'u':
+ u64toa_r(v, out);
+ break;
+ case 'p':
+ *(out++) = '0';
+ *(out++) = 'x';
+ /* fall through */
+ default: /* 'x' and 'p' above */
+ u64toh_r(v, out);
+ break;
+ }
+ outstr = tmpbuf;
+ }
+ else if (c == 's') {
+ outstr = va_arg(args, char *);
+ if (!outstr)
+ outstr="(null)";
+ }
+ else if (c == '%') {
+ /* queue it verbatim */
+ continue;
+ }
+ else {
+ /* modifiers or final 0 */
+ if (c == 'l') {
+ /* long format prefix, maintain the escape */
+ lpref++;
+ }
+ escape = 1;
+ goto do_escape;
+ }
+ len = strlen(outstr);
+ goto flush_str;
+ }
+
+ /* not an escape sequence */
+ if (c == 0 || c == '%') {
+ /* flush pending data on escape or end */
+ escape = 1;
+ lpref = 0;
+ outstr = fmt;
+ len = ofs - 1;
+ flush_str:
+ if (_fwrite(outstr, len, stream) != 0)
+ break;
+
+ written += len;
+ do_escape:
+ if (c == 0)
+ break;
+ fmt += ofs;
+ ofs = 0;
+ continue;
+ }
+
+ /* literal char, just queue it */
+ }
+ return written;
+}
+
+static __attribute__((unused))
+int vprintf(const char *fmt, va_list args)
+{
+ return vfprintf(stdout, fmt, args);
+}
+
+static __attribute__((unused, format(printf, 2, 3)))
+int fprintf(FILE *stream, const char *fmt, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ ret = vfprintf(stream, fmt, args);
+ va_end(args);
+ return ret;
+}
+
+static __attribute__((unused, format(printf, 1, 2)))
+int printf(const char *fmt, ...)
+{
+ va_list args;
+ int ret;
+
+ va_start(args, fmt);
+ ret = vfprintf(stdout, fmt, args);
+ va_end(args);
+ return ret;
+}
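
A short usage sketch of the printf() family above, restricted to the conversions vfprintf() actually understands (%d, %u, %c, %x, %p, %s and the 'l' length prefixes); the helper name and strings are illustrative:

static void report(const char *name, long value)
{
	/* no width/precision support, only the formats listed above */
	printf("%s = %ld (hex %lx)\n", name, value, value);
	fprintf(stderr, "done with %s\n", name);
}
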
+
+static __attribute__((unused))
+void perror(const char *msg)
+{
+ fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_STDIO_H */
diff --git a/tools/include/nolibc/stdlib.h b/tools/include/nolibc/stdlib.h
new file mode 100644
index 000000000000..894c955d027e
--- /dev/null
+++ b/tools/include/nolibc/stdlib.h
@@ -0,0 +1,452 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * stdlib function definitions for NOLIBC
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_STDLIB_H
+#define _NOLIBC_STDLIB_H
+
+#include "std.h"
+#include "arch.h"
+#include "types.h"
+#include "sys.h"
+#include "string.h"
+#include <linux/auxvec.h>
+
+struct nolibc_heap {
+ size_t len;
+ char user_p[] __attribute__((__aligned__));
+};
+
+/* Buffer used to store int-to-ASCII conversions. It is only emitted if any of
+ * the related functions is actually used. The area is large enough to store
+ * "18446744073709551615" or "-9223372036854775808" and the final zero.
+ */
+static __attribute__((unused)) char itoa_buffer[21];
+
+/*
+ * As much as possible, please keep functions alphabetically sorted.
+ */
+
+/* must be exported, as it's used by libgcc for various divide functions */
+__attribute__((weak,unused,noreturn,section(".text.nolibc_abort")))
+void abort(void)
+{
+ sys_kill(sys_getpid(), SIGABRT);
+ for (;;);
+}
+
+static __attribute__((unused))
+long atol(const char *s)
+{
+ unsigned long ret = 0;
+ unsigned long d;
+ int neg = 0;
+
+ if (*s == '-') {
+ neg = 1;
+ s++;
+ }
+
+ while (1) {
+ d = (*s++) - '0';
+ if (d > 9)
+ break;
+ ret *= 10;
+ ret += d;
+ }
+
+ return neg ? -ret : ret;
+}
+
+static __attribute__((unused))
+int atoi(const char *s)
+{
+ return atol(s);
+}
+
+static __attribute__((unused))
+void free(void *ptr)
+{
+ struct nolibc_heap *heap;
+
+ if (!ptr)
+ return;
+
+ heap = container_of(ptr, struct nolibc_heap, user_p);
+ munmap(heap, heap->len);
+}
+
+/* getenv() tries to find the environment variable named <name> in the
+ * environment array pointed to by the global variable "environ", which must be
+ * declared as a char ** and must be terminated by a NULL (it is recommended to
+ * set this variable to the "envp" argument of main()). If the requested
+ * environment variable exists, its value is returned, otherwise NULL is
+ * returned. getenv() is forcefully inlined so that the reference to "environ"
+ * will be dropped if unused, even at -O0.
+ */
+static __attribute__((unused))
+char *_getenv(const char *name, char **environ)
+{
+ int idx, i;
+
+ if (environ) {
+ for (idx = 0; environ[idx]; idx++) {
+ for (i = 0; name[i] && name[i] == environ[idx][i];)
+ i++;
+ if (!name[i] && environ[idx][i] == '=')
+ return &environ[idx][i+1];
+ }
+ }
+ return NULL;
+}
+
+static inline __attribute__((unused,always_inline))
+char *getenv(const char *name)
+{
+ extern char **environ;
+ return _getenv(name, environ);
+}
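
A usage sketch for getenv() above (hypothetical helper); it assumes that the startup code or the program has set the global environ, typically from main()'s envp:

static const char *current_path(void)
{
	const char *p = getenv("PATH");

	return p ? p : "";	/* NULL when the variable is absent */
}
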
+
+static __attribute__((unused))
+unsigned long getauxval(unsigned long type)
+{
+ const unsigned long *auxv = _auxv;
+ unsigned long ret;
+
+ if (!auxv)
+ return 0;
+
+ while (1) {
+ if (!auxv[0] && !auxv[1]) {
+ ret = 0;
+ break;
+ }
+
+ if (auxv[0] == type) {
+ ret = auxv[1];
+ break;
+ }
+
+ auxv += 2;
+ }
+
+ return ret;
+}
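
getauxval() above walks the _auxv key/value pairs saved at startup; a typical use is querying the page size (AT_PAGESZ comes from linux/auxvec.h, included above). Illustrative helper:

static unsigned long page_size(void)
{
	unsigned long sz = getauxval(AT_PAGESZ);

	return sz ? sz : 4096;	/* getauxval() returns 0 when the entry is missing */
}
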
+
+static __attribute__((unused))
+void *malloc(size_t len)
+{
+ struct nolibc_heap *heap;
+
+ /* Always allocate memory with a size that is a multiple of 4096. */
+ len = sizeof(*heap) + len;
+ len = (len + 4095UL) & -4096UL;
+ heap = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
+ -1, 0);
+ if (__builtin_expect(heap == MAP_FAILED, 0))
+ return NULL;
+
+ heap->len = len;
+ return heap->user_p;
+}
+
+static __attribute__((unused))
+void *calloc(size_t size, size_t nmemb)
+{
+ size_t x = size * nmemb;
+
+ if (__builtin_expect(size && ((x / size) != nmemb), 0)) {
+ SET_ERRNO(ENOMEM);
+ return NULL;
+ }
+
+ /*
+ * No need to zero the heap, the MAP_ANONYMOUS in malloc()
+ * already does it.
+ */
+ return malloc(x);
+}
+
+static __attribute__((unused))
+void *realloc(void *old_ptr, size_t new_size)
+{
+ struct nolibc_heap *heap;
+ size_t user_p_len;
+ void *ret;
+
+ if (!old_ptr)
+ return malloc(new_size);
+
+ heap = container_of(old_ptr, struct nolibc_heap, user_p);
+ user_p_len = heap->len - sizeof(*heap);
+ /*
+ * Don't reallocate if @user_p_len >= @new_size: this block of
+ * memory is already large enough to hold @new_size bytes. Just
+ * return the same pointer.
+ */
+ if (user_p_len >= new_size)
+ return old_ptr;
+
+ ret = malloc(new_size);
+ if (__builtin_expect(!ret, 0))
+ return NULL;
+
+ memcpy(ret, heap->user_p, heap->len);
+ munmap(heap, heap->len);
+ return ret;
+}
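
The allocator above maps whole pages and keeps the mapping length in a small header placed just before the user pointer, so free()/realloc() can find it again. Usage is the usual malloc/free pair; a brief sketch (hypothetical helper):

static char *dup_into_heap(const char *s)
{
	size_t len = strlen(s) + 1;
	char *p = malloc(len);	/* internally rounded up to a multiple of 4096 */

	if (!p)
		return NULL;
	memcpy(p, s, len);
	return p;		/* released later with free(p) */
}
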
+
+/* Converts the unsigned long integer <in> to its hex representation into
+ * buffer <buffer>, which must be long enough to store the number and the
+ * trailing zero (17 bytes for "ffffffffffffffff" or 9 for "ffffffff"). The
+ * buffer is filled from the first byte, and the number of characters emitted
+ * (not counting the trailing zero) is returned. The function is constructed
+ * in a way to optimize the code size and avoid any divide that could add a
+ * dependency on large external functions.
+ */
+static __attribute__((unused))
+int utoh_r(unsigned long in, char *buffer)
+{
+ signed char pos = (~0UL > 0xfffffffful) ? 60 : 28;
+ int digits = 0;
+ int dig;
+
+ do {
+ dig = in >> pos;
+ in -= (uint64_t)dig << pos;
+ pos -= 4;
+ if (dig || digits || pos < 0) {
+ if (dig > 9)
+ dig += 'a' - '0' - 10;
+ buffer[digits++] = '0' + dig;
+ }
+ } while (pos >= 0);
+
+ buffer[digits] = 0;
+ return digits;
+}
+
+/* converts unsigned long <in> to a hex string using the static itoa_buffer
+ * and returns the pointer to that string.
+ */
+static inline __attribute__((unused))
+char *utoh(unsigned long in)
+{
+ utoh_r(in, itoa_buffer);
+ return itoa_buffer;
+}
+
+/* Converts the unsigned long integer <in> to its string representation into
+ * buffer <buffer>, which must be long enough to store the number and the
+ * trailing zero (21 bytes for 18446744073709551615 in 64-bit, 11 for
+ * 4294967295 in 32-bit). The buffer is filled from the first byte, and the
+ * number of characters emitted (not counting the trailing zero) is returned.
+ * The function is constructed in a way to optimize the code size and avoid
+ * any divide that could add a dependency on large external functions.
+ */
+static __attribute__((unused))
+int utoa_r(unsigned long in, char *buffer)
+{
+ unsigned long lim;
+ int digits = 0;
+ int pos = (~0UL > 0xfffffffful) ? 19 : 9;
+ int dig;
+
+ do {
+ for (dig = 0, lim = 1; dig < pos; dig++)
+ lim *= 10;
+
+ if (digits || in >= lim || !pos) {
+ for (dig = 0; in >= lim; dig++)
+ in -= lim;
+ buffer[digits++] = '0' + dig;
+ }
+ } while (pos--);
+
+ buffer[digits] = 0;
+ return digits;
+}
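
utoa_r() above avoids division by repeatedly subtracting decreasing powers of ten; converting 1024, for instance, emits '1', '0', '2', '4' and returns 4. A quick check of that behaviour (hypothetical helper):

static int check_utoa(void)
{
	char buf[21];
	int len = utoa_r(1024, buf);	/* buf now holds "1024" */

	return len == 4;		/* number of characters emitted */
}
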
+
+/* Converts the signed long integer <in> to its string representation into
+ * buffer <buffer>, which must be long enough to store the number and the
+ * trailing zero (21 bytes for -9223372036854775808 in 64-bit, 12 for
+ * -2147483648 in 32-bit). The buffer is filled from the first byte, and the
+ * number of characters emitted (not counting the trailing zero) is returned.
+ */
+static __attribute__((unused))
+int itoa_r(long in, char *buffer)
+{
+ char *ptr = buffer;
+ int len = 0;
+
+ if (in < 0) {
+ in = -in;
+ *(ptr++) = '-';
+ len++;
+ }
+ len += utoa_r(in, ptr);
+ return len;
+}
+
+/* for historical compatibility, same as above but returns the pointer to the
+ * buffer.
+ */
+static inline __attribute__((unused))
+char *ltoa_r(long in, char *buffer)
+{
+ itoa_r(in, buffer);
+ return buffer;
+}
+
+/* converts long integer <in> to a string using the static itoa_buffer and
+ * returns the pointer to that string.
+ */
+static inline __attribute__((unused))
+char *itoa(long in)
+{
+ itoa_r(in, itoa_buffer);
+ return itoa_buffer;
+}
+
+/* converts long integer <in> to a string using the static itoa_buffer and
+ * returns the pointer to that string. Same as above, for compatibility.
+ */
+static inline __attribute__((unused))
+char *ltoa(long in)
+{
+ itoa_r(in, itoa_buffer);
+ return itoa_buffer;
+}
+
+/* converts unsigned long integer <in> to a string using the static itoa_buffer
+ * and returns the pointer to that string.
+ */
+static inline __attribute__((unused))
+char *utoa(unsigned long in)
+{
+ utoa_r(in, itoa_buffer);
+ return itoa_buffer;
+}
+
+/* Converts the unsigned 64-bit integer <in> to its hex representation into
+ * buffer <buffer>, which must be long enough to store the number and the
+ * trailing zero (17 bytes for "ffffffffffffffff"). The buffer is filled from
+ * the first byte, and the number of characters emitted (not counting the
+ * trailing zero) is returned. The function is constructed in a way to optimize
+ * the code size and avoid any divide that could add a dependency on large
+ * external functions.
+ */
+static __attribute__((unused))
+int u64toh_r(uint64_t in, char *buffer)
+{
+ signed char pos = 60;
+ int digits = 0;
+ int dig;
+
+ do {
+ if (sizeof(long) >= 8) {
+ dig = (in >> pos) & 0xF;
+ } else {
+ /* 32-bit platforms: avoid a 64-bit shift */
+ uint32_t d = (pos >= 32) ? (in >> 32) : in;
+ dig = (d >> (pos & 31)) & 0xF;
+ }
+ if (dig > 9)
+ dig += 'a' - '0' - 10;
+ pos -= 4;
+ if (dig || digits || pos < 0)
+ buffer[digits++] = '0' + dig;
+ } while (pos >= 0);
+
+ buffer[digits] = 0;
+ return digits;
+}
+
+/* converts uint64_t <in> to a hex string using the static itoa_buffer and
+ * returns the pointer to that string.
+ */
+static inline __attribute__((unused))
+char *u64toh(uint64_t in)
+{
+ u64toh_r(in, itoa_buffer);
+ return itoa_buffer;
+}
+
+/* Converts the unsigned 64-bit integer <in> to its string representation into
+ * buffer <buffer>, which must be long enough to store the number and the
+ * trailing zero (21 bytes for 18446744073709551615). The buffer is filled from
+ * the first byte, and the number of characters emitted (not counting the
+ * trailing zero) is returned. The function is constructed in a way to optimize
+ * the code size and avoid any divide that could add a dependency on large
+ * external functions.
+ */
+static __attribute__((unused))
+int u64toa_r(uint64_t in, char *buffer)
+{
+ unsigned long long lim;
+ int digits = 0;
+ int pos = 19; /* start with the highest possible digit */
+ int dig;
+
+ do {
+ for (dig = 0, lim = 1; dig < pos; dig++)
+ lim *= 10;
+
+ if (digits || in >= lim || !pos) {
+ for (dig = 0; in >= lim; dig++)
+ in -= lim;
+ buffer[digits++] = '0' + dig;
+ }
+ } while (pos--);
+
+ buffer[digits] = 0;
+ return digits;
+}
+
+/* Converts the signed 64-bit integer <in> to its string representation into
+ * buffer <buffer>, which must be long enough to store the number and the
+ * trailing zero (21 bytes for -9223372036854775808). The buffer is filled from
+ * the first byte, and the number of characters emitted (not counting the
+ * trailing zero) is returned.
+ */
+static __attribute__((unused))
+int i64toa_r(int64_t in, char *buffer)
+{
+ char *ptr = buffer;
+ int len = 0;
+
+ if (in < 0) {
+ in = -in;
+ *(ptr++) = '-';
+ len++;
+ }
+ len += u64toa_r(in, ptr);
+ return len;
+}
+
+/* converts int64_t <in> to a string using the static itoa_buffer and returns
+ * the pointer to that string.
+ */
+static inline __attribute__((unused))
+char *i64toa(int64_t in)
+{
+ i64toa_r(in, itoa_buffer);
+ return itoa_buffer;
+}
+
+/* converts uint64_t <in> to a string using the static itoa_buffer and returns
+ * the pointer to that string.
+ */
+static inline __attribute__((unused))
+char *u64toa(uint64_t in)
+{
+ u64toa_r(in, itoa_buffer);
+ return itoa_buffer;
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_STDLIB_H */
diff --git a/tools/include/nolibc/string.h b/tools/include/nolibc/string.h
new file mode 100644
index 000000000000..fffdaf6ff467
--- /dev/null
+++ b/tools/include/nolibc/string.h
@@ -0,0 +1,294 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * string function definitions for NOLIBC
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_STRING_H
+#define _NOLIBC_STRING_H
+
+#include "std.h"
+
+static void *malloc(size_t len);
+
+/*
+ * As much as possible, please keep functions alphabetically sorted.
+ */
+
+static __attribute__((unused))
+int memcmp(const void *s1, const void *s2, size_t n)
+{
+ size_t ofs = 0;
+ int c1 = 0;
+
+ while (ofs < n && !(c1 = ((unsigned char *)s1)[ofs] - ((unsigned char *)s2)[ofs])) {
+ ofs++;
+ }
+ return c1;
+}
+
+static __attribute__((unused))
+void *_nolibc_memcpy_up(void *dst, const void *src, size_t len)
+{
+ size_t pos = 0;
+
+ while (pos < len) {
+ ((char *)dst)[pos] = ((const char *)src)[pos];
+ pos++;
+ }
+ return dst;
+}
+
+static __attribute__((unused))
+void *_nolibc_memcpy_down(void *dst, const void *src, size_t len)
+{
+ while (len) {
+ len--;
+ ((char *)dst)[len] = ((const char *)src)[len];
+ }
+ return dst;
+}
+
+/* This definition might be ignored by the compiler when not building with
+ * -ffreestanding, and the symbol may then be reported as missing.
+ */
+__attribute__((weak,unused,section(".text.nolibc_memmove")))
+void *memmove(void *dst, const void *src, size_t len)
+{
+ size_t dir, pos;
+
+ pos = len;
+ dir = -1;
+
+ if (dst < src) {
+ pos = -1;
+ dir = 1;
+ }
+
+ while (len) {
+ pos += dir;
+ ((char *)dst)[pos] = ((const char *)src)[pos];
+ len--;
+ }
+ return dst;
+}
+
+/* must be exported, as it's used by libgcc on ARM */
+__attribute__((weak,unused,section(".text.nolibc_memcpy")))
+void *memcpy(void *dst, const void *src, size_t len)
+{
+ return _nolibc_memcpy_up(dst, src, len);
+}
+
+/* This definition might be ignored by the compiler when not building with
+ * -ffreestanding, and the symbol may then be reported as missing.
+ */
+__attribute__((weak,unused,section(".text.nolibc_memset")))
+void *memset(void *dst, int b, size_t len)
+{
+ char *p = dst;
+
+ while (len--) {
+ /* prevent gcc from recognizing memset() here */
+ asm volatile("");
+ *(p++) = b;
+ }
+ return dst;
+}
+
+static __attribute__((unused))
+char *strchr(const char *s, int c)
+{
+ while (*s) {
+ if (*s == (char)c)
+ return (char *)s;
+ s++;
+ }
+ return NULL;
+}
+
+static __attribute__((unused))
+int strcmp(const char *a, const char *b)
+{
+ unsigned int c;
+ int diff;
+
+ while (!(diff = (unsigned char)*a++ - (c = (unsigned char)*b++)) && c)
+ ;
+ return diff;
+}
+
+static __attribute__((unused))
+char *strcpy(char *dst, const char *src)
+{
+ char *ret = dst;
+
+ while ((*dst++ = *src++));
+ return ret;
+}
+
+/* This function is only used when the argument is not a constant, or when that
+ * cannot be known because optimizations are disabled. Note that gcc 12
+ * recognizes the strlen() pattern and replaces it with a jump to strlen(),
+ * i.e. to itself, hence the asm() statement below that is meant to disable
+ * this confusing practice.
+ */
+static __attribute__((unused))
+size_t strlen(const char *str)
+{
+ size_t len;
+
+ for (len = 0; str[len]; len++)
+ asm("");
+ return len;
+}
+
+/* do not trust __builtin_constant_p() at -O0, as clang will emit a test and
+ * the two branches, then will rely on an external definition of strlen().
+ */
+#if defined(__OPTIMIZE__)
+#define nolibc_strlen(x) strlen(x)
+#define strlen(str) ({ \
+ __builtin_constant_p((str)) ? \
+ __builtin_strlen((str)) : \
+ nolibc_strlen((str)); \
+})
+#endif
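As a minimal illustrative sketch of how the dispatch above behaves when __OPTIMIZE__ is set (the function and variable names here are hypothetical, not part of this file):

static void strlen_example(const char *runtime_str)
{
	size_t a = strlen("hello");      /* constant: folds to __builtin_strlen("hello") == 5 at compile time */
	size_t b = strlen(runtime_str);  /* not constant: falls back to nolibc_strlen() at run time */

	(void)a; (void)b;                /* silence unused-variable warnings */
}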
+
+static __attribute__((unused))
+size_t strnlen(const char *str, size_t maxlen)
+{
+ size_t len;
+
+ for (len = 0; (len < maxlen) && str[len]; len++);
+ return len;
+}
+
+static __attribute__((unused))
+char *strdup(const char *str)
+{
+ size_t len;
+ char *ret;
+
+ len = strlen(str);
+ ret = malloc(len + 1);
+ if (__builtin_expect(ret != NULL, 1))
+ memcpy(ret, str, len + 1);
+
+ return ret;
+}
+
+static __attribute__((unused))
+char *strndup(const char *str, size_t maxlen)
+{
+ size_t len;
+ char *ret;
+
+ len = strnlen(str, maxlen);
+ ret = malloc(len + 1);
+ if (__builtin_expect(ret != NULL, 1)) {
+ memcpy(ret, str, len);
+ ret[len] = '\0';
+ }
+
+ return ret;
+}
+
+static __attribute__((unused))
+size_t strlcat(char *dst, const char *src, size_t size)
+{
+ size_t len;
+ char c;
+
+ for (len = 0; dst[len]; len++)
+ ;
+
+ for (;;) {
+ c = *src;
+ if (len < size)
+ dst[len] = c;
+ if (!c)
+ break;
+ len++;
+ src++;
+ }
+
+ return len;
+}
+
+static __attribute__((unused))
+size_t strlcpy(char *dst, const char *src, size_t size)
+{
+ size_t len;
+ char c;
+
+ for (len = 0;;) {
+ c = src[len];
+ if (len < size)
+ dst[len] = c;
+ if (!c)
+ break;
+ len++;
+ }
+ return len;
+}
+
+static __attribute__((unused))
+char *strncat(char *dst, const char *src, size_t size)
+{
+ char *orig = dst;
+
+ while (*dst)
+ dst++;
+
+ while (size && (*dst = *src)) {
+ src++;
+ dst++;
+ size--;
+ }
+
+ *dst = 0;
+ return orig;
+}
+
+static __attribute__((unused))
+int strncmp(const char *a, const char *b, size_t size)
+{
+ unsigned int c;
+ int diff = 0;
+
+ while (size-- &&
+ !(diff = (unsigned char)*a++ - (c = (unsigned char)*b++)) && c)
+ ;
+
+ return diff;
+}
+
+static __attribute__((unused))
+char *strncpy(char *dst, const char *src, size_t size)
+{
+ size_t len;
+
+ for (len = 0; len < size; len++)
+ if ((dst[len] = *src))
+ src++;
+ return dst;
+}
+
+static __attribute__((unused))
+char *strrchr(const char *s, int c)
+{
+ const char *ret = NULL;
+
+ while (*s) {
+ if (*s == (char)c)
+ ret = s;
+ s++;
+ }
+ return (char *)ret;
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_STRING_H */
diff --git a/tools/include/nolibc/sys.h b/tools/include/nolibc/sys.h
new file mode 100644
index 000000000000..5d624dc63a42
--- /dev/null
+++ b/tools/include/nolibc/sys.h
@@ -0,0 +1,1371 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Syscall definitions for NOLIBC (those in man(2))
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_SYS_H
+#define _NOLIBC_SYS_H
+
+#include <stdarg.h>
+#include "std.h"
+
+/* system includes */
+#include <asm/unistd.h>
+#include <asm/signal.h> // for SIGCHLD
+#include <asm/ioctls.h>
+#include <asm/mman.h>
+#include <linux/fs.h>
+#include <linux/loop.h>
+#include <linux/time.h>
+#include <linux/auxvec.h>
+#include <linux/fcntl.h> // for O_* and AT_*
+#include <linux/stat.h> // for statx()
+
+#include "arch.h"
+#include "errno.h"
+#include "types.h"
+
+
+/* Functions in this file only describe syscalls. They're declared static so
+ * that the compiler usually decides to inline them while still being allowed
+ * to pass a pointer to one of their instances. Each syscall exists in two
+ * versions:
+ * - the "internal" ones, which match the raw syscall interface at the
+ * kernel level and may sometimes slightly differ from the documented
+ * libc-level ones. For example most of them return either a valid value
+ * or -errno. All of these are prefixed with "sys_". They may be called
+ * by non-portable applications if desired.
+ *
+ * - the "exported" ones, whose interface must closely match the one
+ * documented in man(2), that applications are supposed to expect. These
+ * ones rely on the internal ones, and set errno.
+ *
+ * Each syscall is defined with both functions, sorted in alphabetical
+ * order of the exported names.
+ *
+ * In case of doubt about whether a function belongs here: only those which
+ * set errno should be defined in this file. Wrappers like those appearing
+ * in man(3) should not be placed here.
+ */
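As a short sketch of the convention described above (illustrative only; it assumes a nolibc build where errno and the O_* flags come from the headers included in this file):

static void errno_convention_example(void)
{
	int ret;

	/* internal flavour: returns a valid fd or -errno, never touches errno */
	ret = sys_open("/nonexistent", O_RDONLY, 0);   /* e.g. -ENOENT */

	/* exported flavour: returns -1 and sets errno, as documented in man(2) */
	ret = open("/nonexistent", O_RDONLY);          /* -1, errno == ENOENT */

	(void)ret;
}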
+
+
+/*
+ * int brk(void *addr);
+ * void *sbrk(intptr_t inc)
+ */
+
+static __attribute__((unused))
+void *sys_brk(void *addr)
+{
+ return (void *)my_syscall1(__NR_brk, addr);
+}
+
+static __attribute__((unused))
+int brk(void *addr)
+{
+ void *ret = sys_brk(addr);
+
+ if (!ret) {
+ SET_ERRNO(ENOMEM);
+ return -1;
+ }
+ return 0;
+}
+
+static __attribute__((unused))
+void *sbrk(intptr_t inc)
+{
+ void *ret;
+
+ /* first call to find current end */
+ if ((ret = sys_brk(0)) && (sys_brk(ret + inc) == ret + inc))
+ return ret + inc;
+
+ SET_ERRNO(ENOMEM);
+ return (void *)-1;
+}
+
+
+/*
+ * int chdir(const char *path);
+ */
+
+static __attribute__((unused))
+int sys_chdir(const char *path)
+{
+ return my_syscall1(__NR_chdir, path);
+}
+
+static __attribute__((unused))
+int chdir(const char *path)
+{
+ int ret = sys_chdir(path);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int chmod(const char *path, mode_t mode);
+ */
+
+static __attribute__((unused))
+int sys_chmod(const char *path, mode_t mode)
+{
+#ifdef __NR_fchmodat
+ return my_syscall4(__NR_fchmodat, AT_FDCWD, path, mode, 0);
+#elif defined(__NR_chmod)
+ return my_syscall2(__NR_chmod, path, mode);
+#else
+#error Neither __NR_fchmodat nor __NR_chmod defined, cannot implement sys_chmod()
+#endif
+}
+
+static __attribute__((unused))
+int chmod(const char *path, mode_t mode)
+{
+ int ret = sys_chmod(path, mode);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int chown(const char *path, uid_t owner, gid_t group);
+ */
+
+static __attribute__((unused))
+int sys_chown(const char *path, uid_t owner, gid_t group)
+{
+#ifdef __NR_fchownat
+ return my_syscall5(__NR_fchownat, AT_FDCWD, path, owner, group, 0);
+#elif defined(__NR_chown)
+ return my_syscall3(__NR_chown, path, owner, group);
+#else
+#error Neither __NR_fchownat nor __NR_chown defined, cannot implement sys_chown()
+#endif
+}
+
+static __attribute__((unused))
+int chown(const char *path, uid_t owner, gid_t group)
+{
+ int ret = sys_chown(path, owner, group);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int chroot(const char *path);
+ */
+
+static __attribute__((unused))
+int sys_chroot(const char *path)
+{
+ return my_syscall1(__NR_chroot, path);
+}
+
+static __attribute__((unused))
+int chroot(const char *path)
+{
+ int ret = sys_chroot(path);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int close(int fd);
+ */
+
+static __attribute__((unused))
+int sys_close(int fd)
+{
+ return my_syscall1(__NR_close, fd);
+}
+
+static __attribute__((unused))
+int close(int fd)
+{
+ int ret = sys_close(fd);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int dup(int fd);
+ */
+
+static __attribute__((unused))
+int sys_dup(int fd)
+{
+ return my_syscall1(__NR_dup, fd);
+}
+
+static __attribute__((unused))
+int dup(int fd)
+{
+ int ret = sys_dup(fd);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int dup2(int old, int new);
+ */
+
+static __attribute__((unused))
+int sys_dup2(int old, int new)
+{
+#ifdef __NR_dup3
+ return my_syscall3(__NR_dup3, old, new, 0);
+#elif defined(__NR_dup2)
+ return my_syscall2(__NR_dup2, old, new);
+#else
+#error Neither __NR_dup3 nor __NR_dup2 defined, cannot implement sys_dup2()
+#endif
+}
+
+static __attribute__((unused))
+int dup2(int old, int new)
+{
+ int ret = sys_dup2(old, new);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int dup3(int old, int new, int flags);
+ */
+
+#ifdef __NR_dup3
+static __attribute__((unused))
+int sys_dup3(int old, int new, int flags)
+{
+ return my_syscall3(__NR_dup3, old, new, flags);
+}
+
+static __attribute__((unused))
+int dup3(int old, int new, int flags)
+{
+ int ret = sys_dup3(old, new, flags);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+#endif
+
+
+/*
+ * int execve(const char *filename, char *const argv[], char *const envp[]);
+ */
+
+static __attribute__((unused))
+int sys_execve(const char *filename, char *const argv[], char *const envp[])
+{
+ return my_syscall3(__NR_execve, filename, argv, envp);
+}
+
+static __attribute__((unused))
+int execve(const char *filename, char *const argv[], char *const envp[])
+{
+ int ret = sys_execve(filename, argv, envp);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * void exit(int status);
+ */
+
+static __attribute__((noreturn,unused))
+void sys_exit(int status)
+{
+ my_syscall1(__NR_exit, status & 255);
+ while (1); // silence the "noreturn" warning
+}
+
+static __attribute__((noreturn,unused))
+void exit(int status)
+{
+ sys_exit(status);
+}
+
+
+/*
+ * pid_t fork(void);
+ */
+
+static __attribute__((unused))
+pid_t sys_fork(void)
+{
+#ifdef __NR_clone
+ /* note: some archs only have clone() and not fork(). The clone() API
+ * differs between archs, but most of them take the flags in the first
+ * argument and ignore the remaining ones when no other flag is set.
+ */
+ return my_syscall5(__NR_clone, SIGCHLD, 0, 0, 0, 0);
+#elif defined(__NR_fork)
+ return my_syscall0(__NR_fork);
+#else
+#error Neither __NR_clone nor __NR_fork defined, cannot implement sys_fork()
+#endif
+}
+
+static __attribute__((unused))
+pid_t fork(void)
+{
+ pid_t ret = sys_fork();
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int fsync(int fd);
+ */
+
+static __attribute__((unused))
+int sys_fsync(int fd)
+{
+ return my_syscall1(__NR_fsync, fd);
+}
+
+static __attribute__((unused))
+int fsync(int fd)
+{
+ int ret = sys_fsync(fd);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int getdents64(int fd, struct linux_dirent64 *dirp, int count);
+ */
+
+static __attribute__((unused))
+int sys_getdents64(int fd, struct linux_dirent64 *dirp, int count)
+{
+ return my_syscall3(__NR_getdents64, fd, dirp, count);
+}
+
+static __attribute__((unused))
+int getdents64(int fd, struct linux_dirent64 *dirp, int count)
+{
+ int ret = sys_getdents64(fd, dirp, count);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * uid_t geteuid(void);
+ */
+
+static __attribute__((unused))
+uid_t sys_geteuid(void)
+{
+#ifdef __NR_geteuid32
+ return my_syscall0(__NR_geteuid32);
+#else
+ return my_syscall0(__NR_geteuid);
+#endif
+}
+
+static __attribute__((unused))
+uid_t geteuid(void)
+{
+ return sys_geteuid();
+}
+
+
+/*
+ * pid_t getpgid(pid_t pid);
+ */
+
+static __attribute__((unused))
+pid_t sys_getpgid(pid_t pid)
+{
+ return my_syscall1(__NR_getpgid, pid);
+}
+
+static __attribute__((unused))
+pid_t getpgid(pid_t pid)
+{
+ pid_t ret = sys_getpgid(pid);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * pid_t getpgrp(void);
+ */
+
+static __attribute__((unused))
+pid_t sys_getpgrp(void)
+{
+ return sys_getpgid(0);
+}
+
+static __attribute__((unused))
+pid_t getpgrp(void)
+{
+ return sys_getpgrp();
+}
+
+
+/*
+ * pid_t getpid(void);
+ */
+
+static __attribute__((unused))
+pid_t sys_getpid(void)
+{
+ return my_syscall0(__NR_getpid);
+}
+
+static __attribute__((unused))
+pid_t getpid(void)
+{
+ return sys_getpid();
+}
+
+
+/*
+ * pid_t getppid(void);
+ */
+
+static __attribute__((unused))
+pid_t sys_getppid(void)
+{
+ return my_syscall0(__NR_getppid);
+}
+
+static __attribute__((unused))
+pid_t getppid(void)
+{
+ return sys_getppid();
+}
+
+
+/*
+ * pid_t gettid(void);
+ */
+
+static __attribute__((unused))
+pid_t sys_gettid(void)
+{
+ return my_syscall0(__NR_gettid);
+}
+
+static __attribute__((unused))
+pid_t gettid(void)
+{
+ return sys_gettid();
+}
+
+static unsigned long getauxval(unsigned long key);
+
+/*
+ * long getpagesize(void);
+ */
+
+static __attribute__((unused))
+long getpagesize(void)
+{
+ long ret;
+
+ ret = getauxval(AT_PAGESZ);
+ if (!ret) {
+ SET_ERRNO(ENOENT);
+ return -1;
+ }
+
+ return ret;
+}
+
+
+/*
+ * int gettimeofday(struct timeval *tv, struct timezone *tz);
+ */
+
+static __attribute__((unused))
+int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ return my_syscall2(__NR_gettimeofday, tv, tz);
+}
+
+static __attribute__((unused))
+int gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ int ret = sys_gettimeofday(tv, tz);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * uid_t getuid(void);
+ */
+
+static __attribute__((unused))
+uid_t sys_getuid(void)
+{
+#ifdef __NR_getuid32
+ return my_syscall0(__NR_getuid32);
+#else
+ return my_syscall0(__NR_getuid);
+#endif
+}
+
+static __attribute__((unused))
+uid_t getuid(void)
+{
+ return sys_getuid();
+}
+
+
+/*
+ * int ioctl(int fd, unsigned long req, void *value);
+ */
+
+static __attribute__((unused))
+int sys_ioctl(int fd, unsigned long req, void *value)
+{
+ return my_syscall3(__NR_ioctl, fd, req, value);
+}
+
+static __attribute__((unused))
+int ioctl(int fd, unsigned long req, void *value)
+{
+ int ret = sys_ioctl(fd, req, value);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+/*
+ * int kill(pid_t pid, int signal);
+ */
+
+static __attribute__((unused))
+int sys_kill(pid_t pid, int signal)
+{
+ return my_syscall2(__NR_kill, pid, signal);
+}
+
+static __attribute__((unused))
+int kill(pid_t pid, int signal)
+{
+ int ret = sys_kill(pid, signal);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int link(const char *old, const char *new);
+ */
+
+static __attribute__((unused))
+int sys_link(const char *old, const char *new)
+{
+#ifdef __NR_linkat
+ return my_syscall5(__NR_linkat, AT_FDCWD, old, AT_FDCWD, new, 0);
+#elif defined(__NR_link)
+ return my_syscall2(__NR_link, old, new);
+#else
+#error Neither __NR_linkat nor __NR_link defined, cannot implement sys_link()
+#endif
+}
+
+static __attribute__((unused))
+int link(const char *old, const char *new)
+{
+ int ret = sys_link(old, new);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * off_t lseek(int fd, off_t offset, int whence);
+ */
+
+static __attribute__((unused))
+off_t sys_lseek(int fd, off_t offset, int whence)
+{
+ return my_syscall3(__NR_lseek, fd, offset, whence);
+}
+
+static __attribute__((unused))
+off_t lseek(int fd, off_t offset, int whence)
+{
+ off_t ret = sys_lseek(fd, offset, whence);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int mkdir(const char *path, mode_t mode);
+ */
+
+static __attribute__((unused))
+int sys_mkdir(const char *path, mode_t mode)
+{
+#ifdef __NR_mkdirat
+ return my_syscall3(__NR_mkdirat, AT_FDCWD, path, mode);
+#elif defined(__NR_mkdir)
+ return my_syscall2(__NR_mkdir, path, mode);
+#else
+#error Neither __NR_mkdirat nor __NR_mkdir defined, cannot implement sys_mkdir()
+#endif
+}
+
+static __attribute__((unused))
+int mkdir(const char *path, mode_t mode)
+{
+ int ret = sys_mkdir(path, mode);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int mknod(const char *path, mode_t mode, dev_t dev);
+ */
+
+static __attribute__((unused))
+long sys_mknod(const char *path, mode_t mode, dev_t dev)
+{
+#ifdef __NR_mknodat
+ return my_syscall4(__NR_mknodat, AT_FDCWD, path, mode, dev);
+#elif defined(__NR_mknod)
+ return my_syscall3(__NR_mknod, path, mode, dev);
+#else
+#error Neither __NR_mknodat nor __NR_mknod defined, cannot implement sys_mknod()
+#endif
+}
+
+static __attribute__((unused))
+int mknod(const char *path, mode_t mode, dev_t dev)
+{
+ int ret = sys_mknod(path, mode, dev);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+#ifndef MAP_SHARED
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_SHARED_VALIDATE 0x03 /* share + validate extension flags */
+#endif
+
+#ifndef MAP_FAILED
+#define MAP_FAILED ((void *)-1)
+#endif
+
+#ifndef sys_mmap
+static __attribute__((unused))
+void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd,
+ off_t offset)
+{
+#ifndef my_syscall6
+ /* Function not implemented. */
+ return (void *)-ENOSYS;
+#else
+
+ int n;
+
+#if defined(__NR_mmap2)
+ n = __NR_mmap2;
+ offset >>= 12;
+#else
+ n = __NR_mmap;
+#endif
+
+ return (void *)my_syscall6(n, addr, length, prot, flags, fd, offset);
+#endif
+}
+#endif
+
+static __attribute__((unused))
+void *mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset)
+{
+ void *ret = sys_mmap(addr, length, prot, flags, fd, offset);
+
+ if ((unsigned long)ret >= -4095UL) {
+ SET_ERRNO(-(long)ret);
+ ret = MAP_FAILED;
+ }
+ return ret;
+}
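A brief usage sketch of the wrapper above: kernel error returns in the -4095..-1 range are converted to MAP_FAILED with errno set, so callers simply test MAP_FAILED. This assumes PROT_* and MAP_ANONYMOUS are provided by the <asm/mman.h> include at the top of this file:

static void *map_one_page(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)    /* errno already set by the wrapper above */
		return NULL;
	return p;
}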
+
+static __attribute__((unused))
+int sys_munmap(void *addr, size_t length)
+{
+ return my_syscall2(__NR_munmap, addr, length);
+}
+
+static __attribute__((unused))
+int munmap(void *addr, size_t length)
+{
+ int ret = sys_munmap(addr, length);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+/*
+ * int mount(const char *source, const char *target,
+ * const char *fstype, unsigned long flags,
+ * const void *data);
+ */
+static __attribute__((unused))
+int sys_mount(const char *src, const char *tgt, const char *fst,
+ unsigned long flags, const void *data)
+{
+ return my_syscall5(__NR_mount, src, tgt, fst, flags, data);
+}
+
+static __attribute__((unused))
+int mount(const char *src, const char *tgt,
+ const char *fst, unsigned long flags,
+ const void *data)
+{
+ int ret = sys_mount(src, tgt, fst, flags, data);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int open(const char *path, int flags[, mode_t mode]);
+ */
+
+static __attribute__((unused))
+int sys_open(const char *path, int flags, mode_t mode)
+{
+#ifdef __NR_openat
+ return my_syscall4(__NR_openat, AT_FDCWD, path, flags, mode);
+#elif defined(__NR_open)
+ return my_syscall3(__NR_open, path, flags, mode);
+#else
+#error Neither __NR_openat nor __NR_open defined, cannot implement sys_open()
+#endif
+}
+
+static __attribute__((unused))
+int open(const char *path, int flags, ...)
+{
+ mode_t mode = 0;
+ int ret;
+
+ if (flags & O_CREAT) {
+ va_list args;
+
+ va_start(args, flags);
+ mode = va_arg(args, mode_t);
+ va_end(args);
+ }
+
+ ret = sys_open(path, flags, mode);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
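For illustration, the variadic mode argument is only consumed when O_CREAT is passed, mirroring the man(2) prototype (file name and function name here are hypothetical):

static int open_example(void)
{
	/* mode is read from the variadic list only because O_CREAT is set */
	int fd = open("/tmp/example.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return -1;      /* errno set by the wrapper above */
	return fd;
}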
+
+
+/*
+ * int pivot_root(const char *new, const char *old);
+ */
+
+static __attribute__((unused))
+int sys_pivot_root(const char *new, const char *old)
+{
+ return my_syscall2(__NR_pivot_root, new, old);
+}
+
+static __attribute__((unused))
+int pivot_root(const char *new, const char *old)
+{
+ int ret = sys_pivot_root(new, old);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int poll(struct pollfd *fds, int nfds, int timeout);
+ */
+
+static __attribute__((unused))
+int sys_poll(struct pollfd *fds, int nfds, int timeout)
+{
+#if defined(__NR_ppoll)
+ struct timespec t;
+
+ if (timeout >= 0) {
+ t.tv_sec = timeout / 1000;
+ t.tv_nsec = (timeout % 1000) * 1000000;
+ }
+ return my_syscall4(__NR_ppoll, fds, nfds, (timeout >= 0) ? &t : NULL, NULL);
+#elif defined(__NR_poll)
+ return my_syscall3(__NR_poll, fds, nfds, timeout);
+#else
+#error Neither __NR_ppoll nor __NR_poll defined, cannot implement sys_poll()
+#endif
+}
+
+static __attribute__((unused))
+int poll(struct pollfd *fds, int nfds, int timeout)
+{
+ int ret = sys_poll(fds, nfds, timeout);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * ssize_t read(int fd, void *buf, size_t count);
+ */
+
+static __attribute__((unused))
+ssize_t sys_read(int fd, void *buf, size_t count)
+{
+ return my_syscall3(__NR_read, fd, buf, count);
+}
+
+static __attribute__((unused))
+ssize_t read(int fd, void *buf, size_t count)
+{
+ ssize_t ret = sys_read(fd, buf, count);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int reboot(int cmd);
+ * <cmd> is among LINUX_REBOOT_CMD_*
+ */
+
+static __attribute__((unused))
+ssize_t sys_reboot(int magic1, int magic2, int cmd, void *arg)
+{
+ return my_syscall4(__NR_reboot, magic1, magic2, cmd, arg);
+}
+
+static __attribute__((unused))
+int reboot(int cmd)
+{
+ int ret = sys_reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, 0);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int sched_yield(void);
+ */
+
+static __attribute__((unused))
+int sys_sched_yield(void)
+{
+ return my_syscall0(__NR_sched_yield);
+}
+
+static __attribute__((unused))
+int sched_yield(void)
+{
+ int ret = sys_sched_yield();
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int select(int nfds, fd_set *read_fds, fd_set *write_fds,
+ * fd_set *except_fds, struct timeval *timeout);
+ */
+
+static __attribute__((unused))
+int sys_select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
+{
+#if defined(__ARCH_WANT_SYS_OLD_SELECT) && !defined(__NR__newselect)
+ struct sel_arg_struct {
+ unsigned long n;
+ fd_set *r, *w, *e;
+ struct timeval *t;
+ } arg = { .n = nfds, .r = rfds, .w = wfds, .e = efds, .t = timeout };
+ return my_syscall1(__NR_select, &arg);
+#elif defined(__ARCH_WANT_SYS_PSELECT6) && defined(__NR_pselect6)
+ struct timespec t;
+
+ if (timeout) {
+ t.tv_sec = timeout->tv_sec;
+ t.tv_nsec = timeout->tv_usec * 1000;
+ }
+ return my_syscall6(__NR_pselect6, nfds, rfds, wfds, efds, timeout ? &t : NULL, NULL);
+#elif defined(__NR__newselect) || defined(__NR_select)
+#ifndef __NR__newselect
+#define __NR__newselect __NR_select
+#endif
+ return my_syscall5(__NR__newselect, nfds, rfds, wfds, efds, timeout);
+#else
+#error None of __NR_select, __NR_pselect6, nor __NR__newselect defined, cannot implement sys_select()
+#endif
+}
+
+static __attribute__((unused))
+int select(int nfds, fd_set *rfds, fd_set *wfds, fd_set *efds, struct timeval *timeout)
+{
+ int ret = sys_select(nfds, rfds, wfds, efds, timeout);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int setpgid(pid_t pid, pid_t pgid);
+ */
+
+static __attribute__((unused))
+int sys_setpgid(pid_t pid, pid_t pgid)
+{
+ return my_syscall2(__NR_setpgid, pid, pgid);
+}
+
+static __attribute__((unused))
+int setpgid(pid_t pid, pid_t pgid)
+{
+ int ret = sys_setpgid(pid, pgid);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * pid_t setsid(void);
+ */
+
+static __attribute__((unused))
+pid_t sys_setsid(void)
+{
+ return my_syscall0(__NR_setsid);
+}
+
+static __attribute__((unused))
+pid_t setsid(void)
+{
+ pid_t ret = sys_setsid();
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+#if defined(__NR_statx)
+/*
+ * int statx(int fd, const char *path, int flags, unsigned int mask, struct statx *buf);
+ */
+
+static __attribute__((unused))
+int sys_statx(int fd, const char *path, int flags, unsigned int mask, struct statx *buf)
+{
+ return my_syscall5(__NR_statx, fd, path, flags, mask, buf);
+}
+
+static __attribute__((unused))
+int statx(int fd, const char *path, int flags, unsigned int mask, struct statx *buf)
+{
+ int ret = sys_statx(fd, path, flags, mask, buf);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+#endif
+
+/*
+ * int stat(const char *path, struct stat *buf);
+ * Warning: the struct stat's layout is arch-dependent.
+ */
+
+#if defined(__NR_statx) && !defined(__NR_newfstatat) && !defined(__NR_stat)
+/*
+ * Maybe we can just use statx() when available for all architectures?
+ */
+static __attribute__((unused))
+int sys_stat(const char *path, struct stat *buf)
+{
+ struct statx statx;
+ long ret;
+
+ ret = sys_statx(AT_FDCWD, path, AT_NO_AUTOMOUNT, STATX_BASIC_STATS, &statx);
+ buf->st_dev = ((statx.stx_dev_minor & 0xff)
+ | (statx.stx_dev_major << 8)
+ | ((statx.stx_dev_minor & ~0xff) << 12));
+ buf->st_ino = statx.stx_ino;
+ buf->st_mode = statx.stx_mode;
+ buf->st_nlink = statx.stx_nlink;
+ buf->st_uid = statx.stx_uid;
+ buf->st_gid = statx.stx_gid;
+ buf->st_rdev = ((statx.stx_rdev_minor & 0xff)
+ | (statx.stx_rdev_major << 8)
+ | ((statx.stx_rdev_minor & ~0xff) << 12));
+ buf->st_size = statx.stx_size;
+ buf->st_blksize = statx.stx_blksize;
+ buf->st_blocks = statx.stx_blocks;
+ buf->st_atime = statx.stx_atime.tv_sec;
+ buf->st_mtime = statx.stx_mtime.tv_sec;
+ buf->st_ctime = statx.stx_ctime.tv_sec;
+ return ret;
+}
+#else
+static __attribute__((unused))
+int sys_stat(const char *path, struct stat *buf)
+{
+ struct sys_stat_struct stat;
+ long ret;
+
+#ifdef __NR_newfstatat
+ /* only solution for arm64 */
+ ret = my_syscall4(__NR_newfstatat, AT_FDCWD, path, &stat, 0);
+#elif defined(__NR_stat)
+ ret = my_syscall2(__NR_stat, path, &stat);
+#else
+#error Neither __NR_newfstatat nor __NR_stat defined, cannot implement sys_stat()
+#endif
+ buf->st_dev = stat.st_dev;
+ buf->st_ino = stat.st_ino;
+ buf->st_mode = stat.st_mode;
+ buf->st_nlink = stat.st_nlink;
+ buf->st_uid = stat.st_uid;
+ buf->st_gid = stat.st_gid;
+ buf->st_rdev = stat.st_rdev;
+ buf->st_size = stat.st_size;
+ buf->st_blksize = stat.st_blksize;
+ buf->st_blocks = stat.st_blocks;
+ buf->st_atime = stat.st_atime;
+ buf->st_mtime = stat.st_mtime;
+ buf->st_ctime = stat.st_ctime;
+ return ret;
+}
+#endif
+
+static __attribute__((unused))
+int stat(const char *path, struct stat *buf)
+{
+ int ret = sys_stat(path, buf);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int symlink(const char *old, const char *new);
+ */
+
+static __attribute__((unused))
+int sys_symlink(const char *old, const char *new)
+{
+#ifdef __NR_symlinkat
+ return my_syscall3(__NR_symlinkat, old, AT_FDCWD, new);
+#elif defined(__NR_symlink)
+ return my_syscall2(__NR_symlink, old, new);
+#else
+#error Neither __NR_symlinkat nor __NR_symlink defined, cannot implement sys_symlink()
+#endif
+}
+
+static __attribute__((unused))
+int symlink(const char *old, const char *new)
+{
+ int ret = sys_symlink(old, new);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * mode_t umask(mode_t mode);
+ */
+
+static __attribute__((unused))
+mode_t sys_umask(mode_t mode)
+{
+ return my_syscall1(__NR_umask, mode);
+}
+
+static __attribute__((unused))
+mode_t umask(mode_t mode)
+{
+ return sys_umask(mode);
+}
+
+
+/*
+ * int umount2(const char *path, int flags);
+ */
+
+static __attribute__((unused))
+int sys_umount2(const char *path, int flags)
+{
+ return my_syscall2(__NR_umount2, path, flags);
+}
+
+static __attribute__((unused))
+int umount2(const char *path, int flags)
+{
+ int ret = sys_umount2(path, flags);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * int unlink(const char *path);
+ */
+
+static __attribute__((unused))
+int sys_unlink(const char *path)
+{
+#ifdef __NR_unlinkat
+ return my_syscall3(__NR_unlinkat, AT_FDCWD, path, 0);
+#elif defined(__NR_unlink)
+ return my_syscall1(__NR_unlink, path);
+#else
+#error Neither __NR_unlinkat nor __NR_unlink defined, cannot implement sys_unlink()
+#endif
+}
+
+static __attribute__((unused))
+int unlink(const char *path)
+{
+ int ret = sys_unlink(path);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * pid_t wait(int *status);
+ * pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage);
+ * pid_t waitpid(pid_t pid, int *status, int options);
+ */
+
+static __attribute__((unused))
+pid_t sys_wait4(pid_t pid, int *status, int options, struct rusage *rusage)
+{
+ return my_syscall4(__NR_wait4, pid, status, options, rusage);
+}
+
+static __attribute__((unused))
+pid_t wait(int *status)
+{
+ pid_t ret = sys_wait4(-1, status, 0, NULL);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+static __attribute__((unused))
+pid_t wait4(pid_t pid, int *status, int options, struct rusage *rusage)
+{
+ pid_t ret = sys_wait4(pid, status, options, rusage);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+static __attribute__((unused))
+pid_t waitpid(pid_t pid, int *status, int options)
+{
+ pid_t ret = sys_wait4(pid, status, options, NULL);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+
+/*
+ * ssize_t write(int fd, const void *buf, size_t count);
+ */
+
+static __attribute__((unused))
+ssize_t sys_write(int fd, const void *buf, size_t count)
+{
+ return my_syscall3(__NR_write, fd, buf, count);
+}
+
+static __attribute__((unused))
+ssize_t write(int fd, const void *buf, size_t count)
+{
+ ssize_t ret = sys_write(fd, buf, count);
+
+ if (ret < 0) {
+ SET_ERRNO(-ret);
+ ret = -1;
+ }
+ return ret;
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_SYS_H */
diff --git a/tools/include/nolibc/time.h b/tools/include/nolibc/time.h
new file mode 100644
index 000000000000..84655361b9ad
--- /dev/null
+++ b/tools/include/nolibc/time.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * time function definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_TIME_H
+#define _NOLIBC_TIME_H
+
+#include "std.h"
+#include "arch.h"
+#include "types.h"
+#include "sys.h"
+
+static __attribute__((unused))
+time_t time(time_t *tptr)
+{
+ struct timeval tv;
+
+ /* note, cannot fail here */
+ sys_gettimeofday(&tv, NULL);
+
+ if (tptr)
+ *tptr = tv.tv_sec;
+ return tv.tv_sec;
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_TIME_H */
diff --git a/tools/include/nolibc/types.h b/tools/include/nolibc/types.h
new file mode 100644
index 000000000000..aedd7d9e3f64
--- /dev/null
+++ b/tools/include/nolibc/types.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Special types used by various syscalls for NOLIBC
+ * Copyright (C) 2017-2021 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_TYPES_H
+#define _NOLIBC_TYPES_H
+
+#include "std.h"
+#include <linux/time.h>
+#include <linux/stat.h>
+
+
+/* Only the generic macros and types may be defined here. The arch-specific
+ * ones such as the O_RDONLY and related macros used by fcntl() and open(), or
+ * the layout of sys_stat_struct must not be defined here.
+ */
+
+/* stat flags (WARNING, octal here). We need to check for an existing
+ * definition because linux/stat.h may not define these if it finds
+ * that a glibc header was already included.
+ */
+#if !defined(S_IFMT)
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFBLK 0060000
+#define S_IFREG 0100000
+#define S_IFIFO 0010000
+#define S_IFLNK 0120000
+#define S_IFSOCK 0140000
+#define S_IFMT 0170000
+
+#define S_ISDIR(mode) (((mode) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(mode) (((mode) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(mode) (((mode) & S_IFMT) == S_IFBLK)
+#define S_ISREG(mode) (((mode) & S_IFMT) == S_IFREG)
+#define S_ISFIFO(mode) (((mode) & S_IFMT) == S_IFIFO)
+#define S_ISLNK(mode) (((mode) & S_IFMT) == S_IFLNK)
+#define S_ISSOCK(mode) (((mode) & S_IFMT) == S_IFSOCK)
+
+#define S_IRWXU 00700
+#define S_IRUSR 00400
+#define S_IWUSR 00200
+#define S_IXUSR 00100
+
+#define S_IRWXG 00070
+#define S_IRGRP 00040
+#define S_IWGRP 00020
+#define S_IXGRP 00010
+
+#define S_IRWXO 00007
+#define S_IROTH 00004
+#define S_IWOTH 00002
+#define S_IXOTH 00001
+#endif
+
+/* dirent types */
+#define DT_UNKNOWN 0x0
+#define DT_FIFO 0x1
+#define DT_CHR 0x2
+#define DT_DIR 0x4
+#define DT_BLK 0x6
+#define DT_REG 0x8
+#define DT_LNK 0xa
+#define DT_SOCK 0xc
+
+/* commonly an fd_set represents 256 FDs */
+#ifndef FD_SETSIZE
+#define FD_SETSIZE 256
+#endif
+
+/* PATH_MAX and MAXPATHLEN are often used, and are found defined with many
+ * different values elsewhere.
+ */
+#ifndef PATH_MAX
+#define PATH_MAX 4096
+#endif
+
+#ifndef MAXPATHLEN
+#define MAXPATHLEN (PATH_MAX)
+#endif
+
+/* whence values for lseek() */
+#define SEEK_SET 0
+#define SEEK_CUR 1
+#define SEEK_END 2
+
+/* cmd for reboot() */
+#define LINUX_REBOOT_MAGIC1 0xfee1dead
+#define LINUX_REBOOT_MAGIC2 0x28121969
+#define LINUX_REBOOT_CMD_HALT 0xcdef0123
+#define LINUX_REBOOT_CMD_POWER_OFF 0x4321fedc
+#define LINUX_REBOOT_CMD_RESTART 0x01234567
+#define LINUX_REBOOT_CMD_SW_SUSPEND 0xd000fce2
+
+/* Macros used on waitpid()'s return status */
+#define WEXITSTATUS(status) (((status) & 0xff00) >> 8)
+#define WIFEXITED(status) (((status) & 0x7f) == 0)
+#define WTERMSIG(status) ((status) & 0x7f)
+#define WIFSIGNALED(status) ((status) - 1 < 0xff)
+
+/* waitpid() flags */
+#define WNOHANG 1
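A brief sketch of how these status macros are typically combined with waitpid() from sys.h (illustrative only; the "128 + signal" mapping is the usual shell convention, not something defined here):

static int reap_child(pid_t pid)
{
	int status;

	if (waitpid(pid, &status, 0) < 0)
		return -1;                      /* errno set by the wrapper */
	if (WIFEXITED(status))
		return WEXITSTATUS(status);     /* child's exit() code, 0..255 */
	if (WIFSIGNALED(status))
		return 128 + WTERMSIG(status);  /* shell-like convention */
	return -1;
}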
+
+/* standard exit() codes */
+#define EXIT_SUCCESS 0
+#define EXIT_FAILURE 1
+
+#define FD_SETIDXMASK (8 * sizeof(unsigned long))
+#define FD_SETBITMASK (8 * sizeof(unsigned long)-1)
+
+/* for select() */
+typedef struct {
+ unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
+} fd_set;
+
+#define FD_CLR(fd, set) do { \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ if (__fd >= 0) \
+ __set->fds[__fd / FD_SETIDXMASK] &= \
+ ~(1U << (__fd & FD_SETBITMASK)); \
+ } while (0)
+
+#define FD_SET(fd, set) do { \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ if (__fd >= 0) \
+ __set->fds[__fd / FD_SETIDXMASK] |= \
+ 1U << (__fd & FD_SETBITMASK); \
+ } while (0)
+
+#define FD_ISSET(fd, set) ({ \
+ fd_set *__set = (set); \
+ int __fd = (fd); \
+ int __r = 0; \
+ if (__fd >= 0) \
+ __r = !!(__set->fds[__fd / FD_SETIDXMASK] & \
+ 1U << (__fd & FD_SETBITMASK)); \
+ __r; \
+ })
+
+#define FD_ZERO(set) do { \
+ fd_set *__set = (set); \
+ int __idx; \
+ int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
+ for (__idx = 0; __idx < __size; __idx++) \
+ __set->fds[__idx] = 0; \
+ } while (0)
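To show the intended use of the fd_set helpers above together with select() from sys.h (a minimal sketch; fd 0 is stdin, and the function name is hypothetical):

static int wait_for_stdin(struct timeval *timeout)
{
	fd_set rfds;

	FD_ZERO(&rfds);
	FD_SET(0, &rfds);                               /* watch stdin (fd 0) */

	if (select(1, &rfds, NULL, NULL, timeout) < 0)  /* nfds = highest fd + 1 */
		return -1;                              /* errno set by the wrapper */
	return FD_ISSET(0, &rfds);                      /* 1 if stdin is readable */
}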
+
+/* for poll() */
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+
+struct pollfd {
+ int fd;
+ short int events;
+ short int revents;
+};
+
+/* for getdents64() */
+struct linux_dirent64 {
+ uint64_t d_ino;
+ int64_t d_off;
+ unsigned short d_reclen;
+ unsigned char d_type;
+ char d_name[];
+};
+
+/* needed by wait4() */
+struct rusage {
+ struct timeval ru_utime;
+ struct timeval ru_stime;
+ long ru_maxrss;
+ long ru_ixrss;
+ long ru_idrss;
+ long ru_isrss;
+ long ru_minflt;
+ long ru_majflt;
+ long ru_nswap;
+ long ru_inblock;
+ long ru_oublock;
+ long ru_msgsnd;
+ long ru_msgrcv;
+ long ru_nsignals;
+ long ru_nvcsw;
+ long ru_nivcsw;
+};
+
+/* The format of the struct as returned by the libc to the application, which
+ * significantly differs from the format returned by the stat() syscall flavours.
+ */
+struct stat {
+ dev_t st_dev; /* ID of device containing file */
+ ino_t st_ino; /* inode number */
+ mode_t st_mode; /* protection */
+ nlink_t st_nlink; /* number of hard links */
+ uid_t st_uid; /* user ID of owner */
+ gid_t st_gid; /* group ID of owner */
+ dev_t st_rdev; /* device ID (if special file) */
+ off_t st_size; /* total size, in bytes */
+ blksize_t st_blksize; /* blocksize for file system I/O */
+ blkcnt_t st_blocks; /* number of 512B blocks allocated */
+ time_t st_atime; /* time of last access */
+ time_t st_mtime; /* time of last modification */
+ time_t st_ctime; /* time of last status change */
+};
+
+/* WARNING: it only deals with the first 4096 majors and the first 256 minors */
+#define makedev(major, minor) ((dev_t)((((major) & 0xfff) << 8) | ((minor) & 0xff)))
+#define major(dev) ((unsigned int)(((dev) >> 8) & 0xfff))
+#define minor(dev) ((unsigned int)((dev) & 0xff))
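As an example of the device-number helpers above combined with mknod() from sys.h (illustrative only; /dev/null is conventionally major 1, minor 3 on Linux):

static int create_dev_null(void)
{
	/* character device, rw for everyone, device number 1:3 */
	return mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
}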
+
+#ifndef offsetof
+#define offsetof(TYPE, FIELD) ((size_t) &((TYPE *)0)->FIELD)
+#endif
+
+#ifndef container_of
+#define container_of(PTR, TYPE, FIELD) ({ \
+ __typeof__(((TYPE *)0)->FIELD) *__FIELD_PTR = (PTR); \
+ (TYPE *)((char *) __FIELD_PTR - offsetof(TYPE, FIELD)); \
+})
+#endif
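A minimal usage sketch of offsetof()/container_of(); the structure and names below are hypothetical, purely for illustration:

struct my_event {
	int           type;
	struct pollfd pfd;      /* embedded member */
};

static struct my_event *event_of_pollfd(struct pollfd *pfd)
{
	/* recover the enclosing structure from a pointer to its member */
	return container_of(pfd, struct my_event, pfd);
}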
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_TYPES_H */
diff --git a/tools/include/nolibc/unistd.h b/tools/include/nolibc/unistd.h
new file mode 100644
index 000000000000..ac7d53d986cd
--- /dev/null
+++ b/tools/include/nolibc/unistd.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * unistd function definitions for NOLIBC
+ * Copyright (C) 2017-2022 Willy Tarreau <w@1wt.eu>
+ */
+
+#ifndef _NOLIBC_UNISTD_H
+#define _NOLIBC_UNISTD_H
+
+#include "std.h"
+#include "arch.h"
+#include "types.h"
+#include "sys.h"
+
+
+#define STDIN_FILENO 0
+#define STDOUT_FILENO 1
+#define STDERR_FILENO 2
+
+
+static __attribute__((unused))
+int msleep(unsigned int msecs)
+{
+ struct timeval my_timeval = { msecs / 1000, (msecs % 1000) * 1000 };
+
+ if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+ return (my_timeval.tv_sec * 1000) +
+ (my_timeval.tv_usec / 1000) +
+ !!(my_timeval.tv_usec % 1000);
+ else
+ return 0;
+}
+
+static __attribute__((unused))
+unsigned int sleep(unsigned int seconds)
+{
+ struct timeval my_timeval = { seconds, 0 };
+
+ if (sys_select(0, 0, 0, 0, &my_timeval) < 0)
+ return my_timeval.tv_sec + !!my_timeval.tv_usec;
+ else
+ return 0;
+}
+
+static __attribute__((unused))
+int usleep(unsigned int usecs)
+{
+ struct timeval my_timeval = { usecs / 1000000, usecs % 1000000 };
+
+ return sys_select(0, 0, 0, 0, &my_timeval);
+}
+
+static __attribute__((unused))
+int tcsetpgrp(int fd, pid_t pid)
+{
+ return ioctl(fd, TIOCSPGRP, &pid);
+}
+
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
+#endif /* _NOLIBC_UNISTD_H */
diff --git a/tools/include/tools/dis-asm-compat.h b/tools/include/tools/dis-asm-compat.h
new file mode 100644
index 000000000000..70f331e23ed3
--- /dev/null
+++ b/tools/include/tools/dis-asm-compat.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+#ifndef _TOOLS_DIS_ASM_COMPAT_H
+#define _TOOLS_DIS_ASM_COMPAT_H
+
+#include <stdio.h>
+#include <dis-asm.h>
+
+/* define types for older binutils versions, to centralize ifdef'ery a bit */
+#ifndef DISASM_INIT_STYLED
+enum disassembler_style {DISASSEMBLER_STYLE_NOT_EMPTY};
+typedef int (*fprintf_styled_ftype) (void *, enum disassembler_style, const char*, ...);
+#endif
+
+/*
+ * Trivial fprintf wrapper to be used as the fprintf_styled_func argument to
+ * init_disassemble_info_compat() when normal fprintf suffices.
+ */
+static inline int fprintf_styled(void *out,
+ enum disassembler_style style,
+ const char *fmt, ...)
+{
+ va_list args;
+ int r;
+
+ (void)style;
+
+ va_start(args, fmt);
+ r = vfprintf(out, fmt, args);
+ va_end(args);
+
+ return r;
+}
+
+/*
+ * Wrapper for init_disassemble_info() that hides version
+ * differences. Depending on binutils version and architecture either
+ * fprintf_func or fprintf_styled_func will be called.
+ */
+static inline void init_disassemble_info_compat(struct disassemble_info *info,
+ void *stream,
+ fprintf_ftype unstyled_func,
+ fprintf_styled_ftype styled_func)
+{
+#ifdef DISASM_INIT_STYLED
+ init_disassemble_info(info, stream,
+ unstyled_func,
+ styled_func);
+#else
+ (void)styled_func;
+ init_disassemble_info(info, stream,
+ unstyled_func);
+#endif
+}
+
+#endif /* _TOOLS_DIS_ASM_COMPAT_H */
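A typical call site for the wrapper above, sketched under the assumption that tools/include is on the include path and that a plain fprintf-compatible callback is acceptable to the caller:

#include <stdio.h>
#include <dis-asm.h>
#include <tools/dis-asm-compat.h>

static void setup_disasm(struct disassemble_info *info)
{
	/* works with both pre- and post-"styled" binutils APIs */
	init_disassemble_info_compat(info, stdout,
				     (fprintf_ftype)fprintf,
				     fprintf_styled);
}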
diff --git a/tools/include/uapi/asm-generic/fcntl.h b/tools/include/uapi/asm-generic/fcntl.h
index ac190958c981..1c7a0f6632c0 100644
--- a/tools/include/uapi/asm-generic/fcntl.h
+++ b/tools/include/uapi/asm-generic/fcntl.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_GENERIC_FCNTL_H
#define _ASM_GENERIC_FCNTL_H
@@ -90,7 +91,6 @@
/* a horrid kludge trying to make sure that this will fail on old kernels */
#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
-#define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)
#ifndef O_NDELAY
#define O_NDELAY O_NONBLOCK
@@ -115,13 +115,13 @@
#define F_GETSIG 11 /* for sockets. */
#endif
-#ifndef CONFIG_64BIT
+#if __BITS_PER_LONG == 32 || defined(__KERNEL__)
#ifndef F_GETLK64
#define F_GETLK64 12 /* using 'struct flock64' */
#define F_SETLK64 13
#define F_SETLKW64 14
#endif
-#endif
+#endif /* __BITS_PER_LONG == 32 || defined(__KERNEL__) */
#ifndef F_SETOWN_EX
#define F_SETOWN_EX 15
@@ -142,7 +142,7 @@
* record locks, but are "owned" by the open file description, not the
* process. This means that they are inherited across fork() like BSD (flock)
* locks, and they are only released automatically when the last reference to
- * the the open file against which they were acquired is put.
+ * the open file against which they were acquired is put.
*/
#define F_OFD_GETLK 36
#define F_OFD_SETLK 37
@@ -180,6 +180,10 @@ struct f_owner_ex {
blocking */
#define LOCK_UN 8 /* remove lock */
+/*
+ * LOCK_MAND support has been removed from the kernel. We leave the symbols
+ * here to not break legacy builds, but these should not be used in new code.
+ */
#define LOCK_MAND 32 /* This is a mandatory flock ... */
#define LOCK_READ 64 /* which allows concurrent read operations */
#define LOCK_WRITE 128 /* which allows concurrent write operations */
@@ -188,24 +192,19 @@ struct f_owner_ex {
#define F_LINUX_SPECIFIC_BASE 1024
#ifndef HAVE_ARCH_STRUCT_FLOCK
-#ifndef __ARCH_FLOCK_PAD
-#define __ARCH_FLOCK_PAD
-#endif
-
struct flock {
short l_type;
short l_whence;
__kernel_off_t l_start;
__kernel_off_t l_len;
__kernel_pid_t l_pid;
- __ARCH_FLOCK_PAD
-};
+#ifdef __ARCH_FLOCK_EXTRA_SYSID
+ __ARCH_FLOCK_EXTRA_SYSID
#endif
-
-#ifndef HAVE_ARCH_STRUCT_FLOCK64
-#ifndef __ARCH_FLOCK64_PAD
-#define __ARCH_FLOCK64_PAD
+#ifdef __ARCH_FLOCK_PAD
+ __ARCH_FLOCK_PAD
#endif
+};
struct flock64 {
short l_type;
@@ -213,8 +212,10 @@ struct flock64 {
__kernel_loff_t l_start;
__kernel_loff_t l_len;
__kernel_pid_t l_pid;
+#ifdef __ARCH_FLOCK64_PAD
__ARCH_FLOCK64_PAD
-};
#endif
+};
+#endif /* HAVE_ARCH_STRUCT_FLOCK */
#endif /* _ASM_GENERIC_FCNTL_H */
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index f94f65d429be..6ce1f1ceb432 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -72,6 +72,13 @@
#define MADV_COLD 20 /* deactivate these pages */
#define MADV_PAGEOUT 21 /* reclaim these pages */
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
+#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/tools/include/uapi/asm-generic/socket.h b/tools/include/uapi/asm-generic/socket.h
index 77f7c1638eb1..8756df13be50 100644
--- a/tools/include/uapi/asm-generic/socket.h
+++ b/tools/include/uapi/asm-generic/socket.h
@@ -119,6 +119,8 @@
#define SO_DETACH_REUSEPORT_BPF 68
+#define SO_RCVMARK 75
+
#if !defined(__KERNEL__)
#if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 3a3201e4618e..45fa180cc56a 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -140,7 +140,7 @@ __SYSCALL(__NR_renameat, sys_renameat)
#define __NR_umount2 39
__SYSCALL(__NR_umount2, sys_umount)
#define __NR_mount 40
-__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
+__SYSCALL(__NR_mount, sys_mount)
#define __NR_pivot_root 41
__SYSCALL(__NR_pivot_root, sys_pivot_root)
@@ -207,9 +207,9 @@ __SYSCALL(__NR_read, sys_read)
#define __NR_write 64
__SYSCALL(__NR_write, sys_write)
#define __NR_readv 65
-__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
+__SC_COMP(__NR_readv, sys_readv, sys_readv)
#define __NR_writev 66
-__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
+__SC_COMP(__NR_writev, sys_writev, sys_writev)
#define __NR_pread64 67
__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
#define __NR_pwrite64 68
@@ -237,7 +237,7 @@ __SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
/* fs/splice.c */
#define __NR_vmsplice 75
-__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
+__SYSCALL(__NR_vmsplice, sys_vmsplice)
#define __NR_splice 76
__SYSCALL(__NR_splice, sys_splice)
#define __NR_tee 77
@@ -383,7 +383,7 @@ __SYSCALL(__NR_syslog, sys_syslog)
/* kernel/ptrace.c */
#define __NR_ptrace 117
-__SYSCALL(__NR_ptrace, sys_ptrace)
+__SC_COMP(__NR_ptrace, sys_ptrace, compat_sys_ptrace)
/* kernel/sched/core.c */
#define __NR_sched_setparam 118
@@ -517,7 +517,7 @@ __SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
__SC_3264(__NR_adjtimex, sys_adjtimex_time32, sys_adjtimex)
#endif
-/* kernel/timer.c */
+/* kernel/sys.c */
#define __NR_getpid 172
__SYSCALL(__NR_getpid, sys_getpid)
#define __NR_getppid 173
@@ -606,9 +606,9 @@ __SYSCALL(__NR_sendto, sys_sendto)
#define __NR_recvfrom 207
__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
#define __NR_setsockopt 208
-__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
+__SC_COMP(__NR_setsockopt, sys_setsockopt, sys_setsockopt)
#define __NR_getsockopt 209
-__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
+__SC_COMP(__NR_getsockopt, sys_getsockopt, sys_getsockopt)
#define __NR_shutdown 210
__SYSCALL(__NR_shutdown, sys_shutdown)
#define __NR_sendmsg 211
@@ -673,15 +673,15 @@ __SYSCALL(__NR_madvise, sys_madvise)
#define __NR_remap_file_pages 234
__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
#define __NR_mbind 235
-__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
+__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_get_mempolicy 236
-__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
+__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
#define __NR_set_mempolicy 237
-__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
+__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
#define __NR_migrate_pages 238
-__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
#define __NR_move_pages 239
-__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
+__SYSCALL(__NR_move_pages, sys_move_pages)
#endif
#define __NR_rt_tgsigqueueinfo 240
@@ -727,11 +727,9 @@ __SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
#define __NR_process_vm_readv 270
-__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
- compat_sys_process_vm_readv)
+__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv)
#define __NR_process_vm_writev 271
-__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
- compat_sys_process_vm_writev)
+__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev)
#define __NR_kcmp 272
__SYSCALL(__NR_kcmp, sys_kcmp)
#define __NR_finit_module 273
@@ -781,7 +779,7 @@ __SYSCALL(__NR_rseq, sys_rseq)
#define __NR_kexec_file_load 294
__SYSCALL(__NR_kexec_file_load, sys_kexec_file_load)
/* 295 through 402 are unassigned to sync up with generic numbers, don't use */
-#if __BITS_PER_LONG == 32
+#if defined(__SYSCALL_COMPAT) || __BITS_PER_LONG == 32
#define __NR_clock_gettime64 403
__SYSCALL(__NR_clock_gettime64, sys_clock_gettime)
#define __NR_clock_settime64 404
@@ -850,14 +848,46 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
#define __NR_clone3 435
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+#define __NR_close_range 436
+__SYSCALL(__NR_close_range, sys_close_range)
#define __NR_openat2 437
__SYSCALL(__NR_openat2, sys_openat2)
#define __NR_pidfd_getfd 438
__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
+#define __NR_faccessat2 439
+__SYSCALL(__NR_faccessat2, sys_faccessat2)
+#define __NR_process_madvise 440
+__SYSCALL(__NR_process_madvise, sys_process_madvise)
+#define __NR_epoll_pwait2 441
+__SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
+#define __NR_mount_setattr 442
+__SYSCALL(__NR_mount_setattr, sys_mount_setattr)
+#define __NR_quotactl_fd 443
+__SYSCALL(__NR_quotactl_fd, sys_quotactl_fd)
+
+#define __NR_landlock_create_ruleset 444
+__SYSCALL(__NR_landlock_create_ruleset, sys_landlock_create_ruleset)
+#define __NR_landlock_add_rule 445
+__SYSCALL(__NR_landlock_add_rule, sys_landlock_add_rule)
+#define __NR_landlock_restrict_self 446
+__SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
+
+#ifdef __ARCH_WANT_MEMFD_SECRET
+#define __NR_memfd_secret 447
+__SYSCALL(__NR_memfd_secret, sys_memfd_secret)
+#endif
+#define __NR_process_mrelease 448
+__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
+
+#define __NR_futex_waitv 449
+__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
+
+#define __NR_set_mempolicy_home_node 450
+__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
#undef __NR_syscalls
-#define __NR_syscalls 439
+#define __NR_syscalls 451
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index edba4d93e9e6..da5206517158 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -17,6 +17,8 @@
#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
#elif defined(__alpha__)
#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
+#elif defined(__loongarch__)
+#include "../../../arch/loongarch/include/uapi/asm/bitsperlong.h"
#else
#include <asm-generic/bitsperlong.h>
#endif
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
index 39acc149d843..ff52668abf8c 100644
--- a/tools/include/uapi/asm/bpf_perf_event.h
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -1,9 +1,13 @@
#if defined(__aarch64__)
#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__arc__)
+#include "../../arch/arc/include/uapi/asm/bpf_perf_event.h"
#elif defined(__s390__)
#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
#elif defined(__riscv)
#include "../../arch/riscv/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__loongarch__)
+#include "../../arch/loongarch/include/uapi/asm/bpf_perf_event.h"
#else
#include <uapi/asm-generic/bpf_perf_event.h>
#endif
diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h
index 637189ec1ab9..869379f91fe4 100644
--- a/tools/include/uapi/asm/errno.h
+++ b/tools/include/uapi/asm/errno.h
@@ -9,10 +9,8 @@
#include "../../../arch/alpha/include/uapi/asm/errno.h"
#elif defined(__mips__)
#include "../../../arch/mips/include/uapi/asm/errno.h"
-#elif defined(__ia64__)
-#include "../../../arch/ia64/include/uapi/asm/errno.h"
-#elif defined(__xtensa__)
-#include "../../../arch/xtensa/include/uapi/asm/errno.h"
+#elif defined(__hppa__)
+#include "../../../arch/parisc/include/uapi/asm/errno.h"
#else
#include <asm-generic/errno.h>
#endif
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 868bf7996c0f..642808520d92 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -1,11 +1,10 @@
-/**
- * \file drm.h
+/*
* Header for the Direct Rendering Manager
*
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
*
- * \par Acknowledgments:
- * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
+ * Acknowledgments:
+ * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
*/
/*
@@ -85,7 +84,7 @@ typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
-/**
+/*
* Cliprect.
*
* \warning: If you change this structure, make sure you change
@@ -101,7 +100,7 @@ struct drm_clip_rect {
unsigned short y2;
};
-/**
+/*
* Drawable information.
*/
struct drm_drawable_info {
@@ -109,7 +108,7 @@ struct drm_drawable_info {
struct drm_clip_rect *rects;
};
-/**
+/*
* Texture region,
*/
struct drm_tex_region {
@@ -120,7 +119,7 @@ struct drm_tex_region {
unsigned int age;
};
-/**
+/*
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
@@ -132,7 +131,7 @@ struct drm_hw_lock {
char padding[60]; /**< Pad to cache line */
};
-/**
+/*
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
@@ -149,7 +148,7 @@ struct drm_version {
char __user *desc; /**< User-space buffer to hold desc */
};
-/**
+/*
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
@@ -168,7 +167,7 @@ struct drm_block {
int unused;
};
-/**
+/*
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
@@ -183,7 +182,7 @@ struct drm_control {
int irq;
};
-/**
+/*
* Type of memory to map.
*/
enum drm_map_type {
@@ -195,7 +194,7 @@ enum drm_map_type {
_DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */
};
-/**
+/*
* Memory mapping flags.
*/
enum drm_map_flags {
@@ -214,7 +213,7 @@ struct drm_ctx_priv_map {
void *handle; /**< Handle of map */
};
-/**
+/*
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
@@ -231,7 +230,7 @@ struct drm_map {
/* Private data */
};
-/**
+/*
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
@@ -263,7 +262,7 @@ enum drm_stat_type {
/* Add to the *END* of the list */
};
-/**
+/*
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
@@ -274,7 +273,7 @@ struct drm_stats {
} data[15];
};
-/**
+/*
* Hardware locking flags.
*/
enum drm_lock_flags {
@@ -289,7 +288,7 @@ enum drm_lock_flags {
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
-/**
+/*
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
@@ -299,7 +298,7 @@ struct drm_lock {
enum drm_lock_flags flags;
};
-/**
+/*
* DMA flags
*
* \warning
@@ -328,7 +327,7 @@ enum drm_dma_flags {
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
-/**
+/*
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
@@ -351,7 +350,7 @@ struct drm_buf_desc {
*/
};
-/**
+/*
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
@@ -359,7 +358,7 @@ struct drm_buf_info {
struct drm_buf_desc __user *list;
};
-/**
+/*
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
@@ -367,7 +366,7 @@ struct drm_buf_free {
int __user *list;
};
-/**
+/*
* Buffer information
*
* \sa drm_buf_map.
@@ -379,7 +378,7 @@ struct drm_buf_pub {
void __user *address; /**< Address of buffer */
};
-/**
+/*
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
@@ -392,7 +391,7 @@ struct drm_buf_map {
struct drm_buf_pub __user *list; /**< Buffer information */
};
-/**
+/*
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
@@ -417,7 +416,7 @@ enum drm_ctx_flags {
_DRM_CONTEXT_2DONLY = 0x02
};
-/**
+/*
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
@@ -427,7 +426,7 @@ struct drm_ctx {
enum drm_ctx_flags flags;
};
-/**
+/*
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
@@ -435,14 +434,14 @@ struct drm_ctx_res {
struct drm_ctx __user *contexts;
};
-/**
+/*
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
-/**
+/*
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
@@ -456,14 +455,14 @@ struct drm_update_draw {
unsigned long long data;
};
-/**
+/*
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
-/**
+/*
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
@@ -505,7 +504,7 @@ struct drm_wait_vblank_reply {
long tval_usec;
};
-/**
+/*
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
@@ -518,7 +517,7 @@ union drm_wait_vblank {
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
-/**
+/*
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
@@ -528,7 +527,7 @@ struct drm_modeset_ctl {
__u32 cmd;
};
-/**
+/*
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
@@ -537,7 +536,7 @@ struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
-/**
+/*
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
@@ -549,7 +548,7 @@ struct drm_agp_buffer {
unsigned long physical; /**< Physical used by i810 */
};
-/**
+/*
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
@@ -559,7 +558,7 @@ struct drm_agp_binding {
unsigned long offset; /**< In bytes -- will round to page boundary */
};
-/**
+/*
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
@@ -580,7 +579,7 @@ struct drm_agp_info {
unsigned short id_device;
};
-/**
+/*
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
@@ -588,7 +587,7 @@ struct drm_scatter_gather {
unsigned long handle; /**< Used for mapping / unmapping */
};
-/**
+/*
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
@@ -598,14 +597,14 @@ struct drm_set_version {
int drm_dd_minor;
};
-/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
-/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+/* DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
@@ -614,7 +613,7 @@ struct drm_gem_flink {
__u32 name;
};
-/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+/* DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
@@ -626,33 +625,150 @@ struct drm_gem_open {
__u64 size;
};
+/**
+ * DRM_CAP_DUMB_BUFFER
+ *
+ * If set to 1, the driver supports creating dumb buffers via the
+ * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
+ */
#define DRM_CAP_DUMB_BUFFER 0x1
+/**
+ * DRM_CAP_VBLANK_HIGH_CRTC
+ *
+ * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
+ * in the high bits of &drm_wait_vblank_request.type.
+ *
+ * Starting from kernel version 2.6.39, this capability is always set to 1.
+ */
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
+/**
+ * DRM_CAP_DUMB_PREFERRED_DEPTH
+ *
+ * The preferred bit depth for dumb buffers.
+ *
+ * The bit depth is the number of bits used to indicate the color of a single
+ * pixel excluding any padding. This is different from the number of bits per
+ * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
+ * pixel.
+ *
+ * Note that this preference only applies to dumb buffers; it's irrelevant for
+ * other types of buffers.
+ */
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
+/**
+ * DRM_CAP_DUMB_PREFER_SHADOW
+ *
+ * If set to 1, the driver prefers userspace to render to a shadow buffer
+ * instead of directly rendering to a dumb buffer. For best speed, userspace
+ * should do streaming ordered memory copies into the dumb buffer and never
+ * read from it.
+ *
+ * Note that this preference only applies to dumb buffers; it's irrelevant for
+ * other types of buffers.
+ */
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
+/**
+ * DRM_CAP_PRIME
+ *
+ * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
+ * and &DRM_PRIME_CAP_EXPORT.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors. See
+ * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ */
#define DRM_CAP_PRIME 0x5
+/**
+ * DRM_PRIME_CAP_IMPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
+ * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ */
#define DRM_PRIME_CAP_IMPORT 0x1
+/**
+ * DRM_PRIME_CAP_EXPORT
+ *
+ * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
+ * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ */
#define DRM_PRIME_CAP_EXPORT 0x2
+/**
+ * DRM_CAP_TIMESTAMP_MONOTONIC
+ *
+ * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
+ * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
+ * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
+ * clocks.
+ *
+ * Starting from kernel version 2.6.39, the default value for this capability
+ * is 1. Starting from kernel version 4.15, this capability is always set to 1.
+ */
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+/**
+ * DRM_CAP_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ */
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
-/*
- * The CURSOR_WIDTH and CURSOR_HEIGHT capabilities return a valid widthxheight
- * combination for the hardware cursor. The intention is that a hardware
- * agnostic userspace can query a cursor plane size to use.
+/**
+ * DRM_CAP_CURSOR_WIDTH
+ *
+ * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
+ * width x height combination for the hardware cursor. The intention is that a
+ * hardware agnostic userspace can query a cursor plane size to use.
*
* Note that the cross-driver contract is to merely return a valid size;
* drivers are free to attach another meaning on top, eg. i915 returns the
* maximum plane size.
*/
#define DRM_CAP_CURSOR_WIDTH 0x8
+/**
+ * DRM_CAP_CURSOR_HEIGHT
+ *
+ * See &DRM_CAP_CURSOR_WIDTH.
+ */
#define DRM_CAP_CURSOR_HEIGHT 0x9
+/**
+ * DRM_CAP_ADDFB2_MODIFIERS
+ *
+ * If set to 1, the driver supports supplying modifiers in the
+ * &DRM_IOCTL_MODE_ADDFB2 ioctl.
+ */
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
+/**
+ * DRM_CAP_PAGE_FLIP_TARGET
+ *
+ * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
+ * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
+ * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
+ * ioctl.
+ */
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
+/**
+ * DRM_CAP_CRTC_IN_VBLANK_EVENT
+ *
+ * If set to 1, the kernel supports reporting the CRTC ID in
+ * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
+ * &DRM_EVENT_FLIP_COMPLETE events.
+ *
+ * Starting from kernel version 4.12, this capability is always set to 1.
+ */
#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
+/**
+ * DRM_CAP_SYNCOBJ
+ *
+ * If set to 1, the driver supports sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
#define DRM_CAP_SYNCOBJ 0x13
+/**
+ * DRM_CAP_SYNCOBJ_TIMELINE
+ *
+ * If set to 1, the driver supports timeline operations on sync objects. See
+ * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ */
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
-/** DRM_IOCTL_GET_CAP ioctl argument type */
+/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
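
For reference, querying any of the capabilities documented above is a single DRM_IOCTL_GET_CAP call on an open DRM fd. A minimal sketch, assuming the uapi header above; the fd and the chosen capability are illustrative:

.. code-block:: C

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Returns the value of a DRM_CAP_* capability, or 0 on failure. */
    static __u64 get_cap(int fd, __u64 capability)
    {
            struct drm_get_cap cap = { .capability = capability };

            if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap))
                    return 0;
            return cap.value;
    }

    /* e.g. get_cap(fd, DRM_CAP_DUMB_BUFFER) reports dumb-buffer support. */
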
@@ -661,9 +777,12 @@ struct drm_get_cap {
/**
* DRM_CLIENT_CAP_STEREO_3D
*
- * if set to 1, the DRM core will expose the stereo 3D capabilities of the
+ * If set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
- * drm_mode_modeinfo.
+ * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 3.13.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
@@ -672,13 +791,25 @@ struct drm_get_cap {
*
* If set to 1, the DRM core will expose all planes (overlay, primary, and
* cursor) to userspace.
+ *
+ * This capability has been introduced in kernel version 3.15. Starting from
+ * kernel version 3.17, this capability is always supported for all drivers.
*/
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2
/**
* DRM_CLIENT_CAP_ATOMIC
*
- * If set to 1, the DRM core will expose atomic properties to userspace
+ * If set to 1, the DRM core will expose atomic properties to userspace. This
+ * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
+ * &DRM_CLIENT_CAP_ASPECT_RATIO.
+ *
+ * If the driver doesn't support atomic mode-setting, enabling this capability
+ * will fail with -EOPNOTSUPP.
+ *
+ * This capability has been introduced in kernel version 4.0. Starting from
+ * kernel version 4.2, this capability is always supported for atomic-capable
+ * drivers.
*/
#define DRM_CLIENT_CAP_ATOMIC 3
@@ -686,6 +817,10 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_ASPECT_RATIO
*
* If set to 1, the DRM core will provide aspect ratio information in modes.
+ * See ``DRM_MODE_FLAG_PIC_AR_*``.
+ *
+ * This capability is always supported for all drivers starting from kernel
+ * version 4.18.
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
@@ -693,12 +828,15 @@ struct drm_get_cap {
* DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
*
* If set to 1, the DRM core will expose special connectors to be used for
- * writing back to memory the scene setup in the commit. Depends on client
- * also supporting DRM_CLIENT_CAP_ATOMIC
+ * writing back to memory the scene setup in the commit. The client must enable
+ * &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * This capability is always supported for atomic-capable drivers starting from
+ * kernel version 4.19.
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
-/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
+/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
__u64 value;
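
As a sketch of how the client capabilities above are enabled through DRM_IOCTL_SET_CLIENT_CAP (the fd is assumed to be an already-open DRM device; the only error handling shown is the documented -EOPNOTSUPP case):

.. code-block:: C

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    /* Opt in to atomic mode-setting; implies UNIVERSAL_PLANES and ASPECT_RATIO. */
    static int enable_atomic(int fd)
    {
            struct drm_set_client_cap cap = {
                    .capability = DRM_CLIENT_CAP_ATOMIC,
                    .value = 1,
            };

            if (ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap))
                    return -errno;  /* -EOPNOTSUPP if the driver is not atomic-capable */
            return 0;
    }
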
@@ -912,6 +1050,16 @@ extern "C" {
#define DRM_IOCTL_MODE_GETPROPBLOB DRM_IOWR(0xAC, struct drm_mode_get_blob)
#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
+/**
+ * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
+ *
+ * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * Warning: removing a framebuffer currently in-use on an enabled plane will
+ * disable that plane. The CRTC the plane is linked to may also be disabled
+ * (depending on driver capabilities).
+ */
#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
@@ -949,6 +1097,26 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
/**
+ * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
+ *
+ * This queries metadata about a framebuffer. User-space fills
+ * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
+ * struct as the output.
+ *
+ * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
+ * will be filled with GEM buffer handles. Planes are valid until one has a
+ * zero handle -- this can be used to compute the number of planes.
+ *
+ * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
+ * until one has a zero &drm_mode_fb_cmd2.pitches.
+ *
+ * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
+ * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
+ * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ */
+#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+
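A sketch of the plane-counting rule described for DRM_IOCTL_MODE_GETFB2 above (struct drm_mode_fb_cmd2 comes from the same header set but is not shown in this hunk; whether handles or pitches terminate the list depends on the caller's privileges, as documented):

.. code-block:: C

    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static int fb_num_planes(int fd, __u32 fb_id)
    {
            struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
            int n = 0;

            if (ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb))
                    return -1;
            /* Handles are zeroed for unprivileged callers; fall back to pitches. */
            while (n < 4 && (fb.handles[n] || fb.pitches[n]))
                    n++;
            return n;
    }
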
+/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
* Generic IOCTLS restart at 0xA0.
@@ -959,7 +1127,7 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/**
+/*
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 829c0a48577f..8df261c5ab9b 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -55,15 +55,15 @@ extern "C" {
* cause the related events to not be seen.
*
* I915_RESET_UEVENT - Event is generated just before an attempt to reset the
- * the GPU. The value supplied with the event is always 1. NOTE: Disable
+ * GPU. The value supplied with the event is always 1. NOTE: Disable
* reset via module parameter will cause this event to not be seen.
*/
#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
#define I915_ERROR_UEVENT "ERROR"
#define I915_RESET_UEVENT "RESET"
-/*
- * i915_user_extension: Base class for defining a chain of extensions
+/**
+ * struct i915_user_extension - Base class for defining a chain of extensions
*
* Many interfaces need to grow over time. In most cases we can simply
* extend the struct and have userspace pass in more data. Another option,
@@ -76,12 +76,58 @@ extern "C" {
* increasing complexity, and for large parts of that interface to be
* entirely optional. The downside is more pointer chasing; chasing across
* the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ * struct i915_user_extension ext3 {
+ * .next_extension = 0, // end
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext2 {
+ * .next_extension = (uintptr_t)&ext3,
+ * .name = ...,
+ * };
+ * struct i915_user_extension ext1 {
+ * .next_extension = (uintptr_t)&ext2,
+ * .name = ...,
+ * };
+ *
+ * Typically the struct i915_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ *
*/
struct i915_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct i915_user_extension, or zero if the end.
+ */
__u64 next_extension;
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct i915_user_extension.
+ */
__u32 name;
- __u32 flags; /* All undefined bits must be zero. */
- __u32 rsvd[4]; /* Reserved for future use; must be zero. */
+ /**
+ * @flags: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 flags;
+ /**
+ * @rsvd: MBZ
+ *
+ * Reserved for future use; must be zero.
+ */
+ __u32 rsvd[4];
};
/*
@@ -108,25 +154,77 @@ enum i915_mocs_table_index {
I915_MOCS_CACHED,
};
-/*
+/**
+ * enum drm_i915_gem_engine_class - uapi engine type enumeration
+ *
* Different engines serve different roles, and there may be more than one
- * engine serving each role. enum drm_i915_gem_engine_class provides a
- * classification of the role of the engine, which may be used when requesting
- * operations to be performed on a certain subset of engines, or for providing
- * information about that group.
+ * engine serving each role. This enum provides a classification of the role
+ * of the engine, which may be used when requesting operations to be performed
+ * on a certain subset of engines, or for providing information about that
+ * group.
*/
enum drm_i915_gem_engine_class {
+ /**
+ * @I915_ENGINE_CLASS_RENDER:
+ *
+ * Render engines support instructions used for 3D, Compute (GPGPU),
+ * and programmable media workloads. These instructions fetch data and
+ * dispatch individual work items to threads that operate in parallel.
+ * The threads run small programs (called "kernels" or "shaders") on
+ * the GPU's execution units (EUs).
+ */
I915_ENGINE_CLASS_RENDER = 0,
+
+ /**
+ * @I915_ENGINE_CLASS_COPY:
+ *
+ * Copy engines (also referred to as "blitters") support instructions
+ * that move blocks of data from one location in memory to another,
+ * or that fill a specified location of memory with fixed data.
+ * Copy engines can perform pre-defined logical or bitwise operations
+ * on the source, destination, or pattern data.
+ */
I915_ENGINE_CLASS_COPY = 1,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO:
+ *
+ * Video engines (also referred to as "bit stream decode" (BSD) or
+ * "vdbox") support instructions that perform fixed-function media
+ * decode and encode.
+ */
I915_ENGINE_CLASS_VIDEO = 2,
+
+ /**
+ * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
+ *
+ * Video enhancement engines (also referred to as "vebox") support
+ * instructions related to image enhancement.
+ */
I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
- /* should be kept compact */
+ /**
+ * @I915_ENGINE_CLASS_COMPUTE:
+ *
+ * Compute engines support a subset of the instructions available
+ * on render engines: compute engines support Compute (GPGPU) and
+ * programmable media workloads, but do not support the 3D pipeline.
+ */
+ I915_ENGINE_CLASS_COMPUTE = 4,
+
+ /* Values in this enum should be kept compact. */
+ /**
+ * @I915_ENGINE_CLASS_INVALID:
+ *
+ * Placeholder value to represent an invalid engine class assignment.
+ */
I915_ENGINE_CLASS_INVALID = -1
};
-/*
+/**
+ * struct i915_engine_class_instance - Engine class/instance identifier
+ *
* There may be more than one engine fulfilling any role within the system.
* Each engine of a class is given a unique instance number and therefore
* any engine can be specified by its class:instance tuplet. APIs that allow
@@ -134,10 +232,21 @@ enum drm_i915_gem_engine_class {
* for this identification.
*/
struct i915_engine_class_instance {
- __u16 engine_class; /* see enum drm_i915_gem_engine_class */
- __u16 engine_instance;
+ /**
+ * @engine_class:
+ *
+ * Engine class from enum drm_i915_gem_engine_class
+ */
+ __u16 engine_class;
#define I915_ENGINE_CLASS_INVALID_NONE -1
#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
+
+ /**
+ * @engine_instance:
+ *
+ * Engine instance.
+ */
+ __u16 engine_instance;
};
/**
@@ -177,8 +286,9 @@ enum drm_i915_pmu_engine_sample {
#define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
#define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
#define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
+#define I915_PMU_SOFTWARE_GT_AWAKE_TIME __I915_PMU_OTHER(4)
-#define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
+#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
@@ -359,6 +469,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_QUERY 0x39
#define DRM_I915_GEM_VM_CREATE 0x3a
#define DRM_I915_GEM_VM_DESTROY 0x3b
+#define DRM_I915_GEM_CREATE_EXT 0x3c
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -391,6 +502,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_CREATE_EXT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
@@ -523,7 +635,32 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+/*
+ * Indicates the 2k user priority levels are statically mapped into 3 buckets as
+ * follows:
+ *
+ * -1k to -1 Low priority
+ * 0 Normal priority
+ * 1 to 1k Highest priority
+ */
+#define I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP (1ul << 5)
+/*
+ * Query the status of HuC load.
+ *
+ * The query can fail in the following scenarios with the listed error codes:
+ * -ENODEV if HuC is not present on this platform,
+ * -EOPNOTSUPP if HuC firmware usage is disabled,
+ * -ENOPKG if HuC firmware fetch failed,
+ * -ENOEXEC if HuC firmware is invalid or mismatched,
+ * -ENOMEM if i915 failed to prepare the FW objects for transfer to the uC,
+ * -EIO if the FW transfer or the FW authentication failed.
+ *
+ * If the IOCTL is successful, the returned parameter will be set to one of the
+ * following values:
+ * * 0 if HuC firmware load is not complete,
+ * * 1 if HuC firmware is authenticated and running.
+ */
#define I915_PARAM_HUC_STATUS 42
/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
@@ -619,16 +756,44 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_PERF_REVISION 54
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
+ * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
+ * I915_EXEC_USE_EXTENSIONS.
+ */
+#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
+
+/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
+#define I915_PARAM_HAS_USERPTR_PROBE 56
+
+/*
+ * Frequency of the timestamps in OA reports. This used to be the same as the CS
+ * timestamp frequency, but differs on some platforms.
+ */
+#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57
+
/* Must be kept compact -- no holes and well documented */
-typedef struct drm_i915_getparam {
+/**
+ * struct drm_i915_getparam - Driver parameter query structure.
+ */
+struct drm_i915_getparam {
+ /** @param: Driver parameter to query. */
__s32 param;
- /*
+
+ /**
+ * @value: Address of memory where queried value should be put.
+ *
* WARNING: Using pointers instead of fixed-size u64 means we need to write
* compat32 code. Don't repeat this mistake.
*/
int __user *value;
-} drm_i915_getparam_t;
+};
+
+/**
+ * typedef drm_i915_getparam_t - Driver parameter query structure.
+ * See struct drm_i915_getparam.
+ */
+typedef struct drm_i915_getparam drm_i915_getparam_t;
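
A minimal sketch of a parameter query using the structure above, here for the I915_PARAM_HUC_STATUS case documented earlier (assumes an open i915 DRM fd; the error codes are the ones listed for the HuC query):

.. code-block:: C

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Returns 1 if HuC firmware is authenticated and running, 0 if not yet,
     * or a negative errno from the list documented for I915_PARAM_HUC_STATUS. */
    static int huc_running(int fd)
    {
            int value = 0;
            drm_i915_getparam_t gp = {
                    .param = I915_PARAM_HUC_STATUS,
                    .value = &value,
            };

            if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return -errno;
            return value;
    }
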
/* Ioctl to set kernel params:
*/
@@ -794,45 +959,113 @@ struct drm_i915_gem_mmap_gtt {
__u64 offset;
};
+/**
+ * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
+ *
+ * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
+ * and is used to retrieve the fake offset to mmap an object specified by &handle.
+ *
+ * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
+ * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
+ * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
+ */
struct drm_i915_gem_mmap_offset {
- /** Handle for the object being mapped. */
+ /** @handle: Handle for the object being mapped. */
__u32 handle;
+ /** @pad: Must be zero */
__u32 pad;
/**
- * Fake offset to use for subsequent mmap call
+ * @offset: The fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
/**
- * Flags for extended behaviour.
+ * @flags: Flags for extended behaviour.
*
- * It is mandatory that one of the MMAP_OFFSET types
- * (GTT, WC, WB, UC, etc) should be included.
+ * It is mandatory that one of the `MMAP_OFFSET` types
+ * should be included:
+ *
+ * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
+ * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
+ * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
+ * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
+ *
+ * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
+ * type. On devices without local memory, this caching mode is invalid.
+ *
+ * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
+ * be used, depending on the object placement on creation. WB will be used
+ * when the object can only exist in system memory, WC otherwise.
*/
__u64 flags;
-#define I915_MMAP_OFFSET_GTT 0
-#define I915_MMAP_OFFSET_WC 1
-#define I915_MMAP_OFFSET_WB 2
-#define I915_MMAP_OFFSET_UC 3
- /*
- * Zero-terminated chain of extensions.
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+#define I915_MMAP_OFFSET_FIXED 4
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
*
* No current extensions defined; mbz.
*/
__u64 extensions;
};
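
A sketch of the two-step mapping flow this struct describes: fetch the fake offset, then mmap the DRM fd at that offset. The handle, size, and the WB caching choice are illustrative; on discrete parts only I915_MMAP_OFFSET_FIXED is valid, as noted above:

.. code-block:: C

    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    static void *map_bo(int fd, __u32 handle, size_t size)
    {
            struct drm_i915_gem_mmap_offset arg = {
                    .handle = handle,
                    .flags = I915_MMAP_OFFSET_WB,  /* I915_MMAP_OFFSET_FIXED on discrete */
            };

            if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
                    return MAP_FAILED;
            return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, arg.offset);
    }
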
+/**
+ * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in
+ * preparation for accessing the pages via some CPU domain.
+ *
+ * Specifying a new write or read domain will flush the object out of the
+ * previous domain (if required), before then updating the object's domain
+ * tracking with the new domain.
+ *
+ * Note this might involve waiting for the object first if it is still active on
+ * the GPU.
+ *
+ * Supported values for @read_domains and @write_domain:
+ *
+ * - I915_GEM_DOMAIN_WC: Uncached write-combined domain
+ * - I915_GEM_DOMAIN_CPU: CPU cache domain
+ * - I915_GEM_DOMAIN_GTT: Mappable aperture domain
+ *
+ * All other domains are rejected.
+ *
+ * Note that for discrete, starting from DG1, this is no longer supported, and
+ * is instead rejected. On such platforms the CPU domain is effectively static,
+ * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
+ * which can't be set explicitly and instead depends on the object placements,
+ * as per the below.
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ */
struct drm_i915_gem_set_domain {
- /** Handle for the object */
+ /** @handle: Handle for the object. */
__u32 handle;
- /** New read domains */
+ /** @read_domains: New read domains. */
__u32 read_domains;
- /** New write domain */
+ /**
+ * @write_domain: New write domain.
+ *
+ * Note that having something in the write domain implies it's in the
+ * read domain, and only that read domain.
+ */
__u32 write_domain;
};
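
A sketch of moving an object to the CPU domain ahead of CPU reads and writes, per the rules above (the handle is illustrative; on discrete platforms from DG1 onwards the ioctl is rejected, as documented):

.. code-block:: C

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int set_cpu_domain(int fd, __u32 handle)
    {
            struct drm_i915_gem_set_domain sd = {
                    .handle = handle,
                    .read_domains = I915_GEM_DOMAIN_CPU,
                    .write_domain = I915_GEM_DOMAIN_CPU,
            };

            return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
    }
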
@@ -936,6 +1169,7 @@ struct drm_i915_gem_exec_object {
__u64 offset;
};
+/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
@@ -982,10 +1216,16 @@ struct drm_i915_gem_exec_object2 {
/**
* When the EXEC_OBJECT_PINNED flag is specified this is populated by
* the user with the GTT offset at which this object will be pinned.
+ *
* When the I915_EXEC_NO_RELOC flag is specified this must contain the
* presumed_offset of the object.
+ *
* During execbuffer2 the kernel populates it with the value of the
* current GTT offset of the object, for future presumed_offset writes.
+ *
+ * See struct drm_i915_gem_create_ext for the rules when dealing with
+ * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
+ * minimum page sizes, like DG2.
*/
__u64 offset;
@@ -1034,38 +1274,119 @@ struct drm_i915_gem_exec_object2 {
__u64 rsvd2;
};
+/**
+ * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
+ * ioctl.
+ *
+ * The request will wait for the input fence to signal before submission.
+ *
+ * The returned output fence will be signaled after the completion of the
+ * request.
+ */
struct drm_i915_gem_exec_fence {
- /**
- * User's handle for a drm_syncobj to wait on or signal.
- */
+ /** @handle: User's handle for a drm_syncobj to wait on or signal. */
__u32 handle;
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_EXEC_FENCE_WAIT:
+ * Wait for the input fence before request submission.
+ *
+ * I915_EXEC_FENCE_SIGNAL:
+ * Return request completion fence as output
+ */
+ __u32 flags;
#define I915_EXEC_FENCE_WAIT (1<<0)
#define I915_EXEC_FENCE_SIGNAL (1<<1)
#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
- __u32 flags;
};
-struct drm_i915_gem_execbuffer2 {
+/**
+ * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
+ * for execbuf ioctl.
+ *
+ * This structure describes an array of drm_syncobj and associated points for
+ * timeline variants of drm_syncobj. It is invalid to append this structure to
+ * the execbuf if I915_EXEC_FENCE_ARRAY is set.
+ */
+struct drm_i915_gem_execbuffer_ext_timeline_fences {
+#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
/**
- * List of gem_exec_object2 structs
+ * @fence_count: Number of elements in the @handles_ptr & @value_ptr
+ * arrays.
*/
+ __u64 fence_count;
+
+ /**
+ * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
+ * of length @fence_count.
+ */
+ __u64 handles_ptr;
+
+ /**
+ * @values_ptr: Pointer to an array of u64 values of length
+ * @fence_count.
+ * Values must be 0 for a binary drm_syncobj. A value of 0 for a
+ * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
+ * binary one.
+ */
+ __u64 values_ptr;
+};
+
+/**
+ * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
+ * ioctl.
+ */
+struct drm_i915_gem_execbuffer2 {
+ /** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
__u64 buffers_ptr;
+
+ /** @buffer_count: Number of elements in @buffers_ptr array */
__u32 buffer_count;
- /** Offset in the batchbuffer to start execution from. */
+ /**
+ * @batch_start_offset: Offset in the batchbuffer to start execution
+ * from.
+ */
__u32 batch_start_offset;
- /** Bytes used in batchbuffer from batch_start_offset */
+
+ /**
+ * @batch_len: Length in bytes of the batch buffer, starting from the
+ * @batch_start_offset. If 0, length is assumed to be the batch buffer
+ * object size.
+ */
__u32 batch_len;
+
+ /** @DR1: deprecated */
__u32 DR1;
+
+ /** @DR4: deprecated */
__u32 DR4;
+
+ /** @num_cliprects: See @cliprects_ptr */
__u32 num_cliprects;
+
/**
- * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
- * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
- * struct drm_i915_gem_exec_fence *fences.
+ * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
+ *
+ * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
+ * I915_EXEC_USE_EXTENSIONS flags are not set.
+ *
+ * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
+ * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
+ * array.
+ *
+ * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
+ * single &i915_user_extension and num_cliprects is 0.
*/
__u64 cliprects_ptr;
+
+ /** @flags: Execbuf flags */
+ __u64 flags;
#define I915_EXEC_RING_MASK (0x3f)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
@@ -1083,10 +1404,6 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
- __u64 flags;
- __u64 rsvd1; /* now used for context info */
- __u64 rsvd2;
-};
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
@@ -1181,7 +1498,30 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_SUBMIT (1 << 20)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
+/*
+ * Setting I915_EXEC_USE_EXTENSIONS implies that
+ * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to a linked
+ * list of i915_user_extension. Each i915_user_extension node is the base of a
+ * larger structure. The supported structures are listed in the
+ * drm_i915_gem_execbuffer_ext enum.
+ */
+#define I915_EXEC_USE_EXTENSIONS (1 << 21)
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
+
+ /** @rsvd1: Context id */
+ __u64 rsvd1;
+
+ /**
+ * @rsvd2: in and out sync_file file descriptors.
+ *
+ * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
+ * lower 32 bits of this field will have the in sync_file fd (input).
+ *
+ * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
+ * field will have the out sync_file fd (output).
+ */
+ __u64 rsvd2;
+};
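
Putting the pieces above together, a sketch of a submission that signals a timeline syncobj at a given point via I915_EXEC_USE_EXTENSIONS (objects, ctx_id, the syncobj handle and the point value are placeholders):

.. code-block:: C

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int submit_with_timeline_signal(int fd, __u32 ctx_id,
                                           struct drm_i915_gem_exec_object2 *objects,
                                           unsigned int nobj,
                                           __u32 syncobj, __u64 point)
    {
            struct drm_i915_gem_exec_fence fence = {
                    .handle = syncobj,
                    .flags = I915_EXEC_FENCE_SIGNAL,
            };
            struct drm_i915_gem_execbuffer_ext_timeline_fences ext = {
                    .base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES,
                    .fence_count = 1,
                    .handles_ptr = (uintptr_t)&fence,
                    .values_ptr = (uintptr_t)&point,
            };
            struct drm_i915_gem_execbuffer2 execbuf = {
                    .buffers_ptr = (uintptr_t)objects,
                    .buffer_count = nobj,
                    .cliprects_ptr = (uintptr_t)&ext,  /* extension chain, see above */
                    .flags = I915_EXEC_USE_EXTENSIONS,
                    .rsvd1 = ctx_id,
            };

            return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    }
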
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1245,12 +1585,11 @@ struct drm_i915_gem_busy {
* reading from the object simultaneously.
*
* The value of each engine class is the same as specified in the
- * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+ * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
* I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
- * reported as active itself. Some hardware may have parallel
- * execution engines, e.g. multiple media engines, which are
- * mapped to the same class identifier and so are not separately
- * reported for busyness.
+ * Some hardware may have parallel execution engines, e.g. multiple
+ * media engines, which are mapped to the same class identifier and so
+ * are not separately reported for busyness.
*
* Caveat emptor:
* Only the boolean result of this query is reliable; that is whether
@@ -1261,49 +1600,91 @@ struct drm_i915_gem_busy {
};
/**
- * I915_CACHING_NONE
- *
- * GPU access is not coherent with cpu caches. Default for machines without an
- * LLC.
+ * struct drm_i915_gem_caching - Set or get the caching for given object
+ * handle.
+ *
+ * Allow userspace to control the GTT caching bits for a given object when the
+ * object is later mapped through the ppGTT (or GGTT on older platforms lacking
+ * ppGTT support, or if the object is used for scanout). Note that this might
+ * require unbinding the object from the GTT first, if its current caching value
+ * doesn't match.
+ *
+ * Note that this all changes on discrete platforms, starting from DG1, the
+ * set/get caching is no longer supported, and is now rejected. Instead the CPU
+ * caching attributes (WB vs WC) will become an immutable creation time property
+ * for the object, along with the GTT caching level. For now we don't expose any
+ * new uAPI for this, instead on DG1 this is all implicit, although this largely
+ * shouldn't matter since DG1 is coherent by default (without any way of
+ * controlling it).
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ * - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ * contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ * mapped as write-combined only.
+ *
+ * - Everything else is always allocated and mapped as write-back, with the
+ * guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ *
+ * Side note: Part of the reason for this is that changing the at-allocation-time CPU
+ * caching attributes for the pages might be required (and is expensive) if we
+ * need to then CPU map the pages later with different caching attributes. This
+ * inconsistent caching behaviour, while supported on x86, is not universally
+ * supported on other architectures. So for simplicity we opt for setting
+ * everything at creation time, whilst also making it immutable, on discrete
+ * platforms.
*/
-#define I915_CACHING_NONE 0
-/**
- * I915_CACHING_CACHED
- *
- * GPU access is coherent with cpu caches and furthermore the data is cached in
- * last-level caches shared between cpu cores and the gpu GT. Default on
- * machines with HAS_LLC.
- */
-#define I915_CACHING_CACHED 1
-/**
- * I915_CACHING_DISPLAY
- *
- * Special GPU caching mode which is coherent with the scanout engines.
- * Transparently falls back to I915_CACHING_NONE on platforms where no special
- * cache mode (like write-through or gfdt flushing) is available. The kernel
- * automatically sets this mode when using a buffer as a scanout target.
- * Userspace can manually set this mode to avoid a costly stall and clflush in
- * the hotpath of drawing the first frame.
- */
-#define I915_CACHING_DISPLAY 2
-
struct drm_i915_gem_caching {
/**
- * Handle of the buffer to set/get the caching level of. */
+ * @handle: Handle of the buffer to set/get the caching level.
+ */
__u32 handle;
/**
- * Cacheing level to apply or return value
+ * @caching: The GTT caching level to apply or possible return value.
+ *
+ * The supported @caching values:
*
- * bits0-15 are for generic caching control (i.e. the above defined
- * values). bits16-31 are reserved for platform-specific variations
- * (e.g. l3$ caching on gen7). */
+ * I915_CACHING_NONE:
+ *
+ * GPU access is not coherent with CPU caches. Default for machines
+ * without an LLC. This means manual flushing might be needed, if we
+ * want GPU access to be coherent.
+ *
+ * I915_CACHING_CACHED:
+ *
+ * GPU access is coherent with CPU caches and furthermore the data is
+ * cached in last-level caches shared between CPU cores and the GPU GT.
+ *
+ * I915_CACHING_DISPLAY:
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no
+ * special cache mode (like write-through or gfdt flushing) is
+ * available. The kernel automatically sets this mode when using a
+ * buffer as a scanout target. Userspace can manually set this mode to
+ * avoid a costly stall and clflush in the hotpath of drawing the first
+ * frame.
+ */
+#define I915_CACHING_NONE 0
+#define I915_CACHING_CACHED 1
+#define I915_CACHING_DISPLAY 2
__u32 caching;
};
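
A sketch of requesting LLC caching for an object via the set-caching ioctl (handle is illustrative; rejected on discrete platforms from DG1 onwards, as described above):

.. code-block:: C

    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int make_llc_cached(int fd, __u32 handle)
    {
            struct drm_i915_gem_caching arg = {
                    .handle = handle,
                    .caching = I915_CACHING_CACHED,
            };

            return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
    }
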
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
+/*
+ * Do not add new tiling types here. The I915_TILING_* values are for
+ * de-tiling fence registers that no longer exist on modern platforms. Although
+ * the hardware may support new types of tiling in general (e.g., Tile4), we
+ * do not need to add them to the uapi that is specific to now-defunct ioctls.
+ */
#define I915_TILING_LAST I915_TILING_Y
#define I915_BIT_6_SWIZZLE_NONE 0
@@ -1521,21 +1902,64 @@ struct drm_i915_gem_context_create {
__u32 pad;
};
+/**
+ * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
+ */
struct drm_i915_gem_context_create_ext {
- __u32 ctx_id; /* output: id of new context*/
+ /** @ctx_id: Id of the created context (output) */
+ __u32 ctx_id;
+
+ /**
+ * @flags: Supported flags are:
+ *
+ * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
+ *
+ * Extensions may be appended to this structure and the driver must check
+ * for those. See @extensions.
+ *
+ * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE:
+ *
+ * The created context will have a single timeline.
+ */
__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
+
+ /**
+ * @extensions: Zero-terminated chain of extensions.
+ *
+ * I915_CONTEXT_CREATE_EXT_SETPARAM:
+ * Context parameter to set or query during context creation.
+ * See struct drm_i915_gem_context_create_ext_setparam.
+ *
+ * I915_CONTEXT_CREATE_EXT_CLONE:
+ * This extension has been removed. On the off chance someone somewhere
+ * has attempted to use it, never re-use this extension number.
+ */
__u64 extensions;
+#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
};
+/**
+ * struct drm_i915_gem_context_param - Context parameter to set or query.
+ */
struct drm_i915_gem_context_param {
+ /** @ctx_id: Context id */
__u32 ctx_id;
+
+ /** @size: Size of the parameter @value */
__u32 size;
+
+ /** @param: Parameter to set or query */
__u64 param;
#define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
+/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed. On the off chance
+ * someone somewhere has attempted to use it, never re-use this context
+ * param number.
+ */
#define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
#define I915_CONTEXT_PARAM_GTT_SIZE 0x3
#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
@@ -1602,6 +2026,7 @@ struct drm_i915_gem_context_param {
* Extensions:
* i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ * i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa
@@ -1619,12 +2044,67 @@ struct drm_i915_gem_context_param {
* By default, new contexts allow persistence.
*/
#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
+
+/* This API has been removed. On the off chance someone somewhere has
+ * attempted to use it, never re-use this context param number.
+ */
+#define I915_CONTEXT_PARAM_RINGSIZE 0xc
+
+/*
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ *
+ * Mark that the context makes use of protected content, which will result
+ * in the context being invalidated when the protected content session is.
+ * Given that the protected content session is killed on suspend, the device
+ * is kept awake for the lifetime of a protected context, so the user should
+ * make sure to dispose of them once done.
+ * This flag can only be set at context creation time and, when set to true,
+ * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
+ * to false. This flag can't be set to true in conjunction with setting the
+ * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_context_create_ext_setparam p_protected = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
+ * .value = 1,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_norecover = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * .next_extension = to_user_pointer(&p_protected),
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_RECOVERABLE,
+ * .value = 0,
+ * }
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_norecover);
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * In addition to the normal failure cases, setting this flag during context
+ * creation can result in the following errors:
+ *
+ * -ENODEV: feature not available
+ * -EPERM: trying to mark a recoverable or not bannable context as protected
+ */
+#define I915_CONTEXT_PARAM_PROTECTED_CONTENT 0xd
/* Must be kept compact -- no holes and well documented */
+ /** @value: Context parameter value to be set or queried */
__u64 value;
};
-/**
+/*
* Context SSEU programming
*
* It may be necessary for either functional or performance reason to configure
@@ -1683,6 +2163,69 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+/**
+ * DOC: Virtual Engine uAPI
+ *
+ * Virtual engine is a concept where userspace is able to configure a set of
+ * physical engines, submit a batch buffer, and let the driver execute it on any
+ * engine from the set as it sees fit.
+ *
+ * This is primarily useful on parts which have multiple instances of a same
+ * class engine, like for example GT3+ Skylake parts with their two VCS engines.
+ *
+ * For instance userspace can enumerate all engines of a certain class using the
+ * previously described `Engine Discovery uAPI`_. After that userspace can
+ * create a GEM context with a placeholder slot for the virtual engine (using
+ * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
+ * and instance respectively) and finally using the
+ * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
+ * the same reserved slot.
+ *
+ * Example of creating a virtual engine and submitting a batch buffer to it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
+ * .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
+ * .engine_index = 0, // Place this virtual engine into engine map slot 0
+ * .num_siblings = 2,
+ * .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
+ * { I915_ENGINE_CLASS_VIDEO, 1 }, },
+ * };
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ * .engines = { { I915_ENGINE_CLASS_INVALID,
+ * I915_ENGINE_CLASS_INVALID_NONE } },
+ * .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines);
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // Now we have created a GEM context with its engine map containing a
+ * // single virtual engine. Submissions to this slot can go either to
+ * // vcs0 or vcs1, depending on the load balancing algorithm used inside
+ * // the driver. The load balancing is dynamic from one batch buffer to
+ * // another and transparent to userspace.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0 which is the virtual engine
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
/*
* i915_context_engines_load_balance:
*
@@ -1708,7 +2251,7 @@ struct i915_context_engines_load_balance {
__u64 mbz64; /* reserved for future use; must be zero */
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
@@ -1746,7 +2289,7 @@ struct i915_context_engines_bond {
__u64 flags; /* all undefined flags must be zero */
__u64 mbz64[4]; /* reserved for future use; must be zero */
- struct i915_engine_class_instance engines[0];
+ struct i915_engine_class_instance engines[];
} __attribute__((packed));
#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
@@ -1759,10 +2302,195 @@ struct i915_context_engines_bond {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
+/**
+ * struct i915_context_engines_parallel_submit - Configure engine for
+ * parallel submission.
+ *
+ * Setup a slot in the context engine map to allow multiple BBs to be submitted
+ * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
+ * in parallel. Multiple hardware contexts are created internally in the i915 to
+ * run these BBs. Once a slot is configured for N BBs only N BBs can be
+ * submitted in each execbuf IOCTL and this is implicit behavior, e.g. the user
+ * doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
+ * many BBs there are based on the slot's configuration. The N BBs are the last
+ * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
+ *
+ * The default placement behavior is to create implicit bonds between each
+ * context if each context maps to more than 1 physical engine (e.g. context is
+ * a virtual engine). Also we only allow contexts of same engine class and these
+ * contexts must be in logically contiguous order. Examples of the placement
+ * behavior are described below. Lastly, the default is to not allow BBs to be
+ * preempted mid-batch. Rather insert coordinated preemption points on all
+ * hardware contexts between each set of BBs. Flags could be added in the future
+ * to change both of these default behaviors.
+ *
+ * Returns -EINVAL if hardware context placement configuration is invalid or if
+ * the placement configuration isn't supported on the platform / submission
+ * interface.
+ * Returns -ENODEV if extension isn't supported on the platform / submission
+ * interface.
+ *
+ * .. code-block:: none
+ *
+ * Examples syntax:
+ * CS[X] = generic engine of same class, logical instance X
+ * INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *
+ * Example 1 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=1,
+ * engines=CS[0],CS[1])
+ *
+ * Results in the following valid placement:
+ * CS[0], CS[1]
+ *
+ * Example 2 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[2],CS[1],CS[3])
+ *
+ * Results in the following valid placements:
+ * CS[0], CS[1]
+ * CS[2], CS[3]
+ *
+ * This can be thought of as two virtual engines, each containing two
+ * engines thereby making a 2D array. However, there are bonds tying the
+ * entries together and placing restrictions on how they can be scheduled.
+ * Specifically, the scheduler can choose only vertical columns from the 2D
+ * array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
+ * scheduler wants to submit to CS[0], it must also choose CS[1] and vice
+ * versa. Same for CS[2] requires also using CS[3].
+ * VE[0] = CS[0], CS[2]
+ * VE[1] = CS[1], CS[3]
+ *
+ * Example 3 pseudo code:
+ * set_engines(INVALID)
+ * set_parallel(engine_index=0, width=2, num_siblings=2,
+ * engines=CS[0],CS[1],CS[1],CS[3])
+ *
+ * Results in the following valid and invalid placements:
+ * CS[0], CS[1]
+ * CS[1], CS[3] - Not logically contiguous, return -EINVAL
+ */
+struct i915_context_engines_parallel_submit {
+ /**
+ * @base: base user extension.
+ */
+ struct i915_user_extension base;
+
+ /**
+ * @engine_index: slot for parallel engine
+ */
+ __u16 engine_index;
+
+ /**
+ * @width: number of contexts per parallel engine or in other words the
+ * number of batches in each submission
+ */
+ __u16 width;
+
+ /**
+ * @num_siblings: number of siblings per context or in other words the
+ * number of possible placements for each submission
+ */
+ __u16 num_siblings;
+
+ /**
+ * @mbz16: reserved for future use; must be zero
+ */
+ __u16 mbz16;
+
+ /**
+ * @flags: all undefined flags must be zero; currently no flags are defined
+ */
+ __u64 flags;
+
+ /**
+ * @mbz64: reserved for future use; must be zero
+ */
+ __u64 mbz64[3];
+
+ /**
+ * @engines: 2-d array of engine instances to configure parallel engine
+ *
+ * length = width (i) * num_siblings (j)
+ * index = j + i * num_siblings
+ */
+ struct i915_engine_class_instance engines[];
+
+} __packed;
+
+#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 width; \
+ __u16 num_siblings; \
+ __u16 mbz16; \
+ __u64 flags; \
+ __u64 mbz64[3]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/**
+ * DOC: Context Engine Map uAPI
+ *
+ * Context engine map is a new way of addressing engines when submitting batch-
+ * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
+ * inside the flags field of `struct drm_i915_gem_execbuffer2`.
+ *
+ * To use it, created GEM contexts need to be configured with a list of engines
+ * the user is intending to submit to. This is accomplished using the
+ * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
+ * i915_context_param_engines`.
+ *
+ * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
+ * configured map.
+ *
+ * Example of creating such context and submitting against it:
+ *
+ * .. code-block:: C
+ *
+ * I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
+ * .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
+ * { I915_ENGINE_CLASS_COPY, 0 } }
+ * };
+ * struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ * .base = {
+ * .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ * },
+ * .param = {
+ * .param = I915_CONTEXT_PARAM_ENGINES,
+ * .value = to_user_pointer(&engines),
+ * .size = sizeof(engines),
+ * },
+ * };
+ * struct drm_i915_gem_context_create_ext create = {
+ * .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ * .extensions = to_user_pointer(&p_engines);
+ * };
+ *
+ * ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ * // We have now created a GEM context with two engines in the map:
+ * // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
+ * // will not be accessible from this context.
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ *
+ * ...
+ * execbuf.rsvd1 = ctx_id;
+ * execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
+ * gem_execbuf(drm_fd, &execbuf);
+ */
+
struct i915_context_param_engines {
__u64 extensions; /* linked chain of extension blocks, 0 terminates */
#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
struct i915_engine_class_instance engines[0];
} __attribute__((packed));
@@ -1771,25 +2499,19 @@ struct i915_context_param_engines {
struct i915_engine_class_instance engines[N__]; \
} __attribute__((packed)) name__
+/**
+ * struct drm_i915_gem_context_create_ext_setparam - Context parameter
+ * to set or query during context creation.
+ */
struct drm_i915_gem_context_create_ext_setparam {
-#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
+ /** @base: Extension link. See struct i915_user_extension. */
struct i915_user_extension base;
- struct drm_i915_gem_context_param param;
-};
-struct drm_i915_gem_context_create_ext_clone {
-#define I915_CONTEXT_CREATE_EXT_CLONE 1
- struct i915_user_extension base;
- __u32 clone_id;
- __u32 flags;
-#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
-#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
-#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
-#define I915_CONTEXT_CLONE_SSEU (1u << 3)
-#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
-#define I915_CONTEXT_CLONE_VM (1u << 5)
-#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
- __u64 rsvd;
+ /**
+ * @param: Context parameter to set or query.
+ * See struct drm_i915_gem_context_param.
+ */
+ struct drm_i915_gem_context_param param;
};
struct drm_i915_gem_context_destroy {
@@ -1797,7 +2519,9 @@ struct drm_i915_gem_context_destroy {
__u32 pad;
};
-/*
+/**
+ * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
+ *
* DRM_I915_GEM_VM_CREATE -
*
* Create a new virtual memory address space (ppGTT) for use within a context
@@ -1807,20 +2531,23 @@ struct drm_i915_gem_context_destroy {
* The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
* returned in the outparam @id.
*
- * No flags are defined, with all bits reserved and must be zero.
- *
* An extension chain may be provided, starting with @extensions, and terminated
* by the @next_extension being 0. Currently, no extensions are defined.
*
* DRM_I915_GEM_VM_DESTROY -
*
- * Destroys a previously created VM id, specified in @id.
+ * Destroys a previously created VM id, specified in @vm_id.
*
* No extensions or flags are allowed currently, and so must be zero.
*/
struct drm_i915_gem_vm_control {
+ /** @extensions: Zero-terminated chain of extensions. */
__u64 extensions;
+
+ /** @flags: reserved for future usage, currently MBZ */
__u32 flags;
+
+ /** @vm_id: Id of the VM created or to be destroyed */
__u32 vm_id;
};
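
A sketch of the create/destroy pair this struct serves, assuming the DRM_IOCTL_I915_GEM_VM_CREATE/DESTROY wrappers built on the DRM_I915_GEM_VM_* numbers above; the returned vm_id would typically be handed to a context via I915_CONTEXT_PARAM_VM:

.. code-block:: C

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int vm_create(int fd, __u32 *vm_id)
    {
            struct drm_i915_gem_vm_control ctl = {};  /* no flags, no extensions */

            if (ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
                    return -errno;
            *vm_id = ctl.vm_id;
            return 0;
    }

    static int vm_destroy(int fd, __u32 vm_id)
    {
            struct drm_i915_gem_vm_control ctl = { .vm_id = vm_id };

            return ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &ctl);
    }
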
@@ -1862,14 +2589,69 @@ struct drm_i915_reset_stats {
__u32 pad;
};
+/**
+ * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
+ *
+ * Userptr objects have several restrictions on what ioctls can be used with the
+ * object handle.
+ */
struct drm_i915_gem_userptr {
+ /**
+ * @user_ptr: The pointer to the allocated memory.
+ *
+ * Needs to be aligned to PAGE_SIZE.
+ */
__u64 user_ptr;
+
+ /**
+ * @user_size:
+ *
+ * The size in bytes for the allocated memory. This will also become the
+ * object size.
+ *
+ * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE,
+ * or larger.
+ */
__u64 user_size;
+
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * I915_USERPTR_READ_ONLY:
+ *
+ * Mark the object as readonly, this also means GPU access can only be
+ * readonly. This is only supported on HW which supports readonly access
+ * through the GTT. If the HW can't support readonly access, an error is
+ * returned.
+ *
+ * I915_USERPTR_PROBE:
+ *
+ * Probe the provided @user_ptr range and validate that the @user_ptr is
+ * indeed pointing to normal memory and that the range is also valid.
+ * For example if some garbage address is given to the kernel, then this
+ * should complain.
+ *
+ * Returns -EFAULT if the probe failed.
+ *
+ * Note that this doesn't populate the backing pages, and also doesn't
+ * guarantee that the object will remain valid when the object is
+ * eventually used.
+ *
+ * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
+ * returns a non-zero value.
+ *
+ * I915_USERPTR_UNSYNCHRONIZED:
+ *
+ * NOT USED. Setting this flag will result in an error.
+ */
__u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_PROBE 0x2
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
/**
- * Returned handle for the object.
+ * @handle: Returned handle for the object.
*
* Object handles are nonzero.
*/
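
A sketch of wrapping anonymous, page-aligned memory in a GEM handle with the probe flag described above (the allocation itself is not shown; I915_PARAM_HAS_USERPTR_PROBE should be checked first, as documented):

.. code-block:: C

    #include <errno.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int userptr_create(int fd, void *ptr, __u64 size, __u32 *handle)
    {
            struct drm_i915_gem_userptr up = {
                    .user_ptr = (uintptr_t)ptr,  /* must be PAGE_SIZE aligned */
                    .user_size = size,           /* must be PAGE_SIZE aligned */
                    .flags = I915_USERPTR_PROBE,
            };

            if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up))
                    return -errno;  /* -EFAULT if the probe failed */
            *handle = up.handle;
            return 0;
    }
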
@@ -1890,6 +2672,10 @@ enum drm_i915_oa_format {
I915_OA_FORMAT_A12_B8_C8,
I915_OA_FORMAT_A32u40_A4u32_B8_C8,
+ /* DG2 */
+ I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
+ I915_OA_FORMAT_A24u40_A14u32_B8_C8,
+
I915_OA_FORMAT_MAX /* non-ABI */
};
@@ -1913,7 +2699,7 @@ enum drm_i915_perf_property_id {
/**
* The value specifies which set of OA unit metrics should be
- * be configured, defining the contents of any OA unit reports.
+ * configured, defining the contents of any OA unit reports.
*
* This property is available in perf revision 1.
*/
@@ -1948,6 +2734,30 @@ enum drm_i915_perf_property_id {
*/
DRM_I915_PERF_PROP_HOLD_PREEMPTION,
+ /**
+ * Specifying this pins all contexts to the specified SSEU power
+ * configuration for the duration of the recording.
+ *
+ * This parameter's value is a pointer to a struct
+ * drm_i915_gem_context_param_sseu.
+ *
+ * This property is available in perf revision 4.
+ */
+ DRM_I915_PERF_PROP_GLOBAL_SSEU,
+
+ /**
+ * This optional parameter specifies the timer interval in nanoseconds
+ * at which the i915 driver will check the OA buffer for available data.
+ * Minimum allowed value is 100 microseconds. A default value is used by
+ * the driver if this parameter is not specified. Note that larger timer
+ * values will reduce cpu consumption during OA perf captures. However,
+ * excessively large values would potentially result in OA buffer
+ * overwrites as captures reach end of the OA buffer.
+ *
+ * This property is available in perf revision 5.
+ */
+ DRM_I915_PERF_PROP_POLL_OA_PERIOD,
+
DRM_I915_PERF_PROP_MAX /* non-ABI */
};
@@ -1967,7 +2777,7 @@ struct drm_i915_perf_open_param {
__u64 properties_ptr;
};
-/**
+/*
* Enable data capture for a stream that was either opened in a disabled state
* via I915_PERF_FLAG_DISABLED or was later disabled via
* I915_PERF_IOCTL_DISABLE.
@@ -1981,7 +2791,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
-/**
+/*
* Disable data capture for a stream.
*
* It is an error to try and read a stream that is disabled.
@@ -1990,7 +2800,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
-/**
+/*
* Change metrics_set captured by a stream.
*
* If the stream is bound to a specific context, the configuration change
@@ -2003,7 +2813,7 @@ struct drm_i915_perf_open_param {
*/
#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
-/**
+/*
* Common to all i915 perf records
*/
struct drm_i915_perf_record_header {
@@ -2052,162 +2862,382 @@ enum drm_i915_perf_record_type {
};
/**
+ * struct drm_i915_perf_oa_config
+ *
* Structure to upload perf dynamic configuration into the kernel.
*/
struct drm_i915_perf_oa_config {
- /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
+ /**
+ * @uuid:
+ *
+ * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
+ */
char uuid[36];
+ /**
+ * @n_mux_regs:
+ *
+ * Number of mux regs in &mux_regs_ptr.
+ */
__u32 n_mux_regs;
+
+ /**
+ * @n_boolean_regs:
+ *
+ * Number of boolean regs in &boolean_regs_ptr.
+ */
__u32 n_boolean_regs;
+
+ /**
+ * @n_flex_regs:
+ *
+ * Number of flex regs in &flex_regs_ptr.
+ */
__u32 n_flex_regs;
- /*
- * These fields are pointers to tuples of u32 values (register address,
- * value). For example the expected length of the buffer pointed by
- * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
+ /**
+ * @mux_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for mux
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_mux_regs).
*/
__u64 mux_regs_ptr;
+
+ /**
+ * @boolean_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for boolean
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_boolean_regs).
+ */
__u64 boolean_regs_ptr;
+
+ /**
+ * @flex_regs_ptr:
+ *
+ * Pointer to tuples of u32 values (register address, value) for flex
+ * registers. Expected length of buffer is (2 * sizeof(u32) *
+ * &n_flex_regs).
+ */
__u64 flex_regs_ptr;
};
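As a sketch (assuming the DRM_IOCTL_I915_PERF_ADD_CONFIG ioctl number defined elsewhere in this header, and purely illustrative register/value pairs), uploading a configuration could look like the following; on success the ioctl returns the id of the new metric set:

.. code-block:: C

    __u32 mux_regs[] = { 0x9888, 0x14150000 }; /* one (address, value) pair */
    struct drm_i915_perf_oa_config config = {
        .uuid = "01234567-0123-0123-0123-0123456789ab", /* exactly 36 chars */
        .n_mux_regs = 1,
        .mux_regs_ptr = (uintptr_t)mux_regs,
    };

    int metric_id = ioctl(fd, DRM_IOCTL_I915_PERF_ADD_CONFIG, &config);
    if (metric_id < 0) ...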
+/**
+ * struct drm_i915_query_item - An individual query for the kernel to process.
+ *
+ * The behaviour is determined by the @query_id. Note that exactly what
+ * @data_ptr is also depends on the specific @query_id.
+ */
struct drm_i915_query_item {
+ /**
+ * @query_id:
+ *
+ * The id for this query. Currently accepted query IDs are:
+ * - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
+ * - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
+ * - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
+ * - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
+ * - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
+ * - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
+ */
__u64 query_id;
-#define DRM_I915_QUERY_TOPOLOGY_INFO 1
-#define DRM_I915_QUERY_ENGINE_INFO 2
-#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
+#define DRM_I915_QUERY_MEMORY_REGIONS 4
+#define DRM_I915_QUERY_HWCONFIG_BLOB 5
+#define DRM_I915_QUERY_GEOMETRY_SUBSLICES 6
/* Must be kept compact -- no holes and well documented */
- /*
+ /**
+ * @length:
+ *
* When set to zero by userspace, this is filled with the size of the
- * data to be written at the data_ptr pointer. The kernel sets this
+ * data to be written at the @data_ptr pointer. The kernel sets this
* value to a negative value to signal an error on a particular query
* item.
*/
__s32 length;
- /*
- * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ /**
+ * @flags:
+ *
+ * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
- * following :
- * - DRM_I915_QUERY_PERF_CONFIG_LIST
- * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
- * - DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * following:
+ *
+ * - %DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
+ *
+ * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain
+ * a struct i915_engine_class_instance that references a render engine.
*/
__u32 flags;
#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
- /*
- * Data will be written at the location pointed by data_ptr when the
- * value of length matches the length of the data to be written by the
+ /**
+ * @data_ptr:
+ *
+ * Data will be written at the location pointed by @data_ptr when the
+ * value of @length matches the length of the data to be written by the
* kernel.
*/
__u64 data_ptr;
};
+/**
+ * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
+ * kernel to fill out.
+ *
+ * Note that this is generally a two step process for each struct
+ * drm_i915_query_item in the array:
+ *
+ * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
+ * drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
+ *    kernel will then fill in the size, in bytes, which tells userspace how
+ *    much memory it needs to allocate for the blob (say for an array of
+ *    properties).
+ *
+ * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
+ * &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
+ * the &drm_i915_query_item.length should still be the same as what the
+ * kernel previously set. At this point the kernel can fill in the blob.
+ *
+ * Note that for some query items it can make sense for userspace to just pass
+ * in a buffer/blob equal to or larger than the required size. In this case only
+ * a single ioctl call is needed. For some smaller query items this can work
+ * quite well.
+ *
+ */
struct drm_i915_query {
+ /** @num_items: The number of elements in the @items_ptr array */
__u32 num_items;
- /*
- * Unused for now. Must be cleared to zero.
+ /**
+ * @flags: Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * This points to an array of num_items drm_i915_query_item structures.
+ /**
+ * @items_ptr:
+ *
+ * Pointer to an array of struct drm_i915_query_item. The number of
+ * array elements is @num_items.
*/
__u64 items_ptr;
};
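The two step pattern described above, independent of the specific query id, can be sketched as follows (error handling elided; DRM_I915_QUERY_TOPOLOGY_INFO is only used as a placeholder query here):

.. code-block:: C

    struct drm_i915_query_item item = {
        .query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
    };
    struct drm_i915_query query = {
        .num_items = 1,
        .items_ptr = (uintptr_t)&item,
    };

    /* pass 1: item.length == 0, the kernel fills in the required size */
    ioctl(fd, DRM_IOCTL_I915_QUERY, &query);

    void *blob = calloc(1, item.length);
    item.data_ptr = (uintptr_t)blob;

    /* pass 2: same length, the kernel now writes the blob */
    ioctl(fd, DRM_IOCTL_I915_QUERY, &query);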
-/*
- * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
- *
- * data: contains the 3 pieces of information :
- *
- * - the slice mask with one bit per slice telling whether a slice is
- * available. The availability of slice X can be queried with the following
- * formula :
- *
- * (data[X / 8] >> (X % 8)) & 1
- *
- * - the subslice mask for each slice with one bit per subslice telling
- * whether a subslice is available. Gen12 has dual-subslices, which are
- * similar to two gen11 subslices. For gen12, this array represents dual-
- * subslices. The availability of subslice Y in slice X can be queried
- * with the following formula :
- *
- * (data[subslice_offset +
- * X * subslice_stride +
- * Y / 8] >> (Y % 8)) & 1
- *
- * - the EU mask for each subslice in each slice with one bit per EU telling
- * whether an EU is available. The availability of EU Z in subslice Y in
- * slice X can be queried with the following formula :
+/**
+ * struct drm_i915_query_topology_info
*
- * (data[eu_offset +
- * (X * max_subslices + Y) * eu_stride +
- * Z / 8] >> (Z % 8)) & 1
+ * Describes slice/subslice/EU information queried by
+ * %DRM_I915_QUERY_TOPOLOGY_INFO
*/
struct drm_i915_query_topology_info {
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u16 flags;
+ /**
+ * @max_slices:
+ *
+ * The number of bits used to express the slice mask.
+ */
__u16 max_slices;
+
+ /**
+ * @max_subslices:
+ *
+ * The number of bits used to express the subslice mask.
+ */
__u16 max_subslices;
+
+ /**
+ * @max_eus_per_subslice:
+ *
+ * The number of bits in the EU mask that correspond to a single
+ * subslice's EUs.
+ */
__u16 max_eus_per_subslice;
- /*
+ /**
+ * @subslice_offset:
+ *
* Offset in data[] at which the subslice masks are stored.
*/
__u16 subslice_offset;
- /*
+ /**
+ * @subslice_stride:
+ *
* Stride at which each of the subslice masks for each slice are
* stored.
*/
__u16 subslice_stride;
- /*
+ /**
+ * @eu_offset:
+ *
* Offset in data[] at which the EU masks are stored.
*/
__u16 eu_offset;
- /*
+ /**
+ * @eu_stride:
+ *
* Stride at which each of the EU masks for each subslice are stored.
*/
__u16 eu_stride;
+ /**
+ * @data:
+ *
+ * Contains 3 pieces of information :
+ *
+ * - The slice mask with one bit per slice telling whether a slice is
+ * available. The availability of slice X can be queried with the
+ * following formula :
+ *
+ * .. code:: c
+ *
+ * (data[X / 8] >> (X % 8)) & 1
+ *
+ * Starting with Xe_HP platforms, Intel hardware no longer has
+ * traditional slices so i915 will always report a single slice
+ * (hardcoded slicemask = 0x1) which contains all of the platform's
+ * subslices. I.e., the mask here does not reflect any of the newer
+ * hardware concepts such as "gslices" or "cslices" since userspace
+ * is capable of inferring those from the subslice mask.
+ *
+ * - The subslice mask for each slice with one bit per subslice telling
+ * whether a subslice is available. Starting with Gen12 we use the
+ * term "subslice" to refer to what the hardware documentation
+ * describes as a "dual-subslices." The availability of subslice Y
+ * in slice X can be queried with the following formula :
+ *
+ * .. code:: c
+ *
+ * (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
+ *
+ * - The EU mask for each subslice in each slice, with one bit per EU
+ * telling whether an EU is available. The availability of EU Z in
+ * subslice Y in slice X can be queried with the following formula :
+ *
+ * .. code:: c
+ *
+ * (data[eu_offset +
+ * (X * max_subslices + Y) * eu_stride +
+ * Z / 8
+ * ] >> (Z % 8)) & 1
+ */
__u8 data[];
};
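The three formulas above translate directly into small helpers operating on a filled-in struct drm_i915_query_topology_info, for example:

.. code-block:: C

    static bool slice_available(const struct drm_i915_query_topology_info *ti,
                                int x)
    {
        return (ti->data[x / 8] >> (x % 8)) & 1;
    }

    static bool subslice_available(const struct drm_i915_query_topology_info *ti,
                                   int x, int y)
    {
        return (ti->data[ti->subslice_offset + x * ti->subslice_stride +
                         y / 8] >> (y % 8)) & 1;
    }

    static bool eu_available(const struct drm_i915_query_topology_info *ti,
                             int x, int y, int z)
    {
        return (ti->data[ti->eu_offset +
                         (x * ti->max_subslices + y) * ti->eu_stride +
                         z / 8] >> (z % 8)) & 1;
    }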
/**
+ * DOC: Engine Discovery uAPI
+ *
+ * Engine discovery uAPI is a way of enumerating physical engines present in a
+ * GPU associated with an open i915 DRM file descriptor. This supersedes the old
+ * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
+ * `I915_PARAM_HAS_BLT`.
+ *
+ * The need for this interface came starting with Icelake and newer GPUs, which
+ * started to establish a pattern of having multiple engines of the same class,
+ * where not all instances were always completely functionally equivalent.
+ *
+ * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
+ * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
+ *
+ * Example for getting the list of engines:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_engine_info *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_ENGINE_INFO,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of engines. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * //
+ * // Alternatively a large buffer can be allocated straight away enabling
+ * // querying in one pass, in which case item.length should contain the
+ * // length of the provided buffer.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with info on all engines.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each engine in the array
+ * for (i = 0; i < info->num_engines; i++) {
+ * struct drm_i915_engine_info einfo = info->engines[i];
+ * u16 class = einfo.engine.engine_class;
+ * u16 instance = einfo.engine.engine_instance;
+ * ....
+ * }
+ *
+ * free(info);
+ *
+ * Each of the enumerated engines, apart from being defined by its class and
+ * instance (see `struct i915_engine_class_instance`), also can have flags and
+ * capabilities defined as documented in i915_drm.h.
+ *
+ * For instance video engines which support HEVC encoding will have the
+ * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
+ *
+ * Engine discovery only fully comes into its own when combined with the new way
+ * of addressing engines when submitting batch buffers using contexts with
+ * engine maps configured.
+ */
+
+/**
* struct drm_i915_engine_info
*
* Describes one engine and its capabilities as known to the driver.
*/
struct drm_i915_engine_info {
- /** Engine class and instance. */
+ /** @engine: Engine class and instance. */
struct i915_engine_class_instance engine;
- /** Reserved field. */
+ /** @rsvd0: Reserved field. */
__u32 rsvd0;
- /** Engine flags. */
+ /** @flags: Engine flags. */
__u64 flags;
+#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE (1 << 0)
- /** Capabilities of this engine. */
+ /** @capabilities: Capabilities of this engine. */
__u64 capabilities;
#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
- /** Reserved fields. */
- __u64 rsvd1[4];
+ /** @logical_instance: Logical instance of engine */
+ __u16 logical_instance;
+
+ /** @rsvd1: Reserved fields. */
+ __u16 rsvd1[3];
+ /** @rsvd2: Reserved fields. */
+ __u64 rsvd2[3];
};
/**
@@ -2217,66 +3247,486 @@ struct drm_i915_engine_info {
* an array of struct drm_i915_engine_info structures.
*/
struct drm_i915_query_engine_info {
- /** Number of struct drm_i915_engine_info structs following. */
+ /** @num_engines: Number of struct drm_i915_engine_info structs following. */
__u32 num_engines;
- /** MBZ */
+ /** @rsvd: MBZ */
__u32 rsvd[3];
- /** Marker for drm_i915_engine_info structures. */
+ /** @engines: Marker for drm_i915_engine_info structures. */
struct drm_i915_engine_info engines[];
};
-/*
- * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+/**
+ * struct drm_i915_query_perf_config
+ *
+ * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
+ * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
*/
struct drm_i915_query_perf_config {
union {
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
- * this fields to the number of configurations available.
+ /**
+ * @n_configs:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this field to
+ * the number of configurations available.
*/
__u64 n_configs;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @config:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
*/
__u64 config;
- /*
- * When query_id == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
- * i915 will use the value in this field as configuration
- * identifier to decide what data to write into config_ptr.
+ /**
+ * @uuid:
+ *
+ * When &drm_i915_query_item.flags ==
+ * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
+ * value in this field as configuration identifier to decide
+ * what data to write into config_ptr.
*
* String formatted like "%08x-%04x-%04x-%04x-%012x"
*/
char uuid[36];
};
- /*
+ /**
+ * @flags:
+ *
* Unused for now. Must be cleared to zero.
*/
__u32 flags;
- /*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
- * write an array of __u64 of configuration identifiers.
+ /**
+ * @data:
+ *
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
+ * i915 will write an array of __u64 of configuration identifiers.
*
- * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA, i915 will
- * write a struct drm_i915_perf_oa_config. If the following fields of
- * drm_i915_perf_oa_config are set not set to 0, i915 will write into
- * the associated pointers the values of submitted when the
+ * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA,
+ * i915 will write a struct drm_i915_perf_oa_config. If the following
+ * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
+ * write into the associated pointers the values submitted when the
* configuration was created :
*
- * - n_mux_regs
- * - n_boolean_regs
- * - n_flex_regs
+ * - &drm_i915_perf_oa_config.n_mux_regs
+ * - &drm_i915_perf_oa_config.n_boolean_regs
+ * - &drm_i915_perf_oa_config.n_flex_regs
*/
__u8 data[];
};
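For example, listing the available configuration identifiers with %DRM_I915_QUERY_PERF_CONFIG_LIST follows the usual two-pass query pattern (a sketch, error handling elided):

.. code-block:: C

    struct drm_i915_query_item item = {
        .query_id = DRM_I915_QUERY_PERF_CONFIG,
        .flags = DRM_I915_QUERY_PERF_CONFIG_LIST,
    };
    struct drm_i915_query query = {
        .num_items = 1,
        .items_ptr = (uintptr_t)&item,
    };

    ioctl(fd, DRM_IOCTL_I915_QUERY, &query); /* sizes the blob */

    struct drm_i915_query_perf_config *cfg = calloc(1, item.length);
    item.data_ptr = (uintptr_t)cfg;
    ioctl(fd, DRM_IOCTL_I915_QUERY, &query); /* fills it in */

    __u64 *ids = (__u64 *)cfg->data;
    for (__u64 i = 0; i < cfg->n_configs; i++)
        printf("config id %llu\n", (unsigned long long)ids[i]);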
+/**
+ * enum drm_i915_gem_memory_class - Supported memory classes
+ */
+enum drm_i915_gem_memory_class {
+ /** @I915_MEMORY_CLASS_SYSTEM: System memory */
+ I915_MEMORY_CLASS_SYSTEM = 0,
+ /** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
+ I915_MEMORY_CLASS_DEVICE,
+};
+
+/**
+ * struct drm_i915_gem_memory_class_instance - Identify particular memory region
+ */
+struct drm_i915_gem_memory_class_instance {
+ /** @memory_class: See enum drm_i915_gem_memory_class */
+ __u16 memory_class;
+
+ /** @memory_instance: Which instance */
+ __u16 memory_instance;
+};
+
+/**
+ * struct drm_i915_memory_region_info - Describes one region as known to the
+ * driver.
+ *
+ * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
+ * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
+ * at &drm_i915_query_item.query_id.
+ */
+struct drm_i915_memory_region_info {
+ /** @region: The class:instance pair encoding */
+ struct drm_i915_gem_memory_class_instance region;
+
+ /** @rsvd0: MBZ */
+ __u32 rsvd0;
+
+ /**
+ * @probed_size: Memory probed by the driver
+ *
+ * Note that it should not be possible to ever encounter a zero value
+ * here; also note that no current region type will ever return -1 here,
+ * although for future region types this might be a possibility. The
+ * same applies to the other size fields.
+ */
+ __u64 probed_size;
+
+ /**
+ * @unallocated_size: Estimate of memory remaining
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
+ * Without this (or if this is an older kernel) the value here will
+ * always equal the @probed_size. Note this is only currently tracked
+ * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
+ * will always equal the @probed_size).
+ */
+ __u64 unallocated_size;
+
+ union {
+ /** @rsvd1: MBZ */
+ __u64 rsvd1[8];
+ struct {
+ /**
+ * @probed_cpu_visible_size: Memory probed by the driver
+ * that is CPU accessible.
+ *
+ * This will always be <= @probed_size, and the
+ * remainder (if there is any) will not be CPU
+ * accessible.
+ *
+ * On systems without small BAR, the @probed_size will
+ * always equal the @probed_cpu_visible_size, since all
+ * of it will be CPU accessible.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the @probed_size).
+ *
+ * Note that if the value returned here is zero, then
+ * this must be an old kernel which lacks the relevant
+ * small-bar uAPI support (including
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
+ * such systems we should never actually end up with a
+ * small BAR configuration, assuming we are able to load
+ * the kernel module. Hence it should be safe to treat
+ * this the same as when @probed_cpu_visible_size ==
+ * @probed_size.
+ */
+ __u64 probed_cpu_visible_size;
+
+ /**
+ * @unallocated_cpu_visible_size: Estimate of CPU
+ * visible memory remaining.
+ *
+ * Note this is only tracked for
+ * I915_MEMORY_CLASS_DEVICE regions (for other types the
+ * value here will always equal the
+ * @probed_cpu_visible_size).
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always
+ * equal the @probed_cpu_visible_size.
+ *
+ * If this is an older kernel the value here will be
+ * zero, see also @probed_cpu_visible_size.
+ */
+ __u64 unallocated_cpu_visible_size;
+ };
+ };
+};
+
+/**
+ * struct drm_i915_query_memory_regions
+ *
+ * The region info query enumerates all regions known to the driver by filling
+ * in an array of struct drm_i915_memory_region_info structures.
+ *
+ * Example for getting the list of supported regions:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_query_memory_regions *info;
+ * struct drm_i915_query_item item = {
+ * .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
+ * };
+ * struct drm_i915_query query = {
+ * .num_items = 1,
+ * .items_ptr = (uintptr_t)&item,
+ * };
+ * int err, i;
+ *
+ * // First query the size of the blob we need, this needs to be large
+ * // enough to hold our array of regions. The kernel will fill out the
+ * // item.length for us, which is the number of bytes we need.
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * info = calloc(1, item.length);
+ * // Now that we allocated the required number of bytes, we call the ioctl
+ * // again, this time with the data_ptr pointing to our newly allocated
+ * // blob, which the kernel can then populate with all the region info.
+ * item.data_ptr = (uintptr_t)info;
+ *
+ * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ * if (err) ...
+ *
+ * // We can now access each region in the array
+ * for (i = 0; i < info->num_regions; i++) {
+ * struct drm_i915_memory_region_info mr = info->regions[i];
+ * u16 class = mr.region.memory_class;
+ * u16 instance = mr.region.memory_instance;
+ *
+ * ....
+ * }
+ *
+ * free(info);
+ */
+struct drm_i915_query_memory_regions {
+ /** @num_regions: Number of supported regions */
+ __u32 num_regions;
+
+ /** @rsvd: MBZ */
+ __u32 rsvd[3];
+
+ /** @regions: Info about each supported region */
+ struct drm_i915_memory_region_info regions[];
+};
+
+/**
+ * DOC: GuC HWCONFIG blob uAPI
+ *
+ * The GuC produces a blob with information about the current device.
+ * i915 reads this blob from GuC and makes it available via this uAPI.
+ *
+ * The format and meaning of the blob content are documented in the
+ * Programmer's Reference Manual.
+ */
+
+/**
+ * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
+ * extension support using struct i915_user_extension.
+ *
+ * Note that new buffer flags should be added here, at least for the stuff that
+ * is immutable. Previously we would have two ioctls, one to create the object
+ * with gem_create, and another to apply various parameters; however, this
+ * creates some ambiguity for the params which are considered immutable. Also in
+ * general we're phasing out the various SET/GET ioctls.
+ */
+struct drm_i915_gem_create_ext {
+ /**
+ * @size: Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ *
+ * On platforms like DG2/ATS the kernel will always use 64K or larger
+ * pages for I915_MEMORY_CLASS_DEVICE. The kernel also requires a
+ * minimum of 64K GTT alignment for such objects.
+ *
+ * NOTE: Previously the ABI here required a minimum GTT alignment of 2M
+ * on DG2/ATS, due to how the hardware implemented 64K GTT page support,
+ * where we had the following complications:
+ *
+ * 1) The entire PDE (which covers a 2MB virtual address range) must
+ *    contain only 64K PTEs, i.e. mixing 4K and 64K PTEs in the same
+ * PDE is forbidden by the hardware.
+ *
+ * 2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
+ * objects.
+ *
+ * However on actual production HW this was completely changed to now
+ * allow setting a TLB hint at the PTE level (see PS64), which is a lot
+ * more flexible than the above. With this the 2M restriction was
+ * dropped and we now only require 64K alignment.
+ */
+ __u64 size;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+ /**
+ * @flags: Optional flags.
+ *
+ * Supported values:
+ *
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
+ * the object will need to be accessed via the CPU.
+ *
+ * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
+ * strictly required on configurations where some subset of the device
+ * memory is directly visible/mappable through the CPU (which we also
+ * call small BAR), like on some DG2+ systems. Note that this is quite
+ * undesirable, but due to various factors like the client CPU, BIOS, etc.,
+ * it's something we can expect to see in the wild. See
+ * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
+ * determine if this applies to the system.
+ *
+ * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
+ * ensure the kernel can always spill the allocation to system memory,
+ * if the object can't be allocated in the mappable part of
+ * I915_MEMORY_CLASS_DEVICE.
+ *
+ * Also note that since the kernel only supports flat-CCS on objects
+ * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
+ * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
+ * flat-CCS.
+ *
+ * Without this hint, the kernel will assume that non-mappable
+ * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
+ * kernel can still migrate the object to the mappable part, as a last
+ * resort, if userspace ever CPU faults this object, but this might be
+ * expensive, and so ideally should be avoided.
+ *
+ * On older kernels which lack the relevant small-bar uAPI support (see
+ * also &drm_i915_memory_region_info.probed_cpu_visible_size),
+ * usage of the flag will result in an error, but it should NEVER be
+ * possible to end up with a small BAR configuration, assuming we can
+ * also successfully load the i915 kernel module. In such cases the
+ * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
+ * such there are zero restrictions on where the object can be placed.
+ */
+#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
+ __u32 flags;
+
+ /**
+ * @extensions: The chain of extensions to apply to this object.
+ *
+ * This will be useful in the future when we need to support several
+ * different extensions, and we need to apply more than one when
+ * creating the object. See struct i915_user_extension.
+ *
+ * If we don't supply any extensions then we get the same old gem_create
+ * behaviour.
+ *
+ * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
+ * struct drm_i915_gem_create_ext_memory_regions.
+ *
+ * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
+ * struct drm_i915_gem_create_ext_protected_content.
+ */
+#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+ __u64 extensions;
+};
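For instance, asking for a CPU-accessible local-memory object could combine this flag with the I915_GEM_CREATE_EXT_MEMORY_REGIONS extension described below (a sketch; note the mandatory I915_MEMORY_CLASS_SYSTEM fallback placement):

.. code-block:: C

    struct drm_i915_gem_memory_class_instance placements[] = {
        { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
        { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
    };
    struct drm_i915_gem_create_ext_memory_regions regions = {
        .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
        .num_regions = 2,
        .regions = (uintptr_t)placements,
    };
    struct drm_i915_gem_create_ext create_ext = {
        .size = 16 * PAGE_SIZE,
        .flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
        .extensions = (uintptr_t)&regions,
    };

    int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
    if (err) ...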
+
+/**
+ * struct drm_i915_gem_create_ext_memory_regions - The
+ * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
+ *
+ * Set the object with the desired set of placements/regions in priority
+ * order. Each entry must be unique and supported by the device.
+ *
+ * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
+ * an equivalent layout of class:instance pair encodings. See struct
+ * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
+ * query the supported regions for a device.
+ *
+ * As an example, on discrete devices, if we wish to set the placement as
+ * device local-memory we can do something like:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_memory_class_instance region_lmem = {
+ * .memory_class = I915_MEMORY_CLASS_DEVICE,
+ * .memory_instance = 0,
+ * };
+ * struct drm_i915_gem_create_ext_memory_regions regions = {
+ * .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
+ * .regions = (uintptr_t)&region_lmem,
+ * .num_regions = 1,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = 16 * PAGE_SIZE,
+ * .extensions = (uintptr_t)&regions,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ *
+ * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
+ * along with the final object size in &drm_i915_gem_create_ext.size, which
+ * should account for any rounding up, if required.
+ *
+ * Note that userspace has no means of knowing the current backing region
+ * for objects where @num_regions is larger than one. The kernel will only
+ * ensure that the priority order of the @regions array is honoured, either
+ * when initially placing the object, or when moving memory around due to
+ * memory pressure.
+ *
+ * On Flat-CCS capable HW, compression is supported for objects residing in
+ * I915_MEMORY_CLASS_DEVICE. If a compressed object also lists another memory
+ * class in @regions and is migrated (by i915, due to memory constraints) to
+ * a non I915_MEMORY_CLASS_DEVICE region, i915 would need to decompress its
+ * content, but i915 does not have the information required to decompress
+ * userspace-compressed objects.
+ *
+ * Therefore i915 only supports Flat-CCS on objects which can reside solely in
+ * I915_MEMORY_CLASS_DEVICE regions.
+ */
+struct drm_i915_gem_create_ext_memory_regions {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @num_regions: Number of elements in the @regions array. */
+ __u32 num_regions;
+ /**
+ * @regions: The regions/placements array.
+ *
+ * An array of struct drm_i915_gem_memory_class_instance.
+ */
+ __u64 regions;
+};
+
+/**
+ * struct drm_i915_gem_create_ext_protected_content - The
+ * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
+ *
+ * If this extension is provided, buffer contents are expected to be protected
+ * by PXP encryption and require decryption for scan out and processing. This
+ * is only possible on platforms that have PXP enabled, on all other scenarios
+ * using this extension will cause the ioctl to fail and return -ENODEV. The
+ * flags parameter is reserved for future expansion and must currently be set
+ * to zero.
+ *
+ * The buffer contents are considered invalid after a PXP session teardown.
+ *
+ * The encryption is guaranteed to be processed correctly only if the object
+ * is submitted with a context created using the
+ * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
+ * at submission time on the validity of the objects involved.
+ *
+ * Below is an example on how to create a protected object:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_i915_gem_create_ext_protected_content protected_ext = {
+ * .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
+ * .flags = 0,
+ * };
+ * struct drm_i915_gem_create_ext create_ext = {
+ * .size = PAGE_SIZE,
+ * .extensions = (uintptr_t)&protected_ext,
+ * };
+ *
+ * int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
+ * if (err) ...
+ */
+struct drm_i915_gem_create_ext_protected_content {
+ /** @base: Extension link. See struct i915_user_extension. */
+ struct i915_user_extension base;
+ /** @flags: reserved for future usage, currently MBZ */
+ __u32 flags;
+};
+
+/* ID of the protected content session managed by i915 when PXP is active */
+#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
+
#if defined(__cplusplus)
}
#endif
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 2e29a671d67e..1bb11a6ee667 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -19,7 +19,8 @@
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word (64-bit) */
-#define BPF_XADD 0xc0 /* exclusive add */
+#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */
+#define BPF_XADD 0xc0 /* exclusive add - legacy name */
/* alu/jmp fields */
#define BPF_MOV 0xb0 /* mov reg to reg */
@@ -43,6 +44,11 @@
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
+/* atomic op type fields (stored in immediate) */
+#define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */
+#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */
+#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */
+
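As an encoding sketch, an atomic 64-bit fetch-and-add places the (pre-existing ALU) BPF_ADD opcode together with BPF_FETCH in the immediate of a BPF_STX | BPF_ATOMIC instruction:

.. code-block:: C

    /* *(u64 *)(R1 + 0) += R2, with the old value returned in R2 */
    struct bpf_insn fetch_add = {
        .code    = BPF_STX | BPF_ATOMIC | BPF_DW,
        .dst_reg = BPF_REG_1,
        .src_reg = BPF_REG_2,
        .off     = 0,
        .imm     = BPF_ADD | BPF_FETCH,
    };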
/* Register numbers */
enum {
BPF_REG_0 = 0,
@@ -73,15 +79,789 @@ struct bpf_insn {
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
- __u8 data[]; /* Arbitrary size */
+ __u8 data[0]; /* Arbitrary size */
};
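Since the data member is sized by the map's key_size, userspace typically declares a layout-compatible key for the address family in use. A sketch for inserting an IPv4 /24 prefix (bpf_map_update_elem() here is the libbpf wrapper, assumed rather than defined in this header):

.. code-block:: C

    struct {
        __u32 prefixlen;
        __u8  data[4]; /* IPv4 address bytes, network order */
    } key = { .prefixlen = 24, .data = { 192, 168, 1, 0 } };
    __u64 value = 1;

    /* map_fd refers to a BPF_MAP_TYPE_LPM_TRIE map created with
     * key_size == sizeof(key) and value_size == sizeof(value).
     */
    bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);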
struct bpf_cgroup_storage_key {
__u64 cgroup_inode_id; /* cgroup inode id */
- __u32 attach_type; /* program attach type */
+ __u32 attach_type; /* program attach type (enum bpf_attach_type) */
+};
+
+enum bpf_cgroup_iter_order {
+ BPF_CGROUP_ITER_ORDER_UNSPEC = 0,
+ BPF_CGROUP_ITER_SELF_ONLY, /* process only a single object. */
+ BPF_CGROUP_ITER_DESCENDANTS_PRE, /* walk descendants in pre-order. */
+ BPF_CGROUP_ITER_DESCENDANTS_POST, /* walk descendants in post-order. */
+ BPF_CGROUP_ITER_ANCESTORS_UP, /* walk ancestors upward. */
};
-/* BPF syscall commands, see bpf(2) man-page for details. */
+union bpf_iter_link_info {
+ struct {
+ __u32 map_fd;
+ } map;
+ struct {
+ enum bpf_cgroup_iter_order order;
+
+ /* At most one of cgroup_fd and cgroup_id can be non-zero. If
+ * both are zero, the walk starts from the default cgroup v2
+ * root. For walking v1 hierarchy, one should always explicitly
+ * specify cgroup_fd.
+ */
+ __u32 cgroup_fd;
+ __u64 cgroup_id;
+ } cgroup;
+ /* Parameters of task iterators. */
+ struct {
+ __u32 tid;
+ __u32 pid;
+ __u32 pid_fd;
+ } task;
+};
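A sketch of filling this in for a pre-order walk over a cgroup subtree; bpf_link_create() and LIBBPF_OPTS() are libbpf conveniences assumed here, and cgroup_fd/prog_fd are pre-existing file descriptors:

.. code-block:: C

    union bpf_iter_link_info linfo = {
        .cgroup = {
            .order     = BPF_CGROUP_ITER_DESCENDANTS_PRE,
            .cgroup_fd = cgroup_fd, /* fd of a cgroup v2 directory */
        },
    };
    LIBBPF_OPTS(bpf_link_create_opts, opts,
                .iter_info = &linfo,
                .iter_info_len = sizeof(linfo));

    /* prog_fd is a loaded iterator program attached as BPF_TRACE_ITER */
    int link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, &opts);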
+
+/* BPF syscall commands, see bpf(2) man-page for more details. */
+/**
+ * DOC: eBPF Syscall Preamble
+ *
+ * The operation to be performed by the **bpf**\ () system call is determined
+ * by the *cmd* argument. Each operation takes an accompanying argument,
+ * provided via *attr*, which is a pointer to a union of type *bpf_attr* (see
+ * below). The size argument is the size of the union pointed to by *attr*.
+ */
+/**
+ * DOC: eBPF Syscall Commands
+ *
+ * BPF_MAP_CREATE
+ * Description
+ * Create a map and return a file descriptor that refers to the
+ * map. The close-on-exec file descriptor flag (see **fcntl**\ (2))
+ * is automatically enabled for the new file descriptor.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_MAP_CREATE** will delete the map (but see NOTES).
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_MAP_LOOKUP_ELEM
+ * Description
+ * Look up an element with a given *key* in the map referred to
+ * by the file descriptor *map_fd*.
+ *
+ * The *flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_UPDATE_ELEM
+ * Description
+ * Create or update an element (key/value pair) in a specified map.
+ *
+ * The *flags* argument should be specified as one of the
+ * following:
+ *
+ * **BPF_ANY**
+ * Create a new element or update an existing element.
+ * **BPF_NOEXIST**
+ * Create a new element only if it did not exist.
+ * **BPF_EXIST**
+ * Update an existing element.
+ * **BPF_F_LOCK**
+ * Update a spin_lock-ed map element.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**,
+ * **E2BIG**, **EEXIST**, or **ENOENT**.
+ *
+ * **E2BIG**
+ * The number of elements in the map reached the
+ * *max_entries* limit specified at map creation time.
+ * **EEXIST**
+ * If *flags* specifies **BPF_NOEXIST** and the element
+ * with *key* already exists in the map.
+ * **ENOENT**
+ * If *flags* specifies **BPF_EXIST** and the element with
+ * *key* does not exist in the map.
+ *
+ * BPF_MAP_DELETE_ELEM
+ * Description
+ * Look up and delete an element by key in a specified map.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_GET_NEXT_KEY
+ * Description
+ * Look up an element by key in a specified map and return the key
+ * of the next element. Can be used to iterate over all elements
+ * in the map.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * The following cases can be used to iterate over all elements of
+ * the map:
+ *
+ * * If *key* is not found, the operation returns zero and sets
+ * the *next_key* pointer to the key of the first element.
+ * * If *key* is found, the operation returns zero and sets the
+ * *next_key* pointer to the key of the next element.
+ * * If *key* is the last element, returns -1 and *errno* is set
+ * to **ENOENT**.
+ *
+ * May set *errno* to **ENOMEM**, **EFAULT**, **EPERM**, or
+ * **EINVAL** on error.
+ *
+ * BPF_PROG_LOAD
+ * Description
+ * Verify and load an eBPF program, returning a new file
+ * descriptor associated with the program.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_PROG_LOAD** will unload the eBPF program (but see NOTES).
+ *
+ * The close-on-exec file descriptor flag (see **fcntl**\ (2)) is
+ * automatically enabled for the new file descriptor.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_OBJ_PIN
+ * Description
+ * Pin an eBPF program or map referred by the specified *bpf_fd*
+ * to the provided *pathname* on the filesystem.
+ *
+ * The *pathname* argument must not contain a dot (".").
+ *
+ * On success, *pathname* retains a reference to the eBPF object,
+ * preventing deallocation of the object when the original
+ * *bpf_fd* is closed. This allows the eBPF object to live beyond
+ * **close**\ (\ *bpf_fd*\ ), and hence the lifetime of the parent
+ * process.
+ *
+ * Applying **unlink**\ (2) or similar calls to the *pathname*
+ * unpins the object from the filesystem, removing the reference.
+ * If no other file descriptors or filesystem nodes refer to the
+ * same object, it will be deallocated (see NOTES).
+ *
+ * The filesystem type for the parent directory of *pathname* must
+ * be **BPF_FS_MAGIC**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_OBJ_GET
+ * Description
+ * Open a file descriptor for the eBPF object pinned to the
+ * specified *pathname*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_PROG_ATTACH
+ * Description
+ * Attach an eBPF program to a *target_fd* at the specified
+ * *attach_type* hook.
+ *
+ * The *attach_type* specifies the eBPF attachment point to
+ * attach the program to, and must be one of *bpf_attach_type*
+ * (see below).
+ *
+ * The *attach_bpf_fd* must be a valid file descriptor for a
+ * loaded eBPF program of a cgroup, flow dissector, LIRC, sockmap
+ * or sock_ops type corresponding to the specified *attach_type*.
+ *
+ * The *target_fd* must be a valid file descriptor for a kernel
+ * object which depends on the attach type of *attach_bpf_fd*:
+ *
+ * **BPF_PROG_TYPE_CGROUP_DEVICE**,
+ * **BPF_PROG_TYPE_CGROUP_SKB**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
+ * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
+ * **BPF_PROG_TYPE_SOCK_OPS**
+ *
+ * Control Group v2 hierarchy with the eBPF controller
+ * enabled. Requires the kernel to be compiled with
+ * **CONFIG_CGROUP_BPF**.
+ *
+ * **BPF_PROG_TYPE_FLOW_DISSECTOR**
+ *
+ * Network namespace (eg /proc/self/ns/net).
+ *
+ * **BPF_PROG_TYPE_LIRC_MODE2**
+ *
+ * LIRC device path (eg /dev/lircN). Requires the kernel
+ * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
+ *
+ * **BPF_PROG_TYPE_SK_SKB**,
+ * **BPF_PROG_TYPE_SK_MSG**
+ *
+ * eBPF map of socket type (eg **BPF_MAP_TYPE_SOCKHASH**).
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_DETACH
+ * Description
+ * Detach the eBPF program associated with the *target_fd* at the
+ * hook specified by *attach_type*. The program must have been
+ * previously attached using **BPF_PROG_ATTACH**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_TEST_RUN
+ * Description
+ * Run the eBPF program associated with the *prog_fd* a *repeat*
+ * number of times against a provided program context *ctx_in* and
+ * data *data_in*, and return the modified program context
+ * *ctx_out*, *data_out* (for example, packet data), result of the
+ * execution *retval*, and *duration* of the test run.
+ *
+ * The sizes of the buffers provided as input and output
+ * parameters *ctx_in*, *ctx_out*, *data_in*, and *data_out* must
+ * be provided in the corresponding variables *ctx_size_in*,
+ * *ctx_size_out*, *data_size_in*, and/or *data_size_out*. If any
+ * of these parameters are not provided (ie set to NULL), the
+ * corresponding size field must be zero.
+ *
+ * Some program types have particular requirements:
+ *
+ * **BPF_PROG_TYPE_SK_LOOKUP**
+ * *data_in* and *data_out* must be NULL.
+ *
+ * **BPF_PROG_TYPE_RAW_TRACEPOINT**,
+ * **BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE**
+ *
+ * *ctx_out*, *data_in* and *data_out* must be NULL.
+ * *repeat* must be zero.
+ *
+ * BPF_PROG_RUN is an alias for BPF_PROG_TEST_RUN.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * **ENOSPC**
+ * Either *data_size_out* or *ctx_size_out* is too small.
+ * **ENOTSUPP**
+ * This command is not supported by the program type of
+ * the program referred to by *prog_fd*.
+ *
+ * BPF_PROG_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF program currently loaded into the kernel.
+ *
+ * Looks for the eBPF program with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF programs
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_MAP_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF map currently loaded into the kernel.
+ *
+ * Looks for the eBPF map with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF maps
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_PROG_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF program corresponding to
+ * *prog_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_MAP_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF map corresponding to
+ * *map_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_OBJ_GET_INFO_BY_FD
+ * Description
+ * Obtain information about the eBPF object corresponding to
+ * *bpf_fd*.
+ *
+ * Populates up to *info_len* bytes of *info*, which will be in
+ * one of the following formats depending on the eBPF object type
+ * of *bpf_fd*:
+ *
+ * * **struct bpf_prog_info**
+ * * **struct bpf_map_info**
+ * * **struct bpf_btf_info**
+ * * **struct bpf_link_info**
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_QUERY
+ * Description
+ * Obtain information about eBPF programs associated with the
+ * specified *attach_type* hook.
+ *
+ * The *target_fd* must be a valid file descriptor for a kernel
+ * object which depends on the attach type of *attach_bpf_fd*:
+ *
+ * **BPF_PROG_TYPE_CGROUP_DEVICE**,
+ * **BPF_PROG_TYPE_CGROUP_SKB**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK**,
+ * **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
+ * **BPF_PROG_TYPE_CGROUP_SOCKOPT**,
+ * **BPF_PROG_TYPE_CGROUP_SYSCTL**,
+ * **BPF_PROG_TYPE_SOCK_OPS**
+ *
+ * Control Group v2 hierarchy with the eBPF controller
+ * enabled. Requires the kernel to be compiled with
+ * **CONFIG_CGROUP_BPF**.
+ *
+ * **BPF_PROG_TYPE_FLOW_DISSECTOR**
+ *
+ * Network namespace (eg /proc/self/ns/net).
+ *
+ * **BPF_PROG_TYPE_LIRC_MODE2**
+ *
+ * LIRC device path (eg /dev/lircN). Requires the kernel
+ * to be compiled with **CONFIG_BPF_LIRC_MODE2**.
+ *
+ * **BPF_PROG_QUERY** always fetches the number of programs
+ * attached and the *attach_flags* which were used to attach those
+ * programs. Additionally, if *prog_ids* is nonzero and the number
+ * of attached programs is less than *prog_cnt*, populates
+ * *prog_ids* with the eBPF program ids of the programs attached
+ * at *target_fd*.
+ *
+ * The following flags may alter the result:
+ *
+ * **BPF_F_QUERY_EFFECTIVE**
+ * Only return information regarding programs which are
+ * currently effective at the specified *target_fd*.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_RAW_TRACEPOINT_OPEN
+ * Description
+ * Attach an eBPF program to a tracepoint *name* to access kernel
+ * internal arguments of the tracepoint in their raw form.
+ *
+ * The *prog_fd* must be a valid file descriptor associated with
+ * a loaded eBPF program of type **BPF_PROG_TYPE_RAW_TRACEPOINT**.
+ *
+ * No ABI guarantees are made about the content of tracepoint
+ * arguments exposed to the corresponding eBPF program.
+ *
+ * Applying **close**\ (2) to the file descriptor returned by
+ * **BPF_RAW_TRACEPOINT_OPEN** will detach the program (but see NOTES).
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_BTF_LOAD
+ * Description
+ * Verify and load BPF Type Format (BTF) metadata into the kernel,
+ * returning a new file descriptor associated with the metadata.
+ * BTF is described in more detail at
+ * https://www.kernel.org/doc/html/latest/bpf/btf.html.
+ *
+ * The *btf* parameter must point to valid memory providing
+ * *btf_size* bytes of BTF binary metadata.
+ *
+ * The returned file descriptor can be passed to other **bpf**\ ()
+ * subcommands such as **BPF_PROG_LOAD** or **BPF_MAP_CREATE** to
+ * associate the BTF with those objects.
+ *
+ * Similar to **BPF_PROG_LOAD**, **BPF_BTF_LOAD** has optional
+ * parameters to specify a *btf_log_buf*, *btf_log_size* and
+ * *btf_log_level* which allow the kernel to return freeform log
+ * output regarding the BTF verification process.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_BTF_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the BPF Type Format (BTF)
+ * corresponding to *btf_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_TASK_FD_QUERY
+ * Description
+ * Obtain information about eBPF programs associated with the
+ * target process identified by *pid* and *fd*.
+ *
+ * If the *pid* and *fd* are associated with a tracepoint, kprobe
+ * or uprobe perf event, then the *prog_id* and *fd_type* will
+ * be populated with the eBPF program id and file descriptor type
+ * of type **bpf_task_fd_type**. If associated with a kprobe or
+ * uprobe, the *probe_offset* and *probe_addr* will also be
+ * populated. Optionally, if *buf* is provided, then up to
+ * *buf_len* bytes of *buf* will be populated with the name of
+ * the tracepoint, kprobe or uprobe.
+ *
+ * The resulting *prog_id* may be introspected in deeper detail
+ * using **BPF_PROG_GET_FD_BY_ID** and **BPF_OBJ_GET_INFO_BY_FD**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_LOOKUP_AND_DELETE_ELEM
+ * Description
+ * Look up an element with the given *key* in the map referred to
+ * by the file descriptor *fd*, and if found, delete the element.
+ *
+ * For **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map
+ * types, the *flags* argument needs to be set to 0, but for other
+ * map types, it may be specified as:
+ *
+ * **BPF_F_LOCK**
+ * Look up and delete the value of a spin-locked map
+ * without returning the lock. This must be specified if
+ * the elements contain a spinlock.
+ *
+ * The **BPF_MAP_TYPE_QUEUE** and **BPF_MAP_TYPE_STACK** map types
+ * implement this command as a "pop" operation, deleting the top
+ * element rather than one corresponding to *key*.
+ * The *key* and *key_len* parameters should be zeroed when
+ * issuing this operation for these map types.
+ *
+ * This command is only valid for the following map types:
+ * * **BPF_MAP_TYPE_QUEUE**
+ * * **BPF_MAP_TYPE_STACK**
+ * * **BPF_MAP_TYPE_HASH**
+ * * **BPF_MAP_TYPE_PERCPU_HASH**
+ * * **BPF_MAP_TYPE_LRU_HASH**
+ * * **BPF_MAP_TYPE_LRU_PERCPU_HASH**
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_FREEZE
+ * Description
+ * Freeze the permissions of the specified map.
+ *
+ * Write permissions may be frozen by passing zero *flags*.
+ * Upon success, no future syscall invocations may alter the
+ * map state of *map_fd*. Write operations from eBPF programs
+ * are still possible for a frozen map.
+ *
+ * Not supported for maps of type **BPF_MAP_TYPE_STRUCT_OPS**.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_BTF_GET_NEXT_ID
+ * Description
+ * Fetch the next BPF Type Format (BTF) object currently loaded
+ * into the kernel.
+ *
+ * Looks for the BTF object with an id greater than *start_id*
+ * and updates *next_id* on success. If no other BTF objects
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_MAP_LOOKUP_BATCH
+ * Description
+ * Iterate and fetch multiple elements in a map.
+ *
+ * Two opaque values are used to manage batch operations,
+ * *in_batch* and *out_batch*. Initially, *in_batch* must be set
+ * to NULL to begin the batched operation. After each subsequent
+ * **BPF_MAP_LOOKUP_BATCH**, the caller should pass the resultant
+ * *out_batch* as the *in_batch* for the next operation to
+ * continue iteration from the current point.
+ *
+ * The *keys* and *values* are output parameters which must point
+ * to memory large enough to hold *count* items based on the key
+ * and value size of the map *map_fd*. The *keys* buffer must be
+ * of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * The *elem_flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * On success, *count* elements from the map are copied into the
+ * user buffer, with the keys copied into *keys* and the values
+ * copied into the corresponding indices in *values*.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **ENOSPC** to indicate that *keys* or
+ * *values* is too small to dump an entire bucket during
+ * iteration of a hash-based map type.
+ *
+ * BPF_MAP_LOOKUP_AND_DELETE_BATCH
+ * Description
+ * Iterate and delete all elements in a map.
+ *
+ * This operation has the same behavior as
+ * **BPF_MAP_LOOKUP_BATCH** with two exceptions:
+ *
+ * * Every element that is successfully returned is also deleted
+ * from the map. This is at least *count* elements. Note that
+ * *count* is both an input and an output parameter.
+ * * Upon returning with *errno* set to **EFAULT**, up to
+ * *count* elements may be deleted without returning the keys
+ * and values of the deleted elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_MAP_UPDATE_BATCH
+ * Description
+ * Update multiple elements in a map by *key*.
+ *
+ * The *keys* and *values* are input parameters which must point
+ * to memory large enough to hold *count* items based on the key
+ * and value size of the map *map_fd*. The *keys* buffer must be
+ * of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * Each element specified in *keys* is sequentially updated to the
+ * value in the corresponding index in *values*. The *in_batch*
+ * and *out_batch* parameters are ignored and should be zeroed.
+ *
+ * The *elem_flags* argument should be specified as one of the
+ * following:
+ *
+ * **BPF_ANY**
+ * Create new elements or update existing elements.
+ * **BPF_NOEXIST**
+ * Create new elements only if they do not exist.
+ * **BPF_EXIST**
+ * Update existing elements.
+ * **BPF_F_LOCK**
+ * Update spin_lock-ed map elements. This must be
+ * specified if the map value contains a spinlock.
+ *
+ * On success, *count* elements from the map are updated.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * May set *errno* to **EINVAL**, **EPERM**, **ENOMEM**, or
+ * **E2BIG**. **E2BIG** indicates that the number of elements in
+ * the map reached the *max_entries* limit specified at map
+ * creation time.
+ *
+ * May set *errno* to one of the following error codes under
+ * specific circumstances:
+ *
+ * **EEXIST**
+ * If *flags* specifies **BPF_NOEXIST** and the element
+ * with *key* already exists in the map.
+ * **ENOENT**
+ * If *flags* specifies **BPF_EXIST** and the element with
+ * *key* does not exist in the map.
+ *
+ * BPF_MAP_DELETE_BATCH
+ * Description
+ * Delete multiple elements in a map by *key*.
+ *
+ * The *keys* parameter is an input parameter which must point
+ * to memory large enough to hold *count* items based on the key
+ * size of the map *map_fd*, that is, *key_size* * *count*.
+ *
+ * Each element specified in *keys* is sequentially deleted. The
+ * *in_batch*, *out_batch*, and *values* parameters are ignored
+ * and should be zeroed.
+ *
+ * The *elem_flags* argument may be specified as one of the
+ * following:
+ *
+ * **BPF_F_LOCK**
+ * Look up the value of a spin-locked map without
+ * returning the lock. This must be specified if the
+ * elements contain a spinlock.
+ *
+ * On success, *count* elements from the map are updated.
+ *
+ * If an error is returned and *errno* is not **EFAULT**, *count*
+ * is set to the number of successfully processed elements. If
+ * *errno* is **EFAULT**, up to *count* elements may have been
+ * deleted.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_LINK_CREATE
+ * Description
+ * Attach an eBPF program to a *target_fd* at the specified
+ * *attach_type* hook and return a file descriptor handle for
+ * managing the link.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
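+ *
+ *		An illustrative sketch attaching an already loaded program
+ *		to a cgroup (*prog_fd* and *cgroup_fd* are assumed to be
+ *		valid file descriptors; error handling omitted):
+ *
+ *			union bpf_attr attr = {};
+ *			int link_fd;
+ *
+ *			attr.link_create.prog_fd = prog_fd;
+ *			attr.link_create.target_fd = cgroup_fd;
+ *			attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
+ *			link_fd = syscall(__NR_bpf, BPF_LINK_CREATE,
+ *					  &attr, sizeof(attr));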
+ *
+ * BPF_LINK_UPDATE
+ * Description
+ * Update the eBPF program in the specified *link_fd* to
+ * *new_prog_fd*.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_LINK_GET_FD_BY_ID
+ * Description
+ * Open a file descriptor for the eBPF Link corresponding to
+ * *link_id*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_GET_NEXT_ID
+ * Description
+ * Fetch the next eBPF link currently loaded into the kernel.
+ *
+ * Looks for the eBPF link with an id greater than *start_id*
+ * and updates *next_id* on success. If no other eBPF links
+ * remain with ids higher than *start_id*, returns -1 and sets
+ * *errno* to **ENOENT**.
+ *
+ * Return
+ * Returns zero on success. On error, or when no id remains, -1
+ * is returned and *errno* is set appropriately.
+ *
+ * BPF_ENABLE_STATS
+ * Description
+ * Enable eBPF runtime statistics gathering.
+ *
+ * Runtime statistics gathering for the eBPF runtime is disabled
+ * by default to minimize the corresponding performance overhead.
+ * This command enables statistics globally.
+ *
+ * Multiple programs may independently enable statistics.
+ * After gathering the desired statistics, eBPF runtime statistics
+ * may be disabled again by calling **close**\ (2) for the file
+ * descriptor returned by this function. Statistics will only be
+ * disabled system-wide when all outstanding file descriptors
+ * returned by prior calls for this subcommand are closed.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
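+ *
+ *		An illustrative sketch (statistics remain enabled until the
+ *		returned descriptor is closed):
+ *
+ *			union bpf_attr attr = {};
+ *			int stats_fd;
+ *
+ *			attr.enable_stats.type = BPF_STATS_RUN_TIME;
+ *			stats_fd = syscall(__NR_bpf, BPF_ENABLE_STATS,
+ *					   &attr, sizeof(attr));
+ *			// ... read run_time_ns/run_cnt from program info ...
+ *			close(stats_fd);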
+ *
+ * BPF_ITER_CREATE
+ * Description
+ * Create an iterator on top of the specified *link_fd* (as
+ * previously created using **BPF_LINK_CREATE**) and return a
+ * file descriptor that can be used to trigger the iteration.
+ *
+ * If the resulting file descriptor is pinned to the filesystem
+ * using **BPF_OBJ_PIN**, then subsequent **read**\ (2) syscalls
+ * for that path will trigger the iterator to read kernel state
+ * using the eBPF program attached to *link_fd*.
+ *
+ * Return
+ * A new file descriptor (a nonnegative integer), or -1 if an
+ * error occurred (in which case, *errno* is set appropriately).
+ *
+ * BPF_LINK_DETACH
+ * Description
+ * Forcefully detach the specified *link_fd* from its
+ * corresponding attachment point.
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
+ *
+ * BPF_PROG_BIND_MAP
+ * Description
+ * Bind a map to the lifetime of an eBPF program.
+ *
+ * The map identified by *map_fd* is bound to the program
+ * identified by *prog_fd* and only released when *prog_fd* is
+ * released. This may be used in cases where metadata should be
+ * associated with a program which otherwise does not contain any
+ * references to the map (for example, embedded in the eBPF
+ * program instructions).
+ *
+ * Return
+ * Returns zero on success. On error, -1 is returned and *errno*
+ * is set appropriately.
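+ *
+ *		An illustrative sketch (*prog_fd* and *map_fd* are assumed
+ *		to be valid file descriptors; error handling omitted):
+ *
+ *			union bpf_attr attr = {};
+ *
+ *			attr.prog_bind_map.prog_fd = prog_fd;
+ *			attr.prog_bind_map.map_fd = map_fd;
+ *			syscall(__NR_bpf, BPF_PROG_BIND_MAP,
+ *				&attr, sizeof(attr));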
+ *
+ * NOTES
+ * eBPF objects (maps and programs) can be shared between processes.
+ *
+ * * After **fork**\ (2), the child inherits file descriptors
+ * referring to the same eBPF objects.
+ * * File descriptors referring to eBPF objects can be transferred over
+ * **unix**\ (7) domain sockets.
+ * * File descriptors referring to eBPF objects can be duplicated in the
+ * usual way, using **dup**\ (2) and similar calls.
+ * * File descriptors referring to eBPF objects can be pinned to the
+ * filesystem using the **BPF_OBJ_PIN** command of **bpf**\ (2).
+ *
+ * An eBPF object is deallocated only after all file descriptors referring
+ * to the object have been closed and no references remain pinned to the
+ * filesystem or attached (for example, bound to a program or device).
+ */
enum bpf_cmd {
BPF_MAP_CREATE,
BPF_MAP_LOOKUP_ELEM,
@@ -94,6 +874,7 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
+ BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
@@ -113,6 +894,12 @@ enum bpf_cmd {
BPF_MAP_DELETE_BATCH,
BPF_LINK_CREATE,
BPF_LINK_UPDATE,
+ BPF_LINK_GET_FD_BY_ID,
+ BPF_LINK_GET_NEXT_ID,
+ BPF_ENABLE_STATS,
+ BPF_ITER_CREATE,
+ BPF_LINK_DETACH,
+ BPF_PROG_BIND_MAP,
};
enum bpf_map_type {
@@ -135,7 +922,14 @@ enum bpf_map_type {
BPF_MAP_TYPE_CPUMAP,
BPF_MAP_TYPE_XSKMAP,
BPF_MAP_TYPE_SOCKHASH,
- BPF_MAP_TYPE_CGROUP_STORAGE,
+ BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
+ /* BPF_MAP_TYPE_CGROUP_STORAGE is available to bpf programs attaching
+ * to a cgroup. The newer BPF_MAP_TYPE_CGRP_STORAGE is available to
+ * both cgroup-attached and other progs and supports all functionality
+ * provided by BPF_MAP_TYPE_CGROUP_STORAGE. So mark
+ * BPF_MAP_TYPE_CGROUP_STORAGE deprecated.
+ */
+ BPF_MAP_TYPE_CGROUP_STORAGE = BPF_MAP_TYPE_CGROUP_STORAGE_DEPRECATED,
BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
BPF_MAP_TYPE_QUEUE,
@@ -143,6 +937,12 @@ enum bpf_map_type {
BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
BPF_MAP_TYPE_STRUCT_OPS,
+ BPF_MAP_TYPE_RINGBUF,
+ BPF_MAP_TYPE_INODE_STORAGE,
+ BPF_MAP_TYPE_TASK_STORAGE,
+ BPF_MAP_TYPE_BLOOM_FILTER,
+ BPF_MAP_TYPE_USER_RINGBUF,
+ BPF_MAP_TYPE_CGRP_STORAGE,
};
/* Note that tracing related programs such as
@@ -184,6 +984,9 @@ enum bpf_prog_type {
BPF_PROG_TYPE_STRUCT_OPS,
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
+ BPF_PROG_TYPE_SK_LOOKUP,
+ BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
+ BPF_PROG_TYPE_NETFILTER,
};
enum bpf_attach_type {
@@ -215,11 +1018,44 @@ enum bpf_attach_type {
BPF_TRACE_FEXIT,
BPF_MODIFY_RETURN,
BPF_LSM_MAC,
+ BPF_TRACE_ITER,
+ BPF_CGROUP_INET4_GETPEERNAME,
+ BPF_CGROUP_INET6_GETPEERNAME,
+ BPF_CGROUP_INET4_GETSOCKNAME,
+ BPF_CGROUP_INET6_GETSOCKNAME,
+ BPF_XDP_DEVMAP,
+ BPF_CGROUP_INET_SOCK_RELEASE,
+ BPF_XDP_CPUMAP,
+ BPF_SK_LOOKUP,
+ BPF_XDP,
+ BPF_SK_SKB_VERDICT,
+ BPF_SK_REUSEPORT_SELECT,
+ BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
+ BPF_PERF_EVENT,
+ BPF_TRACE_KPROBE_MULTI,
+ BPF_LSM_CGROUP,
+ BPF_STRUCT_OPS,
__MAX_BPF_ATTACH_TYPE
};
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+enum bpf_link_type {
+ BPF_LINK_TYPE_UNSPEC = 0,
+ BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
+ BPF_LINK_TYPE_TRACING = 2,
+ BPF_LINK_TYPE_CGROUP = 3,
+ BPF_LINK_TYPE_ITER = 4,
+ BPF_LINK_TYPE_NETNS = 5,
+ BPF_LINK_TYPE_XDP = 6,
+ BPF_LINK_TYPE_PERF_EVENT = 7,
+ BPF_LINK_TYPE_KPROBE_MULTI = 8,
+ BPF_LINK_TYPE_STRUCT_OPS = 9,
+ BPF_LINK_TYPE_NETFILTER = 10,
+
+ MAX_BPF_LINK_TYPE,
+};
+
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
*
* NONE(default): No further bpf programs allowed in the subtree.
@@ -275,7 +1111,7 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
-/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
+/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will allow any alignment whatsoever. On platforms
* with strict alignment requirements for loads ands stores (such
* as sparc and mips) the verifier validates that all loads and
@@ -310,24 +1146,82 @@ enum bpf_attach_type {
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_STATE_FREQ (1U << 3)
+/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
+ * restrict map and helper usage for such programs. Sleepable BPF programs can
+ * only be attached to hooks where kernel execution context allows sleeping.
+ * Such programs are allowed to use helpers that may sleep like
+ * bpf_copy_from_user().
+ */
+#define BPF_F_SLEEPABLE (1U << 4)
+
+/* If BPF_F_XDP_HAS_FRAGS is used in BPF_PROG_LOAD command, the loaded program
+ * fully support xdp frags.
+ */
+#define BPF_F_XDP_HAS_FRAGS (1U << 5)
+
+/* If BPF_F_XDP_DEV_BOUND_ONLY is used in BPF_PROG_LOAD command, the loaded
+ * program becomes device-bound but can access XDP metadata.
+ */
+#define BPF_F_XDP_DEV_BOUND_ONLY (1U << 6)
+
+/* link_create.kprobe_multi.flags used in LINK_CREATE command for
+ * BPF_TRACE_KPROBE_MULTI attach type to create return probe.
+ */
+#define BPF_F_KPROBE_MULTI_RETURN (1U << 0)
+
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
- * two extensions:
- *
- * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
- * insn[0].imm: map fd map fd
- * insn[1].imm: 0 offset into value
- * insn[0].off: 0 0
- * insn[1].off: 0 0
- * ldimm64 rewrite: address of map address of map[0]+offset
- * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
+ * the following extensions:
+ *
+ * insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
+ * insn[0].imm: map fd or fd_idx
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map
+ * verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
-#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX 5
+
+/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
+ * insn[0].imm: map fd or fd_idx
+ * insn[1].imm: offset into value
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map[0]+offset
+ * verifier type: PTR_TO_MAP_VALUE
+ */
+#define BPF_PSEUDO_MAP_VALUE 2
+#define BPF_PSEUDO_MAP_IDX_VALUE 6
+
+/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
+ * insn[0].imm: kernel btf id of VAR
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the kernel variable
+ * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
+ * is struct/union.
+ */
+#define BPF_PSEUDO_BTF_ID 3
+/* insn[0].src_reg: BPF_PSEUDO_FUNC
+ * insn[0].imm: insn offset to the func
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the function
+ * verifier type: PTR_TO_FUNC.
+ */
+#define BPF_PSEUDO_FUNC 4
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
*/
#define BPF_PSEUDO_CALL 1
+/* when bpf_call->src_reg == BPF_PSEUDO_KFUNC_CALL,
+ * bpf_call->imm == btf_id of a BTF_KIND_FUNC in the running kernel
+ */
+#define BPF_PSEUDO_KFUNC_CALL 2
/* flags for BPF_MAP_UPDATE_ELEM command */
enum {
@@ -369,16 +1263,38 @@ enum {
/* Enable memory-mapping BPF map */
BPF_F_MMAPABLE = (1U << 10),
+
+/* Share perf_event among processes */
+ BPF_F_PRESERVE_ELEMS = (1U << 11),
+
+/* Create a map that is suitable to be an inner map with dynamic max entries */
+ BPF_F_INNER_MAP = (1U << 12),
+
+/* Create a map that will be registered/unregistered by the backing bpf_link */
+ BPF_F_LINK = (1U << 13),
};
/* Flags for BPF_PROG_QUERY. */
/* Query effective (directly attached + inherited from ancestor cgroups)
* programs that will be executed for events within a cgroup.
- * attach_flags with this flag are returned only for directly attached programs.
+ * attach_flags with this flag are always returned as 0.
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+/* If set, XDP frames will be transmitted after processing */
+#define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1)
+
+/* type for BPF_ENABLE_STATS */
+enum bpf_stats_type {
+ /* enables run_time_ns and run_cnt */
+ BPF_STATS_RUN_TIME = 0,
+};
+
enum bpf_stack_build_id_status {
/* user space need an empty entry to identify end of a trace */
BPF_STACK_BUILD_ID_EMPTY = 0,
@@ -422,6 +1338,13 @@ union bpf_attr {
* struct stored as the
* map value
*/
+ /* Any per-map-type extra fields
+ *
+ * BPF_MAP_TYPE_BLOOM_FILTER - the lowest 4 bits indicate the
+ * number of hash functions (if 0, the bloom filter will default
+ * to using 5 hash functions).
+ */
+ __u64 map_extra;
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -476,7 +1399,21 @@ union bpf_attr {
__aligned_u64 line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
__u32 attach_btf_id; /* in-kernel BTF type id to attach to */
- __u32 attach_prog_fd; /* 0 to attach to vmlinux */
+ union {
+ /* valid prog_fd to attach to bpf prog */
+ __u32 attach_prog_fd;
+ /* or valid module BTF object fd or 0 to attach to vmlinux */
+ __u32 attach_btf_obj_fd;
+ };
+ __u32 core_relo_cnt; /* number of bpf_core_relo */
+ __aligned_u64 fd_array; /* array of FDs */
+ __aligned_u64 core_relos;
+ __u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
+ /* output: actual total log contents size (including terminating zero).
+ * It can be either larger than the original log_size (if the log was
+ * truncated) or smaller (if the log buffer wasn't filled completely).
+ */
+ __u32 log_true_size;
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -515,6 +1452,9 @@ union bpf_attr {
*/
__aligned_u64 ctx_in;
__aligned_u64 ctx_out;
+ __u32 flags;
+ __u32 cpu;
+ __u32 batch_size;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -523,6 +1463,7 @@ union bpf_attr {
__u32 prog_id;
__u32 map_id;
__u32 btf_id;
+ __u32 link_id;
};
__u32 next_id;
__u32 open_flags;
@@ -541,6 +1482,10 @@ union bpf_attr {
__u32 attach_flags;
__aligned_u64 prog_ids;
__u32 prog_cnt;
+ /* output: per-program attach_flags.
+ * not allowed to be set during effective query.
+ */
+ __aligned_u64 prog_attach_flags;
} query;
struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */
@@ -554,6 +1499,11 @@ union bpf_attr {
__u32 btf_size;
__u32 btf_log_size;
__u32 btf_log_level;
+ /* output: actual total log contents size (including terminating zero).
+ * It can be either larger than the original log_size (if the log was
+ * truncated) or smaller (if the log buffer wasn't filled completely).
+ */
+ __u32 btf_log_true_size;
};
struct {
@@ -573,22 +1523,94 @@ union bpf_attr {
} task_fd_query;
struct { /* struct used by BPF_LINK_CREATE command */
- __u32 prog_fd; /* eBPF program to attach */
- __u32 target_fd; /* object to attach to */
+ union {
+ __u32 prog_fd; /* eBPF program to attach */
+ __u32 map_fd; /* struct_ops to attach */
+ };
+ union {
+ __u32 target_fd; /* object to attach to */
+ __u32 target_ifindex; /* target ifindex */
+ };
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
+ union {
+ __u32 target_btf_id; /* btf_id of target to attach to */
+ struct {
+ __aligned_u64 iter_info; /* extra bpf_iter_link_info */
+ __u32 iter_info_len; /* iter_info length */
+ };
+ struct {
+ /* black box user-provided value passed through
+ * to BPF program at the execution time and
+ * accessible through bpf_get_attach_cookie() BPF helper
+ */
+ __u64 bpf_cookie;
+ } perf_event;
+ struct {
+ __u32 flags;
+ __u32 cnt;
+ __aligned_u64 syms;
+ __aligned_u64 addrs;
+ __aligned_u64 cookies;
+ } kprobe_multi;
+ struct {
+ /* this is overlaid with the target_btf_id above. */
+ __u32 target_btf_id;
+ /* black box user-provided value passed through
+ * to BPF program at the execution time and
+ * accessible through bpf_get_attach_cookie() BPF helper
+ */
+ __u64 cookie;
+ } tracing;
+ struct {
+ __u32 pf;
+ __u32 hooknum;
+ __s32 priority;
+ __u32 flags;
+ } netfilter;
+ };
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */
__u32 link_fd; /* link fd */
- /* new program fd to update link with */
- __u32 new_prog_fd;
+ union {
+ /* new program fd to update link with */
+ __u32 new_prog_fd;
+ /* new struct_ops map fd to update link with */
+ __u32 new_map_fd;
+ };
__u32 flags; /* extra flags */
- /* expected link's program fd; is specified only if
- * BPF_F_REPLACE flag is set in flags */
- __u32 old_prog_fd;
+ union {
+ /* expected link's program fd; is specified only if
+ * BPF_F_REPLACE flag is set in flags.
+ */
+ __u32 old_prog_fd;
+ /* expected link's map fd; is specified only
+ * if BPF_F_REPLACE flag is set.
+ */
+ __u32 old_map_fd;
+ };
} link_update;
+ struct {
+ __u32 link_fd;
+ } link_detach;
+
+ struct { /* struct used by BPF_ENABLE_STATS command */
+ __u32 type;
+ } enable_stats;
+
+ struct { /* struct used by BPF_ITER_CREATE command */
+ __u32 link_fd;
+ __u32 flags;
+ } iter_create;
+
+ struct { /* struct used by BPF_PROG_BIND_MAP command */
+ __u32 prog_fd;
+ __u32 map_fd;
+ __u32 flags; /* extra flags */
+ } prog_bind_map;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -596,7 +1618,7 @@ union bpf_attr {
* parsed and used to produce a manual page. The workflow is the following,
* and requires the rst2man utility:
*
- * $ ./scripts/bpf_helpers_doc.py \
+ * $ ./scripts/bpf_doc.py \
* --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst
* $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7
* $ man /tmp/bpf-helpers.7
@@ -615,7 +1637,7 @@ union bpf_attr {
* Map value associated to *key*, or **NULL** if no entry was
* found.
*
- * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
+ * long bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags)
* Description
* Add or update the value of the entry associated to *key* in
* *map* with *value*. *flags* is one of:
@@ -633,43 +1655,45 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
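+ *
+ *		An illustrative BPF-program sketch (assumes a map *my_map*
+ *		with **__u32** keys and **__u64** values defined elsewhere
+ *		in the program):
+ *
+ *			__u32 key = 0;
+ *			__u64 value = 42;
+ *
+ *			if (bpf_map_update_elem(&my_map, &key, &value, BPF_ANY))
+ *				return 0; // update failed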
*
- * int bpf_map_delete_elem(struct bpf_map *map, const void *key)
+ * long bpf_map_delete_elem(struct bpf_map *map, const void *key)
* Description
* Delete entry with *key* from *map*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
* kernel space address *unsafe_ptr* and store the data in *dst*.
*
- * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
- * instead.
+ * Generally, use **bpf_probe_read_user**\ () or
+ * **bpf_probe_read_kernel**\ () instead.
* Return
* 0 on success, or a negative error in case of failure.
*
* u64 bpf_ktime_get_ns(void)
* Description
* Return the time elapsed since system boot, in nanoseconds.
+ * Does not include time the system was suspended.
+ * See: **clock_gettime**\ (**CLOCK_MONOTONIC**)
* Return
* Current *ktime*.
*
- * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
+ * long bpf_trace_printk(const char *fmt, u32 fmt_size, ...)
* Description
* This helper is a "printk()-like" facility for debugging. It
* prints a message defined by format *fmt* (of size *fmt_size*)
- * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
+ * to file *\/sys/kernel/tracing/trace* from TraceFS, if
* available. It can take up to three additional **u64**
* arguments (as an eBPF helper, the total number of arguments is
* limited to five).
*
* Each time the helper is called, it appends a line to the trace.
- * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
- * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
+ * Lines are discarded while *\/sys/kernel/tracing/trace* is
+ * open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
* The format of the trace is customizable, and the exact output
* one will get depends on the options set in
- * *\/sys/kernel/debug/tracing/trace_options* (see also the
+ * *\/sys/kernel/tracing/trace_options* (see also the
* *README* file under the same directory). However, it usually
* defaults to something like:
*
@@ -705,7 +1729,7 @@ union bpf_attr {
*
* Also, note that **bpf_trace_printk**\ () is slow, and should
* only be used for debugging purposes. For this reason, a notice
- * bloc (spanning several lines) is printed to kernel logs and
+ * block (spanning several lines) is printed to kernel logs and
* states that the helper should not be used "for production use"
* the first time this helper is used (or more precisely, when
* **trace_printk**\ () buffers are allocated). For passing values
@@ -729,13 +1753,13 @@ union bpf_attr {
* u32 bpf_get_smp_processor_id(void)
* Description
* Get the SMP (symmetric multiprocessing) processor id. Note that
- * all programs run with preemption disabled, which means that the
+ * all programs run with migration disabled, which means that the
* SMP processor id is stable during all the execution of the
* program.
* Return
* The SMP id of the processor running the program.
*
- * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
+ * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags)
* Description
* Store *len* bytes from address *from* into the packet
* associated to *skb*, at *offset*. *flags* are a combination of
@@ -752,7 +1776,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
+ * long bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size)
* Description
* Recompute the layer 3 (e.g. IP) checksum for the packet
* associated to *skb*. Computation is incremental, so the helper
@@ -777,7 +1801,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
+ * long bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags)
* Description
* Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the
* packet associated to *skb*. Computation is incremental, so the
@@ -809,7 +1833,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
+ * long bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index)
* Description
* This special helper is used to trigger a "tail call", or in
* other words, to jump into another eBPF program. The same stack
@@ -836,11 +1860,11 @@ union bpf_attr {
* if the maximum number of tail calls has been reached for this
* chain of programs. This limit is defined in the kernel by the
* macro **MAX_TAIL_CALL_CNT** (not accessible to user space),
- * which is currently set to 32.
+ * which is currently set to 33.
* Return
* 0 on success, or a negative error in case of failure.
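+ *
+ *		An illustrative sketch for an XDP program (assumes
+ *		*prog_array* is a **BPF_MAP_TYPE_PROG_ARRAY** map populated
+ *		from user space):
+ *
+ *			bpf_tail_call(ctx, &prog_array, 2);
+ *			// only reached if the tail call failed
+ *			return XDP_PASS;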
*
- * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
+ * long bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags)
* Description
* Clone and redirect the packet associated to *skb* to another
* net device of index *ifindex*. Both ingress and egress
@@ -865,6 +1889,8 @@ union bpf_attr {
* 0 on success, or a negative error in case of failure.
*
* u64 bpf_get_current_pid_tgid(void)
+ * Description
+ * Get the current pid and tgid.
* Return
* A 64-bit integer containing the current tgid and pid, and
* created as such:
@@ -872,11 +1898,13 @@ union bpf_attr {
* *current_task*\ **->pid**.
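+ *
+ *		For example, the two halves can be extracted as follows
+ *		(illustrative):
+ *
+ *			__u64 id = bpf_get_current_pid_tgid();
+ *			__u32 tgid = id >> 32;
+ *			__u32 pid = (__u32)id;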
*
* u64 bpf_get_current_uid_gid(void)
+ * Description
+ * Get the current uid and gid.
* Return
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
*
- * int bpf_get_current_comm(void *buf, u32 size_of_buf)
+ * long bpf_get_current_comm(void *buf, u32 size_of_buf)
* Description
* Copy the **comm** attribute of the current task into *buf* of
* *size_of_buf*. The **comm** attribute contains the name of
@@ -913,7 +1941,7 @@ union bpf_attr {
* Return
* The classid, or 0 for the default unconfigured classid.
*
- * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
+ * long bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
* Description
* Push a *vlan_tci* (VLAN tag control information) of protocol
* *vlan_proto* to the packet associated to *skb*, then update
@@ -929,7 +1957,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_vlan_pop(struct sk_buff *skb)
+ * long bpf_skb_vlan_pop(struct sk_buff *skb)
* Description
* Pop a VLAN header from the packet associated to *skb*.
*
@@ -941,7 +1969,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
* Description
* Get tunnel metadata. This helper takes a pointer *key* to an
* empty **struct bpf_tunnel_key** of **size**, that will be
@@ -971,14 +1999,14 @@ union bpf_attr {
*
* int ret;
* struct bpf_tunnel_key key = {};
- *
+ *
* ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0);
* if (ret < 0)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* if (key.remote_ipv4 != 0x0a000001)
* return TC_ACT_SHOT; // drop packet
- *
+ *
* return TC_ACT_OK; // accept packet
*
* This interface can also be used with all encapsulation devices
@@ -992,7 +2020,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
+ * long bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags)
* Description
* Populate tunnel metadata for packet associated to *skb.* The
* tunnel metadata is set to the contents of *key*, of *size*. The
@@ -1014,6 +2042,9 @@ union bpf_attr {
* sending the packet. This flag was added for GRE
* encapsulation, but might be used with other protocols
* as well in the future.
+ * **BPF_F_NO_TUNNEL_KEY**
+ * Add a flag to tunnel metadata indicating that no tunnel
+ * key should be set in the resulting tunnel header.
*
* Here is a typical usage on the transmit path:
*
@@ -1058,7 +2089,7 @@ union bpf_attr {
* The value of the perf event counter read from the map, or a
* negative error code in case of failure.
*
- * int bpf_redirect(u32 ifindex, u64 flags)
+ * long bpf_redirect(u32 ifindex, u64 flags)
* Description
* Redirect the packet to another net device of index *ifindex*.
* This helper is somewhat similar to **bpf_clone_redirect**\
@@ -1085,7 +2116,7 @@ union bpf_attr {
* Description
* Retrieve the realm or the route, that is to say the
* **tclassid** field of the destination for the *skb*. The
- * indentifier retrieved is a user-provided tag, similar to the
+ * identifier retrieved is a user-provided tag, similar to the
* one used with the net_cls cgroup (see description for
* **bpf_get_cgroup_classid**\ () helper), but here this tag is
* held by a route (a destination entry), not by a task.
@@ -1105,7 +2136,7 @@ union bpf_attr {
* The realm of the route for the packet associated to *skb*, or 0
* if none was found.
*
- * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -1150,7 +2181,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
+ * long bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
* Description
* This helper was provided as an easy way to load data from a
* packet. It can be used to load *len* bytes from *offset* from
@@ -1167,7 +2198,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
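+ *
+ *		An illustrative sketch for a TC classifier program, loading
+ *		the Ethernet header into a local buffer:
+ *
+ *			__u8 eth[14];
+ *
+ *			if (bpf_skb_load_bytes(skb, 0, eth, sizeof(eth)))
+ *				return TC_ACT_SHOT; // drop packet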
*
- * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
+ * long bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
* Description
* Walk a user or a kernel stack and return its id. To achieve
* this, the helper needs *ctx*, which is a pointer to the context
@@ -1236,7 +2267,7 @@ union bpf_attr {
* The checksum result, or a negative error code in case of
* failure.
*
- * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Retrieve tunnel options metadata for the packet associated to
* *skb*, and store the raw tunnel option data to the buffer *opt*
@@ -1254,7 +2285,7 @@ union bpf_attr {
* Return
* The size of the option data retrieved.
*
- * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
+ * long bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Set tunnel options metadata for the packet associated to *skb*
* to the option data contained in the raw buffer *opt* of *size*.
@@ -1264,7 +2295,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
+ * long bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags)
* Description
* Change the protocol of the *skb* to *proto*. Currently
* supported are transition from IPv4 to IPv6, and from IPv6 to
@@ -1291,7 +2322,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_change_type(struct sk_buff *skb, u32 type)
+ * long bpf_skb_change_type(struct sk_buff *skb, u32 type)
* Description
* Change the packet type for the packet associated to *skb*. This
* comes down to setting *skb*\ **->pkt_type** to *type*, except
@@ -1318,7 +2349,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
+ * long bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index)
* Description
* Check whether *skb* is a descendant of the cgroup2 held by
* *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*.
@@ -1346,10 +2377,12 @@ union bpf_attr {
* The 32-bit hash.
*
* u64 bpf_get_current_task(void)
+ * Description
+ * Get the current task.
* Return
* A pointer to the current task struct.
*
- * int bpf_probe_write_user(void *dst, const void *src, u32 len)
+ * long bpf_probe_write_user(void *dst, const void *src, u32 len)
* Description
* Attempt in a safe way to write *len* bytes from the buffer
* *src* to *dst* in memory. It only works for threads that are in
@@ -1368,7 +2401,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
+ * long bpf_current_task_under_cgroup(struct bpf_map *map, u32 index)
* Description
* Check whether the probe is being run in the context of a given
* subset of the cgroup2 hierarchy. The cgroup2 to test is held by
@@ -1376,11 +2409,11 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 1, if current task belongs to the cgroup2.
+ * * 0, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
- * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
* Description
* Resize (trim or grow) the packet associated to *skb* to the
* new *len*. The *flags* are reserved for future usage, and must
@@ -1404,12 +2437,13 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_pull_data(struct sk_buff *skb, u32 len)
+ * long bpf_skb_pull_data(struct sk_buff *skb, u32 len)
* Description
* Pull in non-linear data in case the *skb* is non-linear and not
* all of *len* are part of the linear section. Make *len* bytes
* from *skb* readable and writable. If a zero value is passed for
- * *len*, then the whole length of the *skb* is pulled.
+ * *len*, then all bytes in the linear part of *skb* will be made
+ * readable and writable.
*
* This helper is only needed for reading and writing with direct
* packet access.
@@ -1459,8 +2493,10 @@ union bpf_attr {
* indicate that the hash is outdated and to trigger a
* recalculation the next time the kernel tries to access this
* hash or when the **bpf_get_hash_recalc**\ () helper is called.
+ * Return
+ * void.
*
- * int bpf_get_numa_node_id(void)
+ * long bpf_get_numa_node_id(void)
* Description
* Return the id of the current NUMA node. The primary use case
* for this helper is the selection of sockets for the local NUMA
@@ -1471,7 +2507,7 @@ union bpf_attr {
* Return
* The id of current NUMA node.
*
- * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
+ * long bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags)
* Description
* Grows headroom of packet associated to *skb* and adjusts the
* offset of the MAC header accordingly, adding *len* bytes of
@@ -1492,7 +2528,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that
* it is possible to use a negative value for *delta*. This helper
@@ -1507,14 +2543,14 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe kernel address
- * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
+ * *unsafe_ptr* to *dst*. See **bpf_probe_read_kernel_str**\ () for
* more details.
*
- * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
- * instead.
+ * Generally, use **bpf_probe_read_user_str**\ () or
+ * **bpf_probe_read_kernel_str**\ () instead.
* Return
* On success, the strictly positive length of the string,
* including the trailing NUL character. On error, a negative
@@ -1530,24 +2566,34 @@ union bpf_attr {
* networking traffic statistics as it provides a global socket
* identifier that can be assumed unique.
* Return
- * A 8-byte long non-decreasing number on success, or 0 if the
- * socket field is missing inside *skb*.
+ * An 8-byte long unique number on success, or 0 if the socket
+ * field is missing inside *skb*.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
* Description
* Equivalent to bpf_get_socket_cookie() helper that accepts
* *skb*, but gets socket from **struct bpf_sock_addr** context.
* Return
- * A 8-byte long non-decreasing number.
+ * An 8-byte long unique number.
*
* u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
* Description
- * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
* *skb*, but gets socket from **struct bpf_sock_ops** context.
* Return
- * A 8-byte long non-decreasing number.
+ * An 8-byte long unique number.
+ *
+ * u64 bpf_get_socket_cookie(struct sock *sk)
+ * Description
+ * Equivalent to **bpf_get_socket_cookie**\ () helper that accepts
+ * *sk*, but gets socket from a BTF **struct sock**. This helper
+ * also works for sleepable programs.
+ * Return
+ * An 8-byte long unique number or 0 if *sk* is NULL.
*
* u32 bpf_get_socket_uid(struct sk_buff *skb)
+ * Description
+ * Get the owner UID of the socket associated to *skb*.
* Return
* The owner UID of the socket associated to *skb*. If the socket
* is **NULL**, or if it is not a full socket (i.e. if it is a
@@ -1555,14 +2601,14 @@ union bpf_attr {
* is returned (note that **overflowuid** might also be the actual
* UID value for the socket).
*
- * u32 bpf_set_hash(struct sk_buff *skb, u32 hash)
+ * long bpf_set_hash(struct sk_buff *skb, u32 hash)
* Description
* Set the full hash for *skb* (set the field *skb*\ **->hash**)
* to value *hash*.
* Return
* 0
*
- * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1570,32 +2616,55 @@ union bpf_attr {
* must be specified, see **setsockopt(2)** for more information.
* The option value of length *optlen* is pointed by *optval*.
*
+ * *bpf_socket* should be one of the following:
+ *
+ * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
+ * and **BPF_CGROUP_INET6_CONNECT**.
+ *
* This helper actually implements a subset of **setsockopt()**.
* It supports the following *level*\ s:
*
* * **SOL_SOCKET**, which supports the following *optname*\ s:
* **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**,
- * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**.
+ * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**,
+ * **SO_BINDTODEVICE**, **SO_KEEPALIVE**, **SO_REUSEADDR**,
+ * **SO_REUSEPORT**, **SO_BINDTOIFINDEX**, **SO_TXREHASH**.
* * **IPPROTO_TCP**, which supports the following *optname*\ s:
* **TCP_CONGESTION**, **TCP_BPF_IW**,
- * **TCP_BPF_SNDCWND_CLAMP**.
+ * **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
+ * **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
+ * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**,
+ * **TCP_NODELAY**, **TCP_MAXSEG**, **TCP_WINDOW_CLAMP**,
+ * **TCP_THIN_LINEAR_TIMEOUTS**, **TCP_BPF_DELACK_MAX**,
+ * **TCP_BPF_RTO_MIN**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
- * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ * * **IPPROTO_IPV6**, which supports the following *optname*\ s:
+ * **IPV6_TCLASS**, **IPV6_AUTOFLOWLABEL**.
* Return
* 0 on success, or a negative error in case of failure.
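+ *
+ *		An illustrative sketch for a **BPF_PROG_TYPE_SOCK_OPS**
+ *		program switching the congestion control algorithm (*skops*
+ *		is the program's **struct bpf_sock_ops** context):
+ *
+ *			char cc[] = "cubic";
+ *
+ *			bpf_setsockopt(skops, IPPROTO_TCP, TCP_CONGESTION,
+ *				       cc, sizeof(cc));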
*
- * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
+ * long bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags)
* Description
* Grow or shrink the room for data in the packet associated to
* *skb* by *len_diff*, and according to the selected *mode*.
*
+ * By default, the helper will reset any offloaded checksum
+ * indicator of the skb to CHECKSUM_NONE. This can be avoided
+ * by the following flag:
+ *
+ * * **BPF_F_ADJ_ROOM_NO_CSUM_RESET**: Do not reset offloaded
+ * checksum data of the skb to CHECKSUM_NONE.
+ *
* There are two supported modes at this time:
*
* * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer
- * (room space is added or removed below the layer 2 header).
+ * (room space is added or removed between the layer 2 and
+ * layer 3 headers).
*
* * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer
- * (room space is added or removed below the layer 3 header).
+ * (room space is added or removed between the layer 3 and
+ * layer 4 headers).
*
* The following flags are supported at this time:
*
@@ -1615,6 +2684,15 @@ union bpf_attr {
* Use with ENCAP_L3/L4 flags to further specify the tunnel
* type; *len* is the length of the inner MAC header.
*
+ * * **BPF_F_ADJ_ROOM_ENCAP_L2_ETH**:
+ * Use with BPF_F_ADJ_ROOM_ENCAP_L2 flag to further specify the
+ * L2 type as Ethernet.
+ *
+ * * **BPF_F_ADJ_ROOM_DECAP_L3_IPV4**,
+ * **BPF_F_ADJ_ROOM_DECAP_L3_IPV6**:
+ * Indicate the new IP header version after decapsulating the outer
+ * IP header. Used when the inner and outer IP versions are different.
+ *
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
* previously done by the verifier are invalidated and must be
@@ -1623,7 +2701,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_redirect_map(struct bpf_map *map, u64 key, u64 flags)
* Description
* Redirect the packet to the endpoint referenced by *map* at
* index *key*. Depending on its type, this *map* can contain
@@ -1634,17 +2712,21 @@ union bpf_attr {
*
* The lower two bits of *flags* are used as the return code if
* the map lookup fails. This is so that the return value can be
- * one of the XDP program return codes up to XDP_TX, as chosen by
- * the caller. Any higher bits in the *flags* argument must be
- * unset.
+ * one of the XDP program return codes up to **XDP_TX**, as chosen
+ * by the caller. The higher bits of *flags* can be set to
+ * BPF_F_BROADCAST or BPF_F_EXCLUDE_INGRESS as defined below.
*
- * See also bpf_redirect(), which only supports redirecting to an
- * ifindex, but doesn't require a map to do so.
+ * With BPF_F_BROADCAST the packet will be broadcast to all the
+ * interfaces in the map; with BPF_F_EXCLUDE_INGRESS the ingress
+ * interface will be excluded from the broadcast.
+ *
+ * See also **bpf_redirect**\ (), which only supports redirecting
+ * to an ifindex, but doesn't require a map to do so.
* Return
* **XDP_REDIRECT** on success, or the value of the two lower bits
- * of the **flags* argument on error.
+ * of the *flags* argument on error.
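+ *
+ *		An illustrative XDP sketch (assumes *devmap* is a
+ *		**BPF_MAP_TYPE_DEVMAP** map; the lower bits of *flags* make
+ *		a failed lookup fall back to **XDP_PASS**):
+ *
+ *			return bpf_redirect_map(&devmap, key, XDP_PASS);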
*
- * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@@ -1655,7 +2737,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a *map* referencing sockets. The
* *skops* is used as a new value for the entry associated to
@@ -1674,7 +2756,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust the address pointed by *xdp_md*\ **->data_meta** by
* *delta* (which can be positive or negative). Note that this
@@ -1703,7 +2785,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
* Read the value of a perf event counter, and store it into *buf*
* of size *buf_size*. This helper relies on a *map* of type
@@ -1747,15 +2829,15 @@ union bpf_attr {
* the time running for event since last normalization. The
* enabled and running times are accumulated since the perf event
* open. To achieve scaling factor between two invocations of an
- * eBPF program, users can can use CPU id as the key (which is
+ * eBPF program, users can use CPU id as the key (which is
* typical for perf array usage model) to remember the previous
* value and do the calculation inside the eBPF program.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
+ * long bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size)
* Description
- * For en eBPF program attached to a perf event, retrieve the
+ * For an eBPF program attached to a perf event, retrieve the
* value of the event counter associated to *ctx* and store it in
* the structure pointed by *buf* and of size *buf_size*. Enabled
* and running times are also stored in the structure (see
@@ -1764,7 +2846,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
+ * long bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1773,24 +2855,28 @@ union bpf_attr {
* The retrieved value is stored in the structure pointed by
* *opval* and of length *optlen*.
*
- * This helper actually implements a subset of **getsockopt()**.
- * It supports the following *level*\ s:
+ * *bpf_socket* should be one of the following:
*
- * * **IPPROTO_TCP**, which supports *optname*
- * **TCP_CONGESTION**.
- * * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
- * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
+ * * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
+ * * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
+ * and **BPF_CGROUP_INET6_CONNECT**.
+ *
+ * This helper actually implements a subset of **getsockopt()**.
+ * It supports the same set of *optname*\ s that is supported by
+ * the **bpf_setsockopt**\ () helper, with two exceptions:
+ * **TCP_BPF_*** options are **bpf_setsockopt**\ () only, and
+ * **TCP_SAVED_SYN** is **bpf_getsockopt**\ () only.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_override_return(struct pt_regs *regs, u64 rc)
+ * long bpf_override_return(struct pt_regs *regs, u64 rc)
* Description
* Used for error injection, this helper uses kprobes to override
* the return value of the probed function, and to set it to *rc*.
* The first argument is the context *regs* on which the kprobe
* works.
*
- * This helper works by setting setting the PC (program counter)
+ * This helper works by setting the PC (program counter)
* to an override function which is run in place of the original
* probed function. This means the probed function is not run at
* all. The replacement function just returns with the required
@@ -1808,7 +2894,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
+ * long bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval)
* Description
* Attempt to set the value of the **bpf_sock_ops_cb_flags** field
* for the full TCP socket associated to *bpf_sock_ops* to
@@ -1852,7 +2938,7 @@ union bpf_attr {
* be set is returned (which comes down to 0 if all bits were set
* as required).
*
- * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
+ * long bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* socket level. If the message *msg* is allowed to pass (i.e. if
@@ -1866,7 +2952,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes)
* Description
* For socket policies, apply the verdict of the eBPF program to
* the next *bytes* (number of bytes) of message *msg*.
@@ -1900,7 +2986,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
+ * long bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes)
* Description
* For socket policies, prevent the execution of the verdict eBPF
* program for message *msg* until *bytes* (byte number) have been
@@ -1918,7 +3004,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
+ * long bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags)
* Description
* For socket policies, pull in non-linear data from user space
* for *msg* and set pointers *msg*\ **->data** and *msg*\
@@ -1949,7 +3035,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
+ * long bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len)
* Description
* Bind the socket associated to *ctx* to the address pointed by
* *addr*, of length *addr_len*. This allows for making outgoing
@@ -1959,18 +3045,19 @@ union bpf_attr {
*
* This helper works for IPv4 and IPv6, TCP and UDP sockets. The
* domain (*addr*\ **->sa_family**) must be **AF_INET** (or
- * **AF_INET6**). Looking for a free port to bind to can be
- * expensive, therefore binding to port is not permitted by the
- * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively)
- * must be set to zero.
+ * **AF_INET6**). It's advised to pass a zero port (**sin_port**
+ * or **sin6_port**) which triggers IP_BIND_ADDRESS_NO_PORT-like
+ * behavior and lets the kernel efficiently pick up an unused
+ * port as long as the 4-tuple is unique. Passing a non-zero port
+ * might lead to degraded performance.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
+ * long bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta)
* Description
* Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is
- * only possible to shrink the packet as of this writing,
- * therefore *delta* must be a negative integer.
+ * possible to both shrink and grow the packet tail.
+ * Shrinking is done by passing a negative *delta*.
*
* A call to this helper is susceptible to change the underlying
* packet buffer. Therefore, at load time, all checks on pointers
@@ -1980,7 +3067,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
+ * long bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags)
* Description
* Retrieve the XFRM state (IP transform framework, see also
* **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*.
@@ -1996,7 +3083,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *ctx*, which is a pointer
@@ -2012,8 +3099,18 @@ union bpf_attr {
* **BPF_F_USER_STACK**
* Collect a user space stack instead of a kernel stack.
* **BPF_F_USER_BUILD_ID**
- * Collect buildid+offset instead of ips for user stack,
- * only valid if **BPF_F_USER_STACK** is also specified.
+ * Collect (build_id, file_offset) instead of ips for user
+ * stack, only valid if **BPF_F_USER_STACK** is also
+ * specified.
+ *
+ * *file_offset* is an offset relative to the beginning
+ * of the executable or shared object file backing the vma
+ * which the *ip* falls in. It is *not* an offset relative
+ * to that object's base address. Accordingly, it must be
+ * adjusted by adding (sh_addr - sh_offset), where
+ * sh_{addr,offset} correspond to the executable section
+ * containing *file_offset* in the object, for comparisons
+ * to symbols' st_value to be valid.
*
* **bpf_get_stack**\ () can collect up to
* **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject
@@ -2026,10 +3123,10 @@ union bpf_attr {
*
* # sysctl kernel.perf_event_max_stack=<new value>
* Return
- * A non-negative value equal to or less than *size* on success,
- * or a negative error in case of failure.
+ * The non-negative copied *buf* length equal to or less than
+ * *size* on success, or a negative error in case of failure.
*
- * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * long bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -2051,7 +3148,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
+ * long bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags)
* Description
* Do FIB lookup in kernel tables using parameters in *params*.
* If lookup is successful and result shows packet is to be
@@ -2073,6 +3170,11 @@ union bpf_attr {
* **BPF_FIB_LOOKUP_OUTPUT**
* Perform lookup from an egress perspective (default is
* ingress).
+ * **BPF_FIB_LOOKUP_SKIP_NEIGH**
+ * Skip the neighbour table lookup. *params*->dmac
+ * and *params*->smac will not be set as output. A common
+ * use case is to call **bpf_redirect_neigh**\ () after
+ * doing **bpf_fib_lookup**\ ().
*
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
@@ -2082,7 +3184,10 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
- * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
+ * If lookup fails with BPF_FIB_LKUP_RET_FRAG_NEEDED, then the MTU
+ * was exceeded and output params->mtu_result contains the MTU.
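+ *
+ *		An illustrative XDP sketch combining this helper with
+ *		**bpf_redirect_neigh**\ () (assumes *params* was populated
+ *		from the packet headers; error handling omitted):
+ *
+ *			rc = bpf_fib_lookup(ctx, &params, sizeof(params),
+ *					    BPF_FIB_LOOKUP_SKIP_NEIGH);
+ *			if (rc == BPF_FIB_LKUP_RET_SUCCESS)
+ *				return bpf_redirect_neigh(params.ifindex,
+ *							  NULL, 0, 0);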
+ *
+ * long bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to
@@ -2101,7 +3206,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* socket level. If the message *msg* is allowed to pass (i.e. if
@@ -2115,11 +3220,11 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags)
* Description
* This helper is used in programs implementing policies at the
* skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
- * if the verdeict eBPF program returns **SK_PASS**), redirect it
+ * if the verdict eBPF program returns **SK_PASS**), redirect it
* to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
* egress interfaces can be used for redirection. The
@@ -2129,7 +3234,7 @@ union bpf_attr {
* Return
* **SK_PASS** on success, or **SK_DROP** on error.
*
- * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
+ * long bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
* Description
* Encapsulate the packet associated to *skb* within a Layer 3
* protocol header. This header is provided in the buffer at
@@ -2166,7 +3271,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
+ * long bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len)
* Description
* Store *len* bytes from address *from* into the packet
* associated to *skb*, at *offset*. Only the flags, tag and TLVs
@@ -2181,7 +3286,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
+ * long bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta)
* Description
* Adjust the size allocated to TLVs in the outermost IPv6
* Segment Routing Header contained in the packet associated to
@@ -2197,7 +3302,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
+ * long bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len)
* Description
* Apply an IPv6 Segment Routing action of type *action* to the
* packet associated to *skb*. Each action takes a parameter
@@ -2226,7 +3331,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_repeat(void *ctx)
+ * long bpf_rc_repeat(void *ctx)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded repeat key message. This delays
@@ -2245,7 +3350,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * long bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded key press with *scancode*,
@@ -2256,7 +3361,7 @@ union bpf_attr {
* **bpf_rc_keydown**\ () again with the same values, or calling
* **bpf_rc_repeat**\ ().
*
- * Some protocols include a toggle bit, in case the button was
+ * Some protocols include a toggle bit, in case the button was
* released and pressed again between consecutive scancodes.
*
* The *ctx* should point to the lirc sample as passed into
@@ -2288,6 +3393,9 @@ union bpf_attr {
* The id is returned or 0 in case the id could not be retrieved.
*
* u64 bpf_get_current_cgroup_id(void)
+ * Description
+ * Get the current cgroup id based on the cgroup within which
+ * the current task is running.
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
@@ -2305,15 +3413,15 @@ union bpf_attr {
* running simultaneously.
*
* The user is responsible for handling the synchronization.
- * For example, by using the **BPF_STX_XADD** instruction to alter
+ * For example, by using the **BPF_ATOMIC** instructions to alter
* the shared data.
* Return
* A pointer to the local storage area.
*
- * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
* Description
* Select a **SO_REUSEPORT** socket from a
- * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
* It checks the selected socket is matching the incoming
* request in the socket buffer.
* Return
@@ -2355,7 +3463,7 @@ union bpf_attr {
* Look for an IPv6 socket.
*
* If the *netns* is a negative signed 32-bit integer, then the
- * socket lookup table in the netns associated with the *ctx* will
+ * socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
@@ -2392,7 +3500,7 @@ union bpf_attr {
* Look for an IPv6 socket.
*
* If the *netns* is a negative signed 32-bit integer, then the
- * socket lookup table in the netns associated with the *ctx* will
+ * socket lookup table in the netns associated with the *ctx*
* will be used. For the TC hooks, this is the netns of the device
* in the skb. For socket hooks, this is the netns of the socket.
* If *netns* is any other signed 32-bit value greater than or
@@ -2411,7 +3519,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * int bpf_sk_release(struct bpf_sock *sock)
+ * long bpf_sk_release(void *sock)
* Description
* Release the reference held by *sock*. *sock* must be a
* non-**NULL** pointer that was returned from
@@ -2419,7 +3527,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ * long bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
* Description
* Push an element *value* in *map*. *flags* is one of:
*
@@ -2429,19 +3537,19 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+ * long bpf_map_pop_elem(struct bpf_map *map, void *value)
* Description
* Pop an element from *map*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+ * long bpf_map_peek_elem(struct bpf_map *map, void *value)
* Description
* Get an element from *map* without removing it.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* For socket policies, insert *len* bytes into *msg* at offset
* *start*.
@@ -2457,7 +3565,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
+ * long bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* Will remove *len* bytes from a *msg* starting at byte *start*.
* This may result in **ENOMEM** errors under certain situations if
@@ -2469,7 +3577,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+ * long bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
* Description
* This helper is used in programs implementing IR decoding, to
* report a successfully decoded pointer movement.
@@ -2483,7 +3591,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ * long bpf_spin_lock(struct bpf_spin_lock *lock)
* Description
* Acquire a spinlock represented by the pointer *lock*, which is
* stored as part of a value of a map. Taking the lock allows to
@@ -2531,7 +3639,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ * long bpf_spin_unlock(struct bpf_spin_lock *lock)
* Description
* Release the *lock* previously locked by a call to
* **bpf_spin_lock**\ (\ *lock*\ ).
@@ -2554,7 +3662,7 @@ union bpf_attr {
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
- * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
+ * long bpf_skb_ecn_set_ce(struct sk_buff *skb)
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**
@@ -2591,23 +3699,23 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in *sk*.
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
- * **sizeof**\ (**struct ip6hdr**).
+ * **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
- * contains **sizeof**\ (**struct tcphdr**).
- *
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
* Return
* 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative
* error otherwise.
*
- * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
+ * long bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags)
* Description
* Get the name of the sysctl in /proc/sys/ and copy it into the
* buffer *buf* of size *buf_len* provided by the program.
@@ -2623,7 +3731,7 @@ union bpf_attr {
* **-E2BIG** if the buffer wasn't big enough (*buf* will contain
* truncated name in this case).
*
- * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
* Description
* Get current value of sysctl as it is presented in /proc/sys
* (incl. newline, etc), and copy it as a string into provided
@@ -2642,7 +3750,7 @@ union bpf_attr {
* **-EINVAL** if current value was unavailable, e.g. because
* sysctl is uninitialized and read returns -EIO for it.
*
- * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
+ * long bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len)
* Description
* Get new value being written by user space to sysctl (before
* the actual write happens) and copy it as a string into
@@ -2659,7 +3767,7 @@ union bpf_attr {
*
* **-EINVAL** if sysctl is being read.
*
- * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
+ * long bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len)
* Description
* Override new value being written by user space to sysctl with
* value provided by program in buffer *buf* of size *buf_len*.
@@ -2676,7 +3784,7 @@ union bpf_attr {
*
* **-EINVAL** if sysctl is being read.
*
- * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
+ * long bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res)
* Description
* Convert the initial part of the string from buffer *buf* of
* size *buf_len* to a long integer according to the given base
@@ -2700,7 +3808,7 @@ union bpf_attr {
*
* **-ERANGE** if resulting value was out of range.
*
- * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
+ * long bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res)
* Description
* Convert the initial part of the string from buffer *buf* of
* size *buf_len* to an unsigned long integer according to the
@@ -2723,7 +3831,7 @@ union bpf_attr {
*
* **-ERANGE** if resulting value was out of range.
*
- * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
* Description
* Get a bpf-local-storage from a *sk*.
*
@@ -2739,6 +3847,9 @@ union bpf_attr {
* "type". The bpf-local-storage "type" (i.e. the *map*) is
* searched against all bpf-local-storages residing at *sk*.
*
+ * *sk* is a kernel **struct sock** pointer for LSM program.
+ * *sk* is a **struct bpf_sock** pointer for other program types.
+ *
* An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf-local-storage will be
* created if one does not exist. *value* can be used
@@ -2751,15 +3862,16 @@ union bpf_attr {
* **NULL** if not found or there was an error in adding
* a new bpf-local-storage.
*
- * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
* Description
* Delete a bpf-local-storage from a *sk*.
* Return
* 0 on success.
*
* **-ENOENT** if the bpf-local-storage cannot be found.
+ * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
*
- * int bpf_send_signal(u32 sig)
+ * long bpf_send_signal(u32 sig)
* Description
* Send signal *sig* to the process of the current task.
* The signal may be delivered to any of this process's threads.
@@ -2774,18 +3886,18 @@ union bpf_attr {
*
* **-EAGAIN** if bpf program can try again.
*
- * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Try to issue a SYN cookie for the packet with corresponding
* IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
*
* *iph* points to the start of the IPv4 or IPv6 header, while
* *iph_len* contains **sizeof**\ (**struct iphdr**) or
- * **sizeof**\ (**struct ip6hdr**).
+ * **sizeof**\ (**struct ipv6hdr**).
*
* *th* points to the start of the TCP header, while *th_len*
- * contains the length of the TCP header.
- *
+ * contains the length of the TCP header with options (at least
+ * **sizeof**\ (**struct tcphdr**)).
* Return
* On success, lower 32 bits hold the generated SYN cookie in
* followed by 16 bits which hold the MSS value for that cookie,
@@ -2801,7 +3913,7 @@ union bpf_attr {
*
* **-EPROTONOSUPPORT** IP packet version is not 4 or 6
*
- * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -2825,21 +3937,21 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Safely attempt to read *size* bytes from user space address
* *unsafe_ptr* and store the data in *dst*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Safely attempt to read *size* bytes from kernel space address
* *unsafe_ptr* and store the data in *dst*.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe user address
* *unsafe_ptr* to *dst*. The *size* should include the
@@ -2848,10 +3960,10 @@ union bpf_attr {
* string length is larger than *size*, just *size*-1 bytes are
* copied and the last byte is set to NUL.
*
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
+ * On success, returns the number of bytes that were written,
+ * including the terminal NUL. This makes this helper useful in
+ * tracing programs for reading strings, and more importantly to
+ * get its length at runtime. See the following snippet:
*
* ::
*
@@ -2868,7 +3980,7 @@ union bpf_attr {
* // size, after checking its boundaries.
* }
*
- * In comparison, using **bpf_probe_read_user()** helper here
+ * In comparison, using **bpf_probe_read_user**\ () helper here
* instead to read the string would require to estimate the length
* at compile time, and would often result in copying more memory
* than necessary.
@@ -2879,26 +3991,26 @@ union bpf_attr {
* **->mm->env_start**: using this helper and the return value,
* one can quickly iterate at the right offset of the memory area.
* Return
- * On success, the strictly positive length of the string,
+ * On success, the strictly positive length of the output string,
* including the trailing NUL character. On error, a negative
* value.
*
- * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * long bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
* Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
- * to *dst*. Same semantics as with bpf_probe_read_user_str() apply.
+ * to *dst*. Same semantics as with **bpf_probe_read_user_str**\ () apply.
* Return
- * On success, the strictly positive length of the string, including
+ * On success, the strictly positive length of the string, including
* the trailing NUL character. On error, a negative value.
*
- * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ * long bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
* Description
- * Send out a tcp-ack. *tp* is the in-kernel struct tcp_sock.
+ * Send out a tcp-ack. *tp* is the in-kernel struct **tcp_sock**.
* *rcv_nxt* is the ack_seq to be sent out.
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_send_signal_thread(u32 sig)
+ * long bpf_send_signal_thread(u32 sig)
* Description
* Send signal *sig* to the thread corresponding to the current task.
* Return
@@ -2918,38 +4030,38 @@ union bpf_attr {
* Return
* The 64 bit jiffies
*
- * int bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
+ * long bpf_read_branch_records(struct bpf_perf_event_data *ctx, void *buf, u32 size, u64 flags)
* Description
* For an eBPF program attached to a perf event, retrieve the
- * branch records (struct perf_branch_entry) associated to *ctx*
- * and store it in the buffer pointed by *buf* up to size
+ * branch records (**struct perf_branch_entry**) associated to *ctx*
+ * and store it in the buffer pointed by *buf* up to size
* *size* bytes.
* Return
* On success, number of bytes written to *buf*. On error, a
* negative value.
*
* The *flags* can be set to **BPF_F_GET_BRANCH_RECORDS_SIZE** to
- * instead return the number of bytes required to store all the
+ * instead return the number of bytes required to store all the
* branch entries. If this flag is set, *buf* may be NULL.
*
* **-EINVAL** if arguments invalid or **size** not a multiple
- * of sizeof(struct perf_branch_entry).
+ * of **sizeof**\ (**struct perf_branch_entry**\ ).
*
* **-ENOENT** if architecture does not support branch records.
*
- * int bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
+ * long bpf_get_ns_current_pid_tgid(u64 dev, u64 ino, struct bpf_pidns_info *nsdata, u32 size)
* Description
* Returns 0 on success, values for *pid* and *tgid* as seen from the current
* *namespace* will be returned in *nsdata*.
- *
- * On failure, the returned value is one of the following:
+ * Return
+ * 0 on success, or one of the following in case of failure:
*
* **-EINVAL** if dev and inum supplied don't match dev_t and inode number
* with nsfs of current task, or if dev conversion to dev_t lost high bits.
*
* **-ENOENT** if pidns does not exists for the current task.
*
- * int bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * long bpf_xdp_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -2981,8 +4093,8 @@ union bpf_attr {
* a global identifier that can be assumed unique. If *ctx* is
* NULL, then the helper returns the cookie for the initial
* network namespace. The cookie itself is very similar to that
- * of bpf_get_socket_cookie() helper, but for network namespaces
- * instead of sockets.
+ * of **bpf_get_socket_cookie**\ () helper, but for network
+ * namespaces instead of sockets.
* Return
* An 8-byte long opaque number.
*
@@ -3004,8 +4116,12 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * int bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
* Description
+ * Helper is overloaded depending on BPF program type. This
+ * description applies to **BPF_PROG_TYPE_SCHED_CLS** and
+ * **BPF_PROG_TYPE_SCHED_ACT** programs.
+ *
* Assign the *sk* to the *skb*. When combined with appropriate
* routing configuration to receive the packet towards the socket,
* will cause *skb* to be delivered to the specified socket.
@@ -3017,148 +4133,1659 @@ union bpf_attr {
*
* The *flags* argument must be zero.
* Return
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EINVAL** if specified *flags* are not supported.
+ *
+ * **-ENOENT** if the socket is unavailable for assignment.
+ *
+ * **-ENETUNREACH** if the socket is unreachable (wrong netns).
+ *
+ * **-EOPNOTSUPP** if the operation is not supported, for example
+ * a call from outside of TC ingress.
+ *
+ * **-ESOCKTNOSUPPORT** if the socket type is not supported
+ * (reuseport).
+ *
+ * long bpf_sk_assign(struct bpf_sk_lookup *ctx, struct bpf_sock *sk, u64 flags)
+ * Description
+ * Helper is overloaded depending on BPF program type. This
+ * description applies to **BPF_PROG_TYPE_SK_LOOKUP** programs.
+ *
+ * Select the *sk* as a result of a socket lookup.
+ *
+ * For the operation to succeed, the passed socket must be compatible
+ * with the packet description provided by the *ctx* object.
+ *
+ * The L4 protocol (**IPPROTO_TCP** or **IPPROTO_UDP**) must
+ * be an exact match, while the IP family (**AF_INET** or
+ * **AF_INET6**) must be compatible; that is, IPv6 sockets
+ * that are not v6-only can be selected for IPv4 packets.
+ *
+ * Only TCP listeners and UDP unconnected sockets can be
+ * selected. *sk* can also be NULL to reset any previous
+ * selection.
+ *
+ * The *flags* argument can be a combination of the following values:
+ *
+ * * **BPF_SK_LOOKUP_F_REPLACE** to override the previous
+ * socket selection, potentially done by a BPF program
+ * that ran before us.
+ *
+ * * **BPF_SK_LOOKUP_F_NO_REUSEPORT** to skip
+ * load-balancing within reuseport group for the socket
+ * being selected.
+ *
+ * On success *ctx->sk* will point to the selected socket.
+ *
+ * Return
* 0 on success, or a negative errno in case of failure.
*
- * * **-EINVAL** Unsupported flags specified.
- * * **-ENOENT** Socket is unavailable for assignment.
- * * **-ENETUNREACH** Socket is unreachable (wrong netns).
- * * **-EOPNOTSUPP** Unsupported operation, for example a
- * call from outside of TC ingress.
- * * **-ESOCKTNOSUPPORT** Socket type not supported (reuseport).
+ * * **-EAFNOSUPPORT** if socket family (*sk->family*) is
+ * not compatible with packet family (*ctx->family*).
+ *
+ * * **-EEXIST** if socket has been already selected,
+ * potentially by another program, and
+ * **BPF_SK_LOOKUP_F_REPLACE** flag was not specified.
+ *
+ * * **-EINVAL** if unsupported flags were specified.
+ *
+ * * **-EPROTOTYPE** if socket L4 protocol
+ * (*sk->protocol*) doesn't match packet protocol
+ * (*ctx->protocol*).
+ *
+ * * **-ESOCKTNOSUPPORT** if socket is not in allowed
+ * state (TCP listening or UDP unconnected).
+ *
+ * u64 bpf_ktime_get_boot_ns(void)
+ * Description
+ * Return the time elapsed since system boot, in nanoseconds.
+ * Does include the time the system was suspended.
+ * See: **clock_gettime**\ (**CLOCK_BOOTTIME**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_seq_printf(struct seq_file *m, const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * Description
+ * **bpf_seq_printf**\ () uses seq_file **seq_printf**\ () to print
+ * out the format string.
+ * The *m* represents the seq_file. The *fmt* and *fmt_size* are for
+ * the format string itself. The *data* and *data_len* are format string
+ * arguments. The *data* are a **u64** array and corresponding format string
+ * values are stored in the array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data* array.
+ * The *data_len* is the size of *data* in bytes - must be a multiple of 8.
+ *
+ * Formats **%s** and **%p{i,I}{4,6}** require reading kernel memory.
+ * Reading kernel memory may fail due to either invalid address or
+ * valid address but requiring a major memory fault. If reading kernel memory
+ * fails, the string for **%s** will be an empty string, and the ip
+ * address for **%p{i,I}{4,6}** will be 0. Not returning an error to
+ * the bpf program is consistent with what **bpf_trace_printk**\ () does for now.
+ * Return
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EBUSY** if per-CPU memory copy buffer is busy, can try again
+ * by returning 1 from bpf program.
+ *
+ * **-EINVAL** if arguments are invalid, or if *fmt* is invalid/unsupported.
+ *
+ * **-E2BIG** if *fmt* contains too many format specifiers.
+ *
+ * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
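+ *
+ *	A minimal sketch from a BPF iterator program (assuming *seq*
+ *	comes from the iterator context, e.g. *ctx*\ **->meta->seq**,
+ *	and *task* is the iterated, non-NULL task):
+ *
+ *	::
+ *
+ *		static const char fmt[] = "pid=%d comm=%s\n";
+ *		__u64 data[2] = { task->pid,
+ *				  (unsigned long)task->comm };
+ *
+ *		bpf_seq_printf(seq, fmt, sizeof(fmt),
+ *			       data, sizeof(data));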
+ *
+ * long bpf_seq_write(struct seq_file *m, const void *data, u32 len)
+ * Description
+ * **bpf_seq_write**\ () uses seq_file **seq_write**\ () to write the data.
+ * The *m* represents the seq_file. The *data* and *len* represent the
+ * data to write in bytes.
+ * Return
+ * 0 on success, or a negative error in case of failure:
+ *
+ * **-EOVERFLOW** if an overflow happened: The same object will be tried again.
+ *
+ * u64 bpf_sk_cgroup_id(void *sk)
+ * Description
+ * Return the cgroup v2 id of the socket *sk*.
+ *
+ * *sk* must be a non-**NULL** pointer to a socket, e.g. one
+ * returned from **bpf_sk_lookup_xxx**\ (),
+ * **bpf_sk_fullsock**\ (), etc. The format of returned id is
+ * same as in **bpf_skb_cgroup_id**\ ().
+ *
+ * This helper is available only if the kernel was compiled with
+ * the **CONFIG_SOCK_CGROUP_DATA** configuration option.
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *sk* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *sk*, then return value will be same as that
+ * of **bpf_sk_cgroup_id**\ ().
+ *
+ * The helper is useful to implement policies based on cgroups
+ * that are upper in hierarchy than immediate cgroup associated
+ * with *sk*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_sk_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_ringbuf_output(void *ringbuf, void *data, u64 size, u64 flags)
+ * Description
+ * Copy *size* bytes from *data* into a ring buffer *ringbuf*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * An adaptive notification is a notification sent whenever the user-space
+ * process has caught up and consumed all available payloads. In case the user-space
+ * process is still processing a previous payload, then no notification is needed
+ * as it will process the newly added payload automatically.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * void *bpf_ringbuf_reserve(void *ringbuf, u64 size, u64 flags)
+ * Description
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*.
+ * *flags* must be 0.
+ * Return
+ * Valid pointer with *size* bytes of memory available; NULL,
+ * otherwise.
+ *
+ * void bpf_ringbuf_submit(void *data, u64 flags)
+ * Description
+ * Submit reserved ring buffer sample, pointed to by *data*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * void bpf_ringbuf_discard(void *data, u64 flags)
+ * Description
+ * Discard reserved ring buffer sample, pointed to by *data*.
+ * If **BPF_RB_NO_WAKEUP** is specified in *flags*, no notification
+ * of new data availability is sent.
+ * If **BPF_RB_FORCE_WAKEUP** is specified in *flags*, notification
+ * of new data availability is sent unconditionally.
+ * If **0** is specified in *flags*, an adaptive notification
+ * of new data availability is sent.
+ *
+ * See 'bpf_ringbuf_output()' for the definition of adaptive notification.
+ * Return
+ * Nothing. Always succeeds.
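+ *
+ *	A minimal sketch of the reserve/submit pattern, assuming a
+ *	**BPF_MAP_TYPE_RINGBUF** map named *rb* and the usual libbpf
+ *	helper declarations (e.g. **bpf_helpers.h**) are available:
+ *
+ *	::
+ *
+ *		struct event { __u32 pid; };
+ *
+ *		struct event *e;
+ *
+ *		// Reserve space first; submit only if it worked.
+ *		e = bpf_ringbuf_reserve(&rb, sizeof(*e), 0);
+ *		if (e) {
+ *			e->pid = bpf_get_current_pid_tgid() >> 32;
+ *			bpf_ringbuf_submit(e, 0);
+ *			// or bpf_ringbuf_discard(e, 0) to drop it
+ *		}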
+ *
+ * u64 bpf_ringbuf_query(void *ringbuf, u64 flags)
+ * Description
+ * Query various characteristics of provided ring buffer. What
+ * exactly is queried is determined by *flags*:
+ *
+ * * **BPF_RB_AVAIL_DATA**: Amount of data not yet consumed.
+ * * **BPF_RB_RING_SIZE**: The size of ring buffer.
+ * * **BPF_RB_CONS_POS**: Consumer position (can wrap around).
+ * * **BPF_RB_PROD_POS**: Producer(s) position (can wrap around).
+ *
+ * Data returned is just a momentary snapshot of actual values
+ * and could be inaccurate, so this facility should be used to
+ * power heuristics and for reporting, not to make 100% correct
+ * calculation.
+ * Return
+ * Requested value, or 0, if *flags* are not recognized.
+ *
+ * long bpf_csum_level(struct sk_buff *skb, u64 level)
+ * Description
+ * Change the skb's checksum level by one layer up or down, or
+ * reset it entirely to none in order to have the stack perform
+ * checksum validation. The level is applicable to the following
+ * protocols: TCP, UDP, GRE, SCTP, FCOE. For example, a decap of
+ * | ETH | IP | UDP | GUE | IP | TCP | into | ETH | IP | TCP |
+ * through **bpf_skb_adjust_room**\ () helper with passing in
+ * **BPF_F_ADJ_ROOM_NO_CSUM_RESET** flag would require one call
+ * to **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_DEC** since
+ * the UDP header is removed. Similarly, an encap of the latter
+ * into the former could be accompanied by a helper call to
+ * **bpf_csum_level**\ () with **BPF_CSUM_LEVEL_INC** if the
+ * skb is still intended to be processed in higher layers of the
+ * stack instead of just egressing at tc.
+ *
+ * There are three supported level settings at this time:
+ *
+ * * **BPF_CSUM_LEVEL_INC**: Increases skb->csum_level for skbs
+ * with CHECKSUM_UNNECESSARY.
+ * * **BPF_CSUM_LEVEL_DEC**: Decreases skb->csum_level for skbs
+ * with CHECKSUM_UNNECESSARY.
+ * * **BPF_CSUM_LEVEL_RESET**: Resets skb->csum_level to 0 and
+ * sets CHECKSUM_NONE to force checksum validation by the stack.
+ * * **BPF_CSUM_LEVEL_QUERY**: No-op, returns the current
+ * skb->csum_level.
+ * Return
+ * 0 on success, or a negative error in case of failure. In the
+ * case of **BPF_CSUM_LEVEL_QUERY**, the current skb->csum_level
+ * is returned or the error code -EACCES in case the skb is not
+ * subject to CHECKSUM_UNNECESSARY.
+ *
+ * struct tcp6_sock *bpf_skc_to_tcp6_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
+ * Description
+ * Return a user or a kernel stack in bpf program provided buffer.
+ * To achieve this, the helper needs *task*, which is a valid
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
+ *
+ * The last argument, *flags*, holds the number of stack frames to
+ * skip (from 0 to 255), masked with
+ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set
+ * the following flags:
+ *
+ * **BPF_F_USER_STACK**
+ * Collect a user space stack instead of a kernel stack.
+ * **BPF_F_USER_BUILD_ID**
+ * Collect buildid+offset instead of ips for user stack,
+ * only valid if **BPF_F_USER_STACK** is also specified.
+ *
+ * **bpf_get_task_stack**\ () can collect up to
+ * **PERF_MAX_STACK_DEPTH** frames, both kernel and user, subject
+ * to a sufficiently large buffer size. Note that
+ * this limit can be controlled with the **sysctl** program, and
+ * that it should be manually increased in order to profile long
+ * user stacks (such as stacks for Java programs). To do so, use:
+ *
+ * ::
+ *
+ * # sysctl kernel.perf_event_max_stack=<new value>
+ * Return
+ * The non-negative copied *buf* length equal to or less than
+ * *size* on success, or a negative error in case of failure.
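+ *
+ *	A minimal sketch, assuming a tracing context that provides a
+ *	valid *task* pointer:
+ *
+ *	::
+ *
+ *		__u64 ips[32];
+ *		long n;
+ *
+ *		// Collect up to 32 user-space frames for *task*.
+ *		n = bpf_get_task_stack(task, ips, sizeof(ips),
+ *				       BPF_F_USER_STACK);
+ *		// On success n is the number of bytes written,
+ *		// i.e. n / 8 frames.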
+ *
+ * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
+ * Description
+ * Load header option. Support reading a particular TCP header
+ * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
+ *
+ * If *flags* is 0, it will search the option from the
+ * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
+ * has details on what skb_data contains under different
+ * *skops*\ **->op**.
+ *
+ * The first byte of the *searchby_res* specifies the
+ * kind to search for.
+ *
+ * If the searched-for kind is an experimental kind
+ * (i.e. 253 or 254 according to RFC6994), the "magic"
+ * must also be specified, and is either 2 bytes or
+ * 4 bytes. The size of the magic is given in the
+ * 2nd byte, which is the "kind-length" of a TCP
+ * header option; as with a normal TCP header option,
+ * the "kind-length" also includes the first 2 bytes,
+ * "kind" and "kind-length" itself.
+ *
+ * For example, to search experimental kind 254 with
+ * 2 byte magic 0xeB9F, the searchby_res should be
+ * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
+ *
+ * To search for the standard window scale option (3),
+ * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
+ * Note that kind-length must be 0 for a regular option.
+ *
+ * Searching for No-Op (0) and End-of-Option-List (1) is
+ * not supported.
+ *
+ * *len* must be at least 2 bytes which is the minimal size
+ * of a header option.
+ *
+ * Supported flags:
+ *
+ * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
+ * saved_syn packet or the just-received syn packet.
+ *
+ * Return
+ * > 0 when found, the header option is copied to *searchby_res*.
+ * The return value is the total length copied. On failure, a
+ * negative error code is returned:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOMSG** if the option is not found.
+ *
+ * **-ENOENT** if no syn packet is available when
+ * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
+ *
+ * **-ENOSPC** if there is not enough space; only *len*
+ * bytes are copied.
+ *
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
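+ *
+ *	A minimal sketch searching for the window scale option
+ *	(kind 3) from a **BPF_PROG_TYPE_SOCK_OPS** program, where
+ *	*skops* is the program context:
+ *
+ *	::
+ *
+ *		// kind 3, kind-length 0 for a regular option
+ *		__u8 opt[3] = { 3, 0, 0 };
+ *		long ret;
+ *
+ *		ret = bpf_load_hdr_opt(skops, opt, sizeof(opt), 0);
+ *		if (ret > 0) {
+ *			// opt[] now holds kind, kind-length and
+ *			// the shift count.
+ *		}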
+ *
+ * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
+ * Description
+ * Store header option. The data will be copied
+ * from buffer *from* with length *len* to the TCP header.
+ *
+ * The buffer *from* should have the whole option that
+ * includes the kind, kind-length, and the actual
+ * option data. The *len* must be at least kind-length
+ * long. The kind-length does not have to be 4 byte
+ * aligned. The kernel will take care of the padding
+ * and setting the 4 bytes aligned value to th->doff.
+ *
+ * This helper will check for duplicated option
+ * by searching the same option in the outgoing skb.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ * Nothing has been written.
+ *
+ * **-EEXIST** if the option already exists.
+ *
+ * **-EFAULT** on failure to parse the existing header options.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
+ * Description
+ * Reserve *len* bytes for the bpf header option. The
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
+ * the total number of bytes will be reserved.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from an *inode*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *inode* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
+ * helper enforces the key must be an inode and the map must also
+ * be a **BPF_MAP_TYPE_INODE_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *inode* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *inode*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
+ * Description
+ * Delete a bpf_local_storage from an *inode*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * long bpf_d_path(struct path *path, char *buf, u32 sz)
+ * Description
+ * Return full path for given **struct path** object, which
+ * needs to be the kernel BTF *path* object. The path is
+ * returned in the provided buffer *buf* of size *sz* and
+ * is zero terminated.
+ *
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
+ * Description
+ * Read *size* bytes from user space address *user_ptr* and store
+ * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
+ * Description
+ * Use BTF to store a string representation of *ptr*->ptr in *str*,
+ * using *ptr*->type_id. This value should specify the type
+ * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
+ * can be used to look up vmlinux BTF type ids. Traversing the
+ * data structure using BTF, the type information and values are
+ * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
+ * the pointer data is carried out to avoid kernel crashes during
+ * operation. Smaller types can use string space on the stack;
+ * larger programs can use map data to store the string
+ * representation.
+ *
+ * The string can be subsequently shared with userspace via
+ * bpf_perf_event_output() or ring buffer interfaces.
+ * bpf_trace_printk() is to be avoided as it places too small
+ * a limit on string size to be useful.
+ *
+ * *flags* is a combination of
+ *
+ * **BTF_F_COMPACT**
+ * no formatting around type information
+ * **BTF_F_NONAME**
+ * no struct/union member names/types
+ * **BTF_F_PTR_RAW**
+ * show raw (unobfuscated) pointer values;
+ * equivalent to printk specifier %px.
+ * **BTF_F_ZERO**
+ * show zero-valued struct/union members; they
+ * are not displayed by default
+ *
+ * Return
+ * The number of bytes that were written (or would have been
+ * written if output had to be truncated due to string size),
+ * or a negative error in cases of failure.
+ *
+ * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
+ * Description
+ * Use BTF to write to seq_write a string representation of
+ * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
+ * *flags* are identical to those used for bpf_snprintf_btf.
+ * Return
+ * 0 on success or a negative error in case of failure.
+ *
+ * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
+ * Description
+ * See **bpf_get_cgroup_classid**\ () for the main description.
+ * This helper differs from **bpf_get_cgroup_classid**\ () in that
+ * the cgroup v1 net_cls class is retrieved only from the *skb*'s
+ * associated socket instead of the current process.
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_redirect_neigh(u32 ifindex, struct bpf_redir_neigh *params, int plen, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*
+ * and fill in L2 addresses from neighboring subsystem. This helper
+ * is somewhat similar to **bpf_redirect**\ (), except that it
+ * populates L2 addresses as well, meaning, internally, the helper
+ * relies on the neighbor lookup for the L2 address of the nexthop.
+ *
+ * The helper will perform a FIB lookup based on the skb's
+ * networking header to get the address of the next hop, unless
+ * this is supplied by the caller in the *params* argument. The
+ * *plen* argument indicates the len of *params* and should be set
+ * to 0 if *params* is NULL.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types, and enabled
+ * for IPv4 and IPv6 protocols.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
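+ *
+ *	A minimal tc sketch, where **TARGET_IFINDEX** is a
+ *	placeholder for the egress device index and the nexthop
+ *	lookup is left entirely to the helper:
+ *
+ *	::
+ *
+ *		SEC("tc")
+ *		int redirect_out(struct __sk_buff *skb)
+ *		{
+ *			// No pre-resolved nexthop: params NULL, plen 0.
+ *			return bpf_redirect_neigh(TARGET_IFINDEX,
+ *						  NULL, 0, 0);
+ *		}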
+ *
+ * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on *cpu*. A ksym is an
+ * extern variable decorated with '__ksym'. For ksym, there is a
+ * global var (either static or global) defined of the same name
+ * in the kernel. The ksym is percpu if the global var is percpu.
+ * The returned pointer points to the global percpu var on *cpu*.
+ *
+ * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
+ * kernel, except that bpf_per_cpu_ptr() may return NULL. This
+ * happens if *cpu* is larger than nr_cpu_ids. The caller of
+ * bpf_per_cpu_ptr() must check the returned value.
+ * Return
+ * A pointer pointing to the kernel percpu variable on *cpu*, or
+ * NULL, if *cpu* is invalid.
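+ *
+ *	A minimal sketch, assuming the kernel percpu variable
+ *	**bpf_prog_active** is declared in the program as a ksym:
+ *
+ *	::
+ *
+ *		extern const int bpf_prog_active __ksym;
+ *
+ *		const int *p = bpf_per_cpu_ptr(&bpf_prog_active, 0);
+ *
+ *		// p, if non-NULL, points to the value on CPU 0.
+ *		if (p)
+ *			bpf_printk("cpu0: %d", *p);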
+ *
+ * void *bpf_this_cpu_ptr(const void *percpu_ptr)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on this cpu. See the
+ * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
+ *
+ * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
+ * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
+ * never return NULL.
+ * Return
+ * A pointer pointing to the kernel percpu variable on this cpu.
+ *
+ * long bpf_redirect_peer(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_redirect**\ (), except
+ * that the redirection happens to the *ifindex*' peer device and
+ * the netns switch takes place from ingress to ingress without
+ * going through the CPU's backlog queue.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types at the ingress
+ * hook and for veth device types. The peer device must reside in a
+ * different network namespace.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
+ * void *bpf_task_storage_get(struct bpf_map *map, struct task_struct *task, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from the *task*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *task* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *task*) except this
+ * helper enforces the key must be a task_struct and the map must also
+ * be a **BPF_MAP_TYPE_TASK_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *task* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *task*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
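+ *
+ *	A minimal sketch, assuming a **BPF_MAP_TYPE_TASK_STORAGE**
+ *	map named *task_map* with a **u64** value is declared
+ *	elsewhere and *task* is a valid task pointer:
+ *
+ *	::
+ *
+ *		__u64 *val;
+ *
+ *		val = bpf_task_storage_get(&task_map, task, NULL,
+ *					   BPF_LOCAL_STORAGE_GET_F_CREATE);
+ *		if (val)
+ *			*val += 1;	// per-task counter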
+ *
+ * long bpf_task_storage_delete(struct bpf_map *map, struct task_struct *task)
+ * Description
+ * Delete a bpf_local_storage from a *task*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * struct task_struct *bpf_get_current_task_btf(void)
+ * Description
+ * Return a BTF pointer to the "current" task.
+ * This pointer can also be used in helpers that accept an
+ * *ARG_PTR_TO_BTF_ID* of type *task_struct*.
+ * Return
+ * Pointer to the current task.
+ *
+ * long bpf_bprm_opts_set(struct linux_binprm *bprm, u64 flags)
+ * Description
+ * Set or clear certain options on *bprm*:
+ *
+ * **BPF_F_BPRM_SECUREEXEC** Set the secureexec bit
+ * which sets the **AT_SECURE** auxv for glibc. The bit
+ * is cleared if the flag is not specified.
+ * Return
+ * **-EINVAL** if invalid *flags* are passed, zero otherwise.
+ *
+ * u64 bpf_ktime_get_coarse_ns(void)
+ * Description
+ * Return a coarse-grained version of the time elapsed since
+ * system boot, in nanoseconds. Does not include time the system
+ * was suspended.
+ *
+ * See: **clock_gettime**\ (**CLOCK_MONOTONIC_COARSE**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_ima_inode_hash(struct inode *inode, void *dst, u32 size)
+ * Description
+ * Returns the stored IMA hash of the *inode* (if it's available).
+ * If the hash is larger than *size*, then only *size*
+ * bytes will be copied to *dst*.
+ * Return
+ * The **hash_algo** is returned on success,
+ * **-EOPNOTSUPP** if IMA is disabled or **-EINVAL** if
+ * invalid arguments are passed.
+ *
+ * struct socket *bpf_sock_from_file(struct file *file)
+ * Description
+ * If the given file represents a socket, returns the associated
+ * socket.
+ * Return
+ * A pointer to a struct socket on success or NULL if the file is
+ * not a socket.
+ *
+ * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
+ * Description
+ * Check packet size against exceeding MTU of net device (based
+ * on *ifindex*). This helper will likely be used in combination
+ * with helpers that adjust/change the packet size.
+ *
+ * The argument *len_diff* can be used for querying with a planned
+ * size change. This allows checking the MTU prior to changing the packet
+ * ctx. Providing a *len_diff* adjustment that is larger than the
+ * actual packet size (resulting in negative packet size) will in
+ * principle not exceed the MTU, which is why it is not considered
+ * a failure. Other BPF helpers are needed for performing the
+ * planned size change; therefore the responsibility for catching
+ * a negative packet size belongs in those helpers.
+ *
+ * Specifying *ifindex* zero means the MTU check is performed
+ * against the current net device. This is practical if the check
+ * isn't done prior to a redirect.
+ *
+ * On input, *mtu_len* must be a valid pointer, else the verifier
+ * will reject the BPF program. If the value *mtu_len* is initialized
+ * to zero, then the ctx packet size is used. When a value *mtu_len*
+ * is provided as input, it specifies the L3 length that the MTU check
+ * is done against. Remember that XDP and TC length operate at L2, but
+ * this value is L3, as it correlates to MTU and IP-header tot_len
+ * values, which are L3 (similar behavior to bpf_fib_lookup).
+ *
+ * The Linux kernel route table can configure MTUs on a more
+ * specific per route level, which is not provided by this helper.
+ * For route level MTU checks use the **bpf_fib_lookup**\ ()
+ * helper.
+ *
+ * *ctx* is either **struct xdp_md** for XDP programs or
+ * **struct sk_buff** for tc cls_act programs.
+ *
+ * The *flags* argument can be a combination of one or more of the
+ * following values:
+ *
+ * **BPF_MTU_CHK_SEGS**
+ * This flag only works for *ctx* **struct sk_buff**.
+ * If the packet context contains extra packet segment buffers
+ * (often known as a GSO skb), then the MTU check is harder to
+ * do at this point, because in the transmit path it is
+ * possible for the skb packet to get re-segmented
+ * (depending on net device features). This could still be
+ * an MTU violation, so this flag enables performing the MTU
+ * check against segments, with a different violation
+ * return code to tell it apart. The check cannot use *len_diff*.
+ *
+ * On return, the *mtu_len* pointer contains the MTU value of the net
+ * device. Remember that the net device's configured MTU is the L3 size,
+ * which is what is returned here, while XDP and TC length operate at L2.
+ * The helper takes this into account for you, but keep it in mind when
+ * using the MTU value in your BPF code.
+ *
+ * Return
+ * * 0 on success, and populate MTU value in *mtu_len* pointer.
+ *
+ * * < 0 if any input argument is invalid (*mtu_len* not updated)
+ *
+ * MTU violations return positive values, but also populate the MTU
+ * value in the *mtu_len* pointer, as this can be needed for
+ * implementing PMTU handling:
+ *
+ * * **BPF_MTU_CHK_RET_FRAG_NEEDED**
+ * * **BPF_MTU_CHK_RET_SEGS_TOOBIG**
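+ *
+ *	A minimal XDP sketch checking whether growing the packet by
+ *	100 bytes would exceed the MTU of the current device (*ctx*
+ *	is the **struct xdp_md** context):
+ *
+ *	::
+ *
+ *		__u32 mtu_len = 0;	// 0: use the ctx packet size
+ *
+ *		if (bpf_check_mtu(ctx, 0, &mtu_len, 100, 0) ==
+ *		    BPF_MTU_CHK_RET_FRAG_NEEDED)
+ *			// would exceed MTU; mtu_len holds the device MTU
+ *			return XDP_DROP;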
+ *
+ * long bpf_for_each_map_elem(struct bpf_map *map, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * For each element in **map**, call **callback_fn** function with
+ * **map**, **callback_ctx** and other map-specific parameters.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** is used to control certain aspects of the helper.
+ * Currently, the **flags** must be 0.
+ *
+ * The following are a list of supported map types and their
+ * respective expected callback signatures:
+ *
+ * BPF_MAP_TYPE_HASH, BPF_MAP_TYPE_PERCPU_HASH,
+ * BPF_MAP_TYPE_LRU_HASH, BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ * BPF_MAP_TYPE_ARRAY, BPF_MAP_TYPE_PERCPU_ARRAY
+ *
+ * long (\*callback_fn)(struct bpf_map \*map, const void \*key, void \*value, void \*ctx);
+ *
+ * For per_cpu maps, the map_value is the value on the cpu where the
+ * bpf_prog is running.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * element. If the return value is 1, the helper will skip the rest of
+ * the elements and return. Other return values are not used now.
+ *
+ * Return
+ * The number of traversed map elements for success, **-EINVAL** for
+ * invalid **flags**.
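+ *
+ *	A minimal sketch, assuming an array map named *counters*
+ *	with **u64** values is declared elsewhere:
+ *
+ *	::
+ *
+ *		static long sum_cb(struct bpf_map *map, const void *key,
+ *				   void *value, void *ctx)
+ *		{
+ *			*(__u64 *)ctx += *(__u64 *)value;
+ *			return 0;	// continue with the next element
+ *		}
+ *
+ *		__u64 total = 0;
+ *
+ *		bpf_for_each_map_elem(&counters, sum_cb, &total, 0);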
+ *
+ * long bpf_snprintf(char *str, u32 str_size, const char *fmt, u64 *data, u32 data_len)
+ * Description
+ * Outputs a string into the **str** buffer of size **str_size**
+ * based on a format string stored in a read-only map pointed to by
+ * **fmt**.
+ *
+ * Each format specifier in **fmt** corresponds to one u64 element
+ * in the **data** array. For strings and pointers where pointees
+ * are accessed, only the pointer values are stored in the *data*
+ * array. The *data_len* is the size of *data* in bytes - must be
+ * a multiple of 8.
+ *
+ * Formats **%s** and **%p{i,I}{4,6}** require reading kernel
+ * memory. Reading kernel memory may fail due to either invalid
+ * address or valid address but requiring a major memory fault. If
+ * reading kernel memory fails, the string for **%s** will be an
+ * empty string, and the ip address for **%p{i,I}{4,6}** will be 0.
+ * Not returning an error to the bpf program is consistent with what
+ * **bpf_trace_printk**\ () does for now.
+ *
+ * Return
+ * The strictly positive length of the formatted string, including
+ * the trailing zero character. If the return value is greater than
+ * **str_size**, **str** contains a truncated string, guaranteed to
+ * be zero-terminated except when **str_size** is 0.
+ *
+ * Or **-EBUSY** if the per-CPU memory copy buffer is busy.
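+ *
+ *	A minimal sketch (*pid* and *comm* stand in for values
+ *	gathered earlier in the program; the format string must be
+ *	read-only, e.g. a static const string):
+ *
+ *	::
+ *
+ *		static const char fmt[] = "pid %d comm %s";
+ *		char out[64];
+ *		__u64 args[2] = { pid, (unsigned long)comm };
+ *
+ *		bpf_snprintf(out, sizeof(out), fmt,
+ *			     args, sizeof(args));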
+ *
+ * long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
+ * Description
+ * Execute bpf syscall with given arguments.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
+ * Description
+ * Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
+ * Return
+ * Returns btf_id and btf_obj_fd in lower and upper 32 bits.
+ *
+ * long bpf_sys_close(u32 fd)
+ * Description
+ * Execute close syscall for given FD.
+ * Return
+ * A syscall result.
+ *
+ * long bpf_timer_init(struct bpf_timer *timer, struct bpf_map *map, u64 flags)
+ * Description
+ * Initialize the timer.
+ * First 4 bits of *flags* specify clockid.
+ * Only CLOCK_MONOTONIC, CLOCK_REALTIME, CLOCK_BOOTTIME are allowed.
+ * All other bits of *flags* are reserved.
+ * The verifier will reject the program if *timer* is not from
+ * the same *map*.
+ * Return
+ * 0 on success.
+ * **-EBUSY** if *timer* is already initialized.
+ * **-EINVAL** if invalid *flags* are passed.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such map in bpffs. When map is unpinned or file descriptor is
+ * closed all timers in the map will be cancelled and freed.
+ *
+ * long bpf_timer_set_callback(struct bpf_timer *timer, void *callback_fn)
+ * Description
+ * Configure the timer to call *callback_fn* static function.
+ * Return
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EPERM** if *timer* is in a map that doesn't have any user references.
+ * The user space should either hold a file descriptor to a map with timers
+ * or pin such map in bpffs. When map is unpinned or file descriptor is
+ * closed all timers in the map will be cancelled and freed.
+ *
+ * long bpf_timer_start(struct bpf_timer *timer, u64 nsecs, u64 flags)
+ * Description
+ * Set timer expiration N nanoseconds from the current time. The
+ * configured callback will be invoked in soft irq context on some cpu
+ * and will not repeat unless another bpf_timer_start() is made.
+ * In such a case the next invocation can migrate to a different cpu.
+ * Since struct bpf_timer is a field inside a map element, the map
+ * owns the timer. The bpf_timer_set_callback() will increment refcnt
+ * of BPF program to make sure that callback_fn code stays valid.
+ * When user space reference to a map reaches zero all timers
+ * in a map are cancelled and corresponding program's refcnts are
+ * decremented. This is done to make sure that Ctrl-C of a user
+ * process doesn't leave any timers running. If map is pinned in
+ * bpffs the callback_fn can re-arm itself indefinitely.
+ * bpf_map_update/delete_elem() helpers and user space sys_bpf commands
+ * cancel and free the timer in the given map element.
+ * The map can contain timers that invoke callback_fn-s from different
+ * programs. The same callback_fn can serve different timers from
+ * different maps if key/value layout matches across maps.
+ * Every bpf_timer_set_callback() can have different callback_fn.
+ *
+ * *flags* can be one of:
+ *
+ * **BPF_F_TIMER_ABS**
+ * Start the timer in absolute expire value instead of the
+ * default relative one.
+ *
+ * Return
+ * 0 on success.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier
+ * or invalid *flags* are passed.
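+ *
+ *	A minimal sketch of arming a one-second timer stored in an
+ *	array map named *timer_map*, whose value embeds a
+ *	**struct bpf_timer**, with a static callback *timer_cb*
+ *	defined in the same program:
+ *
+ *	::
+ *
+ *		struct map_val { struct bpf_timer t; };
+ *
+ *		static int timer_cb(void *map, int *key,
+ *				    struct map_val *val)
+ *		{
+ *			return 0;	// one-shot: do not re-arm
+ *		}
+ *
+ *		__u32 key = 0;
+ *		struct map_val *val;
+ *
+ *		val = bpf_map_lookup_elem(&timer_map, &key);
+ *		if (val) {
+ *			bpf_timer_init(&val->t, &timer_map,
+ *				       CLOCK_MONOTONIC);
+ *			bpf_timer_set_callback(&val->t, timer_cb);
+ *			bpf_timer_start(&val->t, 1000000000ULL, 0);
+ *		}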
+ *
+ * long bpf_timer_cancel(struct bpf_timer *timer)
+ * Description
+ * Cancel the timer and wait for callback_fn to finish if it was running.
+ * Return
+ * 0 if the timer was not active.
+ * 1 if the timer was active.
+ * **-EINVAL** if *timer* was not initialized with bpf_timer_init() earlier.
+ * **-EDEADLK** if callback_fn tried to call bpf_timer_cancel() on its
+ * own timer which would have led to a deadlock otherwise.
+ *
+ * u64 bpf_get_func_ip(void *ctx)
+ * Description
+ * Get address of the traced function (for tracing and kprobe programs).
+ * Return
+ * Address of the traced function.
+ * 0 for kprobes placed within the function (not at the entry).
+ *
+ * u64 bpf_get_attach_cookie(void *ctx)
+ * Description
+ * Get bpf_cookie value provided (optionally) during the program
+ * attachment. It might be different for each individual
+ * attachment, even if BPF program itself is the same.
+ * Expects BPF program context *ctx* as a first argument.
+ *
+ * Supported for the following program types:
+ * - kprobe/uprobe;
+ * - tracepoint;
+ * - perf_event.
+ * Return
+ * Value specified by user at BPF link creation/attachment time
+ * or 0, if it was not specified.
+ *
+ * long bpf_task_pt_regs(struct task_struct *task)
+ * Description
+ * Get the struct pt_regs associated with **task**.
+ * Return
+ * A pointer to struct pt_regs.
+ *
+ * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags)
+ * Description
+ * Get branch trace from hardware engines like Intel LBR. The
+ * hardware engine is stopped shortly after the helper is
+ * called. Therefore, the user needs to filter branch entries
+ * based on the actual use case. To capture branch trace
+ * before the trigger point of the BPF program, the helper
+ * should be called at the beginning of the BPF program.
+ *
+ * The data is stored as struct perf_branch_entry into output
+ * buffer *entries*. *size* is the size of *entries* in bytes.
+ * *flags* is reserved for now and must be zero.
+ *
+ * Return
+ * On success, the number of bytes written to *entries*. On error,
+ * a negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-ENOENT** if architecture does not support branch records.
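A small sketch of the intended call pattern, placed at the top of a tracing program so the snapshot is taken before further branches dilute the LBR contents. The attach point and buffer size are illustrative; vmlinux.h plus libbpf's bpf_helpers.h/bpf_tracing.h are assumed:

    SEC("fentry/bpf_fentry_test1")
    int BPF_PROG(snapshot_branches)
    {
        struct perf_branch_entry entries[16] = {};
        long bytes = bpf_get_branch_snapshot(entries, sizeof(entries), 0);

        if (bytes > 0) {
            __u32 nr = bytes / sizeof(struct perf_branch_entry);

            /* entries[0..nr-1].from / .to hold branch source/target addrs */
            bpf_printk("captured %u branch entries", nr);
        }
        return 0;
    }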
+ *
+ * long bpf_trace_vprintk(const char *fmt, u32 fmt_size, const void *data, u32 data_len)
+ * Description
+ * Behaves like **bpf_trace_printk**\ () helper, but takes an array of u64
+ * to format and can handle more format args as a result.
+ *
+ * Arguments are to be used as in **bpf_seq_printf**\ () helper.
+ * Return
+ * The number of bytes written to the buffer, or a negative error
+ * in case of failure.
+ *
+ * struct unix_sock *bpf_skc_to_unix_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *unix_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_kallsyms_lookup_name(const char *name, int name_sz, int flags, u64 *res)
+ * Description
+ * Get the address of a kernel symbol, returned in *res*. *res* is
+ * set to 0 if the symbol is not found.
+ * Return
+ * On success, zero. On error, a negative value.
+ *
+ * **-EINVAL** if *flags* is not zero.
+ *
+ * **-EINVAL** if string *name* is not the same size as *name_sz*.
+ *
+ * **-ENOENT** if symbol is not found.
+ *
+ * **-EPERM** if caller does not have permission to obtain kernel address.
+ *
+ * long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * Find vma of *task* that contains *addr*, call *callback_fn*
+ * function with *task*, *vma*, and *callback_ctx*.
+ * The *callback_fn* should be a static function and
+ * the *callback_ctx* should be a pointer to the stack.
+ * The *flags* argument is used to control certain aspects of the
+ * helper. Currently, *flags* must be 0.
+ *
+ * The expected callback signature is
+ *
+ * long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
+ *
+ * Return
+ * 0 on success.
+ * **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
+ * **-EBUSY** if failed to try lock mmap_lock.
+ * **-EINVAL** for invalid **flags**.
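A minimal sketch of the callback wiring, in the style of the kernel selftests. The address, section name and callback body are illustrative; vmlinux.h plus libbpf's bpf_helpers.h are assumed:

    static long log_vma(struct task_struct *task, struct vm_area_struct *vma,
                        void *data)
    {
        __u64 *vm_flags = data;

        *vm_flags = vma->vm_flags;  /* record the VMA's protection bits */
        return 0;
    }

    SEC("perf_event")
    int find_addr_vma(void *ctx)
    {
        struct task_struct *task = bpf_get_current_task_btf();
        __u64 vm_flags = 0;

        /* 0x400000 is an illustrative user-space address */
        if (!bpf_find_vma(task, 0x400000, log_vma, &vm_flags, 0))
            bpf_printk("vm_flags: %llx", vm_flags);
        return 0;
    }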
+ *
+ * long bpf_loop(u32 nr_loops, void *callback_fn, void *callback_ctx, u64 flags)
+ * Description
+ * For **nr_loops**, call **callback_fn** function
+ * with **callback_ctx** as the context parameter.
+ * The **callback_fn** should be a static function and
+ * the **callback_ctx** should be a pointer to the stack.
+ * The **flags** argument is used to control certain aspects of the
+ * helper. Currently, **flags** must be 0 and **nr_loops** is
+ * limited to 1 << 23 (~8 million) loops.
+ *
+ * The expected callback signature is
+ *
+ * long (\*callback_fn)(u32 index, void \*ctx);
+ *
+ * where **index** is the current index in the loop. The index
+ * is zero-indexed.
+ *
+ * If **callback_fn** returns 0, the helper will continue to the next
+ * iteration. If the return value is 1, the helper will skip the rest
+ * of the iterations and return. Other return values are not used now,
+ * and will be rejected by the verifier.
+ *
+ * Return
+ * The number of loops performed, **-EINVAL** for invalid **flags**,
+ * **-E2BIG** if **nr_loops** exceeds the maximum number of loops.
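A short sketch of the callback contract described above. Names and the attach point are illustrative; bpf_helpers.h/bpf_tracing.h are assumed:

    static long add_index(__u32 index, void *ctx)
    {
        __u64 *sum = ctx;

        *sum += index;
        return 0;   /* 0 = continue looping, 1 = stop early */
    }

    SEC("fentry/bpf_fentry_test1")
    int BPF_PROG(loop_example)
    {
        __u64 sum = 0;
        long iters = bpf_loop(100, add_index, &sum, 0);

        bpf_printk("ran %ld iterations, sum=%llu", iters, sum);
        return 0;
    }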
+ *
+ * long bpf_strncmp(const char *s1, u32 s1_sz, const char *s2)
+ * Description
+ * Do strncmp() between **s1** and **s2**. **s1** doesn't need
+ * to be null-terminated and **s1_sz** is the maximum storage
+ * size of **s1**. **s2** must be a read-only string.
+ * Return
+ * An integer less than, equal to, or greater than zero
+ * if the first **s1_sz** bytes of **s1** are found to be
+ * less than, equal to, or greater than **s2**, respectively.
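A small usage sketch. Note that **s2** has to be a constant (read-only) string, while **s1** may be an arbitrary buffer whose storage size is known; the attach point is illustrative and bpf_helpers.h is assumed:

    SEC("tracepoint/syscalls/sys_enter_execve")
    int check_comm(void *ctx)
    {
        char comm[16];

        bpf_get_current_comm(comm, sizeof(comm));
        /* s1 need not be NUL-terminated; s1_sz is its storage size */
        if (!bpf_strncmp(comm, sizeof(comm), "bash"))
            bpf_printk("execve issued from bash");
        return 0;
    }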
+ *
+ * long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
+ * Description
+ * Get the **n**-th argument register (zero based) of the traced function
+ * (for tracing programs), returned in **value**.
+ *
+ * Return
+ * 0 on success.
+ * **-EINVAL** if n >= argument register count of traced function.
+ *
+ * long bpf_get_func_ret(void *ctx, u64 *value)
+ * Description
+ * Get return value of the traced function (for tracing programs)
+ * in **value**.
+ *
+ * Return
+ * 0 on success.
+ * **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
+ *
+ * long bpf_get_func_arg_cnt(void *ctx)
+ * Description
+ * Get the number of registers in which the traced function's arguments
+ * are stored (for tracing programs).
+ *
+ * Return
+ * The number of argument registers of the traced function.
+ *
+ * int bpf_get_retval(void)
+ * Description
+ * Get the BPF program's return value that will be returned to the upper layers.
+ *
+ * This helper is currently supported by cgroup programs and only by the hooks
+ * where BPF program's return value is returned to the userspace via errno.
+ * Return
+ * The BPF program's return value.
+ *
+ * int bpf_set_retval(int retval)
+ * Description
+ * Set the BPF program's return value that will be returned to the upper layers.
+ *
+ * This helper is currently supported by cgroup programs and only by the hooks
+ * where BPF program's return value is returned to the userspace via errno.
+ *
+ * Note that there is the following corner case where the program exports an error
+ * via bpf_set_retval but signals success via 'return 1':
+ *
+ * bpf_set_retval(-EPERM);
+ * return 1;
+ *
+ * In this case, the BPF program's return value will use helper's -EPERM. This
+ * still holds true for cgroup/bind{4,6} which supports extra 'return 3' success case.
+ *
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * u64 bpf_xdp_get_buff_len(struct xdp_buff *xdp_md)
+ * Description
+ * Get the total size of a given xdp buff (linear and paged area)
+ * Return
+ * The total size of a given xdp buffer.
+ *
+ * long bpf_xdp_load_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
+ * Description
+ * This helper is provided as an easy way to load data from an
+ * xdp buffer. It can be used to load *len* bytes starting at *offset*
+ * from the frame associated with *xdp_md* into the buffer pointed to
+ * by *buf*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_xdp_store_bytes(struct xdp_buff *xdp_md, u32 offset, void *buf, u32 len)
+ * Description
+ * Store *len* bytes from buffer *buf* into the frame
+ * associated with *xdp_md*, at *offset*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_copy_from_user_task(void *dst, u32 size, const void *user_ptr, struct task_struct *tsk, u64 flags)
+ * Description
+ * Read *size* bytes from user space address *user_ptr* in *tsk*'s
+ * address space and store the data in *dst*. *flags* is not
+ * used yet and is provided for future extensibility. This helper
+ * can only be used by sleepable programs.
+ * Return
+ * 0 on success, or a negative error in case of failure. On error
+ * *dst* buffer is zeroed out.
+ *
+ * long bpf_skb_set_tstamp(struct sk_buff *skb, u64 tstamp, u32 tstamp_type)
+ * Description
+ * Change __sk_buff->tstamp_type to *tstamp_type*
+ * and set __sk_buff->tstamp to *tstamp* at the same time.
+ *
+ * If there is no need to change __sk_buff->tstamp_type,
+ * the tstamp value can be written directly to __sk_buff->tstamp
+ * instead.
+ *
+ * BPF_SKB_TSTAMP_DELIVERY_MONO is the only tstamp that
+ * will be kept during bpf_redirect_*(). A non zero
+ * *tstamp* must be used with the BPF_SKB_TSTAMP_DELIVERY_MONO
+ * *tstamp_type*.
+ *
+ * A BPF_SKB_TSTAMP_UNSPEC *tstamp_type* can only be used
+ * with a zero *tstamp*.
+ *
+ * Only IPv4 and IPv6 skb->protocol are supported.
+ *
+ * This function is most useful when the program needs to set a
+ * mono delivery time in __sk_buff->tstamp and then
+ * bpf_redirect_*() to the egress of an interface. For example,
+ * it can change the (rcv) timestamp in __sk_buff->tstamp at
+ * ingress to a mono delivery time and then bpf_redirect_*()
+ * to sch_fq@phy-dev.
+ * Return
+ * 0 on success.
+ * **-EINVAL** for invalid input
+ * **-EOPNOTSUPP** for unsupported protocol
+ *
+ * long bpf_ima_file_hash(struct file *file, void *dst, u32 size)
+ * Description
+ * Returns a calculated IMA hash of the *file*.
+ * If the hash is larger than *size*, then only *size*
+ * bytes will be copied to *dst*.
+ * Return
+ * The **hash_algo** is returned on success,
+ * **-EOPNOTSUPP** if the hash calculation failed or **-EINVAL** if
+ * invalid arguments are passed.
+ *
+ * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * Description
+ * Exchange kptr at pointer *map_value* with *ptr*, and return the
+ * old value. *ptr* can be NULL, otherwise it must be a referenced
+ * pointer which will be released when this helper is called.
+ * Return
+ * The old value of kptr (which can be NULL). The returned pointer,
+ * if not NULL, is a reference which must be released using its
+ * corresponding release function, or moved into a BPF map before
+ * program exit.
+ *
+ * void *bpf_map_lookup_percpu_elem(struct bpf_map *map, const void *key, u32 cpu)
+ * Description
+ * Perform a lookup in *percpu map* for an entry associated to
+ * *key* on *cpu*.
+ * Return
+ * Map value associated to *key* on *cpu*, or **NULL** if no entry
+ * was found or *cpu* is invalid.
+ *
+ * struct mptcp_sock *bpf_skc_to_mptcp_sock(void *sk)
+ * Description
+ * Dynamically cast a *sk* pointer to a *mptcp_sock* pointer.
+ * Return
+ * *sk* if casting is valid, or **NULL** otherwise.
+ *
+ * long bpf_dynptr_from_mem(void *data, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Get a dynptr to local memory *data*.
+ *
+ * *data* must be a ptr to a map value.
+ * The maximum *size* supported is DYNPTR_MAX_SIZE.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
+ * -EINVAL if flags is not 0.
+ *
+ * long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
+ * Description
+ * Reserve *size* bytes of payload in a ring buffer *ringbuf*
+ * through the dynptr interface. *flags* must be 0.
+ *
+ * Please note that a corresponding bpf_ringbuf_submit_dynptr or
+ * bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
+ * reservation fails. This is enforced by the verifier.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Submit the reserved ring buffer sample, pointed to by *ptr*,
+ * through the dynptr interface. This is a no-op if the dynptr is
+ * invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_submit'.
+ * Return
+ * Nothing. Always succeeds.
+ *
+ * void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
+ * Description
+ * Discard reserved ring buffer sample through the dynptr
+ * interface. This is a no-op if the dynptr is invalid/null.
+ *
+ * For more information on *flags*, please see
+ * 'bpf_ringbuf_discard'.
+ * Return
+ * Nothing. Always succeeds.
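A sketch of the reserve/commit discipline spelled out above: the dynptr must always be handed to submit or discard, even when the reservation fails. The event layout, map size and attach point are illustrative; linux/bpf.h (or vmlinux.h) plus bpf_helpers.h are assumed:

    struct event {
        __u32 pid;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_RINGBUF);
        __uint(max_entries, 4096);
    } rb SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_openat")
    int emit_event(void *ctx)
    {
        struct bpf_dynptr ptr;
        struct event *e;

        if (bpf_ringbuf_reserve_dynptr(&rb, sizeof(*e), 0, &ptr)) {
            /* even on failure, the dynptr must be released */
            bpf_ringbuf_discard_dynptr(&ptr, 0);
            return 0;
        }

        e = bpf_dynptr_data(&ptr, 0, sizeof(*e));
        if (e)
            e->pid = bpf_get_current_pid_tgid() >> 32;
        bpf_ringbuf_submit_dynptr(&ptr, 0);
        return 0;
    }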
+ *
+ * long bpf_dynptr_read(void *dst, u32 len, const struct bpf_dynptr *src, u32 offset, u64 flags)
+ * Description
+ * Read *len* bytes from *src* into *dst*, starting from *offset*
+ * into *src*.
+ * *flags* is currently unused.
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *src*'s data, -EINVAL if *src* is an invalid dynptr or if
+ * *flags* is not 0.
+ *
+ * long bpf_dynptr_write(const struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)
+ * Description
+ * Write *len* bytes from *src* into *dst*, starting from *offset*
+ * into *dst*.
+ *
+ * *flags* must be 0 except for skb-type dynptrs.
+ *
+ * For skb-type dynptrs:
+ * * All data slices of the dynptr are automatically
+ * invalidated after **bpf_dynptr_write**\ (). This is
+ * because writing may pull the skb and change the
+ * underlying packet buffer.
+ *
+ * * For *flags*, please see the flags accepted by
+ * **bpf_skb_store_bytes**\ ().
+ * Return
+ * 0 on success, -E2BIG if *offset* + *len* exceeds the length
+ * of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst*
+ * is a read-only dynptr or if *flags* is not correct. For skb-type dynptrs,
+ * other errors correspond to errors returned by **bpf_skb_store_bytes**\ ().
+ *
+ * void *bpf_dynptr_data(const struct bpf_dynptr *ptr, u32 offset, u32 len)
+ * Description
+ * Get a pointer to the underlying dynptr data.
+ *
+ * *len* must be a statically known value. The returned data slice
+ * is invalidated whenever the dynptr is invalidated.
+ *
+ * skb and xdp type dynptrs may not use bpf_dynptr_data. They should
+ * instead use bpf_dynptr_slice and bpf_dynptr_slice_rdwr.
+ * Return
+ * Pointer to the underlying dynptr data, NULL if the dynptr is
+ * read-only, if the dynptr is invalid, or if the offset and length
+ * is out of bounds.
+ *
+ * s64 bpf_tcp_raw_gen_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th, u32 th_len)
+ * Description
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv4/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ * Return
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie;
+ * the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
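The packed return value can be unpacked as sketched below; the same layout applies to the IPv6 variant that follows. Here *iph* and *th* are assumed to point at headers already validated against the packet bounds:

    __s64 value = bpf_tcp_raw_gen_syncookie_ipv4(iph, th, sizeof(*th));

    if (value >= 0) {
        __u32 cookie = (__u32)value;   /* bits 0-31 */
        __u16 mss = value >> 32;       /* bits 32-47 */

        /* e.g. place the cookie in the SYNACK sequence number and
         * encode the MSS in the advertised TCP options
         */
    }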
+ *
+ * s64 bpf_tcp_raw_gen_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th, u32 th_len)
+ * Description
+ * Try to issue a SYN cookie for the packet with corresponding
+ * IPv6/TCP headers, *iph* and *th*, without depending on a
+ * listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the start of the TCP header, while *th_len*
+ * contains the length of the TCP header (at least
+ * **sizeof**\ (**struct tcphdr**)).
+ * Return
+ * On success, the lower 32 bits hold the generated SYN cookie,
+ * followed by 16 bits which hold the MSS value for that cookie;
+ * the top 16 bits are unused.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EINVAL** if *th_len* is invalid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
+ *
+ * long bpf_tcp_raw_check_syncookie_ipv4(struct iphdr *iph, struct tcphdr *th)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv4 header.
+ *
+ * *th* points to the TCP header.
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * long bpf_tcp_raw_check_syncookie_ipv6(struct ipv6hdr *iph, struct tcphdr *th)
+ * Description
+ * Check whether *iph* and *th* contain a valid SYN cookie ACK
+ * without depending on a listening socket.
+ *
+ * *iph* points to the IPv6 header.
+ *
+ * *th* points to the TCP header.
+ * Return
+ * 0 if *iph* and *th* are a valid SYN cookie ACK.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EACCES** if the SYN cookie is not valid.
+ *
+ * **-EPROTONOSUPPORT** if CONFIG_IPV6 is not builtin.
+ *
+ * u64 bpf_ktime_get_tai_ns(void)
+ * Description
+ * Get the time of a nonsettable system-wide clock derived from
+ * wall-clock time but ignoring leap seconds. Unlike CLOCK_REALTIME,
+ * this clock does not experience discontinuities or backwards jumps
+ * caused by NTP inserting leap seconds.
+ *
+ * See: **clock_gettime**\ (**CLOCK_TAI**)
+ * Return
+ * Current *ktime*.
+ *
+ * long bpf_user_ringbuf_drain(struct bpf_map *map, void *callback_fn, void *ctx, u64 flags)
+ * Description
+ * Drain samples from the specified user ring buffer, and invoke
+ * the provided callback for each such sample:
+ *
+ * long (\*callback_fn)(const struct bpf_dynptr \*dynptr, void \*ctx);
+ *
+ * If **callback_fn** returns 0, the helper will continue to try
+ * and drain the next sample, up to a maximum of
+ * BPF_MAX_USER_RINGBUF_SAMPLES samples. If the return value is 1,
+ * the helper will skip the rest of the samples and return. Other
+ * return values are not used now, and will be rejected by the
+ * verifier.
+ * Return
+ * The number of drained samples if no error was encountered while
+ * draining samples, or 0 if no samples were present in the ring
+ * buffer. If a user-space producer was epoll-waiting on this map,
+ * and at least one sample was drained, they will receive an event
+ * notification notifying them of available space in the ring
+ * buffer. If the BPF_RB_NO_WAKEUP flag is passed to this
+ * function, no wakeup notification will be sent. If the
+ * BPF_RB_FORCE_WAKEUP flag is passed, a wakeup notification will
+ * be sent even if no sample was drained.
+ *
+ * On failure, the returned value is one of the following:
+ *
+ * **-EBUSY** if the ring buffer is contended, and another calling
+ * context was concurrently draining the ring buffer.
+ *
+ * **-EINVAL** if user-space is not properly tracking the ring
+ * buffer due to the producer position not being aligned to 8
+ * bytes, a sample not being aligned to 8 bytes, or the producer
+ * position not matching the advertised length of a sample.
+ *
+ * **-E2BIG** if user-space has tried to publish a sample which is
+ * larger than the size of the ring buffer, or which cannot fit
+ * within a struct bpf_dynptr.
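A sketch of the drain loop, with a hypothetical fixed-size sample layout agreed with the user-space producer. Names, layout and attach point are illustrative; bpf_helpers.h/bpf_tracing.h are assumed:

    struct user_sample {
        __u64 value;
    };

    struct {
        __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
        __uint(max_entries, 4096);
    } user_rb SEC(".maps");

    static long handle_sample(struct bpf_dynptr *dynptr, void *ctx)
    {
        __u64 *sum = ctx;
        struct user_sample sample;

        if (bpf_dynptr_read(&sample, sizeof(sample), dynptr, 0, 0))
            return 1;   /* malformed sample: stop draining */
        *sum += sample.value;
        return 0;       /* keep draining */
    }

    SEC("fentry/bpf_fentry_test1")
    int BPF_PROG(drain_user_rb)
    {
        __u64 sum = 0;
        long drained = bpf_user_ringbuf_drain(&user_rb, handle_sample, &sum, 0);

        bpf_printk("drained %ld samples, sum=%llu", drained, sum);
        return 0;
    }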
+ *
+ * void *bpf_cgrp_storage_get(struct bpf_map *map, struct cgroup *cgroup, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from the *cgroup*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *cgroup* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *cgroup*) except that this
+ * helper enforces that the key must be a cgroup struct and the map
+ * must be a **BPF_MAP_TYPE_CGRP_STORAGE**.
+ *
+ * In reality, the local-storage value is embedded directly inside of the
+ * *cgroup* object itself, rather than being located in the
+ * **BPF_MAP_TYPE_CGRP_STORAGE** map. When the local-storage value is
+ * queried for some *map* on a *cgroup* object, the kernel will perform an
+ * O(n) iteration over all of the live local-storage values for that
+ * *cgroup* object until the local-storage value for the *map* is found.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * long bpf_cgrp_storage_delete(struct bpf_map *map, struct cgroup *cgroup)
+ * Description
+ * Delete a bpf_local_storage from a *cgroup*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
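A minimal sketch of per-cgroup storage keyed off the current task's default cgroup, in the style of the kernel selftests. Map and program names are illustrative; vmlinux.h plus bpf_helpers.h/bpf_tracing.h are assumed:

    struct {
        __uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, __u64);
    } cgrp_syscalls SEC(".maps");

    SEC("tp_btf/sys_enter")
    int BPF_PROG(count_per_cgroup, struct pt_regs *regs, long id)
    {
        struct task_struct *task = bpf_get_current_task_btf();
        __u64 *count;

        count = bpf_cgrp_storage_get(&cgrp_syscalls, task->cgroups->dfl_cgrp,
                                     0, BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (count)
            __sync_fetch_and_add(count, 1);
        return 0;
    }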
*/
-#define __BPF_FUNC_MAPPER(FN) \
- FN(unspec), \
- FN(map_lookup_elem), \
- FN(map_update_elem), \
- FN(map_delete_elem), \
- FN(probe_read), \
- FN(ktime_get_ns), \
- FN(trace_printk), \
- FN(get_prandom_u32), \
- FN(get_smp_processor_id), \
- FN(skb_store_bytes), \
- FN(l3_csum_replace), \
- FN(l4_csum_replace), \
- FN(tail_call), \
- FN(clone_redirect), \
- FN(get_current_pid_tgid), \
- FN(get_current_uid_gid), \
- FN(get_current_comm), \
- FN(get_cgroup_classid), \
- FN(skb_vlan_push), \
- FN(skb_vlan_pop), \
- FN(skb_get_tunnel_key), \
- FN(skb_set_tunnel_key), \
- FN(perf_event_read), \
- FN(redirect), \
- FN(get_route_realm), \
- FN(perf_event_output), \
- FN(skb_load_bytes), \
- FN(get_stackid), \
- FN(csum_diff), \
- FN(skb_get_tunnel_opt), \
- FN(skb_set_tunnel_opt), \
- FN(skb_change_proto), \
- FN(skb_change_type), \
- FN(skb_under_cgroup), \
- FN(get_hash_recalc), \
- FN(get_current_task), \
- FN(probe_write_user), \
- FN(current_task_under_cgroup), \
- FN(skb_change_tail), \
- FN(skb_pull_data), \
- FN(csum_update), \
- FN(set_hash_invalid), \
- FN(get_numa_node_id), \
- FN(skb_change_head), \
- FN(xdp_adjust_head), \
- FN(probe_read_str), \
- FN(get_socket_cookie), \
- FN(get_socket_uid), \
- FN(set_hash), \
- FN(setsockopt), \
- FN(skb_adjust_room), \
- FN(redirect_map), \
- FN(sk_redirect_map), \
- FN(sock_map_update), \
- FN(xdp_adjust_meta), \
- FN(perf_event_read_value), \
- FN(perf_prog_read_value), \
- FN(getsockopt), \
- FN(override_return), \
- FN(sock_ops_cb_flags_set), \
- FN(msg_redirect_map), \
- FN(msg_apply_bytes), \
- FN(msg_cork_bytes), \
- FN(msg_pull_data), \
- FN(bind), \
- FN(xdp_adjust_tail), \
- FN(skb_get_xfrm_state), \
- FN(get_stack), \
- FN(skb_load_bytes_relative), \
- FN(fib_lookup), \
- FN(sock_hash_update), \
- FN(msg_redirect_hash), \
- FN(sk_redirect_hash), \
- FN(lwt_push_encap), \
- FN(lwt_seg6_store_bytes), \
- FN(lwt_seg6_adjust_srh), \
- FN(lwt_seg6_action), \
- FN(rc_repeat), \
- FN(rc_keydown), \
- FN(skb_cgroup_id), \
- FN(get_current_cgroup_id), \
- FN(get_local_storage), \
- FN(sk_select_reuseport), \
- FN(skb_ancestor_cgroup_id), \
- FN(sk_lookup_tcp), \
- FN(sk_lookup_udp), \
- FN(sk_release), \
- FN(map_push_elem), \
- FN(map_pop_elem), \
- FN(map_peek_elem), \
- FN(msg_push_data), \
- FN(msg_pop_data), \
- FN(rc_pointer_rel), \
- FN(spin_lock), \
- FN(spin_unlock), \
- FN(sk_fullsock), \
- FN(tcp_sock), \
- FN(skb_ecn_set_ce), \
- FN(get_listener_sock), \
- FN(skc_lookup_tcp), \
- FN(tcp_check_syncookie), \
- FN(sysctl_get_name), \
- FN(sysctl_get_current_value), \
- FN(sysctl_get_new_value), \
- FN(sysctl_set_new_value), \
- FN(strtol), \
- FN(strtoul), \
- FN(sk_storage_get), \
- FN(sk_storage_delete), \
- FN(send_signal), \
- FN(tcp_gen_syncookie), \
- FN(skb_output), \
- FN(probe_read_user), \
- FN(probe_read_kernel), \
- FN(probe_read_user_str), \
- FN(probe_read_kernel_str), \
- FN(tcp_send_ack), \
- FN(send_signal_thread), \
- FN(jiffies64), \
- FN(read_branch_records), \
- FN(get_ns_current_pid_tgid), \
- FN(xdp_output), \
- FN(get_netns_cookie), \
- FN(get_current_ancestor_cgroup_id), \
- FN(sk_assign),
+#define ___BPF_FUNC_MAPPER(FN, ctx...) \
+ FN(unspec, 0, ##ctx) \
+ FN(map_lookup_elem, 1, ##ctx) \
+ FN(map_update_elem, 2, ##ctx) \
+ FN(map_delete_elem, 3, ##ctx) \
+ FN(probe_read, 4, ##ctx) \
+ FN(ktime_get_ns, 5, ##ctx) \
+ FN(trace_printk, 6, ##ctx) \
+ FN(get_prandom_u32, 7, ##ctx) \
+ FN(get_smp_processor_id, 8, ##ctx) \
+ FN(skb_store_bytes, 9, ##ctx) \
+ FN(l3_csum_replace, 10, ##ctx) \
+ FN(l4_csum_replace, 11, ##ctx) \
+ FN(tail_call, 12, ##ctx) \
+ FN(clone_redirect, 13, ##ctx) \
+ FN(get_current_pid_tgid, 14, ##ctx) \
+ FN(get_current_uid_gid, 15, ##ctx) \
+ FN(get_current_comm, 16, ##ctx) \
+ FN(get_cgroup_classid, 17, ##ctx) \
+ FN(skb_vlan_push, 18, ##ctx) \
+ FN(skb_vlan_pop, 19, ##ctx) \
+ FN(skb_get_tunnel_key, 20, ##ctx) \
+ FN(skb_set_tunnel_key, 21, ##ctx) \
+ FN(perf_event_read, 22, ##ctx) \
+ FN(redirect, 23, ##ctx) \
+ FN(get_route_realm, 24, ##ctx) \
+ FN(perf_event_output, 25, ##ctx) \
+ FN(skb_load_bytes, 26, ##ctx) \
+ FN(get_stackid, 27, ##ctx) \
+ FN(csum_diff, 28, ##ctx) \
+ FN(skb_get_tunnel_opt, 29, ##ctx) \
+ FN(skb_set_tunnel_opt, 30, ##ctx) \
+ FN(skb_change_proto, 31, ##ctx) \
+ FN(skb_change_type, 32, ##ctx) \
+ FN(skb_under_cgroup, 33, ##ctx) \
+ FN(get_hash_recalc, 34, ##ctx) \
+ FN(get_current_task, 35, ##ctx) \
+ FN(probe_write_user, 36, ##ctx) \
+ FN(current_task_under_cgroup, 37, ##ctx) \
+ FN(skb_change_tail, 38, ##ctx) \
+ FN(skb_pull_data, 39, ##ctx) \
+ FN(csum_update, 40, ##ctx) \
+ FN(set_hash_invalid, 41, ##ctx) \
+ FN(get_numa_node_id, 42, ##ctx) \
+ FN(skb_change_head, 43, ##ctx) \
+ FN(xdp_adjust_head, 44, ##ctx) \
+ FN(probe_read_str, 45, ##ctx) \
+ FN(get_socket_cookie, 46, ##ctx) \
+ FN(get_socket_uid, 47, ##ctx) \
+ FN(set_hash, 48, ##ctx) \
+ FN(setsockopt, 49, ##ctx) \
+ FN(skb_adjust_room, 50, ##ctx) \
+ FN(redirect_map, 51, ##ctx) \
+ FN(sk_redirect_map, 52, ##ctx) \
+ FN(sock_map_update, 53, ##ctx) \
+ FN(xdp_adjust_meta, 54, ##ctx) \
+ FN(perf_event_read_value, 55, ##ctx) \
+ FN(perf_prog_read_value, 56, ##ctx) \
+ FN(getsockopt, 57, ##ctx) \
+ FN(override_return, 58, ##ctx) \
+ FN(sock_ops_cb_flags_set, 59, ##ctx) \
+ FN(msg_redirect_map, 60, ##ctx) \
+ FN(msg_apply_bytes, 61, ##ctx) \
+ FN(msg_cork_bytes, 62, ##ctx) \
+ FN(msg_pull_data, 63, ##ctx) \
+ FN(bind, 64, ##ctx) \
+ FN(xdp_adjust_tail, 65, ##ctx) \
+ FN(skb_get_xfrm_state, 66, ##ctx) \
+ FN(get_stack, 67, ##ctx) \
+ FN(skb_load_bytes_relative, 68, ##ctx) \
+ FN(fib_lookup, 69, ##ctx) \
+ FN(sock_hash_update, 70, ##ctx) \
+ FN(msg_redirect_hash, 71, ##ctx) \
+ FN(sk_redirect_hash, 72, ##ctx) \
+ FN(lwt_push_encap, 73, ##ctx) \
+ FN(lwt_seg6_store_bytes, 74, ##ctx) \
+ FN(lwt_seg6_adjust_srh, 75, ##ctx) \
+ FN(lwt_seg6_action, 76, ##ctx) \
+ FN(rc_repeat, 77, ##ctx) \
+ FN(rc_keydown, 78, ##ctx) \
+ FN(skb_cgroup_id, 79, ##ctx) \
+ FN(get_current_cgroup_id, 80, ##ctx) \
+ FN(get_local_storage, 81, ##ctx) \
+ FN(sk_select_reuseport, 82, ##ctx) \
+ FN(skb_ancestor_cgroup_id, 83, ##ctx) \
+ FN(sk_lookup_tcp, 84, ##ctx) \
+ FN(sk_lookup_udp, 85, ##ctx) \
+ FN(sk_release, 86, ##ctx) \
+ FN(map_push_elem, 87, ##ctx) \
+ FN(map_pop_elem, 88, ##ctx) \
+ FN(map_peek_elem, 89, ##ctx) \
+ FN(msg_push_data, 90, ##ctx) \
+ FN(msg_pop_data, 91, ##ctx) \
+ FN(rc_pointer_rel, 92, ##ctx) \
+ FN(spin_lock, 93, ##ctx) \
+ FN(spin_unlock, 94, ##ctx) \
+ FN(sk_fullsock, 95, ##ctx) \
+ FN(tcp_sock, 96, ##ctx) \
+ FN(skb_ecn_set_ce, 97, ##ctx) \
+ FN(get_listener_sock, 98, ##ctx) \
+ FN(skc_lookup_tcp, 99, ##ctx) \
+ FN(tcp_check_syncookie, 100, ##ctx) \
+ FN(sysctl_get_name, 101, ##ctx) \
+ FN(sysctl_get_current_value, 102, ##ctx) \
+ FN(sysctl_get_new_value, 103, ##ctx) \
+ FN(sysctl_set_new_value, 104, ##ctx) \
+ FN(strtol, 105, ##ctx) \
+ FN(strtoul, 106, ##ctx) \
+ FN(sk_storage_get, 107, ##ctx) \
+ FN(sk_storage_delete, 108, ##ctx) \
+ FN(send_signal, 109, ##ctx) \
+ FN(tcp_gen_syncookie, 110, ##ctx) \
+ FN(skb_output, 111, ##ctx) \
+ FN(probe_read_user, 112, ##ctx) \
+ FN(probe_read_kernel, 113, ##ctx) \
+ FN(probe_read_user_str, 114, ##ctx) \
+ FN(probe_read_kernel_str, 115, ##ctx) \
+ FN(tcp_send_ack, 116, ##ctx) \
+ FN(send_signal_thread, 117, ##ctx) \
+ FN(jiffies64, 118, ##ctx) \
+ FN(read_branch_records, 119, ##ctx) \
+ FN(get_ns_current_pid_tgid, 120, ##ctx) \
+ FN(xdp_output, 121, ##ctx) \
+ FN(get_netns_cookie, 122, ##ctx) \
+ FN(get_current_ancestor_cgroup_id, 123, ##ctx) \
+ FN(sk_assign, 124, ##ctx) \
+ FN(ktime_get_boot_ns, 125, ##ctx) \
+ FN(seq_printf, 126, ##ctx) \
+ FN(seq_write, 127, ##ctx) \
+ FN(sk_cgroup_id, 128, ##ctx) \
+ FN(sk_ancestor_cgroup_id, 129, ##ctx) \
+ FN(ringbuf_output, 130, ##ctx) \
+ FN(ringbuf_reserve, 131, ##ctx) \
+ FN(ringbuf_submit, 132, ##ctx) \
+ FN(ringbuf_discard, 133, ##ctx) \
+ FN(ringbuf_query, 134, ##ctx) \
+ FN(csum_level, 135, ##ctx) \
+ FN(skc_to_tcp6_sock, 136, ##ctx) \
+ FN(skc_to_tcp_sock, 137, ##ctx) \
+ FN(skc_to_tcp_timewait_sock, 138, ##ctx) \
+ FN(skc_to_tcp_request_sock, 139, ##ctx) \
+ FN(skc_to_udp6_sock, 140, ##ctx) \
+ FN(get_task_stack, 141, ##ctx) \
+ FN(load_hdr_opt, 142, ##ctx) \
+ FN(store_hdr_opt, 143, ##ctx) \
+ FN(reserve_hdr_opt, 144, ##ctx) \
+ FN(inode_storage_get, 145, ##ctx) \
+ FN(inode_storage_delete, 146, ##ctx) \
+ FN(d_path, 147, ##ctx) \
+ FN(copy_from_user, 148, ##ctx) \
+ FN(snprintf_btf, 149, ##ctx) \
+ FN(seq_printf_btf, 150, ##ctx) \
+ FN(skb_cgroup_classid, 151, ##ctx) \
+ FN(redirect_neigh, 152, ##ctx) \
+ FN(per_cpu_ptr, 153, ##ctx) \
+ FN(this_cpu_ptr, 154, ##ctx) \
+ FN(redirect_peer, 155, ##ctx) \
+ FN(task_storage_get, 156, ##ctx) \
+ FN(task_storage_delete, 157, ##ctx) \
+ FN(get_current_task_btf, 158, ##ctx) \
+ FN(bprm_opts_set, 159, ##ctx) \
+ FN(ktime_get_coarse_ns, 160, ##ctx) \
+ FN(ima_inode_hash, 161, ##ctx) \
+ FN(sock_from_file, 162, ##ctx) \
+ FN(check_mtu, 163, ##ctx) \
+ FN(for_each_map_elem, 164, ##ctx) \
+ FN(snprintf, 165, ##ctx) \
+ FN(sys_bpf, 166, ##ctx) \
+ FN(btf_find_by_name_kind, 167, ##ctx) \
+ FN(sys_close, 168, ##ctx) \
+ FN(timer_init, 169, ##ctx) \
+ FN(timer_set_callback, 170, ##ctx) \
+ FN(timer_start, 171, ##ctx) \
+ FN(timer_cancel, 172, ##ctx) \
+ FN(get_func_ip, 173, ##ctx) \
+ FN(get_attach_cookie, 174, ##ctx) \
+ FN(task_pt_regs, 175, ##ctx) \
+ FN(get_branch_snapshot, 176, ##ctx) \
+ FN(trace_vprintk, 177, ##ctx) \
+ FN(skc_to_unix_sock, 178, ##ctx) \
+ FN(kallsyms_lookup_name, 179, ##ctx) \
+ FN(find_vma, 180, ##ctx) \
+ FN(loop, 181, ##ctx) \
+ FN(strncmp, 182, ##ctx) \
+ FN(get_func_arg, 183, ##ctx) \
+ FN(get_func_ret, 184, ##ctx) \
+ FN(get_func_arg_cnt, 185, ##ctx) \
+ FN(get_retval, 186, ##ctx) \
+ FN(set_retval, 187, ##ctx) \
+ FN(xdp_get_buff_len, 188, ##ctx) \
+ FN(xdp_load_bytes, 189, ##ctx) \
+ FN(xdp_store_bytes, 190, ##ctx) \
+ FN(copy_from_user_task, 191, ##ctx) \
+ FN(skb_set_tstamp, 192, ##ctx) \
+ FN(ima_file_hash, 193, ##ctx) \
+ FN(kptr_xchg, 194, ##ctx) \
+ FN(map_lookup_percpu_elem, 195, ##ctx) \
+ FN(skc_to_mptcp_sock, 196, ##ctx) \
+ FN(dynptr_from_mem, 197, ##ctx) \
+ FN(ringbuf_reserve_dynptr, 198, ##ctx) \
+ FN(ringbuf_submit_dynptr, 199, ##ctx) \
+ FN(ringbuf_discard_dynptr, 200, ##ctx) \
+ FN(dynptr_read, 201, ##ctx) \
+ FN(dynptr_write, 202, ##ctx) \
+ FN(dynptr_data, 203, ##ctx) \
+ FN(tcp_raw_gen_syncookie_ipv4, 204, ##ctx) \
+ FN(tcp_raw_gen_syncookie_ipv6, 205, ##ctx) \
+ FN(tcp_raw_check_syncookie_ipv4, 206, ##ctx) \
+ FN(tcp_raw_check_syncookie_ipv6, 207, ##ctx) \
+ FN(ktime_get_tai_ns, 208, ##ctx) \
+ FN(user_ringbuf_drain, 209, ##ctx) \
+ FN(cgrp_storage_get, 210, ##ctx) \
+ FN(cgrp_storage_delete, 211, ##ctx) \
+ /* */
+
+/* backwards-compatibility macros for users of __BPF_FUNC_MAPPER that don't
+ * know or care about integer value that is now passed as second argument
+ */
+#define __BPF_FUNC_MAPPER_APPLY(name, value, FN) FN(name),
+#define __BPF_FUNC_MAPPER(FN) ___BPF_FUNC_MAPPER(__BPF_FUNC_MAPPER_APPLY, FN)
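The three-argument form lets a consumer see both the helper name and its numeric id in one pass. A hypothetical consumer (not part of this header) that builds a name table indexed by helper id could look like this, assuming it includes linux/bpf.h:

    /* hypothetical: map each helper id to its "bpf_<name>" string */
    #define BPF_HELPER_NAME(name, id) [id] = "bpf_" #name,

    static const char * const helper_names[] = {
        ___BPF_FUNC_MAPPER(BPF_HELPER_NAME)
    };

    #undef BPF_HELPER_NAME

Legacy users that only care about names can keep passing a one-argument macro to __BPF_FUNC_MAPPER(), which the compatibility wrapper above expands for them.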
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
*/
-#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
+#define __BPF_ENUM_FN(x, y) BPF_FUNC_ ## x = y,
enum bpf_func_id {
- __BPF_FUNC_MAPPER(__BPF_ENUM_FN)
+ ___BPF_FUNC_MAPPER(__BPF_ENUM_FN)
__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN
@@ -3211,6 +5838,12 @@ enum {
BPF_F_ZERO_CSUM_TX = (1ULL << 1),
BPF_F_DONT_FRAGMENT = (1ULL << 2),
BPF_F_SEQ_NUMBER = (1ULL << 3),
+ BPF_F_NO_TUNNEL_KEY = (1ULL << 4),
+};
+
+/* BPF_FUNC_skb_get_tunnel_key flags. */
+enum {
+ BPF_F_TUNINFO_FLAGS = (1ULL << 4),
};
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
@@ -3228,6 +5861,14 @@ enum {
BPF_F_CURRENT_NETNS = (-1L),
};
+/* BPF_FUNC_csum_level level values. */
+enum {
+ BPF_CSUM_LEVEL_QUERY,
+ BPF_CSUM_LEVEL_INC,
+ BPF_CSUM_LEVEL_DEC,
+ BPF_CSUM_LEVEL_RESET,
+};
+
/* BPF_FUNC_skb_adjust_room flags. */
enum {
BPF_F_ADJ_ROOM_FIXED_GSO = (1ULL << 0),
@@ -3235,6 +5876,10 @@ enum {
BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = (1ULL << 2),
BPF_F_ADJ_ROOM_ENCAP_L4_GRE = (1ULL << 3),
BPF_F_ADJ_ROOM_ENCAP_L4_UDP = (1ULL << 4),
+ BPF_F_ADJ_ROOM_NO_CSUM_RESET = (1ULL << 5),
+ BPF_F_ADJ_ROOM_ENCAP_L2_ETH = (1ULL << 6),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = (1ULL << 7),
+ BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = (1ULL << 8),
};
enum {
@@ -3251,9 +5896,13 @@ enum {
BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
};
-/* BPF_FUNC_sk_storage_get flags */
+/* BPF_FUNC_<kernel_obj>_storage_get flags */
enum {
- BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0),
+ BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),
+ /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
+ * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
+ */
+ BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
};
/* BPF_FUNC_read_branch_records flags. */
@@ -3261,6 +5910,35 @@ enum {
BPF_F_GET_BRANCH_RECORDS_SIZE = (1ULL << 0),
};
+/* BPF_FUNC_ringbuf_submit, BPF_FUNC_ringbuf_discard, and
+ * BPF_FUNC_ringbuf_output flags.
+ */
+enum {
+ BPF_RB_NO_WAKEUP = (1ULL << 0),
+ BPF_RB_FORCE_WAKEUP = (1ULL << 1),
+};
+
+/* BPF_FUNC_ringbuf_query flags */
+enum {
+ BPF_RB_AVAIL_DATA = 0,
+ BPF_RB_RING_SIZE = 1,
+ BPF_RB_CONS_POS = 2,
+ BPF_RB_PROD_POS = 3,
+};
+
+/* BPF ring buffer constants */
+enum {
+ BPF_RINGBUF_BUSY_BIT = (1U << 31),
+ BPF_RINGBUF_DISCARD_BIT = (1U << 30),
+ BPF_RINGBUF_HDR_SZ = 8,
+};
+
+/* BPF_FUNC_sk_assign flags in bpf_sk_lookup context. */
+enum {
+ BPF_SK_LOOKUP_F_REPLACE = (1ULL << 0),
+ BPF_SK_LOOKUP_F_NO_REUSEPORT = (1ULL << 1),
+};
+
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
@@ -3280,12 +5958,32 @@ enum bpf_lwt_encap_mode {
BPF_LWT_ENCAP_IP,
};
+/* Flags for bpf_bprm_opts_set helper */
+enum {
+ BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
+};
+
+/* Flags for bpf_redirect_map helper */
+enum {
+ BPF_F_BROADCAST = (1ULL << 3),
+ BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
+};
+
#define __bpf_md_ptr(type, name) \
union { \
type name; \
__u64 :64; \
} __attribute__((aligned(8)))
+enum {
+ BPF_SKB_TSTAMP_UNSPEC,
+ BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */
+ /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle,
+ * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC
+ * and try to deduce it by ingress, egress or skb->sk->sk_clockid.
+ */
+};
+
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -3326,6 +6024,9 @@ struct __sk_buff {
__u32 gso_segs;
__bpf_md_ptr(struct bpf_sock *, sk);
__u32 gso_size;
+ __u8 tstamp_type;
+ __u32 :24; /* Padding, future use. */
+ __u64 hwtstamp;
};
struct bpf_tunnel_key {
@@ -3336,8 +6037,15 @@ struct bpf_tunnel_key {
};
__u8 tunnel_tos;
__u8 tunnel_ttl;
- __u16 tunnel_ext; /* Padding, future use. */
+ union {
+ __u16 tunnel_ext; /* compat */
+ __be16 tunnel_flags;
+ };
__u32 tunnel_label;
+ union {
+ __u32 local_ipv4;
+ __u32 local_ipv6[4];
+ };
};
/* user accessible mirror of in-kernel xfrm_state.
@@ -3376,6 +6084,11 @@ enum bpf_ret_code {
* represented by BPF_REDIRECT above).
*/
BPF_LWT_REROUTE = 128,
+ /* BPF_FLOW_DISSECTOR_CONTINUE: used by BPF_PROG_TYPE_FLOW_DISSECTOR
+ * to indicate that no custom dissection was performed, and
+ * fallback to standard dissector is requested.
+ */
+ BPF_FLOW_DISSECTOR_CONTINUE = 129,
};
struct bpf_sock {
@@ -3389,10 +6102,12 @@ struct bpf_sock {
__u32 src_ip4;
__u32 src_ip6[4];
__u32 src_port; /* host byte order */
- __u32 dst_port; /* network byte order */
+ __be16 dst_port; /* network byte order */
+ __u16 :16; /* zero padding */
__u32 dst_ip4;
__u32 dst_ip6[4];
__u32 state;
+ __s32 rx_queue_mapping;
};
struct bpf_tcp_sock {
@@ -3486,6 +6201,34 @@ struct xdp_md {
/* Below access go through struct xdp_rxq_info */
__u32 ingress_ifindex; /* rxq->dev->ifindex */
__u32 rx_queue_index; /* rxq->queue_index */
+
+ __u32 egress_ifindex; /* txq->dev->ifindex */
+};
+
+/* DEVMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_devmap_val {
+ __u32 ifindex; /* device index */
+ union {
+ int fd; /* prog fd on map write */
+ __u32 id; /* prog id on map read */
+ } bpf_prog;
+};
+
+/* CPUMAP map-value layout
+ *
+ * The struct data-layout of map-value is a configuration interface.
+ * New members can only be added to the end of this structure.
+ */
+struct bpf_cpumap_val {
+ __u32 qsize; /* queue size to remote target CPU */
+ union {
+ int fd; /* prog fd on map write */
+ __u32 id; /* prog id on map read */
+ } bpf_prog;
};
enum sk_action {
@@ -3508,6 +6251,8 @@ struct sk_msg_md {
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
__u32 size; /* Total size of sk_msg */
+
+ __bpf_md_ptr(struct bpf_sock *, sk); /* current socket */
};
struct sk_reuseport_md {
@@ -3533,6 +6278,20 @@ struct sk_reuseport_md {
__u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
__u32 bind_inany; /* Is sock bound to an INANY address? */
__u32 hash; /* A hash of the packet 4 tuples */
+ /* When reuse->migrating_sk is NULL, it is selecting a sk for the
+ * new incoming connection request (e.g. selecting a listen sk for
+ * the received SYN in the TCP case). reuse->sk is one of the sk
+ * in the reuseport group. The bpf prog can use reuse->sk to learn
+ * the local listening ip/port without looking into the skb.
+ *
+ * When reuse->migrating_sk is not NULL, reuse->sk is closed and
+ * reuse->migrating_sk is the socket that needs to be migrated
+ * to another listening socket. migrating_sk could be a fullsock
+ * sk that is fully established or a reqsk that is in-the-middle
+ * of 3-way handshake.
+ */
+ __bpf_md_ptr(struct bpf_sock *, sk);
+ __bpf_md_ptr(struct bpf_sock *, migrating_sk);
};
#define BPF_TAG_SIZE 8
@@ -3573,6 +6332,10 @@ struct bpf_prog_info {
__aligned_u64 prog_tags;
__u64 run_time_ns;
__u64 run_cnt;
+ __u64 recursion_misses;
+ __u32 verified_insns;
+ __u32 attach_btf_obj_id;
+ __u32 attach_btf_id;
} __attribute__((aligned(8)));
struct bpf_map_info {
@@ -3590,17 +6353,83 @@ struct bpf_map_info {
__u32 btf_id;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
+ __u32 :32; /* alignment pad */
+ __u64 map_extra;
} __attribute__((aligned(8)));
struct bpf_btf_info {
__aligned_u64 btf;
__u32 btf_size;
__u32 id;
+ __aligned_u64 name;
+ __u32 name_len;
+ __u32 kernel_btf;
+} __attribute__((aligned(8)));
+
+struct bpf_link_info {
+ __u32 type;
+ __u32 id;
+ __u32 prog_id;
+ union {
+ struct {
+ __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
+ __u32 tp_name_len; /* in/out: tp_name buffer len */
+ } raw_tracepoint;
+ struct {
+ __u32 attach_type;
+ __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */
+ __u32 target_btf_id; /* BTF type id inside the object */
+ } tracing;
+ struct {
+ __u64 cgroup_id;
+ __u32 attach_type;
+ } cgroup;
+ struct {
+ __aligned_u64 target_name; /* in/out: target_name buffer ptr */
+ __u32 target_name_len; /* in/out: target_name buffer len */
+
+ /* If the iter specific field is 32 bits, it can be put
+ * in the first or second union. Otherwise it should be
+ * put in the second union.
+ */
+ union {
+ struct {
+ __u32 map_id;
+ } map;
+ };
+ union {
+ struct {
+ __u64 cgroup_id;
+ __u32 order;
+ } cgroup;
+ struct {
+ __u32 tid;
+ __u32 pid;
+ } task;
+ };
+ } iter;
+ struct {
+ __u32 netns_ino;
+ __u32 attach_type;
+ } netns;
+ struct {
+ __u32 ifindex;
+ } xdp;
+ struct {
+ __u32 map_id;
+ } struct_ops;
+ struct {
+ __u32 pf;
+ __u32 hooknum;
+ __s32 priority;
+ __u32 flags;
+ } netfilter;
+ };
} __attribute__((aligned(8)));
/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
* by user and intended to be used by socket (e.g. to bind to, depends on
- * attach attach type).
+ * attach type).
*/
struct bpf_sock_addr {
__u32 user_family; /* Allows 4-byte read, but no write. */
@@ -3610,7 +6439,7 @@ struct bpf_sock_addr {
__u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
* Stored in network byte order.
*/
- __u32 user_port; /* Allows 4-byte read and write.
+ __u32 user_port; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order
*/
__u32 family; /* Allows 4-byte read, but no write */
@@ -3675,6 +6504,37 @@ struct bpf_sock_ops {
__u64 bytes_received;
__u64 bytes_acked;
__bpf_md_ptr(struct bpf_sock *, sk);
+ /* [skb_data, skb_data_end) covers the whole TCP header.
+ *
+ * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
+ * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the
+ * header has not been written.
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
+ * been written so far.
+ * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes
+ * the 3WHS.
+ * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
+ * the 3WHS.
+ *
+ * bpf_load_hdr_opt() can also be used to read a particular option.
+ */
+ __bpf_md_ptr(void *, skb_data);
+ __bpf_md_ptr(void *, skb_data_end);
+ __u32 skb_len; /* The total length of a packet.
+ * It includes the header, options,
+ * and payload.
+ */
+ __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
+ * an easy way to check for tcp_flags
+ * without parsing skb_data.
+ *
+ * In particular, the skb_tcp_flags
+ * will still be available in
+ * BPF_SOCK_OPS_HDR_OPT_LEN even though
+ * the outgoing header has not
+ * been written yet.
+ */
+ __u64 skb_hwtstamp;
};
/* Definitions for bpf_sock_ops_cb_flags */
@@ -3683,8 +6543,51 @@ enum {
BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+ /* Call bpf for all received TCP headers. The bpf prog will be
+ * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ *
+ * It could be used at the client/active side (i.e. connect() side)
+ * when the server told it that the server was in syncookie
+ * mode and required the active side to resend the bpf-written
+ * options. The active side can keep writing the bpf-options until
+ * it received a valid packet from the server side to confirm
+ * the earlier packet (and options) has been received. The later
+ * example patch is using it like this at the active side when the
+ * server is in syncookie mode.
+ *
+ * The bpf prog will usually turn this off in the common cases.
+ */
+ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
+ /* Call bpf when kernel has received a header option that
+ * the kernel cannot handle. The bpf prog will be called under
+ * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ */
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
+ /* Call bpf when the kernel is writing header options for the
+ * outgoing packet. The bpf prog will first be called
+ * to reserve space in a skb under
+ * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
+ * the bpf prog will be called to write the header option(s)
+ * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
+ * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
+ * related helpers that will be useful to the bpf programs.
+ *
+ * The kernel gets its chance to reserve space and write
+ * options first before the BPF program does.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
/* Mask of all currently supported cb flags */
- BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF,
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
};
/* List of known BPF sock_ops operators.
@@ -3740,6 +6643,63 @@ enum {
*/
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
*/
+ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
+ * It will be called to handle
+ * the packets received at
+ * an already established
+ * connection.
+ *
+ * sock_ops->skb_data:
+ * Referring to the received skb.
+ * It covers the TCP header only.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option.
+ */
+ BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
+ * header option later in
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Not available because no header has
+ * been written yet.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the
+ * outgoing skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_reserve_hdr_opt() should
+ * be used to reserve space.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Referring to the outgoing skb.
+ * It covers the TCP header
+ * that has already been written
+ * by the kernel and the
+ * earlier bpf-progs.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the outgoing
+ * skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_store_hdr_opt() should
+ * be used to write the
+ * option.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option that
+ * has already been written
+ * by the kernel or the
+ * earlier bpf-progs.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -3767,6 +6727,63 @@ enum {
enum {
TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+ TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
+ TCP_BPF_RTO_MIN = 1004, /* Min RTO in usecs */
+ /* Copy the SYN pkt to optval
+ *
+ * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the
+ * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit
+ * to only getting from the saved_syn. It can either get the
+ * syn packet from:
+ *
+ * 1. the just-received SYN packet (only available when writing the
+ * SYNACK). It will be useful when it is not necessary to
+ * save the SYN packet for later use. It is also the only way
+ * to get the SYN during syncookie mode because the syn
+ * packet cannot be saved during syncookie.
+ *
+ * OR
+ *
+ * 2. the earlier saved syn which was done by
+ * bpf_setsockopt(TCP_SAVE_SYN).
+ *
+ * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
+ * SYN packet is obtained.
+ *
+ * If the bpf-prog does not need the IP[46] header, the
+ * bpf-prog can avoid parsing the IP header by using
+ * TCP_BPF_SYN. Otherwise, the bpf-prog can get both
+ * IP[46] and TCP header by using TCP_BPF_SYN_IP.
+ *
+ * >0: Total number of bytes copied
+ * -ENOSPC: Not enough space in optval. Only optlen bytes
+ * are copied.
+ * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
+ * is not saved by setsockopt(TCP_SAVE_SYN).
+ */
+ TCP_BPF_SYN = 1005, /* Copy the TCP header */
+ TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
+ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
+};
+
+enum {
+ BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
+};
+
+/* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ */
+enum {
+ BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
+ * total option spaces
+ * required for an established
+ * sk in order to calculate the
+ * MSS. No skb is actually
+ * sent.
+ */
+ BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
+ * when sending a SYN.
+ */
};
struct bpf_perf_event_value {
@@ -3803,6 +6820,7 @@ struct bpf_raw_tracepoint_args {
enum {
BPF_FIB_LOOKUP_DIRECT = (1U << 0),
BPF_FIB_LOOKUP_OUTPUT = (1U << 1),
+ BPF_FIB_LOOKUP_SKIP_NEIGH = (1U << 2),
};
enum {
@@ -3828,9 +6846,13 @@ struct bpf_fib_lookup {
__be16 sport;
__be16 dport;
- /* total length of packet from network header - used for MTU check */
- __u16 tot_len;
+ union { /* used for MTU check */
+ /* input to lookup */
+ __u16 tot_len; /* L3 length from network hdr (iph->tot_len) */
+ /* output: MTU value */
+ __u16 mtu_result;
+ };
/* input: L3 device index for lookup
* output: device index from FIB lookup
*/
@@ -3866,6 +6888,27 @@ struct bpf_fib_lookup {
__u8 dmac[6]; /* ETH_ALEN */
};
+struct bpf_redir_neigh {
+ /* network family for lookup (AF_INET, AF_INET6) */
+ __u32 nh_family;
+ /* network address of nexthop; skips fib lookup to find gateway */
+ union {
+ __be32 ipv4_nh;
+ __u32 ipv6_nh[4]; /* in6_addr; network order */
+ };
+};
+
+/* bpf_check_mtu flags */
+enum bpf_check_mtu_flags {
+ BPF_MTU_CHK_SEGS = (1U << 0),
+};
+
+enum bpf_check_mtu_ret {
+ BPF_MTU_CHK_RET_SUCCESS, /* check and lookup successful */
+ BPF_MTU_CHK_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+ BPF_MTU_CHK_RET_SEGS_TOOBIG, /* GSO re-segmentation needed to fwd */
+};
+
enum bpf_task_fd_type {
BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */
BPF_FD_TYPE_TRACEPOINT, /* tp name */
@@ -3925,6 +6968,41 @@ struct bpf_spin_lock {
__u32 val;
};
+struct bpf_timer {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_dynptr {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_list_head {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_list_node {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_rb_root {
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_rb_node {
+ __u64 :64;
+ __u64 :64;
+ __u64 :64;
+} __attribute__((aligned(8)));
+
+struct bpf_refcount {
+ __u32 :32;
+} __attribute__((aligned(4)));
+
struct bpf_sysctl {
__u32 write; /* Sysctl is being read (= 0) or written (= 1).
* Allows 1,2,4-byte read, but no write.
@@ -3949,4 +7027,146 @@ struct bpf_pidns_info {
__u32 pid;
__u32 tgid;
};
+
+/* User accessible data for SK_LOOKUP programs. Add new fields at the end. */
+struct bpf_sk_lookup {
+ union {
+ __bpf_md_ptr(struct bpf_sock *, sk); /* Selected socket */
+ __u64 cookie; /* Non-zero if socket was selected in PROG_TEST_RUN */
+ };
+
+ __u32 family; /* Protocol family (AF_INET, AF_INET6) */
+ __u32 protocol; /* IP protocol (IPPROTO_TCP, IPPROTO_UDP) */
+ __u32 remote_ip4; /* Network byte order */
+ __u32 remote_ip6[4]; /* Network byte order */
+ __be16 remote_port; /* Network byte order */
+ __u16 :16; /* Zero padding */
+ __u32 local_ip4; /* Network byte order */
+ __u32 local_ip6[4]; /* Network byte order */
+ __u32 local_port; /* Host byte order */
+ __u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
+};
+
+/*
+ * struct btf_ptr is used for typed pointer representation; the
+ * type id is used to render the pointer data as the appropriate type
+ * via the bpf_snprintf_btf() helper described above. A flags field -
+ * potentially to specify additional details about the BTF pointer
+ * (rather than its mode of display) - is included for future use.
+ * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
+ */
+struct btf_ptr {
+ void *ptr;
+ __u32 type_id;
+ __u32 flags; /* BTF ptr flags; unused at present. */
+};
+
+/*
+ * Flags to control bpf_snprintf_btf() behaviour.
+ * - BTF_F_COMPACT: no formatting around type information
+ * - BTF_F_NONAME: no struct/union member names/types
+ * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_F_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ */
+enum {
+ BTF_F_COMPACT = (1ULL << 0),
+ BTF_F_NONAME = (1ULL << 1),
+ BTF_F_PTR_RAW = (1ULL << 2),
+ BTF_F_ZERO = (1ULL << 3),
+};
+
+/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
+ * has to be adjusted by relocations. It is emitted by llvm and passed to
+ * libbpf and later to the kernel.
+ */
+enum bpf_core_relo_kind {
+ BPF_CORE_FIELD_BYTE_OFFSET = 0, /* field byte offset */
+ BPF_CORE_FIELD_BYTE_SIZE = 1, /* field size in bytes */
+ BPF_CORE_FIELD_EXISTS = 2, /* field existence in target kernel */
+ BPF_CORE_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
+ BPF_CORE_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
+ BPF_CORE_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
+ BPF_CORE_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
+ BPF_CORE_TYPE_ID_TARGET = 7, /* type ID in target kernel */
+ BPF_CORE_TYPE_EXISTS = 8, /* type existence in target kernel */
+ BPF_CORE_TYPE_SIZE = 9, /* type size in bytes */
+ BPF_CORE_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
+ BPF_CORE_ENUMVAL_VALUE = 11, /* enum value integer value */
+ BPF_CORE_TYPE_MATCHES = 12, /* type match in target kernel */
+};
+
+/*
+ * "struct bpf_core_relo" is used to pass relocation data form LLVM to libbpf
+ * and from libbpf to the kernel.
+ *
+ * CO-RE relocation captures the following data:
+ * - insn_off - instruction offset (in bytes) within a BPF program that needs
+ * its insn->imm field to be relocated with actual field info;
+ * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
+ * type or field;
+ * - access_str_off - offset into corresponding .BTF string section. String
+ * interpretation depends on specific relocation kind:
+ * - for field-based relocations, string encodes an accessed field using
+ * a sequence of field and array indices, separated by colon (:). It's
+ * conceptually very close to LLVM's getelementptr ([0]) instruction's
+ * arguments for identifying offset to a field.
+ * - for type-based relocations, the string is expected to be just "0";
+ * - for enum value-based relocations, string contains an index of enum
+ * value within its enum type;
+ * - kind - one of enum bpf_core_relo_kind;
+ *
+ * Example:
+ * struct sample {
+ * int a;
+ * struct {
+ * int b[10];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ * int *x = &s->a; // encoded as "0:0" (a is field #0)
+ * int *y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
+ * // b is field #0 inside anon struct, accessing elem #5)
+ * int *z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
+ *
+ * type_id for all relocs in this example will capture BTF type id of
+ * `struct sample`.
+ *
+ * Such relocation is emitted when using __builtin_preserve_access_index()
+ * Clang built-in, passing expression that captures field address, e.g.:
+ *
+ * bpf_probe_read(&dst, sizeof(dst),
+ * __builtin_preserve_access_index(&src->a.b.c));
+ *
+ * In this case Clang will emit field relocation recording necessary data to
+ * be able to find offset of embedded `a.b.c` field within `src` struct.
+ *
+ * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
+ */
+struct bpf_core_relo {
+ __u32 insn_off;
+ __u32 type_id;
+ __u32 access_str_off;
+ enum bpf_core_relo_kind kind;
+};
+
+/*
+ * Flags to control bpf_timer_start() behaviour.
+ * - BPF_F_TIMER_ABS: Timeout passed is absolute time, by default it is
+ * relative to current time.
+ */
+enum {
+ BPF_F_TIMER_ABS = (1ULL << 0),
+};
+
+/* BPF numbers iterator state */
+struct bpf_iter_num {
+ /* opaque iterator state; having __u64 here preserves correct
+ * alignment requirements in vmlinux.h, generated from BTF
+ */
+ __u64 __opaque[1];
+} __attribute__((aligned(8)));
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
index 8f95303f9d80..eb1b9d21250c 100644
--- a/tools/include/uapi/linux/bpf_perf_event.h
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -13,6 +13,7 @@
struct bpf_perf_event_data {
bpf_user_pt_regs_t regs;
__u64 sample_period;
+ __u64 addr;
};
#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
diff --git a/tools/include/uapi/linux/btf.h b/tools/include/uapi/linux/btf.h
index 5a667107ad2c..ec1798b6d3ff 100644
--- a/tools/include/uapi/linux/btf.h
+++ b/tools/include/uapi/linux/btf.h
@@ -33,17 +33,17 @@ struct btf_type {
/* "info" bits arrangement
* bits 0-15: vlen (e.g. # of struct's members)
* bits 16-23: unused
- * bits 24-27: kind (e.g. int, ptr, array...etc)
- * bits 28-30: unused
+ * bits 24-28: kind (e.g. int, ptr, array...etc)
+ * bits 29-30: unused
* bit 31: kind_flag, currently used by
- * struct, union and fwd
+ * struct, union, enum, fwd and enum64
*/
__u32 info;
- /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC.
+ /* "size" is used by INT, ENUM, STRUCT, UNION, DATASEC and ENUM64.
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
- * FUNC, FUNC_PROTO and VAR.
+ * FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@@ -52,28 +52,35 @@ struct btf_type {
};
};
-#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f)
+#define BTF_INFO_KIND(info) (((info) >> 24) & 0x1f)
#define BTF_INFO_VLEN(info) ((info) & 0xffff)
#define BTF_INFO_KFLAG(info) ((info) >> 31)
-#define BTF_KIND_UNKN 0 /* Unknown */
-#define BTF_KIND_INT 1 /* Integer */
-#define BTF_KIND_PTR 2 /* Pointer */
-#define BTF_KIND_ARRAY 3 /* Array */
-#define BTF_KIND_STRUCT 4 /* Struct */
-#define BTF_KIND_UNION 5 /* Union */
-#define BTF_KIND_ENUM 6 /* Enumeration */
-#define BTF_KIND_FWD 7 /* Forward */
-#define BTF_KIND_TYPEDEF 8 /* Typedef */
-#define BTF_KIND_VOLATILE 9 /* Volatile */
-#define BTF_KIND_CONST 10 /* Const */
-#define BTF_KIND_RESTRICT 11 /* Restrict */
-#define BTF_KIND_FUNC 12 /* Function */
-#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
-#define BTF_KIND_VAR 14 /* Variable */
-#define BTF_KIND_DATASEC 15 /* Section */
-#define BTF_KIND_MAX BTF_KIND_DATASEC
-#define NR_BTF_KINDS (BTF_KIND_MAX + 1)
+enum {
+ BTF_KIND_UNKN = 0, /* Unknown */
+ BTF_KIND_INT = 1, /* Integer */
+ BTF_KIND_PTR = 2, /* Pointer */
+ BTF_KIND_ARRAY = 3, /* Array */
+ BTF_KIND_STRUCT = 4, /* Struct */
+ BTF_KIND_UNION = 5, /* Union */
+ BTF_KIND_ENUM = 6, /* Enumeration up to 32-bit values */
+ BTF_KIND_FWD = 7, /* Forward */
+ BTF_KIND_TYPEDEF = 8, /* Typedef */
+ BTF_KIND_VOLATILE = 9, /* Volatile */
+ BTF_KIND_CONST = 10, /* Const */
+ BTF_KIND_RESTRICT = 11, /* Restrict */
+ BTF_KIND_FUNC = 12, /* Function */
+ BTF_KIND_FUNC_PROTO = 13, /* Function Proto */
+ BTF_KIND_VAR = 14, /* Variable */
+ BTF_KIND_DATASEC = 15, /* Section */
+ BTF_KIND_FLOAT = 16, /* Floating point */
+ BTF_KIND_DECL_TAG = 17, /* Decl Tag */
+ BTF_KIND_TYPE_TAG = 18, /* Type Tag */
+ BTF_KIND_ENUM64 = 19, /* Enumeration up to 64-bit values */
+
+ NR_BTF_KINDS,
+ BTF_KIND_MAX = NR_BTF_KINDS - 1,
+};
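With the kind field widened to five bits, decoding "info" still goes through the same macros; a small user-space sketch:

#include <stdio.h>
#include <linux/btf.h>

/* Print the components of a btf_type's info word: kind now lives in
 * bits 24-28, vlen in bits 0-15, kind_flag in bit 31.
 */
static void dump_info(const struct btf_type *t)
{
	printf("kind=%u vlen=%u kflag=%u\n",
	       BTF_INFO_KIND(t->info),
	       BTF_INFO_VLEN(t->info),
	       BTF_INFO_KFLAG(t->info));
}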
/* For some specific BTF_KIND, "struct btf_type" is immediately
* followed by extra data.
@@ -169,4 +176,25 @@ struct btf_var_secinfo {
__u32 size;
};
+/* BTF_KIND_DECL_TAG is followed by a single "struct btf_decl_tag" to describe
+ * additional information related to the tag applied location.
+ * If component_idx == -1, the tag is applied to a struct, union,
+ * variable or function. Otherwise, it is applied to a struct/union
+ * member or a func argument, and component_idx indicates which member
+ * or argument (0 ... vlen-1).
+ */
+struct btf_decl_tag {
+ __s32 component_idx;
+};
+
+/* BTF_KIND_ENUM64 is followed by multiple "struct btf_enum64".
+ * The exact number of btf_enum64 is stored in the vlen (of the
+ * info in "struct btf_type").
+ */
+struct btf_enum64 {
+ __u32 name_off;
+ __u32 val_lo32;
+ __u32 val_hi32;
+};
+
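A hedged sketch of reassembling the 64-bit enumerator value from the two halves:

static inline __u64 enum64_value(const struct btf_enum64 *e)
{
	/* val_hi32 holds the upper 32 bits, val_lo32 the lower 32 bits */
	return ((__u64)e->val_hi32 << 32) | e->val_lo32;
}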
#endif /* _UAPI__LINUX_BTF_H__ */
diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h
index 5ed721ad5b19..af2a44c08683 100644
--- a/tools/include/uapi/linux/const.h
+++ b/tools/include/uapi/linux/const.h
@@ -28,4 +28,9 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
+
+#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+
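Worked examples of the two helpers, for reference:

/*   __ALIGN_KERNEL(13, 8)        == (13 + 7) & ~7 == 16
 *   __KERNEL_DIV_ROUND_UP(13, 8) == (13 + 7) / 8  == 2
 */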
#endif /* _UAPI_LINUX_CONST_H */
diff --git a/tools/include/uapi/linux/ethtool.h b/tools/include/uapi/linux/ethtool.h
index c86c3e942df9..47afae3895ec 100644
--- a/tools/include/uapi/linux/ethtool.h
+++ b/tools/include/uapi/linux/ethtool.h
@@ -48,4 +48,57 @@ struct ethtool_channels {
__u32 combined_count;
};
+#define ETHTOOL_FWVERS_LEN 32
+#define ETHTOOL_BUSINFO_LEN 32
+#define ETHTOOL_EROMVERS_LEN 32
+
+/**
+ * struct ethtool_drvinfo - general driver and device information
+ * @cmd: Command number = %ETHTOOL_GDRVINFO
+ * @driver: Driver short name. This should normally match the name
+ * in its bus driver structure (e.g. pci_driver::name). Must
+ * not be an empty string.
+ * @version: Driver version string; may be an empty string
+ * @fw_version: Firmware version string; may be an empty string
+ * @erom_version: Expansion ROM version string; may be an empty string
+ * @bus_info: Device bus address. This should match the dev_name()
+ * string for the underlying bus device, if there is one. May be
+ * an empty string.
+ * @reserved2: Reserved for future use; see the note on reserved space.
+ * @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
+ * %ETHTOOL_SPFLAGS commands; also the number of strings in the
+ * %ETH_SS_PRIV_FLAGS set
+ * @n_stats: Number of u64 statistics returned by the %ETHTOOL_GSTATS
+ * command; also the number of strings in the %ETH_SS_STATS set
+ * @testinfo_len: Number of results returned by the %ETHTOOL_TEST
+ * command; also the number of strings in the %ETH_SS_TEST set
+ * @eedump_len: Size of EEPROM accessible through the %ETHTOOL_GEEPROM
+ * and %ETHTOOL_SEEPROM commands, in bytes
+ * @regdump_len: Size of register dump returned by the %ETHTOOL_GREGS
+ * command, in bytes
+ *
+ * Users can use the %ETHTOOL_GSSET_INFO command to get the number of
+ * strings in any string set (from Linux 2.6.34).
+ *
+ * Drivers should set at most @driver, @version, @fw_version and
+ * @bus_info in their get_drvinfo() implementation. The ethtool
+ * core fills in the other fields using other driver operations.
+ */
+struct ethtool_drvinfo {
+ __u32 cmd;
+ char driver[32];
+ char version[32];
+ char fw_version[ETHTOOL_FWVERS_LEN];
+ char bus_info[ETHTOOL_BUSINFO_LEN];
+ char erom_version[ETHTOOL_EROMVERS_LEN];
+ char reserved2[12];
+ __u32 n_priv_flags;
+ __u32 n_stats;
+ __u32 testinfo_len;
+ __u32 eedump_len;
+ __u32 regdump_len;
+};
+
+#define ETHTOOL_GDRVINFO 0x00000003
+
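For illustration, a hedged user-space sketch that issues ETHTOOL_GDRVINFO through the SIOCETHTOOL ioctl (the interface name "eth0" is an example, error handling elided):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&info;
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("%s %s (bus %s)\n", info.driver, info.version, info.bus_info);
	return 0;
}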
#endif /* _UAPI_LINUX_ETHTOOL_H */
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index ca88b7bce553..e8c07da58c9f 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -43,6 +43,7 @@
#define F_SEAL_GROW 0x0004 /* prevent file from growing */
#define F_SEAL_WRITE 0x0008 /* prevent writes */
#define F_SEAL_FUTURE_WRITE 0x0010 /* prevent future writes while mapped */
+#define F_SEAL_EXEC 0x0020 /* prevent chmod modifying exec bits */
/* (1U << 31) is reserved for signed error codes */
/*
@@ -84,10 +85,20 @@
#define DN_ATTRIB 0x00000020 /* File changed attributes */
#define DN_MULTISHOT 0x80000000 /* Don't remove notifier */
+/*
+ * The constants AT_REMOVEDIR and AT_EACCESS have the same value. AT_EACCESS is
+ * meaningful only to faccessat, while AT_REMOVEDIR is meaningful only to
+ * unlinkat. The two functions do completely different things and therefore,
+ * the flags can be allowed to overlap. For example, passing AT_REMOVEDIR to
+ * faccessat would be undefined behavior and thus treating it equivalent to
+ * AT_EACCESS is valid undefined behavior.
+ */
#define AT_FDCWD -100 /* Special value used to indicate
openat should use the current
working directory. */
#define AT_SYMLINK_NOFOLLOW 0x100 /* Do not follow symbolic links. */
+#define AT_EACCESS 0x200 /* Test access permitted for
+ effective IDs, not real IDs. */
#define AT_REMOVEDIR 0x200 /* Remove directory instead of
unlinking file. */
#define AT_SYMLINK_FOLLOW 0x400 /* Follow symbolic links. */
diff --git a/tools/include/uapi/linux/filter.h b/tools/include/uapi/linux/filter.h
new file mode 100644
index 000000000000..eaef459e7bd4
--- /dev/null
+++ b/tools/include/uapi/linux/filter.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Linux Socket Filter Data Structures
+ */
+
+#ifndef __LINUX_FILTER_H__
+#define __LINUX_FILTER_H__
+
+
+#include <linux/types.h>
+#include <linux/bpf_common.h>
+
+/*
+ * Current version of the filter code architecture.
+ */
+#define BPF_MAJOR_VERSION 1
+#define BPF_MINOR_VERSION 1
+
+/*
+ * Try and keep these values and structures similar to BSD, especially
+ * the BPF code definitions which need to match so you can share filters
+ */
+
+struct sock_filter { /* Filter block */
+ __u16 code; /* Actual filter code */
+ __u8 jt; /* Jump true */
+ __u8 jf; /* Jump false */
+ __u32 k; /* Generic multiuse field */
+};
+
+struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
+ unsigned short len; /* Number of filter blocks */
+ struct sock_filter *filter;
+};
+
+/* ret - BPF_K and BPF_X also apply */
+#define BPF_RVAL(code) ((code) & 0x18)
+#define BPF_A 0x10
+
+/* misc */
+#define BPF_MISCOP(code) ((code) & 0xf8)
+#define BPF_TAX 0x00
+#define BPF_TXA 0x80
+
+/*
+ * Macros for filter block array initializers.
+ */
+#ifndef BPF_STMT
+#define BPF_STMT(code, k) { (unsigned short)(code), 0, 0, k }
+#endif
+#ifndef BPF_JUMP
+#define BPF_JUMP(code, k, jt, jf) { (unsigned short)(code), jt, jf, k }
+#endif
+
+/*
+ * Number of scratch memory words for: BPF_ST and BPF_STX
+ */
+#define BPF_MEMWORDS 16
+
+/* RATIONALE. Negative offsets are invalid in BPF.
+ We use them to reference ancillary data.
+ Unlike introducing new instructions, it does not break
+ existing compilers/optimizers.
+ */
+#define SKF_AD_OFF (-0x1000)
+#define SKF_AD_PROTOCOL 0
+#define SKF_AD_PKTTYPE 4
+#define SKF_AD_IFINDEX 8
+#define SKF_AD_NLATTR 12
+#define SKF_AD_NLATTR_NEST 16
+#define SKF_AD_MARK 20
+#define SKF_AD_QUEUE 24
+#define SKF_AD_HATYPE 28
+#define SKF_AD_RXHASH 32
+#define SKF_AD_CPU 36
+#define SKF_AD_ALU_XOR_X 40
+#define SKF_AD_VLAN_TAG 44
+#define SKF_AD_VLAN_TAG_PRESENT 48
+#define SKF_AD_PAY_OFFSET 52
+#define SKF_AD_RANDOM 56
+#define SKF_AD_VLAN_TPID 60
+#define SKF_AD_MAX 64
+
+#define SKF_NET_OFF (-0x100000)
+#define SKF_LL_OFF (-0x200000)
+
+#define BPF_NET_OFF SKF_NET_OFF
+#define BPF_LL_OFF SKF_LL_OFF
+
+#endif /* __LINUX_FILTER_H__ */
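A minimal sketch of the classic-BPF macros in use: a one-instruction filter that accepts every packet, attached with SO_ATTACH_FILTER (the socket fd is assumed to exist; error handling elided):

#include <sys/socket.h>
#include <linux/filter.h>

static int attach_accept_all(int sock)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept up to 0xffff bytes */
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	return setsockopt(sock, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
}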
diff --git a/tools/include/uapi/linux/fs.h b/tools/include/uapi/linux/fs.h
index 379a612f8f1d..b7b56871029c 100644
--- a/tools/include/uapi/linux/fs.h
+++ b/tools/include/uapi/linux/fs.h
@@ -90,7 +90,7 @@ struct file_dedupe_range {
__u16 dest_count; /* in - total elements in info array */
__u16 reserved1; /* must be zero */
__u32 reserved2; /* must be zero */
- struct file_dedupe_range_info info[0];
+ struct file_dedupe_range_info info[];
};
/* And dynamically-tunable limits and defaults: */
@@ -184,8 +184,9 @@ struct fsxattr {
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
+#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
/*
- * A jump here: 130-131 are reserved for zoned block devices
+ * A jump here: 130-136 are reserved for zoned block devices
* (see uapi/linux/blkzoned.h)
*/
@@ -262,6 +263,7 @@ struct fsxattr {
#define FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */
#define FS_EOFBLOCKS_FL 0x00400000 /* Reserved for ext4 */
#define FS_NOCOW_FL 0x00800000 /* Do not cow file */
+#define FS_DAX_FL 0x02000000 /* Inode is DAX */
#define FS_INLINE_DATA_FL 0x10000000 /* Reserved for ext4 */
#define FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
#define FS_CASEFOLD_FL 0x40000000 /* Folder is case insensitive */
diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
index 0d8a6f47711c..fd1fb0d5389d 100644
--- a/tools/include/uapi/linux/fscrypt.h
+++ b/tools/include/uapi/linux/fscrypt.h
@@ -19,15 +19,18 @@
#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03
#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08
-#define FSCRYPT_POLICY_FLAGS_VALID 0x0F
+#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32 0x10
/* Encryption algorithms */
#define FSCRYPT_MODE_AES_256_XTS 1
#define FSCRYPT_MODE_AES_256_CTS 4
#define FSCRYPT_MODE_AES_128_CBC 5
#define FSCRYPT_MODE_AES_128_CTS 6
+#define FSCRYPT_MODE_SM4_XTS 7
+#define FSCRYPT_MODE_SM4_CTS 8
#define FSCRYPT_MODE_ADIANTUM 9
-#define __FSCRYPT_MODE_MAX 9
+#define FSCRYPT_MODE_AES_256_HCTR2 10
+/* If adding a mode number > 10, update FSCRYPT_MODE_MAX in fscrypt_private.h */
/*
* Legacy policy version; ad-hoc KDF and no key verification.
@@ -44,7 +47,6 @@ struct fscrypt_policy_v1 {
__u8 flags;
__u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
};
-#define fscrypt_policy fscrypt_policy_v1
/*
* Process-subscribed "logon" key description prefix and payload format.
@@ -155,19 +157,21 @@ struct fscrypt_get_key_status_arg {
__u32 __out_reserved[13];
};
-#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy_v1)
#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
-#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy_v1)
#define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */
#define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg)
#define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg)
+#define FS_IOC_GET_ENCRYPTION_NONCE _IOR('f', 27, __u8[16])
/**********************************************************************/
/* old names; don't add anything new here! */
#ifndef __KERNEL__
+#define fscrypt_policy fscrypt_policy_v1
#define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE
#define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4
#define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8
@@ -175,7 +179,7 @@ struct fscrypt_get_key_status_arg {
#define FS_POLICY_FLAGS_PAD_32 FSCRYPT_POLICY_FLAGS_PAD_32
#define FS_POLICY_FLAGS_PAD_MASK FSCRYPT_POLICY_FLAGS_PAD_MASK
#define FS_POLICY_FLAG_DIRECT_KEY FSCRYPT_POLICY_FLAG_DIRECT_KEY
-#define FS_POLICY_FLAGS_VALID FSCRYPT_POLICY_FLAGS_VALID
+#define FS_POLICY_FLAGS_VALID 0x07 /* contains old flags only */
#define FS_ENCRYPTION_MODE_INVALID 0 /* never used */
#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS
#define FS_ENCRYPTION_MODE_AES_256_GCM 2 /* never used */
@@ -183,8 +187,6 @@ struct fscrypt_get_key_status_arg {
#define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS
#define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC
#define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* removed */
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* removed */
#define FS_ENCRYPTION_MODE_ADIANTUM FSCRYPT_MODE_ADIANTUM
#define FS_KEY_DESC_PREFIX FSCRYPT_KEY_DESC_PREFIX
#define FS_KEY_DESC_PREFIX_SIZE FSCRYPT_KEY_DESC_PREFIX_SIZE
diff --git a/tools/include/uapi/linux/hw_breakpoint.h b/tools/include/uapi/linux/hw_breakpoint.h
index 965e4d8606d8..1575d3ca6f0d 100644
--- a/tools/include/uapi/linux/hw_breakpoint.h
+++ b/tools/include/uapi/linux/hw_breakpoint.h
@@ -22,14 +22,4 @@ enum {
HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
};
-enum bp_type_idx {
- TYPE_INST = 0,
-#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
- TYPE_DATA = 0,
-#else
- TYPE_DATA = 1,
-#endif
- TYPE_MAX
-};
-
#endif /* _UAPI_LINUX_HW_BREAKPOINT_H */
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index ca6665ea758a..39e659c83cfd 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -7,24 +7,23 @@
/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
- __u32 rx_packets; /* total packets received */
- __u32 tx_packets; /* total packets transmitted */
- __u32 rx_bytes; /* total bytes received */
- __u32 tx_bytes; /* total bytes transmitted */
- __u32 rx_errors; /* bad packets received */
- __u32 tx_errors; /* packet transmit problems */
- __u32 rx_dropped; /* no space in linux buffers */
- __u32 tx_dropped; /* no space available in linux */
- __u32 multicast; /* multicast packets received */
+ __u32 rx_packets;
+ __u32 tx_packets;
+ __u32 rx_bytes;
+ __u32 tx_bytes;
+ __u32 rx_errors;
+ __u32 tx_errors;
+ __u32 rx_dropped;
+ __u32 tx_dropped;
+ __u32 multicast;
__u32 collisions;
-
/* detailed rx_errors: */
__u32 rx_length_errors;
- __u32 rx_over_errors; /* receiver ring buff overflow */
- __u32 rx_crc_errors; /* recved pkt with crc error */
- __u32 rx_frame_errors; /* recv'd frame alignment error */
- __u32 rx_fifo_errors; /* recv'r fifo overrun */
- __u32 rx_missed_errors; /* receiver missed packet */
+ __u32 rx_over_errors;
+ __u32 rx_crc_errors;
+ __u32 rx_frame_errors;
+ __u32 rx_fifo_errors;
+ __u32 rx_missed_errors;
/* detailed tx_errors */
__u32 tx_aborted_errors;
@@ -37,29 +36,201 @@ struct rtnl_link_stats {
__u32 rx_compressed;
__u32 tx_compressed;
- __u32 rx_nohandler; /* dropped, no handler found */
+ __u32 rx_nohandler;
};
-/* The main device statistics structure */
+/**
+ * struct rtnl_link_stats64 - The main device statistics structure.
+ *
+ * @rx_packets: Number of good packets received by the interface.
+ * For hardware interfaces counts all good packets received from the device
+ * by the host, including packets which host had to drop at various stages
+ * of processing (even in the driver).
+ *
+ * @tx_packets: Number of packets successfully transmitted.
+ * For hardware interfaces counts packets which host was able to successfully
+ * hand over to the device, which does not necessarily mean that packets
+ * had been successfully transmitted out of the device, only that device
+ * acknowledged it copied them out of host memory.
+ *
+ * @rx_bytes: Number of good received bytes, corresponding to @rx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @rx_errors: Total number of bad packets received on this network device.
+ * This counter must include events counted by @rx_length_errors,
+ * @rx_crc_errors, @rx_frame_errors and other errors not otherwise
+ * counted.
+ *
+ * @tx_errors: Total number of transmit problems.
+ * This counter must include events counted by @tx_aborted_errors,
+ * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors,
+ * @tx_window_errors and other errors not otherwise counted.
+ *
+ * @rx_dropped: Number of packets received but not processed,
+ * e.g. due to lack of resources or unsupported protocol.
+ * For hardware interfaces this counter may include packets discarded
+ * due to L2 address filtering but should not include packets dropped
+ * by the device due to buffer exhaustion which are counted separately in
+ * @rx_missed_errors (since procfs folds those two counters together).
+ *
+ * @tx_dropped: Number of packets dropped on their way to transmission,
+ * e.g. due to lack of resources.
+ *
+ * @multicast: Multicast packets received.
+ * For hardware interfaces this statistic is commonly calculated
+ * at the device level (unlike @rx_packets) and therefore may include
+ * packets which did not reach the host.
+ *
+ * For IEEE 802.3 devices this counter may be equivalent to:
+ *
+ * - 30.3.1.1.21 aMulticastFramesReceivedOK
+ *
+ * @collisions: Number of collisions during packet transmissions.
+ *
+ * @rx_length_errors: Number of packets dropped due to invalid length.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to a sum
+ * of the following attributes:
+ *
+ * - 30.3.1.1.23 aInRangeLengthErrors
+ * - 30.3.1.1.24 aOutOfRangeLengthField
+ * - 30.3.1.1.25 aFrameTooLongErrors
+ *
+ * @rx_over_errors: Receiver FIFO overflow event counter.
+ *
+ * Historically the count of overflow events. Such events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * The recommended interpretation for high speed interfaces is -
+ * number of packets dropped because they did not fit into buffers
+ * provided by the host, e.g. packets larger than MTU or next buffer
+ * in the ring was not available for a scatter transfer.
+ *
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * This statistic was historically used interchangeably with
+ * @rx_fifo_errors.
+ *
+ * This statistic corresponds to hardware events and is not commonly used
+ * on software devices.
+ *
+ * @rx_crc_errors: Number of packets received with a CRC error.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.6 aFrameCheckSequenceErrors
+ *
+ * @rx_frame_errors: Receiver frame alignment errors.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to:
+ *
+ * - 30.3.1.1.7 aAlignmentErrors
+ *
+ * @rx_fifo_errors: Receiver FIFO error counter.
+ *
+ * Historically the count of overflow events. Those events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ * This statistic was used interchangeably with @rx_over_errors.
+ * Not recommended for use in drivers for high speed interfaces.
+ *
+ * This statistic is used on software devices, e.g. to count software
+ * packet queue overflow (can) or sequencing errors (GRE).
+ *
+ * @rx_missed_errors: Count of packets missed by the host.
+ * Folded into the "drop" counter in `/proc/net/dev`.
+ *
+ * Counts number of packets dropped by the device due to lack
+ * of buffer space. This usually indicates that the host interface
+ * is slower than the network interface, or host is not keeping up
+ * with the receive packet rate.
+ *
+ * This statistic corresponds to hardware events and is not used
+ * on software devices.
+ *
+ * @tx_aborted_errors:
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ * For IEEE 802.3 devices capable of half-duplex operation this counter
+ * must be equivalent to:
+ *
+ * - 30.3.1.1.11 aFramesAbortedDueToXSColls
+ *
+ * High speed interfaces may use this counter as a general device
+ * discard counter.
+ *
+ * @tx_carrier_errors: Number of frame transmission errors due to loss
+ * of carrier during transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.13 aCarrierSenseErrors
+ *
+ * @tx_fifo_errors: Number of frame transmission errors due to device
+ * FIFO underrun / underflow. This condition occurs when the device
+ * begins transmission of a frame but is unable to deliver the
+ * entire frame to the transmitter in time for transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for
+ * old half-duplex Ethernet.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices possibly equivalent to:
+ *
+ * - 30.3.2.1.4 aSQETestErrors
+ *
+ * @tx_window_errors: Number of frame transmission errors due
+ * to late collisions (for Ethernet - after the first 64B of transmission).
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.10 aLateCollisions
+ *
+ * @rx_compressed: Number of correctly received compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @tx_compressed: Number of transmitted compressed packets.
+ * This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @rx_nohandler: Number of packets received on the interface
+ * but dropped by the networking stack because the device is
+ * not designated to receive packets (e.g. backup link in a bond).
+ */
struct rtnl_link_stats64 {
- __u64 rx_packets; /* total packets received */
- __u64 tx_packets; /* total packets transmitted */
- __u64 rx_bytes; /* total bytes received */
- __u64 tx_bytes; /* total bytes transmitted */
- __u64 rx_errors; /* bad packets received */
- __u64 tx_errors; /* packet transmit problems */
- __u64 rx_dropped; /* no space in linux buffers */
- __u64 tx_dropped; /* no space available in linux */
- __u64 multicast; /* multicast packets received */
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
__u64 collisions;
/* detailed rx_errors: */
__u64 rx_length_errors;
- __u64 rx_over_errors; /* receiver ring buff overflow */
- __u64 rx_crc_errors; /* recved pkt with crc error */
- __u64 rx_frame_errors; /* recv'd frame alignment error */
- __u64 rx_fifo_errors; /* recv'r fifo overrun */
- __u64 rx_missed_errors; /* receiver missed packet */
+ __u64 rx_over_errors;
+ __u64 rx_crc_errors;
+ __u64 rx_frame_errors;
+ __u64 rx_fifo_errors;
+ __u64 rx_missed_errors;
/* detailed tx_errors */
__u64 tx_aborted_errors;
@@ -71,8 +242,7 @@ struct rtnl_link_stats64 {
/* for cslip etc */
__u64 rx_compressed;
__u64 tx_compressed;
-
- __u64 rx_nohandler; /* dropped, no handler found */
+ __u64 rx_nohandler;
};
/* The struct should be in sync with struct ifmap */
@@ -170,12 +340,32 @@ enum {
IFLA_PROP_LIST,
IFLA_ALT_IFNAME, /* Alternative ifname */
IFLA_PERM_ADDRESS,
+ IFLA_PROTO_DOWN_REASON,
+
+ /* device (sysfs) name as parent, used instead
+ * of IFLA_LINK where there's no parent netdev
+ */
+ IFLA_PARENT_DEV_NAME,
+ IFLA_PARENT_DEV_BUS_NAME,
+ IFLA_GRO_MAX_SIZE,
+ IFLA_TSO_MAX_SIZE,
+ IFLA_TSO_MAX_SEGS,
+
__IFLA_MAX
};
#define IFLA_MAX (__IFLA_MAX - 1)
+enum {
+ IFLA_PROTO_DOWN_REASON_UNSPEC,
+ IFLA_PROTO_DOWN_REASON_MASK, /* u32, mask for reason bits */
+ IFLA_PROTO_DOWN_REASON_VALUE, /* u32, reason bit value */
+
+ __IFLA_PROTO_DOWN_REASON_CNT,
+ IFLA_PROTO_DOWN_REASON_MAX = __IFLA_PROTO_DOWN_REASON_CNT - 1
+};
+
/* backwards compatibility for userspace */
#ifndef __KERNEL__
#define IFLA_RTA(r) ((struct rtattr*)(((char*)(r)) + NLMSG_ALIGN(sizeof(struct ifinfomsg))))
@@ -230,6 +420,7 @@ enum {
IFLA_INET6_ICMP6STATS, /* statistics (icmpv6) */
IFLA_INET6_TOKEN, /* device token */
IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */
+ IFLA_INET6_RA_MTU, /* mtu carried in the RA message */
__IFLA_INET6_MAX
};
@@ -292,6 +483,7 @@ enum {
IFLA_BR_MCAST_MLD_VERSION,
IFLA_BR_VLAN_STATS_PER_PORT,
IFLA_BR_MULTI_BOOLOPT,
+ IFLA_BR_MCAST_QUERIER_STATE,
__IFLA_BR_MAX,
};
@@ -343,6 +535,10 @@ enum {
IFLA_BRPORT_NEIGH_SUPPRESS,
IFLA_BRPORT_ISOLATED,
IFLA_BRPORT_BACKUP_PORT,
+ IFLA_BRPORT_MRP_RING_OPEN,
+ IFLA_BRPORT_MRP_IN_OPEN,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT,
+ IFLA_BRPORT_MCAST_EHT_HOSTS_CNT,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -407,6 +603,9 @@ enum {
IFLA_MACVLAN_MACADDR,
IFLA_MACVLAN_MACADDR_DATA,
IFLA_MACVLAN_MACADDR_COUNT,
+ IFLA_MACVLAN_BC_QUEUE_LEN,
+ IFLA_MACVLAN_BC_QUEUE_LEN_USED,
+ IFLA_MACVLAN_BC_CUTOFF,
__IFLA_MACVLAN_MAX,
};
@@ -428,6 +627,7 @@ enum macvlan_macaddr_mode {
};
#define MACVLAN_FLAG_NOPROMISC 1
+#define MACVLAN_FLAG_NODST 2 /* skip dst macvlan if matching src macvlan */
/* VRF section */
enum {
@@ -474,6 +674,7 @@ enum {
IFLA_XFRM_UNSPEC,
IFLA_XFRM_LINK,
IFLA_XFRM_IF_ID,
+ IFLA_XFRM_COLLECT_METADATA,
__IFLA_XFRM_MAX
};
@@ -592,6 +793,18 @@ enum ifla_geneve_df {
GENEVE_DF_MAX = __GENEVE_DF_END - 1,
};
+/* Bareudp section */
+enum {
+ IFLA_BAREUDP_UNSPEC,
+ IFLA_BAREUDP_PORT,
+ IFLA_BAREUDP_ETHERTYPE,
+ IFLA_BAREUDP_SRCPORT_MIN,
+ IFLA_BAREUDP_MULTIPROTO_MODE,
+ __IFLA_BAREUDP_MAX
+};
+
+#define IFLA_BAREUDP_MAX (__IFLA_BAREUDP_MAX - 1)
+
/* PPP section */
enum {
IFLA_PPP_UNSPEC,
@@ -649,6 +862,9 @@ enum {
IFLA_BOND_AD_ACTOR_SYSTEM,
IFLA_BOND_TLB_DYNAMIC_LB,
IFLA_BOND_PEER_NOTIF_DELAY,
+ IFLA_BOND_AD_LACP_ACTIVE,
+ IFLA_BOND_MISSED_MAX,
+ IFLA_BOND_NS_IP6_TARGET,
__IFLA_BOND_MAX,
};
@@ -676,6 +892,7 @@ enum {
IFLA_BOND_SLAVE_AD_AGGREGATOR_ID,
IFLA_BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE,
IFLA_BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE,
+ IFLA_BOND_SLAVE_PRIO,
__IFLA_BOND_SLAVE_MAX,
};
@@ -893,7 +1110,14 @@ enum {
#define IFLA_IPOIB_MAX (__IFLA_IPOIB_MAX - 1)
-/* HSR section */
+/* HSR/PRP section, both uses same interface */
+
+/* Different redundancy protocols for hsr device */
+enum {
+ HSR_PROTOCOL_HSR,
+ HSR_PROTOCOL_PRP,
+ HSR_PROTOCOL_MAX,
+};
enum {
IFLA_HSR_UNSPEC,
@@ -903,6 +1127,9 @@ enum {
IFLA_HSR_SUPERVISION_ADDR, /* Supervision frame multicast addr */
IFLA_HSR_SEQ_NR,
IFLA_HSR_VERSION, /* HSR version */
+ IFLA_HSR_PROTOCOL, /* Indicate different protocol than
+ * HSR. For example PRP.
+ */
__IFLA_HSR_MAX,
};
@@ -1027,6 +1254,8 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
+#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)
+#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5)
enum {
IFLA_RMNET_UNSPEC,
@@ -1042,4 +1271,14 @@ struct ifla_rmnet_flags {
__u32 mask;
};
+/* MCTP section */
+
+enum {
+ IFLA_MCTP_UNSPEC,
+ IFLA_MCTP_NET,
+ __IFLA_MCTP_MAX,
+};
+
+#define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1)
+
#endif /* _UAPI_LINUX_IF_LINK_H */
diff --git a/tools/include/uapi/linux/if_tun.h b/tools/include/uapi/linux/if_tun.h
index 454ae31b93c7..2ec07de1d73b 100644
--- a/tools/include/uapi/linux/if_tun.h
+++ b/tools/include/uapi/linux/if_tun.h
@@ -108,7 +108,7 @@ struct tun_pi {
struct tun_filter {
__u16 flags; /* TUN_FLT_ flags see above */
__u16 count; /* Number of addresses */
- __u8 addr[0][ETH_ALEN];
+ __u8 addr[][ETH_ALEN];
};
#endif /* _UAPI__IF_TUN_H */
diff --git a/tools/include/uapi/linux/if_xdp.h b/tools/include/uapi/linux/if_xdp.h
index be328c59389d..a78a8096f4ce 100644
--- a/tools/include/uapi/linux/if_xdp.h
+++ b/tools/include/uapi/linux/if_xdp.h
@@ -73,9 +73,12 @@ struct xdp_umem_reg {
};
struct xdp_statistics {
- __u64 rx_dropped; /* Dropped for reasons other than invalid desc */
+ __u64 rx_dropped; /* Dropped for other reasons */
__u64 rx_invalid_descs; /* Dropped due to invalid descriptor */
__u64 tx_invalid_descs; /* Dropped due to invalid descriptor */
+ __u64 rx_ring_full; /* Dropped due to rx ring being full */
+ __u64 rx_fill_ring_empty_descs; /* Failed to retrieve item from fill ring */
+ __u64 tx_ring_empty_descs; /* Failed to retrieve item from tx ring */
};
struct xdp_options {
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
index 8533bf07450f..07a4cb149305 100644
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -20,6 +20,7 @@
#define _UAPI_LINUX_IN_H
#include <linux/types.h>
+#include <linux/stddef.h>
#include <linux/libc-compat.h>
#include <linux/socket.h>
@@ -68,6 +69,8 @@ enum {
#define IPPROTO_PIM IPPROTO_PIM
IPPROTO_COMP = 108, /* Compression Header Protocol */
#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_L2TP = 115, /* Layer 2 Tunnelling Protocol */
+#define IPPROTO_L2TP IPPROTO_L2TP
IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
@@ -123,6 +126,7 @@ struct in_addr {
#define IP_CHECKSUM 23
#define IP_BIND_ADDRESS_NO_PORT 24
#define IP_RECVFRAGSIZE 25
+#define IP_RECVERR_RFC4884 26
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
@@ -134,7 +138,7 @@ struct in_addr {
* this socket to prevent accepting spoofed ones.
*/
#define IP_PMTUDISC_INTERFACE 4
-/* weaker version of IP_PMTUDISC_INTERFACE, which allos packets to get
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
* fragmented if they exceed the interface mtu
*/
#define IP_PMTUDISC_OMIT 5
@@ -191,7 +195,10 @@ struct ip_msfilter {
__be32 imsf_interface;
__u32 imsf_fmode;
__u32 imsf_numsrc;
- __be32 imsf_slist[1];
+ union {
+ __be32 imsf_slist[1];
+ __DECLARE_FLEX_ARRAY(__be32, imsf_slist_flex);
+ };
};
#define IP_MSFILTER_SIZE(numsrc) \
@@ -210,11 +217,22 @@ struct group_source_req {
};
struct group_filter {
- __u32 gf_interface; /* interface index */
- struct __kernel_sockaddr_storage gf_group; /* multicast address */
- __u32 gf_fmode; /* filter mode */
- __u32 gf_numsrc; /* number of sources */
- struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */
+ union {
+ struct {
+ __u32 gf_interface_aux; /* interface index */
+ struct __kernel_sockaddr_storage gf_group_aux; /* multicast address */
+ __u32 gf_fmode_aux; /* filter mode */
+ __u32 gf_numsrc_aux; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist[1]; /* interface index */
+ };
+ struct {
+ __u32 gf_interface; /* interface index */
+ struct __kernel_sockaddr_storage gf_group; /* multicast address */
+ __u32 gf_fmode; /* filter mode */
+ __u32 gf_numsrc; /* number of sources */
+ struct __kernel_sockaddr_storage gf_slist_flex[]; /* interface index */
+ };
+ };
};
#define GROUP_FILTER_SIZE(numsrc) \
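A hedged sketch of sizing the new flexible-array layout with GROUP_FILTER_SIZE() before handing it to the MCAST_MSFILTER socket option (the allocation helper name is illustrative):

#include <stdlib.h>
#include <linux/in.h>

static struct group_filter *alloc_group_filter(unsigned int nsrcs)
{
	struct group_filter *gf = calloc(1, GROUP_FILTER_SIZE(nsrcs));

	if (gf)
		gf->gf_numsrc = nsrcs;
	/* caller fills gf->gf_interface, gf->gf_group and gf->gf_slist_flex[] */
	return gf;
}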
@@ -288,6 +306,9 @@ struct sockaddr_in {
/* Address indicating an error return. */
#define INADDR_NONE ((unsigned long int) 0xffffffff)
+/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
+#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
+
/* Network number for local host loopback. */
#define IN_LOOPBACKNET 127
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 4b95f9a31a2f..4003a166328c 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -8,6 +8,7 @@
* Note: you must update KVM_API_VERSION if you change this interface.
*/
+#include <linux/const.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/ioctl.h>
@@ -85,14 +86,6 @@ struct kvm_debug_guest {
/* *** End of deprecated interfaces *** */
-/* for KVM_CREATE_MEMORY_REGION */
-struct kvm_memory_region {
- __u32 slot;
- __u32 flags;
- __u64 guest_phys_addr;
- __u64 memory_size; /* bytes */
-};
-
/* for KVM_SET_USER_MEMORY_REGION */
struct kvm_userspace_memory_region {
__u32 slot;
@@ -103,9 +96,9 @@ struct kvm_userspace_memory_region {
};
/*
- * The bit 0 ~ bit 15 of kvm_memory_region::flags are visible for userspace,
- * other bits are reserved for kvm internal use which are defined in
- * include/linux/kvm_host.h.
+ * The bit 0 ~ bit 15 of kvm_userspace_memory_region::flags are visible for
+ * userspace, other bits are reserved for kvm internal use which are defined
+ * in include/linux/kvm_host.h.
*/
#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
#define KVM_MEM_READONLY (1UL << 1)
@@ -116,7 +109,7 @@ struct kvm_irq_level {
* ACPI gsi notion of irq.
* For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
* For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
- * For ARM: See Documentation/virt/kvm/api.txt
+ * For ARM: See Documentation/virt/kvm/api.rst
*/
union {
__u32 irq;
@@ -188,10 +181,13 @@ struct kvm_s390_cmma_log {
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
#define KVM_EXIT_HYPERV_HCALL 2
+#define KVM_EXIT_HYPERV_SYNDBG 3
__u32 type;
+ __u32 pad1;
union {
struct {
__u32 msr;
+ __u32 pad2;
__u64 control;
__u64 evt_page;
__u64 msg_page;
@@ -201,6 +197,29 @@ struct kvm_hyperv_exit {
__u64 result;
__u64 params[2];
} hcall;
+ struct {
+ __u32 msr;
+ __u32 pad2;
+ __u64 control;
+ __u64 status;
+ __u64 send_page;
+ __u64 recv_page;
+ __u64 pending_page;
+ } syndbg;
+ } u;
+};
+
+struct kvm_xen_exit {
+#define KVM_EXIT_XEN_HCALL 1
+ __u32 type;
+ union {
+ struct {
+ __u32 longmode;
+ __u32 cpl;
+ __u64 input;
+ __u64 result;
+ __u64 params[6];
+ } hcall;
} u;
};
@@ -236,6 +255,15 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27
#define KVM_EXIT_ARM_NISV 28
+#define KVM_EXIT_X86_RDMSR 29
+#define KVM_EXIT_X86_WRMSR 30
+#define KVM_EXIT_DIRTY_RING_FULL 31
+#define KVM_EXIT_AP_RESET_HOLD 32
+#define KVM_EXIT_X86_BUS_LOCK 33
+#define KVM_EXIT_XEN 34
+#define KVM_EXIT_RISCV_SBI 35
+#define KVM_EXIT_RISCV_CSR 36
+#define KVM_EXIT_NOTIFY 37
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -247,6 +275,9 @@ struct kvm_hyperv_exit {
/* Encounter unexpected vm-exit reason */
#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON 4
+/* Flags that describe what fields in emulation_failure hold valid data. */
+#define KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES (1ULL << 0)
+
/* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
struct kvm_run {
/* in */
@@ -277,6 +308,7 @@ struct kvm_run {
/* KVM_EXIT_FAIL_ENTRY */
struct {
__u64 hardware_entry_failure_reason;
+ __u32 cpu;
} fail_entry;
/* KVM_EXIT_EXCEPTION */
struct {
@@ -349,6 +381,35 @@ struct kvm_run {
__u32 ndata;
__u64 data[16];
} internal;
+ /*
+ * KVM_INTERNAL_ERROR_EMULATION
+ *
+ * "struct emulation_failure" is an overlay of "struct internal"
+ * that is used for the KVM_INTERNAL_ERROR_EMULATION sub-type of
+ * KVM_EXIT_INTERNAL_ERROR. Note, unlike other internal error
+ * sub-types, this struct is ABI! It also needs to be backwards
+ * compatible with "struct internal". Take special care that
+ * "ndata" is correct, that new fields are enumerated in "flags",
+ * and that each flag enumerates fields that are 64-bit aligned
+ * and sized (so that ndata+internal.data[] is valid/accurate).
+ *
+ * Space beyond the defined fields may be used to store arbitrary
+ * debug information relating to the emulation failure. It is
+ * accounted for in "ndata" but the format is unspecified and is
+ * not represented in "flags". Any such information is *not* ABI!
+ */
+ struct {
+ __u32 suberror;
+ __u32 ndata;
+ __u64 flags;
+ union {
+ struct {
+ __u8 insn_size;
+ __u8 insn_bytes[15];
+ };
+ };
+ /* Arbitrary debug data may follow. */
+ } emulation_failure;
/* KVM_EXIT_OSI */
struct {
__u64 gprs[32];
@@ -377,8 +438,17 @@ struct kvm_run {
#define KVM_SYSTEM_EVENT_SHUTDOWN 1
#define KVM_SYSTEM_EVENT_RESET 2
#define KVM_SYSTEM_EVENT_CRASH 3
+#define KVM_SYSTEM_EVENT_WAKEUP 4
+#define KVM_SYSTEM_EVENT_SUSPEND 5
+#define KVM_SYSTEM_EVENT_SEV_TERM 6
__u32 type;
- __u64 flags;
+ __u32 ndata;
+ union {
+#ifndef __KERNEL__
+ __u64 flags;
+#endif
+ __u64 data[16];
+ };
} system_event;
/* KVM_EXIT_S390_STSI */
struct {
@@ -400,6 +470,41 @@ struct kvm_run {
__u64 esr_iss;
__u64 fault_ipa;
} arm_nisv;
+ /* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */
+ struct {
+ __u8 error; /* user -> kernel */
+ __u8 pad[7];
+#define KVM_MSR_EXIT_REASON_INVAL (1 << 0)
+#define KVM_MSR_EXIT_REASON_UNKNOWN (1 << 1)
+#define KVM_MSR_EXIT_REASON_FILTER (1 << 2)
+#define KVM_MSR_EXIT_REASON_VALID_MASK (KVM_MSR_EXIT_REASON_INVAL | \
+ KVM_MSR_EXIT_REASON_UNKNOWN | \
+ KVM_MSR_EXIT_REASON_FILTER)
+ __u32 reason; /* kernel -> user */
+ __u32 index; /* kernel -> user */
+ __u64 data; /* kernel <-> user */
+ } msr;
+ /* KVM_EXIT_XEN */
+ struct kvm_xen_exit xen;
+ /* KVM_EXIT_RISCV_SBI */
+ struct {
+ unsigned long extension_id;
+ unsigned long function_id;
+ unsigned long args[6];
+ unsigned long ret[2];
+ } riscv_sbi;
+ /* KVM_EXIT_RISCV_CSR */
+ struct {
+ unsigned long csr_num;
+ unsigned long new_value;
+ unsigned long write_mask;
+ unsigned long ret_value;
+ } riscv_csr;
+ /* KVM_EXIT_NOTIFY */
+ struct {
+#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0)
+ __u32 flags;
+ } notify;
/* Fix the size of the union. */
char padding[256];
};
@@ -446,7 +551,7 @@ struct kvm_coalesced_mmio {
struct kvm_coalesced_mmio_ring {
__u32 first, last;
- struct kvm_coalesced_mmio coalesced_mmio[0];
+ struct kvm_coalesced_mmio coalesced_mmio[];
};
#define KVM_COALESCED_MMIO_MAX \
@@ -474,15 +579,34 @@ struct kvm_s390_mem_op {
__u32 size; /* amount of bytes */
__u32 op; /* type of operation */
__u64 buf; /* buffer in userspace */
- __u8 ar; /* the access register number */
- __u8 reserved[31]; /* should be set to 0 */
+ union {
+ struct {
+ __u8 ar; /* the access register number */
+ __u8 key; /* access key, ignored if flag unset */
+ __u8 pad1[6]; /* ignored */
+ __u64 old_addr; /* ignored if cmpxchg flag unset */
+ };
+ __u32 sida_offset; /* offset into the sida */
+ __u8 reserved[32]; /* ignored */
+ };
};
/* types for kvm_s390_mem_op->op */
#define KVM_S390_MEMOP_LOGICAL_READ 0
#define KVM_S390_MEMOP_LOGICAL_WRITE 1
+#define KVM_S390_MEMOP_SIDA_READ 2
+#define KVM_S390_MEMOP_SIDA_WRITE 3
+#define KVM_S390_MEMOP_ABSOLUTE_READ 4
+#define KVM_S390_MEMOP_ABSOLUTE_WRITE 5
+#define KVM_S390_MEMOP_ABSOLUTE_CMPXCHG 6
+
/* flags for kvm_s390_mem_op->flags */
#define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0)
#define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1)
+#define KVM_S390_MEMOP_F_SKEY_PROTECTION (1ULL << 2)
+
+/* flags specifying extension support via KVM_CAP_S390_MEM_OP_EXTENSION */
+#define KVM_S390_MEMOP_EXTENSION_CAP_BASE (1 << 0)
+#define KVM_S390_MEMOP_EXTENSION_CAP_CMPXCHG (1 << 1)
/* for KVM_INTERRUPT */
struct kvm_interrupt {
@@ -514,7 +638,7 @@ struct kvm_clear_dirty_log {
/* for KVM_SET_SIGNAL_MASK */
struct kvm_signal_mask {
__u32 len;
- __u8 sigset[0];
+ __u8 sigset[];
};
/* for KVM_TPR_ACCESS_REPORTING */
@@ -541,6 +665,8 @@ struct kvm_vapic_addr {
#define KVM_MP_STATE_CHECK_STOP 6
#define KVM_MP_STATE_OPERATING 7
#define KVM_MP_STATE_LOAD 8
+#define KVM_MP_STATE_AP_RESET_HOLD 9
+#define KVM_MP_STATE_SUSPENDED 10
struct kvm_mp_state {
__u32 mp_state;
@@ -772,9 +898,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
-/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
-#define KVM_VM_MIPS_TE 0
+/* on MIPS, 0 indicates auto, 1 forces VZ ASE, 2 forces trap & emulate */
+#define KVM_VM_MIPS_AUTO 0
#define KVM_VM_MIPS_VZ 1
+#define KVM_VM_MIPS_TE 2
#define KVM_S390_SIE_PAGE_OFFSET 1
@@ -1010,6 +1137,53 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_NISV_TO_USER 177
#define KVM_CAP_ARM_INJECT_EXT_DABT 178
#define KVM_CAP_S390_VCPU_RESETS 179
+#define KVM_CAP_S390_PROTECTED 180
+#define KVM_CAP_PPC_SECURE_GUEST 181
+#define KVM_CAP_HALT_POLL 182
+#define KVM_CAP_ASYNC_PF_INT 183
+#define KVM_CAP_LAST_CPU 184
+#define KVM_CAP_SMALLER_MAXPHYADDR 185
+#define KVM_CAP_S390_DIAG318 186
+#define KVM_CAP_STEAL_TIME 187
+#define KVM_CAP_X86_USER_SPACE_MSR 188
+#define KVM_CAP_X86_MSR_FILTER 189
+#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190
+#define KVM_CAP_SYS_HYPERV_CPUID 191
+#define KVM_CAP_DIRTY_LOG_RING 192
+#define KVM_CAP_X86_BUS_LOCK_EXIT 193
+#define KVM_CAP_PPC_DAWR1 194
+#define KVM_CAP_SET_GUEST_DEBUG2 195
+#define KVM_CAP_SGX_ATTRIBUTE 196
+#define KVM_CAP_VM_COPY_ENC_CONTEXT_FROM 197
+#define KVM_CAP_PTP_KVM 198
+#define KVM_CAP_HYPERV_ENFORCE_CPUID 199
+#define KVM_CAP_SREGS2 200
+#define KVM_CAP_EXIT_HYPERCALL 201
+#define KVM_CAP_PPC_RPT_INVALIDATE 202
+#define KVM_CAP_BINARY_STATS_FD 203
+#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
+#define KVM_CAP_ARM_MTE 205
+#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
+#define KVM_CAP_VM_GPA_BITS 207
+#define KVM_CAP_XSAVE2 208
+#define KVM_CAP_SYS_ATTRIBUTES 209
+#define KVM_CAP_PPC_AIL_MODE_3 210
+#define KVM_CAP_S390_MEM_OP_EXTENSION 211
+#define KVM_CAP_PMU_CAPABILITY 212
+#define KVM_CAP_DISABLE_QUIRKS2 213
+#define KVM_CAP_VM_TSC_CONTROL 214
+#define KVM_CAP_SYSTEM_EVENT_DATA 215
+#define KVM_CAP_ARM_SYSTEM_SUSPEND 216
+#define KVM_CAP_S390_PROTECTED_DUMP 217
+#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218
+#define KVM_CAP_X86_NOTIFY_VMEXIT 219
+#define KVM_CAP_VM_DISABLE_NX_HUGE_PAGES 220
+#define KVM_CAP_S390_ZPCI_OP 221
+#define KVM_CAP_S390_CPU_TOPOLOGY 222
+#define KVM_CAP_DIRTY_LOG_RING_ACQ_REL 223
+#define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
+#define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
+#define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1041,11 +1215,20 @@ struct kvm_irq_routing_hv_sint {
__u32 sint;
};
+struct kvm_irq_routing_xen_evtchn {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+};
+
+#define KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL ((__u32)(-1))
+
/* gsi routing entry types */
#define KVM_IRQ_ROUTING_IRQCHIP 1
#define KVM_IRQ_ROUTING_MSI 2
#define KVM_IRQ_ROUTING_S390_ADAPTER 3
#define KVM_IRQ_ROUTING_HV_SINT 4
+#define KVM_IRQ_ROUTING_XEN_EVTCHN 5
struct kvm_irq_routing_entry {
__u32 gsi;
@@ -1057,6 +1240,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing_msi msi;
struct kvm_irq_routing_s390_adapter adapter;
struct kvm_irq_routing_hv_sint hv_sint;
+ struct kvm_irq_routing_xen_evtchn xen_evtchn;
__u32 pad[8];
} u;
};
@@ -1064,7 +1248,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing {
__u32 nr;
__u32 flags;
- struct kvm_irq_routing_entry entries[0];
+ struct kvm_irq_routing_entry entries[];
};
#endif
@@ -1083,6 +1267,14 @@ struct kvm_x86_mce {
#endif
#ifdef KVM_CAP_XEN_HVM
+#define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR (1 << 0)
+#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
+#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_SEND (1 << 5)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG (1 << 6)
+
struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
@@ -1100,7 +1292,7 @@ struct kvm_xen_hvm_config {
*
* KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
* the irqfd to operate in resampling mode for level triggered interrupt
- * emulation. See Documentation/virt/kvm/api.txt.
+ * emulation. See Documentation/virt/kvm/api.rst.
*/
#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
@@ -1116,11 +1308,16 @@ struct kvm_irqfd {
/* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */
#define KVM_CLOCK_TSC_STABLE 2
+#define KVM_CLOCK_REALTIME (1 << 2)
+#define KVM_CLOCK_HOST_TSC (1 << 3)
struct kvm_clock_data {
__u64 clock;
__u32 flags;
- __u32 pad[9];
+ __u32 pad0;
+ __u64 realtime;
+ __u64 host_tsc;
+ __u32 pad[4];
};
/* For KVM_CAP_SW_TLB */
@@ -1172,7 +1369,7 @@ struct kvm_dirty_tlb {
struct kvm_reg_list {
__u64 n; /* number of regs */
- __u64 reg[0];
+ __u64 reg[];
};
struct kvm_one_reg {
@@ -1248,19 +1445,13 @@ struct kvm_vfio_spapr_tce {
};
/*
- * ioctls for VM fds
- */
-#define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region)
-/*
* KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
* a vcpu fd.
*/
#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
-/* KVM_SET_MEMORY_ALIAS is obsolete: */
-#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
#define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44)
-#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45)
+#define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) /* deprecated */
#define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46, \
struct kvm_userspace_memory_region)
#define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47)
@@ -1315,7 +1506,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2)
/* Available with KVM_CAP_PPC_GET_PVINFO */
#define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo)
-/* Available with KVM_CAP_TSC_CONTROL */
+/* Available with KVM_CAP_TSC_CONTROL for a vCPU, or with
+* KVM_CAP_VM_TSC_CONTROL to set defaults for a VM */
#define KVM_SET_TSC_KHZ _IO(KVMIO, 0xa2)
#define KVM_GET_TSC_KHZ _IO(KVMIO, 0xa3)
/* Available with KVM_CAP_PCI_2_3 */
@@ -1350,6 +1542,7 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_PMU_EVENT_FILTER */
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
+#define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1468,7 +1661,7 @@ struct kvm_enc_region {
/* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT_2 */
#define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log)
-/* Available with KVM_CAP_HYPERV_CPUID */
+/* Available with KVM_CAP_HYPERV_CPUID (vcpu) / KVM_CAP_SYS_HYPERV_CPUID (system) */
#define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2)
/* Available with KVM_CAP_ARM_SVE */
@@ -1478,6 +1671,201 @@ struct kvm_enc_region {
#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
+struct kvm_s390_pv_sec_parm {
+ __u64 origin;
+ __u64 length;
+};
+
+struct kvm_s390_pv_unp {
+ __u64 addr;
+ __u64 size;
+ __u64 tweak;
+};
+
+enum pv_cmd_dmp_id {
+ KVM_PV_DUMP_INIT,
+ KVM_PV_DUMP_CONFIG_STOR_STATE,
+ KVM_PV_DUMP_COMPLETE,
+ KVM_PV_DUMP_CPU,
+};
+
+struct kvm_s390_pv_dmp {
+ __u64 subcmd;
+ __u64 buff_addr;
+ __u64 buff_len;
+ __u64 gaddr; /* For dump storage state */
+ __u64 reserved[4];
+};
+
+enum pv_cmd_info_id {
+ KVM_PV_INFO_VM,
+ KVM_PV_INFO_DUMP,
+};
+
+struct kvm_s390_pv_info_dump {
+ __u64 dump_cpu_buffer_len;
+ __u64 dump_config_mem_buffer_per_1m;
+ __u64 dump_config_finalize_len;
+};
+
+struct kvm_s390_pv_info_vm {
+ __u64 inst_calls_list[4];
+ __u64 max_cpus;
+ __u64 max_guests;
+ __u64 max_guest_addr;
+ __u64 feature_indication;
+};
+
+struct kvm_s390_pv_info_header {
+ __u32 id;
+ __u32 len_max;
+ __u32 len_written;
+ __u32 reserved;
+};
+
+struct kvm_s390_pv_info {
+ struct kvm_s390_pv_info_header header;
+ union {
+ struct kvm_s390_pv_info_dump dump;
+ struct kvm_s390_pv_info_vm vm;
+ };
+};
+
+enum pv_cmd_id {
+ KVM_PV_ENABLE,
+ KVM_PV_DISABLE,
+ KVM_PV_SET_SEC_PARMS,
+ KVM_PV_UNPACK,
+ KVM_PV_VERIFY,
+ KVM_PV_PREP_RESET,
+ KVM_PV_UNSHARE_ALL,
+ KVM_PV_INFO,
+ KVM_PV_DUMP,
+ KVM_PV_ASYNC_CLEANUP_PREPARE,
+ KVM_PV_ASYNC_CLEANUP_PERFORM,
+};
+
+struct kvm_pv_cmd {
+ __u32 cmd; /* Command to be executed */
+ __u16 rc; /* Ultravisor return code */
+ __u16 rrc; /* Ultravisor return reason code */
+ __u64 data; /* Data or address */
+ __u32 flags; /* flags for future extensions. Must be 0 for now */
+ __u32 reserved[3];
+};
+
+/* Available with KVM_CAP_S390_PROTECTED */
+#define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd)
+
+/* Available with KVM_CAP_X86_MSR_FILTER */
+#define KVM_X86_SET_MSR_FILTER _IOW(KVMIO, 0xc6, struct kvm_msr_filter)
+
+/* Available with KVM_CAP_DIRTY_LOG_RING */
+#define KVM_RESET_DIRTY_RINGS _IO(KVMIO, 0xc7)
+
+/* Per-VM Xen attributes */
+#define KVM_XEN_HVM_GET_ATTR _IOWR(KVMIO, 0xc8, struct kvm_xen_hvm_attr)
+#define KVM_XEN_HVM_SET_ATTR _IOW(KVMIO, 0xc9, struct kvm_xen_hvm_attr)
+
+struct kvm_xen_hvm_attr {
+ __u16 type;
+ __u16 pad[3];
+ union {
+ __u8 long_mode;
+ __u8 vector;
+ __u8 runstate_update_flag;
+ struct {
+ __u64 gfn;
+#define KVM_XEN_INVALID_GFN ((__u64)-1)
+ } shared_info;
+ struct {
+ __u32 send_port;
+ __u32 type; /* EVTCHNSTAT_ipi / EVTCHNSTAT_interdomain */
+ __u32 flags;
+#define KVM_XEN_EVTCHN_DEASSIGN (1 << 0)
+#define KVM_XEN_EVTCHN_UPDATE (1 << 1)
+#define KVM_XEN_EVTCHN_RESET (1 << 2)
+ /*
+ * Events sent by the guest are either looped back to
+ * the guest itself (potentially on a different port#)
+ * or signalled via an eventfd.
+ */
+ union {
+ struct {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+ } port;
+ struct {
+ __u32 port; /* Zero for eventfd */
+ __s32 fd;
+ } eventfd;
+ __u32 padding[4];
+ } deliver;
+ } evtchn;
+ __u32 xen_version;
+ __u64 pad[8];
+ } u;
+};
+
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
+#define KVM_XEN_ATTR_TYPE_LONG_MODE 0x0
+#define KVM_XEN_ATTR_TYPE_SHARED_INFO 0x1
+#define KVM_XEN_ATTR_TYPE_UPCALL_VECTOR 0x2
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_ATTR_TYPE_EVTCHN 0x3
+#define KVM_XEN_ATTR_TYPE_XEN_VERSION 0x4
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG */
+#define KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG 0x5
+
+/* Per-vCPU Xen attributes */
+#define KVM_XEN_VCPU_GET_ATTR _IOWR(KVMIO, 0xca, struct kvm_xen_vcpu_attr)
+#define KVM_XEN_VCPU_SET_ATTR _IOW(KVMIO, 0xcb, struct kvm_xen_vcpu_attr)
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_HVM_EVTCHN_SEND _IOW(KVMIO, 0xd0, struct kvm_irq_routing_xen_evtchn)
+
+#define KVM_GET_SREGS2 _IOR(KVMIO, 0xcc, struct kvm_sregs2)
+#define KVM_SET_SREGS2 _IOW(KVMIO, 0xcd, struct kvm_sregs2)
+
+struct kvm_xen_vcpu_attr {
+ __u16 type;
+ __u16 pad[3];
+ union {
+ __u64 gpa;
+#define KVM_XEN_INVALID_GPA ((__u64)-1)
+ __u64 pad[8];
+ struct {
+ __u64 state;
+ __u64 state_entry_time;
+ __u64 time_running;
+ __u64 time_runnable;
+ __u64 time_blocked;
+ __u64 time_offline;
+ } runstate;
+ __u32 vcpu_id;
+ struct {
+ __u32 port;
+ __u32 priority;
+ __u64 expires_ns;
+ } timer;
+ __u8 vector;
+ } u;
+};
+
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO 0x0
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO 0x1
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR 0x2
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT 0x3
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA 0x4
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
+/* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_EVTCHN_SEND */
+#define KVM_XEN_VCPU_ATTR_TYPE_VCPU_ID 0x6
+#define KVM_XEN_VCPU_ATTR_TYPE_TIMER 0x7
+#define KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR 0x8
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
@@ -1506,6 +1894,10 @@ enum sev_cmd_id {
KVM_SEV_DBG_ENCRYPT,
/* Guest certificates commands */
KVM_SEV_CERT_EXPORT,
+ /* Attestation report */
+ KVM_SEV_GET_ATTESTATION_REPORT,
+ /* Guest Migration Extension */
+ KVM_SEV_SEND_CANCEL,
KVM_SEV_NR_MAX,
};
@@ -1558,6 +1950,51 @@ struct kvm_sev_dbg {
__u32 len;
};
+struct kvm_sev_attestation_report {
+ __u8 mnonce[16];
+ __u64 uaddr;
+ __u32 len;
+};
+
+struct kvm_sev_send_start {
+ __u32 policy;
+ __u64 pdh_cert_uaddr;
+ __u32 pdh_cert_len;
+ __u64 plat_certs_uaddr;
+ __u32 plat_certs_len;
+ __u64 amd_certs_uaddr;
+ __u32 amd_certs_len;
+ __u64 session_uaddr;
+ __u32 session_len;
+};
+
+struct kvm_sev_send_update_data {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+};
+
+struct kvm_sev_receive_start {
+ __u32 handle;
+ __u32 policy;
+ __u64 pdh_uaddr;
+ __u32 pdh_len;
+ __u64 session_uaddr;
+ __u32 session_len;
+};
+
+struct kvm_sev_receive_update_data {
+ __u64 hdr_uaddr;
+ __u32 hdr_len;
+ __u64 guest_uaddr;
+ __u32 guest_len;
+ __u64 trans_uaddr;
+ __u32 trans_len;
+};
+
#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
#define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
@@ -1628,4 +2065,176 @@ struct kvm_hyperv_eventfd {
#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
+#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
+#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
+
+/*
+ * Arch needs to define the macro after implementing the dirty ring
+ * feature. KVM_DIRTY_LOG_PAGE_OFFSET should be defined as the
+ * starting page offset of the dirty ring structures.
+ */
+#ifndef KVM_DIRTY_LOG_PAGE_OFFSET
+#define KVM_DIRTY_LOG_PAGE_OFFSET 0
+#endif
+
+/*
+ * KVM dirty GFN flags, defined as:
+ *
+ * |---------------+---------------+--------------|
+ * | bit 1 (reset) | bit 0 (dirty) | Status |
+ * |---------------+---------------+--------------|
+ * | 0 | 0 | Invalid GFN |
+ * | 0 | 1 | Dirty GFN |
+ * | 1 | X | GFN to reset |
+ * |---------------+---------------+--------------|
+ *
+ * Lifecycle of a dirty GFN goes like:
+ *
+ * dirtied harvested reset
+ * 00 -----------> 01 -------------> 1X -------+
+ * ^ |
+ * | |
+ * +------------------------------------------+
+ *
+ * The userspace program is only responsible for the 01->1X state
+ * conversion after harvesting an entry. Also, it must not skip any
+ * dirty bits, so that dirty bits are always harvested in sequence.
+ */
+#define KVM_DIRTY_GFN_F_DIRTY _BITUL(0)
+#define KVM_DIRTY_GFN_F_RESET _BITUL(1)
+#define KVM_DIRTY_GFN_F_MASK 0x3
+
+/*
+ * KVM dirty rings should be mapped at KVM_DIRTY_LOG_PAGE_OFFSET of
+ * per-vcpu mmaped regions as an array of struct kvm_dirty_gfn. The
+ * size of the gfn buffer is decided by the first argument when
+ * enabling KVM_CAP_DIRTY_LOG_RING.
+ */
+struct kvm_dirty_gfn {
+ __u32 flags;
+ __u32 slot;
+ __u64 offset;
+};
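A hedged harvesting sketch (index handling simplified; the memory ordering required when KVM_CAP_DIRTY_LOG_RING_ACQ_REL is in use is omitted):

/* "ring" points at the per-vcpu mmap()ed array of kvm_dirty_gfn entries,
 * "size" is the ring size negotiated when enabling KVM_CAP_DIRTY_LOG_RING,
 * "next" is the caller's harvest cursor.
 */
static void harvest_dirty_gfns(struct kvm_dirty_gfn *ring, __u32 size, __u32 *next)
{
	while (ring[*next % size].flags & KVM_DIRTY_GFN_F_DIRTY) {
		struct kvm_dirty_gfn *e = &ring[*next % size];

		/* record e->slot / e->offset, then hand the entry back so
		 * a later KVM_RESET_DIRTY_RINGS ioctl can recycle it */
		e->flags |= KVM_DIRTY_GFN_F_RESET;
		(*next)++;
	}
}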
+
+#define KVM_BUS_LOCK_DETECTION_OFF (1 << 0)
+#define KVM_BUS_LOCK_DETECTION_EXIT (1 << 1)
+
+#define KVM_PMU_CAP_DISABLE (1 << 0)
+
+/**
+ * struct kvm_stats_header - Header of per vm/vcpu binary statistics data.
+ * @flags: Some extra information for header, always 0 for now.
+ * @name_size: The size in bytes of the memory which contains statistics
+ * name string including trailing '\0'. The memory is allocated
+ * at the end of the statistics descriptor.
+ * @num_desc: The number of statistics the vm or vcpu has.
+ * @id_offset: The offset of the vm/vcpu stats' id string in the file pointed
+ * by vm/vcpu stats fd.
+ * @desc_offset: The offset of the vm/vcpu stats' descriptor block in the file
+ * pointed by vm/vcpu stats fd.
+ * @data_offset: The offset of the vm/vcpu stats' data block in the file
+ * pointed by vm/vcpu stats fd.
+ *
+ * This is the header userspace needs to read from stats fd before any other
+ * readings. It is used by userspace to discover all the information about the
+ * vm/vcpu's binary statistics.
+ * Userspace reads this header from the start of the vm/vcpu's stats fd.
+ */
+struct kvm_stats_header {
+ __u32 flags;
+ __u32 name_size;
+ __u32 num_desc;
+ __u32 id_offset;
+ __u32 desc_offset;
+ __u32 data_offset;
+};
+
+#define KVM_STATS_TYPE_SHIFT 0
+#define KVM_STATS_TYPE_MASK (0xF << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_CUMULATIVE (0x0 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_INSTANT (0x1 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_PEAK (0x2 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LINEAR_HIST (0x3 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_LOG_HIST (0x4 << KVM_STATS_TYPE_SHIFT)
+#define KVM_STATS_TYPE_MAX KVM_STATS_TYPE_LOG_HIST
+
+#define KVM_STATS_UNIT_SHIFT 4
+#define KVM_STATS_UNIT_MASK (0xF << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_NONE (0x0 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BYTES (0x1 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_SECONDS (0x2 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_CYCLES (0x3 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_BOOLEAN (0x4 << KVM_STATS_UNIT_SHIFT)
+#define KVM_STATS_UNIT_MAX KVM_STATS_UNIT_BOOLEAN
+
+#define KVM_STATS_BASE_SHIFT 8
+#define KVM_STATS_BASE_MASK (0xF << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW10 (0x0 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_POW2 (0x1 << KVM_STATS_BASE_SHIFT)
+#define KVM_STATS_BASE_MAX KVM_STATS_BASE_POW2
+
+/**
+ * struct kvm_stats_desc - Descriptor of a KVM statistic.
+ * @flags: Annotations of the stats, like type, unit, etc.
+ * @exponent: Used together with @flags to determine the unit.
+ * @size: The number of data items for this stat.
+ * Every data item is of type __u64.
+ * @offset: The offset of the stats to the start of stat structure in
+ * structure kvm or kvm_vcpu.
+ * @bucket_size: A parameter value used for histogram stats. It is only used
+ * for linear histogram stats, specifying the size of the bucket.
+ * @name: The name string for the stats. Its size is indicated by the
+ * &kvm_stats_header->name_size.
+ */
+struct kvm_stats_desc {
+ __u32 flags;
+ __s16 exponent;
+ __u16 size;
+ __u32 offset;
+ __u32 bucket_size;
+ char name[];
+};
+
+#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
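
A short sketch of how userspace is expected to consume this layout (illustrative only, not part of the patch): read the header from offset 0 of the fd returned by KVM_GET_STATS_FD, then read num_desc descriptors of sizeof(struct kvm_stats_desc) + name_size bytes each from desc_offset. Error handling is reduced to returning NULL.

#include <linux/kvm.h>
#include <stdlib.h>
#include <unistd.h>

static struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
                                                     struct kvm_stats_header *hdr)
{
        struct kvm_stats_desc *desc;
        size_t one, total;

        if (pread(stats_fd, hdr, sizeof(*hdr), 0) != (ssize_t)sizeof(*hdr))
                return NULL;

        /* Each descriptor is followed by its name of hdr->name_size bytes,
         * so entry i lives at (char *)desc + i * one. */
        one = sizeof(*desc) + hdr->name_size;
        total = one * hdr->num_desc;

        desc = malloc(total);
        if (desc && pread(stats_fd, desc, total, hdr->desc_offset) != (ssize_t)total) {
                free(desc);
                desc = NULL;
        }
        return desc;
}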
+
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
+
+/* Available with KVM_CAP_S390_PROTECTED_DUMP */
+#define KVM_S390_PV_CPU_COMMAND _IOWR(KVMIO, 0xd0, struct kvm_pv_cmd)
+
+/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */
+#define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0)
+#define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1)
+
+/* Available with KVM_CAP_S390_ZPCI_OP */
+#define KVM_S390_ZPCI_OP _IOW(KVMIO, 0xd1, struct kvm_s390_zpci_op)
+
+struct kvm_s390_zpci_op {
+ /* in */
+ __u32 fh; /* target device */
+ __u8 op; /* operation to perform */
+ __u8 pad[3];
+ union {
+ /* for KVM_S390_ZPCIOP_REG_AEN */
+ struct {
+ __u64 ibv; /* Guest addr of interrupt bit vector */
+ __u64 sb; /* Guest addr of summary bit */
+ __u32 flags;
+ __u32 noi; /* Number of interrupts */
+ __u8 isc; /* Guest interrupt subclass */
+ __u8 sbo; /* Offset of guest summary bit vector */
+ __u16 pad;
+ } reg_aen;
+ __u64 reserved[8];
+ } u;
+};
+
+/* types for kvm_s390_zpci_op->op */
+#define KVM_S390_ZPCIOP_REG_AEN 0
+#define KVM_S390_ZPCIOP_DEREG_AEN 1
+
+/* flags for kvm_s390_zpci_op->u.reg_aen.flags */
+#define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0)
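
Usage is a single ioctl on the VM fd; the following sketch (illustrative only, all values being placeholders supplied by the caller) registers a guest adapter-event-notification area with host interrupt forwarding:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int zpci_reg_aen(int vm_fd, __u32 fh, __u64 ibv_gaddr, __u64 sb_gaddr,
                        __u32 noi, __u8 isc, __u8 sbo)
{
        struct kvm_s390_zpci_op op = {
                .fh = fh,
                .op = KVM_S390_ZPCIOP_REG_AEN,
                .u.reg_aen = {
                        .ibv   = ibv_gaddr,
                        .sb    = sb_gaddr,
                        .flags = KVM_S390_ZPCIOP_REGAEN_HOST,
                        .noi   = noi,
                        .isc   = isc,
                        .sbo   = sbo,
                },
        };

        return ioctl(vm_fd, KVM_S390_ZPCI_OP, &op);
}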
+
#endif /* __LINUX_KVM_H */
diff --git a/tools/include/uapi/linux/lirc.h b/tools/include/uapi/linux/lirc.h
deleted file mode 100644
index 45fcbf99d72e..000000000000
--- a/tools/include/uapi/linux/lirc.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- * lirc.h - linux infrared remote control header file
- * last modified 2010/07/13 by Jarod Wilson
- */
-
-#ifndef _LINUX_LIRC_H
-#define _LINUX_LIRC_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-
-#define PULSE_BIT 0x01000000
-#define PULSE_MASK 0x00FFFFFF
-
-#define LIRC_MODE2_SPACE 0x00000000
-#define LIRC_MODE2_PULSE 0x01000000
-#define LIRC_MODE2_FREQUENCY 0x02000000
-#define LIRC_MODE2_TIMEOUT 0x03000000
-
-#define LIRC_VALUE_MASK 0x00FFFFFF
-#define LIRC_MODE2_MASK 0xFF000000
-
-#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE)
-#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE)
-#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY)
-#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT)
-
-#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK)
-#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK)
-
-#define LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE)
-#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE)
-#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY)
-#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT)
-
-/* used heavily by lirc userspace */
-#define lirc_t int
-
-/*** lirc compatible hardware features ***/
-
-#define LIRC_MODE2SEND(x) (x)
-#define LIRC_SEND2MODE(x) (x)
-#define LIRC_MODE2REC(x) ((x) << 16)
-#define LIRC_REC2MODE(x) ((x) >> 16)
-
-#define LIRC_MODE_RAW 0x00000001
-#define LIRC_MODE_PULSE 0x00000002
-#define LIRC_MODE_MODE2 0x00000004
-#define LIRC_MODE_SCANCODE 0x00000008
-#define LIRC_MODE_LIRCCODE 0x00000010
-
-
-#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW)
-#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE)
-#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2)
-#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE)
-
-#define LIRC_CAN_SEND_MASK 0x0000003f
-
-#define LIRC_CAN_SET_SEND_CARRIER 0x00000100
-#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200
-#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400
-
-#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW)
-#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE)
-#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2)
-#define LIRC_CAN_REC_SCANCODE LIRC_MODE2REC(LIRC_MODE_SCANCODE)
-#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE)
-
-#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK)
-
-#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16)
-#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16)
-
-#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000
-#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000
-#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000
-#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000
-#define LIRC_CAN_SET_REC_FILTER 0x08000000
-
-#define LIRC_CAN_MEASURE_CARRIER 0x02000000
-#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000
-
-#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK)
-#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK)
-
-#define LIRC_CAN_NOTIFY_DECODE 0x01000000
-
-/*** IOCTL commands for lirc driver ***/
-
-#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32)
-
-#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32)
-#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32)
-#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32)
-
-#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32)
-#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32)
-
-/* code length in bits, currently only for LIRC_MODE_LIRCCODE */
-#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32)
-
-#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32)
-#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32)
-/* Note: these can reset the according pulse_width */
-#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32)
-#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32)
-#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32)
-#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32)
-
-/*
- * when a timeout != 0 is set the driver will send a
- * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is
- * never sent, timeout is disabled by default
- */
-#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32)
-
-/* 1 enables, 0 disables timeout reports in MODE2 */
-#define LIRC_SET_REC_TIMEOUT_REPORTS _IOW('i', 0x00000019, __u32)
-
-/*
- * if enabled from the next key press on the driver will send
- * LIRC_MODE2_FREQUENCY packets
- */
-#define LIRC_SET_MEASURE_CARRIER_MODE _IOW('i', 0x0000001d, __u32)
-
-/*
- * to set a range use LIRC_SET_REC_CARRIER_RANGE with the
- * lower bound first and later LIRC_SET_REC_CARRIER with the upper bound
- */
-#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32)
-
-#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32)
-
-/*
- * Return the recording timeout, which is either set by
- * the ioctl LIRC_SET_REC_TIMEOUT or by the kernel after setting the protocols.
- */
-#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32)
-
-/*
- * struct lirc_scancode - decoded scancode with protocol for use with
- * LIRC_MODE_SCANCODE
- *
- * @timestamp: Timestamp in nanoseconds using CLOCK_MONOTONIC when IR
- * was decoded.
- * @flags: should be 0 for transmit. When receiving scancodes,
- * LIRC_SCANCODE_FLAG_TOGGLE or LIRC_SCANCODE_FLAG_REPEAT can be set
- * depending on the protocol
- * @rc_proto: see enum rc_proto
- * @keycode: the translated keycode. Set to 0 for transmit.
- * @scancode: the scancode received or to be sent
- */
-struct lirc_scancode {
- __u64 timestamp;
- __u16 flags;
- __u16 rc_proto;
- __u32 keycode;
- __u64 scancode;
-};
-
-/* Set if the toggle bit of rc-5 or rc-6 is enabled */
-#define LIRC_SCANCODE_FLAG_TOGGLE 1
-/* Set if this is a nec or sanyo repeat */
-#define LIRC_SCANCODE_FLAG_REPEAT 2
-
-/**
- * enum rc_proto - the Remote Controller protocol
- *
- * @RC_PROTO_UNKNOWN: Protocol not known
- * @RC_PROTO_OTHER: Protocol known but proprietary
- * @RC_PROTO_RC5: Philips RC5 protocol
- * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol
- * @RC_PROTO_RC5_SZ: StreamZap variant of RC5
- * @RC_PROTO_JVC: JVC protocol
- * @RC_PROTO_SONY12: Sony 12 bit protocol
- * @RC_PROTO_SONY15: Sony 15 bit protocol
- * @RC_PROTO_SONY20: Sony 20 bit protocol
- * @RC_PROTO_NEC: NEC protocol
- * @RC_PROTO_NECX: Extended NEC protocol
- * @RC_PROTO_NEC32: NEC 32 bit protocol
- * @RC_PROTO_SANYO: Sanyo protocol
- * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard
- * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse
- * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol
- * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol
- * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol
- * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol
- * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol
- * @RC_PROTO_SHARP: Sharp protocol
- * @RC_PROTO_XMP: XMP protocol
- * @RC_PROTO_CEC: CEC protocol
- * @RC_PROTO_IMON: iMon Pad protocol
- * @RC_PROTO_RCMM12: RC-MM protocol 12 bits
- * @RC_PROTO_RCMM24: RC-MM protocol 24 bits
- * @RC_PROTO_RCMM32: RC-MM protocol 32 bits
- */
-enum rc_proto {
- RC_PROTO_UNKNOWN = 0,
- RC_PROTO_OTHER = 1,
- RC_PROTO_RC5 = 2,
- RC_PROTO_RC5X_20 = 3,
- RC_PROTO_RC5_SZ = 4,
- RC_PROTO_JVC = 5,
- RC_PROTO_SONY12 = 6,
- RC_PROTO_SONY15 = 7,
- RC_PROTO_SONY20 = 8,
- RC_PROTO_NEC = 9,
- RC_PROTO_NECX = 10,
- RC_PROTO_NEC32 = 11,
- RC_PROTO_SANYO = 12,
- RC_PROTO_MCIR2_KBD = 13,
- RC_PROTO_MCIR2_MSE = 14,
- RC_PROTO_RC6_0 = 15,
- RC_PROTO_RC6_6A_20 = 16,
- RC_PROTO_RC6_6A_24 = 17,
- RC_PROTO_RC6_6A_32 = 18,
- RC_PROTO_RC6_MCE = 19,
- RC_PROTO_SHARP = 20,
- RC_PROTO_XMP = 21,
- RC_PROTO_CEC = 22,
- RC_PROTO_IMON = 23,
- RC_PROTO_RCMM12 = 24,
- RC_PROTO_RCMM24 = 25,
- RC_PROTO_RCMM32 = 26,
-};
-
-#endif
diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h
index fc1a64c3447b..f55bc680b5b0 100644
--- a/tools/include/uapi/linux/mman.h
+++ b/tools/include/uapi/linux/mman.h
@@ -5,8 +5,9 @@
#include <asm/mman.h>
#include <asm-generic/hugetlb_encode.h>
-#define MREMAP_MAYMOVE 1
-#define MREMAP_FIXED 2
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+#define MREMAP_DONTUNMAP 4
#define OVERCOMMIT_GUESS 0
#define OVERCOMMIT_ALWAYS 1
@@ -26,6 +27,7 @@
#define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT
#define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK
+#define MAP_HUGE_16KB HUGETLB_FLAG_ENCODE_16KB
#define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB
#define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB
#define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB
diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h
index 96a0240f23fe..4d93967f8aea 100644
--- a/tools/include/uapi/linux/mount.h
+++ b/tools/include/uapi/linux/mount.h
@@ -1,6 +1,8 @@
#ifndef _UAPI_LINUX_MOUNT_H
#define _UAPI_LINUX_MOUNT_H
+#include <linux/types.h>
+
/*
* These are the fs-independent mount-flags: up to 32 flags are supported
*
@@ -16,6 +18,7 @@
#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
#define MS_DIRSYNC 128 /* Directory modifications are synchronous */
+#define MS_NOSYMFOLLOW 256 /* Do not follow symlinks */
#define MS_NOATIME 1024 /* Do not update access times. */
#define MS_NODIRATIME 2048 /* Do not update directory access times */
#define MS_BIND 4096
@@ -70,7 +73,8 @@
#define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */
#define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */
#define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */
-#define MOVE_MOUNT__MASK 0x00000077
+#define MOVE_MOUNT_SET_GROUP 0x00000100 /* Set sharing group instead */
+#define MOVE_MOUNT__MASK 0x00000177
/*
* fsopen() flags.
@@ -116,5 +120,20 @@ enum fsconfig_command {
#define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */
#define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */
#define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */
+#define MOUNT_ATTR_IDMAP 0x00100000 /* Idmap mount to @userns_fd in struct mount_attr. */
+#define MOUNT_ATTR_NOSYMFOLLOW 0x00200000 /* Do not follow symlinks */
+
+/*
+ * mount_setattr()
+ */
+struct mount_attr {
+ __u64 attr_set;
+ __u64 attr_clr;
+ __u64 propagation;
+ __u64 userns_fd;
+};
+
+/* List of all mount_attr versions. */
+#define MOUNT_ATTR_SIZE_VER0 32 /* sizeof first published struct */
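
A minimal usage sketch (illustrative only; mount_setattr() is assumed to have no libc wrapper here, hence the raw syscall, and __NR_mount_setattr needs Linux 5.12+ headers). MOUNT_ATTR_IDMAP works the same way but additionally wants .userns_fd and is normally applied to a detached mount obtained via open_tree(..., OPEN_TREE_CLONE):

#include <fcntl.h>
#include <linux/mount.h>
#include <sys/syscall.h>
#include <unistd.h>

static int mount_nosymfollow(const char *path)
{
        struct mount_attr attr = {
                .attr_set = MOUNT_ATTR_NOSYMFOLLOW,
        };

        /* the flags argument (0 here) may include AT_RECURSIVE and friends */
        return syscall(__NR_mount_setattr, AT_FDCWD, path, 0,
                       &attr, sizeof(attr));
}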
#endif /* _UAPI_LINUX_MOUNT_H */
diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h
new file mode 100644
index 000000000000..639524b59930
--- /dev/null
+++ b/tools/include/uapi/linux/netdev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/netdev.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_NETDEV_H
+#define _UAPI_LINUX_NETDEV_H
+
+#define NETDEV_FAMILY_NAME "netdev"
+#define NETDEV_FAMILY_VERSION 1
+
+/**
+ * enum netdev_xdp_act
+ * @NETDEV_XDP_ACT_BASIC: XDP features set supported by all drivers
+ * (XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX)
+ * @NETDEV_XDP_ACT_REDIRECT: The netdev supports XDP_REDIRECT
+ * @NETDEV_XDP_ACT_NDO_XMIT: This feature indicates whether the netdev implements
+ * the ndo_xdp_xmit callback.
+ * @NETDEV_XDP_ACT_XSK_ZEROCOPY: This feature indicates whether the netdev supports
+ * AF_XDP in zero copy mode.
+ * @NETDEV_XDP_ACT_HW_OFFLOAD: This feature indicates whether the netdev supports
+ * XDP hw offloading.
+ * @NETDEV_XDP_ACT_RX_SG: This feature indicates whether the netdev implements
+ * non-linear XDP buffer support in the driver napi callback.
+ * @NETDEV_XDP_ACT_NDO_XMIT_SG: This feature indicates whether the netdev implements
+ * non-linear XDP buffer support in the ndo_xdp_xmit callback.
+ */
+enum netdev_xdp_act {
+ NETDEV_XDP_ACT_BASIC = 1,
+ NETDEV_XDP_ACT_REDIRECT = 2,
+ NETDEV_XDP_ACT_NDO_XMIT = 4,
+ NETDEV_XDP_ACT_XSK_ZEROCOPY = 8,
+ NETDEV_XDP_ACT_HW_OFFLOAD = 16,
+ NETDEV_XDP_ACT_RX_SG = 32,
+ NETDEV_XDP_ACT_NDO_XMIT_SG = 64,
+
+ NETDEV_XDP_ACT_MASK = 127,
+};
+
+enum {
+ NETDEV_A_DEV_IFINDEX = 1,
+ NETDEV_A_DEV_PAD,
+ NETDEV_A_DEV_XDP_FEATURES,
+
+ __NETDEV_A_DEV_MAX,
+ NETDEV_A_DEV_MAX = (__NETDEV_A_DEV_MAX - 1)
+};
+
+enum {
+ NETDEV_CMD_DEV_GET = 1,
+ NETDEV_CMD_DEV_ADD_NTF,
+ NETDEV_CMD_DEV_DEL_NTF,
+ NETDEV_CMD_DEV_CHANGE_NTF,
+
+ __NETDEV_CMD_MAX,
+ NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1)
+};
+
+#define NETDEV_MCGRP_MGMT "mgmt"
+
+#endif /* _UAPI_LINUX_NETDEV_H */
diff --git a/tools/include/uapi/linux/openat2.h b/tools/include/uapi/linux/openat2.h
index 58b1eb711360..a5feb7604948 100644
--- a/tools/include/uapi/linux/openat2.h
+++ b/tools/include/uapi/linux/openat2.h
@@ -35,5 +35,9 @@ struct open_how {
#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
be scoped inside the dirfd
(similar to chroot(2)). */
+#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be
+ completed through cached lookup. May
+ return -EAGAIN if that's not
+ possible. */
#endif /* _UAPI_LINUX_OPENAT2_H */
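
RESOLVE_CACHED lends itself to a try-then-fallback pattern; a sketch (illustrative only, using a raw syscall since no libc wrapper is assumed) that retries without the flag when the kernel reports -EAGAIN:

#include <errno.h>
#include <fcntl.h>
#include <linux/openat2.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_prefer_cached(int dirfd, const char *name)
{
        struct open_how how;
        int fd;

        memset(&how, 0, sizeof(how));
        how.flags = O_RDONLY;
        how.resolve = RESOLVE_CACHED;

        fd = syscall(__NR_openat2, dirfd, name, &how, sizeof(how));
        if (fd < 0 && errno == EAGAIN) {
                /* not in cache: fall back to an ordinary, possibly blocking lookup */
                how.resolve = 0;
                fd = syscall(__NR_openat2, dirfd, name, &how, sizeof(how));
        }
        return fd;
}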
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 7b2d6fc9e6ed..39c6a250dd1b 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -38,6 +38,21 @@ enum perf_type_id {
};
/*
+ * attr.config layout for type PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
+ * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
+ * AA: hardware event ID
+ * EEEEEEEE: PMU type ID
+ * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
+ * BB: hardware cache ID
+ * CC: hardware cache op ID
+ * DD: hardware cache op result ID
+ * EEEEEEEE: PMU type ID
+ * If the PMU type ID is 0, PERF_TYPE_RAW will be applied.
+ */
+#define PERF_PMU_TYPE_SHIFT 32
+#define PERF_HW_EVENT_MASK 0xffffffff
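
For example, the extended config that asks a particular PMU (identified by the "type" value it exports in sysfs) for a generic hardware event can be composed as in this sketch (illustrative only):

#include <linux/perf_event.h>
#include <linux/types.h>

static __u64 hw_event_for_pmu(__u32 pmu_type, __u64 hw_event_id)
{
        return ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT) |
               (hw_event_id & PERF_HW_EVENT_MASK);
}

/* e.g.: attr.type   = PERF_TYPE_HARDWARE;
 *       attr.config = hw_event_for_pmu(pmu_type, PERF_COUNT_HW_CPU_CYCLES); */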
+
+/*
* Generalized performance event event_id types, used by the
* attr.event_id parameter of the sys_perf_event_open()
* syscall:
@@ -112,6 +127,7 @@ enum perf_sw_ids {
PERF_COUNT_SW_EMULATION_FAULTS = 8,
PERF_COUNT_SW_DUMMY = 9,
PERF_COUNT_SW_BPF_OUTPUT = 10,
+ PERF_COUNT_SW_CGROUP_SWITCHES = 11,
PERF_COUNT_SW_MAX, /* non-ABI */
};
@@ -143,12 +159,14 @@ enum perf_event_sample_format {
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_AUX = 1U << 20,
PERF_SAMPLE_CGROUP = 1U << 21,
+ PERF_SAMPLE_DATA_PAGE_SIZE = 1U << 22,
+ PERF_SAMPLE_CODE_PAGE_SIZE = 1U << 23,
+ PERF_SAMPLE_WEIGHT_STRUCT = 1U << 24,
- PERF_SAMPLE_MAX = 1U << 22, /* non-ABI */
-
- __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
+ PERF_SAMPLE_MAX = 1U << 25, /* non-ABI */
};
+#define PERF_SAMPLE_WEIGHT_TYPE (PERF_SAMPLE_WEIGHT | PERF_SAMPLE_WEIGHT_STRUCT)
/*
* values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
*
@@ -184,6 +202,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */
+ PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -213,6 +233,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
+ PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -231,9 +253,50 @@ enum {
PERF_BR_SYSRET = 8, /* syscall return */
PERF_BR_COND_CALL = 9, /* conditional function call */
PERF_BR_COND_RET = 10, /* conditional function return */
+ PERF_BR_ERET = 11, /* exception return */
+ PERF_BR_IRQ = 12, /* irq */
+ PERF_BR_SERROR = 13, /* system error */
+ PERF_BR_NO_TX = 14, /* not in transaction */
+ PERF_BR_EXTEND_ABI = 15, /* extend ABI */
PERF_BR_MAX,
};
+/*
+ * Common branch speculation outcome classification
+ */
+enum {
+ PERF_BR_SPEC_NA = 0, /* Not available */
+ PERF_BR_SPEC_WRONG_PATH = 1, /* Speculative but on wrong path */
+ PERF_BR_NON_SPEC_CORRECT_PATH = 2, /* Non-speculative but on correct path */
+ PERF_BR_SPEC_CORRECT_PATH = 3, /* Speculative and on correct path */
+ PERF_BR_SPEC_MAX,
+};
+
+enum {
+ PERF_BR_NEW_FAULT_ALGN = 0, /* Alignment fault */
+ PERF_BR_NEW_FAULT_DATA = 1, /* Data fault */
+ PERF_BR_NEW_FAULT_INST = 2, /* Inst fault */
+ PERF_BR_NEW_ARCH_1 = 3, /* Architecture specific */
+ PERF_BR_NEW_ARCH_2 = 4, /* Architecture specific */
+ PERF_BR_NEW_ARCH_3 = 5, /* Architecture specific */
+ PERF_BR_NEW_ARCH_4 = 6, /* Architecture specific */
+ PERF_BR_NEW_ARCH_5 = 7, /* Architecture specific */
+ PERF_BR_NEW_MAX,
+};
+
+enum {
+ PERF_BR_PRIV_UNKNOWN = 0,
+ PERF_BR_PRIV_USER = 1,
+ PERF_BR_PRIV_KERNEL = 2,
+ PERF_BR_PRIV_HV = 3,
+};
+
+#define PERF_BR_ARM64_FIQ PERF_BR_NEW_ARCH_1
+#define PERF_BR_ARM64_DEBUG_HALT PERF_BR_NEW_ARCH_2
+#define PERF_BR_ARM64_DEBUG_EXIT PERF_BR_NEW_ARCH_3
+#define PERF_BR_ARM64_DEBUG_INST PERF_BR_NEW_ARCH_4
+#define PERF_BR_ARM64_DEBUG_DATA PERF_BR_NEW_ARCH_5
+
#define PERF_SAMPLE_BRANCH_PLM_ALL \
(PERF_SAMPLE_BRANCH_USER|\
PERF_SAMPLE_BRANCH_KERNEL|\
@@ -279,6 +342,7 @@ enum {
* { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } && !PERF_FORMAT_GROUP
*
* { u64 nr;
@@ -286,6 +350,7 @@ enum {
* { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
* { u64 value;
* { u64 id; } && PERF_FORMAT_ID
+ * { u64 lost; } && PERF_FORMAT_LOST
* } cntr[nr];
* } && PERF_FORMAT_GROUP
* };
@@ -295,8 +360,9 @@ enum perf_event_read_format {
PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
PERF_FORMAT_GROUP = 1U << 3,
+ PERF_FORMAT_LOST = 1U << 4,
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+ PERF_FORMAT_MAX = 1U << 5, /* non-ABI */
};
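
A sketch of parsing one read() from a group leader opened with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_LOST (illustrative only; the local struct mirrors the documented layout for exactly that mask and caps the group at 16 members):

#include <linux/types.h>
#include <stdio.h>
#include <unistd.h>

struct group_read {
        __u64 nr;
        struct {
                __u64 value;
                __u64 id;
                __u64 lost;
        } cntr[16];
};

static void dump_group(int group_fd)
{
        struct group_read r;
        __u64 i;

        if (read(group_fd, &r, sizeof(r)) <= 0)
                return;

        for (i = 0; i < r.nr; i++)
                printf("id=%llu value=%llu lost=%llu\n",
                       (unsigned long long)r.cntr[i].id,
                       (unsigned long long)r.cntr[i].value,
                       (unsigned long long)r.cntr[i].lost);
}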
#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
@@ -307,6 +373,8 @@ enum perf_event_read_format {
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
+#define PERF_ATTR_SIZE_VER7 128 /* add: sig_data */
+#define PERF_ATTR_SIZE_VER8 136 /* add: config3 */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -383,7 +451,12 @@ struct perf_event_attr {
bpf_event : 1, /* include bpf events */
aux_output : 1, /* generate AUX records instead of events */
cgroup : 1, /* include cgroup events */
- __reserved_1 : 31;
+ text_poke : 1, /* include text poke events */
+ build_id : 1, /* use build id in mmap2 events */
+ inherit_thread : 1, /* children only inherit if cloned with CLONE_THREAD */
+ remove_on_exec : 1, /* event is removed from task on exec */
+ sigtrap : 1, /* send synchronous SIGTRAP on event */
+ __reserved_1 : 26;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -435,6 +508,16 @@ struct perf_event_attr {
__u16 __reserved_2;
__u32 aux_sample_size;
__u32 __reserved_3;
+
+ /*
+ * User provided data if sigtrap=1, passed back to user via
+ * siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+ * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
+ * truncated accordingly on 32 bit architectures.
+ */
+ __u64 sig_data;
+
+ __u64 config3; /* extension of config2 */
};
/*
@@ -455,7 +538,7 @@ struct perf_event_query_bpf {
/*
* User provided buffer to store program ids
*/
- __u32 ids[0];
+ __u32 ids[];
};
/*
@@ -532,9 +615,10 @@ struct perf_event_mmap_page {
cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
- cap_user_time : 1, /* The time_* fields are used */
+ cap_user_time : 1, /* The time_{shift,mult,offset} fields are used */
cap_user_time_zero : 1, /* The time_zero field is used */
- cap_____res : 59;
+ cap_user_time_short : 1, /* the time_{cycle,mask} fields are used */
+ cap_____res : 58;
};
};
@@ -593,13 +677,29 @@ struct perf_event_mmap_page {
* ((rem * time_mult) >> time_shift);
*/
__u64 time_zero;
+
__u32 size; /* Header size up to __reserved[] fields. */
+ __u32 __reserved_1;
+
+ /*
+ * If cap_usr_time_short, the hardware clock is less than 64bit wide
+ * and we must compute the 'cyc' value, as used by cap_usr_time, as:
+ *
+ * cyc = time_cycles + ((cyc - time_cycles) & time_mask)
+ *
+ * NOTE: this form is explicitly chosen such that cap_usr_time_short
+ * is a correction on top of cap_usr_time, and code that doesn't
+ * know about cap_usr_time_short still works under the assumption
+ * the counter doesn't wrap.
+ */
+ __u64 time_cycles;
+ __u64 time_mask;
/*
* Hole for extension of the self monitor capabilities
*/
- __u8 __reserved[118*8+4]; /* align to 1k. */
+ __u8 __reserved[116*8]; /* align to 1k. */
/*
* Control data for the mmap() data buffer.
@@ -639,6 +739,22 @@ struct perf_event_mmap_page {
__u64 aux_size;
};
+/*
+ * The current state of perf_event_header::misc bits usage:
+ * ('|' used bit, '-' unused bit)
+ *
+ * 012 CDEF
+ * |||---------||||
+ *
+ * Where:
+ * 0-2 CPUMODE_MASK
+ *
+ * C PROC_MAP_PARSE_TIMEOUT
+ * D MMAP_DATA / COMM_EXEC / FORK_EXEC / SWITCH_OUT
+ * E MMAP_BUILD_ID / EXACT_IP / SCHED_OUT_PREEMPT
+ * F (reserved)
+ */
+
#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
#define PERF_RECORD_MISC_KERNEL (1 << 0)
@@ -670,6 +786,7 @@ struct perf_event_mmap_page {
*
* PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events
+ * PERF_RECORD_MISC_MMAP_BUILD_ID - PERF_RECORD_MMAP2 event
*
*
* PERF_RECORD_MISC_EXACT_IP:
@@ -679,9 +796,13 @@ struct perf_event_mmap_page {
*
* PERF_RECORD_MISC_SWITCH_OUT_PREEMPT:
* Indicates that thread was preempted in TASK_RUNNING state.
+ *
+ * PERF_RECORD_MISC_MMAP_BUILD_ID:
+ * Indicates that mmap2 event carries build id data.
*/
#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14)
+#define PERF_RECORD_MISC_MMAP_BUILD_ID (1 << 14)
/*
* Reserve the last bit to indicate some extended misc field
*/
@@ -870,7 +991,24 @@ enum perf_event_type {
* char data[size];
* u64 dyn_size; } && PERF_SAMPLE_STACK_USER
*
- * { u64 weight; } && PERF_SAMPLE_WEIGHT
+ * { union perf_sample_weight
+ * {
+ * u64 full; && PERF_SAMPLE_WEIGHT
+ * #if defined(__LITTLE_ENDIAN_BITFIELD)
+ * struct {
+ * u32 var1_dw;
+ * u16 var2_w;
+ * u16 var3_w;
+ * } && PERF_SAMPLE_WEIGHT_STRUCT
+ * #elif defined(__BIG_ENDIAN_BITFIELD)
+ * struct {
+ * u16 var3_w;
+ * u16 var2_w;
+ * u32 var1_dw;
+ * } && PERF_SAMPLE_WEIGHT_STRUCT
+ * #endif
+ * }
+ * }
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
* { u64 transaction; } && PERF_SAMPLE_TRANSACTION
* { u64 abi; # enum perf_sample_regs_abi
@@ -878,6 +1016,8 @@ enum perf_event_type {
* { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
* { u64 size;
* char data[size]; } && PERF_SAMPLE_AUX
+ * { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE
+ * { u64 code_page_size;} && PERF_SAMPLE_CODE_PAGE_SIZE
* };
*/
PERF_RECORD_SAMPLE = 9,
@@ -893,10 +1033,20 @@ enum perf_event_type {
* u64 addr;
* u64 len;
* u64 pgoff;
- * u32 maj;
- * u32 min;
- * u64 ino;
- * u64 ino_generation;
+ * union {
+ * struct {
+ * u32 maj;
+ * u32 min;
+ * u64 ino;
+ * u64 ino_generation;
+ * };
+ * struct {
+ * u8 build_id_size;
+ * u8 __reserved_1;
+ * u16 __reserved_2;
+ * u8 build_id[20];
+ * };
+ * };
* u32 prot, flags;
* char filename[];
* struct sample_id sample_id;
@@ -1024,12 +1174,50 @@ enum perf_event_type {
*/
PERF_RECORD_CGROUP = 19,
+ /*
+ * Records changes to kernel text i.e. self-modified code. 'old_len' is
+ * the number of old bytes, 'new_len' is the number of new bytes. Either
+ * 'old_len' or 'new_len' may be zero to indicate, for example, the
+ * addition or removal of a trampoline. 'bytes' contains the old bytes
+ * followed immediately by the new bytes.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 addr;
+ * u16 old_len;
+ * u16 new_len;
+ * u8 bytes[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_TEXT_POKE = 20,
+
+ /*
+ * Data written to the AUX area by hardware due to aux_output, may need
+ * to be matched to the event by an architecture-specific hardware ID.
+ * This records the hardware ID, but requires sample_id to provide the
+ * event ID. e.g. Intel PT uses this record to disambiguate PEBS-via-PT
+ * records from multiple events.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u64 hw_id;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX_OUTPUT_HW_ID = 21,
+
PERF_RECORD_MAX, /* non-ABI */
};
enum perf_record_ksymbol_type {
PERF_RECORD_KSYMBOL_TYPE_UNKNOWN = 0,
PERF_RECORD_KSYMBOL_TYPE_BPF = 1,
+ /*
+ * Out of line code such as kprobe-replaced instructions or optimized
+ * kprobes or ftrace trampolines.
+ */
+ PERF_RECORD_KSYMBOL_TYPE_OOL = 2,
PERF_RECORD_KSYMBOL_TYPE_MAX /* non-ABI */
};
@@ -1060,10 +1248,15 @@ enum perf_callchain_context {
/**
* PERF_RECORD_AUX::flags bits
*/
-#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
-#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
-#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
+#define PERF_AUX_FLAG_COLLISION 0x08 /* sample collided with another */
+#define PERF_AUX_FLAG_PMU_FORMAT_TYPE_MASK 0xff00 /* PMU specific trace format type */
+
+/* CoreSight PMU AUX buffer formats */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_CORESIGHT 0x0000 /* Default for backward compatibility */
+#define PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW 0x0100 /* Raw format of the source */
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
@@ -1082,14 +1275,18 @@ union perf_mem_data_src {
mem_lvl_num:4, /* memory hierarchy level number */
mem_remote:1, /* remote */
mem_snoopx:2, /* snoop mode, ext */
- mem_rsvd:24;
+ mem_blk:3, /* access blocked */
+ mem_hops:3, /* hop level */
+ mem_rsvd:18;
};
};
#elif defined(__BIG_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
- __u64 mem_rsvd:24,
+ __u64 mem_rsvd:18,
+ mem_hops:3, /* hop level */
+ mem_blk:3, /* access blocked */
mem_snoopx:2, /* snoop mode, ext */
mem_remote:1, /* remote */
mem_lvl_num:4, /* memory hierarchy level number */
@@ -1112,7 +1309,13 @@ union perf_mem_data_src {
#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT 0
-/* memory hierarchy (memory level, hit or miss) */
+/*
+ * The PERF_MEM_LVL_* namespace is being deprecated to some extent in
+ * favour of the newer composite PERF_MEM_{LVLNUM_,REMOTE_,SNOOPX_} fields.
+ * The namespace is kept in order not to break existing ABIs.
+ *
+ * memory hierarchy (memory level, hit or miss)
+ */
#define PERF_MEM_LVL_NA 0x01 /* not available */
#define PERF_MEM_LVL_HIT 0x02 /* hit level */
#define PERF_MEM_LVL_MISS 0x04 /* miss level */
@@ -1136,7 +1339,10 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L2 0x02 /* L2 */
#define PERF_MEM_LVLNUM_L3 0x03 /* L3 */
#define PERF_MEM_LVLNUM_L4 0x04 /* L4 */
-/* 5-0xa available */
+/* 5-0x7 available */
+#define PERF_MEM_LVLNUM_UNC 0x08 /* Uncached */
+#define PERF_MEM_LVLNUM_CXL 0x09 /* CXL */
+#define PERF_MEM_LVLNUM_IO 0x0a /* I/O */
#define PERF_MEM_LVLNUM_ANY_CACHE 0x0b /* Any cache */
#define PERF_MEM_LVLNUM_LFB 0x0c /* LFB */
#define PERF_MEM_LVLNUM_RAM 0x0d /* RAM */
@@ -1154,8 +1360,8 @@ union perf_mem_data_src {
#define PERF_MEM_SNOOP_SHIFT 19
#define PERF_MEM_SNOOPX_FWD 0x01 /* forward */
-/* 1 free */
-#define PERF_MEM_SNOOPX_SHIFT 37
+#define PERF_MEM_SNOOPX_PEER 0x02 /* xfer from peer */
+#define PERF_MEM_SNOOPX_SHIFT 38
/* locked instruction */
#define PERF_MEM_LOCK_NA 0x01 /* not available */
@@ -1172,6 +1378,20 @@ union perf_mem_data_src {
#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT 26
+/* Access blocked */
+#define PERF_MEM_BLK_NA 0x01 /* not available */
+#define PERF_MEM_BLK_DATA 0x02 /* data could not be forwarded */
+#define PERF_MEM_BLK_ADDR 0x04 /* address conflict */
+#define PERF_MEM_BLK_SHIFT 40
+
+/* hop level */
+#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */
+#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
+#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
+#define PERF_MEM_HOPS_3 0x04 /* remote board */
+/* 5-7 available */
+#define PERF_MEM_HOPS_SHIFT 43
+
#define PERF_MEM_S(a, s) \
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
@@ -1190,6 +1410,7 @@ union perf_mem_data_src {
* abort: aborting a hardware transaction
* cycles: cycles from last branch (or 0 if not supported)
* type: branch type
+ * spec: branch speculation info (or 0 if not supported)
*/
struct perf_branch_entry {
__u64 from;
@@ -1200,7 +1421,29 @@ struct perf_branch_entry {
abort:1, /* transaction abort */
cycles:16, /* cycle count to last branch */
type:4, /* branch type */
- reserved:40;
+ spec:2, /* branch speculation info */
+ new_type:4, /* additional branch type */
+ priv:3, /* privilege level */
+ reserved:31;
+};
+
+union perf_sample_weight {
+ __u64 full;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ struct {
+ __u32 var1_dw;
+ __u16 var2_w;
+ __u16 var3_w;
+ };
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ struct {
+ __u16 var3_w;
+ __u16 var2_w;
+ __u32 var1_dw;
+ };
+#else
+#error "Unknown endianness"
+#endif
};
#endif /* _UAPI_LINUX_PERF_EVENT_H */
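
Consumers that request PERF_SAMPLE_WEIGHT_STRUCT can split the 64-bit value read from the sample stream through this union; a tiny sketch (illustrative only, the meaning of var2_w/var3_w is PMU-specific):

#include <linux/perf_event.h>
#include <stdio.h>

static void print_weight(__u64 raw)
{
        union perf_sample_weight w = { .full = raw };

        printf("var1=%u var2=%u var3=%u\n",
               w.var1_dw, (unsigned)w.var2_w, (unsigned)w.var3_w);
}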
diff --git a/tools/include/uapi/linux/pkt_cls.h b/tools/include/uapi/linux/pkt_cls.h
index 12153771396a..3faee0199a9b 100644
--- a/tools/include/uapi/linux/pkt_cls.h
+++ b/tools/include/uapi/linux/pkt_cls.h
@@ -180,7 +180,7 @@ struct tc_u32_sel {
short hoff;
__be32 hmask;
- struct tc_u32_key keys[0];
+ struct tc_u32_key keys[];
};
struct tc_u32_mark {
@@ -192,7 +192,7 @@ struct tc_u32_mark {
struct tc_u32_pcnt {
__u64 rcnt;
__u64 rhit;
- __u64 kcnts[0];
+ __u64 kcnts[];
};
/* Flags */
diff --git a/tools/include/uapi/linux/pkt_sched.h b/tools/include/uapi/linux/pkt_sched.h
index 0d18b1d1fbbc..5c903abc9fa5 100644
--- a/tools/include/uapi/linux/pkt_sched.h
+++ b/tools/include/uapi/linux/pkt_sched.h
@@ -414,6 +414,7 @@ enum {
TCA_HTB_RATE64,
TCA_HTB_CEIL64,
TCA_HTB_PAD,
+ TCA_HTB_OFFLOAD,
__TCA_HTB_MAX,
};
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index 07b4f8131e36..759b3f53e53f 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -213,6 +213,7 @@ struct prctl_mm_map {
/* Speculation control variants */
# define PR_SPEC_STORE_BYPASS 0
# define PR_SPEC_INDIRECT_BRANCH 1
+# define PR_SPEC_L1D_FLUSH 2
/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
# define PR_SPEC_NOT_AFFECTED 0
# define PR_SPEC_PRCTL (1UL << 0)
@@ -233,9 +234,62 @@ struct prctl_mm_map {
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+/* MTE tag check fault modes */
+# define PR_MTE_TCF_NONE 0UL
+# define PR_MTE_TCF_SYNC (1UL << 1)
+# define PR_MTE_TCF_ASYNC (1UL << 2)
+# define PR_MTE_TCF_MASK (PR_MTE_TCF_SYNC | PR_MTE_TCF_ASYNC)
+/* MTE tag inclusion mask */
+# define PR_MTE_TAG_SHIFT 3
+# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT)
+/* Unused; kept only for source compatibility */
+# define PR_MTE_TCF_SHIFT 1
/* Control reclaim behavior when allocating memory */
#define PR_SET_IO_FLUSHER 57
#define PR_GET_IO_FLUSHER 58
+/* Dispatch syscalls to a userspace handler */
+#define PR_SET_SYSCALL_USER_DISPATCH 59
+# define PR_SYS_DISPATCH_OFF 0
+# define PR_SYS_DISPATCH_ON 1
+/* The control values for the user space selector when dispatch is enabled */
+# define SYSCALL_DISPATCH_FILTER_ALLOW 0
+# define SYSCALL_DISPATCH_FILTER_BLOCK 1
+
+/* Set/get enabled arm64 pointer authentication keys */
+#define PR_PAC_SET_ENABLED_KEYS 60
+#define PR_PAC_GET_ENABLED_KEYS 61
+
+/* Request the scheduler to share a core */
+#define PR_SCHED_CORE 62
+# define PR_SCHED_CORE_GET 0
+# define PR_SCHED_CORE_CREATE 1 /* create unique core_sched cookie */
+# define PR_SCHED_CORE_SHARE_TO 2 /* push core_sched cookie to pid */
+# define PR_SCHED_CORE_SHARE_FROM 3 /* pull core_sched cookie to pid */
+# define PR_SCHED_CORE_MAX 4
+# define PR_SCHED_CORE_SCOPE_THREAD 0
+# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
+# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+
+/* arm64 Scalable Matrix Extension controls */
+/* Flag values must be in sync with SVE versions */
+#define PR_SME_SET_VL 63 /* set task vector length */
+# define PR_SME_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
+#define PR_SME_GET_VL 64 /* get task vector length */
+/* Bits common to PR_SME_SET_VL and PR_SME_GET_VL */
+# define PR_SME_VL_LEN_MASK 0xffff
+# define PR_SME_VL_INHERIT (1 << 17) /* inherit across exec */
+
+/* Memory deny write / execute */
+#define PR_SET_MDWE 65
+# define PR_MDWE_REFUSE_EXEC_GAIN 1
+
+#define PR_GET_MDWE 66
+
+#define PR_SET_VMA 0x53564d41
+# define PR_SET_VMA_ANON_NAME 0
+
+#define PR_SET_MEMORY_MERGE 67
+#define PR_GET_MEMORY_MERGE 68
#endif /* _LINUX_PRCTL_H */
diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h
index 2e3bc22c6f20..3bac0a8ceab2 100644
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -35,6 +35,7 @@
/* Flags for the clone3() syscall. */
#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
+#define CLONE_INTO_CGROUP 0x200000000ULL /* Clone into a specific cgroup given the right permissions. */
/*
* cloning flags intersect with CSIGNAL so can be used with unshare and clone3
@@ -81,6 +82,8 @@
* @set_tid_size: This defines the size of the array referenced
* in @set_tid. This cannot be larger than the
* kernel's limit of nested PID namespaces.
+ * @cgroup: If CLONE_INTO_CGROUP is specified set this to
+ * a file descriptor for the cgroup.
*
* The structure is versioned by size and thus extensible.
* New struct members must go at the end of the struct and
@@ -97,11 +100,13 @@ struct clone_args {
__aligned_u64 tls;
__aligned_u64 set_tid;
__aligned_u64 set_tid_size;
+ __aligned_u64 cgroup;
};
#endif
#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
+#define CLONE_ARGS_SIZE_VER2 88 /* sizeof third published struct */
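
With the new member, placing a child directly into a cgroup looks roughly like the sketch below (illustrative only; clone3() is assumed to have no libc wrapper, cgroup_fd is an fd on a cgroup2 directory, and the call returns 0 in the child and the child's pid in the parent, like fork()):

#include <linux/sched.h>
#include <linux/types.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static pid_t spawn_in_cgroup(int cgroup_fd)
{
        struct clone_args args = {
                .flags       = CLONE_INTO_CGROUP,
                .exit_signal = SIGCHLD,
                .cgroup      = (__u64)cgroup_fd,
        };

        /* CLONE_ARGS_SIZE_VER2 is the first size that carries .cgroup */
        return syscall(__NR_clone3, &args, CLONE_ARGS_SIZE_VER2);
}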
/*
* Scheduling policies
diff --git a/tools/include/uapi/linux/seg6.h b/tools/include/uapi/linux/seg6.h
index 286e8d6a8e98..f94baf154c47 100644
--- a/tools/include/uapi/linux/seg6.h
+++ b/tools/include/uapi/linux/seg6.h
@@ -30,7 +30,7 @@ struct ipv6_sr_hdr {
__u8 flags;
__u16 tag;
- struct in6_addr segments[0];
+ struct in6_addr segments[];
};
#define SR6_FLAG1_PROTECTED (1 << 6)
@@ -49,7 +49,7 @@ struct ipv6_sr_hdr {
struct sr6_tlv {
__u8 type;
__u8 len;
- __u8 data[0];
+ __u8 data[];
};
#endif
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h
index ad80a5c885d5..7cab2c65d3d7 100644
--- a/tools/include/uapi/linux/stat.h
+++ b/tools/include/uapi/linux/stat.h
@@ -123,7 +123,11 @@ struct statx {
__u32 stx_dev_major; /* ID of device containing file [uncond] */
__u32 stx_dev_minor;
/* 0x90 */
- __u64 __spare2[14]; /* Spare space for future expansion */
+ __u64 stx_mnt_id;
+ __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */
+ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */
+ /* 0xa0 */
+ __u64 __spare3[12]; /* Spare space for future expansion */
/* 0x100 */
};
@@ -148,9 +152,20 @@ struct statx {
#define STATX_BLOCKS 0x00000400U /* Want/got stx_blocks */
#define STATX_BASIC_STATS 0x000007ffU /* The stuff in the normal stat struct */
#define STATX_BTIME 0x00000800U /* Want/got stx_btime */
-#define STATX_ALL 0x00000fffU /* All currently supported flags */
+#define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */
+#define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */
+
#define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */
+#ifndef __KERNEL__
+/*
+ * This is deprecated, and shall remain the same value in the future. To avoid
+ * confusion please use the equivalent (STATX_BASIC_STATS | STATX_BTIME)
+ * instead.
+ */
+#define STATX_ALL 0x00000fffU
+#endif
+
/*
* Attributes to be found in stx_attributes and masked in stx_attributes_mask.
*
@@ -158,9 +173,12 @@ struct statx {
* be of use to ordinary userspace programs such as GUIs or ls rather than
* specialised tools.
*
- * Note that the flags marked [I] correspond to generic FS_IOC_FLAGS
+ * Note that the flags marked [I] correspond to the FS_IOC_SETFLAGS flags
* semantically. Where possible, the numerical value is picked to correspond
- * also.
+ * also. Note that the DAX attribute indicates that the file is in the CPU
+ * direct access state. It does not correspond to the per-inode flag that
+ * some filesystems support.
+ *
*/
#define STATX_ATTR_COMPRESSED 0x00000004 /* [I] File is compressed by the fs */
#define STATX_ATTR_IMMUTABLE 0x00000010 /* [I] File is marked immutable */
@@ -168,7 +186,9 @@ struct statx {
#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */
#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */
#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */
+#define STATX_ATTR_MOUNT_ROOT 0x00002000 /* Root of a mount */
#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
+#define STATX_ATTR_DAX 0x00200000 /* File is currently in DAX state */
#endif /* _UAPI_LINUX_STAT_H */
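
The new fields are only valid when the corresponding bit comes back in stx_mask, so callers check the mask after the call; a sketch (illustrative only, assuming libc headers new enough to expose statx(), STATX_MNT_ID and STATX_DIOALIGN):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

static void show_statx_extras(const char *path)
{
        struct statx stx;

        if (statx(AT_FDCWD, path, 0,
                  STATX_BASIC_STATS | STATX_MNT_ID | STATX_DIOALIGN, &stx))
                return;

        if (stx.stx_mask & STATX_MNT_ID)
                printf("mount id: %llu\n", (unsigned long long)stx.stx_mnt_id);
        if (stx.stx_mask & STATX_DIOALIGN)
                printf("dio mem align %u, dio offset align %u\n",
                       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
}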
diff --git a/tools/include/uapi/linux/stddef.h b/tools/include/uapi/linux/stddef.h
new file mode 100644
index 000000000000..bb6ea517efb5
--- /dev/null
+++ b/tools/include/uapi/linux/stddef.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_STDDEF_H
+#define _LINUX_STDDEF_H
+
+
+
+#ifndef __always_inline
+#define __always_inline __inline__
+#endif
+
+/**
+ * __struct_group() - Create a mirrored named and anonymous struct
+ *
+ * @TAG: The tag name for the named sub-struct (usually empty)
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical layout
+ * and size: one anonymous and one named. The former's members can be used
+ * normally without sub-struct naming, and the latter can be used to
+ * reason about the start, end, and size of the group of struct members.
+ * The named struct can also be explicitly tagged for later reuse, as well
+ * as both having struct attributes appended.
+ */
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+ }
+
+/**
+ * __DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define __DECLARE_FLEX_ARRAY(TYPE, NAME) \
+ struct { \
+ struct { } __empty_ ## NAME; \
+ TYPE NAME[]; \
+ }
+#endif
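
To make the two macros concrete, here is a small made-up example (illustrative only, kernel-style GNU C): the header fields stay directly accessible, the named mirror lets the group be handled as one object, and the flexible array can sit in a union next to a fixed-size view of the same bytes:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_msg {
        __struct_group(example_hdr, hdr, /* no attrs */,
                __u16 type;
                __u16 len;
        );
        union {
                __DECLARE_FLEX_ARRAY(__u8, payload);
                __u8 raw[64];
        };
};

static void fill_header(struct example_msg *m, __u16 type, __u16 len)
{
        /* members are reachable without the sub-struct name... */
        m->type = type;
        m->len = len;
        /* ...and the whole group can also be copied or sized as one unit */
        struct example_hdr copy = m->hdr;
        (void)copy;
}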
diff --git a/tools/include/uapi/linux/tc_act/tc_bpf.h b/tools/include/uapi/linux/tc_act/tc_bpf.h
index 653c4f94f76e..fe6c8f8f3e8c 100644
--- a/tools/include/uapi/linux/tc_act/tc_bpf.h
+++ b/tools/include/uapi/linux/tc_act/tc_bpf.h
@@ -1,11 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* Copyright (c) 2015 Jiri Pirko <jiri@resnulli.us>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_TC_BPF_H
diff --git a/tools/include/uapi/linux/tcp.h b/tools/include/uapi/linux/tcp.h
new file mode 100644
index 000000000000..13ceeb395eb8
--- /dev/null
+++ b/tools/include/uapi/linux/tcp.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the TCP protocol.
+ *
+ * Version: @(#)tcp.h 1.0.2 04/28/93
+ *
+ * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _UAPI_LINUX_TCP_H
+#define _UAPI_LINUX_TCP_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/socket.h>
+
+struct tcphdr {
+ __be16 source;
+ __be16 dest;
+ __be32 seq;
+ __be32 ack_seq;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u16 res1:4,
+ doff:4,
+ fin:1,
+ syn:1,
+ rst:1,
+ psh:1,
+ ack:1,
+ urg:1,
+ ece:1,
+ cwr:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u16 doff:4,
+ res1:4,
+ cwr:1,
+ ece:1,
+ urg:1,
+ ack:1,
+ psh:1,
+ rst:1,
+ syn:1,
+ fin:1;
+#else
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+ __be16 window;
+ __sum16 check;
+ __be16 urg_ptr;
+};
+
+/*
+ * The union cast uses a gcc extension to avoid aliasing problems
+ * (a union is compatible with any of its members).
+ * This means this part of the code is -fstrict-aliasing safe now.
+ */
+union tcp_word_hdr {
+ struct tcphdr hdr;
+ __be32 words[5];
+};
+
+#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
+
+enum {
+ TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
+ TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
+ TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
+ TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000),
+ TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000),
+ TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000),
+ TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000),
+ TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
+ TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
+ TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
+};
+
+/*
+ * TCP general constants
+ */
+#define TCP_MSS_DEFAULT 536U /* IPv4 (RFC1122, RFC2581) */
+#define TCP_MSS_DESIRED 1220U /* IPv6 (tunneled), EDNS0 (RFC3226) */
+
+/* TCP socket options */
+#define TCP_NODELAY 1 /* Turn off Nagle's algorithm. */
+#define TCP_MAXSEG 2 /* Limit MSS */
+#define TCP_CORK 3 /* Never send partially complete segments */
+#define TCP_KEEPIDLE 4 /* Start keepalives after this period */
+#define TCP_KEEPINTVL 5 /* Interval between keepalives */
+#define TCP_KEEPCNT 6 /* Number of keepalives before death */
+#define TCP_SYNCNT 7 /* Number of SYN retransmits */
+#define TCP_LINGER2 8 /* Life time of orphaned FIN-WAIT-2 state */
+#define TCP_DEFER_ACCEPT 9 /* Wake up listener only when data arrive */
+#define TCP_WINDOW_CLAMP 10 /* Bound advertised window */
+#define TCP_INFO 11 /* Information about this connection. */
+#define TCP_QUICKACK 12 /* Block/reenable quick acks */
+#define TCP_CONGESTION 13 /* Congestion control algorithm */
+#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */
+#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams */
+#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
+#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
+#define TCP_REPAIR 19 /* TCP sock is under repair right now */
+#define TCP_REPAIR_QUEUE 20
+#define TCP_QUEUE_SEQ 21
+#define TCP_REPAIR_OPTIONS 22
+#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
+#define TCP_TIMESTAMP 24
+#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
+#define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */
+#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */
+#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */
+#define TCP_REPAIR_WINDOW 29 /* Get/set window parameters */
+#define TCP_FASTOPEN_CONNECT 30 /* Attempt FastOpen with connect */
+#define TCP_ULP 31 /* Attach a ULP to a TCP connection */
+#define TCP_MD5SIG_EXT 32 /* TCP MD5 Signature with extensions */
+#define TCP_FASTOPEN_KEY 33 /* Set the key for Fast Open (cookie) */
+#define TCP_FASTOPEN_NO_COOKIE 34 /* Enable TFO without a TFO cookie */
+#define TCP_ZEROCOPY_RECEIVE 35
+#define TCP_INQ 36 /* Notify bytes available to read as a cmsg on read */
+
+#define TCP_CM_INQ TCP_INQ
+
+#define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */
+
+
+#define TCP_REPAIR_ON 1
+#define TCP_REPAIR_OFF 0
+#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
+
+struct tcp_repair_opt {
+ __u32 opt_code;
+ __u32 opt_val;
+};
+
+struct tcp_repair_window {
+ __u32 snd_wl1;
+ __u32 snd_wnd;
+ __u32 max_window;
+
+ __u32 rcv_wnd;
+ __u32 rcv_wup;
+};
+
+enum {
+ TCP_NO_QUEUE,
+ TCP_RECV_QUEUE,
+ TCP_SEND_QUEUE,
+ TCP_QUEUES_NR,
+};
+
+/* why fastopen failed from client perspective */
+enum tcp_fastopen_client_fail {
+ TFO_STATUS_UNSPEC, /* catch-all */
+ TFO_COOKIE_UNAVAILABLE, /* if not in TFO_CLIENT_NO_COOKIE mode */
+ TFO_DATA_NOT_ACKED, /* SYN-ACK did not ack SYN data */
+ TFO_SYN_RETRANSMITTED, /* SYN-ACK did not ack SYN data after timeout */
+};
+
+/* for TCP_INFO socket option */
+#define TCPI_OPT_TIMESTAMPS 1
+#define TCPI_OPT_SACK 2
+#define TCPI_OPT_WSCALE 4
+#define TCPI_OPT_ECN 8 /* ECN was negotiated at TCP session init */
+#define TCPI_OPT_ECN_SEEN 16 /* we received at least one packet with ECT */
+#define TCPI_OPT_SYN_DATA 32 /* SYN-ACK acked data in SYN sent or rcvd */
+
+/*
+ * Sender's congestion state indicating normal or abnormal situations
+ * in the last round of packets sent. The state is driven by the ACK
+ * information and timer events.
+ */
+enum tcp_ca_state {
+ /*
+ * Nothing bad has been observed recently.
+ * No apparent reordering, packet loss, or ECN marks.
+ */
+ TCP_CA_Open = 0,
+#define TCPF_CA_Open (1<<TCP_CA_Open)
+ /*
+ * The sender enters disordered state when it has received DUPACKs or
+ * SACKs in the last round of packets sent. This could be due to packet
+ * loss or reordering but needs further information to confirm packets
+ * have been lost.
+ */
+ TCP_CA_Disorder = 1,
+#define TCPF_CA_Disorder (1<<TCP_CA_Disorder)
+ /*
+ * The sender enters Congestion Window Reduction (CWR) state when it
+ * has received ACKs with ECN-ECE marks, or has experienced congestion
+ * or packet discard on the sender host (e.g. qdisc).
+ */
+ TCP_CA_CWR = 2,
+#define TCPF_CA_CWR (1<<TCP_CA_CWR)
+ /*
+ * The sender is in fast recovery and retransmitting lost packets,
+ * typically triggered by ACK events.
+ */
+ TCP_CA_Recovery = 3,
+#define TCPF_CA_Recovery (1<<TCP_CA_Recovery)
+ /*
+ * The sender is in loss recovery triggered by retransmission timeout.
+ */
+ TCP_CA_Loss = 4
+#define TCPF_CA_Loss (1<<TCP_CA_Loss)
+};
+
+struct tcp_info {
+ __u8 tcpi_state;
+ __u8 tcpi_ca_state;
+ __u8 tcpi_retransmits;
+ __u8 tcpi_probes;
+ __u8 tcpi_backoff;
+ __u8 tcpi_options;
+ __u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
+ __u8 tcpi_delivery_rate_app_limited:1, tcpi_fastopen_client_fail:2;
+
+ __u32 tcpi_rto;
+ __u32 tcpi_ato;
+ __u32 tcpi_snd_mss;
+ __u32 tcpi_rcv_mss;
+
+ __u32 tcpi_unacked;
+ __u32 tcpi_sacked;
+ __u32 tcpi_lost;
+ __u32 tcpi_retrans;
+ __u32 tcpi_fackets;
+
+ /* Times. */
+ __u32 tcpi_last_data_sent;
+ __u32 tcpi_last_ack_sent; /* Not remembered, sorry. */
+ __u32 tcpi_last_data_recv;
+ __u32 tcpi_last_ack_recv;
+
+ /* Metrics. */
+ __u32 tcpi_pmtu;
+ __u32 tcpi_rcv_ssthresh;
+ __u32 tcpi_rtt;
+ __u32 tcpi_rttvar;
+ __u32 tcpi_snd_ssthresh;
+ __u32 tcpi_snd_cwnd;
+ __u32 tcpi_advmss;
+ __u32 tcpi_reordering;
+
+ __u32 tcpi_rcv_rtt;
+ __u32 tcpi_rcv_space;
+
+ __u32 tcpi_total_retrans;
+
+ __u64 tcpi_pacing_rate;
+ __u64 tcpi_max_pacing_rate;
+ __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
+ __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
+ __u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */
+ __u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */
+
+ __u32 tcpi_notsent_bytes;
+ __u32 tcpi_min_rtt;
+ __u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */
+ __u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
+
+ __u64 tcpi_delivery_rate;
+
+ __u64 tcpi_busy_time; /* Time (usec) busy sending data */
+ __u64 tcpi_rwnd_limited; /* Time (usec) limited by receive window */
+ __u64 tcpi_sndbuf_limited; /* Time (usec) limited by send buffer */
+
+ __u32 tcpi_delivered;
+ __u32 tcpi_delivered_ce;
+
+ __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */
+ __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */
+ __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */
+ __u32 tcpi_reord_seen; /* reordering events seen */
+
+ __u32 tcpi_rcv_ooopack; /* Out-of-order packets received */
+
+ __u32 tcpi_snd_wnd; /* peer's advertised receive window after
+ * scaling (bytes)
+ */
+};
+
+/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
+enum {
+ TCP_NLA_PAD,
+ TCP_NLA_BUSY, /* Time (usec) busy sending data */
+ TCP_NLA_RWND_LIMITED, /* Time (usec) limited by receive window */
+ TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */
+ TCP_NLA_DATA_SEGS_OUT, /* Data pkts sent including retransmission */
+ TCP_NLA_TOTAL_RETRANS, /* Data pkts retransmitted */
+ TCP_NLA_PACING_RATE, /* Pacing rate in bytes per second */
+ TCP_NLA_DELIVERY_RATE, /* Delivery rate in bytes per second */
+ TCP_NLA_SND_CWND, /* Sending congestion window */
+ TCP_NLA_REORDERING, /* Reordering metric */
+ TCP_NLA_MIN_RTT, /* minimum RTT */
+ TCP_NLA_RECUR_RETRANS, /* Recurring retransmits for the current pkt */
+ TCP_NLA_DELIVERY_RATE_APP_LMT, /* delivery rate application limited ? */
+ TCP_NLA_SNDQ_SIZE, /* Data (bytes) pending in send queue */
+ TCP_NLA_CA_STATE, /* ca_state of socket */
+ TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */
+ TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */
+ TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */
+ TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */
+ TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */
+ TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
+ TCP_NLA_REORD_SEEN, /* reordering events seen */
+ TCP_NLA_SRTT, /* smoothed RTT in usecs */
+ TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
+ TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */
+ TCP_NLA_EDT, /* Earliest departure time (CLOCK_MONOTONIC) */
+};
+
+/* for TCP_MD5SIG socket option */
+#define TCP_MD5SIG_MAXKEYLEN 80
+
+/* tcp_md5sig extension flags for TCP_MD5SIG_EXT */
+#define TCP_MD5SIG_FLAG_PREFIX 0x1 /* address prefix length */
+#define TCP_MD5SIG_FLAG_IFINDEX 0x2 /* ifindex set */
+
+struct tcp_md5sig {
+ struct __kernel_sockaddr_storage tcpm_addr; /* address associated */
+ __u8 tcpm_flags; /* extension flags */
+ __u8 tcpm_prefixlen; /* address prefix */
+ __u16 tcpm_keylen; /* key length */
+ int tcpm_ifindex; /* device index for scope */
+ __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */
+};
+
+/* INET_DIAG_MD5SIG */
+struct tcp_diag_md5sig {
+ __u8 tcpm_family;
+ __u8 tcpm_prefixlen;
+ __u16 tcpm_keylen;
+ __be32 tcpm_addr[4];
+ __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN];
+};
+
+/* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) */
+
+#define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1
+struct tcp_zerocopy_receive {
+ __u64 address; /* in: address of mapping */
+ __u32 length; /* in/out: number of bytes to map/mapped */
+ __u32 recv_skip_hint; /* out: amount of bytes to skip */
+ __u32 inq; /* out: amount of bytes in read queue */
+ __s32 err; /* out: socket error */
+ __u64 copybuf_address; /* in: copybuf address (small reads) */
+ __s32 copybuf_len; /* in/out: copybuf bytes avail/used or error */
+ __u32 flags; /* in: flags */
+};
+#endif /* _UAPI_LINUX_TCP_H */
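
One receive cycle with this interface looks roughly like the sketch below (illustrative only): map is assumed to be a PROT_READ, MAP_SHARED mmap of the connected socket fd of map_len bytes, and whatever could not be mapped page-aligned still has to be consumed with a normal recv() (recv_skip_hint), which the sketch leaves to the caller:

#include <linux/tcp.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>

static ssize_t zerocopy_receive(int fd, void *map, size_t map_len)
{
        struct tcp_zerocopy_receive zc;
        socklen_t len = sizeof(zc);

        memset(&zc, 0, sizeof(zc));
        zc.address = (uintptr_t)map;
        zc.length  = map_len;

        if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &len))
                return -1;

        /* zc.length bytes are now mapped at `map`; zc.recv_skip_hint bytes
         * remain to be read with recv(). */
        return zc.length;
}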
diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h
index cf525cddeb94..74a84e02422a 100644
--- a/tools/include/uapi/linux/usbdevice_fs.h
+++ b/tools/include/uapi/linux/usbdevice_fs.h
@@ -131,7 +131,7 @@ struct usbdevfs_urb {
unsigned int signr; /* signal to be sent on completion,
or 0 if none should be sent. */
void __user *usercontext;
- struct usbdevfs_iso_packet_desc iso_frame_desc[0];
+ struct usbdevfs_iso_packet_desc iso_frame_desc[];
};
/* ioctls for talking directly to drivers */
@@ -176,7 +176,7 @@ struct usbdevfs_disconnect_claim {
struct usbdevfs_streams {
unsigned int num_streams; /* Not used by USBDEVFS_FREE_STREAMS */
unsigned int num_eps;
- unsigned char eps[0];
+ unsigned char eps[];
};
/*
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index 40d028eed645..92e1b700b51c 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/ioctl.h>
+#define VHOST_FILE_UNBIND -1
+
/* ioctls */
#define VHOST_VIRTIO 0xAF
@@ -87,9 +89,6 @@
/* Set or get vhost backend capability */
-/* Use message type V2 */
-#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
-
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
@@ -116,4 +115,77 @@
#define VHOST_VSOCK_SET_GUEST_CID _IOW(VHOST_VIRTIO, 0x60, __u64)
#define VHOST_VSOCK_SET_RUNNING _IOW(VHOST_VIRTIO, 0x61, int)
+/* VHOST_VDPA specific defines */
+
+/* Get the device id. The device ids follow the same definition of
+ * the device id defined in virtio-spec.
+ */
+#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
+/* Get and set the status. The status bits follow the same definition
+ * of the device status defined in virtio-spec.
+ */
+#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
+#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
+/* Get and set the device config. The device config follows the same
+ * definition of the device config defined in virtio-spec.
+ */
+#define VHOST_VDPA_GET_CONFIG _IOR(VHOST_VIRTIO, 0x73, \
+ struct vhost_vdpa_config)
+#define VHOST_VDPA_SET_CONFIG _IOW(VHOST_VIRTIO, 0x74, \
+ struct vhost_vdpa_config)
+/* Enable/disable the ring. */
+#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, \
+ struct vhost_vring_state)
+/* Get the max ring size. */
+#define VHOST_VDPA_GET_VRING_NUM _IOR(VHOST_VIRTIO, 0x76, __u16)
+
+/* Set event fd for config interrupt*/
+#define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int)
+
+/* Get the valid iova range */
+#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \
+ struct vhost_vdpa_iova_range)
+/* Get the config size */
+#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32)
+
+/* Get the count of all virtqueues */
+#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32)
+
+/* Get the number of virtqueue groups. */
+#define VHOST_VDPA_GET_GROUP_NUM _IOR(VHOST_VIRTIO, 0x81, __u32)
+
+/* Get the number of address spaces. */
+#define VHOST_VDPA_GET_AS_NUM _IOR(VHOST_VIRTIO, 0x7A, unsigned int)
+
+/* Get the group for a virtqueue: read index, write group in num,
+ * The virtqueue index is stored in the index field of
+ * vhost_vring_state. The group for this specific virtqueue is
+ * returned via num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \
+ struct vhost_vring_state)
+/* Set the ASID for a virtqueue group. The group index is stored in
+ * the index field of vhost_vring_state, the ASID associated with this
+ * group is stored at num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \
+ struct vhost_vring_state)
+
+/* Suspend a device so it does not process virtqueue requests anymore
+ *
+ * After the return of ioctl the device must preserve all the necessary state
+ * (the virtqueue vring base plus the possible device specific states) that is
+ * required for restoring in the future. The device must not change its
+ * configuration after that point.
+ */
+#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D)
+
+/* Resume a device so it can resume processing virtqueue requests
+ *
+ * After the return of this ioctl the device will have restored all the
+ * necessary states and it is fully operational to continue processing the
+ * virtqueue descriptors.
+ */
+#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
+
#endif
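
A rough sketch of exercising a few of the vhost-vdpa ioctls defined above. The device node path is an assumption, error handling is minimal, and the installed <linux/vhost.h> is assumed to be new enough to carry these definitions:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/vhost.h>

int main(void)
{
        /* Device node name is illustrative; real systems may differ. */
        int fd = open("/dev/vhost-vdpa-0", O_RDWR);
        __u32 device_id, config_size;
        __u16 vring_num;

        if (fd < 0) {
                perror("open");
                return 1;
        }

        if (ioctl(fd, VHOST_VDPA_GET_DEVICE_ID, &device_id) == 0)
                printf("virtio device id: %u\n", device_id);
        if (ioctl(fd, VHOST_VDPA_GET_VRING_NUM, &vring_num) == 0)
                printf("max ring size: %u\n", vring_num);
        if (ioctl(fd, VHOST_VDPA_GET_CONFIG_SIZE, &config_size) == 0)
                printf("config size: %u\n", config_size);

        /* Stop virtqueue processing, then resume it. */
        if (ioctl(fd, VHOST_VDPA_SUSPEND) == 0)
                ioctl(fd, VHOST_VDPA_RESUME);

        close(fd);
        return 0;
}
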
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index 535a7229e1d9..de6810e94abe 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -3,22 +3,6 @@
* Advanced Linux Sound Architecture - ALSA - Driver
* Copyright (c) 1994-2003 by Jaroslav Kysela <perex@perex.cz>,
* Abramo Bagnara <abramo@alsa-project.org>
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
#ifndef _UAPI__SOUND_ASOUND_H
@@ -56,8 +40,10 @@
* *
****************************************************************************/
+#define AES_IEC958_STATUS_SIZE 24
+
struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */
unsigned char subcode[147]; /* AES/IEC958 subcode bits */
unsigned char pad; /* nothing */
unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
@@ -202,6 +188,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
+/*
+ * For S32/U32 formats, 'msbits' hardware parameter is often used to deliver information about the
+ * available bit count in most significant bit. It's for the case of so-called 'left-justified' or
+ * `right-padding` sample which has less width than 32 bit.
+ */
#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
@@ -299,7 +290,8 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_HAS_LINK_ABSOLUTE_ATIME 0x02000000 /* report absolute hardware link audio time, not reset on startup */
#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
-
+#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */
+#define SNDRV_PCM_INFO_NO_REWINDS 0x20000000 /* hardware can only support monotonic changes of appl_ptr */
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
@@ -710,7 +702,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 2)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -736,12 +728,38 @@ struct snd_rawmidi_info {
unsigned char reserved[64]; /* reserved for future use */
};
+#define SNDRV_RAWMIDI_MODE_FRAMING_MASK (7<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_SHIFT 0
+#define SNDRV_RAWMIDI_MODE_FRAMING_NONE (0<<0)
+#define SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP (1<<0)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MASK (7<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_SHIFT 3
+#define SNDRV_RAWMIDI_MODE_CLOCK_NONE (0<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_REALTIME (1<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC (2<<3)
+#define SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW (3<<3)
+
+#define SNDRV_RAWMIDI_FRAMING_DATA_LENGTH 16
+
+struct snd_rawmidi_framing_tstamp {
+ /* For now, frame_type is always 0. Midi 2.0 is expected to add new
+ * types here. Applications are expected to skip unknown frame types.
+ */
+ __u8 frame_type;
+ __u8 length; /* number of valid bytes in data field */
+ __u8 reserved[2];
+ __u32 tv_nsec; /* nanoseconds */
+ __u64 tv_sec; /* seconds */
+ __u8 data[SNDRV_RAWMIDI_FRAMING_DATA_LENGTH];
+} __packed;
+
struct snd_rawmidi_params {
int stream;
size_t buffer_size; /* queue size in bytes */
size_t avail_min; /* minimum avail bytes for wakeup */
unsigned int no_active_sensing: 1; /* do not send active sensing byte in close() */
- unsigned char reserved[16]; /* reserved for future use */
+ unsigned int mode; /* For input data only, frame incoming data */
+ unsigned char reserved[12]; /* reserved for future use */
};
#ifndef __KERNEL__
@@ -757,6 +775,7 @@ struct snd_rawmidi_status {
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
@@ -974,7 +993,7 @@ typedef int __bitwise snd_ctl_elem_iface_t;
#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
-// (1 << 3) is unused.
+/* (1 << 3) is unused. */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
@@ -1071,7 +1090,7 @@ struct snd_ctl_elem_value {
struct snd_ctl_tlv {
unsigned int numid; /* control element numeric identification */
unsigned int length; /* in bytes aligned to 4 */
- unsigned int tlv[0]; /* first TLV */
+ unsigned int tlv[]; /* first TLV */
};
#define SNDRV_CTL_IOCTL_PVERSION _IOR('U', 0x00, int)
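
A rough, untested sketch of how the new raw MIDI timestamp framing might be used from user space, based only on the definitions above. The device node, the buffer sizes and the assumption that the application first announces protocol 2.0.2 through SNDRV_RAWMIDI_IOCTL_USER_PVERSION are illustrative:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sound/asound.h>

int main(void)
{
        int fd = open("/dev/snd/midiC0D0", O_RDONLY);   /* illustrative node */
        int ver = SNDRV_PROTOCOL_VERSION(2, 0, 2);
        struct snd_rawmidi_params params;
        struct snd_rawmidi_framing_tstamp frame;

        if (fd < 0)
                return 1;

        /* Tell the kernel we understand the 2.0.2 framing extension. */
        ioctl(fd, SNDRV_RAWMIDI_IOCTL_USER_PVERSION, &ver);

        memset(&params, 0, sizeof(params));
        params.stream = SNDRV_RAWMIDI_STREAM_INPUT;
        params.buffer_size = 4096;
        params.avail_min = sizeof(frame);
        params.mode = SNDRV_RAWMIDI_MODE_FRAMING_TSTAMP |
                      SNDRV_RAWMIDI_MODE_CLOCK_MONOTONIC_RAW;
        ioctl(fd, SNDRV_RAWMIDI_IOCTL_PARAMS, &params);

        /* Input now arrives as fixed-size, timestamped 32-byte frames. */
        while (read(fd, &frame, sizeof(frame)) == (ssize_t)sizeof(frame))
                printf("%llu.%09u: %u MIDI byte(s)\n",
                       (unsigned long long)frame.tv_sec, frame.tv_nsec,
                       frame.length);

        close(fd);
        return 0;
}
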
diff --git a/tools/include/vdso/bits.h b/tools/include/vdso/bits.h
new file mode 100644
index 000000000000..388b212088ea
--- /dev/null
+++ b/tools/include/vdso/bits.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_BITS_H
+#define __VDSO_BITS_H
+
+#include <vdso/const.h>
+
+#define BIT(nr) (UL(1) << (nr))
+#define BIT_ULL(nr) (ULL(1) << (nr))
+
+#endif /* __VDSO_BITS_H */
diff --git a/tools/include/vdso/const.h b/tools/include/vdso/const.h
new file mode 100644
index 000000000000..94b385ad438d
--- /dev/null
+++ b/tools/include/vdso/const.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CONST_H
+#define __VDSO_CONST_H
+
+#include <uapi/linux/const.h>
+
+#define UL(x) (_UL(x))
+#define ULL(x) (_ULL(x))
+
+#endif /* __VDSO_CONST_H */
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
index 0f257139b003..7703f0118385 100644
--- a/tools/io_uring/io_uring-bench.c
+++ b/tools/io_uring/io_uring-bench.c
@@ -130,7 +130,7 @@ static int io_uring_register_files(struct submitter *s)
s->nr_files);
}
-static int gettid(void)
+static int lk_gettid(void)
{
return syscall(__NR_gettid);
}
@@ -281,7 +281,7 @@ static void *submitter_fn(void *data)
struct io_sq_ring *ring = &s->sq_ring;
int ret, prepped;
- printf("submitter=%d\n", gettid());
+ printf("submitter=%d\n", lk_gettid());
srand48_r(pthread_self(), &s->rand);
diff --git a/tools/io_uring/io_uring-cp.c b/tools/io_uring/io_uring-cp.c
index 81461813ec62..d9bd6f5f8f46 100644
--- a/tools/io_uring/io_uring-cp.c
+++ b/tools/io_uring/io_uring-cp.c
@@ -131,8 +131,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
writes = reads = offset = 0;
while (insize || write_left) {
- unsigned long had_reads;
- int got_comp;
+ int had_reads, got_comp;
/*
* Queue up as many reads as we can
@@ -174,8 +173,13 @@ static int copy_file(struct io_uring *ring, off_t insize)
if (!got_comp) {
ret = io_uring_wait_cqe(ring, &cqe);
got_comp = 1;
- } else
+ } else {
ret = io_uring_peek_cqe(ring, &cqe);
+ if (ret == -EAGAIN) {
+ cqe = NULL;
+ ret = 0;
+ }
+ }
if (ret < 0) {
fprintf(stderr, "io_uring_peek_cqe: %s\n",
strerror(-ret));
@@ -194,7 +198,7 @@ static int copy_file(struct io_uring *ring, off_t insize)
fprintf(stderr, "cqe failed: %s\n",
strerror(-cqe->res));
return 1;
- } else if ((size_t) cqe->res != data->iov.iov_len) {
+ } else if (cqe->res != data->iov.iov_len) {
/* Short read/write, adjust and requeue */
data->iov.iov_base += cqe->res;
data->iov.iov_len -= cqe->res;
@@ -221,6 +225,25 @@ static int copy_file(struct io_uring *ring, off_t insize)
}
}
+ /* wait out pending writes */
+ while (writes) {
+ struct io_data *data;
+
+ ret = io_uring_wait_cqe(ring, &cqe);
+ if (ret) {
+ fprintf(stderr, "wait_cqe=%d\n", ret);
+ return 1;
+ }
+ if (cqe->res < 0) {
+ fprintf(stderr, "write res=%d\n", cqe->res);
+ return 1;
+ }
+ data = io_uring_cqe_get_data(cqe);
+ free(data);
+ writes--;
+ io_uring_cqe_seen(ring, cqe);
+ }
+
return 0;
}
diff --git a/tools/io_uring/liburing.h b/tools/io_uring/liburing.h
index 5f305c86b892..28a837b6069d 100644
--- a/tools/io_uring/liburing.h
+++ b/tools/io_uring/liburing.h
@@ -10,6 +10,7 @@ extern "C" {
#include <string.h>
#include "../../include/uapi/linux/io_uring.h"
#include <inttypes.h>
+#include <linux/swab.h>
#include "barrier.h"
/*
@@ -145,11 +146,14 @@ static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
}
static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
- short poll_mask)
+ unsigned poll_mask)
{
memset(sqe, 0, sizeof(*sqe));
sqe->opcode = IORING_OP_POLL_ADD;
sqe->fd = fd;
+#if __BYTE_ORDER == __BIG_ENDIAN
+ poll_mask = __swahw32(poll_mask);
+#endif
sqe->poll_events = poll_mask;
}
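
A small sketch of the adjusted poll helper. io_uring_get_sqe() and io_uring_submit() are other helpers declared in this same tools/io_uring/liburing.h, and an initialized ring plus a pollable descriptor are assumed:

#include <poll.h>
#include "liburing.h"

/* Queue a POLLIN poll request for 'fd' on an initialized ring. The helper
 * now takes the mask as 'unsigned' and byte-swaps it on big-endian hosts. */
static int queue_pollin(struct io_uring *ring, int fd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -1;

        io_uring_prep_poll_add(sqe, fd, POLLIN);
        return io_uring_submit(ring);
}
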
diff --git a/tools/kvm/kvm_stat/kvm_stat b/tools/kvm/kvm_stat/kvm_stat
index e83fc8e868f4..15bf00e79e3f 100755
--- a/tools/kvm/kvm_stat/kvm_stat
+++ b/tools/kvm/kvm_stat/kvm_stat
@@ -32,6 +32,7 @@ import resource
import struct
import re
import subprocess
+import signal
from collections import defaultdict, namedtuple
from functools import reduce
from datetime import datetime
@@ -40,11 +41,14 @@ VMX_EXIT_REASONS = {
'EXCEPTION_NMI': 0,
'EXTERNAL_INTERRUPT': 1,
'TRIPLE_FAULT': 2,
- 'PENDING_INTERRUPT': 7,
+ 'INIT_SIGNAL': 3,
+ 'SIPI_SIGNAL': 4,
+ 'INTERRUPT_WINDOW': 7,
'NMI_WINDOW': 8,
'TASK_SWITCH': 9,
'CPUID': 10,
'HLT': 12,
+ 'INVD': 13,
'INVLPG': 14,
'RDPMC': 15,
'RDTSC': 16,
@@ -64,26 +68,48 @@ VMX_EXIT_REASONS = {
'MSR_READ': 31,
'MSR_WRITE': 32,
'INVALID_STATE': 33,
+ 'MSR_LOAD_FAIL': 34,
'MWAIT_INSTRUCTION': 36,
+ 'MONITOR_TRAP_FLAG': 37,
'MONITOR_INSTRUCTION': 39,
'PAUSE_INSTRUCTION': 40,
'MCE_DURING_VMENTRY': 41,
'TPR_BELOW_THRESHOLD': 43,
'APIC_ACCESS': 44,
+ 'EOI_INDUCED': 45,
+ 'GDTR_IDTR': 46,
+ 'LDTR_TR': 47,
'EPT_VIOLATION': 48,
'EPT_MISCONFIG': 49,
+ 'INVEPT': 50,
+ 'RDTSCP': 51,
+ 'PREEMPTION_TIMER': 52,
+ 'INVVPID': 53,
'WBINVD': 54,
'XSETBV': 55,
'APIC_WRITE': 56,
+ 'RDRAND': 57,
'INVPCID': 58,
+ 'VMFUNC': 59,
+ 'ENCLS': 60,
+ 'RDSEED': 61,
+ 'PML_FULL': 62,
+ 'XSAVES': 63,
+ 'XRSTORS': 64,
+ 'UMWAIT': 67,
+ 'TPAUSE': 68,
+ 'BUS_LOCK': 74,
+ 'NOTIFY': 75,
}
SVM_EXIT_REASONS = {
'READ_CR0': 0x000,
+ 'READ_CR2': 0x002,
'READ_CR3': 0x003,
'READ_CR4': 0x004,
'READ_CR8': 0x008,
'WRITE_CR0': 0x010,
+ 'WRITE_CR2': 0x012,
'WRITE_CR3': 0x013,
'WRITE_CR4': 0x014,
'WRITE_CR8': 0x018,
@@ -104,6 +130,7 @@ SVM_EXIT_REASONS = {
'WRITE_DR6': 0x036,
'WRITE_DR7': 0x037,
'EXCP_BASE': 0x040,
+ 'LAST_EXCP': 0x05f,
'INTR': 0x060,
'NMI': 0x061,
'SMI': 0x062,
@@ -150,21 +177,45 @@ SVM_EXIT_REASONS = {
'MWAIT': 0x08b,
'MWAIT_COND': 0x08c,
'XSETBV': 0x08d,
+ 'RDPRU': 0x08e,
+ 'EFER_WRITE_TRAP': 0x08f,
+ 'CR0_WRITE_TRAP': 0x090,
+ 'CR1_WRITE_TRAP': 0x091,
+ 'CR2_WRITE_TRAP': 0x092,
+ 'CR3_WRITE_TRAP': 0x093,
+ 'CR4_WRITE_TRAP': 0x094,
+ 'CR5_WRITE_TRAP': 0x095,
+ 'CR6_WRITE_TRAP': 0x096,
+ 'CR7_WRITE_TRAP': 0x097,
+ 'CR8_WRITE_TRAP': 0x098,
+ 'CR9_WRITE_TRAP': 0x099,
+ 'CR10_WRITE_TRAP': 0x09a,
+ 'CR11_WRITE_TRAP': 0x09b,
+ 'CR12_WRITE_TRAP': 0x09c,
+ 'CR13_WRITE_TRAP': 0x09d,
+ 'CR14_WRITE_TRAP': 0x09e,
+ 'CR15_WRITE_TRAP': 0x09f,
+ 'INVPCID': 0x0a2,
'NPF': 0x400,
+ 'AVIC_INCOMPLETE_IPI': 0x401,
+ 'AVIC_UNACCELERATED_ACCESS': 0x402,
+ 'VMGEXIT': 0x403,
}
-# EC definition of HSR (from arch/arm64/include/asm/kvm_arm.h)
+# EC definition of HSR (from arch/arm64/include/asm/esr.h)
AARCH64_EXIT_REASONS = {
'UNKNOWN': 0x00,
- 'WFI': 0x01,
+ 'WFx': 0x01,
'CP15_32': 0x03,
'CP15_64': 0x04,
'CP14_MR': 0x05,
'CP14_LS': 0x06,
'FP_ASIMD': 0x07,
'CP10_ID': 0x08,
+ 'PAC': 0x09,
'CP14_64': 0x0C,
- 'ILL_ISS': 0x0E,
+ 'BTI': 0x0D,
+ 'ILL': 0x0E,
'SVC32': 0x11,
'HVC32': 0x12,
'SMC32': 0x13,
@@ -172,21 +223,26 @@ AARCH64_EXIT_REASONS = {
'HVC64': 0x16,
'SMC64': 0x17,
'SYS64': 0x18,
- 'IABT': 0x20,
- 'IABT_HYP': 0x21,
+ 'SVE': 0x19,
+ 'ERET': 0x1A,
+ 'FPAC': 0x1C,
+ 'SME': 0x1D,
+ 'IMP_DEF': 0x1F,
+ 'IABT_LOW': 0x20,
+ 'IABT_CUR': 0x21,
'PC_ALIGN': 0x22,
- 'DABT': 0x24,
- 'DABT_HYP': 0x25,
+ 'DABT_LOW': 0x24,
+ 'DABT_CUR': 0x25,
'SP_ALIGN': 0x26,
'FP_EXC32': 0x28,
'FP_EXC64': 0x2C,
'SERROR': 0x2F,
- 'BREAKPT': 0x30,
- 'BREAKPT_HYP': 0x31,
- 'SOFTSTP': 0x32,
- 'SOFTSTP_HYP': 0x33,
- 'WATCHPT': 0x34,
- 'WATCHPT_HYP': 0x35,
+ 'BREAKPT_LOW': 0x30,
+ 'BREAKPT_CUR': 0x31,
+ 'SOFTSTP_LOW': 0x32,
+ 'SOFTSTP_CUR': 0x33,
+ 'WATCHPT_LOW': 0x34,
+ 'WATCHPT_CUR': 0x35,
'BKPT32': 0x38,
'VECTOR32': 0x3A,
'BRK64': 0x3C,
@@ -219,6 +275,19 @@ USERSPACE_EXIT_REASONS = {
'S390_TSCH': 22,
'EPR': 23,
'SYSTEM_EVENT': 24,
+ 'S390_STSI': 25,
+ 'IOAPIC_EOI': 26,
+ 'HYPERV': 27,
+ 'ARM_NISV': 28,
+ 'X86_RDMSR': 29,
+ 'X86_WRMSR': 30,
+ 'DIRTY_RING_FULL': 31,
+ 'AP_RESET_HOLD': 32,
+ 'X86_BUS_LOCK': 33,
+ 'XEN': 34,
+ 'RISCV_SBI': 35,
+ 'RISCV_CSR': 36,
+ 'NOTIFY': 37,
}
IOCTL_NUMBERS = {
@@ -228,6 +297,8 @@ IOCTL_NUMBERS = {
'RESET': 0x00002403,
}
+signal_received = False
+
ENCODING = locale.getpreferredencoding(False)
TRACE_FILTER = re.compile(r'^[^\(]*$')
@@ -556,7 +627,7 @@ class TracepointProvider(Provider):
name)'.
All available events have directories under
- /sys/kernel/debug/tracing/events/ which export information
+ /sys/kernel/tracing/events/ which export information
about the specific event. Therefore, listing the dirs gives us
a list of all available events.
@@ -739,7 +810,11 @@ class DebugfsProvider(Provider):
The fields are all available KVM debugfs files
"""
- return self.walkdir(PATH_DEBUGFS_KVM)[2]
+ exempt_list = ['halt_poll_fail_ns', 'halt_poll_success_ns', 'halt_wait_ns']
+ fields = [field for field in self.walkdir(PATH_DEBUGFS_KVM)[2]
+ if field not in exempt_list]
+
+ return fields
def update_fields(self, fields_filter):
"""Refresh fields, applying fields_filter"""
@@ -1500,8 +1575,7 @@ class StdFormat(object):
def get_banner(self):
return self._banner
- @staticmethod
- def get_statline(keys, s):
+ def get_statline(self, keys, s):
res = ''
for key in keys:
res += ' %9d' % s[key].delta
@@ -1517,27 +1591,71 @@ class CSVFormat(object):
def get_banner(self):
return self._banner
- @staticmethod
- def get_statline(keys, s):
+ def get_statline(self, keys, s):
return reduce(lambda res, key: "{},{!s}".format(res, s[key].delta),
keys, '')
def log(stats, opts, frmt, keys):
"""Prints statistics as reiterating key block, multiple value blocks."""
+ global signal_received
line = 0
banner_repeat = 20
+ f = None
+
+ def do_banner(opts):
+ nonlocal f
+ if opts.log_to_file:
+ if not f:
+ try:
+ f = open(opts.log_to_file, 'a')
+ except (IOError, OSError):
+ sys.exit("Error: Could not open file: %s" %
+ opts.log_to_file)
+ if isinstance(frmt, CSVFormat) and f.tell() != 0:
+ return
+ print(frmt.get_banner(), file=f or sys.stdout)
+
+ def do_statline(opts, values):
+ statline = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + \
+ frmt.get_statline(keys, values)
+ print(statline, file=f or sys.stdout)
+
+ do_banner(opts)
+ banner_printed = True
while True:
try:
time.sleep(opts.set_delay)
- if line % banner_repeat == 0:
- print(frmt.get_banner())
- print(datetime.now().strftime("%Y-%m-%d %H:%M:%S") +
- frmt.get_statline(keys, stats.get()))
- line += 1
+ if signal_received:
+ banner_printed = True
+ line = 0
+ f.close()
+ do_banner(opts)
+ signal_received = False
+ if (line % banner_repeat == 0 and not banner_printed and
+ not (opts.log_to_file and isinstance(frmt, CSVFormat))):
+ do_banner(opts)
+ banner_printed = True
+ values = stats.get()
+ if (not opts.skip_zero_records or
+ any(values[k].delta != 0 for k in keys)):
+ do_statline(opts, values)
+ line += 1
+ banner_printed = False
except KeyboardInterrupt:
break
+ if opts.log_to_file:
+ f.close()
+
+
+def handle_signal(sig, frame):
+ global signal_received
+
+ signal_received = True
+
+ return
+
def is_delay_valid(delay):
"""Verify delay is in valid value range."""
@@ -1596,7 +1714,8 @@ Press any other key to refresh statistics immediately.
.format(values))
if len(pids) > 1:
sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
- ' to specify the desired pid'.format(" ".join(pids)))
+ ' to specify the desired pid'
+ .format(" ".join(map(str, pids))))
namespace.pid = pids[0]
argparser = argparse.ArgumentParser(description=description_text,
@@ -1610,7 +1729,7 @@ Press any other key to refresh statistics immediately.
argparser.add_argument('-c', '--csv',
action='store_true',
default=False,
- help='log in csv format - requires option -l/--log',
+ help='log in csv format - requires option -l/-L',
)
argparser.add_argument('-d', '--debugfs',
action='store_true',
@@ -1638,6 +1757,11 @@ Press any other key to refresh statistics immediately.
default=False,
help='run in logging mode (like vmstat)',
)
+ argparser.add_argument('-L', '--log-to-file',
+ type=str,
+ metavar='FILE',
+ help="like '--log', but logging to a file"
+ )
argparser.add_argument('-p', '--pid',
type=int,
default=0,
@@ -1655,9 +1779,16 @@ Press any other key to refresh statistics immediately.
default=False,
help='retrieve statistics from tracepoints',
)
+ argparser.add_argument('-z', '--skip-zero-records',
+ action='store_true',
+ default=False,
+ help='omit records with all zeros in logging mode',
+ )
options = argparser.parse_args()
- if options.csv and not options.log:
+ if options.csv and not (options.log or options.log_to_file):
sys.exit('Error: Option -c/--csv requires -l/--log')
+ if options.skip_zero_records and not (options.log or options.log_to_file):
+ sys.exit('Error: Option -z/--skip-zero-records requires -l/-L')
try:
# verify that we were passed a valid regex up front
re.compile(options.fields)
@@ -1693,7 +1824,7 @@ def assign_globals():
debugfs = ''
for line in open('/proc/mounts'):
- if line.split(' ')[0] == 'debugfs':
+ if line.split(' ')[2] == 'debugfs':
debugfs = line.split(' ')[1]
break
if debugfs == '':
@@ -1737,7 +1868,9 @@ def main():
sys.stdout.write(' ' + '\n '.join(sorted(set(event_list))) + '\n')
sys.exit(0)
- if options.log:
+ if options.log or options.log_to_file:
+ if options.log_to_file:
+ signal.signal(signal.SIGHUP, handle_signal)
keys = sorted(stats.get().keys())
if options.csv:
frmt = CSVFormat(keys)
diff --git a/tools/kvm/kvm_stat/kvm_stat.service b/tools/kvm/kvm_stat/kvm_stat.service
new file mode 100644
index 000000000000..8f13b843d5b4
--- /dev/null
+++ b/tools/kvm/kvm_stat/kvm_stat.service
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+[Unit]
+Description=Service that logs KVM kernel module trace events
+Before=qemu-kvm.service
+
+[Service]
+Type=simple
+ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
+ExecReload=/bin/kill -HUP $MAINPID
+Restart=always
+RestartSec=60s
+SyslogIdentifier=kvm_stat
+SyslogLevel=debug
+
+[Install]
+WantedBy=multi-user.target
diff --git a/tools/kvm/kvm_stat/kvm_stat.txt b/tools/kvm/kvm_stat/kvm_stat.txt
index a97ded2aedad..3a9f2037bd23 100644
--- a/tools/kvm/kvm_stat/kvm_stat.txt
+++ b/tools/kvm/kvm_stat/kvm_stat.txt
@@ -65,8 +65,10 @@ OPTIONS
run in batch mode for one second
-c::
---csv=<file>::
- log in csv format - requires option -l/--log
+--csv::
+ log in csv format. Requires option -l/--log or -L/--log-to-file.
+ When used with option -L/--log-to-file, the header is only ever
+ written to the start of the file to preserve the format.
-d::
--debugfs::
@@ -92,6 +94,11 @@ OPTIONS
--log::
run in logging mode (like vmstat)
+
+-L<file>::
+--log-to-file=<file>::
+ like -l/--log, but logging to a file. Appends to existing files.
+
-p<pid>::
--pid=<pid>::
limit statistics to one virtual machine (pid)
@@ -104,6 +111,10 @@ OPTIONS
--tracepoints::
retrieve statistics from tracepoints
+-z::
+--skip-zero-records::
+ omit records with all zeros in logging mode
+
SEE ALSO
--------
'perf'(1), 'trace-cmd'(1)
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index a13e9c7f1fc5..044860ac1ed1 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -15,6 +15,16 @@ LD ?= $(CROSS_COMPILE)ld
MAKEFLAGS += --no-print-directory
+INSTALL = install
+
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
LIBFILE = $(OUTPUT)libapi.a
CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
@@ -45,10 +55,23 @@ RM = rm -f
API_IN := $(OUTPUT)libapi-in.o
+ifeq ($(LP64), 1)
+ libdir_relative = lib64
+else
+ libdir_relative = lib
+endif
+
+prefix ?=
+libdir = $(prefix)/$(libdir_relative)
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+
all:
export srctree OUTPUT CC LD CFLAGS V
include $(srctree)/tools/build/Makefile.include
+include $(srctree)/tools/scripts/Makefile.include
all: fixdep $(LIBFILE)
@@ -58,9 +81,52 @@ $(API_IN): FORCE
$(LIBFILE): $(API_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(API_IN)
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
+define do_install
+ if [ ! -d '$2' ]; then \
+ $(INSTALL) -d -m 755 '$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$2'
+endef
+
+install_lib: $(LIBFILE)
+ $(call QUIET_INSTALL, $(LIBFILE)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIBFILE) $(DESTDIR)$(libdir_SQ)
+
+HDRS := cpu.h debug.h io.h
+FD_HDRS := fd/array.h
+FS_HDRS := fs/fs.h fs/tracing_path.h
+INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/api
+INSTALL_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(HDRS))
+INSTALL_FD_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(FD_HDRS))
+INSTALL_FS_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(FS_HDRS))
+
+$(INSTALL_HDRS): $(INSTALL_HDRS_PFX)/%.h: %.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_HDRS_PFX)/,644)
+
+$(INSTALL_FD_HDRS): $(INSTALL_HDRS_PFX)/fd/%.h: fd/%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_HDRS_PFX)/fd/,644)
+
+$(INSTALL_FS_HDRS): $(INSTALL_HDRS_PFX)/fs/%.h: fs/%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_HDRS_PFX)/fs/,644)
+
+install_headers: $(INSTALL_HDRS) $(INSTALL_FD_HDRS) $(INSTALL_FS_HDRS)
+ $(call QUIET_INSTALL, libapi_headers)
+
+install: install_lib install_headers
+
clean:
$(call QUIET_CLEAN, libapi) $(RM) $(LIBFILE); \
- find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+ find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
FORCE:
diff --git a/tools/lib/api/fd/array.c b/tools/lib/api/fd/array.c
index 58d44d5eee31..f0f195207fca 100644
--- a/tools/lib/api/fd/array.c
+++ b/tools/lib/api/fd/array.c
@@ -8,6 +8,7 @@
#include <poll.h>
#include <stdlib.h>
#include <unistd.h>
+#include <string.h>
void fdarray__init(struct fdarray *fda, int nr_autogrow)
{
@@ -19,7 +20,7 @@ void fdarray__init(struct fdarray *fda, int nr_autogrow)
int fdarray__grow(struct fdarray *fda, int nr)
{
- void *priv;
+ struct priv *priv;
int nr_alloc = fda->nr_alloc + nr;
size_t psize = sizeof(fda->priv[0]) * nr_alloc;
size_t size = sizeof(struct pollfd) * nr_alloc;
@@ -34,6 +35,9 @@ int fdarray__grow(struct fdarray *fda, int nr)
return -ENOMEM;
}
+ memset(&entries[fda->nr_alloc], 0, sizeof(struct pollfd) * nr);
+ memset(&priv[fda->nr_alloc], 0, sizeof(fda->priv[0]) * nr);
+
fda->nr_alloc = nr_alloc;
fda->entries = entries;
fda->priv = priv;
@@ -69,7 +73,7 @@ void fdarray__delete(struct fdarray *fda)
free(fda);
}
-int fdarray__add(struct fdarray *fda, int fd, short revents)
+int fdarray__add(struct fdarray *fda, int fd, short revents, enum fdarray_flags flags)
{
int pos = fda->nr;
@@ -79,10 +83,28 @@ int fdarray__add(struct fdarray *fda, int fd, short revents)
fda->entries[fda->nr].fd = fd;
fda->entries[fda->nr].events = revents;
+ fda->priv[fda->nr].flags = flags;
fda->nr++;
return pos;
}
+int fdarray__dup_entry_from(struct fdarray *fda, int pos, struct fdarray *from)
+{
+ struct pollfd *entry;
+ int npos;
+
+ if (pos >= from->nr)
+ return -EINVAL;
+
+ entry = &from->entries[pos];
+
+ npos = fdarray__add(fda, entry->fd, entry->events, from->priv[pos].flags);
+ if (npos >= 0)
+ fda->priv[npos] = from->priv[pos];
+
+ return npos;
+}
+
int fdarray__filter(struct fdarray *fda, short revents,
void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
void *arg)
@@ -93,22 +115,22 @@ int fdarray__filter(struct fdarray *fda, short revents,
return 0;
for (fd = 0; fd < fda->nr; ++fd) {
+ if (!fda->entries[fd].events)
+ continue;
+
if (fda->entries[fd].revents & revents) {
if (entry_destructor)
entry_destructor(fda, fd, arg);
+ fda->entries[fd].revents = fda->entries[fd].events = 0;
continue;
}
- if (fd != nr) {
- fda->entries[nr] = fda->entries[fd];
- fda->priv[nr] = fda->priv[fd];
- }
-
- ++nr;
+ if (!(fda->priv[fd].flags & fdarray_flag__nonfilterable))
+ ++nr;
}
- return fda->nr = nr;
+ return nr;
}
int fdarray__poll(struct fdarray *fda, int timeout)
diff --git a/tools/lib/api/fd/array.h b/tools/lib/api/fd/array.h
index b39557d1a88f..5c01f7b05dfb 100644
--- a/tools/lib/api/fd/array.h
+++ b/tools/lib/api/fd/array.h
@@ -21,19 +21,29 @@ struct fdarray {
int nr_alloc;
int nr_autogrow;
struct pollfd *entries;
- union {
- int idx;
- void *ptr;
+ struct priv {
+ union {
+ int idx;
+ void *ptr;
+ };
+ unsigned int flags;
} *priv;
};
+enum fdarray_flags {
+ fdarray_flag__default = 0x00000000,
+ fdarray_flag__nonfilterable = 0x00000001,
+ fdarray_flag__non_perf_event = 0x00000002,
+};
+
void fdarray__init(struct fdarray *fda, int nr_autogrow);
void fdarray__exit(struct fdarray *fda);
struct fdarray *fdarray__new(int nr_alloc, int nr_autogrow);
void fdarray__delete(struct fdarray *fda);
-int fdarray__add(struct fdarray *fda, int fd, short revents);
+int fdarray__add(struct fdarray *fda, int fd, short revents, enum fdarray_flags flags);
+int fdarray__dup_entry_from(struct fdarray *fda, int pos, struct fdarray *from);
int fdarray__poll(struct fdarray *fda, int timeout);
int fdarray__filter(struct fdarray *fda, short revents,
void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
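
A hedged sketch of the extended fdarray API (per-entry flags plus the filter that no longer compacts the array). It assumes tools/lib on the include path, as used by perf, and two already-open descriptors:

#include <poll.h>
#include <api/fd/array.h>

/* Poll two descriptors; 'ctl_fd' is marked nonfilterable so that
 * fdarray__filter() never treats it as a removable entry. */
static int poll_once(int data_fd, int ctl_fd)
{
        struct fdarray fda;
        int nr_left;

        fdarray__init(&fda, 4 /* nr_autogrow */);
        fdarray__add(&fda, data_fd, POLLIN | POLLHUP, fdarray_flag__default);
        fdarray__add(&fda, ctl_fd, POLLIN, fdarray_flag__nonfilterable);

        fdarray__poll(&fda, 1000 /* ms */);

        /* Disarm entries that saw POLLHUP; the return value is the number
         * of filterable entries that still have events set. */
        nr_left = fdarray__filter(&fda, POLLHUP, NULL, NULL);

        fdarray__exit(&fda);
        return nr_left;
}
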
diff --git a/tools/lib/api/fs/cgroup.c b/tools/lib/api/fs/cgroup.c
index 889a6eb4aaca..1573dae4259d 100644
--- a/tools/lib/api/fs/cgroup.c
+++ b/tools/lib/api/fs/cgroup.c
@@ -8,12 +8,29 @@
#include <string.h>
#include "fs.h"
+struct cgroupfs_cache_entry {
+ char subsys[32];
+ char mountpoint[PATH_MAX];
+};
+
+/* just cache last used one */
+static struct cgroupfs_cache_entry cached;
+
int cgroupfs_find_mountpoint(char *buf, size_t maxlen, const char *subsys)
{
FILE *fp;
- char mountpoint[PATH_MAX + 1], tokens[PATH_MAX + 1], type[PATH_MAX + 1];
- char path_v1[PATH_MAX + 1], path_v2[PATH_MAX + 2], *path;
- char *token, *saved_ptr = NULL;
+ char *line = NULL;
+ size_t len = 0;
+ char *p, *path;
+ char mountpoint[PATH_MAX];
+
+ if (!strcmp(cached.subsys, subsys)) {
+ if (strlen(cached.mountpoint) < maxlen) {
+ strcpy(buf, cached.mountpoint);
+ return 0;
+ }
+ return -1;
+ }
fp = fopen("/proc/mounts", "r");
if (!fp)
@@ -22,45 +39,63 @@ int cgroupfs_find_mountpoint(char *buf, size_t maxlen, const char *subsys)
/*
* in order to handle split hierarchy, we need to scan /proc/mounts
* and inspect every cgroupfs mount point to find one that has
- * perf_event subsystem
+ * the given subsystem. If we found v1, just use it. If not we can
+ * use v2 path as a fallback.
*/
- path_v1[0] = '\0';
- path_v2[0] = '\0';
+ mountpoint[0] = '\0';
- while (fscanf(fp, "%*s %"__stringify(PATH_MAX)"s %"__stringify(PATH_MAX)"s %"
- __stringify(PATH_MAX)"s %*d %*d\n",
- mountpoint, type, tokens) == 3) {
+ /*
+ * The /proc/mounts has the follow format:
+ *
+ * <devname> <mount point> <fs type> <options> ...
+ *
+ */
+ while (getline(&line, &len, fp) != -1) {
+ /* skip devname */
+ p = strchr(line, ' ');
+ if (p == NULL)
+ continue;
+
+ /* save the mount point */
+ path = ++p;
+ p = strchr(p, ' ');
+ if (p == NULL)
+ continue;
- if (!path_v1[0] && !strcmp(type, "cgroup")) {
+ *p++ = '\0';
- token = strtok_r(tokens, ",", &saved_ptr);
+ /* check filesystem type */
+ if (strncmp(p, "cgroup", 6))
+ continue;
- while (token != NULL) {
- if (subsys && !strcmp(token, subsys)) {
- strcpy(path_v1, mountpoint);
- break;
- }
- token = strtok_r(NULL, ",", &saved_ptr);
- }
+ if (p[6] == '2') {
+ /* save cgroup v2 path */
+ strcpy(mountpoint, path);
+ continue;
}
- if (!path_v2[0] && !strcmp(type, "cgroup2"))
- strcpy(path_v2, mountpoint);
+ /* now we have cgroup v1, check the options for subsystem */
+ p += 7;
- if (path_v1[0] && path_v2[0])
- break;
+ p = strstr(p, subsys);
+ if (p == NULL)
+ continue;
+
+ /* sanity check: it should be separated by a space or a comma */
+ if (!strchr(" ,", p[-1]) || !strchr(" ,", p[strlen(subsys)]))
+ continue;
+
+ strcpy(mountpoint, path);
+ break;
}
+ free(line);
fclose(fp);
- if (path_v1[0])
- path = path_v1;
- else if (path_v2[0])
- path = path_v2;
- else
- return -1;
+ strncpy(cached.subsys, subsys, sizeof(cached.subsys) - 1);
+ strcpy(cached.mountpoint, mountpoint);
- if (strlen(path) < maxlen) {
- strcpy(buf, path);
+ if (mountpoint[0] && strlen(mountpoint) < maxlen) {
+ strcpy(buf, mountpoint);
return 0;
}
return -1;
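
The caller side is unchanged by the rewrite; a short sketch, assuming the prototype is picked up from tools/lib/api/fs/fs.h as in the perf sources:

#include <stdio.h>
#include <limits.h>
#include <api/fs/fs.h>

int main(void)
{
        char path[PATH_MAX];

        /* Prefers a cgroup v1 mount carrying the perf_event controller and
         * falls back to the unified (v2) hierarchy; the result is cached. */
        if (cgroupfs_find_mountpoint(path, sizeof(path), "perf_event") == 0)
                printf("perf_event cgroup at %s\n", path);
        else
                printf("no suitable cgroup mount found\n");

        return 0;
}
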
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 027b18f7ed8c..82f53d81a7a7 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -90,6 +90,7 @@ struct fs {
const char * const *mounts;
char path[PATH_MAX];
bool found;
+ bool checked;
long magic;
};
@@ -111,31 +112,37 @@ static struct fs fs__entries[] = {
.name = "sysfs",
.mounts = sysfs__fs_known_mountpoints,
.magic = SYSFS_MAGIC,
+ .checked = false,
},
[FS__PROCFS] = {
.name = "proc",
.mounts = procfs__known_mountpoints,
.magic = PROC_SUPER_MAGIC,
+ .checked = false,
},
[FS__DEBUGFS] = {
.name = "debugfs",
.mounts = debugfs__known_mountpoints,
.magic = DEBUGFS_MAGIC,
+ .checked = false,
},
[FS__TRACEFS] = {
.name = "tracefs",
.mounts = tracefs__known_mountpoints,
.magic = TRACEFS_MAGIC,
+ .checked = false,
},
[FS__HUGETLBFS] = {
.name = "hugetlbfs",
.mounts = hugetlbfs__known_mountpoints,
.magic = HUGETLBFS_MAGIC,
+ .checked = false,
},
[FS__BPF_FS] = {
.name = "bpf",
.mounts = bpf_fs__known_mountpoints,
.magic = BPF_FS_MAGIC,
+ .checked = false,
},
};
@@ -158,6 +165,7 @@ static bool fs__read_mounts(struct fs *fs)
}
fclose(fp);
+ fs->checked = true;
return fs->found = found;
}
@@ -220,6 +228,7 @@ static bool fs__env_override(struct fs *fs)
return false;
fs->found = true;
+ fs->checked = true;
strncpy(fs->path, override_path, sizeof(fs->path) - 1);
fs->path[sizeof(fs->path) - 1] = '\0';
return true;
@@ -246,6 +255,14 @@ static const char *fs__mountpoint(int idx)
if (fs->found)
return (const char *)fs->path;
+ /* The mount point was already checked but did not exist, so return
+ * NULL to avoid scanning again.
+ * This makes the found and not found paths cost equivalent
+ * in case of multiple calls.
+ */
+ if (fs->checked)
+ return NULL;
+
return fs__get_mountpoint(fs);
}
diff --git a/tools/lib/api/fs/fs.h b/tools/lib/api/fs/fs.h
index 936edb95e1f3..aa222ca30311 100644
--- a/tools/lib/api/fs/fs.h
+++ b/tools/lib/api/fs/fs.h
@@ -18,6 +18,18 @@
const char *name##__mount(void); \
bool name##__configured(void); \
+/*
+ * The xxxx__mountpoint() entry points find the first matching mount point for
+ * each of the filesystems listed below, where xxxx is the filesystem type.
+ *
+ * The interface is as follows:
+ *
+ * - If a mount point is found on first call, it is cached and used for all
+ * subsequent calls.
+ *
+ * - If a mount point is not found, NULL is returned on first call and all
+ * subsequent calls.
+ */
FS(sysfs)
FS(procfs)
FS(debugfs)
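
The documented contract boils down to a simple call pattern; a minimal sketch, again assuming the tools/lib include path:

#include <stdio.h>
#include <api/fs/fs.h>

int main(void)
{
        /* The first call scans the known mount points and caches the result;
         * later calls return the cached path, or NULL if nothing was found. */
        const char *sys = sysfs__mountpoint();
        const char *dbg = debugfs__mountpoint();

        printf("sysfs:   %s\n", sys ? sys : "not mounted");
        printf("debugfs: %s\n", dbg ? dbg : "not mounted");

        return 0;
}
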
diff --git a/tools/lib/api/fs/tracing_path.c b/tools/lib/api/fs/tracing_path.c
index 5afb11b30fca..7ba3e81274e8 100644
--- a/tools/lib/api/fs/tracing_path.c
+++ b/tools/lib/api/fs/tracing_path.c
@@ -14,8 +14,8 @@
#include "tracing_path.h"
static char tracing_mnt[PATH_MAX] = "/sys/kernel/debug";
-static char tracing_path[PATH_MAX] = "/sys/kernel/debug/tracing";
-static char tracing_events_path[PATH_MAX] = "/sys/kernel/debug/tracing/events";
+static char tracing_path[PATH_MAX] = "/sys/kernel/tracing";
+static char tracing_events_path[PATH_MAX] = "/sys/kernel/tracing/events";
static void __tracing_path_set(const char *tracing, const char *mountpoint)
{
@@ -113,6 +113,22 @@ DIR *tracing_events__opendir(void)
return dir;
}
+int tracing_events__scandir_alphasort(struct dirent ***namelist)
+{
+ char *path = get_tracing_file("events");
+ int ret;
+
+ if (!path) {
+ *namelist = NULL;
+ return 0;
+ }
+
+ ret = scandir(path, namelist, NULL, alphasort);
+ put_events_file(path);
+
+ return ret;
+}
+
int tracing_path__strerror_open_tp(int err, char *buf, size_t size,
const char *sys, const char *name)
{
diff --git a/tools/lib/api/fs/tracing_path.h b/tools/lib/api/fs/tracing_path.h
index a19136b086dc..fc6347c11deb 100644
--- a/tools/lib/api/fs/tracing_path.h
+++ b/tools/lib/api/fs/tracing_path.h
@@ -6,6 +6,7 @@
#include <dirent.h>
DIR *tracing_events__opendir(void);
+int tracing_events__scandir_alphasort(struct dirent ***namelist);
void tracing_path_set(const char *mountpoint);
const char *tracing_path_mount(void);
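
A short sketch of the new scandir helper; entries follow scandir() ownership rules, so each dirent and the array itself must be freed by the caller:

#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
#include <api/fs/tracing_path.h>

int main(void)
{
        struct dirent **namelist = NULL;
        int i, n;

        /* Number of entries under <tracefs>/events, sorted alphabetically;
         * 0 with a NULL list when the events path cannot be resolved. */
        n = tracing_events__scandir_alphasort(&namelist);

        for (i = 0; i < n; i++) {
                printf("%s\n", namelist[i]->d_name);
                free(namelist[i]);
        }
        free(namelist);

        return 0;
}
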
diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h
new file mode 100644
index 000000000000..d5e8cf0dada0
--- /dev/null
+++ b/tools/lib/api/io.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Lightweight buffered reading library.
+ *
+ * Copyright 2019 Google LLC.
+ */
+#ifndef __API_IO__
+#define __API_IO__
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+struct io {
+ /* File descriptor being read. */
+ int fd;
+ /* Size of the read buffer. */
+ unsigned int buf_len;
+ /* Pointer to storage for buffering read. */
+ char *buf;
+ /* End of the storage. */
+ char *end;
+ /* Currently accessed data pointer. */
+ char *data;
+ /* Set true when end of file is reached or a read error occurs. */
+ bool eof;
+};
+
+static inline void io__init(struct io *io, int fd,
+ char *buf, unsigned int buf_len)
+{
+ io->fd = fd;
+ io->buf_len = buf_len;
+ io->buf = buf;
+ io->end = buf;
+ io->data = buf;
+ io->eof = false;
+}
+
+/* Reads one character from the "io" file with similar semantics to fgetc. */
+static inline int io__get_char(struct io *io)
+{
+ char *ptr = io->data;
+
+ if (io->eof)
+ return -1;
+
+ if (ptr == io->end) {
+ ssize_t n = read(io->fd, io->buf, io->buf_len);
+
+ if (n <= 0) {
+ io->eof = true;
+ return -1;
+ }
+ ptr = &io->buf[0];
+ io->end = &io->buf[n];
+ }
+ io->data = ptr + 1;
+ return *ptr;
+}
+
+/* Read a hexadecimal value with no 0x prefix into the out argument hex. If the
+ * first character isn't hexadecimal returns -2, io->eof returns -1, otherwise
+ * returns the character after the hexadecimal value which may be -1 for eof.
+ * If the read value is larger than a u64 the high-order bits will be dropped.
+ */
+static inline int io__get_hex(struct io *io, __u64 *hex)
+{
+ bool first_read = true;
+
+ *hex = 0;
+ while (true) {
+ int ch = io__get_char(io);
+
+ if (ch < 0)
+ return ch;
+ if (ch >= '0' && ch <= '9')
+ *hex = (*hex << 4) | (ch - '0');
+ else if (ch >= 'a' && ch <= 'f')
+ *hex = (*hex << 4) | (ch - 'a' + 10);
+ else if (ch >= 'A' && ch <= 'F')
+ *hex = (*hex << 4) | (ch - 'A' + 10);
+ else if (first_read)
+ return -2;
+ else
+ return ch;
+ first_read = false;
+ }
+}
+
+/* Read a positive decimal value with out argument dec. If the first character
+ * isn't a decimal returns -2, io->eof returns -1, otherwise returns the
+ * character after the decimal value which may be -1 for eof. If the read value
+ * is larger than a u64 the high-order bits will be dropped.
+ */
+static inline int io__get_dec(struct io *io, __u64 *dec)
+{
+ bool first_read = true;
+
+ *dec = 0;
+ while (true) {
+ int ch = io__get_char(io);
+
+ if (ch < 0)
+ return ch;
+ if (ch >= '0' && ch <= '9')
+ *dec = (*dec * 10) + ch - '0';
+ else if (first_read)
+ return -2;
+ else
+ return ch;
+ first_read = false;
+ }
+}
+
+/* Read up to and including the first newline following the pattern of getline. */
+static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out)
+{
+ char buf[128];
+ int buf_pos = 0;
+ char *line = NULL, *temp;
+ size_t line_len = 0;
+ int ch = 0;
+
+ /* TODO: reuse previously allocated memory. */
+ free(*line_out);
+ while (ch != '\n') {
+ ch = io__get_char(io);
+
+ if (ch < 0)
+ break;
+
+ if (buf_pos == sizeof(buf)) {
+ temp = realloc(line, line_len + sizeof(buf));
+ if (!temp)
+ goto err_out;
+ line = temp;
+ memcpy(&line[line_len], buf, sizeof(buf));
+ line_len += sizeof(buf);
+ buf_pos = 0;
+ }
+ buf[buf_pos++] = (char)ch;
+ }
+ temp = realloc(line, line_len + buf_pos + 1);
+ if (!temp)
+ goto err_out;
+ line = temp;
+ memcpy(&line[line_len], buf, buf_pos);
+ line[line_len + buf_pos] = '\0';
+ line_len += buf_pos;
+ *line_out = line;
+ *line_len_out = line_len;
+ return line_len;
+err_out:
+ free(line);
+ return -ENOMEM;
+}
+
+#endif /* __API_IO__ */
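
A minimal sketch of reading a file line by line with the helpers above. The extra <stdbool.h> and <linux/types.h> includes are assumptions needed for a standalone build, since the header relies on bool and __u64:

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include <linux/types.h>
#include <api/io.h>

int main(void)
{
        char buf[4096];
        char *line = NULL;
        size_t line_len = 0;
        struct io io;
        int fd = open("/proc/self/status", O_RDONLY);

        if (fd < 0)
                return 1;

        /* The caller owns the read buffer; struct io only borrows it. */
        io__init(&io, fd, buf, sizeof(buf));

        /* io__getline() keeps the trailing newline, like getline(). */
        while (io__getline(&io, &line, &line_len) > 0)
                printf("%s", line);

        free(line);
        close(fd);
        return 0;
}
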
diff --git a/tools/lib/bitmap.c b/tools/lib/bitmap.c
index 5043747ef6c5..c3e4871967bc 100644
--- a/tools/lib/bitmap.c
+++ b/tools/lib/bitmap.c
@@ -5,9 +5,9 @@
*/
#include <linux/bitmap.h>
-int __bitmap_weight(const unsigned long *bitmap, int bits)
+unsigned int __bitmap_weight(const unsigned long *bitmap, int bits)
{
- int k, w = 0, lim = bits/BITS_PER_LONG;
+ unsigned int k, w = 0, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; k++)
w += hweight_long(bitmap[k]);
@@ -28,11 +28,11 @@ void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
dst[k] = bitmap1[k] | bitmap2[k];
}
-size_t bitmap_scnprintf(unsigned long *bitmap, int nbits,
+size_t bitmap_scnprintf(unsigned long *bitmap, unsigned int nbits,
char *buf, size_t size)
{
/* current bit is 'cur', most recently seen range is [rbot, rtop] */
- int cur, rbot, rtop;
+ unsigned int cur, rbot, rtop;
bool first = true;
size_t ret = 0;
@@ -57,7 +57,7 @@ size_t bitmap_scnprintf(unsigned long *bitmap, int nbits,
return ret;
}
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k;
@@ -72,17 +72,31 @@ int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
return result != 0;
}
-int __bitmap_equal(const unsigned long *bitmap1,
- const unsigned long *bitmap2, unsigned int bits)
+bool __bitmap_equal(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
{
unsigned int k, lim = bits/BITS_PER_LONG;
for (k = 0; k < lim; ++k)
if (bitmap1[k] != bitmap2[k])
- return 0;
+ return false;
if (bits % BITS_PER_LONG)
if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
- return 0;
+ return false;
- return 1;
+ return true;
+}
+
+bool __bitmap_intersects(const unsigned long *bitmap1,
+ const unsigned long *bitmap2, unsigned int bits)
+{
+ unsigned int k, lim = bits/BITS_PER_LONG;
+ for (k = 0; k < lim; ++k)
+ if (bitmap1[k] & bitmap2[k])
+ return true;
+
+ if (bits % BITS_PER_LONG)
+ if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
+ return true;
+ return false;
}
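
A small sketch exercising the retyped helpers, assuming the tools/include headers (which supply DECLARE_BITMAP() and these prototypes) and tools/lib/bitmap.c linked into the build:

#include <stdio.h>
#include <linux/types.h>
#include <linux/bitmap.h>

#define NBITS 128

int main(void)
{
        DECLARE_BITMAP(a, NBITS) = { 0 };
        DECLARE_BITMAP(b, NBITS) = { 0 };

        a[0] = 0x5UL;           /* bits 0 and 2 */
        b[0] = 0x4UL;           /* bit 2 */

        printf("weight(a)      = %u\n", __bitmap_weight(a, NBITS));
        printf("a intersects b = %d\n", __bitmap_intersects(a, b, NBITS));
        printf("a equals b     = %d\n", __bitmap_equal(a, b, NBITS));

        return 0;
}
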
diff --git a/tools/lib/bpf/.gitignore b/tools/lib/bpf/.gitignore
index 8a81b3679d2b..0da84cb9e66d 100644
--- a/tools/lib/bpf/.gitignore
+++ b/tools/lib/bpf/.gitignore
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-libbpf_version.h
libbpf.pc
-FEATURE-DUMP.libbpf
libbpf.so.*
TAGS
tags
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index e3962cfbc9a6..b8b0a6369363 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1,3 +1,4 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
- netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
- btf_dump.o
+ netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \
+ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \
+ usdt.o zip.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index aee7f1a83c77..cf7f02c67968 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -1,10 +1,15 @@
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
# Most of this file is copied from tools/lib/traceevent/Makefile
+RM ?= rm
+srctree = $(abs_srctree)
+
+VERSION_SCRIPT := libbpf.map
LIBBPF_VERSION := $(shell \
- grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | \
+ grep -oE '^LIBBPF_([0-9.]+)' $(VERSION_SCRIPT) | \
sort -rV | head -n1 | cut -d'_' -f2)
-LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
+LIBBPF_MAJOR_VERSION := $(word 1,$(subst ., ,$(LIBBPF_VERSION)))
+LIBBPF_MINOR_VERSION := $(word 2,$(subst ., ,$(LIBBPF_VERSION)))
MAKEFLAGS += --no-print-directory
@@ -55,28 +60,8 @@ ifndef VERBOSE
VERBOSE = 0
endif
-FEATURE_USER = .libbpf
-FEATURE_TESTS = libelf libelf-mmap zlib bpf reallocarray
-FEATURE_DISPLAY = libelf zlib bpf
-
-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
-FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
-
-check_feat := 1
-NON_CHECK_FEAT_TARGETS := clean TAGS tags cscope help
-ifdef MAKECMDGOALS
-ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
- check_feat := 0
-endif
-endif
-
-ifeq ($(check_feat),1)
-ifeq ($(FEATURES_DUMP),)
-include $(srctree)/tools/build/Makefile.feature
-else
-include $(FEATURES_DUMP)
-endif
-endif
+INCLUDES = -I$(or $(OUTPUT),.) \
+ -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
export prefix libdir src obj
@@ -95,27 +80,20 @@ PC_FILE = libbpf.pc
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
else
- CFLAGS := -g -Wall
-endif
-
-ifeq ($(feature-libelf-mmap), 1)
- override CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
-endif
-
-ifeq ($(feature-reallocarray), 0)
- override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+ CFLAGS := -g -O2
endif
# Append required CFLAGS
-override CFLAGS += $(EXTRA_WARNINGS)
+override CFLAGS += -std=gnu89
+override CFLAGS += $(EXTRA_WARNINGS) -Wno-switch-enum
override CFLAGS += -Werror -Wall
-override CFLAGS += -fPIC
override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+override CFLAGS += $(CLANG_CROSS_FLAGS)
# flags specific for shared library
-SHLIB_FLAGS := -DSHARED
+SHLIB_FLAGS := -DSHARED -fPIC
ifeq ($(VERBOSE),1)
Q =
@@ -137,8 +115,8 @@ SHARED_OBJDIR := $(OUTPUT)sharedobjs/
STATIC_OBJDIR := $(OUTPUT)staticobjs/
BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o
BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o
-VERSION_SCRIPT := libbpf.map
BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h
+BPF_GENERATED := $(BPF_HELPER_DEFS)
LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
@@ -149,9 +127,11 @@ TAGS_PROG := $(if $(shell which etags 2>/dev/null),etags,ctags)
GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
sed 's/\[.*\]//' | \
- awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \
sort -u | wc -l)
-VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
+VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
+ sed 's/\[.*\]//' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}' | \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
@@ -161,42 +141,36 @@ all: fixdep
all_cmd: $(CMD_TARGETS) check
-$(BPF_IN_SHARED): force elfdep zdep bpfdep $(BPF_HELPER_DEFS)
+$(BPF_IN_SHARED): force $(BPF_GENERATED)
@(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@(test -f ../../include/uapi/linux/bpf_common.h -a -f ../../../include/uapi/linux/bpf_common.h && ( \
(diff -B ../../include/uapi/linux/bpf_common.h ../../../include/uapi/linux/bpf_common.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf_common.h' differs from latest version at 'include/uapi/linux/bpf_common.h'" >&2 )) || true
- @(test -f ../../include/uapi/linux/netlink.h -a -f ../../../include/uapi/linux/netlink.h && ( \
- (diff -B ../../include/uapi/linux/netlink.h ../../../include/uapi/linux/netlink.h >/dev/null) || \
- echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/netlink.h' differs from latest version at 'include/uapi/linux/netlink.h'" >&2 )) || true
- @(test -f ../../include/uapi/linux/if_link.h -a -f ../../../include/uapi/linux/if_link.h && ( \
- (diff -B ../../include/uapi/linux/if_link.h ../../../include/uapi/linux/if_link.h >/dev/null) || \
- echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_link.h' differs from latest version at 'include/uapi/linux/if_link.h'" >&2 )) || true
@(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
(diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
-$(BPF_IN_STATIC): force elfdep zdep bpfdep $(BPF_HELPER_DEFS)
+$(BPF_IN_STATIC): force $(BPF_GENERATED)
$(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
$(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
- $(QUIET_GEN)$(srctree)/scripts/bpf_helpers_doc.py --header \
+ $(QUIET_GEN)$(srctree)/scripts/bpf_doc.py --header \
--file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS)
$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
-$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
- $(QUIET_LINK)$(CC) $(LDFLAGS) \
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED) $(VERSION_SCRIPT)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) \
--shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
- -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -lz -o $@
+ -Wl,--version-script=$(VERSION_SCRIPT) $< -lelf -lz -o $@
@ln -sf $(@F) $(OUTPUT)libbpf.so
@ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN_STATIC)
- $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
+ $(QUIET_LINK)$(RM) -f $@; $(AR) rcs $@ $^
$(OUTPUT)libbpf.pc:
$(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
@@ -204,9 +178,9 @@ $(OUTPUT)libbpf.pc:
-e "s|@VERSION@|$(LIBBPF_VERSION)|" \
< libbpf.pc.template > $@
-check: check_abi
+check: check_abi check_version
-check_abi: $(OUTPUT)libbpf.so
+check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
@if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \
echo "Warning: Num of global symbols in $(BPF_IN_SHARED)" \
"($(GLOBAL_SYM_COUNT)) does NOT match with num of" \
@@ -218,7 +192,9 @@ check_abi: $(OUTPUT)libbpf.so
sed 's/\[.*\]//' | \
awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \
sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
- readelf -s --wide $(OUTPUT)libbpf.so | \
+ readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
+ sed 's/\[.*\]//' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND|ABS/ {print $$NF}'| \
grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \
diff -u $(OUTPUT)libbpf_global_syms.tmp \
@@ -228,6 +204,21 @@ check_abi: $(OUTPUT)libbpf.so
exit 1; \
fi
+HDR_MAJ_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
+HDR_MIN_VERSION := $(shell grep -oE '^$(pound)define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3)
+
+check_version: $(VERSION_SCRIPT) libbpf_version.h
+ @if [ "$(HDR_MAJ_VERSION)" != "$(LIBBPF_MAJOR_VERSION)" ]; then \
+ echo "Error: libbpf major version mismatch detected: " \
+ "'$(HDR_MAJ_VERSION)' != '$(LIBBPF_MAJOR_VERSION)'" >&2; \
+ exit 1; \
+ fi
+ @if [ "$(HDR_MIN_VERSION)" != "$(LIBBPF_MINOR_VERSION)" ]; then \
+ echo "Error: libbpf minor version mismatch detected: " \
+ "'$(HDR_MIN_VERSION)' != '$(LIBBPF_MINOR_VERSION)'" >&2; \
+ exit 1; \
+ fi
+
define do_install_mkdir
if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
@@ -238,7 +229,7 @@ define do_install
if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
fi; \
- $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+ $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
endef
install_lib: all_cmd
@@ -246,60 +237,48 @@ install_lib: all_cmd
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
-install_headers: $(BPF_HELPER_DEFS)
- $(call QUIET_INSTALL, headers) \
- $(call do_install,bpf.h,$(prefix)/include/bpf,644); \
- $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
- $(call do_install,btf.h,$(prefix)/include/bpf,644); \
- $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \
- $(call do_install,libbpf_common.h,$(prefix)/include/bpf,644); \
- $(call do_install,xsk.h,$(prefix)/include/bpf,644); \
- $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \
- $(call do_install,$(BPF_HELPER_DEFS),$(prefix)/include/bpf,644); \
- $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \
- $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \
- $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644);
+SRC_HDRS := bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h \
+ bpf_helpers.h bpf_tracing.h bpf_endian.h bpf_core_read.h \
+ skel_internal.h libbpf_version.h usdt.bpf.h
+GEN_HDRS := $(BPF_GENERATED)
+
+INSTALL_PFX := $(DESTDIR)$(prefix)/include/bpf
+INSTALL_SRC_HDRS := $(addprefix $(INSTALL_PFX)/, $(SRC_HDRS))
+INSTALL_GEN_HDRS := $(addprefix $(INSTALL_PFX)/, $(notdir $(GEN_HDRS)))
+
+$(INSTALL_SRC_HDRS): $(INSTALL_PFX)/%.h: %.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(prefix)/include/bpf,644)
+
+$(INSTALL_GEN_HDRS): $(INSTALL_PFX)/%.h: $(OUTPUT)%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(prefix)/include/bpf,644)
+
+install_headers: $(BPF_GENERATED) $(INSTALL_SRC_HDRS) $(INSTALL_GEN_HDRS)
+ $(call QUIET_INSTALL, libbpf_headers)
install_pkgconfig: $(PC_FILE)
$(call QUIET_INSTALL, $(PC_FILE)) \
$(call do_install,$(PC_FILE),$(libdir_SQ)/pkgconfig,644)
-install: install_lib install_pkgconfig
-
-### Cleaning rules
-
-config-clean:
- $(call QUIET_CLEAN, config)
- $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
+install: install_lib install_pkgconfig install_headers
clean:
$(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
- *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
+ *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_GENERATED) \
$(SHARED_OBJDIR) $(STATIC_OBJDIR) \
$(addprefix $(OUTPUT), \
*.o *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) *.pc)
- $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
-
-
-PHONY += force elfdep zdep bpfdep cscope tags
+PHONY += force cscope tags check check_abi check_version
force:
-elfdep:
- @if [ "$(feature-libelf)" != "1" ]; then echo "No libelf found"; exit 1 ; fi
-
-zdep:
- @if [ "$(feature-zlib)" != "1" ]; then echo "No zlib found"; exit 1 ; fi
-
-bpfdep:
- @if [ "$(feature-bpf)" != "1" ]; then echo "BPF API too old"; exit 1 ; fi
-
cscope:
ls *.c *.h > cscope.files
cscope -b -q -I $(srctree)/include -f cscope.out
tags:
- rm -f TAGS tags
+ $(RM) -f TAGS tags
ls *.c *.h | xargs $(TAGS_PROG) -a
# Declare the contents of the .PHONY variable as phony. We keep that
@@ -308,3 +287,20 @@ tags:
# Delete partially updated (corrupted) files on error
.DELETE_ON_ERROR:
+
+help:
+ @echo 'libbpf common targets:'
+ @echo ' HINT: use "V=1" to enable verbose build'
+ @echo ' all - build libraries and pkgconfig'
+ @echo ' clean - remove all generated files'
+ @echo ' check - check abi and version info'
+ @echo ''
+ @echo 'libbpf install targets:'
+ @echo ' HINT: use "prefix"(defaults to "/usr/local") or "DESTDIR" (defaults to "/")'
+ @echo ' to adjust target desitantion, e.g. "make prefix=/usr/local install"'
+ @echo ' install - build and install all headers, libraries and pkgconfig'
+ @echo ' install_headers - install only headers to include/bpf'
+ @echo ''
+ @echo 'libbpf make targets:'
+ @echo ' tags - use ctags to make tag information for source code browsing'
+ @echo ' cscope - use cscope to make interactive source code browsing database'
diff --git a/tools/lib/bpf/README.rst b/tools/lib/bpf/README.rst
deleted file mode 100644
index 8928f7787f2d..000000000000
--- a/tools/lib/bpf/README.rst
+++ /dev/null
@@ -1,168 +0,0 @@
-.. SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-libbpf API naming convention
-============================
-
-libbpf API provides access to a few logically separated groups of
-functions and types. Every group has its own naming convention
-described here. It's recommended to follow these conventions whenever a
-new function or type is added to keep libbpf API clean and consistent.
-
-All types and functions provided by libbpf API should have one of the
-following prefixes: ``bpf_``, ``btf_``, ``libbpf_``, ``xsk_``,
-``perf_buffer_``.
-
-System call wrappers
---------------------
-
-System call wrappers are simple wrappers for commands supported by
-sys_bpf system call. These wrappers should go to ``bpf.h`` header file
-and map one-on-one to corresponding commands.
-
-For example ``bpf_map_lookup_elem`` wraps ``BPF_MAP_LOOKUP_ELEM``
-command of sys_bpf, ``bpf_prog_attach`` wraps ``BPF_PROG_ATTACH``, etc.
-
-Objects
--------
-
-Another class of types and functions provided by libbpf API is "objects"
-and functions to work with them. Objects are high-level abstractions
-such as BPF program or BPF map. They're represented by corresponding
-structures such as ``struct bpf_object``, ``struct bpf_program``,
-``struct bpf_map``, etc.
-
-Structures are forward declared and access to their fields should be
-provided via corresponding getters and setters rather than directly.
-
-These objects are associated with corresponding parts of ELF object that
-contains compiled BPF programs.
-
-For example ``struct bpf_object`` represents ELF object itself created
-from an ELF file or from a buffer, ``struct bpf_program`` represents a
-program in ELF object and ``struct bpf_map`` is a map.
-
-Functions that work with an object have names built from object name,
-double underscore and part that describes function purpose.
-
-For example ``bpf_object__open`` consists of the name of corresponding
-object, ``bpf_object``, double underscore and ``open`` that defines the
-purpose of the function to open ELF file and create ``bpf_object`` from
-it.
-
-Another example: ``bpf_program__load`` is named for corresponding
-object, ``bpf_program``, that is separated from other part of the name
-by double underscore.
-
-All objects and corresponding functions other than BTF related should go
-to ``libbpf.h``. BTF types and functions should go to ``btf.h``.
-
-Auxiliary functions
--------------------
-
-Auxiliary functions and types that don't fit well in any of categories
-described above should have ``libbpf_`` prefix, e.g.
-``libbpf_get_error`` or ``libbpf_prog_type_by_name``.
-
-AF_XDP functions
--------------------
-
-AF_XDP functions should have an ``xsk_`` prefix, e.g.
-``xsk_umem__get_data`` or ``xsk_umem__create``. The interface consists
-of both low-level ring access functions and high-level configuration
-functions. These can be mixed and matched. Note that these functions
-are not reentrant for performance reasons.
-
-Please take a look at Documentation/networking/af_xdp.rst in the Linux
-kernel source tree on how to use XDP sockets and for some common
-mistakes in case you do not get any traffic up to user space.
-
-libbpf ABI
-==========
-
-libbpf can be both linked statically or used as DSO. To avoid possible
-conflicts with other libraries an application is linked with, all
-non-static libbpf symbols should have one of the prefixes mentioned in
-API documentation above. See API naming convention to choose the right
-name for a new symbol.
-
-Symbol visibility
------------------
-
-libbpf follow the model when all global symbols have visibility "hidden"
-by default and to make a symbol visible it has to be explicitly
-attributed with ``LIBBPF_API`` macro. For example:
-
-.. code-block:: c
-
- LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
-
-This prevents from accidentally exporting a symbol, that is not supposed
-to be a part of ABI what, in turn, improves both libbpf developer- and
-user-experiences.
-
-ABI versionning
----------------
-
-To make future ABI extensions possible libbpf ABI is versioned.
-Versioning is implemented by ``libbpf.map`` version script that is
-passed to linker.
-
-Version name is ``LIBBPF_`` prefix + three-component numeric version,
-starting from ``0.0.1``.
-
-Every time ABI is being changed, e.g. because a new symbol is added or
-semantic of existing symbol is changed, ABI version should be bumped.
-This bump in ABI version is at most once per kernel development cycle.
-
-For example, if current state of ``libbpf.map`` is:
-
-.. code-block::
- LIBBPF_0.0.1 {
- global:
- bpf_func_a;
- bpf_func_b;
- local:
- \*;
- };
-
-, and a new symbol ``bpf_func_c`` is being introduced, then
-``libbpf.map`` should be changed like this:
-
-.. code-block::
- LIBBPF_0.0.1 {
- global:
- bpf_func_a;
- bpf_func_b;
- local:
- \*;
- };
- LIBBPF_0.0.2 {
- global:
- bpf_func_c;
- } LIBBPF_0.0.1;
-
-, where new version ``LIBBPF_0.0.2`` depends on the previous
-``LIBBPF_0.0.1``.
-
-Format of version script and ways to handle ABI changes, including
-incompatible ones, described in details in [1].
-
-Stand-alone build
-=================
-
-Under https://github.com/libbpf/libbpf there is a (semi-)automated
-mirror of the mainline's version of libbpf for a stand-alone build.
-
-However, all changes to libbpf's code base must be upstreamed through
-the mainline kernel tree.
-
-License
-=======
-
-libbpf is dual-licensed under LGPL 2.1 and BSD 2-Clause.
-
-Links
-=====
-
-[1] https://www.akkadia.org/drepper/dsohowto.pdf
- (Chapter 3. Maintaining APIs and ABIs).
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 5cc1b0785d18..128ac723c4ea 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -28,13 +28,14 @@
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/kernel.h>
+#include <limits.h>
+#include <sys/resource.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
/*
* When building perf, unistd.h is overridden. __NR_bpf is
* required to be defined explicitly.
@@ -52,6 +53,12 @@
# define __NR_bpf 351
# elif defined(__arc__)
# define __NR_bpf 280
+# elif defined(__mips__) && defined(_ABIO32)
+# define __NR_bpf 4355
+# elif defined(__mips__) && defined(_ABIN32)
+# define __NR_bpf 6319
+# elif defined(__mips__) && defined(_ABI64)
+# define __NR_bpf 5315
# else
# error __NR_bpf not defined. libbpf does not support your arch.
# endif
@@ -68,128 +75,131 @@ static inline int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
return syscall(__NR_bpf, cmd, attr, size);
}
-static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
+static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+ int fd;
+
+ fd = sys_bpf(cmd, attr, size);
+ return ensure_good_fd(fd);
+}
+
+int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
int fd;
do {
- fd = sys_bpf(BPF_PROG_LOAD, attr, size);
- } while (fd < 0 && errno == EAGAIN);
+ fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
+ } while (fd < 0 && errno == EAGAIN && --attempts > 0);
return fd;
}
-int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
+/* Probe whether kernel switched from memlock-based (RLIMIT_MEMLOCK) to
+ * memcg-based memory accounting for BPF maps and progs. This was done in [0].
+ * We use the support for bpf_ktime_get_coarse_ns() helper, which was added in
+ * the same 5.11 Linux release ([1]), to detect memcg-based accounting for BPF.
+ *
+ * [0] https://lore.kernel.org/bpf/20201201215900.3569844-1-guro@fb.com/
+ * [1] d05512618056 ("bpf: Add bpf_ktime_get_coarse_ns helper")
+ */
+int probe_memcg_account(void)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, attach_btf_obj_fd);
+ struct bpf_insn insns[] = {
+ BPF_EMIT_CALL(BPF_FUNC_ktime_get_coarse_ns),
+ BPF_EXIT_INSN(),
+ };
+ size_t insn_cnt = ARRAY_SIZE(insns);
union bpf_attr attr;
+ int prog_fd;
- memset(&attr, '\0', sizeof(attr));
-
- attr.map_type = create_attr->map_type;
- attr.key_size = create_attr->key_size;
- attr.value_size = create_attr->value_size;
- attr.max_entries = create_attr->max_entries;
- attr.map_flags = create_attr->map_flags;
- if (create_attr->name)
- memcpy(attr.map_name, create_attr->name,
- min(strlen(create_attr->name), BPF_OBJ_NAME_LEN - 1));
- attr.numa_node = create_attr->numa_node;
- attr.btf_fd = create_attr->btf_fd;
- attr.btf_key_type_id = create_attr->btf_key_type_id;
- attr.btf_value_type_id = create_attr->btf_value_type_id;
- attr.map_ifindex = create_attr->map_ifindex;
- if (attr.map_type == BPF_MAP_TYPE_STRUCT_OPS)
- attr.btf_vmlinux_value_type_id =
- create_attr->btf_vmlinux_value_type_id;
- else
- attr.inner_map_fd = create_attr->inner_map_fd;
+ /* attempt loading a trivial SOCKET_FILTER prog that uses bpf_ktime_get_coarse_ns() */
+ memset(&attr, 0, attr_sz);
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = insn_cnt;
+ attr.license = ptr_to_u64("GPL");
- return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ prog_fd = sys_bpf_fd(BPF_PROG_LOAD, &attr, attr_sz);
+ if (prog_fd >= 0) {
+ close(prog_fd);
+ return 1;
+ }
+ return 0;
}
-int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size, int max_entries,
- __u32 map_flags, int node)
-{
- struct bpf_create_map_attr map_attr = {};
+static bool memlock_bumped;
+static rlim_t memlock_rlim = RLIM_INFINITY;
- map_attr.name = name;
- map_attr.map_type = map_type;
- map_attr.map_flags = map_flags;
- map_attr.key_size = key_size;
- map_attr.value_size = value_size;
- map_attr.max_entries = max_entries;
- if (node >= 0) {
- map_attr.numa_node = node;
- map_attr.map_flags |= BPF_F_NUMA_NODE;
- }
+int libbpf_set_memlock_rlim(size_t memlock_bytes)
+{
+ if (memlock_bumped)
+ return libbpf_err(-EBUSY);
- return bpf_create_map_xattr(&map_attr);
+ memlock_rlim = memlock_bytes;
+ return 0;
}
-int bpf_create_map(enum bpf_map_type map_type, int key_size,
- int value_size, int max_entries, __u32 map_flags)
+int bump_rlimit_memlock(void)
{
- struct bpf_create_map_attr map_attr = {};
+ struct rlimit rlim;
- map_attr.map_type = map_type;
- map_attr.map_flags = map_flags;
- map_attr.key_size = key_size;
- map_attr.value_size = value_size;
- map_attr.max_entries = max_entries;
+ /* if kernel supports memcg-based accounting, skip bumping RLIMIT_MEMLOCK */
+ if (memlock_bumped || kernel_supports(NULL, FEAT_MEMCG_ACCOUNT))
+ return 0;
- return bpf_create_map_xattr(&map_attr);
-}
+ memlock_bumped = true;
-int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size, int max_entries,
- __u32 map_flags)
-{
- struct bpf_create_map_attr map_attr = {};
+ /* zero memlock_rlim disables auto-bumping RLIMIT_MEMLOCK */
+ if (memlock_rlim == 0)
+ return 0;
- map_attr.name = name;
- map_attr.map_type = map_type;
- map_attr.map_flags = map_flags;
- map_attr.key_size = key_size;
- map_attr.value_size = value_size;
- map_attr.max_entries = max_entries;
+ rlim.rlim_cur = rlim.rlim_max = memlock_rlim;
+ if (setrlimit(RLIMIT_MEMLOCK, &rlim))
+ return -errno;
- return bpf_create_map_xattr(&map_attr);
+ return 0;
}
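
The memlock auto-bumping above is user-controllable via libbpf_set_memlock_rlim(), declared later in this patch in bpf.h. A minimal, hypothetical caller-side sketch (not part of the patch) for an application that manages RLIMIT_MEMLOCK itself:

#include <bpf/bpf.h>

/* Disable libbpf's automatic RLIMIT_MEMLOCK bumping. Must run before the
 * first map/prog/BTF load, otherwise libbpf_set_memlock_rlim() returns -EBUSY.
 */
static int app_init_bpf(void)
{
	/* 0 means "never touch RLIMIT_MEMLOCK"; any other value is used as
	 * rlim_cur/rlim_max on pre-memcg (pre-5.11) kernels
	 */
	return libbpf_set_memlock_rlim(0);
}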
-int bpf_create_map_in_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int inner_map_fd, int max_entries,
- __u32 map_flags, int node)
+int bpf_map_create(enum bpf_map_type map_type,
+ const char *map_name,
+ __u32 key_size,
+ __u32 value_size,
+ __u32 max_entries,
+ const struct bpf_map_create_opts *opts)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
union bpf_attr attr;
+ int fd;
- memset(&attr, '\0', sizeof(attr));
+ bump_rlimit_memlock();
+
+ memset(&attr, 0, attr_sz);
+
+ if (!OPTS_VALID(opts, bpf_map_create_opts))
+ return libbpf_err(-EINVAL);
attr.map_type = map_type;
+ if (map_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
attr.key_size = key_size;
- attr.value_size = 4;
- attr.inner_map_fd = inner_map_fd;
+ attr.value_size = value_size;
attr.max_entries = max_entries;
- attr.map_flags = map_flags;
- if (name)
- memcpy(attr.map_name, name,
- min(strlen(name), BPF_OBJ_NAME_LEN - 1));
-
- if (node >= 0) {
- attr.map_flags |= BPF_F_NUMA_NODE;
- attr.numa_node = node;
- }
- return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
-}
+ attr.btf_fd = OPTS_GET(opts, btf_fd, 0);
+ attr.btf_key_type_id = OPTS_GET(opts, btf_key_type_id, 0);
+ attr.btf_value_type_id = OPTS_GET(opts, btf_value_type_id, 0);
+ attr.btf_vmlinux_value_type_id = OPTS_GET(opts, btf_vmlinux_value_type_id, 0);
-int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name,
- int key_size, int inner_map_fd, int max_entries,
- __u32 map_flags)
-{
- return bpf_create_map_in_map_node(map_type, name, key_size,
- inner_map_fd, max_entries, map_flags,
- -1);
+ attr.inner_map_fd = OPTS_GET(opts, inner_map_fd, 0);
+ attr.map_flags = OPTS_GET(opts, map_flags, 0);
+ attr.map_extra = OPTS_GET(opts, map_extra, 0);
+ attr.numa_node = OPTS_GET(opts, numa_node, 0);
+ attr.map_ifindex = OPTS_GET(opts, map_ifindex, 0);
+
+ fd = sys_bpf_fd(BPF_MAP_CREATE, &attr, attr_sz);
+ return libbpf_err_errno(fd);
}
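
For reference, a minimal usage sketch of the new bpf_map_create() API (hypothetical example, not part of the patch; the map name, element sizes, and flags are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int create_example_map(void)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_NO_PREALLOC);

	/* returns a map FD on success, or a negative error (errno also set) */
	return bpf_map_create(BPF_MAP_TYPE_HASH, "example_map",
			      sizeof(__u32), sizeof(__u64), 1024, &opts);
}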
static void *
@@ -217,61 +227,92 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
return info;
}
-int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz)
+int bpf_prog_load(enum bpf_prog_type prog_type,
+ const char *prog_name, const char *license,
+ const struct bpf_insn *insns, size_t insn_cnt,
+ struct bpf_prog_load_opts *opts)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
void *finfo = NULL, *linfo = NULL;
+ const char *func_info, *line_info;
+ __u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
+ __u32 func_info_rec_size, line_info_rec_size;
+ int fd, attempts;
union bpf_attr attr;
- __u32 log_level;
- int fd;
+ char *log_buf;
- if (!load_attr || !log_buf != !log_buf_sz)
- return -EINVAL;
-
- log_level = load_attr->log_level;
- if (log_level > (4 | 2 | 1) || (log_level && !log_buf))
- return -EINVAL;
-
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = load_attr->prog_type;
- attr.expected_attach_type = load_attr->expected_attach_type;
- if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
- attr.prog_type == BPF_PROG_TYPE_LSM) {
- attr.attach_btf_id = load_attr->attach_btf_id;
- } else if (attr.prog_type == BPF_PROG_TYPE_TRACING ||
- attr.prog_type == BPF_PROG_TYPE_EXT) {
- attr.attach_btf_id = load_attr->attach_btf_id;
- attr.attach_prog_fd = load_attr->attach_prog_fd;
- } else {
- attr.prog_ifindex = load_attr->prog_ifindex;
- attr.kern_version = load_attr->kern_version;
- }
- attr.insn_cnt = (__u32)load_attr->insns_cnt;
- attr.insns = ptr_to_u64(load_attr->insns);
- attr.license = ptr_to_u64(load_attr->license);
+ bump_rlimit_memlock();
+
+ if (!OPTS_VALID(opts, bpf_prog_load_opts))
+ return libbpf_err(-EINVAL);
+
+ attempts = OPTS_GET(opts, attempts, 0);
+ if (attempts < 0)
+ return libbpf_err(-EINVAL);
+ if (attempts == 0)
+ attempts = PROG_LOAD_ATTEMPTS;
+
+ memset(&attr, 0, attr_sz);
+
+ attr.prog_type = prog_type;
+ attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
+
+ attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
+ attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
+ attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
+ attr.kern_version = OPTS_GET(opts, kern_version, 0);
+
+ if (prog_name && kernel_supports(NULL, FEAT_PROG_NAME))
+ libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
+ attr.license = ptr_to_u64(license);
+
+ if (insn_cnt > UINT_MAX)
+ return libbpf_err(-E2BIG);
+
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = (__u32)insn_cnt;
+
+ attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
+ attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);
+
+ if (attach_prog_fd && attach_btf_obj_fd)
+ return libbpf_err(-EINVAL);
+
+ attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
+ if (attach_prog_fd)
+ attr.attach_prog_fd = attach_prog_fd;
+ else
+ attr.attach_btf_obj_fd = attach_btf_obj_fd;
+
+ log_buf = OPTS_GET(opts, log_buf, NULL);
+ log_size = OPTS_GET(opts, log_size, 0);
+ log_level = OPTS_GET(opts, log_level, 0);
+
+ if (!!log_buf != !!log_size)
+ return libbpf_err(-EINVAL);
+
+ func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
+ func_info = OPTS_GET(opts, func_info, NULL);
+ attr.func_info_rec_size = func_info_rec_size;
+ attr.func_info = ptr_to_u64(func_info);
+ attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);
+
+ line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
+ line_info = OPTS_GET(opts, line_info, NULL);
+ attr.line_info_rec_size = line_info_rec_size;
+ attr.line_info = ptr_to_u64(line_info);
+ attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
+
+ attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
- attr.log_level = log_level;
if (log_level) {
attr.log_buf = ptr_to_u64(log_buf);
- attr.log_size = log_buf_sz;
- } else {
- attr.log_buf = ptr_to_u64(NULL);
- attr.log_size = 0;
+ attr.log_size = log_size;
+ attr.log_level = log_level;
}
- attr.prog_btf_fd = load_attr->prog_btf_fd;
- attr.func_info_rec_size = load_attr->func_info_rec_size;
- attr.func_info_cnt = load_attr->func_info_cnt;
- attr.func_info = ptr_to_u64(load_attr->func_info);
- attr.line_info_rec_size = load_attr->line_info_rec_size;
- attr.line_info_cnt = load_attr->line_info_cnt;
- attr.line_info = ptr_to_u64(load_attr->line_info);
- if (load_attr->name)
- memcpy(attr.prog_name, load_attr->name,
- min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
- attr.prog_flags = load_attr->prog_flags;
-
- fd = sys_bpf_prog_load(&attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+ OPTS_SET(opts, log_true_size, attr.log_true_size);
if (fd >= 0)
return fd;
@@ -281,177 +322,195 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
*/
while (errno == E2BIG && (!finfo || !linfo)) {
if (!finfo && attr.func_info_cnt &&
- attr.func_info_rec_size < load_attr->func_info_rec_size) {
+ attr.func_info_rec_size < func_info_rec_size) {
/* try with corrected func info records */
- finfo = alloc_zero_tailing_info(load_attr->func_info,
- load_attr->func_info_cnt,
- load_attr->func_info_rec_size,
+ finfo = alloc_zero_tailing_info(func_info,
+ attr.func_info_cnt,
+ func_info_rec_size,
attr.func_info_rec_size);
- if (!finfo)
+ if (!finfo) {
+ errno = E2BIG;
goto done;
+ }
attr.func_info = ptr_to_u64(finfo);
- attr.func_info_rec_size = load_attr->func_info_rec_size;
+ attr.func_info_rec_size = func_info_rec_size;
} else if (!linfo && attr.line_info_cnt &&
- attr.line_info_rec_size <
- load_attr->line_info_rec_size) {
- linfo = alloc_zero_tailing_info(load_attr->line_info,
- load_attr->line_info_cnt,
- load_attr->line_info_rec_size,
+ attr.line_info_rec_size < line_info_rec_size) {
+ linfo = alloc_zero_tailing_info(line_info,
+ attr.line_info_cnt,
+ line_info_rec_size,
attr.line_info_rec_size);
- if (!linfo)
+ if (!linfo) {
+ errno = E2BIG;
goto done;
+ }
attr.line_info = ptr_to_u64(linfo);
- attr.line_info_rec_size = load_attr->line_info_rec_size;
+ attr.line_info_rec_size = line_info_rec_size;
} else {
break;
}
- fd = sys_bpf_prog_load(&attr, sizeof(attr));
-
+ fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+ OPTS_SET(opts, log_true_size, attr.log_true_size);
if (fd >= 0)
goto done;
}
- if (log_level || !log_buf)
- goto done;
+ if (log_level == 0 && log_buf) {
+ /* log_level == 0 with non-NULL log_buf requires retrying on error
+ * with log_level == 1 and log_buf/log_size set, to get details of
+ * failure
+ */
+ attr.log_buf = ptr_to_u64(log_buf);
+ attr.log_size = log_size;
+ attr.log_level = 1;
- /* Try again with log */
- attr.log_buf = ptr_to_u64(log_buf);
- attr.log_size = log_buf_sz;
- attr.log_level = 1;
- log_buf[0] = 0;
- fd = sys_bpf_prog_load(&attr, sizeof(attr));
+ fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
+ OPTS_SET(opts, log_true_size, attr.log_true_size);
+ }
done:
+ /* free() doesn't affect errno, so we don't need to restore it */
free(finfo);
free(linfo);
- return fd;
+ return libbpf_err_errno(fd);
}
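
A hypothetical caller-side sketch (not part of the patch) of the reworked bpf_prog_load(), showing the opts-based verifier log and the retry "attempts" knob; the instruction macros are the ones from linux/filter.h already used by this file:

static char log_buf[64 * 1024];

static int load_return_zero_prog(void)
{
	const struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 */
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = sizeof(log_buf),
		/* log_level left at 0: log is only fetched on load failure */
		.attempts = 10,			/* override the default of 5 EAGAIN retries */
	);

	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "ret_zero", "GPL",
			     insns, sizeof(insns) / sizeof(insns[0]), &opts);
}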
-int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, const char *license,
- __u32 kern_version, char *log_buf,
- size_t log_buf_sz)
+int bpf_map_update_elem(int fd, const void *key, const void *value,
+ __u64 flags)
{
- struct bpf_load_program_attr load_attr;
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
+ union bpf_attr attr;
+ int ret;
- memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
- load_attr.prog_type = type;
- load_attr.expected_attach_type = 0;
- load_attr.name = NULL;
- load_attr.insns = insns;
- load_attr.insns_cnt = insns_cnt;
- load_attr.license = license;
- load_attr.kern_version = kern_version;
+ memset(&attr, 0, attr_sz);
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
+ attr.flags = flags;
- return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
+ ret = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
-int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
- size_t insns_cnt, __u32 prog_flags, const char *license,
- __u32 kern_version, char *log_buf, size_t log_buf_sz,
- int log_level)
+int bpf_map_lookup_elem(int fd, const void *key, void *value)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = type;
- attr.insn_cnt = (__u32)insns_cnt;
- attr.insns = ptr_to_u64(insns);
- attr.license = ptr_to_u64(license);
- attr.log_buf = ptr_to_u64(log_buf);
- attr.log_size = log_buf_sz;
- attr.log_level = log_level;
- log_buf[0] = 0;
- attr.kern_version = kern_version;
- attr.prog_flags = prog_flags;
+ memset(&attr, 0, attr_sz);
+ attr.map_fd = fd;
+ attr.key = ptr_to_u64(key);
+ attr.value = ptr_to_u64(value);
- return sys_bpf_prog_load(&attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
-int bpf_map_update_elem(int fd, const void *key, const void *value,
- __u64 flags)
+int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
attr.flags = flags;
- return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
-int bpf_map_lookup_elem(int fd, const void *key, void *value)
+int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
- return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
-int bpf_map_lookup_elem_flags(int fd, const void *key, void *value, __u64 flags)
+int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key, void *value, __u64 flags)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.value = ptr_to_u64(value);
attr.flags = flags;
- return sys_bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
-int bpf_map_lookup_and_delete_elem(int fd, const void *key, void *value)
+int bpf_map_delete_elem(int fd, const void *key)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
- attr.value = ptr_to_u64(value);
- return sys_bpf(BPF_MAP_LOOKUP_AND_DELETE_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
-int bpf_map_delete_elem(int fd, const void *key)
+int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
+ attr.flags = flags;
- return sys_bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
int bpf_map_get_next_key(int fd, const void *key, void *next_key)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, next_key);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.map_fd = fd;
attr.key = ptr_to_u64(key);
attr.next_key = ptr_to_u64(next_key);
- return sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_GET_NEXT_KEY, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
int bpf_map_freeze(int fd)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, map_fd);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.map_fd = fd;
- return sys_bpf(BPF_MAP_FREEZE, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_FREEZE, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
@@ -459,13 +518,14 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
__u32 *count,
const struct bpf_map_batch_opts *opts)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, batch);
union bpf_attr attr;
int ret;
if (!OPTS_VALID(opts, bpf_map_batch_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.batch.map_fd = fd;
attr.batch.in_batch = ptr_to_u64(in_batch);
attr.batch.out_batch = ptr_to_u64(out_batch);
@@ -475,17 +535,17 @@ static int bpf_map_batch_common(int cmd, int fd, void *in_batch,
attr.batch.elem_flags = OPTS_GET(opts, elem_flags, 0);
attr.batch.flags = OPTS_GET(opts, flags, 0);
- ret = sys_bpf(cmd, &attr, sizeof(attr));
+ ret = sys_bpf(cmd, &attr, attr_sz);
*count = attr.batch.count;
- return ret;
+ return libbpf_err_errno(ret);
}
-int bpf_map_delete_batch(int fd, void *keys, __u32 *count,
+int bpf_map_delete_batch(int fd, const void *keys, __u32 *count,
const struct bpf_map_batch_opts *opts)
{
return bpf_map_batch_common(BPF_MAP_DELETE_BATCH, fd, NULL,
- NULL, keys, NULL, count, opts);
+ NULL, (void *)keys, NULL, count, opts);
}
int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch, void *keys,
@@ -505,32 +565,47 @@ int bpf_map_lookup_and_delete_batch(int fd, void *in_batch, void *out_batch,
count, opts);
}
-int bpf_map_update_batch(int fd, void *keys, void *values, __u32 *count,
+int bpf_map_update_batch(int fd, const void *keys, const void *values, __u32 *count,
const struct bpf_map_batch_opts *opts)
{
return bpf_map_batch_common(BPF_MAP_UPDATE_BATCH, fd, NULL, NULL,
- keys, values, count, opts);
+ (void *)keys, (void *)values, count, opts);
}
int bpf_obj_pin(int fd, const char *pathname)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.pathname = ptr_to_u64((void *)pathname);
attr.bpf_fd = fd;
- return sys_bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_OBJ_PIN, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
int bpf_obj_get(const char *pathname)
{
+ return bpf_obj_get_opts(pathname, NULL);
+}
+
+int bpf_obj_get_opts(const char *pathname, const struct bpf_obj_get_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, file_flags);
union bpf_attr attr;
+ int fd;
- memset(&attr, 0, sizeof(attr));
+ if (!OPTS_VALID(opts, bpf_obj_get_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
attr.pathname = ptr_to_u64((void *)pathname);
+ attr.file_flags = OPTS_GET(opts, file_flags, 0);
- return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_OBJ_GET, &attr, attr_sz);
+ return libbpf_err_errno(fd);
}
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
@@ -540,170 +615,309 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
.flags = flags,
);
- return bpf_prog_attach_xattr(prog_fd, target_fd, type, &opts);
+ return bpf_prog_attach_opts(prog_fd, target_fd, type, &opts);
}
-int bpf_prog_attach_xattr(int prog_fd, int target_fd,
+int bpf_prog_attach_opts(int prog_fd, int target_fd,
enum bpf_attach_type type,
const struct bpf_prog_attach_opts *opts)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
union bpf_attr attr;
+ int ret;
if (!OPTS_VALID(opts, bpf_prog_attach_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
attr.attach_flags = OPTS_GET(opts, flags, 0);
attr.replace_bpf_fd = OPTS_GET(opts, replace_prog_fd, 0);
- return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_PROG_ATTACH, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
int bpf_prog_detach(int target_fd, enum bpf_attach_type type)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.target_fd = target_fd;
attr.attach_type = type;
- return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
int bpf_prog_detach2(int prog_fd, int target_fd, enum bpf_attach_type type)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, replace_bpf_fd);
union bpf_attr attr;
+ int ret;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
- return sys_bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_PROG_DETACH, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type,
const struct bpf_link_create_opts *opts)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, link_create);
+ __u32 target_btf_id, iter_info_len;
union bpf_attr attr;
+ int fd, err;
if (!OPTS_VALID(opts, bpf_link_create_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
+
+ iter_info_len = OPTS_GET(opts, iter_info_len, 0);
+ target_btf_id = OPTS_GET(opts, target_btf_id, 0);
+
+ /* validate we don't have unexpected combinations of non-zero fields */
+ if (iter_info_len || target_btf_id) {
+ if (iter_info_len && target_btf_id)
+ return libbpf_err(-EINVAL);
+ if (!OPTS_ZEROED(opts, target_btf_id))
+ return libbpf_err(-EINVAL);
+ }
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.link_create.prog_fd = prog_fd;
attr.link_create.target_fd = target_fd;
attr.link_create.attach_type = attach_type;
+ attr.link_create.flags = OPTS_GET(opts, flags, 0);
- return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
+ if (target_btf_id) {
+ attr.link_create.target_btf_id = target_btf_id;
+ goto proceed;
+ }
+
+ switch (attach_type) {
+ case BPF_TRACE_ITER:
+ attr.link_create.iter_info = ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
+ attr.link_create.iter_info_len = iter_info_len;
+ break;
+ case BPF_PERF_EVENT:
+ attr.link_create.perf_event.bpf_cookie = OPTS_GET(opts, perf_event.bpf_cookie, 0);
+ if (!OPTS_ZEROED(opts, perf_event))
+ return libbpf_err(-EINVAL);
+ break;
+ case BPF_TRACE_KPROBE_MULTI:
+ attr.link_create.kprobe_multi.flags = OPTS_GET(opts, kprobe_multi.flags, 0);
+ attr.link_create.kprobe_multi.cnt = OPTS_GET(opts, kprobe_multi.cnt, 0);
+ attr.link_create.kprobe_multi.syms = ptr_to_u64(OPTS_GET(opts, kprobe_multi.syms, 0));
+ attr.link_create.kprobe_multi.addrs = ptr_to_u64(OPTS_GET(opts, kprobe_multi.addrs, 0));
+ attr.link_create.kprobe_multi.cookies = ptr_to_u64(OPTS_GET(opts, kprobe_multi.cookies, 0));
+ if (!OPTS_ZEROED(opts, kprobe_multi))
+ return libbpf_err(-EINVAL);
+ break;
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ case BPF_LSM_MAC:
+ attr.link_create.tracing.cookie = OPTS_GET(opts, tracing.cookie, 0);
+ if (!OPTS_ZEROED(opts, tracing))
+ return libbpf_err(-EINVAL);
+ break;
+ default:
+ if (!OPTS_ZEROED(opts, flags))
+ return libbpf_err(-EINVAL);
+ break;
+ }
+proceed:
+ fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, attr_sz);
+ if (fd >= 0)
+ return fd;
+ /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
+ * and other similar programs
+ */
+ err = -errno;
+ if (err != -EINVAL)
+ return libbpf_err(err);
+
+ /* if user used features not supported by
+ * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
+ */
+ if (attr.link_create.target_fd || attr.link_create.target_btf_id)
+ return libbpf_err(err);
+ if (!OPTS_ZEROED(opts, sz))
+ return libbpf_err(err);
+
+ /* otherwise, for few select kinds of programs that can be
+ * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
+ * a fallback for older kernels
+ */
+ switch (attach_type) {
+ case BPF_TRACE_RAW_TP:
+ case BPF_LSM_MAC:
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ return bpf_raw_tracepoint_open(NULL, prog_fd);
+ default:
+ return libbpf_err(err);
+ }
+}
+
+int bpf_link_detach(int link_fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, link_detach);
+ union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, attr_sz);
+ attr.link_detach.link_fd = link_fd;
+
+ ret = sys_bpf(BPF_LINK_DETACH, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
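
A hypothetical attach sketch (not part of the patch) for the extended bpf_link_create(): for fentry/fexit/LSM programs target_fd is 0, and on kernels whose LINK_CREATE does not support these types the call above transparently falls back to BPF_RAW_TRACEPOINT_OPEN.

static int attach_fentry(int prog_fd)
{
	int link_fd;

	/* opts may be NULL; a tracing.cookie could be passed via
	 * bpf_link_create_opts if needed
	 */
	link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_FENTRY, NULL);
	if (link_fd < 0)
		return link_fd;		/* negative error code, errno also set */

	/* ... use the link; bpf_link_detach(link_fd) force-detaches it,
	 * close(link_fd) drops the last reference
	 */
	return link_fd;
}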
int bpf_link_update(int link_fd, int new_prog_fd,
const struct bpf_link_update_opts *opts)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, link_update);
union bpf_attr attr;
+ int ret;
if (!OPTS_VALID(opts, bpf_link_update_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
+
+ if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
+ return libbpf_err(-EINVAL);
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.link_update.link_fd = link_fd;
attr.link_update.new_prog_fd = new_prog_fd;
attr.link_update.flags = OPTS_GET(opts, flags, 0);
- attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
+ if (OPTS_GET(opts, old_prog_fd, 0))
+ attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
+ else if (OPTS_GET(opts, old_map_fd, 0))
+ attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);
- return sys_bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
-int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
- __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
+int bpf_iter_create(int link_fd)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, iter_create);
+ union bpf_attr attr;
+ int fd;
+
+ memset(&attr, 0, attr_sz);
+ attr.iter_create.link_fd = link_fd;
+
+ fd = sys_bpf_fd(BPF_ITER_CREATE, &attr, attr_sz);
+ return libbpf_err_errno(fd);
+}
+
+int bpf_prog_query_opts(int target_fd,
+ enum bpf_attach_type type,
+ struct bpf_prog_query_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, query);
union bpf_attr attr;
int ret;
- memset(&attr, 0, sizeof(attr));
+ if (!OPTS_VALID(opts, bpf_prog_query_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+
attr.query.target_fd = target_fd;
attr.query.attach_type = type;
- attr.query.query_flags = query_flags;
- attr.query.prog_cnt = *prog_cnt;
- attr.query.prog_ids = ptr_to_u64(prog_ids);
+ attr.query.query_flags = OPTS_GET(opts, query_flags, 0);
+ attr.query.prog_cnt = OPTS_GET(opts, prog_cnt, 0);
+ attr.query.prog_ids = ptr_to_u64(OPTS_GET(opts, prog_ids, NULL));
+ attr.query.prog_attach_flags = ptr_to_u64(OPTS_GET(opts, prog_attach_flags, NULL));
- ret = sys_bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
- if (attach_flags)
- *attach_flags = attr.query.attach_flags;
- *prog_cnt = attr.query.prog_cnt;
- return ret;
+ ret = sys_bpf(BPF_PROG_QUERY, &attr, attr_sz);
+
+ OPTS_SET(opts, attach_flags, attr.query.attach_flags);
+ OPTS_SET(opts, prog_cnt, attr.query.prog_cnt);
+
+ return libbpf_err_errno(ret);
}
-int bpf_prog_test_run(int prog_fd, int repeat, void *data, __u32 size,
- void *data_out, __u32 *size_out, __u32 *retval,
- __u32 *duration)
+int bpf_prog_query(int target_fd, enum bpf_attach_type type, __u32 query_flags,
+ __u32 *attach_flags, __u32 *prog_ids, __u32 *prog_cnt)
{
- union bpf_attr attr;
+ LIBBPF_OPTS(bpf_prog_query_opts, opts);
int ret;
- memset(&attr, 0, sizeof(attr));
- attr.test.prog_fd = prog_fd;
- attr.test.data_in = ptr_to_u64(data);
- attr.test.data_out = ptr_to_u64(data_out);
- attr.test.data_size_in = size;
- attr.test.repeat = repeat;
+ opts.query_flags = query_flags;
+ opts.prog_ids = prog_ids;
+ opts.prog_cnt = *prog_cnt;
+
+ ret = bpf_prog_query_opts(target_fd, type, &opts);
+
+ if (attach_flags)
+ *attach_flags = opts.attach_flags;
+ *prog_cnt = opts.prog_cnt;
- ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
- if (size_out)
- *size_out = attr.test.data_size_out;
- if (retval)
- *retval = attr.test.retval;
- if (duration)
- *duration = attr.test.duration;
- return ret;
+ return libbpf_err_errno(ret);
}
-int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
+int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, test);
union bpf_attr attr;
int ret;
- if (!test_attr->data_out && test_attr->data_size_out > 0)
- return -EINVAL;
-
- memset(&attr, 0, sizeof(attr));
- attr.test.prog_fd = test_attr->prog_fd;
- attr.test.data_in = ptr_to_u64(test_attr->data_in);
- attr.test.data_out = ptr_to_u64(test_attr->data_out);
- attr.test.data_size_in = test_attr->data_size_in;
- attr.test.data_size_out = test_attr->data_size_out;
- attr.test.ctx_in = ptr_to_u64(test_attr->ctx_in);
- attr.test.ctx_out = ptr_to_u64(test_attr->ctx_out);
- attr.test.ctx_size_in = test_attr->ctx_size_in;
- attr.test.ctx_size_out = test_attr->ctx_size_out;
- attr.test.repeat = test_attr->repeat;
-
- ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
- test_attr->data_size_out = attr.test.data_size_out;
- test_attr->ctx_size_out = attr.test.ctx_size_out;
- test_attr->retval = attr.test.retval;
- test_attr->duration = attr.test.duration;
- return ret;
+ if (!OPTS_VALID(opts, bpf_test_run_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.test.prog_fd = prog_fd;
+ attr.test.batch_size = OPTS_GET(opts, batch_size, 0);
+ attr.test.cpu = OPTS_GET(opts, cpu, 0);
+ attr.test.flags = OPTS_GET(opts, flags, 0);
+ attr.test.repeat = OPTS_GET(opts, repeat, 0);
+ attr.test.duration = OPTS_GET(opts, duration, 0);
+ attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
+ attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
+ attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
+ attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
+ attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
+ attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
+ attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
+ attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));
+
+ ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, attr_sz);
+
+ OPTS_SET(opts, data_size_out, attr.test.data_size_out);
+ OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
+ OPTS_SET(opts, duration, attr.test.duration);
+ OPTS_SET(opts, retval, attr.test.retval);
+
+ return libbpf_err_errno(ret);
}
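
A hypothetical sketch (not part of the patch) of the opts-based replacement for the old test-run helpers: inputs are passed and outputs (retval, duration, data_size_out) returned through the same bpf_test_run_opts struct.

static int test_run_once(int prog_fd, void *pkt, __u32 pkt_len)
{
	char data_out[1500];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = pkt_len,
		.data_out = data_out,
		.data_size_out = sizeof(data_out),
		.repeat = 1,
	);
	int err;

	err = bpf_prog_test_run_opts(prog_fd, &topts);
	if (err)
		return err;

	/* topts.retval, topts.duration and topts.data_size_out are now filled in */
	return (int)topts.retval;
}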
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr;
int err;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.start_id = start_id;
- err = sys_bpf(cmd, &attr, sizeof(attr));
+ err = sys_bpf(cmd, &attr, attr_sz);
if (!err)
*next_id = attr.next_id;
- return err;
+ return libbpf_err_errno(err);
}
int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id)
@@ -721,108 +935,258 @@ int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id)
return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID);
}
-int bpf_prog_get_fd_by_id(__u32 id)
+int bpf_link_get_next_id(__u32 start_id, __u32 *next_id)
{
+ return bpf_obj_get_next_id(start_id, next_id, BPF_LINK_GET_NEXT_ID);
+}
+
+int bpf_prog_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr;
+ int fd;
- memset(&attr, 0, sizeof(attr));
+ if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
attr.prog_id = id;
+ attr.open_flags = OPTS_GET(opts, open_flags, 0);
- return sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_PROG_GET_FD_BY_ID, &attr, attr_sz);
+ return libbpf_err_errno(fd);
}
-int bpf_map_get_fd_by_id(__u32 id)
+int bpf_prog_get_fd_by_id(__u32 id)
{
+ return bpf_prog_get_fd_by_id_opts(id, NULL);
+}
+
+int bpf_map_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr;
+ int fd;
- memset(&attr, 0, sizeof(attr));
+ if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
attr.map_id = id;
+ attr.open_flags = OPTS_GET(opts, open_flags, 0);
- return sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
+ return libbpf_err_errno(fd);
}
-int bpf_btf_get_fd_by_id(__u32 id)
+int bpf_map_get_fd_by_id(__u32 id)
{
+ return bpf_map_get_fd_by_id_opts(id, NULL);
+}
+
+int bpf_btf_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
union bpf_attr attr;
+ int fd;
- memset(&attr, 0, sizeof(attr));
+ if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
attr.btf_id = id;
+ attr.open_flags = OPTS_GET(opts, open_flags, 0);
+
+ fd = sys_bpf_fd(BPF_BTF_GET_FD_BY_ID, &attr, attr_sz);
+ return libbpf_err_errno(fd);
+}
- return sys_bpf(BPF_BTF_GET_FD_BY_ID, &attr, sizeof(attr));
+int bpf_btf_get_fd_by_id(__u32 id)
+{
+ return bpf_btf_get_fd_by_id_opts(id, NULL);
}
-int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len)
+int bpf_link_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, open_flags);
+ union bpf_attr attr;
+ int fd;
+
+ if (!OPTS_VALID(opts, bpf_get_fd_by_id_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.link_id = id;
+ attr.open_flags = OPTS_GET(opts, open_flags, 0);
+
+ fd = sys_bpf_fd(BPF_LINK_GET_FD_BY_ID, &attr, attr_sz);
+ return libbpf_err_errno(fd);
+}
+
+int bpf_link_get_fd_by_id(__u32 id)
+{
+ return bpf_link_get_fd_by_id_opts(id, NULL);
+}
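
The new ID/FD helpers compose naturally; a hypothetical iteration sketch (not part of the patch) over all BPF links in the system:

#include <unistd.h>

static void for_each_link_fd(void (*cb)(int link_fd))
{
	__u32 id = 0;
	int fd;

	while (!bpf_link_get_next_id(id, &id)) {
		fd = bpf_link_get_fd_by_id(id);
		if (fd < 0)
			continue;	/* link may have gone away; errno is set */
		cb(fd);
		close(fd);
	}
}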
+
+int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, info);
union bpf_attr attr;
int err;
- memset(&attr, 0, sizeof(attr));
- attr.info.bpf_fd = prog_fd;
+ memset(&attr, 0, attr_sz);
+ attr.info.bpf_fd = bpf_fd;
attr.info.info_len = *info_len;
attr.info.info = ptr_to_u64(info);
- err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
+ err = sys_bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, attr_sz);
if (!err)
*info_len = attr.info.info_len;
+ return libbpf_err_errno(err);
+}
- return err;
+int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len)
+{
+ return bpf_obj_get_info_by_fd(prog_fd, info, info_len);
+}
+
+int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len)
+{
+ return bpf_obj_get_info_by_fd(map_fd, info, info_len);
+}
+
+int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len)
+{
+ return bpf_obj_get_info_by_fd(btf_fd, info, info_len);
+}
+
+int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len)
+{
+ return bpf_obj_get_info_by_fd(link_fd, info, info_len);
}
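
The bpf_*_get_info_by_fd() wrappers are thin, type-safe fronts for BPF_OBJ_GET_INFO_BY_FD; a hypothetical usage sketch (not part of the patch):

#include <stdio.h>
#include <linux/bpf.h>

static int print_prog_name(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (err)
		return err;		/* negative error code, errno also set */

	printf("prog id %u, name '%s'\n", info.id, info.name);
	return 0;
}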
int bpf_raw_tracepoint_open(const char *name, int prog_fd)
{
+ const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint);
union bpf_attr attr;
+ int fd;
- memset(&attr, 0, sizeof(attr));
+ memset(&attr, 0, attr_sz);
attr.raw_tracepoint.name = ptr_to_u64(name);
attr.raw_tracepoint.prog_fd = prog_fd;
- return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
+ fd = sys_bpf_fd(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
+ return libbpf_err_errno(fd);
}
-int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
- bool do_log)
+int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
{
- union bpf_attr attr = {};
+ const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
+ union bpf_attr attr;
+ char *log_buf;
+ size_t log_size;
+ __u32 log_level;
int fd;
- attr.btf = ptr_to_u64(btf);
- attr.btf_size = btf_size;
+ bump_rlimit_memlock();
-retry:
- if (do_log && log_buf && log_buf_size) {
- attr.btf_log_level = 1;
- attr.btf_log_size = log_buf_size;
+ memset(&attr, 0, attr_sz);
+
+ if (!OPTS_VALID(opts, bpf_btf_load_opts))
+ return libbpf_err(-EINVAL);
+
+ log_buf = OPTS_GET(opts, log_buf, NULL);
+ log_size = OPTS_GET(opts, log_size, 0);
+ log_level = OPTS_GET(opts, log_level, 0);
+
+ if (log_size > UINT_MAX)
+ return libbpf_err(-EINVAL);
+ if (log_size && !log_buf)
+ return libbpf_err(-EINVAL);
+
+ attr.btf = ptr_to_u64(btf_data);
+ attr.btf_size = btf_size;
+ /* log_level == 0 and log_buf != NULL means "try loading without
+ * log_buf, but retry with log_buf and log_level=1 on error", which is
+ * consistent across low-level and high-level BTF and program loading
+ * APIs within libbpf and provides a sensible behavior in practice
+ */
+ if (log_level) {
attr.btf_log_buf = ptr_to_u64(log_buf);
+ attr.btf_log_size = (__u32)log_size;
+ attr.btf_log_level = log_level;
}
- fd = sys_bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
- if (fd == -1 && !do_log && log_buf && log_buf_size) {
- do_log = true;
- goto retry;
+ fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
+ if (fd < 0 && log_buf && log_level == 0) {
+ attr.btf_log_buf = ptr_to_u64(log_buf);
+ attr.btf_log_size = (__u32)log_size;
+ attr.btf_log_level = 1;
+ fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
}
- return fd;
+ OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
+ return libbpf_err_errno(fd);
}
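
A hypothetical sketch (not part of the patch) of the new bpf_btf_load(), relying on the log_level == 0 behavior implemented above: the log buffer is only filled in when the load fails.

static char btf_log[1024 * 1024];

static int load_raw_btf(const void *btf_data, size_t btf_size)
{
	LIBBPF_OPTS(bpf_btf_load_opts, opts,
		.log_buf = btf_log,
		.log_size = sizeof(btf_log),
		/* log_level left at 0: retried with log_level = 1 on error */
	);

	return bpf_btf_load(btf_data, btf_size, &opts);
}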
int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf, __u32 *buf_len,
__u32 *prog_id, __u32 *fd_type, __u64 *probe_offset,
__u64 *probe_addr)
{
- union bpf_attr attr = {};
+ const size_t attr_sz = offsetofend(union bpf_attr, task_fd_query);
+ union bpf_attr attr;
int err;
+ memset(&attr, 0, attr_sz);
attr.task_fd_query.pid = pid;
attr.task_fd_query.fd = fd;
attr.task_fd_query.flags = flags;
attr.task_fd_query.buf = ptr_to_u64(buf);
attr.task_fd_query.buf_len = *buf_len;
- err = sys_bpf(BPF_TASK_FD_QUERY, &attr, sizeof(attr));
+ err = sys_bpf(BPF_TASK_FD_QUERY, &attr, attr_sz);
+
*buf_len = attr.task_fd_query.buf_len;
*prog_id = attr.task_fd_query.prog_id;
*fd_type = attr.task_fd_query.fd_type;
*probe_offset = attr.task_fd_query.probe_offset;
*probe_addr = attr.task_fd_query.probe_addr;
- return err;
+ return libbpf_err_errno(err);
+}
+
+int bpf_enable_stats(enum bpf_stats_type type)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, enable_stats);
+ union bpf_attr attr;
+ int fd;
+
+ memset(&attr, 0, attr_sz);
+ attr.enable_stats.type = type;
+
+ fd = sys_bpf_fd(BPF_ENABLE_STATS, &attr, attr_sz);
+ return libbpf_err_errno(fd);
+}
+
+int bpf_prog_bind_map(int prog_fd, int map_fd,
+ const struct bpf_prog_bind_opts *opts)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_bind_map);
+ union bpf_attr attr;
+ int ret;
+
+ if (!OPTS_VALID(opts, bpf_prog_bind_opts))
+ return libbpf_err(-EINVAL);
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_bind_map.prog_fd = prog_fd;
+ attr.prog_bind_map.map_fd = map_fd;
+ attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
+
+ ret = sys_bpf(BPF_PROG_BIND_MAP, &attr, attr_sz);
+ return libbpf_err_errno(ret);
}
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 46d47afdd887..a2c091389b18 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/*
- * common eBPF ELF operations.
+ * Common BPF ELF operations.
*
* Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
* Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
@@ -29,92 +29,113 @@
#include <stdint.h>
#include "libbpf_common.h"
+#include "libbpf_legacy.h"
#ifdef __cplusplus
extern "C" {
#endif
-struct bpf_create_map_attr {
- const char *name;
- enum bpf_map_type map_type;
- __u32 map_flags;
- __u32 key_size;
- __u32 value_size;
- __u32 max_entries;
- __u32 numa_node;
+int libbpf_set_memlock_rlim(size_t memlock_bytes);
+
+struct bpf_map_create_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
__u32 btf_fd;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
+ __u32 btf_vmlinux_value_type_id;
+
+ __u32 inner_map_fd;
+ __u32 map_flags;
+ __u64 map_extra;
+
+ __u32 numa_node;
__u32 map_ifindex;
- union {
- __u32 inner_map_fd;
- __u32 btf_vmlinux_value_type_id;
- };
};
+#define bpf_map_create_opts__last_field map_ifindex
+
+LIBBPF_API int bpf_map_create(enum bpf_map_type map_type,
+ const char *map_name,
+ __u32 key_size,
+ __u32 value_size,
+ __u32 max_entries,
+ const struct bpf_map_create_opts *opts);
+
+struct bpf_prog_load_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
+ /* libbpf can retry the BPF_PROG_LOAD command if the bpf() syscall returns
+ * -EAGAIN. This field determines how many attempts libbpf makes.
+ * If not specified, libbpf uses the default value of 5.
+ */
+ int attempts;
-LIBBPF_API int
-bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr);
-LIBBPF_API int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size,
- int max_entries, __u32 map_flags, int node);
-LIBBPF_API int bpf_create_map_name(enum bpf_map_type map_type, const char *name,
- int key_size, int value_size,
- int max_entries, __u32 map_flags);
-LIBBPF_API int bpf_create_map(enum bpf_map_type map_type, int key_size,
- int value_size, int max_entries, __u32 map_flags);
-LIBBPF_API int bpf_create_map_in_map_node(enum bpf_map_type map_type,
- const char *name, int key_size,
- int inner_map_fd, int max_entries,
- __u32 map_flags, int node);
-LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type,
- const char *name, int key_size,
- int inner_map_fd, int max_entries,
- __u32 map_flags);
-
-struct bpf_load_program_attr {
- enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
- const char *name;
- const struct bpf_insn *insns;
- size_t insns_cnt;
- const char *license;
- union {
- __u32 kern_version;
- __u32 attach_prog_fd;
- };
- union {
- __u32 prog_ifindex;
- __u32 attach_btf_id;
- };
__u32 prog_btf_fd;
- __u32 func_info_rec_size;
+ __u32 prog_flags;
+ __u32 prog_ifindex;
+ __u32 kern_version;
+
+ __u32 attach_btf_id;
+ __u32 attach_prog_fd;
+ __u32 attach_btf_obj_fd;
+
+ const int *fd_array;
+
+ /* .BTF.ext func info data */
const void *func_info;
__u32 func_info_cnt;
- __u32 line_info_rec_size;
+ __u32 func_info_rec_size;
+
+ /* .BTF.ext line info data */
const void *line_info;
__u32 line_info_cnt;
+ __u32 line_info_rec_size;
+
+ /* verifier log options */
__u32 log_level;
- __u32 prog_flags;
+ __u32 log_size;
+ char *log_buf;
+ /* output: actual total log contents size (including terminating zero).
+ * It could be both larger than original log_size (if log was
+ * truncated), or smaller (if log buffer wasn't filled completely).
+ * If kernel doesn't support this feature, log_size is left unchanged.
+ */
+ __u32 log_true_size;
+ size_t :0;
};
+#define bpf_prog_load_opts__last_field log_true_size
+
+LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
+ const char *prog_name, const char *license,
+ const struct bpf_insn *insns, size_t insn_cnt,
+ struct bpf_prog_load_opts *opts);
/* Flags to direct loading requirements */
#define MAPS_RELAX_COMPAT 0x01
-/* Recommend log buffer size */
+/* Recommended log buffer size */
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
-LIBBPF_API int
-bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
- char *log_buf, size_t log_buf_sz);
-LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
- const struct bpf_insn *insns, size_t insns_cnt,
- const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz);
-LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
- const struct bpf_insn *insns,
- size_t insns_cnt, __u32 prog_flags,
- const char *license, __u32 kern_version,
- char *log_buf, size_t log_buf_sz,
- int log_level);
+
+struct bpf_btf_load_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
+ /* kernel log options */
+ char *log_buf;
+ __u32 log_level;
+ __u32 log_size;
+ /* output: actual total log contents size (including terminating zero).
+ * It could be both larger than original log_size (if log was
+ * truncated), or smaller (if log buffer wasn't filled completely).
+ * If kernel doesn't support this feature, log_size is left unchanged.
+ */
+ __u32 log_true_size;
+ size_t :0;
+};
+#define bpf_btf_load_opts__last_field log_true_size
+
+LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
+ struct bpf_btf_load_opts *opts);
LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
__u64 flags);
@@ -124,7 +145,10 @@ LIBBPF_API int bpf_map_lookup_elem_flags(int fd, const void *key, void *value,
__u64 flags);
LIBBPF_API int bpf_map_lookup_and_delete_elem(int fd, const void *key,
void *value);
+LIBBPF_API int bpf_map_lookup_and_delete_elem_flags(int fd, const void *key,
+ void *value, __u64 flags);
LIBBPF_API int bpf_map_delete_elem(int fd, const void *key);
+LIBBPF_API int bpf_map_delete_elem_flags(int fd, const void *key, __u64 flags);
LIBBPF_API int bpf_map_get_next_key(int fd, const void *key, void *next_key);
LIBBPF_API int bpf_map_freeze(int fd);
@@ -135,22 +159,144 @@ struct bpf_map_batch_opts {
};
#define bpf_map_batch_opts__last_field flags
-LIBBPF_API int bpf_map_delete_batch(int fd, void *keys,
+
+/**
+ * @brief **bpf_map_delete_batch()** allows for batch deletion of multiple
+ * elements in a BPF map.
+ *
+ * @param fd BPF map file descriptor
+ * @param keys pointer to an array of *count* keys
+ * @param count input and output parameter; on input **count** represents the
+ * number of elements in the map to delete in batch;
+ * on output, if a non-**EFAULT** error is returned, **count** represents the
+ * number of elements that were actually deleted.
+ * If **EFAULT** is returned, **count** should not be trusted to be correct.
+ * @param opts options for configuring the way the batch deletion works
+ * @return 0, on success; negative error code, otherwise (errno is also set to
+ * the error code)
+ */
+LIBBPF_API int bpf_map_delete_batch(int fd, const void *keys,
__u32 *count,
const struct bpf_map_batch_opts *opts);
+
+/**
+ * @brief **bpf_map_lookup_batch()** allows for batch lookup of BPF map elements.
+ *
+ * The parameter *in_batch* is the address of the first element in the batch to read.
+ * *out_batch* is an output parameter that should be passed as *in_batch* to subsequent
+ * calls to **bpf_map_lookup_batch()**. NULL can be passed for *in_batch* to indicate
+ * that the batched lookup starts from the beginning of the map.
+ *
+ * The *keys* and *values* are output parameters which must point to memory large enough to
+ * hold *count* items based on the key and value size of the map *map_fd*. The *keys*
+ * buffer must be of *key_size* * *count*. The *values* buffer must be of
+ * *value_size* * *count*.
+ *
+ * @param fd BPF map file descriptor
+ * @param in_batch address of the first element in batch to read, can pass NULL to
+ * indicate that the batched lookup starts from the beginning of the map.
+ * @param out_batch output parameter that should be passed to next call as *in_batch*
+ * @param keys pointer to an array large enough for *count* keys
+ * @param values pointer to an array large enough for *count* values
+ * @param count input and output parameter; on input it's the number of elements
+ * in the map to read in batch; on output it's the number of elements that were
+ * successfully read.
+ * If a non-EFAULT error is returned, count will be set as the number of elements
+ * that were read before the error occurred.
+ * If EFAULT is returned, **count** should not be trusted to be correct.
+ * @param opts options for configuring the way the batch lookup works
+ * @return 0, on success; negative error code, otherwise (errno is also set to
+ * the error code)
+ */
LIBBPF_API int bpf_map_lookup_batch(int fd, void *in_batch, void *out_batch,
void *keys, void *values, __u32 *count,
const struct bpf_map_batch_opts *opts);
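
The in_batch/out_batch protocol described above is easiest to see in a loop. Below is a minimal user-space sketch, assuming a hypothetical BPF_MAP_TYPE_HASH map descriptor map_fd with __u32 keys and __u64 values; per the doc comment above, errors come back as a negative code with errno also set, and ENOENT conventionally signals that the whole map has been read.

#include <errno.h>
#include <stdbool.h>
#include <bpf/bpf.h>

static int dump_map(int map_fd)
{
	__u32 keys[64];
	__u64 vals[64];
	__u32 in_batch, out_batch, count;
	bool first = true;
	int err;

	do {
		count = 64;                 /* in: capacity of keys[]/vals[] */
		err = bpf_map_lookup_batch(map_fd, first ? NULL : &in_batch,
					   &out_batch, keys, vals, &count, NULL);
		if (err && errno != ENOENT) /* ENOENT: map exhausted */
			return err;
		/* ... consume 'count' key/value pairs here ... */
		in_batch = out_batch;       /* resume where the last call stopped */
		first = false;
	} while (!err);

	return 0;
}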
+
+/**
+ * @brief **bpf_map_lookup_and_delete_batch()** allows for batch lookup and deletion
+ * of BPF map elements where each element is deleted after being retrieved.
+ *
+ * @param fd BPF map file descriptor
+ * @param in_batch address of the first element in batch to read, can pass NULL to
+ * get address of the first element in *out_batch*
+ * @param out_batch output parameter that should be passed to next call as *in_batch*
+ * @param keys pointer to an array of *count* keys
+ * @param values pointer to an array large enough for *count* values
+ * @param count input and output parameter; on input it's the number of elements
+ * in the map to read and delete in batch; on output it represents the number of
+ * elements that were successfully read and deleted.
+ * If a non-**EFAULT** error code is returned and the output **count** value
+ * is not equal to the input **count** value, up to **count** elements may
+ * have been deleted.
+ * If **EFAULT** is returned, up to *count* elements may have been deleted without
+ * being returned via the *keys* and *values* output parameters.
+ * @param opts options for configuring the way the batch lookup and delete works
+ * @return 0, on success; negative error code, otherwise (errno is also set to
+ * the error code)
+ */
LIBBPF_API int bpf_map_lookup_and_delete_batch(int fd, void *in_batch,
void *out_batch, void *keys,
void *values, __u32 *count,
const struct bpf_map_batch_opts *opts);
-LIBBPF_API int bpf_map_update_batch(int fd, void *keys, void *values,
+
+/**
+ * @brief **bpf_map_update_batch()** updates multiple elements in a map
+ * by specifying keys and their corresponding values.
+ *
+ * The *keys* and *values* parameters must point to memory large enough
+ * to hold *count* items based on the key and value size of the map.
+ *
+ * The *opts* parameter can be used to control how *bpf_map_update_batch()*
+ * should handle keys that either do or do not already exist in the map.
+ * In particular, the *flags* parameter of *bpf_map_batch_opts* can be
+ * one of the following:
+ *
+ * **BPF_ANY**
+ * Create new elements or update existing.
+ *
+ * **BPF_NOEXIST**
+ * Create new elements only if they do not exist.
+ *
+ * **BPF_EXIST**
+ * Update existing elements.
+ *
+ * **BPF_F_LOCK**
+ * Update spin_lock-ed map elements. This must be
+ * specified if the map value contains a spinlock.
+ *
+ * Note that *count* is an input and output parameter, where on output it
+ * represents how many elements were successfully updated. Also note that if
+ * **EFAULT** is returned, *count* should not be trusted to be correct.
+ *
+ * @param fd BPF map file descriptor
+ * @param keys pointer to an array of *count* keys
+ * @param values pointer to an array of *count* values
+ * @param count input and output parameter; on input it's the number of elements
+ * in the map to update in batch; on output, if a non-EFAULT error is returned,
+ * **count** represents the number of elements that were updated before the
+ * error occurred (so the output value may be smaller than the input value).
+ * If EFAULT is returned, **count** should not be trusted to be correct.
+ * @param opts options for configuring the way the batch update works
+ * @return 0, on success; negative error code, otherwise (errno is also set to
+ * the error code)
+ */
+LIBBPF_API int bpf_map_update_batch(int fd, const void *keys, const void *values,
__u32 *count,
const struct bpf_map_batch_opts *opts);
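
As a concrete illustration of the flag list above, here is a minimal user-space sketch that updates four elements in one call; map_fd is a hypothetical map descriptor with __u32 keys and __u64 values, and LIBBPF_OPTS is libbpf's usual opts-initialization helper.

#include <linux/bpf.h>
#include <bpf/bpf.h>

static int fill_map(int map_fd)
{
	__u32 keys[4] = { 0, 1, 2, 3 };
	__u64 vals[4] = { 10, 11, 12, 13 };
	__u32 count = 4;
	LIBBPF_OPTS(bpf_map_batch_opts, opts,
		.flags = BPF_ANY,           /* create new or update existing */
	);

	/* on error, count reports how many elements were actually updated */
	return bpf_map_update_batch(map_fd, keys, vals, &count, &opts);
}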
+struct bpf_obj_get_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+
+ __u32 file_flags;
+
+ size_t :0;
+};
+#define bpf_obj_get_opts__last_field file_flags
+
LIBBPF_API int bpf_obj_pin(int fd, const char *pathname);
LIBBPF_API int bpf_obj_get(const char *pathname);
+LIBBPF_API int bpf_obj_get_opts(const char *pathname,
+ const struct bpf_obj_get_opts *opts);
struct bpf_prog_attach_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
@@ -161,32 +307,58 @@ struct bpf_prog_attach_opts {
LIBBPF_API int bpf_prog_attach(int prog_fd, int attachable_fd,
enum bpf_attach_type type, unsigned int flags);
-LIBBPF_API int bpf_prog_attach_xattr(int prog_fd, int attachable_fd,
+LIBBPF_API int bpf_prog_attach_opts(int prog_fd, int attachable_fd,
enum bpf_attach_type type,
const struct bpf_prog_attach_opts *opts);
LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
enum bpf_attach_type type);
+union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */
struct bpf_link_create_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags;
+ union bpf_iter_link_info *iter_info;
+ __u32 iter_info_len;
+ __u32 target_btf_id;
+ union {
+ struct {
+ __u64 bpf_cookie;
+ } perf_event;
+ struct {
+ __u32 flags;
+ __u32 cnt;
+ const char **syms;
+ const unsigned long *addrs;
+ const __u64 *cookies;
+ } kprobe_multi;
+ struct {
+ __u64 cookie;
+ } tracing;
+ };
+ size_t :0;
};
-#define bpf_link_create_opts__last_field sz
+#define bpf_link_create_opts__last_field kprobe_multi.cookies
LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type,
const struct bpf_link_create_opts *opts);
+LIBBPF_API int bpf_link_detach(int link_fd);
+
struct bpf_link_update_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
__u32 flags; /* extra flags */
__u32 old_prog_fd; /* expected old program FD */
+ __u32 old_map_fd; /* expected old map FD */
};
-#define bpf_link_update_opts__last_field old_prog_fd
+#define bpf_link_update_opts__last_field old_map_fd
LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd,
const struct bpf_link_update_opts *opts);
+LIBBPF_API int bpf_iter_create(int link_fd);
+
struct bpf_prog_test_run_attr {
int prog_fd;
int repeat;
@@ -204,33 +376,168 @@ struct bpf_prog_test_run_attr {
* out: length of cxt_out */
};
-LIBBPF_API int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr);
-
-/*
- * bpf_prog_test_run does not check that data_out is large enough. Consider
- * using bpf_prog_test_run_xattr instead.
- */
-LIBBPF_API int bpf_prog_test_run(int prog_fd, int repeat, void *data,
- __u32 size, void *data_out, __u32 *size_out,
- __u32 *retval, __u32 *duration);
LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id);
LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id);
+LIBBPF_API int bpf_link_get_next_id(__u32 start_id, __u32 *next_id);
+
+struct bpf_get_fd_by_id_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 open_flags; /* permissions requested for the operation on fd */
+ size_t :0;
+};
+#define bpf_get_fd_by_id_opts__last_field open_flags
+
LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_prog_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_map_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_map_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts);
LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
-LIBBPF_API int bpf_obj_get_info_by_fd(int prog_fd, void *info, __u32 *info_len);
+LIBBPF_API int bpf_btf_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts);
+LIBBPF_API int bpf_link_get_fd_by_id(__u32 id);
+LIBBPF_API int bpf_link_get_fd_by_id_opts(__u32 id,
+ const struct bpf_get_fd_by_id_opts *opts);
+LIBBPF_API int bpf_obj_get_info_by_fd(int bpf_fd, void *info, __u32 *info_len);
+
+/**
+ * @brief **bpf_prog_get_info_by_fd()** obtains information about the BPF
+ * program corresponding to *prog_fd*.
+ *
+ * Populates up to *info_len* bytes of *info* and updates *info_len* with the
+ * actual number of bytes written to *info*.
+ *
+ * @param prog_fd BPF program file descriptor
+ * @param info pointer to **struct bpf_prog_info** that will be populated with
+ * BPF program information
+ * @param info_len pointer to the size of *info*; on success updated with the
+ * number of bytes written to *info*
+ * @return 0, on success; negative error code, otherwise (errno is also set to
+ * the error code)
+ */
+LIBBPF_API int bpf_prog_get_info_by_fd(int prog_fd, struct bpf_prog_info *info, __u32 *info_len);
+
+/**
+ * @brief **bpf_map_get_info_by_fd()** obtains information about the BPF
+ * map corresponding to *map_fd*.
+ *
+ * Populates up to *info_len* bytes of *info* and updates *info_len* with the
+ * actual number of bytes written to *info*.
+ *
+ * @param map_fd BPF map file descriptor
+ * @param info pointer to **struct bpf_map_info** that will be populated with
+ * BPF map information
+ * @param info_len pointer to the size of *info*; on success updated with the
+ * number of bytes written to *info*
+ * @return 0, on success; negative error code, otherwise (errno is also set to
+ * the error code)
+ */
+LIBBPF_API int bpf_map_get_info_by_fd(int map_fd, struct bpf_map_info *info, __u32 *info_len);
+
+/**
+ * @brief **bpf_btf_get_info_by_fd()** obtains information about the
+ * BTF object corresponding to *btf_fd*.
+ *
+ * Populates up to *info_len* bytes of *info* and updates *info_len* with the
+ * actual number of bytes written to *info*.
+ *
+ * @param btf_fd BTF object file descriptor
+ * @param info pointer to **struct bpf_btf_info** that will be populated with
+ * BTF object information
+ * @param info_len pointer to the size of *info*; on success updated with the
+ * number of bytes written to *info*
+ * @return 0, on success; negative error code, otherwise (errno is also set to
+ * the error code)
+ */
+LIBBPF_API int bpf_btf_get_info_by_fd(int btf_fd, struct bpf_btf_info *info, __u32 *info_len);
+
+/**
+ * @brief **bpf_link_get_info_by_fd()** obtains information about the BPF
+ * link corresponding to *link_fd*.
+ *
+ * Populates up to *info_len* bytes of *info* and updates *info_len* with the
+ * actual number of bytes written to *info*.
+ *
+ * @param link_fd BPF link file descriptor
+ * @param info pointer to **struct bpf_link_info** that will be populated with
+ * BPF link information
+ * @param info_len pointer to the size of *info*; on success updated with the
+ * number of bytes written to *info*
+ * @return 0, on success; negative error code, otherwise (errno is also set to
+ * the error code)
+ */
+LIBBPF_API int bpf_link_get_info_by_fd(int link_fd, struct bpf_link_info *info, __u32 *info_len);
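
All four *_get_info_by_fd() wrappers follow the same pattern; a minimal user-space sketch for the program variant, with prog_fd a hypothetical already-obtained program file descriptor:

#include <stdio.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static int print_prog_info(int prog_fd)
{
	struct bpf_prog_info info;
	__u32 len = sizeof(info);
	int err;

	memset(&info, 0, sizeof(info));     /* unknown fields must be zeroed */
	err = bpf_prog_get_info_by_fd(prog_fd, &info, &len);
	if (err)
		return err;                 /* negative error code, errno also set */

	printf("prog '%s' (id %u), kernel filled %u bytes\n",
	       info.name, info.id, len);
	return 0;
}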
+
+struct bpf_prog_query_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 query_flags;
+ __u32 attach_flags; /* output argument */
+ __u32 *prog_ids;
+ __u32 prog_cnt; /* input+output argument */
+ __u32 *prog_attach_flags;
+};
+#define bpf_prog_query_opts__last_field prog_attach_flags
+
+LIBBPF_API int bpf_prog_query_opts(int target_fd,
+ enum bpf_attach_type type,
+ struct bpf_prog_query_opts *opts);
LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
__u32 query_flags, __u32 *attach_flags,
__u32 *prog_ids, __u32 *prog_cnt);
+
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
-LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
- __u32 log_buf_size, bool do_log);
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
__u64 *probe_offset, __u64 *probe_addr);
#ifdef __cplusplus
+/* forward-declaring enums in C++ isn't compatible with pure C enums, so
+ * instead define bpf_enable_stats() as accepting int as an input
+ */
+LIBBPF_API int bpf_enable_stats(int type);
+#else
+enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
+LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
+#endif
+
+struct bpf_prog_bind_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags;
+};
+#define bpf_prog_bind_opts__last_field flags
+
+LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
+ const struct bpf_prog_bind_opts *opts);
+
+struct bpf_test_run_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ const void *data_in; /* optional */
+ void *data_out; /* optional */
+ __u32 data_size_in;
+ __u32 data_size_out; /* in: max length of data_out
+ * out: length of data_out
+ */
+ const void *ctx_in; /* optional */
+ void *ctx_out; /* optional */
+ __u32 ctx_size_in;
+ __u32 ctx_size_out; /* in: max length of ctx_out
+ * out: length of ctx_out
+ */
+ __u32 retval; /* out: return code of the BPF program */
+ int repeat;
+ __u32 duration; /* out: average per repetition in ns */
+ __u32 flags;
+ __u32 cpu;
+ __u32 batch_size;
+};
+#define bpf_test_run_opts__last_field batch_size
+
+LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
+ struct bpf_test_run_opts *opts);
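
A minimal user-space sketch of the opts-based test run, assuming prog_fd refers to a hypothetical already-loaded packet-processing program:

#include <bpf/bpf.h>

static int run_once(int prog_fd)
{
	char pkt_in[64] = {};               /* dummy input packet */
	char pkt_out[128];
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt_in,
		.data_size_in = sizeof(pkt_in),
		.data_out = pkt_out,
		.data_size_out = sizeof(pkt_out), /* in: capacity, out: actual length */
		.repeat = 1,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	if (err)
		return err;
	/* topts.retval is the program's return code, topts.duration the avg ns */
	return (int)topts.retval;
}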
+
+#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index 7009dc90e012..1ac57bb7ac55 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -19,32 +19,53 @@ enum bpf_field_info_kind {
BPF_FIELD_RSHIFT_U64 = 5,
};
+/* second argument to __builtin_btf_type_id() built-in */
+enum bpf_type_id_kind {
+ BPF_TYPE_ID_LOCAL = 0, /* BTF type ID in local program */
+ BPF_TYPE_ID_TARGET = 1, /* BTF type ID in target kernel */
+};
+
+/* second argument to __builtin_preserve_type_info() built-in */
+enum bpf_type_info_kind {
+ BPF_TYPE_EXISTS = 0, /* type existence in target kernel */
+ BPF_TYPE_SIZE = 1, /* type size in target kernel */
+ BPF_TYPE_MATCHES = 2, /* type match in target kernel */
+};
+
+/* second argument to __builtin_preserve_enum_value() built-in */
+enum bpf_enum_value_kind {
+ BPF_ENUMVAL_EXISTS = 0, /* enum value existence in kernel */
+ BPF_ENUMVAL_VALUE = 1, /* enum value value relocation */
+};
+
#define __CORE_RELO(src, field, info) \
__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
-#if __BYTE_ORDER == __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
- bpf_probe_read((void *)dst, \
- __CORE_RELO(src, fld, BYTE_SIZE), \
- (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+ bpf_probe_read_kernel( \
+ (void *)dst, \
+ __CORE_RELO(src, fld, BYTE_SIZE), \
+ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so
* for big-endian we need to adjust destination pointer accordingly, based on
* field byte size
*/
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
- bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
- __CORE_RELO(src, fld, BYTE_SIZE), \
- (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+ bpf_probe_read_kernel( \
+ (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
+ __CORE_RELO(src, fld, BYTE_SIZE), \
+ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif
/*
* Extract bitfield, identified by s->field, and return its value as u64.
* All this is done in relocatable manner, so bitfield changes such as
* signedness, bit size, offset changes, this will be handled automatically.
- * This version of macro is using bpf_probe_read() to read underlying integer
- * storage. Macro functions as an expression and its return type is
- * bpf_probe_read()'s return value: 0, on success, <0 on error.
+ * This version of macro is using bpf_probe_read_kernel() to read underlying
+ * integer storage. Macro functions as an expression and its return type is
+ * bpf_probe_read_kernel()'s return value: 0, on success, <0 on error.
*/
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \
unsigned long long val = 0; \
@@ -68,11 +89,19 @@ enum bpf_field_info_kind {
const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
unsigned long long val; \
\
+ /* This is a so-called barrier_var() operation that makes specified \
+ * variable "a black box" for optimizing compiler. \
+ * It forces compiler to perform BYTE_OFFSET relocation on p and use \
+ * its calculated value in the switch below, instead of applying \
+ * the same relocation 4 times for each individual memory load. \
+ */ \
+ asm volatile("" : "=r"(p) : "0"(p)); \
+ \
switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
- case 1: val = *(const unsigned char *)p; \
- case 2: val = *(const unsigned short *)p; \
- case 4: val = *(const unsigned int *)p; \
- case 8: val = *(const unsigned long long *)p; \
+ case 1: val = *(const unsigned char *)p; break; \
+ case 2: val = *(const unsigned short *)p; break; \
+ case 4: val = *(const unsigned int *)p; break; \
+ case 8: val = *(const unsigned long long *)p; break; \
} \
val <<= __CORE_RELO(s, field, LSHIFT_U64); \
if (__CORE_RELO(s, field, SIGNED)) \
@@ -82,25 +111,124 @@ enum bpf_field_info_kind {
val; \
})
+#define ___bpf_field_ref1(field) (field)
+#define ___bpf_field_ref2(type, field) (((typeof(type) *)0)->field)
+#define ___bpf_field_ref(args...) \
+ ___bpf_apply(___bpf_field_ref, ___bpf_narg(args))(args)
+
/*
* Convenience macro to check that field actually exists in target kernel's.
* Returns:
* 1, if matching field is present in target kernel;
* 0, if no matching field found.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_exists(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_exists(struct my_type, my_field).
*/
-#define bpf_core_field_exists(field) \
- __builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
+#define bpf_core_field_exists(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_EXISTS)
/*
- * Convenience macro to get byte size of a field. Works for integers,
+ * Convenience macro to get the byte size of a field. Works for integers,
* struct/unions, pointers, arrays, and enums.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_size(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_size(struct my_type, my_field).
+ */
+#define bpf_core_field_size(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_SIZE)
+
+/*
+ * Convenience macro to get field's byte offset.
+ *
+ * Supports two forms:
+ * - field reference through variable access:
+ * bpf_core_field_offset(p->my_field);
+ * - field reference through type and field names:
+ * bpf_core_field_offset(struct my_type, my_field).
+ */
+#define bpf_core_field_offset(field...) \
+ __builtin_preserve_field_info(___bpf_field_ref(field), BPF_FIELD_BYTE_OFFSET)
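
Taken together, the three field macros above can be used from a BPF program like the sketch below (assuming vmlinux.h for kernel types and a hypothetical fentry attach point):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fentry/do_nanosleep")                  /* hypothetical attach point */
int probe_fields(void *ctx)
{
	if (bpf_core_field_exists(struct task_struct, last_wakee))
		bpf_printk("last_wakee: offset %u, size %u",
			   bpf_core_field_offset(struct task_struct, last_wakee),
			   bpf_core_field_size(struct task_struct, last_wakee));
	return 0;
}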
+
+/*
+ * Convenience macro to get BTF type ID of a specified type, using a local BTF
+ * information. Return 32-bit unsigned integer with type ID from program's own
+ * BTF. Always succeeds.
+ */
+#define bpf_core_type_id_local(type) \
+ __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
+
+/*
+ * Convenience macro to get BTF type ID of a target kernel's type that matches
+ * specified local type.
+ * Returns:
+ * - valid 32-bit unsigned type ID in kernel BTF;
+ * - 0, if no matching type was found in a target kernel BTF.
+ */
+#define bpf_core_type_id_kernel(type) \
+ __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
+
+/*
+ * Convenience macro to check that provided named type
+ * (struct/union/enum/typedef) exists in a target kernel.
+ * Returns:
+ * 1, if such type is present in target kernel's BTF;
+ * 0, if no matching type is found.
+ */
+#define bpf_core_type_exists(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
+
+/*
+ * Convenience macro to check that provided named type
+ * (struct/union/enum/typedef) "matches" that in a target kernel.
+ * Returns:
+ * 1, if the type matches in the target kernel's BTF;
+ * 0, if the type does not match any in the target kernel.
+ */
+#define bpf_core_type_matches(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_MATCHES)
+
+/*
+ * Convenience macro to get the byte size of a provided named type
+ * (struct/union/enum/typedef) in a target kernel.
+ * Returns:
+ * >= 0 size (in bytes), if type is present in target kernel's BTF;
+ * 0, if no matching type is found.
+ */
+#define bpf_core_type_size(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
+
+/*
+ * Convenience macro to check that provided enumerator value is defined in
+ * a target kernel.
+ * Returns:
+ * 1, if specified enum type and its enumerator value are present in target
+ * kernel's BTF;
+ * 0, if no matching enum and/or enum value within that enum is found.
+ */
+#define bpf_core_enum_value_exists(enum_type, enum_value) \
+ __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
+
+/*
+ * Convenience macro to get the integer value of an enumerator value in
+ * a target kernel.
+ * Returns:
+ * 64-bit value, if specified enum type and its enumerator value are
+ * present in target kernel's BTF;
+ * 0, if no matching enum and/or enum value within that enum is found.
*/
-#define bpf_core_field_size(field) \
- __builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
+#define bpf_core_enum_value(enum_type, enum_value) \
+ __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
/*
- * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
- * relocation for source address using __builtin_preserve_access_index()
+ * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
+ * offset relocation for source address using __builtin_preserve_access_index()
* built-in, provided by Clang.
*
* __builtin_preserve_access_index() takes as an argument an expression of
@@ -115,17 +243,22 @@ enum bpf_field_info_kind {
* (local) BTF, used to record relocation.
*/
#define bpf_core_read(dst, sz, src) \
- bpf_probe_read(dst, sz, \
- (const void *)__builtin_preserve_access_index(src))
+ bpf_probe_read_kernel(dst, sz, (const void *)__builtin_preserve_access_index(src))
+/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
+#define bpf_core_read_user(dst, sz, src) \
+ bpf_probe_read_user(dst, sz, (const void *)__builtin_preserve_access_index(src))
/*
* bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
* additionally emitting BPF CO-RE field relocation for specified source
* argument.
*/
#define bpf_core_read_str(dst, sz, src) \
- bpf_probe_read_str(dst, sz, \
- (const void *)__builtin_preserve_access_index(src))
+ bpf_probe_read_kernel_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
+
+/* NOTE: see comments for BPF_CORE_READ_USER() about the proper types use. */
+#define bpf_core_read_user_str(dst, sz, src) \
+ bpf_probe_read_user_str(dst, sz, (const void *)__builtin_preserve_access_index(src))
#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
@@ -184,30 +317,29 @@ enum bpf_field_info_kind {
read_fn((void *)(dst), sizeof(*(dst)), &((src_type)(src))->accessor)
/* "recursively" read a sequence of inner pointers using local __t var */
-#define ___rd_first(src, a) ___read(bpf_core_read, &__t, ___type(src), src, a);
-#define ___rd_last(...) \
- ___read(bpf_core_read, &__t, \
- ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
-#define ___rd_p1(...) const void *__t; ___rd_first(__VA_ARGS__)
-#define ___rd_p2(...) ___rd_p1(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
-#define ___rd_p3(...) ___rd_p2(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
-#define ___rd_p4(...) ___rd_p3(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
-#define ___rd_p5(...) ___rd_p4(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
-#define ___rd_p6(...) ___rd_p5(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
-#define ___rd_p7(...) ___rd_p6(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
-#define ___rd_p8(...) ___rd_p7(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
-#define ___rd_p9(...) ___rd_p8(___nolast(__VA_ARGS__)) ___rd_last(__VA_ARGS__)
-#define ___read_ptrs(src, ...) \
- ___apply(___rd_p, ___narg(__VA_ARGS__))(src, __VA_ARGS__)
-
-#define ___core_read0(fn, dst, src, a) \
+#define ___rd_first(fn, src, a) ___read(fn, &__t, ___type(src), src, a);
+#define ___rd_last(fn, ...) \
+ ___read(fn, &__t, ___type(___nolast(__VA_ARGS__)), __t, ___last(__VA_ARGS__));
+#define ___rd_p1(fn, ...) const void *__t; ___rd_first(fn, __VA_ARGS__)
+#define ___rd_p2(fn, ...) ___rd_p1(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p3(fn, ...) ___rd_p2(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p4(fn, ...) ___rd_p3(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p5(fn, ...) ___rd_p4(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p6(fn, ...) ___rd_p5(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p7(fn, ...) ___rd_p6(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p8(fn, ...) ___rd_p7(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___rd_p9(fn, ...) ___rd_p8(fn, ___nolast(__VA_ARGS__)) ___rd_last(fn, __VA_ARGS__)
+#define ___read_ptrs(fn, src, ...) \
+ ___apply(___rd_p, ___narg(__VA_ARGS__))(fn, src, __VA_ARGS__)
+
+#define ___core_read0(fn, fn_ptr, dst, src, a) \
___read(fn, dst, ___type(src), src, a);
-#define ___core_readN(fn, dst, src, ...) \
- ___read_ptrs(src, ___nolast(__VA_ARGS__)) \
+#define ___core_readN(fn, fn_ptr, dst, src, ...) \
+ ___read_ptrs(fn_ptr, src, ___nolast(__VA_ARGS__)) \
___read(fn, dst, ___type(src, ___nolast(__VA_ARGS__)), __t, \
___last(__VA_ARGS__));
-#define ___core_read(fn, dst, src, a, ...) \
- ___apply(___core_read, ___empty(__VA_ARGS__))(fn, dst, \
+#define ___core_read(fn, fn_ptr, dst, src, a, ...) \
+ ___apply(___core_read, ___empty(__VA_ARGS__))(fn, fn_ptr, dst, \
src, a, ##__VA_ARGS__)
/*
@@ -215,20 +347,73 @@ enum bpf_field_info_kind {
* BPF_CORE_READ(), in which final field is read into user-provided storage.
* See BPF_CORE_READ() below for more details on general usage.
*/
-#define BPF_CORE_READ_INTO(dst, src, a, ...) \
- ({ \
- ___core_read(bpf_core_read, dst, src, a, ##__VA_ARGS__) \
- })
+#define BPF_CORE_READ_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_core_read, bpf_core_read, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/*
+ * Variant of BPF_CORE_READ_INTO() for reading from user-space memory.
+ *
+ * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
+ */
+#define BPF_CORE_READ_USER_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_core_read_user, bpf_core_read_user, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/* Non-CO-RE variant of BPF_CORE_READ_INTO() */
+#define BPF_PROBE_READ_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_probe_read_kernel, bpf_probe_read_kernel, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/* Non-CO-RE variant of BPF_CORE_READ_USER_INTO().
+ *
+ * As no CO-RE relocations are emitted, source types can be arbitrary and are
+ * not restricted to kernel types only.
+ */
+#define BPF_PROBE_READ_USER_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_probe_read_user, bpf_probe_read_user, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
/*
* BPF_CORE_READ_STR_INTO() does same "pointer chasing" as
* BPF_CORE_READ() for intermediate pointers, but then executes (and returns
* corresponding error code) bpf_core_read_str() for final string read.
*/
-#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) \
- ({ \
- ___core_read(bpf_core_read_str, dst, src, a, ##__VA_ARGS__) \
- })
+#define BPF_CORE_READ_STR_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_core_read_str, bpf_core_read, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/*
+ * Variant of BPF_CORE_READ_STR_INTO() for reading from user-space memory.
+ *
+ * NOTE: see comments for BPF_CORE_READ_USER() about the proper types use.
+ */
+#define BPF_CORE_READ_USER_STR_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_core_read_user_str, bpf_core_read_user, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/* Non-CO-RE variant of BPF_CORE_READ_STR_INTO() */
+#define BPF_PROBE_READ_STR_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_probe_read_kernel_str, bpf_probe_read_kernel, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
+
+/*
+ * Non-CO-RE variant of BPF_CORE_READ_USER_STR_INTO().
+ *
+ * As no CO-RE relocations are emitted, source types can be arbitrary and are
+ * not restricted to kernel types only.
+ */
+#define BPF_PROBE_READ_USER_STR_INTO(dst, src, a, ...) ({ \
+ ___core_read(bpf_probe_read_user_str, bpf_probe_read_user, \
+ dst, (src), a, ##__VA_ARGS__) \
+})
/*
* BPF_CORE_READ() is used to simplify BPF CO-RE relocatable read, especially
@@ -239,25 +424,61 @@ enum bpf_field_info_kind {
* int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
*
* BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF
- * CO-RE relocatable bpf_probe_read() wrapper) calls, logically equivalent to:
+ * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
+ * equivalent to:
* 1. const void *__t = s->a.b.c;
* 2. __t = __t->d.e;
* 3. __t = __t->f;
* 4. return __t->g;
*
* Equivalence is logical, because there is a heavy type casting/preservation
- * involved, as well as all the reads are happening through bpf_probe_read()
- * calls using __builtin_preserve_access_index() to emit CO-RE relocations.
+ * involved, as well as all the reads are happening through
+ * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to
+ * emit CO-RE relocations.
*
* N.B. Only up to 9 "field accessors" are supported, which should be more
* than enough for any practical purpose.
*/
-#define BPF_CORE_READ(src, a, ...) \
- ({ \
- ___type(src, a, ##__VA_ARGS__) __r; \
- BPF_CORE_READ_INTO(&__r, src, a, ##__VA_ARGS__); \
- __r; \
- })
+#define BPF_CORE_READ(src, a, ...) ({ \
+ ___type((src), a, ##__VA_ARGS__) __r; \
+ BPF_CORE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
+ __r; \
+})
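
To make the pointer-chasing description above concrete, here is a minimal CO-RE sketch that reads the current task's parent pid in a single relocatable chain; it assumes vmlinux.h and a hypothetical fentry attach point:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

char LICENSE[] SEC("license") = "GPL";

SEC("fentry/do_nanosleep")                  /* hypothetical attach point */
int show_parent(void *ctx)
{
	struct task_struct *task = (void *)bpf_get_current_task();
	/* expands to two relocatable reads: task->real_parent, then ->pid */
	pid_t ppid = BPF_CORE_READ(task, real_parent, pid);

	bpf_printk("parent pid = %d", ppid);
	return 0;
}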
+
+/*
+ * Variant of BPF_CORE_READ() for reading from user-space memory.
+ *
+ * NOTE: all the source types involved are still *kernel types* and need to
+ * exist in kernel (or kernel module) BTF, otherwise CO-RE relocation will
+ * fail. Custom user types are not relocatable with CO-RE.
+ * The typical situation in which BPF_CORE_READ_USER() might be used is to
+ * read kernel UAPI types from the user-space memory passed in as a syscall
+ * input argument.
+ */
+#define BPF_CORE_READ_USER(src, a, ...) ({ \
+ ___type((src), a, ##__VA_ARGS__) __r; \
+ BPF_CORE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
+ __r; \
+})
+
+/* Non-CO-RE variant of BPF_CORE_READ() */
+#define BPF_PROBE_READ(src, a, ...) ({ \
+ ___type((src), a, ##__VA_ARGS__) __r; \
+ BPF_PROBE_READ_INTO(&__r, (src), a, ##__VA_ARGS__); \
+ __r; \
+})
+
+/*
+ * Non-CO-RE variant of BPF_CORE_READ_USER().
+ *
+ * As no CO-RE relocations are emitted, source types can be arbitrary and are
+ * not restricted to kernel types only.
+ */
+#define BPF_PROBE_READ_USER(src, a, ...) ({ \
+ ___type((src), a, ##__VA_ARGS__) __r; \
+ BPF_PROBE_READ_USER_INTO(&__r, (src), a, ##__VA_ARGS__); \
+ __r; \
+})
#endif
diff --git a/tools/lib/bpf/bpf_endian.h b/tools/lib/bpf/bpf_endian.h
index fbe28008450f..ec9db4feca9f 100644
--- a/tools/lib/bpf/bpf_endian.h
+++ b/tools/lib/bpf/bpf_endian.h
@@ -2,8 +2,35 @@
#ifndef __BPF_ENDIAN__
#define __BPF_ENDIAN__
-#include <linux/stddef.h>
-#include <linux/swab.h>
+/*
+ * Isolate byte #n and put it into byte #m, for __u##b type.
+ * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64:
+ * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx
+ * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000
+ * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn
+ * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000
+ */
+#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8))
+
+#define ___bpf_swab16(x) ((__u16)( \
+ ___bpf_mvb(x, 16, 0, 1) | \
+ ___bpf_mvb(x, 16, 1, 0)))
+
+#define ___bpf_swab32(x) ((__u32)( \
+ ___bpf_mvb(x, 32, 0, 3) | \
+ ___bpf_mvb(x, 32, 1, 2) | \
+ ___bpf_mvb(x, 32, 2, 1) | \
+ ___bpf_mvb(x, 32, 3, 0)))
+
+#define ___bpf_swab64(x) ((__u64)( \
+ ___bpf_mvb(x, 64, 0, 7) | \
+ ___bpf_mvb(x, 64, 1, 6) | \
+ ___bpf_mvb(x, 64, 2, 5) | \
+ ___bpf_mvb(x, 64, 3, 4) | \
+ ___bpf_mvb(x, 64, 4, 3) | \
+ ___bpf_mvb(x, 64, 5, 2) | \
+ ___bpf_mvb(x, 64, 6, 1) | \
+ ___bpf_mvb(x, 64, 7, 0)))
/* LLVM's BPF target selects the endianness of the CPU
* it compiles on, or the user specifies (bpfel/bpfeb),
@@ -23,16 +50,16 @@
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define __bpf_ntohs(x) __builtin_bswap16(x)
# define __bpf_htons(x) __builtin_bswap16(x)
-# define __bpf_constant_ntohs(x) ___constant_swab16(x)
-# define __bpf_constant_htons(x) ___constant_swab16(x)
+# define __bpf_constant_ntohs(x) ___bpf_swab16(x)
+# define __bpf_constant_htons(x) ___bpf_swab16(x)
# define __bpf_ntohl(x) __builtin_bswap32(x)
# define __bpf_htonl(x) __builtin_bswap32(x)
-# define __bpf_constant_ntohl(x) ___constant_swab32(x)
-# define __bpf_constant_htonl(x) ___constant_swab32(x)
+# define __bpf_constant_ntohl(x) ___bpf_swab32(x)
+# define __bpf_constant_htonl(x) ___bpf_swab32(x)
# define __bpf_be64_to_cpu(x) __builtin_bswap64(x)
# define __bpf_cpu_to_be64(x) __builtin_bswap64(x)
-# define __bpf_constant_be64_to_cpu(x) ___constant_swab64(x)
-# define __bpf_constant_cpu_to_be64(x) ___constant_swab64(x)
+# define __bpf_constant_be64_to_cpu(x) ___bpf_swab64(x)
+# define __bpf_constant_cpu_to_be64(x) ___bpf_swab64(x)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define __bpf_ntohs(x) (x)
# define __bpf_htons(x) (x)
diff --git a/tools/lib/bpf/bpf_gen_internal.h b/tools/lib/bpf/bpf_gen_internal.h
new file mode 100644
index 000000000000..fdf44403ff36
--- /dev/null
+++ b/tools/lib/bpf/bpf_gen_internal.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2021 Facebook */
+#ifndef __BPF_GEN_INTERNAL_H
+#define __BPF_GEN_INTERNAL_H
+
+#include "bpf.h"
+
+struct ksym_relo_desc {
+ const char *name;
+ int kind;
+ int insn_idx;
+ bool is_weak;
+ bool is_typeless;
+ bool is_ld64;
+};
+
+struct ksym_desc {
+ const char *name;
+ int ref;
+ int kind;
+ union {
+ /* used for kfunc */
+ int off;
+ /* used for typeless ksym */
+ bool typeless;
+ };
+ int insn;
+ bool is_ld64;
+};
+
+struct bpf_gen {
+ struct gen_loader_opts *opts;
+ void *data_start;
+ void *data_cur;
+ void *insn_start;
+ void *insn_cur;
+ ssize_t cleanup_label;
+ __u32 nr_progs;
+ __u32 nr_maps;
+ int log_level;
+ int error;
+ struct ksym_relo_desc *relos;
+ int relo_cnt;
+ struct bpf_core_relo *core_relos;
+ int core_relo_cnt;
+ char attach_target[128];
+ int attach_kind;
+ struct ksym_desc *ksyms;
+ __u32 nr_ksyms;
+ int fd_array;
+ int nr_fd_array;
+};
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps);
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps);
+void bpf_gen__free(struct bpf_gen *gen);
+void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
+void bpf_gen__map_create(struct bpf_gen *gen,
+ enum bpf_map_type map_type, const char *map_name,
+ __u32 key_size, __u32 value_size, __u32 max_entries,
+ struct bpf_map_create_opts *map_attr, int map_idx);
+void bpf_gen__prog_load(struct bpf_gen *gen,
+ enum bpf_prog_type prog_type, const char *prog_name,
+ const char *license, struct bpf_insn *insns, size_t insn_cnt,
+ struct bpf_prog_load_opts *load_attr, int prog_idx);
+void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
+void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
+void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
+ bool is_typeless, bool is_ld64, int kind, int insn_idx);
+void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo);
+void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx);
+
+#endif
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index f69cc208778a..929a3baca8ef 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -2,44 +2,163 @@
#ifndef __BPF_HELPERS__
#define __BPF_HELPERS__
+/*
+ * Note that bpf programs need to include either
+ * vmlinux.h (auto-generated from BTF) or linux/types.h
+ * in advance since bpf_helper_defs.h uses such types
+ * as __u64.
+ */
#include "bpf_helper_defs.h"
#define __uint(name, val) int (*name)[val]
#define __type(name, val) typeof(val) *name
-
-/* Helper macro to print out debug messages */
-#define bpf_printk(fmt, ...) \
-({ \
- char ____fmt[] = fmt; \
- bpf_trace_printk(____fmt, sizeof(____fmt), \
- ##__VA_ARGS__); \
-})
+#define __array(name, val) typeof(val) *name[]
/*
* Helper macro to place programs, maps, license in
* different sections in elf_bpf file. Section names
- * are interpreted by elf_bpf loader
+ * are interpreted by libbpf depending on the context (BPF programs, BPF maps,
+ * extern variables, etc).
+ * To allow use of SEC() with externs (e.g., for extern .maps declarations),
+ * make sure __attribute__((unused)) doesn't trigger compilation warning.
+ */
+#if __GNUC__ && !__clang__
+
+/*
+ * Pragma macros are broken on GCC
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=55578
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90400
*/
-#define SEC(NAME) __attribute__((section(NAME), used))
+#define SEC(name) __attribute__((section(name), used))
-#ifndef __always_inline
-#define __always_inline __attribute__((always_inline))
+#else
+
+#define SEC(name) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wignored-attributes\"") \
+ __attribute__((section(name), used)) \
+ _Pragma("GCC diagnostic pop") \
+
+#endif
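
For illustration, a complete minimal object using SEC() to place a program and its license into the sections libbpf expects might look like this (XDP chosen arbitrarily; vmlinux.h assumed):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";      /* ends up in the "license" section */

SEC("xdp")                                  /* placed in the "xdp" program section */
int xdp_pass(struct xdp_md *ctx)
{
	return XDP_PASS;
}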
+
+/* Avoid 'linux/stddef.h' definition of '__always_inline'. */
+#undef __always_inline
+#define __always_inline inline __attribute__((always_inline))
+
+#ifndef __noinline
+#define __noinline __attribute__((noinline))
#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif
/*
- * Helper structure used by eBPF C program
- * to describe BPF map attributes to libbpf loader
+ * Use __hidden attribute to mark a non-static BPF subprogram effectively
+ * static for BPF verifier's verification algorithm purposes, allowing more
+ * extensive and permissive BPF verification process, taking into account
+ * subprogram's caller context.
*/
-struct bpf_map_def {
- unsigned int type;
- unsigned int key_size;
- unsigned int value_size;
- unsigned int max_entries;
- unsigned int map_flags;
-};
+#define __hidden __attribute__((visibility("hidden")))
+
+/* When utilizing vmlinux.h with BPF CO-RE, user BPF programs can't include
+ * any system-level headers (such as stddef.h, linux/version.h, etc), and
+ * commonly-used macros like NULL and KERNEL_VERSION aren't available through
+ * vmlinux.h. This just adds unnecessary hurdles and forces users to re-define
+ * them on their own. So as a convenience, provide such definitions here.
+ */
+#ifndef NULL
+#define NULL ((void *)0)
+#endif
+
+#ifndef KERNEL_VERSION
+#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + ((c) > 255 ? 255 : (c)))
+#endif
+
+/*
+ * Helper macros to manipulate data structures
+ */
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
+#endif
+#ifndef container_of
+#define container_of(ptr, type, member) \
+ ({ \
+ void *__mptr = (void *)(ptr); \
+ ((type *)(__mptr - offsetof(type, member))); \
+ })
+#endif
+
+/*
+ * Compiler (optimization) barrier.
+ */
+#ifndef barrier
+#define barrier() asm volatile("" ::: "memory")
+#endif
+
+/* Variable-specific compiler (optimization) barrier. It's a no-op which makes
+ * compiler believe that there is some black box modification of a given
+ * variable and thus prevents compiler from making extra assumption about its
+ * value and potential simplifications and optimizations on this variable.
+ *
+ * E.g., compiler might often delay or even omit 32-bit to 64-bit casting of
+ * a variable, making some code patterns unverifiable. Putting barrier_var()
+ * in place will ensure that cast is performed before the barrier_var()
+ * invocation, because compiler has to pessimistically assume that embedded
+ * asm section might perform some extra operations on that variable.
+ *
+ * This is a variable-specific variant of more global barrier().
+ */
+#ifndef barrier_var
+#define barrier_var(var) asm volatile("" : "+r"(var))
+#endif
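
A minimal sketch of the pattern described above, forcing the compiler to keep the bounds-checked size that is later passed to a helper; it is a fragment of a BPF object that includes vmlinux.h and <bpf/bpf_helpers.h>, and the 64-byte limit and helper choice are illustrative:

static __always_inline void copy_bounded(void *dst, const void *src, __u32 sz)
{
	barrier_var(sz);                    /* compiler must materialize sz here */
	if (sz > 64)                        /* bound the value the verifier sees */
		return;
	bpf_probe_read_kernel(dst, sz, src);
}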
+
+/*
+ * Helper macro to throw a compilation error if __bpf_unreachable() gets
+ * built into the resulting code. This works given BPF back end does not
+ * implement __builtin_trap(). This is useful to assert that certain paths
+ * of the program code are never used and hence eliminated by the compiler.
+ *
+ * For example, consider a switch statement that covers known cases used by
+ * the program. __bpf_unreachable() can then reside in the default case. If
+ * the program gets extended such that a case is not covered in the switch
+ * statement, then it will throw a build error due to the default case not
+ * being compiled out.
+ */
+#ifndef __bpf_unreachable
+# define __bpf_unreachable() __builtin_trap()
+#endif
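
A sketch of the default-case pattern the comment describes: the helper is only ever inlined with compile-time-constant arguments, so if a new constant slips in without a matching case, the default branch survives optimization and the build fails (names are illustrative; fragment of a BPF object including <bpf/bpf_helpers.h>):

static __always_inline int slot_for_kind(const int kind)
{
	switch (kind) {
	case 1: return 0;
	case 2: return 1;
	default: __bpf_unreachable();       /* must be optimized away */
	}
}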
+
+/*
+ * Helper function to perform a tail call with a constant/immediate map slot.
+ */
+#if __clang_major__ >= 8 && defined(__bpf__)
+static __always_inline void
+bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
+{
+ if (!__builtin_constant_p(slot))
+ __bpf_unreachable();
+
+ /*
+ * Provide a hard guarantee that LLVM won't optimize setting r2 (map
+ * pointer) and r3 (constant map index) from _different paths_ ending
+ * up at the _same_ call insn as otherwise we won't be able to use the
+ * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
+ * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
+ * tracking for prog array pokes") for details on verifier tracking.
+ *
+ * Note on clobber list: we need to stay in-line with BPF calling
+ * convention, so even if we don't end up using r0, r4, r5, we need
+ * to mark them as clobber so that LLVM doesn't end up using them
+ * before / after the call.
+ */
+ asm volatile("r1 = %[ctx]\n\t"
+ "r2 = %[map]\n\t"
+ "r3 = %[slot]\n\t"
+ "call 12"
+ :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
+ : "r0", "r1", "r2", "r3", "r4", "r5");
+}
+#endif
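
Usage sketch for the helper above: a dispatcher that tail-calls into a fixed slot of a program array. Map layout and slot number are illustrative, and this is a fragment of a BPF object that includes vmlinux.h and <bpf/bpf_helpers.h>:

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

SEC("xdp")
int dispatcher(struct xdp_md *ctx)
{
	bpf_tail_call_static(ctx, &jmp_table, 1);   /* slot must be a constant */
	return XDP_PASS;                            /* reached only if slot 1 is empty */
}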
enum libbpf_pin_type {
LIBBPF_PIN_NONE,
@@ -54,5 +173,225 @@ enum libbpf_tristate {
};
#define __kconfig __attribute__((section(".kconfig")))
+#define __ksym __attribute__((section(".ksyms")))
+#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
+#define __kptr __attribute__((btf_type_tag("kptr")))
+
+#define bpf_ksym_exists(sym) ({ \
+ _Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \
+ !!sym; \
+})
+
+#ifndef ___bpf_concat
+#define ___bpf_concat(a, b) a ## b
+#endif
+#ifndef ___bpf_apply
+#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#endif
+#ifndef ___bpf_nth
+#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
+#endif
+#ifndef ___bpf_narg
+#define ___bpf_narg(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#endif
+
+#define ___bpf_fill0(arr, p, x) do {} while (0)
+#define ___bpf_fill1(arr, p, x) arr[p] = x
+#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
+#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
+#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
+#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
+#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
+#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
+#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
+#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
+#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
+#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
+#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
+#define ___bpf_fill(arr, args...) \
+ ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
+
+/*
+ * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
+ * in a structure.
+ */
+#define BPF_SEQ_PRINTF(seq, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+})
+
+/*
+ * BPF_SNPRINTF wraps the bpf_snprintf helper with variadic arguments instead of
+ * an array of u64.
+ */
+#define BPF_SNPRINTF(out, out_size, fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_snprintf(out, out_size, ___fmt, \
+ ___param, sizeof(___param)); \
+})
+
+#ifdef BPF_NO_GLOBAL_DATA
+#define BPF_PRINTK_FMT_MOD
+#else
+#define BPF_PRINTK_FMT_MOD static const
+#endif
+
+#define __bpf_printk(fmt, ...) \
+({ \
+ BPF_PRINTK_FMT_MOD char ____fmt[] = fmt; \
+ bpf_trace_printk(____fmt, sizeof(____fmt), \
+ ##__VA_ARGS__); \
+})
+
+/*
+ * __bpf_vprintk wraps the bpf_trace_vprintk helper with variadic arguments
+ * instead of an array of u64.
+ */
+#define __bpf_vprintk(fmt, args...) \
+({ \
+ static const char ___fmt[] = fmt; \
+ unsigned long long ___param[___bpf_narg(args)]; \
+ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ ___bpf_fill(___param, args); \
+ _Pragma("GCC diagnostic pop") \
+ \
+ bpf_trace_vprintk(___fmt, sizeof(___fmt), \
+ ___param, sizeof(___param)); \
+})
+
+/* Use __bpf_printk when bpf_printk call has 3 or fewer fmt args
+ * Otherwise use __bpf_vprintk
+ */
+#define ___bpf_pick_printk(...) \
+ ___bpf_nth(_, ##__VA_ARGS__, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
+ __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, __bpf_vprintk, \
+ __bpf_vprintk, __bpf_vprintk, __bpf_printk /*3*/, __bpf_printk /*2*/,\
+ __bpf_printk /*1*/, __bpf_printk /*0*/)
+
+/* Helper macro to print out debug messages */
+#define bpf_printk(fmt, args...) ___bpf_pick_printk(args)(fmt, ##args)
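
With this dispatch, the first call in the sketch below routes to bpf_trace_printk() and the second, having more than three format arguments, to bpf_trace_vprintk(); the attach point is illustrative and the fragment assumes a BPF object that includes vmlinux.h and <bpf/bpf_helpers.h>:

SEC("tracepoint/syscalls/sys_enter_openat") /* hypothetical attach point */
int trace_openat(void *ctx)
{
	int cpu = bpf_get_smp_processor_id();

	bpf_printk("openat on cpu %d", cpu);          /* 1 arg  -> __bpf_printk  */
	bpf_printk("%d %d %d %d", cpu, 0, 1, 2);      /* 4 args -> __bpf_vprintk */
	return 0;
}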
+
+struct bpf_iter_num;
+
+extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __weak __ksym;
+extern int *bpf_iter_num_next(struct bpf_iter_num *it) __weak __ksym;
+extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __weak __ksym;
+
+#ifndef bpf_for_each
+/* bpf_for_each(iter_type, cur_elem, args...) provides generic construct for
+ * using BPF open-coded iterators without having to write mundane explicit
+ * low-level loop logic. Instead, it provides for()-like generic construct
+ * that can be used pretty naturally. E.g., for some hypothetical cgroup
+ * iterator, you'd write:
+ *
+ * struct cgroup *cg, *parent_cg = <...>;
+ *
+ * bpf_for_each(cgroup, cg, parent_cg, CG_ITER_CHILDREN) {
+ * bpf_printk("Child cgroup id = %d", cg->cgroup_id);
+ * if (cg->cgroup_id == 123)
+ * break;
+ * }
+ *
+ * I.e., it looks almost like high-level for each loop in other languages,
+ * supports continue/break, and is verifiable by BPF verifier.
+ *
+ * For iterating integers, the difference between bpf_for_each(num, i, N, M)
+ * and bpf_for(i, N, M) is in that bpf_for() provides additional proof to
+ * verifier that i is in [N, M) range, and in bpf_for_each() case i is `int
+ * *`, not just `int`. So for integers bpf_for() is more convenient.
+ *
+ * Note: this macro relies on C99 feature of allowing to declare variables
+ * inside for() loop, bound to for() loop lifetime. It also utilizes GCC
+ * extension: __attribute__((cleanup(<func>))), supported by both GCC and
+ * Clang.
+ */
+#define bpf_for_each(type, cur, args...) for ( \
+ /* initialize and define destructor */ \
+ struct bpf_iter_##type ___it __attribute__((aligned(8), /* enforce, just in case */, \
+ cleanup(bpf_iter_##type##_destroy))), \
+ /* ___p pointer is just to call bpf_iter_##type##_new() *once* to init ___it */ \
+ *___p __attribute__((unused)) = ( \
+ bpf_iter_##type##_new(&___it, ##args), \
+ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
+ /* for bpf_iter_##type##_destroy() when used from cleanup() attribute */ \
+ (void)bpf_iter_##type##_destroy, (void *)0); \
+ /* iteration and termination check */ \
+ (((cur) = bpf_iter_##type##_next(&___it))); \
+)
+#endif /* bpf_for_each */
+
+#ifndef bpf_for
+/* bpf_for(i, start, end) implements a for()-like looping construct that sets
+ * provided integer variable *i* to values starting from *start* through,
+ * but not including, *end*. It also proves to BPF verifier that *i* belongs
+ * to range [start, end), so this can be used for accessing arrays without
+ * extra checks.
+ *
+ * Note: *start* and *end* are assumed to be expressions with no side effects
+ * and whose values do not change throughout bpf_for() loop execution. They do
+ * not have to be statically known or constant, though.
+ *
+ * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
+ * loop bound variables and cleanup attribute, supported by GCC and Clang.
+ */
+#define bpf_for(i, start, end) for ( \
+ /* initialize and define destructor */ \
+ struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
+ cleanup(bpf_iter_num_destroy))), \
+ /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
+ *___p __attribute__((unused)) = ( \
+ bpf_iter_num_new(&___it, (start), (end)), \
+ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
+ /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
+ (void)bpf_iter_num_destroy, (void *)0); \
+ ({ \
+ /* iteration step */ \
+ int *___t = bpf_iter_num_next(&___it); \
+ /* termination and bounds check */ \
+ (___t && ((i) = *___t, (i) >= (start) && (i) < (end))); \
+ }); \
+)
+#endif /* bpf_for */
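
A small sketch of bpf_for() bounding an array index for the verifier; array size and attach point are illustrative, the fragment assumes a BPF object that includes vmlinux.h and <bpf/bpf_helpers.h>, and it requires a kernel that provides the bpf_iter_num kfuncs:

int vals[16];

SEC("fentry/do_nanosleep")                  /* hypothetical attach point */
int sum_vals(void *ctx)
{
	int i, sum = 0;

	bpf_for(i, 0, 16)                   /* verifier knows 0 <= i < 16, */
		sum += vals[i];             /* so no extra bounds check is needed */
	bpf_printk("sum = %d", sum);
	return 0;
}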
+
+#ifndef bpf_repeat
+/* bpf_repeat(N) performs N iterations without exposing iteration number
+ *
+ * Note: similarly to bpf_for_each(), it relies on C99 feature of declaring for()
+ * loop bound variables and cleanup attribute, supported by GCC and Clang.
+ */
+#define bpf_repeat(N) for ( \
+ /* initialize and define destructor */ \
+ struct bpf_iter_num ___it __attribute__((aligned(8), /* enforce, just in case */ \
+ cleanup(bpf_iter_num_destroy))), \
+ /* ___p pointer is necessary to call bpf_iter_num_new() *once* to init ___it */ \
+ *___p __attribute__((unused)) = ( \
+ bpf_iter_num_new(&___it, 0, (N)), \
+ /* this is a workaround for Clang bug: it currently doesn't emit BTF */ \
+ /* for bpf_iter_num_destroy() when used from cleanup() attribute */ \
+ (void)bpf_iter_num_destroy, (void *)0); \
+ bpf_iter_num_next(&___it); \
+ /* nothing here */ \
+)
+#endif /* bpf_repeat */
#endif
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index bafca49cb1e6..5c503096ef43 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -8,9 +8,6 @@
#include "libbpf.h"
#include "libbpf_internal.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
struct bpf_prog_linfo {
void *raw_linfo;
void *raw_jited_linfo;
@@ -109,7 +106,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
nr_linfo = info->nr_line_info;
if (!nr_linfo)
- return NULL;
+ return errno = EINVAL, NULL;
/*
* The min size that bpf_prog_linfo has to access for
@@ -117,11 +114,11 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
*/
if (info->line_info_rec_size <
offsetof(struct bpf_line_info, file_name_off))
- return NULL;
+ return errno = EINVAL, NULL;
prog_linfo = calloc(1, sizeof(*prog_linfo));
if (!prog_linfo)
- return NULL;
+ return errno = ENOMEM, NULL;
/* Copy xlated line_info */
prog_linfo->nr_linfo = nr_linfo;
@@ -177,7 +174,7 @@ struct bpf_prog_linfo *bpf_prog_linfo__new(const struct bpf_prog_info *info)
err_free:
bpf_prog_linfo__free(prog_linfo);
- return NULL;
+ return errno = EINVAL, NULL;
}
const struct bpf_line_info *
@@ -189,11 +186,11 @@ bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
const __u64 *jited_linfo;
if (func_idx >= prog_linfo->nr_jited_func)
- return NULL;
+ return errno = ENOENT, NULL;
nr_linfo = prog_linfo->nr_jited_linfo_per_func[func_idx];
if (nr_skip >= nr_linfo)
- return NULL;
+ return errno = ENOENT, NULL;
start = prog_linfo->jited_linfo_func_idx[func_idx] + nr_skip;
jited_rec_size = prog_linfo->jited_rec_size;
@@ -201,7 +198,7 @@ bpf_prog_linfo__lfind_addr_func(const struct bpf_prog_linfo *prog_linfo,
(start * jited_rec_size);
jited_linfo = raw_jited_linfo;
if (addr < *jited_linfo)
- return NULL;
+ return errno = ENOENT, NULL;
nr_linfo -= nr_skip;
rec_size = prog_linfo->rec_size;
@@ -228,13 +225,13 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
nr_linfo = prog_linfo->nr_linfo;
if (nr_skip >= nr_linfo)
- return NULL;
+ return errno = ENOENT, NULL;
rec_size = prog_linfo->rec_size;
raw_linfo = prog_linfo->raw_linfo + (nr_skip * rec_size);
linfo = raw_linfo;
if (insn_off < linfo->insn_off)
- return NULL;
+ return errno = ENOENT, NULL;
nr_linfo -= nr_skip;
for (i = 0; i < nr_linfo; i++) {
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index f3f3c3fb98cb..6fb3d0f9af17 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -2,6 +2,8 @@
#ifndef __BPF_TRACING_H__
#define __BPF_TRACING_H__
+#include <bpf/bpf_helpers.h>
+
/* Scan the ARCH passed in from ARCH env variable (see Makefile) */
#if defined(__TARGET_ARCH_x86)
#define bpf_target_x86
@@ -24,300 +26,627 @@
#elif defined(__TARGET_ARCH_sparc)
#define bpf_target_sparc
#define bpf_target_defined
+#elif defined(__TARGET_ARCH_riscv)
+ #define bpf_target_riscv
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_arc)
+ #define bpf_target_arc
+ #define bpf_target_defined
+#elif defined(__TARGET_ARCH_loongarch)
+ #define bpf_target_loongarch
+ #define bpf_target_defined
#else
- #undef bpf_target_defined
-#endif
/* Fall back to what the compiler says */
-#ifndef bpf_target_defined
#if defined(__x86_64__)
#define bpf_target_x86
+ #define bpf_target_defined
#elif defined(__s390__)
#define bpf_target_s390
+ #define bpf_target_defined
#elif defined(__arm__)
#define bpf_target_arm
+ #define bpf_target_defined
#elif defined(__aarch64__)
#define bpf_target_arm64
+ #define bpf_target_defined
#elif defined(__mips__)
#define bpf_target_mips
+ #define bpf_target_defined
#elif defined(__powerpc__)
#define bpf_target_powerpc
+ #define bpf_target_defined
#elif defined(__sparc__)
#define bpf_target_sparc
+ #define bpf_target_defined
+#elif defined(__riscv) && __riscv_xlen == 64
+ #define bpf_target_riscv
+ #define bpf_target_defined
+#elif defined(__arc__)
+ #define bpf_target_arc
+ #define bpf_target_defined
+#elif defined(__loongarch__)
+ #define bpf_target_loongarch
+ #define bpf_target_defined
+#endif /* no compiler target */
+
#endif
+
+#ifndef __BPF_TARGET_MISSING
+#define __BPF_TARGET_MISSING "GCC error \"Must specify a BPF target arch via __TARGET_ARCH_xxx\""
#endif
#if defined(bpf_target_x86)
+/*
+ * https://en.wikipedia.org/wiki/X86_calling_conventions#System_V_AMD64_ABI
+ */
+
#if defined(__KERNEL__) || defined(__VMLINUX_H__)
-#define PT_REGS_PARM1(x) ((x)->di)
-#define PT_REGS_PARM2(x) ((x)->si)
-#define PT_REGS_PARM3(x) ((x)->dx)
-#define PT_REGS_PARM4(x) ((x)->cx)
-#define PT_REGS_PARM5(x) ((x)->r8)
-#define PT_REGS_RET(x) ((x)->sp)
-#define PT_REGS_FP(x) ((x)->bp)
-#define PT_REGS_RC(x) ((x)->ax)
-#define PT_REGS_SP(x) ((x)->sp)
-#define PT_REGS_IP(x) ((x)->ip)
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di)
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si)
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx)
-#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx)
-#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp)
-#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp)
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax)
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip)
+#define __PT_PARM1_REG di
+#define __PT_PARM2_REG si
+#define __PT_PARM3_REG dx
+#define __PT_PARM4_REG cx
+#define __PT_PARM5_REG r8
+#define __PT_PARM6_REG r9
+/*
+ * Syscall uses r10 for PARM4. See arch/x86/entry/entry_64.S:entry_SYSCALL_64
+ * comments in the Linux sources, and the syscall(2) manpage.
+ */
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG r10
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG sp
+#define __PT_FP_REG bp
+#define __PT_RC_REG ax
+#define __PT_SP_REG sp
+#define __PT_IP_REG ip
#else
#ifdef __i386__
-/* i386 kernel is built with -mregparm=3 */
-#define PT_REGS_PARM1(x) ((x)->eax)
-#define PT_REGS_PARM2(x) ((x)->edx)
-#define PT_REGS_PARM3(x) ((x)->ecx)
-#define PT_REGS_PARM4(x) 0
-#define PT_REGS_PARM5(x) 0
-#define PT_REGS_RET(x) ((x)->esp)
-#define PT_REGS_FP(x) ((x)->ebp)
-#define PT_REGS_RC(x) ((x)->eax)
-#define PT_REGS_SP(x) ((x)->esp)
-#define PT_REGS_IP(x) ((x)->eip)
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax)
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx)
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx)
-#define PT_REGS_PARM4_CORE(x) 0
-#define PT_REGS_PARM5_CORE(x) 0
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp)
-#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp)
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax)
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp)
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip)
-#else
+/* i386 kernel is built with -mregparm=3 */
+#define __PT_PARM1_REG eax
+#define __PT_PARM2_REG edx
+#define __PT_PARM3_REG ecx
+/* i386 syscall ABI is very different, refer to syscall(2) manpage */
+#define __PT_PARM1_SYSCALL_REG ebx
+#define __PT_PARM2_SYSCALL_REG ecx
+#define __PT_PARM3_SYSCALL_REG edx
+#define __PT_PARM4_SYSCALL_REG esi
+#define __PT_PARM5_SYSCALL_REG edi
+#define __PT_PARM6_SYSCALL_REG ebp
+
+#define __PT_RET_REG esp
+#define __PT_FP_REG ebp
+#define __PT_RC_REG eax
+#define __PT_SP_REG esp
+#define __PT_IP_REG eip
+
+#else /* __i386__ */
+
+#define __PT_PARM1_REG rdi
+#define __PT_PARM2_REG rsi
+#define __PT_PARM3_REG rdx
+#define __PT_PARM4_REG rcx
+#define __PT_PARM5_REG r8
+#define __PT_PARM6_REG r9
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG r10
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG rsp
+#define __PT_FP_REG rbp
+#define __PT_RC_REG rax
+#define __PT_SP_REG rsp
+#define __PT_IP_REG rip
+
+#endif /* __i386__ */
+
+#endif /* __KERNEL__ || __VMLINUX_H__ */
-#define PT_REGS_PARM1(x) ((x)->rdi)
-#define PT_REGS_PARM2(x) ((x)->rsi)
-#define PT_REGS_PARM3(x) ((x)->rdx)
-#define PT_REGS_PARM4(x) ((x)->rcx)
-#define PT_REGS_PARM5(x) ((x)->r8)
-#define PT_REGS_RET(x) ((x)->rsp)
-#define PT_REGS_FP(x) ((x)->rbp)
-#define PT_REGS_RC(x) ((x)->rax)
-#define PT_REGS_SP(x) ((x)->rsp)
-#define PT_REGS_IP(x) ((x)->rip)
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi)
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi)
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx)
-#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx)
-#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8)
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp)
-#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp)
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax)
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp)
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip)
+#elif defined(bpf_target_s390)
-#endif
-#endif
+/*
+ * https://github.com/IBM/s390x-abi/releases/download/v1.6/lzsabi_s390x.pdf
+ */
-#elif defined(bpf_target_s390)
+struct pt_regs___s390 {
+ unsigned long orig_gpr2;
+};
/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
-struct pt_regs;
-#define PT_REGS_S390 const volatile user_pt_regs
-#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
-#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
-#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
-#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
-#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
-#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
-/* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
-#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
-#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
-#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3])
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4])
-#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5])
-#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6])
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), grps[14])
-#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11])
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2])
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15])
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), pdw.addr)
+#define __PT_REGS_CAST(x) ((const user_pt_regs *)(x))
+#define __PT_PARM1_REG gprs[2]
+#define __PT_PARM2_REG gprs[3]
+#define __PT_PARM3_REG gprs[4]
+#define __PT_PARM4_REG gprs[5]
+#define __PT_PARM5_REG gprs[6]
+
+#define __PT_PARM1_SYSCALL_REG orig_gpr2
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG gprs[7]
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) \
+ BPF_CORE_READ((const struct pt_regs___s390 *)(x), __PT_PARM1_SYSCALL_REG)
+
+#define __PT_RET_REG gprs[14]
+#define __PT_FP_REG gprs[11] /* Works only with CONFIG_FRAME_POINTER */
+#define __PT_RC_REG gprs[2]
+#define __PT_SP_REG gprs[15]
+#define __PT_IP_REG psw.addr
#elif defined(bpf_target_arm)
-#define PT_REGS_PARM1(x) ((x)->uregs[0])
-#define PT_REGS_PARM2(x) ((x)->uregs[1])
-#define PT_REGS_PARM3(x) ((x)->uregs[2])
-#define PT_REGS_PARM4(x) ((x)->uregs[3])
-#define PT_REGS_PARM5(x) ((x)->uregs[4])
-#define PT_REGS_RET(x) ((x)->uregs[14])
-#define PT_REGS_FP(x) ((x)->uregs[11]) /* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_RC(x) ((x)->uregs[0])
-#define PT_REGS_SP(x) ((x)->uregs[13])
-#define PT_REGS_IP(x) ((x)->uregs[12])
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0])
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1])
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2])
-#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3])
-#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4])
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14])
-#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11])
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0])
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13])
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12])
+/*
+ * https://github.com/ARM-software/abi-aa/blob/main/aapcs32/aapcs32.rst#machine-registers
+ */
+
+#define __PT_PARM1_REG uregs[0]
+#define __PT_PARM2_REG uregs[1]
+#define __PT_PARM3_REG uregs[2]
+#define __PT_PARM4_REG uregs[3]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG uregs[4]
+#define __PT_PARM6_SYSCALL_REG uregs[5]
+#define __PT_PARM7_SYSCALL_REG uregs[6]
+
+#define __PT_RET_REG uregs[14]
+#define __PT_FP_REG uregs[11] /* Works only with CONFIG_FRAME_POINTER */
+#define __PT_RC_REG uregs[0]
+#define __PT_SP_REG uregs[13]
+#define __PT_IP_REG uregs[12]
#elif defined(bpf_target_arm64)
+/*
+ * https://github.com/ARM-software/abi-aa/blob/main/aapcs64/aapcs64.rst#machine-registers
+ */
+
+struct pt_regs___arm64 {
+ unsigned long orig_x0;
+};
+
/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
-struct pt_regs;
-#define PT_REGS_ARM64 const volatile struct user_pt_regs
-#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
-#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
-#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
-#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
-#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
-#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
-/* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
-#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
-#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
-#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1])
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2])
-#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3])
-#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4])
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30])
-#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29])
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0])
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp)
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc)
+#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
+#define __PT_PARM1_REG regs[0]
+#define __PT_PARM2_REG regs[1]
+#define __PT_PARM3_REG regs[2]
+#define __PT_PARM4_REG regs[3]
+#define __PT_PARM5_REG regs[4]
+#define __PT_PARM6_REG regs[5]
+#define __PT_PARM7_REG regs[6]
+#define __PT_PARM8_REG regs[7]
+
+#define __PT_PARM1_SYSCALL_REG orig_x0
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+#define PT_REGS_PARM1_SYSCALL(x) PT_REGS_PARM1_CORE_SYSCALL(x)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) \
+ BPF_CORE_READ((const struct pt_regs___arm64 *)(x), __PT_PARM1_SYSCALL_REG)
+
+#define __PT_RET_REG regs[30]
+#define __PT_FP_REG regs[29] /* Works only with CONFIG_FRAME_POINTER */
+#define __PT_RC_REG regs[0]
+#define __PT_SP_REG sp
+#define __PT_IP_REG pc
#elif defined(bpf_target_mips)
-#define PT_REGS_PARM1(x) ((x)->regs[4])
-#define PT_REGS_PARM2(x) ((x)->regs[5])
-#define PT_REGS_PARM3(x) ((x)->regs[6])
-#define PT_REGS_PARM4(x) ((x)->regs[7])
-#define PT_REGS_PARM5(x) ((x)->regs[8])
-#define PT_REGS_RET(x) ((x)->regs[31])
-#define PT_REGS_FP(x) ((x)->regs[30]) /* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_RC(x) ((x)->regs[1])
-#define PT_REGS_SP(x) ((x)->regs[29])
-#define PT_REGS_IP(x) ((x)->cp0_epc)
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4])
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5])
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6])
-#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7])
-#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8])
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31])
-#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30])
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[1])
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29])
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc)
+/*
+ * N64 ABI is assumed right now.
+ * https://en.wikipedia.org/wiki/MIPS_architecture#Calling_conventions
+ */
+
+#define __PT_PARM1_REG regs[4]
+#define __PT_PARM2_REG regs[5]
+#define __PT_PARM3_REG regs[6]
+#define __PT_PARM4_REG regs[7]
+#define __PT_PARM5_REG regs[8]
+#define __PT_PARM6_REG regs[9]
+#define __PT_PARM7_REG regs[10]
+#define __PT_PARM8_REG regs[11]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG /* only N32/N64 */
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG /* only N32/N64 */
+
+#define __PT_RET_REG regs[31]
+#define __PT_FP_REG regs[30] /* Works only with CONFIG_FRAME_POINTER */
+#define __PT_RC_REG regs[2]
+#define __PT_SP_REG regs[29]
+#define __PT_IP_REG cp0_epc
#elif defined(bpf_target_powerpc)
-#define PT_REGS_PARM1(x) ((x)->gpr[3])
-#define PT_REGS_PARM2(x) ((x)->gpr[4])
-#define PT_REGS_PARM3(x) ((x)->gpr[5])
-#define PT_REGS_PARM4(x) ((x)->gpr[6])
-#define PT_REGS_PARM5(x) ((x)->gpr[7])
-#define PT_REGS_RC(x) ((x)->gpr[3])
-#define PT_REGS_SP(x) ((x)->sp)
-#define PT_REGS_IP(x) ((x)->nip)
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3])
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4])
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5])
-#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6])
-#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7])
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), gpr[3])
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp)
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip)
+/*
+ * http://refspecs.linux-foundation.org/elf/elfspec_ppc.pdf (page 3-14,
+ * section "Function Calling Sequence")
+ */
+
+#define __PT_PARM1_REG gpr[3]
+#define __PT_PARM2_REG gpr[4]
+#define __PT_PARM3_REG gpr[5]
+#define __PT_PARM4_REG gpr[6]
+#define __PT_PARM5_REG gpr[7]
+#define __PT_PARM6_REG gpr[8]
+#define __PT_PARM7_REG gpr[9]
+#define __PT_PARM8_REG gpr[10]
+
+/* powerpc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG orig_gpr3
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+#if !defined(__arch64__)
+#define __PT_PARM7_SYSCALL_REG __PT_PARM7_REG /* only powerpc (not powerpc64) */
+#endif
+
+#define __PT_RET_REG regs[31]
+#define __PT_FP_REG __unsupported__
+#define __PT_RC_REG gpr[3]
+#define __PT_SP_REG sp
+#define __PT_IP_REG nip
#elif defined(bpf_target_sparc)
-#define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0])
-#define PT_REGS_PARM2(x) ((x)->u_regs[UREG_I1])
-#define PT_REGS_PARM3(x) ((x)->u_regs[UREG_I2])
-#define PT_REGS_PARM4(x) ((x)->u_regs[UREG_I3])
-#define PT_REGS_PARM5(x) ((x)->u_regs[UREG_I4])
-#define PT_REGS_RET(x) ((x)->u_regs[UREG_I7])
-#define PT_REGS_RC(x) ((x)->u_regs[UREG_I0])
-#define PT_REGS_SP(x) ((x)->u_regs[UREG_FP])
-
-#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
-#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1])
-#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2])
-#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3])
-#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4])
-#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7])
-#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0])
-#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP])
+/*
+ * https://en.wikipedia.org/wiki/Calling_convention#SPARC
+ */
+#define __PT_PARM1_REG u_regs[UREG_I0]
+#define __PT_PARM2_REG u_regs[UREG_I1]
+#define __PT_PARM3_REG u_regs[UREG_I2]
+#define __PT_PARM4_REG u_regs[UREG_I3]
+#define __PT_PARM5_REG u_regs[UREG_I4]
+#define __PT_PARM6_REG u_regs[UREG_I5]
+
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG u_regs[UREG_I7]
+#define __PT_FP_REG __unsupported__
+#define __PT_RC_REG u_regs[UREG_I0]
+#define __PT_SP_REG u_regs[UREG_FP]
/* Should this also be a bpf_target check for the sparc case? */
#if defined(__arch64__)
-#define PT_REGS_IP(x) ((x)->tpc)
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc)
+#define __PT_IP_REG tpc
#else
-#define PT_REGS_IP(x) ((x)->pc)
-#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc)
+#define __PT_IP_REG pc
+#endif
+
+#elif defined(bpf_target_riscv)
+
+/*
+ * https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
+ */
+
+#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
+#define __PT_PARM1_REG a0
+#define __PT_PARM2_REG a1
+#define __PT_PARM3_REG a2
+#define __PT_PARM4_REG a3
+#define __PT_PARM5_REG a4
+#define __PT_PARM6_REG a5
+#define __PT_PARM7_REG a6
+#define __PT_PARM8_REG a7
+
+/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG ra
+#define __PT_FP_REG s0
+#define __PT_RC_REG a0
+#define __PT_SP_REG sp
+#define __PT_IP_REG pc
+
+#elif defined(bpf_target_arc)
+
+/*
+ * Section "Function Calling Sequence" (page 24):
+ * https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf
+ */
+
+/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
+#define __PT_PARM1_REG scratch.r0
+#define __PT_PARM2_REG scratch.r1
+#define __PT_PARM3_REG scratch.r2
+#define __PT_PARM4_REG scratch.r3
+#define __PT_PARM5_REG scratch.r4
+#define __PT_PARM6_REG scratch.r5
+#define __PT_PARM7_REG scratch.r6
+#define __PT_PARM8_REG scratch.r7
+
+/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG scratch.blink
+#define __PT_FP_REG scratch.fp
+#define __PT_RC_REG scratch.r0
+#define __PT_SP_REG scratch.sp
+#define __PT_IP_REG scratch.ret
+
+#elif defined(bpf_target_loongarch)
+
+/*
+ * https://docs.kernel.org/loongarch/introduction.html
+ * https://loongson.github.io/LoongArch-Documentation/LoongArch-ELF-ABI-EN.html
+ */
+
+/* loongarch provides struct user_pt_regs instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_pt_regs *)(x))
+#define __PT_PARM1_REG regs[4]
+#define __PT_PARM2_REG regs[5]
+#define __PT_PARM3_REG regs[6]
+#define __PT_PARM4_REG regs[7]
+#define __PT_PARM5_REG regs[8]
+#define __PT_PARM6_REG regs[9]
+#define __PT_PARM7_REG regs[10]
+#define __PT_PARM8_REG regs[11]
+
+/* loongarch does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#define __PT_PARM1_SYSCALL_REG __PT_PARM1_REG
+#define __PT_PARM2_SYSCALL_REG __PT_PARM2_REG
+#define __PT_PARM3_SYSCALL_REG __PT_PARM3_REG
+#define __PT_PARM4_SYSCALL_REG __PT_PARM4_REG
+#define __PT_PARM5_SYSCALL_REG __PT_PARM5_REG
+#define __PT_PARM6_SYSCALL_REG __PT_PARM6_REG
+
+#define __PT_RET_REG regs[1]
+#define __PT_FP_REG regs[22]
+#define __PT_RC_REG regs[4]
+#define __PT_SP_REG regs[3]
+#define __PT_IP_REG csr_era
+
+#endif
+
+#if defined(bpf_target_defined)
+
+struct pt_regs;
+
+/* allow some architectures to override `struct pt_regs` */
+#ifndef __PT_REGS_CAST
+#define __PT_REGS_CAST(x) (x)
#endif
+/*
+ * Different architectures support a different number of arguments passed
+ * through registers: i386 supports just 3, while some arches support up to 8.
+ */
+#ifndef __PT_PARM4_REG
+#define __PT_PARM4_REG __unsupported__
+#endif
+#ifndef __PT_PARM5_REG
+#define __PT_PARM5_REG __unsupported__
+#endif
+#ifndef __PT_PARM6_REG
+#define __PT_PARM6_REG __unsupported__
+#endif
+#ifndef __PT_PARM7_REG
+#define __PT_PARM7_REG __unsupported__
+#endif
+#ifndef __PT_PARM8_REG
+#define __PT_PARM8_REG __unsupported__
+#endif
+/*
+ * Similarly, the syscall convention might differ from the function call
+ * convention within each architecture. All supported architectures pass
+ * either 6 or 7 syscall arguments in registers.
+ *
+ * See syscall(2) manpage for succinct table with information on each arch.
+ */
+#ifndef __PT_PARM7_SYSCALL_REG
+#define __PT_PARM7_SYSCALL_REG __unsupported__
#endif
+#define PT_REGS_PARM1(x) (__PT_REGS_CAST(x)->__PT_PARM1_REG)
+#define PT_REGS_PARM2(x) (__PT_REGS_CAST(x)->__PT_PARM2_REG)
+#define PT_REGS_PARM3(x) (__PT_REGS_CAST(x)->__PT_PARM3_REG)
+#define PT_REGS_PARM4(x) (__PT_REGS_CAST(x)->__PT_PARM4_REG)
+#define PT_REGS_PARM5(x) (__PT_REGS_CAST(x)->__PT_PARM5_REG)
+#define PT_REGS_PARM6(x) (__PT_REGS_CAST(x)->__PT_PARM6_REG)
+#define PT_REGS_PARM7(x) (__PT_REGS_CAST(x)->__PT_PARM7_REG)
+#define PT_REGS_PARM8(x) (__PT_REGS_CAST(x)->__PT_PARM8_REG)
+#define PT_REGS_RET(x) (__PT_REGS_CAST(x)->__PT_RET_REG)
+#define PT_REGS_FP(x) (__PT_REGS_CAST(x)->__PT_FP_REG)
+#define PT_REGS_RC(x) (__PT_REGS_CAST(x)->__PT_RC_REG)
+#define PT_REGS_SP(x) (__PT_REGS_CAST(x)->__PT_SP_REG)
+#define PT_REGS_IP(x) (__PT_REGS_CAST(x)->__PT_IP_REG)
+
+#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_REG)
+#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_REG)
+#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_REG)
+#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_REG)
+#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_REG)
+#define PT_REGS_PARM6_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_REG)
+#define PT_REGS_PARM7_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_REG)
+#define PT_REGS_PARM8_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM8_REG)
+#define PT_REGS_RET_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RET_REG)
+#define PT_REGS_FP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_FP_REG)
+#define PT_REGS_RC_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_RC_REG)
+#define PT_REGS_SP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_SP_REG)
+#define PT_REGS_IP_CORE(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_IP_REG)
+
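
For illustration, a minimal sketch of how these accessors are typically used from a hand-written kprobe handler. The attach point, includes and types are assumptions for the example (not part of this patch); the _CORE variants additionally rely on <bpf/bpf_core_read.h>.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
#include <bpf/bpf_tracing.h>

SEC("kprobe/do_sys_openat2")
int trace_openat2(struct pt_regs *ctx)
{
	/* first argument of the probed function, read straight from registers */
	int dfd = (int)PT_REGS_PARM1(ctx);
	/* same value, read through a CO-RE-relocated probe read */
	int dfd_core = (int)PT_REGS_PARM1_CORE(ctx);

	bpf_printk("openat2: dfd=%d core=%d", dfd, dfd_core);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
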
#if defined(bpf_target_powerpc)
+
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+
#elif defined(bpf_target_sparc)
+
#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
+
#else
+
#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
- ({ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+ ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
- ({ bpf_probe_read(&(ip), sizeof(ip), \
- (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+ ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
+
#endif
+#ifndef PT_REGS_PARM1_SYSCALL
+#define PT_REGS_PARM1_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM1_SYSCALL_REG)
+#define PT_REGS_PARM1_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM1_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM2_SYSCALL
+#define PT_REGS_PARM2_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM2_SYSCALL_REG)
+#define PT_REGS_PARM2_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM2_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM3_SYSCALL
+#define PT_REGS_PARM3_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM3_SYSCALL_REG)
+#define PT_REGS_PARM3_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM3_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM4_SYSCALL
+#define PT_REGS_PARM4_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM4_SYSCALL_REG)
+#define PT_REGS_PARM4_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM4_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM5_SYSCALL
+#define PT_REGS_PARM5_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM5_SYSCALL_REG)
+#define PT_REGS_PARM5_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM5_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM6_SYSCALL
+#define PT_REGS_PARM6_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM6_SYSCALL_REG)
+#define PT_REGS_PARM6_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM6_SYSCALL_REG)
+#endif
+#ifndef PT_REGS_PARM7_SYSCALL
+#define PT_REGS_PARM7_SYSCALL(x) (__PT_REGS_CAST(x)->__PT_PARM7_SYSCALL_REG)
+#define PT_REGS_PARM7_CORE_SYSCALL(x) BPF_CORE_READ(__PT_REGS_CAST(x), __PT_PARM7_SYSCALL_REG)
+#endif
+
+#else /* defined(bpf_target_defined) */
+
+#define PT_REGS_PARM1(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM8(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RET(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_FP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RC(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_SP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_IP(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define PT_REGS_PARM1_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM8_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RET_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_FP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_RC_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_SP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_IP_CORE(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define PT_REGS_PARM1_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#define PT_REGS_PARM1_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM2_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM3_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM4_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM5_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM6_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+#define PT_REGS_PARM7_CORE_SYSCALL(x) ({ _Pragma(__BPF_TARGET_MISSING); 0l; })
+
+#endif /* defined(bpf_target_defined) */
+
+/*
+ * When invoked from a syscall handler kprobe, returns a pointer to a
+ * struct pt_regs containing syscall arguments and suitable for passing to
+ * PT_REGS_PARMn_SYSCALL() and PT_REGS_PARMn_CORE_SYSCALL().
+ */
+#ifndef PT_REGS_SYSCALL_REGS
+/* By default, assume that the arch selects ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ((struct pt_regs *)PT_REGS_PARM1(ctx))
+#endif
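
As a sketch of the intended use, assuming a kprobe placed on an arch-specific syscall wrapper such as __x64_sys_openat (names and attach point are illustrative; BPF_CORE_READ needs <bpf/bpf_core_read.h>):

SEC("kprobe/__x64_sys_openat")
int trace_sys_openat(struct pt_regs *ctx)
{
	/* on ARCH_HAS_SYSCALL_WRAPPER kernels this unwraps the inner pt_regs */
	struct pt_regs *regs = PT_REGS_SYSCALL_REGS(ctx);
	int dfd = (int)PT_REGS_PARM1_CORE_SYSCALL(regs);
	const char *pathname = (const char *)PT_REGS_PARM2_CORE_SYSCALL(regs);
	char buf[64];

	bpf_probe_read_user_str(buf, sizeof(buf), pathname);
	bpf_printk("openat: dfd=%d path=%s", dfd, buf);
	return 0;
}
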
+
+#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
+#endif
+#ifndef ___bpf_apply
#define ___bpf_apply(fn, n) ___bpf_concat(fn, n)
+#endif
+#ifndef ___bpf_nth
#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N
-#define ___bpf_narg(...) \
- ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-#define ___bpf_empty(...) \
- ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0)
-
-#define ___bpf_ctx_cast0() ctx
-#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
-#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
-#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
-#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
-#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
-#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
-#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
-#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
-#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
+#endif
+#ifndef ___bpf_narg
+#define ___bpf_narg(...) ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+#endif
+
+#define ___bpf_ctx_cast0() ctx
+#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0]
+#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1]
+#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2]
+#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3]
+#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4]
+#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5]
+#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6]
+#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7]
+#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8]
#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9]
#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10]
#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11]
-#define ___bpf_ctx_cast(args...) \
- ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
+#define ___bpf_ctx_cast(args...) ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args)
/*
* BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and
@@ -336,7 +665,7 @@ struct pt_regs;
*/
#define BPF_PROG(name, args...) \
name(unsigned long long *ctx); \
-static __attribute__((always_inline)) typeof(name(0)) \
+static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args); \
typeof(name(0)) name(unsigned long long *ctx) \
{ \
@@ -345,24 +674,128 @@ typeof(name(0)) name(unsigned long long *ctx) \
return ____##name(___bpf_ctx_cast(args)); \
_Pragma("GCC diagnostic pop") \
} \
-static __attribute__((always_inline)) typeof(name(0)) \
+static __always_inline typeof(name(0)) \
____##name(unsigned long long *ctx, ##args)
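
As a usage sketch (the fentry target and its argument types are assumptions for the example, following the do_unlinkat(int, struct filename *) signature):

SEC("fentry/do_unlinkat")
int BPF_PROG(trace_unlinkat, int dfd, struct filename *name)
{
	/* arguments arrive already typed; the raw u64 ctx array stays in scope as 'ctx' */
	bpf_printk("unlinkat: dfd=%d", dfd);
	return 0;
}
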
+#ifndef ___bpf_nth2
+#define ___bpf_nth2(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, \
+ _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, N, ...) N
+#endif
+#ifndef ___bpf_narg2
+#define ___bpf_narg2(...) \
+ ___bpf_nth2(_, ##__VA_ARGS__, 12, 12, 11, 11, 10, 10, 9, 9, 8, 8, 7, 7, \
+ 6, 6, 5, 5, 4, 4, 3, 3, 2, 2, 1, 1, 0)
+#endif
+
+#define ___bpf_treg_cnt(t) \
+ __builtin_choose_expr(sizeof(t) == 1, 1, \
+ __builtin_choose_expr(sizeof(t) == 2, 1, \
+ __builtin_choose_expr(sizeof(t) == 4, 1, \
+ __builtin_choose_expr(sizeof(t) == 8, 1, \
+ __builtin_choose_expr(sizeof(t) == 16, 2, \
+ (void)0)))))
+
+#define ___bpf_reg_cnt0() (0)
+#define ___bpf_reg_cnt1(t, x) (___bpf_reg_cnt0() + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt2(t, x, args...) (___bpf_reg_cnt1(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt3(t, x, args...) (___bpf_reg_cnt2(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt4(t, x, args...) (___bpf_reg_cnt3(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt5(t, x, args...) (___bpf_reg_cnt4(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt6(t, x, args...) (___bpf_reg_cnt5(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt7(t, x, args...) (___bpf_reg_cnt6(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt8(t, x, args...) (___bpf_reg_cnt7(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt9(t, x, args...) (___bpf_reg_cnt8(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt10(t, x, args...) (___bpf_reg_cnt9(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt11(t, x, args...) (___bpf_reg_cnt10(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt12(t, x, args...) (___bpf_reg_cnt11(args) + ___bpf_treg_cnt(t))
+#define ___bpf_reg_cnt(args...) ___bpf_apply(___bpf_reg_cnt, ___bpf_narg2(args))(args)
+
+#define ___bpf_union_arg(t, x, n) \
+ __builtin_choose_expr(sizeof(t) == 1, ({ union { __u8 z[1]; t x; } ___t = { .z = {ctx[n]}}; ___t.x; }), \
+ __builtin_choose_expr(sizeof(t) == 2, ({ union { __u16 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
+ __builtin_choose_expr(sizeof(t) == 4, ({ union { __u32 z[1]; t x; } ___t = { .z = {ctx[n]} }; ___t.x; }), \
+ __builtin_choose_expr(sizeof(t) == 8, ({ union { __u64 z[1]; t x; } ___t = {.z = {ctx[n]} }; ___t.x; }), \
+ __builtin_choose_expr(sizeof(t) == 16, ({ union { __u64 z[2]; t x; } ___t = {.z = {ctx[n], ctx[n + 1]} }; ___t.x; }), \
+ (void)0)))))
+
+#define ___bpf_ctx_arg0(n, args...)
+#define ___bpf_ctx_arg1(n, t, x) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt1(t, x))
+#define ___bpf_ctx_arg2(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt2(t, x, args)) ___bpf_ctx_arg1(n, args)
+#define ___bpf_ctx_arg3(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt3(t, x, args)) ___bpf_ctx_arg2(n, args)
+#define ___bpf_ctx_arg4(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt4(t, x, args)) ___bpf_ctx_arg3(n, args)
+#define ___bpf_ctx_arg5(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt5(t, x, args)) ___bpf_ctx_arg4(n, args)
+#define ___bpf_ctx_arg6(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt6(t, x, args)) ___bpf_ctx_arg5(n, args)
+#define ___bpf_ctx_arg7(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt7(t, x, args)) ___bpf_ctx_arg6(n, args)
+#define ___bpf_ctx_arg8(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt8(t, x, args)) ___bpf_ctx_arg7(n, args)
+#define ___bpf_ctx_arg9(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt9(t, x, args)) ___bpf_ctx_arg8(n, args)
+#define ___bpf_ctx_arg10(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt10(t, x, args)) ___bpf_ctx_arg9(n, args)
+#define ___bpf_ctx_arg11(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt11(t, x, args)) ___bpf_ctx_arg10(n, args)
+#define ___bpf_ctx_arg12(n, t, x, args...) , ___bpf_union_arg(t, x, n - ___bpf_reg_cnt12(t, x, args)) ___bpf_ctx_arg11(n, args)
+#define ___bpf_ctx_arg(args...) ___bpf_apply(___bpf_ctx_arg, ___bpf_narg2(args))(___bpf_reg_cnt(args), args)
+
+#define ___bpf_ctx_decl0()
+#define ___bpf_ctx_decl1(t, x) , t x
+#define ___bpf_ctx_decl2(t, x, args...) , t x ___bpf_ctx_decl1(args)
+#define ___bpf_ctx_decl3(t, x, args...) , t x ___bpf_ctx_decl2(args)
+#define ___bpf_ctx_decl4(t, x, args...) , t x ___bpf_ctx_decl3(args)
+#define ___bpf_ctx_decl5(t, x, args...) , t x ___bpf_ctx_decl4(args)
+#define ___bpf_ctx_decl6(t, x, args...) , t x ___bpf_ctx_decl5(args)
+#define ___bpf_ctx_decl7(t, x, args...) , t x ___bpf_ctx_decl6(args)
+#define ___bpf_ctx_decl8(t, x, args...) , t x ___bpf_ctx_decl7(args)
+#define ___bpf_ctx_decl9(t, x, args...) , t x ___bpf_ctx_decl8(args)
+#define ___bpf_ctx_decl10(t, x, args...) , t x ___bpf_ctx_decl9(args)
+#define ___bpf_ctx_decl11(t, x, args...) , t x ___bpf_ctx_decl10(args)
+#define ___bpf_ctx_decl12(t, x, args...) , t x ___bpf_ctx_decl11(args)
+#define ___bpf_ctx_decl(args...) ___bpf_apply(___bpf_ctx_decl, ___bpf_narg2(args))(args)
+
+/*
+ * BPF_PROG2 is an enhanced version of BPF_PROG in order to handle struct
+ * arguments. Since each struct argument might take one or two u64 values
+ * on the trampoline stack, the argument type size is needed to place the proper
+ * number of u64 values for each argument. Therefore, BPF_PROG2 has a different
+ * syntax than BPF_PROG. For example, for the following BPF_PROG syntax:
+ *
+ * int BPF_PROG(test2, int a, int b) { ... }
+ *
+ * the corresponding BPF_PROG2 syntax is:
+ *
+ * int BPF_PROG2(test2, int, a, int, b) { ... }
+ *
+ * where each type and the corresponding argument name are separated by a comma.
+ *
+ * Use BPF_PROG2 macro if one of the arguments might be a struct/union larger
+ * than 8 bytes:
+ *
+ * int BPF_PROG2(test_struct_arg, struct bpf_testmod_struct_arg_1, a, int, b,
+ * int, c, int, d, struct bpf_testmod_struct_arg_2, e, int, ret)
+ * {
+ * // access a, b, c, d, e, and ret directly
+ * ...
+ * }
+ */
+#define BPF_PROG2(name, args...) \
+name(unsigned long long *ctx); \
+static __always_inline typeof(name(0)) \
+____##name(unsigned long long *ctx ___bpf_ctx_decl(args)); \
+typeof(name(0)) name(unsigned long long *ctx) \
+{ \
+ return ____##name(ctx ___bpf_ctx_arg(args)); \
+} \
+static __always_inline typeof(name(0)) \
+____##name(unsigned long long *ctx ___bpf_ctx_decl(args))
+
struct pt_regs;
-#define ___bpf_kprobe_args0() ctx
-#define ___bpf_kprobe_args1(x) \
- ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
-#define ___bpf_kprobe_args2(x, args...) \
- ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
-#define ___bpf_kprobe_args3(x, args...) \
- ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
-#define ___bpf_kprobe_args4(x, args...) \
- ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
-#define ___bpf_kprobe_args5(x, args...) \
- ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
-#define ___bpf_kprobe_args(args...) \
- ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
+#define ___bpf_kprobe_args0() ctx
+#define ___bpf_kprobe_args1(x) ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx)
+#define ___bpf_kprobe_args2(x, args...) ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx)
+#define ___bpf_kprobe_args3(x, args...) ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx)
+#define ___bpf_kprobe_args4(x, args...) ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx)
+#define ___bpf_kprobe_args5(x, args...) ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx)
+#define ___bpf_kprobe_args6(x, args...) ___bpf_kprobe_args5(args), (void *)PT_REGS_PARM6(ctx)
+#define ___bpf_kprobe_args7(x, args...) ___bpf_kprobe_args6(args), (void *)PT_REGS_PARM7(ctx)
+#define ___bpf_kprobe_args8(x, args...) ___bpf_kprobe_args7(args), (void *)PT_REGS_PARM8(ctx)
+#define ___bpf_kprobe_args(args...) ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args)
/*
* BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for
@@ -376,7 +809,7 @@ struct pt_regs;
*/
#define BPF_KPROBE(name, args...) \
name(struct pt_regs *ctx); \
-static __attribute__((always_inline)) typeof(name(0)) \
+static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
@@ -385,14 +818,12 @@ typeof(name(0)) name(struct pt_regs *ctx) \
return ____##name(___bpf_kprobe_args(args)); \
_Pragma("GCC diagnostic pop") \
} \
-static __attribute__((always_inline)) typeof(name(0)) \
+static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args)
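
A usage sketch under the same assumptions as above (the probe target is illustrative):

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(kprobe_unlinkat, int dfd, struct filename *name)
{
	/* arguments are fished out of pt_regs via the PT_REGS_PARMn() accessors */
	bpf_printk("unlinkat: dfd=%d", dfd);
	return 0;
}
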
-#define ___bpf_kretprobe_args0() ctx
-#define ___bpf_kretprobe_args1(x) \
- ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
-#define ___bpf_kretprobe_args(args...) \
- ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
+#define ___bpf_kretprobe_args0() ctx
+#define ___bpf_kretprobe_args1(x) ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx)
+#define ___bpf_kretprobe_args(args...) ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args)
/*
* BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional
@@ -402,7 +833,7 @@ ____##name(struct pt_regs *ctx, ##args)
*/
#define BPF_KRETPROBE(name, args...) \
name(struct pt_regs *ctx); \
-static __attribute__((always_inline)) typeof(name(0)) \
+static __always_inline typeof(name(0)) \
____##name(struct pt_regs *ctx, ##args); \
typeof(name(0)) name(struct pt_regs *ctx) \
{ \
@@ -413,4 +844,80 @@ typeof(name(0)) name(struct pt_regs *ctx) \
} \
static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
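
A matching kretprobe sketch (illustrative target):

SEC("kretprobe/do_unlinkat")
int BPF_KRETPROBE(kretprobe_unlinkat, long ret)
{
	/* only the (optional) return value is available on function exit */
	bpf_printk("unlinkat: ret=%ld", ret);
	return 0;
}
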
+/* If kernel has CONFIG_ARCH_HAS_SYSCALL_WRAPPER, read pt_regs directly */
+#define ___bpf_syscall_args0() ctx
+#define ___bpf_syscall_args1(x) ___bpf_syscall_args0(), (void *)PT_REGS_PARM1_SYSCALL(regs)
+#define ___bpf_syscall_args2(x, args...) ___bpf_syscall_args1(args), (void *)PT_REGS_PARM2_SYSCALL(regs)
+#define ___bpf_syscall_args3(x, args...) ___bpf_syscall_args2(args), (void *)PT_REGS_PARM3_SYSCALL(regs)
+#define ___bpf_syscall_args4(x, args...) ___bpf_syscall_args3(args), (void *)PT_REGS_PARM4_SYSCALL(regs)
+#define ___bpf_syscall_args5(x, args...) ___bpf_syscall_args4(args), (void *)PT_REGS_PARM5_SYSCALL(regs)
+#define ___bpf_syscall_args6(x, args...) ___bpf_syscall_args5(args), (void *)PT_REGS_PARM6_SYSCALL(regs)
+#define ___bpf_syscall_args7(x, args...) ___bpf_syscall_args6(args), (void *)PT_REGS_PARM7_SYSCALL(regs)
+#define ___bpf_syscall_args(args...) ___bpf_apply(___bpf_syscall_args, ___bpf_narg(args))(args)
+
+/* If kernel doesn't have CONFIG_ARCH_HAS_SYSCALL_WRAPPER, we have to BPF_CORE_READ from pt_regs */
+#define ___bpf_syswrap_args0() ctx
+#define ___bpf_syswrap_args1(x) ___bpf_syswrap_args0(), (void *)PT_REGS_PARM1_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args2(x, args...) ___bpf_syswrap_args1(args), (void *)PT_REGS_PARM2_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args3(x, args...) ___bpf_syswrap_args2(args), (void *)PT_REGS_PARM3_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args4(x, args...) ___bpf_syswrap_args3(args), (void *)PT_REGS_PARM4_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args5(x, args...) ___bpf_syswrap_args4(args), (void *)PT_REGS_PARM5_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args6(x, args...) ___bpf_syswrap_args5(args), (void *)PT_REGS_PARM6_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args7(x, args...) ___bpf_syswrap_args6(args), (void *)PT_REGS_PARM7_CORE_SYSCALL(regs)
+#define ___bpf_syswrap_args(args...) ___bpf_apply(___bpf_syswrap_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_KSYSCALL is a variant of BPF_KPROBE, which is intended for
+ * tracing syscall functions, like __x64_sys_close. It hides the underlying
+ * platform-specific low-level way of getting syscall input arguments from
+ * struct pt_regs, and provides a familiar typed and named function arguments
+ * syntax and semantics of accessing syscall input parameters.
+ *
+ * Original struct pt_regs * context is preserved as 'ctx' argument. This might
+ * be necessary when using BPF helpers like bpf_perf_event_output().
+ *
+ * At the moment BPF_KSYSCALL does not transparently handle all the calling
+ * convention quirks for the following syscalls:
+ *
+ * - mmap(): __ARCH_WANT_SYS_OLD_MMAP.
+ * - clone(): CONFIG_CLONE_BACKWARDS, CONFIG_CLONE_BACKWARDS2 and
+ * CONFIG_CLONE_BACKWARDS3.
+ * - socket-related syscalls: __ARCH_WANT_SYS_SOCKETCALL.
+ * - compat syscalls.
+ *
+ * This may or may not change in the future. User needs to take extra measures
+ * to handle such quirks explicitly, if necessary.
+ *
+ * This macro relies on BPF CO-RE support and virtual __kconfig externs.
+ */
+#define BPF_KSYSCALL(name, args...) \
+name(struct pt_regs *ctx); \
+extern _Bool LINUX_HAS_SYSCALL_WRAPPER __kconfig; \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ struct pt_regs *regs = LINUX_HAS_SYSCALL_WRAPPER \
+ ? (struct pt_regs *)PT_REGS_PARM1(ctx) \
+ : ctx; \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ if (LINUX_HAS_SYSCALL_WRAPPER) \
+ return ____##name(___bpf_syswrap_args(args)); \
+ else \
+ return ____##name(___bpf_syscall_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
+#define BPF_KPROBE_SYSCALL BPF_KSYSCALL
+
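
A usage sketch; SEC("ksyscall/...") is the newer libbpf auto-attach convention, and attaching to the arch-specific wrapper (e.g. SEC("kprobe/__x64_sys_openat")) works as well. The argument list is illustrative.

SEC("ksyscall/openat")
int BPF_KSYSCALL(probe_openat, int dfd, const char *pathname, int flags)
{
	/* arguments are decoded from the (possibly wrapped) syscall pt_regs */
	bpf_printk("openat: dfd=%d flags=%d", dfd, flags);
	return 0;
}
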
+/* BPF_UPROBE and BPF_URETPROBE are identical to BPF_KPROBE and BPF_KRETPROBE,
+ * but are named way less confusingly for SEC("uprobe") and SEC("uretprobe")
+ * use cases.
+ */
+#define BPF_UPROBE(name, args...) BPF_KPROBE(name, ##args)
+#define BPF_URETPROBE(name, args...) BPF_KRETPROBE(name, ##args)
+
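
A usage sketch (the probed binary and symbol are normally given at attach time or in the SEC() name; everything here is illustrative):

SEC("uprobe")
int BPF_UPROBE(uprobe_malloc, unsigned long size)
{
	bpf_printk("malloc(%lu)", size);
	return 0;
}
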
#endif
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index bfef3d606b54..0a2c079244b6 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
+#include <byteswap.h>
#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
@@ -20,9 +21,7 @@
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+#include "strset.h"
#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU
@@ -30,17 +29,98 @@
static struct btf_type btf_void;
struct btf {
- union {
- struct btf_header *hdr;
- void *data;
- };
- struct btf_type **types;
- const char *strings;
- void *nohdr_data;
+ /* raw BTF data in native endianness */
+ void *raw_data;
+ /* raw BTF data in non-native endianness */
+ void *raw_data_swapped;
+ __u32 raw_size;
+ /* whether target endianness differs from the native one */
+ bool swapped_endian;
+
+ /*
+ * When BTF is loaded from an ELF or raw memory it is stored
+ * in a contiguous memory block. The hdr, types_data, and strs_data
+ * point inside that memory region to their respective parts of BTF
+ * representation:
+ *
+ * +--------------------------------+
+ * | Header | Types | Strings |
+ * +--------------------------------+
+ * ^ ^ ^
+ * | | |
+ * hdr | |
+ * types_data-+ |
+ * strs_data------------+
+ *
+ * If BTF data is later modified, e.g., due to types added or
+ * removed, BTF deduplication performed, etc, this contiguous
+ * representation is broken up into three independently allocated
+ * memory regions to be able to modify them independently.
+ * raw_data is nulled out at that point, but can be later allocated
+ * and cached again if user calls btf__raw_data(), at which point
+ * raw_data will contain a contiguous copy of header, types, and
+ * strings:
+ *
+ * +----------+ +---------+ +-----------+
+ * | Header | | Types | | Strings |
+ * +----------+ +---------+ +-----------+
+ * ^ ^ ^
+ * | | |
+ * hdr | |
+ * types_data----+ |
+ * strset__data(strs_set)-----+
+ *
+ * +----------+---------+-----------+
+ * | Header | Types | Strings |
+ * raw_data----->+----------+---------+-----------+
+ */
+ struct btf_header *hdr;
+
+ void *types_data;
+ size_t types_data_cap; /* used size stored in hdr->type_len */
+
+ /* type ID to `struct btf_type *` lookup index
+ * type_offs[0] corresponds to the first non-VOID type:
+ * - for base BTF it's type [1];
+ * - for split BTF it's the first non-base BTF type.
+ */
+ __u32 *type_offs;
+ size_t type_offs_cap;
+ /* number of types in this BTF instance:
+ * - doesn't include special [0] void type;
+ * - for split BTF counts number of types added on top of base BTF.
+ */
__u32 nr_types;
- __u32 types_size;
- __u32 data_size;
+ /* if not NULL, points to the base BTF on top of which the current
+ * split BTF is based
+ */
+ struct btf *base_btf;
+ /* BTF type ID of the first type in this BTF instance:
+ * - for base BTF it's equal to 1;
+ * - for split BTF it's equal to biggest type ID of base BTF plus 1.
+ */
+ int start_id;
+ /* logical string offset of this BTF instance:
+ * - for base BTF it's equal to 0;
+ * - for split BTF it's equal to the total size of the base BTF's string section.
+ */
+ int start_str_off;
+
+ /* only one of strs_data or strs_set can be non-NULL, depending on
+ * whether BTF is in a modifiable state (strs_set is used) or not
+ * (strs_data points inside raw_data)
+ */
+ void *strs_data;
+ /* a set of unique strings */
+ struct strset *strs_set;
+ /* whether strings are already deduplicated */
+ bool strs_deduped;
+
+ /* BTF object FD, if loaded into kernel */
int fd;
+
+ /* Pointer size (in bytes) for a target architecture of this BTF */
+ int ptr_sz;
};
static inline __u64 ptr_to_u64(const void *ptr)
@@ -48,110 +128,166 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}
-static int btf_add_type(struct btf *btf, struct btf_type *t)
+/* Ensure given dynamically allocated memory region pointed to by *data* with
+ * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
+ * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
+ * are already used. At most *max_cnt* elements can ever be allocated.
+ * If necessary, memory is reallocated and all existing data is copied over;
+ * the new pointer to the memory region is stored at *data* and the new
+ * capacity (in number of elements) is stored in *cap_cnt*.
+ * On success, a pointer to the beginning of the unused memory is returned.
+ * On error, NULL is returned.
+ */
+void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
+ size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
- if (btf->types_size - btf->nr_types < 2) {
- struct btf_type **new_types;
- __u32 expand_by, new_size;
+ size_t new_cnt;
+ void *new_data;
- if (btf->types_size == BTF_MAX_NR_TYPES)
- return -E2BIG;
+ if (cur_cnt + add_cnt <= *cap_cnt)
+ return *data + cur_cnt * elem_sz;
- expand_by = max(btf->types_size >> 2, 16U);
- new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
+ /* requested more than the set limit */
+ if (cur_cnt + add_cnt > max_cnt)
+ return NULL;
- new_types = realloc(btf->types, sizeof(*new_types) * new_size);
- if (!new_types)
- return -ENOMEM;
+ new_cnt = *cap_cnt;
+ new_cnt += new_cnt / 4; /* expand by 25% */
+ if (new_cnt < 16) /* but at least 16 elements */
+ new_cnt = 16;
+ if (new_cnt > max_cnt) /* but not exceeding a set limit */
+ new_cnt = max_cnt;
+ if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */
+ new_cnt = cur_cnt + add_cnt;
+
+ new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
+ if (!new_data)
+ return NULL;
- if (btf->nr_types == 0)
- new_types[0] = &btf_void;
+ /* zero out newly allocated portion of memory */
+ memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);
- btf->types = new_types;
- btf->types_size = new_size;
- }
+ *data = new_data;
+ *cap_cnt = new_cnt;
+ return new_data + cur_cnt * elem_sz;
+}
+
+/* Ensure given dynamically allocated memory region has enough allocated space
+ * to accommodate *need_cnt* elements of size *elem_sz* bytes each
+ */
+int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
+{
+ void *p;
- btf->types[++(btf->nr_types)] = t;
+ if (need_cnt <= *cap_cnt)
+ return 0;
+
+ p = libbpf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
+ if (!p)
+ return -ENOMEM;
return 0;
}
+static void *btf_add_type_offs_mem(struct btf *btf, size_t add_cnt)
+{
+ return libbpf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
+ btf->nr_types, BTF_MAX_NR_TYPES, add_cnt);
+}
+
+static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
+{
+ __u32 *p;
+
+ p = btf_add_type_offs_mem(btf, 1);
+ if (!p)
+ return -ENOMEM;
+
+ *p = type_off;
+ return 0;
+}
+
+static void btf_bswap_hdr(struct btf_header *h)
+{
+ h->magic = bswap_16(h->magic);
+ h->hdr_len = bswap_32(h->hdr_len);
+ h->type_off = bswap_32(h->type_off);
+ h->type_len = bswap_32(h->type_len);
+ h->str_off = bswap_32(h->str_off);
+ h->str_len = bswap_32(h->str_len);
+}
+
static int btf_parse_hdr(struct btf *btf)
{
- const struct btf_header *hdr = btf->hdr;
+ struct btf_header *hdr = btf->hdr;
__u32 meta_left;
- if (btf->data_size < sizeof(struct btf_header)) {
+ if (btf->raw_size < sizeof(struct btf_header)) {
pr_debug("BTF header not found\n");
return -EINVAL;
}
- if (hdr->magic != BTF_MAGIC) {
- pr_debug("Invalid BTF magic:%x\n", hdr->magic);
- return -EINVAL;
- }
-
- if (hdr->version != BTF_VERSION) {
- pr_debug("Unsupported BTF version:%u\n", hdr->version);
- return -ENOTSUP;
- }
-
- if (hdr->flags) {
- pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
- return -ENOTSUP;
- }
-
- meta_left = btf->data_size - sizeof(*hdr);
- if (!meta_left) {
- pr_debug("BTF has no data\n");
+ if (hdr->magic == bswap_16(BTF_MAGIC)) {
+ btf->swapped_endian = true;
+ if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
+ pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
+ bswap_32(hdr->hdr_len));
+ return -ENOTSUP;
+ }
+ btf_bswap_hdr(hdr);
+ } else if (hdr->magic != BTF_MAGIC) {
+ pr_debug("Invalid BTF magic: %x\n", hdr->magic);
return -EINVAL;
}
- if (meta_left < hdr->type_off) {
- pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
+ if (btf->raw_size < hdr->hdr_len) {
+ pr_debug("BTF header len %u larger than data size %u\n",
+ hdr->hdr_len, btf->raw_size);
return -EINVAL;
}
- if (meta_left < hdr->str_off) {
- pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
+ meta_left = btf->raw_size - hdr->hdr_len;
+ if (meta_left < (long long)hdr->str_off + hdr->str_len) {
+ pr_debug("Invalid BTF total size: %u\n", btf->raw_size);
return -EINVAL;
}
- if (hdr->type_off >= hdr->str_off) {
- pr_debug("BTF type section offset >= string section offset. No type?\n");
+ if ((long long)hdr->type_off + hdr->type_len > hdr->str_off) {
+ pr_debug("Invalid BTF data sections layout: type data at %u + %u, strings data at %u + %u\n",
+ hdr->type_off, hdr->type_len, hdr->str_off, hdr->str_len);
return -EINVAL;
}
- if (hdr->type_off & 0x02) {
+ if (hdr->type_off % 4) {
pr_debug("BTF type section is not aligned to 4 bytes\n");
return -EINVAL;
}
- btf->nohdr_data = btf->hdr + 1;
-
return 0;
}
static int btf_parse_str_sec(struct btf *btf)
{
const struct btf_header *hdr = btf->hdr;
- const char *start = btf->nohdr_data + hdr->str_off;
+ const char *start = btf->strs_data;
const char *end = start + btf->hdr->str_len;
- if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
- start[0] || end[-1]) {
+ if (btf->base_btf && hdr->str_len == 0)
+ return 0;
+ if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET || end[-1]) {
+ pr_debug("Invalid BTF string section\n");
+ return -EINVAL;
+ }
+ if (!btf->base_btf && start[0]) {
pr_debug("Invalid BTF string section\n");
return -EINVAL;
}
-
- btf->strings = start;
-
return 0;
}
-static int btf_type_size(struct btf_type *t)
+static int btf_type_size(const struct btf_type *t)
{
- int base_size = sizeof(struct btf_type);
+ const int base_size = sizeof(struct btf_type);
__u16 vlen = btf_vlen(t);
switch (btf_kind(t)) {
@@ -162,11 +298,15 @@ static int btf_type_size(struct btf_type *t)
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_TYPE_TAG:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
case BTF_KIND_ENUM:
return base_size + vlen * sizeof(struct btf_enum);
+ case BTF_KIND_ENUM64:
+ return base_size + vlen * sizeof(struct btf_enum64);
case BTF_KIND_ARRAY:
return base_size + sizeof(struct btf_array);
case BTF_KIND_STRUCT:
@@ -178,6 +318,92 @@ static int btf_type_size(struct btf_type *t)
return base_size + sizeof(struct btf_var);
case BTF_KIND_DATASEC:
return base_size + vlen * sizeof(struct btf_var_secinfo);
+ case BTF_KIND_DECL_TAG:
+ return base_size + sizeof(struct btf_decl_tag);
+ default:
+ pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
+ return -EINVAL;
+ }
+}
+
+static void btf_bswap_type_base(struct btf_type *t)
+{
+ t->name_off = bswap_32(t->name_off);
+ t->info = bswap_32(t->info);
+ t->type = bswap_32(t->type);
+}
+
+static int btf_bswap_type_rest(struct btf_type *t)
+{
+ struct btf_var_secinfo *v;
+ struct btf_enum64 *e64;
+ struct btf_member *m;
+ struct btf_array *a;
+ struct btf_param *p;
+ struct btf_enum *e;
+ __u16 vlen = btf_vlen(t);
+ int i;
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_TYPE_TAG:
+ return 0;
+ case BTF_KIND_INT:
+ *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
+ return 0;
+ case BTF_KIND_ENUM:
+ for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
+ e->name_off = bswap_32(e->name_off);
+ e->val = bswap_32(e->val);
+ }
+ return 0;
+ case BTF_KIND_ENUM64:
+ for (i = 0, e64 = btf_enum64(t); i < vlen; i++, e64++) {
+ e64->name_off = bswap_32(e64->name_off);
+ e64->val_lo32 = bswap_32(e64->val_lo32);
+ e64->val_hi32 = bswap_32(e64->val_hi32);
+ }
+ return 0;
+ case BTF_KIND_ARRAY:
+ a = btf_array(t);
+ a->type = bswap_32(a->type);
+ a->index_type = bswap_32(a->index_type);
+ a->nelems = bswap_32(a->nelems);
+ return 0;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
+ m->name_off = bswap_32(m->name_off);
+ m->type = bswap_32(m->type);
+ m->offset = bswap_32(m->offset);
+ }
+ return 0;
+ case BTF_KIND_FUNC_PROTO:
+ for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
+ p->name_off = bswap_32(p->name_off);
+ p->type = bswap_32(p->type);
+ }
+ return 0;
+ case BTF_KIND_VAR:
+ btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
+ return 0;
+ case BTF_KIND_DATASEC:
+ for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
+ v->type = bswap_32(v->type);
+ v->offset = bswap_32(v->offset);
+ v->size = bswap_32(v->size);
+ }
+ return 0;
+ case BTF_KIND_DECL_TAG:
+ btf_decl_tag(t)->component_idx = bswap_32(btf_decl_tag(t)->component_idx);
+ return 0;
default:
pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
return -EINVAL;
@@ -187,38 +413,180 @@ static int btf_type_size(struct btf_type *t)
static int btf_parse_type_sec(struct btf *btf)
{
struct btf_header *hdr = btf->hdr;
- void *nohdr_data = btf->nohdr_data;
- void *next_type = nohdr_data + hdr->type_off;
- void *end_type = nohdr_data + hdr->str_off;
+ void *next_type = btf->types_data;
+ void *end_type = next_type + hdr->type_len;
+ int err, type_size;
- while (next_type < end_type) {
- struct btf_type *t = next_type;
- int type_size;
- int err;
+ while (next_type + sizeof(struct btf_type) <= end_type) {
+ if (btf->swapped_endian)
+ btf_bswap_type_base(next_type);
- type_size = btf_type_size(t);
+ type_size = btf_type_size(next_type);
if (type_size < 0)
return type_size;
- next_type += type_size;
- err = btf_add_type(btf, t);
+ if (next_type + type_size > end_type) {
+ pr_warn("BTF type [%d] is malformed\n", btf->start_id + btf->nr_types);
+ return -EINVAL;
+ }
+
+ if (btf->swapped_endian && btf_bswap_type_rest(next_type))
+ return -EINVAL;
+
+ err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
if (err)
return err;
+
+ next_type += type_size;
+ btf->nr_types++;
+ }
+
+ if (next_type != end_type) {
+ pr_warn("BTF types data is malformed\n");
+ return -EINVAL;
}
return 0;
}
-__u32 btf__get_nr_types(const struct btf *btf)
+__u32 btf__type_cnt(const struct btf *btf)
+{
+ return btf->start_id + btf->nr_types;
+}
+
+const struct btf *btf__base_btf(const struct btf *btf)
+{
+ return btf->base_btf;
+}
+
+/* internal helper returning non-const pointer to a type */
+struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id)
{
- return btf->nr_types;
+ if (type_id == 0)
+ return &btf_void;
+ if (type_id < btf->start_id)
+ return btf_type_by_id(btf->base_btf, type_id);
+ return btf->types_data + btf->type_offs[type_id - btf->start_id];
}
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
- if (type_id > btf->nr_types)
- return NULL;
+ if (type_id >= btf->start_id + btf->nr_types)
+ return errno = EINVAL, NULL;
+ return btf_type_by_id((struct btf *)btf, type_id);
+}
+
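[Editor's illustration, not part of the diff: a minimal sketch of how the btf__type_cnt()/btf__type_by_id() pair added above is typically used to walk every type, including types owned by a base (split) BTF. The helper name dump_type_names() is made up for this example.]

#include <bpf/btf.h>
#include <stdio.h>

/* Sketch: print kind and name of every type reachable from 'btf',
 * including types inherited from its base BTF (IDs below start_id).
 */
static void dump_type_names(const struct btf *btf)
{
        __u32 id, n = btf__type_cnt(btf);

        for (id = 1; id < n; id++) {
                const struct btf_type *t = btf__type_by_id(btf, id);

                printf("[%u] kind=%u name=%s\n", id, btf_kind(t),
                       btf__name_by_offset(btf, t->name_off));
        }
}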
+static int determine_ptr_size(const struct btf *btf)
+{
+ static const char * const long_aliases[] = {
+ "long",
+ "long int",
+ "int long",
+ "unsigned long",
+ "long unsigned",
+ "unsigned long int",
+ "unsigned int long",
+ "long unsigned int",
+ "long int unsigned",
+ "int unsigned long",
+ "int long unsigned",
+ };
+ const struct btf_type *t;
+ const char *name;
+ int i, j, n;
+
+ if (btf->base_btf && btf->base_btf->ptr_sz > 0)
+ return btf->base_btf->ptr_sz;
+
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
+ t = btf__type_by_id(btf, i);
+ if (!btf_is_int(t))
+ continue;
+
+ if (t->size != 4 && t->size != 8)
+ continue;
+
+ name = btf__name_by_offset(btf, t->name_off);
+ if (!name)
+ continue;
+
+ for (j = 0; j < ARRAY_SIZE(long_aliases); j++) {
+ if (strcmp(name, long_aliases[j]) == 0)
+ return t->size;
+ }
+ }
+
+ return -1;
+}
+
+static size_t btf_ptr_sz(const struct btf *btf)
+{
+ if (!btf->ptr_sz)
+ ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+ return btf->ptr_sz < 0 ? sizeof(void *) : btf->ptr_sz;
+}
+
+/* Return pointer size this BTF instance assumes. The size is heuristically
+ * determined by looking for 'long' or 'unsigned long' integer type and
+ * recording its size in bytes. If BTF type information doesn't have any such
+ * type, this function returns 0. In the latter case, native architecture's
+ * pointer size is assumed, so will be either 4 or 8, depending on
+ * architecture that libbpf was compiled for. It's possible to override
+ * guessed value by using btf__set_pointer_size() API.
+ */
+size_t btf__pointer_size(const struct btf *btf)
+{
+ if (!btf->ptr_sz)
+ ((struct btf *)btf)->ptr_sz = determine_ptr_size(btf);
+
+ if (btf->ptr_sz < 0)
+ /* not enough BTF type info to guess */
+ return 0;
+
+ return btf->ptr_sz;
+}
+
+/* Override or set pointer size in bytes. Only values of 4 and 8 are
+ * supported.
+ */
+int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
+{
+ if (ptr_sz != 4 && ptr_sz != 8)
+ return libbpf_err(-EINVAL);
+ btf->ptr_sz = ptr_sz;
+ return 0;
+}
+
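[Editor's illustration, not part of the diff: a sketch of the pointer-size API usage under the assumption that the target BTF may lack a 'long' type; fixup_ptr_size() is an invented helper name.]

#include <bpf/btf.h>

/* Sketch: when parsing BTF produced for a 32-bit target on a 64-bit host,
 * the 'long' heuristic may fail; force the pointer size explicitly.
 */
static int fixup_ptr_size(struct btf *btf)
{
        if (btf__pointer_size(btf) == 0)        /* size could not be guessed */
                return btf__set_pointer_size(btf, 4);   /* assume 32-bit target */
        return 0;
}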
+static bool is_host_big_endian(void)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return false;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return true;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+}
- return btf->types[type_id];
+enum btf_endianness btf__endianness(const struct btf *btf)
+{
+ if (is_host_big_endian())
+ return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
+ else
+ return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
+}
+
+int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
+{
+ if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
+ return libbpf_err(-EINVAL);
+
+ btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
+ if (!btf->swapped_endian) {
+ free(btf->raw_data_swapped);
+ btf->raw_data_swapped = NULL;
+ }
+ return 0;
}
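[Editor's illustration, not part of the diff: a sketch combining btf__set_endianness() with btf__raw_data() (defined further down in this patch) to obtain raw BTF bytes in a chosen byte order; btf_as_big_endian() is an invented helper name.]

#include <bpf/btf.h>

/* Sketch: produce big-endian raw BTF bytes regardless of host endianness,
 * e.g. for embedding into an image built for a big-endian target.
 */
static const void *btf_as_big_endian(struct btf *btf, __u32 *size)
{
        if (btf__set_endianness(btf, BTF_BIG_ENDIAN))
                return NULL;
        return btf__raw_data(btf, size);        /* byte-swapped lazily if needed */
}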
static bool btf_type_is_void(const struct btf_type *t)
@@ -242,35 +610,38 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
int i;
t = btf__type_by_id(btf, type_id);
- for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
- i++) {
+ for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); i++) {
switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_DATASEC:
+ case BTF_KIND_FLOAT:
size = t->size;
goto done;
case BTF_KIND_PTR:
- size = sizeof(void *);
+ size = btf_ptr_sz(btf);
goto done;
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_VAR:
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
type_id = t->type;
break;
case BTF_KIND_ARRAY:
array = btf_array(t);
if (nelems && array->nelems > UINT32_MAX / nelems)
- return -E2BIG;
+ return libbpf_err(-E2BIG);
nelems *= array->nelems;
type_id = array->type;
break;
default:
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
t = btf__type_by_id(btf, type_id);
@@ -278,9 +649,9 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
done:
if (size < 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (nelems && size > UINT32_MAX / nelems)
- return -E2BIG;
+ return libbpf_err(-E2BIG);
return nelems * size;
}
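[Editor's illustration, not part of the diff: a sketch of btf__resolve_size() together with btf__find_by_name_kind(); print_struct_size() is an invented helper name.]

#include <bpf/btf.h>
#include <stdio.h>

/* Sketch: report the byte size of a named struct, following typedefs,
 * modifiers and arrays via btf__resolve_size().
 */
static void print_struct_size(const struct btf *btf, const char *name)
{
        __s32 id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
        __s64 sz;

        if (id < 0)
                return;
        sz = btf__resolve_size(btf, id);
        if (sz >= 0)
                printf("%s is %lld bytes\n", name, (long long)sz);
}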
@@ -293,13 +664,16 @@ int btf__align_of(const struct btf *btf, __u32 id)
switch (kind) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
- return min(sizeof(void *), (size_t)t->size);
+ case BTF_KIND_ENUM64:
+ case BTF_KIND_FLOAT:
+ return min(btf_ptr_sz(btf), (size_t)t->size);
case BTF_KIND_PTR:
- return sizeof(void *);
+ return btf_ptr_sz(btf);
case BTF_KIND_TYPEDEF:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPE_TAG:
return btf__align_of(btf, t->type);
case BTF_KIND_ARRAY:
return btf__align_of(btf, btf_array(t)->type);
@@ -312,15 +686,28 @@ int btf__align_of(const struct btf *btf, __u32 id)
for (i = 0; i < vlen; i++, m++) {
align = btf__align_of(btf, m->type);
if (align <= 0)
- return align;
+ return libbpf_err(align);
max_align = max(max_align, align);
+
+ /* if field offset isn't aligned according to field
+ * type's alignment, then struct must be packed
+ */
+ if (btf_member_bitfield_size(t, i) == 0 &&
+ (m->offset % (8 * align)) != 0)
+ return 1;
}
+ /* if struct/union size isn't a multiple of its alignment,
+ * then struct must be packed
+ */
+ if ((t->size % max_align) != 0)
+ return 1;
+
return max_align;
}
default:
pr_warn("unsupported BTF_KIND:%u\n", btf_kind(t));
- return 0;
+ return errno = EINVAL, 0;
}
}
@@ -339,39 +726,39 @@ int btf__resolve_type(const struct btf *btf, __u32 type_id)
}
if (depth == MAX_RESOLVE_DEPTH || btf_type_is_void_or_null(t))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
return type_id;
}
__s32 btf__find_by_name(const struct btf *btf, const char *type_name)
{
- __u32 i;
+ __u32 i, nr_types = btf__type_cnt(btf);
if (!strcmp(type_name, "void"))
return 0;
- for (i = 1; i <= btf->nr_types; i++) {
- const struct btf_type *t = btf->types[i];
+ for (i = 1; i < nr_types; i++) {
+ const struct btf_type *t = btf__type_by_id(btf, i);
const char *name = btf__name_by_offset(btf, t->name_off);
if (name && !strcmp(type_name, name))
return i;
}
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
-__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
- __u32 kind)
+static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
+ const char *type_name, __u32 kind)
{
- __u32 i;
+ __u32 i, nr_types = btf__type_cnt(btf);
if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
return 0;
- for (i = 1; i <= btf->nr_types; i++) {
- const struct btf_type *t = btf->types[i];
+ for (i = start_id; i < nr_types; i++) {
+ const struct btf_type *t = btf__type_by_id(btf, i);
const char *name;
if (btf_kind(t) != kind)
@@ -381,23 +768,103 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
return i;
}
- return -ENOENT;
+ return libbpf_err(-ENOENT);
+}
+
+__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
+ __u32 kind)
+{
+ return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
+}
+
+__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
+ __u32 kind)
+{
+ return btf_find_by_name_kind(btf, 1, type_name, kind);
+}
+
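[Editor's illustration, not part of the diff: a sketch contrasting btf__find_by_name_kind_own() with btf__find_by_name_kind() for split (module) BTF; find_in_module_first() is an invented helper name.]

#include <bpf/btf.h>

/* Sketch: with split BTF, first look among the types the module itself
 * adds, then fall back to a full search that includes base vmlinux BTF.
 */
static __s32 find_in_module_first(const struct btf *mod_btf, const char *name)
{
        __s32 id = btf__find_by_name_kind_own(mod_btf, name, BTF_KIND_STRUCT);

        if (id > 0)
                return id;
        return btf__find_by_name_kind(mod_btf, name, BTF_KIND_STRUCT);
}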
+static bool btf_is_modifiable(const struct btf *btf)
+{
+ return (void *)btf->hdr != btf->raw_data;
}
void btf__free(struct btf *btf)
{
- if (!btf)
+ if (IS_ERR_OR_NULL(btf))
return;
- if (btf->fd != -1)
+ if (btf->fd >= 0)
close(btf->fd);
- free(btf->data);
- free(btf->types);
+ if (btf_is_modifiable(btf)) {
+ /* if BTF was modified after loading, it will have a split
+ * in-memory representation for header, types, and strings
+ * sections, so we need to free all of them individually. It
+ * might still have a cached contiguous raw data present,
+ * which will be unconditionally freed below.
+ */
+ free(btf->hdr);
+ free(btf->types_data);
+ strset__free(btf->strs_set);
+ }
+ free(btf->raw_data);
+ free(btf->raw_data_swapped);
+ free(btf->type_offs);
free(btf);
}
-struct btf *btf__new(__u8 *data, __u32 size)
+static struct btf *btf_new_empty(struct btf *base_btf)
+{
+ struct btf *btf;
+
+ btf = calloc(1, sizeof(*btf));
+ if (!btf)
+ return ERR_PTR(-ENOMEM);
+
+ btf->nr_types = 0;
+ btf->start_id = 1;
+ btf->start_str_off = 0;
+ btf->fd = -1;
+ btf->ptr_sz = sizeof(void *);
+ btf->swapped_endian = false;
+
+ if (base_btf) {
+ btf->base_btf = base_btf;
+ btf->start_id = btf__type_cnt(base_btf);
+ btf->start_str_off = base_btf->hdr->str_len;
+ }
+
+ /* +1 for empty string at offset 0 */
+ btf->raw_size = sizeof(struct btf_header) + (base_btf ? 0 : 1);
+ btf->raw_data = calloc(1, btf->raw_size);
+ if (!btf->raw_data) {
+ free(btf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ btf->hdr = btf->raw_data;
+ btf->hdr->hdr_len = sizeof(struct btf_header);
+ btf->hdr->magic = BTF_MAGIC;
+ btf->hdr->version = BTF_VERSION;
+
+ btf->types_data = btf->raw_data + btf->hdr->hdr_len;
+ btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
+ btf->hdr->str_len = base_btf ? 0 : 1; /* empty string at offset 0 */
+
+ return btf;
+}
+
+struct btf *btf__new_empty(void)
+{
+ return libbpf_ptr(btf_new_empty(NULL));
+}
+
+struct btf *btf__new_empty_split(struct btf *base_btf)
+{
+ return libbpf_ptr(btf_new_empty(base_btf));
+}
+
+static struct btf *btf_new(const void *data, __u32 size, struct btf *base_btf)
{
struct btf *btf;
int err;
@@ -406,27 +873,38 @@ struct btf *btf__new(__u8 *data, __u32 size)
if (!btf)
return ERR_PTR(-ENOMEM);
+ btf->nr_types = 0;
+ btf->start_id = 1;
+ btf->start_str_off = 0;
btf->fd = -1;
- btf->data = malloc(size);
- if (!btf->data) {
+ if (base_btf) {
+ btf->base_btf = base_btf;
+ btf->start_id = btf__type_cnt(base_btf);
+ btf->start_str_off = base_btf->hdr->str_len;
+ }
+
+ btf->raw_data = malloc(size);
+ if (!btf->raw_data) {
err = -ENOMEM;
goto done;
}
+ memcpy(btf->raw_data, data, size);
+ btf->raw_size = size;
- memcpy(btf->data, data, size);
- btf->data_size = size;
-
+ btf->hdr = btf->raw_data;
err = btf_parse_hdr(btf);
if (err)
goto done;
+ btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
+ btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
+
err = btf_parse_str_sec(btf);
+ err = err ?: btf_parse_type_sec(btf);
if (err)
goto done;
- err = btf_parse_type_sec(btf);
-
done:
if (err) {
btf__free(btf);
@@ -436,18 +914,13 @@ done:
return btf;
}
-static bool btf_check_endianness(const GElf_Ehdr *ehdr)
+struct btf *btf__new(const void *data, __u32 size)
{
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
-#else
-# error "Unrecognized __BYTE_ORDER__"
-#endif
+ return libbpf_ptr(btf_new(data, size, NULL));
}
-struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
+static struct btf *btf_parse_elf(const char *path, struct btf *base_btf,
+ struct btf_ext **btf_ext)
{
Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
int err = 0, fd = -1, idx = 0;
@@ -455,13 +928,14 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
Elf_Scn *scn = NULL;
Elf *elf = NULL;
GElf_Ehdr ehdr;
+ size_t shstrndx;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n", path);
return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
}
- fd = open(path, O_RDONLY);
+ fd = open(path, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("failed to open %s: %s\n", path, strerror(errno));
@@ -479,11 +953,14 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
pr_warn("failed to get EHDR from %s\n", path);
goto done;
}
- if (!btf_check_endianness(&ehdr)) {
- pr_warn("non-native ELF endianness is not supported\n");
+
+ if (elf_getshdrstrndx(elf, &shstrndx)) {
+ pr_warn("failed to get section names section index for %s\n",
+ path);
goto done;
}
- if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
+
+ if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) {
pr_warn("failed to get e_shstrndx from %s\n", path);
goto done;
}
@@ -498,7 +975,7 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
idx, path);
goto done;
}
- name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
+ name = elf_strptr(elf, shstrndx, sh.sh_name);
if (!name) {
pr_warn("failed to get section(%d) name from %s\n",
idx, path);
@@ -523,20 +1000,32 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
}
}
- err = 0;
-
if (!btf_data) {
- err = -ENOENT;
+ pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path);
+ err = -ENODATA;
goto done;
}
- btf = btf__new(btf_data->d_buf, btf_data->d_size);
- if (IS_ERR(btf))
+ btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf);
+ err = libbpf_get_error(btf);
+ if (err)
goto done;
+ switch (gelf_getclass(elf)) {
+ case ELFCLASS32:
+ btf__set_pointer_size(btf, 4);
+ break;
+ case ELFCLASS64:
+ btf__set_pointer_size(btf, 8);
+ break;
+ default:
+ pr_warn("failed to get ELF class (bitness) for %s\n", path);
+ break;
+ }
+
if (btf_ext && btf_ext_data) {
- *btf_ext = btf_ext__new(btf_ext_data->d_buf,
- btf_ext_data->d_size);
- if (IS_ERR(*btf_ext))
+ *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
+ err = libbpf_get_error(*btf_ext);
+ if (err)
goto done;
} else if (btf_ext) {
*btf_ext = NULL;
@@ -546,153 +1035,206 @@ done:
elf_end(elf);
close(fd);
- if (err)
- return ERR_PTR(err);
- /*
- * btf is always parsed before btf_ext, so no need to clean up
- * btf_ext, if btf loading failed
- */
- if (IS_ERR(btf))
+ if (!err)
return btf;
- if (btf_ext && IS_ERR(*btf_ext)) {
- btf__free(btf);
- err = PTR_ERR(*btf_ext);
- return ERR_PTR(err);
- }
- return btf;
+
+ if (btf_ext)
+ btf_ext__free(*btf_ext);
+ btf__free(btf);
+
+ return ERR_PTR(err);
}
-static int compare_vsi_off(const void *_a, const void *_b)
+struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
- const struct btf_var_secinfo *a = _a;
- const struct btf_var_secinfo *b = _b;
+ return libbpf_ptr(btf_parse_elf(path, NULL, btf_ext));
+}
- return a->offset - b->offset;
+struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf)
+{
+ return libbpf_ptr(btf_parse_elf(path, base_btf, NULL));
}
-static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
- struct btf_type *t)
+static struct btf *btf_parse_raw(const char *path, struct btf *base_btf)
{
- __u32 size = 0, off = 0, i, vars = btf_vlen(t);
- const char *name = btf__name_by_offset(btf, t->name_off);
- const struct btf_type *t_var;
- struct btf_var_secinfo *vsi;
- const struct btf_var *var;
- int ret;
+ struct btf *btf = NULL;
+ void *data = NULL;
+ FILE *f = NULL;
+ __u16 magic;
+ int err = 0;
+ long sz;
- if (!name) {
- pr_debug("No name found in string section for DATASEC kind.\n");
- return -ENOENT;
+ f = fopen(path, "rb");
+ if (!f) {
+ err = -errno;
+ goto err_out;
}
- /* .extern datasec size and var offsets were set correctly during
- * extern collection step, so just skip straight to sorting variables
- */
- if (t->size)
- goto sort_vars;
+ /* check BTF magic */
+ if (fread(&magic, 1, sizeof(magic), f) < sizeof(magic)) {
+ err = -EIO;
+ goto err_out;
+ }
+ if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
+ /* definitely not a raw BTF */
+ err = -EPROTO;
+ goto err_out;
+ }
- ret = bpf_object__section_size(obj, name, &size);
- if (ret || !size || (t->size && t->size != size)) {
- pr_debug("Invalid size for section %s: %u bytes\n", name, size);
- return -ENOENT;
+ /* get file size */
+ if (fseek(f, 0, SEEK_END)) {
+ err = -errno;
+ goto err_out;
+ }
+ sz = ftell(f);
+ if (sz < 0) {
+ err = -errno;
+ goto err_out;
+ }
+ /* rewind to the start */
+ if (fseek(f, 0, SEEK_SET)) {
+ err = -errno;
+ goto err_out;
}
- t->size = size;
+ /* pre-alloc memory and read all of BTF data */
+ data = malloc(sz);
+ if (!data) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ if (fread(data, 1, sz, f) < sz) {
+ err = -EIO;
+ goto err_out;
+ }
- for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
- t_var = btf__type_by_id(btf, vsi->type);
- var = btf_var(t_var);
+ /* finally parse BTF data */
+ btf = btf_new(data, sz, base_btf);
- if (!btf_is_var(t_var)) {
- pr_debug("Non-VAR type seen in section %s\n", name);
- return -EINVAL;
- }
+err_out:
+ free(data);
+ if (f)
+ fclose(f);
+ return err ? ERR_PTR(err) : btf;
+}
- if (var->linkage == BTF_VAR_STATIC)
- continue;
+struct btf *btf__parse_raw(const char *path)
+{
+ return libbpf_ptr(btf_parse_raw(path, NULL));
+}
- name = btf__name_by_offset(btf, t_var->name_off);
- if (!name) {
- pr_debug("No name found in string section for VAR kind\n");
- return -ENOENT;
- }
+struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf)
+{
+ return libbpf_ptr(btf_parse_raw(path, base_btf));
+}
- ret = bpf_object__variable_offset(obj, name, &off);
- if (ret) {
- pr_debug("No offset found in symbol table for VAR %s\n",
- name);
- return -ENOENT;
- }
+static struct btf *btf_parse(const char *path, struct btf *base_btf, struct btf_ext **btf_ext)
+{
+ struct btf *btf;
+ int err;
- vsi->offset = off;
- }
+ if (btf_ext)
+ *btf_ext = NULL;
-sort_vars:
- qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
- return 0;
+ btf = btf_parse_raw(path, base_btf);
+ err = libbpf_get_error(btf);
+ if (!err)
+ return btf;
+ if (err != -EPROTO)
+ return ERR_PTR(err);
+ return btf_parse_elf(path, base_btf, btf_ext);
}
-int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
+struct btf *btf__parse(const char *path, struct btf_ext **btf_ext)
{
- int err = 0;
- __u32 i;
-
- for (i = 1; i <= btf->nr_types; i++) {
- struct btf_type *t = btf->types[i];
-
- /* Loader needs to fix up some of the things compiler
- * couldn't get its hands on while emitting BTF. This
- * is section size and global variable offset. We use
- * the info from the ELF itself for this purpose.
- */
- if (btf_is_datasec(t)) {
- err = btf_fixup_datasec(obj, btf, t);
- if (err)
- break;
- }
- }
+ return libbpf_ptr(btf_parse(path, NULL, btf_ext));
+}
- return err;
+struct btf *btf__parse_split(const char *path, struct btf *base_btf)
+{
+ return libbpf_ptr(btf_parse(path, base_btf, NULL));
}
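[Editor's illustration, not part of the diff: a sketch of the parse entry points, assuming libbpf 1.0 error conventions (NULL return with errno set); load_module_btf() is an invented helper name.]

#include <bpf/btf.h>

/* Sketch: btf__parse() accepts raw BTF or an ELF with a .BTF section;
 * split module BTF is parsed on top of vmlinux BTF via btf__parse_split().
 * The base BTF must stay alive for as long as the split BTF is in use.
 */
static struct btf *load_module_btf(const char *vmlinux_path, const char *mod_path)
{
        struct btf *base, *mod;

        base = btf__parse(vmlinux_path, NULL);
        if (!base)
                return NULL;            /* errno set by libbpf */

        mod = btf__parse_split(mod_path, base);
        if (!mod)
                btf__free(base);
        return mod;
}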
-int btf__load(struct btf *btf)
+static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
+
+int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level)
{
- __u32 log_buf_size = 0;
- char *log_buf = NULL;
+ LIBBPF_OPTS(bpf_btf_load_opts, opts);
+ __u32 buf_sz = 0, raw_size;
+ char *buf = NULL, *tmp;
+ void *raw_data;
int err = 0;
if (btf->fd >= 0)
- return -EEXIST;
+ return libbpf_err(-EEXIST);
+ if (log_sz && !log_buf)
+ return libbpf_err(-EINVAL);
+
+ /* cache native raw data representation */
+ raw_data = btf_get_raw_data(btf, &raw_size, false);
+ if (!raw_data) {
+ err = -ENOMEM;
+ goto done;
+ }
+ btf->raw_size = raw_size;
+ btf->raw_data = raw_data;
retry_load:
- if (log_buf_size) {
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- return -ENOMEM;
+ /* if log_level is 0, we won't provide log_buf/log_size to the kernel,
+ * initially. Only if BTF loading fails, we bump log_level to 1 and
+ * retry, using either auto-allocated or custom log_buf. This way
+ * non-NULL custom log_buf provides a buffer just in case, but hopes
+ * for successful load and no need for log_buf.
+ */
+ if (log_level) {
+ /* if caller didn't provide custom log_buf, we'll keep
+ * allocating our own progressively bigger buffers for BTF
+ * verification log
+ */
+ if (!log_buf) {
+ buf_sz = max((__u32)BPF_LOG_BUF_SIZE, buf_sz * 2);
+ tmp = realloc(buf, buf_sz);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto done;
+ }
+ buf = tmp;
+ buf[0] = '\0';
+ }
- *log_buf = 0;
+ opts.log_buf = log_buf ? log_buf : buf;
+ opts.log_size = log_buf ? log_sz : buf_sz;
+ opts.log_level = log_level;
}
- btf->fd = bpf_load_btf(btf->data, btf->data_size,
- log_buf, log_buf_size, false);
+ btf->fd = bpf_btf_load(raw_data, raw_size, &opts);
if (btf->fd < 0) {
- if (!log_buf || errno == ENOSPC) {
- log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
- log_buf_size << 1);
- free(log_buf);
+ /* time to turn on verbose mode and try again */
+ if (log_level == 0) {
+ log_level = 1;
goto retry_load;
}
+ /* only retry if caller didn't provide custom log_buf, but
+ * make sure we can never overflow buf_sz
+ */
+ if (!log_buf && errno == ENOSPC && buf_sz <= UINT_MAX / 2)
+ goto retry_load;
err = -errno;
- pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno);
- if (*log_buf)
- pr_warn("%s\n", log_buf);
- goto done;
+ pr_warn("BTF loading error: %d\n", err);
+ /* don't print out contents of custom log_buf */
+ if (!log_buf && buf[0])
+ pr_warn("-- BEGIN BTF LOAD LOG ---\n%s\n-- END BTF LOAD LOG --\n", buf);
}
done:
- free(log_buf);
- return err;
+ free(buf);
+ return libbpf_err(err);
+}
+
+int btf__load_into_kernel(struct btf *btf)
+{
+ return btf_load_into_kernel(btf, NULL, 0, 0);
}
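[Editor's illustration, not part of the diff: a sketch of btf__load_into_kernel() and btf__fd(); get_btf_fd() is an invented helper name.]

#include <bpf/btf.h>

/* Sketch: load BTF into the kernel and hand the resulting FD to other BPF
 * objects; btf__fd() returns -1 until btf__load_into_kernel() succeeds.
 */
static int get_btf_fd(struct btf *btf)
{
        int err = btf__load_into_kernel(btf);

        if (err)
                return err;             /* negative errno-style code */
        return btf__fd(btf);
}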
int btf__fd(const struct btf *btf)
@@ -700,50 +1242,125 @@ int btf__fd(const struct btf *btf)
return btf->fd;
}
-const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
+void btf__set_fd(struct btf *btf, int fd)
{
- *size = btf->data_size;
- return btf->data;
+ btf->fd = fd;
}
-const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
+static const void *btf_strs_data(const struct btf *btf)
{
- if (offset < btf->hdr->str_len)
- return &btf->strings[offset];
- else
+ return btf->strs_data ? btf->strs_data : strset__data(btf->strs_set);
+}
+
+static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
+{
+ struct btf_header *hdr = btf->hdr;
+ struct btf_type *t;
+ void *data, *p;
+ __u32 data_sz;
+ int i;
+
+ data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
+ if (data) {
+ *size = btf->raw_size;
+ return data;
+ }
+
+ data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
+ data = calloc(1, data_sz);
+ if (!data)
return NULL;
+ p = data;
+
+ memcpy(p, hdr, hdr->hdr_len);
+ if (swap_endian)
+ btf_bswap_hdr(p);
+ p += hdr->hdr_len;
+
+ memcpy(p, btf->types_data, hdr->type_len);
+ if (swap_endian) {
+ for (i = 0; i < btf->nr_types; i++) {
+ t = p + btf->type_offs[i];
+ /* btf_bswap_type_rest() relies on native t->info, so
+ * we swap base type info after we swapped all the
+ * additional information
+ */
+ if (btf_bswap_type_rest(t))
+ goto err_out;
+ btf_bswap_type_base(t);
+ }
+ }
+ p += hdr->type_len;
+
+ memcpy(p, btf_strs_data(btf), hdr->str_len);
+ p += hdr->str_len;
+
+ *size = data_sz;
+ return data;
+err_out:
+ free(data);
+ return NULL;
}
-int btf__get_from_id(__u32 id, struct btf **btf)
+const void *btf__raw_data(const struct btf *btf_ro, __u32 *size)
+{
+ struct btf *btf = (struct btf *)btf_ro;
+ __u32 data_sz;
+ void *data;
+
+ data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
+ if (!data)
+ return errno = ENOMEM, NULL;
+
+ btf->raw_size = data_sz;
+ if (btf->swapped_endian)
+ btf->raw_data_swapped = data;
+ else
+ btf->raw_data = data;
+ *size = data_sz;
+ return data;
+}
+
+__attribute__((alias("btf__raw_data")))
+const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+
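[Editor's illustration, not part of the diff: a sketch that serializes in-memory BTF back to its raw binary form with btf__raw_data() and writes it to disk; write_btf_file() is an invented helper name.]

#include <bpf/btf.h>
#include <stdio.h>

/* Sketch: dump raw BTF bytes (in the currently selected endianness)
 * into a standalone .btf file.
 */
static int write_btf_file(const struct btf *btf, const char *path)
{
        __u32 size;
        const void *data = btf__raw_data(btf, &size);
        FILE *f;

        if (!data)
                return -1;
        f = fopen(path, "wb");
        if (!f)
                return -1;
        if (fwrite(data, 1, size, f) != size) {
                fclose(f);
                return -1;
        }
        fclose(f);
        return 0;
}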
+const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
{
- struct bpf_btf_info btf_info = { 0 };
+ if (offset < btf->start_str_off)
+ return btf__str_by_offset(btf->base_btf, offset);
+ else if (offset - btf->start_str_off < btf->hdr->str_len)
+ return btf_strs_data(btf) + (offset - btf->start_str_off);
+ else
+ return errno = EINVAL, NULL;
+}
+
+const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
+{
+ return btf__str_by_offset(btf, offset);
+}
+
+struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf)
+{
+ struct bpf_btf_info btf_info;
__u32 len = sizeof(btf_info);
__u32 last_size;
- int btf_fd;
+ struct btf *btf;
void *ptr;
int err;
- err = 0;
- *btf = NULL;
- btf_fd = bpf_btf_get_fd_by_id(id);
- if (btf_fd < 0)
- return 0;
-
- /* we won't know btf_size until we call bpf_obj_get_info_by_fd(). so
+ /* we won't know btf_size until we call bpf_btf_get_info_by_fd(). so
* let's start with a sane default - 4KiB here - and resize it only if
- * bpf_obj_get_info_by_fd() needs a bigger buffer.
+ * bpf_btf_get_info_by_fd() needs a bigger buffer.
*/
- btf_info.btf_size = 4096;
- last_size = btf_info.btf_size;
+ last_size = 4096;
ptr = malloc(last_size);
- if (!ptr) {
- err = -ENOMEM;
- goto exit_free;
- }
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
- memset(ptr, 0, last_size);
+ memset(&btf_info, 0, sizeof(btf_info));
btf_info.btf = ptr_to_u64(ptr);
- err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ btf_info.btf_size = last_size;
+ err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
if (!err && btf_info.btf_size > last_size) {
void *temp_ptr;
@@ -751,104 +1368,1282 @@ int btf__get_from_id(__u32 id, struct btf **btf)
last_size = btf_info.btf_size;
temp_ptr = realloc(ptr, last_size);
if (!temp_ptr) {
- err = -ENOMEM;
+ btf = ERR_PTR(-ENOMEM);
goto exit_free;
}
ptr = temp_ptr;
- memset(ptr, 0, last_size);
+
+ len = sizeof(btf_info);
+ memset(&btf_info, 0, sizeof(btf_info));
btf_info.btf = ptr_to_u64(ptr);
- err = bpf_obj_get_info_by_fd(btf_fd, &btf_info, &len);
+ btf_info.btf_size = last_size;
+
+ err = bpf_btf_get_info_by_fd(btf_fd, &btf_info, &len);
}
if (err || btf_info.btf_size > last_size) {
- err = errno;
+ btf = err ? ERR_PTR(-errno) : ERR_PTR(-E2BIG);
goto exit_free;
}
- *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
- if (IS_ERR(*btf)) {
- err = PTR_ERR(*btf);
- *btf = NULL;
- }
+ btf = btf_new(ptr, btf_info.btf_size, base_btf);
exit_free:
- close(btf_fd);
free(ptr);
+ return btf;
+}
+
+struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf)
+{
+ struct btf *btf;
+ int btf_fd;
+
+ btf_fd = bpf_btf_get_fd_by_id(id);
+ if (btf_fd < 0)
+ return libbpf_err_ptr(-errno);
+
+ btf = btf_get_from_fd(btf_fd, base_btf);
+ close(btf_fd);
+
+ return libbpf_ptr(btf);
+}
+
+struct btf *btf__load_from_kernel_by_id(__u32 id)
+{
+ return btf__load_from_kernel_by_id_split(id, NULL);
+}
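[Editor's illustration, not part of the diff: a sketch of btf__load_from_kernel_by_id(); show_kernel_btf() is an invented helper name.]

#include <bpf/btf.h>
#include <stdio.h>

/* Sketch: fetch the BTF blob of an already-loaded kernel object by its BTF
 * object ID (e.g. as reported by bpftool) and inspect it in user space.
 */
static void show_kernel_btf(__u32 btf_id)
{
        struct btf *btf = btf__load_from_kernel_by_id(btf_id);

        if (!btf)
                return;
        printf("BTF object %u has %u types\n", btf_id, btf__type_cnt(btf) - 1);
        btf__free(btf);
}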
+static void btf_invalidate_raw_data(struct btf *btf)
+{
+ if (btf->raw_data) {
+ free(btf->raw_data);
+ btf->raw_data = NULL;
+ }
+ if (btf->raw_data_swapped) {
+ free(btf->raw_data_swapped);
+ btf->raw_data_swapped = NULL;
+ }
+}
+
+/* Ensure BTF is ready to be modified (by splitting into a three memory
+ * regions for header, types, and strings). Also invalidate cached
+ * raw_data, if any.
+ */
+static int btf_ensure_modifiable(struct btf *btf)
+{
+ void *hdr, *types;
+ struct strset *set = NULL;
+ int err = -ENOMEM;
+
+ if (btf_is_modifiable(btf)) {
+ /* any BTF modification invalidates raw_data */
+ btf_invalidate_raw_data(btf);
+ return 0;
+ }
+
+ /* split raw data into three memory regions */
+ hdr = malloc(btf->hdr->hdr_len);
+ types = malloc(btf->hdr->type_len);
+ if (!hdr || !types)
+ goto err_out;
+
+ memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
+ memcpy(types, btf->types_data, btf->hdr->type_len);
+
+ /* build lookup index for all strings */
+ set = strset__new(BTF_MAX_STR_OFFSET, btf->strs_data, btf->hdr->str_len);
+ if (IS_ERR(set)) {
+ err = PTR_ERR(set);
+ goto err_out;
+ }
+
+ /* only when everything was successful, update internal state */
+ btf->hdr = hdr;
+ btf->types_data = types;
+ btf->types_data_cap = btf->hdr->type_len;
+ btf->strs_data = NULL;
+ btf->strs_set = set;
+ /* if BTF was created from scratch, all strings are guaranteed to be
+ * unique and deduplicated
+ */
+ if (btf->hdr->str_len == 0)
+ btf->strs_deduped = true;
+ if (!btf->base_btf && btf->hdr->str_len == 1)
+ btf->strs_deduped = true;
+
+ /* invalidate raw_data representation */
+ btf_invalidate_raw_data(btf);
+
+ return 0;
+
+err_out:
+ strset__free(set);
+ free(hdr);
+ free(types);
return err;
}
-int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
- __u32 expected_key_size, __u32 expected_value_size,
- __u32 *key_type_id, __u32 *value_type_id)
+/* Find an offset in BTF string section that corresponds to a given string *s*.
+ * Returns:
+ * - >0 offset into string section, if string is found;
+ * - -ENOENT, if string is not in the string section;
+ * - <0, on any other error.
+ */
+int btf__find_str(struct btf *btf, const char *s)
{
- const struct btf_type *container_type;
- const struct btf_member *key, *value;
- const size_t max_name = 256;
- char container_name[max_name];
- __s64 key_size, value_size;
- __s32 container_id;
+ int off;
- if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
- max_name) {
- pr_warn("map:%s length of '____btf_map_%s' is too long\n",
- map_name, map_name);
- return -EINVAL;
+ if (btf->base_btf) {
+ off = btf__find_str(btf->base_btf, s);
+ if (off != -ENOENT)
+ return off;
}
- container_id = btf__find_by_name(btf, container_name);
- if (container_id < 0) {
- pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
- map_name, container_name);
- return container_id;
+ /* BTF needs to be in a modifiable state to build string lookup index */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ off = strset__find_str(btf->strs_set, s);
+ if (off < 0)
+ return libbpf_err(off);
+
+ return btf->start_str_off + off;
+}
+
+/* Add a string s to the BTF string section.
+ * Returns:
+ * - > 0 offset into string section, on success;
+ * - < 0, on error.
+ */
+int btf__add_str(struct btf *btf, const char *s)
+{
+ int off;
+
+ if (btf->base_btf) {
+ off = btf__find_str(btf->base_btf, s);
+ if (off != -ENOENT)
+ return off;
}
- container_type = btf__type_by_id(btf, container_id);
- if (!container_type) {
- pr_warn("map:%s cannot find BTF type for container_id:%u\n",
- map_name, container_id);
- return -EINVAL;
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ off = strset__add_str(btf->strs_set, s);
+ if (off < 0)
+ return libbpf_err(off);
+
+ btf->hdr->str_len = strset__data_size(btf->strs_set);
+
+ return btf->start_str_off + off;
+}
+
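[Editor's illustration, not part of the diff: a sketch showing that the string section is deduplicated; str_roundtrip() is an invented helper name.]

#include <bpf/btf.h>
#include <string.h>
#include <assert.h>

/* Sketch: adding the same string twice yields the same offset, and
 * btf__find_str() locates it without modifying the string section.
 */
static void str_roundtrip(struct btf *btf)
{
        int off1 = btf__add_str(btf, "my_type");
        int off2 = btf__add_str(btf, "my_type");
        int found = btf__find_str(btf, "my_type");

        assert(off1 > 0 && off1 == off2 && off1 == found);
        assert(strcmp(btf__str_by_offset(btf, off1), "my_type") == 0);
}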
+static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
+{
+ return libbpf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
+ btf->hdr->type_len, UINT_MAX, add_sz);
+}
+
+static void btf_type_inc_vlen(struct btf_type *t)
+{
+ t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
+}
+
+static int btf_commit_type(struct btf *btf, int data_sz)
+{
+ int err;
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return libbpf_err(err);
+
+ btf->hdr->type_len += data_sz;
+ btf->hdr->str_off += data_sz;
+ btf->nr_types++;
+ return btf->start_id + btf->nr_types - 1;
+}
+
+struct btf_pipe {
+ const struct btf *src;
+ struct btf *dst;
+ struct hashmap *str_off_map; /* map string offsets from src to dst */
+};
+
+static int btf_rewrite_str(__u32 *str_off, void *ctx)
+{
+ struct btf_pipe *p = ctx;
+ long mapped_off;
+ int off, err;
+
+ if (!*str_off) /* nothing to do for empty strings */
+ return 0;
+
+ if (p->str_off_map &&
+ hashmap__find(p->str_off_map, *str_off, &mapped_off)) {
+ *str_off = mapped_off;
+ return 0;
}
- if (!btf_is_struct(container_type) || btf_vlen(container_type) < 2) {
- pr_warn("map:%s container_name:%s is an invalid container struct\n",
- map_name, container_name);
- return -EINVAL;
+ off = btf__add_str(p->dst, btf__str_by_offset(p->src, *str_off));
+ if (off < 0)
+ return off;
+
+ /* Remember string mapping from src to dst. It avoids
+ * performing expensive string comparisons.
+ */
+ if (p->str_off_map) {
+ err = hashmap__append(p->str_off_map, *str_off, off);
+ if (err)
+ return err;
}
- key = btf_members(container_type);
- value = key + 1;
+ *str_off = off;
+ return 0;
+}
+
+int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type)
+{
+ struct btf_pipe p = { .src = src_btf, .dst = btf };
+ struct btf_type *t;
+ int sz, err;
+
+ sz = btf_type_size(src_type);
+ if (sz < 0)
+ return libbpf_err(sz);
+
+ /* deconstruct BTF, if necessary, and invalidate raw_data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ memcpy(t, src_type, sz);
+
+ err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+ if (err)
+ return libbpf_err(err);
+
+ return btf_commit_type(btf, sz);
+}
+
+static int btf_rewrite_type_ids(__u32 *type_id, void *ctx)
+{
+ struct btf *btf = ctx;
+
+ if (!*type_id) /* nothing to do for VOID references */
+ return 0;
+
+ /* we haven't updated btf's type count yet, so
+ * btf->start_id + btf->nr_types - 1 is the type ID offset we should
+ * add to all newly added BTF types
+ */
+ *type_id += btf->start_id + btf->nr_types - 1;
+ return 0;
+}
+
+static size_t btf_dedup_identity_hash_fn(long key, void *ctx);
+static bool btf_dedup_equal_fn(long k1, long k2, void *ctx);
+
+int btf__add_btf(struct btf *btf, const struct btf *src_btf)
+{
+ struct btf_pipe p = { .src = src_btf, .dst = btf };
+ int data_sz, sz, cnt, i, err, old_strs_len;
+ __u32 *off;
+ void *t;
+
+ /* appending split BTF isn't supported yet */
+ if (src_btf->base_btf)
+ return libbpf_err(-ENOTSUP);
- key_size = btf__resolve_size(btf, key->type);
- if (key_size < 0) {
- pr_warn("map:%s invalid BTF key_type_size\n", map_name);
- return key_size;
+ /* deconstruct BTF, if necessary, and invalidate raw_data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ /* remember original strings section size if we have to roll back
+ * partial strings section changes
+ */
+ old_strs_len = btf->hdr->str_len;
+
+ data_sz = src_btf->hdr->type_len;
+ cnt = btf__type_cnt(src_btf) - 1;
+
+ /* pre-allocate enough memory for new types */
+ t = btf_add_type_mem(btf, data_sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ /* pre-allocate enough memory for type offset index for new types */
+ off = btf_add_type_offs_mem(btf, cnt);
+ if (!off)
+ return libbpf_err(-ENOMEM);
+
+ /* Map the string offsets from src_btf to the offsets from btf to improve performance */
+ p.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
+ if (IS_ERR(p.str_off_map))
+ return libbpf_err(-ENOMEM);
+
+ /* bulk copy types data for all types from src_btf */
+ memcpy(t, src_btf->types_data, data_sz);
+
+ for (i = 0; i < cnt; i++) {
+ sz = btf_type_size(t);
+ if (sz < 0) {
+ /* unlikely, has to be corrupted src_btf */
+ err = sz;
+ goto err_out;
+ }
+
+ /* fill out type ID to type offset mapping for lookups by type ID */
+ *off = t - btf->types_data;
+
+ /* add, dedup, and remap strings referenced by this BTF type */
+ err = btf_type_visit_str_offs(t, btf_rewrite_str, &p);
+ if (err)
+ goto err_out;
+
+ /* remap all type IDs referenced from this BTF type */
+ err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf);
+ if (err)
+ goto err_out;
+
+ /* go to next type data and type offset index entry */
+ t += sz;
+ off++;
}
- if (expected_key_size != key_size) {
- pr_warn("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
- map_name, (__u32)key_size, expected_key_size);
+ /* Up until now any of the copied type data was effectively invisible,
+ * so if we exited early before this point due to error, BTF would be
+ * effectively unmodified. There would be extra internal memory
+ * pre-allocated, but it would not be available for querying. But now
+ * that we've copied and rewritten all the data successfully, we can
+ * update type count and various internal offsets and sizes to
+ * "commit" the changes and made them visible to the outside world.
+ */
+ btf->hdr->type_len += data_sz;
+ btf->hdr->str_off += data_sz;
+ btf->nr_types += cnt;
+
+ hashmap__free(p.str_off_map);
+
+ /* return type ID of the first added BTF type */
+ return btf->start_id + btf->nr_types - cnt;
+err_out:
+ /* zero out preallocated memory as if it was just allocated with
+ * libbpf_add_mem()
+ */
+ memset(btf->types_data + btf->hdr->type_len, 0, data_sz);
+ memset(btf->strs_data + old_strs_len, 0, btf->hdr->str_len - old_strs_len);
+
+ /* and now restore original strings section size; types data size
+ * wasn't modified, so doesn't need restoring, see big comment above
+ */
+ btf->hdr->str_len = old_strs_len;
+
+ hashmap__free(p.str_off_map);
+
+ return libbpf_err(err);
+}
+
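[Editor's illustration, not part of the diff: a sketch of btf__add_btf() bulk merging; merge_btf() is an invented helper name.]

#include <bpf/btf.h>

/* Sketch: append every type from src into dst; referenced type IDs and
 * string offsets are remapped, and the ID of the first appended type is
 * returned so callers can translate old src IDs into new dst IDs.
 */
static int merge_btf(struct btf *dst, const struct btf *src)
{
        int first_id = btf__add_btf(dst, src);

        if (first_id < 0)
                return first_id;
        /* old src type ID 'i' now lives at dst type ID 'first_id + i - 1' */
        return first_id;
}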
+/*
+ * Append new BTF_KIND_INT type with:
+ * - *name* - non-empty, non-NULL type name;
+ * - *byte_sz* - power-of-2 (1, 2, 4, ...) size of the type, in bytes;
+ * - *encoding* - a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
+{
+ struct btf_type *t;
+ int sz, name_off;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+ /* byte_sz must be power of 2 */
+ if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
+ return libbpf_err(-EINVAL);
+ if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
+ return libbpf_err(-EINVAL);
+
+ /* deconstruct BTF, if necessary, and invalidate raw_data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type) + sizeof(int);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ /* if something goes wrong later, we might end up with an extra string,
+ * but that shouldn't be a problem, because BTF can't be constructed
+ * completely anyway and will most probably be just discarded
+ */
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_INT, 0, 0);
+ t->size = byte_sz;
+ /* set INT info, we don't allow setting legacy bit offset/size */
+ *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
+
+ return btf_commit_type(btf, sz);
+}
+
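[Editor's illustration, not part of the diff: a sketch combining btf__new_empty() with btf__add_int(); make_base_types() is an invented helper name.]

#include <bpf/btf.h>

/* Sketch: start from an empty BTF object and append basic integer types;
 * returned type IDs are 1-based and grow monotonically.
 */
static struct btf *make_base_types(void)
{
        struct btf *btf = btf__new_empty();
        int char_id, int_id;

        if (!btf)
                return NULL;
        char_id = btf__add_int(btf, "char", 1, BTF_INT_CHAR | BTF_INT_SIGNED);
        int_id  = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
        if (char_id < 0 || int_id < 0) {
                btf__free(btf);
                return NULL;
        }
        return btf;
}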
+/*
+ * Append new BTF_KIND_FLOAT type with:
+ * - *name* - non-empty, non-NULL type name;
+ * - *byte_sz* - size of the type, in bytes;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_float(struct btf *btf, const char *name, size_t byte_sz)
+{
+ struct btf_type *t;
+ int sz, name_off;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+
+ /* byte_sz must be one of the explicitly allowed values */
+ if (byte_sz != 2 && byte_sz != 4 && byte_sz != 8 && byte_sz != 12 &&
+ byte_sz != 16)
+ return libbpf_err(-EINVAL);
+
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_FLOAT, 0, 0);
+ t->size = byte_sz;
+
+ return btf_commit_type(btf, sz);
+}
+
+/* it's completely legal to append BTF types with type IDs pointing forward to
+ * types that haven't been appended yet, so we only make sure that id looks
+ * sane, we can't guarantee that ID will always be valid
+ */
+static int validate_type_id(int id)
+{
+ if (id < 0 || id > BTF_MAX_NR_TYPES)
return -EINVAL;
+ return 0;
+}
+
+/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
+static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
+{
+ struct btf_type *t;
+ int sz, name_off = 0;
+
+ if (validate_type_id(ref_type_id))
+ return libbpf_err(-EINVAL);
+
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+ }
+
+ t->name_off = name_off;
+ t->info = btf_type_info(kind, 0, 0);
+ t->type = ref_type_id;
+
+ return btf_commit_type(btf, sz);
+}
+
+/*
+ * Append new BTF_KIND_PTR type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_ptr(struct btf *btf, int ref_type_id)
+{
+ return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_ARRAY type with:
+ * - *index_type_id* - type ID of the type describing array index;
+ * - *elem_type_id* - type ID of the type describing array element;
+ * - *nr_elems* - the size of the array;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
+{
+ struct btf_type *t;
+ struct btf_array *a;
+ int sz;
+
+ if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
+ return libbpf_err(-EINVAL);
+
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type) + sizeof(struct btf_array);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ t->name_off = 0;
+ t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
+ t->size = 0;
+
+ a = btf_array(t);
+ a->type = elem_type_id;
+ a->index_type = index_type_id;
+ a->nelems = nr_elems;
+
+ return btf_commit_type(btf, sz);
+}
+
+/* generic STRUCT/UNION append function */
+static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
+{
+ struct btf_type *t;
+ int sz, name_off = 0;
+
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
}
- value_size = btf__resolve_size(btf, value->type);
- if (value_size < 0) {
- pr_warn("map:%s invalid BTF value_type_size\n", map_name);
- return value_size;
+ /* start out with vlen=0 and no kflag; this will be adjusted when
+ * adding each member
+ */
+ t->name_off = name_off;
+ t->info = btf_type_info(kind, 0, 0);
+ t->size = bytes_sz;
+
+ return btf_commit_type(btf, sz);
+}
+
+/*
+ * Append new BTF_KIND_STRUCT type with:
+ * - *name* - name of the struct, can be NULL or empty for anonymous structs;
+ * - *byte_sz* - size of the struct, in bytes;
+ *
+ * Struct initially has no fields in it. Fields can be added by
+ * btf__add_field() right after btf__add_struct() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
+}
+
+/*
+ * Append new BTF_KIND_UNION type with:
+ * - *name* - name of the union, can be NULL or empty for anonymous union;
+ * - *byte_sz* - size of the union, in bytes;
+ *
+ * Union initially has no fields in it. Fields can be added by
+ * btf__add_field() right after btf__add_union() succeeds. All fields
+ * should have *bit_offset* of 0.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
+}
+
+static struct btf_type *btf_last_type(struct btf *btf)
+{
+ return btf_type_by_id(btf, btf__type_cnt(btf) - 1);
+}
+
+/*
+ * Append new field for the current STRUCT/UNION type with:
+ * - *name* - name of the field, can be NULL or empty for anonymous field;
+ * - *type_id* - type ID for the type describing field type;
+ * - *bit_offset* - bit offset of the start of the field within struct/union;
+ * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_field(struct btf *btf, const char *name, int type_id,
+ __u32 bit_offset, __u32 bit_size)
+{
+ struct btf_type *t;
+ struct btf_member *m;
+ bool is_bitfield;
+ int sz, name_off = 0;
+
+ /* last type should be union/struct */
+ if (btf->nr_types == 0)
+ return libbpf_err(-EINVAL);
+ t = btf_last_type(btf);
+ if (!btf_is_composite(t))
+ return libbpf_err(-EINVAL);
+
+ if (validate_type_id(type_id))
+ return libbpf_err(-EINVAL);
+ /* best-effort bit field offset/size enforcement */
+ is_bitfield = bit_size || (bit_offset % 8 != 0);
+ if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
+ return libbpf_err(-EINVAL);
+
+ /* only offset 0 is allowed for unions */
+ if (btf_is_union(t) && bit_offset)
+ return libbpf_err(-EINVAL);
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_member);
+ m = btf_add_type_mem(btf, sz);
+ if (!m)
+ return libbpf_err(-ENOMEM);
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
}
- if (expected_value_size != value_size) {
- pr_warn("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
- map_name, (__u32)value_size, expected_value_size);
- return -EINVAL;
+ m->name_off = name_off;
+ m->type = type_id;
+ m->offset = bit_offset | (bit_size << 24);
+
+ /* btf_add_type_mem can invalidate t pointer */
+ t = btf_last_type(btf);
+ /* update parent type's vlen and kflag */
+ t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
+
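[Editor's illustration, not part of the diff: a sketch of btf__add_struct() plus btf__add_field(); add_pair_struct() is an invented helper name and int_id is assumed to be a previously added 4-byte int.]

#include <bpf/btf.h>

/* Sketch: build 'struct pair { int a; int b; };' programmatically. Field bit
 * offsets are absolute within the struct; byte size is set up front.
 */
static int add_pair_struct(struct btf *btf, int int_id)
{
        int sid = btf__add_struct(btf, "pair", 8);      /* 2 x 4-byte int */

        if (sid < 0)
                return sid;
        if (btf__add_field(btf, "a", int_id, 0, 0) ||
            btf__add_field(btf, "b", int_id, 32, 0))
                return -1;
        return sid;                                     /* type ID of 'pair' */
}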
+static int btf_add_enum_common(struct btf *btf, const char *name, __u32 byte_sz,
+ bool is_signed, __u8 kind)
+{
+ struct btf_type *t;
+ int sz, name_off = 0;
+
+ /* byte_sz must be power of 2 */
+ if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
+ return libbpf_err(-EINVAL);
+
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+ }
+
+ /* start out with vlen=0; it will be adjusted when adding enum values */
+ t->name_off = name_off;
+ t->info = btf_type_info(kind, 0, is_signed);
+ t->size = byte_sz;
+
+ return btf_commit_type(btf, sz);
+}
+
+/*
+ * Append new BTF_KIND_ENUM type with:
+ * - *name* - name of the enum, can be NULL or empty for anonymous enums;
+ * - *byte_sz* - size of the enum, in bytes.
+ *
+ * Enum initially has no enum values in it (and corresponds to enum forward
+ * declaration). Enumerator values can be added by btf__add_enum_value()
+ * immediately after btf__add_enum() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ /*
+ * set the signedness to be unsigned, it will change to signed
+ * if any later enumerator is negative.
+ */
+ return btf_add_enum_common(btf, name, byte_sz, false, BTF_KIND_ENUM);
+}
+
+/*
+ * Append new enum value for the current ENUM type with:
+ * - *name* - name of the enumerator value, can't be NULL or empty;
+ * - *value* - integer value corresponding to enum value *name*;
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
+{
+ struct btf_type *t;
+ struct btf_enum *v;
+ int sz, name_off;
+
+ /* last type should be BTF_KIND_ENUM */
+ if (btf->nr_types == 0)
+ return libbpf_err(-EINVAL);
+ t = btf_last_type(btf);
+ if (!btf_is_enum(t))
+ return libbpf_err(-EINVAL);
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+ if (value < INT_MIN || value > UINT_MAX)
+ return libbpf_err(-E2BIG);
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_enum);
+ v = btf_add_type_mem(btf, sz);
+ if (!v)
+ return libbpf_err(-ENOMEM);
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ v->name_off = name_off;
+ v->val = value;
+
+ /* update parent type's vlen */
+ t = btf_last_type(btf);
+ btf_type_inc_vlen(t);
+
+ /* if negative value, set signedness to signed */
+ if (value < 0)
+ t->info = btf_type_info(btf_kind(t), btf_vlen(t), true);
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
+
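[Editor's illustration, not part of the diff: a sketch of btf__add_enum() plus btf__add_enum_value(); add_errcode_enum() is an invented helper name.]

#include <bpf/btf.h>

/* Sketch: an enum starts out empty (an enum forward declaration) and is
 * switched to signed automatically once a negative enumerator is appended.
 */
static int add_errcode_enum(struct btf *btf)
{
        int id = btf__add_enum(btf, "err_code", 4);

        if (id < 0)
                return id;
        if (btf__add_enum_value(btf, "ERR_OK", 0) ||
            btf__add_enum_value(btf, "ERR_FAIL", -1))
                return -1;
        return id;
}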
+/*
+ * Append new BTF_KIND_ENUM64 type with:
+ * - *name* - name of the enum, can be NULL or empty for anonymous enums;
+ * - *byte_sz* - size of the enum, in bytes.
+ * - *is_signed* - whether the enum values are signed or not;
+ *
+ * Enum initially has no enum values in it (and corresponds to enum forward
+ * declaration). Enumerator values can be added by btf__add_enum64_value()
+ * immediately after btf__add_enum64() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_enum64(struct btf *btf, const char *name, __u32 byte_sz,
+ bool is_signed)
+{
+ return btf_add_enum_common(btf, name, byte_sz, is_signed,
+ BTF_KIND_ENUM64);
+}
+
+/*
+ * Append new enum value for the current ENUM64 type with:
+ * - *name* - name of the enumerator value, can't be NULL or empty;
+ * - *value* - integer value corresponding to enum value *name*;
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value)
+{
+ struct btf_enum64 *v;
+ struct btf_type *t;
+ int sz, name_off;
+
+ /* last type should be BTF_KIND_ENUM64 */
+ if (btf->nr_types == 0)
+ return libbpf_err(-EINVAL);
+ t = btf_last_type(btf);
+ if (!btf_is_enum64(t))
+ return libbpf_err(-EINVAL);
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_enum64);
+ v = btf_add_type_mem(btf, sz);
+ if (!v)
+ return libbpf_err(-ENOMEM);
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ v->name_off = name_off;
+ v->val_lo32 = (__u32)value;
+ v->val_hi32 = value >> 32;
+
+ /* update parent type's vlen */
+ t = btf_last_type(btf);
+ btf_type_inc_vlen(t);
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
+
+/*
+ * Append new BTF_KIND_FWD type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
+ * BTF_FWD_UNION, or BTF_FWD_ENUM;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
+{
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+
+ switch (fwd_kind) {
+ case BTF_FWD_STRUCT:
+ case BTF_FWD_UNION: {
+ struct btf_type *t;
+ int id;
+
+ id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
+ if (id <= 0)
+ return id;
+ t = btf_type_by_id(btf, id);
+ t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
+ return id;
+ }
+ case BTF_FWD_ENUM:
+ /* enum forward in BTF currently is just an enum with no enum
+ * values; we also assume a standard 4-byte size for it
+ */
+ return btf__add_enum(btf, name, sizeof(int));
+ default:
+ return libbpf_err(-EINVAL);
+ }
+}
+
+/*
+ * Append new BTF_KIND_TYPEDEF type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
+{
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+
+ return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_VOLATILE type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_volatile(struct btf *btf, int ref_type_id)
+{
+ return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_CONST type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_const(struct btf *btf, int ref_type_id)
+{
+ return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_RESTRICT type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_restrict(struct btf *btf, int ref_type_id)
+{
+ return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_TYPE_TAG type with:
+ * - *value*, non-empty/non-NULL tag value;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
+{
+ if (!value || !value[0])
+ return libbpf_err(-EINVAL);
+
+ return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_FUNC type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_func(struct btf *btf, const char *name,
+ enum btf_func_linkage linkage, int proto_type_id)
+{
+ int id;
+
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+ if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
+ linkage != BTF_FUNC_EXTERN)
+ return libbpf_err(-EINVAL);
+
+ id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
+ if (id > 0) {
+ struct btf_type *t = btf_type_by_id(btf, id);
+
+ t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
+ }
+ return libbpf_err(id);
+}
+
+/*
+ * Append new BTF_KIND_FUNC_PROTO with:
+ * - *ret_type_id* - type ID for return result of a function.
+ *
+ * Function prototype initially has no arguments, but they can be added by
+ * btf__add_func_param() one by one, immediately after
+ * btf__add_func_proto() succeeded.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_func_proto(struct btf *btf, int ret_type_id)
+{
+ struct btf_type *t;
+ int sz;
+
+ if (validate_type_id(ret_type_id))
+ return libbpf_err(-EINVAL);
+
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ /* start out with vlen=0; this will be adjusted when adding function
+ * parameters, if necessary
+ */
+ t->name_off = 0;
+ t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
+ t->type = ret_type_id;
+
+ return btf_commit_type(btf, sz);
+}
+
+/*
+ * Append new function parameter for current FUNC_PROTO type with:
+ * - *name* - parameter name, can be NULL or empty;
+ * - *type_id* - type ID describing the type of the parameter.
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_func_param(struct btf *btf, const char *name, int type_id)
+{
+ struct btf_type *t;
+ struct btf_param *p;
+ int sz, name_off = 0;
+
+ if (validate_type_id(type_id))
+ return libbpf_err(-EINVAL);
+
+ /* last type should be BTF_KIND_FUNC_PROTO */
+ if (btf->nr_types == 0)
+ return libbpf_err(-EINVAL);
+ t = btf_last_type(btf);
+ if (!btf_is_func_proto(t))
+ return libbpf_err(-EINVAL);
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_param);
+ p = btf_add_type_mem(btf, sz);
+ if (!p)
+ return libbpf_err(-ENOMEM);
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
}
- *key_type_id = key->type;
- *value_type_id = value->type;
+ p->name_off = name_off;
+ p->type = type_id;
+ /* update parent type's vlen */
+ t = btf_last_type(btf);
+ btf_type_inc_vlen(t);
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
return 0;
}
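
Putting the three FUNC-related helpers together, a hedged sketch of modelling `int sum(int a, int b)` (int_id is assumed from an earlier btf__add_int() call; error checks omitted):

	int proto_id, func_id;

	proto_id = btf__add_func_proto(btf, int_id);	/* returns int */
	btf__add_func_param(btf, "a", int_id);
	btf__add_func_param(btf, "b", int_id);
	func_id = btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);
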
+/*
+ * Append new BTF_KIND_VAR type with:
+ * - *name* - non-empty/non-NULL name;
+ * - *linkage* - variable linkage, one of BTF_VAR_STATIC,
+ * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
+ * - *type_id* - type ID of the type describing the type of the variable.
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
+{
+ struct btf_type *t;
+ struct btf_var *v;
+ int sz, name_off;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+ if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
+ linkage != BTF_VAR_GLOBAL_EXTERN)
+ return libbpf_err(-EINVAL);
+ if (validate_type_id(type_id))
+ return libbpf_err(-EINVAL);
+
+ /* deconstruct BTF, if necessary, and invalidate raw_data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type) + sizeof(struct btf_var);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
+ t->type = type_id;
+
+ v = btf_var(t);
+ v->linkage = linkage;
+
+ return btf_commit_type(btf, sz);
+}
+
+/*
+ * Append new BTF_KIND_DATASEC type with:
+ * - *name* - non-empty/non-NULL name;
+ * - *byte_sz* - data section size, in bytes.
+ *
+ * Data section is initially empty. Variables info can be added with
+ * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ struct btf_type *t;
+ int sz, name_off;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return libbpf_err(-EINVAL);
+
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ /* start with vlen=0, which will be updated as var_secinfos are added */
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
+ t->size = byte_sz;
+
+ return btf_commit_type(btf, sz);
+}
+
+/*
+ * Append new data section variable information entry for current DATASEC type:
+ * - *var_type_id* - type ID, describing type of the variable;
+ * - *offset* - variable offset within data section, in bytes;
+ * - *byte_sz* - variable size, in bytes.
+ *
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
+{
+ struct btf_type *t;
+ struct btf_var_secinfo *v;
+ int sz;
+
+ /* last type should be BTF_KIND_DATASEC */
+ if (btf->nr_types == 0)
+ return libbpf_err(-EINVAL);
+ t = btf_last_type(btf);
+ if (!btf_is_datasec(t))
+ return libbpf_err(-EINVAL);
+
+ if (validate_type_id(var_type_id))
+ return libbpf_err(-EINVAL);
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_var_secinfo);
+ v = btf_add_type_mem(btf, sz);
+ if (!v)
+ return libbpf_err(-ENOMEM);
+
+ v->type = var_type_id;
+ v->offset = offset;
+ v->size = byte_sz;
+
+ /* update parent type's vlen */
+ t = btf_last_type(btf);
+ btf_type_inc_vlen(t);
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
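
A combined sketch for the VAR/DATASEC helpers above, roughly what a `.data` section holding one 4-byte global looks like (names, sizes and int_id are illustrative; error checks omitted):

	int var_id = btf__add_var(btf, "my_counter", BTF_VAR_GLOBAL_ALLOCATED, int_id);
	int sec_id = btf__add_datasec(btf, ".data", 4 /* section size */);

	btf__add_datasec_var_info(btf, var_id, 0 /* offset */, 4 /* size */);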
+
+/*
+ * Append new BTF_KIND_DECL_TAG type with:
+ * - *value* - non-empty/non-NULL string;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * - *component_idx* - -1 for tagging reference type, otherwise struct/union
+ * member or function argument index;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
+ int component_idx)
+{
+ struct btf_type *t;
+ int sz, value_off;
+
+ if (!value || !value[0] || component_idx < -1)
+ return libbpf_err(-EINVAL);
+
+ if (validate_type_id(ref_type_id))
+ return libbpf_err(-EINVAL);
+
+ if (btf_ensure_modifiable(btf))
+ return libbpf_err(-ENOMEM);
+
+ sz = sizeof(struct btf_type) + sizeof(struct btf_decl_tag);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return libbpf_err(-ENOMEM);
+
+ value_off = btf__add_str(btf, value);
+ if (value_off < 0)
+ return value_off;
+
+ t->name_off = value_off;
+ t->info = btf_type_info(BTF_KIND_DECL_TAG, 0, false);
+ t->type = ref_type_id;
+ btf_decl_tag(t)->component_idx = component_idx;
+
+ return btf_commit_type(btf, sz);
+}
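
As a usage sketch, a declaration tag attached to the second member of a struct; struct_id is assumed to come from earlier btf__add_struct()/btf__add_field() calls:

	/* like __attribute__((btf_decl_tag("kptr"))) on member #1 */
	int tag_id = btf__add_decl_tag(btf, "kptr", struct_id, 1);
	/* component_idx == -1 would tag the struct itself instead */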
+
struct btf_ext_sec_setup_param {
__u32 off;
__u32 len;
@@ -863,6 +2658,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
+ size_t sec_cnt = 0;
/* The start of the info sec (including the __u32 record_size). */
void *info;
@@ -926,8 +2722,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
return -EINVAL;
}
- total_record_size = sec_hdrlen +
- (__u64)num_records * record_size;
+ total_record_size = sec_hdrlen + (__u64)num_records * record_size;
if (info_left < total_record_size) {
pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
@@ -936,12 +2731,14 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
info_left -= total_record_size;
sinfo = (void *)sinfo + total_record_size;
+ sec_cnt++;
}
ext_info = ext_sec->ext_info;
ext_info->len = ext_sec->len - sizeof(__u32);
ext_info->rec_size = record_size;
ext_info->info = info + sizeof(__u32);
+ ext_info->sec_cnt = sec_cnt;
return 0;
}
@@ -972,14 +2769,14 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
+static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
{
struct btf_ext_sec_setup_param param = {
- .off = btf_ext->hdr->field_reloc_off,
- .len = btf_ext->hdr->field_reloc_len,
- .min_rec_size = sizeof(struct bpf_field_reloc),
- .ext_info = &btf_ext->field_reloc_info,
- .desc = "field_reloc",
+ .off = btf_ext->hdr->core_relo_off,
+ .len = btf_ext->hdr->core_relo_len,
+ .min_rec_size = sizeof(struct bpf_core_relo),
+ .ext_info = &btf_ext->core_relo_info,
+ .desc = "core_relo",
};
return btf_ext_setup_info(btf_ext, &param);
@@ -995,7 +2792,10 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
return -EINVAL;
}
- if (hdr->magic != BTF_MAGIC) {
+ if (hdr->magic == bswap_16(BTF_MAGIC)) {
+ pr_warn("BTF.ext in non-native endianness is not supported\n");
+ return -ENOTSUP;
+ } else if (hdr->magic != BTF_MAGIC) {
pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
return -EINVAL;
}
@@ -1020,24 +2820,23 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
void btf_ext__free(struct btf_ext *btf_ext)
{
- if (!btf_ext)
+ if (IS_ERR_OR_NULL(btf_ext))
return;
+ free(btf_ext->func_info.sec_idxs);
+ free(btf_ext->line_info.sec_idxs);
+ free(btf_ext->core_relo_info.sec_idxs);
free(btf_ext->data);
free(btf_ext);
}
-struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
+struct btf_ext *btf_ext__new(const __u8 *data, __u32 size)
{
struct btf_ext *btf_ext;
int err;
- err = btf_ext_parse_hdr(data, size);
- if (err)
- return ERR_PTR(err);
-
btf_ext = calloc(1, sizeof(struct btf_ext));
if (!btf_ext)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
btf_ext->data_size = size;
btf_ext->data = malloc(size);
@@ -1047,9 +2846,15 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
}
memcpy(btf_ext->data, data, size);
- if (btf_ext->hdr->hdr_len <
- offsetofend(struct btf_ext_header, line_info_len))
+ err = btf_ext_parse_hdr(btf_ext->data, size);
+ if (err)
+ goto done;
+
+ if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, line_info_len)) {
+ err = -EINVAL;
goto done;
+ }
+
err = btf_ext_setup_func_info(btf_ext);
if (err)
goto done;
@@ -1058,17 +2863,17 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
if (err)
goto done;
- if (btf_ext->hdr->hdr_len <
- offsetofend(struct btf_ext_header, field_reloc_len))
- goto done;
- err = btf_ext_setup_field_reloc(btf_ext);
+ if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
+ goto done; /* skip core relos parsing */
+
+ err = btf_ext_setup_core_relos(btf_ext);
if (err)
goto done;
done:
if (err) {
btf_ext__free(btf_ext);
- return ERR_PTR(err);
+ return libbpf_err_ptr(err);
}
return btf_ext;
@@ -1080,91 +2885,16 @@ const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size)
return btf_ext->data;
}
-static int btf_ext_reloc_info(const struct btf *btf,
- const struct btf_ext_info *ext_info,
- const char *sec_name, __u32 insns_cnt,
- void **info, __u32 *cnt)
-{
- __u32 sec_hdrlen = sizeof(struct btf_ext_info_sec);
- __u32 i, record_size, existing_len, records_len;
- struct btf_ext_info_sec *sinfo;
- const char *info_sec_name;
- __u64 remain_len;
- void *data;
-
- record_size = ext_info->rec_size;
- sinfo = ext_info->info;
- remain_len = ext_info->len;
- while (remain_len > 0) {
- records_len = sinfo->num_info * record_size;
- info_sec_name = btf__name_by_offset(btf, sinfo->sec_name_off);
- if (strcmp(info_sec_name, sec_name)) {
- remain_len -= sec_hdrlen + records_len;
- sinfo = (void *)sinfo + sec_hdrlen + records_len;
- continue;
- }
-
- existing_len = (*cnt) * record_size;
- data = realloc(*info, existing_len + records_len);
- if (!data)
- return -ENOMEM;
-
- memcpy(data + existing_len, sinfo->data, records_len);
- /* adjust insn_off only, the rest data will be passed
- * to the kernel.
- */
- for (i = 0; i < sinfo->num_info; i++) {
- __u32 *insn_off;
-
- insn_off = data + existing_len + (i * record_size);
- *insn_off = *insn_off / sizeof(struct bpf_insn) +
- insns_cnt;
- }
- *info = data;
- *cnt += sinfo->num_info;
- return 0;
- }
-
- return -ENOENT;
-}
-
-int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *cnt)
-{
- return btf_ext_reloc_info(btf, &btf_ext->func_info, sec_name,
- insns_cnt, func_info, cnt);
-}
-
-int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt)
-{
- return btf_ext_reloc_info(btf, &btf_ext->line_info, sec_name,
- insns_cnt, line_info, cnt);
-}
-
-__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext)
-{
- return btf_ext->func_info.rec_size;
-}
-
-__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
-{
- return btf_ext->line_info.rec_size;
-}
-
struct btf_dedup;
-static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
- const struct btf_dedup_opts *opts);
+static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
static void btf_dedup_free(struct btf_dedup *d);
+static int btf_dedup_prep(struct btf_dedup *d);
static int btf_dedup_strings(struct btf_dedup *d);
static int btf_dedup_prim_types(struct btf_dedup *d);
static int btf_dedup_struct_types(struct btf_dedup *d);
static int btf_dedup_ref_types(struct btf_dedup *d);
+static int btf_dedup_resolve_fwds(struct btf_dedup *d);
static int btf_dedup_compact_types(struct btf_dedup *d);
static int btf_dedup_remap_types(struct btf_dedup *d);
@@ -1272,15 +3002,16 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
* Algorithm summary
* =================
*
- * Algorithm completes its work in 6 separate passes:
+ * Algorithm completes its work in 7 separate passes:
*
* 1. Strings deduplication.
* 2. Primitive types deduplication (int, enum, fwd).
* 3. Struct/union types deduplication.
- * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
+ * 4. Resolve unambiguous forward declarations.
+ * 5. Reference types deduplication (pointers, typedefs, arrays, funcs, func
* protos, and const/volatile/restrict modifiers).
- * 5. Types compaction.
- * 6. Types remapping.
+ * 6. Types compaction.
+ * 7. Types remapping.
*
* Algorithm determines canonical type descriptor, which is a single
* representative type for each truly unique type. This canonical type is the
@@ -1305,17 +3036,30 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
* deduplicating structs/unions is described in greater details in comments for
* `btf_dedup_is_equiv` function.
*/
-int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
- const struct btf_dedup_opts *opts)
+int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts)
{
- struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
+ struct btf_dedup *d;
int err;
+ if (!OPTS_VALID(opts, btf_dedup_opts))
+ return libbpf_err(-EINVAL);
+
+ d = btf_dedup_new(btf, opts);
if (IS_ERR(d)) {
pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
- return -EINVAL;
+ return libbpf_err(-EINVAL);
+ }
+
+ if (btf_ensure_modifiable(btf)) {
+ err = -ENOMEM;
+ goto done;
}
+ err = btf_dedup_prep(d);
+ if (err) {
+ pr_debug("btf_dedup_prep failed:%d\n", err);
+ goto done;
+ }
err = btf_dedup_strings(d);
if (err < 0) {
pr_debug("btf_dedup_strings failed:%d\n", err);
@@ -1331,6 +3075,11 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
pr_debug("btf_dedup_struct_types failed:%d\n", err);
goto done;
}
+ err = btf_dedup_resolve_fwds(d);
+ if (err < 0) {
+ pr_debug("btf_dedup_resolve_fwds failed:%d\n", err);
+ goto done;
+ }
err = btf_dedup_ref_types(d);
if (err < 0) {
pr_debug("btf_dedup_ref_types failed:%d\n", err);
@@ -1349,7 +3098,7 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
done:
btf_dedup_free(d);
- return err;
+ return libbpf_err(err);
}
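
With the new opts-based interface, a typical caller now looks roughly like this (sketch; btf_ext may be NULL when there is no .BTF.ext data whose string/type references need remapping):

	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
	int err = btf__dedup(btf, &opts);

	if (err)
		fprintf(stderr, "BTF dedup failed: %d\n", err);
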
#define BTF_UNPROCESSED_ID ((__u32)-1)
@@ -1378,21 +3127,17 @@ struct btf_dedup {
__u32 *hypot_list;
size_t hypot_cnt;
size_t hypot_cap;
+ /* Whether hypothetical mapping, if successful, would need to adjust
+ * already canonicalized types (due to a new forward declaration to
+ * concrete type resolution). In such a case, during split BTF dedup the
+ * candidate type would still be considered different, because base
+ * BTF is considered to be immutable.
+ */
+ bool hypot_adjust_canon;
/* Various option modifying behavior of algorithm */
struct btf_dedup_opts opts;
-};
-
-struct btf_str_ptr {
- const char *str;
- __u32 new_off;
- bool used;
-};
-
-struct btf_str_ptrs {
- struct btf_str_ptr *ptrs;
- const char *data;
- __u32 cnt;
- __u32 cap;
+ /* temporary strings deduplication state */
+ struct strset *strs_set;
};
static long hash_combine(long h, long value)
@@ -1401,12 +3146,11 @@ static long hash_combine(long h, long value)
}
#define for_each_dedup_cand(d, node, hash) \
- hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
+ hashmap__for_each_key_entry(d->dedup_table, node, hash)
static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
{
- return hashmap__append(d->dedup_table,
- (void *)hash, (void *)(long)type_id);
+ return hashmap__append(d->dedup_table, hash, type_id);
}
static int btf_dedup_hypot_map_add(struct btf_dedup *d,
@@ -1416,7 +3160,7 @@ static int btf_dedup_hypot_map_add(struct btf_dedup *d,
__u32 *new_list;
d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
- new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
+ new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
if (!new_list)
return -ENOMEM;
d->hypot_list = new_list;
@@ -1433,6 +3177,7 @@ static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
for (i = 0; i < d->hypot_cnt; i++)
d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
d->hypot_cnt = 0;
+ d->hypot_adjust_canon = false;
}
static void btf_dedup_free(struct btf_dedup *d)
@@ -1452,38 +3197,35 @@ static void btf_dedup_free(struct btf_dedup *d)
free(d);
}
-static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
+static size_t btf_dedup_identity_hash_fn(long key, void *ctx)
{
- return (size_t)key;
+ return key;
}
-static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
+static size_t btf_dedup_collision_hash_fn(long key, void *ctx)
{
return 0;
}
-static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
+static bool btf_dedup_equal_fn(long k1, long k2, void *ctx)
{
return k1 == k2;
}
-static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
- const struct btf_dedup_opts *opts)
+static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
- int i, err = 0;
+ int i, err = 0, type_cnt;
if (!d)
return ERR_PTR(-ENOMEM);
- d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
- /* dedup_table_size is now used only to force collisions in tests */
- if (opts && opts->dedup_table_size == 1)
+ if (OPTS_GET(opts, force_collisions, false))
hash_fn = btf_dedup_collision_hash_fn;
d->btf = btf;
- d->btf_ext = btf_ext;
+ d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
if (IS_ERR(d->dedup_table)) {
@@ -1492,15 +3234,16 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
goto done;
}
- d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
+ type_cnt = btf__type_cnt(btf);
+ d->map = malloc(sizeof(__u32) * type_cnt);
if (!d->map) {
err = -ENOMEM;
goto done;
}
/* special BTF "void" type is made canonical immediately */
d->map[0] = 0;
- for (i = 1; i <= btf->nr_types; i++) {
- struct btf_type *t = d->btf->types[i];
+ for (i = 1; i < type_cnt; i++) {
+ struct btf_type *t = btf_type_by_id(d->btf, i);
/* VAR and DATASEC are never deduped and are self-canonical */
if (btf_is_var(t) || btf_is_datasec(t))
@@ -1509,12 +3252,12 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
d->map[i] = BTF_UNPROCESSED_ID;
}
- d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
+ d->hypot_map = malloc(sizeof(__u32) * type_cnt);
if (!d->hypot_map) {
err = -ENOMEM;
goto done;
}
- for (i = 0; i <= btf->nr_types; i++)
+ for (i = 0; i < type_cnt; i++)
d->hypot_map[i] = BTF_UNPROCESSED_ID;
done:
@@ -1526,157 +3269,59 @@ done:
return d;
}
-typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
-
/*
* Iterate over all possible places in .BTF and .BTF.ext that can reference
* string and pass pointer to it to a provided callback `fn`.
*/
-static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
+static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void *ctx)
{
- void *line_data_cur, *line_data_end;
- int i, j, r, rec_size;
- struct btf_type *t;
+ int i, r;
+
+ for (i = 0; i < d->btf->nr_types; i++) {
+ struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
- for (i = 1; i <= d->btf->nr_types; i++) {
- t = d->btf->types[i];
- r = fn(&t->name_off, ctx);
+ r = btf_type_visit_str_offs(t, fn, ctx);
if (r)
return r;
-
- switch (btf_kind(t)) {
- case BTF_KIND_STRUCT:
- case BTF_KIND_UNION: {
- struct btf_member *m = btf_members(t);
- __u16 vlen = btf_vlen(t);
-
- for (j = 0; j < vlen; j++) {
- r = fn(&m->name_off, ctx);
- if (r)
- return r;
- m++;
- }
- break;
- }
- case BTF_KIND_ENUM: {
- struct btf_enum *m = btf_enum(t);
- __u16 vlen = btf_vlen(t);
-
- for (j = 0; j < vlen; j++) {
- r = fn(&m->name_off, ctx);
- if (r)
- return r;
- m++;
- }
- break;
- }
- case BTF_KIND_FUNC_PROTO: {
- struct btf_param *m = btf_params(t);
- __u16 vlen = btf_vlen(t);
-
- for (j = 0; j < vlen; j++) {
- r = fn(&m->name_off, ctx);
- if (r)
- return r;
- m++;
- }
- break;
- }
- default:
- break;
- }
}
if (!d->btf_ext)
return 0;
- line_data_cur = d->btf_ext->line_info.info;
- line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
- rec_size = d->btf_ext->line_info.rec_size;
-
- while (line_data_cur < line_data_end) {
- struct btf_ext_info_sec *sec = line_data_cur;
- struct bpf_line_info_min *line_info;
- __u32 num_info = sec->num_info;
-
- r = fn(&sec->sec_name_off, ctx);
- if (r)
- return r;
-
- line_data_cur += sizeof(struct btf_ext_info_sec);
- for (i = 0; i < num_info; i++) {
- line_info = line_data_cur;
- r = fn(&line_info->file_name_off, ctx);
- if (r)
- return r;
- r = fn(&line_info->line_off, ctx);
- if (r)
- return r;
- line_data_cur += rec_size;
- }
- }
-
- return 0;
-}
-
-static int str_sort_by_content(const void *a1, const void *a2)
-{
- const struct btf_str_ptr *p1 = a1;
- const struct btf_str_ptr *p2 = a2;
-
- return strcmp(p1->str, p2->str);
-}
-
-static int str_sort_by_offset(const void *a1, const void *a2)
-{
- const struct btf_str_ptr *p1 = a1;
- const struct btf_str_ptr *p2 = a2;
-
- if (p1->str != p2->str)
- return p1->str < p2->str ? -1 : 1;
- return 0;
-}
-
-static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
-{
- const struct btf_str_ptr *p = pelem;
+ r = btf_ext_visit_str_offs(d->btf_ext, fn, ctx);
+ if (r)
+ return r;
- if (str_ptr != p->str)
- return (const char *)str_ptr < p->str ? -1 : 1;
return 0;
}
-static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
+static int strs_dedup_remap_str_off(__u32 *str_off_ptr, void *ctx)
{
- struct btf_str_ptrs *strs;
- struct btf_str_ptr *s;
+ struct btf_dedup *d = ctx;
+ __u32 str_off = *str_off_ptr;
+ const char *s;
+ int off, err;
- if (*str_off_ptr == 0)
+ /* don't touch empty string or string in main BTF */
+ if (str_off == 0 || str_off < d->btf->start_str_off)
return 0;
- strs = ctx;
- s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
- sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
- if (!s)
- return -EINVAL;
- s->used = true;
- return 0;
-}
-
-static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
-{
- struct btf_str_ptrs *strs;
- struct btf_str_ptr *s;
+ s = btf__str_by_offset(d->btf, str_off);
+ if (d->btf->base_btf) {
+ err = btf__find_str(d->btf->base_btf, s);
+ if (err >= 0) {
+ *str_off_ptr = err;
+ return 0;
+ }
+ if (err != -ENOENT)
+ return err;
+ }
- if (*str_off_ptr == 0)
- return 0;
+ off = strset__add_str(d->strs_set, s);
+ if (off < 0)
+ return off;
- strs = ctx;
- s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
- sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
- if (!s)
- return -EINVAL;
- *str_off_ptr = s->new_off;
+ *str_off_ptr = d->btf->start_str_off + off;
return 0;
}
@@ -1693,116 +3338,43 @@ static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
*/
static int btf_dedup_strings(struct btf_dedup *d)
{
- const struct btf_header *hdr = d->btf->hdr;
- char *start = (char *)d->btf->nohdr_data + hdr->str_off;
- char *end = start + d->btf->hdr->str_len;
- char *p = start, *tmp_strs = NULL;
- struct btf_str_ptrs strs = {
- .cnt = 0,
- .cap = 0,
- .ptrs = NULL,
- .data = start,
- };
- int i, j, err = 0, grp_idx;
- bool grp_used;
-
- /* build index of all strings */
- while (p < end) {
- if (strs.cnt + 1 > strs.cap) {
- struct btf_str_ptr *new_ptrs;
-
- strs.cap += max(strs.cnt / 2, 16U);
- new_ptrs = realloc(strs.ptrs,
- sizeof(strs.ptrs[0]) * strs.cap);
- if (!new_ptrs) {
- err = -ENOMEM;
- goto done;
- }
- strs.ptrs = new_ptrs;
- }
-
- strs.ptrs[strs.cnt].str = p;
- strs.ptrs[strs.cnt].used = false;
+ int err;
- p += strlen(p) + 1;
- strs.cnt++;
- }
+ if (d->btf->strs_deduped)
+ return 0;
- /* temporary storage for deduplicated strings */
- tmp_strs = malloc(d->btf->hdr->str_len);
- if (!tmp_strs) {
- err = -ENOMEM;
- goto done;
+ d->strs_set = strset__new(BTF_MAX_STR_OFFSET, NULL, 0);
+ if (IS_ERR(d->strs_set)) {
+ err = PTR_ERR(d->strs_set);
+ goto err_out;
}
- /* mark all used strings */
- strs.ptrs[0].used = true;
- err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
- if (err)
- goto done;
-
- /* sort strings by context, so that we can identify duplicates */
- qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
-
- /*
- * iterate groups of equal strings and if any instance in a group was
- * referenced, emit single instance and remember new offset
- */
- p = tmp_strs;
- grp_idx = 0;
- grp_used = strs.ptrs[0].used;
- /* iterate past end to avoid code duplication after loop */
- for (i = 1; i <= strs.cnt; i++) {
- /*
- * when i == strs.cnt, we want to skip string comparison and go
- * straight to handling last group of strings (otherwise we'd
- * need to handle last group after the loop w/ duplicated code)
- */
- if (i < strs.cnt &&
- !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
- grp_used = grp_used || strs.ptrs[i].used;
- continue;
- }
-
- /*
- * this check would have been required after the loop to handle
- * last group of strings, but due to <= condition in a loop
- * we avoid that duplication
+ if (!d->btf->base_btf) {
+ /* insert empty string; we won't be looking it up during strings
+ * dedup, but it's good to have it for generic BTF string lookups
*/
- if (grp_used) {
- int new_off = p - tmp_strs;
- __u32 len = strlen(strs.ptrs[grp_idx].str);
-
- memmove(p, strs.ptrs[grp_idx].str, len + 1);
- for (j = grp_idx; j < i; j++)
- strs.ptrs[j].new_off = new_off;
- p += len + 1;
- }
-
- if (i < strs.cnt) {
- grp_idx = i;
- grp_used = strs.ptrs[i].used;
- }
+ err = strset__add_str(d->strs_set, "");
+ if (err < 0)
+ goto err_out;
}
- /* replace original strings with deduped ones */
- d->btf->hdr->str_len = p - tmp_strs;
- memmove(start, tmp_strs, d->btf->hdr->str_len);
- end = start + d->btf->hdr->str_len;
-
- /* restore original order for further binary search lookups */
- qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
-
/* remap string offsets */
- err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
+ err = btf_for_each_str_off(d, strs_dedup_remap_str_off, d);
if (err)
- goto done;
+ goto err_out;
+
+ /* replace BTF string data and hash with deduped ones */
+ strset__free(d->btf->strs_set);
+ d->btf->hdr->str_len = strset__data_size(d->strs_set);
+ d->btf->strs_set = d->strs_set;
+ d->strs_set = NULL;
+ d->btf->strs_deduped = true;
+ return 0;
- d->btf->hdr->str_len = end - start;
+err_out:
+ strset__free(d->strs_set);
+ d->strs_set = NULL;
-done:
- free(tmp_strs);
- free(strs.ptrs);
return err;
}
@@ -1823,8 +3395,8 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
t1->size == t2->size;
}
-/* Calculate type signature hash of INT. */
-static long btf_hash_int(struct btf_type *t)
+/* Calculate type signature hash of INT or TAG. */
+static long btf_hash_int_decl_tag(struct btf_type *t)
{
__u32 info = *(__u32 *)(t + 1);
long h;
@@ -1834,8 +3406,8 @@ static long btf_hash_int(struct btf_type *t)
return h;
}
-/* Check structural equality of two INTs. */
-static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
+/* Check structural equality of two INTs or TAGs. */
+static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2)
{
__u32 info1, info2;
@@ -1846,28 +3418,22 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
return info1 == info2;
}
-/* Calculate type signature hash of ENUM. */
+/* Calculate type signature hash of ENUM/ENUM64. */
static long btf_hash_enum(struct btf_type *t)
{
long h;
- /* don't hash vlen and enum members to support enum fwd resolving */
+ /* don't hash vlen, enum members and size to support enum fwd resolving */
h = hash_combine(0, t->name_off);
- h = hash_combine(h, t->info & ~0xffff);
- h = hash_combine(h, t->size);
return h;
}
-/* Check structural equality of two ENUMs. */
-static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
+static bool btf_equal_enum_members(struct btf_type *t1, struct btf_type *t2)
{
const struct btf_enum *m1, *m2;
__u16 vlen;
int i;
- if (!btf_equal_common(t1, t2))
- return false;
-
vlen = btf_vlen(t1);
m1 = btf_enum(t1);
m2 = btf_enum(t2);
@@ -1880,19 +3446,55 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
return true;
}
+static bool btf_equal_enum64_members(struct btf_type *t1, struct btf_type *t2)
+{
+ const struct btf_enum64 *m1, *m2;
+ __u16 vlen;
+ int i;
+
+ vlen = btf_vlen(t1);
+ m1 = btf_enum64(t1);
+ m2 = btf_enum64(t2);
+ for (i = 0; i < vlen; i++) {
+ if (m1->name_off != m2->name_off || m1->val_lo32 != m2->val_lo32 ||
+ m1->val_hi32 != m2->val_hi32)
+ return false;
+ m1++;
+ m2++;
+ }
+ return true;
+}
+
+/* Check structural equality of two ENUMs or ENUM64s. */
+static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
+{
+ if (!btf_equal_common(t1, t2))
+ return false;
+
+ /* t1 & t2 kinds are identical because of btf_equal_common */
+ if (btf_kind(t1) == BTF_KIND_ENUM)
+ return btf_equal_enum_members(t1, t2);
+ else
+ return btf_equal_enum64_members(t1, t2);
+}
+
static inline bool btf_is_enum_fwd(struct btf_type *t)
{
- return btf_is_enum(t) && btf_vlen(t) == 0;
+ return btf_is_any_enum(t) && btf_vlen(t) == 0;
}
static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
{
if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
return btf_equal_enum(t1, t2);
- /* ignore vlen when comparing */
+ /* At this point either t1 or t2 or both are forward declarations, thus:
+ * - skip comparing vlen because it is zero for forward declarations;
+ * - skip comparing size to allow enum forward declarations
+ * to be compatible with enum64 full declarations;
+ * - skip comparing kind for the same reason.
+ */
return t1->name_off == t2->name_off &&
- (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
- t1->size == t2->size;
+ btf_is_any_enum(t1) && btf_is_any_enum(t2);
}
/*
@@ -1917,8 +3519,8 @@ static long btf_hash_struct(struct btf_type *t)
}
/*
- * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
- * IDs. This check is performed during type graph equivalence check and
+ * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced
+ * type IDs. This check is performed during type graph equivalence check and
* referenced types equivalence is checked separately.
*/
static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
@@ -2067,6 +3669,70 @@ static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
return true;
}
+/* Prepare split BTF for deduplication by calculating hashes of base BTF's
+ * types and initializing the rest of the state (canonical type mapping) for
+ * the fixed base BTF part.
+ */
+static int btf_dedup_prep(struct btf_dedup *d)
+{
+ struct btf_type *t;
+ int type_id;
+ long h;
+
+ if (!d->btf->base_btf)
+ return 0;
+
+ for (type_id = 1; type_id < d->btf->start_id; type_id++) {
+ t = btf_type_by_id(d->btf, type_id);
+
+ /* all base BTF types are self-canonical by definition */
+ d->map[type_id] = type_id;
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_VAR:
+ case BTF_KIND_DATASEC:
+ /* VAR and DATASEC are never hash/deduplicated */
+ continue;
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_FWD:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_TYPE_TAG:
+ h = btf_hash_common(t);
+ break;
+ case BTF_KIND_INT:
+ case BTF_KIND_DECL_TAG:
+ h = btf_hash_int_decl_tag(t);
+ break;
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ h = btf_hash_enum(t);
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ h = btf_hash_struct(t);
+ break;
+ case BTF_KIND_ARRAY:
+ h = btf_hash_array(t);
+ break;
+ case BTF_KIND_FUNC_PROTO:
+ h = btf_hash_fnproto(t);
+ break;
+ default:
+ pr_debug("unknown kind %d for type [%d]\n", btf_kind(t), type_id);
+ return -EINVAL;
+ }
+ if (btf_dedup_table_add(d, h, type_id))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
/*
* Deduplicate primitive types, that can't reference other types, by calculating
* their type signature hash and comparing them with any possible canonical
@@ -2075,7 +3741,7 @@ static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
*/
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
- struct btf_type *t = d->btf->types[type_id];
+ struct btf_type *t = btf_type_by_id(d->btf, type_id);
struct hashmap_entry *hash_entry;
struct btf_type *cand;
/* if we don't find equivalent type, then we are canonical */
@@ -2096,14 +3762,16 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_FUNC_PROTO:
case BTF_KIND_VAR:
case BTF_KIND_DATASEC:
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
return 0;
case BTF_KIND_INT:
- h = btf_hash_int(t);
+ h = btf_hash_int_decl_tag(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
- if (btf_equal_int(t, cand)) {
+ cand_id = hash_entry->value;
+ cand = btf_type_by_id(d->btf, cand_id);
+ if (btf_equal_int_tag(t, cand)) {
new_id = cand_id;
break;
}
@@ -2111,16 +3779,15 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
h = btf_hash_enum(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand_id = hash_entry->value;
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_enum(t, cand)) {
new_id = cand_id;
break;
}
- if (d->opts.dont_resolve_fwds)
- continue;
if (btf_compat_enum(t, cand)) {
if (btf_is_enum_fwd(t)) {
/* resolve fwd to full enum */
@@ -2134,10 +3801,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
break;
case BTF_KIND_FWD:
+ case BTF_KIND_FLOAT:
h = btf_hash_common(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand_id = hash_entry->value;
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_common(t, cand)) {
new_id = cand_id;
break;
@@ -2160,8 +3828,8 @@ static int btf_dedup_prim_types(struct btf_dedup *d)
{
int i, err;
- for (i = 1; i <= d->btf->nr_types; i++) {
- err = btf_dedup_prim_type(d, i);
+ for (i = 0; i < d->btf->nr_types; i++) {
+ err = btf_dedup_prim_type(d, d->btf->start_id + i);
if (err)
return err;
}
@@ -2196,13 +3864,13 @@ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
{
__u32 orig_type_id = type_id;
- if (!btf_is_fwd(d->btf->types[type_id]))
+ if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
return type_id;
while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
type_id = d->map[type_id];
- if (!btf_is_fwd(d->btf->types[type_id]))
+ if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
return type_id;
return orig_type_id;
@@ -2214,6 +3882,46 @@ static inline __u16 btf_fwd_kind(struct btf_type *t)
return btf_kflag(t) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
}
+/* Check if given two types are identical ARRAY definitions */
+static bool btf_dedup_identical_arrays(struct btf_dedup *d, __u32 id1, __u32 id2)
+{
+ struct btf_type *t1, *t2;
+
+ t1 = btf_type_by_id(d->btf, id1);
+ t2 = btf_type_by_id(d->btf, id2);
+ if (!btf_is_array(t1) || !btf_is_array(t2))
+ return false;
+
+ return btf_equal_array(t1, t2);
+}
+
+/* Check if given two types are identical STRUCT/UNION definitions */
+static bool btf_dedup_identical_structs(struct btf_dedup *d, __u32 id1, __u32 id2)
+{
+ const struct btf_member *m1, *m2;
+ struct btf_type *t1, *t2;
+ int n, i;
+
+ t1 = btf_type_by_id(d->btf, id1);
+ t2 = btf_type_by_id(d->btf, id2);
+
+ if (!btf_is_composite(t1) || btf_kind(t1) != btf_kind(t2))
+ return false;
+
+ if (!btf_shallow_equal_struct(t1, t2))
+ return false;
+
+ m1 = btf_members(t1);
+ m2 = btf_members(t2);
+ for (i = 0, n = btf_vlen(t1); i < n; i++, m1++, m2++) {
+ if (m1->type != m2->type &&
+ !btf_dedup_identical_arrays(d, m1->type, m2->type) &&
+ !btf_dedup_identical_structs(d, m1->type, m2->type))
+ return false;
+ }
+ return true;
+}
+
/*
* Check equivalence of BTF type graph formed by candidate struct/union (we'll
* call it "candidate graph" in this description for brevity) to a type graph
@@ -2324,14 +4032,36 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
canon_id = resolve_fwd_id(d, canon_id);
hypot_type_id = d->hypot_map[canon_id];
- if (hypot_type_id <= BTF_MAX_NR_TYPES)
- return hypot_type_id == cand_id;
+ if (hypot_type_id <= BTF_MAX_NR_TYPES) {
+ if (hypot_type_id == cand_id)
+ return 1;
+ /* In some cases the compiler will generate different DWARF types
+ * for *identical* array type definitions and use them for
+ * different fields within the *same* struct. This breaks the type
+ * equivalence check, which assumes that the candidate type
+ * sub-graph has consistent, compiler-deduplicated types within a
+ * single CU. So work around that by explicitly allowing identical
+ * array types here.
+ */
+ if (btf_dedup_identical_arrays(d, hypot_type_id, cand_id))
+ return 1;
+ /* It turns out that a similar situation can sometimes happen with
+ * structs/unions, sigh... Handle the case where
+ * structs/unions are exactly the same, down to the referenced
+ * type IDs. Anything more complicated (e.g., if referenced
+ * types are different, but equivalent) is *way more*
+ * complicated and requires a many-to-many equivalence mapping.
+ */
+ if (btf_dedup_identical_structs(d, hypot_type_id, cand_id))
+ return 1;
+ return 0;
+ }
if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
return -ENOMEM;
- cand_type = d->btf->types[cand_id];
- canon_type = d->btf->types[canon_id];
+ cand_type = btf_type_by_id(d->btf, cand_id);
+ canon_type = btf_type_by_id(d->btf, canon_id);
cand_kind = btf_kind(cand_type);
canon_kind = btf_kind(canon_type);
@@ -2339,8 +4069,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return 0;
/* FWD <--> STRUCT/UNION equivalence check, if enabled */
- if (!d->opts.dont_resolve_fwds
- && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
+ if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
&& cand_kind != canon_kind) {
__u16 real_kind;
__u16 fwd_kind;
@@ -2351,6 +4080,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
} else {
real_kind = cand_kind;
fwd_kind = btf_fwd_kind(canon_type);
+ /* we'd need to resolve base FWD to STRUCT/UNION */
+ if (fwd_kind == real_kind && canon_id < d->btf->start_id)
+ d->hypot_adjust_canon = true;
}
return fwd_kind == real_kind;
}
@@ -2360,15 +4092,14 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
switch (cand_kind) {
case BTF_KIND_INT:
- return btf_equal_int(cand_type, canon_type);
+ return btf_equal_int_tag(cand_type, canon_type);
case BTF_KIND_ENUM:
- if (d->opts.dont_resolve_fwds)
- return btf_equal_enum(cand_type, canon_type);
- else
- return btf_compat_enum(cand_type, canon_type);
+ case BTF_KIND_ENUM64:
+ return btf_compat_enum(cand_type, canon_type);
case BTF_KIND_FWD:
+ case BTF_KIND_FLOAT:
return btf_equal_common(cand_type, canon_type);
case BTF_KIND_CONST:
@@ -2377,6 +4108,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_TYPE_TAG:
if (cand_type->info != canon_type->info)
return 0;
return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
@@ -2388,8 +4120,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return 0;
cand_arr = btf_array(cand_type);
canon_arr = btf_array(canon_type);
- eq = btf_dedup_is_equiv(d,
- cand_arr->index_type, canon_arr->index_type);
+ eq = btf_dedup_is_equiv(d, cand_arr->index_type, canon_arr->index_type);
if (eq <= 0)
return eq;
return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
@@ -2472,18 +4203,18 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
*/
static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
{
- __u32 cand_type_id, targ_type_id;
+ __u32 canon_type_id, targ_type_id;
__u16 t_kind, c_kind;
__u32 t_id, c_id;
int i;
for (i = 0; i < d->hypot_cnt; i++) {
- cand_type_id = d->hypot_list[i];
- targ_type_id = d->hypot_map[cand_type_id];
+ canon_type_id = d->hypot_list[i];
+ targ_type_id = d->hypot_map[canon_type_id];
t_id = resolve_type_id(d, targ_type_id);
- c_id = resolve_type_id(d, cand_type_id);
- t_kind = btf_kind(d->btf->types[t_id]);
- c_kind = btf_kind(d->btf->types[c_id]);
+ c_id = resolve_type_id(d, canon_type_id);
+ t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
+ c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
/*
* Resolve FWD into STRUCT/UNION.
* It's ok to resolve FWD into STRUCT/UNION that's not yet
@@ -2496,9 +4227,26 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
* stability is not a requirement for STRUCT/UNION equivalence
* checks, though.
*/
+
+ /* if it's the split BTF case, we still need to point base FWD
+ * to STRUCT/UNION in a split BTF, because FWDs from split BTF
+ * will be resolved against base FWD. If we don't point base
+ * canonical FWD to the resolved STRUCT/UNION, then all the
+ * FWDs in split BTF won't be correctly resolved to a proper
+ * STRUCT/UNION.
+ */
if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
d->map[c_id] = t_id;
- else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
+
+ /* if graph equivalence determined that we'd need to adjust
+ * base canonical types, then we need to only point base FWDs
+ * to STRUCTs/UNIONs and do no more modifications. For all
+ * other purposes the type graphs were not equivalent.
+ */
+ if (d->hypot_adjust_canon)
+ continue;
+
+ if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
d->map[t_id] = c_id;
if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
@@ -2551,7 +4299,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
return 0;
- t = d->btf->types[type_id];
+ t = btf_type_by_id(d->btf, type_id);
kind = btf_kind(t);
if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
@@ -2559,7 +4307,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_struct(t);
for_each_dedup_cand(d, hash_entry, h) {
- __u32 cand_id = (__u32)(long)hash_entry->value;
+ __u32 cand_id = hash_entry->value;
int eq;
/*
@@ -2572,7 +4320,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
* creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
* FWD and compatible STRUCT/UNION are considered equivalent.
*/
- cand_type = d->btf->types[cand_id];
+ cand_type = btf_type_by_id(d->btf, cand_id);
if (!btf_shallow_equal_struct(t, cand_type))
continue;
@@ -2582,8 +4330,10 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
return eq;
if (!eq)
continue;
- new_id = cand_id;
btf_dedup_merge_hypot_map(d);
+ if (d->hypot_adjust_canon) /* not really equivalent */
+ continue;
+ new_id = cand_id;
break;
}
@@ -2598,8 +4348,8 @@ static int btf_dedup_struct_types(struct btf_dedup *d)
{
int i, err;
- for (i = 1; i <= d->btf->nr_types; i++) {
- err = btf_dedup_struct_type(d, i);
+ for (i = 0; i < d->btf->nr_types; i++) {
+ err = btf_dedup_struct_type(d, d->btf->start_id + i);
if (err)
return err;
}
@@ -2644,7 +4394,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
return resolve_type_id(d, type_id);
- t = d->btf->types[type_id];
+ t = btf_type_by_id(d->btf, type_id);
d->map[type_id] = BTF_IN_PROGRESS_ID;
switch (btf_kind(t)) {
@@ -2654,6 +4404,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
+ case BTF_KIND_TYPE_TAG:
ref_type_id = btf_dedup_ref_type(d, t->type);
if (ref_type_id < 0)
return ref_type_id;
@@ -2661,8 +4412,8 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_common(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand_id = hash_entry->value;
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_common(t, cand)) {
new_id = cand_id;
break;
@@ -2670,6 +4421,23 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
}
break;
+ case BTF_KIND_DECL_TAG:
+ ref_type_id = btf_dedup_ref_type(d, t->type);
+ if (ref_type_id < 0)
+ return ref_type_id;
+ t->type = ref_type_id;
+
+ h = btf_hash_int_decl_tag(t);
+ for_each_dedup_cand(d, hash_entry, h) {
+ cand_id = hash_entry->value;
+ cand = btf_type_by_id(d->btf, cand_id);
+ if (btf_equal_int_tag(t, cand)) {
+ new_id = cand_id;
+ break;
+ }
+ }
+ break;
+
case BTF_KIND_ARRAY: {
struct btf_array *info = btf_array(t);
@@ -2685,8 +4453,8 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_array(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand_id = hash_entry->value;
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_array(t, cand)) {
new_id = cand_id;
break;
@@ -2717,8 +4485,8 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_fnproto(t);
for_each_dedup_cand(d, hash_entry, h) {
- cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand_id = hash_entry->value;
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_fnproto(t, cand)) {
new_id = cand_id;
break;
@@ -2742,8 +4510,8 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
{
int i, err;
- for (i = 1; i <= d->btf->nr_types; i++) {
- err = btf_dedup_ref_type(d, i);
+ for (i = 0; i < d->btf->nr_types; i++) {
+ err = btf_dedup_ref_type(d, d->btf->start_id + i);
if (err < 0)
return err;
}
@@ -2754,6 +4522,134 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
}
/*
+ * Collect a map from type names to type ids for all canonical structs
+ * and unions. If the same name is shared by several canonical types,
+ * use the special value 0 to indicate this fact.
+ */
+static int btf_dedup_fill_unique_names_map(struct btf_dedup *d, struct hashmap *names_map)
+{
+ __u32 nr_types = btf__type_cnt(d->btf);
+ struct btf_type *t;
+ __u32 type_id;
+ __u16 kind;
+ int err;
+
+ /*
+ * Iterate over base and split module ids in order to get all
+ * available structs in the map.
+ */
+ for (type_id = 1; type_id < nr_types; ++type_id) {
+ t = btf_type_by_id(d->btf, type_id);
+ kind = btf_kind(t);
+
+ if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
+ continue;
+
+ /* Skip non-canonical types */
+ if (type_id != d->map[type_id])
+ continue;
+
+ err = hashmap__add(names_map, t->name_off, type_id);
+ if (err == -EEXIST)
+ err = hashmap__set(names_map, t->name_off, 0, NULL, NULL);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int btf_dedup_resolve_fwd(struct btf_dedup *d, struct hashmap *names_map, __u32 type_id)
+{
+ struct btf_type *t = btf_type_by_id(d->btf, type_id);
+ enum btf_fwd_kind fwd_kind = btf_kflag(t);
+ __u16 cand_kind, kind = btf_kind(t);
+ struct btf_type *cand_t;
+ uintptr_t cand_id;
+
+ if (kind != BTF_KIND_FWD)
+ return 0;
+
+ /* Skip if this FWD already has a mapping */
+ if (type_id != d->map[type_id])
+ return 0;
+
+ if (!hashmap__find(names_map, t->name_off, &cand_id))
+ return 0;
+
+ /* Zero is a special value indicating that name is not unique */
+ if (!cand_id)
+ return 0;
+
+ cand_t = btf_type_by_id(d->btf, cand_id);
+ cand_kind = btf_kind(cand_t);
+ if ((cand_kind == BTF_KIND_STRUCT && fwd_kind != BTF_FWD_STRUCT) ||
+ (cand_kind == BTF_KIND_UNION && fwd_kind != BTF_FWD_UNION))
+ return 0;
+
+ d->map[type_id] = cand_id;
+
+ return 0;
+}
+
+/*
+ * Resolve unambiguous forward declarations.
+ *
+ * The lion's share of all FWD declarations is resolved during the
+ * `btf_dedup_struct_types` phase, when different type graphs are
+ * compared against each other. However, if in some compilation unit a
+ * FWD declaration is not part of any type graph compared against
+ * another type graph, that declaration's canonical type would not be
+ * changed. Example:
+ *
+ * CU #1:
+ *
+ * struct foo;
+ * struct foo *some_global;
+ *
+ * CU #2:
+ *
+ * struct foo { int u; };
+ * struct foo *another_global;
+ *
+ * After `btf_dedup_struct_types` the BTF looks as follows:
+ *
+ * [1] STRUCT 'foo' size=4 vlen=1 ...
+ * [2] INT 'int' size=4 ...
+ * [3] PTR '(anon)' type_id=1
+ * [4] FWD 'foo' fwd_kind=struct
+ * [5] PTR '(anon)' type_id=4
+ *
+ * This pass assumes that such FWD declarations should be mapped to
+ * structs or unions with an identical name, provided that the name
+ * is not ambiguous.
+ */
+static int btf_dedup_resolve_fwds(struct btf_dedup *d)
+{
+ int i, err;
+ struct hashmap *names_map;
+
+ names_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL);
+ if (IS_ERR(names_map))
+ return PTR_ERR(names_map);
+
+ err = btf_dedup_fill_unique_names_map(d, names_map);
+ if (err < 0)
+ goto exit;
+
+ for (i = 0; i < d->btf->nr_types; i++) {
+ err = btf_dedup_resolve_fwd(d, names_map, d->btf->start_id + i);
+ if (err < 0)
+ break;
+ }
+
+exit:
+ hashmap__free(names_map);
+ return err;
+}
+
+/*
* Compact types.
*
* After we established for each type its corresponding canonical representative
@@ -2766,51 +4662,49 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
*/
static int btf_dedup_compact_types(struct btf_dedup *d)
{
- struct btf_type **new_types;
- __u32 next_type_id = 1;
- char *types_start, *p;
- int i, len;
+ __u32 *new_offs;
+ __u32 next_type_id = d->btf->start_id;
+ const struct btf_type *t;
+ void *p;
+ int i, id, len;
/* we are going to reuse hypot_map to store compaction remapping */
d->hypot_map[0] = 0;
- for (i = 1; i <= d->btf->nr_types; i++)
- d->hypot_map[i] = BTF_UNPROCESSED_ID;
+ /* base BTF types are not renumbered */
+ for (id = 1; id < d->btf->start_id; id++)
+ d->hypot_map[id] = id;
+ for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++)
+ d->hypot_map[id] = BTF_UNPROCESSED_ID;
- types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
- p = types_start;
+ p = d->btf->types_data;
- for (i = 1; i <= d->btf->nr_types; i++) {
- if (d->map[i] != i)
+ for (i = 0, id = d->btf->start_id; i < d->btf->nr_types; i++, id++) {
+ if (d->map[id] != id)
continue;
- len = btf_type_size(d->btf->types[i]);
+ t = btf__type_by_id(d->btf, id);
+ len = btf_type_size(t);
if (len < 0)
return len;
- memmove(p, d->btf->types[i], len);
- d->hypot_map[i] = next_type_id;
- d->btf->types[next_type_id] = (struct btf_type *)p;
+ memmove(p, t, len);
+ d->hypot_map[id] = next_type_id;
+ d->btf->type_offs[next_type_id - d->btf->start_id] = p - d->btf->types_data;
p += len;
next_type_id++;
}
/* shrink struct btf's internal types index and update btf_header */
- d->btf->nr_types = next_type_id - 1;
- d->btf->types_size = d->btf->nr_types;
- d->btf->hdr->type_len = p - types_start;
- new_types = realloc(d->btf->types,
- (1 + d->btf->nr_types) * sizeof(struct btf_type *));
- if (!new_types)
+ d->btf->nr_types = next_type_id - d->btf->start_id;
+ d->btf->type_offs_cap = d->btf->nr_types;
+ d->btf->hdr->type_len = p - d->btf->types_data;
+ new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
+ sizeof(*new_offs));
+ if (d->btf->type_offs_cap && !new_offs)
return -ENOMEM;
- d->btf->types = new_types;
-
- /* make sure string section follows type information without gaps */
- d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
- memmove(p, d->btf->strings, d->btf->hdr->str_len);
- d->btf->strings = p;
- p += d->btf->hdr->str_len;
-
- d->btf->data_size = p - (char *)d->btf->data;
+ d->btf->type_offs = new_offs;
+ d->btf->hdr->str_off = d->btf->hdr->type_len;
+ d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
return 0;
}
@@ -2820,15 +4714,18 @@ static int btf_dedup_compact_types(struct btf_dedup *d)
* then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
* which is populated during compaction phase.
*/
-static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
+static int btf_dedup_remap_type_id(__u32 *type_id, void *ctx)
{
+ struct btf_dedup *d = ctx;
__u32 resolved_type_id, new_type_id;
- resolved_type_id = resolve_type_id(d, type_id);
+ resolved_type_id = resolve_type_id(d, *type_id);
new_type_id = d->hypot_map[resolved_type_id];
if (new_type_id > BTF_MAX_NR_TYPES)
return -EINVAL;
- return new_type_id;
+
+ *type_id = new_type_id;
+ return 0;
}
/*
@@ -2841,15 +4738,92 @@ static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
* referenced from any BTF type (e.g., struct fields, func proto args, etc) to
* their final deduped type IDs.
*/
-static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
+static int btf_dedup_remap_types(struct btf_dedup *d)
{
- struct btf_type *t = d->btf->types[type_id];
int i, r;
+ for (i = 0; i < d->btf->nr_types; i++) {
+ struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i);
+
+ r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d);
+ if (r)
+ return r;
+ }
+
+ if (!d->btf_ext)
+ return 0;
+
+ r = btf_ext_visit_type_ids(d->btf_ext, btf_dedup_remap_type_id, d);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/*
+ * Probe few well-known locations for vmlinux kernel image and try to load BTF
+ * data out of it to use for target BTF.
+ */
+struct btf *btf__load_vmlinux_btf(void)
+{
+ const char *locations[] = {
+ /* try canonical vmlinux BTF through sysfs first */
+ "/sys/kernel/btf/vmlinux",
+ /* fall back to trying to find vmlinux on disk otherwise */
+ "/boot/vmlinux-%1$s",
+ "/lib/modules/%1$s/vmlinux-%1$s",
+ "/lib/modules/%1$s/build/vmlinux",
+ "/usr/lib/modules/%1$s/kernel/vmlinux",
+ "/usr/lib/debug/boot/vmlinux-%1$s",
+ "/usr/lib/debug/boot/vmlinux-%1$s.debug",
+ "/usr/lib/debug/lib/modules/%1$s/vmlinux",
+ };
+ char path[PATH_MAX + 1];
+ struct utsname buf;
+ struct btf *btf;
+ int i, err;
+
+ uname(&buf);
+
+ for (i = 0; i < ARRAY_SIZE(locations); i++) {
+ snprintf(path, PATH_MAX, locations[i], buf.release);
+
+ if (faccessat(AT_FDCWD, path, R_OK, AT_EACCESS))
+ continue;
+
+ btf = btf__parse(path, NULL);
+ err = libbpf_get_error(btf);
+ pr_debug("loading kernel BTF '%s': %d\n", path, err);
+ if (err)
+ continue;
+
+ return btf;
+ }
+
+ pr_warn("failed to find valid kernel BTF\n");
+ return libbpf_err_ptr(-ESRCH);
+}
+
+struct btf *libbpf_find_kernel_btf(void) __attribute__((alias("btf__load_vmlinux_btf")));
+
+struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf)
+{
+ char path[80];
+
+ snprintf(path, sizeof(path), "/sys/kernel/btf/%s", module_name);
+ return btf__parse_split(path, vmlinux_btf);
+}
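
A brief sketch of how the two loaders compose ("xfs" is just an example module name; error handling omitted). Module BTF is split BTF layered on top of the vmlinux BTF, so the vmlinux object must stay alive for as long as the module BTF is in use and should be freed last:

	struct btf *vmlinux_btf = btf__load_vmlinux_btf();
	struct btf *mod_btf = btf__load_module_btf("xfs", vmlinux_btf);

	/* ... look up module types, e.g. btf__find_by_name(mod_btf, ...) ... */

	btf__free(mod_btf);
	btf__free(vmlinux_btf);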
+
+int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx)
+{
+ int i, n, err;
+
switch (btf_kind(t)) {
case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
case BTF_KIND_ENUM:
- break;
+ case BTF_KIND_ENUM64:
+ return 0;
case BTF_KIND_FWD:
case BTF_KIND_CONST:
@@ -2859,175 +4833,193 @@ static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
- r = btf_dedup_remap_type_id(d, t->type);
- if (r < 0)
- return r;
- t->type = r;
- break;
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
+ return visit(&t->type, ctx);
case BTF_KIND_ARRAY: {
- struct btf_array *arr_info = btf_array(t);
+ struct btf_array *a = btf_array(t);
- r = btf_dedup_remap_type_id(d, arr_info->type);
- if (r < 0)
- return r;
- arr_info->type = r;
- r = btf_dedup_remap_type_id(d, arr_info->index_type);
- if (r < 0)
- return r;
- arr_info->index_type = r;
- break;
+ err = visit(&a->type, ctx);
+ err = err ?: visit(&a->index_type, ctx);
+ return err;
}
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
- struct btf_member *member = btf_members(t);
- __u16 vlen = btf_vlen(t);
+ struct btf_member *m = btf_members(t);
- for (i = 0; i < vlen; i++) {
- r = btf_dedup_remap_type_id(d, member->type);
- if (r < 0)
- return r;
- member->type = r;
- member++;
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->type, ctx);
+ if (err)
+ return err;
}
- break;
+ return 0;
}
case BTF_KIND_FUNC_PROTO: {
- struct btf_param *param = btf_params(t);
- __u16 vlen = btf_vlen(t);
+ struct btf_param *m = btf_params(t);
- r = btf_dedup_remap_type_id(d, t->type);
- if (r < 0)
- return r;
- t->type = r;
-
- for (i = 0; i < vlen; i++) {
- r = btf_dedup_remap_type_id(d, param->type);
- if (r < 0)
- return r;
- param->type = r;
- param++;
+ err = visit(&t->type, ctx);
+ if (err)
+ return err;
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->type, ctx);
+ if (err)
+ return err;
}
- break;
+ return 0;
}
case BTF_KIND_DATASEC: {
- struct btf_var_secinfo *var = btf_var_secinfos(t);
- __u16 vlen = btf_vlen(t);
+ struct btf_var_secinfo *m = btf_var_secinfos(t);
- for (i = 0; i < vlen; i++) {
- r = btf_dedup_remap_type_id(d, var->type);
- if (r < 0)
- return r;
- var->type = r;
- var++;
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->type, ctx);
+ if (err)
+ return err;
}
- break;
+ return 0;
}
default:
return -EINVAL;
}
-
- return 0;
}
-static int btf_dedup_remap_types(struct btf_dedup *d)
+int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx)
{
- int i, r;
+ int i, n, err;
- for (i = 1; i <= d->btf->nr_types; i++) {
- r = btf_dedup_remap_type(d, i);
- if (r < 0)
- return r;
+ err = visit(&t->name_off, ctx);
+ if (err)
+ return err;
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ struct btf_member *m = btf_members(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->name_off, ctx);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_ENUM: {
+ struct btf_enum *m = btf_enum(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->name_off, ctx);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ case BTF_KIND_ENUM64: {
+ struct btf_enum64 *m = btf_enum64(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->name_off, ctx);
+ if (err)
+ return err;
+ }
+ break;
}
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *m = btf_params(t);
+
+ for (i = 0, n = btf_vlen(t); i < n; i++, m++) {
+ err = visit(&m->name_off, ctx);
+ if (err)
+ return err;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+
return 0;
}
-static struct btf *btf_load_raw(const char *path)
+int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx)
{
- struct btf *btf;
- size_t read_cnt;
- struct stat st;
- void *data;
- FILE *f;
-
- if (stat(path, &st))
- return ERR_PTR(-errno);
+ const struct btf_ext_info *seg;
+ struct btf_ext_info_sec *sec;
+ int i, err;
- data = malloc(st.st_size);
- if (!data)
- return ERR_PTR(-ENOMEM);
+ seg = &btf_ext->func_info;
+ for_each_btf_ext_sec(seg, sec) {
+ struct bpf_func_info_min *rec;
- f = fopen(path, "rb");
- if (!f) {
- btf = ERR_PTR(-errno);
- goto cleanup;
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = visit(&rec->type_id, ctx);
+ if (err < 0)
+ return err;
+ }
}
- read_cnt = fread(data, 1, st.st_size, f);
- fclose(f);
- if (read_cnt < st.st_size) {
- btf = ERR_PTR(-EBADF);
- goto cleanup;
- }
+ seg = &btf_ext->core_relo_info;
+ for_each_btf_ext_sec(seg, sec) {
+ struct bpf_core_relo *rec;
- btf = btf__new(data, read_cnt);
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = visit(&rec->type_id, ctx);
+ if (err < 0)
+ return err;
+ }
+ }
-cleanup:
- free(data);
- return btf;
+ return 0;
}
-/*
- * Probe few well-known locations for vmlinux kernel image and try to load BTF
- * data out of it to use for target BTF.
- */
-struct btf *libbpf_find_kernel_btf(void)
+int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx)
{
- struct {
- const char *path_fmt;
- bool raw_btf;
- } locations[] = {
- /* try canonical vmlinux BTF through sysfs first */
- { "/sys/kernel/btf/vmlinux", true /* raw BTF */ },
- /* fall back to trying to find vmlinux ELF on disk otherwise */
- { "/boot/vmlinux-%1$s" },
- { "/lib/modules/%1$s/vmlinux-%1$s" },
- { "/lib/modules/%1$s/build/vmlinux" },
- { "/usr/lib/modules/%1$s/kernel/vmlinux" },
- { "/usr/lib/debug/boot/vmlinux-%1$s" },
- { "/usr/lib/debug/boot/vmlinux-%1$s.debug" },
- { "/usr/lib/debug/lib/modules/%1$s/vmlinux" },
- };
- char path[PATH_MAX + 1];
- struct utsname buf;
- struct btf *btf;
- int i;
+ const struct btf_ext_info *seg;
+ struct btf_ext_info_sec *sec;
+ int i, err;
- uname(&buf);
+ seg = &btf_ext->func_info;
+ for_each_btf_ext_sec(seg, sec) {
+ err = visit(&sec->sec_name_off, ctx);
+ if (err)
+ return err;
+ }
- for (i = 0; i < ARRAY_SIZE(locations); i++) {
- snprintf(path, PATH_MAX, locations[i].path_fmt, buf.release);
+ seg = &btf_ext->line_info;
+ for_each_btf_ext_sec(seg, sec) {
+ struct bpf_line_info_min *rec;
- if (access(path, R_OK))
- continue;
+ err = visit(&sec->sec_name_off, ctx);
+ if (err)
+ return err;
- if (locations[i].raw_btf)
- btf = btf_load_raw(path);
- else
- btf = btf__parse_elf(path, NULL);
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = visit(&rec->file_name_off, ctx);
+ if (err)
+ return err;
+ err = visit(&rec->line_off, ctx);
+ if (err)
+ return err;
+ }
+ }
- pr_debug("loading kernel BTF '%s': %ld\n",
- path, IS_ERR(btf) ? PTR_ERR(btf) : 0);
- if (IS_ERR(btf))
- continue;
+ seg = &btf_ext->core_relo_info;
+ for_each_btf_ext_sec(seg, sec) {
+ struct bpf_core_relo *rec;
- return btf;
+ err = visit(&sec->sec_name_off, ctx);
+ if (err)
+ return err;
+
+ for_each_btf_ext_rec(seg, sec, i, rec) {
+ err = visit(&rec->access_str_off, ctx);
+ if (err)
+ return err;
+ }
}
- pr_warn("failed to find valid kernel BTF\n");
- return ERR_PTR(-ESRCH);
+ return 0;
}
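/* Editorial sketch (not part of the patch): btf_type_visit_type_ids() and the
 * other visitors above are libbpf-internal helpers (declared in
 * libbpf_internal.h). A callback just matches type_id_visit_fn: it receives a
 * mutable pointer to each referenced type ID plus a caller context, and a
 * non-zero return aborts the walk, as in btf_dedup_remap_type_id(). The
 * struct and callback below are hypothetical.
 */
#include <linux/types.h>

struct ref_counter {
	__u32 target_id;
	int refs;
};

static int count_refs_cb(__u32 *type_id, void *ctx)
{
	struct ref_counter *c = ctx;

	if (*type_id == c->target_id)
		c->refs++;
	return 0;
}

/* usage (inside libbpf): btf_type_visit_type_ids(t, count_refs_cb, &counter); */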
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 70c1b7ec2bd0..8e6880d91c84 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -1,10 +1,12 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2018 Facebook */
+/*! \file */
#ifndef __LIBBPF_BTF_H
#define __LIBBPF_BTF_H
#include <stdarg.h>
+#include <stdbool.h>
#include <linux/btf.h>
#include <linux/types.h>
@@ -24,106 +26,225 @@ struct btf_type;
struct bpf_object;
-/*
- * The .BTF.ext ELF section layout defined as
- * struct btf_ext_header
- * func_info subsection
+enum btf_endianness {
+ BTF_LITTLE_ENDIAN = 0,
+ BTF_BIG_ENDIAN = 1,
+};
+
+/**
+ * @brief **btf__free()** frees all data of a BTF object
+ * @param btf BTF object to free
+ */
+LIBBPF_API void btf__free(struct btf *btf);
+
+/**
+ * @brief **btf__new()** creates a new instance of a BTF object from the raw
+ * bytes of an ELF's BTF section
+ * @param data raw bytes
+ * @param size number of bytes passed in `data`
+ * @return new BTF object instance which has to be eventually freed with
+ * **btf__free()**
+ *
+ * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
+ * error code from such a pointer `libbpf_get_error()` should be used. If
+ * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
+ * returned on error instead. In both cases thread-local `errno` variable is
+ * always set to error code as well.
+ */
+LIBBPF_API struct btf *btf__new(const void *data, __u32 size);
+
+/**
+ * @brief **btf__new_split()** create a new instance of a BTF object from the
+ * provided raw data bytes. It takes another BTF instance, **base_btf**, which
+ * serves as the base BTF that the types in the newly created BTF instance
+ * extend
+ * @param data raw bytes
+ * @param size length of raw bytes
+ * @param base_btf the base BTF object
+ * @return new BTF object instance which has to be eventually freed with
+ * **btf__free()**
*
- * The func_info subsection layout:
- * record size for struct bpf_func_info in the func_info subsection
- * struct btf_sec_func_info for section #1
- * a list of bpf_func_info records for section #1
- * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
- * but may not be identical
- * struct btf_sec_func_info for section #2
- * a list of bpf_func_info records for section #2
- * ......
+ * If *base_btf* is NULL, `btf__new_split()` is equivalent to `btf__new()` and
+ * creates non-split BTF.
*
- * Note that the bpf_func_info record size in .BTF.ext may not
- * be the same as the one defined in include/uapi/linux/bpf.h.
- * The loader should ensure that record_size meets minimum
- * requirement and pass the record as is to the kernel. The
- * kernel will handle the func_info properly based on its contents.
+ * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
+ * error code from such a pointer `libbpf_get_error()` should be used. If
+ * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
+ * returned on error instead. In both cases thread-local `errno` variable is
+ * always set to error code as well.
*/
-struct btf_ext_header {
- __u16 magic;
- __u8 version;
- __u8 flags;
- __u32 hdr_len;
-
- /* All offsets are in bytes relative to the end of this header */
- __u32 func_info_off;
- __u32 func_info_len;
- __u32 line_info_off;
- __u32 line_info_len;
-
- /* optional part of .BTF.ext header */
- __u32 field_reloc_off;
- __u32 field_reloc_len;
-};
+LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf);
-LIBBPF_API void btf__free(struct btf *btf);
-LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
-LIBBPF_API struct btf *btf__parse_elf(const char *path,
- struct btf_ext **btf_ext);
-LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
-LIBBPF_API int btf__load(struct btf *btf);
+/**
+ * @brief **btf__new_empty()** creates an empty BTF object. Use
+ * `btf__add_*()` to populate such BTF object.
+ * @return new BTF object instance which has to be eventually freed with
+ * **btf__free()**
+ *
+ * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
+ * error code from such a pointer `libbpf_get_error()` should be used. If
+ * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
+ * returned on error instead. In both cases thread-local `errno` variable is
+ * always set to error code as well.
+ */
+LIBBPF_API struct btf *btf__new_empty(void);
+
+/**
+ * @brief **btf__new_empty_split()** creates an empty BTF object, just like
+ * **btf__new_empty()**, but with a provided *base_btf* on top of which the
+ * new split BTF will be built
+ * @return new BTF object instance which has to be eventually freed with
+ * **btf__free()**
+ *
+ * If *base_btf* is NULL, `btf__new_empty_split()` is equivalent to
+ * `btf__new_empty()` and creates non-split BTF.
+ *
+ * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract
+ * error code from such a pointer `libbpf_get_error()` should be used. If
+ * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is
+ * returned on error instead. In both cases thread-local `errno` variable is
+ * always set to error code as well.
+ */
+LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf);
+
+LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
+LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf);
+LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
+LIBBPF_API struct btf *btf__parse_elf_split(const char *path, struct btf *base_btf);
+LIBBPF_API struct btf *btf__parse_raw(const char *path);
+LIBBPF_API struct btf *btf__parse_raw_split(const char *path, struct btf *base_btf);
+
+LIBBPF_API struct btf *btf__load_vmlinux_btf(void);
+LIBBPF_API struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_btf);
+
+LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id);
+LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf);
+
+LIBBPF_API int btf__load_into_kernel(struct btf *btf);
LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
const char *type_name);
LIBBPF_API __s32 btf__find_by_name_kind(const struct btf *btf,
const char *type_name, __u32 kind);
-LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
+LIBBPF_API __u32 btf__type_cnt(const struct btf *btf);
+LIBBPF_API const struct btf *btf__base_btf(const struct btf *btf);
LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
+LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
+LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
+LIBBPF_API enum btf_endianness btf__endianness(const struct btf *btf);
+LIBBPF_API int btf__set_endianness(struct btf *btf, enum btf_endianness endian);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
LIBBPF_API int btf__fd(const struct btf *btf);
-LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
+LIBBPF_API const void *btf__raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
-LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
-LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
- __u32 expected_key_size,
- __u32 expected_value_size,
- __u32 *key_type_id, __u32 *value_type_id);
+LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
-LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
+LIBBPF_API struct btf_ext *btf_ext__new(const __u8 *data, __u32 size);
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
-LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
- __u32 *size);
-LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *cnt);
-LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt);
-LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
-LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
-
-LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
+LIBBPF_API const void *btf_ext__raw_data(const struct btf_ext *btf_ext, __u32 *size);
+
+LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
+LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
+LIBBPF_API int btf__add_type(struct btf *btf, const struct btf *src_btf,
+ const struct btf_type *src_type);
+/**
+ * @brief **btf__add_btf()** appends all the BTF types from *src_btf* into *btf*
+ * @param btf BTF object which all the BTF types and strings are added to
+ * @param src_btf BTF object which all BTF types and referenced strings are copied from
+ * @return BTF type ID of the first appended BTF type, or negative error code
+ *
+ * **btf__add_btf()** can be used to simply and efficiently append the entire
+ * contents of one BTF object to another one. All the BTF type data is copied
+ * over, all referenced type IDs are adjusted by adding a necessary ID offset.
+ * Only strings referenced from BTF types are copied over and deduplicated, so
+ * if there were some unused strings in *src_btf*, those won't be copied over,
+ * which is consistent with the general string deduplication semantics of BTF
+ * writing APIs.
+ *
+ * If any error is encountered during this process, the contents of *btf* are
+ * left intact, which means that **btf__add_btf()** follows transactional
+ * semantics and the operation as a whole is all-or-nothing.
+ *
+ * *src_btf* has to be non-split BTF; copying types from split BTF is not
+ * supported yet and results in an -ENOTSUP error code.
+ */
+LIBBPF_API int btf__add_btf(struct btf *btf, const struct btf *src_btf);
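/* Editorial usage sketch (not part of the patch): append 'src' into 'dst'.
 * The return value is the new ID of src's first type, so, assuming src is
 * non-split BTF (IDs 1..N), an old src ID x maps to x + (first_id - 1).
 * merge_btfs() is a hypothetical wrapper.
 */
static int merge_btfs(struct btf *dst, const struct btf *src)
{
	int first_id = btf__add_btf(dst, src);

	if (first_id < 0)
		return first_id;	/* dst is left intact on error */
	return first_id - 1;		/* ID offset to add to old src type IDs */
}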
+
+LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding);
+LIBBPF_API int btf__add_float(struct btf *btf, const char *name, size_t byte_sz);
+LIBBPF_API int btf__add_ptr(struct btf *btf, int ref_type_id);
+LIBBPF_API int btf__add_array(struct btf *btf,
+ int index_type_id, int elem_type_id, __u32 nr_elems);
+/* struct/union construction APIs */
+LIBBPF_API int btf__add_struct(struct btf *btf, const char *name, __u32 sz);
+LIBBPF_API int btf__add_union(struct btf *btf, const char *name, __u32 sz);
+LIBBPF_API int btf__add_field(struct btf *btf, const char *name, int field_type_id,
+ __u32 bit_offset, __u32 bit_size);
+
+/* enum construction APIs */
+LIBBPF_API int btf__add_enum(struct btf *btf, const char *name, __u32 bytes_sz);
+LIBBPF_API int btf__add_enum_value(struct btf *btf, const char *name, __s64 value);
+LIBBPF_API int btf__add_enum64(struct btf *btf, const char *name, __u32 bytes_sz, bool is_signed);
+LIBBPF_API int btf__add_enum64_value(struct btf *btf, const char *name, __u64 value);
+
+enum btf_fwd_kind {
+ BTF_FWD_STRUCT = 0,
+ BTF_FWD_UNION = 1,
+ BTF_FWD_ENUM = 2,
+};
+
+LIBBPF_API int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind);
+LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id);
+LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id);
+LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
+LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
+LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id);
+
+/* func and func_proto construction APIs */
+LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
+ enum btf_func_linkage linkage, int proto_type_id);
+LIBBPF_API int btf__add_func_proto(struct btf *btf, int ret_type_id);
+LIBBPF_API int btf__add_func_param(struct btf *btf, const char *name, int type_id);
+
+/* var & datasec construction APIs */
+LIBBPF_API int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id);
+LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz);
+LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
+ __u32 offset, __u32 byte_sz);
+
+/* tag construction API */
+LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_type_id,
+ int component_idx);
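/* Editorial usage sketch (not part of the patch): build BTF describing
 * "struct pair { int a; int b; };" with the construction APIs above. Every
 * btf__add_*() call returns a new type ID or a negative error; fields are
 * appended to the most recently added struct/union. build_pair_btf() is a
 * hypothetical helper.
 */
#include <linux/btf.h>		/* BTF_INT_SIGNED */
#include <bpf/btf.h>
#include <bpf/libbpf.h>		/* libbpf_get_error() */

static struct btf *build_pair_btf(void)
{
	struct btf *btf = btf__new_empty();
	int int_id, pair_id;

	if (libbpf_get_error(btf))
		return NULL;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* type [1] */
	pair_id = btf__add_struct(btf, "pair", 8);		/* type [2], 8 bytes */
	if (int_id < 0 || pair_id < 0 ||
	    btf__add_field(btf, "a", int_id, 0, 0) < 0 ||	/* int a; at bit 0  */
	    btf__add_field(btf, "b", int_id, 32, 0) < 0) {	/* int b; at bit 32 */
		btf__free(btf);
		return NULL;
	}
	return btf;
}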
struct btf_dedup_opts {
- unsigned int dedup_table_size;
- bool dont_resolve_fwds;
+ size_t sz;
+ /* optional .BTF.ext info to dedup along the main BTF info */
+ struct btf_ext *btf_ext;
+ /* force hash collisions (used for testing) */
+ bool force_collisions;
+ size_t :0;
};
+#define btf_dedup_opts__last_field force_collisions
-LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
- const struct btf_dedup_opts *opts);
+LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
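/* Editorial usage sketch (not part of the patch): the reworked btf__dedup()
 * takes a single opts struct; .BTF.ext info now rides along as an option
 * instead of a separate argument. 'ext' may be NULL when there is no
 * .BTF.ext data; dedup_btf() is a hypothetical wrapper.
 */
#include <bpf/btf.h>	/* also pulls in LIBBPF_OPTS() via libbpf_common.h */

static int dedup_btf(struct btf *btf, struct btf_ext *ext)
{
	LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = ext);

	return btf__dedup(btf, &opts);
}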
struct btf_dump;
struct btf_dump_opts {
- void *ctx;
+ size_t sz;
};
+#define btf_dump_opts__last_field sz
typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const struct btf_dump_opts *opts,
- btf_dump_printf_fn_t printf_fn);
+ btf_dump_printf_fn_t printf_fn,
+ void *ctx,
+ const struct btf_dump_opts *opts);
+
LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
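/* Editorial usage sketch (not part of the patch): the btf_dump__new()
 * argument order is now (btf, printf_fn, ctx, opts), with opts allowed to be
 * NULL. The FILE-based callback and dump_type_to() helper are hypothetical.
 */
#include <stdio.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

static void print_to_file(void *ctx, const char *fmt, va_list args)
{
	vfprintf(ctx, fmt, args);	/* ctx is the FILE * passed below */
}

static int dump_type_to(FILE *out, const struct btf *btf, __u32 id)
{
	struct btf_dump *d = btf_dump__new(btf, print_to_file, out, NULL);
	long err = libbpf_get_error(d);

	if (err)
		return err;
	err = btf_dump__dump_type(d, id);
	btf_dump__free(d);
	return err;
}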
@@ -143,16 +264,59 @@ struct btf_dump_emit_type_decl_opts {
* necessary indentation already
*/
int indent_level;
+ /* strip all the const/volatile/restrict mods */
+ bool strip_mods;
+ size_t :0;
};
-#define btf_dump_emit_type_decl_opts__last_field indent_level
+#define btf_dump_emit_type_decl_opts__last_field strip_mods
LIBBPF_API int
btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
const struct btf_dump_emit_type_decl_opts *opts);
+
+struct btf_dump_type_data_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ const char *indent_str;
+ int indent_level;
+ /* below match "show" flags for bpf_show_snprintf() */
+ bool compact; /* no newlines/indentation */
+ bool skip_names; /* skip member/type names */
+ bool emit_zeroes; /* show 0-valued fields */
+ size_t :0;
+};
+#define btf_dump_type_data_opts__last_field emit_zeroes
+
+LIBBPF_API int
+btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
+ const void *data, size_t data_sz,
+ const struct btf_dump_type_data_opts *opts);
+
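/* Editorial usage sketch (not part of the patch): render a raw buffer as a
 * value of BTF type 'id'. 'd' is a btf_dump created as above; 'data' and
 * 'data_sz' are hypothetical caller-provided values.
 */
static int dump_value(struct btf_dump *d, __u32 id, const void *data, size_t data_sz)
{
	LIBBPF_OPTS(btf_dump_type_data_opts, opts,
		.compact = true,	/* single line, no indentation */
		.emit_zeroes = true,	/* show zero-valued fields too */
	);

	return btf_dump__dump_type_data(d, id, data, data_sz, &opts);
}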
/*
- * A set of helpers for easier BTF types handling
+ * A set of helpers for easier BTF types handling.
+ *
+ * The inline functions below rely on constants from the kernel headers which
+ * may not be available for applications including this header file. To avoid
+ * compilation errors, we define all the constants here that were added after
+ * the initial introduction of the BTF_KIND* constants.
*/
+#ifndef BTF_KIND_FUNC
+#define BTF_KIND_FUNC 12 /* Function */
+#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */
+#endif
+#ifndef BTF_KIND_VAR
+#define BTF_KIND_VAR 14 /* Variable */
+#define BTF_KIND_DATASEC 15 /* Section */
+#endif
+#ifndef BTF_KIND_FLOAT
+#define BTF_KIND_FLOAT 16 /* Floating point */
+#endif
+/* The kernel header switched to enums, so the following were never #defined */
+#define BTF_KIND_DECL_TAG 17 /* Decl Tag */
+#define BTF_KIND_TYPE_TAG 18 /* Type Tag */
+#define BTF_KIND_ENUM64 19 /* Enum for up to 64-bit values */
+
static inline __u16 btf_kind(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info);
@@ -168,6 +332,11 @@ static inline bool btf_kflag(const struct btf_type *t)
return BTF_INFO_KFLAG(t->info);
}
+static inline bool btf_is_void(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_UNKN;
+}
+
static inline bool btf_is_int(const struct btf_type *t)
{
return btf_kind(t) == BTF_KIND_INT;
@@ -205,6 +374,11 @@ static inline bool btf_is_enum(const struct btf_type *t)
return btf_kind(t) == BTF_KIND_ENUM;
}
+static inline bool btf_is_enum64(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_ENUM64;
+}
+
static inline bool btf_is_fwd(const struct btf_type *t)
{
return btf_kind(t) == BTF_KIND_FWD;
@@ -236,7 +410,8 @@ static inline bool btf_is_mod(const struct btf_type *t)
return kind == BTF_KIND_VOLATILE ||
kind == BTF_KIND_CONST ||
- kind == BTF_KIND_RESTRICT;
+ kind == BTF_KIND_RESTRICT ||
+ kind == BTF_KIND_TYPE_TAG;
}
static inline bool btf_is_func(const struct btf_type *t)
@@ -259,6 +434,33 @@ static inline bool btf_is_datasec(const struct btf_type *t)
return btf_kind(t) == BTF_KIND_DATASEC;
}
+static inline bool btf_is_float(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_FLOAT;
+}
+
+static inline bool btf_is_decl_tag(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_DECL_TAG;
+}
+
+static inline bool btf_is_type_tag(const struct btf_type *t)
+{
+ return btf_kind(t) == BTF_KIND_TYPE_TAG;
+}
+
+static inline bool btf_is_any_enum(const struct btf_type *t)
+{
+ return btf_is_enum(t) || btf_is_enum64(t);
+}
+
+static inline bool btf_kind_core_compat(const struct btf_type *t1,
+ const struct btf_type *t2)
+{
+ return btf_kind(t1) == btf_kind(t2) ||
+ (btf_is_any_enum(t1) && btf_is_any_enum(t2));
+}
+
static inline __u8 btf_int_encoding(const struct btf_type *t)
{
return BTF_INT_ENCODING(*(__u32 *)(t + 1));
@@ -284,6 +486,39 @@ static inline struct btf_enum *btf_enum(const struct btf_type *t)
return (struct btf_enum *)(t + 1);
}
+struct btf_enum64;
+
+static inline struct btf_enum64 *btf_enum64(const struct btf_type *t)
+{
+ return (struct btf_enum64 *)(t + 1);
+}
+
+static inline __u64 btf_enum64_value(const struct btf_enum64 *e)
+{
+ /* struct btf_enum64 is introduced in Linux 6.0, which is very
+ * bleeding-edge. Here we are avoiding relying on struct btf_enum64
+ * definition coming from kernel UAPI headers to support wider range
+ * of system-wide kernel headers.
+ *
+ * Given this header can be also included from C++ applications, that
+ * further restricts C tricks we can use (like using compatible
+ * anonymous struct). So just treat struct btf_enum64 as
+ * a three-element array of u32 and access second (lo32) and third
+ * (hi32) elements directly.
+ *
+ * For reference, here is a struct btf_enum64 definition:
+ *
+ * struct btf_enum64 {
+ * __u32 name_off;
+ * __u32 val_lo32;
+ * __u32 val_hi32;
+ * };
+ */
+ const __u32 *e64 = (const __u32 *)e;
+
+ return ((__u64)e64[2] << 32) | e64[1];
+}
+
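/* Editorial sketch (not part of the patch): iterate a BTF_KIND_ENUM64 type's
 * values with the accessors above. Accessing v->name_off assumes linux/btf.h
 * is new enough (v6.0+) to define struct btf_enum64; btf_enum64_value()
 * itself deliberately avoids that dependency. print_enum64() is hypothetical.
 */
#include <stdio.h>

static void print_enum64(const struct btf *btf, const struct btf_type *t)
{
	const struct btf_enum64 *v = btf_enum64(t);
	int i, n = btf_vlen(t);

	for (i = 0; i < n; i++, v++)
		printf("%s = %llu\n",
		       btf__name_by_offset(btf, v->name_off),
		       (unsigned long long)btf_enum64_value(v));
}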
static inline struct btf_member *btf_members(const struct btf_type *t)
{
return (struct btf_member *)(t + 1);
@@ -327,6 +562,12 @@ btf_var_secinfos(const struct btf_type *t)
return (struct btf_var_secinfo *)(t + 1);
}
+struct btf_decl_tag;
+static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t)
+{
+ return (struct btf_decl_tag *)(t + 1);
+}
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 0c28ee82834b..580985ee5545 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -10,17 +10,18 @@
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
+#include <ctype.h>
+#include <endian.h>
#include <errno.h>
+#include <limits.h>
#include <linux/err.h>
#include <linux/btf.h>
+#include <linux/kernel.h>
#include "btf.h"
#include "hashmap.h"
#include "libbpf.h"
#include "libbpf_internal.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
@@ -55,16 +56,41 @@ struct btf_dump_type_aux_state {
__u8 referenced: 1;
};
+/* indent string length; one indent string is added for each indent level */
+#define BTF_DATA_INDENT_STR_LEN 32
+
+/*
+ * Common internal data for BTF type data dump operations.
+ */
+struct btf_dump_data {
+ const void *data_end; /* end of valid data to show */
+ bool compact;
+ bool skip_names;
+ bool emit_zeroes;
+ __u8 indent_lvl; /* base indent level */
+ char indent_str[BTF_DATA_INDENT_STR_LEN];
+ /* below are used during iteration */
+ int depth;
+ bool is_array_member;
+ bool is_array_terminated;
+ bool is_array_char;
+};
+
struct btf_dump {
const struct btf *btf;
- const struct btf_ext *btf_ext;
btf_dump_printf_fn_t printf_fn;
- struct btf_dump_opts opts;
+ void *cb_ctx;
+ int ptr_sz;
+ bool strip_mods;
+ bool skip_anon_defs;
+ int last_id;
/* per-type auxiliary state */
struct btf_dump_type_aux_state *type_states;
+ size_t type_states_cap;
/* per-type optional cached unique name, must be freed, if present */
const char **cached_names;
+ size_t cached_names_cap;
/* topo-sorted list of dependent type definitions */
__u32 *emit_queue;
@@ -86,23 +112,20 @@ struct btf_dump {
* name occurrences
*/
struct hashmap *ident_names;
+ /*
+ * data for typed display; allocated if needed.
+ */
+ struct btf_dump_data *typed_dump;
};
-static size_t str_hash_fn(const void *key, void *ctx)
+static size_t str_hash_fn(long key, void *ctx)
{
- const char *s = key;
- size_t h = 0;
-
- while (*s) {
- h = h * 31 + *s;
- s++;
- }
- return h;
+ return str_hash((void *)key);
}
-static bool str_equal_fn(const void *a, const void *b, void *ctx)
+static bool str_equal_fn(long a, long b, void *ctx)
{
- return strcmp(a, b) == 0;
+ return strcmp((void *)a, (void *)b) == 0;
}
static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
@@ -115,28 +138,35 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
va_list args;
va_start(args, fmt);
- d->printf_fn(d->opts.ctx, fmt, args);
+ d->printf_fn(d->cb_ctx, fmt, args);
va_end(args);
}
static int btf_dump_mark_referenced(struct btf_dump *d);
+static int btf_dump_resize(struct btf_dump *d);
struct btf_dump *btf_dump__new(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const struct btf_dump_opts *opts,
- btf_dump_printf_fn_t printf_fn)
+ btf_dump_printf_fn_t printf_fn,
+ void *ctx,
+ const struct btf_dump_opts *opts)
{
struct btf_dump *d;
int err;
+ if (!OPTS_VALID(opts, btf_dump_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ if (!printf_fn)
+ return libbpf_err_ptr(-EINVAL);
+
d = calloc(1, sizeof(struct btf_dump));
if (!d)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
d->btf = btf;
- d->btf_ext = btf_ext;
d->printf_fn = printf_fn;
- d->opts.ctx = opts ? opts->ctx : NULL;
+ d->cb_ctx = ctx;
+ d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
if (IS_ERR(d->type_names)) {
@@ -150,45 +180,68 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
d->ident_names = NULL;
goto err;
}
- d->type_states = calloc(1 + btf__get_nr_types(d->btf),
- sizeof(d->type_states[0]));
- if (!d->type_states) {
- err = -ENOMEM;
- goto err;
- }
- d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
- sizeof(d->cached_names[0]));
- if (!d->cached_names) {
- err = -ENOMEM;
- goto err;
- }
-
- /* VOID is special */
- d->type_states[0].order_state = ORDERED;
- d->type_states[0].emit_state = EMITTED;
- /* eagerly determine referenced types for anon enums */
- err = btf_dump_mark_referenced(d);
+ err = btf_dump_resize(d);
if (err)
goto err;
return d;
err:
btf_dump__free(d);
- return ERR_PTR(err);
+ return libbpf_err_ptr(err);
+}
+
+static int btf_dump_resize(struct btf_dump *d)
+{
+ int err, last_id = btf__type_cnt(d->btf) - 1;
+
+ if (last_id <= d->last_id)
+ return 0;
+
+ if (libbpf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
+ sizeof(*d->type_states), last_id + 1))
+ return -ENOMEM;
+ if (libbpf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
+ sizeof(*d->cached_names), last_id + 1))
+ return -ENOMEM;
+
+ if (d->last_id == 0) {
+ /* VOID is special */
+ d->type_states[0].order_state = ORDERED;
+ d->type_states[0].emit_state = EMITTED;
+ }
+
+ /* eagerly determine referenced types for anon enums */
+ err = btf_dump_mark_referenced(d);
+ if (err)
+ return err;
+
+ d->last_id = last_id;
+ return 0;
+}
+
+static void btf_dump_free_names(struct hashmap *map)
+{
+ size_t bkt;
+ struct hashmap_entry *cur;
+
+ hashmap__for_each_entry(map, cur, bkt)
+ free((void *)cur->pkey);
+
+ hashmap__free(map);
}
void btf_dump__free(struct btf_dump *d)
{
- int i, cnt;
+ int i;
- if (!d)
+ if (IS_ERR_OR_NULL(d))
return;
free(d->type_states);
if (d->cached_names) {
/* any set cached name is owned by us and should be freed */
- for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) {
+ for (i = 0; i <= d->last_id; i++) {
if (d->cached_names[i])
free((void *)d->cached_names[i]);
}
@@ -196,8 +249,8 @@ void btf_dump__free(struct btf_dump *d)
free(d->cached_names);
free(d->emit_queue);
free(d->decl_stack);
- hashmap__free(d->type_names);
- hashmap__free(d->ident_names);
+ btf_dump_free_names(d->type_names);
+ btf_dump_free_names(d->ident_names);
free(d);
}
@@ -225,13 +278,17 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
{
int err, i;
- if (id > btf__get_nr_types(d->btf))
- return -EINVAL;
+ if (id >= btf__type_cnt(d->btf))
+ return libbpf_err(-EINVAL);
+
+ err = btf_dump_resize(d);
+ if (err)
+ return libbpf_err(err);
d->emit_queue_cnt = 0;
err = btf_dump_order_type(d, id, false);
if (err < 0)
- return err;
+ return libbpf_err(err);
for (i = 0; i < d->emit_queue_cnt; i++)
btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);
@@ -253,18 +310,20 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
*/
static int btf_dump_mark_referenced(struct btf_dump *d)
{
- int i, j, n = btf__get_nr_types(d->btf);
+ int i, j, n = btf__type_cnt(d->btf);
const struct btf_type *t;
__u16 vlen;
- for (i = 1; i <= n; i++) {
+ for (i = d->last_id + 1; i < n; i++) {
t = btf__type_by_id(d->btf, i);
vlen = btf_vlen(t);
switch (btf_kind(t)) {
case BTF_KIND_INT:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_FWD:
+ case BTF_KIND_FLOAT:
break;
case BTF_KIND_VOLATILE:
@@ -274,6 +333,8 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
+ case BTF_KIND_DECL_TAG:
+ case BTF_KIND_TYPE_TAG:
d->type_states[t->type].referenced = 1;
break;
@@ -312,6 +373,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
}
return 0;
}
+
static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
{
__u32 *new_queue;
@@ -319,8 +381,7 @@ static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
if (d->emit_queue_cnt >= d->emit_queue_cap) {
new_cap = max(16, d->emit_queue_cap * 3 / 2);
- new_queue = realloc(d->emit_queue,
- new_cap * sizeof(new_queue[0]));
+ new_queue = libbpf_reallocarray(d->emit_queue, new_cap, sizeof(new_queue[0]));
if (!new_queue)
return -ENOMEM;
d->emit_queue = new_queue;
@@ -439,6 +500,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
switch (btf_kind(t)) {
case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
tstate->order_state = ORDERED;
return 0;
@@ -448,7 +510,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
return err;
case BTF_KIND_ARRAY:
- return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
+ return btf_dump_order_type(d, btf_array(t)->type, false);
case BTF_KIND_STRUCT:
case BTF_KIND_UNION: {
@@ -480,6 +542,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
return 1;
}
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_FWD:
/*
* non-anonymous or non-referenced enums are top-level
@@ -516,6 +579,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPE_TAG:
return btf_dump_order_type(d, t->type, through_ptr);
case BTF_KIND_FUNC_PROTO: {
@@ -540,6 +604,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DATASEC:
+ case BTF_KIND_DECL_TAG:
d->type_states[id].order_state = ORDERED;
return 0;
@@ -548,6 +613,9 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
}
}
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+ const struct btf_type *t);
+
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t);
static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
@@ -658,7 +726,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
if (!btf_dump_is_blacklisted(d, id)) {
btf_dump_emit_typedef_def(d, id, t, 0);
btf_dump_printf(d, ";\n\n");
- };
+ }
tstate->fwd_emitted = 1;
break;
default:
@@ -670,9 +738,13 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
switch (kind) {
case BTF_KIND_INT:
+ /* Emit type alias definitions if necessary */
+ btf_dump_emit_missing_aliases(d, id, t);
+
tstate->emit_state = EMITTED;
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
if (top_level_def) {
btf_dump_emit_enum_def(d, id, t, 0);
btf_dump_printf(d, ";\n\n");
@@ -683,6 +755,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ case BTF_KIND_TYPE_TAG:
btf_dump_emit_type(d, t->type, cont_id);
break;
case BTF_KIND_ARRAY:
@@ -743,11 +816,11 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
break;
case BTF_KIND_FUNC_PROTO: {
const struct btf_param *p = btf_params(t);
- __u16 vlen = btf_vlen(t);
+ __u16 n = btf_vlen(t);
int i;
btf_dump_emit_type(d, t->type, cont_id);
- for (i = 0; i < vlen; i++, p++)
+ for (i = 0; i < n; i++, p++)
btf_dump_emit_type(d, p->type, cont_id);
break;
@@ -761,14 +834,9 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
const struct btf_type *t)
{
const struct btf_member *m;
- int align, i, bit_sz;
+ int max_align = 1, align, i, bit_sz;
__u16 vlen;
- align = btf__align_of(btf, id);
- /* size of a non-packed struct has to be a multiple of its alignment*/
- if (align && t->size % align)
- return true;
-
m = btf_members(t);
vlen = btf_vlen(t);
/* all non-bitfield fields have to be naturally aligned */
@@ -777,8 +845,11 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
bit_sz = btf_member_bitfield_size(t, i);
if (align && bit_sz == 0 && m->offset % (8 * align) != 0)
return true;
+ max_align = max(align, max_align);
}
-
+ /* size of a non-packed struct has to be a multiple of its alignment */
+ if (t->size % max_align != 0)
+ return true;
/*
* if original struct was marked as packed, but its layout is
* naturally aligned, we'll detect that it's not packed
@@ -786,52 +857,106 @@ static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
return false;
}
-static int chip_away_bits(int total, int at_most)
-{
- return total % at_most ? : at_most;
-}
-
static void btf_dump_emit_bit_padding(const struct btf_dump *d,
- int cur_off, int m_off, int m_bit_sz,
- int align, int lvl)
+ int cur_off, int next_off, int next_align,
+ bool in_bitfield, int lvl)
{
- int off_diff = m_off - cur_off;
- int ptr_bits = sizeof(void *) * 8;
+ const struct {
+ const char *name;
+ int bits;
+ } pads[] = {
+ {"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
+ };
+ int new_off, pad_bits, bits, i;
+ const char *pad_type;
+
+ if (cur_off >= next_off)
+ return; /* no gap */
+
+ /* For filling out padding we want to take advantage of
+ * natural alignment rules to minimize unnecessary explicit
+ * padding. First, we find the largest type (among long, int,
+ * short, or char) that can be used to force naturally aligned
+ * boundary. Once determined, we'll use such type to fill in
+ * the remaining padding gap. In some cases we can rely on
+ * compiler filling some gaps, but sometimes we need to force
+ * alignment to close natural alignment with markers like
+ * `long: 0` (this is always the case for bitfields). Note
+ * that even if struct itself has, let's say 4-byte alignment
+ * (i.e., it only uses up to int-aligned types), using `long:
+ * X;` explicit padding doesn't actually change struct's
+ * overall alignment requirements, but compiler does take into
+ * account that type's (long, in this example) natural
+ * alignment requirements when adding implicit padding. We use
+ * this fact heavily and don't worry about ruining correct
+ * struct alignment requirement.
+ */
+ for (i = 0; i < ARRAY_SIZE(pads); i++) {
+ pad_bits = pads[i].bits;
+ pad_type = pads[i].name;
- if (off_diff <= 0)
- /* no gap */
- return;
- if (m_bit_sz == 0 && off_diff < align * 8)
- /* natural padding will take care of a gap */
- return;
+ new_off = roundup(cur_off, pad_bits);
+ if (new_off <= next_off)
+ break;
+ }
- while (off_diff > 0) {
- const char *pad_type;
- int pad_bits;
-
- if (ptr_bits > 32 && off_diff > 32) {
- pad_type = "long";
- pad_bits = chip_away_bits(off_diff, ptr_bits);
- } else if (off_diff > 16) {
- pad_type = "int";
- pad_bits = chip_away_bits(off_diff, 32);
- } else if (off_diff > 8) {
- pad_type = "short";
- pad_bits = chip_away_bits(off_diff, 16);
- } else {
- pad_type = "char";
- pad_bits = chip_away_bits(off_diff, 8);
+ if (new_off > cur_off && new_off <= next_off) {
+ /* We need explicit `<type>: 0` aligning mark if next
+ * field is right on alignment offset and its
+ * alignment requirement is less strict than <type>'s
+ * alignment (so compiler won't naturally align to the
+ * offset we expect), or if a subsequent `<type>: X`
+ * will actually completely fit in the remaining hole,
+ * making compiler basically ignore `<type>: X`
+ * completely.
+ */
+ if (in_bitfield ||
+ (new_off == next_off && roundup(cur_off, next_align * 8) != new_off) ||
+ (new_off != next_off && next_off - new_off <= new_off - cur_off))
+ /* but for bitfields we'll emit explicit bit count */
+ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type,
+ in_bitfield ? new_off - cur_off : 0);
+ cur_off = new_off;
+ }
+
+ /* Now we know we start at naturally aligned offset for a chosen
+ * padding type (long, int, short, or char), and so the rest is just
+ * a straightforward filling of remaining padding gap with full
+ * `<type>: sizeof(<type>);` markers, except for the last one, which
+ * might need smaller than sizeof(<type>) padding.
+ */
+ while (cur_off != next_off) {
+ bits = min(next_off - cur_off, pad_bits);
+ if (bits == pad_bits) {
+ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
+ cur_off += bits;
+ continue;
+ }
+ /* For the remainder padding that doesn't cover entire
+ * pad_type bit length, we pick the smallest necessary type.
+ * This is pure aesthetics, we could have just used `long`,
+ * but having smallest necessary one communicates better the
+ * scale of the padding gap.
+ */
+ for (i = ARRAY_SIZE(pads) - 1; i >= 0; i--) {
+ pad_type = pads[i].name;
+ pad_bits = pads[i].bits;
+ if (pad_bits < bits)
+ continue;
+
+ btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, bits);
+ cur_off += bits;
+ break;
}
- btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
- off_diff -= pad_bits;
}
}
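/* Editorial worked example (not part of the patch) for the padding logic
 * above, assuming 8-byte pointers: with cur_off = 8 (after a lone 'char'),
 * next_off = 64 and next_align = 4 (an 'int' member the original layout
 * placed at byte 8), the first loop picks pad_type = "long" because
 * roundup(8, 64) = 64 <= 64. Since roundup(8, 4 * 8) = 32 != 64, the
 * compiler would not naturally place the int at bit 64, so an explicit
 * "long: 0;" marker is emitted, cur_off becomes 64, and the trailing while
 * loop has nothing left to fill. The rendered struct looks like:
 *
 *	struct s {
 *		char a;
 *		long: 0;
 *		int b;
 *	};
 */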
static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
- btf_dump_printf(d, "%s %s",
+ btf_dump_printf(d, "%s%s%s",
btf_is_struct(t) ? "struct" : "union",
+ t->name_off ? " " : "",
btf_dump_type_name(d, id));
}
@@ -842,9 +967,11 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
{
const struct btf_member *m = btf_members(t);
bool is_struct = btf_is_struct(t);
- int align, i, packed, off = 0;
+ bool packed, prev_bitfield = false;
+ int align, i, off = 0;
__u16 vlen = btf_vlen(t);
+ align = btf__align_of(d->btf, id);
packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
btf_dump_printf(d, "%s%s%s {",
@@ -854,79 +981,195 @@ static void btf_dump_emit_struct_def(struct btf_dump *d,
for (i = 0; i < vlen; i++, m++) {
const char *fname;
- int m_off, m_sz;
+ int m_off, m_sz, m_align;
+ bool in_bitfield;
fname = btf_name_of(d, m->name_off);
m_sz = btf_member_bitfield_size(t, i);
m_off = btf_member_bit_offset(t, i);
- align = packed ? 1 : btf__align_of(d->btf, m->type);
+ m_align = packed ? 1 : btf__align_of(d->btf, m->type);
- btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
+ in_bitfield = prev_bitfield && m_sz != 0;
+
+ btf_dump_emit_bit_padding(d, off, m_off, m_align, in_bitfield, lvl + 1);
btf_dump_printf(d, "\n%s", pfx(lvl + 1));
btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
if (m_sz) {
btf_dump_printf(d, ": %d", m_sz);
off = m_off + m_sz;
+ prev_bitfield = true;
} else {
- m_sz = max(0, btf__resolve_size(d->btf, m->type));
+ m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
off = m_off + m_sz * 8;
+ prev_bitfield = false;
}
+
btf_dump_printf(d, ";");
}
/* pad at the end, if necessary */
- if (is_struct) {
- align = packed ? 1 : btf__align_of(d->btf, id);
- btf_dump_emit_bit_padding(d, off, t->size * 8, 0, align,
- lvl + 1);
- }
+ if (is_struct)
+ btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1);
- if (vlen)
+ /*
+ * Keep `struct empty {}` on a single line,
+ * only print newline when there are regular or padding fields.
+ */
+ if (vlen || t->size) {
btf_dump_printf(d, "\n");
- btf_dump_printf(d, "%s}", pfx(lvl));
+ btf_dump_printf(d, "%s}", pfx(lvl));
+ } else {
+ btf_dump_printf(d, "}");
+ }
if (packed)
btf_dump_printf(d, " __attribute__((packed))");
}
+static const char *missing_base_types[][2] = {
+ /*
+ * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
+ * SIMD intrinsics. Alias them to standard base types.
+ */
+ { "__Poly8_t", "unsigned char" },
+ { "__Poly16_t", "unsigned short" },
+ { "__Poly64_t", "unsigned long long" },
+ { "__Poly128_t", "unsigned __int128" },
+};
+
+static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
+ const struct btf_type *t)
+{
+ const char *name = btf_dump_type_name(d, id);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
+ if (strcmp(name, missing_base_types[i][0]) == 0) {
+ btf_dump_printf(d, "typedef %s %s;\n\n",
+ missing_base_types[i][1], name);
+ break;
+ }
+ }
+}
+
static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
const struct btf_type *t)
{
btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
}
-static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
- const struct btf_type *t,
- int lvl)
+static void btf_dump_emit_enum32_val(struct btf_dump *d,
+ const struct btf_type *t,
+ int lvl, __u16 vlen)
{
const struct btf_enum *v = btf_enum(t);
- __u16 vlen = btf_vlen(t);
+ bool is_signed = btf_kflag(t);
+ const char *fmt_str;
+ const char *name;
+ size_t dup_cnt;
+ int i;
+
+ for (i = 0; i < vlen; i++, v++) {
+ name = btf_name_of(d, v->name_off);
+ /* enumerators share namespace with typedef idents */
+ dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
+ if (dup_cnt > 1) {
+ fmt_str = is_signed ? "\n%s%s___%zd = %d," : "\n%s%s___%zd = %u,";
+ btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, dup_cnt, v->val);
+ } else {
+ fmt_str = is_signed ? "\n%s%s = %d," : "\n%s%s = %u,";
+ btf_dump_printf(d, fmt_str, pfx(lvl + 1), name, v->val);
+ }
+ }
+}
+
+static void btf_dump_emit_enum64_val(struct btf_dump *d,
+ const struct btf_type *t,
+ int lvl, __u16 vlen)
+{
+ const struct btf_enum64 *v = btf_enum64(t);
+ bool is_signed = btf_kflag(t);
+ const char *fmt_str;
const char *name;
size_t dup_cnt;
+ __u64 val;
int i;
+ for (i = 0; i < vlen; i++, v++) {
+ name = btf_name_of(d, v->name_off);
+ dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
+ val = btf_enum64_value(v);
+ if (dup_cnt > 1) {
+ fmt_str = is_signed ? "\n%s%s___%zd = %lldLL,"
+ : "\n%s%s___%zd = %lluULL,";
+ btf_dump_printf(d, fmt_str,
+ pfx(lvl + 1), name, dup_cnt,
+ (unsigned long long)val);
+ } else {
+ fmt_str = is_signed ? "\n%s%s = %lldLL,"
+ : "\n%s%s = %lluULL,";
+ btf_dump_printf(d, fmt_str,
+ pfx(lvl + 1), name,
+ (unsigned long long)val);
+ }
+ }
+}
+static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
+ const struct btf_type *t,
+ int lvl)
+{
+ __u16 vlen = btf_vlen(t);
+
btf_dump_printf(d, "enum%s%s",
t->name_off ? " " : "",
btf_dump_type_name(d, id));
- if (vlen) {
- btf_dump_printf(d, " {");
- for (i = 0; i < vlen; i++, v++) {
- name = btf_name_of(d, v->name_off);
- /* enumerators share namespace with typedef idents */
- dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
- if (dup_cnt > 1) {
- btf_dump_printf(d, "\n%s%s___%zu = %u,",
- pfx(lvl + 1), name, dup_cnt,
- (__u32)v->val);
- } else {
- btf_dump_printf(d, "\n%s%s = %u,",
- pfx(lvl + 1), name,
- (__u32)v->val);
+ if (!vlen)
+ return;
+
+ btf_dump_printf(d, " {");
+ if (btf_is_enum(t))
+ btf_dump_emit_enum32_val(d, t, lvl, vlen);
+ else
+ btf_dump_emit_enum64_val(d, t, lvl, vlen);
+ btf_dump_printf(d, "\n%s}", pfx(lvl));
+
+ /* special case enums with special sizes */
+ if (t->size == 1) {
+ /* one-byte enums can be forced with mode(byte) attribute */
+ btf_dump_printf(d, " __attribute__((mode(byte)))");
+ } else if (t->size == 8 && d->ptr_sz == 8) {
+ /* enum can be 8-byte sized if one of the enumerator values
+ * doesn't fit in 32-bit integer, or by adding mode(word)
+ * attribute (but probably only on 64-bit architectures); do
+ * our best here to try to satisfy the contract without adding
+ * unnecessary attributes
+ */
+ bool needs_word_mode;
+
+ if (btf_is_enum(t)) {
+ /* enum can't represent 64-bit values, so we need word mode */
+ needs_word_mode = true;
+ } else {
+ /* enum64 needs mode(word) if none of its values has
+ * non-zero upper 32-bits (which means that all values
+ * fit in 32-bit integers and won't cause compiler to
+ * bump enum to be 64-bit naturally
+ */
+ int i;
+
+ needs_word_mode = true;
+ for (i = 0; i < vlen; i++) {
+ if (btf_enum64(t)[i].val_hi32 != 0) {
+ needs_word_mode = false;
+ break;
+ }
}
}
- btf_dump_printf(d, "\n%s}", pfx(lvl));
+ if (needs_word_mode)
+ btf_dump_printf(d, " __attribute__((mode(word)))");
}
+
}
static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
@@ -967,8 +1210,7 @@ static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
if (d->decl_stack_cnt >= d->decl_stack_cap) {
new_cap = max(16, d->decl_stack_cap * 3 / 2);
- new_stack = realloc(d->decl_stack,
- new_cap * sizeof(new_stack[0]));
+ new_stack = libbpf_reallocarray(d->decl_stack, new_cap, sizeof(new_stack[0]));
if (!new_stack)
return -ENOMEM;
d->decl_stack = new_stack;
@@ -1025,14 +1267,20 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
const struct btf_dump_emit_type_decl_opts *opts)
{
const char *fname;
- int lvl;
+ int lvl, err;
if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
- return -EINVAL;
+ return libbpf_err(-EINVAL);
+
+ err = btf_dump_resize(d);
+ if (err)
+ return libbpf_err(err);
fname = OPTS_GET(opts, field_name, "");
lvl = OPTS_GET(opts, indent_level, 0);
+ d->strip_mods = OPTS_GET(opts, strip_mods, false);
btf_dump_emit_type_decl(d, id, fname, lvl);
+ d->strip_mods = false;
return 0;
}
@@ -1045,6 +1293,10 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
stack_start = d->decl_stack_cnt;
for (;;) {
+ t = btf__type_by_id(d->btf, id);
+ if (d->strip_mods && btf_is_mod(t))
+ goto skip_mod;
+
err = btf_dump_push_decl_stack_id(d, id);
if (err < 0) {
/*
@@ -1056,18 +1308,18 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
d->decl_stack_cnt = stack_start;
return;
}
-
+skip_mod:
/* VOID */
if (id == 0)
break;
- t = btf__type_by_id(d->btf, id);
switch (btf_kind(t)) {
case BTF_KIND_PTR:
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_FUNC_PROTO:
+ case BTF_KIND_TYPE_TAG:
id = t->type;
break;
case BTF_KIND_ARRAY:
@@ -1075,10 +1327,12 @@ static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
break;
case BTF_KIND_INT:
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
case BTF_KIND_FWD:
case BTF_KIND_STRUCT:
case BTF_KIND_UNION:
case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FLOAT:
goto done;
default:
pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
@@ -1137,6 +1391,20 @@ static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
}
}
+static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack)
+{
+ const struct btf_type *t;
+ __u32 id;
+
+ while (decl_stack->cnt) {
+ id = decl_stack->ids[decl_stack->cnt - 1];
+ t = btf__type_by_id(d->btf, id);
+ if (!btf_is_mod(t))
+ return;
+ decl_stack->cnt--;
+ }
+}
+
static void btf_dump_emit_name(const struct btf_dump *d,
const char *name, bool last_was_ptr)
{
@@ -1179,6 +1447,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
switch (kind) {
case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
btf_dump_emit_mods(d, decls);
name = btf_name_of(d, t->name_off);
btf_dump_printf(d, "%s", name);
@@ -1187,15 +1456,16 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
case BTF_KIND_UNION:
btf_dump_emit_mods(d, decls);
/* inline anonymous struct/union */
- if (t->name_off == 0)
+ if (t->name_off == 0 && !d->skip_anon_defs)
btf_dump_emit_struct_def(d, id, t, lvl);
else
btf_dump_emit_struct_fwd(d, id, t);
break;
case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
btf_dump_emit_mods(d, decls);
/* inline anonymous enum */
- if (t->name_off == 0)
+ if (t->name_off == 0 && !d->skip_anon_defs)
btf_dump_emit_enum_def(d, id, t, lvl);
else
btf_dump_emit_enum_fwd(d, id, t);
@@ -1220,6 +1490,11 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
case BTF_KIND_RESTRICT:
btf_dump_printf(d, " restrict");
break;
+ case BTF_KIND_TYPE_TAG:
+ btf_dump_emit_mods(d, decls);
+ name = btf_name_of(d, t->name_off);
+ btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
+ break;
case BTF_KIND_ARRAY: {
const struct btf_array *a = btf_array(t);
const struct btf_type *next_t;
@@ -1235,14 +1510,7 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
* a const/volatile modifier for array, so we are
* going to silently skip them here.
*/
- while (decls->cnt) {
- next_id = decls->ids[decls->cnt - 1];
- next_t = btf__type_by_id(d->btf, next_id);
- if (btf_is_mod(next_t))
- decls->cnt--;
- else
- break;
- }
+ btf_dump_drop_mods(d, decls);
if (decls->cnt == 0) {
btf_dump_emit_name(d, fname, last_was_ptr);
@@ -1270,7 +1538,15 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
__u16 vlen = btf_vlen(t);
int i;
- btf_dump_emit_mods(d, decls);
+ /*
+ * GCC emits extra volatile qualifier for
+ * __attribute__((noreturn)) function pointers. Clang
+ * doesn't do it. It's a GCC quirk for backwards
+ * compatibility with code written for GCC <2.5. So,
+ * similarly to extra qualifiers for array, just drop
+ * them, instead of handling them.
+ */
+ btf_dump_drop_mods(d, decls);
if (decls->cnt) {
btf_dump_printf(d, " (");
btf_dump_emit_type_chain(d, decls, fname, lvl);
@@ -1319,15 +1595,59 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
btf_dump_emit_name(d, fname, last_was_ptr);
}
+/* show type name as (type_name) */
+static void btf_dump_emit_type_cast(struct btf_dump *d, __u32 id,
+ bool top_level)
+{
+ const struct btf_type *t;
+
+ /* for array members, we don't bother emitting type name for each
+ * member to avoid the redundancy of
+ * .name = (char[4])[(char)'f',(char)'o',(char)'o',]
+ */
+ if (d->typed_dump->is_array_member)
+ return;
+
+ /* avoid type name specification for variable/section; it will be done
+ * for the associated variable value(s).
+ */
+ t = btf__type_by_id(d->btf, id);
+ if (btf_is_var(t) || btf_is_datasec(t))
+ return;
+
+ if (top_level)
+ btf_dump_printf(d, "(");
+
+ d->skip_anon_defs = true;
+ d->strip_mods = true;
+ btf_dump_emit_type_decl(d, id, "", 0);
+ d->strip_mods = false;
+ d->skip_anon_defs = false;
+
+ if (top_level)
+ btf_dump_printf(d, ")");
+}
+
/* return number of duplicates (occurrences) of a given name */
static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
const char *orig_name)
{
+ char *old_name, *new_name;
size_t dup_cnt = 0;
+ int err;
+
+ new_name = strdup(orig_name);
+ if (!new_name)
+ return 1;
- hashmap__find(name_map, orig_name, (void **)&dup_cnt);
+ (void)hashmap__find(name_map, orig_name, &dup_cnt);
dup_cnt++;
- hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL);
+
+ err = hashmap__set(name_map, new_name, dup_cnt, &old_name, NULL);
+ if (err)
+ free(new_name);
+
+ free(old_name);
return dup_cnt;
}
@@ -1347,6 +1667,11 @@ static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
if (s->name_resolved)
return *cached_name ? *cached_name : orig_name;
+ if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) {
+ s->name_resolved = 1;
+ return orig_name;
+ }
+
dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
if (dup_cnt > 1) {
const size_t max_len = 256;
@@ -1369,3 +1694,833 @@ static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id)
{
return btf_dump_resolve_name(d, id, d->ident_names);
}
+
+static int btf_dump_dump_type_data(struct btf_dump *d,
+ const char *fname,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data,
+ __u8 bits_offset,
+ __u8 bit_sz);
+
+static const char *btf_dump_data_newline(struct btf_dump *d)
+{
+ return d->typed_dump->compact || d->typed_dump->depth == 0 ? "" : "\n";
+}
+
+static const char *btf_dump_data_delim(struct btf_dump *d)
+{
+ return d->typed_dump->depth == 0 ? "" : ",";
+}
+
+static void btf_dump_data_pfx(struct btf_dump *d)
+{
+ int i, lvl = d->typed_dump->indent_lvl + d->typed_dump->depth;
+
+ if (d->typed_dump->compact)
+ return;
+
+ for (i = 0; i < lvl; i++)
+ btf_dump_printf(d, "%s", d->typed_dump->indent_str);
+}
+
+/* A macro is used here as btf_dump_type_values() appends format specifiers
+ * to the format specifier passed in; these do the work of appending
+ * delimiters etc while the caller simply has to specify the type values
+ * in the format specifier + value(s).
+ */
+#define btf_dump_type_values(d, fmt, ...) \
+ btf_dump_printf(d, fmt "%s%s", \
+ ##__VA_ARGS__, \
+ btf_dump_data_delim(d), \
+ btf_dump_data_newline(d))
+
+static int btf_dump_unsupported_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id)
+{
+ btf_dump_printf(d, "<unsupported kind:%u>", btf_kind(t));
+ return -ENOTSUP;
+}
+
+static int btf_dump_get_bitfield_value(struct btf_dump *d,
+ const struct btf_type *t,
+ const void *data,
+ __u8 bits_offset,
+ __u8 bit_sz,
+ __u64 *value)
+{
+ __u16 left_shift_bits, right_shift_bits;
+ const __u8 *bytes = data;
+ __u8 nr_copy_bits;
+ __u64 num = 0;
+ int i;
+
+ /* Maximum supported bitfield size is 64 bits */
+ if (t->size > 8) {
+ pr_warn("unexpected bitfield size %d\n", t->size);
+ return -EINVAL;
+ }
+
+ /* Bitfield value retrieval is done in two steps; first relevant bytes are
+ * stored in num, then we left/right shift num to eliminate irrelevant bits.
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ for (i = t->size - 1; i >= 0; i--)
+ num = num * 256 + bytes[i];
+ nr_copy_bits = bit_sz + bits_offset;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ for (i = 0; i < t->size; i++)
+ num = num * 256 + bytes[i];
+ nr_copy_bits = t->size * 8 - bits_offset;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+ left_shift_bits = 64 - nr_copy_bits;
+ right_shift_bits = 64 - bit_sz;
+
+ *value = (num << left_shift_bits) >> right_shift_bits;
+
+ return 0;
+}
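To make the two-step extraction above concrete, here is a small standalone sketch (not part of the patch) of the little-endian path. extract_bitfield_le() is a hypothetical name, and the example layout (a 3-bit field followed by a 5-bit field in the first byte) is only an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

/* little-endian variant: accumulate bytes LSB-first into a 64-bit value,
 * then shift left/right to discard bits outside
 * [bits_offset, bits_offset + bit_sz)
 */
static uint64_t extract_bitfield_le(const uint8_t *bytes, int byte_sz,
				    uint8_t bits_offset, uint8_t bit_sz)
{
	uint64_t num = 0;
	int i;

	for (i = byte_sz - 1; i >= 0; i--)
		num = num * 256 + bytes[i];
	return (num << (64 - (bit_sz + bits_offset))) >> (64 - bit_sz);
}

int main(void)
{
	/* e.g. struct { unsigned a:3; unsigned b:5; } with a = 5, b = 19 -> 0x9d */
	uint8_t data[4] = { 0x9d, 0x00, 0x00, 0x00 };

	printf("a=%llu b=%llu\n",
	       (unsigned long long)extract_bitfield_le(data, 4, 0, 3),
	       (unsigned long long)extract_bitfield_le(data, 4, 3, 5));
	return 0;
}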
+
+static int btf_dump_bitfield_check_zero(struct btf_dump *d,
+ const struct btf_type *t,
+ const void *data,
+ __u8 bits_offset,
+ __u8 bit_sz)
+{
+ __u64 check_num;
+ int err;
+
+ err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &check_num);
+ if (err)
+ return err;
+ if (check_num == 0)
+ return -ENODATA;
+ return 0;
+}
+
+static int btf_dump_bitfield_data(struct btf_dump *d,
+ const struct btf_type *t,
+ const void *data,
+ __u8 bits_offset,
+ __u8 bit_sz)
+{
+ __u64 print_num;
+ int err;
+
+ err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz, &print_num);
+ if (err)
+ return err;
+
+ btf_dump_type_values(d, "0x%llx", (unsigned long long)print_num);
+
+ return 0;
+}
+
+/* ints, floats and ptrs */
+static int btf_dump_base_type_check_zero(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data)
+{
+ static __u8 bytecmp[16] = {};
+ int nr_bytes;
+
+ /* For pointer types, pointer size is not defined on a per-type basis.
+ * On dump creation however, we store the pointer size.
+ */
+ if (btf_kind(t) == BTF_KIND_PTR)
+ nr_bytes = d->ptr_sz;
+ else
+ nr_bytes = t->size;
+
+ if (nr_bytes < 1 || nr_bytes > 16) {
+ pr_warn("unexpected size %d for id [%u]\n", nr_bytes, id);
+ return -EINVAL;
+ }
+
+ if (memcmp(data, bytecmp, nr_bytes) == 0)
+ return -ENODATA;
+ return 0;
+}
+
+static bool ptr_is_aligned(const struct btf *btf, __u32 type_id,
+ const void *data)
+{
+ int alignment = btf__align_of(btf, type_id);
+
+ if (alignment == 0)
+ return false;
+
+ return ((uintptr_t)data) % alignment == 0;
+}
+
+static int btf_dump_int_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 type_id,
+ const void *data,
+ __u8 bits_offset)
+{
+ __u8 encoding = btf_int_encoding(t);
+ bool sign = encoding & BTF_INT_SIGNED;
+ char buf[16] __attribute__((aligned(16)));
+ int sz = t->size;
+
+ if (sz == 0 || sz > sizeof(buf)) {
+ pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
+ return -EINVAL;
+ }
+
+ /* handle packed int data - accesses of integers not aligned on
+ * int boundaries can cause problems on some platforms.
+ */
+ if (!ptr_is_aligned(d->btf, type_id, data)) {
+ memcpy(buf, data, sz);
+ data = buf;
+ }
+
+ switch (sz) {
+ case 16: {
+ const __u64 *ints = data;
+ __u64 lsi, msi;
+
+ /* avoid use of __int128 as some 32-bit platforms do not
+ * support it.
+ */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ lsi = ints[0];
+ msi = ints[1];
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ lsi = ints[1];
+ msi = ints[0];
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+ if (msi == 0)
+ btf_dump_type_values(d, "0x%llx", (unsigned long long)lsi);
+ else
+ btf_dump_type_values(d, "0x%llx%016llx", (unsigned long long)msi,
+ (unsigned long long)lsi);
+ break;
+ }
+ case 8:
+ if (sign)
+ btf_dump_type_values(d, "%lld", *(long long *)data);
+ else
+ btf_dump_type_values(d, "%llu", *(unsigned long long *)data);
+ break;
+ case 4:
+ if (sign)
+ btf_dump_type_values(d, "%d", *(__s32 *)data);
+ else
+ btf_dump_type_values(d, "%u", *(__u32 *)data);
+ break;
+ case 2:
+ if (sign)
+ btf_dump_type_values(d, "%d", *(__s16 *)data);
+ else
+ btf_dump_type_values(d, "%u", *(__u16 *)data);
+ break;
+ case 1:
+ if (d->typed_dump->is_array_char) {
+ /* check for null terminator */
+ if (d->typed_dump->is_array_terminated)
+ break;
+ if (*(char *)data == '\0') {
+ d->typed_dump->is_array_terminated = true;
+ break;
+ }
+ if (isprint(*(char *)data)) {
+ btf_dump_type_values(d, "'%c'", *(char *)data);
+ break;
+ }
+ }
+ if (sign)
+ btf_dump_type_values(d, "%d", *(__s8 *)data);
+ else
+ btf_dump_type_values(d, "%u", *(__u8 *)data);
+ break;
+ default:
+ pr_warn("unexpected sz %d for id [%u]\n", sz, type_id);
+ return -EINVAL;
+ }
+ return 0;
+}
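The alignment guard above (copying into a suitably aligned local buffer before dereferencing) is the portable way to read packed or unaligned integer members. A minimal standalone sketch, assuming a hypothetical helper name read_u32_maybe_unaligned():

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint32_t read_u32_maybe_unaligned(const void *data)
{
	uint32_t v;

	/* fast path: pointer already aligned for a 4-byte access */
	if (((uintptr_t)data % _Alignof(uint32_t)) == 0)
		return *(const uint32_t *)data;
	/* slow path: memcpy is safe for any alignment */
	memcpy(&v, data, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[8] = { 0x00, 0x78, 0x56, 0x34, 0x12 };

	/* buf + 1 is deliberately misaligned */
	printf("0x%x\n", read_u32_maybe_unaligned(buf + 1));
	return 0;
}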
+
+union float_data {
+ long double ld;
+ double d;
+ float f;
+};
+
+static int btf_dump_float_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 type_id,
+ const void *data)
+{
+ const union float_data *flp = data;
+ union float_data fl;
+ int sz = t->size;
+
+ /* handle unaligned data; copy to local union */
+ if (!ptr_is_aligned(d->btf, type_id, data)) {
+ memcpy(&fl, data, sz);
+ flp = &fl;
+ }
+
+ switch (sz) {
+ case 16:
+ btf_dump_type_values(d, "%Lf", flp->ld);
+ break;
+ case 8:
+ btf_dump_type_values(d, "%lf", flp->d);
+ break;
+ case 4:
+ btf_dump_type_values(d, "%f", flp->f);
+ break;
+ default:
+ pr_warn("unexpected size %d for id [%u]\n", sz, type_id);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int btf_dump_var_data(struct btf_dump *d,
+ const struct btf_type *v,
+ __u32 id,
+ const void *data)
+{
+ enum btf_func_linkage linkage = btf_var(v)->linkage;
+ const struct btf_type *t;
+ const char *l;
+ __u32 type_id;
+
+ switch (linkage) {
+ case BTF_FUNC_STATIC:
+ l = "static ";
+ break;
+ case BTF_FUNC_EXTERN:
+ l = "extern ";
+ break;
+ case BTF_FUNC_GLOBAL:
+ default:
+ l = "";
+ break;
+ }
+
+ /* format of output here is [linkage] [type] [varname] = (type)value,
+ * for example "static int cpu_profile_flip = (int)1"
+ */
+ btf_dump_printf(d, "%s", l);
+ type_id = v->type;
+ t = btf__type_by_id(d->btf, type_id);
+ btf_dump_emit_type_cast(d, type_id, false);
+ btf_dump_printf(d, " %s = ", btf_name_of(d, v->name_off));
+ return btf_dump_dump_type_data(d, NULL, t, type_id, data, 0, 0);
+}
+
+static int btf_dump_array_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data)
+{
+ const struct btf_array *array = btf_array(t);
+ const struct btf_type *elem_type;
+ __u32 i, elem_type_id;
+ __s64 elem_size;
+ bool is_array_member;
+
+ elem_type_id = array->type;
+ elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
+ elem_size = btf__resolve_size(d->btf, elem_type_id);
+ if (elem_size <= 0) {
+ pr_warn("unexpected elem size %zd for array type [%u]\n",
+ (ssize_t)elem_size, id);
+ return -EINVAL;
+ }
+
+ if (btf_is_int(elem_type)) {
+ /*
+ * BTF_INT_CHAR encoding never seems to be set for
+ * char arrays, so if size is 1 and element is
+ * printable as a char, we'll do that.
+ */
+ if (elem_size == 1)
+ d->typed_dump->is_array_char = true;
+ }
+
+ /* note that we increment depth before calling btf_dump_print() below;
+ * this is intentional. btf_dump_data_newline() will not print a
+ * newline for depth 0 (since this leaves us with trailing newlines
+ * at the end of typed display), so depth is incremented first.
+ * For similar reasons, we decrement depth before showing the closing
+ * parenthesis.
+ */
+ d->typed_dump->depth++;
+ btf_dump_printf(d, "[%s", btf_dump_data_newline(d));
+
+ /* may be a multidimensional array, so store current "is array member"
+ * status so we can restore it correctly later.
+ */
+ is_array_member = d->typed_dump->is_array_member;
+ d->typed_dump->is_array_member = true;
+ for (i = 0; i < array->nelems; i++, data += elem_size) {
+ if (d->typed_dump->is_array_terminated)
+ break;
+ btf_dump_dump_type_data(d, NULL, elem_type, elem_type_id, data, 0, 0);
+ }
+ d->typed_dump->is_array_member = is_array_member;
+ d->typed_dump->depth--;
+ btf_dump_data_pfx(d);
+ btf_dump_type_values(d, "]");
+
+ return 0;
+}
+
+static int btf_dump_struct_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data)
+{
+ const struct btf_member *m = btf_members(t);
+ __u16 n = btf_vlen(t);
+ int i, err = 0;
+
+ /* note that we increment depth before calling btf_dump_print() below;
+ * this is intentional. btf_dump_data_newline() will not print a
+ * newline for depth 0 (since this leaves us with trailing newlines
+ * at the end of typed display), so depth is incremented first.
+ * For similar reasons, we decrement depth before showing the closing
+ * parenthesis.
+ */
+ d->typed_dump->depth++;
+ btf_dump_printf(d, "{%s", btf_dump_data_newline(d));
+
+ for (i = 0; i < n; i++, m++) {
+ const struct btf_type *mtype;
+ const char *mname;
+ __u32 moffset;
+ __u8 bit_sz;
+
+ mtype = btf__type_by_id(d->btf, m->type);
+ mname = btf_name_of(d, m->name_off);
+ moffset = btf_member_bit_offset(t, i);
+
+ bit_sz = btf_member_bitfield_size(t, i);
+ err = btf_dump_dump_type_data(d, mname, mtype, m->type, data + moffset / 8,
+ moffset % 8, bit_sz);
+ if (err < 0)
+ return err;
+ }
+ d->typed_dump->depth--;
+ btf_dump_data_pfx(d);
+ btf_dump_type_values(d, "}");
+ return err;
+}
+
+union ptr_data {
+ unsigned int p;
+ unsigned long long lp;
+};
+
+static int btf_dump_ptr_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data)
+{
+ if (ptr_is_aligned(d->btf, id, data) && d->ptr_sz == sizeof(void *)) {
+ btf_dump_type_values(d, "%p", *(void **)data);
+ } else {
+ union ptr_data pt;
+
+ memcpy(&pt, data, d->ptr_sz);
+ if (d->ptr_sz == 4)
+ btf_dump_type_values(d, "0x%x", pt.p);
+ else
+ btf_dump_type_values(d, "0x%llx", pt.lp);
+ }
+ return 0;
+}
+
+static int btf_dump_get_enum_value(struct btf_dump *d,
+ const struct btf_type *t,
+ const void *data,
+ __u32 id,
+ __s64 *value)
+{
+ bool is_signed = btf_kflag(t);
+
+ if (!ptr_is_aligned(d->btf, id, data)) {
+ __u64 val;
+ int err;
+
+ err = btf_dump_get_bitfield_value(d, t, data, 0, 0, &val);
+ if (err)
+ return err;
+ *value = (__s64)val;
+ return 0;
+ }
+
+ switch (t->size) {
+ case 8:
+ *value = *(__s64 *)data;
+ return 0;
+ case 4:
+ *value = is_signed ? (__s64)*(__s32 *)data : *(__u32 *)data;
+ return 0;
+ case 2:
+ *value = is_signed ? *(__s16 *)data : *(__u16 *)data;
+ return 0;
+ case 1:
+ *value = is_signed ? *(__s8 *)data : *(__u8 *)data;
+ return 0;
+ default:
+ pr_warn("unexpected size %d for enum, id:[%u]\n", t->size, id);
+ return -EINVAL;
+ }
+}
+
+static int btf_dump_enum_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data)
+{
+ bool is_signed;
+ __s64 value;
+ int i, err;
+
+ err = btf_dump_get_enum_value(d, t, data, id, &value);
+ if (err)
+ return err;
+
+ is_signed = btf_kflag(t);
+ if (btf_is_enum(t)) {
+ const struct btf_enum *e;
+
+ for (i = 0, e = btf_enum(t); i < btf_vlen(t); i++, e++) {
+ if (value != e->val)
+ continue;
+ btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
+ return 0;
+ }
+
+ btf_dump_type_values(d, is_signed ? "%d" : "%u", value);
+ } else {
+ const struct btf_enum64 *e;
+
+ for (i = 0, e = btf_enum64(t); i < btf_vlen(t); i++, e++) {
+ if (value != btf_enum64_value(e))
+ continue;
+ btf_dump_type_values(d, "%s", btf_name_of(d, e->name_off));
+ return 0;
+ }
+
+ btf_dump_type_values(d, is_signed ? "%lldLL" : "%lluULL",
+ (unsigned long long)value);
+ }
+ return 0;
+}
+
+static int btf_dump_datasec_data(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data)
+{
+ const struct btf_var_secinfo *vsi;
+ const struct btf_type *var;
+ __u32 i;
+ int err;
+
+ btf_dump_type_values(d, "SEC(\"%s\") ", btf_name_of(d, t->name_off));
+
+ for (i = 0, vsi = btf_var_secinfos(t); i < btf_vlen(t); i++, vsi++) {
+ var = btf__type_by_id(d->btf, vsi->type);
+ err = btf_dump_dump_type_data(d, NULL, var, vsi->type, data + vsi->offset, 0, 0);
+ if (err < 0)
+ return err;
+ btf_dump_printf(d, ";");
+ }
+ return 0;
+}
+
+/* return size of type, or if base type overflows, return -E2BIG. */
+static int btf_dump_type_data_check_overflow(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data,
+ __u8 bits_offset)
+{
+ __s64 size = btf__resolve_size(d->btf, id);
+
+ if (size < 0 || size >= INT_MAX) {
+ pr_warn("unexpected size [%zu] for id [%u]\n",
+ (size_t)size, id);
+ return -EINVAL;
+ }
+
+ /* Only do overflow checking for base types; we do not want to
+ * avoid showing part of a struct, union or array, even if we
+ * do not have enough data to show the full object. By
+ * restricting overflow checking to base types we can ensure
+ * that partial display succeeds, while avoiding overflowing
+ * and using bogus data for display.
+ */
+ t = skip_mods_and_typedefs(d->btf, id, NULL);
+ if (!t) {
+ pr_warn("unexpected error skipping mods/typedefs for id [%u]\n",
+ id);
+ return -EINVAL;
+ }
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ if (data + bits_offset / 8 + size > d->typed_dump->data_end)
+ return -E2BIG;
+ break;
+ default:
+ break;
+ }
+ return (int)size;
+}
+
+static int btf_dump_type_data_check_zero(struct btf_dump *d,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data,
+ __u8 bits_offset,
+ __u8 bit_sz)
+{
+ __s64 value;
+ int i, err;
+
+ /* toplevel exceptions; we show zero values if
+ * - we ask for them (emit_zeroes);
+ * - we are at the top level, so we see "struct empty { }";
+ * - we are an array member and the array is non-empty and not a char
+ * array; we don't want to be in a situation where we have an integer
+ * array 0, 1, 0, 1 and only show the non-zero values. If the array
+ * contains zeroes only, or is a char array starting with a '\0', the
+ * array-level check_zero() will prevent showing it; here we are
+ * concerned with determining zero value at the array member level.
+ */
+ if (d->typed_dump->emit_zeroes || d->typed_dump->depth == 0 ||
+ (d->typed_dump->is_array_member &&
+ !d->typed_dump->is_array_char))
+ return 0;
+
+ t = skip_mods_and_typedefs(d->btf, id, NULL);
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_INT:
+ if (bit_sz)
+ return btf_dump_bitfield_check_zero(d, t, data, bits_offset, bit_sz);
+ return btf_dump_base_type_check_zero(d, t, id, data);
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_PTR:
+ return btf_dump_base_type_check_zero(d, t, id, data);
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *array = btf_array(t);
+ const struct btf_type *elem_type;
+ __u32 elem_type_id, elem_size;
+ bool ischar;
+
+ elem_type_id = array->type;
+ elem_size = btf__resolve_size(d->btf, elem_type_id);
+ elem_type = skip_mods_and_typedefs(d->btf, elem_type_id, NULL);
+
+ ischar = btf_is_int(elem_type) && elem_size == 1;
+
+ /* check all elements; if _any_ element is nonzero, all
+ * of array is displayed. We make an exception however
+ * for char arrays where the first element is 0; these
+ * are considered zeroed also, even if later elements are
+ * non-zero because the string is terminated.
+ */
+ for (i = 0; i < array->nelems; i++) {
+ if (i == 0 && ischar && *(char *)data == 0)
+ return -ENODATA;
+ err = btf_dump_type_data_check_zero(d, elem_type,
+ elem_type_id,
+ data +
+ (i * elem_size),
+ bits_offset, 0);
+ if (err != -ENODATA)
+ return err;
+ }
+ return -ENODATA;
+ }
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m = btf_members(t);
+ __u16 n = btf_vlen(t);
+
+ /* if any struct/union member is non-zero, the struct/union
+ * is considered non-zero and dumped.
+ */
+ for (i = 0; i < n; i++, m++) {
+ const struct btf_type *mtype;
+ __u32 moffset;
+
+ mtype = btf__type_by_id(d->btf, m->type);
+ moffset = btf_member_bit_offset(t, i);
+
+ /* btf_int_bits() does not store member bitfield size;
+ * bitfield size needs to be stored here so int display
+ * of member can retrieve it.
+ */
+ bit_sz = btf_member_bitfield_size(t, i);
+ err = btf_dump_type_data_check_zero(d, mtype, m->type, data + moffset / 8,
+ moffset % 8, bit_sz);
+ if (err != -ENODATA)
+ return err;
+ }
+ return -ENODATA;
+ }
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ err = btf_dump_get_enum_value(d, t, data, id, &value);
+ if (err)
+ return err;
+ if (value == 0)
+ return -ENODATA;
+ return 0;
+ default:
+ return 0;
+ }
+}
+
+/* returns size of data dumped, or error. */
+static int btf_dump_dump_type_data(struct btf_dump *d,
+ const char *fname,
+ const struct btf_type *t,
+ __u32 id,
+ const void *data,
+ __u8 bits_offset,
+ __u8 bit_sz)
+{
+ int size, err = 0;
+
+ size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset);
+ if (size < 0)
+ return size;
+ err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
+ if (err) {
+ /* zeroed data is expected and not an error, so simply skip
+ * dumping such data. Record other errors however.
+ */
+ if (err == -ENODATA)
+ return size;
+ return err;
+ }
+ btf_dump_data_pfx(d);
+
+ if (!d->typed_dump->skip_names) {
+ if (fname && strlen(fname) > 0)
+ btf_dump_printf(d, ".%s = ", fname);
+ btf_dump_emit_type_cast(d, id, true);
+ }
+
+ t = skip_mods_and_typedefs(d->btf, id, NULL);
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_FWD:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_FUNC_PROTO:
+ case BTF_KIND_DECL_TAG:
+ err = btf_dump_unsupported_data(d, t, id);
+ break;
+ case BTF_KIND_INT:
+ if (bit_sz)
+ err = btf_dump_bitfield_data(d, t, data, bits_offset, bit_sz);
+ else
+ err = btf_dump_int_data(d, t, id, data, bits_offset);
+ break;
+ case BTF_KIND_FLOAT:
+ err = btf_dump_float_data(d, t, id, data);
+ break;
+ case BTF_KIND_PTR:
+ err = btf_dump_ptr_data(d, t, id, data);
+ break;
+ case BTF_KIND_ARRAY:
+ err = btf_dump_array_data(d, t, id, data);
+ break;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ err = btf_dump_struct_data(d, t, id, data);
+ break;
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ /* handle bitfield and int enum values */
+ if (bit_sz) {
+ __u64 print_num;
+ __s64 enum_val;
+
+ err = btf_dump_get_bitfield_value(d, t, data, bits_offset, bit_sz,
+ &print_num);
+ if (err)
+ break;
+ enum_val = (__s64)print_num;
+ err = btf_dump_enum_data(d, t, id, &enum_val);
+ } else
+ err = btf_dump_enum_data(d, t, id, data);
+ break;
+ case BTF_KIND_VAR:
+ err = btf_dump_var_data(d, t, id, data);
+ break;
+ case BTF_KIND_DATASEC:
+ err = btf_dump_datasec_data(d, t, id, data);
+ break;
+ default:
+ pr_warn("unexpected kind [%u] for id [%u]\n",
+ BTF_INFO_KIND(t->info), id);
+ return -EINVAL;
+ }
+ if (err < 0)
+ return err;
+ return size;
+}
+
+int btf_dump__dump_type_data(struct btf_dump *d, __u32 id,
+ const void *data, size_t data_sz,
+ const struct btf_dump_type_data_opts *opts)
+{
+ struct btf_dump_data typed_dump = {};
+ const struct btf_type *t;
+ int ret;
+
+ if (!OPTS_VALID(opts, btf_dump_type_data_opts))
+ return libbpf_err(-EINVAL);
+
+ t = btf__type_by_id(d->btf, id);
+ if (!t)
+ return libbpf_err(-ENOENT);
+
+ d->typed_dump = &typed_dump;
+ d->typed_dump->data_end = data + data_sz;
+ d->typed_dump->indent_lvl = OPTS_GET(opts, indent_level, 0);
+
+ /* default indent string is a tab */
+ if (!OPTS_GET(opts, indent_str, NULL))
+ d->typed_dump->indent_str[0] = '\t';
+ else
+ libbpf_strlcpy(d->typed_dump->indent_str, opts->indent_str,
+ sizeof(d->typed_dump->indent_str));
+
+ d->typed_dump->compact = OPTS_GET(opts, compact, false);
+ d->typed_dump->skip_names = OPTS_GET(opts, skip_names, false);
+ d->typed_dump->emit_zeroes = OPTS_GET(opts, emit_zeroes, false);
+
+ ret = btf_dump_dump_type_data(d, NULL, t, id, data, 0, 0);
+
+ d->typed_dump = NULL;
+
+ return libbpf_err(ret);
+}
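For context, a hedged usage sketch of the new API (not part of the patch). It assumes a struct btf_dump *d created earlier with btf_dump__new() (whose exact signature differs between libbpf versions) and a buffer holding the raw bytes of an instance of BTF type id; only the option fields visible in this patch are used:

#include <stddef.h>
#include <bpf/btf.h>

/* returns the size of the dumped type on success, a negative error otherwise */
static int dump_one(struct btf_dump *d, __u32 id,
		    const void *data, size_t data_sz)
{
	DECLARE_LIBBPF_OPTS(btf_dump_type_data_opts, opts,
		.compact = true,	/* no newlines/indentation */
		.skip_names = false,	/* keep ".member = " prefixes */
		.emit_zeroes = false,	/* skip zero-valued members */
	);

	return btf_dump__dump_type_data(d, id, data, data_sz, &opts);
}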
diff --git a/tools/lib/bpf/gen_loader.c b/tools/lib/bpf/gen_loader.c
new file mode 100644
index 000000000000..83e8e3bfd8ff
--- /dev/null
+++ b/tools/lib/bpf/gen_loader.c
@@ -0,0 +1,1123 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/filter.h>
+#include <sys/param.h>
+#include "btf.h"
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+#include "hashmap.h"
+#include "bpf_gen_internal.h"
+#include "skel_internal.h"
+#include <asm/byteorder.h>
+
+#define MAX_USED_MAPS 64
+#define MAX_USED_PROGS 32
+#define MAX_KFUNC_DESCS 256
+#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)
+
+/* The following structure describes the stack layout of the loader program.
+ * In addition R6 contains the pointer to context.
+ * R7 contains the result of the last sys_bpf command (typically error or FD).
+ * R9 contains the result of the last sys_close command.
+ *
+ * Naming convention:
+ * ctx - bpf program context
+ * stack - bpf program stack
+ * blob - bpf_attr-s, strings, insns, map data.
+ * All the bytes that loader prog will use for read/write.
+ */
+struct loader_stack {
+ __u32 btf_fd;
+ __u32 inner_map_fd;
+ __u32 prog_fd[MAX_USED_PROGS];
+};
+
+#define stack_off(field) \
+ (__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))
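A standalone illustration (not part of the patch) of the stack_off() arithmetic: loader_stack fields live at negative offsets from R10, the BPF frame pointer, so a field's offset is its offsetof() minus the total struct size. The struct and macro names below are local copies for the example only:

#include <stddef.h>
#include <stdio.h>

struct loader_stack_example {
	unsigned int btf_fd;
	unsigned int inner_map_fd;
	unsigned int prog_fd[32];	/* MAX_USED_PROGS */
};

/* same arithmetic as stack_off() above, spelled with signed math */
#define stack_off_example(field) \
	((int)offsetof(struct loader_stack_example, field) - \
	 (int)sizeof(struct loader_stack_example))

int main(void)
{
	/* prints: btf_fd at -136, inner_map_fd at -132, prog_fd[0] at -128 */
	printf("btf_fd at %d, inner_map_fd at %d, prog_fd[0] at %d\n",
	       stack_off_example(btf_fd),
	       stack_off_example(inner_map_fd),
	       stack_off_example(prog_fd[0]));
	return 0;
}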
+
+#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
+
+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+ return gen->fd_array + index * sizeof(int);
+}
+
+static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
+{
+ size_t off = gen->insn_cur - gen->insn_start;
+ void *insn_start;
+
+ if (gen->error)
+ return gen->error;
+ if (size > INT32_MAX || off + size > INT32_MAX) {
+ gen->error = -ERANGE;
+ return -ERANGE;
+ }
+ insn_start = realloc(gen->insn_start, off + size);
+ if (!insn_start) {
+ gen->error = -ENOMEM;
+ free(gen->insn_start);
+ gen->insn_start = NULL;
+ return -ENOMEM;
+ }
+ gen->insn_start = insn_start;
+ gen->insn_cur = insn_start + off;
+ return 0;
+}
+
+static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
+{
+ size_t off = gen->data_cur - gen->data_start;
+ void *data_start;
+
+ if (gen->error)
+ return gen->error;
+ if (size > INT32_MAX || off + size > INT32_MAX) {
+ gen->error = -ERANGE;
+ return -ERANGE;
+ }
+ data_start = realloc(gen->data_start, off + size);
+ if (!data_start) {
+ gen->error = -ENOMEM;
+ free(gen->data_start);
+ gen->data_start = NULL;
+ return -ENOMEM;
+ }
+ gen->data_start = data_start;
+ gen->data_cur = data_start + off;
+ return 0;
+}
+
+static void emit(struct bpf_gen *gen, struct bpf_insn insn)
+{
+ if (realloc_insn_buf(gen, sizeof(insn)))
+ return;
+ memcpy(gen->insn_cur, &insn, sizeof(insn));
+ gen->insn_cur += sizeof(insn);
+}
+
+static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
+{
+ emit(gen, insn1);
+ emit(gen, insn2);
+}
+
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);
+
+void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
+{
+ size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
+ int i;
+
+ gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
+ gen->log_level = log_level;
+ /* save ctx pointer into R6 */
+ emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
+
+ /* bzero stack */
+ emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
+ emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
+
+ /* amount of stack actually used, only used to calculate iterations, not stack offset */
+ nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
+ /* jump over cleanup code */
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
+ /* size of cleanup code below (including map fd cleanup) */
+ (nr_progs_sz / 4) * 3 + 2 +
+ /* 6 insns for emit_sys_close_blob,
+ * 6 insns for debug_regs in emit_sys_close_blob
+ */
+ nr_maps * (6 + (gen->log_level ? 6 : 0))));
+
+ /* remember the label where all error branches will jump to */
+ gen->cleanup_label = gen->insn_cur - gen->insn_start;
+ /* emit cleanup code: close all temp FDs */
+ for (i = 0; i < nr_progs_sz; i += 4) {
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
+ emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
+ }
+ for (i = 0; i < nr_maps; i++)
+ emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
+ /* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
+ emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
+ emit(gen, BPF_EXIT_INSN());
+}
+
+static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
+{
+ __u32 size8 = roundup(size, 8);
+ __u64 zero = 0;
+ void *prev;
+
+ if (realloc_data_buf(gen, size8))
+ return 0;
+ prev = gen->data_cur;
+ if (data) {
+ memcpy(gen->data_cur, data, size);
+ memcpy(gen->data_cur + size, &zero, size8 - size);
+ } else {
+ memset(gen->data_cur, 0, size8);
+ }
+ gen->data_cur += size8;
+ return prev - gen->data_start;
+}
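A standalone sketch (not part of the patch) of how add_data() hands out blob offsets: each allocation is rounded up to 8 bytes so later 64-bit loads/stores against the blob stay aligned, and the returned value is the offset of the copied data from the start of the blob:

#include <stdio.h>

#define ROUNDUP8(x) (((x) + 7u) & ~7u)

int main(void)
{
	unsigned int sizes[] = { 1, 7, 8, 13 };
	unsigned int off = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("request %2u bytes -> offset %2u (blob grows by %u)\n",
		       sizes[i], off, ROUNDUP8(sizes[i]));
		off += ROUNDUP8(sizes[i]);
	}
	return 0;
}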
+
+/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
+ * to start of fd_array. Caller can decide if it is usable or not.
+ */
+static int add_map_fd(struct bpf_gen *gen)
+{
+ if (gen->nr_maps == MAX_USED_MAPS) {
+ pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
+ gen->error = -E2BIG;
+ return 0;
+ }
+ return gen->nr_maps++;
+}
+
+static int add_kfunc_btf_fd(struct bpf_gen *gen)
+{
+ int cur;
+
+ if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
+ cur = add_data(gen, NULL, sizeof(int));
+ return (cur - gen->fd_array) / sizeof(int);
+ }
+ return MAX_USED_MAPS + gen->nr_fd_array++;
+}
+
+static int insn_bytes_to_bpf_size(__u32 sz)
+{
+ switch (sz) {
+ case 8: return BPF_DW;
+ case 4: return BPF_W;
+ case 2: return BPF_H;
+ case 1: return BPF_B;
+ default: return -1;
+ }
+}
+
+/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
+static void emit_rel_store(struct bpf_gen *gen, int off, int data)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, data));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_off));
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_off));
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
+}
+
+static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
+ bool check_non_zero)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
+ if (check_non_zero)
+ /* If value in ctx is zero don't update the blob.
+ * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
+ */
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
+ emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
+}
+
+static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
+{
+ emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, attr));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
+ /* remember the result in R7 */
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+}
+
+static bool is_simm16(__s64 value)
+{
+ return value == (__s64)(__s16)value;
+}
+
+static void emit_check_err(struct bpf_gen *gen)
+{
+ __s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;
+
+ /* R7 contains result of last sys_bpf command.
+ * if (R7 < 0) goto cleanup;
+ */
+ if (is_simm16(off)) {
+ emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
+ } else {
+ gen->error = -ERANGE;
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
+ }
+}
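A standalone sketch (not part of the patch) of the offset computation above: BPF jump offsets are expressed in 8-byte instructions relative to the instruction following the jump, hence the division by 8 and the extra -1, and they must fit in the signed 16-bit off field (the is_simm16() check). backward_jump_off() is a hypothetical name:

#include <stdio.h>

static long backward_jump_off(long insn_cur, long cleanup_label)
{
	long off = -(insn_cur - cleanup_label) / 8 - 1;

	if (off != (long)(short)off)	/* same idea as is_simm16() */
		return -0x10000;	/* out of range; caller reports -ERANGE */
	return off;
}

int main(void)
{
	/* cleanup label 40 bytes (5 insns) behind the current position:
	 * the jump skips -6 insns counted from the next instruction
	 */
	printf("off = %ld\n", backward_jump_off(400, 360));
	return 0;
}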
+
+/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
+static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
+ const char *fmt, va_list args)
+{
+ char buf[1024];
+ int addr, len, ret;
+
+ if (!gen->log_level)
+ return;
+ ret = vsnprintf(buf, sizeof(buf), fmt, args);
+ if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
+ /* The special case to accommodate common debug_ret():
+ * to avoid specifying BPF_REG_7 and adding " r=%%d" to
+ * prints explicitly.
+ */
+ strcat(buf, " r=%d");
+ len = strlen(buf) + 1;
+ addr = add_data(gen, buf, len);
+
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, addr));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ if (reg1 >= 0)
+ emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
+ if (reg2 >= 0)
+ emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
+}
+
+static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ emit_debug(gen, reg1, reg2, fmt, args);
+ va_end(args);
+}
+
+static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ emit_debug(gen, BPF_REG_7, -1, fmt, args);
+ va_end(args);
+}
+
+static void __emit_sys_close(struct bpf_gen *gen)
+{
+ emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
+ /* 2 is the number of the following insns;
+ * 6 additional insns are emitted by debug_regs() when logging is enabled
+ */
+ 2 + (gen->log_level ? 6 : 0)));
+ emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
+ debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
+}
+
+static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
+{
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
+ __emit_sys_close(gen);
+}
+
+static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
+{
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_off));
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
+ __emit_sys_close(gen);
+}
+
+int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
+{
+ int i;
+
+ if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
+ pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
+ nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
+ gen->error = -EFAULT;
+ return gen->error;
+ }
+ emit_sys_close_stack(gen, stack_off(btf_fd));
+ for (i = 0; i < gen->nr_progs; i++)
+ move_stack2ctx(gen,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * gen->nr_maps +
+ sizeof(struct bpf_prog_desc) * i +
+ offsetof(struct bpf_prog_desc, prog_fd), 4,
+ stack_off(prog_fd[i]));
+ for (i = 0; i < gen->nr_maps; i++)
+ move_blob2ctx(gen,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * i +
+ offsetof(struct bpf_map_desc, map_fd), 4,
+ blob_fd_array_off(gen, i));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
+ emit(gen, BPF_EXIT_INSN());
+ pr_debug("gen: finish %d\n", gen->error);
+ if (!gen->error) {
+ struct gen_loader_opts *opts = gen->opts;
+
+ opts->insns = gen->insn_start;
+ opts->insns_sz = gen->insn_cur - gen->insn_start;
+ opts->data = gen->data_start;
+ opts->data_sz = gen->data_cur - gen->data_start;
+ }
+ return gen->error;
+}
+
+void bpf_gen__free(struct bpf_gen *gen)
+{
+ if (!gen)
+ return;
+ free(gen->data_start);
+ free(gen->insn_start);
+ free(gen);
+}
+
+void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
+ __u32 btf_raw_size)
+{
+ int attr_size = offsetofend(union bpf_attr, btf_log_level);
+ int btf_data, btf_load_attr;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: load_btf: size %d\n", btf_raw_size);
+ btf_data = add_data(gen, btf_raw_data, btf_raw_size);
+
+ attr.btf_size = btf_raw_size;
+ btf_load_attr = add_data(gen, &attr, attr_size);
+
+ /* populate union bpf_attr with user provided log details */
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
+ offsetof(struct bpf_loader_ctx, log_level), false);
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
+ offsetof(struct bpf_loader_ctx, log_size), false);
+ move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
+ offsetof(struct bpf_loader_ctx, log_buf), false);
+ /* populate union bpf_attr with a pointer to the BTF data */
+ emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
+ /* emit BTF_LOAD command */
+ emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
+ debug_ret(gen, "btf_load size %d", btf_raw_size);
+ emit_check_err(gen);
+ /* remember btf_fd in the stack, if successful */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
+}
+
+void bpf_gen__map_create(struct bpf_gen *gen,
+ enum bpf_map_type map_type,
+ const char *map_name,
+ __u32 key_size, __u32 value_size, __u32 max_entries,
+ struct bpf_map_create_opts *map_attr, int map_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, map_extra);
+ bool close_inner_map_fd = false;
+ int map_create_attr, idx;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ attr.map_type = map_type;
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.map_flags = map_attr->map_flags;
+ attr.map_extra = map_attr->map_extra;
+ if (map_name)
+ libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
+ attr.numa_node = map_attr->numa_node;
+ attr.map_ifindex = map_attr->map_ifindex;
+ attr.max_entries = max_entries;
+ attr.btf_key_type_id = map_attr->btf_key_type_id;
+ attr.btf_value_type_id = map_attr->btf_value_type_id;
+
+ pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
+ attr.map_name, map_idx, map_type, attr.btf_value_type_id);
+
+ map_create_attr = add_data(gen, &attr, attr_size);
+ if (attr.btf_value_type_id)
+ /* populate union bpf_attr with btf_fd saved in the stack earlier */
+ move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
+ stack_off(btf_fd));
+ switch (attr.map_type) {
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
+ stack_off(inner_map_fd));
+ close_inner_map_fd = true;
+ break;
+ default:
+ break;
+ }
+ /* conditionally update max_entries */
+ if (map_idx >= 0)
+ move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * map_idx +
+ offsetof(struct bpf_map_desc, max_entries),
+ true /* check that max_entries != 0 */);
+ /* emit MAP_CREATE command */
+ emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
+ debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
+ attr.map_name, map_idx, map_type, value_size,
+ attr.btf_value_type_id);
+ emit_check_err(gen);
+ /* remember map_fd in the stack, if successful */
+ if (map_idx < 0) {
+ /* This bpf_gen__map_create() function is called with map_idx >= 0
+ * for all maps that libbpf loading logic tracks.
+ * It's called with -1 to create an inner map.
+ */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
+ stack_off(inner_map_fd)));
+ } else if (map_idx != gen->nr_maps) {
+ gen->error = -EDOM; /* internal bug */
+ return;
+ } else {
+ /* add_map_fd does gen->nr_maps++ */
+ idx = add_map_fd(gen);
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_fd_array_off(gen, idx)));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
+ }
+ if (close_inner_map_fd)
+ emit_sys_close_stack(gen, stack_off(inner_map_fd));
+}
+
+void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
+ enum bpf_attach_type type)
+{
+ const char *prefix;
+ int kind, ret;
+
+ btf_get_kernel_prefix_kind(type, &prefix, &kind);
+ gen->attach_kind = kind;
+ ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
+ prefix, attach_name);
+ if (ret >= sizeof(gen->attach_target))
+ gen->error = -ENOSPC;
+}
+
+static void emit_find_attach_target(struct bpf_gen *gen)
+{
+ int name, len = strlen(gen->attach_target) + 1;
+
+ pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
+ name = add_data(gen, gen->attach_target, len);
+
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, name));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ debug_ret(gen, "find_by_name_kind(%s,%d)",
+ gen->attach_target, gen->attach_kind);
+ emit_check_err(gen);
+ /* if successful, btf_id is in lower 32-bit of R7 and
+ * btf_obj_fd is in upper 32-bit
+ */
+}
+
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
+ bool is_typeless, bool is_ld64, int kind, int insn_idx)
+{
+ struct ksym_relo_desc *relo;
+
+ relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
+ if (!relo) {
+ gen->error = -ENOMEM;
+ return;
+ }
+ gen->relos = relo;
+ relo += gen->relo_cnt;
+ relo->name = name;
+ relo->is_weak = is_weak;
+ relo->is_typeless = is_typeless;
+ relo->is_ld64 = is_ld64;
+ relo->kind = kind;
+ relo->insn_idx = insn_idx;
+ gen->relo_cnt++;
+}
+
+/* returns existing ksym_desc with ref incremented, or inserts a new one */
+static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
+{
+ struct ksym_desc *kdesc;
+ int i;
+
+ for (i = 0; i < gen->nr_ksyms; i++) {
+ kdesc = &gen->ksyms[i];
+ if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 &&
+ !strcmp(kdesc->name, relo->name)) {
+ kdesc->ref++;
+ return kdesc;
+ }
+ }
+ kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
+ if (!kdesc) {
+ gen->error = -ENOMEM;
+ return NULL;
+ }
+ gen->ksyms = kdesc;
+ kdesc = &gen->ksyms[gen->nr_ksyms++];
+ kdesc->name = relo->name;
+ kdesc->kind = relo->kind;
+ kdesc->ref = 1;
+ kdesc->off = 0;
+ kdesc->insn = 0;
+ kdesc->is_ld64 = relo->is_ld64;
+ return kdesc;
+}
+
+/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
+ * Returns result in BPF_REG_7
+ */
+static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
+{
+ int name_off, len = strlen(relo->name) + 1;
+
+ name_off = add_data(gen, relo->name, len);
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, name_off));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
+}
+
+/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
+ * Returns result in BPF_REG_7
+ * Returns u64 symbol addr in BPF_REG_9
+ */
+static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
+{
+ int name_off, len = strlen(relo->name) + 1, res_off;
+
+ name_off = add_data(gen, relo->name, len);
+ res_off = add_data(gen, NULL, 8); /* res is u64 */
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, name_off));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, res_off));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
+ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
+ emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
+ debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
+}
+
+/* Expects:
+ * BPF_REG_8 - pointer to instruction
+ *
+ * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
+ * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
+ * this would mean a new BTF fd index for each entry. By pairing symbol name
+ * with index, we get the insn->imm, insn->off pairing that kernel uses for
+ * kfunc_tab, which becomes the effective limit even though all of them may
+ * share same index in fd_array (such that kfunc_btf_tab has 1 element).
+ */
+static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
+{
+ struct ksym_desc *kdesc;
+ int btf_fd_idx;
+
+ kdesc = get_ksym_desc(gen, relo);
+ if (!kdesc)
+ return;
+ /* try to copy from existing bpf_insn */
+ if (kdesc->ref > 1) {
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + offsetof(struct bpf_insn, imm));
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
+ kdesc->insn + offsetof(struct bpf_insn, off));
+ goto log;
+ }
+ /* remember insn offset, so we can copy BTF ID and FD later */
+ kdesc->insn = insn;
+ emit_bpf_find_by_name_kind(gen, relo);
+ if (!relo->is_weak)
+ emit_check_err(gen);
+ /* get index in fd_array to store BTF FD at */
+ btf_fd_idx = add_kfunc_btf_fd(gen);
+ if (btf_fd_idx > INT16_MAX) {
+ pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
+ btf_fd_idx, relo->name);
+ gen->error = -E2BIG;
+ return;
+ }
+ kdesc->off = btf_fd_idx;
+ /* jump to success case */
+ emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
+ /* set value for imm, off as 0 */
+ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
+ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
+ /* skip success case for ret < 0 */
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
+ /* store btf_id into insn[insn_idx].imm */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
+ /* obtain fd in BPF_REG_9 */
+ emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
+ /* jump to fd_array store if fd denotes module BTF */
+ emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
+ /* set the default value for off */
+ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
+ /* skip BTF fd store for vmlinux BTF */
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
+ /* load fd_array slot pointer */
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
+ /* store BTF fd in slot */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
+ /* store index into insn[insn_idx].off */
+ emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
+log:
+ if (!gen->log_level)
+ return;
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
+ offsetof(struct bpf_insn, imm)));
+ emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
+ offsetof(struct bpf_insn, off)));
+ debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
+ relo->name, kdesc->ref);
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
+ debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
+ relo->name, kdesc->ref);
+}
+
+static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
+ int ref)
+{
+ if (!gen->log_level)
+ return;
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
+ offsetof(struct bpf_insn, imm)));
+ emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
+ offsetof(struct bpf_insn, imm)));
+ debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
+ relo->is_typeless, relo->is_weak, relo->name, ref);
+ emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
+ debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
+ relo->is_typeless, relo->is_weak, relo->name, ref);
+}
+
+/* Expects:
+ * BPF_REG_8 - pointer to instruction
+ */
+static void emit_relo_ksym_typeless(struct bpf_gen *gen,
+ struct ksym_relo_desc *relo, int insn)
+{
+ struct ksym_desc *kdesc;
+
+ kdesc = get_ksym_desc(gen, relo);
+ if (!kdesc)
+ return;
+ /* try to copy from existing ldimm64 insn */
+ if (kdesc->ref > 1) {
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + offsetof(struct bpf_insn, imm));
+ move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
+ goto log;
+ }
+ /* remember insn offset, so we can copy ksym addr later */
+ kdesc->insn = insn;
+ /* skip typeless ksym_desc in fd closing loop in cleanup_relos */
+ kdesc->typeless = true;
+ emit_bpf_kallsyms_lookup_name(gen, relo);
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
+ emit_check_err(gen);
+ /* store lower half of addr into insn[insn_idx].imm */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
+ /* store upper half of addr into insn[insn_idx + 1].imm */
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
+ sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
+log:
+ emit_ksym_relo_log(gen, relo, kdesc->ref);
+}
+
+static __u32 src_reg_mask(void)
+{
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ return 0x0f; /* src_reg,dst_reg,... */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ return 0xf0; /* dst_reg,src_reg,... */
+#else
+#error "Unsupported bit endianness, cannot proceed"
+#endif
+}
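For illustration (not part of the patch): on a little-endian bitfield layout, the byte following insn->code packs dst_reg in the low nibble and src_reg in the high nibble, so ANDing it with the 0x0f mask returned above keeps dst_reg and clears src_reg (this is what the clear_src_reg path in emit_relo_ksym_btf() below relies on). A minimal standalone sketch, assuming that layout:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* ld_imm64 with dst_reg = r1 and src_reg = BPF_PSEUDO_BTF_ID (3),
	 * as laid out on a little-endian bitfield platform
	 */
	uint8_t regs = (3u << 4) | 1u;

	/* applying the little-endian src_reg_mask() value */
	printf("before 0x%02x, after 0x%02x\n", regs, regs & 0x0f);
	return 0;
}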
+
+/* Expects:
+ * BPF_REG_8 - pointer to instruction
+ */
+static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
+{
+ struct ksym_desc *kdesc;
+ __u32 reg_mask;
+
+ kdesc = get_ksym_desc(gen, relo);
+ if (!kdesc)
+ return;
+ /* try to copy from existing ldimm64 insn */
+ if (kdesc->ref > 1) {
+ move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
+ move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+ kdesc->insn + offsetof(struct bpf_insn, imm));
+ /* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
+ * If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
+ */
+ emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
+ goto clear_src_reg;
+ }
+ /* remember insn offset, so we can copy BTF ID and FD later */
+ kdesc->insn = insn;
+ emit_bpf_find_by_name_kind(gen, relo);
+ if (!relo->is_weak)
+ emit_check_err(gen);
+ /* jump to success case */
+ emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
+ /* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
+ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
+ emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
+ /* skip success case for ret < 0 */
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
+ /* store btf_id into insn[insn_idx].imm */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
+ /* store btf_obj_fd into insn[insn_idx + 1].imm */
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
+ sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
+ /* skip src_reg adjustment */
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
+clear_src_reg:
+ /* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
+ reg_mask = src_reg_mask();
+ emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
+ emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
+ emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));
+
+ emit_ksym_relo_log(gen, relo, kdesc->ref);
+}
+
+void bpf_gen__record_relo_core(struct bpf_gen *gen,
+ const struct bpf_core_relo *core_relo)
+{
+ struct bpf_core_relo *relos;
+
+ relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
+ if (!relos) {
+ gen->error = -ENOMEM;
+ return;
+ }
+ gen->core_relos = relos;
+ relos += gen->core_relo_cnt;
+ memcpy(relos, core_relo, sizeof(*relos));
+ gen->core_relo_cnt++;
+}
+
+static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
+{
+ int insn;
+
+ pr_debug("gen: emit_relo (%d): %s at %d %s\n",
+ relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call");
+ insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
+ if (relo->is_ld64) {
+ if (relo->is_typeless)
+ emit_relo_ksym_typeless(gen, relo, insn);
+ else
+ emit_relo_ksym_btf(gen, relo, insn);
+ } else {
+ emit_relo_kfunc_btf(gen, relo, insn);
+ }
+}
+
+static void emit_relos(struct bpf_gen *gen, int insns)
+{
+ int i;
+
+ for (i = 0; i < gen->relo_cnt; i++)
+ emit_relo(gen, gen->relos + i, insns);
+}
+
+static void cleanup_core_relo(struct bpf_gen *gen)
+{
+ if (!gen->core_relo_cnt)
+ return;
+ free(gen->core_relos);
+ gen->core_relo_cnt = 0;
+ gen->core_relos = NULL;
+}
+
+static void cleanup_relos(struct bpf_gen *gen, int insns)
+{
+ struct ksym_desc *kdesc;
+ int i, insn;
+
+ for (i = 0; i < gen->nr_ksyms; i++) {
+ kdesc = &gen->ksyms[i];
+ /* only close fds for typed ksyms and kfuncs */
+ if (kdesc->is_ld64 && !kdesc->typeless) {
+ /* close fd recorded in insn[insn_idx + 1].imm */
+ insn = kdesc->insn;
+ insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
+ emit_sys_close_blob(gen, insn);
+ } else if (!kdesc->is_ld64) {
+ emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off));
+ if (kdesc->off < MAX_FD_ARRAY_SZ)
+ gen->nr_fd_array--;
+ }
+ }
+ if (gen->nr_ksyms) {
+ free(gen->ksyms);
+ gen->nr_ksyms = 0;
+ gen->ksyms = NULL;
+ }
+ if (gen->relo_cnt) {
+ free(gen->relos);
+ gen->relo_cnt = 0;
+ gen->relos = NULL;
+ }
+ cleanup_core_relo(gen);
+}
+
+void bpf_gen__prog_load(struct bpf_gen *gen,
+ enum bpf_prog_type prog_type, const char *prog_name,
+ const char *license, struct bpf_insn *insns, size_t insn_cnt,
+ struct bpf_prog_load_opts *load_attr, int prog_idx)
+{
+ int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
+ int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: prog_load: type %d insns_cnt %zd progi_idx %d\n",
+ prog_type, insn_cnt, prog_idx);
+ /* add license string to blob of bytes */
+ license_off = add_data(gen, license, strlen(license) + 1);
+ /* add insns to blob of bytes */
+ insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
+
+ attr.prog_type = prog_type;
+ attr.expected_attach_type = load_attr->expected_attach_type;
+ attr.attach_btf_id = load_attr->attach_btf_id;
+ attr.prog_ifindex = load_attr->prog_ifindex;
+ attr.kern_version = 0;
+ attr.insn_cnt = (__u32)insn_cnt;
+ attr.prog_flags = load_attr->prog_flags;
+
+ attr.func_info_rec_size = load_attr->func_info_rec_size;
+ attr.func_info_cnt = load_attr->func_info_cnt;
+ func_info = add_data(gen, load_attr->func_info,
+ attr.func_info_cnt * attr.func_info_rec_size);
+
+ attr.line_info_rec_size = load_attr->line_info_rec_size;
+ attr.line_info_cnt = load_attr->line_info_cnt;
+ line_info = add_data(gen, load_attr->line_info,
+ attr.line_info_cnt * attr.line_info_rec_size);
+
+ attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
+ attr.core_relo_cnt = gen->core_relo_cnt;
+ core_relos = add_data(gen, gen->core_relos,
+ attr.core_relo_cnt * attr.core_relo_rec_size);
+
+ libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
+ prog_load_attr = add_data(gen, &attr, attr_size);
+
+ /* populate union bpf_attr with a pointer to license */
+ emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
+
+ /* populate union bpf_attr with a pointer to instructions */
+ emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);
+
+ /* populate union bpf_attr with a pointer to func_info */
+ emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);
+
+ /* populate union bpf_attr with a pointer to line_info */
+ emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);
+
+ /* populate union bpf_attr with a pointer to core_relos */
+ emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);
+
+ /* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
+ emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);
+
+ /* populate union bpf_attr with user provided log details */
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
+ offsetof(struct bpf_loader_ctx, log_level), false);
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
+ offsetof(struct bpf_loader_ctx, log_size), false);
+ move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
+ offsetof(struct bpf_loader_ctx, log_buf), false);
+ /* populate union bpf_attr with btf_fd saved in the stack earlier */
+ move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
+ stack_off(btf_fd));
+ if (gen->attach_kind) {
+ emit_find_attach_target(gen);
+ /* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, prog_load_attr));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
+ offsetof(union bpf_attr, attach_btf_id)));
+ emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
+ offsetof(union bpf_attr, attach_btf_obj_fd)));
+ }
+ emit_relos(gen, insns_off);
+ /* emit PROG_LOAD command */
+ emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
+ debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
+ /* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
+ cleanup_relos(gen, insns_off);
+ if (gen->attach_kind) {
+ emit_sys_close_blob(gen,
+ attr_field(prog_load_attr, attach_btf_obj_fd));
+ gen->attach_kind = 0;
+ }
+ emit_check_err(gen);
+ /* remember prog_fd in the stack, if successful */
+ emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
+ stack_off(prog_fd[gen->nr_progs])));
+ gen->nr_progs++;
+}
+
+void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
+ __u32 value_size)
+{
+ int attr_size = offsetofend(union bpf_attr, flags);
+ int map_update_attr, value, key;
+ union bpf_attr attr;
+ int zero = 0;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: map_update_elem: idx %d\n", map_idx);
+
+ value = add_data(gen, pvalue, value_size);
+ key = add_data(gen, &zero, sizeof(zero));
+
+ /* if (map_desc[map_idx].initial_value) {
+ * if (ctx->flags & BPF_SKEL_KERNEL)
+ * bpf_probe_read_kernel(value, value_size, initial_value);
+ * else
+ * bpf_copy_from_user(value, value_size, initial_value);
+ * }
+ */
+ emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
+ sizeof(struct bpf_loader_ctx) +
+ sizeof(struct bpf_map_desc) * map_idx +
+ offsetof(struct bpf_map_desc, initial_value)));
+ emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
+ emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
+ 0, 0, 0, value));
+ emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
+ emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
+ offsetof(struct bpf_loader_ctx, flags)));
+ emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
+ emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
+ emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
+
+ map_update_attr = add_data(gen, &attr, attr_size);
+ move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
+ blob_fd_array_off(gen, map_idx));
+ emit_rel_store(gen, attr_field(map_update_attr, key), key);
+ emit_rel_store(gen, attr_field(map_update_attr, value), value);
+ /* emit MAP_UPDATE_ELEM command */
+ emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
+ debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
+ emit_check_err(gen);
+}
+
+void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
+ int inner_map_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, flags);
+ int map_update_attr, key;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
+ outer_map_idx, slot, inner_map_idx);
+
+ key = add_data(gen, &slot, sizeof(slot));
+
+ map_update_attr = add_data(gen, &attr, attr_size);
+ move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
+ blob_fd_array_off(gen, outer_map_idx));
+ emit_rel_store(gen, attr_field(map_update_attr, key), key);
+ emit_rel_store(gen, attr_field(map_update_attr, value),
+ blob_fd_array_off(gen, inner_map_idx));
+
+ /* emit MAP_UPDATE_ELEM command */
+ emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
+ debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
+ outer_map_idx, slot, inner_map_idx);
+ emit_check_err(gen);
+}
+
+void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
+{
+ int attr_size = offsetofend(union bpf_attr, map_fd);
+ int map_freeze_attr;
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_size);
+ pr_debug("gen: map_freeze: idx %d\n", map_idx);
+ map_freeze_attr = add_data(gen, &attr, attr_size);
+ move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
+ blob_fd_array_off(gen, map_idx));
+ /* emit MAP_FREEZE command */
+ emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
+ debug_ret(gen, "map_freeze");
+ emit_check_err(gen);
+}
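Every bpf_gen__*() helper above follows the same shape: size the command-specific part of union bpf_attr with offsetofend(), stash a zeroed copy in the loader blob with add_data(), patch in the relocated FDs and pointers, then emit_sys_bpf() the command and emit_check_err(). Below is a minimal user-space sketch (not part of this patch) of the same attr-sizing idea, issuing BPF_MAP_FREEZE directly through the bpf(2) syscall; the map FD value is a placeholder, and offsetofend() is redefined locally because it is a tools-tree helper, not a standard header macro.

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* local copy of the tools-tree helper: offset just past a struct member */
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

static int map_freeze(int map_fd)
{
	/* BPF_MAP_FREEZE consumes union bpf_attr only up to and including
	 * map_fd, so pass exactly that many bytes, the same way
	 * bpf_gen__map_freeze() sizes its generated command
	 */
	const size_t attr_size = offsetofend(union bpf_attr, map_fd);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_fd = map_fd;

	return syscall(__NR_bpf, BPF_MAP_FREEZE, &attr, attr_size) ? -errno : 0;
}

int main(void)
{
	/* placeholder FD; a real caller would use one returned by
	 * BPF_MAP_CREATE or bpf_map__fd()
	 */
	int err = map_freeze(3);

	printf("BPF_MAP_FREEZE: %s\n", err ? strerror(-err) : "ok");
	return 0;
}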
diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c
index 54c30c802070..140ee4055676 100644
--- a/tools/lib/bpf/hashmap.c
+++ b/tools/lib/bpf/hashmap.c
@@ -15,6 +15,9 @@
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+/* prevent accidental re-addition of reallocarray() */
+#pragma GCC poison reallocarray
+
/* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2
@@ -59,13 +62,20 @@ struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
void hashmap__clear(struct hashmap *map)
{
+ struct hashmap_entry *cur, *tmp;
+ size_t bkt;
+
+ hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
+ free(cur);
+ }
free(map->buckets);
+ map->buckets = NULL;
map->cap = map->cap_bits = map->sz = 0;
}
void hashmap__free(struct hashmap *map)
{
- if (!map)
+ if (IS_ERR_OR_NULL(map))
return;
hashmap__clear(map);
@@ -93,8 +103,7 @@ static int hashmap_grow(struct hashmap *map)
struct hashmap_entry **new_buckets;
struct hashmap_entry *cur, *tmp;
size_t new_cap_bits, new_cap;
- size_t h;
- int bkt;
+ size_t h, bkt;
new_cap_bits = map->cap_bits + 1;
if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
@@ -119,7 +128,7 @@ static int hashmap_grow(struct hashmap *map)
}
static bool hashmap_find_entry(const struct hashmap *map,
- const void *key, size_t hash,
+ const long key, size_t hash,
struct hashmap_entry ***pprev,
struct hashmap_entry **entry)
{
@@ -142,18 +151,18 @@ static bool hashmap_find_entry(const struct hashmap *map,
return false;
}
-int hashmap__insert(struct hashmap *map, const void *key, void *value,
- enum hashmap_insert_strategy strategy,
- const void **old_key, void **old_value)
+int hashmap_insert(struct hashmap *map, long key, long value,
+ enum hashmap_insert_strategy strategy,
+ long *old_key, long *old_value)
{
struct hashmap_entry *entry;
size_t h;
int err;
if (old_key)
- *old_key = NULL;
+ *old_key = 0;
if (old_value)
- *old_value = NULL;
+ *old_value = 0;
h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
if (strategy != HASHMAP_APPEND &&
@@ -194,7 +203,7 @@ int hashmap__insert(struct hashmap *map, const void *key, void *value,
return 0;
}
-bool hashmap__find(const struct hashmap *map, const void *key, void **value)
+bool hashmap_find(const struct hashmap *map, long key, long *value)
{
struct hashmap_entry *entry;
size_t h;
@@ -208,8 +217,8 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value)
return true;
}
-bool hashmap__delete(struct hashmap *map, const void *key,
- const void **old_key, void **old_value)
+bool hashmap_delete(struct hashmap *map, long key,
+ long *old_key, long *old_value)
{
struct hashmap_entry **pprev, *entry;
size_t h;
@@ -229,4 +238,3 @@ bool hashmap__delete(struct hashmap *map, const void *key,
return true;
}
-
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
index bae8879cdf58..0a5bf1937a7c 100644
--- a/tools/lib/bpf/hashmap.h
+++ b/tools/lib/bpf/hashmap.h
@@ -10,25 +10,62 @@
#include <stdbool.h>
#include <stddef.h>
-#ifdef __GLIBC__
-#include <bits/wordsize.h>
-#else
-#include <bits/reg.h>
-#endif
-#include "libbpf_internal.h"
+#include <limits.h>
static inline size_t hash_bits(size_t h, int bits)
{
/* shuffle bits and return requested number of upper bits */
- return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
+ if (bits == 0)
+ return 0;
+
+#if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
+ /* LP64 case */
+ return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
+#elif (__SIZEOF_SIZE_T__ <= __SIZEOF_LONG__)
+ return (h * 2654435769lu) >> (__SIZEOF_LONG__ * 8 - bits);
+#else
+# error "Unsupported size_t size"
+#endif
}
-typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
-typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);
+/* generic C-string hashing function */
+static inline size_t str_hash(const char *s)
+{
+ size_t h = 0;
+ while (*s) {
+ h = h * 31 + *s;
+ s++;
+ }
+ return h;
+}
+
+typedef size_t (*hashmap_hash_fn)(long key, void *ctx);
+typedef bool (*hashmap_equal_fn)(long key1, long key2, void *ctx);
+
+/*
+ * The hashmap interface is polymorphic: keys and values can be either
+ * long-sized integers or pointers. This is achieved as follows:
+ * - interface functions that operate on keys and values are hidden
+ * behind auxiliary macros, e.g. hashmap_insert <-> hashmap__insert;
+ * - these auxiliary macros cast the key and value parameters as
+ * long or long *, so the user does not have to specify the casts explicitly;
+ * - for pointer parameters (e.g. old_key) the size of the pointed-to
+ * type is verified by hashmap_cast_ptr using _Static_assert;
+ * - when iterating using the hashmap__for_each_* forms,
+ * hashmap_entry->key should be used for integer keys and
+ * hashmap_entry->pkey should be used for pointer keys;
+ * the same goes for values.
+ */
struct hashmap_entry {
- const void *key;
- void *value;
+ union {
+ long key;
+ const void *pkey;
+ };
+ union {
+ long value;
+ void *pvalue;
+ };
struct hashmap_entry *next;
};
@@ -85,6 +122,13 @@ enum hashmap_insert_strategy {
HASHMAP_APPEND,
};
+#define hashmap_cast_ptr(p) ({ \
+ _Static_assert((__builtin_constant_p((p)) ? (p) == NULL : 0) || \
+ sizeof(*(p)) == sizeof(long), \
+ #p " pointee should be a long-sized integer or a pointer"); \
+ (long *)(p); \
+})
+
/*
* hashmap__insert() adds key/value entry w/ various semantics, depending on
* provided strategy value. If a given key/value pair replaced already
@@ -92,42 +136,38 @@ enum hashmap_insert_strategy {
* through old_key and old_value to allow calling code do proper memory
* management.
*/
-int hashmap__insert(struct hashmap *map, const void *key, void *value,
- enum hashmap_insert_strategy strategy,
- const void **old_key, void **old_value);
+int hashmap_insert(struct hashmap *map, long key, long value,
+ enum hashmap_insert_strategy strategy,
+ long *old_key, long *old_value);
-static inline int hashmap__add(struct hashmap *map,
- const void *key, void *value)
-{
- return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL);
-}
+#define hashmap__insert(map, key, value, strategy, old_key, old_value) \
+ hashmap_insert((map), (long)(key), (long)(value), (strategy), \
+ hashmap_cast_ptr(old_key), \
+ hashmap_cast_ptr(old_value))
-static inline int hashmap__set(struct hashmap *map,
- const void *key, void *value,
- const void **old_key, void **old_value)
-{
- return hashmap__insert(map, key, value, HASHMAP_SET,
- old_key, old_value);
-}
+#define hashmap__add(map, key, value) \
+ hashmap__insert((map), (key), (value), HASHMAP_ADD, NULL, NULL)
-static inline int hashmap__update(struct hashmap *map,
- const void *key, void *value,
- const void **old_key, void **old_value)
-{
- return hashmap__insert(map, key, value, HASHMAP_UPDATE,
- old_key, old_value);
-}
+#define hashmap__set(map, key, value, old_key, old_value) \
+ hashmap__insert((map), (key), (value), HASHMAP_SET, (old_key), (old_value))
-static inline int hashmap__append(struct hashmap *map,
- const void *key, void *value)
-{
- return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL);
-}
+#define hashmap__update(map, key, value, old_key, old_value) \
+ hashmap__insert((map), (key), (value), HASHMAP_UPDATE, (old_key), (old_value))
+
+#define hashmap__append(map, key, value) \
+ hashmap__insert((map), (key), (value), HASHMAP_APPEND, NULL, NULL)
+
+bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value);
+
+#define hashmap__delete(map, key, old_key, old_value) \
+ hashmap_delete((map), (long)(key), \
+ hashmap_cast_ptr(old_key), \
+ hashmap_cast_ptr(old_value))
-bool hashmap__delete(struct hashmap *map, const void *key,
- const void **old_key, void **old_value);
+bool hashmap_find(const struct hashmap *map, long key, long *value);
-bool hashmap__find(const struct hashmap *map, const void *key, void **value);
+#define hashmap__find(map, key, value) \
+ hashmap_find((map), (long)(key), hashmap_cast_ptr(value))
/*
* hashmap__for_each_entry - iterate over all entries in hashmap
@@ -160,17 +200,17 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value);
* @key: key to iterate entries for
*/
#define hashmap__for_each_key_entry(map, cur, _key) \
- for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
- map->cap_bits); \
- map->buckets ? map->buckets[bkt] : NULL; }); \
+ for (cur = map->buckets \
+ ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+ : NULL; \
cur; \
cur = cur->next) \
if (map->equal_fn(cur->key, (_key), map->ctx))
#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
- for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
- map->cap_bits); \
- cur = map->buckets ? map->buckets[bkt] : NULL; }); \
+ for (cur = map->buckets \
+ ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+ : NULL; \
cur && ({ tmp = cur->next; true; }); \
cur = tmp) \
if (map->equal_fn(cur->key, (_key), map->ctx))
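The comment block in hashmap.h above describes the new polymorphic key/value scheme. A short, hypothetical consumer of this internal header might look like the sketch below; it assumes it is built inside the tools/ tree, so that hashmap.h and the tools copy of <linux/err.h> (for IS_ERR()) are on the include path, and the word list is purely illustrative. Note also that, with the hashmap.c change above, hashmap__free() now tolerates ERR_PTR()-encoded pointers, so a shared cleanup path may call it even for a map that failed to allocate.

#include <stdio.h>
#include <string.h>
#include <linux/err.h>
#include "hashmap.h"

/* hash/equality callbacks now take long-typed keys instead of void * */
static size_t str_hash_fn(long key, void *ctx)
{
	return str_hash((const char *)key);
}

static bool str_equal_fn(long k1, long k2, void *ctx)
{
	return strcmp((const char *)k1, (const char *)k2) == 0;
}

int main(void)
{
	const char *words[] = { "bpf", "map", "bpf", "prog", "bpf" };
	struct hashmap_entry *cur;
	struct hashmap *counts;
	size_t bkt, i;
	long cnt;

	counts = hashmap__new(str_hash_fn, str_equal_fn, NULL);
	if (IS_ERR(counts))
		return 1;

	for (i = 0; i < 5; i++) {
		/* keys are pointers, values are plain longs; the macros
		 * insert the (long) casts checked by hashmap_cast_ptr()
		 */
		if (hashmap__find(counts, words[i], &cnt))
			hashmap__set(counts, words[i], cnt + 1, NULL, NULL);
		else
			hashmap__add(counts, words[i], 1);
	}

	/* pointer keys are read back via entry->pkey, integers via entry->value */
	hashmap__for_each_entry(counts, cur, bkt)
		printf("%s: %ld\n", (const char *)cur->pkey, cur->value);

	hashmap__free(counts);
	return 0;
}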
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index ff9174282a8c..ad1ec893b41b 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -31,11 +31,9 @@
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
-#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
-#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -44,7 +42,6 @@
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
-#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>
@@ -55,18 +52,15 @@
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
-#ifndef EM_BPF
-#define EM_BPF 247
-#endif
+#include "bpf_gen_internal.h"
+#include "zip.h"
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif
+#define BPF_INSN_SZ (sizeof(struct bpf_insn))
+
/* vsprintf() in __base_pr() uses nonliteral format string. It may break
* compilation if user enables corresponding warning. Disable it explicitly.
*/
@@ -75,10 +69,141 @@
#define __printf(a, b) __attribute__((format(printf, a, b)))
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
-static struct bpf_program *bpf_object__find_prog_by_idx(struct bpf_object *obj,
- int idx);
-static const struct btf_type *
-skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
+static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
+
+static const char * const attach_type_name[] = {
+ [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
+ [BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
+ [BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
+ [BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
+ [BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
+ [BPF_CGROUP_DEVICE] = "cgroup_device",
+ [BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
+ [BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
+ [BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
+ [BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
+ [BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
+ [BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
+ [BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
+ [BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
+ [BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
+ [BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
+ [BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
+ [BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
+ [BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
+ [BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
+ [BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
+ [BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
+ [BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
+ [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
+ [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
+ [BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
+ [BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
+ [BPF_LIRC_MODE2] = "lirc_mode2",
+ [BPF_FLOW_DISSECTOR] = "flow_dissector",
+ [BPF_TRACE_RAW_TP] = "trace_raw_tp",
+ [BPF_TRACE_FENTRY] = "trace_fentry",
+ [BPF_TRACE_FEXIT] = "trace_fexit",
+ [BPF_MODIFY_RETURN] = "modify_return",
+ [BPF_LSM_MAC] = "lsm_mac",
+ [BPF_LSM_CGROUP] = "lsm_cgroup",
+ [BPF_SK_LOOKUP] = "sk_lookup",
+ [BPF_TRACE_ITER] = "trace_iter",
+ [BPF_XDP_DEVMAP] = "xdp_devmap",
+ [BPF_XDP_CPUMAP] = "xdp_cpumap",
+ [BPF_XDP] = "xdp",
+ [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
+ [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
+ [BPF_PERF_EVENT] = "perf_event",
+ [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
+ [BPF_STRUCT_OPS] = "struct_ops",
+};
+
+static const char * const link_type_name[] = {
+ [BPF_LINK_TYPE_UNSPEC] = "unspec",
+ [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
+ [BPF_LINK_TYPE_TRACING] = "tracing",
+ [BPF_LINK_TYPE_CGROUP] = "cgroup",
+ [BPF_LINK_TYPE_ITER] = "iter",
+ [BPF_LINK_TYPE_NETNS] = "netns",
+ [BPF_LINK_TYPE_XDP] = "xdp",
+ [BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
+ [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
+ [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_LINK_TYPE_NETFILTER] = "netfilter",
+};
+
+static const char * const map_type_name[] = {
+ [BPF_MAP_TYPE_UNSPEC] = "unspec",
+ [BPF_MAP_TYPE_HASH] = "hash",
+ [BPF_MAP_TYPE_ARRAY] = "array",
+ [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
+ [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
+ [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
+ [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
+ [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
+ [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
+ [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
+ [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
+ [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
+ [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
+ [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
+ [BPF_MAP_TYPE_DEVMAP] = "devmap",
+ [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
+ [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
+ [BPF_MAP_TYPE_CPUMAP] = "cpumap",
+ [BPF_MAP_TYPE_XSKMAP] = "xskmap",
+ [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
+ [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
+ [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
+ [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
+ [BPF_MAP_TYPE_QUEUE] = "queue",
+ [BPF_MAP_TYPE_STACK] = "stack",
+ [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
+ [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
+ [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
+ [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
+ [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
+ [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
+ [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
+};
+
+static const char * const prog_type_name[] = {
+ [BPF_PROG_TYPE_UNSPEC] = "unspec",
+ [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
+ [BPF_PROG_TYPE_KPROBE] = "kprobe",
+ [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
+ [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
+ [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
+ [BPF_PROG_TYPE_XDP] = "xdp",
+ [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
+ [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
+ [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
+ [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
+ [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
+ [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
+ [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
+ [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
+ [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
+ [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
+ [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
+ [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
+ [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
+ [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
+ [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
+ [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
+ [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
+ [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
+ [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
+ [BPF_PROG_TYPE_TRACING] = "tracing",
+ [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
+ [BPF_PROG_TYPE_EXT] = "ext",
+ [BPF_PROG_TYPE_LSM] = "lsm",
+ [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
+ [BPF_PROG_TYPE_SYSCALL] = "syscall",
+ [BPF_PROG_TYPE_NETFILTER] = "netfilter",
+};
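In current libbpf these string tables are what the public libbpf_bpf_prog_type_str(), libbpf_bpf_map_type_str(), libbpf_bpf_attach_type_str() and libbpf_bpf_link_type_str() helpers return entries from. A small sketch of that lookup, assuming libbpf headers are installed as <bpf/libbpf.h>:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	/* each helper returns a pointer into the corresponding table above,
	 * or NULL for enum values this libbpf build does not know about
	 */
	printf("prog type %d   -> %s\n", BPF_PROG_TYPE_XDP,
	       libbpf_bpf_prog_type_str(BPF_PROG_TYPE_XDP));
	printf("map type %d    -> %s\n", BPF_MAP_TYPE_RINGBUF,
	       libbpf_bpf_map_type_str(BPF_MAP_TYPE_RINGBUF));
	printf("attach type %d -> %s\n", BPF_TRACE_FENTRY,
	       libbpf_bpf_attach_type_str(BPF_TRACE_FENTRY));
	printf("link type %d   -> %s\n", BPF_LINK_TYPE_XDP,
	       libbpf_bpf_link_type_str(BPF_LINK_TYPE_XDP));
	return 0;
}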
static int __base_pr(enum libbpf_print_level level, const char *format,
va_list args)
@@ -93,9 +218,10 @@ static libbpf_print_fn_t __libbpf_pr = __base_pr;
libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
- libbpf_print_fn_t old_print_fn = __libbpf_pr;
+ libbpf_print_fn_t old_print_fn;
+
+ old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);
- __libbpf_pr = fn;
return old_print_fn;
}
@@ -103,13 +229,20 @@ __printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
va_list args;
+ int old_errno;
+ libbpf_print_fn_t print_fn;
- if (!__libbpf_pr)
+ print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
+ if (!print_fn)
return;
+ old_errno = errno;
+
va_start(args, format);
__libbpf_pr(level, format, args);
va_end(args);
+
+ errno = old_errno;
}
static void pr_perm_msg(int err)
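With the two hunks above, libbpf_set_print() swaps the callback atomically and libbpf_print() preserves the caller's errno across the callback. A hedged sketch of the intended usage, filtering out debug-level output through the public API (quiet_print is an illustrative name):

#include <stdarg.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* print callback with the signature libbpf expects; drop DEBUG messages */
static int quiet_print(enum libbpf_print_level level, const char *format,
		       va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;
	return vfprintf(stderr, format, args);
}

int main(void)
{
	libbpf_print_fn_t old_fn;

	/* returns the previously installed callback, so it can be restored */
	old_fn = libbpf_set_print(quiet_print);

	/* ... open/load BPF objects here; libbpf warnings go through quiet_print ... */

	libbpf_set_print(old_fn);
	return 0;
}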
@@ -154,44 +287,92 @@ static void pr_perm_msg(int err)
___err; })
#endif
-#ifdef HAVE_LIBELF_MMAP_SUPPORT
-# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
-#else
-# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
-#endif
-
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
}
-struct bpf_capabilities {
- /* v4.14: kernel support for program & map names. */
- __u32 name:1;
- /* v5.2: kernel support for global data sections. */
- __u32 global_data:1;
- /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
- __u32 btf_func:1;
- /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
- __u32 btf_datasec:1;
- /* BPF_F_MMAPABLE is supported for arrays */
- __u32 array_mmap:1;
- /* BTF_FUNC_GLOBAL is supported */
- __u32 btf_func_global:1;
-};
+int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
+{
+ /* as of v1.0 libbpf_set_strict_mode() is a no-op */
+ return 0;
+}
+
+__u32 libbpf_major_version(void)
+{
+ return LIBBPF_MAJOR_VERSION;
+}
+
+__u32 libbpf_minor_version(void)
+{
+ return LIBBPF_MINOR_VERSION;
+}
+
+const char *libbpf_version_string(void)
+{
+#define __S(X) #X
+#define _S(X) __S(X)
+ return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
+#undef _S
+#undef __S
+}
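The version helpers above complement the compile-time LIBBPF_MAJOR_VERSION/LIBBPF_MINOR_VERSION macros; comparing the two catches header/library mismatches. A short sketch, assuming libbpf is installed with its bpf/libbpf_version.h header:

#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_version.h>

int main(void)
{
	/* version the program was compiled against (macros from the headers) */
	printf("built against libbpf %d.%d\n",
	       LIBBPF_MAJOR_VERSION, LIBBPF_MINOR_VERSION);
	/* version of the libbpf actually linked/loaded at run time */
	printf("running with libbpf %u.%u (%s)\n",
	       libbpf_major_version(), libbpf_minor_version(),
	       libbpf_version_string());
	return 0;
}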
enum reloc_type {
RELO_LD64,
RELO_CALL,
RELO_DATA,
- RELO_EXTERN,
+ RELO_EXTERN_LD64,
+ RELO_EXTERN_CALL,
+ RELO_SUBPROG_ADDR,
+ RELO_CORE,
};
struct reloc_desc {
enum reloc_type type;
int insn_idx;
- int map_idx;
- int sym_off;
+ union {
+ const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
+ struct {
+ int map_idx;
+ int sym_off;
+ int ext_idx;
+ };
+ };
+};
+
+/* stored as sec_def->cookie for all libbpf-supported SEC()s */
+enum sec_def_flags {
+ SEC_NONE = 0,
+ /* expected_attach_type is optional, if the kernel doesn't support it */
+ SEC_EXP_ATTACH_OPT = 1,
+ /* legacy, only used by libbpf_get_type_names() and
+ * libbpf_attach_type_by_name(), not used by libbpf itself at all.
+ * This used to be associated with cgroup (and a few other) BPF programs
+ * that were attachable through the BPF_PROG_ATTACH command. Pretty
+ * meaningless nowadays, though.
+ */
+ SEC_ATTACHABLE = 2,
+ SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
+ /* attachment target is specified through BTF ID in either kernel or
+ * other BPF program's BTF object
+ */
+ SEC_ATTACH_BTF = 4,
+ /* BPF program type allows sleeping/blocking in kernel */
+ SEC_SLEEPABLE = 8,
+ /* BPF program supports non-linear XDP buffers */
+ SEC_XDP_FRAGS = 16,
+};
+
+struct bpf_sec_def {
+ char *sec;
+ enum bpf_prog_type prog_type;
+ enum bpf_attach_type expected_attach_type;
+ long cookie;
+ int handler_id;
+
+ libbpf_prog_setup_fn_t prog_setup_fn;
+ libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
+ libbpf_prog_attach_fn_t prog_attach_fn;
};
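The sec_def_flags above are derived from the SEC() name a BPF program is placed in. A minimal BPF-side sketch (BPF C, compiled with clang -target bpf against installed libbpf headers) of two of the conventions these flags encode; as the comment in bpf_object__init_prog() further down also notes, a leading '?' only changes the initial autoload state:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* "xdp.frags" maps to SEC_XDP_FRAGS: the program declares support for
 * non-linear (multi-buffer) XDP frames
 */
SEC("xdp.frags")
int xdp_main(struct xdp_md *ctx)
{
	return XDP_PASS;
}

/* '?' prefix: program starts with autoload disabled; user space can opt it
 * in with bpf_program__set_autoload() before bpf_object__load()
 */
SEC("?xdp")
int xdp_optional(struct xdp_md *ctx)
{
	return XDP_DROP;
}

char LICENSE[] SEC("license") = "GPL";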
/*
@@ -199,42 +380,67 @@ struct reloc_desc {
* linux/filter.h.
*/
struct bpf_program {
- /* Index in elf obj file, for relocation use. */
- int idx;
char *name;
- int prog_ifindex;
- char *section_name;
- /* section_name with / replaced by _; makes recursive pinning
- * in bpf_object__pin_programs easier
+ char *sec_name;
+ size_t sec_idx;
+ const struct bpf_sec_def *sec_def;
+ /* this program's instruction offset (in number of instructions)
+ * within its containing ELF section
+ */
+ size_t sec_insn_off;
+ /* number of original instructions in ELF section belonging to this
+ * program, not taking into account subprogram instructions possibly
+ * appended later during relocation
+ */
+ size_t sec_insn_cnt;
+ /* Offset (in number of instructions) of the start of instructions
+ * belonging to this BPF program within its containing main BPF
+ * program. For the entry-point (main) BPF program, this is always
+ * zero. For a sub-program, this gets reset before each of the main BPF
+ * programs is processed and relocated and is used to determine
+ * whether the sub-program was already appended to the main program, and
+ * if yes, at which instruction offset.
+ */
+ size_t sub_insn_off;
+
+ /* instructions that belong to BPF program; insns[0] is located at
+ * sec_insn_off instruction within its ELF section in ELF file, so
+ * when mapping ELF file instruction index to the local instruction,
+ * one needs to subtract sec_insn_off; and vice versa.
*/
- char *pin_name;
struct bpf_insn *insns;
- size_t insns_cnt, main_prog_cnt;
- enum bpf_prog_type type;
+ /* actual number of instructions in this BPF program's image; for
+ * entry-point BPF programs this includes the size of the main program
+ * itself plus all the used sub-programs, appended at the end
+ */
+ size_t insns_cnt;
struct reloc_desc *reloc_desc;
int nr_reloc;
- int log_level;
- struct {
- int nr;
- int *fds;
- } instances;
- bpf_program_prep_t preprocessor;
+ /* BPF verifier log settings */
+ char *log_buf;
+ size_t log_size;
+ __u32 log_level;
struct bpf_object *obj;
- void *priv;
- bpf_program_clear_priv_t clear_priv;
+ int fd;
+ bool autoload;
+ bool autoattach;
+ bool mark_btf_static;
+ enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
+
+ int prog_ifindex;
+ __u32 attach_btf_obj_fd;
__u32 attach_btf_id;
__u32 attach_prog_fd;
+
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_cnt;
- struct bpf_capabilities *caps;
-
void *line_info;
__u32 line_info_rec_size;
__u32 line_info_cnt;
@@ -266,7 +472,9 @@ struct bpf_struct_ops {
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
+#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
+#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
enum libbpf_map_type {
LIBBPF_MAP_UNSPEC,
@@ -276,57 +484,140 @@ enum libbpf_map_type {
LIBBPF_MAP_KCONFIG,
};
-static const char * const libbpf_type_to_btf_name[] = {
- [LIBBPF_MAP_DATA] = DATA_SEC,
- [LIBBPF_MAP_BSS] = BSS_SEC,
- [LIBBPF_MAP_RODATA] = RODATA_SEC,
- [LIBBPF_MAP_KCONFIG] = KCONFIG_SEC,
+struct bpf_map_def {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+ unsigned int map_flags;
};
struct bpf_map {
+ struct bpf_object *obj;
char *name;
+ /* real_name is defined for special internal maps (.rodata*,
+ * .data*, .bss, .kconfig) and preserves their original ELF section
+ * name. This is important to be able to find corresponding BTF
+ * DATASEC information.
+ */
+ char *real_name;
int fd;
int sec_idx;
size_t sec_offset;
int map_ifindex;
int inner_map_fd;
struct bpf_map_def def;
+ __u32 numa_node;
+ __u32 btf_var_idx;
__u32 btf_key_type_id;
__u32 btf_value_type_id;
__u32 btf_vmlinux_value_type_id;
- void *priv;
- bpf_map_clear_priv_t clear_priv;
enum libbpf_map_type libbpf_type;
void *mmaped;
struct bpf_struct_ops *st_ops;
+ struct bpf_map *inner_map;
+ void **init_slots;
+ int init_slots_sz;
char *pin_path;
bool pinned;
bool reused;
+ bool autocreate;
+ __u64 map_extra;
};
enum extern_type {
EXT_UNKNOWN,
- EXT_CHAR,
- EXT_BOOL,
- EXT_INT,
- EXT_TRISTATE,
- EXT_CHAR_ARR,
+ EXT_KCFG,
+ EXT_KSYM,
+};
+
+enum kcfg_type {
+ KCFG_UNKNOWN,
+ KCFG_CHAR,
+ KCFG_BOOL,
+ KCFG_INT,
+ KCFG_TRISTATE,
+ KCFG_CHAR_ARR,
};
struct extern_desc {
- const char *name;
+ enum extern_type type;
int sym_idx;
int btf_id;
- enum extern_type type;
- int sz;
- int align;
- int data_off;
- bool is_signed;
- bool is_weak;
+ int sec_btf_id;
+ const char *name;
bool is_set;
+ bool is_weak;
+ union {
+ struct {
+ enum kcfg_type type;
+ int sz;
+ int align;
+ int data_off;
+ bool is_signed;
+ } kcfg;
+ struct {
+ unsigned long long addr;
+
+ /* target btf_id of the corresponding kernel var. */
+ int kernel_btf_obj_fd;
+ int kernel_btf_id;
+
+ /* local btf_id of the ksym extern's type. */
+ __u32 type_id;
+ /* BTF fd index to be patched in for insn->off, this is
+ * 0 for vmlinux BTF, index in obj->fd_array for module
+ * BTF
+ */
+ __s16 btf_fd_idx;
+ } ksym;
+ };
+};
+
+struct module_btf {
+ struct btf *btf;
+ char *name;
+ __u32 id;
+ int fd;
+ int fd_array_idx;
};
-static LIST_HEAD(bpf_objects_list);
+enum sec_type {
+ SEC_UNUSED = 0,
+ SEC_RELO,
+ SEC_BSS,
+ SEC_DATA,
+ SEC_RODATA,
+};
+
+struct elf_sec_desc {
+ enum sec_type sec_type;
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+};
+
+struct elf_state {
+ int fd;
+ const void *obj_buf;
+ size_t obj_buf_sz;
+ Elf *elf;
+ Elf64_Ehdr *ehdr;
+ Elf_Data *symbols;
+ Elf_Data *st_ops_data;
+ Elf_Data *st_ops_link_data;
+ size_t shstrndx; /* section index for section name strings */
+ size_t strtabidx;
+ struct elf_sec_desc *secs;
+ size_t sec_cnt;
+ int btf_maps_shndx;
+ __u32 btf_maps_sec_btf_id;
+ int text_shndx;
+ int symbols_shndx;
+ int st_ops_shndx;
+ int st_ops_link_shndx;
+};
+
+struct usdt_manager;
struct bpf_object {
char name[BPF_OBJ_NAME_LEN];
@@ -345,82 +636,63 @@ struct bpf_object {
int kconfig_map_idx;
bool loaded;
- bool has_pseudo_calls;
+ bool has_subcalls;
+ bool has_rodata;
- /*
- * Information when doing elf related work. Only valid if fd
- * is valid.
- */
- struct {
- int fd;
- const void *obj_buf;
- size_t obj_buf_sz;
- Elf *elf;
- GElf_Ehdr ehdr;
- Elf_Data *symbols;
- Elf_Data *data;
- Elf_Data *rodata;
- Elf_Data *bss;
- Elf_Data *st_ops_data;
- size_t strtabidx;
- struct {
- GElf_Shdr shdr;
- Elf_Data *data;
- } *reloc_sects;
- int nr_reloc_sects;
- int maps_shndx;
- int btf_maps_shndx;
- int text_shndx;
- int symbols_shndx;
- int data_shndx;
- int rodata_shndx;
- int bss_shndx;
- int st_ops_shndx;
- } efile;
- /*
- * All loaded bpf_object is linked in a list, which is
- * hidden to caller. bpf_objects__<func> handlers deal with
- * all objects.
- */
- struct list_head list;
+ struct bpf_gen *gen_loader;
+
+ /* Information when doing ELF related work. Only valid if efile.elf is not NULL */
+ struct elf_state efile;
struct btf *btf;
+ struct btf_ext *btf_ext;
+
/* Parse and load BTF vmlinux if any of the programs in the object need
* it at load time.
*/
struct btf *btf_vmlinux;
- struct btf_ext *btf_ext;
-
- void *priv;
- bpf_object_clear_priv_t clear_priv;
-
- struct bpf_capabilities caps;
+ /* Path to the custom BTF to be used for BPF CO-RE relocations as an
+ * override for vmlinux BTF.
+ */
+ char *btf_custom_path;
+ /* vmlinux BTF override for CO-RE relocations */
+ struct btf *btf_vmlinux_override;
+ /* Lazily initialized kernel module BTFs */
+ struct module_btf *btf_modules;
+ bool btf_modules_loaded;
+ size_t btf_module_cnt;
+ size_t btf_module_cap;
+
+ /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
+ char *log_buf;
+ size_t log_size;
+ __u32 log_level;
+
+ int *fd_array;
+ size_t fd_array_cap;
+ size_t fd_array_cnt;
+
+ struct usdt_manager *usdt_man;
char path[];
};
-#define obj_elf_valid(o) ((o)->efile.elf)
+
+static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
+static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
+static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
+static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
+static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
+static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
+static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
+static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
+static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
void bpf_program__unload(struct bpf_program *prog)
{
- int i;
-
if (!prog)
return;
- /*
- * If the object is opened but the program was never loaded,
- * it is possible that prog->instances.nr == -1.
- */
- if (prog->instances.nr > 0) {
- for (i = 0; i < prog->instances.nr; i++)
- zclose(prog->instances.fds[i]);
- } else if (prog->instances.nr != -1) {
- pr_warn("Internal error: instances.nr is %d\n",
- prog->instances.nr);
- }
-
- prog->instances.nr = -1;
- zfree(&prog->instances.fds);
+ zclose(prog->fd);
zfree(&prog->func_info);
zfree(&prog->line_info);
@@ -431,182 +703,182 @@ static void bpf_program__exit(struct bpf_program *prog)
if (!prog)
return;
- if (prog->clear_priv)
- prog->clear_priv(prog, prog->priv);
-
- prog->priv = NULL;
- prog->clear_priv = NULL;
-
bpf_program__unload(prog);
zfree(&prog->name);
- zfree(&prog->section_name);
- zfree(&prog->pin_name);
+ zfree(&prog->sec_name);
zfree(&prog->insns);
zfree(&prog->reloc_desc);
prog->nr_reloc = 0;
prog->insns_cnt = 0;
- prog->idx = -1;
+ prog->sec_idx = -1;
}
-static char *__bpf_program__pin_name(struct bpf_program *prog)
+static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
- char *name, *p;
+ return BPF_CLASS(insn->code) == BPF_JMP &&
+ BPF_OP(insn->code) == BPF_CALL &&
+ BPF_SRC(insn->code) == BPF_K &&
+ insn->src_reg == BPF_PSEUDO_CALL &&
+ insn->dst_reg == 0 &&
+ insn->off == 0;
+}
- name = p = strdup(prog->section_name);
- while ((p = strchr(p, '/')))
- *p = '_';
+static bool is_call_insn(const struct bpf_insn *insn)
+{
+ return insn->code == (BPF_JMP | BPF_CALL);
+}
- return name;
+static bool insn_is_pseudo_func(struct bpf_insn *insn)
+{
+ return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
static int
-bpf_program__init(void *data, size_t size, char *section_name, int idx,
- struct bpf_program *prog)
+bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
+ const char *name, size_t sec_idx, const char *sec_name,
+ size_t sec_off, void *insn_data, size_t insn_data_sz)
{
- const size_t bpf_insn_sz = sizeof(struct bpf_insn);
-
- if (size == 0 || size % bpf_insn_sz) {
- pr_warn("corrupted section '%s', size: %zu\n",
- section_name, size);
+ if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
+ pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
+ sec_name, name, sec_off, insn_data_sz);
return -EINVAL;
}
memset(prog, 0, sizeof(*prog));
+ prog->obj = obj;
- prog->section_name = strdup(section_name);
- if (!prog->section_name) {
- pr_warn("failed to alloc name for prog under section(%d) %s\n",
- idx, section_name);
- goto errout;
+ prog->sec_idx = sec_idx;
+ prog->sec_insn_off = sec_off / BPF_INSN_SZ;
+ prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
+ /* insns_cnt can later be increased by appending used subprograms */
+ prog->insns_cnt = prog->sec_insn_cnt;
+
+ prog->type = BPF_PROG_TYPE_UNSPEC;
+ prog->fd = -1;
+
+ /* libbpf's convention for SEC("?abc...") is that it's just like
+ * SEC("abc...") but the corresponding bpf_program starts out with
+ * autoload set to false.
+ */
+ if (sec_name[0] == '?') {
+ prog->autoload = false;
+ /* from now on forget there was ? in section name */
+ sec_name++;
+ } else {
+ prog->autoload = true;
}
- prog->pin_name = __bpf_program__pin_name(prog);
- if (!prog->pin_name) {
- pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
- idx, section_name);
+ prog->autoattach = true;
+
+ /* inherit object's log_level */
+ prog->log_level = obj->log_level;
+
+ prog->sec_name = strdup(sec_name);
+ if (!prog->sec_name)
goto errout;
- }
- prog->insns = malloc(size);
- if (!prog->insns) {
- pr_warn("failed to alloc insns for prog under section %s\n",
- section_name);
+ prog->name = strdup(name);
+ if (!prog->name)
goto errout;
- }
- prog->insns_cnt = size / bpf_insn_sz;
- memcpy(prog->insns, data, size);
- prog->idx = idx;
- prog->instances.fds = NULL;
- prog->instances.nr = -1;
- prog->type = BPF_PROG_TYPE_UNSPEC;
+
+ prog->insns = malloc(insn_data_sz);
+ if (!prog->insns)
+ goto errout;
+ memcpy(prog->insns, insn_data, insn_data_sz);
return 0;
errout:
+ pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
bpf_program__exit(prog);
return -ENOMEM;
}
static int
-bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
- char *section_name, int idx)
+bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
+ const char *sec_name, int sec_idx)
{
- struct bpf_program prog, *progs;
- int nr_progs, err;
-
- err = bpf_program__init(data, size, section_name, idx, &prog);
- if (err)
- return err;
+ Elf_Data *symbols = obj->efile.symbols;
+ struct bpf_program *prog, *progs;
+ void *data = sec_data->d_buf;
+ size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
+ int nr_progs, err, i;
+ const char *name;
+ Elf64_Sym *sym;
- prog.caps = &obj->caps;
progs = obj->programs;
nr_progs = obj->nr_programs;
+ nr_syms = symbols->d_size / sizeof(Elf64_Sym);
- progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
- if (!progs) {
- /*
- * In this case the original obj->programs
- * is still valid, so don't need special treat for
- * bpf_close_object().
- */
- pr_warn("failed to alloc a new program under section '%s'\n",
- section_name);
- bpf_program__exit(&prog);
- return -ENOMEM;
- }
+ for (i = 0; i < nr_syms; i++) {
+ sym = elf_sym_by_idx(obj, i);
- pr_debug("found program %s\n", prog.section_name);
- obj->programs = progs;
- obj->nr_programs = nr_progs + 1;
- prog.obj = obj;
- progs[nr_progs] = prog;
- return 0;
-}
+ if (sym->st_shndx != sec_idx)
+ continue;
+ if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
+ continue;
-static int
-bpf_object__init_prog_names(struct bpf_object *obj)
-{
- Elf_Data *symbols = obj->efile.symbols;
- struct bpf_program *prog;
- size_t pi, si;
+ prog_sz = sym->st_size;
+ sec_off = sym->st_value;
- for (pi = 0; pi < obj->nr_programs; pi++) {
- const char *name = NULL;
+ name = elf_sym_str(obj, sym->st_name);
+ if (!name) {
+ pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
+ sec_name, sec_off);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- prog = &obj->programs[pi];
+ if (sec_off + prog_sz > sec_sz) {
+ pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
+ sec_name, sec_off);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
- si++) {
- GElf_Sym sym;
+ if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
+ pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
+ return -ENOTSUP;
+ }
- if (!gelf_getsym(symbols, si, &sym))
- continue;
- if (sym.st_shndx != prog->idx)
- continue;
- if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
- continue;
+ pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
+ sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
- name = elf_strptr(obj->efile.elf,
- obj->efile.strtabidx,
- sym.st_name);
- if (!name) {
- pr_warn("failed to get sym name string for prog %s\n",
- prog->section_name);
- return -LIBBPF_ERRNO__LIBELF;
- }
+ progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
+ if (!progs) {
+ /*
+ * In this case the original obj->programs
+ * is still valid, so don't need special treat for
+ * bpf_close_object().
+ */
+ pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
+ sec_name, name);
+ return -ENOMEM;
}
+ obj->programs = progs;
- if (!name && prog->idx == obj->efile.text_shndx)
- name = ".text";
+ prog = &progs[nr_progs];
- if (!name) {
- pr_warn("failed to find sym for prog %s\n",
- prog->section_name);
- return -EINVAL;
- }
+ err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
+ sec_off, data + sec_off, prog_sz);
+ if (err)
+ return err;
- prog->name = strdup(name);
- if (!prog->name) {
- pr_warn("failed to allocate memory for prog sym %s\n",
- name);
- return -ENOMEM;
- }
+ /* if the function is a global/weak symbol, but has restricted
+ * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
+ * as static to enable a more permissive BPF verification mode
+ * with more outside context available to the BPF verifier
+ */
+ if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
+ && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
+ || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
+ prog->mark_btf_static = true;
+
+ nr_progs++;
+ obj->nr_programs = nr_progs;
}
return 0;
}
-static __u32 get_kernel_version(void)
-{
- __u32 major, minor, patch;
- struct utsname info;
-
- uname(&info);
- if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
- return 0;
- return KERNEL_VERSION(major, minor, patch);
-}
-
static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
@@ -787,24 +1059,24 @@ static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
if (btf_is_ptr(mtype)) {
struct bpf_program *prog;
- mtype = skip_mods_and_typedefs(btf, mtype->type, &mtype_id);
+ prog = st_ops->progs[i];
+ if (!prog)
+ continue;
+
kern_mtype = skip_mods_and_typedefs(kern_btf,
kern_mtype->type,
&kern_mtype_id);
- if (!btf_is_func_proto(mtype) ||
- !btf_is_func_proto(kern_mtype)) {
- pr_warn("struct_ops init_kern %s: non func ptr %s is not supported\n",
+
+ /* mtype->type must be a func_proto which was
+ * guaranteed in bpf_object__collect_st_ops_relos(),
+ * so only check kern_mtype for func_proto here.
+ */
+ if (!btf_is_func_proto(kern_mtype)) {
+ pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
map->name, mname);
return -ENOTSUP;
}
- prog = st_ops->progs[i];
- if (!prog) {
- pr_debug("struct_ops init_kern %s: func ptr %s is not set\n",
- map->name, mname);
- continue;
- }
-
prog->attach_btf_id = kern_type_id;
prog->expected_attach_type = kern_member_idx;
@@ -856,7 +1128,8 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
return 0;
}
-static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
+static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
+ int shndx, Elf_Data *data, __u32 map_flags)
{
const struct btf_type *type, *datasec;
const struct btf_var_secinfo *vsi;
@@ -867,15 +1140,15 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
struct bpf_map *map;
__u32 i;
- if (obj->efile.st_ops_shndx == -1)
+ if (shndx == -1)
return 0;
btf = obj->btf;
- datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
+ datasec_id = btf__find_by_name_kind(btf, sec_name,
BTF_KIND_DATASEC);
if (datasec_id < 0) {
pr_warn("struct_ops init: DATASEC %s not found\n",
- STRUCT_OPS_SEC);
+ sec_name);
return -EINVAL;
}
@@ -888,7 +1161,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
type_id = btf__resolve_type(obj->btf, vsi->type);
if (type_id < 0) {
pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
- vsi->type, STRUCT_OPS_SEC);
+ vsi->type, sec_name);
return -EINVAL;
}
@@ -907,7 +1180,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
if (IS_ERR(map))
return PTR_ERR(map);
- map->sec_idx = obj->efile.st_ops_shndx;
+ map->sec_idx = shndx;
map->sec_offset = vsi->offset;
map->name = strdup(var_name);
if (!map->name)
@@ -917,6 +1190,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
map->def.key_size = sizeof(int);
map->def.value_size = type->size;
map->def.max_entries = 1;
+ map->def.map_flags = map_flags;
map->st_ops = calloc(1, sizeof(*map->st_ops));
if (!map->st_ops)
@@ -929,14 +1203,14 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
return -ENOMEM;
- if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
+ if (vsi->offset + type->size > data->d_size) {
pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
- var_name, STRUCT_OPS_SEC);
+ var_name, sec_name);
return -EINVAL;
}
memcpy(st_ops->data,
- obj->efile.st_ops_data->d_buf + vsi->offset,
+ data->d_buf + vsi->offset,
type->size);
st_ops->tname = tname;
st_ops->type = type;
@@ -949,6 +1223,19 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
return 0;
}
+static int bpf_object_init_struct_ops(struct bpf_object *obj)
+{
+ int err;
+
+ err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
+ obj->efile.st_ops_data, 0);
+ err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
+ obj->efile.st_ops_link_shndx,
+ obj->efile.st_ops_link_data,
+ BPF_F_LINK);
+ return err;
+}
+
static struct bpf_object *bpf_object__new(const char *path,
const void *obj_buf,
size_t obj_buf_sz,
@@ -965,12 +1252,10 @@ static struct bpf_object *bpf_object__new(const char *path,
strcpy(obj->path, path);
if (obj_name) {
- strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
- obj->name[sizeof(obj->name) - 1] = 0;
+ libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
} else {
/* Using basename() GNU version which doesn't modify arg. */
- strncpy(obj->name, basename((void *)path),
- sizeof(obj->name) - 1);
+ libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
end = strchr(obj->name, '.');
if (end)
*end = 0;
@@ -985,39 +1270,30 @@ static struct bpf_object *bpf_object__new(const char *path,
*/
obj->efile.obj_buf = obj_buf;
obj->efile.obj_buf_sz = obj_buf_sz;
- obj->efile.maps_shndx = -1;
obj->efile.btf_maps_shndx = -1;
- obj->efile.data_shndx = -1;
- obj->efile.rodata_shndx = -1;
- obj->efile.bss_shndx = -1;
obj->efile.st_ops_shndx = -1;
+ obj->efile.st_ops_link_shndx = -1;
obj->kconfig_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->loaded = false;
- INIT_LIST_HEAD(&obj->list);
- list_add(&obj->list, &bpf_objects_list);
return obj;
}
static void bpf_object__elf_finish(struct bpf_object *obj)
{
- if (!obj_elf_valid(obj))
+ if (!obj->efile.elf)
return;
- if (obj->efile.elf) {
- elf_end(obj->efile.elf);
- obj->efile.elf = NULL;
- }
+ elf_end(obj->efile.elf);
+ obj->efile.elf = NULL;
obj->efile.symbols = NULL;
- obj->efile.data = NULL;
- obj->efile.rodata = NULL;
- obj->efile.bss = NULL;
obj->efile.st_ops_data = NULL;
+ obj->efile.st_ops_link_data = NULL;
- zfree(&obj->efile.reloc_sects);
- obj->efile.nr_reloc_sects = 0;
+ zfree(&obj->efile.secs);
+ obj->efile.sec_cnt = 0;
zclose(obj->efile.fd);
obj->efile.obj_buf = NULL;
obj->efile.obj_buf_sz = 0;
@@ -1025,53 +1301,77 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
static int bpf_object__elf_init(struct bpf_object *obj)
{
+ Elf64_Ehdr *ehdr;
int err = 0;
- GElf_Ehdr *ep;
+ Elf *elf;
- if (obj_elf_valid(obj)) {
- pr_warn("elf init: internal error\n");
+ if (obj->efile.elf) {
+ pr_warn("elf: init internal error\n");
return -LIBBPF_ERRNO__LIBELF;
}
if (obj->efile.obj_buf_sz > 0) {
- /*
- * obj_buf should have been validated by
- * bpf_object__open_buffer().
- */
- obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
- obj->efile.obj_buf_sz);
+ /* obj_buf should have been validated by bpf_object__open_mem(). */
+ elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
} else {
- obj->efile.fd = open(obj->path, O_RDONLY);
+ obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
if (obj->efile.fd < 0) {
char errmsg[STRERR_BUFSIZE], *cp;
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("failed to open %s: %s\n", obj->path, cp);
+ pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
return err;
}
- obj->efile.elf = elf_begin(obj->efile.fd,
- LIBBPF_ELF_C_READ_MMAP, NULL);
+ elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
}
- if (!obj->efile.elf) {
- pr_warn("failed to open %s as ELF file\n", obj->path);
+ if (!elf) {
+ pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__LIBELF;
goto errout;
}
- if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
- pr_warn("failed to get EHDR from %s\n", obj->path);
+ obj->efile.elf = elf;
+
+ if (elf_kind(elf) != ELF_K_ELF) {
+ err = -LIBBPF_ERRNO__FORMAT;
+ pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
+ goto errout;
+ }
+
+ if (gelf_getclass(elf) != ELFCLASS64) {
+ err = -LIBBPF_ERRNO__FORMAT;
+ pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
+ goto errout;
+ }
+
+ obj->efile.ehdr = ehdr = elf64_getehdr(elf);
+ if (!obj->efile.ehdr) {
+ pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
+ err = -LIBBPF_ERRNO__FORMAT;
+ goto errout;
+ }
+
+ if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
+ pr_warn("elf: failed to get section names section index for %s: %s\n",
+ obj->path, elf_errmsg(-1));
+ err = -LIBBPF_ERRNO__FORMAT;
+ goto errout;
+ }
+
+ /* ELF is corrupted/truncated, avoid calling elf_strptr. */
+ if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
+ pr_warn("elf: failed to get section names strings from %s: %s\n",
+ obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
- ep = &obj->efile.ehdr;
/* Old LLVM set e_machine to EM_NONE */
- if (ep->e_type != ET_REL ||
- (ep->e_machine && ep->e_machine != EM_BPF)) {
- pr_warn("%s is not an eBPF object file\n", obj->path);
+ if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
+ pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
@@ -1084,23 +1384,30 @@ errout:
static int bpf_object__check_endianness(struct bpf_object *obj)
{
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
return 0;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
- pr_warn("endianness mismatch.\n");
+ pr_warn("elf: endianness mismatch in %s.\n", obj->path);
return -LIBBPF_ERRNO__ENDIAN;
}
static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
- memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
+ if (!data) {
+ pr_warn("invalid license section in %s\n", obj->path);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ /* libbpf_strlcpy() only copies the first N - 1 bytes, so size + 1 won't
+ * go over the allowed ELF data section buffer
+ */
+ libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
pr_debug("license of %s is %s\n", obj->path, obj->license);
return 0;
}
@@ -1110,7 +1417,7 @@ bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
__u32 kver;
- if (size != sizeof(kver)) {
+ if (!data || size != sizeof(kver)) {
pr_warn("invalid kver section in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
@@ -1128,147 +1435,69 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
return false;
}
-static int bpf_object_search_section_size(const struct bpf_object *obj,
- const char *name, size_t *d_size)
+static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
- const GElf_Ehdr *ep = &obj->efile.ehdr;
- Elf *elf = obj->efile.elf;
- Elf_Scn *scn = NULL;
- int idx = 0;
-
- while ((scn = elf_nextscn(elf, scn)) != NULL) {
- const char *sec_name;
- Elf_Data *data;
- GElf_Shdr sh;
-
- idx++;
- if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warn("failed to get section(%d) header from %s\n",
- idx, obj->path);
- return -EIO;
- }
-
- sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
- if (!sec_name) {
- pr_warn("failed to get section(%d) name from %s\n",
- idx, obj->path);
- return -EIO;
- }
-
- if (strcmp(name, sec_name))
- continue;
+ Elf_Data *data;
+ Elf_Scn *scn;
- data = elf_getdata(scn, 0);
- if (!data) {
- pr_warn("failed to get section(%d) data from %s(%s)\n",
- idx, name, obj->path);
- return -EIO;
- }
+ if (!name)
+ return -EINVAL;
- *d_size = data->d_size;
- return 0;
+ scn = elf_sec_by_name(obj, name);
+ data = elf_sec_data(obj, scn);
+ if (data) {
+ *size = data->d_size;
+ return 0; /* found it */
}
return -ENOENT;
}
-int bpf_object__section_size(const struct bpf_object *obj, const char *name,
- __u32 *size)
-{
- int ret = -ENOENT;
- size_t d_size;
-
- *size = 0;
- if (!name) {
- return -EINVAL;
- } else if (!strcmp(name, DATA_SEC)) {
- if (obj->efile.data)
- *size = obj->efile.data->d_size;
- } else if (!strcmp(name, BSS_SEC)) {
- if (obj->efile.bss)
- *size = obj->efile.bss->d_size;
- } else if (!strcmp(name, RODATA_SEC)) {
- if (obj->efile.rodata)
- *size = obj->efile.rodata->d_size;
- } else if (!strcmp(name, STRUCT_OPS_SEC)) {
- if (obj->efile.st_ops_data)
- *size = obj->efile.st_ops_data->d_size;
- } else {
- ret = bpf_object_search_section_size(obj, name, &d_size);
- if (!ret)
- *size = d_size;
- }
-
- return *size ? 0 : ret;
-}
-
-int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
- __u32 *off)
+static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
{
Elf_Data *symbols = obj->efile.symbols;
const char *sname;
size_t si;
- if (!name || !off)
- return -EINVAL;
+ for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
+ Elf64_Sym *sym = elf_sym_by_idx(obj, si);
- for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
- GElf_Sym sym;
-
- if (!gelf_getsym(symbols, si, &sym))
+ if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
continue;
- if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
- GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
+
+ if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
+ ELF64_ST_BIND(sym->st_info) != STB_WEAK)
continue;
- sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name);
+ sname = elf_sym_str(obj, sym->st_name);
if (!sname) {
- pr_warn("failed to get sym name string for var %s\n",
- name);
- return -EIO;
- }
- if (strcmp(name, sname) == 0) {
- *off = sym.st_value;
- return 0;
+ pr_warn("failed to get sym name string for var %s\n", name);
+ return ERR_PTR(-EIO);
}
+ if (strcmp(name, sname) == 0)
+ return sym;
}
- return -ENOENT;
+ return ERR_PTR(-ENOENT);
}
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
- struct bpf_map *new_maps;
- size_t new_cap;
- int i;
-
- if (obj->nr_maps < obj->maps_cap)
- return &obj->maps[obj->nr_maps++];
-
- new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
- new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
- if (!new_maps) {
- pr_warn("alloc maps for object failed\n");
- return ERR_PTR(-ENOMEM);
- }
+ struct bpf_map *map;
+ int err;
- obj->maps_cap = new_cap;
- obj->maps = new_maps;
+ err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
+ sizeof(*obj->maps), obj->nr_maps + 1);
+ if (err)
+ return ERR_PTR(err);
- /* zero out new maps */
- memset(obj->maps + obj->nr_maps, 0,
- (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
- /*
- * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin)
- * when failure (zclose won't close negative fd)).
- */
- for (i = obj->nr_maps; i < obj->maps_cap; i++) {
- obj->maps[i].fd = -1;
- obj->maps[i].inner_map_fd = -1;
- }
+ map = &obj->maps[obj->nr_maps++];
+ map->obj = obj;
+ map->fd = -1;
+ map->inner_map_fd = -1;
+ map->autocreate = true;
- return &obj->maps[obj->nr_maps++];
+ return map;
}
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
@@ -1281,17 +1510,55 @@ static size_t bpf_map_mmap_sz(const struct bpf_map *map)
return map_sz;
}
-static char *internal_map_name(struct bpf_object *obj,
- enum libbpf_map_type type)
+static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
char map_name[BPF_OBJ_NAME_LEN], *p;
- const char *sfx = libbpf_type_to_btf_name[type];
- int sfx_len = max((size_t)7, strlen(sfx));
- int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
- strlen(obj->name));
+ int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
+
+ /* This is one of the more confusing parts of libbpf for various
+ * reasons, some of which are historical. The original idea for naming
+ * internal maps was to include as much of the BPF object name prefix as
+ * possible, so that it can be distinguished from similar internal
+ * maps of a different BPF object.
+ * As an example, let's say we have bpf_object named 'my_object_name'
+ * and internal map corresponding to '.rodata' ELF section. The final
+ * map name advertised to user and to the kernel will be
+ * 'my_objec.rodata', taking first 8 characters of object name and
+ * entire 7 characters of '.rodata'.
+ * Somewhat confusingly, if internal map ELF section name is shorter
+ * than 7 characters, e.g., '.bss', we still reserve 7 characters
+ * for the suffix, even though we only have 4 actual characters, and
+ * resulting map will be called 'my_objec.bss', not even using all 15
+ * characters allowed by the kernel. Oh well, at least the truncated
+ * object name is somewhat consistent in this case. But if the map
+ * name is '.kconfig', we'll still have entirety of '.kconfig' added
+ * (8 chars) and thus will be left with only first 7 characters of the
+ * object name ('my_obje'). Happy guessing, user, that the final map
+ * name will be "my_obje.kconfig".
+ * Now, with libbpf starting to support arbitrarily named .rodata.*
+ * and .data.* data sections, it's possible that the ELF section name is
+ * longer than the allowed 15 chars, so we now need to be careful to take
+ * only up to the first 15 characters of the ELF name, taking no BPF object
+ * name characters at all. So '.rodata.abracadabra' will result in
+ * '.rodata.abracad' kernel and user-visible name.
+ * We need to keep this convoluted logic intact for .data, .bss and
+ * .rodata maps, but for new custom .data.custom and .rodata.custom
+ * maps we use their ELF names as is, not prepending bpf_object name
+ * in front. We still need to truncate them to 15 characters for the
+ * kernel. Full name can be recovered for such maps by using DATASEC
+ * BTF type associated with such map's value type, though.
+ */
+ if (sfx_len >= BPF_OBJ_NAME_LEN)
+ sfx_len = BPF_OBJ_NAME_LEN - 1;
+
+ /* if there are two or more dots in map name, it's a custom dot map */
+ if (strchr(real_name + 1, '.') != NULL)
+ pfx_len = 0;
+ else
+ pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
- sfx_len, libbpf_type_to_btf_name[type]);
+ sfx_len, real_name);
/* sanitise map name to characters allowed by kernel */
for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
@@ -1302,8 +1569,42 @@ static char *internal_map_name(struct bpf_object *obj,
}
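The naming rules described in the long comment above only affect the kernel-visible (truncated) map name. The hedged sketch below assumes current libbpf behavior, where internal maps can still be looked up by their full ELF section name (the preserved real_name), and that the object actually contains read-only data; "prog.bpf.o" is a placeholder path:

#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_map *map;

	obj = bpf_object__open("prog.bpf.o");  /* placeholder object file */
	if (!obj)
		return 1;

	/* lookup uses the preserved ELF section name (map->real_name) ... */
	map = bpf_object__find_map_by_name(obj, ".rodata");
	if (map)
		/* ... while bpf_map__name() reports the truncated
		 * "<obj_name>.rodata" form built by internal_map_name()
		 */
		printf("kernel-visible name: %s\n", bpf_map__name(map));

	bpf_object__close(obj);
	return 0;
}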
static int
+map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
+
+/* An internal BPF map is mmap()'able only if at least one of the corresponding
+ * DATASEC's VARs is to be exposed through the BPF skeleton. I.e., it's a GLOBAL
+ * variable and it's not marked as __hidden (which turns it into, effectively,
+ * a STATIC variable).
+ */
+static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
+{
+ const struct btf_type *t, *vt;
+ struct btf_var_secinfo *vsi;
+ int i, n;
+
+ if (!map->btf_value_type_id)
+ return false;
+
+ t = btf__type_by_id(obj->btf, map->btf_value_type_id);
+ if (!btf_is_datasec(t))
+ return false;
+
+ vsi = btf_var_secinfos(t);
+ for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
+ vt = btf__type_by_id(obj->btf, vsi->type);
+ if (!btf_is_var(vt))
+ continue;
+
+ if (btf_var(vt)->linkage != BTF_VAR_STATIC)
+ return true;
+ }
+
+ return false;
+}
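/* Illustrative BPF-side sketch of what the check above distinguishes
 * (__hidden is assumed to be bpf_helpers.h's visibility("hidden") macro):
 *
 *   int exported_cnt = 0;     GLOBAL VAR           -> map gets BPF_F_MMAPABLE
 *   static int private_cnt;   STATIC VAR           -> ignored by the check
 *   __hidden int internal;    treated as STATIC    -> ignored by the check
 */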
+
+static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
- int sec_idx, void *data, size_t data_sz)
+ const char *real_name, int sec_idx, void *data, size_t data_sz)
{
struct bpf_map_def *def;
struct bpf_map *map;
@@ -1316,9 +1617,11 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
map->libbpf_type = type;
map->sec_idx = sec_idx;
map->sec_offset = 0;
- map->name = internal_map_name(obj, type);
- if (!map->name) {
- pr_warn("failed to alloc map name\n");
+ map->real_name = strdup(real_name);
+ map->name = internal_map_name(obj, real_name);
+ if (!map->real_name || !map->name) {
+ zfree(&map->real_name);
+ zfree(&map->name);
return -ENOMEM;
}
@@ -1329,7 +1632,12 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
def->max_entries = 1;
def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
? BPF_F_RDONLY_PROG : 0;
- def->map_flags |= BPF_F_MMAPABLE;
+
+ /* failures are fine because of maps like .rodata.str1.1 */
+ (void) map_fill_btf_type_info(obj, map);
+
+ if (map_is_mmapable(obj, map))
+ def->map_flags |= BPF_F_MMAPABLE;
pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
map->name, map->sec_idx, map->sec_offset, def->map_flags);
@@ -1341,6 +1649,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
map->mmaped = NULL;
pr_warn("failed to alloc map '%s' content buffer: %d\n",
map->name, err);
+ zfree(&map->real_name);
zfree(&map->name);
return err;
}
@@ -1354,32 +1663,47 @@ bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
- int err;
+ struct elf_sec_desc *sec_desc;
+ const char *sec_name;
+ int err = 0, sec_idx;
/*
* Populate obj->maps with libbpf internal maps.
*/
- if (obj->efile.data_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
- obj->efile.data_shndx,
- obj->efile.data->d_buf,
- obj->efile.data->d_size);
- if (err)
- return err;
- }
- if (obj->efile.rodata_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
- obj->efile.rodata_shndx,
- obj->efile.rodata->d_buf,
- obj->efile.rodata->d_size);
- if (err)
- return err;
- }
- if (obj->efile.bss_shndx >= 0) {
- err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
- obj->efile.bss_shndx,
- NULL,
- obj->efile.bss->d_size);
+ for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
+ sec_desc = &obj->efile.secs[sec_idx];
+
+ /* Skip recognized sections with size 0. */
+ if (!sec_desc->data || sec_desc->data->d_size == 0)
+ continue;
+
+ switch (sec_desc->sec_type) {
+ case SEC_DATA:
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
+ sec_name, sec_idx,
+ sec_desc->data->d_buf,
+ sec_desc->data->d_size);
+ break;
+ case SEC_RODATA:
+ obj->has_rodata = true;
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
+ sec_name, sec_idx,
+ sec_desc->data->d_buf,
+ sec_desc->data->d_size);
+ break;
+ case SEC_BSS:
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
+ sec_name, sec_idx,
+ NULL,
+ sec_desc->data->d_size);
+ break;
+ default:
+ /* skip */
+ break;
+ }
if (err)
return err;
}
@@ -1399,19 +1723,19 @@ static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
return NULL;
}
-static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
- char value)
+static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
+ char value)
{
- switch (ext->type) {
- case EXT_BOOL:
+ switch (ext->kcfg.type) {
+ case KCFG_BOOL:
if (value == 'm') {
- pr_warn("extern %s=%c should be tristate or char\n",
+ pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
ext->name, value);
return -EINVAL;
}
*(bool *)ext_val = value == 'y' ? true : false;
break;
- case EXT_TRISTATE:
+ case KCFG_TRISTATE:
if (value == 'y')
*(enum libbpf_tristate *)ext_val = TRI_YES;
else if (value == 'm')
@@ -1419,14 +1743,14 @@ static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
else /* value == 'n' */
*(enum libbpf_tristate *)ext_val = TRI_NO;
break;
- case EXT_CHAR:
+ case KCFG_CHAR:
*(char *)ext_val = value;
break;
- case EXT_UNKNOWN:
- case EXT_INT:
- case EXT_CHAR_ARR:
+ case KCFG_UNKNOWN:
+ case KCFG_INT:
+ case KCFG_CHAR_ARR:
default:
- pr_warn("extern %s=%c should be bool, tristate, or char\n",
+ pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
ext->name, value);
return -EINVAL;
}
@@ -1434,29 +1758,30 @@ static int set_ext_value_tri(struct extern_desc *ext, void *ext_val,
return 0;
}
-static int set_ext_value_str(struct extern_desc *ext, char *ext_val,
- const char *value)
+static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
+ const char *value)
{
size_t len;
- if (ext->type != EXT_CHAR_ARR) {
- pr_warn("extern %s=%s should char array\n", ext->name, value);
+ if (ext->kcfg.type != KCFG_CHAR_ARR) {
+ pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
+ ext->name, value);
return -EINVAL;
}
len = strlen(value);
if (value[len - 1] != '"') {
- pr_warn("extern '%s': invalid string config '%s'\n",
+ pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
ext->name, value);
return -EINVAL;
}
/* strip quotes */
len -= 2;
- if (len >= ext->sz) {
- pr_warn("extern '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
- ext->name, value, len, ext->sz - 1);
- len = ext->sz - 1;
+ if (len >= ext->kcfg.sz) {
+ pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
+ ext->name, value, len, ext->kcfg.sz - 1);
+ len = ext->kcfg.sz - 1;
}
memcpy(ext_val, value + 1, len);
ext_val[len] = '\0';
@@ -1483,11 +1808,11 @@ static int parse_u64(const char *value, __u64 *res)
return 0;
}
-static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v)
+static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
- int bit_sz = ext->sz * 8;
+ int bit_sz = ext->kcfg.sz * 8;
- if (ext->sz == 8)
+ if (ext->kcfg.sz == 8)
return true;
/* Validate that value stored in u64 fits in integer of `ext->sz`
@@ -1502,32 +1827,47 @@ static bool is_ext_value_in_range(const struct extern_desc *ext, __u64 v)
* For unsigned target integer, check that all the (64 - Y) bits are
* zero.
*/
- if (ext->is_signed)
+ if (ext->kcfg.is_signed)
return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
else
return (v >> bit_sz) == 0;
}
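/* Worked example of the range check above (illustrative): for a 1-byte
 * signed kcfg extern (bit_sz == 8), v + 128 < 256 accepts the sign-extended
 * u64 encodings of -128..127 and rejects e.g. v == 200; for a 1-byte
 * unsigned extern, v >> 8 == 0 accepts 0..255.
 */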
-static int set_ext_value_num(struct extern_desc *ext, void *ext_val,
- __u64 value)
+static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
+ __u64 value)
{
- if (ext->type != EXT_INT && ext->type != EXT_CHAR) {
- pr_warn("extern %s=%llu should be integer\n",
+ if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
+ ext->kcfg.type != KCFG_BOOL) {
+ pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
ext->name, (unsigned long long)value);
return -EINVAL;
}
- if (!is_ext_value_in_range(ext, value)) {
- pr_warn("extern %s=%llu value doesn't fit in %d bytes\n",
- ext->name, (unsigned long long)value, ext->sz);
+ if (ext->kcfg.type == KCFG_BOOL && value > 1) {
+ pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
+ ext->name, (unsigned long long)value);
+ return -EINVAL;
+ }
+ if (!is_kcfg_value_in_range(ext, value)) {
+ pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
+ ext->name, (unsigned long long)value, ext->kcfg.sz);
return -ERANGE;
}
- switch (ext->sz) {
- case 1: *(__u8 *)ext_val = value; break;
- case 2: *(__u16 *)ext_val = value; break;
- case 4: *(__u32 *)ext_val = value; break;
- case 8: *(__u64 *)ext_val = value; break;
- default:
- return -EINVAL;
+ switch (ext->kcfg.sz) {
+ case 1:
+ *(__u8 *)ext_val = value;
+ break;
+ case 2:
+ *(__u16 *)ext_val = value;
+ break;
+ case 4:
+ *(__u32 *)ext_val = value;
+ break;
+ case 8:
+ *(__u64 *)ext_val = value;
+ break;
+ default:
+ return -EINVAL;
}
ext->is_set = true;
return 0;
@@ -1542,7 +1882,7 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj,
void *ext_val;
__u64 num;
- if (strncmp(buf, "CONFIG_", 7))
+ if (!str_has_pfx(buf, "CONFIG_"))
return 0;
sep = strchr(buf, '=');
@@ -1567,30 +1907,33 @@ static int bpf_object__process_kconfig_line(struct bpf_object *obj,
if (!ext || ext->is_set)
return 0;
- ext_val = data + ext->data_off;
+ ext_val = data + ext->kcfg.data_off;
value = sep + 1;
switch (*value) {
case 'y': case 'n': case 'm':
- err = set_ext_value_tri(ext, ext_val, *value);
+ err = set_kcfg_value_tri(ext, ext_val, *value);
break;
case '"':
- err = set_ext_value_str(ext, ext_val, value);
+ err = set_kcfg_value_str(ext, ext_val, value);
break;
default:
/* assume integer */
err = parse_u64(value, &num);
if (err) {
- pr_warn("extern %s=%s should be integer\n",
- ext->name, value);
+ pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
return err;
}
- err = set_ext_value_num(ext, ext_val, num);
+ if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
+ pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
+ return -EINVAL;
+ }
+ err = set_kcfg_value_num(ext, ext_val, num);
break;
}
if (err)
return err;
- pr_debug("extern %s=%s\n", ext->name, value);
+ pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
return 0;
}
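/* Illustrative kconfig lines and how the switch above routes them
 * (the option names are only examples):
 *
 *   CONFIG_BPF_JIT=y                   -> set_kcfg_value_tri()
 *   CONFIG_DEFAULT_HOSTNAME="(none)"   -> set_kcfg_value_str()
 *   CONFIG_HZ=250                      -> parse_u64() + set_kcfg_value_num()
 *   # CONFIG_FOO is not set            -> skipped by the CONFIG_ prefix check
 */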
@@ -1661,18 +2004,22 @@ static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
static int bpf_object__init_kconfig_map(struct bpf_object *obj)
{
- struct extern_desc *last_ext;
+ struct extern_desc *last_ext = NULL, *ext;
size_t map_sz;
- int err;
+ int i, err;
- if (obj->nr_extern == 0)
- return 0;
+ for (i = 0; i < obj->nr_extern; i++) {
+ ext = &obj->externs[i];
+ if (ext->type == EXT_KCFG)
+ last_ext = ext;
+ }
- last_ext = &obj->externs[obj->nr_extern - 1];
- map_sz = last_ext->data_off + last_ext->sz;
+ if (!last_ext)
+ return 0;
+ map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
- obj->efile.symbols_shndx,
+ ".kconfig", obj->efile.symbols_shndx,
NULL, map_sz);
if (err)
return err;
@@ -1682,131 +2029,7 @@ static int bpf_object__init_kconfig_map(struct bpf_object *obj)
return 0;
}
-static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
-{
- Elf_Data *symbols = obj->efile.symbols;
- int i, map_def_sz = 0, nr_maps = 0, nr_syms;
- Elf_Data *data = NULL;
- Elf_Scn *scn;
-
- if (obj->efile.maps_shndx < 0)
- return 0;
-
- if (!symbols)
- return -EINVAL;
-
- scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
- if (scn)
- data = elf_getdata(scn, NULL);
- if (!scn || !data) {
- pr_warn("failed to get Elf_Data from map section %d\n",
- obj->efile.maps_shndx);
- return -EINVAL;
- }
-
- /*
- * Count number of maps. Each map has a name.
- * Array of maps is not supported: only the first element is
- * considered.
- *
- * TODO: Detect array of map and report error.
- */
- nr_syms = symbols->d_size / sizeof(GElf_Sym);
- for (i = 0; i < nr_syms; i++) {
- GElf_Sym sym;
-
- if (!gelf_getsym(symbols, i, &sym))
- continue;
- if (sym.st_shndx != obj->efile.maps_shndx)
- continue;
- nr_maps++;
- }
- /* Assume equally sized map definitions */
- pr_debug("maps in %s: %d maps in %zd bytes\n",
- obj->path, nr_maps, data->d_size);
-
- if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
- pr_warn("unable to determine map definition size section %s, %d maps in %zd bytes\n",
- obj->path, nr_maps, data->d_size);
- return -EINVAL;
- }
- map_def_sz = data->d_size / nr_maps;
-
- /* Fill obj->maps using data in "maps" section. */
- for (i = 0; i < nr_syms; i++) {
- GElf_Sym sym;
- const char *map_name;
- struct bpf_map_def *def;
- struct bpf_map *map;
-
- if (!gelf_getsym(symbols, i, &sym))
- continue;
- if (sym.st_shndx != obj->efile.maps_shndx)
- continue;
-
- map = bpf_object__add_map(obj);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name);
- if (!map_name) {
- pr_warn("failed to get map #%d name sym string for obj %s\n",
- i, obj->path);
- return -LIBBPF_ERRNO__FORMAT;
- }
-
- map->libbpf_type = LIBBPF_MAP_UNSPEC;
- map->sec_idx = sym.st_shndx;
- map->sec_offset = sym.st_value;
- pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
- map_name, map->sec_idx, map->sec_offset);
- if (sym.st_value + map_def_sz > data->d_size) {
- pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
- obj->path, map_name);
- return -EINVAL;
- }
-
- map->name = strdup(map_name);
- if (!map->name) {
- pr_warn("failed to alloc map name\n");
- return -ENOMEM;
- }
- pr_debug("map %d is \"%s\"\n", i, map->name);
- def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
- /*
- * If the definition of the map in the object file fits in
- * bpf_map_def, copy it. Any extra fields in our version
- * of bpf_map_def will default to zero as a result of the
- * calloc above.
- */
- if (map_def_sz <= sizeof(struct bpf_map_def)) {
- memcpy(&map->def, def, map_def_sz);
- } else {
- /*
- * Here the map structure being read is bigger than what
- * we expect, truncate if the excess bits are all zero.
- * If they are not zero, reject this map as
- * incompatible.
- */
- char *b;
-
- for (b = ((char *)def) + sizeof(struct bpf_map_def);
- b < ((char *)def) + map_def_sz; b++) {
- if (*b != 0) {
- pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
- obj->path, map_name);
- if (strict)
- return -EINVAL;
- }
- }
- memcpy(&map->def, def, sizeof(struct bpf_map_def));
- }
- }
- return 0;
-}
-
-static const struct btf_type *
+const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
const struct btf_type *t = btf__type_by_id(btf, id);
@@ -1837,6 +2060,38 @@ resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
return btf_is_func_proto(t) ? t : NULL;
}
+static const char *__btf_kind_str(__u16 kind)
+{
+ switch (kind) {
+ case BTF_KIND_UNKN: return "void";
+ case BTF_KIND_INT: return "int";
+ case BTF_KIND_PTR: return "ptr";
+ case BTF_KIND_ARRAY: return "array";
+ case BTF_KIND_STRUCT: return "struct";
+ case BTF_KIND_UNION: return "union";
+ case BTF_KIND_ENUM: return "enum";
+ case BTF_KIND_FWD: return "fwd";
+ case BTF_KIND_TYPEDEF: return "typedef";
+ case BTF_KIND_VOLATILE: return "volatile";
+ case BTF_KIND_CONST: return "const";
+ case BTF_KIND_RESTRICT: return "restrict";
+ case BTF_KIND_FUNC: return "func";
+ case BTF_KIND_FUNC_PROTO: return "func_proto";
+ case BTF_KIND_VAR: return "var";
+ case BTF_KIND_DATASEC: return "datasec";
+ case BTF_KIND_FLOAT: return "float";
+ case BTF_KIND_DECL_TAG: return "decl_tag";
+ case BTF_KIND_TYPE_TAG: return "type_tag";
+ case BTF_KIND_ENUM64: return "enum64";
+ default: return "unknown";
+ }
+}
+
+const char *btf_kind_str(const struct btf_type *t)
+{
+ return __btf_kind_str(btf_kind(t));
+}
+
/*
* Fetch integer attribute of BTF map definition. Such attributes are
* represented using a pointer to an array, in which dimensionality of array
@@ -1853,8 +2108,8 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
const struct btf_type *arr_t;
if (!btf_is_ptr(t)) {
- pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
- map_name, name, btf_kind(t));
+ pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
+ map_name, name, btf_kind_str(t));
return false;
}
@@ -1865,8 +2120,8 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
return false;
}
if (!btf_is_array(arr_t)) {
- pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
- map_name, name, btf_kind(arr_t));
+ pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
+ map_name, name, btf_kind_str(arr_t));
return false;
}
arr_info = btf_array(arr_t);
@@ -1874,239 +2129,255 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
return true;
}
+static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
+{
+ int len;
+
+ len = snprintf(buf, buf_sz, "%s/%s", path, name);
+ if (len < 0)
+ return -EINVAL;
+ if (len >= buf_sz)
+ return -ENAMETOOLONG;
+
+ return 0;
+}
+
static int build_map_pin_path(struct bpf_map *map, const char *path)
{
char buf[PATH_MAX];
- int err, len;
+ int err;
if (!path)
path = "/sys/fs/bpf";
- len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
- if (len < 0)
- return -EINVAL;
- else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
-
- err = bpf_map__set_pin_path(map, buf);
+ err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
if (err)
return err;
- return 0;
+ return bpf_map__set_pin_path(map, buf);
}
-static int bpf_object__init_user_btf_map(struct bpf_object *obj,
- const struct btf_type *sec,
- int var_idx, int sec_idx,
- const Elf_Data *data, bool strict,
- const char *pin_root_path)
+/* should match definition in bpf_helpers.h */
+enum libbpf_pin_type {
+ LIBBPF_PIN_NONE,
+ /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
+ LIBBPF_PIN_BY_NAME,
+};
+
+int parse_btf_map_def(const char *map_name, struct btf *btf,
+ const struct btf_type *def_t, bool strict,
+ struct btf_map_def *map_def, struct btf_map_def *inner_def)
{
- const struct btf_type *var, *def, *t;
- const struct btf_var_secinfo *vi;
- const struct btf_var *var_extra;
+ const struct btf_type *t;
const struct btf_member *m;
- const char *map_name;
- struct bpf_map *map;
+ bool is_inner = inner_def == NULL;
int vlen, i;
- vi = btf_var_secinfos(sec) + var_idx;
- var = btf__type_by_id(obj->btf, vi->type);
- var_extra = btf_var(var);
- map_name = btf__name_by_offset(obj->btf, var->name_off);
- vlen = btf_vlen(var);
-
- if (map_name == NULL || map_name[0] == '\0') {
- pr_warn("map #%d: empty name.\n", var_idx);
- return -EINVAL;
- }
- if ((__u64)vi->offset + vi->size > data->d_size) {
- pr_warn("map '%s' BTF data is corrupted.\n", map_name);
- return -EINVAL;
- }
- if (!btf_is_var(var)) {
- pr_warn("map '%s': unexpected var kind %u.\n",
- map_name, btf_kind(var));
- return -EINVAL;
- }
- if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
- var_extra->linkage != BTF_VAR_STATIC) {
- pr_warn("map '%s': unsupported var linkage %u.\n",
- map_name, var_extra->linkage);
- return -EOPNOTSUPP;
- }
-
- def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
- if (!btf_is_struct(def)) {
- pr_warn("map '%s': unexpected def kind %u.\n",
- map_name, btf_kind(var));
- return -EINVAL;
- }
- if (def->size > vi->size) {
- pr_warn("map '%s': invalid def size.\n", map_name);
- return -EINVAL;
- }
-
- map = bpf_object__add_map(obj);
- if (IS_ERR(map))
- return PTR_ERR(map);
- map->name = strdup(map_name);
- if (!map->name) {
- pr_warn("map '%s': failed to alloc map name.\n", map_name);
- return -ENOMEM;
- }
- map->libbpf_type = LIBBPF_MAP_UNSPEC;
- map->def.type = BPF_MAP_TYPE_UNSPEC;
- map->sec_idx = sec_idx;
- map->sec_offset = vi->offset;
- pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
- map_name, map->sec_idx, map->sec_offset);
-
- vlen = btf_vlen(def);
- m = btf_members(def);
+ vlen = btf_vlen(def_t);
+ m = btf_members(def_t);
for (i = 0; i < vlen; i++, m++) {
- const char *name = btf__name_by_offset(obj->btf, m->name_off);
+ const char *name = btf__name_by_offset(btf, m->name_off);
if (!name) {
pr_warn("map '%s': invalid field #%d.\n", map_name, i);
return -EINVAL;
}
if (strcmp(name, "type") == 0) {
- if (!get_map_field_int(map_name, obj->btf, m,
- &map->def.type))
+ if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
return -EINVAL;
- pr_debug("map '%s': found type = %u.\n",
- map_name, map->def.type);
+ map_def->parts |= MAP_DEF_MAP_TYPE;
} else if (strcmp(name, "max_entries") == 0) {
- if (!get_map_field_int(map_name, obj->btf, m,
- &map->def.max_entries))
+ if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
return -EINVAL;
- pr_debug("map '%s': found max_entries = %u.\n",
- map_name, map->def.max_entries);
+ map_def->parts |= MAP_DEF_MAX_ENTRIES;
} else if (strcmp(name, "map_flags") == 0) {
- if (!get_map_field_int(map_name, obj->btf, m,
- &map->def.map_flags))
+ if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
+ return -EINVAL;
+ map_def->parts |= MAP_DEF_MAP_FLAGS;
+ } else if (strcmp(name, "numa_node") == 0) {
+ if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
return -EINVAL;
- pr_debug("map '%s': found map_flags = %u.\n",
- map_name, map->def.map_flags);
+ map_def->parts |= MAP_DEF_NUMA_NODE;
} else if (strcmp(name, "key_size") == 0) {
__u32 sz;
- if (!get_map_field_int(map_name, obj->btf, m, &sz))
+ if (!get_map_field_int(map_name, btf, m, &sz))
return -EINVAL;
- pr_debug("map '%s': found key_size = %u.\n",
- map_name, sz);
- if (map->def.key_size && map->def.key_size != sz) {
+ if (map_def->key_size && map_def->key_size != sz) {
pr_warn("map '%s': conflicting key size %u != %u.\n",
- map_name, map->def.key_size, sz);
+ map_name, map_def->key_size, sz);
return -EINVAL;
}
- map->def.key_size = sz;
+ map_def->key_size = sz;
+ map_def->parts |= MAP_DEF_KEY_SIZE;
} else if (strcmp(name, "key") == 0) {
__s64 sz;
- t = btf__type_by_id(obj->btf, m->type);
+ t = btf__type_by_id(btf, m->type);
if (!t) {
pr_warn("map '%s': key type [%d] not found.\n",
map_name, m->type);
return -EINVAL;
}
if (!btf_is_ptr(t)) {
- pr_warn("map '%s': key spec is not PTR: %u.\n",
- map_name, btf_kind(t));
+ pr_warn("map '%s': key spec is not PTR: %s.\n",
+ map_name, btf_kind_str(t));
return -EINVAL;
}
- sz = btf__resolve_size(obj->btf, t->type);
+ sz = btf__resolve_size(btf, t->type);
if (sz < 0) {
pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
map_name, t->type, (ssize_t)sz);
return sz;
}
- pr_debug("map '%s': found key [%u], sz = %zd.\n",
- map_name, t->type, (ssize_t)sz);
- if (map->def.key_size && map->def.key_size != sz) {
+ if (map_def->key_size && map_def->key_size != sz) {
pr_warn("map '%s': conflicting key size %u != %zd.\n",
- map_name, map->def.key_size, (ssize_t)sz);
+ map_name, map_def->key_size, (ssize_t)sz);
return -EINVAL;
}
- map->def.key_size = sz;
- map->btf_key_type_id = t->type;
+ map_def->key_size = sz;
+ map_def->key_type_id = t->type;
+ map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
} else if (strcmp(name, "value_size") == 0) {
__u32 sz;
- if (!get_map_field_int(map_name, obj->btf, m, &sz))
+ if (!get_map_field_int(map_name, btf, m, &sz))
return -EINVAL;
- pr_debug("map '%s': found value_size = %u.\n",
- map_name, sz);
- if (map->def.value_size && map->def.value_size != sz) {
+ if (map_def->value_size && map_def->value_size != sz) {
pr_warn("map '%s': conflicting value size %u != %u.\n",
- map_name, map->def.value_size, sz);
+ map_name, map_def->value_size, sz);
return -EINVAL;
}
- map->def.value_size = sz;
+ map_def->value_size = sz;
+ map_def->parts |= MAP_DEF_VALUE_SIZE;
} else if (strcmp(name, "value") == 0) {
__s64 sz;
- t = btf__type_by_id(obj->btf, m->type);
+ t = btf__type_by_id(btf, m->type);
if (!t) {
pr_warn("map '%s': value type [%d] not found.\n",
map_name, m->type);
return -EINVAL;
}
if (!btf_is_ptr(t)) {
- pr_warn("map '%s': value spec is not PTR: %u.\n",
- map_name, btf_kind(t));
+ pr_warn("map '%s': value spec is not PTR: %s.\n",
+ map_name, btf_kind_str(t));
return -EINVAL;
}
- sz = btf__resolve_size(obj->btf, t->type);
+ sz = btf__resolve_size(btf, t->type);
if (sz < 0) {
pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
map_name, t->type, (ssize_t)sz);
return sz;
}
- pr_debug("map '%s': found value [%u], sz = %zd.\n",
- map_name, t->type, (ssize_t)sz);
- if (map->def.value_size && map->def.value_size != sz) {
+ if (map_def->value_size && map_def->value_size != sz) {
pr_warn("map '%s': conflicting value size %u != %zd.\n",
- map_name, map->def.value_size, (ssize_t)sz);
+ map_name, map_def->value_size, (ssize_t)sz);
return -EINVAL;
}
- map->def.value_size = sz;
- map->btf_value_type_id = t->type;
- } else if (strcmp(name, "pinning") == 0) {
- __u32 val;
+ map_def->value_size = sz;
+ map_def->value_type_id = t->type;
+ map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
+ } else if (strcmp(name, "values") == 0) {
+ bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
+ bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
+ const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
+ char inner_map_name[128];
int err;
- if (!get_map_field_int(map_name, obj->btf, m, &val))
+ if (is_inner) {
+ pr_warn("map '%s': multi-level inner maps not supported.\n",
+ map_name);
+ return -ENOTSUP;
+ }
+ if (i != vlen - 1) {
+ pr_warn("map '%s': '%s' member should be last.\n",
+ map_name, name);
+ return -EINVAL;
+ }
+ if (!is_map_in_map && !is_prog_array) {
+ pr_warn("map '%s': should be map-in-map or prog-array.\n",
+ map_name);
+ return -ENOTSUP;
+ }
+ if (map_def->value_size && map_def->value_size != 4) {
+ pr_warn("map '%s': conflicting value size %u != 4.\n",
+ map_name, map_def->value_size);
+ return -EINVAL;
+ }
+ map_def->value_size = 4;
+ t = btf__type_by_id(btf, m->type);
+ if (!t) {
+ pr_warn("map '%s': %s type [%d] not found.\n",
+ map_name, desc, m->type);
return -EINVAL;
- pr_debug("map '%s': found pinning = %u.\n",
- map_name, val);
+ }
+ if (!btf_is_array(t) || btf_array(t)->nelems) {
+ pr_warn("map '%s': %s spec is not a zero-sized array.\n",
+ map_name, desc);
+ return -EINVAL;
+ }
+ t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
+ if (!btf_is_ptr(t)) {
+ pr_warn("map '%s': %s def is of unexpected kind %s.\n",
+ map_name, desc, btf_kind_str(t));
+ return -EINVAL;
+ }
+ t = skip_mods_and_typedefs(btf, t->type, NULL);
+ if (is_prog_array) {
+ if (!btf_is_func_proto(t)) {
+ pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
+ map_name, btf_kind_str(t));
+ return -EINVAL;
+ }
+ continue;
+ }
+ if (!btf_is_struct(t)) {
+ pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
+ map_name, btf_kind_str(t));
+ return -EINVAL;
+ }
- if (val != LIBBPF_PIN_NONE &&
- val != LIBBPF_PIN_BY_NAME) {
+ snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
+ err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
+ if (err)
+ return err;
+
+ map_def->parts |= MAP_DEF_INNER_MAP;
+ } else if (strcmp(name, "pinning") == 0) {
+ __u32 val;
+
+ if (is_inner) {
+ pr_warn("map '%s': inner def can't be pinned.\n", map_name);
+ return -EINVAL;
+ }
+ if (!get_map_field_int(map_name, btf, m, &val))
+ return -EINVAL;
+ if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
pr_warn("map '%s': invalid pinning value %u.\n",
map_name, val);
return -EINVAL;
}
- if (val == LIBBPF_PIN_BY_NAME) {
- err = build_map_pin_path(map, pin_root_path);
- if (err) {
- pr_warn("map '%s': couldn't build pin path.\n",
- map_name);
- return err;
- }
- }
+ map_def->pinning = val;
+ map_def->parts |= MAP_DEF_PINNING;
+ } else if (strcmp(name, "map_extra") == 0) {
+ __u32 map_extra;
+
+ if (!get_map_field_int(map_name, btf, m, &map_extra))
+ return -EINVAL;
+ map_def->map_extra = map_extra;
+ map_def->parts |= MAP_DEF_MAP_EXTRA;
} else {
if (strict) {
- pr_warn("map '%s': unknown field '%s'.\n",
- map_name, name);
+ pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
return -ENOTSUP;
}
- pr_debug("map '%s': ignoring unknown field '%s'.\n",
- map_name, name);
+ pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
}
}
- if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
+ if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
pr_warn("map '%s': map type isn't specified.\n", map_name);
return -EINVAL;
}
@@ -2114,6 +2385,201 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return 0;
}
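/* For reference, a minimal BPF-side definition of the shape parsed by
 * parse_btf_map_def() above; the field names match the strcmp()s, the
 * SEC()/__uint()/__type() macros are assumed to come from bpf_helpers.h,
 * and struct my_val is a hypothetical value type:
 *
 *   struct {
 *           __uint(type, BPF_MAP_TYPE_HASH);
 *           __uint(max_entries, 1024);
 *           __type(key, __u32);
 *           __type(value, struct my_val);
 *           __uint(pinning, LIBBPF_PIN_BY_NAME);
 *   } example_map SEC(".maps");
 */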
+static size_t adjust_ringbuf_sz(size_t sz)
+{
+ __u32 page_sz = sysconf(_SC_PAGE_SIZE);
+ __u32 mul;
+
+ /* if user forgot to set any size, make sure they see error */
+ if (sz == 0)
+ return 0;
+ /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
+ * a power-of-2 multiple of kernel's page size. If user diligently
+ * satisfied these conditions, pass the size through.
+ */
+ if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
+ return sz;
+
+ /* Otherwise find closest (page_sz * power_of_2) product bigger than
+ * user-set size to satisfy both user size request and kernel
+ * requirements and substitute correct max_entries for map creation.
+ */
+ for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
+ if (mul * page_sz > sz)
+ return mul * page_sz;
+ }
+
+ /* if it's impossible to satisfy the conditions (i.e., user size is
+ * very close to UINT_MAX but is not a power-of-2 multiple of
+ * page_size) then just return original size and let kernel reject it
+ */
+ return sz;
+}
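/* Worked examples of the rounding above (illustrative, assuming a 4096-byte
 * page size):
 *   sz = 32768 (4096 * 2^3)  -> returned unchanged
 *   sz = 5000                -> rounded up to 8192 (4096 * 2)
 *   sz = 0                   -> returned as 0 so the kernel rejects the map
 */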
+
+static bool map_is_ringbuf(const struct bpf_map *map)
+{
+ return map->def.type == BPF_MAP_TYPE_RINGBUF ||
+ map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
+}
+
+static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
+{
+ map->def.type = def->map_type;
+ map->def.key_size = def->key_size;
+ map->def.value_size = def->value_size;
+ map->def.max_entries = def->max_entries;
+ map->def.map_flags = def->map_flags;
+ map->map_extra = def->map_extra;
+
+ map->numa_node = def->numa_node;
+ map->btf_key_type_id = def->key_type_id;
+ map->btf_value_type_id = def->value_type_id;
+
+ /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
+ if (map_is_ringbuf(map))
+ map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
+
+ if (def->parts & MAP_DEF_MAP_TYPE)
+ pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
+
+ if (def->parts & MAP_DEF_KEY_TYPE)
+ pr_debug("map '%s': found key [%u], sz = %u.\n",
+ map->name, def->key_type_id, def->key_size);
+ else if (def->parts & MAP_DEF_KEY_SIZE)
+ pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
+
+ if (def->parts & MAP_DEF_VALUE_TYPE)
+ pr_debug("map '%s': found value [%u], sz = %u.\n",
+ map->name, def->value_type_id, def->value_size);
+ else if (def->parts & MAP_DEF_VALUE_SIZE)
+ pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
+
+ if (def->parts & MAP_DEF_MAX_ENTRIES)
+ pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
+ if (def->parts & MAP_DEF_MAP_FLAGS)
+ pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
+ if (def->parts & MAP_DEF_MAP_EXTRA)
+ pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
+ (unsigned long long)def->map_extra);
+ if (def->parts & MAP_DEF_PINNING)
+ pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
+ if (def->parts & MAP_DEF_NUMA_NODE)
+ pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
+
+ if (def->parts & MAP_DEF_INNER_MAP)
+ pr_debug("map '%s': found inner map definition.\n", map->name);
+}
+
+static const char *btf_var_linkage_str(__u32 linkage)
+{
+ switch (linkage) {
+ case BTF_VAR_STATIC: return "static";
+ case BTF_VAR_GLOBAL_ALLOCATED: return "global";
+ case BTF_VAR_GLOBAL_EXTERN: return "extern";
+ default: return "unknown";
+ }
+}
+
+static int bpf_object__init_user_btf_map(struct bpf_object *obj,
+ const struct btf_type *sec,
+ int var_idx, int sec_idx,
+ const Elf_Data *data, bool strict,
+ const char *pin_root_path)
+{
+ struct btf_map_def map_def = {}, inner_def = {};
+ const struct btf_type *var, *def;
+ const struct btf_var_secinfo *vi;
+ const struct btf_var *var_extra;
+ const char *map_name;
+ struct bpf_map *map;
+ int err;
+
+ vi = btf_var_secinfos(sec) + var_idx;
+ var = btf__type_by_id(obj->btf, vi->type);
+ var_extra = btf_var(var);
+ map_name = btf__name_by_offset(obj->btf, var->name_off);
+
+ if (map_name == NULL || map_name[0] == '\0') {
+ pr_warn("map #%d: empty name.\n", var_idx);
+ return -EINVAL;
+ }
+ if ((__u64)vi->offset + vi->size > data->d_size) {
+ pr_warn("map '%s' BTF data is corrupted.\n", map_name);
+ return -EINVAL;
+ }
+ if (!btf_is_var(var)) {
+ pr_warn("map '%s': unexpected var kind %s.\n",
+ map_name, btf_kind_str(var));
+ return -EINVAL;
+ }
+ if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
+ pr_warn("map '%s': unsupported map linkage %s.\n",
+ map_name, btf_var_linkage_str(var_extra->linkage));
+ return -EOPNOTSUPP;
+ }
+
+ def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
+ if (!btf_is_struct(def)) {
+ pr_warn("map '%s': unexpected def kind %s.\n",
+ map_name, btf_kind_str(var));
+ return -EINVAL;
+ }
+ if (def->size > vi->size) {
+ pr_warn("map '%s': invalid def size.\n", map_name);
+ return -EINVAL;
+ }
+
+ map = bpf_object__add_map(obj);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+ map->name = strdup(map_name);
+ if (!map->name) {
+ pr_warn("map '%s': failed to alloc map name.\n", map_name);
+ return -ENOMEM;
+ }
+ map->libbpf_type = LIBBPF_MAP_UNSPEC;
+ map->def.type = BPF_MAP_TYPE_UNSPEC;
+ map->sec_idx = sec_idx;
+ map->sec_offset = vi->offset;
+ map->btf_var_idx = var_idx;
+ pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
+ map_name, map->sec_idx, map->sec_offset);
+
+ err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
+ if (err)
+ return err;
+
+ fill_map_from_def(map, &map_def);
+
+ if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
+ err = build_map_pin_path(map, pin_root_path);
+ if (err) {
+ pr_warn("map '%s': couldn't build pin path.\n", map->name);
+ return err;
+ }
+ }
+
+ if (map_def.parts & MAP_DEF_INNER_MAP) {
+ map->inner_map = calloc(1, sizeof(*map->inner_map));
+ if (!map->inner_map)
+ return -ENOMEM;
+ map->inner_map->fd = -1;
+ map->inner_map->sec_idx = sec_idx;
+ map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
+ if (!map->inner_map->name)
+ return -ENOMEM;
+ sprintf(map->inner_map->name, "%s.inner", map_name);
+
+ fill_map_from_def(map->inner_map, &inner_def);
+ }
+
+ err = map_fill_btf_type_info(obj, map);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
const char *pin_root_path)
{
@@ -2127,23 +2593,23 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
if (obj->efile.btf_maps_shndx < 0)
return 0;
- scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
- if (scn)
- data = elf_getdata(scn, NULL);
+ scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
+ data = elf_sec_data(obj, scn);
if (!scn || !data) {
- pr_warn("failed to get Elf_Data from map section %d (%s)\n",
- obj->efile.maps_shndx, MAPS_ELF_SEC);
+ pr_warn("elf: failed to get %s map definitions for %s\n",
+ MAPS_ELF_SEC, obj->path);
return -EINVAL;
}
- nr_types = btf__get_nr_types(obj->btf);
- for (i = 1; i <= nr_types; i++) {
+ nr_types = btf__type_cnt(obj->btf);
+ for (i = 1; i < nr_types; i++) {
t = btf__type_by_id(obj->btf, i);
if (!btf_is_datasec(t))
continue;
name = btf__name_by_offset(obj->btf, t->name_off);
if (strcmp(name, MAPS_ELF_SEC) == 0) {
sec = t;
+ obj->efile.btf_maps_sec_btf_id = i;
break;
}
}
@@ -2171,57 +2637,62 @@ static int bpf_object__init_maps(struct bpf_object *obj,
{
const char *pin_root_path;
bool strict;
- int err;
+ int err = 0;
strict = !OPTS_GET(opts, relaxed_maps, false);
pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
- err = bpf_object__init_user_maps(obj, strict);
- err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
+ err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
err = err ?: bpf_object__init_global_data_maps(obj);
err = err ?: bpf_object__init_kconfig_map(obj);
- err = err ?: bpf_object__init_struct_ops_maps(obj);
- if (err)
- return err;
+ err = err ?: bpf_object_init_struct_ops(obj);
- return 0;
+ return err;
}
static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
- Elf_Scn *scn;
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
- scn = elf_getscn(obj->efile.elf, idx);
- if (!scn)
+ sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
+ if (!sh)
return false;
- if (gelf_getshdr(scn, &sh) != &sh)
- return false;
+ return sh->sh_flags & SHF_EXECINSTR;
+}
- if (sh.sh_flags & SHF_EXECINSTR)
- return true;
+static bool btf_needs_sanitization(struct bpf_object *obj)
+{
+ bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
+ bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
+ bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
+ bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
+ bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
+ bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
+ bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
- return false;
+ return !has_func || !has_datasec || !has_func_global || !has_float ||
+ !has_decl_tag || !has_type_tag || !has_enum64;
}
-static void bpf_object__sanitize_btf(struct bpf_object *obj)
+static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
- bool has_func_global = obj->caps.btf_func_global;
- bool has_datasec = obj->caps.btf_datasec;
- bool has_func = obj->caps.btf_func;
- struct btf *btf = obj->btf;
+ bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
+ bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
+ bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
+ bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
+ bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
+ bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
+ bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
+ int enum64_placeholder_id = 0;
struct btf_type *t;
int i, j, vlen;
- if (!obj->btf || (has_func && has_datasec && has_func_global))
- return;
-
- for (i = 1; i <= btf__get_nr_types(btf); i++) {
+ for (i = 1; i < btf__type_cnt(btf); i++) {
t = (struct btf_type *)btf__type_by_id(btf, i);
- if (!has_datasec && btf_is_var(t)) {
- /* replace VAR with INT */
+ if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
+ /* replace VAR/DECL_TAG with INT */
t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
/*
* using size = 1 is the safest choice, 4 will be too
@@ -2265,31 +2736,56 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
} else if (!has_func_global && btf_is_func(t)) {
/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
+ } else if (!has_float && btf_is_float(t)) {
+ /* replace FLOAT with an equally-sized empty STRUCT;
+ * since C compilers do not accept e.g. "float" as a
+ * valid struct name, make it anonymous
+ */
+ t->name_off = 0;
+ t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
+ } else if (!has_type_tag && btf_is_type_tag(t)) {
+ /* replace TYPE_TAG with a CONST */
+ t->name_off = 0;
+ t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
+ } else if (!has_enum64 && btf_is_enum(t)) {
+ /* clear the kflag */
+ t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
+ } else if (!has_enum64 && btf_is_enum64(t)) {
+ /* replace ENUM64 with a union */
+ struct btf_member *m;
+
+ if (enum64_placeholder_id == 0) {
+ enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
+ if (enum64_placeholder_id < 0)
+ return enum64_placeholder_id;
+
+ t = (struct btf_type *)btf__type_by_id(btf, i);
+ }
+
+ m = btf_members(t);
+ vlen = btf_vlen(t);
+ t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
+ for (j = 0; j < vlen; j++, m++) {
+ m->type = enum64_placeholder_id;
+ m->offset = 0;
+ }
}
}
-}
-static void bpf_object__sanitize_btf_ext(struct bpf_object *obj)
-{
- if (!obj->btf_ext)
- return;
-
- if (!obj->caps.btf_func) {
- btf_ext__free(obj->btf_ext);
- obj->btf_ext = NULL;
- }
+ return 0;
}
static bool libbpf_needs_btf(const struct bpf_object *obj)
{
return obj->efile.btf_maps_shndx >= 0 ||
obj->efile.st_ops_shndx >= 0 ||
+ obj->efile.st_ops_link_shndx >= 0 ||
obj->nr_extern > 0;
}
static bool kernel_needs_btf(const struct bpf_object *obj)
{
- return obj->efile.st_ops_shndx >= 0;
+ return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
}
static int bpf_object__init_btf(struct bpf_object *obj,
@@ -2300,29 +2796,69 @@ static int bpf_object__init_btf(struct bpf_object *obj,
if (btf_data) {
obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
- if (IS_ERR(obj->btf)) {
- err = PTR_ERR(obj->btf);
+ err = libbpf_get_error(obj->btf);
+ if (err) {
obj->btf = NULL;
- pr_warn("Error loading ELF section %s: %d.\n",
- BTF_ELF_SEC, err);
+ pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
goto out;
}
- err = 0;
+ /* enforce 8-byte pointers for BPF-targeted BTFs */
+ btf__set_pointer_size(obj->btf, 8);
}
if (btf_ext_data) {
+ struct btf_ext_info *ext_segs[3];
+ int seg_num, sec_num;
+
if (!obj->btf) {
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
goto out;
}
- obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
- btf_ext_data->d_size);
- if (IS_ERR(obj->btf_ext)) {
- pr_warn("Error loading ELF section %s: %ld. Ignored and continue.\n",
- BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
+ obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
+ err = libbpf_get_error(obj->btf_ext);
+ if (err) {
+ pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
+ BTF_EXT_ELF_SEC, err);
obj->btf_ext = NULL;
goto out;
}
+
+ /* setup .BTF.ext to ELF section mapping */
+ ext_segs[0] = &obj->btf_ext->func_info;
+ ext_segs[1] = &obj->btf_ext->line_info;
+ ext_segs[2] = &obj->btf_ext->core_relo_info;
+ for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
+ struct btf_ext_info *seg = ext_segs[seg_num];
+ const struct btf_ext_info_sec *sec;
+ const char *sec_name;
+ Elf_Scn *scn;
+
+ if (seg->sec_cnt == 0)
+ continue;
+
+ seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
+ if (!seg->sec_idxs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sec_num = 0;
+ for_each_btf_ext_sec(seg, sec) {
+ /* preventively increment index to avoid doing
+ * this before every continue below
+ */
+ sec_num++;
+
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (str_is_empty(sec_name))
+ continue;
+ scn = elf_sec_by_name(obj, sec_name);
+ if (!scn)
+ continue;
+
+ seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
+ }
+ }
}
out:
if (err && libbpf_needs_btf(obj)) {
@@ -2332,31 +2868,134 @@ out:
return 0;
}
-static int bpf_object__finalize_btf(struct bpf_object *obj)
+static int compare_vsi_off(const void *_a, const void *_b)
+{
+ const struct btf_var_secinfo *a = _a;
+ const struct btf_var_secinfo *b = _b;
+
+ return a->offset - b->offset;
+}
+
+static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
+ struct btf_type *t)
{
+ __u32 size = 0, i, vars = btf_vlen(t);
+ const char *sec_name = btf__name_by_offset(btf, t->name_off);
+ struct btf_var_secinfo *vsi;
+ bool fixup_offsets = false;
int err;
- if (!obj->btf)
- return 0;
+ if (!sec_name) {
+ pr_debug("No name found in string section for DATASEC kind.\n");
+ return -ENOENT;
+ }
+
+ /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
+ * variable offsets set at the previous step. Further, not every
+ * extern BTF VAR has a corresponding ELF symbol preserved, so we skip
+ * all fixups altogether for such sections and go straight to sorting
+ * VARs within their DATASEC.
+ */
+ if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
+ goto sort_vars;
+
+ /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
+ * fix this up. But BPF static linker already fixes this up and fills
+ * all the sizes and offsets during static linking. So this step has
+ * to be optional. But the STV_HIDDEN handling is non-optional for any
+ * non-extern DATASEC, so the variable fixup loop below handles both
+ * functions at the same time, paying the cost of BTF VAR <-> ELF
+ * symbol matching just once.
+ */
+ if (t->size == 0) {
+ err = find_elf_sec_sz(obj, sec_name, &size);
+ if (err || !size) {
+ pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
+ sec_name, size, err);
+ return -ENOENT;
+ }
+
+ t->size = size;
+ fixup_offsets = true;
+ }
+
+ for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
+ const struct btf_type *t_var;
+ struct btf_var *var;
+ const char *var_name;
+ Elf64_Sym *sym;
- err = btf__finalize_data(obj, obj->btf);
- if (!err)
+ t_var = btf__type_by_id(btf, vsi->type);
+ if (!t_var || !btf_is_var(t_var)) {
+ pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
+ return -EINVAL;
+ }
+
+ var = btf_var(t_var);
+ if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
+ continue;
+
+ var_name = btf__name_by_offset(btf, t_var->name_off);
+ if (!var_name) {
+ pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
+ sec_name, i);
+ return -ENOENT;
+ }
+
+ sym = find_elf_var_sym(obj, var_name);
+ if (IS_ERR(sym)) {
+ pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
+ sec_name, var_name);
+ return -ENOENT;
+ }
+
+ if (fixup_offsets)
+ vsi->offset = sym->st_value;
+
+ /* if variable is a global/weak symbol, but has restricted
+ * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
+ * as static. This follows similar logic for functions (BPF
+ * subprogs) and influences libbpf's further decisions about
+ * whether to mark global data BPF array maps as
+ * BPF_F_MMAPABLE.
+ */
+ if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
+ || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
+ var->linkage = BTF_VAR_STATIC;
+ }
+
+sort_vars:
+ qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
+ return 0;
+}
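/* Illustrative fixup of a clang-emitted DATASEC (all numbers hypothetical):
 *
 *   DATASEC '.data', size 0            -> size filled from ELF section size
 *   VAR 'cnt' (global), offset 0       -> offset filled from symbol st_value
 *   VAR 'hid' (global, STV_HIDDEN sym) -> linkage rewritten to BTF_VAR_STATIC
 *   finally, all secinfos are sorted by offset
 */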
+
+static int bpf_object_fixup_btf(struct bpf_object *obj)
+{
+ int i, n, err = 0;
+
+ if (!obj->btf)
return 0;
- pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
- btf__free(obj->btf);
- obj->btf = NULL;
- btf_ext__free(obj->btf_ext);
- obj->btf_ext = NULL;
+ n = btf__type_cnt(obj->btf);
+ for (i = 1; i < n; i++) {
+ struct btf_type *t = btf_type_by_id(obj->btf, i);
- if (libbpf_needs_btf(obj)) {
- pr_warn("BTF is required, but is missing or corrupted.\n");
- return -ENOENT;
+ /* Loader needs to fix up some of the things compiler
+ * couldn't get its hands on while emitting BTF. This
+ * is section size and global variable offset. We use
+ * the info from the ELF itself for this purpose.
+ */
+ if (btf_is_datasec(t)) {
+ err = btf_fixup_datasec(obj, obj->btf, t);
+ if (err)
+ return err;
+ }
}
+
return 0;
}
-static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
+static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
{
if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
prog->type == BPF_PROG_TYPE_LSM)
@@ -2371,229 +3010,578 @@ static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
return false;
}
-static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
+static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
{
struct bpf_program *prog;
- int err;
+ int i;
+
+ /* CO-RE relocations need kernel BTF, but only when btf_custom_path
+ * is not specified
+ */
+ if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
+ return true;
+
+ /* Support for typed ksyms needs kernel BTF */
+ for (i = 0; i < obj->nr_extern; i++) {
+ const struct extern_desc *ext;
+
+ ext = &obj->externs[i];
+ if (ext->type == EXT_KSYM && ext->ksym.type_id)
+ return true;
+ }
bpf_object__for_each_program(prog, obj) {
- if (libbpf_prog_needs_vmlinux_btf(prog)) {
- obj->btf_vmlinux = libbpf_find_kernel_btf();
- if (IS_ERR(obj->btf_vmlinux)) {
- err = PTR_ERR(obj->btf_vmlinux);
- pr_warn("Error loading vmlinux BTF: %d\n", err);
- obj->btf_vmlinux = NULL;
- return err;
- }
- return 0;
- }
+ if (!prog->autoload)
+ continue;
+ if (prog_needs_vmlinux_btf(prog))
+ return true;
}
+ return false;
+}
+
+static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
+{
+ int err;
+
+ /* btf_vmlinux could be loaded earlier */
+ if (obj->btf_vmlinux || obj->gen_loader)
+ return 0;
+
+ if (!force && !obj_needs_vmlinux_btf(obj))
+ return 0;
+
+ obj->btf_vmlinux = btf__load_vmlinux_btf();
+ err = libbpf_get_error(obj->btf_vmlinux);
+ if (err) {
+ pr_warn("Error loading vmlinux BTF: %d\n", err);
+ obj->btf_vmlinux = NULL;
+ return err;
+ }
return 0;
}
static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
- int err = 0;
+ struct btf *kern_btf = obj->btf;
+ bool btf_mandatory, sanitize;
+ int i, err = 0;
if (!obj->btf)
return 0;
- bpf_object__sanitize_btf(obj);
- bpf_object__sanitize_btf_ext(obj);
+ if (!kernel_supports(obj, FEAT_BTF)) {
+ if (kernel_needs_btf(obj)) {
+ err = -EOPNOTSUPP;
+ goto report;
+ }
+ pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
+ return 0;
+ }
- err = btf__load(obj->btf);
- if (err) {
- pr_warn("Error loading %s into kernel: %d.\n",
- BTF_ELF_SEC, err);
- btf__free(obj->btf);
- obj->btf = NULL;
- /* btf_ext can't exist without btf, so free it as well */
- if (obj->btf_ext) {
- btf_ext__free(obj->btf_ext);
- obj->btf_ext = NULL;
+ /* Even though some subprogs are global/weak, user might prefer more
+ * permissive BPF verification process that BPF verifier performs for
+ * static functions, taking into account more context from the caller
+ * functions. In such case, they need to mark such subprogs with
+ * __attribute__((visibility("hidden"))) and libbpf will adjust
+ * corresponding FUNC BTF type to be marked as static and trigger more
+ * involved BPF verification process.
+ */
+ for (i = 0; i < obj->nr_programs; i++) {
+ struct bpf_program *prog = &obj->programs[i];
+ struct btf_type *t;
+ const char *name;
+ int j, n;
+
+ if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
+ continue;
+
+ n = btf__type_cnt(obj->btf);
+ for (j = 1; j < n; j++) {
+ t = btf_type_by_id(obj->btf, j);
+ if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
+ continue;
+
+ name = btf__str_by_offset(obj->btf, t->name_off);
+ if (strcmp(name, prog->name) != 0)
+ continue;
+
+ t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
+ break;
}
+ }
- if (kernel_needs_btf(obj))
+ sanitize = btf_needs_sanitization(obj);
+ if (sanitize) {
+ const void *raw_data;
+ __u32 sz;
+
+ /* clone BTF to sanitize a copy and leave the original intact */
+ raw_data = btf__raw_data(obj->btf, &sz);
+ kern_btf = btf__new(raw_data, sz);
+ err = libbpf_get_error(kern_btf);
+ if (err)
+ return err;
+
+ /* enforce 8-byte pointers for BPF-targeted BTFs */
+ btf__set_pointer_size(obj->btf, 8);
+ err = bpf_object__sanitize_btf(obj, kern_btf);
+ if (err)
return err;
}
- return 0;
+
+ if (obj->gen_loader) {
+ __u32 raw_size = 0;
+ const void *raw_data = btf__raw_data(kern_btf, &raw_size);
+
+ if (!raw_data)
+ return -ENOMEM;
+ bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
+ /* Pretend to have valid FD to pass various fd >= 0 checks.
+ * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
+ */
+ btf__set_fd(kern_btf, 0);
+ } else {
+ /* currently BPF_BTF_LOAD only supports log_level 1 */
+ err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
+ obj->log_level ? 1 : 0);
+ }
+ if (sanitize) {
+ if (!err) {
+ /* move fd to libbpf's BTF */
+ btf__set_fd(obj->btf, btf__fd(kern_btf));
+ btf__set_fd(kern_btf, -1);
+ }
+ btf__free(kern_btf);
+ }
+report:
+ if (err) {
+ btf_mandatory = kernel_needs_btf(obj);
+ pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
+ btf_mandatory ? "BTF is mandatory, can't proceed."
+ : "BTF is optional, ignoring.");
+ if (!btf_mandatory)
+ err = 0;
+ }
+ return err;
+}
+
+static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
+{
+ const char *name;
+
+ name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
+ if (!name) {
+ pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
+ off, obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return name;
+}
+
+static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
+{
+ const char *name;
+
+ name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
+ if (!name) {
+ pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
+ off, obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return name;
+}
+
+static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
+{
+ Elf_Scn *scn;
+
+ scn = elf_getscn(obj->efile.elf, idx);
+ if (!scn) {
+ pr_warn("elf: failed to get section(%zu) from %s: %s\n",
+ idx, obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+ return scn;
+}
+
+static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
+{
+ Elf_Scn *scn = NULL;
+ Elf *elf = obj->efile.elf;
+ const char *sec_name;
+
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ sec_name = elf_sec_name(obj, scn);
+ if (!sec_name)
+ return NULL;
+
+ if (strcmp(sec_name, name) != 0)
+ continue;
+
+ return scn;
+ }
+ return NULL;
+}
+
+static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
+{
+ Elf64_Shdr *shdr;
+
+ if (!scn)
+ return NULL;
+
+ shdr = elf64_getshdr(scn);
+ if (!shdr) {
+ pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
+ elf_ndxscn(scn), obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return shdr;
+}
+
+static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
+{
+ const char *name;
+ Elf64_Shdr *sh;
+
+ if (!scn)
+ return NULL;
+
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
+ return NULL;
+
+ name = elf_sec_str(obj, sh->sh_name);
+ if (!name) {
+ pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
+ elf_ndxscn(scn), obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return name;
+}
+
+static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
+{
+ Elf_Data *data;
+
+ if (!scn)
+ return NULL;
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
+ elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
+ obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return data;
+}
+
+static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
+{
+ if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
+ return NULL;
+
+ return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
+}
+
+static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
+{
+ if (idx >= data->d_size / sizeof(Elf64_Rel))
+ return NULL;
+
+ return (Elf64_Rel *)data->d_buf + idx;
+}
+
+static bool is_sec_name_dwarf(const char *name)
+{
+ /* approximation, but the actual list is too long */
+ return str_has_pfx(name, ".debug_");
+}
+
+static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
+{
+ /* no special handling of .strtab */
+ if (hdr->sh_type == SHT_STRTAB)
+ return true;
+
+ /* ignore .llvm_addrsig section as well */
+ if (hdr->sh_type == SHT_LLVM_ADDRSIG)
+ return true;
+
+ /* no subprograms will lead to an empty .text section, ignore it */
+ if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
+ strcmp(name, ".text") == 0)
+ return true;
+
+ /* DWARF sections */
+ if (is_sec_name_dwarf(name))
+ return true;
+
+ if (str_has_pfx(name, ".rel")) {
+ name += sizeof(".rel") - 1;
+ /* DWARF section relocations */
+ if (is_sec_name_dwarf(name))
+ return true;
+
+ /* .BTF and .BTF.ext don't need relocations */
+ if (strcmp(name, BTF_ELF_SEC) == 0 ||
+ strcmp(name, BTF_EXT_ELF_SEC) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static int cmp_progs(const void *_a, const void *_b)
+{
+ const struct bpf_program *a = _a;
+ const struct bpf_program *b = _b;
+
+ if (a->sec_idx != b->sec_idx)
+ return a->sec_idx < b->sec_idx ? -1 : 1;
+
+ /* sec_insn_off can't be the same within the section */
+ return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
}
static int bpf_object__elf_collect(struct bpf_object *obj)
{
+ struct elf_sec_desc *sec_desc;
Elf *elf = obj->efile.elf;
- GElf_Ehdr *ep = &obj->efile.ehdr;
Elf_Data *btf_ext_data = NULL;
Elf_Data *btf_data = NULL;
- Elf_Scn *scn = NULL;
int idx = 0, err = 0;
+ const char *name;
+ Elf_Data *data;
+ Elf_Scn *scn;
+ Elf64_Shdr *sh;
- /* Elf is corrupted/truncated, avoid calling elf_strptr. */
- if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
- pr_warn("failed to get e_shstrndx from %s\n", obj->path);
+ /* ELF section indices are 0-based, but sec #0 is special "invalid"
+ * section. Since section count retrieved by elf_getshdrnum() does
+ * include sec #0, it is already the necessary size of an array to keep
+ * all the sections.
+ */
+ if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
+ pr_warn("elf: failed to get the number of sections for %s: %s\n",
+ obj->path, elf_errmsg(-1));
return -LIBBPF_ERRNO__FORMAT;
}
+ obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
+ if (!obj->efile.secs)
+ return -ENOMEM;
+ /* a bunch of ELF parsing functionality depends on processing symbols,
+ * so do the first pass and find the symbol table
+ */
+ scn = NULL;
while ((scn = elf_nextscn(elf, scn)) != NULL) {
- char *name;
- GElf_Shdr sh;
- Elf_Data *data;
-
- idx++;
- if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warn("failed to get section(%d) header from %s\n",
- idx, obj->path);
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return -LIBBPF_ERRNO__FORMAT;
+
+ if (sh->sh_type == SHT_SYMTAB) {
+ if (obj->efile.symbols) {
+ pr_warn("elf: multiple symbol tables in %s\n", obj->path);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ data = elf_sec_data(obj, scn);
+ if (!data)
+ return -LIBBPF_ERRNO__FORMAT;
+
+ idx = elf_ndxscn(scn);
+
+ obj->efile.symbols = data;
+ obj->efile.symbols_shndx = idx;
+ obj->efile.strtabidx = sh->sh_link;
}
+ }
- name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
- if (!name) {
- pr_warn("failed to get section(%d) name from %s\n",
- idx, obj->path);
+ if (!obj->efile.symbols) {
+ pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
+ obj->path);
+ return -ENOENT;
+ }
+
+ scn = NULL;
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ idx = elf_ndxscn(scn);
+ sec_desc = &obj->efile.secs[idx];
+
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh)
return -LIBBPF_ERRNO__FORMAT;
- }
- data = elf_getdata(scn, 0);
- if (!data) {
- pr_warn("failed to get section(%d) data from %s(%s)\n",
- idx, name, obj->path);
+ name = elf_sec_str(obj, sh->sh_name);
+ if (!name)
return -LIBBPF_ERRNO__FORMAT;
- }
- pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
+
+ if (ignore_elf_section(sh, name))
+ continue;
+
+ data = elf_sec_data(obj, scn);
+ if (!data)
+ return -LIBBPF_ERRNO__FORMAT;
+
+ pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
idx, name, (unsigned long)data->d_size,
- (int)sh.sh_link, (unsigned long)sh.sh_flags,
- (int)sh.sh_type);
+ (int)sh->sh_link, (unsigned long)sh->sh_flags,
+ (int)sh->sh_type);
if (strcmp(name, "license") == 0) {
- err = bpf_object__init_license(obj,
- data->d_buf,
- data->d_size);
+ err = bpf_object__init_license(obj, data->d_buf, data->d_size);
if (err)
return err;
} else if (strcmp(name, "version") == 0) {
- err = bpf_object__init_kversion(obj,
- data->d_buf,
- data->d_size);
+ err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
if (err)
return err;
} else if (strcmp(name, "maps") == 0) {
- obj->efile.maps_shndx = idx;
+ pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
+ return -ENOTSUP;
} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
obj->efile.btf_maps_shndx = idx;
} else if (strcmp(name, BTF_ELF_SEC) == 0) {
+ if (sh->sh_type != SHT_PROGBITS)
+ return -LIBBPF_ERRNO__FORMAT;
btf_data = data;
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
- btf_ext_data = data;
- } else if (sh.sh_type == SHT_SYMTAB) {
- if (obj->efile.symbols) {
- pr_warn("bpf: multiple SYMTAB in %s\n",
- obj->path);
+ if (sh->sh_type != SHT_PROGBITS)
return -LIBBPF_ERRNO__FORMAT;
- }
- obj->efile.symbols = data;
- obj->efile.symbols_shndx = idx;
- obj->efile.strtabidx = sh.sh_link;
- } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
- if (sh.sh_flags & SHF_EXECINSTR) {
+ btf_ext_data = data;
+ } else if (sh->sh_type == SHT_SYMTAB) {
+ /* already processed during the first pass above */
+ } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
+ if (sh->sh_flags & SHF_EXECINSTR) {
if (strcmp(name, ".text") == 0)
obj->efile.text_shndx = idx;
- err = bpf_object__add_program(obj, data->d_buf,
- data->d_size,
- name, idx);
- if (err) {
- char errmsg[STRERR_BUFSIZE];
- char *cp;
-
- cp = libbpf_strerror_r(-err, errmsg,
- sizeof(errmsg));
- pr_warn("failed to alloc program %s (%s): %s",
- name, obj->path, cp);
+ err = bpf_object__add_programs(obj, data, name, idx);
+ if (err)
return err;
- }
- } else if (strcmp(name, DATA_SEC) == 0) {
- obj->efile.data = data;
- obj->efile.data_shndx = idx;
- } else if (strcmp(name, RODATA_SEC) == 0) {
- obj->efile.rodata = data;
- obj->efile.rodata_shndx = idx;
+ } else if (strcmp(name, DATA_SEC) == 0 ||
+ str_has_pfx(name, DATA_SEC ".")) {
+ sec_desc->sec_type = SEC_DATA;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ } else if (strcmp(name, RODATA_SEC) == 0 ||
+ str_has_pfx(name, RODATA_SEC ".")) {
+ sec_desc->sec_type = SEC_RODATA;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
obj->efile.st_ops_data = data;
obj->efile.st_ops_shndx = idx;
+ } else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) {
+ obj->efile.st_ops_link_data = data;
+ obj->efile.st_ops_link_shndx = idx;
} else {
- pr_debug("skip section(%d) %s\n", idx, name);
+ pr_info("elf: skipping unrecognized data section(%d) %s\n",
+ idx, name);
}
- } else if (sh.sh_type == SHT_REL) {
- int nr_sects = obj->efile.nr_reloc_sects;
- void *sects = obj->efile.reloc_sects;
- int sec = sh.sh_info; /* points to other section */
+ } else if (sh->sh_type == SHT_REL) {
+ int targ_sec_idx = sh->sh_info; /* points to other section */
+
+ if (sh->sh_entsize != sizeof(Elf64_Rel) ||
+ targ_sec_idx >= obj->efile.sec_cnt)
+ return -LIBBPF_ERRNO__FORMAT;
/* Only do relo for section with exec instructions */
- if (!section_have_execinstr(obj, sec) &&
- strcmp(name, ".rel" STRUCT_OPS_SEC)) {
- pr_debug("skip relo %s(%d) for section(%d)\n",
- name, idx, sec);
+ if (!section_have_execinstr(obj, targ_sec_idx) &&
+ strcmp(name, ".rel" STRUCT_OPS_SEC) &&
+ strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
+ strcmp(name, ".rel" MAPS_ELF_SEC)) {
+ pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
+ idx, name, targ_sec_idx,
+ elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
continue;
}
- sects = reallocarray(sects, nr_sects + 1,
- sizeof(*obj->efile.reloc_sects));
- if (!sects) {
- pr_warn("reloc_sects realloc failed\n");
- return -ENOMEM;
- }
-
- obj->efile.reloc_sects = sects;
- obj->efile.nr_reloc_sects++;
-
- obj->efile.reloc_sects[nr_sects].shdr = sh;
- obj->efile.reloc_sects[nr_sects].data = data;
- } else if (sh.sh_type == SHT_NOBITS &&
- strcmp(name, BSS_SEC) == 0) {
- obj->efile.bss = data;
- obj->efile.bss_shndx = idx;
+ sec_desc->sec_type = SEC_RELO;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
+ } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
+ str_has_pfx(name, BSS_SEC "."))) {
+ sec_desc->sec_type = SEC_BSS;
+ sec_desc->shdr = sh;
+ sec_desc->data = data;
} else {
- pr_debug("skip section(%d) %s\n", idx, name);
+ pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
+ (size_t)sh->sh_size);
}
}
if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
- pr_warn("Corrupted ELF file: index of strtab invalid\n");
+ pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
+
+ /* sort BPF programs by section name and in-section instruction offset
+ * for faster search
+ */
+ if (obj->nr_programs)
+ qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
+
return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}
-static bool sym_is_extern(const GElf_Sym *sym)
+static bool sym_is_extern(const Elf64_Sym *sym)
{
- int bind = GELF_ST_BIND(sym->st_info);
+ int bind = ELF64_ST_BIND(sym->st_info);
/* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
return sym->st_shndx == SHN_UNDEF &&
(bind == STB_GLOBAL || bind == STB_WEAK) &&
- GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
+ ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
+}
+
+static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
+{
+ int bind = ELF64_ST_BIND(sym->st_info);
+ int type = ELF64_ST_TYPE(sym->st_info);
+
+ /* in .text section */
+ if (sym->st_shndx != text_shndx)
+ return false;
+
+ /* local function */
+ if (bind == STB_LOCAL && type == STT_SECTION)
+ return true;
+
+ /* global function */
+ return bind == STB_GLOBAL && type == STT_FUNC;
}
static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
{
const struct btf_type *t;
- const char *var_name;
+ const char *tname;
int i, n;
if (!btf)
return -ESRCH;
- n = btf__get_nr_types(btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
t = btf__type_by_id(btf, i);
- if (!btf_is_var(t))
+ if (!btf_is_var(t) && !btf_is_func(t))
continue;
- var_name = btf__name_by_offset(btf, t->name_off);
- if (strcmp(var_name, ext_name))
+ tname = btf__name_by_offset(btf, t->name_off);
+ if (strcmp(tname, ext_name))
continue;
- if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
+ if (btf_is_var(t) &&
+ btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
+ return -EINVAL;
+
+ if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
return -EINVAL;
return i;
@@ -2602,8 +3590,33 @@ static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
return -ENOENT;
}
-static enum extern_type find_extern_type(const struct btf *btf, int id,
- bool *is_signed)
+static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
+{
+ const struct btf_var_secinfo *vs;
+ const struct btf_type *t;
+ int i, j, n;
+
+ if (!btf)
+ return -ESRCH;
+
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
+ t = btf__type_by_id(btf, i);
+
+ if (!btf_is_datasec(t))
+ continue;
+
+ vs = btf_var_secinfos(t);
+ for (j = 0; j < btf_vlen(t); j++, vs++) {
+ if (vs->type == ext_btf_id)
+ return i;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
+ bool *is_signed)
{
const struct btf_type *t;
const char *name;
@@ -2618,29 +3631,33 @@ static enum extern_type find_extern_type(const struct btf *btf, int id,
int enc = btf_int_encoding(t);
if (enc & BTF_INT_BOOL)
- return t->size == 1 ? EXT_BOOL : EXT_UNKNOWN;
+ return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
if (is_signed)
*is_signed = enc & BTF_INT_SIGNED;
if (t->size == 1)
- return EXT_CHAR;
+ return KCFG_CHAR;
if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
- return EXT_UNKNOWN;
- return EXT_INT;
+ return KCFG_UNKNOWN;
+ return KCFG_INT;
}
case BTF_KIND_ENUM:
if (t->size != 4)
- return EXT_UNKNOWN;
+ return KCFG_UNKNOWN;
if (strcmp(name, "libbpf_tristate"))
- return EXT_UNKNOWN;
- return EXT_TRISTATE;
+ return KCFG_UNKNOWN;
+ return KCFG_TRISTATE;
+ case BTF_KIND_ENUM64:
+ if (strcmp(name, "libbpf_tristate"))
+ return KCFG_UNKNOWN;
+ return KCFG_TRISTATE;
case BTF_KIND_ARRAY:
if (btf_array(t)->nelems == 0)
- return EXT_UNKNOWN;
- if (find_extern_type(btf, btf_array(t)->type, NULL) != EXT_CHAR)
- return EXT_UNKNOWN;
- return EXT_CHAR_ARR;
+ return KCFG_UNKNOWN;
+ if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
+ return KCFG_UNKNOWN;
+ return KCFG_CHAR_ARR;
default:
- return EXT_UNKNOWN;
+ return KCFG_UNKNOWN;
}
}
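
The BTF shapes handled above correspond, roughly, to Kconfig extern declarations like the following BPF-side fragment (a sketch; the CONFIG_* names are made up, and __kconfig plus enum libbpf_tristate are assumed to come from bpf_helpers.h):

#include <bpf/bpf_helpers.h>

extern unsigned long CONFIG_EXAMPLE_LONG __kconfig;	  /* KCFG_INT: 1/2/4/8-byte integer */
extern _Bool CONFIG_EXAMPLE_BOOL __kconfig;		  /* KCFG_BOOL: 1-byte bool */
extern char CONFIG_EXAMPLE_CHAR __kconfig;		  /* KCFG_CHAR */
extern enum libbpf_tristate CONFIG_EXAMPLE_TRI __kconfig; /* KCFG_TRISTATE: y/m/n */
extern char CONFIG_EXAMPLE_STR[8] __kconfig;		  /* KCFG_CHAR_ARR: string option */
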
@@ -2649,51 +3666,115 @@ static int cmp_externs(const void *_a, const void *_b)
const struct extern_desc *a = _a;
const struct extern_desc *b = _b;
- /* descending order by alignment requirements */
- if (a->align != b->align)
- return a->align > b->align ? -1 : 1;
- /* ascending order by size, within same alignment class */
- if (a->sz != b->sz)
- return a->sz < b->sz ? -1 : 1;
+ if (a->type != b->type)
+ return a->type < b->type ? -1 : 1;
+
+ if (a->type == EXT_KCFG) {
+ /* descending order by alignment requirements */
+ if (a->kcfg.align != b->kcfg.align)
+ return a->kcfg.align > b->kcfg.align ? -1 : 1;
+ /* ascending order by size, within same alignment class */
+ if (a->kcfg.sz != b->kcfg.sz)
+ return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
+ }
+
/* resolve ties by name */
return strcmp(a->name, b->name);
}
+static int find_int_btf_id(const struct btf *btf)
+{
+ const struct btf_type *t;
+ int i, n;
+
+ n = btf__type_cnt(btf);
+ for (i = 1; i < n; i++) {
+ t = btf__type_by_id(btf, i);
+
+ if (btf_is_int(t) && btf_int_bits(t) == 32)
+ return i;
+ }
+
+ return 0;
+}
+
+static int add_dummy_ksym_var(struct btf *btf)
+{
+ int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
+ const struct btf_var_secinfo *vs;
+ const struct btf_type *sec;
+
+ if (!btf)
+ return 0;
+
+ sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
+ BTF_KIND_DATASEC);
+ if (sec_btf_id < 0)
+ return 0;
+
+ sec = btf__type_by_id(btf, sec_btf_id);
+ vs = btf_var_secinfos(sec);
+ for (i = 0; i < btf_vlen(sec); i++, vs++) {
+ const struct btf_type *vt;
+
+ vt = btf__type_by_id(btf, vs->type);
+ if (btf_is_func(vt))
+ break;
+ }
+
+ /* No func in ksyms sec. No need to add dummy var. */
+ if (i == btf_vlen(sec))
+ return 0;
+
+ int_btf_id = find_int_btf_id(btf);
+ dummy_var_btf_id = btf__add_var(btf,
+ "dummy_ksym",
+ BTF_VAR_GLOBAL_ALLOCATED,
+ int_btf_id);
+ if (dummy_var_btf_id < 0)
+ pr_warn("cannot create a dummy_ksym var\n");
+
+ return dummy_var_btf_id;
+}
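
The FUNC entries being looked for here come from kfunc-style extern declarations on the BPF side, e.g. (a sketch; treat the specific kfunc name and signature as illustrative):

/* An extern function tagged __ksym becomes a BTF FUNC referenced from the
 * ".ksyms" DATASEC. Since the kernel only accepts VARs inside a DATASEC,
 * add_dummy_ksym_var() injects a plain int VAR ("dummy_ksym") whose id is
 * later substituted for such vs->type entries.
 */
extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
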
+
static int bpf_object__collect_externs(struct bpf_object *obj)
{
+ struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
const struct btf_type *t;
struct extern_desc *ext;
- int i, n, off, btf_id;
- struct btf_type *sec;
- const char *ext_name;
+ int i, n, off, dummy_var_btf_id;
+ const char *ext_name, *sec_name;
Elf_Scn *scn;
- GElf_Shdr sh;
+ Elf64_Shdr *sh;
if (!obj->efile.symbols)
return 0;
- scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
- if (!scn)
- return -LIBBPF_ERRNO__FORMAT;
- if (gelf_getshdr(scn, &sh) != &sh)
+ scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
+ sh = elf_sec_hdr(obj, scn);
+ if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
return -LIBBPF_ERRNO__FORMAT;
- n = sh.sh_size / sh.sh_entsize;
+ dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
+ if (dummy_var_btf_id < 0)
+ return dummy_var_btf_id;
+
+ n = sh->sh_size / sh->sh_entsize;
pr_debug("looking for externs among %d symbols...\n", n);
+
for (i = 0; i < n; i++) {
- GElf_Sym sym;
+ Elf64_Sym *sym = elf_sym_by_idx(obj, i);
- if (!gelf_getsym(obj->efile.symbols, i, &sym))
+ if (!sym)
return -LIBBPF_ERRNO__FORMAT;
- if (!sym_is_extern(&sym))
+ if (!sym_is_extern(sym))
continue;
- ext_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name);
+ ext_name = elf_sym_str(obj, sym->st_name);
if (!ext_name || !ext_name[0])
continue;
ext = obj->externs;
- ext = reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
+ ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
if (!ext)
return -ENOMEM;
obj->externs = ext;
@@ -2710,23 +3791,50 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
t = btf__type_by_id(obj->btf, ext->btf_id);
ext->name = btf__name_by_offset(obj->btf, t->name_off);
ext->sym_idx = i;
- ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
- ext->sz = btf__resolve_size(obj->btf, t->type);
- if (ext->sz <= 0) {
- pr_warn("failed to resolve size of extern '%s': %d\n",
- ext_name, ext->sz);
- return ext->sz;
- }
- ext->align = btf__align_of(obj->btf, t->type);
- if (ext->align <= 0) {
- pr_warn("failed to determine alignment of extern '%s': %d\n",
- ext_name, ext->align);
- return -EINVAL;
+ ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
+
+ ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
+ if (ext->sec_btf_id <= 0) {
+ pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
+ ext_name, ext->btf_id, ext->sec_btf_id);
+ return ext->sec_btf_id;
}
- ext->type = find_extern_type(obj->btf, t->type,
- &ext->is_signed);
- if (ext->type == EXT_UNKNOWN) {
- pr_warn("extern '%s' type is unsupported\n", ext_name);
+ sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
+ sec_name = btf__name_by_offset(obj->btf, sec->name_off);
+
+ if (strcmp(sec_name, KCONFIG_SEC) == 0) {
+ if (btf_is_func(t)) {
+ pr_warn("extern function %s is unsupported under %s section\n",
+ ext->name, KCONFIG_SEC);
+ return -ENOTSUP;
+ }
+ kcfg_sec = sec;
+ ext->type = EXT_KCFG;
+ ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
+ if (ext->kcfg.sz <= 0) {
+ pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
+ ext_name, ext->kcfg.sz);
+ return ext->kcfg.sz;
+ }
+ ext->kcfg.align = btf__align_of(obj->btf, t->type);
+ if (ext->kcfg.align <= 0) {
+ pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
+ ext_name, ext->kcfg.align);
+ return -EINVAL;
+ }
+ ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
+ &ext->kcfg.is_signed);
+ if (ext->kcfg.type == KCFG_UNKNOWN) {
+ pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
+ return -ENOTSUP;
+ }
+ } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
+ ksym_sec = sec;
+ ext->type = EXT_KSYM;
+ skip_mods_and_typedefs(obj->btf, t->type,
+ &ext->ksym.type_id);
+ } else {
+ pr_warn("unrecognized extern section '%s'\n", sec_name);
return -ENOTSUP;
}
}
@@ -2735,70 +3843,113 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
if (!obj->nr_extern)
return 0;
- /* sort externs by (alignment, size, name) and calculate their offsets
- * within a map */
+ /* sort externs by type, for kcfg ones also by (align, size, name) */
qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
- off = 0;
- for (i = 0; i < obj->nr_extern; i++) {
- ext = &obj->externs[i];
- ext->data_off = roundup(off, ext->align);
- off = ext->data_off + ext->sz;
- pr_debug("extern #%d: symbol %d, off %u, name %s\n",
- i, ext->sym_idx, ext->data_off, ext->name);
- }
- btf_id = btf__find_by_name(obj->btf, KCONFIG_SEC);
- if (btf_id <= 0) {
- pr_warn("no BTF info found for '%s' datasec\n", KCONFIG_SEC);
- return -ESRCH;
- }
+ /* for .ksyms section, we need to turn all externs into allocated
+ * variables in BTF to pass kernel verification; we do this by
+ * pretending that each extern is an int-sized variable
+ */
+ if (ksym_sec) {
+ /* find existing 4-byte integer type in BTF to use for fake
+ * extern variables in DATASEC
+ */
+ int int_btf_id = find_int_btf_id(obj->btf);
+ /* For an extern function, the dummy_var added earlier
+ * replaces vs->type, and its name string is reused to
+ * fill in any missing parameter names.
+ */
+ const struct btf_type *dummy_var;
- sec = (struct btf_type *)btf__type_by_id(obj->btf, btf_id);
- sec->size = off;
- n = btf_vlen(sec);
- for (i = 0; i < n; i++) {
- struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
-
- t = btf__type_by_id(obj->btf, vs->type);
- ext_name = btf__name_by_offset(obj->btf, t->name_off);
- ext = find_extern_by_name(obj, ext_name);
- if (!ext) {
- pr_warn("failed to find extern definition for BTF var '%s'\n",
- ext_name);
- return -ESRCH;
+ dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
+ for (i = 0; i < obj->nr_extern; i++) {
+ ext = &obj->externs[i];
+ if (ext->type != EXT_KSYM)
+ continue;
+ pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
+ i, ext->sym_idx, ext->name);
}
- vs->offset = ext->data_off;
- btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
- }
- return 0;
-}
+ sec = ksym_sec;
+ n = btf_vlen(sec);
+ for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
+ struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
+ struct btf_type *vt;
-static struct bpf_program *
-bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
-{
- struct bpf_program *prog;
- size_t i;
+ vt = (void *)btf__type_by_id(obj->btf, vs->type);
+ ext_name = btf__name_by_offset(obj->btf, vt->name_off);
+ ext = find_extern_by_name(obj, ext_name);
+ if (!ext) {
+ pr_warn("failed to find extern definition for BTF %s '%s'\n",
+ btf_kind_str(vt), ext_name);
+ return -ESRCH;
+ }
+ if (btf_is_func(vt)) {
+ const struct btf_type *func_proto;
+ struct btf_param *param;
+ int j;
+
+ func_proto = btf__type_by_id(obj->btf,
+ vt->type);
+ param = btf_params(func_proto);
+ /* Reuse the dummy_var string if the
+ * func proto does not have a param name.
+ */
+ for (j = 0; j < btf_vlen(func_proto); j++)
+ if (param[j].type && !param[j].name_off)
+ param[j].name_off =
+ dummy_var->name_off;
+ vs->type = dummy_var_btf_id;
+ vt->info &= ~0xffff;
+ vt->info |= BTF_FUNC_GLOBAL;
+ } else {
+ btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
+ vt->type = int_btf_id;
+ }
+ vs->offset = off;
+ vs->size = sizeof(int);
+ }
+ sec->size = off;
+ }
- for (i = 0; i < obj->nr_programs; i++) {
- prog = &obj->programs[i];
- if (prog->idx == idx)
- return prog;
+ if (kcfg_sec) {
+ sec = kcfg_sec;
+ /* for kcfg externs calculate their offsets within a .kconfig map */
+ off = 0;
+ for (i = 0; i < obj->nr_extern; i++) {
+ ext = &obj->externs[i];
+ if (ext->type != EXT_KCFG)
+ continue;
+
+ ext->kcfg.data_off = roundup(off, ext->kcfg.align);
+ off = ext->kcfg.data_off + ext->kcfg.sz;
+ pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
+ i, ext->sym_idx, ext->kcfg.data_off, ext->name);
+ }
+ sec->size = off;
+ n = btf_vlen(sec);
+ for (i = 0; i < n; i++) {
+ struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
+
+ t = btf__type_by_id(obj->btf, vs->type);
+ ext_name = btf__name_by_offset(obj->btf, t->name_off);
+ ext = find_extern_by_name(obj, ext_name);
+ if (!ext) {
+ pr_warn("failed to find extern definition for BTF var '%s'\n",
+ ext_name);
+ return -ESRCH;
+ }
+ btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
+ vs->offset = ext->kcfg.data_off;
+ }
}
- return NULL;
+ return 0;
}
-struct bpf_program *
-bpf_object__find_program_by_title(const struct bpf_object *obj,
- const char *title)
+static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
{
- struct bpf_program *pos;
-
- bpf_object__for_each_program(pos, obj) {
- if (pos->section_name && !strcmp(pos->section_name, title))
- return pos;
- }
- return NULL;
+ return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
}
struct bpf_program *
@@ -2808,119 +3959,151 @@ bpf_object__find_program_by_name(const struct bpf_object *obj,
struct bpf_program *prog;
bpf_object__for_each_program(prog, obj) {
+ if (prog_is_subprog(obj, prog))
+ continue;
if (!strcmp(prog->name, name))
return prog;
}
- return NULL;
+ return errno = ENOENT, NULL;
}
static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
int shndx)
{
- return shndx == obj->efile.data_shndx ||
- shndx == obj->efile.bss_shndx ||
- shndx == obj->efile.rodata_shndx;
+ switch (obj->efile.secs[shndx].sec_type) {
+ case SEC_BSS:
+ case SEC_DATA:
+ case SEC_RODATA:
+ return true;
+ default:
+ return false;
+ }
}
static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
int shndx)
{
- return shndx == obj->efile.maps_shndx ||
- shndx == obj->efile.btf_maps_shndx;
+ return shndx == obj->efile.btf_maps_shndx;
}
static enum libbpf_map_type
bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
{
- if (shndx == obj->efile.data_shndx)
- return LIBBPF_MAP_DATA;
- else if (shndx == obj->efile.bss_shndx)
+ if (shndx == obj->efile.symbols_shndx)
+ return LIBBPF_MAP_KCONFIG;
+
+ switch (obj->efile.secs[shndx].sec_type) {
+ case SEC_BSS:
return LIBBPF_MAP_BSS;
- else if (shndx == obj->efile.rodata_shndx)
+ case SEC_DATA:
+ return LIBBPF_MAP_DATA;
+ case SEC_RODATA:
return LIBBPF_MAP_RODATA;
- else if (shndx == obj->efile.symbols_shndx)
- return LIBBPF_MAP_KCONFIG;
- else
+ default:
return LIBBPF_MAP_UNSPEC;
+ }
}
static int bpf_program__record_reloc(struct bpf_program *prog,
struct reloc_desc *reloc_desc,
- __u32 insn_idx, const char *name,
- const GElf_Sym *sym, const GElf_Rel *rel)
+ __u32 insn_idx, const char *sym_name,
+ const Elf64_Sym *sym, const Elf64_Rel *rel)
{
struct bpf_insn *insn = &prog->insns[insn_idx];
size_t map_idx, nr_maps = prog->obj->nr_maps;
struct bpf_object *obj = prog->obj;
__u32 shdr_idx = sym->st_shndx;
enum libbpf_map_type type;
+ const char *sym_sec_name;
struct bpf_map *map;
+ if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
+ pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
+ prog->name, sym_name, insn_idx, insn->code);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ if (sym_is_extern(sym)) {
+ int sym_idx = ELF64_R_SYM(rel->r_info);
+ int i, n = obj->nr_extern;
+ struct extern_desc *ext;
+
+ for (i = 0; i < n; i++) {
+ ext = &obj->externs[i];
+ if (ext->sym_idx == sym_idx)
+ break;
+ }
+ if (i >= n) {
+ pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
+ prog->name, sym_name, sym_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
+ prog->name, i, ext->name, ext->sym_idx, insn_idx);
+ if (insn->code == (BPF_JMP | BPF_CALL))
+ reloc_desc->type = RELO_EXTERN_CALL;
+ else
+ reloc_desc->type = RELO_EXTERN_LD64;
+ reloc_desc->insn_idx = insn_idx;
+ reloc_desc->ext_idx = i;
+ return 0;
+ }
+
/* sub-program call relocation */
- if (insn->code == (BPF_JMP | BPF_CALL)) {
+ if (is_call_insn(insn)) {
if (insn->src_reg != BPF_PSEUDO_CALL) {
- pr_warn("incorrect bpf_call opcode\n");
+ pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
return -LIBBPF_ERRNO__RELOC;
}
/* text_shndx can be 0, if no default "main" program exists */
if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
- pr_warn("bad call relo against section %u\n", shdr_idx);
+ sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
+ pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
+ prog->name, sym_name, sym_sec_name);
return -LIBBPF_ERRNO__RELOC;
}
- if (sym->st_value % 8) {
- pr_warn("bad call relo offset: %zu\n",
- (size_t)sym->st_value);
+ if (sym->st_value % BPF_INSN_SZ) {
+ pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
+ prog->name, sym_name, (size_t)sym->st_value);
return -LIBBPF_ERRNO__RELOC;
}
reloc_desc->type = RELO_CALL;
reloc_desc->insn_idx = insn_idx;
reloc_desc->sym_off = sym->st_value;
- obj->has_pseudo_calls = true;
return 0;
}
- if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
- pr_warn("invalid relo for insns[%d].code 0x%x\n",
- insn_idx, insn->code);
+ if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
+ pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
+ prog->name, sym_name, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
- if (sym_is_extern(sym)) {
- int sym_idx = GELF_R_SYM(rel->r_info);
- int i, n = obj->nr_extern;
- struct extern_desc *ext;
-
- for (i = 0; i < n; i++) {
- ext = &obj->externs[i];
- if (ext->sym_idx == sym_idx)
- break;
- }
- if (i >= n) {
- pr_warn("extern relo failed to find extern for sym %d\n",
- sym_idx);
+ /* loading subprog addresses */
+ if (sym_is_subprog(sym, obj->efile.text_shndx)) {
+ /* global_func: sym->st_value = offset in the section, insn->imm = 0.
+ * local_func: sym->st_value = 0, insn->imm = offset in the section.
+ */
+ if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
+ pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
+ prog->name, sym_name, (size_t)sym->st_value, insn->imm);
return -LIBBPF_ERRNO__RELOC;
}
- pr_debug("found extern #%d '%s' (sym %d, off %u) for insn %u\n",
- i, ext->name, ext->sym_idx, ext->data_off, insn_idx);
- reloc_desc->type = RELO_EXTERN;
+
+ reloc_desc->type = RELO_SUBPROG_ADDR;
reloc_desc->insn_idx = insn_idx;
- reloc_desc->sym_off = ext->data_off;
+ reloc_desc->sym_off = sym->st_value;
return 0;
}
- if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
- pr_warn("invalid relo for \'%s\' in special section 0x%x; forgot to initialize global var?..\n",
- name, shdr_idx);
- return -LIBBPF_ERRNO__RELOC;
- }
-
type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
+ sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
/* generic map reference relocation */
if (type == LIBBPF_MAP_UNSPEC) {
if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
- pr_warn("bad map relo against section %u\n",
- shdr_idx);
+ pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
+ prog->name, sym_name, sym_sec_name);
return -LIBBPF_ERRNO__RELOC;
}
for (map_idx = 0; map_idx < nr_maps; map_idx++) {
@@ -2929,14 +4112,14 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
map->sec_idx != sym->st_shndx ||
map->sec_offset != sym->st_value)
continue;
- pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n",
- map_idx, map->name, map->sec_idx,
+ pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
+ prog->name, map_idx, map->name, map->sec_idx,
map->sec_offset, insn_idx);
break;
}
if (map_idx >= nr_maps) {
- pr_warn("map relo failed to find map for sec %u, off %zu\n",
- shdr_idx, (size_t)sym->st_value);
+ pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
+ prog->name, sym_sec_name, (size_t)sym->st_value);
return -LIBBPF_ERRNO__RELOC;
}
reloc_desc->type = RELO_LD64;
@@ -2948,21 +4131,22 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
/* global data map relocation */
if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
- pr_warn("bad data relo against section %u\n", shdr_idx);
+ pr_warn("prog '%s': bad data relo against section '%s'\n",
+ prog->name, sym_sec_name);
return -LIBBPF_ERRNO__RELOC;
}
for (map_idx = 0; map_idx < nr_maps; map_idx++) {
map = &obj->maps[map_idx];
- if (map->libbpf_type != type)
+ if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
continue;
- pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n",
- map_idx, map->name, map->sec_idx, map->sec_offset,
- insn_idx);
+ pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
+ prog->name, map_idx, map->name, map->sec_idx,
+ map->sec_offset, insn_idx);
break;
}
if (map_idx >= nr_maps) {
- pr_warn("data relo failed to find map for sec %u\n",
- shdr_idx);
+ pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
+ prog->name, sym_sec_name);
return -LIBBPF_ERRNO__RELOC;
}
@@ -2973,108 +4157,240 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
return 0;
}
+static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
+{
+ return insn_idx >= prog->sec_insn_off &&
+ insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
+}
+
+static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
+ size_t sec_idx, size_t insn_idx)
+{
+ int l = 0, r = obj->nr_programs - 1, m;
+ struct bpf_program *prog;
+
+ if (!obj->nr_programs)
+ return NULL;
+
+ while (l < r) {
+ m = l + (r - l + 1) / 2;
+ prog = &obj->programs[m];
+
+ if (prog->sec_idx < sec_idx ||
+ (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
+ l = m;
+ else
+ r = m - 1;
+ }
+ /* the matching program could be at index l, but it still might be
+ * the wrong one, so double-check the conditions one last time
+ */
+ prog = &obj->programs[l];
+ if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
+ return prog;
+ return NULL;
+}
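
The loop above is the standard "right-most element <= key" binary search, with the key being the (sec_idx, sec_insn_off) pair and a final containment check because the candidate at index l may still not cover insn_idx. A self-contained sketch of the same pattern over a plain sorted array (not part of this patch), for reference:

#include <sys/types.h>

/* Return the index of the right-most element with vals[i] <= key, or -1
 * if every element is greater than key. vals[] must be sorted ascending.
 */
static ssize_t find_last_le(const int *vals, size_t n, int key)
{
	size_t l = 0, r = n ? n - 1 : 0, m;

	if (!n || vals[0] > key)
		return -1;

	while (l < r) {
		/* round the midpoint up so l = m always makes progress */
		m = l + (r - l + 1) / 2;
		if (vals[m] <= key)
			l = m;
		else
			r = m - 1;
	}
	return l;
}
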
+
static int
-bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
- Elf_Data *data, struct bpf_object *obj)
+bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
{
- Elf_Data *symbols = obj->efile.symbols;
+ const char *relo_sec_name, *sec_name;
+ size_t sec_idx = shdr->sh_info, sym_idx;
+ struct bpf_program *prog;
+ struct reloc_desc *relos;
int err, i, nrels;
+ const char *sym_name;
+ __u32 insn_idx;
+ Elf_Scn *scn;
+ Elf_Data *scn_data;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
- pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
- nrels = shdr->sh_size / shdr->sh_entsize;
+ if (sec_idx >= obj->efile.sec_cnt)
+ return -EINVAL;
- prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
- if (!prog->reloc_desc) {
- pr_warn("failed to alloc memory in relocation\n");
- return -ENOMEM;
- }
- prog->nr_reloc = nrels;
+ scn = elf_sec_by_idx(obj, sec_idx);
+ scn_data = elf_sec_data(obj, scn);
+
+ relo_sec_name = elf_sec_str(obj, shdr->sh_name);
+ sec_name = elf_sec_name(obj, scn);
+ if (!relo_sec_name || !sec_name)
+ return -EINVAL;
+
+ pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
+ relo_sec_name, sec_idx, sec_name);
+ nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- const char *name;
- __u32 insn_idx;
- GElf_Sym sym;
- GElf_Rel rel;
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
+ pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- if (!gelf_getrel(data, i, &rel)) {
- pr_warn("relocation: failed to get %d reloc\n", i);
+ sym_idx = ELF64_R_SYM(rel->r_info);
+ sym = elf_sym_by_idx(obj, sym_idx);
+ if (!sym) {
+ pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
+ relo_sec_name, sym_idx, i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
- pr_warn("relocation: symbol %"PRIx64" not found\n",
- GELF_R_SYM(rel.r_info));
+
+ if (sym->st_shndx >= obj->efile.sec_cnt) {
+ pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
+ relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (rel.r_offset % sizeof(struct bpf_insn))
+
+ if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
+ pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
+ relo_sec_name, (size_t)rel->r_offset, i);
return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ insn_idx = rel->r_offset / BPF_INSN_SZ;
+ /* relocations against static functions are recorded as
+ * relocations against the section that contains the function;
+ * in that case, the symbol will be STT_SECTION and sym.st_name
+ * will point to the empty string (0), so fetch the section
+ * name instead
+ */
+ if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
+ sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
+ else
+ sym_name = elf_sym_str(obj, sym->st_name);
+ sym_name = sym_name ?: "<?>";
- insn_idx = rel.r_offset / sizeof(struct bpf_insn);
- name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name) ? : "<?>";
+ pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
+ relo_sec_name, i, insn_idx, sym_name);
- pr_debug("relo for shdr %u, symb %zu, value %zu, type %d, bind %d, name %d (\'%s\'), insn %u\n",
- (__u32)sym.st_shndx, (size_t)GELF_R_SYM(rel.r_info),
- (size_t)sym.st_value, GELF_ST_TYPE(sym.st_info),
- GELF_ST_BIND(sym.st_info), sym.st_name, name,
- insn_idx);
+ prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
+ if (!prog) {
+ pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
+ relo_sec_name, i, sec_name, insn_idx);
+ continue;
+ }
- err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
- insn_idx, name, &sym, &rel);
+ relos = libbpf_reallocarray(prog->reloc_desc,
+ prog->nr_reloc + 1, sizeof(*relos));
+ if (!relos)
+ return -ENOMEM;
+ prog->reloc_desc = relos;
+
+ /* adjust insn_idx to local BPF program frame of reference */
+ insn_idx -= prog->sec_insn_off;
+ err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
+ insn_idx, sym_name, sym, rel);
if (err)
return err;
+
+ prog->nr_reloc++;
}
return 0;
}
-static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
+static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
{
- struct bpf_map_def *def = &map->def;
- __u32 key_type_id = 0, value_type_id = 0;
- int ret;
+ int id;
+
+ if (!obj->btf)
+ return -ENOENT;
/* if it's BTF-defined map, we don't need to search for type IDs.
* For struct_ops map, it does not need btf_key_type_id and
* btf_value_type_id.
*/
- if (map->sec_idx == obj->efile.btf_maps_shndx ||
- bpf_map__is_struct_ops(map))
+ if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
return 0;
- if (!bpf_map__is_internal(map)) {
- ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
- def->value_size, &key_type_id,
- &value_type_id);
- } else {
- /*
- * LLVM annotates global data differently in BTF, that is,
- * only as '.data', '.bss' or '.rodata'.
- */
- ret = btf__find_by_name(obj->btf,
- libbpf_type_to_btf_name[map->libbpf_type]);
+ /*
+ * LLVM annotates global data differently in BTF, that is,
+ * only as '.data', '.bss' or '.rodata'.
+ */
+ if (!bpf_map__is_internal(map))
+ return -ENOENT;
+
+ id = btf__find_by_name(obj->btf, map->real_name);
+ if (id < 0)
+ return id;
+
+ map->btf_key_type_id = 0;
+ map->btf_value_type_id = id;
+ return 0;
+}
+
+static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
+{
+ char file[PATH_MAX], buff[4096];
+ FILE *fp;
+ __u32 val;
+ int err;
+
+ snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
+ memset(info, 0, sizeof(*info));
+
+ fp = fopen(file, "r");
+ if (!fp) {
+ err = -errno;
+ pr_warn("failed to open %s: %d. No procfs support?\n", file,
+ err);
+ return err;
}
- if (ret < 0)
- return ret;
- map->btf_key_type_id = key_type_id;
- map->btf_value_type_id = bpf_map__is_internal(map) ?
- ret : value_type_id;
+ while (fgets(buff, sizeof(buff), fp)) {
+ if (sscanf(buff, "map_type:\t%u", &val) == 1)
+ info->type = val;
+ else if (sscanf(buff, "key_size:\t%u", &val) == 1)
+ info->key_size = val;
+ else if (sscanf(buff, "value_size:\t%u", &val) == 1)
+ info->value_size = val;
+ else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
+ info->max_entries = val;
+ else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
+ info->map_flags = val;
+ }
+
+ fclose(fp);
+
+ return 0;
+}
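
For reference, on kernels that expose BPF map details through procfs, the fdinfo file being parsed here contains tab-separated key/value lines roughly like the excerpt below (the exact field set varies by kernel version; the values are made up):

pos:	0
flags:	02000002
mnt_id:	15
map_type:	1
key_size:	4
value_size:	32
max_entries:	1
map_flags:	0x0
memlock:	4096
map_id:	42
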
+
+bool bpf_map__autocreate(const struct bpf_map *map)
+{
+ return map->autocreate;
+}
+
+int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
+{
+ if (map->obj->loaded)
+ return libbpf_err(-EBUSY);
+
+ map->autocreate = autocreate;
return 0;
}
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
- struct bpf_map_info info = {};
- __u32 len = sizeof(info);
+ struct bpf_map_info info;
+ __u32 len = sizeof(info), name_len;
int new_fd, err;
char *new_name;
- err = bpf_obj_get_info_by_fd(fd, &info, &len);
+ memset(&info, 0, len);
+ err = bpf_map_get_info_by_fd(fd, &info, &len);
+ if (err && errno == EINVAL)
+ err = bpf_get_map_info_from_fdinfo(fd, &info);
if (err)
- return err;
+ return libbpf_err(err);
+
+ name_len = strlen(info.name);
+ if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
+ new_name = strdup(map->name);
+ else
+ new_name = strdup(info.name);
- new_name = strdup(info.name);
if (!new_name)
- return -errno;
+ return libbpf_err(-errno);
new_fd = open("/", O_RDONLY | O_CLOEXEC);
if (new_fd < 0) {
@@ -3105,6 +4421,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd)
map->btf_key_type_id = info.btf_key_type_id;
map->btf_value_type_id = info.btf_value_type_id;
map->reused = true;
+ map->map_extra = info.map_extra;
return 0;
@@ -3112,68 +4429,102 @@ err_close_new_fd:
close(new_fd);
err_free_new_name:
free(new_name);
- return err;
+ return libbpf_err(err);
}
-int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
+__u32 bpf_map__max_entries(const struct bpf_map *map)
{
- if (!map || !max_entries)
- return -EINVAL;
+ return map->def.max_entries;
+}
- /* If map already created, its attributes can't be changed. */
- if (map->fd >= 0)
- return -EBUSY;
+struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
+{
+ if (!bpf_map_type__is_map_in_map(map->def.type))
+ return errno = EINVAL, NULL;
+
+ return map->inner_map;
+}
+
+int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
+{
+ if (map->obj->loaded)
+ return libbpf_err(-EBUSY);
map->def.max_entries = max_entries;
+ /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
+ if (map_is_ringbuf(map))
+ map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
+
return 0;
}
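
adjust_ringbuf_sz() is outside this hunk; as a rough sketch of the adjustment it performs (assuming the kernel requires BPF_MAP_TYPE_RINGBUF max_entries to be a power-of-2 multiple of the page size), the rounding looks something like this:

#include <stdint.h>
#include <unistd.h>

/* Sketch only: round sz up to the smallest power-of-2 multiple of the
 * page size; a zero request falls back to a single page.
 */
static size_t ringbuf_sz_roundup(size_t sz)
{
	size_t page_sz = sysconf(_SC_PAGE_SIZE);
	size_t mul;

	if (!sz)
		return page_sz;

	for (mul = 1; mul <= SIZE_MAX / page_sz; mul <<= 1)
		if (mul * page_sz >= sz)
			return mul * page_sz;

	return sz;	/* overflow: leave unchanged, let the kernel reject it */
}
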
static int
-bpf_object__probe_name(struct bpf_object *obj)
+bpf_object__probe_loading(struct bpf_object *obj)
{
- struct bpf_load_program_attr attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- int ret;
+ int ret, insn_cnt = ARRAY_SIZE(insns);
- /* make sure basic loading works */
+ if (obj->gen_loader)
+ return 0;
- memset(&attr, 0, sizeof(attr));
- attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- attr.insns = insns;
- attr.insns_cnt = ARRAY_SIZE(insns);
- attr.license = "GPL";
+ ret = bump_rlimit_memlock();
+ if (ret)
+ pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
- ret = bpf_load_program_xattr(&attr, NULL, 0);
+ /* make sure basic loading works */
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
+ if (ret < 0)
+ ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
if (ret < 0) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
- __func__, cp, errno);
- return -errno;
+ ret = errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
+ "program. Make sure your kernel supports BPF "
+ "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
+ "set to big enough value.\n", __func__, cp, ret);
+ return -ret;
}
close(ret);
- /* now try the same program, but with the name */
+ return 0;
+}
- attr.name = "test";
- ret = bpf_load_program_xattr(&attr, NULL, 0);
- if (ret >= 0) {
- obj->caps.name = 1;
- close(ret);
- }
+static int probe_fd(int fd)
+{
+ if (fd >= 0)
+ close(fd);
+ return fd >= 0;
+}
- return 0;
+static int probe_kern_prog_name(void)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, prog_name);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ union bpf_attr attr;
+ int ret;
+
+ memset(&attr, 0, attr_sz);
+ attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ attr.license = ptr_to_u64("GPL");
+ attr.insns = ptr_to_u64(insns);
+ attr.insn_cnt = (__u32)ARRAY_SIZE(insns);
+ libbpf_strlcpy(attr.prog_name, "libbpf_nametest", sizeof(attr.prog_name));
+
+ /* make sure loading with name works */
+ ret = sys_bpf_prog_load(&attr, attr_sz, PROG_LOAD_ATTEMPTS);
+ return probe_fd(ret);
}
-static int
-bpf_object__probe_global_data(struct bpf_object *obj)
+static int probe_kern_global_data(void)
{
- struct bpf_load_program_attr prg_attr;
- struct bpf_create_map_attr map_attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
@@ -3181,41 +4532,37 @@ bpf_object__probe_global_data(struct bpf_object *obj)
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- int ret, map;
-
- memset(&map_attr, 0, sizeof(map_attr));
- map_attr.map_type = BPF_MAP_TYPE_ARRAY;
- map_attr.key_size = sizeof(int);
- map_attr.value_size = 32;
- map_attr.max_entries = 1;
+ int ret, map, insn_cnt = ARRAY_SIZE(insns);
- map = bpf_create_map_xattr(&map_attr);
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_global", sizeof(int), 32, 1, NULL);
if (map < 0) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
- __func__, cp, errno);
- return -errno;
+ __func__, cp, -ret);
+ return ret;
}
insns[0].imm = map;
- memset(&prg_attr, 0, sizeof(prg_attr));
- prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
- prg_attr.insns = insns;
- prg_attr.insns_cnt = ARRAY_SIZE(insns);
- prg_attr.license = "GPL";
+ ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
+ close(map);
+ return probe_fd(ret);
+}
- ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
- if (ret >= 0) {
- obj->caps.global_data = 1;
- close(ret);
- }
+static int probe_kern_btf(void)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
- close(map);
- return 0;
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
}
-static int bpf_object__probe_btf_func(struct bpf_object *obj)
+static int probe_kern_btf_func(void)
{
static const char strs[] = "\0int\0x\0a";
/* void x(int a) {} */
@@ -3228,20 +4575,12 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
/* FUNC x */ /* [3] */
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
};
- int btf_fd;
-
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
- if (btf_fd >= 0) {
- obj->caps.btf_func = 1;
- close(btf_fd);
- return 1;
- }
- return 0;
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
}
-static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
+static int probe_kern_btf_func_global(void)
{
static const char strs[] = "\0int\0x\0a";
/* static void x(int a) {} */
@@ -3254,20 +4593,12 @@ static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
/* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
};
- int btf_fd;
-
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
- if (btf_fd >= 0) {
- obj->caps.btf_func_global = 1;
- close(btf_fd);
- return 1;
- }
- return 0;
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
}
-static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
+static int probe_kern_btf_datasec(void)
{
static const char strs[] = "\0x\0.data";
/* static int a; */
@@ -3281,73 +4612,324 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
};
- int btf_fd;
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
- if (btf_fd >= 0) {
- obj->caps.btf_datasec = 1;
- close(btf_fd);
- return 1;
- }
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
+}
- return 0;
+static int probe_kern_btf_float(void)
+{
+ static const char strs[] = "\0float";
+ __u32 types[] = {
+ /* float */
+ BTF_TYPE_FLOAT_ENC(1, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
}
-static int bpf_object__probe_array_mmap(struct bpf_object *obj)
+static int probe_kern_btf_decl_tag(void)
{
- struct bpf_create_map_attr attr = {
- .map_type = BPF_MAP_TYPE_ARRAY,
- .map_flags = BPF_F_MMAPABLE,
- .key_size = sizeof(int),
- .value_size = sizeof(int),
- .max_entries = 1,
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* VAR x */ /* [2] */
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
+ BTF_VAR_STATIC,
+ /* attr */
+ BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
};
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
+}
+
+static int probe_kern_btf_type_tag(void)
+{
+ static const char strs[] = "\0tag";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ /* attr */
+ BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
+ /* ptr */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
+}
+
+static int probe_kern_array_mmap(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_MMAPABLE);
int fd;
- fd = bpf_create_map_xattr(&attr);
- if (fd >= 0) {
- obj->caps.array_mmap = 1;
- close(fd);
- return 1;
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_mmap", sizeof(int), sizeof(int), 1, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_kern_exp_attach_type(void)
+{
+ LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int fd, insn_cnt = ARRAY_SIZE(insns);
+
+ /* use any valid combination of program type and (optional)
+ * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
+ * to see if kernel supports expected_attach_type field for
+ * BPF_PROG_LOAD command
+ */
+ fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
+ return probe_fd(fd);
+}
+
+static int probe_kern_probe_read_kernel(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
+ BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
+ BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
+ BPF_EXIT_INSN(),
+ };
+ int fd, insn_cnt = ARRAY_SIZE(insns);
+
+ fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
+ return probe_fd(fd);
+}
+
+static int probe_prog_bind_map(void)
+{
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
+
+ map = bpf_map_create(BPF_MAP_TYPE_ARRAY, "libbpf_det_bind", sizeof(int), 32, 1, NULL);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
}
- return 0;
+ prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
+ if (prog < 0) {
+ close(map);
+ return 0;
+ }
+
+ ret = bpf_prog_bind_map(prog, map, NULL);
+
+ close(map);
+ close(prog);
+
+ return ret >= 0;
}
-static int
-bpf_object__probe_caps(struct bpf_object *obj)
-{
- int (*probe_fn[])(struct bpf_object *obj) = {
- bpf_object__probe_name,
- bpf_object__probe_global_data,
- bpf_object__probe_btf_func,
- bpf_object__probe_btf_func_global,
- bpf_object__probe_btf_datasec,
- bpf_object__probe_array_mmap,
+static int probe_module_btf(void)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+ struct bpf_btf_info info;
+ __u32 len = sizeof(info);
+ char name[16];
+ int fd, err;
+
+ fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
+ if (fd < 0)
+ return 0; /* BTF not supported at all */
+
+ memset(&info, 0, sizeof(info));
+ info.name = ptr_to_u64(name);
+ info.name_len = sizeof(name);
+
+ /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer;
+ * kernel's module BTF support coincides with support for
+ * name/name_len fields in struct bpf_btf_info.
+ */
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
+ close(fd);
+ return !err;
+}
+
+static int probe_perf_link(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int prog_fd, link_fd, err;
+
+ prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
+ insns, ARRAY_SIZE(insns), NULL);
+ if (prog_fd < 0)
+ return -errno;
+
+ /* use invalid perf_event FD to get EBADF, if link is supported;
+ * otherwise EINVAL should be returned
+ */
+ link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL);
+ err = -errno; /* close() can clobber errno */
+
+ if (link_fd >= 0)
+ close(link_fd);
+ close(prog_fd);
+
+ return link_fd < 0 && err == -EBADF;
+}
+
+static int probe_kern_bpf_cookie(void)
+{
+ struct bpf_insn insns[] = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_attach_cookie),
+ BPF_EXIT_INSN(),
+ };
+ int ret, insn_cnt = ARRAY_SIZE(insns);
+
+ ret = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf_enum64(void)
+{
+ static const char strs[] = "\0enum64";
+ __u32 types[] = {
+ BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_ENUM64, 0, 0), 8),
};
- int i, ret;
- for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
- ret = probe_fn[i](obj);
- if (ret < 0)
- pr_debug("Probe #%d failed with %d.\n", i, ret);
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
+}
+
+static int probe_kern_syscall_wrapper(void);
+
+enum kern_feature_result {
+ FEAT_UNKNOWN = 0,
+ FEAT_SUPPORTED = 1,
+ FEAT_MISSING = 2,
+};
+
+typedef int (*feature_probe_fn)(void);
+
+static struct kern_feature_desc {
+ const char *desc;
+ feature_probe_fn probe;
+ enum kern_feature_result res;
+} feature_probes[__FEAT_CNT] = {
+ [FEAT_PROG_NAME] = {
+ "BPF program name", probe_kern_prog_name,
+ },
+ [FEAT_GLOBAL_DATA] = {
+ "global variables", probe_kern_global_data,
+ },
+ [FEAT_BTF] = {
+ "minimal BTF", probe_kern_btf,
+ },
+ [FEAT_BTF_FUNC] = {
+ "BTF functions", probe_kern_btf_func,
+ },
+ [FEAT_BTF_GLOBAL_FUNC] = {
+ "BTF global function", probe_kern_btf_func_global,
+ },
+ [FEAT_BTF_DATASEC] = {
+ "BTF data section and variable", probe_kern_btf_datasec,
+ },
+ [FEAT_ARRAY_MMAP] = {
+ "ARRAY map mmap()", probe_kern_array_mmap,
+ },
+ [FEAT_EXP_ATTACH_TYPE] = {
+ "BPF_PROG_LOAD expected_attach_type attribute",
+ probe_kern_exp_attach_type,
+ },
+ [FEAT_PROBE_READ_KERN] = {
+ "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
+ },
+ [FEAT_PROG_BIND_MAP] = {
+ "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
+ },
+ [FEAT_MODULE_BTF] = {
+ "module BTF support", probe_module_btf,
+ },
+ [FEAT_BTF_FLOAT] = {
+ "BTF_KIND_FLOAT support", probe_kern_btf_float,
+ },
+ [FEAT_PERF_LINK] = {
+ "BPF perf link support", probe_perf_link,
+ },
+ [FEAT_BTF_DECL_TAG] = {
+ "BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
+ },
+ [FEAT_BTF_TYPE_TAG] = {
+ "BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
+ },
+ [FEAT_MEMCG_ACCOUNT] = {
+ "memcg-based memory accounting", probe_memcg_account,
+ },
+ [FEAT_BPF_COOKIE] = {
+ "BPF cookie support", probe_kern_bpf_cookie,
+ },
+ [FEAT_BTF_ENUM64] = {
+ "BTF_KIND_ENUM64 support", probe_kern_btf_enum64,
+ },
+ [FEAT_SYSCALL_WRAPPER] = {
+ "Kernel using syscall wrapper", probe_kern_syscall_wrapper,
+ },
+};
+
+bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
+{
+ struct kern_feature_desc *feat = &feature_probes[feat_id];
+ int ret;
+
+ if (obj && obj->gen_loader)
+ /* To generate loader program assume the latest kernel
+ * to avoid doing extra prog_load, map_create syscalls.
+ */
+ return true;
+
+ if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
+ ret = feat->probe();
+ if (ret > 0) {
+ WRITE_ONCE(feat->res, FEAT_SUPPORTED);
+ } else if (ret == 0) {
+ WRITE_ONCE(feat->res, FEAT_MISSING);
+ } else {
+ pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
+ WRITE_ONCE(feat->res, FEAT_MISSING);
+ }
}
- return 0;
+ return READ_ONCE(feat->res) == FEAT_SUPPORTED;
}
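
Callers elsewhere in libbpf use this as a simple guarded check; an illustrative fragment (not from this patch, identifiers as used above):

/* degrade gracefully when a probed kernel feature is missing */
if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
	pr_warn("kernel doesn't support global data sections\n");
	return -ENOTSUP;
}
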
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
{
- struct bpf_map_info map_info = {};
+ struct bpf_map_info map_info;
char msg[STRERR_BUFSIZE];
- __u32 map_info_len;
-
- map_info_len = sizeof(map_info);
+ __u32 map_info_len = sizeof(map_info);
+ int err;
- if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
- pr_warn("failed to get map info for map FD %d: %s\n",
- map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
+ memset(&map_info, 0, map_info_len);
+ err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
+ if (err && errno == EINVAL)
+ err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
+ if (err) {
+ pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
+ libbpf_strerror_r(errno, msg, sizeof(msg)));
return false;
}
@@ -3355,7 +4937,8 @@ static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
map_info.key_size == map->def.key_size &&
map_info.value_size == map->def.value_size &&
map_info.max_entries == map->def.max_entries &&
- map_info.map_flags == map->def.map_flags);
+ map_info.map_flags == map->def.map_flags &&
+ map_info.map_extra == map->map_extra);
}
static int
@@ -3387,10 +4970,10 @@ bpf_object__reuse_map(struct bpf_map *map)
}
err = bpf_map__reuse_fd(map, pin_fd);
- if (err) {
- close(pin_fd);
+ close(pin_fd);
+ if (err)
return err;
- }
+
map->pinned = true;
pr_debug("reused pinned map at '%s'\n", map->pin_path);
@@ -3404,10 +4987,13 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
- /* kernel already zero-initializes .bss map. */
- if (map_type == LIBBPF_MAP_BSS)
+ if (obj->gen_loader) {
+ bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
+ map->mmaped, map->def.value_size);
+ if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
+ bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
return 0;
-
+ }
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
@@ -3431,376 +5017,325 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
return 0;
}
-static int
-bpf_object__create_maps(struct bpf_object *obj)
+static void bpf_map__destroy(struct bpf_map *map);
+
+static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
- struct bpf_create_map_attr create_attr = {};
- int nr_cpus = 0;
- unsigned int i;
- int err;
+ LIBBPF_OPTS(bpf_map_create_opts, create_attr);
+ struct bpf_map_def *def = &map->def;
+ const char *map_name = NULL;
+ int err = 0;
- for (i = 0; i < obj->nr_maps; i++) {
- struct bpf_map *map = &obj->maps[i];
- struct bpf_map_def *def = &map->def;
- char *cp, errmsg[STRERR_BUFSIZE];
- int *pfd = &map->fd;
+ if (kernel_supports(obj, FEAT_PROG_NAME))
+ map_name = map->name;
+ create_attr.map_ifindex = map->map_ifindex;
+ create_attr.map_flags = def->map_flags;
+ create_attr.numa_node = map->numa_node;
+ create_attr.map_extra = map->map_extra;
- if (map->pin_path) {
- err = bpf_object__reuse_map(map);
+ if (bpf_map__is_struct_ops(map))
+ create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
+
+ if (obj->btf && btf__fd(obj->btf) >= 0) {
+ create_attr.btf_fd = btf__fd(obj->btf);
+ create_attr.btf_key_type_id = map->btf_key_type_id;
+ create_attr.btf_value_type_id = map->btf_value_type_id;
+ }
+
+ if (bpf_map_type__is_map_in_map(def->type)) {
+ if (map->inner_map) {
+ err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
- pr_warn("error reusing pinned map %s\n",
- map->name);
+ pr_warn("map '%s': failed to create inner map: %d\n",
+ map->name, err);
return err;
}
+ map->inner_map_fd = bpf_map__fd(map->inner_map);
}
+ if (map->inner_map_fd >= 0)
+ create_attr.inner_map_fd = map->inner_map_fd;
+ }
- if (map->fd >= 0) {
- pr_debug("skip map create (preset) %s: fd=%d\n",
- map->name, map->fd);
- continue;
- }
-
- if (obj->caps.name)
- create_attr.name = map->name;
- create_attr.map_ifindex = map->map_ifindex;
- create_attr.map_type = def->type;
- create_attr.map_flags = def->map_flags;
- create_attr.key_size = def->key_size;
- create_attr.value_size = def->value_size;
- if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
- !def->max_entries) {
- if (!nr_cpus)
- nr_cpus = libbpf_num_possible_cpus();
- if (nr_cpus < 0) {
- pr_warn("failed to determine number of system CPUs: %d\n",
- nr_cpus);
- err = nr_cpus;
- goto err_out;
- }
- pr_debug("map '%s': setting size to %d\n",
- map->name, nr_cpus);
- create_attr.max_entries = nr_cpus;
- } else {
- create_attr.max_entries = def->max_entries;
- }
+ switch (def->type) {
+ case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+ case BPF_MAP_TYPE_CGROUP_ARRAY:
+ case BPF_MAP_TYPE_STACK_TRACE:
+ case BPF_MAP_TYPE_ARRAY_OF_MAPS:
+ case BPF_MAP_TYPE_HASH_OF_MAPS:
+ case BPF_MAP_TYPE_DEVMAP:
+ case BPF_MAP_TYPE_DEVMAP_HASH:
+ case BPF_MAP_TYPE_CPUMAP:
+ case BPF_MAP_TYPE_XSKMAP:
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ case BPF_MAP_TYPE_QUEUE:
+ case BPF_MAP_TYPE_STACK:
create_attr.btf_fd = 0;
create_attr.btf_key_type_id = 0;
create_attr.btf_value_type_id = 0;
- if (bpf_map_type__is_map_in_map(def->type) &&
- map->inner_map_fd >= 0)
- create_attr.inner_map_fd = map->inner_map_fd;
- if (bpf_map__is_struct_ops(map))
- create_attr.btf_vmlinux_value_type_id =
- map->btf_vmlinux_value_type_id;
-
- if (obj->btf && !bpf_map_find_btf_info(obj, map)) {
- create_attr.btf_fd = btf__fd(obj->btf);
- create_attr.btf_key_type_id = map->btf_key_type_id;
- create_attr.btf_value_type_id = map->btf_value_type_id;
- }
-
- *pfd = bpf_create_map_xattr(&create_attr);
- if (*pfd < 0 && (create_attr.btf_key_type_id ||
- create_attr.btf_value_type_id)) {
- err = -errno;
- cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
- map->name, cp, err);
- create_attr.btf_fd = 0;
- create_attr.btf_key_type_id = 0;
- create_attr.btf_value_type_id = 0;
- map->btf_key_type_id = 0;
- map->btf_value_type_id = 0;
- *pfd = bpf_create_map_xattr(&create_attr);
- }
-
- if (*pfd < 0) {
- size_t j;
+ map->btf_key_type_id = 0;
+ map->btf_value_type_id = 0;
+ default:
+ break;
+ }
- err = -errno;
-err_out:
- cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("failed to create map (name: '%s'): %s(%d)\n",
- map->name, cp, err);
- pr_perm_msg(err);
- for (j = 0; j < i; j++)
- zclose(obj->maps[j].fd);
- return err;
- }
+ if (obj->gen_loader) {
+ bpf_gen__map_create(obj->gen_loader, def->type, map_name,
+ def->key_size, def->value_size, def->max_entries,
+ &create_attr, is_inner ? -1 : map - obj->maps);
+ /* Pretend to have a valid FD to pass various fd >= 0 checks.
+ * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
+ */
+ map->fd = 0;
+ } else {
+ map->fd = bpf_map_create(def->type, map_name,
+ def->key_size, def->value_size,
+ def->max_entries, &create_attr);
+ }
+ if (map->fd < 0 && (create_attr.btf_key_type_id ||
+ create_attr.btf_value_type_id)) {
+ char *cp, errmsg[STRERR_BUFSIZE];
- if (bpf_map__is_internal(map)) {
- err = bpf_object__populate_internal_map(obj, map);
- if (err < 0) {
- zclose(*pfd);
- goto err_out;
- }
- }
+ err = -errno;
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
+ pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
+ map->name, cp, err);
+ create_attr.btf_fd = 0;
+ create_attr.btf_key_type_id = 0;
+ create_attr.btf_value_type_id = 0;
+ map->btf_key_type_id = 0;
+ map->btf_value_type_id = 0;
+ map->fd = bpf_map_create(def->type, map_name,
+ def->key_size, def->value_size,
+ def->max_entries, &create_attr);
+ }
- if (map->pin_path && !map->pinned) {
- err = bpf_map__pin(map, NULL);
- if (err) {
- pr_warn("failed to auto-pin map name '%s' at '%s'\n",
- map->name, map->pin_path);
- return err;
- }
- }
+ err = map->fd < 0 ? -errno : 0;
- pr_debug("created map %s: fd=%d\n", map->name, *pfd);
+ if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
+ if (obj->gen_loader)
+ map->inner_map->fd = -1;
+ bpf_map__destroy(map->inner_map);
+ zfree(&map->inner_map);
}
- return 0;
+ return err;
}
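/*
 * Editor's sketch (not part of this patch): the bpf_map_create() +
 * LIBBPF_OPTS() pattern used above, shown in isolation with arbitrary
 * sizes. example_create_array_map() is a hypothetical helper.
 */
static int example_create_array_map(void)
{
        LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = 0);

        /* returns a map FD on success, or a negative value with errno set on failure */
        return bpf_map_create(BPF_MAP_TYPE_ARRAY, "example_arr",
                              sizeof(int), sizeof(long long), 64, &opts);
}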
-static int
-check_btf_ext_reloc_err(struct bpf_program *prog, int err,
- void *btf_prog_info, const char *info_name)
+static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
{
- if (err != -ENOENT) {
- pr_warn("Error in loading %s for sec %s.\n",
- info_name, prog->section_name);
- return err;
- }
+ const struct bpf_map *targ_map;
+ unsigned int i;
+ int fd, err = 0;
- /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
+ for (i = 0; i < map->init_slots_sz; i++) {
+ if (!map->init_slots[i])
+ continue;
- if (btf_prog_info) {
- /*
- * Some info has already been found but has problem
- * in the last btf_ext reloc. Must have to error out.
- */
- pr_warn("Error in relocating %s for sec %s.\n",
- info_name, prog->section_name);
- return err;
+ targ_map = map->init_slots[i];
+ fd = bpf_map__fd(targ_map);
+
+ if (obj->gen_loader) {
+ bpf_gen__populate_outer_map(obj->gen_loader,
+ map - obj->maps, i,
+ targ_map - obj->maps);
+ } else {
+ err = bpf_map_update_elem(map->fd, &i, &fd, 0);
+ }
+ if (err) {
+ err = -errno;
+ pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
+ map->name, i, targ_map->name, fd, err);
+ return err;
+ }
+ pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
+ map->name, i, targ_map->name, fd);
}
- /* Have problem loading the very first info. Ignore the rest. */
- pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
- info_name, prog->section_name, info_name);
+ zfree(&map->init_slots);
+ map->init_slots_sz = 0;
+
return 0;
}
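/*
 * Editor's sketch (not part of this patch, BPF-side C): a declarative
 * map-in-map whose compile-time .values initializer is what populates the
 * init_slots[] consumed by init_map_in_map_slots() above. Assumes the usual
 * bpf_helpers.h __uint/__type/__array macros; all names are made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner_map {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} inner_a SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
        __uint(max_entries, 2);
        __type(key, int);
        __array(values, struct inner_map);
} outer SEC(".maps") = {
        .values = { [0] = &inner_a },
};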
-static int
-bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
- const char *section_name, __u32 insn_offset)
+static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
{
- int err;
+ const struct bpf_program *targ_prog;
+ unsigned int i;
+ int fd, err;
- if (!insn_offset || prog->func_info) {
- /*
- * !insn_offset => main program
- *
- * For sub prog, the main program's func_info has to
- * be loaded first (i.e. prog->func_info != NULL)
- */
- err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
- section_name, insn_offset,
- &prog->func_info,
- &prog->func_info_cnt);
- if (err)
- return check_btf_ext_reloc_err(prog, err,
- prog->func_info,
- "bpf_func_info");
+ if (obj->gen_loader)
+ return -ENOTSUP;
- prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
- }
+ for (i = 0; i < map->init_slots_sz; i++) {
+ if (!map->init_slots[i])
+ continue;
- if (!insn_offset || prog->line_info) {
- err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
- section_name, insn_offset,
- &prog->line_info,
- &prog->line_info_cnt);
- if (err)
- return check_btf_ext_reloc_err(prog, err,
- prog->line_info,
- "bpf_line_info");
+ targ_prog = map->init_slots[i];
+ fd = bpf_program__fd(targ_prog);
- prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
+ err = bpf_map_update_elem(map->fd, &i, &fd, 0);
+ if (err) {
+ err = -errno;
+ pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
+ map->name, i, targ_prog->name, fd, err);
+ return err;
+ }
+ pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
+ map->name, i, targ_prog->name, fd);
}
+ zfree(&map->init_slots);
+ map->init_slots_sz = 0;
+
return 0;
}
-#define BPF_CORE_SPEC_MAX_LEN 64
-
-/* represents BPF CO-RE field or array element accessor */
-struct bpf_core_accessor {
- __u32 type_id; /* struct/union type or array element type */
- __u32 idx; /* field index or array index */
- const char *name; /* field name or NULL for array accessor */
-};
-
-struct bpf_core_spec {
- const struct btf *btf;
- /* high-level spec: named fields and array indices only */
- struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
- /* high-level spec length */
- int len;
- /* raw, low-level spec: 1-to-1 with accessor spec string */
- int raw_spec[BPF_CORE_SPEC_MAX_LEN];
- /* raw spec length */
- int raw_len;
- /* field bit offset represented by spec */
- __u32 bit_offset;
-};
-
-static bool str_is_empty(const char *s)
+static int bpf_object_init_prog_arrays(struct bpf_object *obj)
{
- return !s || !s[0];
-}
+ struct bpf_map *map;
+ int i, err;
-static bool is_flex_arr(const struct btf *btf,
- const struct bpf_core_accessor *acc,
- const struct btf_array *arr)
-{
- const struct btf_type *t;
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &obj->maps[i];
- /* not a flexible array, if not inside a struct or has non-zero size */
- if (!acc->name || arr->nelems > 0)
- return false;
+ if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
+ continue;
- /* has to be the last member of enclosing struct */
- t = btf__type_by_id(btf, acc->type_id);
- return acc->idx == btf_vlen(t) - 1;
+ err = init_prog_array_slots(obj, map);
+ if (err < 0) {
+ zclose(map->fd);
+ return err;
+ }
+ }
+ return 0;
}
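/*
 * Editor's sketch (not part of this patch, BPF-side C): a PROG_ARRAY with a
 * static .values initializer; init_prog_array_slots() above is what turns
 * those references into real program FDs after load. Assumes bpf_helpers.h
 * macros; program and map names are made up.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int example_tail_target(struct __sk_buff *skb)
{
        return 0;
}

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 4);
        __uint(key_size, sizeof(__u32));
        __array(values, int (void *));
} jmp_table SEC(".maps") = {
        .values = { [0] = (void *)&example_tail_target },
};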
-/*
- * Turn bpf_field_reloc into a low- and high-level spec representation,
- * validating correctness along the way, as well as calculating resulting
- * field bit offset, specified by accessor string. Low-level spec captures
- * every single level of nestedness, including traversing anonymous
- * struct/union members. High-level one only captures semantically meaningful
- * "turning points": named fields and array indicies.
- * E.g., for this case:
- *
- * struct sample {
- * int __unimportant;
- * struct {
- * int __1;
- * int __2;
- * int a[7];
- * };
- * };
- *
- * struct sample *s = ...;
- *
- * int x = &s->a[3]; // access string = '0:1:2:3'
- *
- * Low-level spec has 1:1 mapping with each element of access string (it's
- * just a parsed access string representation): [0, 1, 2, 3].
- *
- * High-level spec will capture only 3 points:
- * - intial zero-index access by pointer (&s->... is the same as &s[0]...);
- * - field 'a' access (corresponds to '2' in low-level spec);
- * - array element #3 access (corresponds to '3' in low-level spec).
- *
- */
-static int bpf_core_spec_parse(const struct btf *btf,
- __u32 type_id,
- const char *spec_str,
- struct bpf_core_spec *spec)
+static int map_set_def_max_entries(struct bpf_map *map)
{
- int access_idx, parsed_len, i;
- struct bpf_core_accessor *acc;
- const struct btf_type *t;
- const char *name;
- __u32 id;
- __s64 sz;
+ if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
+ int nr_cpus;
- if (str_is_empty(spec_str) || *spec_str == ':')
- return -EINVAL;
-
- memset(spec, 0, sizeof(*spec));
- spec->btf = btf;
-
- /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
- while (*spec_str) {
- if (*spec_str == ':')
- ++spec_str;
- if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
- return -EINVAL;
- if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
- return -E2BIG;
- spec_str += parsed_len;
- spec->raw_spec[spec->raw_len++] = access_idx;
+ nr_cpus = libbpf_num_possible_cpus();
+ if (nr_cpus < 0) {
+ pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
+ map->name, nr_cpus);
+ return nr_cpus;
+ }
+ pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
+ map->def.max_entries = nr_cpus;
}
- if (spec->raw_len == 0)
- return -EINVAL;
-
- /* first spec value is always reloc type array index */
- t = skip_mods_and_typedefs(btf, type_id, &id);
- if (!t)
- return -EINVAL;
-
- access_idx = spec->raw_spec[0];
- spec->spec[0].type_id = id;
- spec->spec[0].idx = access_idx;
- spec->len++;
-
- sz = btf__resolve_size(btf, id);
- if (sz < 0)
- return sz;
- spec->bit_offset = access_idx * sz * 8;
-
- for (i = 1; i < spec->raw_len; i++) {
- t = skip_mods_and_typedefs(btf, id, &id);
- if (!t)
- return -EINVAL;
+ return 0;
+}
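/*
 * Editor's sketch (not part of this patch, BPF-side C): a perf event array
 * declared without max_entries; with the helper above, libbpf sizes it to
 * the number of possible CPUs at load time. Assumes bpf_helpers.h macros.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(int));
} events SEC(".maps");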
- access_idx = spec->raw_spec[i];
- acc = &spec->spec[spec->len];
+static int
+bpf_object__create_maps(struct bpf_object *obj)
+{
+ struct bpf_map *map;
+ char *cp, errmsg[STRERR_BUFSIZE];
+ unsigned int i, j;
+ int err;
+ bool retried;
- if (btf_is_composite(t)) {
- const struct btf_member *m;
- __u32 bit_offset;
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &obj->maps[i];
- if (access_idx >= btf_vlen(t))
- return -EINVAL;
+ /* To support old kernels, we skip creating global data maps
+ * (.rodata, .data, .kconfig, etc); later on, during program
+ * loading, if we detect that at least one of the to-be-loaded
+ * programs is referencing any global data map, we'll error
+ * out with program name and relocation index logged.
+ * This approach allows us to accommodate Clang emitting
+ * unnecessary .rodata.str1.1 sections for string literals,
+ * and also allows CO-RE applications that use
+ * global variables in some BPF programs, but not in others.
+ * If those global variable-using programs are not loaded at
+ * runtime due to bpf_program__set_autoload(prog, false),
+ * bpf_object loading will succeed just fine even on old
+ * kernels.
+ */
+ if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
+ map->autocreate = false;
- bit_offset = btf_member_bit_offset(t, access_idx);
- spec->bit_offset += bit_offset;
+ if (!map->autocreate) {
+ pr_debug("map '%s': skipped auto-creating...\n", map->name);
+ continue;
+ }
- m = btf_members(t) + access_idx;
- if (m->name_off) {
- name = btf__name_by_offset(btf, m->name_off);
- if (str_is_empty(name))
- return -EINVAL;
+ err = map_set_def_max_entries(map);
+ if (err)
+ goto err_out;
- acc->type_id = id;
- acc->idx = access_idx;
- acc->name = name;
- spec->len++;
+ retried = false;
+retry:
+ if (map->pin_path) {
+ err = bpf_object__reuse_map(map);
+ if (err) {
+ pr_warn("map '%s': error reusing pinned map\n",
+ map->name);
+ goto err_out;
}
+ if (retried && map->fd < 0) {
+ pr_warn("map '%s': cannot find pinned map\n",
+ map->name);
+ err = -ENOENT;
+ goto err_out;
+ }
+ }
- id = m->type;
- } else if (btf_is_array(t)) {
- const struct btf_array *a = btf_array(t);
- bool flex;
+ if (map->fd >= 0) {
+ pr_debug("map '%s': skipping creation (preset fd=%d)\n",
+ map->name, map->fd);
+ } else {
+ err = bpf_object__create_map(obj, map, false);
+ if (err)
+ goto err_out;
- t = skip_mods_and_typedefs(btf, a->type, &id);
- if (!t)
- return -EINVAL;
+ pr_debug("map '%s': created successfully, fd=%d\n",
+ map->name, map->fd);
- flex = is_flex_arr(btf, acc - 1, a);
- if (!flex && access_idx >= a->nelems)
- return -EINVAL;
+ if (bpf_map__is_internal(map)) {
+ err = bpf_object__populate_internal_map(obj, map);
+ if (err < 0) {
+ zclose(map->fd);
+ goto err_out;
+ }
+ }
- spec->spec[spec->len].type_id = id;
- spec->spec[spec->len].idx = access_idx;
- spec->len++;
+ if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
+ err = init_map_in_map_slots(obj, map);
+ if (err < 0) {
+ zclose(map->fd);
+ goto err_out;
+ }
+ }
+ }
- sz = btf__resolve_size(btf, id);
- if (sz < 0)
- return sz;
- spec->bit_offset += access_idx * sz * 8;
- } else {
- pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
- type_id, spec_str, i, id, btf_kind(t));
- return -EINVAL;
+ if (map->pin_path && !map->pinned) {
+ err = bpf_map__pin(map, NULL);
+ if (err) {
+ zclose(map->fd);
+ if (!retried && err == -EEXIST) {
+ retried = true;
+ goto retry;
+ }
+ pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
+ map->name, map->pin_path, err);
+ goto err_out;
+ }
}
}
return 0;
+
+err_out:
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
+ pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
+ pr_perm_msg(err);
+ for (j = 0; j < i; j++)
+ zclose(obj->maps[j].fd);
+ return err;
}
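/*
 * Editor's sketch (not part of this patch): user-space side of the
 * map->autocreate flag consulted above, assuming the public
 * bpf_map__set_autocreate() setter from the same libbpf API family.
 * "optional_map" is a made-up map name.
 */
static int example_skip_optional_map(struct bpf_object *obj)
{
        struct bpf_map *m = bpf_object__find_map_by_name(obj, "optional_map");

        return m ? bpf_map__set_autocreate(m, false) : -ENOENT;
}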
static bool bpf_core_is_flavor_sep(const char *s)
@@ -3815,7 +5350,7 @@ static bool bpf_core_is_flavor_sep(const char *s)
* before last triple underscore. Struct name part after last triple
* underscore is ignored by BPF CO-RE relocation during relocation matching.
*/
-static size_t bpf_core_essential_name_len(const char *name)
+size_t bpf_core_essential_name_len(const char *name)
{
size_t n = strlen(name);
int i;
@@ -3827,769 +5362,362 @@ static size_t bpf_core_essential_name_len(const char *name)
return n;
}
-/* dynamically sized list of type IDs */
-struct ids_vec {
- __u32 *data;
- int len;
-};
-
-static void bpf_core_free_cands(struct ids_vec *cand_ids)
+void bpf_core_free_cands(struct bpf_core_cand_list *cands)
{
- free(cand_ids->data);
- free(cand_ids);
+ if (!cands)
+ return;
+
+ free(cands->cands);
+ free(cands);
}
-static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
- __u32 local_type_id,
- const struct btf *targ_btf)
+int bpf_core_add_cands(struct bpf_core_cand *local_cand,
+ size_t local_essent_len,
+ const struct btf *targ_btf,
+ const char *targ_btf_name,
+ int targ_start_id,
+ struct bpf_core_cand_list *cands)
{
- size_t local_essent_len, targ_essent_len;
- const char *local_name, *targ_name;
- const struct btf_type *t;
- struct ids_vec *cand_ids;
- __u32 *new_ids;
- int i, err, n;
-
- t = btf__type_by_id(local_btf, local_type_id);
- if (!t)
- return ERR_PTR(-EINVAL);
-
- local_name = btf__name_by_offset(local_btf, t->name_off);
- if (str_is_empty(local_name))
- return ERR_PTR(-EINVAL);
- local_essent_len = bpf_core_essential_name_len(local_name);
+ struct bpf_core_cand *new_cands, *cand;
+ const struct btf_type *t, *local_t;
+ const char *targ_name, *local_name;
+ size_t targ_essent_len;
+ int n, i;
- cand_ids = calloc(1, sizeof(*cand_ids));
- if (!cand_ids)
- return ERR_PTR(-ENOMEM);
+ local_t = btf__type_by_id(local_cand->btf, local_cand->id);
+ local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
- n = btf__get_nr_types(targ_btf);
- for (i = 1; i <= n; i++) {
+ n = btf__type_cnt(targ_btf);
+ for (i = targ_start_id; i < n; i++) {
t = btf__type_by_id(targ_btf, i);
- targ_name = btf__name_by_offset(targ_btf, t->name_off);
- if (str_is_empty(targ_name))
+ if (!btf_kind_core_compat(t, local_t))
continue;
- t = skip_mods_and_typedefs(targ_btf, i, NULL);
- if (!btf_is_composite(t) && !btf_is_array(t))
+ targ_name = btf__name_by_offset(targ_btf, t->name_off);
+ if (str_is_empty(targ_name))
continue;
targ_essent_len = bpf_core_essential_name_len(targ_name);
if (targ_essent_len != local_essent_len)
continue;
- if (strncmp(local_name, targ_name, local_essent_len) == 0) {
- pr_debug("[%d] %s: found candidate [%d] %s\n",
- local_type_id, local_name, i, targ_name);
- new_ids = reallocarray(cand_ids->data,
- cand_ids->len + 1,
- sizeof(*cand_ids->data));
- if (!new_ids) {
- err = -ENOMEM;
- goto err_out;
- }
- cand_ids->data = new_ids;
- cand_ids->data[cand_ids->len++] = i;
- }
- }
- return cand_ids;
-err_out:
- bpf_core_free_cands(cand_ids);
- return ERR_PTR(err);
-}
-
-/* Check two types for compatibility, skipping const/volatile/restrict and
- * typedefs, to ensure we are relocating compatible entities:
- * - any two STRUCTs/UNIONs are compatible and can be mixed;
- * - any two FWDs are compatible, if their names match (modulo flavor suffix);
- * - any two PTRs are always compatible;
- * - for ENUMs, names should be the same (ignoring flavor suffix) or at
- * least one of enums should be anonymous;
- * - for ENUMs, check sizes, names are ignored;
- * - for INT, size and signedness are ignored;
- * - for ARRAY, dimensionality is ignored, element types are checked for
- * compatibility recursively;
- * - everything else shouldn't be ever a target of relocation.
- * These rules are not set in stone and probably will be adjusted as we get
- * more experience with using BPF CO-RE relocations.
- */
-static int bpf_core_fields_are_compat(const struct btf *local_btf,
- __u32 local_id,
- const struct btf *targ_btf,
- __u32 targ_id)
-{
- const struct btf_type *local_type, *targ_type;
+ if (strncmp(local_name, targ_name, local_essent_len) != 0)
+ continue;
-recur:
- local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
- targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
- if (!local_type || !targ_type)
- return -EINVAL;
+ pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
+ local_cand->id, btf_kind_str(local_t),
+ local_name, i, btf_kind_str(t), targ_name,
+ targ_btf_name);
+ new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
+ sizeof(*cands->cands));
+ if (!new_cands)
+ return -ENOMEM;
- if (btf_is_composite(local_type) && btf_is_composite(targ_type))
- return 1;
- if (btf_kind(local_type) != btf_kind(targ_type))
- return 0;
+ cand = &new_cands[cands->len];
+ cand->btf = targ_btf;
+ cand->id = i;
- switch (btf_kind(local_type)) {
- case BTF_KIND_PTR:
- return 1;
- case BTF_KIND_FWD:
- case BTF_KIND_ENUM: {
- const char *local_name, *targ_name;
- size_t local_len, targ_len;
-
- local_name = btf__name_by_offset(local_btf,
- local_type->name_off);
- targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
- local_len = bpf_core_essential_name_len(local_name);
- targ_len = bpf_core_essential_name_len(targ_name);
- /* one of them is anonymous or both w/ same flavor-less names */
- return local_len == 0 || targ_len == 0 ||
- (local_len == targ_len &&
- strncmp(local_name, targ_name, local_len) == 0);
- }
- case BTF_KIND_INT:
- /* just reject deprecated bitfield-like integers; all other
- * integers are by default compatible between each other
- */
- return btf_int_offset(local_type) == 0 &&
- btf_int_offset(targ_type) == 0;
- case BTF_KIND_ARRAY:
- local_id = btf_array(local_type)->type;
- targ_id = btf_array(targ_type)->type;
- goto recur;
- default:
- pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
- btf_kind(local_type), local_id, targ_id);
- return 0;
+ cands->cands = new_cands;
+ cands->len++;
}
+ return 0;
}
-/*
- * Given single high-level named field accessor in local type, find
- * corresponding high-level accessor for a target type. Along the way,
- * maintain low-level spec for target as well. Also keep updating target
- * bit offset.
- *
- * Searching is performed through recursive exhaustive enumeration of all
- * fields of a struct/union. If there are any anonymous (embedded)
- * structs/unions, they are recursively searched as well. If field with
- * desired name is found, check compatibility between local and target types,
- * before returning result.
- *
- * 1 is returned, if field is found.
- * 0 is returned if no compatible field is found.
- * <0 is returned on error.
- */
-static int bpf_core_match_member(const struct btf *local_btf,
- const struct bpf_core_accessor *local_acc,
- const struct btf *targ_btf,
- __u32 targ_id,
- struct bpf_core_spec *spec,
- __u32 *next_targ_id)
-{
- const struct btf_type *local_type, *targ_type;
- const struct btf_member *local_member, *m;
- const char *local_name, *targ_name;
- __u32 local_id;
- int i, n, found;
-
- targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
- if (!targ_type)
- return -EINVAL;
- if (!btf_is_composite(targ_type))
- return 0;
-
- local_id = local_acc->type_id;
- local_type = btf__type_by_id(local_btf, local_id);
- local_member = btf_members(local_type) + local_acc->idx;
- local_name = btf__name_by_offset(local_btf, local_member->name_off);
+static int load_module_btfs(struct bpf_object *obj)
+{
+ struct bpf_btf_info info;
+ struct module_btf *mod_btf;
+ struct btf *btf;
+ char name[64];
+ __u32 id = 0, len;
+ int err, fd;
- n = btf_vlen(targ_type);
- m = btf_members(targ_type);
- for (i = 0; i < n; i++, m++) {
- __u32 bit_offset;
+ if (obj->btf_modules_loaded)
+ return 0;
- bit_offset = btf_member_bit_offset(targ_type, i);
+ if (obj->gen_loader)
+ return 0;
- /* too deep struct/union/array nesting */
- if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
- return -E2BIG;
+ /* don't do this again, even if we find no module BTFs */
+ obj->btf_modules_loaded = true;
- /* speculate this member will be the good one */
- spec->bit_offset += bit_offset;
- spec->raw_spec[spec->raw_len++] = i;
-
- targ_name = btf__name_by_offset(targ_btf, m->name_off);
- if (str_is_empty(targ_name)) {
- /* embedded struct/union, we need to go deeper */
- found = bpf_core_match_member(local_btf, local_acc,
- targ_btf, m->type,
- spec, next_targ_id);
- if (found) /* either found or error */
- return found;
- } else if (strcmp(local_name, targ_name) == 0) {
- /* matching named field */
- struct bpf_core_accessor *targ_acc;
-
- targ_acc = &spec->spec[spec->len++];
- targ_acc->type_id = targ_id;
- targ_acc->idx = i;
- targ_acc->name = targ_name;
-
- *next_targ_id = m->type;
- found = bpf_core_fields_are_compat(local_btf,
- local_member->type,
- targ_btf, m->type);
- if (!found)
- spec->len--; /* pop accessor */
- return found;
- }
- /* member turned out not to be what we looked for */
- spec->bit_offset -= bit_offset;
- spec->raw_len--;
- }
+ /* kernel too old to support module BTFs */
+ if (!kernel_supports(obj, FEAT_MODULE_BTF))
+ return 0;
- return 0;
-}
+ while (true) {
+ err = bpf_btf_get_next_id(id, &id);
+ if (err && errno == ENOENT)
+ return 0;
+ if (err) {
+ err = -errno;
+ pr_warn("failed to iterate BTF objects: %d\n", err);
+ return err;
+ }
-/*
- * Try to match local spec to a target type and, if successful, produce full
- * target spec (high-level, low-level + bit offset).
- */
-static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
- const struct btf *targ_btf, __u32 targ_id,
- struct bpf_core_spec *targ_spec)
-{
- const struct btf_type *targ_type;
- const struct bpf_core_accessor *local_acc;
- struct bpf_core_accessor *targ_acc;
- int i, sz, matched;
+ fd = bpf_btf_get_fd_by_id(id);
+ if (fd < 0) {
+ if (errno == ENOENT)
+ continue; /* expected race: BTF was unloaded */
+ err = -errno;
+ pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
+ return err;
+ }
- memset(targ_spec, 0, sizeof(*targ_spec));
- targ_spec->btf = targ_btf;
+ len = sizeof(info);
+ memset(&info, 0, sizeof(info));
+ info.name = ptr_to_u64(name);
+ info.name_len = sizeof(name);
- local_acc = &local_spec->spec[0];
- targ_acc = &targ_spec->spec[0];
+ err = bpf_btf_get_info_by_fd(fd, &info, &len);
+ if (err) {
+ err = -errno;
+ pr_warn("failed to get BTF object #%d info: %d\n", id, err);
+ goto err_out;
+ }
- for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
- targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
- &targ_id);
- if (!targ_type)
- return -EINVAL;
+ /* ignore non-module BTFs */
+ if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
+ close(fd);
+ continue;
+ }
- if (local_acc->name) {
- matched = bpf_core_match_member(local_spec->btf,
- local_acc,
- targ_btf, targ_id,
- targ_spec, &targ_id);
- if (matched <= 0)
- return matched;
- } else {
- /* for i=0, targ_id is already treated as array element
- * type (because it's the original struct), for others
- * we should find array element type first
- */
- if (i > 0) {
- const struct btf_array *a;
- bool flex;
-
- if (!btf_is_array(targ_type))
- return 0;
-
- a = btf_array(targ_type);
- flex = is_flex_arr(targ_btf, targ_acc - 1, a);
- if (!flex && local_acc->idx >= a->nelems)
- return 0;
- if (!skip_mods_and_typedefs(targ_btf, a->type,
- &targ_id))
- return -EINVAL;
- }
+ btf = btf_get_from_fd(fd, obj->btf_vmlinux);
+ err = libbpf_get_error(btf);
+ if (err) {
+ pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
+ name, id, err);
+ goto err_out;
+ }
- /* too deep struct/union/array nesting */
- if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
- return -E2BIG;
+ err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
+ sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
+ if (err)
+ goto err_out;
- targ_acc->type_id = targ_id;
- targ_acc->idx = local_acc->idx;
- targ_acc->name = NULL;
- targ_spec->len++;
- targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
- targ_spec->raw_len++;
+ mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
- sz = btf__resolve_size(targ_btf, targ_id);
- if (sz < 0)
- return sz;
- targ_spec->bit_offset += local_acc->idx * sz * 8;
+ mod_btf->btf = btf;
+ mod_btf->id = id;
+ mod_btf->fd = fd;
+ mod_btf->name = strdup(name);
+ if (!mod_btf->name) {
+ err = -ENOMEM;
+ goto err_out;
}
+ continue;
+
+err_out:
+ close(fd);
+ return err;
}
- return 1;
+ return 0;
}
-static int bpf_core_calc_field_relo(const struct bpf_program *prog,
- const struct bpf_field_reloc *relo,
- const struct bpf_core_spec *spec,
- __u32 *val, bool *validate)
+static struct bpf_core_cand_list *
+bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
{
- const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
- const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
- __u32 byte_off, byte_sz, bit_off, bit_sz;
- const struct btf_member *m;
- const struct btf_type *mt;
- bool bitfield;
- __s64 sz;
-
- /* a[n] accessor needs special handling */
- if (!acc->name) {
- if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
- *val = spec->bit_offset / 8;
- } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
- sz = btf__resolve_size(spec->btf, acc->type_id);
- if (sz < 0)
- return -EINVAL;
- *val = sz;
- } else {
- pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
- bpf_program__title(prog, false),
- relo->kind, relo->insn_off / 8);
- return -EINVAL;
- }
- if (validate)
- *validate = true;
- return 0;
- }
+ struct bpf_core_cand local_cand = {};
+ struct bpf_core_cand_list *cands;
+ const struct btf *main_btf;
+ const struct btf_type *local_t;
+ const char *local_name;
+ size_t local_essent_len;
+ int err, i;
- m = btf_members(t) + acc->idx;
- mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
- bit_off = spec->bit_offset;
- bit_sz = btf_member_bitfield_size(t, acc->idx);
-
- bitfield = bit_sz > 0;
- if (bitfield) {
- byte_sz = mt->size;
- byte_off = bit_off / 8 / byte_sz * byte_sz;
- /* figure out smallest int size necessary for bitfield load */
- while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
- if (byte_sz >= 8) {
- /* bitfield can't be read with 64-bit read */
- pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
- bpf_program__title(prog, false),
- relo->kind, relo->insn_off / 8);
- return -E2BIG;
- }
- byte_sz *= 2;
- byte_off = bit_off / 8 / byte_sz * byte_sz;
- }
- } else {
- sz = btf__resolve_size(spec->btf, m->type);
- if (sz < 0)
- return -EINVAL;
- byte_sz = sz;
- byte_off = spec->bit_offset / 8;
- bit_sz = byte_sz * 8;
- }
+ local_cand.btf = local_btf;
+ local_cand.id = local_type_id;
+ local_t = btf__type_by_id(local_btf, local_type_id);
+ if (!local_t)
+ return ERR_PTR(-EINVAL);
- /* for bitfields, all the relocatable aspects are ambiguous and we
- * might disagree with compiler, so turn off validation of expected
- * value, except for signedness
- */
- if (validate)
- *validate = !bitfield;
+ local_name = btf__name_by_offset(local_btf, local_t->name_off);
+ if (str_is_empty(local_name))
+ return ERR_PTR(-EINVAL);
+ local_essent_len = bpf_core_essential_name_len(local_name);
- switch (relo->kind) {
- case BPF_FIELD_BYTE_OFFSET:
- *val = byte_off;
- break;
- case BPF_FIELD_BYTE_SIZE:
- *val = byte_sz;
- break;
- case BPF_FIELD_SIGNED:
- /* enums will be assumed unsigned */
- *val = btf_is_enum(mt) ||
- (btf_int_encoding(mt) & BTF_INT_SIGNED);
- if (validate)
- *validate = true; /* signedness is never ambiguous */
- break;
- case BPF_FIELD_LSHIFT_U64:
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- *val = 64 - (bit_off + bit_sz - byte_off * 8);
-#else
- *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
-#endif
- break;
- case BPF_FIELD_RSHIFT_U64:
- *val = 64 - bit_sz;
- if (validate)
- *validate = true; /* right shift is never ambiguous */
- break;
- case BPF_FIELD_EXISTS:
- default:
- pr_warn("prog '%s': unknown relo %d at insn #%d\n",
- bpf_program__title(prog, false),
- relo->kind, relo->insn_off / 8);
- return -EINVAL;
- }
+ cands = calloc(1, sizeof(*cands));
+ if (!cands)
+ return ERR_PTR(-ENOMEM);
- return 0;
-}
+ /* Attempt to find target candidates in vmlinux BTF first */
+ main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
+ err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
+ if (err)
+ goto err_out;
-/*
- * Patch relocatable BPF instruction.
- *
- * Patched value is determined by relocation kind and target specification.
- * For field existence relocation target spec will be NULL if field is not
- * found.
- * Expected insn->imm value is determined using relocation kind and local
- * spec, and is checked before patching instruction. If actual insn->imm value
- * is wrong, bail out with error.
- *
- * Currently three kinds of BPF instructions are supported:
- * 1. rX = <imm> (assignment with immediate operand);
- * 2. rX += <imm> (arithmetic operations with immediate operand);
- */
-static int bpf_core_reloc_insn(struct bpf_program *prog,
- const struct bpf_field_reloc *relo,
- int relo_idx,
- const struct bpf_core_spec *local_spec,
- const struct bpf_core_spec *targ_spec)
-{
- __u32 orig_val, new_val;
- struct bpf_insn *insn;
- bool validate = true;
- int insn_idx, err;
- __u8 class;
+ /* if vmlinux BTF has any candidate, don't go for module BTFs */
+ if (cands->len)
+ return cands;
- if (relo->insn_off % sizeof(struct bpf_insn))
- return -EINVAL;
- insn_idx = relo->insn_off / sizeof(struct bpf_insn);
- insn = &prog->insns[insn_idx];
- class = BPF_CLASS(insn->code);
-
- if (relo->kind == BPF_FIELD_EXISTS) {
- orig_val = 1; /* can't generate EXISTS relo w/o local field */
- new_val = targ_spec ? 1 : 0;
- } else if (!targ_spec) {
- pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
- bpf_program__title(prog, false), relo_idx, insn_idx);
- insn->code = BPF_JMP | BPF_CALL;
- insn->dst_reg = 0;
- insn->src_reg = 0;
- insn->off = 0;
- /* if this instruction is reachable (not a dead code),
- * verifier will complain with the following message:
- * invalid func unknown#195896080
- */
- insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
- return 0;
- } else {
- err = bpf_core_calc_field_relo(prog, relo, local_spec,
- &orig_val, &validate);
- if (err)
- return err;
- err = bpf_core_calc_field_relo(prog, relo, targ_spec,
- &new_val, NULL);
- if (err)
- return err;
- }
+ /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
+ if (obj->btf_vmlinux_override)
+ return cands;
- switch (class) {
- case BPF_ALU:
- case BPF_ALU64:
- if (BPF_SRC(insn->code) != BPF_K)
- return -EINVAL;
- if (validate && insn->imm != orig_val) {
- pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, insn->imm, orig_val, new_val);
- return -EINVAL;
- }
- orig_val = insn->imm;
- insn->imm = new_val;
- pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
- bpf_program__title(prog, false), relo_idx, insn_idx,
- orig_val, new_val);
- break;
- case BPF_LDX:
- case BPF_ST:
- case BPF_STX:
- if (validate && insn->off != orig_val) {
- pr_warn("prog '%s': relo #%d: unexpected insn #%d (LD/LDX/ST/STX) value: got %u, exp %u -> %u\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, insn->off, orig_val, new_val);
- return -EINVAL;
- }
- if (new_val > SHRT_MAX) {
- pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, new_val);
- return -ERANGE;
- }
- orig_val = insn->off;
- insn->off = new_val;
- pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
- bpf_program__title(prog, false), relo_idx, insn_idx,
- orig_val, new_val);
- break;
- default:
- pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, insn->code, insn->src_reg, insn->dst_reg,
- insn->off, insn->imm);
- return -EINVAL;
+ /* now look through module BTFs, trying to still find candidates */
+ err = load_module_btfs(obj);
+ if (err)
+ goto err_out;
+
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ err = bpf_core_add_cands(&local_cand, local_essent_len,
+ obj->btf_modules[i].btf,
+ obj->btf_modules[i].name,
+ btf__type_cnt(obj->btf_vmlinux),
+ cands);
+ if (err)
+ goto err_out;
}
- return 0;
+ return cands;
+err_out:
+ bpf_core_free_cands(cands);
+ return ERR_PTR(err);
}
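/*
 * Editor's sketch (not part of this patch, BPF-side C): two "flavors" as
 * matched by the candidate search above. Both share the essential name
 * task_struct (everything before the last ___), so either local definition
 * can be matched against the kernel's struct task_struct; the field names
 * are purely illustrative.
 */
struct task_struct___pre { int old_field; } __attribute__((preserve_access_index));
struct task_struct___post { int new_field; } __attribute__((preserve_access_index));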
-/* Output spec definition in the format:
- * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
- * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
+/* Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follows slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ * kind should match for local and target types (i.e., STRUCT is not
+ * compatible with UNION);
+ * - for ENUMs, the size is ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
+ * - FUNC_PROTOs are compatible if they have compatible signature: same
+ * number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
*/
-static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
+int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id)
{
- const struct btf_type *t;
- const char *s;
- __u32 type_id;
- int i;
-
- type_id = spec->spec[0].type_id;
- t = btf__type_by_id(spec->btf, type_id);
- s = btf__name_by_offset(spec->btf, t->name_off);
- libbpf_print(level, "[%u] %s + ", type_id, s);
-
- for (i = 0; i < spec->raw_len; i++)
- libbpf_print(level, "%d%s", spec->raw_spec[i],
- i == spec->raw_len - 1 ? " => " : ":");
-
- libbpf_print(level, "%u.%u @ &x",
- spec->bit_offset / 8, spec->bit_offset % 8);
-
- for (i = 0; i < spec->len; i++) {
- if (spec->spec[i].name)
- libbpf_print(level, ".%s", spec->spec[i].name);
- else
- libbpf_print(level, "[%u]", spec->spec[i].idx);
- }
+ return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
+}
+int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id)
+{
+ return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
}
-static size_t bpf_core_hash_fn(const void *key, void *ctx)
+static size_t bpf_core_hash_fn(const long key, void *ctx)
{
- return (size_t)key;
+ return key;
}
-static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
+static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
{
return k1 == k2;
}
-static void *u32_as_hash_key(__u32 x)
+static int record_relo_core(struct bpf_program *prog,
+ const struct bpf_core_relo *core_relo, int insn_idx)
{
- return (void *)(uintptr_t)x;
+ struct reloc_desc *relos, *relo;
+
+ relos = libbpf_reallocarray(prog->reloc_desc,
+ prog->nr_reloc + 1, sizeof(*relos));
+ if (!relos)
+ return -ENOMEM;
+ relo = &relos[prog->nr_reloc];
+ relo->type = RELO_CORE;
+ relo->insn_idx = insn_idx;
+ relo->core_relo = core_relo;
+ prog->reloc_desc = relos;
+ prog->nr_reloc++;
+ return 0;
}
-/*
- * CO-RE relocate single instruction.
- *
- * The outline and important points of the algorithm:
- * 1. For given local type, find corresponding candidate target types.
- * Candidate type is a type with the same "essential" name, ignoring
- * everything after last triple underscore (___). E.g., `sample`,
- * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
- * for each other. Names with triple underscore are referred to as
- * "flavors" and are useful, among other things, to allow to
- * specify/support incompatible variations of the same kernel struct, which
- * might differ between different kernel versions and/or build
- * configurations.
- *
- * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
- * converter, when deduplicated BTF of a kernel still contains more than
- * one different types with the same name. In that case, ___2, ___3, etc
- * are appended starting from second name conflict. But start flavors are
- * also useful to be defined "locally", in BPF program, to extract same
- * data from incompatible changes between different kernel
- * versions/configurations. For instance, to handle field renames between
- * kernel versions, one can use two flavors of the struct name with the
- * same common name and use conditional relocations to extract that field,
- * depending on target kernel version.
- * 2. For each candidate type, try to match local specification to this
- * candidate target type. Matching involves finding corresponding
- * high-level spec accessors, meaning that all named fields should match,
- * as well as all array accesses should be within the actual bounds. Also,
- * types should be compatible (see bpf_core_fields_are_compat for details).
- * 3. It is supported and expected that there might be multiple flavors
- * matching the spec. As long as all the specs resolve to the same set of
- * offsets across all candidates, there is no error. If there is any
- * ambiguity, CO-RE relocation will fail. This is necessary to accomodate
- * imprefection of BTF deduplication, which can cause slight duplication of
- * the same BTF type, if some directly or indirectly referenced (by
- * pointer) type gets resolved to different actual types in different
- * object files. If such situation occurs, deduplicated BTF will end up
- * with two (or more) structurally identical types, which differ only in
- * types they refer to through pointer. This should be OK in most cases and
- * is not an error.
- * 4. Candidate types search is performed by linearly scanning through all
- * types in target BTF. It is anticipated that this is overall more
- * efficient memory-wise and not significantly worse (if not better)
- * CPU-wise compared to prebuilding a map from all local type names to
- * a list of candidate type names. It's also sped up by caching resolved
- * list of matching candidates per each local "root" type ID, that has at
- * least one bpf_field_reloc associated with it. This list is shared
- * between multiple relocations for the same type ID and is updated as some
- * of the candidates are pruned due to structural incompatibility.
- */
-static int bpf_core_reloc_field(struct bpf_program *prog,
- const struct bpf_field_reloc *relo,
+static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
+{
+ struct reloc_desc *relo;
+ int i;
+
+ for (i = 0; i < prog->nr_reloc; i++) {
+ relo = &prog->reloc_desc[i];
+ if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
+ continue;
+
+ return relo->core_relo;
+ }
+
+ return NULL;
+}
+
+static int bpf_core_resolve_relo(struct bpf_program *prog,
+ const struct bpf_core_relo *relo,
int relo_idx,
const struct btf *local_btf,
- const struct btf *targ_btf,
- struct hashmap *cand_cache)
-{
- const char *prog_name = bpf_program__title(prog, false);
- struct bpf_core_spec local_spec, cand_spec, targ_spec;
- const void *type_key = u32_as_hash_key(relo->type_id);
- const struct btf_type *local_type, *cand_type;
- const char *local_name, *cand_name;
- struct ids_vec *cand_ids;
- __u32 local_id, cand_id;
- const char *spec_str;
- int i, j, err;
-
- local_id = relo->type_id;
+ struct hashmap *cand_cache,
+ struct bpf_core_relo_res *targ_res)
+{
+ struct bpf_core_spec specs_scratch[3] = {};
+ struct bpf_core_cand_list *cands = NULL;
+ const char *prog_name = prog->name;
+ const struct btf_type *local_type;
+ const char *local_name;
+ __u32 local_id = relo->type_id;
+ int err;
+
local_type = btf__type_by_id(local_btf, local_id);
if (!local_type)
return -EINVAL;
local_name = btf__name_by_offset(local_btf, local_type->name_off);
- if (str_is_empty(local_name))
- return -EINVAL;
-
- spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
- if (str_is_empty(spec_str))
- return -EINVAL;
-
- err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
- if (err) {
- pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
- prog_name, relo_idx, local_id, local_name, spec_str,
- err);
+ if (!local_name)
return -EINVAL;
- }
- pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
- relo->kind);
- bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
- libbpf_print(LIBBPF_DEBUG, "\n");
-
- if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
- cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
- if (IS_ERR(cand_ids)) {
- pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
- prog_name, relo_idx, local_id, local_name,
- PTR_ERR(cand_ids));
- return PTR_ERR(cand_ids);
+ if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
+ !hashmap__find(cand_cache, local_id, &cands)) {
+ cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
+ if (IS_ERR(cands)) {
+ pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
+ prog_name, relo_idx, local_id, btf_kind_str(local_type),
+ local_name, PTR_ERR(cands));
+ return PTR_ERR(cands);
}
- err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
+ err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
if (err) {
- bpf_core_free_cands(cand_ids);
- return err;
- }
- }
-
- for (i = 0, j = 0; i < cand_ids->len; i++) {
- cand_id = cand_ids->data[i];
- cand_type = btf__type_by_id(targ_btf, cand_id);
- cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
-
- err = bpf_core_spec_match(&local_spec, targ_btf,
- cand_id, &cand_spec);
- pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
- prog_name, relo_idx, i, cand_name);
- bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
- libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
- if (err < 0) {
- pr_warn("prog '%s': relo #%d: matching error: %d\n",
- prog_name, relo_idx, err);
+ bpf_core_free_cands(cands);
return err;
}
- if (err == 0)
- continue;
-
- if (j == 0) {
- targ_spec = cand_spec;
- } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
- /* if there are many candidates, they should all
- * resolve to the same bit offset
- */
- pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
- prog_name, relo_idx, cand_spec.bit_offset,
- targ_spec.bit_offset);
- return -EINVAL;
- }
-
- cand_ids->data[j++] = cand_spec.spec[0].type_id;
- }
-
- /*
- * For BPF_FIELD_EXISTS relo or when used BPF program has field
- * existence checks or kernel version/config checks, it's expected
- * that we might not find any candidates. In this case, if field
- * wasn't found in any candidate, the list of candidates shouldn't
- * change at all, we'll just handle relocating appropriately,
- * depending on relo's kind.
- */
- if (j > 0)
- cand_ids->len = j;
-
- /*
- * If no candidates were found, it might be both a programmer error,
- * as well as expected case, depending whether instruction w/
- * relocation is guarded in some way that makes it unreachable (dead
- * code) if relocation can't be resolved. This is handled in
- * bpf_core_reloc_insn() uniformly by replacing that instruction with
- * BPF helper call insn (using invalid helper ID). If that instruction
- * is indeed unreachable, then it will be ignored and eliminated by
- * verifier. If it was an error, then verifier will complain and point
- * to a specific instruction number in its log.
- */
- if (j == 0)
- pr_debug("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
- prog_name, relo_idx, local_id, local_name, spec_str);
-
- /* bpf_core_reloc_insn should know how to handle missing targ_spec */
- err = bpf_core_reloc_insn(prog, relo, relo_idx, &local_spec,
- j ? &targ_spec : NULL);
- if (err) {
- pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
- prog_name, relo_idx, relo->insn_off, err);
- return -EINVAL;
}
- return 0;
+ return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
+ targ_res);
}
static int
-bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
+bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
{
const struct btf_ext_info_sec *sec;
- const struct bpf_field_reloc *rec;
+ struct bpf_core_relo_res targ_res;
+ const struct bpf_core_relo *rec;
const struct btf_ext_info *seg;
struct hashmap_entry *entry;
struct hashmap *cand_cache = NULL;
struct bpf_program *prog;
- struct btf *targ_btf;
+ struct bpf_insn *insn;
const char *sec_name;
- int i, err = 0;
+ int i, err = 0, insn_idx, sec_idx, sec_num;
- if (targ_btf_path)
- targ_btf = btf__parse_elf(targ_btf_path, NULL);
- else
- targ_btf = libbpf_find_kernel_btf();
- if (IS_ERR(targ_btf)) {
- pr_warn("failed to get target BTF: %ld\n", PTR_ERR(targ_btf));
- return PTR_ERR(targ_btf);
+ if (obj->btf_ext->core_relo_info.len == 0)
+ return 0;
+
+ if (targ_btf_path) {
+ obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
+ err = libbpf_get_error(obj->btf_vmlinux_override);
+ if (err) {
+ pr_warn("failed to parse target BTF: %d\n", err);
+ return err;
+ }
}
cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
@@ -4598,166 +5726,711 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
goto out;
}
- seg = &obj->btf_ext->field_reloc_info;
+ seg = &obj->btf_ext->core_relo_info;
+ sec_num = 0;
for_each_btf_ext_sec(seg, sec) {
+ sec_idx = seg->sec_idxs[sec_num];
+ sec_num++;
+
sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
if (str_is_empty(sec_name)) {
err = -EINVAL;
goto out;
}
- prog = bpf_object__find_program_by_title(obj, sec_name);
- if (!prog) {
- pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
- sec_name);
- err = -EINVAL;
- goto out;
- }
- pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
- sec_name, sec->num_info);
+ pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
- err = bpf_core_reloc_field(prog, rec, i, obj->btf,
- targ_btf, cand_cache);
+ if (rec->insn_off % BPF_INSN_SZ)
+ return -EINVAL;
+ insn_idx = rec->insn_off / BPF_INSN_SZ;
+ prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
+ if (!prog) {
+ /* When a __weak subprog is "overridden" by another instance
+ * of the subprog from a different object file, the linker still
+ * appends all the .BTF.ext info that used to belong to that
+ * eliminated subprogram.
+ * This is similar to what the x86-64 linker does for relocations.
+ * So just ignore such relocations, just like we ignore
+ * subprog instructions when discovering subprograms.
+ */
+ pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
+ sec_name, i, insn_idx);
+ continue;
+ }
+ /* no need to apply CO-RE relocation if the program is
+ * not going to be loaded
+ */
+ if (!prog->autoload)
+ continue;
+
+ /* adjust insn_idx from section frame of reference to the local
+ * program's frame of reference; (sub-)program code is not yet
+ * relocated, so it's enough to just subtract in-section offset
+ */
+ insn_idx = insn_idx - prog->sec_insn_off;
+ if (insn_idx >= prog->insns_cnt)
+ return -EINVAL;
+ insn = &prog->insns[insn_idx];
+
+ err = record_relo_core(prog, rec, insn_idx);
+ if (err) {
+ pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
+ prog->name, i, err);
+ goto out;
+ }
+
+ if (prog->obj->gen_loader)
+ continue;
+
+ err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
if (err) {
pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
- sec_name, i, err);
+ prog->name, i, err);
+ goto out;
+ }
+
+ err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
+ if (err) {
+ pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
+ prog->name, i, insn_idx, err);
goto out;
}
}
}
out:
- btf__free(targ_btf);
+ /* obj->btf_vmlinux and module BTFs are freed after object load */
+ btf__free(obj->btf_vmlinux_override);
+ obj->btf_vmlinux_override = NULL;
+
if (!IS_ERR_OR_NULL(cand_cache)) {
hashmap__for_each_entry(cand_cache, entry, i) {
- bpf_core_free_cands(entry->value);
+ bpf_core_free_cands(entry->pvalue);
}
hashmap__free(cand_cache);
}
return err;
}
-static int
-bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
+/* base map load ldimm64 special constant, used also for log fixup logic */
+#define POISON_LDIMM64_MAP_BASE 2001000000
+#define POISON_LDIMM64_MAP_PFX "200100"
+
+static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
+ int insn_idx, struct bpf_insn *insn,
+ int map_idx, const struct bpf_map *map)
{
- int err = 0;
+ int i;
- if (obj->btf_ext->field_reloc_info.len)
- err = bpf_core_reloc_fields(obj, targ_btf_path);
+ pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
+ prog->name, relo_idx, insn_idx, map_idx, map->name);
- return err;
+ /* we turn single ldimm64 into two identical invalid calls */
+ for (i = 0; i < 2; i++) {
+ insn->code = BPF_JMP | BPF_CALL;
+ insn->dst_reg = 0;
+ insn->src_reg = 0;
+ insn->off = 0;
+ /* if this instruction is reachable (not dead code), the
+ * verifier will complain with something like:
+ * invalid func unknown#2001000123
+ * where the lower 123 is the map index into the obj->maps[] array
+ */
+ insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
+
+ insn++;
+ }
+}
+
+/* unresolved kfunc call special constant, used also for log fixup logic */
+#define POISON_CALL_KFUNC_BASE 2002000000
+#define POISON_CALL_KFUNC_PFX "2002"
+
+static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
+ int insn_idx, struct bpf_insn *insn,
+ int ext_idx, const struct extern_desc *ext)
+{
+ pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
+ prog->name, relo_idx, insn_idx, ext->name);
+
+ /* we turn kfunc call into invalid helper call with identifiable constant */
+ insn->code = BPF_JMP | BPF_CALL;
+ insn->dst_reg = 0;
+ insn->src_reg = 0;
+ insn->off = 0;
+ /* if this instruction is reachable (not dead code), the
+ * verifier will complain with something like:
+ * invalid func unknown#2002000123
+ * where the lower 123 is the extern index into the obj->externs[] array
+ */
+ insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
}
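/*
 * Editor's sketch (not part of this patch): how a later log-fixup pass can
 * map a poisoned immediate back to the map that caused it, using the
 * POISON_LDIMM64_MAP_BASE constant defined above. The upper bound of the
 * range check is illustrative.
 */
static bool example_decode_poisoned_map_imm(int imm, int *map_idx)
{
        if (imm < POISON_LDIMM64_MAP_BASE ||
            imm >= POISON_LDIMM64_MAP_BASE + 1000000)
                return false;
        *map_idx = imm - POISON_LDIMM64_MAP_BASE;
        return true;
}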
+/* Relocate data references within program code:
+ * - map references;
+ * - global variable references;
+ * - extern references.
+ */
static int
-bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
- struct reloc_desc *relo)
+bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
{
- struct bpf_insn *insn, *new_insn;
- struct bpf_program *text;
- size_t new_cnt;
- int err;
+ int i;
- if (prog->idx != obj->efile.text_shndx && prog->main_prog_cnt == 0) {
- text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
- if (!text) {
- pr_warn("no .text section found yet relo into text exist\n");
- return -LIBBPF_ERRNO__RELOC;
- }
- new_cnt = prog->insns_cnt + text->insns_cnt;
- new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
- if (!new_insn) {
- pr_warn("oom in prog realloc\n");
- return -ENOMEM;
+ for (i = 0; i < prog->nr_reloc; i++) {
+ struct reloc_desc *relo = &prog->reloc_desc[i];
+ struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+ const struct bpf_map *map;
+ struct extern_desc *ext;
+
+ switch (relo->type) {
+ case RELO_LD64:
+ map = &obj->maps[relo->map_idx];
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
+ insn[0].imm = relo->map_idx;
+ } else if (map->autocreate) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_FD;
+ insn[0].imm = map->fd;
+ } else {
+ poison_map_ldimm64(prog, i, relo->insn_idx, insn,
+ relo->map_idx, map);
+ }
+ break;
+ case RELO_DATA:
+ map = &obj->maps[relo->map_idx];
+ insn[1].imm = insn[0].imm + relo->sym_off;
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
+ insn[0].imm = relo->map_idx;
+ } else if (map->autocreate) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn[0].imm = map->fd;
+ } else {
+ poison_map_ldimm64(prog, i, relo->insn_idx, insn,
+ relo->map_idx, map);
+ }
+ break;
+ case RELO_EXTERN_LD64:
+ ext = &obj->externs[relo->ext_idx];
+ if (ext->type == EXT_KCFG) {
+ if (obj->gen_loader) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
+ insn[0].imm = obj->kconfig_map_idx;
+ } else {
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
+ }
+ insn[1].imm = ext->kcfg.data_off;
+ } else /* EXT_KSYM */ {
+ if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
+ insn[0].src_reg = BPF_PSEUDO_BTF_ID;
+ insn[0].imm = ext->ksym.kernel_btf_id;
+ insn[1].imm = ext->ksym.kernel_btf_obj_fd;
+ } else { /* typeless ksyms or unresolved typed ksyms */
+ insn[0].imm = (__u32)ext->ksym.addr;
+ insn[1].imm = ext->ksym.addr >> 32;
+ }
+ }
+ break;
+ case RELO_EXTERN_CALL:
+ ext = &obj->externs[relo->ext_idx];
+ insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
+ if (ext->is_set) {
+ insn[0].imm = ext->ksym.kernel_btf_id;
+ insn[0].off = ext->ksym.btf_fd_idx;
+ } else { /* unresolved weak kfunc call */
+ poison_kfunc_call(prog, i, relo->insn_idx, insn,
+ relo->ext_idx, ext);
+ }
+ break;
+ case RELO_SUBPROG_ADDR:
+ if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
+ pr_warn("prog '%s': relo #%d: bad insn\n",
+ prog->name, i);
+ return -EINVAL;
+ }
+ /* handled already */
+ break;
+ case RELO_CALL:
+ /* handled already */
+ break;
+ case RELO_CORE:
+ /* will be handled by bpf_program_record_relos() */
+ break;
+ default:
+ pr_warn("prog '%s': relo #%d: bad relo type %d\n",
+ prog->name, i, relo->type);
+ return -EINVAL;
}
- prog->insns = new_insn;
+ }
- if (obj->btf_ext) {
- err = bpf_program_reloc_btf_ext(prog, obj,
- text->section_name,
- prog->insns_cnt);
- if (err)
- return err;
+ return 0;
+}
+
+static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
+ const struct bpf_program *prog,
+ const struct btf_ext_info *ext_info,
+ void **prog_info, __u32 *prog_rec_cnt,
+ __u32 *prog_rec_sz)
+{
+ void *copy_start = NULL, *copy_end = NULL;
+ void *rec, *rec_end, *new_prog_info;
+ const struct btf_ext_info_sec *sec;
+ size_t old_sz, new_sz;
+ int i, sec_num, sec_idx, off_adj;
+
+ sec_num = 0;
+ for_each_btf_ext_sec(ext_info, sec) {
+ sec_idx = ext_info->sec_idxs[sec_num];
+ sec_num++;
+ if (prog->sec_idx != sec_idx)
+ continue;
+
+ for_each_btf_ext_rec(ext_info, sec, i, rec) {
+ __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
+
+ if (insn_off < prog->sec_insn_off)
+ continue;
+ if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
+ break;
+
+ if (!copy_start)
+ copy_start = rec;
+ copy_end = rec + ext_info->rec_size;
}
- memcpy(new_insn + prog->insns_cnt, text->insns,
- text->insns_cnt * sizeof(*insn));
- prog->main_prog_cnt = prog->insns_cnt;
- prog->insns_cnt = new_cnt;
- pr_debug("added %zd insn from %s to prog %s\n",
- text->insns_cnt, text->section_name,
- prog->section_name);
+ if (!copy_start)
+ return -ENOENT;
+
+ /* append func/line info of a given (sub-)program to the main
+ * program func/line info
+ */
+ old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
+ new_sz = old_sz + (copy_end - copy_start);
+ new_prog_info = realloc(*prog_info, new_sz);
+ if (!new_prog_info)
+ return -ENOMEM;
+ *prog_info = new_prog_info;
+ *prog_rec_cnt = new_sz / ext_info->rec_size;
+ memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
+
+ /* Kernel instruction offsets are in units of 8-byte
+ * instructions, while .BTF.ext instruction offsets generated
+ * by Clang are in units of bytes. So convert Clang offsets
+ * into kernel offsets and adjust offset according to program
+ * relocated position.
+ */
+ off_adj = prog->sub_insn_off - prog->sec_insn_off;
+ rec = new_prog_info + old_sz;
+ rec_end = new_prog_info + new_sz;
+ for (; rec < rec_end; rec += ext_info->rec_size) {
+ __u32 *insn_off = rec;
+
+ *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
+ }
+ *prog_rec_sz = ext_info->rec_size;
+ return 0;
}
- insn = &prog->insns[relo->insn_idx];
- insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
- return 0;
+ return -ENOENT;
}
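/* A worked sketch (not part of this patch) of the byte-to-instruction offset
 * conversion described above, using hypothetical values: a .BTF.ext record at
 * Clang byte offset 64 inside a subprog that starts at section instruction 4
 * and was appended at main-prog instruction 20.
 */
static void btf_ext_off_example(void)
{
	unsigned int clang_byte_off = 64;             /* raw .BTF.ext insn_off */
	unsigned int sec_insn_off = 4;                /* prog->sec_insn_off */
	unsigned int sub_insn_off = 20;               /* prog->sub_insn_off */
	unsigned int off_adj = sub_insn_off - sec_insn_off;
	unsigned int kern_insn_off = clang_byte_off / 8 + off_adj; /* == 24 */

	(void)kern_insn_off;
}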
static int
-bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
+reloc_prog_func_and_line_info(const struct bpf_object *obj,
+ struct bpf_program *main_prog,
+ const struct bpf_program *prog)
{
- int i, err;
+ int err;
- if (!prog)
+ /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
+ * support func/line info
+ */
+ if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
return 0;
- if (obj->btf_ext) {
- err = bpf_program_reloc_btf_ext(prog, obj,
- prog->section_name, 0);
- if (err)
+ /* only attempt func info relocation if main program's func_info
+ * relocation was successful
+ */
+ if (main_prog != prog && !main_prog->func_info)
+ goto line_info;
+
+ err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
+ &main_prog->func_info,
+ &main_prog->func_info_cnt,
+ &main_prog->func_info_rec_size);
+ if (err) {
+ if (err != -ENOENT) {
+ pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
+ prog->name, err);
+ return err;
+ }
+ if (main_prog->func_info) {
+ /*
+ * Some info has already been found, but there was a problem
+ * with the last btf_ext reloc. Must error out.
+ */
+ pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
return err;
+ }
+ /* There was a problem loading the very first info. Ignore the rest. */
+ pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
+ prog->name);
}
- if (!prog->reloc_desc)
+line_info:
+ /* don't relocate line info if main program's relocation failed */
+ if (main_prog != prog && !main_prog->line_info)
return 0;
- for (i = 0; i < prog->nr_reloc; i++) {
- struct reloc_desc *relo = &prog->reloc_desc[i];
- struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+ err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
+ &main_prog->line_info,
+ &main_prog->line_info_cnt,
+ &main_prog->line_info_rec_size);
+ if (err) {
+ if (err != -ENOENT) {
+ pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
+ prog->name, err);
+ return err;
+ }
+ if (main_prog->line_info) {
+ /*
+ * Some info has already been found, but there was a problem
+ * with the last btf_ext reloc. Must error out.
+ */
+ pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
+ return err;
+ }
+ /* There was a problem loading the very first info. Ignore the rest. */
+ pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
+ prog->name);
+ }
+ return 0;
+}
+
+static int cmp_relo_by_insn_idx(const void *key, const void *elem)
+{
+ size_t insn_idx = *(const size_t *)key;
+ const struct reloc_desc *relo = elem;
+
+ if (insn_idx == relo->insn_idx)
+ return 0;
+ return insn_idx < relo->insn_idx ? -1 : 1;
+}
+
+static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
+{
+ if (!prog->nr_reloc)
+ return NULL;
+ return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
+ sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
+}
+
+static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
+{
+ int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
+ struct reloc_desc *relos;
+ int i;
+
+ if (main_prog == subprog)
+ return 0;
+ relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
+ if (!relos)
+ return -ENOMEM;
+ if (subprog->nr_reloc)
+ memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
+ sizeof(*relos) * subprog->nr_reloc);
+
+ for (i = main_prog->nr_reloc; i < new_cnt; i++)
+ relos[i].insn_idx += subprog->sub_insn_off;
+ /* After insn_idx adjustment the 'relos' array is still sorted
+ * by insn_idx and doesn't break bsearch.
+ */
+ main_prog->reloc_desc = relos;
+ main_prog->nr_reloc = new_cnt;
+ return 0;
+}
+
+static int
+bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
+ struct bpf_program *prog)
+{
+ size_t sub_insn_idx, insn_idx, new_cnt;
+ struct bpf_program *subprog;
+ struct bpf_insn *insns, *insn;
+ struct reloc_desc *relo;
+ int err;
+
+ err = reloc_prog_func_and_line_info(obj, main_prog, prog);
+ if (err)
+ return err;
+
+ for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
+ insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
+ if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
+ continue;
+
+ relo = find_prog_insn_relo(prog, insn_idx);
+ if (relo && relo->type == RELO_EXTERN_CALL)
+ /* kfunc relocations will be handled later
+ * in bpf_object__relocate_data()
+ */
+ continue;
+ if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
+ pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
+ prog->name, insn_idx, relo->type);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (relo) {
+ /* sub-program instruction index is a combination of
+ * an offset of a symbol pointed to by relocation and
+ * call instruction's imm field; for global functions,
+ * call always has imm = -1, but for static functions
+ * relocation is against STT_SECTION and insn->imm
+ * points to a start of a static function
+ *
+ * for subprog addr relocation, the relo->sym_off + insn->imm is
+ * the byte offset in the corresponding section.
+ */
+ if (relo->type == RELO_CALL)
+ sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
+ else
+ sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
+ } else if (insn_is_pseudo_func(insn)) {
+ /*
+ * RELO_SUBPROG_ADDR relo is always emitted even if both
+ * functions are in the same section, so it shouldn't reach here.
+ */
+ pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
+ prog->name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ } else {
+ /* if subprogram call is to a static function within
+ * the same ELF section, there won't be any relocation
+ * emitted, but it also means there is no additional
+ * offset necessary, insns->imm is relative to
+ * instruction's original position within the section
+ */
+ sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
+ }
- if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
- pr_warn("relocation out of range: '%s'\n",
- prog->section_name);
+ /* we enforce that sub-programs should be in .text section */
+ subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
+ if (!subprog) {
+ pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
+ prog->name);
return -LIBBPF_ERRNO__RELOC;
}
- switch (relo->type) {
- case RELO_LD64:
- insn[0].src_reg = BPF_PSEUDO_MAP_FD;
- insn[0].imm = obj->maps[relo->map_idx].fd;
- break;
- case RELO_DATA:
- insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
- insn[1].imm = insn[0].imm + relo->sym_off;
- insn[0].imm = obj->maps[relo->map_idx].fd;
- break;
- case RELO_EXTERN:
- insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
- insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
- insn[1].imm = relo->sym_off;
- break;
- case RELO_CALL:
- err = bpf_program__reloc_text(prog, obj, relo);
+ /* if it's the first call instruction calling into this
+ * subprogram (meaning this subprog hasn't been processed
+ * yet) within the context of current main program:
+ * - append it at the end of main program's instructions block;
+ * - process it recursively, while current program is put on hold;
+ * - if that subprogram calls some other not yet processed
+ * subprogram, same thing will happen recursively until
+ * there are no more unprocessed subprograms left to append
+ * and relocate.
+ */
+ if (subprog->sub_insn_off == 0) {
+ subprog->sub_insn_off = main_prog->insns_cnt;
+
+ new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
+ insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
+ if (!insns) {
+ pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
+ return -ENOMEM;
+ }
+ main_prog->insns = insns;
+ main_prog->insns_cnt = new_cnt;
+
+ memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
+ subprog->insns_cnt * sizeof(*insns));
+
+ pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
+ main_prog->name, subprog->insns_cnt, subprog->name);
+
+ /* The subprog insns are now appended. Append its relos too. */
+ err = append_subprog_relos(main_prog, subprog);
+ if (err)
+ return err;
+ err = bpf_object__reloc_code(obj, main_prog, subprog);
if (err)
return err;
- break;
- default:
- pr_warn("relo #%d: bad relo type %d\n", i, relo->type);
- return -EINVAL;
}
+
+ /* main_prog->insns memory could have been re-allocated, so
+ * calculate pointer again
+ */
+ insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
+ /* calculate correct instruction position within current main
+ * prog; each main prog can have a different set of
+ * subprograms appended (potentially in different order as
+ * well), so position of any subprog can be different for
+ * different main programs
+ */
+ insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
+
+ pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
+ prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
}
- zfree(&prog->reloc_desc);
- prog->nr_reloc = 0;
return 0;
}
+/*
+ * Relocate sub-program calls.
+ *
+ * Algorithm operates as follows. Each entry-point BPF program (referred to as
+ * main prog) is processed separately. Each subprog (a non-entry function
+ * that can be called from either entry progs or other subprogs) gets its
+ * sub_insn_off reset to zero. This serves as an indicator that this subprogram
+ * hasn't yet been appended and relocated within current main prog. Once it's
+ * relocated, sub_insn_off will point at the position within current main prog
+ * where given subprog was appended. This will further be used to relocate all
+ * the call instructions jumping into this subprog.
+ *
+ * We start with main program and process all call instructions. If the call
+ * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
+ * is zero), subprog instructions are appended at the end of main program's
+ * instruction array. Then main program is "put on hold" while we recursively
+ * process newly appended subprogram. If that subprogram calls into another
+ * subprogram that hasn't been appended, new subprogram is appended again to
+ * the *main* prog's instructions (subprog's instructions are always left
+ * untouched, as they need to be in unmodified state for subsequent main progs
+ * and subprog instructions are always sent to the kernel only as part of a main prog) and
+ * the process continues recursively. Once all the subprogs called from a main
+ * prog or any of its subprogs are appended (and relocated), all their
+ * positions within finalized instructions array are known, so it's easy to
+ * rewrite call instructions with correct relative offsets, corresponding to
+ * desired target subprog.
+ *
+ * It's important to realize that some subprogs might not be called from a given
+ * main prog or any of its called/used subprogs. Those will keep their
+ * subprog->sub_insn_off as zero at all times and won't be appended to current
+ * main prog and won't be relocated within the context of current main prog.
+ * They might still be used from other main progs later.
+ *
+ * Visually this process can be shown as below. Suppose we have two main
+ * programs mainA and mainB and BPF object contains three subprogs: subA,
+ * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
+ * subC both call subB:
+ *
+ * +--------+ +-------+
+ * | v v |
+ * +--+---+ +--+-+-+ +---+--+
+ * | subA | | subB | | subC |
+ * +--+---+ +------+ +---+--+
+ * ^ ^
+ * | |
+ * +---+-------+ +------+----+
+ * | mainA | | mainB |
+ * +-----------+ +-----------+
+ *
+ * We'll start relocating mainA, will find subA, append it and start
+ * processing sub A recursively:
+ *
+ * +-----------+------+
+ * | mainA | subA |
+ * +-----------+------+
+ *
+ * At this point we notice that subB is used from subA, so we append it and
+ * relocate (there are no further subcalls from subB):
+ *
+ * +-----------+------+------+
+ * | mainA | subA | subB |
+ * +-----------+------+------+
+ *
+ * At this point, we relocate subA calls, then go one level up and finish with
+ * relocating mainA calls. mainA is done.
+ *
+ * For mainB, the process is similar but results in a different order. We start with
+ * mainB and skip subA and subB, as mainB never calls them (at least
+ * directly), but we see subC is needed, so we append and start processing it:
+ *
+ * +-----------+------+
+ * | mainB | subC |
+ * +-----------+------+
+ * Now we see subC needs subB, so we go back to it, append and relocate it:
+ *
+ * +-----------+------+------+
+ * | mainB | subC | subB |
+ * +-----------+------+------+
+ *
+ * At this point we unwind recursion, relocate calls in subC, then in mainB.
+ */
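/* A minimal BPF C sketch (not part of this patch) of an object with the call
 * graph pictured above; all names are hypothetical and __noinline/SEC() come
 * from bpf_helpers.h:
 *
 *   static __noinline int subB(int x) { return x + 1; }
 *   static __noinline int subA(int x) { return subB(x) * 2; }
 *   static __noinline int subC(int x) { return subB(x) * 3; }
 *
 *   SEC("tracepoint/syscalls/sys_enter_getpid")
 *   int mainA(void *ctx) { return subA(1); }
 *
 *   SEC("tracepoint/syscalls/sys_enter_getppid")
 *   int mainB(void *ctx) { return subC(1); }
 *
 * Static functions without their own SEC() land in .text, which is where the
 * code below looks for subprograms to append and relocate.
 */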
+static int
+bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
+{
+ struct bpf_program *subprog;
+ int i, err;
+
+ /* mark all subprogs as not relocated (yet) within the context of
+ * current main program
+ */
+ for (i = 0; i < obj->nr_programs; i++) {
+ subprog = &obj->programs[i];
+ if (!prog_is_subprog(obj, subprog))
+ continue;
+
+ subprog->sub_insn_off = 0;
+ }
+
+ err = bpf_object__reloc_code(obj, prog, prog);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+bpf_object__free_relocs(struct bpf_object *obj)
+{
+ struct bpf_program *prog;
+ int i;
+
+ /* free up relocation descriptors */
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ zfree(&prog->reloc_desc);
+ prog->nr_reloc = 0;
+ }
+}
+
+static int cmp_relocs(const void *_a, const void *_b)
+{
+ const struct reloc_desc *a = _a;
+ const struct reloc_desc *b = _b;
+
+ if (a->insn_idx != b->insn_idx)
+ return a->insn_idx < b->insn_idx ? -1 : 1;
+
+ /* no two relocations should have the same insn_idx, but ... */
+ if (a->type != b->type)
+ return a->type < b->type ? -1 : 1;
+
+ return 0;
+}
+
+static void bpf_object__sort_relos(struct bpf_object *obj)
+{
+ int i;
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ struct bpf_program *p = &obj->programs[i];
+
+ if (!p->nr_reloc)
+ continue;
+
+ qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
+ }
+}
+
static int
bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
struct bpf_program *prog;
- size_t i;
+ size_t i, j;
int err;
if (obj->btf_ext) {
@@ -4767,310 +6440,854 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
err);
return err;
}
+ bpf_object__sort_relos(obj);
}
- /* ensure .text is relocated first, as it's going to be copied as-is
- * later for sub-program calls
+
+ /* Before relocating calls, pre-process relocations and mark the
+ * few ld_imm64 instructions that point to subprogs.
+ * Otherwise bpf_object__reloc_code() later would have to consider
+ * all ld_imm64 insns as relocation candidates. That would
+ * slow down relocation, since the number of find_prog_insn_relo()
+ * calls would increase and most of them would fail to find a relo.
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- if (prog->idx != obj->efile.text_shndx)
+ for (j = 0; j < prog->nr_reloc; j++) {
+ struct reloc_desc *relo = &prog->reloc_desc[j];
+ struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+
+ /* mark the insn, so it's recognized by insn_is_pseudo_func() */
+ if (relo->type == RELO_SUBPROG_ADDR)
+ insn[0].src_reg = BPF_PSEUDO_FUNC;
+ }
+ }
+
+ /* relocate subprogram calls and append used subprograms to main
+ * programs; each copy of subprogram code needs to be relocated
+ * differently for each main program, because its code location might
+ * have changed.
+ * Append subprog relos to main programs to allow data relos to be
+ * processed after text is completely relocated.
+ */
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ /* sub-program's sub-calls are relocated within the context of
+ * its main program only
+ */
+ if (prog_is_subprog(obj, prog))
+ continue;
+ if (!prog->autoload)
continue;
- err = bpf_program__relocate(prog, obj);
+ err = bpf_object__relocate_calls(obj, prog);
if (err) {
- pr_warn("failed to relocate '%s'\n", prog->section_name);
+ pr_warn("prog '%s': failed to relocate calls: %d\n",
+ prog->name, err);
return err;
}
- break;
}
- /* now relocate everything but .text, which by now is relocated
- * properly, so we can copy raw sub-program instructions as is safely
- */
+ /* Process data relos for main programs */
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- if (prog->idx == obj->efile.text_shndx)
+ if (prog_is_subprog(obj, prog))
continue;
-
- err = bpf_program__relocate(prog, obj);
+ if (!prog->autoload)
+ continue;
+ err = bpf_object__relocate_data(obj, prog);
if (err) {
- pr_warn("failed to relocate '%s'\n", prog->section_name);
+ pr_warn("prog '%s': failed to relocate data references: %d\n",
+ prog->name, err);
return err;
}
}
+
return 0;
}
-static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
- GElf_Shdr *shdr,
- Elf_Data *data);
+static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
+ Elf64_Shdr *shdr, Elf_Data *data);
-static int bpf_object__collect_reloc(struct bpf_object *obj)
+static int bpf_object__collect_map_relos(struct bpf_object *obj,
+ Elf64_Shdr *shdr, Elf_Data *data)
{
- int i, err;
+ const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
+ int i, j, nrels, new_sz;
+ const struct btf_var_secinfo *vi = NULL;
+ const struct btf_type *sec, *var, *def;
+ struct bpf_map *map = NULL, *targ_map = NULL;
+ struct bpf_program *targ_prog = NULL;
+ bool is_prog_array, is_map_in_map;
+ const struct btf_member *member;
+ const char *name, *mname, *type;
+ unsigned int moff;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
+ void *tmp;
+
+ if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
+ return -EINVAL;
+ sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
+ if (!sec)
+ return -EINVAL;
+
+ nrels = shdr->sh_size / shdr->sh_entsize;
+ for (i = 0; i < nrels; i++) {
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
+ pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ if (!sym) {
+ pr_warn(".maps relo #%d: symbol %zx not found\n",
+ i, (size_t)ELF64_R_SYM(rel->r_info));
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ name = elf_sym_str(obj, sym->st_name) ?: "<?>";
+
+ pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
+ i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
+ (size_t)rel->r_offset, sym->st_name, name);
+
+ for (j = 0; j < obj->nr_maps; j++) {
+ map = &obj->maps[j];
+ if (map->sec_idx != obj->efile.btf_maps_shndx)
+ continue;
+
+ vi = btf_var_secinfos(sec) + map->btf_var_idx;
+ if (vi->offset <= rel->r_offset &&
+ rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
+ break;
+ }
+ if (j == obj->nr_maps) {
+ pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
+ i, name, (size_t)rel->r_offset);
+ return -EINVAL;
+ }
+
+ is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
+ is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
+ type = is_map_in_map ? "map" : "prog";
+ if (is_map_in_map) {
+ if (sym->st_shndx != obj->efile.btf_maps_shndx) {
+ pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
+ i, name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
+ map->def.key_size != sizeof(int)) {
+ pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
+ i, map->name, sizeof(int));
+ return -EINVAL;
+ }
+ targ_map = bpf_object__find_map_by_name(obj, name);
+ if (!targ_map) {
+ pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
+ i, name);
+ return -ESRCH;
+ }
+ } else if (is_prog_array) {
+ targ_prog = bpf_object__find_program_by_name(obj, name);
+ if (!targ_prog) {
+ pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
+ i, name);
+ return -ESRCH;
+ }
+ if (targ_prog->sec_idx != sym->st_shndx ||
+ targ_prog->sec_insn_off * 8 != sym->st_value ||
+ prog_is_subprog(obj, targ_prog)) {
+ pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
+ i, name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ var = btf__type_by_id(obj->btf, vi->type);
+ def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
+ if (btf_vlen(def) == 0)
+ return -EINVAL;
+ member = btf_members(def) + btf_vlen(def) - 1;
+ mname = btf__name_by_offset(obj->btf, member->name_off);
+ if (strcmp(mname, "values"))
+ return -EINVAL;
- if (!obj_elf_valid(obj)) {
- pr_warn("Internal error: elf object is closed\n");
- return -LIBBPF_ERRNO__INTERNAL;
+ moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
+ if (rel->r_offset - vi->offset < moff)
+ return -EINVAL;
+
+ moff = rel->r_offset - vi->offset - moff;
+ /* here we use BPF pointer size, which is always 64 bit, as we
+ * are parsing ELF that was built for BPF target
+ */
+ if (moff % bpf_ptr_sz)
+ return -EINVAL;
+ moff /= bpf_ptr_sz;
+ if (moff >= map->init_slots_sz) {
+ new_sz = moff + 1;
+ tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
+ if (!tmp)
+ return -ENOMEM;
+ map->init_slots = tmp;
+ memset(map->init_slots + map->init_slots_sz, 0,
+ (new_sz - map->init_slots_sz) * host_ptr_sz);
+ map->init_slots_sz = new_sz;
+ }
+ map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
+
+ pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
+ i, map->name, moff, type, name);
}
- for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
- GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
- Elf_Data *data = obj->efile.reloc_sects[i].data;
- int idx = shdr->sh_info;
- struct bpf_program *prog;
+ return 0;
+}
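/* A BPF-side sketch (not part of this patch) of the kind of BTF-defined
 * declaration that produces the ".maps" relocations handled above: the
 * initializer of the "values" member references another map (map-in-map).
 * Map names are hypothetical; __uint/__type/__array come from bpf_helpers.h.
 *
 *   struct inner_map {
 *       __uint(type, BPF_MAP_TYPE_ARRAY);
 *       __uint(max_entries, 1);
 *       __type(key, int);
 *       __type(value, int);
 *   } inner SEC(".maps");
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *       __uint(max_entries, 1);
 *       __type(key, int);
 *       __array(values, struct inner_map);
 *   } outer SEC(".maps") = {
 *       .values = { [0] = &inner },
 *   };
 */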
+
+static int bpf_object__collect_relos(struct bpf_object *obj)
+{
+ int i, err;
+
+ for (i = 0; i < obj->efile.sec_cnt; i++) {
+ struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+ int idx;
+
+ if (sec_desc->sec_type != SEC_RELO)
+ continue;
+
+ shdr = sec_desc->shdr;
+ data = sec_desc->data;
+ idx = shdr->sh_info;
if (shdr->sh_type != SHT_REL) {
pr_warn("internal error at %d\n", __LINE__);
return -LIBBPF_ERRNO__INTERNAL;
}
- if (idx == obj->efile.st_ops_shndx) {
- err = bpf_object__collect_struct_ops_map_reloc(obj,
- shdr,
- data);
- if (err)
- return err;
+ if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
+ err = bpf_object__collect_st_ops_relos(obj, shdr, data);
+ else if (idx == obj->efile.btf_maps_shndx)
+ err = bpf_object__collect_map_relos(obj, shdr, data);
+ else
+ err = bpf_object__collect_prog_relos(obj, shdr, data);
+ if (err)
+ return err;
+ }
+
+ bpf_object__sort_relos(obj);
+ return 0;
+}
+
+static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
+{
+ if (BPF_CLASS(insn->code) == BPF_JMP &&
+ BPF_OP(insn->code) == BPF_CALL &&
+ BPF_SRC(insn->code) == BPF_K &&
+ insn->src_reg == 0 &&
+ insn->dst_reg == 0) {
+ *func_id = insn->imm;
+ return true;
+ }
+ return false;
+}
+
+static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
+{
+ struct bpf_insn *insn = prog->insns;
+ enum bpf_func_id func_id;
+ int i;
+
+ if (obj->gen_loader)
+ return 0;
+
+ for (i = 0; i < prog->insns_cnt; i++, insn++) {
+ if (!insn_is_helper_call(insn, &func_id))
continue;
+
+ /* on kernels that don't yet support
+ * bpf_probe_read_{kernel,user}[_str] helpers, fall back
+ * to bpf_probe_read() which works well for old kernels
+ */
+ switch (func_id) {
+ case BPF_FUNC_probe_read_kernel:
+ case BPF_FUNC_probe_read_user:
+ if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
+ insn->imm = BPF_FUNC_probe_read;
+ break;
+ case BPF_FUNC_probe_read_kernel_str:
+ case BPF_FUNC_probe_read_user_str:
+ if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
+ insn->imm = BPF_FUNC_probe_read_str;
+ break;
+ default:
+ break;
}
+ }
+ return 0;
+}
- prog = bpf_object__find_prog_by_idx(obj, idx);
- if (!prog) {
- pr_warn("relocation failed: no section(%d)\n", idx);
- return -LIBBPF_ERRNO__RELOC;
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
+ int *btf_obj_fd, int *btf_type_id);
+
+/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
+static int libbpf_prepare_prog_load(struct bpf_program *prog,
+ struct bpf_prog_load_opts *opts, long cookie)
+{
+ enum sec_def_flags def = cookie;
+
+ /* old kernels might not support specifying expected_attach_type */
+ if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
+ opts->expected_attach_type = 0;
+
+ if (def & SEC_SLEEPABLE)
+ opts->prog_flags |= BPF_F_SLEEPABLE;
+
+ if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
+ opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
+
+ if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
+ int btf_obj_fd = 0, btf_type_id = 0, err;
+ const char *attach_name;
+
+ attach_name = strchr(prog->sec_name, '/');
+ if (!attach_name) {
+ /* if BPF program is annotated with just SEC("fentry")
+ * (or similar) without declaratively specifying
+ * target, then it is expected that target will be
+ * specified with bpf_program__set_attach_target() at
+ * runtime before BPF object load step. If not, then
+ * there is nothing to load into the kernel as BPF
+ * verifier won't be able to validate BPF program
+ * correctness anyways.
+ */
+ pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
+ prog->name);
+ return -EINVAL;
}
+ attach_name++; /* skip over / */
- err = bpf_program__collect_reloc(prog, shdr, data, obj);
+ err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
if (err)
return err;
+
+ /* cache resolved BTF FD and BTF type ID in the prog */
+ prog->attach_btf_obj_fd = btf_obj_fd;
+ prog->attach_btf_id = btf_type_id;
+
+ /* but by now libbpf common logic is not utilizing
+ * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
+ * this callback is called after opts were populated by
+ * libbpf, so this callback has to update opts explicitly here
+ */
+ opts->attach_btf_obj_fd = btf_obj_fd;
+ opts->attach_btf_id = btf_type_id;
}
return 0;
}
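/* A usage sketch (not part of this patch): a program annotated with a bare
 * SEC("fentry") must have its attach target set before load, as the warning
 * above explains. The program and kernel function names are hypothetical.
 */
#include <errno.h>
#include <bpf/libbpf.h>

static int set_target_then_load(struct bpf_object *obj)
{
	struct bpf_program *prog;
	int err;

	prog = bpf_object__find_program_by_name(obj, "handle_entry");
	if (!prog)
		return -ESRCH;

	/* 0 as attach_prog_fd means "attach to a kernel function by name" */
	err = bpf_program__set_attach_target(prog, 0, "do_unlinkat");
	if (err)
		return err;

	return bpf_object__load(obj);
}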
-static int
-load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
- char *license, __u32 kern_version, int *pfd)
+static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
+
+static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
+ struct bpf_insn *insns, int insns_cnt,
+ const char *license, __u32 kern_version, int *prog_fd)
{
- struct bpf_load_program_attr load_attr;
+ LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
+ const char *prog_name = NULL;
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
- char *log_buf = NULL;
- int btf_fd, ret;
+ char *log_buf = NULL, *tmp;
+ int btf_fd, ret, err;
+ bool own_log_buf = true;
+ __u32 log_level = prog->log_level;
+
+ if (prog->type == BPF_PROG_TYPE_UNSPEC) {
+ /*
+ * The program type must be set. Most likely we couldn't find a proper
+ * section definition at load time, and thus we didn't infer the type.
+ */
+ pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
+ prog->name, prog->sec_name);
+ return -EINVAL;
+ }
if (!insns || !insns_cnt)
return -EINVAL;
- memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
- load_attr.prog_type = prog->type;
load_attr.expected_attach_type = prog->expected_attach_type;
- if (prog->caps->name)
- load_attr.name = prog->name;
- load_attr.insns = insns;
- load_attr.insns_cnt = insns_cnt;
- load_attr.license = license;
- if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
- prog->type == BPF_PROG_TYPE_LSM) {
- load_attr.attach_btf_id = prog->attach_btf_id;
- } else if (prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_EXT) {
- load_attr.attach_prog_fd = prog->attach_prog_fd;
- load_attr.attach_btf_id = prog->attach_btf_id;
- } else {
- load_attr.kern_version = kern_version;
- load_attr.prog_ifindex = prog->prog_ifindex;
- }
- /* if .BTF.ext was loaded, kernel supports associated BTF for prog */
- if (prog->obj->btf_ext)
- btf_fd = bpf_object__btf_fd(prog->obj);
- else
- btf_fd = -1;
- load_attr.prog_btf_fd = btf_fd >= 0 ? btf_fd : 0;
- load_attr.func_info = prog->func_info;
- load_attr.func_info_rec_size = prog->func_info_rec_size;
- load_attr.func_info_cnt = prog->func_info_cnt;
- load_attr.line_info = prog->line_info;
- load_attr.line_info_rec_size = prog->line_info_rec_size;
- load_attr.line_info_cnt = prog->line_info_cnt;
- load_attr.log_level = prog->log_level;
+ if (kernel_supports(obj, FEAT_PROG_NAME))
+ prog_name = prog->name;
+ load_attr.attach_prog_fd = prog->attach_prog_fd;
+ load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
+ load_attr.attach_btf_id = prog->attach_btf_id;
+ load_attr.kern_version = kern_version;
+ load_attr.prog_ifindex = prog->prog_ifindex;
+
+ /* specify func_info/line_info only if kernel supports them */
+ btf_fd = bpf_object__btf_fd(obj);
+ if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
+ load_attr.prog_btf_fd = btf_fd;
+ load_attr.func_info = prog->func_info;
+ load_attr.func_info_rec_size = prog->func_info_rec_size;
+ load_attr.func_info_cnt = prog->func_info_cnt;
+ load_attr.line_info = prog->line_info;
+ load_attr.line_info_rec_size = prog->line_info_rec_size;
+ load_attr.line_info_cnt = prog->line_info_cnt;
+ }
+ load_attr.log_level = log_level;
load_attr.prog_flags = prog->prog_flags;
+ load_attr.fd_array = obj->fd_array;
-retry_load:
- if (log_buf_size) {
- log_buf = malloc(log_buf_size);
- if (!log_buf)
- return -ENOMEM;
+ /* adjust load_attr if sec_def provides custom preload callback */
+ if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
+ err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
+ if (err < 0) {
+ pr_warn("prog '%s': failed to prepare load attributes: %d\n",
+ prog->name, err);
+ return err;
+ }
+ insns = prog->insns;
+ insns_cnt = prog->insns_cnt;
+ }
- *log_buf = 0;
+ if (obj->gen_loader) {
+ bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
+ license, insns, insns_cnt, &load_attr,
+ prog - obj->programs);
+ *prog_fd = -1;
+ return 0;
}
- ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size);
+retry_load:
+ /* if log_level is zero, we don't request logs initially even if
+ * custom log_buf is specified; if the program load fails, then we'll
+ * bump log_level to 1 and use either custom log_buf or we'll allocate
+ * our own and retry the load to get details on what failed
+ */
+ if (log_level) {
+ if (prog->log_buf) {
+ log_buf = prog->log_buf;
+ log_buf_size = prog->log_size;
+ own_log_buf = false;
+ } else if (obj->log_buf) {
+ log_buf = obj->log_buf;
+ log_buf_size = obj->log_size;
+ own_log_buf = false;
+ } else {
+ log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
+ tmp = realloc(log_buf, log_buf_size);
+ if (!tmp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ log_buf = tmp;
+ log_buf[0] = '\0';
+ own_log_buf = true;
+ }
+ }
+ load_attr.log_buf = log_buf;
+ load_attr.log_size = log_buf_size;
+ load_attr.log_level = log_level;
+
+ ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
if (ret >= 0) {
- if (log_buf && load_attr.log_level)
- pr_debug("verifier log:\n%s", log_buf);
- *pfd = ret;
+ if (log_level && own_log_buf) {
+ pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
+ prog->name, log_buf);
+ }
+
+ if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
+ struct bpf_map *map;
+ int i;
+
+ for (i = 0; i < obj->nr_maps; i++) {
+ map = &prog->obj->maps[i];
+ if (map->libbpf_type != LIBBPF_MAP_RODATA)
+ continue;
+
+ if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) {
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warn("prog '%s': failed to bind map '%s': %s\n",
+ prog->name, map->real_name, cp);
+ /* Don't fail hard if can't bind rodata. */
+ }
+ }
+ }
+
+ *prog_fd = ret;
ret = 0;
goto out;
}
- if (!log_buf || errno == ENOSPC) {
- log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
- log_buf_size << 1);
-
- free(log_buf);
+ if (log_level == 0) {
+ log_level = 1;
goto retry_load;
}
+ /* On ENOSPC, increase log buffer size and retry, unless custom
+ * log_buf is specified.
+ * Be careful to not overflow u32, though. Kernel's log buf size limit
+ * isn't part of UAPI so it can always be bumped to full 4GB. So don't
+ * multiply by 2 unless we are sure we'll fit within 32 bits.
+ * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
+ */
+ if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
+ goto retry_load;
+
ret = -errno;
+
+ /* post-process verifier log to improve error descriptions */
+ fixup_verifier_log(prog, log_buf, log_buf_size);
+
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("load bpf program failed: %s\n", cp);
+ pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
pr_perm_msg(ret);
- if (log_buf && log_buf[0] != '\0') {
- ret = -LIBBPF_ERRNO__VERIFY;
- pr_warn("-- BEGIN DUMP LOG ---\n");
- pr_warn("\n%s\n", log_buf);
- pr_warn("-- END LOG --\n");
- } else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
- pr_warn("Program too large (%zu insns), at most %d insns\n",
- load_attr.insns_cnt, BPF_MAXINSNS);
- ret = -LIBBPF_ERRNO__PROG2BIG;
- } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
- /* Wrong program type? */
- int fd;
-
- load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
- load_attr.expected_attach_type = 0;
- fd = bpf_load_program_xattr(&load_attr, NULL, 0);
- if (fd >= 0) {
- close(fd);
- ret = -LIBBPF_ERRNO__PROGTYPE;
- goto out;
- }
+ if (own_log_buf && log_buf && log_buf[0] != '\0') {
+ pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
+ prog->name, log_buf);
}
out:
- free(log_buf);
+ if (own_log_buf)
+ free(log_buf);
return ret;
}
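/* A usage sketch (not part of this patch): callers can hand in their own
 * verifier log buffer, which the retry logic above then reuses instead of
 * allocating one internally. bpf_program__set_log_buf()/set_log_level() are
 * public libbpf APIs; the program name is hypothetical.
 */
#include <errno.h>
#include <bpf/libbpf.h>

static char verifier_log[1024 * 1024];

static int load_with_own_log_buf(struct bpf_object *obj)
{
	struct bpf_program *prog;

	prog = bpf_object__find_program_by_name(obj, "my_prog");
	if (!prog)
		return -ESRCH;

	bpf_program__set_log_buf(prog, verifier_log, sizeof(verifier_log));
	bpf_program__set_log_level(prog, 1); /* request logs on first attempt */

	return bpf_object__load(obj);
}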
-static int libbpf_find_attach_btf_id(struct bpf_program *prog);
-
-int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
+static char *find_prev_line(char *buf, char *cur)
{
- int err = 0, fd, i, btf_id;
+ char *p;
- if ((prog->type == BPF_PROG_TYPE_TRACING ||
- prog->type == BPF_PROG_TYPE_LSM ||
- prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
- btf_id = libbpf_find_attach_btf_id(prog);
- if (btf_id <= 0)
- return btf_id;
- prog->attach_btf_id = btf_id;
- }
+ if (cur == buf) /* end of a log buf */
+ return NULL;
- if (prog->instances.nr < 0 || !prog->instances.fds) {
- if (prog->preprocessor) {
- pr_warn("Internal error: can't load program '%s'\n",
- prog->section_name);
- return -LIBBPF_ERRNO__INTERNAL;
- }
+ p = cur - 1;
+ while (p - 1 >= buf && *(p - 1) != '\n')
+ p--;
- prog->instances.fds = malloc(sizeof(int));
- if (!prog->instances.fds) {
- pr_warn("Not enough memory for BPF fds\n");
- return -ENOMEM;
- }
- prog->instances.nr = 1;
- prog->instances.fds[0] = -1;
- }
+ return p;
+}
- if (!prog->preprocessor) {
- if (prog->instances.nr != 1) {
- pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
- prog->section_name, prog->instances.nr);
+static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
+ char *orig, size_t orig_sz, const char *patch)
+{
+ /* size of the remaining log content to the right from the to-be-replaced part */
+ size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
+ size_t patch_sz = strlen(patch);
+
+ if (patch_sz != orig_sz) {
+ /* If patch line(s) are longer than original piece of verifier log,
+ * shift log contents by (patch_sz - orig_sz) bytes to the right
+ * starting from after to-be-replaced part of the log.
+ *
+ * If patch line(s) are shorter than original piece of verifier log,
+ * shift log contents by (orig_sz - patch_sz) bytes to the left
+ * starting from after to-be-replaced part of the log
+ *
+ * We need to be careful about not overflowing available
+ * buf_sz capacity. If that's the case, we'll truncate the end
+ * of the original log, as necessary.
+ */
+ if (patch_sz > orig_sz) {
+ if (orig + patch_sz >= buf + buf_sz) {
+ /* patch is big enough to cover remaining space completely */
+ patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
+ rem_sz = 0;
+ } else if (patch_sz - orig_sz > buf_sz - log_sz) {
+ /* patch causes part of remaining log to be truncated */
+ rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
+ }
}
- err = load_program(prog, prog->insns, prog->insns_cnt,
- license, kern_ver, &fd);
- if (!err)
- prog->instances.fds[0] = fd;
- goto out;
+ /* shift remaining log to the right by calculated amount */
+ memmove(orig + patch_sz, orig + orig_sz, rem_sz);
}
- for (i = 0; i < prog->instances.nr; i++) {
- struct bpf_prog_prep_result result;
- bpf_program_prep_t preprocessor = prog->preprocessor;
+ memcpy(orig, patch, patch_sz);
+}
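/* A self-contained sketch (not part of this patch) of the same
 * replace-and-shift operation on a small buffer, with hypothetical sizes:
 * replacing "ABC" (orig_sz = 3) with "wxyz" (patch_sz = 4) inside
 * "..ABC..\0" shifts the tail right by one byte, capacity permitting.
 */
#include <string.h>

static void patch_log_sketch(void)
{
	char buf[16] = "..ABC..";
	size_t log_sz = strlen(buf) + 1;    /* 8, incl. terminating NUL */
	char *orig = buf + 2;               /* points at "ABC" */
	size_t orig_sz = 3;
	const char *patch = "wxyz";
	size_t patch_sz = strlen(patch);    /* 4 */
	size_t rem_sz = (buf + log_sz) - (orig + orig_sz); /* tail "..\0" -> 3 */

	memmove(orig + patch_sz, orig + orig_sz, rem_sz);
	memcpy(orig, patch, patch_sz);      /* buf is now "..wxyz..\0" */
}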
- memset(&result, 0, sizeof(result));
- err = preprocessor(prog, i, prog->insns,
- prog->insns_cnt, &result);
- if (err) {
- pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
- i, prog->section_name);
- goto out;
- }
+static void fixup_log_failed_core_relo(struct bpf_program *prog,
+ char *buf, size_t buf_sz, size_t log_sz,
+ char *line1, char *line2, char *line3)
+{
+ /* Expected log for failed and not properly guarded CO-RE relocation:
+ * line1 -> 123: (85) call unknown#195896080
+ * line2 -> invalid func unknown#195896080
+ * line3 -> <anything else or end of buffer>
+ *
+ * "123" is the index of the instruction that was poisoned. We extract
+ * instruction index to find corresponding CO-RE relocation and
+ * replace this part of the log with more relevant information about
+ * failed CO-RE relocation.
+ */
+ const struct bpf_core_relo *relo;
+ struct bpf_core_spec spec;
+ char patch[512], spec_buf[256];
+ int insn_idx, err, spec_len;
- if (!result.new_insn_ptr || !result.new_insn_cnt) {
- pr_debug("Skip loading the %dth instance of program '%s'\n",
- i, prog->section_name);
- prog->instances.fds[i] = -1;
- if (result.pfd)
- *result.pfd = -1;
- continue;
- }
+ if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
+ return;
- err = load_program(prog, result.new_insn_ptr,
- result.new_insn_cnt, license, kern_ver, &fd);
- if (err) {
- pr_warn("Loading the %dth instance of program '%s' failed\n",
- i, prog->section_name);
- goto out;
- }
+ relo = find_relo_core(prog, insn_idx);
+ if (!relo)
+ return;
- if (result.pfd)
- *result.pfd = fd;
- prog->instances.fds[i] = fd;
- }
-out:
+ err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
if (err)
- pr_warn("failed to load program '%s'\n", prog->section_name);
- zfree(&prog->insns);
- prog->insns_cnt = 0;
- return err;
+ return;
+
+ spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
+ snprintf(patch, sizeof(patch),
+ "%d: <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation %s%s\n",
+ insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
+
+ patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
+}
+
+static void fixup_log_missing_map_load(struct bpf_program *prog,
+ char *buf, size_t buf_sz, size_t log_sz,
+ char *line1, char *line2, char *line3)
+{
+ /* Expected log for failed and not properly guarded map reference:
+ * line1 -> 123: (85) call unknown#2001000345
+ * line2 -> invalid func unknown#2001000345
+ * line3 -> <anything else or end of buffer>
+ *
+ * "123" is the index of the instruction that was poisoned.
+ * "345" in "2001000345" is a map index in obj->maps to fetch map name.
+ */
+ struct bpf_object *obj = prog->obj;
+ const struct bpf_map *map;
+ int insn_idx, map_idx;
+ char patch[128];
+
+ if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
+ return;
+
+ map_idx -= POISON_LDIMM64_MAP_BASE;
+ if (map_idx < 0 || map_idx >= obj->nr_maps)
+ return;
+ map = &obj->maps[map_idx];
+
+ snprintf(patch, sizeof(patch),
+ "%d: <invalid BPF map reference>\n"
+ "BPF map '%s' is referenced but wasn't created\n",
+ insn_idx, map->name);
+
+ patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
+}
+
+static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
+ char *buf, size_t buf_sz, size_t log_sz,
+ char *line1, char *line2, char *line3)
+{
+ /* Expected log for failed and not properly guarded kfunc call:
+ * line1 -> 123: (85) call unknown#2002000345
+ * line2 -> invalid func unknown#2002000345
+ * line3 -> <anything else or end of buffer>
+ *
+ * "123" is the index of the instruction that was poisoned.
+ * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
+ */
+ struct bpf_object *obj = prog->obj;
+ const struct extern_desc *ext;
+ int insn_idx, ext_idx;
+ char patch[128];
+
+ if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
+ return;
+
+ ext_idx -= POISON_CALL_KFUNC_BASE;
+ if (ext_idx < 0 || ext_idx >= obj->nr_extern)
+ return;
+ ext = &obj->externs[ext_idx];
+
+ snprintf(patch, sizeof(patch),
+ "%d: <invalid kfunc call>\n"
+ "kfunc '%s' is referenced but wasn't resolved\n",
+ insn_idx, ext->name);
+
+ patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
}
-static bool bpf_program__is_function_storage(const struct bpf_program *prog,
- const struct bpf_object *obj)
+static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
{
- return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
+ /* look for familiar error patterns in the last N lines of the log */
+ const size_t max_last_line_cnt = 10;
+ char *prev_line, *cur_line, *next_line;
+ size_t log_sz;
+ int i;
+
+ if (!buf)
+ return;
+
+ log_sz = strlen(buf) + 1;
+ next_line = buf + log_sz - 1;
+
+ for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
+ cur_line = find_prev_line(buf, next_line);
+ if (!cur_line)
+ return;
+
+ if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
+ prev_line = find_prev_line(buf, cur_line);
+ if (!prev_line)
+ continue;
+
+ /* failed CO-RE relocation case */
+ fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
+ prev_line, cur_line, next_line);
+ return;
+ } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
+ prev_line = find_prev_line(buf, cur_line);
+ if (!prev_line)
+ continue;
+
+ /* reference to uncreated BPF map */
+ fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
+ prev_line, cur_line, next_line);
+ return;
+ } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
+ prev_line = find_prev_line(buf, cur_line);
+ if (!prev_line)
+ continue;
+
+ /* reference to unresolved kfunc */
+ fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
+ prev_line, cur_line, next_line);
+ return;
+ }
+ }
+}
+
+static int bpf_program_record_relos(struct bpf_program *prog)
+{
+ struct bpf_object *obj = prog->obj;
+ int i;
+
+ for (i = 0; i < prog->nr_reloc; i++) {
+ struct reloc_desc *relo = &prog->reloc_desc[i];
+ struct extern_desc *ext = &obj->externs[relo->ext_idx];
+ int kind;
+
+ switch (relo->type) {
+ case RELO_EXTERN_LD64:
+ if (ext->type != EXT_KSYM)
+ continue;
+ kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
+ BTF_KIND_VAR : BTF_KIND_FUNC;
+ bpf_gen__record_extern(obj->gen_loader, ext->name,
+ ext->is_weak, !ext->ksym.type_id,
+ true, kind, relo->insn_idx);
+ break;
+ case RELO_EXTERN_CALL:
+ bpf_gen__record_extern(obj->gen_loader, ext->name,
+ ext->is_weak, false, false, BTF_KIND_FUNC,
+ relo->insn_idx);
+ break;
+ case RELO_CORE: {
+ struct bpf_core_relo cr = {
+ .insn_off = relo->insn_idx * 8,
+ .type_id = relo->core_relo->type_id,
+ .access_str_off = relo->core_relo->access_str_off,
+ .kind = relo->core_relo->kind,
+ };
+
+ bpf_gen__record_relo_core(obj->gen_loader, &cr);
+ break;
+ }
+ default:
+ continue;
+ }
+ }
+ return 0;
}
static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
+ struct bpf_program *prog;
size_t i;
int err;
for (i = 0; i < obj->nr_programs; i++) {
- if (bpf_program__is_function_storage(&obj->programs[i], obj))
- continue;
- obj->programs[i].log_level |= log_level;
- err = bpf_program__load(&obj->programs[i],
- obj->license,
- obj->kern_version);
+ prog = &obj->programs[i];
+ err = bpf_object__sanitize_prog(obj, prog);
if (err)
return err;
}
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ if (prog_is_subprog(obj, prog))
+ continue;
+ if (!prog->autoload) {
+ pr_debug("prog '%s': skipped loading\n", prog->name);
+ continue;
+ }
+ prog->log_level |= log_level;
+
+ if (obj->gen_loader)
+ bpf_program_record_relos(prog);
+
+ err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
+ obj->license, obj->kern_version, &prog->fd);
+ if (err) {
+ pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
+ return err;
+ }
+ }
+
+ bpf_object__free_relocs(obj);
return 0;
}
-static struct bpf_object *
-__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
- const struct bpf_object_open_opts *opts)
+static const struct bpf_sec_def *find_sec_def(const char *sec_name);
+
+static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
{
- const char *obj_name, *kconfig;
struct bpf_program *prog;
+ int err;
+
+ bpf_object__for_each_program(prog, obj) {
+ prog->sec_def = find_sec_def(prog->sec_name);
+ if (!prog->sec_def) {
+ /* couldn't guess, but user might manually specify */
+ pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
+ prog->name, prog->sec_name);
+ continue;
+ }
+
+ prog->type = prog->sec_def->prog_type;
+ prog->expected_attach_type = prog->sec_def->expected_attach_type;
+
+ /* sec_def can have custom callback which should be called
+ * after bpf_program is initialized to adjust its properties
+ */
+ if (prog->sec_def->prog_setup_fn) {
+ err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
+ if (err < 0) {
+ pr_warn("prog '%s': failed to initialize: %d\n",
+ prog->name, err);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
+ const struct bpf_object_open_opts *opts)
+{
+ const char *obj_name, *kconfig, *btf_tmp_path;
struct bpf_object *obj;
char tmp_name[64];
int err;
+ char *log_buf;
+ size_t log_size;
+ __u32 log_level;
if (elf_version(EV_CURRENT) == EV_NONE) {
pr_warn("failed to init libelf for %s\n",
@@ -5093,50 +7310,56 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
pr_debug("loading object '%s' from buffer\n", obj_name);
}
+ log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
+ log_size = OPTS_GET(opts, kernel_log_size, 0);
+ log_level = OPTS_GET(opts, kernel_log_level, 0);
+ if (log_size > UINT_MAX)
+ return ERR_PTR(-EINVAL);
+ if (log_size && !log_buf)
+ return ERR_PTR(-EINVAL);
+
obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
if (IS_ERR(obj))
return obj;
+ obj->log_buf = log_buf;
+ obj->log_size = log_size;
+ obj->log_level = log_level;
+
+ btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
+ if (btf_tmp_path) {
+ if (strlen(btf_tmp_path) >= PATH_MAX) {
+ err = -ENAMETOOLONG;
+ goto out;
+ }
+ obj->btf_custom_path = strdup(btf_tmp_path);
+ if (!obj->btf_custom_path) {
+ err = -ENOMEM;
+ goto out;
+ }
+ }
+
kconfig = OPTS_GET(opts, kconfig, NULL);
if (kconfig) {
obj->kconfig = strdup(kconfig);
- if (!obj->kconfig)
- return ERR_PTR(-ENOMEM);
+ if (!obj->kconfig) {
+ err = -ENOMEM;
+ goto out;
+ }
}
err = bpf_object__elf_init(obj);
err = err ? : bpf_object__check_endianness(obj);
err = err ? : bpf_object__elf_collect(obj);
err = err ? : bpf_object__collect_externs(obj);
- err = err ? : bpf_object__finalize_btf(obj);
+ err = err ? : bpf_object_fixup_btf(obj);
err = err ? : bpf_object__init_maps(obj, opts);
- err = err ? : bpf_object__init_prog_names(obj);
- err = err ? : bpf_object__collect_reloc(obj);
+ err = err ? : bpf_object_init_progs(obj, opts);
+ err = err ? : bpf_object__collect_relos(obj);
if (err)
goto out;
- bpf_object__elf_finish(obj);
- bpf_object__for_each_program(prog, obj) {
- enum bpf_prog_type prog_type;
- enum bpf_attach_type attach_type;
-
- if (prog->type != BPF_PROG_TYPE_UNSPEC)
- continue;
-
- err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
- &attach_type);
- if (err == -ESRCH)
- /* couldn't guess, but user might manually specify */
- continue;
- if (err)
- goto out;
-
- bpf_program__set_type(prog, prog_type);
- bpf_program__set_expected_attach_type(prog, attach_type);
- if (prog_type == BPF_PROG_TYPE_TRACING ||
- prog_type == BPF_PROG_TYPE_EXT)
- prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
- }
+ bpf_object__elf_finish(obj);
return obj;
out:
@@ -5144,80 +7367,38 @@ out:
return ERR_PTR(err);
}
-static struct bpf_object *
-__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
-{
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
- .relaxed_maps = flags & MAPS_RELAX_COMPAT,
- );
-
- /* param validation */
- if (!attr->file)
- return NULL;
-
- pr_debug("loading %s\n", attr->file);
- return __bpf_object__open(attr->file, NULL, 0, &opts);
-}
-
-struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
-{
- return __bpf_object__open_xattr(attr, 0);
-}
-
-struct bpf_object *bpf_object__open(const char *path)
-{
- struct bpf_object_open_attr attr = {
- .file = path,
- .prog_type = BPF_PROG_TYPE_UNSPEC,
- };
-
- return bpf_object__open_xattr(&attr);
-}
-
struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
{
if (!path)
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
pr_debug("loading %s\n", path);
- return __bpf_object__open(path, NULL, 0, opts);
+ return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
}
-struct bpf_object *
-bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
- const struct bpf_object_open_opts *opts)
+struct bpf_object *bpf_object__open(const char *path)
{
- if (!obj_buf || obj_buf_sz == 0)
- return ERR_PTR(-EINVAL);
-
- return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
+ return bpf_object__open_file(path, NULL);
}
struct bpf_object *
-bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
- const char *name)
+bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
+ const struct bpf_object_open_opts *opts)
{
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
- .object_name = name,
- /* wrong default, but backwards-compatible */
- .relaxed_maps = true,
- );
-
- /* returning NULL is wrong, but backwards-compatible */
if (!obj_buf || obj_buf_sz == 0)
- return NULL;
+ return libbpf_err_ptr(-EINVAL);
- return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
+ return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
}
-int bpf_object__unload(struct bpf_object *obj)
+static int bpf_object_unload(struct bpf_object *obj)
{
size_t i;
if (!obj)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
for (i = 0; i < obj->nr_maps; i++) {
zclose(obj->maps[i].fd);
@@ -5238,67 +7419,355 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
bpf_object__for_each_map(m, obj) {
if (!bpf_map__is_internal(m))
continue;
- if (!obj->caps.global_data) {
- pr_warn("kernel doesn't support global data\n");
- return -ENOTSUP;
+ if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
+ m->def.map_flags &= ~BPF_F_MMAPABLE;
+ }
+
+ return 0;
+}
+
+int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
+{
+ char sym_type, sym_name[500];
+ unsigned long long sym_addr;
+ int ret, err = 0;
+ FILE *f;
+
+ f = fopen("/proc/kallsyms", "r");
+ if (!f) {
+ err = -errno;
+ pr_warn("failed to open /proc/kallsyms: %d\n", err);
+ return err;
+ }
+
+ while (true) {
+ ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
+ &sym_addr, &sym_type, sym_name);
+ if (ret == EOF && feof(f))
+ break;
+ if (ret != 3) {
+ pr_warn("failed to read kallsyms entry: %d\n", ret);
+ err = -EINVAL;
+ break;
+ }
+
+ err = cb(sym_addr, sym_type, sym_name, ctx);
+ if (err)
+ break;
+ }
+
+ fclose(f);
+ return err;
+}
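/* A sketch (not part of this patch) of a trivial callback for the parser
 * above; the signature mirrors kallsyms_cb() below, and the symbol-type
 * filter is only an example.
 */
static int count_text_syms_cb(unsigned long long sym_addr, char sym_type,
			      const char *sym_name, void *ctx)
{
	size_t *cnt = ctx;

	if (sym_type == 't' || sym_type == 'T')
		(*cnt)++;
	return 0;
}

/* usage: size_t cnt = 0; libbpf_kallsyms_parse(count_text_syms_cb, &cnt); */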
+
+static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
+ const char *sym_name, void *ctx)
+{
+ struct bpf_object *obj = ctx;
+ const struct btf_type *t;
+ struct extern_desc *ext;
+
+ ext = find_extern_by_name(obj, sym_name);
+ if (!ext || ext->type != EXT_KSYM)
+ return 0;
+
+ t = btf__type_by_id(obj->btf, ext->btf_id);
+ if (!btf_is_var(t))
+ return 0;
+
+ if (ext->is_set && ext->ksym.addr != sym_addr) {
+ pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
+ sym_name, ext->ksym.addr, sym_addr);
+ return -EINVAL;
+ }
+ if (!ext->is_set) {
+ ext->is_set = true;
+ ext->ksym.addr = sym_addr;
+ pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
+ }
+ return 0;
+}
+
+static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
+{
+ return libbpf_kallsyms_parse(kallsyms_cb, obj);
+}
+
+static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
+ __u16 kind, struct btf **res_btf,
+ struct module_btf **res_mod_btf)
+{
+ struct module_btf *mod_btf;
+ struct btf *btf;
+ int i, id, err;
+
+ btf = obj->btf_vmlinux;
+ mod_btf = NULL;
+ id = btf__find_by_name_kind(btf, ksym_name, kind);
+
+ if (id == -ENOENT) {
+ err = load_module_btfs(obj);
+ if (err)
+ return err;
+
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ /* we assume module_btf's BTF FD is always >0 */
+ mod_btf = &obj->btf_modules[i];
+ btf = mod_btf->btf;
+ id = btf__find_by_name_kind_own(btf, ksym_name, kind);
+ if (id != -ENOENT)
+ break;
+ }
+ }
+ if (id <= 0)
+ return -ESRCH;
+
+ *res_btf = btf;
+ *res_mod_btf = mod_btf;
+ return id;
+}
+
+static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
+ struct extern_desc *ext)
+{
+ const struct btf_type *targ_var, *targ_type;
+ __u32 targ_type_id, local_type_id;
+ struct module_btf *mod_btf = NULL;
+ const char *targ_var_name;
+ struct btf *btf = NULL;
+ int id, err;
+
+ id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
+ if (id < 0) {
+ if (id == -ESRCH && ext->is_weak)
+ return 0;
+ pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
+ ext->name);
+ return id;
+ }
+
+ /* find local type_id */
+ local_type_id = ext->ksym.type_id;
+
+ /* find target type_id */
+ targ_var = btf__type_by_id(btf, id);
+ targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
+ targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
+
+ err = bpf_core_types_are_compat(obj->btf, local_type_id,
+ btf, targ_type_id);
+ if (err <= 0) {
+ const struct btf_type *local_type;
+ const char *targ_name, *local_name;
+
+ local_type = btf__type_by_id(obj->btf, local_type_id);
+ local_name = btf__name_by_offset(obj->btf, local_type->name_off);
+ targ_name = btf__name_by_offset(btf, targ_type->name_off);
+
+ pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
+ ext->name, local_type_id,
+ btf_kind_str(local_type), local_name, targ_type_id,
+ btf_kind_str(targ_type), targ_name);
+ return -EINVAL;
+ }
+
+ ext->is_set = true;
+ ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
+ ext->ksym.kernel_btf_id = id;
+ pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
+ ext->name, id, btf_kind_str(targ_var), targ_var_name);
+
+ return 0;
+}
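
What this resolves, on the BPF side, is a typed __ksym extern variable. A short sketch; the symbol and attach point are hypothetical, with headers/license as in any .bpf.c:

extern const int some_kernel_var __ksym __weak;	/* hypothetical vmlinux/module symbol */

SEC("kprobe/do_nanosleep")
int probe(void *ctx)
{
	/* an unresolved __weak ksym gets address 0, so it can be tested */
	if (&some_kernel_var)
		bpf_printk("some_kernel_var present");
	return 0;
}
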
+
+static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
+ struct extern_desc *ext)
+{
+ int local_func_proto_id, kfunc_proto_id, kfunc_id;
+ struct module_btf *mod_btf = NULL;
+ const struct btf_type *kern_func;
+ struct btf *kern_btf = NULL;
+ int ret;
+
+ local_func_proto_id = ext->ksym.type_id;
+
+ kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
+ if (kfunc_id < 0) {
+ if (kfunc_id == -ESRCH && ext->is_weak)
+ return 0;
+ pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
+ ext->name);
+ return kfunc_id;
+ }
+
+ kern_func = btf__type_by_id(kern_btf, kfunc_id);
+ kfunc_proto_id = kern_func->type;
+
+ ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
+ kern_btf, kfunc_proto_id);
+ if (ret <= 0) {
+ pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
+ ext->name, local_func_proto_id,
+ mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
+ return -EINVAL;
+ }
+
+ /* set index for module BTF fd in fd_array, if unset */
+ if (mod_btf && !mod_btf->fd_array_idx) {
+ /* insn->off is s16 */
+ if (obj->fd_array_cnt == INT16_MAX) {
+ pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
+ ext->name, mod_btf->fd_array_idx);
+ return -E2BIG;
}
- if (!obj->caps.array_mmap)
- m->def.map_flags ^= BPF_F_MMAPABLE;
+ /* Cannot use index 0 for module BTF fd */
+ if (!obj->fd_array_cnt)
+ obj->fd_array_cnt = 1;
+
+ ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
+ obj->fd_array_cnt + 1);
+ if (ret)
+ return ret;
+ mod_btf->fd_array_idx = obj->fd_array_cnt;
+ /* we assume module BTF FD is always >0 */
+ obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
}
+ ext->is_set = true;
+ ext->ksym.kernel_btf_id = kfunc_id;
+ ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
+ /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
+ * populates FD into ld_imm64 insn when it's used to point to kfunc.
+ * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
+ * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
+ */
+ ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
+ pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
+ ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
+
+ return 0;
+}
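
The func-ksym counterpart is an extern function declaration on the BPF side; the kfunc below is hypothetical and would have to exist in vmlinux or module BTF (same headers as the variable sketch above):

extern void some_kfunc(int arg) __ksym;	/* hypothetical kfunc */

SEC("tc")
int use_kfunc(struct __sk_buff *skb)
{
	some_kfunc(1);
	return 0;
}
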
+
+static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
+{
+ const struct btf_type *t;
+ struct extern_desc *ext;
+ int i, err;
+
+ for (i = 0; i < obj->nr_extern; i++) {
+ ext = &obj->externs[i];
+ if (ext->type != EXT_KSYM || !ext->ksym.type_id)
+ continue;
+
+ if (obj->gen_loader) {
+ ext->is_set = true;
+ ext->ksym.kernel_btf_obj_fd = 0;
+ ext->ksym.kernel_btf_id = 0;
+ continue;
+ }
+ t = btf__type_by_id(obj->btf, ext->btf_id);
+ if (btf_is_var(t))
+ err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
+ else
+ err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
+ if (err)
+ return err;
+ }
return 0;
}
static int bpf_object__resolve_externs(struct bpf_object *obj,
const char *extra_kconfig)
{
- bool need_config = false;
+ bool need_config = false, need_kallsyms = false;
+ bool need_vmlinux_btf = false;
struct extern_desc *ext;
+ void *kcfg_data = NULL;
int err, i;
- void *data;
if (obj->nr_extern == 0)
return 0;
- data = obj->maps[obj->kconfig_map_idx].mmaped;
+ if (obj->kconfig_map_idx >= 0)
+ kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
for (i = 0; i < obj->nr_extern; i++) {
ext = &obj->externs[i];
- if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
- void *ext_val = data + ext->data_off;
- __u32 kver = get_kernel_version();
+ if (ext->type == EXT_KSYM) {
+ if (ext->ksym.type_id)
+ need_vmlinux_btf = true;
+ else
+ need_kallsyms = true;
+ continue;
+ } else if (ext->type == EXT_KCFG) {
+ void *ext_ptr = kcfg_data + ext->kcfg.data_off;
+ __u64 value = 0;
+
+ /* Kconfig externs need actual /proc/config.gz */
+ if (str_has_pfx(ext->name, "CONFIG_")) {
+ need_config = true;
+ continue;
+ }
- if (!kver) {
- pr_warn("failed to get kernel version\n");
+ /* Virtual kcfg externs are handled specially by libbpf */
+ if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
+ value = get_kernel_version();
+ if (!value) {
+ pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
+ return -EINVAL;
+ }
+ } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
+ value = kernel_supports(obj, FEAT_BPF_COOKIE);
+ } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
+ value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
+ } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
+ /* Currently libbpf supports only CONFIG_ and LINUX_ prefixed
+ * __kconfig externs, where LINUX_ ones are virtual and filled out
+ * specially by libbpf (their values don't come from Kconfig).
+ * If a LINUX_xxx variable is not recognized by libbpf but is marked
+ * __weak, it defaults to a zero value, just like CONFIG_xxx
+ * externs.
+ */
+ pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
return -EINVAL;
}
- err = set_ext_value_num(ext, ext_val, kver);
+
+ err = set_kcfg_value_num(ext, ext_ptr, value);
if (err)
return err;
- pr_debug("extern %s=0x%x\n", ext->name, kver);
- } else if (strncmp(ext->name, "CONFIG_", 7) == 0) {
- need_config = true;
+ pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
+ ext->name, (long long)value);
} else {
- pr_warn("unrecognized extern '%s'\n", ext->name);
+ pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
return -EINVAL;
}
}
if (need_config && extra_kconfig) {
- err = bpf_object__read_kconfig_mem(obj, extra_kconfig, data);
+ err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
if (err)
return -EINVAL;
need_config = false;
for (i = 0; i < obj->nr_extern; i++) {
ext = &obj->externs[i];
- if (!ext->is_set) {
+ if (ext->type == EXT_KCFG && !ext->is_set) {
need_config = true;
break;
}
}
}
if (need_config) {
- err = bpf_object__read_kconfig_file(obj, data);
+ err = bpf_object__read_kconfig_file(obj, kcfg_data);
+ if (err)
+ return -EINVAL;
+ }
+ if (need_kallsyms) {
+ err = bpf_object__read_kallsyms_file(obj);
+ if (err)
+ return -EINVAL;
+ }
+ if (need_vmlinux_btf) {
+ err = bpf_object__resolve_ksyms_btf_id(obj);
if (err)
return -EINVAL;
}
@@ -5306,10 +7775,10 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
ext = &obj->externs[i];
if (!ext->is_set && !ext->is_weak) {
- pr_warn("extern %s (strong) not resolved\n", ext->name);
+ pr_warn("extern '%s' (strong): not resolved\n", ext->name);
return -ESRCH;
} else if (!ext->is_set) {
- pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
+ pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
ext->name);
}
}
@@ -5317,37 +7786,91 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
return 0;
}
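
The kcfg externs handled above look like this in BPF C. CONFIG_BPF_SYSCALL is just an example knob; LINUX_HAS_BPF_COOKIE and LINUX_HAS_SYSCALL_WRAPPER are declared the same way and filled in by libbpf itself:

extern int LINUX_KERNEL_VERSION __kconfig;
extern _Bool CONFIG_BPF_SYSCALL __kconfig __weak;	/* from /proc/config.gz or /boot/config-* */

SEC("kprobe/do_nanosleep")	/* hypothetical attach point */
int check_cfg(void *ctx)
{
	if (CONFIG_BPF_SYSCALL && LINUX_KERNEL_VERSION >= 0x050a00 /* 5.10 */)
		bpf_printk("new enough kernel");
	return 0;
}
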
-int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
+static void bpf_map_prepare_vdata(const struct bpf_map *map)
+{
+ struct bpf_struct_ops *st_ops;
+ __u32 i;
+
+ st_ops = map->st_ops;
+ for (i = 0; i < btf_vlen(st_ops->type); i++) {
+ struct bpf_program *prog = st_ops->progs[i];
+ void *kern_data;
+ int prog_fd;
+
+ if (!prog)
+ continue;
+
+ prog_fd = bpf_program__fd(prog);
+ kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
+ *(unsigned long *)kern_data = prog_fd;
+ }
+}
+
+static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
+{
+ int i;
+
+ for (i = 0; i < obj->nr_maps; i++)
+ if (bpf_map__is_struct_ops(&obj->maps[i]))
+ bpf_map_prepare_vdata(&obj->maps[i]);
+
+ return 0;
+}
+
+static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
{
- struct bpf_object *obj;
int err, i;
- if (!attr)
- return -EINVAL;
- obj = attr->obj;
if (!obj)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
if (obj->loaded) {
- pr_warn("object should not be loaded twice\n");
- return -EINVAL;
+ pr_warn("object '%s': load can't be attempted twice\n", obj->name);
+ return libbpf_err(-EINVAL);
}
- obj->loaded = true;
+ if (obj->gen_loader)
+ bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
- err = bpf_object__probe_caps(obj);
+ err = bpf_object__probe_loading(obj);
+ err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__sanitize_maps(obj);
- err = err ? : bpf_object__load_vmlinux_btf(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
err = err ? : bpf_object__create_maps(obj);
- err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
- err = err ? : bpf_object__load_progs(obj, attr->log_level);
+ err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
+ err = err ? : bpf_object__load_progs(obj, extra_log_level);
+ err = err ? : bpf_object_init_prog_arrays(obj);
+ err = err ? : bpf_object_prepare_struct_ops(obj);
+
+ if (obj->gen_loader) {
+ /* reset FDs */
+ if (obj->btf)
+ btf__set_fd(obj->btf, -1);
+ for (i = 0; i < obj->nr_maps; i++)
+ obj->maps[i].fd = -1;
+ if (!err)
+ err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
+ }
+
+ /* clean up fd_array */
+ zfree(&obj->fd_array);
+
+ /* clean up module BTFs */
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ close(obj->btf_modules[i].fd);
+ btf__free(obj->btf_modules[i].btf);
+ free(obj->btf_modules[i].name);
+ }
+ free(obj->btf_modules);
+ /* clean up vmlinux BTF */
btf__free(obj->btf_vmlinux);
obj->btf_vmlinux = NULL;
+ obj->loaded = true; /* set regardless of whether loading succeeded */
+
if (err)
goto out;
@@ -5358,18 +7881,14 @@ out:
if (obj->maps[i].pinned && !obj->maps[i].reused)
bpf_map__unpin(&obj->maps[i], NULL);
- bpf_object__unload(obj);
+ bpf_object_unload(obj);
pr_warn("failed to load object '%s'\n", obj->path);
- return err;
+ return libbpf_err(err);
}
int bpf_object__load(struct bpf_object *obj)
{
- struct bpf_object_load_attr attr = {
- .obj = obj,
- };
-
- return bpf_object__load_xattr(&attr);
+ return bpf_object_load(obj, 0, NULL);
}
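
Putting the load path together, the typical user-space sequence after this change looks like the sketch below (path hypothetical, error handling abbreviated):

#include <errno.h>
#include <bpf/libbpf.h>

static int load_example(void)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("my_prog.bpf.o", NULL);	/* hypothetical path */
	if (!obj)
		return -errno;	/* 1.0 convention: NULL + errno on error */

	err = bpf_object__load(obj);	/* thin wrapper around bpf_object_load(obj, 0, NULL) */
	if (err)
		goto out;

	/* ... attach programs, look up maps ... */
out:
	bpf_object__close(obj);
	return err;
}
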
static int make_parent_dir(const char *path)
@@ -5424,179 +7943,53 @@ static int check_path(const char *path)
return err;
}
-int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
- int instance)
+int bpf_program__pin(struct bpf_program *prog, const char *path)
{
char *cp, errmsg[STRERR_BUFSIZE];
int err;
- err = make_parent_dir(path);
- if (err)
- return err;
-
- err = check_path(path);
- if (err)
- return err;
-
- if (prog == NULL) {
- pr_warn("invalid program pointer\n");
- return -EINVAL;
- }
-
- if (instance < 0 || instance >= prog->instances.nr) {
- pr_warn("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->section_name, prog->instances.nr);
- return -EINVAL;
- }
-
- if (bpf_obj_pin(prog->instances.fds[instance], path)) {
- cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
- pr_warn("failed to pin program: %s\n", cp);
- return -errno;
- }
- pr_debug("pinned program '%s'\n", path);
-
- return 0;
-}
-
-int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
- int instance)
-{
- int err;
-
- err = check_path(path);
- if (err)
- return err;
-
- if (prog == NULL) {
- pr_warn("invalid program pointer\n");
- return -EINVAL;
- }
-
- if (instance < 0 || instance >= prog->instances.nr) {
- pr_warn("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->section_name, prog->instances.nr);
- return -EINVAL;
+ if (prog->fd < 0) {
+ pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
+ return libbpf_err(-EINVAL);
}
- err = unlink(path);
- if (err != 0)
- return -errno;
- pr_debug("unpinned program '%s'\n", path);
-
- return 0;
-}
-
-int bpf_program__pin(struct bpf_program *prog, const char *path)
-{
- int i, err;
-
err = make_parent_dir(path);
if (err)
- return err;
+ return libbpf_err(err);
err = check_path(path);
if (err)
- return err;
-
- if (prog == NULL) {
- pr_warn("invalid program pointer\n");
- return -EINVAL;
- }
-
- if (prog->instances.nr <= 0) {
- pr_warn("no instances of prog %s to pin\n",
- prog->section_name);
- return -EINVAL;
- }
-
- if (prog->instances.nr == 1) {
- /* don't create subdirs when pinning single instance */
- return bpf_program__pin_instance(prog, path, 0);
- }
-
- for (i = 0; i < prog->instances.nr; i++) {
- char buf[PATH_MAX];
- int len;
+ return libbpf_err(err);
- len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
- if (len < 0) {
- err = -EINVAL;
- goto err_unpin;
- } else if (len >= PATH_MAX) {
- err = -ENAMETOOLONG;
- goto err_unpin;
- }
-
- err = bpf_program__pin_instance(prog, buf, i);
- if (err)
- goto err_unpin;
+ if (bpf_obj_pin(prog->fd, path)) {
+ err = -errno;
+ cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
+ pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
+ return libbpf_err(err);
}
+ pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
return 0;
-
-err_unpin:
- for (i = i - 1; i >= 0; i--) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
- if (len < 0)
- continue;
- else if (len >= PATH_MAX)
- continue;
-
- bpf_program__unpin_instance(prog, buf, i);
- }
-
- rmdir(path);
-
- return err;
}
int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
- int i, err;
-
- err = check_path(path);
- if (err)
- return err;
-
- if (prog == NULL) {
- pr_warn("invalid program pointer\n");
- return -EINVAL;
- }
-
- if (prog->instances.nr <= 0) {
- pr_warn("no instances of prog %s to pin\n",
- prog->section_name);
- return -EINVAL;
- }
+ int err;
- if (prog->instances.nr == 1) {
- /* don't create subdirs when pinning single instance */
- return bpf_program__unpin_instance(prog, path, 0);
+ if (prog->fd < 0) {
+ pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
+ return libbpf_err(-EINVAL);
}
- for (i = 0; i < prog->instances.nr; i++) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
- if (len < 0)
- return -EINVAL;
- else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
-
- err = bpf_program__unpin_instance(prog, buf, i);
- if (err)
- return err;
- }
+ err = check_path(path);
+ if (err)
+ return libbpf_err(err);
- err = rmdir(path);
+ err = unlink(path);
if (err)
- return -errno;
+ return libbpf_err(-errno);
+ pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
return 0;
}
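
With per-instance pinning gone, a loaded program pins to a single bpffs path. A usage sketch; the program name and path are hypothetical:

#include <errno.h>
#include <bpf/libbpf.h>

static int pin_example(struct bpf_object *obj)
{
	struct bpf_program *prog;
	int err;

	prog = bpf_object__find_program_by_name(obj, "handle_exec"); /* hypothetical name */
	if (!prog)
		return -ENOENT;

	err = bpf_program__pin(prog, "/sys/fs/bpf/handle_exec");
	if (err)	/* e.g. -EINVAL if the program isn't loaded yet */
		return err;

	/* ... later ... */
	return bpf_program__unpin(prog, "/sys/fs/bpf/handle_exec");
}
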
@@ -5607,14 +8000,14 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
if (map == NULL) {
pr_warn("invalid map pointer\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (map->pin_path) {
if (path && strcmp(path, map->pin_path)) {
pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
bpf_map__name(map), map->pin_path, path);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
} else if (map->pinned) {
pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
bpf_map__name(map), map->pin_path);
@@ -5624,10 +8017,10 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
if (!path) {
pr_warn("missing a path to pin map '%s' at\n",
bpf_map__name(map));
- return -EINVAL;
+ return libbpf_err(-EINVAL);
} else if (map->pinned) {
pr_warn("map '%s' already pinned\n", bpf_map__name(map));
- return -EEXIST;
+ return libbpf_err(-EEXIST);
}
map->pin_path = strdup(path);
@@ -5639,11 +8032,11 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
err = make_parent_dir(map->pin_path);
if (err)
- return err;
+ return libbpf_err(err);
err = check_path(map->pin_path);
if (err)
- return err;
+ return libbpf_err(err);
if (bpf_obj_pin(map->fd, map->pin_path)) {
err = -errno;
@@ -5658,7 +8051,7 @@ int bpf_map__pin(struct bpf_map *map, const char *path)
out_err:
cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
pr_warn("failed to pin map: %s\n", cp);
- return err;
+ return libbpf_err(err);
}
int bpf_map__unpin(struct bpf_map *map, const char *path)
@@ -5667,29 +8060,29 @@ int bpf_map__unpin(struct bpf_map *map, const char *path)
if (map == NULL) {
pr_warn("invalid map pointer\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (map->pin_path) {
if (path && strcmp(path, map->pin_path)) {
pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
bpf_map__name(map), map->pin_path, path);
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
path = map->pin_path;
} else if (!path) {
pr_warn("no path to unpin map '%s' from\n",
bpf_map__name(map));
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
err = check_path(path);
if (err)
- return err;
+ return libbpf_err(err);
err = unlink(path);
if (err != 0)
- return -errno;
+ return libbpf_err(-errno);
map->pinned = false;
pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
@@ -5704,7 +8097,7 @@ int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
if (path) {
new = strdup(path);
if (!new)
- return -errno;
+ return libbpf_err(-errno);
}
free(map->pin_path);
@@ -5712,7 +8105,10 @@ int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
return 0;
}
-const char *bpf_map__get_pin_path(const struct bpf_map *map)
+__alias(bpf_map__pin_path)
+const char *bpf_map__get_pin_path(const struct bpf_map *map);
+
+const char *bpf_map__pin_path(const struct bpf_map *map)
{
return map->pin_path;
}
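
A sketch of the pin-path accessors around the new bpf_map__pin_path() name (map name and bpffs path hypothetical); setting the path before load lets the later pin calls pick it up:

#include <stdio.h>
#include <bpf/libbpf.h>

static int map_pin_example(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map"); /* hypothetical */
	int err;

	if (!map)
		return -1;

	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map"); /* before load */
	if (err)
		return err;

	/* after bpf_object__load(): NULL path means "use the pin_path set above" */
	if (!bpf_map__is_pinned(map)) {
		err = bpf_map__pin(map, NULL);
		if (err)
			return err;
	}
	printf("pinned at %s\n", bpf_map__pin_path(map));
	return 0;
}
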
@@ -5722,35 +8118,41 @@ bool bpf_map__is_pinned(const struct bpf_map *map)
return map->pinned;
}
+static void sanitize_pin_path(char *s)
+{
+ /* bpffs disallows periods in path names */
+ while (*s) {
+ if (*s == '.')
+ *s = '_';
+ s++;
+ }
+}
+
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
struct bpf_map *map;
int err;
if (!obj)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
if (!obj->loaded) {
pr_warn("object not yet loaded; load it first\n");
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
bpf_object__for_each_map(map, obj) {
char *pin_path = NULL;
char buf[PATH_MAX];
- if (path) {
- int len;
+ if (!map->autocreate)
+ continue;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0) {
- err = -EINVAL;
- goto err_unpin_maps;
- } else if (len >= PATH_MAX) {
- err = -ENAMETOOLONG;
+ if (path) {
+ err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
+ if (err)
goto err_unpin_maps;
- }
+ sanitize_pin_path(buf);
pin_path = buf;
} else if (!map->pin_path) {
continue;
@@ -5764,14 +8166,14 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
return 0;
err_unpin_maps:
- while ((map = bpf_map__prev(map, obj))) {
+ while ((map = bpf_object__prev_map(obj, map))) {
if (!map->pin_path)
continue;
bpf_map__unpin(map, NULL);
}
- return err;
+ return libbpf_err(err);
}
int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
@@ -5780,21 +8182,17 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
int err;
if (!obj)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
bpf_object__for_each_map(map, obj) {
char *pin_path = NULL;
char buf[PATH_MAX];
if (path) {
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- bpf_map__name(map));
- if (len < 0)
- return -EINVAL;
- else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
+ err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
+ if (err)
+ return libbpf_err(err);
+ sanitize_pin_path(buf);
pin_path = buf;
} else if (!map->pin_path) {
continue;
@@ -5802,7 +8200,7 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
err = bpf_map__unpin(map, pin_path);
if (err)
- return err;
+ return libbpf_err(err);
}
return 0;
@@ -5811,29 +8209,21 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
struct bpf_program *prog;
+ char buf[PATH_MAX];
int err;
if (!obj)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
if (!obj->loaded) {
pr_warn("object not yet loaded; load it first\n");
- return -ENOENT;
+ return libbpf_err(-ENOENT);
}
bpf_object__for_each_program(prog, obj) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- prog->pin_name);
- if (len < 0) {
- err = -EINVAL;
- goto err_unpin_programs;
- } else if (len >= PATH_MAX) {
- err = -ENAMETOOLONG;
+ err = pathname_concat(buf, sizeof(buf), path, prog->name);
+ if (err)
goto err_unpin_programs;
- }
err = bpf_program__pin(prog, buf);
if (err)
@@ -5843,21 +8233,14 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
return 0;
err_unpin_programs:
- while ((prog = bpf_program__prev(prog, obj))) {
- char buf[PATH_MAX];
- int len;
-
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- prog->pin_name);
- if (len < 0)
- continue;
- else if (len >= PATH_MAX)
+ while ((prog = bpf_object__prev_program(obj, prog))) {
+ if (pathname_concat(buf, sizeof(buf), path, prog->name))
continue;
bpf_program__unpin(prog, buf);
}
- return err;
+ return libbpf_err(err);
}
int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
@@ -5866,22 +8249,18 @@ int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
int err;
if (!obj)
- return -ENOENT;
+ return libbpf_err(-ENOENT);
bpf_object__for_each_program(prog, obj) {
char buf[PATH_MAX];
- int len;
- len = snprintf(buf, PATH_MAX, "%s/%s", path,
- prog->pin_name);
- if (len < 0)
- return -EINVAL;
- else if (len >= PATH_MAX)
- return -ENAMETOOLONG;
+ err = pathname_concat(buf, sizeof(buf), path, prog->name);
+ if (err)
+ return libbpf_err(err);
err = bpf_program__unpin(prog, buf);
if (err)
- return err;
+ return libbpf_err(err);
}
return 0;
@@ -5893,56 +8272,67 @@ int bpf_object__pin(struct bpf_object *obj, const char *path)
err = bpf_object__pin_maps(obj, path);
if (err)
- return err;
+ return libbpf_err(err);
err = bpf_object__pin_programs(obj, path);
if (err) {
bpf_object__unpin_maps(obj, path);
- return err;
+ return libbpf_err(err);
}
return 0;
}
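
Object-level pinning now reports errors via libbpf_err() as well; a short usage sketch (directory hypothetical):

static int pin_all(struct bpf_object *obj)
{
	int err;

	/* pins maps first, then programs; unpins the maps again on failure */
	err = bpf_object__pin(obj, "/sys/fs/bpf/myapp");	/* hypothetical dir */
	if (err)
		return err;

	/* teardown mirrors it */
	bpf_object__unpin_programs(obj, "/sys/fs/bpf/myapp");
	bpf_object__unpin_maps(obj, "/sys/fs/bpf/myapp");
	return 0;
}
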
+static void bpf_map__destroy(struct bpf_map *map)
+{
+ if (map->inner_map) {
+ bpf_map__destroy(map->inner_map);
+ zfree(&map->inner_map);
+ }
+
+ zfree(&map->init_slots);
+ map->init_slots_sz = 0;
+
+ if (map->mmaped) {
+ munmap(map->mmaped, bpf_map_mmap_sz(map));
+ map->mmaped = NULL;
+ }
+
+ if (map->st_ops) {
+ zfree(&map->st_ops->data);
+ zfree(&map->st_ops->progs);
+ zfree(&map->st_ops->kern_func_off);
+ zfree(&map->st_ops);
+ }
+
+ zfree(&map->name);
+ zfree(&map->real_name);
+ zfree(&map->pin_path);
+
+ if (map->fd >= 0)
+ zclose(map->fd);
+}
+
void bpf_object__close(struct bpf_object *obj)
{
size_t i;
- if (!obj)
+ if (IS_ERR_OR_NULL(obj))
return;
- if (obj->clear_priv)
- obj->clear_priv(obj, obj->priv);
+ usdt_manager_free(obj->usdt_man);
+ obj->usdt_man = NULL;
+ bpf_gen__free(obj->gen_loader);
bpf_object__elf_finish(obj);
- bpf_object__unload(obj);
+ bpf_object_unload(obj);
btf__free(obj->btf);
btf_ext__free(obj->btf_ext);
- for (i = 0; i < obj->nr_maps; i++) {
- struct bpf_map *map = &obj->maps[i];
-
- if (map->clear_priv)
- map->clear_priv(map, map->priv);
- map->priv = NULL;
- map->clear_priv = NULL;
-
- if (map->mmaped) {
- munmap(map->mmaped, bpf_map_mmap_sz(map));
- map->mmaped = NULL;
- }
-
- if (map->st_ops) {
- zfree(&map->st_ops->data);
- zfree(&map->st_ops->progs);
- zfree(&map->st_ops->kern_func_off);
- zfree(&map->st_ops);
- }
-
- zfree(&map->name);
- zfree(&map->pin_path);
- }
+ for (i = 0; i < obj->nr_maps; i++)
+ bpf_map__destroy(&obj->maps[i]);
+ zfree(&obj->btf_custom_path);
zfree(&obj->kconfig);
zfree(&obj->externs);
obj->nr_extern = 0;
@@ -5956,32 +8346,12 @@ void bpf_object__close(struct bpf_object *obj)
}
zfree(&obj->programs);
- list_del(&obj->list);
free(obj);
}
-struct bpf_object *
-bpf_object__next(struct bpf_object *prev)
-{
- struct bpf_object *next;
-
- if (!prev)
- next = list_first_entry(&bpf_objects_list,
- struct bpf_object,
- list);
- else
- next = list_next_entry(prev, list);
-
- /* Empty list is noticed here so don't need checking on entry. */
- if (&next->list == &bpf_objects_list)
- return NULL;
-
- return next;
-}
-
const char *bpf_object__name(const struct bpf_object *obj)
{
- return obj ? obj->name : ERR_PTR(-EINVAL);
+ return obj ? obj->name : libbpf_err_ptr(-EINVAL);
}
unsigned int bpf_object__kversion(const struct bpf_object *obj)
@@ -5999,20 +8369,30 @@ int bpf_object__btf_fd(const struct bpf_object *obj)
return obj->btf ? btf__fd(obj->btf) : -1;
}
-int bpf_object__set_priv(struct bpf_object *obj, void *priv,
- bpf_object_clear_priv_t clear_priv)
+int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
{
- if (obj->priv && obj->clear_priv)
- obj->clear_priv(obj, obj->priv);
+ if (obj->loaded)
+ return libbpf_err(-EINVAL);
+
+ obj->kern_version = kern_version;
- obj->priv = priv;
- obj->clear_priv = clear_priv;
return 0;
}
-void *bpf_object__priv(const struct bpf_object *obj)
+int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
{
- return obj ? obj->priv : ERR_PTR(-EINVAL);
+ struct bpf_gen *gen;
+
+ if (!opts)
+ return -EFAULT;
+ if (!OPTS_VALID(opts, gen_loader_opts))
+ return -EINVAL;
+ gen = calloc(sizeof(*gen), 1);
+ if (!gen)
+ return -ENOMEM;
+ gen->opts = opts;
+ obj->gen_loader = gen;
+ return 0;
}
static struct bpf_program *
@@ -6032,7 +8412,7 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
if (p->obj != obj) {
pr_warn("error: program handler doesn't match object\n");
- return NULL;
+ return errno = EINVAL, NULL;
}
idx = (p - obj->programs) + (forward ? 1 : -1);
@@ -6042,365 +8422,446 @@ __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
}
struct bpf_program *
-bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
+bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
{
struct bpf_program *prog = prev;
do {
prog = __bpf_program__iter(prog, obj, true);
- } while (prog && bpf_program__is_function_storage(prog, obj));
+ } while (prog && prog_is_subprog(obj, prog));
return prog;
}
struct bpf_program *
-bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
+bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
{
struct bpf_program *prog = next;
do {
prog = __bpf_program__iter(prog, obj, false);
- } while (prog && bpf_program__is_function_storage(prog, obj));
+ } while (prog && prog_is_subprog(obj, prog));
return prog;
}
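
A small sketch of iterating programs with the renamed API (via the public for-each macro):

#include <stdio.h>
#include <bpf/libbpf.h>

static void list_progs(const struct bpf_object *obj)
{
	struct bpf_program *prog;

	/* the for_each macro is built on bpf_object__next_program() and
	 * therefore skips subprograms, as above
	 */
	bpf_object__for_each_program(prog, obj) {
		printf("%s (section %s)\n", bpf_program__name(prog),
		       bpf_program__section_name(prog));
	}
}
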
-int bpf_program__set_priv(struct bpf_program *prog, void *priv,
- bpf_program_clear_priv_t clear_priv)
+void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
- if (prog->priv && prog->clear_priv)
- prog->clear_priv(prog, prog->priv);
-
- prog->priv = priv;
- prog->clear_priv = clear_priv;
- return 0;
+ prog->prog_ifindex = ifindex;
}
-void *bpf_program__priv(const struct bpf_program *prog)
+const char *bpf_program__name(const struct bpf_program *prog)
{
- return prog ? prog->priv : ERR_PTR(-EINVAL);
+ return prog->name;
}
-void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
+const char *bpf_program__section_name(const struct bpf_program *prog)
{
- prog->prog_ifindex = ifindex;
+ return prog->sec_name;
}
-const char *bpf_program__name(const struct bpf_program *prog)
+bool bpf_program__autoload(const struct bpf_program *prog)
{
- return prog->name;
+ return prog->autoload;
}
-const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
+int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
{
- const char *title;
+ if (prog->obj->loaded)
+ return libbpf_err(-EINVAL);
- title = prog->section_name;
- if (needs_copy) {
- title = strdup(title);
- if (!title) {
- pr_warn("failed to strdup program title\n");
- return ERR_PTR(-ENOMEM);
- }
- }
+ prog->autoload = autoload;
+ return 0;
+}
- return title;
+bool bpf_program__autoattach(const struct bpf_program *prog)
+{
+ return prog->autoattach;
}
-int bpf_program__fd(const struct bpf_program *prog)
+void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
{
- return bpf_program__nth_fd(prog, 0);
+ prog->autoattach = autoattach;
}
-size_t bpf_program__size(const struct bpf_program *prog)
+const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
{
- return prog->insns_cnt * sizeof(struct bpf_insn);
+ return prog->insns;
}
-int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
- bpf_program_prep_t prep)
+size_t bpf_program__insn_cnt(const struct bpf_program *prog)
{
- int *instances_fds;
+ return prog->insns_cnt;
+}
- if (nr_instances <= 0 || !prep)
- return -EINVAL;
+int bpf_program__set_insns(struct bpf_program *prog,
+ struct bpf_insn *new_insns, size_t new_insn_cnt)
+{
+ struct bpf_insn *insns;
- if (prog->instances.nr > 0 || prog->instances.fds) {
- pr_warn("Can't set pre-processor after loading\n");
- return -EINVAL;
- }
+ if (prog->obj->loaded)
+ return -EBUSY;
- instances_fds = malloc(sizeof(int) * nr_instances);
- if (!instances_fds) {
- pr_warn("alloc memory failed for fds\n");
+ insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
+ if (!insns) {
+ pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
return -ENOMEM;
}
+ memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
- /* fill all fd with -1 */
- memset(instances_fds, -1, sizeof(int) * nr_instances);
-
- prog->instances.nr = nr_instances;
- prog->instances.fds = instances_fds;
- prog->preprocessor = prep;
+ prog->insns = insns;
+ prog->insns_cnt = new_insn_cnt;
return 0;
}
-int bpf_program__nth_fd(const struct bpf_program *prog, int n)
+int bpf_program__fd(const struct bpf_program *prog)
{
- int fd;
-
if (!prog)
- return -EINVAL;
-
- if (n >= prog->instances.nr || n < 0) {
- pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
- n, prog->section_name, prog->instances.nr);
- return -EINVAL;
- }
+ return libbpf_err(-EINVAL);
- fd = prog->instances.fds[n];
- if (fd < 0) {
- pr_warn("%dth instance of program '%s' is invalid\n",
- n, prog->section_name);
- return -ENOENT;
- }
+ if (prog->fd < 0)
+ return libbpf_err(-ENOENT);
- return fd;
+ return prog->fd;
}
-enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
+__alias(bpf_program__type)
+enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
+
+enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
{
return prog->type;
}
-void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
+int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
prog->type = type;
+ prog->sec_def = NULL;
+ return 0;
}
-static bool bpf_program__is_type(const struct bpf_program *prog,
- enum bpf_prog_type type)
-{
- return prog ? (prog->type == type) : false;
-}
-
-#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
-int bpf_program__set_##NAME(struct bpf_program *prog) \
-{ \
- if (!prog) \
- return -EINVAL; \
- bpf_program__set_type(prog, TYPE); \
- return 0; \
-} \
- \
-bool bpf_program__is_##NAME(const struct bpf_program *prog) \
-{ \
- return bpf_program__is_type(prog, TYPE); \
-} \
-
-BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
-BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
-BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
-BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
-BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
-BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
-BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
-BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
-BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
-BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
-BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
-BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
-
-enum bpf_attach_type
-bpf_program__get_expected_attach_type(struct bpf_program *prog)
+__alias(bpf_program__expected_attach_type)
+enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
+
+enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
{
return prog->expected_attach_type;
}
-void bpf_program__set_expected_attach_type(struct bpf_program *prog,
+int bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
prog->expected_attach_type = type;
+ return 0;
}
-#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
- { string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }
+__u32 bpf_program__flags(const struct bpf_program *prog)
+{
+ return prog->prog_flags;
+}
-/* Programs that can NOT be attached. */
-#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
+int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
+{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
-/* Programs that can be attached. */
-#define BPF_APROG_SEC(string, ptype, atype) \
- BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)
+ prog->prog_flags = flags;
+ return 0;
+}
-/* Programs that must specify expected attach type at load time. */
-#define BPF_EAPROG_SEC(string, ptype, eatype) \
- BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
+__u32 bpf_program__log_level(const struct bpf_program *prog)
+{
+ return prog->log_level;
+}
-/* Programs that use BTF to identify attach point */
-#define BPF_PROG_BTF(string, ptype, eatype) \
- BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
+int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
+{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
-/* Programs that can be attached but attach type can't be identified by section
- * name. Kept for backward compatibility.
- */
-#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
+ prog->log_level = log_level;
+ return 0;
+}
-#define SEC_DEF(sec_pfx, ptype, ...) { \
- .sec = sec_pfx, \
- .len = sizeof(sec_pfx) - 1, \
- .prog_type = BPF_PROG_TYPE_##ptype, \
- __VA_ARGS__ \
+const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
+{
+ *log_size = prog->log_size;
+ return prog->log_buf;
}
-struct bpf_sec_def;
+int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
+{
+ if (log_size && !log_buf)
+ return -EINVAL;
+ if (prog->log_size > UINT_MAX)
+ return -EINVAL;
+ if (prog->obj->loaded)
+ return -EBUSY;
-typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
- struct bpf_program *prog);
+ prog->log_buf = log_buf;
+ prog->log_size = log_size;
+ return 0;
+}
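
The setters above are only legal between open and load; a sketch chaining them the way this file chains errors (type, attach type, flags and log settings are illustrative):

static char log_buf[1024 * 1024];

static int tune_prog(struct bpf_program *prog)
{
	int err;

	/* each setter fails with -EBUSY once the owning object is loaded */
	err = bpf_program__set_type(prog, BPF_PROG_TYPE_TRACING);
	err = err ?: bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
	err = err ?: bpf_program__set_flags(prog, BPF_F_SLEEPABLE);
	err = err ?: bpf_program__set_log_level(prog, 2);	/* verbose verifier log */
	err = err ?: bpf_program__set_log_buf(prog, log_buf, sizeof(log_buf));
	return err;
}
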
-static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
- struct bpf_program *prog);
-static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
- struct bpf_program *prog);
-static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
- struct bpf_program *prog);
-static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
- struct bpf_program *prog);
-static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
- struct bpf_program *prog);
+#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
+ .sec = (char *)sec_pfx, \
+ .prog_type = BPF_PROG_TYPE_##ptype, \
+ .expected_attach_type = atype, \
+ .cookie = (long)(flags), \
+ .prog_prepare_load_fn = libbpf_prepare_prog_load, \
+ __VA_ARGS__ \
+}
-struct bpf_sec_def {
- const char *sec;
- size_t len;
- enum bpf_prog_type prog_type;
- enum bpf_attach_type expected_attach_type;
- bool is_attachable;
- bool is_attach_btf;
- enum bpf_attach_type attach_type;
- attach_fn_t attach_fn;
-};
+static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
+static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
static const struct bpf_sec_def section_defs[] = {
- BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
- BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
- SEC_DEF("kprobe/", KPROBE,
- .attach_fn = attach_kprobe),
- BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
- SEC_DEF("kretprobe/", KPROBE,
- .attach_fn = attach_kprobe),
- BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
- BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
- BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
- SEC_DEF("tracepoint/", TRACEPOINT,
- .attach_fn = attach_tp),
- SEC_DEF("tp/", TRACEPOINT,
- .attach_fn = attach_tp),
- SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
- .attach_fn = attach_raw_tp),
- SEC_DEF("raw_tp/", RAW_TRACEPOINT,
- .attach_fn = attach_raw_tp),
- SEC_DEF("tp_btf/", TRACING,
- .expected_attach_type = BPF_TRACE_RAW_TP,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("fentry/", TRACING,
- .expected_attach_type = BPF_TRACE_FENTRY,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("fmod_ret/", TRACING,
- .expected_attach_type = BPF_MODIFY_RETURN,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("fexit/", TRACING,
- .expected_attach_type = BPF_TRACE_FEXIT,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("freplace/", EXT,
- .is_attach_btf = true,
- .attach_fn = attach_trace),
- SEC_DEF("lsm/", LSM,
- .is_attach_btf = true,
- .expected_attach_type = BPF_LSM_MAC,
- .attach_fn = attach_lsm),
- BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP),
- BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
- BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
- BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
- BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
- BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
- BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
- BPF_CGROUP_INET_INGRESS),
- BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
- BPF_CGROUP_INET_EGRESS),
- BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
- BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
- BPF_CGROUP_INET_SOCK_CREATE),
- BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
- BPF_CGROUP_INET4_POST_BIND),
- BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
- BPF_CGROUP_INET6_POST_BIND),
- BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
- BPF_CGROUP_DEVICE),
- BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
- BPF_CGROUP_SOCK_OPS),
- BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
- BPF_SK_SKB_STREAM_PARSER),
- BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
- BPF_SK_SKB_STREAM_VERDICT),
- BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
- BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
- BPF_SK_MSG_VERDICT),
- BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
- BPF_LIRC_MODE2),
- BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
- BPF_FLOW_DISSECTOR),
- BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET4_BIND),
- BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET6_BIND),
- BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET4_CONNECT),
- BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_INET6_CONNECT),
- BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_UDP4_SENDMSG),
- BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_UDP6_SENDMSG),
- BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_UDP4_RECVMSG),
- BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
- BPF_CGROUP_UDP6_RECVMSG),
- BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
- BPF_CGROUP_SYSCTL),
- BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
- BPF_CGROUP_GETSOCKOPT),
- BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
- BPF_CGROUP_SETSOCKOPT),
- BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
+ SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE),
+ SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
+ SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
+ SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
+ SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
+ SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
+ SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
+ SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
+ SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
+ SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
+ SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
+ SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
+ SEC_DEF("usdt+", KPROBE, 0, SEC_NONE, attach_usdt),
+ SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE),
+ SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE),
+ SEC_DEF("action", SCHED_ACT, 0, SEC_NONE),
+ SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp),
+ SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp),
+ SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
+ SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
+ SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace),
+ SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
+ SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
+ SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
+ SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
+ SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
+ SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
+ SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
+ SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
+ SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
+ SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
+ SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
+ SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
+ SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE),
+ SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE),
+ SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE),
+ SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE),
+ SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE),
+ SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
+ SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
+ SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
+ SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE),
+ SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
+ SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
+ SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
+ SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
+ SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
+ SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE),
+ SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
+ SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
+ SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
+ SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
+ SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
+ SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
+ SEC_DEF("netfilter", NETFILTER, 0, SEC_NONE),
};
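
For orientation, a tiny BPF-side sketch using a couple of the section names from this table (attach targets hypothetical):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tp/syscalls/sys_enter_execve")	/* "tp+" -> TRACEPOINT */
int handle_tp(void *ctx)
{
	return 0;
}

SEC("xdp")				/* SEC_ATTACHABLE_OPT: attach type optional */
int xdp_pass(struct xdp_md *ctx)
{
	return XDP_PASS;
}

char LICENSE[] SEC("license") = "GPL";
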
-#undef BPF_PROG_SEC_IMPL
-#undef BPF_PROG_SEC
-#undef BPF_APROG_SEC
-#undef BPF_EAPROG_SEC
-#undef BPF_APROG_COMPAT
-#undef SEC_DEF
+static size_t custom_sec_def_cnt;
+static struct bpf_sec_def *custom_sec_defs;
+static struct bpf_sec_def custom_fallback_def;
+static bool has_custom_fallback_def;
-#define MAX_TYPE_NAME_SIZE 32
+static int last_custom_sec_def_handler_id;
+
+int libbpf_register_prog_handler(const char *sec,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type exp_attach_type,
+ const struct libbpf_prog_handler_opts *opts)
+{
+ struct bpf_sec_def *sec_def;
+
+ if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
+ return libbpf_err(-EINVAL);
+
+ if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
+ return libbpf_err(-E2BIG);
+
+ if (sec) {
+ sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
+ sizeof(*sec_def));
+ if (!sec_def)
+ return libbpf_err(-ENOMEM);
+
+ custom_sec_defs = sec_def;
+ sec_def = &custom_sec_defs[custom_sec_def_cnt];
+ } else {
+ if (has_custom_fallback_def)
+ return libbpf_err(-EBUSY);
+
+ sec_def = &custom_fallback_def;
+ }
+
+ sec_def->sec = sec ? strdup(sec) : NULL;
+ if (sec && !sec_def->sec)
+ return libbpf_err(-ENOMEM);
+
+ sec_def->prog_type = prog_type;
+ sec_def->expected_attach_type = exp_attach_type;
+ sec_def->cookie = OPTS_GET(opts, cookie, 0);
+
+ sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
+ sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
+ sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
+
+ sec_def->handler_id = ++last_custom_sec_def_handler_id;
+
+ if (sec)
+ custom_sec_def_cnt++;
+ else
+ has_custom_fallback_def = true;
+
+ return sec_def->handler_id;
+}
+
+int libbpf_unregister_prog_handler(int handler_id)
+{
+ struct bpf_sec_def *sec_defs;
+ int i;
+
+ if (handler_id <= 0)
+ return libbpf_err(-EINVAL);
+
+ if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
+ memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
+ has_custom_fallback_def = false;
+ return 0;
+ }
+
+ for (i = 0; i < custom_sec_def_cnt; i++) {
+ if (custom_sec_defs[i].handler_id == handler_id)
+ break;
+ }
+
+ if (i == custom_sec_def_cnt)
+ return libbpf_err(-ENOENT);
+
+ free(custom_sec_defs[i].sec);
+ for (i = i + 1; i < custom_sec_def_cnt; i++)
+ custom_sec_defs[i - 1] = custom_sec_defs[i];
+ custom_sec_def_cnt--;
+
+ /* try to shrink the array, but it's ok if we couldn't */
+ sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
+ if (sec_defs)
+ custom_sec_defs = sec_defs;
+
+ return 0;
+}
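
A usage sketch of the new custom-handler API; the "myapp" section prefix and cookie value are made up:

#include <bpf/libbpf.h>

static int setup_custom_handler(void)
{
	DECLARE_LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
		.cookie = 42,	/* passed back to the prog_*_fn callbacks */
	);
	int handler_id;

	handler_id = libbpf_register_prog_handler("myapp", BPF_PROG_TYPE_KPROBE,
						  0, &opts);
	if (handler_id < 0)
		return handler_id;

	/* ... open/load objects whose programs use SEC("myapp") ... */

	return libbpf_unregister_prog_handler(handler_id);
}
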
+
+static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
+{
+ size_t len = strlen(sec_def->sec);
+
+ /* "type/" always has to have proper SEC("type/extras") form */
+ if (sec_def->sec[len - 1] == '/') {
+ if (str_has_pfx(sec_name, sec_def->sec))
+ return true;
+ return false;
+ }
+
+ /* "type+" means it can be either exact SEC("type") or
+ * well-formed SEC("type/extras") with proper '/' separator
+ */
+ if (sec_def->sec[len - 1] == '+') {
+ len--;
+ /* not even a prefix */
+ if (strncmp(sec_name, sec_def->sec, len) != 0)
+ return false;
+ /* exact match or has '/' separator */
+ if (sec_name[len] == '\0' || sec_name[len] == '/')
+ return true;
+ return false;
+ }
+
+ return strcmp(sec_name, sec_def->sec) == 0;
+}
static const struct bpf_sec_def *find_sec_def(const char *sec_name)
{
- int i, n = ARRAY_SIZE(section_defs);
+ const struct bpf_sec_def *sec_def;
+ int i, n;
+ n = custom_sec_def_cnt;
for (i = 0; i < n; i++) {
- if (strncmp(sec_name,
- section_defs[i].sec, section_defs[i].len))
- continue;
- return &section_defs[i];
+ sec_def = &custom_sec_defs[i];
+ if (sec_def_matches(sec_def, sec_name))
+ return sec_def;
+ }
+
+ n = ARRAY_SIZE(section_defs);
+ for (i = 0; i < n; i++) {
+ sec_def = &section_defs[i];
+ if (sec_def_matches(sec_def, sec_name))
+ return sec_def;
}
+
+ if (has_custom_fallback_def)
+ return &custom_fallback_def;
+
return NULL;
}
+#define MAX_TYPE_NAME_SIZE 32
+
static char *libbpf_get_type_names(bool attach_type)
{
int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
@@ -6413,8 +8874,15 @@ static char *libbpf_get_type_names(bool attach_type)
buf[0] = '\0';
/* Forge string buf with all available names */
for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
- if (attach_type && !section_defs[i].is_attachable)
- continue;
+ const struct bpf_sec_def *sec_def = &section_defs[i];
+
+ if (attach_type) {
+ if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
+ continue;
+
+ if (!(sec_def->cookie & SEC_ATTACHABLE))
+ continue;
+ }
if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
free(buf);
@@ -6434,7 +8902,7 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
char *type_names;
if (!name)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
sec_def = find_sec_def(name);
if (sec_def) {
@@ -6450,10 +8918,43 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
free(type_names);
}
- return -ESRCH;
+ return libbpf_err(-ESRCH);
+}
+
+const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
+{
+ if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
+ return NULL;
+
+ return attach_type_name[t];
+}
+
+const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
+{
+ if (t < 0 || t >= ARRAY_SIZE(link_type_name))
+ return NULL;
+
+ return link_type_name[t];
+}
+
+const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
+{
+ if (t < 0 || t >= ARRAY_SIZE(map_type_name))
+ return NULL;
+
+ return map_type_name[t];
+}
+
+const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
+{
+ if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
+ return NULL;
+
+ return prog_type_name[t];
}
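
A small sketch of the new string helpers, e.g. for diagnostics:

#include <stdio.h>
#include <bpf/libbpf.h>

static void print_prog_kind(const struct bpf_program *prog)
{
	const char *s = libbpf_bpf_prog_type_str(bpf_program__type(prog));

	/* all four *_str() helpers return NULL for unknown/out-of-range values */
	printf("prog %s: type %s\n", bpf_program__name(prog), s ?: "UNKNOWN");
}
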
static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
+ int sec_idx,
size_t offset)
{
struct bpf_map *map;
@@ -6463,7 +8964,8 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
map = &obj->maps[i];
if (!bpf_map__is_struct_ops(map))
continue;
- if (map->sec_offset <= offset &&
+ if (map->sec_idx == sec_idx &&
+ map->sec_offset <= offset &&
offset - map->sec_offset < map->def.value_size)
return map;
}
@@ -6472,9 +8974,8 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
}
/* Collect the reloc from ELF and populate the st_ops->progs[] */
-static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
- GElf_Shdr *shdr,
- Elf_Data *data)
+static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
+ Elf64_Shdr *shdr, Elf_Data *data)
{
const struct btf_member *member;
struct bpf_struct_ops *st_ops;
@@ -6482,53 +8983,58 @@ static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
unsigned int shdr_idx;
const struct btf *btf;
struct bpf_map *map;
- Elf_Data *symbols;
- unsigned int moff;
+ unsigned int moff, insn_idx;
const char *name;
__u32 member_idx;
- GElf_Sym sym;
- GElf_Rel rel;
+ Elf64_Sym *sym;
+ Elf64_Rel *rel;
int i, nrels;
- symbols = obj->efile.symbols;
btf = obj->btf;
nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- if (!gelf_getrel(data, i, &rel)) {
+ rel = elf_rel_by_idx(data, i);
+ if (!rel) {
pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
+ sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
+ if (!sym) {
pr_warn("struct_ops reloc: symbol %zx not found\n",
- (size_t)GELF_R_SYM(rel.r_info));
+ (size_t)ELF64_R_SYM(rel->r_info));
return -LIBBPF_ERRNO__FORMAT;
}
- name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name) ? : "<?>";
- map = find_struct_ops_map_by_offset(obj, rel.r_offset);
+ name = elf_sym_str(obj, sym->st_name) ?: "<?>";
+ map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
if (!map) {
- pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
- (size_t)rel.r_offset);
+ pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
+ (size_t)rel->r_offset);
return -EINVAL;
}
- moff = rel.r_offset - map->sec_offset;
- shdr_idx = sym.st_shndx;
+ moff = rel->r_offset - map->sec_offset;
+ shdr_idx = sym->st_shndx;
st_ops = map->st_ops;
- pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
+ pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
map->name,
- (long long)(rel.r_info >> 32),
- (long long)sym.st_value,
- shdr_idx, (size_t)rel.r_offset,
- map->sec_offset, sym.st_name, name);
+ (long long)(rel->r_info >> 32),
+ (long long)sym->st_value,
+ shdr_idx, (size_t)rel->r_offset,
+ map->sec_offset, sym->st_name, name);
if (shdr_idx >= SHN_LORESERVE) {
- pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
- map->name, (size_t)rel.r_offset, shdr_idx);
+ pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
+ map->name, (size_t)rel->r_offset, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
+ if (sym->st_value % BPF_INSN_SZ) {
+ pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
+ map->name, (unsigned long long)sym->st_value);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ insn_idx = sym->st_value / BPF_INSN_SZ;
member = find_member_by_offset(st_ops->type, moff * 8);
if (!member) {
@@ -6545,48 +9051,75 @@ static int bpf_object__collect_struct_ops_map_reloc(struct bpf_object *obj,
return -EINVAL;
}
- prog = bpf_object__find_prog_by_idx(obj, shdr_idx);
+ prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
if (!prog) {
pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
map->name, shdr_idx, name);
return -EINVAL;
}
- if (prog->type == BPF_PROG_TYPE_UNSPEC) {
- const struct bpf_sec_def *sec_def;
-
- sec_def = find_sec_def(prog->section_name);
- if (sec_def &&
- sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
- /* for pr_warn */
- prog->type = sec_def->prog_type;
- goto invalid_prog;
- }
+ /* prevent the use of BPF prog with invalid type */
+ if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
+ pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
+ map->name, prog->name);
+ return -EINVAL;
+ }
- prog->type = BPF_PROG_TYPE_STRUCT_OPS;
+ /* if we haven't yet processed this BPF program, record proper
+ * attach_btf_id and member_idx
+ */
+ if (!prog->attach_btf_id) {
prog->attach_btf_id = st_ops->type_id;
prog->expected_attach_type = member_idx;
- } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
- prog->attach_btf_id != st_ops->type_id ||
- prog->expected_attach_type != member_idx) {
- goto invalid_prog;
}
+
+ /* struct_ops BPF prog can be re-used between multiple
+ * .struct_ops & .struct_ops.link as long as it's the
+ * same struct_ops struct definition and the same
+ * function pointer field
+ */
+ if (prog->attach_btf_id != st_ops->type_id ||
+ prog->expected_attach_type != member_idx) {
+ pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
+ map->name, prog->name, prog->sec_name, prog->type,
+ prog->attach_btf_id, prog->expected_attach_type, name);
+ return -EINVAL;
+ }
+
st_ops->progs[member_idx] = prog;
}
return 0;
-
-invalid_prog:
- pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
- map->name, prog->name, prog->section_name, prog->type,
- prog->attach_btf_id, prog->expected_attach_type, name);
- return -EINVAL;
}
#define BTF_TRACE_PREFIX "btf_trace_"
#define BTF_LSM_PREFIX "bpf_lsm_"
+#define BTF_ITER_PREFIX "bpf_iter_"
#define BTF_MAX_NAME_SIZE 128
+void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
+ const char **prefix, int *kind)
+{
+ switch (attach_type) {
+ case BPF_TRACE_RAW_TP:
+ *prefix = BTF_TRACE_PREFIX;
+ *kind = BTF_KIND_TYPEDEF;
+ break;
+ case BPF_LSM_MAC:
+ case BPF_LSM_CGROUP:
+ *prefix = BTF_LSM_PREFIX;
+ *kind = BTF_KIND_FUNC;
+ break;
+ case BPF_TRACE_ITER:
+ *prefix = BTF_ITER_PREFIX;
+ *kind = BTF_KIND_FUNC;
+ break;
+ default:
+ *prefix = "";
+ *kind = BTF_KIND_FUNC;
+ }
+}
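/* A minimal sketch of how the helper above resolves attach targets; the
 * attach name is only an example. For a raw tracepoint attach type the
 * caller ends up searching vmlinux BTF for "btf_trace_<name>" of typedef
 * kind.
 */
static void prefix_kind_example(void)
{
	const char *prefix;
	int kind;

	btf_get_kernel_prefix_kind(BPF_TRACE_RAW_TP, &prefix, &kind);
	/* now prefix == "btf_trace_" and kind == BTF_KIND_TYPEDEF, so an
	 * attach name like "sched_switch" is looked up as
	 * "btf_trace_sched_switch".
	 */
}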
+
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
const char *name, __u32 kind)
{
@@ -6596,7 +9129,7 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
ret = snprintf(btf_type_name, sizeof(btf_type_name),
"%s%s", prefix, name);
/* snprintf returns the number of characters written excluding the
- * the terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
+ * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
* indicates truncation.
*/
if (ret < 0 || ret >= sizeof(btf_type_name))
@@ -6604,60 +9137,61 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
return btf__find_by_name_kind(btf, btf_type_name, kind);
}
-static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name,
- enum bpf_attach_type attach_type)
+static inline int find_attach_btf_id(struct btf *btf, const char *name,
+ enum bpf_attach_type attach_type)
{
- int err;
-
- if (attach_type == BPF_TRACE_RAW_TP)
- err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
- BTF_KIND_TYPEDEF);
- else if (attach_type == BPF_LSM_MAC)
- err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
- BTF_KIND_FUNC);
- else
- err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
-
- if (err <= 0)
- pr_warn("%s is not found in vmlinux BTF\n", name);
+ const char *prefix;
+ int kind;
- return err;
+ btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
+ return find_btf_by_prefix_kind(btf, prefix, name, kind);
}
int libbpf_find_vmlinux_btf_id(const char *name,
enum bpf_attach_type attach_type)
{
struct btf *btf;
+ int err;
- btf = libbpf_find_kernel_btf();
- if (IS_ERR(btf)) {
+ btf = btf__load_vmlinux_btf();
+ err = libbpf_get_error(btf);
+ if (err) {
pr_warn("vmlinux BTF is not found\n");
- return -EINVAL;
+ return libbpf_err(err);
}
- return __find_vmlinux_btf_id(btf, name, attach_type);
+ err = find_attach_btf_id(btf, name, attach_type);
+ if (err <= 0)
+ pr_warn("%s is not found in vmlinux BTF\n", name);
+
+ btf__free(btf);
+ return libbpf_err(err);
}
static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
- struct bpf_prog_info_linear *info_linear;
- struct bpf_prog_info *info;
- struct btf *btf = NULL;
- int err = -EINVAL;
+ struct bpf_prog_info info;
+ __u32 info_len = sizeof(info);
+ struct btf *btf;
+ int err;
- info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
- if (IS_ERR_OR_NULL(info_linear)) {
- pr_warn("failed get_prog_info_linear for FD %d\n",
- attach_prog_fd);
- return -EINVAL;
+ memset(&info, 0, info_len);
+ err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
+ if (err) {
+ pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
+ attach_prog_fd, err);
+ return err;
}
- info = &info_linear->info;
- if (!info->btf_id) {
+
+ err = -EINVAL;
+ if (!info.btf_id) {
pr_warn("The target program doesn't have BTF\n");
goto out;
}
- if (btf__get_from_id(info->btf_id, &btf)) {
- pr_warn("Failed to get BTF of the program\n");
+ btf = btf__load_from_kernel_by_id(info.btf_id);
+ err = libbpf_get_error(btf);
+ if (err) {
+ pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
goto out;
}
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
@@ -6667,109 +9201,233 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
goto out;
}
out:
- free(info_linear);
return err;
}
-static int libbpf_find_attach_btf_id(struct bpf_program *prog)
+static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
+ enum bpf_attach_type attach_type,
+ int *btf_obj_fd, int *btf_type_id)
+{
+ int ret, i;
+
+ ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
+ if (ret > 0) {
+ *btf_obj_fd = 0; /* vmlinux BTF */
+ *btf_type_id = ret;
+ return 0;
+ }
+ if (ret != -ENOENT)
+ return ret;
+
+ ret = load_module_btfs(obj);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < obj->btf_module_cnt; i++) {
+ const struct module_btf *mod = &obj->btf_modules[i];
+
+ ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
+ if (ret > 0) {
+ *btf_obj_fd = mod->fd;
+ *btf_type_id = ret;
+ return 0;
+ }
+ if (ret == -ENOENT)
+ continue;
+
+ return ret;
+ }
+
+ return -ESRCH;
+}
+
+static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
+ int *btf_obj_fd, int *btf_type_id)
{
enum bpf_attach_type attach_type = prog->expected_attach_type;
__u32 attach_prog_fd = prog->attach_prog_fd;
- const char *name = prog->section_name;
- int i, err;
+ int err = 0;
- if (!name)
- return -EINVAL;
+ /* BPF program's BTF ID */
+ if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
+ if (!attach_prog_fd) {
+ pr_warn("prog '%s': attach program FD is not set\n", prog->name);
+ return -EINVAL;
+ }
+ err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
+ if (err < 0) {
+ pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
+ prog->name, attach_prog_fd, attach_name, err);
+ return err;
+ }
+ *btf_obj_fd = 0;
+ *btf_type_id = err;
+ return 0;
+ }
- for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
- if (!section_defs[i].is_attach_btf)
- continue;
- if (strncmp(name, section_defs[i].sec, section_defs[i].len))
- continue;
- if (attach_prog_fd)
- err = libbpf_find_prog_btf_id(name + section_defs[i].len,
- attach_prog_fd);
- else
- err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
- name + section_defs[i].len,
- attach_type);
+ /* kernel/module BTF ID */
+ if (prog->obj->gen_loader) {
+ bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
+ *btf_obj_fd = 0;
+ *btf_type_id = 1;
+ } else {
+ err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
+ }
+ if (err) {
+ pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
+ prog->name, attach_name, err);
return err;
}
- pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name);
- return -ESRCH;
+ return 0;
}
int libbpf_attach_type_by_name(const char *name,
enum bpf_attach_type *attach_type)
{
char *type_names;
- int i;
+ const struct bpf_sec_def *sec_def;
if (!name)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
- for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
- if (strncmp(name, section_defs[i].sec, section_defs[i].len))
- continue;
- if (!section_defs[i].is_attachable)
- return -EINVAL;
- *attach_type = section_defs[i].attach_type;
- return 0;
- }
- pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
- type_names = libbpf_get_type_names(true);
- if (type_names != NULL) {
- pr_debug("attachable section(type) names are:%s\n", type_names);
- free(type_names);
+ sec_def = find_sec_def(name);
+ if (!sec_def) {
+ pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
+ type_names = libbpf_get_type_names(true);
+ if (type_names != NULL) {
+ pr_debug("attachable section(type) names are:%s\n", type_names);
+ free(type_names);
+ }
+
+ return libbpf_err(-EINVAL);
}
- return -EINVAL;
+ if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
+ return libbpf_err(-EINVAL);
+ if (!(sec_def->cookie & SEC_ATTACHABLE))
+ return libbpf_err(-EINVAL);
+
+ *attach_type = sec_def->expected_attach_type;
+ return 0;
}
int bpf_map__fd(const struct bpf_map *map)
{
- return map ? map->fd : -EINVAL;
+ return map ? map->fd : libbpf_err(-EINVAL);
}
-const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
+static bool map_uses_real_name(const struct bpf_map *map)
{
- return map ? &map->def : ERR_PTR(-EINVAL);
+ /* Since libbpf started to support custom .data.* and .rodata.* maps,
+ * their user-visible name differs from kernel-visible name. Users see
+ * such map's corresponding ELF section name as a map name.
+ * This check distinguishes .data/.rodata from .data.* and .rodata.*
+ * maps to know which name has to be returned to the user.
+ */
+ if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
+ return true;
+ if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
+ return true;
+ return false;
}
const char *bpf_map__name(const struct bpf_map *map)
{
- return map ? map->name : NULL;
+ if (!map)
+ return NULL;
+
+ if (map_uses_real_name(map))
+ return map->real_name;
+
+ return map->name;
}
-__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
+enum bpf_map_type bpf_map__type(const struct bpf_map *map)
{
- return map ? map->btf_key_type_id : 0;
+ return map->def.type;
}
-__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
+int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
{
- return map ? map->btf_value_type_id : 0;
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
+ map->def.type = type;
+ return 0;
}
-int bpf_map__set_priv(struct bpf_map *map, void *priv,
- bpf_map_clear_priv_t clear_priv)
+__u32 bpf_map__map_flags(const struct bpf_map *map)
{
- if (!map)
- return -EINVAL;
+ return map->def.map_flags;
+}
- if (map->priv) {
- if (map->clear_priv)
- map->clear_priv(map, map->priv);
- }
+int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
+{
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
+ map->def.map_flags = flags;
+ return 0;
+}
+
+__u64 bpf_map__map_extra(const struct bpf_map *map)
+{
+ return map->map_extra;
+}
+
+int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
+{
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
+ map->map_extra = map_extra;
+ return 0;
+}
+
+__u32 bpf_map__numa_node(const struct bpf_map *map)
+{
+ return map->numa_node;
+}
+
+int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
+{
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
+ map->numa_node = numa_node;
+ return 0;
+}
+
+__u32 bpf_map__key_size(const struct bpf_map *map)
+{
+ return map->def.key_size;
+}
- map->priv = priv;
- map->clear_priv = clear_priv;
+int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
+{
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
+ map->def.key_size = size;
return 0;
}
-void *bpf_map__priv(const struct bpf_map *map)
+__u32 bpf_map__value_size(const struct bpf_map *map)
{
- return map ? map->priv : ERR_PTR(-EINVAL);
+ return map->def.value_size;
+}
+
+int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
+{
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
+ map->def.value_size = size;
+ return 0;
+}
+
+__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
+{
+ return map ? map->btf_key_type_id : 0;
+}
+
+__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
+{
+ return map ? map->btf_value_type_id : 0;
}
int bpf_map__set_initial_value(struct bpf_map *map,
@@ -6777,15 +9435,18 @@ int bpf_map__set_initial_value(struct bpf_map *map,
{
if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
size != map->def.value_size || map->fd >= 0)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
memcpy(map->mmaped, data, size);
return 0;
}
-bool bpf_map__is_offload_neutral(const struct bpf_map *map)
+const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
{
- return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
+ if (!map->mmaped)
+ return NULL;
+ *psize = map->def.value_size;
+ return map->mmaped;
}
bool bpf_map__is_internal(const struct bpf_map *map)
@@ -6793,20 +9454,32 @@ bool bpf_map__is_internal(const struct bpf_map *map)
return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}
-void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
+__u32 bpf_map__ifindex(const struct bpf_map *map)
{
+ return map->map_ifindex;
+}
+
+int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
+{
+ if (map->fd >= 0)
+ return libbpf_err(-EBUSY);
map->map_ifindex = ifindex;
+ return 0;
}
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
if (!bpf_map_type__is_map_in_map(map->def.type)) {
pr_warn("error: unsupported map type\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
}
if (map->inner_map_fd != -1) {
pr_warn("error: inner_map_fd already specified\n");
- return -EINVAL;
+ return libbpf_err(-EINVAL);
+ }
+ if (map->inner_map) {
+ bpf_map__destroy(map->inner_map);
+ zfree(&map->inner_map);
}
map->inner_map_fd = fd;
return 0;
@@ -6819,7 +9492,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
struct bpf_map *s, *e;
if (!obj || !obj->maps)
- return NULL;
+ return errno = EINVAL, NULL;
s = obj->maps;
e = obj->maps + obj->nr_maps;
@@ -6827,7 +9500,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
if ((m < s) || (m >= e)) {
pr_warn("error in %s: map handler doesn't belong to object\n",
__func__);
- return NULL;
+ return errno = EINVAL, NULL;
}
idx = (m - obj->maps) + i;
@@ -6837,7 +9510,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
}
struct bpf_map *
-bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
+bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
if (prev == NULL)
return obj->maps;
@@ -6846,7 +9519,7 @@ bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
}
struct bpf_map *
-bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
+bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
if (next == NULL) {
if (!obj->nr_maps)
@@ -6863,10 +9536,25 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
struct bpf_map *pos;
bpf_object__for_each_map(pos, obj) {
- if (pos->name && !strcmp(pos->name, name))
+ /* if it's a special internal map name (which always starts
+ * with dot) then check if that special name matches the
+ * real map name (ELF section name)
+ */
+ if (name[0] == '.') {
+ if (pos->real_name && strcmp(pos->real_name, name) == 0)
+ return pos;
+ continue;
+ }
+ /* otherwise map name has to be an exact match */
+ if (map_uses_real_name(pos)) {
+ if (strcmp(pos->real_name, name) == 0)
+ return pos;
+ continue;
+ }
+ if (strcmp(pos->name, name) == 0)
return pos;
}
- return NULL;
+ return errno = ENOENT, NULL;
}
int
@@ -6875,113 +9563,133 @@ bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}
-struct bpf_map *
-bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
+static int validate_map_op(const struct bpf_map *map, size_t key_sz,
+ size_t value_sz, bool check_value_sz)
{
- return ERR_PTR(-ENOTSUP);
+ if (map->fd <= 0)
+ return -ENOENT;
+
+ if (map->def.key_size != key_sz) {
+ pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
+ map->name, key_sz, map->def.key_size);
+ return -EINVAL;
+ }
+
+ if (!check_value_sz)
+ return 0;
+
+ switch (map->def.type) {
+ case BPF_MAP_TYPE_PERCPU_ARRAY:
+ case BPF_MAP_TYPE_PERCPU_HASH:
+ case BPF_MAP_TYPE_LRU_PERCPU_HASH:
+ case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
+ int num_cpu = libbpf_num_possible_cpus();
+ size_t elem_sz = roundup(map->def.value_size, 8);
+
+ if (value_sz != num_cpu * elem_sz) {
+ pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
+ map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
+ return -EINVAL;
+ }
+ break;
+ }
+ default:
+ if (map->def.value_size != value_sz) {
+ pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
+ map->name, value_sz, map->def.value_size);
+ return -EINVAL;
+ }
+ break;
+ }
+ return 0;
}
-long libbpf_get_error(const void *ptr)
+int bpf_map__lookup_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ void *value, size_t value_sz, __u64 flags)
{
- return PTR_ERR_OR_ZERO(ptr);
+ int err;
+
+ err = validate_map_op(map, key_sz, value_sz, true);
+ if (err)
+ return libbpf_err(err);
+
+ return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
}
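/* A minimal usage sketch for the size-checked accessors above, assuming a
 * loaded object with a map named "my_map" whose key is __u32 and value is
 * __u64. For per-CPU map types, validate_map_op() expects value_sz ==
 * libbpf_num_possible_cpus() * roundup(value_size, 8); e.g. a 4-byte value
 * on 8 possible CPUs needs a 64-byte buffer.
 */
#include <errno.h>
#include <bpf/libbpf.h>

static int lookup_example(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
	__u32 key = 0;
	__u64 value = 0;

	if (!map)
		return -ENOENT;

	/* key/value sizes are validated against the map definition */
	return bpf_map__lookup_elem(map, &key, sizeof(key), &value, sizeof(value), 0);
}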
-int bpf_prog_load(const char *file, enum bpf_prog_type type,
- struct bpf_object **pobj, int *prog_fd)
+int bpf_map__update_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ const void *value, size_t value_sz, __u64 flags)
{
- struct bpf_prog_load_attr attr;
+ int err;
- memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
- attr.file = file;
- attr.prog_type = type;
- attr.expected_attach_type = 0;
+ err = validate_map_op(map, key_sz, value_sz, true);
+ if (err)
+ return libbpf_err(err);
- return bpf_prog_load_xattr(&attr, pobj, prog_fd);
+ return bpf_map_update_elem(map->fd, key, value, flags);
}
-int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
- struct bpf_object **pobj, int *prog_fd)
+int bpf_map__delete_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz, __u64 flags)
{
- struct bpf_object_open_attr open_attr = {};
- struct bpf_program *prog, *first_prog = NULL;
- struct bpf_object *obj;
- struct bpf_map *map;
int err;
- if (!attr)
- return -EINVAL;
- if (!attr->file)
- return -EINVAL;
-
- open_attr.file = attr->file;
- open_attr.prog_type = attr->prog_type;
+ err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
+ if (err)
+ return libbpf_err(err);
- obj = bpf_object__open_xattr(&open_attr);
- if (IS_ERR_OR_NULL(obj))
- return -ENOENT;
+ return bpf_map_delete_elem_flags(map->fd, key, flags);
+}
- bpf_object__for_each_program(prog, obj) {
- enum bpf_attach_type attach_type = attr->expected_attach_type;
- /*
- * to preserve backwards compatibility, bpf_prog_load treats
- * attr->prog_type, if specified, as an override to whatever
- * bpf_object__open guessed
- */
- if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
- bpf_program__set_type(prog, attr->prog_type);
- bpf_program__set_expected_attach_type(prog,
- attach_type);
- }
- if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
- /*
- * we haven't guessed from section name and user
- * didn't provide a fallback type, too bad...
- */
- bpf_object__close(obj);
- return -EINVAL;
- }
+int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ void *value, size_t value_sz, __u64 flags)
+{
+ int err;
- prog->prog_ifindex = attr->ifindex;
- prog->log_level = attr->log_level;
- prog->prog_flags = attr->prog_flags;
- if (!first_prog)
- first_prog = prog;
- }
+ err = validate_map_op(map, key_sz, value_sz, true);
+ if (err)
+ return libbpf_err(err);
- bpf_object__for_each_map(map, obj) {
- if (!bpf_map__is_offload_neutral(map))
- map->map_ifindex = attr->ifindex;
- }
+ return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
+}
- if (!first_prog) {
- pr_warn("object file doesn't contain bpf program\n");
- bpf_object__close(obj);
- return -ENOENT;
- }
+int bpf_map__get_next_key(const struct bpf_map *map,
+ const void *cur_key, void *next_key, size_t key_sz)
+{
+ int err;
- err = bpf_object__load(obj);
- if (err) {
- bpf_object__close(obj);
- return -EINVAL;
- }
+ err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
+ if (err)
+ return libbpf_err(err);
- *pobj = obj;
- *prog_fd = bpf_program__fd(first_prog);
- return 0;
+ return bpf_map_get_next_key(map->fd, cur_key, next_key);
}
-struct bpf_link {
- int (*detach)(struct bpf_link *link);
- int (*destroy)(struct bpf_link *link);
- char *pin_path; /* NULL, if not pinned */
- int fd; /* hook FD, -1 if not applicable */
- bool disconnected;
-};
+long libbpf_get_error(const void *ptr)
+{
+ if (!IS_ERR_OR_NULL(ptr))
+ return 0;
+
+ if (IS_ERR(ptr))
+ errno = -PTR_ERR(ptr);
+
+ /* If ptr == NULL, then errno should be already set by the failing
+ * API, because libbpf never returns NULL on success and it now always
+ * sets errno on error. So no extra errno handling for ptr == NULL
+ * case.
+ */
+ return -errno;
+}
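/* A minimal sketch of the error convention implemented above: new-style
 * pointer-returning APIs set errno and return NULL, so both the legacy
 * ERR_PTR style and the errno style can be checked via libbpf_get_error().
 * The object path is an arbitrary example.
 */
#include <stdio.h>
#include <bpf/libbpf.h>

static int open_object(const char *path, struct bpf_object **out)
{
	struct bpf_object *obj = bpf_object__open(path);
	long err = libbpf_get_error(obj);

	if (err) {
		fprintf(stderr, "failed to open %s: %ld\n", path, err);
		return (int)err;
	}
	*out = obj;
	return 0;
}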
/* Replace link's underlying BPF program with the new one */
int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
{
- return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
+ int ret;
+
+ ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
+ return libbpf_err_errno(ret);
}
/* Release "ownership" of underlying BPF resource (typically, BPF program
@@ -7003,18 +9711,19 @@ int bpf_link__destroy(struct bpf_link *link)
{
int err = 0;
- if (!link)
+ if (IS_ERR_OR_NULL(link))
return 0;
if (!link->disconnected && link->detach)
err = link->detach(link);
- if (link->destroy)
- link->destroy(link);
if (link->pin_path)
free(link->pin_path);
- free(link);
+ if (link->dealloc)
+ link->dealloc(link);
+ else
+ free(link);
- return err;
+ return libbpf_err(err);
}
int bpf_link__fd(const struct bpf_link *link)
@@ -7029,7 +9738,7 @@ const char *bpf_link__pin_path(const struct bpf_link *link)
static int bpf_link__detach_fd(struct bpf_link *link)
{
- return close(link->fd);
+ return libbpf_err_errno(close(link->fd));
}
struct bpf_link *bpf_link__open(const char *path)
@@ -7041,13 +9750,13 @@ struct bpf_link *bpf_link__open(const char *path)
if (fd < 0) {
fd = -errno;
pr_warn("failed to open link at %s: %d\n", path, fd);
- return ERR_PTR(fd);
+ return libbpf_err_ptr(fd);
}
link = calloc(1, sizeof(*link));
if (!link) {
close(fd);
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
}
link->detach = &bpf_link__detach_fd;
link->fd = fd;
@@ -7055,33 +9764,38 @@ struct bpf_link *bpf_link__open(const char *path)
link->pin_path = strdup(path);
if (!link->pin_path) {
bpf_link__destroy(link);
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
}
return link;
}
+int bpf_link__detach(struct bpf_link *link)
+{
+ return bpf_link_detach(link->fd) ? -errno : 0;
+}
+
int bpf_link__pin(struct bpf_link *link, const char *path)
{
int err;
if (link->pin_path)
- return -EBUSY;
+ return libbpf_err(-EBUSY);
err = make_parent_dir(path);
if (err)
- return err;
+ return libbpf_err(err);
err = check_path(path);
if (err)
- return err;
+ return libbpf_err(err);
link->pin_path = strdup(path);
if (!link->pin_path)
- return -ENOMEM;
+ return libbpf_err(-ENOMEM);
if (bpf_obj_pin(link->fd, link->pin_path)) {
err = -errno;
zfree(&link->pin_path);
- return err;
+ return libbpf_err(err);
}
pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
@@ -7093,7 +9807,7 @@ int bpf_link__unpin(struct bpf_link *link)
int err;
if (!link->pin_path)
- return -EINVAL;
+ return libbpf_err(-EINVAL);
err = unlink(link->pin_path);
if (err != 0)
@@ -7104,60 +9818,132 @@ int bpf_link__unpin(struct bpf_link *link)
return 0;
}
-static int bpf_link__detach_perf_event(struct bpf_link *link)
+struct bpf_link_perf {
+ struct bpf_link link;
+ int perf_event_fd;
+ /* legacy kprobe support: keep track of probe identifier and type */
+ char *legacy_probe_name;
+ bool legacy_is_kprobe;
+ bool legacy_is_retprobe;
+};
+
+static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
+static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
+
+static int bpf_link_perf_detach(struct bpf_link *link)
{
- int err;
+ struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
+ int err = 0;
- err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
- if (err)
+ if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
err = -errno;
+ if (perf_link->perf_event_fd != link->fd)
+ close(perf_link->perf_event_fd);
close(link->fd);
+
+ /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
+ if (perf_link->legacy_probe_name) {
+ if (perf_link->legacy_is_kprobe) {
+ err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
+ perf_link->legacy_is_retprobe);
+ } else {
+ err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
+ perf_link->legacy_is_retprobe);
+ }
+ }
+
return err;
}
-struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
- int pfd)
+static void bpf_link_perf_dealloc(struct bpf_link *link)
+{
+ struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
+
+ free(perf_link->legacy_probe_name);
+ free(perf_link);
+}
+
+struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
+ const struct bpf_perf_event_opts *opts)
{
char errmsg[STRERR_BUFSIZE];
- struct bpf_link *link;
- int prog_fd, err;
+ struct bpf_link_perf *link;
+ int prog_fd, link_fd = -1, err;
+ bool force_ioctl_attach;
+
+ if (!OPTS_VALID(opts, bpf_perf_event_opts))
+ return libbpf_err_ptr(-EINVAL);
if (pfd < 0) {
- pr_warn("program '%s': invalid perf event FD %d\n",
- bpf_program__title(prog, false), pfd);
- return ERR_PTR(-EINVAL);
+ pr_warn("prog '%s': invalid perf event FD %d\n",
+ prog->name, pfd);
+ return libbpf_err_ptr(-EINVAL);
}
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
- bpf_program__title(prog, false));
- return ERR_PTR(-EINVAL);
+ pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
- link->detach = &bpf_link__detach_perf_event;
- link->fd = pfd;
+ return libbpf_err_ptr(-ENOMEM);
+ link->link.detach = &bpf_link_perf_detach;
+ link->link.dealloc = &bpf_link_perf_dealloc;
+ link->perf_event_fd = pfd;
+
+ force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
+ if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
+ DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
+ .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
+
+ link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
+ if (link_fd < 0) {
+ err = -errno;
+ pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
+ prog->name, pfd,
+ err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_out;
+ }
+ link->link.fd = link_fd;
+ } else {
+ if (OPTS_GET(opts, bpf_cookie, 0)) {
+ pr_warn("prog '%s': user context value is not supported\n", prog->name);
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
- if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
- err = -errno;
- free(link);
- pr_warn("program '%s': failed to attach to pfd %d: %s\n",
- bpf_program__title(prog, false), pfd,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return ERR_PTR(err);
+ if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
+ err = -errno;
+ pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
+ prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ if (err == -EPROTO)
+ pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
+ prog->name, pfd);
+ goto err_out;
+ }
+ link->link.fd = pfd;
}
if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
err = -errno;
- free(link);
- pr_warn("program '%s': failed to enable pfd %d: %s\n",
- bpf_program__title(prog, false), pfd,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return ERR_PTR(err);
+ pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
+ prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_out;
}
- return link;
+
+ return &link->link;
+err_out:
+ if (link_fd >= 0)
+ close(link_fd);
+ free(link);
+ return libbpf_err_ptr(err);
+}
+
+struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
+{
+ return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
}
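/* A minimal usage sketch for the opts-based attach added above, assuming a
 * loaded program and an already-opened perf event FD; the cookie value is
 * arbitrary. On kernels without FEAT_PERF_LINK, passing a cookie fails with
 * -EOPNOTSUPP, as handled in the function above.
 */
static struct bpf_link *attach_with_cookie(struct bpf_program *prog, int pfd)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts,
		.bpf_cookie = 0x1234,	/* arbitrary example value */
	);

	return bpf_program__attach_perf_event_opts(prog, pfd, &opts);
}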
/*
@@ -7218,12 +10004,21 @@ static int determine_uprobe_retprobe_bit(void)
return parse_uint_from_file(file, "config:%d\n");
}
+#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
+#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
+
static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
- uint64_t offset, int pid)
+ uint64_t offset, int pid, size_t ref_ctr_off)
{
- struct perf_event_attr attr = {};
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ struct perf_event_attr attr;
char errmsg[STRERR_BUFSIZE];
- int type, pfd, err;
+ int type, pfd;
+
+ if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
+ return -EINVAL;
+
+ memset(&attr, 0, attr_sz);
type = uprobe ? determine_uprobe_perf_type()
: determine_kprobe_perf_type();
@@ -7245,8 +10040,9 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
}
attr.config |= 1 << bit;
}
- attr.size = sizeof(attr);
+ attr.size = attr_sz;
attr.type = type;
+ attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
attr.config2 = offset; /* kprobe_addr or probe_offset */
@@ -7255,100 +10051,1264 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
pid < 0 ? -1 : pid /* pid */,
pid == -1 ? 0 : -1 /* cpu */,
-1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
- if (pfd < 0) {
+ return pfd >= 0 ? pfd : -errno;
+}
+
+static int append_to_file(const char *file, const char *fmt, ...)
+{
+ int fd, n, err = 0;
+ va_list ap;
+ char buf[1024];
+
+ va_start(ap, fmt);
+ n = vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ if (n < 0 || n >= sizeof(buf))
+ return -EINVAL;
+
+ fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
+ if (fd < 0)
+ return -errno;
+
+ if (write(fd, buf, n) < 0)
err = -errno;
- pr_warn("%s perf_event_open() failed: %s\n",
- uprobe ? "uprobe" : "kprobe",
+
+ close(fd);
+ return err;
+}
+
+#define DEBUGFS "/sys/kernel/debug/tracing"
+#define TRACEFS "/sys/kernel/tracing"
+
+static bool use_debugfs(void)
+{
+ static int has_debugfs = -1;
+
+ if (has_debugfs < 0)
+ has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
+
+ return has_debugfs == 1;
+}
+
+static const char *tracefs_path(void)
+{
+ return use_debugfs() ? DEBUGFS : TRACEFS;
+}
+
+static const char *tracefs_kprobe_events(void)
+{
+ return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
+}
+
+static const char *tracefs_uprobe_events(void)
+{
+ return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
+}
+
+static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
+ const char *kfunc_name, size_t offset)
+{
+ static int index = 0;
+ int i;
+
+ snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
+ __sync_fetch_and_add(&index, 1));
+
+	/* sanitize kfunc_name in the generated probe name */
+ for (i = 0; buf[i]; i++) {
+ if (!isalnum(buf[i]))
+ buf[i] = '_';
+ }
+}
+
+static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
+ const char *kfunc_name, size_t offset)
+{
+ return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
+ retprobe ? 'r' : 'p',
+ retprobe ? "kretprobes" : "kprobes",
+ probe_name, kfunc_name, offset);
+}
+
+static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
+{
+ return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
+ retprobe ? "kretprobes" : "kprobes", probe_name);
+}
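/* A worked example of the legacy interface above (values are hypothetical):
 * for a kretprobe on "do_sys_open" at offset 0 created by PID 1234,
 * add_kprobe_event_legacy() appends roughly
 *
 *   r:kretprobes/libbpf_1234_do_sys_open_0x0_0 do_sys_open+0x0
 *
 * to <tracefs>/kprobe_events, and remove_kprobe_event_legacy() later appends
 * "-:kretprobes/libbpf_1234_do_sys_open_0x0_0" to delete it.
 */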
+
+static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
+{
+ char file[256];
+
+ snprintf(file, sizeof(file), "%s/events/%s/%s/id",
+ tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
+
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
+ const char *kfunc_name, size_t offset, int pid)
+{
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ struct perf_event_attr attr;
+ char errmsg[STRERR_BUFSIZE];
+ int type, pfd, err;
+
+ err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
+ if (err < 0) {
+ pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
+ kfunc_name, offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return err;
}
+ type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
+ if (type < 0) {
+ err = type;
+ pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
+ kfunc_name, offset,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_clean_legacy;
+ }
+
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
+ attr.config = type;
+ attr.type = PERF_TYPE_TRACEPOINT;
+
+ pfd = syscall(__NR_perf_event_open, &attr,
+ pid < 0 ? -1 : pid, /* pid */
+ pid == -1 ? 0 : -1, /* cpu */
+ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+ if (pfd < 0) {
+ err = -errno;
+ pr_warn("legacy kprobe perf_event_open() failed: %s\n",
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_clean_legacy;
+ }
return pfd;
+
+err_clean_legacy:
+ /* Clear the newly added legacy kprobe_event */
+ remove_kprobe_event_legacy(probe_name, retprobe);
+ return err;
}
-struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
- bool retprobe,
- const char *func_name)
+static const char *arch_specific_syscall_pfx(void)
+{
+#if defined(__x86_64__)
+ return "x64";
+#elif defined(__i386__)
+ return "ia32";
+#elif defined(__s390x__)
+ return "s390x";
+#elif defined(__s390__)
+ return "s390";
+#elif defined(__arm__)
+ return "arm";
+#elif defined(__aarch64__)
+ return "arm64";
+#elif defined(__mips__)
+ return "mips";
+#elif defined(__riscv)
+ return "riscv";
+#elif defined(__powerpc__)
+ return "powerpc";
+#elif defined(__powerpc64__)
+ return "powerpc64";
+#else
+ return NULL;
+#endif
+}
+
+static int probe_kern_syscall_wrapper(void)
+{
+ char syscall_name[64];
+ const char *ksys_pfx;
+
+ ksys_pfx = arch_specific_syscall_pfx();
+ if (!ksys_pfx)
+ return 0;
+
+ snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
+
+ if (determine_kprobe_perf_type() >= 0) {
+ int pfd;
+
+ pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
+ if (pfd >= 0)
+ close(pfd);
+
+ return pfd >= 0 ? 1 : 0;
+ } else { /* legacy mode */
+ char probe_name[128];
+
+ gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
+ if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
+ return 0;
+
+ (void)remove_kprobe_event_legacy(probe_name, false);
+ return 1;
+ }
+}
+
+struct bpf_link *
+bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
+ const char *func_name,
+ const struct bpf_kprobe_opts *opts)
{
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+ enum probe_attach_mode attach_mode;
char errmsg[STRERR_BUFSIZE];
+ char *legacy_probe = NULL;
struct bpf_link *link;
+ size_t offset;
+ bool retprobe, legacy;
int pfd, err;
- pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
- 0 /* offset */, -1 /* pid */);
+ if (!OPTS_VALID(opts, bpf_kprobe_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
+ retprobe = OPTS_GET(opts, retprobe, false);
+ offset = OPTS_GET(opts, offset, 0);
+ pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
+
+ legacy = determine_kprobe_perf_type() < 0;
+ switch (attach_mode) {
+ case PROBE_ATTACH_MODE_LEGACY:
+ legacy = true;
+ pe_opts.force_ioctl_attach = true;
+ break;
+ case PROBE_ATTACH_MODE_PERF:
+ if (legacy)
+ return libbpf_err_ptr(-ENOTSUP);
+ pe_opts.force_ioctl_attach = true;
+ break;
+ case PROBE_ATTACH_MODE_LINK:
+ if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+ return libbpf_err_ptr(-ENOTSUP);
+ break;
+ case PROBE_ATTACH_MODE_DEFAULT:
+ break;
+ default:
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ if (!legacy) {
+ pfd = perf_event_open_probe(false /* uprobe */, retprobe,
+ func_name, offset,
+ -1 /* pid */, 0 /* ref_ctr_off */);
+ } else {
+ char probe_name[256];
+
+ gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
+ func_name, offset);
+
+ legacy_probe = strdup(probe_name);
+ if (!legacy_probe)
+ return libbpf_err_ptr(-ENOMEM);
+
+ pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
+ offset, -1 /* pid */);
+ }
if (pfd < 0) {
- pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
- bpf_program__title(prog, false),
- retprobe ? "kretprobe" : "kprobe", func_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ err = -errno;
+ pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
+ prog->name, retprobe ? "kretprobe" : "kprobe",
+ func_name, offset,
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_out;
}
- link = bpf_program__attach_perf_event(prog, pfd);
- if (IS_ERR(link)) {
+ link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
+ err = libbpf_get_error(link);
+ if (err) {
close(pfd);
- err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to %s '%s': %s\n",
- bpf_program__title(prog, false),
- retprobe ? "kretprobe" : "kprobe", func_name,
+ pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
+ prog->name, retprobe ? "kretprobe" : "kprobe",
+ func_name, offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return link;
+ goto err_clean_legacy;
}
+ if (legacy) {
+ struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
+
+ perf_link->legacy_probe_name = legacy_probe;
+ perf_link->legacy_is_kprobe = true;
+ perf_link->legacy_is_retprobe = retprobe;
+ }
+
return link;
+
+err_clean_legacy:
+ if (legacy)
+ remove_kprobe_event_legacy(legacy_probe, retprobe);
+err_out:
+ free(legacy_probe);
+ return libbpf_err_ptr(err);
}
-static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
- struct bpf_program *prog)
+struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
+ bool retprobe,
+ const char *func_name)
{
- const char *func_name;
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
+ .retprobe = retprobe,
+ );
+
+ return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
+}
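/* A minimal usage sketch for the opts-based kprobe attach above; the kernel
 * function name and offset are arbitrary examples.
 */
static struct bpf_link *attach_kprobe_example(struct bpf_program *prog)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.offset = 0x10,
		.retprobe = false,
	);

	return bpf_program__attach_kprobe_opts(prog, "tcp_v4_connect", &opts);
}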
+
+struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
+ const char *syscall_name,
+ const struct bpf_ksyscall_opts *opts)
+{
+ LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
+ char func_name[128];
+
+ if (!OPTS_VALID(opts, bpf_ksyscall_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
+ /* arch_specific_syscall_pfx() should never return NULL here
+	 * because it is guarded by kernel_supports(). However, since the
+	 * compiler does not know that, an explicit "" fallback is kept
+	 * here as well.
+ */
+ snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
+ arch_specific_syscall_pfx() ? : "", syscall_name);
+ } else {
+ snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
+ }
+
+ kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
+ kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
+
+ return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
+}
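/* A minimal usage sketch for the ksyscall attach above; "openat" is an
 * arbitrary example. libbpf resolves it to the arch-specific
 * "__<arch>_sys_openat" kprobe target, or to "__se_sys_openat" on kernels
 * without syscall wrappers, as shown in the function above.
 */
static struct bpf_link *attach_ksyscall_example(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_ksyscall_opts, opts, .retprobe = false);

	return bpf_program__attach_ksyscall(prog, "openat", &opts);
}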
+
+/* Adapted from perf/util/string.c */
+static bool glob_match(const char *str, const char *pat)
+{
+ while (*str && *pat && *pat != '*') {
+ if (*pat == '?') { /* Matches any single character */
+ str++;
+ pat++;
+ continue;
+ }
+ if (*str != *pat)
+ return false;
+ str++;
+ pat++;
+ }
+ /* Check wild card */
+ if (*pat == '*') {
+ while (*pat == '*')
+ pat++;
+ if (!*pat) /* Tail wild card matches all */
+ return true;
+ while (*str)
+ if (glob_match(str++, pat))
+ return true;
+ }
+ return !*str && !*pat;
+}
+
+struct kprobe_multi_resolve {
+ const char *pattern;
+ unsigned long *addrs;
+ size_t cap;
+ size_t cnt;
+};
+
+static int
+resolve_kprobe_multi_cb(unsigned long long sym_addr, char sym_type,
+ const char *sym_name, void *ctx)
+{
+ struct kprobe_multi_resolve *res = ctx;
+ int err;
+
+ if (!glob_match(sym_name, res->pattern))
+ return 0;
+
+ err = libbpf_ensure_mem((void **) &res->addrs, &res->cap, sizeof(unsigned long),
+ res->cnt + 1);
+ if (err)
+ return err;
+
+ res->addrs[res->cnt++] = (unsigned long) sym_addr;
+ return 0;
+}
+
+struct bpf_link *
+bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
+ const char *pattern,
+ const struct bpf_kprobe_multi_opts *opts)
+{
+ LIBBPF_OPTS(bpf_link_create_opts, lopts);
+ struct kprobe_multi_resolve res = {
+ .pattern = pattern,
+ };
+ struct bpf_link *link = NULL;
+ char errmsg[STRERR_BUFSIZE];
+ const unsigned long *addrs;
+ int err, link_fd, prog_fd;
+ const __u64 *cookies;
+ const char **syms;
bool retprobe;
+ size_t cnt;
+
+ if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ syms = OPTS_GET(opts, syms, false);
+ addrs = OPTS_GET(opts, addrs, false);
+ cnt = OPTS_GET(opts, cnt, false);
+ cookies = OPTS_GET(opts, cookies, false);
+
+ if (!pattern && !addrs && !syms)
+ return libbpf_err_ptr(-EINVAL);
+ if (pattern && (addrs || syms || cookies || cnt))
+ return libbpf_err_ptr(-EINVAL);
+ if (!pattern && !cnt)
+ return libbpf_err_ptr(-EINVAL);
+ if (addrs && syms)
+ return libbpf_err_ptr(-EINVAL);
+
+ if (pattern) {
+ err = libbpf_kallsyms_parse(resolve_kprobe_multi_cb, &res);
+ if (err)
+ goto error;
+ if (!res.cnt) {
+ err = -ENOENT;
+ goto error;
+ }
+ addrs = res.addrs;
+ cnt = res.cnt;
+ }
+
+ retprobe = OPTS_GET(opts, retprobe, false);
- func_name = bpf_program__title(prog, false) + sec->len;
- retprobe = strcmp(sec->sec, "kretprobe/") == 0;
+ lopts.kprobe_multi.syms = syms;
+ lopts.kprobe_multi.addrs = addrs;
+ lopts.kprobe_multi.cookies = cookies;
+ lopts.kprobe_multi.cnt = cnt;
+ lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ err = -ENOMEM;
+ goto error;
+ }
+ link->detach = &bpf_link__detach_fd;
- return bpf_program__attach_kprobe(prog, retprobe, func_name);
+ prog_fd = bpf_program__fd(prog);
+ link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
+ if (link_fd < 0) {
+ err = -errno;
+ pr_warn("prog '%s': failed to attach: %s\n",
+ prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto error;
+ }
+ link->fd = link_fd;
+ free(res.addrs);
+ return link;
+
+error:
+ free(link);
+ free(res.addrs);
+ return libbpf_err_ptr(err);
}
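/* A minimal usage sketch for the multi-kprobe attach above: one link covers
 * every kallsyms function matching the glob pattern; the pattern is an
 * arbitrary example.
 */
static struct bpf_link *attach_multi_example(struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .retprobe = false);

	return bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &opts);
}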
-struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
- bool retprobe, pid_t pid,
- const char *binary_path,
- size_t func_offset)
+static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
+ unsigned long offset = 0;
+ const char *func_name;
+ char *func;
+ int n;
+
+ *link = NULL;
+
+ /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
+ if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
+ return 0;
+
+ opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
+ if (opts.retprobe)
+ func_name = prog->sec_name + sizeof("kretprobe/") - 1;
+ else
+ func_name = prog->sec_name + sizeof("kprobe/") - 1;
+
+ n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
+ if (n < 1) {
+ pr_warn("kprobe name is invalid: %s\n", func_name);
+ return -EINVAL;
+ }
+ if (opts.retprobe && offset != 0) {
+ free(func);
+ pr_warn("kretprobes do not support offset specification\n");
+ return -EINVAL;
+ }
+
+ opts.offset = offset;
+ *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
+ free(func);
+ return libbpf_get_error(*link);
+}
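/* The parser above accepts auto-attach section names of the form
 * SEC("kprobe/<func>") or SEC("kprobe/<func>+<offset>") (and the kretprobe
 * equivalents without an offset), e.g. SEC("kprobe/tcp_v4_connect+0x10"),
 * while bare SEC("kprobe")/SEC("kretprobe") opt out of auto-attach.
 */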
+
+static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ LIBBPF_OPTS(bpf_ksyscall_opts, opts);
+ const char *syscall_name;
+
+ *link = NULL;
+
+ /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
+ if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
+ return 0;
+
+ opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
+ if (opts.retprobe)
+ syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
+ else
+ syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
+
+ *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
+ return *link ? 0 : -errno;
+}
+
+static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
+ const char *spec;
+ char *pattern;
+ int n;
+
+ *link = NULL;
+
+ /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
+ if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
+ strcmp(prog->sec_name, "kretprobe.multi") == 0)
+ return 0;
+
+ opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
+ if (opts.retprobe)
+ spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
+ else
+ spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
+
+ n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
+ if (n < 1) {
+		pr_warn("kprobe multi pattern is invalid: %s\n", spec);
+ return -EINVAL;
+ }
+
+ *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
+ free(pattern);
+ return libbpf_get_error(*link);
+}
+
+static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
+ const char *binary_path, uint64_t offset)
+{
+ int i;
+
+ snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
+
+ /* sanitize binary_path in the probe name */
+ for (i = 0; buf[i]; i++) {
+ if (!isalnum(buf[i]))
+ buf[i] = '_';
+ }
+}
+
+static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
+ const char *binary_path, size_t offset)
+{
+ return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
+ retprobe ? 'r' : 'p',
+ retprobe ? "uretprobes" : "uprobes",
+ probe_name, binary_path, offset);
+}
+
+static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
+{
+ return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
+ retprobe ? "uretprobes" : "uprobes", probe_name);
+}
+
+static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
+{
+ char file[512];
+
+ snprintf(file, sizeof(file), "%s/events/%s/%s/id",
+ tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
+
+ return parse_uint_from_file(file, "%d\n");
+}
+
+static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
+ const char *binary_path, size_t offset, int pid)
+{
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ struct perf_event_attr attr;
+ int type, pfd, err;
+
+ err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
+ if (err < 0) {
+ pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
+ binary_path, (size_t)offset, err);
+ return err;
+ }
+ type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
+ if (type < 0) {
+ err = type;
+ pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
+ binary_path, offset, err);
+ goto err_clean_legacy;
+ }
+
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
+ attr.config = type;
+ attr.type = PERF_TYPE_TRACEPOINT;
+
+ pfd = syscall(__NR_perf_event_open, &attr,
+ pid < 0 ? -1 : pid, /* pid */
+ pid == -1 ? 0 : -1, /* cpu */
+ -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
+ if (pfd < 0) {
+ err = -errno;
+ pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
+ goto err_clean_legacy;
+ }
+ return pfd;
+
+err_clean_legacy:
+ /* Clear the newly added legacy uprobe_event */
+ remove_uprobe_event_legacy(probe_name, retprobe);
+ return err;
+}
+
+/* Return next ELF section of sh_type after scn, or first of that type if scn is NULL. */
+static Elf_Scn *elf_find_next_scn_by_type(Elf *elf, int sh_type, Elf_Scn *scn)
+{
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ GElf_Shdr sh;
+
+ if (!gelf_getshdr(scn, &sh))
+ continue;
+ if (sh.sh_type == sh_type)
+ return scn;
+ }
+ return NULL;
+}
+
+/* Find offset of function name in the provided ELF object. "binary_path" is
+ * the path to the ELF binary represented by "elf", and is only used for
+ * error reporting. "name" matches symbol name or name@@LIB for library
+ * functions.
+ */
+static long elf_find_func_offset(Elf *elf, const char *binary_path, const char *name)
+{
+ int i, sh_types[2] = { SHT_DYNSYM, SHT_SYMTAB };
+ bool is_shared_lib, is_name_qualified;
+ long ret = -ENOENT;
+ size_t name_len;
+ GElf_Ehdr ehdr;
+
+ if (!gelf_getehdr(elf, &ehdr)) {
+ pr_warn("elf: failed to get ehdr from %s: %s\n", binary_path, elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+ /* for shared lib case, we do not need to calculate relative offset */
+ is_shared_lib = ehdr.e_type == ET_DYN;
+
+ name_len = strlen(name);
+ /* Does name specify "@@LIB"? */
+ is_name_qualified = strstr(name, "@@") != NULL;
+
+ /* Search SHT_DYNSYM, SHT_SYMTAB for symbol. This search order is used because if
+ * a binary is stripped, it may only have SHT_DYNSYM, and a fully-statically
+	 * linked binary may not have SHT_DYNSYM, so absence of a section should not be
+ * reported as a warning/error.
+ */
+ for (i = 0; i < ARRAY_SIZE(sh_types); i++) {
+ size_t nr_syms, strtabidx, idx;
+ Elf_Data *symbols = NULL;
+ Elf_Scn *scn = NULL;
+ int last_bind = -1;
+ const char *sname;
+ GElf_Shdr sh;
+
+ scn = elf_find_next_scn_by_type(elf, sh_types[i], NULL);
+ if (!scn) {
+ pr_debug("elf: failed to find symbol table ELF sections in '%s'\n",
+ binary_path);
+ continue;
+ }
+ if (!gelf_getshdr(scn, &sh))
+ continue;
+ strtabidx = sh.sh_link;
+ symbols = elf_getdata(scn, 0);
+ if (!symbols) {
+ pr_warn("elf: failed to get symbols for symtab section in '%s': %s\n",
+ binary_path, elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+ nr_syms = symbols->d_size / sh.sh_entsize;
+
+ for (idx = 0; idx < nr_syms; idx++) {
+ int curr_bind;
+ GElf_Sym sym;
+ Elf_Scn *sym_scn;
+ GElf_Shdr sym_sh;
+
+ if (!gelf_getsym(symbols, idx, &sym))
+ continue;
+
+ if (GELF_ST_TYPE(sym.st_info) != STT_FUNC)
+ continue;
+
+ sname = elf_strptr(elf, strtabidx, sym.st_name);
+ if (!sname)
+ continue;
+
+ curr_bind = GELF_ST_BIND(sym.st_info);
+
+ /* User can specify func, func@@LIB or func@@LIB_VERSION. */
+ if (strncmp(sname, name, name_len) != 0)
+ continue;
+			/* ...but we don't want a search for "foo" to match "foo2" also, so any
+ * additional characters in sname should be of the form "@@LIB".
+ */
+ if (!is_name_qualified && sname[name_len] != '\0' && sname[name_len] != '@')
+ continue;
+
+ if (ret >= 0) {
+ /* handle multiple matches */
+ if (last_bind != STB_WEAK && curr_bind != STB_WEAK) {
+ /* Only accept one non-weak bind. */
+ pr_warn("elf: ambiguous match for '%s', '%s' in '%s'\n",
+ sname, name, binary_path);
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ } else if (curr_bind == STB_WEAK) {
+ /* already have a non-weak bind, and
+ * this is a weak bind, so ignore.
+ */
+ continue;
+ }
+ }
+
+ /* Transform symbol's virtual address (absolute for
+ * binaries and relative for shared libs) into file
+ * offset, which is what kernel is expecting for
+ * uprobe/uretprobe attachment.
+ * See Documentation/trace/uprobetracer.rst for more
+ * details.
+ * This is done by looking up symbol's containing
+			 * section's header and using its virtual address
+ * (sh_addr) and corresponding file offset (sh_offset)
+ * to transform sym.st_value (virtual address) into
+ * desired final file offset.
+ */
+ sym_scn = elf_getscn(elf, sym.st_shndx);
+ if (!sym_scn)
+ continue;
+ if (!gelf_getshdr(sym_scn, &sym_sh))
+ continue;
+
+ ret = sym.st_value - sym_sh.sh_addr + sym_sh.sh_offset;
+ last_bind = curr_bind;
+ }
+ if (ret > 0)
+ break;
+ }
+
+ if (ret > 0) {
+ pr_debug("elf: symbol address match for '%s' in '%s': 0x%lx\n", name, binary_path,
+ ret);
+ } else {
+ if (ret == 0) {
+ pr_warn("elf: '%s' is 0 in symtab for '%s': %s\n", name, binary_path,
+ is_shared_lib ? "should not be 0 in a shared library" :
+ "try using shared library path instead");
+ ret = -ENOENT;
+ } else {
+ pr_warn("elf: failed to find symbol '%s' in '%s'\n", name, binary_path);
+ }
+ }
+out:
+ return ret;
+}
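/* A worked example of the transformation above (numbers are hypothetical):
 * for a symbol with st_value 0x401130 in a section with sh_addr 0x401000
 * and sh_offset 0x1000, the resulting file offset is
 *
 *   0x401130 - 0x401000 + 0x1000 = 0x1130
 *
 * which is the value the kernel expects for uprobe/uretprobe attachment.
 */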
+
+/* Find offset of function name in ELF object specified by path. "name" matches
+ * symbol name or name@@LIB for library functions.
+ */
+static long elf_find_func_offset_from_file(const char *binary_path, const char *name)
{
char errmsg[STRERR_BUFSIZE];
+ long ret = -ENOENT;
+ Elf *elf;
+ int fd;
+
+ fd = open(binary_path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ ret = -errno;
+ pr_warn("failed to open %s: %s\n", binary_path,
+ libbpf_strerror_r(ret, errmsg, sizeof(errmsg)));
+ return ret;
+ }
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ pr_warn("elf: could not read elf from %s: %s\n", binary_path, elf_errmsg(-1));
+ close(fd);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ ret = elf_find_func_offset(elf, binary_path, name);
+ elf_end(elf);
+ close(fd);
+ return ret;
+}
+
+/* Find offset of function name in archive specified by path. Currently
+ * supported are .zip files that do not compress their contents, as used on
+ * Android in the form of APKs, for example. "file_name" is the name of the ELF
+ * file inside the archive. "func_name" matches symbol name or name@@LIB for
+ * library functions.
+ *
+ * An overview of the APK format is provided here:
+ * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
+ */
+static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
+ const char *func_name)
+{
+ struct zip_archive *archive;
+ struct zip_entry entry;
+ long ret;
+ Elf *elf;
+
+ archive = zip_archive_open(archive_path);
+ if (IS_ERR(archive)) {
+ ret = PTR_ERR(archive);
+ pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
+ return ret;
+ }
+
+ ret = zip_archive_find_entry(archive, file_name, &entry);
+ if (ret) {
+ pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
+ archive_path, ret);
+ goto out;
+ }
+ pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
+ (unsigned long)entry.data_offset);
+
+ if (entry.compression) {
+ pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
+ archive_path);
+ ret = -LIBBPF_ERRNO__FORMAT;
+ goto out;
+ }
+
+ elf = elf_memory((void *)entry.data, entry.data_length);
+ if (!elf) {
+ pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
+ elf_errmsg(-1));
+ ret = -LIBBPF_ERRNO__LIBELF;
+ goto out;
+ }
+
+ ret = elf_find_func_offset(elf, file_name, func_name);
+ if (ret > 0) {
+ pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
+ func_name, file_name, archive_path, entry.data_offset, ret,
+ ret + entry.data_offset);
+ ret += entry.data_offset;
+ }
+ elf_end(elf);
+
+out:
+ zip_archive_close(archive);
+ return ret;
+}
+
+static const char *arch_specific_lib_paths(void)
+{
+ /*
+ * Based on https://packages.debian.org/sid/libc6.
+ *
+ * Assume that the traced program is built for the same architecture
+ * as libbpf, which should cover the vast majority of cases.
+ */
+#if defined(__x86_64__)
+ return "/lib/x86_64-linux-gnu";
+#elif defined(__i386__)
+ return "/lib/i386-linux-gnu";
+#elif defined(__s390x__)
+ return "/lib/s390x-linux-gnu";
+#elif defined(__s390__)
+ return "/lib/s390-linux-gnu";
+#elif defined(__arm__) && defined(__SOFTFP__)
+ return "/lib/arm-linux-gnueabi";
+#elif defined(__arm__) && !defined(__SOFTFP__)
+ return "/lib/arm-linux-gnueabihf";
+#elif defined(__aarch64__)
+ return "/lib/aarch64-linux-gnu";
+#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
+ return "/lib/mips64el-linux-gnuabi64";
+#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
+ return "/lib/mipsel-linux-gnu";
+#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ return "/lib/powerpc64le-linux-gnu";
+#elif defined(__sparc__) && defined(__arch64__)
+ return "/lib/sparc64-linux-gnu";
+#elif defined(__riscv) && __riscv_xlen == 64
+ return "/lib/riscv64-linux-gnu";
+#else
+ return NULL;
+#endif
+}
+
+/* Get full path to program/shared library. */
+static int resolve_full_path(const char *file, char *result, size_t result_sz)
+{
+ const char *search_paths[3] = {};
+ int i, perm;
+
+ if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
+ search_paths[0] = getenv("LD_LIBRARY_PATH");
+ search_paths[1] = "/usr/lib64:/usr/lib";
+ search_paths[2] = arch_specific_lib_paths();
+ perm = R_OK;
+ } else {
+ search_paths[0] = getenv("PATH");
+ search_paths[1] = "/usr/bin:/usr/sbin";
+ perm = R_OK | X_OK;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
+ const char *s;
+
+ if (!search_paths[i])
+ continue;
+ for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
+ char *next_path;
+ int seg_len;
+
+ if (s[0] == ':')
+ s++;
+ next_path = strchr(s, ':');
+ seg_len = next_path ? next_path - s : strlen(s);
+ if (!seg_len)
+ continue;
+ snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
+ /* ensure it has required permissions */
+ if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
+ continue;
+ pr_debug("resolved '%s' to '%s'\n", file, result);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
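
The colon-separated search-path walk above can be easier to follow in isolation. Below is a minimal standalone sketch of the same segment-splitting logic, with a hypothetical path list and file name that are not taken from the patch.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *paths = "/usr/lib64:/usr/lib"; /* assumed example list */
	const char *file = "libc.so.6";            /* assumed example file */
	const char *s;
	char result[4096];

	for (s = paths; s != NULL; s = strchr(s, ':')) {
		const char *next;
		int seg_len;

		if (s[0] == ':')
			s++;
		next = strchr(s, ':');
		seg_len = next ? (int)(next - s) : (int)strlen(s);
		if (!seg_len)
			continue;
		/* resolve_full_path() would now check permissions with faccessat() */
		snprintf(result, sizeof(result), "%.*s/%s", seg_len, s, file);
		printf("candidate: %s\n", result);
	}
	return 0;
}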
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
+ const char *binary_path, size_t func_offset,
+ const struct bpf_uprobe_opts *opts)
+{
+ const char *archive_path = NULL, *archive_sep = NULL;
+ char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
+ enum probe_attach_mode attach_mode;
+ char full_path[PATH_MAX];
struct bpf_link *link;
+ size_t ref_ctr_off;
int pfd, err;
+ bool retprobe, legacy;
+ const char *func_name;
+
+ if (!OPTS_VALID(opts, bpf_uprobe_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
+ retprobe = OPTS_GET(opts, retprobe, false);
+ ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
+ pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
+
+ if (!binary_path)
+ return libbpf_err_ptr(-EINVAL);
+
+ /* Check if "binary_path" refers to an archive. */
+ archive_sep = strstr(binary_path, "!/");
+ if (archive_sep) {
+ full_path[0] = '\0';
+ libbpf_strlcpy(full_path, binary_path,
+ min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
+ archive_path = full_path;
+ binary_path = archive_sep + 2;
+ } else if (!strchr(binary_path, '/')) {
+ err = resolve_full_path(binary_path, full_path, sizeof(full_path));
+ if (err) {
+ pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
+ prog->name, binary_path, err);
+ return libbpf_err_ptr(err);
+ }
+ binary_path = full_path;
+ }
+ func_name = OPTS_GET(opts, func_name, NULL);
+ if (func_name) {
+ long sym_off;
+
+ if (archive_path) {
+ sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
+ func_name);
+ binary_path = archive_path;
+ } else {
+ sym_off = elf_find_func_offset_from_file(binary_path, func_name);
+ }
+ if (sym_off < 0)
+ return libbpf_err_ptr(sym_off);
+ func_offset += sym_off;
+ }
+
+ legacy = determine_uprobe_perf_type() < 0;
+ switch (attach_mode) {
+ case PROBE_ATTACH_MODE_LEGACY:
+ legacy = true;
+ pe_opts.force_ioctl_attach = true;
+ break;
+ case PROBE_ATTACH_MODE_PERF:
+ if (legacy)
+ return libbpf_err_ptr(-ENOTSUP);
+ pe_opts.force_ioctl_attach = true;
+ break;
+ case PROBE_ATTACH_MODE_LINK:
+ if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
+ return libbpf_err_ptr(-ENOTSUP);
+ break;
+ case PROBE_ATTACH_MODE_DEFAULT:
+ break;
+ default:
+ return libbpf_err_ptr(-EINVAL);
+ }
- pfd = perf_event_open_probe(true /* uprobe */, retprobe,
- binary_path, func_offset, pid);
+ if (!legacy) {
+ pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
+ func_offset, pid, ref_ctr_off);
+ } else {
+ char probe_name[PATH_MAX + 64];
+
+ if (ref_ctr_off)
+ return libbpf_err_ptr(-EINVAL);
+
+ gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
+ binary_path, func_offset);
+
+ legacy_probe = strdup(probe_name);
+ if (!legacy_probe)
+ return libbpf_err_ptr(-ENOMEM);
+
+ pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
+ binary_path, func_offset, pid);
+ }
if (pfd < 0) {
- pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
- bpf_program__title(prog, false),
- retprobe ? "uretprobe" : "uprobe",
+ err = -errno;
+ pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
+ prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ goto err_out;
}
- link = bpf_program__attach_perf_event(prog, pfd);
- if (IS_ERR(link)) {
+
+ link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
+ err = libbpf_get_error(link);
+ if (err) {
close(pfd);
- err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
- bpf_program__title(prog, false),
- retprobe ? "uretprobe" : "uprobe",
+ pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
+ prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return link;
+ goto err_clean_legacy;
+ }
+ if (legacy) {
+ struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
+
+ perf_link->legacy_probe_name = legacy_probe;
+ perf_link->legacy_is_kprobe = false;
+ perf_link->legacy_is_retprobe = retprobe;
+ }
+ return link;
+
+err_clean_legacy:
+ if (legacy)
+ remove_uprobe_event_legacy(legacy_probe, retprobe);
+err_out:
+ free(legacy_probe);
+ return libbpf_err_ptr(err);
+}
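
A minimal caller-side sketch of the function-name resolution added above, assuming an already-opened and loaded BPF object containing a program named "handle_malloc"; the program name and library name are illustrative, not part of the patch.

#include <errno.h>
#include <bpf/libbpf.h>

static int attach_malloc_uprobe(struct bpf_object *obj)
{
	LIBBPF_OPTS(bpf_uprobe_opts, uopts,
		.func_name = "malloc", /* resolved via elf_find_func_offset_from_file() */
	);
	struct bpf_program *prog;
	struct bpf_link *link;

	prog = bpf_object__find_program_by_name(obj, "handle_malloc"); /* assumed name */
	if (!prog)
		return -ENOENT;

	/* a bare "libc.so.6" is expanded by resolve_full_path() using
	 * LD_LIBRARY_PATH and the standard library directories
	 */
	link = bpf_program__attach_uprobe_opts(prog, -1 /* any pid */,
					       "libc.so.6", 0 /* func_offset */, &uopts);
	return libbpf_get_error(link);
}

An archive member could equally be targeted with a path such as "/data/app/base.apk!/lib/arm64-v8a/libfoo.so" (illustrative), which the "!/" check above splits into archive path and ELF member name.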
+
+/* Format of u[ret]probe section definition supporting auto-attach:
+ * u[ret]probe/binary:function[+offset]
+ *
+ * binary can be an absolute/relative path or a filename; the latter is resolved to a
+ * full binary path via bpf_program__attach_uprobe_opts.
+ *
+ * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
+ * specified (and auto-attach is not possible) or the above format is specified for
+ * auto-attach.
+ */
+static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
+ int n, ret = -EINVAL;
+ long offset = 0;
+
+ *link = NULL;
+
+ n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[a-zA-Z0-9_.]+%li",
+ &probe_type, &binary_path, &func_name, &offset);
+ switch (n) {
+ case 1:
+ /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
+ ret = 0;
+ break;
+ case 2:
+ pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
+ prog->name, prog->sec_name);
+ break;
+ case 3:
+ case 4:
+ opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
+ strcmp(probe_type, "uretprobe.s") == 0;
+ if (opts.retprobe && offset != 0) {
+ pr_warn("prog '%s': uretprobes do not support offset specification\n",
+ prog->name);
+ break;
+ }
+ opts.func_name = func_name;
+ *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
+ ret = libbpf_get_error(*link);
+ break;
+ default:
+ pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
+ prog->sec_name);
+ break;
+ }
+ free(probe_type);
+ free(binary_path);
+ free(func_name);
+
+ return ret;
+}
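
For reference, BPF-side section names matching the parser above might look like the following hedged sketch; the binary and function names are illustrative, and vmlinux.h is assumed to be generated with bpftool. A plain SEC("uprobe") is valid but is skipped by auto-attach, as noted in the comment above.

/* illustrative BPF-side definitions for the format parsed by attach_uprobe() */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("uprobe/libc.so.6:malloc")    /* auto-attached: binary and function specified */
int trace_malloc(struct pt_regs *ctx)
{
	return 0;
}

SEC("uretprobe/libc.so.6:malloc") /* uretprobe: no +offset suffix allowed */
int trace_malloc_ret(struct pt_regs *ctx)
{
	return 0;
}

SEC("uprobe")                     /* valid, but must be attached programmatically */
int trace_generic(struct pt_regs *ctx)
{
	return 0;
}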
+
+struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
+ bool retprobe, pid_t pid,
+ const char *binary_path,
+ size_t func_offset)
+{
+ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
+
+ return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
+}
+
+struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
+ pid_t pid, const char *binary_path,
+ const char *usdt_provider, const char *usdt_name,
+ const struct bpf_usdt_opts *opts)
+{
+ char resolved_path[512];
+ struct bpf_object *obj = prog->obj;
+ struct bpf_link *link;
+ __u64 usdt_cookie;
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_uprobe_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ if (bpf_program__fd(prog) < 0) {
+ pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ if (!binary_path)
+ return libbpf_err_ptr(-EINVAL);
+
+ if (!strchr(binary_path, '/')) {
+ err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
+ if (err) {
+ pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
+ prog->name, binary_path, err);
+ return libbpf_err_ptr(err);
+ }
+ binary_path = resolved_path;
}
+
+ /* USDT manager is instantiated lazily on first USDT attach. It will
+ * be destroyed together with BPF object in bpf_object__close().
+ */
+ if (IS_ERR(obj->usdt_man))
+ return libbpf_ptr(obj->usdt_man);
+ if (!obj->usdt_man) {
+ obj->usdt_man = usdt_manager_new(obj);
+ if (IS_ERR(obj->usdt_man))
+ return libbpf_ptr(obj->usdt_man);
+ }
+
+ usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
+ link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
+ usdt_provider, usdt_name, usdt_cookie);
+ err = libbpf_get_error(link);
+ if (err)
+ return libbpf_err_ptr(err);
return link;
}
+static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ char *path = NULL, *provider = NULL, *name = NULL;
+ const char *sec_name;
+ int n, err;
+
+ sec_name = bpf_program__section_name(prog);
+ if (strcmp(sec_name, "usdt") == 0) {
+ /* no auto-attach for just SEC("usdt") */
+ *link = NULL;
+ return 0;
+ }
+
+ n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
+ if (n != 3) {
+ pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
+ sec_name);
+ err = -EINVAL;
+ } else {
+ *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
+ provider, name, NULL);
+ err = libbpf_get_error(*link);
+ }
+ free(path);
+ free(provider);
+ free(name);
+ return err;
+}
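
To make the usdt/<path>:<provider>:<name> convention concrete, here is a hedged sketch of the programmatic form; the provider/probe names and library are examples, and "prog" is assumed to come from an already-loaded object.

#include <bpf/libbpf.h>

/* assumes "prog" is a SEC("usdt") program from a loaded object */
static struct bpf_link *attach_libc_setjmp(struct bpf_program *prog)
{
	/* provider "libc" / probe "setjmp" are illustrative; the bare file
	 * name is expanded through resolve_full_path() because it has no '/'
	 */
	return bpf_program__attach_usdt(prog, -1 /* any process */,
					"libc.so.6", "libc", "setjmp",
					NULL /* default opts */);
}

The declarative equivalent would be SEC("usdt/libc.so.6:libc:setjmp") on the BPF program, which attach_usdt() above parses with the three-part sscanf().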
+
static int determine_tracepoint_id(const char *tp_category,
const char *tp_name)
{
char file[PATH_MAX];
int ret;
- ret = snprintf(file, sizeof(file),
- "/sys/kernel/debug/tracing/events/%s/%s/id",
- tp_category, tp_name);
+ ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
+ tracefs_path(), tp_category, tp_name);
if (ret < 0)
return -errno;
if (ret >= sizeof(file)) {
@@ -7362,7 +11322,8 @@ static int determine_tracepoint_id(const char *tp_category,
static int perf_event_open_tracepoint(const char *tp_category,
const char *tp_name)
{
- struct perf_event_attr attr = {};
+ const size_t attr_sz = sizeof(struct perf_event_attr);
+ struct perf_event_attr attr;
char errmsg[STRERR_BUFSIZE];
int tp_id, pfd, err;
@@ -7374,8 +11335,9 @@ static int perf_event_open_tracepoint(const char *tp_category,
return tp_id;
}
+ memset(&attr, 0, attr_sz);
attr.type = PERF_TYPE_TRACEPOINT;
- attr.size = sizeof(attr);
+ attr.size = attr_sz;
attr.config = tp_id;
pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
@@ -7390,62 +11352,80 @@ static int perf_event_open_tracepoint(const char *tp_category,
return pfd;
}
-struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
- const char *tp_category,
- const char *tp_name)
+struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
+ const char *tp_category,
+ const char *tp_name,
+ const struct bpf_tracepoint_opts *opts)
{
+ DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int pfd, err;
+ if (!OPTS_VALID(opts, bpf_tracepoint_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
+
pfd = perf_event_open_tracepoint(tp_category, tp_name);
if (pfd < 0) {
- pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
- bpf_program__title(prog, false),
- tp_category, tp_name,
+ pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
+ prog->name, tp_category, tp_name,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ return libbpf_err_ptr(pfd);
}
- link = bpf_program__attach_perf_event(prog, pfd);
- if (IS_ERR(link)) {
+ link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
+ err = libbpf_get_error(link);
+ if (err) {
close(pfd);
- err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
- bpf_program__title(prog, false),
- tp_category, tp_name,
+ pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
+ prog->name, tp_category, tp_name,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
- return link;
+ return libbpf_err_ptr(err);
}
return link;
}
-static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
- struct bpf_program *prog)
+struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
+ const char *tp_category,
+ const char *tp_name)
+{
+ return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
+}
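
A short sketch of the reworked tracepoint attach, using an assumed syscalls tracepoint and an arbitrary cookie value; "prog" is a loaded SEC("tp/...") or SEC("tracepoint/...") program.

#include <bpf/libbpf.h>

static struct bpf_link *attach_openat_tp(struct bpf_program *prog)
{
	/* cookie value is an arbitrary example retrievable via bpf_get_attach_cookie() */
	LIBBPF_OPTS(bpf_tracepoint_opts, tp_opts, .bpf_cookie = 42);

	return bpf_program__attach_tracepoint_opts(prog, "syscalls",
						   "sys_enter_openat", &tp_opts);
}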
+
+static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
char *sec_name, *tp_cat, *tp_name;
- struct bpf_link *link;
- sec_name = strdup(bpf_program__title(prog, false));
+ *link = NULL;
+
+ /* no auto-attach for SEC("tp") or SEC("tracepoint") */
+ if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
+ return 0;
+
+ sec_name = strdup(prog->sec_name);
if (!sec_name)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- /* extract "tp/<category>/<name>" */
- tp_cat = sec_name + sec->len;
+ /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
+ if (str_has_pfx(prog->sec_name, "tp/"))
+ tp_cat = sec_name + sizeof("tp/") - 1;
+ else
+ tp_cat = sec_name + sizeof("tracepoint/") - 1;
tp_name = strchr(tp_cat, '/');
if (!tp_name) {
- link = ERR_PTR(-EINVAL);
- goto out;
+ free(sec_name);
+ return -EINVAL;
}
*tp_name = '\0';
tp_name++;
- link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
-out:
+ *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
free(sec_name);
- return link;
+ return libbpf_get_error(*link);
}
-struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
+struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
const char *tp_name)
{
char errmsg[STRERR_BUFSIZE];
@@ -7454,95 +11434,139 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
- return ERR_PTR(-EINVAL);
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
- bpf_program__title(prog, false), tp_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
+ prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ return libbpf_err_ptr(pfd);
}
link->fd = pfd;
return link;
}
-static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
- struct bpf_program *prog)
+static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
- const char *tp_name = bpf_program__title(prog, false) + sec->len;
+ static const char *const prefixes[] = {
+ "raw_tp",
+ "raw_tracepoint",
+ "raw_tp.w",
+ "raw_tracepoint.w",
+ };
+ size_t i;
+ const char *tp_name = NULL;
+
+ *link = NULL;
- return bpf_program__attach_raw_tracepoint(prog, tp_name);
+ for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
+ size_t pfx_len;
+
+ if (!str_has_pfx(prog->sec_name, prefixes[i]))
+ continue;
+
+ pfx_len = strlen(prefixes[i]);
+ /* no auto-attach case, e.g., plain SEC("raw_tp") */
+ if (prog->sec_name[pfx_len] == '\0')
+ return 0;
+
+ if (prog->sec_name[pfx_len] != '/')
+ continue;
+
+ tp_name = prog->sec_name + pfx_len + 1;
+ break;
+ }
+
+ if (!tp_name) {
+ pr_warn("prog '%s': invalid section name '%s'\n",
+ prog->name, prog->sec_name);
+ return -EINVAL;
+ }
+
+ *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
+ return libbpf_get_error(*link);
}
/* Common logic for all BPF program types that attach to a btf_id */
-static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
+static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
+ const struct bpf_trace_opts *opts)
{
+ LIBBPF_OPTS(bpf_link_create_opts, link_opts);
char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
int prog_fd, pfd;
+ if (!OPTS_VALID(opts, bpf_trace_opts))
+ return libbpf_err_ptr(-EINVAL);
+
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
- return ERR_PTR(-EINVAL);
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
- pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
+ /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
+ link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
+ pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warn("program '%s': failed to attach: %s\n",
- bpf_program__title(prog, false),
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
- return ERR_PTR(pfd);
+ pr_warn("prog '%s': failed to attach: %s\n",
+ prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ return libbpf_err_ptr(pfd);
}
link->fd = pfd;
- return (struct bpf_link *)link;
+ return link;
}
-struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
+struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
{
- return bpf_program__attach_btf_id(prog);
+ return bpf_program__attach_btf_id(prog, NULL);
}
-struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
+struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
+ const struct bpf_trace_opts *opts)
{
- return bpf_program__attach_btf_id(prog);
+ return bpf_program__attach_btf_id(prog, opts);
}
-static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
- struct bpf_program *prog)
+struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
{
- return bpf_program__attach_trace(prog);
+ return bpf_program__attach_btf_id(prog, NULL);
}
-static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
- struct bpf_program *prog)
+static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
- return bpf_program__attach_lsm(prog);
+ *link = bpf_program__attach_trace(prog);
+ return libbpf_get_error(*link);
}
-struct bpf_link *
-bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
+static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
{
- const struct bpf_sec_def *sec_def;
+ *link = bpf_program__attach_lsm(prog);
+ return libbpf_get_error(*link);
+}
+
+static struct bpf_link *
+bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id,
+ const char *target_name)
+{
+ DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
+ .target_btf_id = btf_id);
enum bpf_attach_type attach_type;
char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
@@ -7550,101 +11574,255 @@ bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
- return ERR_PTR(-EINVAL);
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
+ return libbpf_err_ptr(-EINVAL);
}
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-ENOMEM);
+ return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
- attach_type = bpf_program__get_expected_attach_type(prog);
- if (!attach_type) {
- sec_def = find_sec_def(bpf_program__title(prog, false));
- if (sec_def)
- attach_type = sec_def->attach_type;
- }
- link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, NULL);
+ attach_type = bpf_program__expected_attach_type(prog);
+ link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
if (link_fd < 0) {
link_fd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to cgroup: %s\n",
- bpf_program__title(prog, false),
+ pr_warn("prog '%s': failed to attach to %s: %s\n",
+ prog->name, target_name,
libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
- return ERR_PTR(link_fd);
+ return libbpf_err_ptr(link_fd);
}
link->fd = link_fd;
return link;
}
-struct bpf_link *bpf_program__attach(struct bpf_program *prog)
+struct bpf_link *
+bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
{
- const struct bpf_sec_def *sec_def;
+ return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
+}
- sec_def = find_sec_def(bpf_program__title(prog, false));
- if (!sec_def || !sec_def->attach_fn)
- return ERR_PTR(-ESRCH);
+struct bpf_link *
+bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
+{
+ return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
+}
- return sec_def->attach_fn(sec_def, prog);
+struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
+{
+ /* target_fd/target_ifindex use the same field in LINK_CREATE */
+ return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
}
-static int bpf_link__detach_struct_ops(struct bpf_link *link)
+struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
+ int target_fd,
+ const char *attach_func_name)
{
- __u32 zero = 0;
+ int btf_id;
- if (bpf_map_delete_elem(link->fd, &zero))
- return -errno;
+ if (!!target_fd != !!attach_func_name) {
+ pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
- return 0;
+ if (prog->type != BPF_PROG_TYPE_EXT) {
+ pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
+ prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ if (target_fd) {
+ btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
+ if (btf_id < 0)
+ return libbpf_err_ptr(btf_id);
+
+ return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
+ } else {
+ /* no target, so use raw_tracepoint_open for compatibility
+ * with old kernels
+ */
+ return bpf_program__attach_trace(prog);
+ }
}
-struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
+struct bpf_link *
+bpf_program__attach_iter(const struct bpf_program *prog,
+ const struct bpf_iter_attach_opts *opts)
{
- struct bpf_struct_ops *st_ops;
+ DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
+ char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
- __u32 i, zero = 0;
+ int prog_fd, link_fd;
+ __u32 target_fd = 0;
+
+ if (!OPTS_VALID(opts, bpf_iter_attach_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
+ link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
+
+ prog_fd = bpf_program__fd(prog);
+ if (prog_fd < 0) {
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
+ return libbpf_err_ptr(-EINVAL);
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link)
+ return libbpf_err_ptr(-ENOMEM);
+ link->detach = &bpf_link__detach_fd;
+
+ link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
+ &link_create_opts);
+ if (link_fd < 0) {
+ link_fd = -errno;
+ free(link);
+ pr_warn("prog '%s': failed to attach to iterator: %s\n",
+ prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+ return libbpf_err_ptr(link_fd);
+ }
+ link->fd = link_fd;
+ return link;
+}
+
+static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
+{
+ *link = bpf_program__attach_iter(prog, NULL);
+ return libbpf_get_error(*link);
+}
+
+struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
+{
+ struct bpf_link *link = NULL;
int err;
+ if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
+ return libbpf_err_ptr(-EOPNOTSUPP);
+
+ err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
+ if (err)
+ return libbpf_err_ptr(err);
+
+ /* When calling bpf_program__attach() explicitly, auto-attach support
+ * is expected to work, so NULL returned link is considered an error.
+ * This is different for skeleton's attach, see comment in
+ * bpf_object__attach_skeleton().
+ */
+ if (!link)
+ return libbpf_err_ptr(-EOPNOTSUPP);
+
+ return link;
+}
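
A hedged sketch of driving the auto-attach entry point above for every program in a loaded object; links are leaked here for brevity, whereas real code would store them and call bpf_link__destroy().

#include <bpf/libbpf.h>

static int auto_attach_all(struct bpf_object *obj)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		struct bpf_link *link = bpf_program__attach(prog);
		int err = libbpf_get_error(link);

		if (err) {
			/* -EOPNOTSUPP: the SEC() name does not carry enough
			 * information for auto-attach
			 */
			return err;
		}
	}
	return 0;
}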
+
+struct bpf_link_struct_ops {
+ struct bpf_link link;
+ int map_fd;
+};
+
+static int bpf_link__detach_struct_ops(struct bpf_link *link)
+{
+ struct bpf_link_struct_ops *st_link;
+ __u32 zero = 0;
+
+ st_link = container_of(link, struct bpf_link_struct_ops, link);
+
+ if (st_link->map_fd < 0)
+ /* w/o a real link */
+ return bpf_map_delete_elem(link->fd, &zero);
+
+ return close(link->fd);
+}
+
+struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
+{
+ struct bpf_link_struct_ops *link;
+ __u32 zero = 0;
+ int err, fd;
+
if (!bpf_map__is_struct_ops(map) || map->fd == -1)
- return ERR_PTR(-EINVAL);
+ return libbpf_err_ptr(-EINVAL);
link = calloc(1, sizeof(*link));
if (!link)
- return ERR_PTR(-EINVAL);
-
- st_ops = map->st_ops;
- for (i = 0; i < btf_vlen(st_ops->type); i++) {
- struct bpf_program *prog = st_ops->progs[i];
- void *kern_data;
- int prog_fd;
+ return libbpf_err_ptr(-EINVAL);
+
+ /* kern_vdata should be prepared during the loading phase. */
+ err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
+ /* It can be EBUSY if the map has been used to create or
+ * update a link before. We don't allow updating the value of
+ * a struct_ops once it is set. That ensures that the value
+ * never changes. So, it is safe to skip EBUSY.
+ */
+ if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
+ free(link);
+ return libbpf_err_ptr(err);
+ }
- if (!prog)
- continue;
+ link->link.detach = bpf_link__detach_struct_ops;
- prog_fd = bpf_program__fd(prog);
- kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
- *(unsigned long *)kern_data = prog_fd;
+ if (!(map->def.map_flags & BPF_F_LINK)) {
+ /* w/o a real link */
+ link->link.fd = map->fd;
+ link->map_fd = -1;
+ return &link->link;
}
- err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
- if (err) {
- err = -errno;
+ fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
+ if (fd < 0) {
free(link);
- return ERR_PTR(err);
+ return libbpf_err_ptr(fd);
}
- link->detach = bpf_link__detach_struct_ops;
- link->fd = map->fd;
+ link->link.fd = fd;
+ link->map_fd = map->fd;
- return link;
+ return &link->link;
}
-enum bpf_perf_event_ret
-bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
- void **copy_mem, size_t *copy_size,
- bpf_perf_event_print_t fn, void *private_data)
+/*
+ * Swap the backing struct_ops map of a link with a new struct_ops map.
+ */
+int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
+{
+ struct bpf_link_struct_ops *st_ops_link;
+ __u32 zero = 0;
+ int err;
+
+ if (!bpf_map__is_struct_ops(map) || map->fd < 0)
+ return -EINVAL;
+
+ st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
+ /* Ensure the type of a link is correct */
+ if (st_ops_link->map_fd < 0)
+ return -EINVAL;
+
+ err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
+ /* It can be EBUSY if the map has been used to create or
+ * update a link before. We don't allow updating the value of
+ * a struct_ops once it is set. That ensures that the value
+ * never changes. So, it is safe to skip EBUSY.
+ */
+ if (err && err != -EBUSY)
+ return err;
+
+ err = bpf_link_update(link->fd, map->fd, NULL);
+ if (err < 0)
+ return err;
+
+ st_ops_link->map_fd = map->fd;
+
+ return 0;
+}
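
A minimal sketch of the BPF_F_LINK flow added above: a struct_ops map is attached to obtain a real link, and the link can later be pointed at a replacement map. Both maps are assumed to be declared with BPF_F_LINK; destroying the link at the end is only for brevity.

#include <bpf/libbpf.h>

static int swap_struct_ops(const struct bpf_map *old_map, const struct bpf_map *new_map)
{
	struct bpf_link *link;
	int err;

	link = bpf_map__attach_struct_ops(old_map); /* real link only with BPF_F_LINK */
	err = libbpf_get_error(link);
	if (err)
		return err;

	/* fails with -EINVAL if the link is not backed by a BPF_F_LINK map */
	err = bpf_link__update_map(link, new_map);

	bpf_link__destroy(link); /* also detaches; a real caller would keep the link */
	return err;
}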
+
+typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
+ void *private_data);
+
+static enum bpf_perf_event_ret
+perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
+ void **copy_mem, size_t *copy_size,
+ bpf_perf_event_print_t fn, void *private_data)
{
struct perf_event_mmap_page *header = mmap_mem;
__u64 data_head = ring_buffer_read_head(header);
@@ -7686,7 +11864,7 @@ bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
}
ring_buffer_write_tail(header, data_tail);
- return ret;
+ return libbpf_err(ret);
}
struct perf_buffer;
@@ -7749,12 +11927,15 @@ void perf_buffer__free(struct perf_buffer *pb)
{
int i;
- if (!pb)
+ if (IS_ERR_OR_NULL(pb))
return;
if (pb->cpu_bufs) {
- for (i = 0; i < pb->cpu_cnt && pb->cpu_bufs[i]; i++) {
+ for (i = 0; i < pb->cpu_cnt; i++) {
struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
+ if (!cpu_buf)
+ continue;
+
bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
perf_buffer__free_cpu_buf(pb, cpu_buf);
}
@@ -7820,71 +12001,101 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p);
struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
+ perf_buffer_sample_fn sample_cb,
+ perf_buffer_lost_fn lost_cb,
+ void *ctx,
const struct perf_buffer_opts *opts)
{
+ const size_t attr_sz = sizeof(struct perf_event_attr);
struct perf_buffer_params p = {};
- struct perf_event_attr attr = { 0, };
+ struct perf_event_attr attr;
+ __u32 sample_period;
+
+ if (!OPTS_VALID(opts, perf_buffer_opts))
+ return libbpf_err_ptr(-EINVAL);
- attr.config = PERF_COUNT_SW_BPF_OUTPUT,
+ sample_period = OPTS_GET(opts, sample_period, 1);
+ if (!sample_period)
+ sample_period = 1;
+
+ memset(&attr, 0, attr_sz);
+ attr.size = attr_sz;
+ attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
- attr.sample_period = 1;
- attr.wakeup_events = 1;
+ attr.sample_period = sample_period;
+ attr.wakeup_events = sample_period;
p.attr = &attr;
- p.sample_cb = opts ? opts->sample_cb : NULL;
- p.lost_cb = opts ? opts->lost_cb : NULL;
- p.ctx = opts ? opts->ctx : NULL;
+ p.sample_cb = sample_cb;
+ p.lost_cb = lost_cb;
+ p.ctx = ctx;
- return __perf_buffer__new(map_fd, page_cnt, &p);
+ return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
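
The reworked perf_buffer__new() above takes the callbacks directly instead of through opts. A minimal consumer sketch follows; the map fd is assumed to refer to a BPF_MAP_TYPE_PERF_EVENT_ARRAY map, and the callback bodies are placeholders.

#include <stdio.h>
#include <bpf/libbpf.h>

static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	printf("cpu %d: %u bytes\n", cpu, size);
}

static void handle_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "cpu %d: lost %llu samples\n", cpu, (unsigned long long)cnt);
}

static int consume_events(int perf_map_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(perf_map_fd, 8 /* pages per CPU, power of two */,
			      handle_sample, handle_lost, NULL /* ctx */,
			      NULL /* opts: default sample_period of 1 */);
	err = libbpf_get_error(pb);
	if (err)
		return err;

	/* callbacks fire from inside poll; loop until an error or EINTR */
	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		;

	perf_buffer__free(pb);
	return err;
}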
-struct perf_buffer *
-perf_buffer__new_raw(int map_fd, size_t page_cnt,
- const struct perf_buffer_raw_opts *opts)
+struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
+ struct perf_event_attr *attr,
+ perf_buffer_event_fn event_cb, void *ctx,
+ const struct perf_buffer_raw_opts *opts)
{
struct perf_buffer_params p = {};
- p.attr = opts->attr;
- p.event_cb = opts->event_cb;
- p.ctx = opts->ctx;
- p.cpu_cnt = opts->cpu_cnt;
- p.cpus = opts->cpus;
- p.map_keys = opts->map_keys;
+ if (!attr)
+ return libbpf_err_ptr(-EINVAL);
+
+ if (!OPTS_VALID(opts, perf_buffer_raw_opts))
+ return libbpf_err_ptr(-EINVAL);
+
+ p.attr = attr;
+ p.event_cb = event_cb;
+ p.ctx = ctx;
+ p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
+ p.cpus = OPTS_GET(opts, cpus, NULL);
+ p.map_keys = OPTS_GET(opts, map_keys, NULL);
- return __perf_buffer__new(map_fd, page_cnt, &p);
+ return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p)
{
const char *online_cpus_file = "/sys/devices/system/cpu/online";
- struct bpf_map_info map = {};
+ struct bpf_map_info map;
char msg[STRERR_BUFSIZE];
struct perf_buffer *pb;
bool *online = NULL;
__u32 map_info_len;
int err, i, j, n;
- if (page_cnt & (page_cnt - 1)) {
+ if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
pr_warn("page count should be power of two, but is %zu\n",
page_cnt);
return ERR_PTR(-EINVAL);
}
+ /* best-effort sanity checks */
+ memset(&map, 0, sizeof(map));
map_info_len = sizeof(map);
- err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
+ err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
if (err) {
err = -errno;
- pr_warn("failed to get map info for map FD %d: %s\n",
- map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
- return ERR_PTR(err);
- }
-
- if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
- pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
- map.name);
- return ERR_PTR(-EINVAL);
+ /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
+ * -EBADFD, -EFAULT, or -E2BIG on real error
+ */
+ if (err != -EINVAL) {
+ pr_warn("failed to get map info for map FD %d: %s\n",
+ map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
+ return ERR_PTR(err);
+ }
+ pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
+ map_fd);
+ } else {
+ if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
+ pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
+ map.name);
+ return ERR_PTR(-EINVAL);
+ }
}
pb = calloc(1, sizeof(*pb));
@@ -7916,7 +12127,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
err = pb->cpu_cnt;
goto error;
}
- if (map.max_entries < pb->cpu_cnt)
+ if (map.max_entries && map.max_entries < pb->cpu_cnt)
pb->cpu_cnt = map.max_entries;
}
@@ -7997,7 +12208,7 @@ error:
struct perf_sample_raw {
struct perf_event_header header;
uint32_t size;
- char data[0];
+ char data[];
};
struct perf_sample_lost {
@@ -8045,301 +12256,166 @@ static int perf_buffer__process_records(struct perf_buffer *pb,
{
enum bpf_perf_event_ret ret;
- ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
- pb->page_size, &cpu_buf->buf,
- &cpu_buf->buf_size,
- perf_buffer__process_record, cpu_buf);
+ ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
+ pb->page_size, &cpu_buf->buf,
+ &cpu_buf->buf_size,
+ perf_buffer__process_record, cpu_buf);
if (ret != LIBBPF_PERF_EVENT_CONT)
return ret;
return 0;
}
+int perf_buffer__epoll_fd(const struct perf_buffer *pb)
+{
+ return pb->epoll_fd;
+}
+
int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
int i, cnt, err;
cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
+ if (cnt < 0)
+ return -errno;
+
for (i = 0; i < cnt; i++) {
struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
err = perf_buffer__process_records(pb, cpu_buf);
if (err) {
pr_warn("error while processing records: %d\n", err);
- return err;
+ return libbpf_err(err);
}
}
- return cnt < 0 ? -errno : cnt;
+ return cnt;
}
-struct bpf_prog_info_array_desc {
- int array_offset; /* e.g. offset of jited_prog_insns */
- int count_offset; /* e.g. offset of jited_prog_len */
- int size_offset; /* > 0: offset of rec size,
- * < 0: fix size of -size_offset
- */
-};
-
-static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
- [BPF_PROG_INFO_JITED_INSNS] = {
- offsetof(struct bpf_prog_info, jited_prog_insns),
- offsetof(struct bpf_prog_info, jited_prog_len),
- -1,
- },
- [BPF_PROG_INFO_XLATED_INSNS] = {
- offsetof(struct bpf_prog_info, xlated_prog_insns),
- offsetof(struct bpf_prog_info, xlated_prog_len),
- -1,
- },
- [BPF_PROG_INFO_MAP_IDS] = {
- offsetof(struct bpf_prog_info, map_ids),
- offsetof(struct bpf_prog_info, nr_map_ids),
- -(int)sizeof(__u32),
- },
- [BPF_PROG_INFO_JITED_KSYMS] = {
- offsetof(struct bpf_prog_info, jited_ksyms),
- offsetof(struct bpf_prog_info, nr_jited_ksyms),
- -(int)sizeof(__u64),
- },
- [BPF_PROG_INFO_JITED_FUNC_LENS] = {
- offsetof(struct bpf_prog_info, jited_func_lens),
- offsetof(struct bpf_prog_info, nr_jited_func_lens),
- -(int)sizeof(__u32),
- },
- [BPF_PROG_INFO_FUNC_INFO] = {
- offsetof(struct bpf_prog_info, func_info),
- offsetof(struct bpf_prog_info, nr_func_info),
- offsetof(struct bpf_prog_info, func_info_rec_size),
- },
- [BPF_PROG_INFO_LINE_INFO] = {
- offsetof(struct bpf_prog_info, line_info),
- offsetof(struct bpf_prog_info, nr_line_info),
- offsetof(struct bpf_prog_info, line_info_rec_size),
- },
- [BPF_PROG_INFO_JITED_LINE_INFO] = {
- offsetof(struct bpf_prog_info, jited_line_info),
- offsetof(struct bpf_prog_info, nr_jited_line_info),
- offsetof(struct bpf_prog_info, jited_line_info_rec_size),
- },
- [BPF_PROG_INFO_PROG_TAGS] = {
- offsetof(struct bpf_prog_info, prog_tags),
- offsetof(struct bpf_prog_info, nr_prog_tags),
- -(int)sizeof(__u8) * BPF_TAG_SIZE,
- },
-
-};
-
-static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
- int offset)
-{
- __u32 *array = (__u32 *)info;
-
- if (offset >= 0)
- return array[offset / sizeof(__u32)];
- return -(int)offset;
-}
-
-static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
- int offset)
+/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
+ * manager.
+ */
+size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
- __u64 *array = (__u64 *)info;
-
- if (offset >= 0)
- return array[offset / sizeof(__u64)];
- return -(int)offset;
+ return pb->cpu_cnt;
}
-static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
- __u32 val)
+/*
+ * Return perf_event FD of a ring buffer in *buf_idx* slot of
+ * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
+ * select()/poll()/epoll() Linux syscalls.
+ */
+int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
{
- __u32 *array = (__u32 *)info;
+ struct perf_cpu_buf *cpu_buf;
- if (offset >= 0)
- array[offset / sizeof(__u32)] = val;
-}
+ if (buf_idx >= pb->cpu_cnt)
+ return libbpf_err(-EINVAL);
-static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
- __u64 val)
-{
- __u64 *array = (__u64 *)info;
+ cpu_buf = pb->cpu_bufs[buf_idx];
+ if (!cpu_buf)
+ return libbpf_err(-ENOENT);
- if (offset >= 0)
- array[offset / sizeof(__u64)] = val;
+ return cpu_buf->fd;
}
-struct bpf_prog_info_linear *
-bpf_program__get_prog_info_linear(int fd, __u64 arrays)
+int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
{
- struct bpf_prog_info_linear *info_linear;
- struct bpf_prog_info info = {};
- __u32 info_len = sizeof(info);
- __u32 data_len = 0;
- int i, err;
- void *ptr;
-
- if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
- return ERR_PTR(-EINVAL);
-
- /* step 1: get array dimensions */
- err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
- if (err) {
- pr_debug("can't get prog info: %s", strerror(errno));
- return ERR_PTR(-EFAULT);
- }
-
- /* step 2: calculate total size of all arrays */
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- bool include_array = (arrays & (1UL << i)) > 0;
- struct bpf_prog_info_array_desc *desc;
- __u32 count, size;
-
- desc = bpf_prog_info_array_desc + i;
-
- /* kernel is too old to support this field */
- if (info_len < desc->array_offset + sizeof(__u32) ||
- info_len < desc->count_offset + sizeof(__u32) ||
- (desc->size_offset > 0 && info_len < desc->size_offset))
- include_array = false;
-
- if (!include_array) {
- arrays &= ~(1UL << i); /* clear the bit */
- continue;
- }
-
- count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
- size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
-
- data_len += count * size;
- }
-
- /* step 3: allocate continuous memory */
- data_len = roundup(data_len, sizeof(__u64));
- info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
- if (!info_linear)
- return ERR_PTR(-ENOMEM);
-
- /* step 4: fill data to info_linear->info */
- info_linear->arrays = arrays;
- memset(&info_linear->info, 0, sizeof(info));
- ptr = info_linear->data;
-
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- struct bpf_prog_info_array_desc *desc;
- __u32 count, size;
-
- if ((arrays & (1UL << i)) == 0)
- continue;
-
- desc = bpf_prog_info_array_desc + i;
- count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
- size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
- bpf_prog_info_set_offset_u32(&info_linear->info,
- desc->count_offset, count);
- bpf_prog_info_set_offset_u32(&info_linear->info,
- desc->size_offset, size);
- bpf_prog_info_set_offset_u64(&info_linear->info,
- desc->array_offset,
- ptr_to_u64(ptr));
- ptr += count * size;
- }
-
- /* step 5: call syscall again to get required arrays */
- err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
- if (err) {
- pr_debug("can't get prog info: %s", strerror(errno));
- free(info_linear);
- return ERR_PTR(-EFAULT);
- }
-
- /* step 6: verify the data */
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- struct bpf_prog_info_array_desc *desc;
- __u32 v1, v2;
-
- if ((arrays & (1UL << i)) == 0)
- continue;
-
- desc = bpf_prog_info_array_desc + i;
- v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
- v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
- desc->count_offset);
- if (v1 != v2)
- pr_warn("%s: mismatch in element count\n", __func__);
+ struct perf_cpu_buf *cpu_buf;
- v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
- v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
- desc->size_offset);
- if (v1 != v2)
- pr_warn("%s: mismatch in rec size\n", __func__);
- }
+ if (buf_idx >= pb->cpu_cnt)
+ return libbpf_err(-EINVAL);
- /* step 7: update info_len and data_len */
- info_linear->info_len = sizeof(struct bpf_prog_info);
- info_linear->data_len = data_len;
+ cpu_buf = pb->cpu_bufs[buf_idx];
+ if (!cpu_buf)
+ return libbpf_err(-ENOENT);
- return info_linear;
+ *buf = cpu_buf->base;
+ *buf_size = pb->mmap_size;
+ return 0;
}
-void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
+/*
+ * Consume data from perf ring buffer corresponding to slot *buf_idx* in
+ * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
+ * consume, do nothing and return success.
+ * Returns:
+ * - 0 on success;
+ * - <0 on failure.
+ */
+int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
{
- int i;
+ struct perf_cpu_buf *cpu_buf;
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- struct bpf_prog_info_array_desc *desc;
- __u64 addr, offs;
+ if (buf_idx >= pb->cpu_cnt)
+ return libbpf_err(-EINVAL);
- if ((info_linear->arrays & (1UL << i)) == 0)
- continue;
+ cpu_buf = pb->cpu_bufs[buf_idx];
+ if (!cpu_buf)
+ return libbpf_err(-ENOENT);
- desc = bpf_prog_info_array_desc + i;
- addr = bpf_prog_info_read_offset_u64(&info_linear->info,
- desc->array_offset);
- offs = addr - ptr_to_u64(info_linear->data);
- bpf_prog_info_set_offset_u64(&info_linear->info,
- desc->array_offset, offs);
- }
+ return perf_buffer__process_records(pb, cpu_buf);
}
-void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
+int perf_buffer__consume(struct perf_buffer *pb)
{
- int i;
+ int i, err;
- for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
- struct bpf_prog_info_array_desc *desc;
- __u64 addr, offs;
+ for (i = 0; i < pb->cpu_cnt; i++) {
+ struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
- if ((info_linear->arrays & (1UL << i)) == 0)
+ if (!cpu_buf)
continue;
- desc = bpf_prog_info_array_desc + i;
- offs = bpf_prog_info_read_offset_u64(&info_linear->info,
- desc->array_offset);
- addr = offs + ptr_to_u64(info_linear->data);
- bpf_prog_info_set_offset_u64(&info_linear->info,
- desc->array_offset, addr);
+ err = perf_buffer__process_records(pb, cpu_buf);
+ if (err) {
+ pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
+ return libbpf_err(err);
+ }
}
+ return 0;
}
int bpf_program__set_attach_target(struct bpf_program *prog,
int attach_prog_fd,
const char *attach_func_name)
{
- int btf_id;
+ int btf_obj_fd = 0, btf_id = 0, err;
- if (!prog || attach_prog_fd < 0 || !attach_func_name)
- return -EINVAL;
+ if (!prog || attach_prog_fd < 0)
+ return libbpf_err(-EINVAL);
- if (attach_prog_fd)
+ if (prog->obj->loaded)
+ return libbpf_err(-EINVAL);
+
+ if (attach_prog_fd && !attach_func_name) {
+ /* remember attach_prog_fd and let bpf_program__load() find
+ * BTF ID during the program load
+ */
+ prog->attach_prog_fd = attach_prog_fd;
+ return 0;
+ }
+
+ if (attach_prog_fd) {
btf_id = libbpf_find_prog_btf_id(attach_func_name,
attach_prog_fd);
- else
- btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
- attach_func_name,
- prog->expected_attach_type);
+ if (btf_id < 0)
+ return libbpf_err(btf_id);
+ } else {
+ if (!attach_func_name)
+ return libbpf_err(-EINVAL);
- if (btf_id < 0)
- return btf_id;
+ /* load btf_vmlinux, if not yet */
+ err = bpf_object__load_vmlinux_btf(prog->obj, true);
+ if (err)
+ return libbpf_err(err);
+ err = find_kernel_btf_id(prog->obj, attach_func_name,
+ prog->expected_attach_type,
+ &btf_obj_fd, &btf_id);
+ if (err)
+ return libbpf_err(err);
+ }
prog->attach_btf_id = btf_id;
+ prog->attach_btf_obj_fd = btf_obj_fd;
prog->attach_prog_fd = attach_prog_fd;
return 0;
}
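
A sketch of the two modes the updated bpf_program__set_attach_target() supports, called before bpf_object__load() on programs of the appropriate types; the kernel function name is an illustrative example.

#include <bpf/libbpf.h>

static int retarget(struct bpf_program *fentry_prog, struct bpf_program *freplace_prog,
		    int target_prog_fd)
{
	int err;

	/* fentry/fexit: resolve "tcp_v4_connect" (example name) via vmlinux BTF */
	err = bpf_program__set_attach_target(fentry_prog, 0, "tcp_v4_connect");
	if (err)
		return err;

	/* freplace: pass only the target prog fd and defer BTF ID lookup to load time */
	return bpf_program__set_attach_target(freplace_prog, target_prog_fd, NULL);
}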
@@ -8399,7 +12475,7 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
int fd, err = 0, len;
char buf[128];
- fd = open(fcpu, O_RDONLY);
+ fd = open(fcpu, O_RDONLY | O_CLOEXEC);
if (fd < 0) {
err = -errno;
pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
@@ -8434,7 +12510,7 @@ int libbpf_num_possible_cpus(void)
err = parse_cpu_mask_file(fcpu, &mask, &n);
if (err)
- return err;
+ return libbpf_err(err);
tmp_cpus = 0;
for (i = 0; i < n; i++) {
@@ -8447,6 +12523,49 @@ int libbpf_num_possible_cpus(void)
return tmp_cpus;
}
+static int populate_skeleton_maps(const struct bpf_object *obj,
+ struct bpf_map_skeleton *maps,
+ size_t map_cnt)
+{
+ int i;
+
+ for (i = 0; i < map_cnt; i++) {
+ struct bpf_map **map = maps[i].map;
+ const char *name = maps[i].name;
+ void **mmaped = maps[i].mmaped;
+
+ *map = bpf_object__find_map_by_name(obj, name);
+ if (!*map) {
+ pr_warn("failed to find skeleton map '%s'\n", name);
+ return -ESRCH;
+ }
+
+ /* externs shouldn't be pre-setup from user code */
+ if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
+ *mmaped = (*map)->mmaped;
+ }
+ return 0;
+}
+
+static int populate_skeleton_progs(const struct bpf_object *obj,
+ struct bpf_prog_skeleton *progs,
+ size_t prog_cnt)
+{
+ int i;
+
+ for (i = 0; i < prog_cnt; i++) {
+ struct bpf_program **prog = progs[i].prog;
+ const char *name = progs[i].name;
+
+ *prog = bpf_object__find_program_by_name(obj, name);
+ if (!*prog) {
+ pr_warn("failed to find skeleton program '%s'\n", name);
+ return -ESRCH;
+ }
+ }
+ return 0;
+}
+
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
const struct bpf_object_open_opts *opts)
{
@@ -8454,7 +12573,7 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
.object_name = s->name,
);
struct bpf_object *obj;
- int i;
+ int err;
/* Attempt to preserve opts->object_name, unless overridden by user
* explicitly. Overwriting object name for skeletons is discouraged,
@@ -8469,44 +12588,99 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
}
obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
- if (IS_ERR(obj)) {
- pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
- s->name, PTR_ERR(obj));
- return PTR_ERR(obj);
+ err = libbpf_get_error(obj);
+ if (err) {
+ pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
+ s->name, err);
+ return libbpf_err(err);
}
*s->obj = obj;
+ err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
+ if (err) {
+ pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
+ return libbpf_err(err);
+ }
- for (i = 0; i < s->map_cnt; i++) {
- struct bpf_map **map = s->maps[i].map;
- const char *name = s->maps[i].name;
- void **mmaped = s->maps[i].mmaped;
+ err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
+ if (err) {
+ pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
+ return libbpf_err(err);
+ }
- *map = bpf_object__find_map_by_name(obj, name);
- if (!*map) {
- pr_warn("failed to find skeleton map '%s'\n", name);
- return -ESRCH;
- }
+ return 0;
+}
- /* externs shouldn't be pre-setup from user code */
- if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
- *mmaped = (*map)->mmaped;
+int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
+{
+ int err, len, var_idx, i;
+ const char *var_name;
+ const struct bpf_map *map;
+ struct btf *btf;
+ __u32 map_type_id;
+ const struct btf_type *map_type, *var_type;
+ const struct bpf_var_skeleton *var_skel;
+ struct btf_var_secinfo *var;
+
+ if (!s->obj)
+ return libbpf_err(-EINVAL);
+
+ btf = bpf_object__btf(s->obj);
+ if (!btf) {
+ pr_warn("subskeletons require BTF at runtime (object %s)\n",
+ bpf_object__name(s->obj));
+ return libbpf_err(-errno);
}
- for (i = 0; i < s->prog_cnt; i++) {
- struct bpf_program **prog = s->progs[i].prog;
- const char *name = s->progs[i].name;
+ err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
+ if (err) {
+ pr_warn("failed to populate subskeleton maps: %d\n", err);
+ return libbpf_err(err);
+ }
- *prog = bpf_object__find_program_by_name(obj, name);
- if (!*prog) {
- pr_warn("failed to find skeleton program '%s'\n", name);
- return -ESRCH;
+ err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
+ if (err) {
+ pr_warn("failed to populate subskeleton maps: %d\n", err);
+ return libbpf_err(err);
+ }
+
+ for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
+ var_skel = &s->vars[var_idx];
+ map = *var_skel->map;
+ map_type_id = bpf_map__btf_value_type_id(map);
+ map_type = btf__type_by_id(btf, map_type_id);
+
+ if (!btf_is_datasec(map_type)) {
+ pr_warn("type for map '%1$s' is not a datasec: %2$s",
+ bpf_map__name(map),
+ __btf_kind_str(btf_kind(map_type)));
+ return libbpf_err(-EINVAL);
+ }
+
+ len = btf_vlen(map_type);
+ var = btf_var_secinfos(map_type);
+ for (i = 0; i < len; i++, var++) {
+ var_type = btf__type_by_id(btf, var->type);
+ var_name = btf__name_by_offset(btf, var_type->name_off);
+ if (strcmp(var_name, var_skel->name) == 0) {
+ *var_skel->addr = map->mmaped + var->offset;
+ break;
+ }
}
}
-
return 0;
}
+void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
+{
+ if (!s)
+ return;
+ free(s->maps);
+ free(s->progs);
+ free(s->vars);
+ free(s);
+}
+
int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
int i, err;
@@ -8514,7 +12688,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
err = bpf_object__load(*s->obj);
if (err) {
pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
- return err;
+ return libbpf_err(err);
}
for (i = 0; i < s->map_cnt; i++) {
@@ -8553,7 +12727,7 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
*mmaped = NULL;
pr_warn("failed to re-mmap() map '%s': %d\n",
bpf_map__name(map), err);
- return err;
+ return libbpf_err(err);
}
}
@@ -8562,24 +12736,40 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
- int i;
+ int i, err;
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
- const struct bpf_sec_def *sec_def;
- const char *sec_name = bpf_program__title(prog, false);
- sec_def = find_sec_def(sec_name);
- if (!sec_def || !sec_def->attach_fn)
+ if (!prog->autoload || !prog->autoattach)
continue;
- *link = sec_def->attach_fn(sec_def, prog);
- if (IS_ERR(*link)) {
- pr_warn("failed to auto-attach program '%s': %ld\n",
- bpf_program__name(prog), PTR_ERR(*link));
- return PTR_ERR(*link);
- }
+ /* auto-attaching not supported for this program */
+ if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
+ continue;
+
+ /* if user already set the link manually, don't attempt auto-attach */
+ if (*link)
+ continue;
+
+ err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
+ if (err) {
+ pr_warn("prog '%s': failed to auto-attach: %d\n",
+ bpf_program__name(prog), err);
+ return libbpf_err(err);
+ }
+
+ /* It's possible that for some SEC() definitions auto-attach
+ * is supported in some cases (e.g., if definition completely
+ * specifies target information), but is not in other cases.
+ * SEC("uprobe") is one such case. If user specified target
+ * binary and function name, such BPF program can be
+ * auto-attached. But if not, it shouldn't trigger skeleton's
+ * attach to fail. It should just be skipped.
+ * attach_fn signals such case with returning 0 (no error) and
+ * setting link to NULL.
+ */
}
return 0;
@@ -8592,14 +12782,16 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
for (i = 0; i < s->prog_cnt; i++) {
struct bpf_link **link = s->progs[i].link;
- if (!IS_ERR_OR_NULL(*link))
- bpf_link__destroy(*link);
+ bpf_link__destroy(*link);
*link = NULL;
}
}
void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
+ if (!s)
+ return;
+
if (s->progs)
bpf_object__detach_skeleton(s);
if (s->obj)
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 44df1d3e7287..0b7362397ea3 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -18,11 +18,16 @@
#include <linux/bpf.h>
#include "libbpf_common.h"
+#include "libbpf_legacy.h"
#ifdef __cplusplus
extern "C" {
#endif
+LIBBPF_API __u32 libbpf_major_version(void);
+LIBBPF_API __u32 libbpf_minor_version(void);
+LIBBPF_API const char *libbpf_version_string(void);
+
enum libbpf_errno {
__LIBBPF_ERRNO__START = 4000,
@@ -46,6 +51,42 @@ enum libbpf_errno {
LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
+/**
+ * @brief **libbpf_bpf_attach_type_str()** converts the provided attach type
+ * value into a textual representation.
+ * @param t The attach type.
+ * @return Pointer to a static string identifying the attach type. NULL is
+ * returned for unknown **bpf_attach_type** values.
+ */
+LIBBPF_API const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t);
+
+/**
+ * @brief **libbpf_bpf_link_type_str()** converts the provided link type value
+ * into a textual representation.
+ * @param t The link type.
+ * @return Pointer to a static string identifying the link type. NULL is
+ * returned for unknown **bpf_link_type** values.
+ */
+LIBBPF_API const char *libbpf_bpf_link_type_str(enum bpf_link_type t);
+
+/**
+ * @brief **libbpf_bpf_map_type_str()** converts the provided map type value
+ * into a textual representation.
+ * @param t The map type.
+ * @return Pointer to a static string identifying the map type. NULL is
+ * returned for unknown **bpf_map_type** values.
+ */
+LIBBPF_API const char *libbpf_bpf_map_type_str(enum bpf_map_type t);
+
+/**
+ * @brief **libbpf_bpf_prog_type_str()** converts the provided program type
+ * value into a textual representation.
+ * @param t The program type.
+ * @return Pointer to a static string identifying the program type. NULL is
+ * returned for unknown **bpf_prog_type** values.
+ */
+LIBBPF_API const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t);
+
enum libbpf_print_level {
LIBBPF_WARN,
LIBBPF_INFO,
@@ -55,18 +96,21 @@ enum libbpf_print_level {
typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
const char *, va_list ap);
+/**
+ * @brief **libbpf_set_print()** sets user-provided log callback function to
+ * be used for libbpf warnings and informational messages.
+ * @param fn The log print function. If NULL, libbpf won't print anything.
+ * @return Pointer to old print function.
+ *
+ * This function is thread-safe.
+ */
LIBBPF_API libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn);
/* Hide internal to user */
struct bpf_object;
-struct bpf_object_open_attr {
- const char *file;
- enum bpf_prog_type prog_type;
-};
-
struct bpf_object_open_opts {
- /* size of this struct, for forward/backward compatiblity */
+ /* size of this struct, for forward/backward compatibility */
size_t sz;
/* object name override, if provided:
* - for object open from file, this will override setting object
@@ -77,50 +121,144 @@ struct bpf_object_open_opts {
const char *object_name;
/* parse map definitions non-strictly, allowing extra attributes/data */
bool relaxed_maps;
- /* DEPRECATED: handle CO-RE relocations non-strictly, allowing failures.
- * Value is ignored. Relocations always are processed non-strictly.
- * Non-relocatable instructions are replaced with invalid ones to
- * prevent accidental errors.
- * */
- bool relaxed_core_relocs;
/* maps that set the 'pinning' attribute in their definition will have
* their pin_path attribute set to a file in this directory, and be
* auto-pinned to that path on load; defaults to "/sys/fs/bpf".
*/
const char *pin_root_path;
- __u32 attach_prog_fd;
+
+ __u32 :32; /* stub out now removed attach_prog_fd */
+
/* Additional kernel config content that augments and overrides
* system Kconfig for CONFIG_xxx externs.
*/
const char *kconfig;
-};
-#define bpf_object_open_opts__last_field kconfig
+ /* Path to the custom BTF to be used for BPF CO-RE relocations.
+ * This custom BTF completely replaces the use of vmlinux BTF
+ * for the purpose of CO-RE relocations.
+ * NOTE: any other BPF feature (e.g., fentry/fexit programs,
+ * struct_ops, etc) will need actual kernel BTF at /sys/kernel/btf/vmlinux.
+ */
+ const char *btf_custom_path;
+ /* Pointer to a buffer for storing kernel logs for applicable BPF
+ * commands. Valid kernel_log_size has to be specified as well and are
+ * passed-through to bpf() syscall. Keep in mind that kernel might
+ * fail operation with -ENOSPC error if provided buffer is too small
+ * to contain entire log output.
+ * See the comment below for kernel_log_level for interaction between
+ * log_buf and log_level settings.
+ *
+ * If specified, this log buffer will be passed for:
+ * - each BPF program load (BPF_PROG_LOAD) attempt, unless overridden
+ * with bpf_program__set_log_buf() on per-program level, to get
+ * BPF verifier log output.
+ * - during BPF object's BTF load into kernel (BPF_BTF_LOAD) to get
+ * BTF sanity checking log.
+ *
+ * Each BPF command (BPF_BTF_LOAD or BPF_PROG_LOAD) will overwrite
+ * previous contents, so if you need more fine-grained control, set
+ * per-program buffer with bpf_program__set_log_buf() to preserve each
+ * individual program's verification log. Keep using kernel_log_buf
+ * for BTF verification log, if necessary.
+ */
+ char *kernel_log_buf;
+ size_t kernel_log_size;
+ /*
+ * Log level can be set independently from log buffer. Log_level=0
+ * means that libbpf will attempt loading BTF or program without any
+ * logging requested, but will retry with either its own or custom log
+ * buffer, if provided, and log_level=1 on any error.
+ * And vice versa, setting log_level>0 will request BTF or prog
+ * loading with verbose log from the first attempt (and as such also
+ * for successfully loaded BTF or program), and the actual log buffer
+ * could be either libbpf's own auto-allocated log buffer, if
+ * kernel_log_buf is NULL, or user-provided custom kernel_log_buf.
+ * If user didn't provide custom log buffer, libbpf will emit captured
+ * logs through its print callback.
+ */
+ __u32 kernel_log_level;
+ size_t :0;
+};
+#define bpf_object_open_opts__last_field kernel_log_level
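A minimal sketch of passing a caller-owned kernel log buffer and log level through these opts; the object path and buffer size are arbitrary placeholders:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static char log_buf[64 * 1024];

int load_with_log(void)
{
	LIBBPF_OPTS(bpf_object_open_opts, opts,
		.kernel_log_buf = log_buf,
		.kernel_log_size = sizeof(log_buf),
		.kernel_log_level = 1, /* verbose log from the first attempt */
	);
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open_file("prog.bpf.o", &opts);
	if (!obj)
		return -errno;

	err = bpf_object__load(obj);
	if (err)
		fprintf(stderr, "kernel log:\n%s\n", log_buf);

	bpf_object__close(obj);
	return err;
}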
+
+/**
+ * @brief **bpf_object__open()** creates a bpf_object by opening
+ * the BPF ELF object file pointed to by the passed path and loading it
+ * into memory.
+ * @param path BPF object file path.
+ * @return pointer to the new bpf_object; or NULL is returned on error,
+ * error code is stored in errno
+ */
LIBBPF_API struct bpf_object *bpf_object__open(const char *path);
+
+/**
+ * @brief **bpf_object__open_file()** creates a bpf_object by opening
+ * the BPF ELF object file pointed to by the passed path and loading it
+ * into memory.
+ * @param path BPF object file path
+ * @param opts options for how to load the bpf object, this parameter is
+ * optional and can be set to NULL
+ * @return pointer to the new bpf_object; or NULL is returned on error,
+ * error code is stored in errno
+ */
LIBBPF_API struct bpf_object *
bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts);
+
+/**
+ * @brief **bpf_object__open_mem()** creates a bpf_object by reading
+ * the BPF object's raw bytes from a memory buffer containing a valid
+ * BPF ELF object file.
+ * @param obj_buf pointer to the buffer containing ELF file bytes
+ * @param obj_buf_sz number of bytes in the buffer
+ * @param opts options for how to load the bpf object
+ * @return pointer to the new bpf_object; or NULL is returned on error,
+ * error code is stored in errno
+ */
LIBBPF_API struct bpf_object *
bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
const struct bpf_object_open_opts *opts);
-/* deprecated bpf_object__open variants */
-LIBBPF_API struct bpf_object *
-bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
- const char *name);
-LIBBPF_API struct bpf_object *
-bpf_object__open_xattr(struct bpf_object_open_attr *attr);
-
-enum libbpf_pin_type {
- LIBBPF_PIN_NONE,
- /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
- LIBBPF_PIN_BY_NAME,
-};
+/**
+ * @brief **bpf_object__load()** loads BPF object into kernel.
+ * @param obj Pointer to a valid BPF object instance returned by
+ * **bpf_object__open*()** APIs
+ * @return 0, on success; negative error code, otherwise, error code is
+ * stored in errno
+ */
+LIBBPF_API int bpf_object__load(struct bpf_object *obj);
-/* pin_maps and unpin_maps can both be called with a NULL path, in which case
- * they will use the pin_path attribute of each map (and ignore all maps that
- * don't have a pin_path set).
+/**
+ * @brief **bpf_object__close()** closes a BPF object and releases all
+ * resources.
+ * @param obj Pointer to a valid BPF object
+ */
+LIBBPF_API void bpf_object__close(struct bpf_object *obj);
+
+/**
+ * @brief **bpf_object__pin_maps()** pins each map contained within
+ * the BPF object at the passed directory.
+ * @param obj Pointer to a valid BPF object
+ * @param path A directory where maps should be pinned.
+ * @return 0, on success; negative error code, otherwise
+ *
+ * If `path` is NULL `bpf_map__pin` (which is being used on each map)
+ * will use the pin_path attribute of each map. In this case, maps that
+ * don't have a pin_path set will be ignored.
*/
LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path);
+
+/**
+ * @brief **bpf_object__unpin_maps()** unpins each map contained within
+ * the BPF object found in the passed directory.
+ * @param obj Pointer to a valid BPF object
+ * @param path A directory where pinned maps should be searched for.
+ * @return 0, on success; negative error code, otherwise
+ *
+ * If `path` is NULL `bpf_map__unpin` (which is being used on each map)
+ * will use the pin_path attribute of each map. In this case, maps that
+ * don't have a pin_path set will be ignored.
+ */
LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj,
const char *path);
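Put together, a typical open/load/pin flow that relies on each map's pin_path (set via the 'pinning' map attribute or bpf_map__set_pin_path()) could look like this sketch; the object path is a placeholder:

#include <errno.h>
#include <bpf/libbpf.h>

int load_and_pin(void)
{
	struct bpf_object *obj;
	int err;

	obj = bpf_object__open("prog.bpf.o");
	if (!obj)
		return -errno;

	err = bpf_object__load(obj);
	if (!err)
		/* NULL path: pin each map at its own pin_path, skip maps without one */
		err = bpf_object__pin_maps(obj, NULL);

	bpf_object__close(obj);
	return err;
}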
LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
@@ -128,45 +266,19 @@ LIBBPF_API int bpf_object__pin_programs(struct bpf_object *obj,
LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
const char *path);
LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
-LIBBPF_API void bpf_object__close(struct bpf_object *object);
-
-struct bpf_object_load_attr {
- struct bpf_object *obj;
- int log_level;
- const char *target_btf_path;
-};
-
-/* Load/unload object into/from kernel */
-LIBBPF_API int bpf_object__load(struct bpf_object *obj);
-LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
-LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
LIBBPF_API const char *bpf_object__name(const struct bpf_object *obj);
LIBBPF_API unsigned int bpf_object__kversion(const struct bpf_object *obj);
+LIBBPF_API int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version);
struct btf;
LIBBPF_API struct btf *bpf_object__btf(const struct bpf_object *obj);
LIBBPF_API int bpf_object__btf_fd(const struct bpf_object *obj);
LIBBPF_API struct bpf_program *
-bpf_object__find_program_by_title(const struct bpf_object *obj,
- const char *title);
-LIBBPF_API struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
const char *name);
-LIBBPF_API struct bpf_object *bpf_object__next(struct bpf_object *prev);
-#define bpf_object__for_each_safe(pos, tmp) \
- for ((pos) = bpf_object__next(NULL), \
- (tmp) = bpf_object__next(pos); \
- (pos) != NULL; \
- (pos) = (tmp), (tmp) = bpf_object__next(tmp))
-
-typedef void (*bpf_object_clear_priv_t)(struct bpf_object *, void *);
-LIBBPF_API int bpf_object__set_priv(struct bpf_object *obj, void *priv,
- bpf_object_clear_priv_t clear_priv);
-LIBBPF_API void *bpf_object__priv(const struct bpf_object *prog);
-
LIBBPF_API int
libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
enum bpf_attach_type *expected_attach_type);
@@ -177,43 +289,106 @@ LIBBPF_API int libbpf_find_vmlinux_btf_id(const char *name,
/* Accessors of bpf_program */
struct bpf_program;
-LIBBPF_API struct bpf_program *bpf_program__next(struct bpf_program *prog,
- const struct bpf_object *obj);
-
-#define bpf_object__for_each_program(pos, obj) \
- for ((pos) = bpf_program__next(NULL, (obj)); \
- (pos) != NULL; \
- (pos) = bpf_program__next((pos), (obj)))
-LIBBPF_API struct bpf_program *bpf_program__prev(struct bpf_program *prog,
- const struct bpf_object *obj);
+LIBBPF_API struct bpf_program *
+bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prog);
-typedef void (*bpf_program_clear_priv_t)(struct bpf_program *, void *);
+#define bpf_object__for_each_program(pos, obj) \
+ for ((pos) = bpf_object__next_program((obj), NULL); \
+ (pos) != NULL; \
+ (pos) = bpf_object__next_program((obj), (pos)))
-LIBBPF_API int bpf_program__set_priv(struct bpf_program *prog, void *priv,
- bpf_program_clear_priv_t clear_priv);
+LIBBPF_API struct bpf_program *
+bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *prog);
-LIBBPF_API void *bpf_program__priv(const struct bpf_program *prog);
LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
__u32 ifindex);
LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
-LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
- bool needs_copy);
+LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
+LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
+LIBBPF_API bool bpf_program__autoattach(const struct bpf_program *prog);
+LIBBPF_API void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach);
+
+struct bpf_insn;
+
+/**
+ * @brief **bpf_program__insns()** gives read-only access to BPF program's
+ * underlying BPF instructions.
+ * @param prog BPF program for which to return instructions
+ * @return a pointer to an array of BPF instructions that belong to the
+ * specified BPF program
+ *
+ * Returned pointer is always valid and not NULL. Number of `struct bpf_insn`
+ * pointed to can be fetched using **bpf_program__insn_cnt()** API.
+ *
+ * Keep in mind, libbpf can modify and append/delete BPF program's
+ * instructions as it processes BPF object file and prepares everything for
+ * uploading into the kernel. So depending on the point in BPF object
+ * lifetime, **bpf_program__insns()** can return different sets of
+ * instructions. As an example, during BPF object load phase BPF program
+ * instructions will be CO-RE-relocated, BPF subprograms instructions will be
+ * appended, ldimm64 instructions will have FDs embedded, etc. So instructions
+ * returned before **bpf_object__load()** and after it might be quite
+ * different.
+ */
+LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog);
+
+/**
+ * @brief **bpf_program__set_insns()** can set BPF program's underlying
+ * BPF instructions.
+ *
+ * WARNING: This is a very advanced libbpf API and users need to know
+ * what they are doing. This should be used from prog_prepare_load_fn
+ * callback only.
+ *
+ * @param prog BPF program for which to return instructions
+ * @param new_insns a pointer to an array of BPF instructions
+ * @param new_insn_cnt number of `struct bpf_insn`'s that form
+ * specified BPF program
+ * @return 0, on success; negative error code, otherwise
+ */
+LIBBPF_API int bpf_program__set_insns(struct bpf_program *prog,
+ struct bpf_insn *new_insns, size_t new_insn_cnt);
-/* returns program size in bytes */
-LIBBPF_API size_t bpf_program__size(const struct bpf_program *prog);
+/**
+ * @brief **bpf_program__insn_cnt()** returns number of `struct bpf_insn`'s
+ * that form specified BPF program.
+ * @param prog BPF program for which to return number of BPF instructions
+ *
+ * See **bpf_program__insns()** documentation for notes on how libbpf can
+ * change instructions and their count during different phases of
+ * **bpf_object** lifetime.
+ */
+LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);
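For illustration, the two accessors can be combined to dump per-program instruction counts; this sketch assumes an already opened (and possibly loaded) obj:

#include <stdio.h>
#include <bpf/libbpf.h>

void dump_insn_counts(const struct bpf_object *obj)
{
	struct bpf_program *prog;

	bpf_object__for_each_program(prog, obj) {
		const struct bpf_insn *insns = bpf_program__insns(prog);
		size_t cnt = bpf_program__insn_cnt(prog);

		printf("%s: %zu insns, first opcode 0x%02x\n",
		       bpf_program__name(prog), cnt, cnt ? insns[0].code : 0);
	}
}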
-LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
- __u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
-LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
- const char *path,
- int instance);
-LIBBPF_API int bpf_program__unpin_instance(struct bpf_program *prog,
- const char *path,
- int instance);
+
+/**
+ * @brief **bpf_program__pin()** pins the BPF program to a file
+ * in the BPF FS specified by a path. This increments the program's
+ * reference count, allowing it to stay loaded after the process
+ * which loaded it has exited.
+ *
+ * @param prog BPF program to pin, must already be loaded
+ * @param path file path in a BPF file system
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int bpf_program__pin(struct bpf_program *prog, const char *path);
+
+/**
+ * @brief **bpf_program__unpin()** unpins the BPF program from a file
+ * in the BPFFS specified by a path. This decrements the program's
+ * reference count.
+ *
+ * The file pinning the BPF program can also be unlinked by a different
+ * process in which case this function will return an error.
+ *
+ * @param prog BPF program to unpin
+ * @param path file path to the pin in a BPF file system
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int bpf_program__unpin(struct bpf_program *prog, const char *path);
LIBBPF_API void bpf_program__unload(struct bpf_program *prog);
@@ -222,166 +397,412 @@ struct bpf_link;
LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
+/**
+ * @brief **bpf_link__pin()** pins the BPF link to a file
+ * in the BPF FS specified by a path. This increments the link's
+ * reference count, allowing it to stay loaded after the process
+ * which loaded it has exited.
+ *
+ * @param link BPF link to pin, must already be loaded
+ * @param path file path in a BPF file system
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
+
+/**
+ * @brief **bpf_link__unpin()** unpins the BPF link from a file
+ * in the BPFFS specified by a path. This decrements the link's
+ * reference count.
+ *
+ * The file pinning the BPF link can also be unlinked by a different
+ * process in which case this function will return an error.
+ *
+ * @param link BPF link to unpin
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
struct bpf_program *prog);
LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
+LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
+/**
+ * @brief **bpf_program__attach()** is a generic function for attaching
+ * a BPF program based on auto-detection of program type, attach type,
+ * and extra parameters, where applicable.
+ *
+ * @param prog BPF program to attach
+ * @return Reference to the newly created BPF link; or NULL is returned on error,
+ * error code is stored in errno
+ *
+ * This is supported for:
+ * - kprobe/kretprobe (depends on SEC() definition)
+ * - uprobe/uretprobe (depends on SEC() definition)
+ * - tracepoint
+ * - raw tracepoint
+ * - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
+ */
LIBBPF_API struct bpf_link *
-bpf_program__attach(struct bpf_program *prog);
-LIBBPF_API struct bpf_link *
-bpf_program__attach_perf_event(struct bpf_program *prog, int pfd);
-LIBBPF_API struct bpf_link *
-bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe,
- const char *func_name);
-LIBBPF_API struct bpf_link *
-bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe,
- pid_t pid, const char *binary_path,
- size_t func_offset);
-LIBBPF_API struct bpf_link *
-bpf_program__attach_tracepoint(struct bpf_program *prog,
- const char *tp_category,
- const char *tp_name);
+bpf_program__attach(const struct bpf_program *prog);
+
+struct bpf_perf_event_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value fetchable through bpf_get_attach_cookie() */
+ __u64 bpf_cookie;
+ /* don't use BPF link when attaching BPF program */
+ bool force_ioctl_attach;
+ size_t :0;
+};
+#define bpf_perf_event_opts__last_field force_ioctl_attach
+
LIBBPF_API struct bpf_link *
-bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
- const char *tp_name);
+bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd);
+
LIBBPF_API struct bpf_link *
-bpf_program__attach_trace(struct bpf_program *prog);
+bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
+ const struct bpf_perf_event_opts *opts);
+
+/**
+ * enum probe_attach_mode - the mode to attach kprobe/uprobe
+ *
+ * forces libbpf to attach the kprobe/uprobe in a specific mode; -ENOTSUP will
+ * be returned if the requested mode is not supported by the kernel.
+ */
+enum probe_attach_mode {
+ /* attach probe in latest supported mode by kernel */
+ PROBE_ATTACH_MODE_DEFAULT = 0,
+ /* attach probe in legacy mode, using debugfs/tracefs */
+ PROBE_ATTACH_MODE_LEGACY,
+ /* create perf event with perf_event_open() syscall */
+ PROBE_ATTACH_MODE_PERF,
+ /* attach probe with BPF link */
+ PROBE_ATTACH_MODE_LINK,
+};
+
+struct bpf_kprobe_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value fetchable through bpf_get_attach_cookie() */
+ __u64 bpf_cookie;
+ /* function's offset to install kprobe to */
+ size_t offset;
+ /* kprobe is return probe */
+ bool retprobe;
+ /* kprobe attach mode */
+ enum probe_attach_mode attach_mode;
+ size_t :0;
+};
+#define bpf_kprobe_opts__last_field attach_mode
+
LIBBPF_API struct bpf_link *
-bpf_program__attach_lsm(struct bpf_program *prog);
+bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe,
+ const char *func_name);
LIBBPF_API struct bpf_link *
-bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd);
+bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
+ const char *func_name,
+ const struct bpf_kprobe_opts *opts);
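A sketch of the opts-based variant, installing a return probe with a cookie; the kernel function name is only an example:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

struct bpf_link *attach_openat_kretprobe(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.retprobe = true,
		.bpf_cookie = 0x1234, /* readable in BPF via bpf_get_attach_cookie() */
	);
	struct bpf_link *link;

	link = bpf_program__attach_kprobe_opts(prog, "do_sys_openat2", &opts);
	if (!link)
		fprintf(stderr, "kretprobe attach failed: %d\n", -errno);
	return link;
}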
-struct bpf_map;
+struct bpf_kprobe_multi_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* array of function symbols to attach */
+ const char **syms;
+ /* array of function addresses to attach */
+ const unsigned long *addrs;
+ /* array of user-provided values fetchable through bpf_get_attach_cookie */
+ const __u64 *cookies;
+ /* number of elements in syms/addrs/cookies arrays */
+ size_t cnt;
+ /* create return kprobes */
+ bool retprobe;
+ size_t :0;
+};
-LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
+#define bpf_kprobe_multi_opts__last_field retprobe
-struct bpf_insn;
+LIBBPF_API struct bpf_link *
+bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
+ const char *pattern,
+ const struct bpf_kprobe_multi_opts *opts);
-/*
- * Libbpf allows callers to adjust BPF programs before being loaded
- * into kernel. One program in an object file can be transformed into
- * multiple variants to be attached to different hooks.
+struct bpf_ksyscall_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value fetchable through bpf_get_attach_cookie() */
+ __u64 bpf_cookie;
+ /* attach as return probe? */
+ bool retprobe;
+ size_t :0;
+};
+#define bpf_ksyscall_opts__last_field retprobe
+
+/**
+ * @brief **bpf_program__attach_ksyscall()** attaches a BPF program
+ * to the kernel syscall handler of a specified syscall. Optionally it's possible
+ * to request installing a retprobe that will be triggered at syscall exit. It's
+ * also possible to associate a BPF cookie (through options).
*
- * bpf_program_prep_t, bpf_program__set_prep and bpf_program__nth_fd
- * form an API for this purpose.
+ * Libbpf will automatically determine the correct full kernel function name,
+ * which, depending on system architecture and kernel version/configuration,
+ * could be of the form __<arch>_sys_<syscall> or __se_sys_<syscall>, and will
+ * attach the specified program using the kprobe/kretprobe mechanism.
*
- * - bpf_program_prep_t:
- * Defines a 'preprocessor', which is a caller defined function
- * passed to libbpf through bpf_program__set_prep(), and will be
- * called before program is loaded. The processor should adjust
- * the program one time for each instance according to the instance id
- * passed to it.
+ * **bpf_program__attach_ksyscall()** is an API counterpart of declarative
+ * **SEC("ksyscall/<syscall>")** annotation of BPF programs.
*
- * - bpf_program__set_prep:
- * Attaches a preprocessor to a BPF program. The number of instances
- * that should be created is also passed through this function.
+ * At the moment **SEC("ksyscall")** and **bpf_program__attach_ksyscall()** do
+ * not handle all the calling convention quirks for mmap(), clone() and compat
+ * syscalls. It also only attaches to "native" syscall interfaces. If the host
+ * system supports compat syscalls or defines 32-bit syscalls in a 64-bit
+ * kernel, such syscall interfaces won't be attached to by libbpf.
*
- * - bpf_program__nth_fd:
- * After the program is loaded, get resulting FD of a given instance
- * of the BPF program.
+ * These limitations may or may not change in the future. Therefore it is
+ * recommended to use SEC("kprobe") for these syscalls or if working with
+ * compat and 32-bit interfaces is required.
*
- * If bpf_program__set_prep() is not used, the program would be loaded
- * without adjustment during bpf_object__load(). The program has only
- * one instance. In this case bpf_program__fd(prog) is equal to
- * bpf_program__nth_fd(prog, 0).
+ * @param prog BPF program to attach
+ * @param syscall_name Symbolic name of the syscall (e.g., "bpf")
+ * @param opts Additional options (see **struct bpf_ksyscall_opts**)
+ * @return Reference to the newly created BPF link; or NULL is returned on
+ * error, error code is stored in errno
*/
+LIBBPF_API struct bpf_link *
+bpf_program__attach_ksyscall(const struct bpf_program *prog,
+ const char *syscall_name,
+ const struct bpf_ksyscall_opts *opts);
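A sketch of attaching entry and exit programs to one syscall by its symbolic name; "unlinkat" and the two program parameters are examples:

#include <errno.h>
#include <bpf/libbpf.h>

int attach_unlinkat_probes(const struct bpf_program *enter_prog,
			   const struct bpf_program *exit_prog)
{
	LIBBPF_OPTS(bpf_ksyscall_opts, opts, .retprobe = true);
	struct bpf_link *enter_link, *exit_link;

	enter_link = bpf_program__attach_ksyscall(enter_prog, "unlinkat", NULL);
	if (!enter_link)
		return -errno;

	exit_link = bpf_program__attach_ksyscall(exit_prog, "unlinkat", &opts);
	if (!exit_link) {
		bpf_link__destroy(enter_link);
		return -errno;
	}

	/* a real program would keep both links and bpf_link__destroy() them on teardown */
	return 0;
}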
-struct bpf_prog_prep_result {
- /*
- * If not NULL, load new instruction array.
- * If set to NULL, don't load this instance.
+struct bpf_uprobe_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* offset of kernel reference counted USDT semaphore, added in
+ * a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
*/
- struct bpf_insn *new_insn_ptr;
- int new_insn_cnt;
+ size_t ref_ctr_offset;
+ /* custom user-provided value fetchable through bpf_get_attach_cookie() */
+ __u64 bpf_cookie;
+ /* uprobe is return probe, invoked at function return time */
+ bool retprobe;
+ /* Function name to attach to. Could be an unqualified ("abc") or library-qualified
+ * "abc@LIBXYZ" name. To specify function entry, func_name should be set while
+ * func_offset argument to bpf_program__attach_uprobe_opts() should be 0. To trace an
+ * offset within a function, specify func_name and use func_offset argument to specify
+ * offset within the function. Shared library functions must specify the shared library
+ * binary_path.
+ */
+ const char *func_name;
+ /* uprobe attach mode */
+ enum probe_attach_mode attach_mode;
+ size_t :0;
+};
+#define bpf_uprobe_opts__last_field attach_mode
+
+/**
+ * @brief **bpf_program__attach_uprobe()** attaches a BPF program
+ * to the userspace function which is found by binary path and
+ * offset. You can optionally specify a particular process to attach
+ * to. You can also optionally attach the program to the function
+ * exit instead of entry.
+ *
+ * @param prog BPF program to attach
+ * @param retprobe Attach to function exit
+ * @param pid Process ID to attach the uprobe to, 0 for self (own process),
+ * -1 for all processes
+ * @param binary_path Path to binary that contains the function symbol
+ * @param func_offset Offset within the binary of the function symbol
+ * @return Reference to the newly created BPF link; or NULL is returned on error,
+ * error code is stored in errno
+ */
+LIBBPF_API struct bpf_link *
+bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe,
+ pid_t pid, const char *binary_path,
+ size_t func_offset);
+
+/**
+ * @brief **bpf_program__attach_uprobe_opts()** is just like
+ * bpf_program__attach_uprobe() except with an options struct
+ * for various configurations.
+ *
+ * @param prog BPF program to attach
+ * @param pid Process ID to attach the uprobe to, 0 for self (own process),
+ * -1 for all processes
+ * @param binary_path Path to binary that contains the function symbol
+ * @param func_offset Offset within the binary of the function symbol
+ * @param opts Options for altering program attachment
+ * @return Reference to the newly created BPF link; or NULL is returned on error,
+ * error code is stored in errno
+ */
+LIBBPF_API struct bpf_link *
+bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
+ const char *binary_path, size_t func_offset,
+ const struct bpf_uprobe_opts *opts);
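A sketch of letting libbpf resolve the attach location by function name through the opts; the library path and symbol are examples:

#include <sys/types.h>
#include <bpf/libbpf.h>

struct bpf_link *attach_malloc_uprobe(const struct bpf_program *prog, pid_t pid)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.func_name = "malloc", /* resolved inside the target binary */
		.retprobe = false,
	);

	/* func_offset is 0 because func_name already names the function entry */
	return bpf_program__attach_uprobe_opts(prog, pid, "libc.so.6", 0, &opts);
}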
- /* If not NULL, result FD is written to it. */
- int *pfd;
+struct bpf_usdt_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value accessible through bpf_usdt_cookie() */
+ __u64 usdt_cookie;
+ size_t :0;
};
+#define bpf_usdt_opts__last_field usdt_cookie
-/*
- * Parameters of bpf_program_prep_t:
- * - prog: The bpf_program being loaded.
- * - n: Index of instance being generated.
- * - insns: BPF instructions array.
- * - insns_cnt:Number of instructions in insns.
- * - res: Output parameter, result of transformation.
+/**
+ * @brief **bpf_program__attach_usdt()** is just like
+ * bpf_program__attach_uprobe_opts() except it covers USDT (User-space
+ * Statically Defined Tracepoint) attachment, instead of attaching to
+ * user-space function entry or exit.
*
- * Return value:
- * - Zero: pre-processing success.
- * - Non-zero: pre-processing error, stop loading.
+ * @param prog BPF program to attach
+ * @param pid Process ID to attach the uprobe to, 0 for self (own process),
+ * -1 for all processes
+ * @param binary_path Path to binary that contains provided USDT probe
+ * @param usdt_provider USDT provider name
+ * @param usdt_name USDT probe name
+ * @param opts Options for altering program attachment
+ * @return Reference to the newly created BPF link; or NULL is returned on error,
+ * error code is stored in errno
*/
-typedef int (*bpf_program_prep_t)(struct bpf_program *prog, int n,
- struct bpf_insn *insns, int insns_cnt,
- struct bpf_prog_prep_result *res);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_usdt(const struct bpf_program *prog,
+ pid_t pid, const char *binary_path,
+ const char *usdt_provider, const char *usdt_name,
+ const struct bpf_usdt_opts *opts);
+
+struct bpf_tracepoint_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value fetchable through bpf_get_attach_cookie() */
+ __u64 bpf_cookie;
+};
+#define bpf_tracepoint_opts__last_field bpf_cookie
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_tracepoint(const struct bpf_program *prog,
+ const char *tp_category,
+ const char *tp_name);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
+ const char *tp_category,
+ const char *tp_name,
+ const struct bpf_tracepoint_opts *opts);
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
+ const char *tp_name);
-LIBBPF_API int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,
- bpf_program_prep_t prep);
+struct bpf_trace_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* custom user-provided value fetchable through bpf_get_attach_cookie() */
+ __u64 cookie;
+};
+#define bpf_trace_opts__last_field cookie
-LIBBPF_API int bpf_program__nth_fd(const struct bpf_program *prog, int n);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_trace(const struct bpf_program *prog);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_trace_opts(const struct bpf_program *prog, const struct bpf_trace_opts *opts);
-/*
- * Adjust type of BPF program. Default is kprobe.
- */
-LIBBPF_API int bpf_program__set_socket_filter(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_tracepoint(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_raw_tracepoint(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_kprobe(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_lsm(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_struct_ops(struct bpf_program *prog);
-LIBBPF_API int bpf_program__set_extension(struct bpf_program *prog);
-
-LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog);
-LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
- enum bpf_prog_type type);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_lsm(const struct bpf_program *prog);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_freplace(const struct bpf_program *prog,
+ int target_fd, const char *attach_func_name);
+
+struct bpf_map;
+
+LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map);
+LIBBPF_API int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map);
+
+struct bpf_iter_attach_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ union bpf_iter_link_info *link_info;
+ __u32 link_info_len;
+};
+#define bpf_iter_attach_opts__last_field link_info_len
+
+LIBBPF_API struct bpf_link *
+bpf_program__attach_iter(const struct bpf_program *prog,
+ const struct bpf_iter_attach_opts *opts);
+
+LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);
+
+/**
+ * @brief **bpf_program__set_type()** sets the program
+ * type of the passed BPF program.
+ * @param prog BPF program to set the program type for
+ * @param type program type to set the BPF program to have
+ * @return error code; or 0 if no error. An error occurs
+ * if the object is already loaded.
+ *
+ * This must be called before the BPF object is loaded,
+ * otherwise it has no effect and an error is returned.
+ */
+LIBBPF_API int bpf_program__set_type(struct bpf_program *prog,
+ enum bpf_prog_type type);
LIBBPF_API enum bpf_attach_type
-bpf_program__get_expected_attach_type(struct bpf_program *prog);
-LIBBPF_API void
+bpf_program__expected_attach_type(const struct bpf_program *prog);
+
+/**
+ * @brief **bpf_program__set_expected_attach_type()** sets the
+ * attach type of the passed BPF program. This is used for
+ * auto-detection of attachment when programs are loaded.
+ * @param prog BPF program to set the attach type for
+ * @param type attach type to set the BPF program to have
+ * @return error code; or 0 if no error. An error occurs
+ * if the object is already loaded.
+ *
+ * This must be called before the BPF object is loaded,
+ * otherwise it has no effect and an error is returned.
+ */
+LIBBPF_API int
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
+LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_flags(struct bpf_program *prog, __u32 flags);
+
+/* Per-program log level and log buffer getters/setters.
+ * See bpf_object_open_opts comments regarding log_level and log_buf
+ * interactions.
+ */
+LIBBPF_API __u32 bpf_program__log_level(const struct bpf_program *prog);
+LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level);
+LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
+LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
+
+/**
+ * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
+ * for supported BPF program types:
+ * - BTF-aware raw tracepoints (tp_btf);
+ * - fentry/fexit/fmod_ret;
+ * - lsm;
+ * - freplace.
+ * @param prog BPF program to set the attach target for
+ * @param attach_prog_fd FD of the target BPF program, for freplace; 0 when attaching to a kernel target
+ * @param attach_func_name name of the kernel or target program's function to attach to
+ * @return error code; or 0 if no error occurred.
+ */
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
const char *attach_func_name);
-LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_kprobe(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_lsm(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_struct_ops(const struct bpf_program *prog);
-LIBBPF_API bool bpf_program__is_extension(const struct bpf_program *prog);
-
-/*
- * No need for __attribute__((packed)), all members of 'bpf_map_def'
- * are all aligned. In addition, using __attribute__((packed))
- * would trigger a -Wpacked warning message, and lead to an error
- * if -Werror is set.
- */
-struct bpf_map_def {
- unsigned int type;
- unsigned int key_size;
- unsigned int value_size;
- unsigned int max_entries;
- unsigned int map_flags;
-};
-
-/*
- * The 'struct bpf_map' in include/linux/bpf.h is internal to the kernel,
- * so no need to worry about a name clash.
+/**
+ * @brief **bpf_object__find_map_by_name()** returns BPF map of
+ * the given name, if it exists within the passed BPF object
+ * @param obj BPF object
+ * @param name name of the BPF map
+ * @return BPF map instance, if such map exists within the BPF object;
+ * or NULL otherwise.
*/
LIBBPF_API struct bpf_map *
bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
@@ -389,86 +810,461 @@ bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name);
LIBBPF_API int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name);
-/*
- * Get bpf_map through the offset of corresponding struct bpf_map_def
- * in the BPF object file.
- */
LIBBPF_API struct bpf_map *
-bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset);
+bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *map);
-LIBBPF_API struct bpf_map *
-bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj);
#define bpf_object__for_each_map(pos, obj) \
- for ((pos) = bpf_map__next(NULL, (obj)); \
+ for ((pos) = bpf_object__next_map((obj), NULL); \
(pos) != NULL; \
- (pos) = bpf_map__next((pos), (obj)))
+ (pos) = bpf_object__next_map((obj), (pos)))
#define bpf_map__for_each bpf_object__for_each_map
LIBBPF_API struct bpf_map *
-bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj);
-
+bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__set_autocreate()** sets whether libbpf should auto-create
+ * the BPF map during the BPF object load phase.
+ * @param map the BPF map instance
+ * @param autocreate whether to create BPF map during BPF object load
+ * @return 0 on success; -EBUSY if BPF object was already loaded
+ *
+ * **bpf_map__set_autocreate()** allows opting out of libbpf auto-creating
+ * a BPF map. By default, libbpf will attempt to create every single BPF map
+ * defined in the BPF object file using the BPF_MAP_CREATE command of the bpf()
+ * syscall and fill in the map FD in BPF instructions.
+ *
+ * This API allows opting out of this process for a specific map instance. This
+ * can be useful if the host kernel doesn't support such a BPF map type or the
+ * used combination of flags, and the user application wants to avoid creating
+ * such a map in the first place. The user is still responsible for making sure
+ * that their BPF-side code that expects to use such a missing BPF map is
+ * recognized by the BPF verifier as dead code, otherwise the verifier will
+ * reject the BPF program.
+ */
+LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate);
+LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map);
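A sketch of opting a single map out of creation between open and load, e.g. when the host kernel lacks support for its type; the map name is a placeholder:

#include <errno.h>
#include <bpf/libbpf.h>

int skip_optional_map(struct bpf_object *obj)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, "optional_lru_map");
	if (!map)
		return -ENOENT;

	/* must run after bpf_object__open*() but before bpf_object__load() */
	return bpf_map__set_autocreate(map, false);
}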
+
+/**
+ * @brief **bpf_map__fd()** gets the file descriptor of the passed
+ * BPF map
+ * @param map the BPF map instance
+ * @return the file descriptor; or -EINVAL in case of an error
+ */
LIBBPF_API int bpf_map__fd(const struct bpf_map *map);
-LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map);
+LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
+/* get map name */
LIBBPF_API const char *bpf_map__name(const struct bpf_map *map);
+/* get/set map type */
+LIBBPF_API enum bpf_map_type bpf_map__type(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type);
+/* get/set map size (max_entries) */
+LIBBPF_API __u32 bpf_map__max_entries(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries);
+/* get/set map flags */
+LIBBPF_API __u32 bpf_map__map_flags(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags);
+/* get/set map NUMA node */
+LIBBPF_API __u32 bpf_map__numa_node(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node);
+/* get/set map key size */
+LIBBPF_API __u32 bpf_map__key_size(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_key_size(struct bpf_map *map, __u32 size);
+/* get/set map value size */
+LIBBPF_API __u32 bpf_map__value_size(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_value_size(struct bpf_map *map, __u32 size);
+/* get map key/value BTF type IDs */
LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map);
LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map);
+/* get/set map if_index */
+LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
+/* get/set map map_extra flags */
+LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map);
+LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra);
-typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
-LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
- bpf_map_clear_priv_t clear_priv);
-LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
-LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd);
-LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries);
-LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
+LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
+
+/**
+ * @brief **bpf_map__is_internal()** tells the caller whether or not the
+ * passed map is a special map created by libbpf automatically for things like
+ * global variables, __ksym externs, Kconfig values, etc
+ * @param map the bpf_map
+ * @return true, if the map is an internal map; false, otherwise
+ */
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
-LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex);
+
+/**
+ * @brief **bpf_map__set_pin_path()** sets the path attribute that tells where the
+ * BPF map should be pinned. This does not actually create the 'pin'.
+ * @param map The bpf_map
+ * @param path The path
+ * @return 0, on success; negative error, otherwise
+ */
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
-LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__pin_path()** gets the path attribute that tells where the
+ * BPF map should be pinned.
+ * @param map The bpf_map
+ * @return The path string; which can be NULL
+ */
+LIBBPF_API const char *bpf_map__pin_path(const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__is_pinned()** tells the caller whether or not the
+ * passed map has been pinned via a 'pin' file.
+ * @param map The bpf_map
+ * @return true, if the map is pinned; false, otherwise
+ */
LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__pin()** creates a file that serves as a 'pin'
+ * for the BPF map. This increments the reference count on the
+ * BPF map which will keep the BPF map loaded even after the
+ * userspace process which loaded it has exited.
+ * @param map The bpf_map to pin
+ * @param path A file path for the 'pin'
+ * @return 0, on success; negative error, otherwise
+ *
+ * If `path` is NULL the map's `pin_path` attribute will be used. If this is
+ * also NULL, an error will be returned and the map will not be pinned.
+ */
LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path);
+
+/**
+ * @brief **bpf_map__unpin()** removes the file that serves as a
+ * 'pin' for the BPF map.
+ * @param map The bpf_map to unpin
+ * @param path A file path for the 'pin'
+ * @return 0, on success; negative error, otherwise
+ *
+ * The `path` parameter can be NULL, in which case the `pin_path`
+ * map attribute is unpinned. If both the `path` parameter and
+ * `pin_path` map attribute are set, they must be equal.
+ */
LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path);
LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd);
+LIBBPF_API struct bpf_map *bpf_map__inner_map(struct bpf_map *map);
+
+/**
+ * @brief **bpf_map__lookup_elem()** allows looking up the BPF map value
+ * corresponding to provided key.
+ * @param map BPF map to lookup element in
+ * @param key pointer to memory containing bytes of the key used for lookup
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @param value pointer to memory in which looked up value will be stored
+ * @param value_sz size in bytes of value data memory; it has to match BPF map
+ * definition's **value_size**. For per-CPU BPF maps value size has to be
+ * a product of BPF map value size and number of possible CPUs in the system
+ * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
+ * per-CPU values value size has to be aligned up to closest 8 bytes for
+ * alignment reasons, so expected size is: `round_up(value_size, 8)
+ * * libbpf_num_possible_cpus()`.
+ * @param flags extra flags passed to kernel for this operation
+ * @return 0, on success; negative error, otherwise
+ *
+ * **bpf_map__lookup_elem()** is high-level equivalent of
+ * **bpf_map_lookup_elem()** API with added check for key and value size.
+ */
+LIBBPF_API int bpf_map__lookup_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ void *value, size_t value_sz, __u64 flags);
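A sketch of the size-checked lookup, together with its update counterpart declared further below, for an ordinary (non-per-CPU) map with __u32 keys and __u64 values:

#include <bpf/libbpf.h>

int read_counter(const struct bpf_map *map, __u32 key, __u64 *value)
{
	/* key_sz/value_sz must match the map definition's key_size/value_size */
	return bpf_map__lookup_elem(map, &key, sizeof(key), value, sizeof(*value), 0);
}

int bump_counter(const struct bpf_map *map, __u32 key, __u64 value)
{
	return bpf_map__update_elem(map, &key, sizeof(key), &value, sizeof(value), BPF_ANY);
}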
+
+/**
+ * @brief **bpf_map__update_elem()** allows inserting or updating a value in the BPF
+ * map that corresponds to provided key.
+ * @param map BPF map to insert to or update element in
+ * @param key pointer to memory containing bytes of the key
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @param value pointer to memory containing bytes of the value
+ * @param value_sz size in bytes of value data memory; it has to match BPF map
+ * definition's **value_size**. For per-CPU BPF maps value size has to be
+ * a product of BPF map value size and number of possible CPUs in the system
+ * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
+ * per-CPU values value size has to be aligned up to closest 8 bytes for
+ * alignment reasons, so expected size is: `round_up(value_size, 8)
+ * * libbpf_num_possible_cpus()`.
+ * @param flags extra flags passed to kernel for this operation
+ * @return 0, on success; negative error, otherwise
+ *
+ * **bpf_map__update_elem()** is high-level equivalent of
+ * **bpf_map_update_elem()** API with added check for key and value size.
+ */
+LIBBPF_API int bpf_map__update_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ const void *value, size_t value_sz, __u64 flags);
+
+/**
+ * @brief **bpf_map__delete_elem()** allows deleting the element in the BPF map that
+ * corresponds to provided key.
+ * @param map BPF map to delete element from
+ * @param key pointer to memory containing bytes of the key
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @param flags extra flags passed to kernel for this operation
+ * @return 0, on success; negative error, otherwise
+ *
+ * **bpf_map__delete_elem()** is high-level equivalent of
+ * **bpf_map_delete_elem()** API with added check for key size.
+ */
+LIBBPF_API int bpf_map__delete_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz, __u64 flags);
+
+/**
+ * @brief **bpf_map__lookup_and_delete_elem()** allows looking up the BPF map value
+ * corresponding to provided key and atomically delete it afterwards.
+ * @param map BPF map to lookup element in
+ * @param key pointer to memory containing bytes of the key used for lookup
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @param value pointer to memory in which looked up value will be stored
+ * @param value_sz size in bytes of value data memory; it has to match BPF map
+ * definition's **value_size**. For per-CPU BPF maps value size has to be
+ * a product of BPF map value size and number of possible CPUs in the system
+ * (could be fetched with **libbpf_num_possible_cpus()**). Note also that for
+ * per-CPU values value size has to be aligned up to closest 8 bytes for
+ * alignment reasons, so expected size is: `round_up(value_size, 8)
+ * * libbpf_num_possible_cpus()`.
+ * @param flags extra flags passed to kernel for this operation
+ * @return 0, on success; negative error, otherwise
+ *
+ * **bpf_map__lookup_and_delete_elem()** is high-level equivalent of
+ * **bpf_map_lookup_and_delete_elem()** API with added check for key and value size.
+ */
+LIBBPF_API int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
+ const void *key, size_t key_sz,
+ void *value, size_t value_sz, __u64 flags);
+
+/**
+ * @brief **bpf_map__get_next_key()** allows iterating BPF map keys by
+ * fetching next key that follows current key.
+ * @param map BPF map to fetch next key from
+ * @param cur_key pointer to memory containing bytes of current key or NULL to
+ * fetch the first key
+ * @param next_key pointer to memory to write next key into
+ * @param key_sz size in bytes of key data, needs to match BPF map definition's **key_size**
+ * @return 0, on success; -ENOENT if **cur_key** is the last key in BPF map;
+ * negative error, otherwise
+ *
+ * **bpf_map__get_next_key()** is high-level equivalent of
+ * **bpf_map_get_next_key()** API with added check for key size.
+ */
+LIBBPF_API int bpf_map__get_next_key(const struct bpf_map *map,
+ const void *cur_key, void *next_key, size_t key_sz);
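A sketch of walking all keys of a map with __u32 keys, starting from NULL to fetch the first key:

#include <errno.h>
#include <bpf/libbpf.h>

int count_keys(const struct bpf_map *map)
{
	__u32 cur, next;
	int cnt = 0, err;

	err = bpf_map__get_next_key(map, NULL, &next, sizeof(next));
	while (!err) {
		cnt++;
		cur = next;
		err = bpf_map__get_next_key(map, &cur, &next, sizeof(next));
	}
	return err == -ENOENT ? cnt : err; /* -ENOENT means iteration is done */
}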
-LIBBPF_API long libbpf_get_error(const void *ptr);
+struct bpf_xdp_set_link_opts {
+ size_t sz;
+ int old_fd;
+ size_t :0;
+};
+#define bpf_xdp_set_link_opts__last_field old_fd
-struct bpf_prog_load_attr {
- const char *file;
- enum bpf_prog_type prog_type;
- enum bpf_attach_type expected_attach_type;
- int ifindex;
- int log_level;
- int prog_flags;
+struct bpf_xdp_attach_opts {
+ size_t sz;
+ int old_prog_fd;
+ size_t :0;
};
+#define bpf_xdp_attach_opts__last_field old_prog_fd
-LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
- struct bpf_object **pobj, int *prog_fd);
-LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
- struct bpf_object **pobj, int *prog_fd);
+struct bpf_xdp_query_opts {
+ size_t sz;
+ __u32 prog_id; /* output */
+ __u32 drv_prog_id; /* output */
+ __u32 hw_prog_id; /* output */
+ __u32 skb_prog_id; /* output */
+ __u8 attach_mode; /* output */
+ __u64 feature_flags; /* output */
+ size_t :0;
+};
+#define bpf_xdp_query_opts__last_field feature_flags
+
+LIBBPF_API int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags,
+ const struct bpf_xdp_attach_opts *opts);
+LIBBPF_API int bpf_xdp_detach(int ifindex, __u32 flags,
+ const struct bpf_xdp_attach_opts *opts);
+LIBBPF_API int bpf_xdp_query(int ifindex, int flags, struct bpf_xdp_query_opts *opts);
+LIBBPF_API int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id);
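A sketch of attaching an already-loaded XDP program to an interface and reading back the attached program ID; ifindex and prog_fd come from the caller and flags are left at 0:

#include <bpf/libbpf.h>

int install_xdp(int ifindex, int prog_fd)
{
	__u32 prog_id = 0;
	int err;

	err = bpf_xdp_attach(ifindex, prog_fd, 0 /* default attach flags */, NULL);
	if (err)
		return err;

	/* read back the ID of the program now attached to the interface */
	err = bpf_xdp_query_id(ifindex, 0, &prog_id);
	return err ? err : (int)prog_id;
}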
+
+/* TC related API */
+enum bpf_tc_attach_point {
+ BPF_TC_INGRESS = 1 << 0,
+ BPF_TC_EGRESS = 1 << 1,
+ BPF_TC_CUSTOM = 1 << 2,
+};
-struct xdp_link_info {
- __u32 prog_id;
- __u32 drv_prog_id;
- __u32 hw_prog_id;
- __u32 skb_prog_id;
- __u8 attach_mode;
+#define BPF_TC_PARENT(a, b) \
+ ((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU))
+
+enum bpf_tc_flags {
+ BPF_TC_F_REPLACE = 1 << 0,
};
-struct bpf_xdp_set_link_opts {
+struct bpf_tc_hook {
size_t sz;
- __u32 old_fd;
+ int ifindex;
+ enum bpf_tc_attach_point attach_point;
+ __u32 parent;
+ size_t :0;
};
-#define bpf_xdp_set_link_opts__last_field old_fd
+#define bpf_tc_hook__last_field parent
+
+struct bpf_tc_opts {
+ size_t sz;
+ int prog_fd;
+ __u32 flags;
+ __u32 prog_id;
+ __u32 handle;
+ __u32 priority;
+ size_t :0;
+};
+#define bpf_tc_opts__last_field priority
+
+LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook);
+LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook);
+LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook,
+ struct bpf_tc_opts *opts);
+LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts);
+LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
+ struct bpf_tc_opts *opts);
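A sketch of creating a clsact ingress hook and attaching a loaded program to it; ifindex and prog_fd come from the caller:

#include <errno.h>
#include <bpf/libbpf.h>

int attach_tc_ingress(int ifindex, int prog_fd)
{
	LIBBPF_OPTS(bpf_tc_hook, hook,
		.ifindex = ifindex,
		.attach_point = BPF_TC_INGRESS,
	);
	LIBBPF_OPTS(bpf_tc_opts, opts, .prog_fd = prog_fd);
	int err;

	err = bpf_tc_hook_create(&hook);
	if (err && err != -EEXIST) /* -EEXIST: clsact qdisc already present */
		return err;

	return bpf_tc_attach(&hook, &opts);
}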
+
+/* Ring buffer APIs */
+struct ring_buffer;
+struct user_ring_buffer;
+
+typedef int (*ring_buffer_sample_fn)(void *ctx, void *data, size_t size);
-LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
-LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
- const struct bpf_xdp_set_link_opts *opts);
-LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
-LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
- size_t info_size, __u32 flags);
+struct ring_buffer_opts {
+ size_t sz; /* size of this struct, for forward/backward compatibility */
+};
+
+#define ring_buffer_opts__last_field sz
+
+LIBBPF_API struct ring_buffer *
+ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
+ const struct ring_buffer_opts *opts);
+LIBBPF_API void ring_buffer__free(struct ring_buffer *rb);
+LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
+ ring_buffer_sample_fn sample_cb, void *ctx);
+LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
+LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
+LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
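A sketch of the consumer side for a BPF_MAP_TYPE_RINGBUF map; the callback body is a placeholder:

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static int handle_event(void *ctx, void *data, size_t size)
{
	printf("got %zu byte sample\n", size);
	return 0; /* non-zero stops ring_buffer__poll()/consume() with an error */
}

int poll_ringbuf(int map_fd)
{
	struct ring_buffer *rb;
	int err;

	rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
	if (!rb)
		return -errno;

	while ((err = ring_buffer__poll(rb, 100 /* ms */)) >= 0)
		; /* err is the number of consumed samples, negative on error */

	ring_buffer__free(rb);
	return err;
}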
+
+struct user_ring_buffer_opts {
+ size_t sz; /* size of this struct, for forward/backward compatibility */
+};
+
+#define user_ring_buffer_opts__last_field sz
+/**
+ * @brief **user_ring_buffer__new()** creates a new instance of a user ring
+ * buffer.
+ *
+ * @param map_fd A file descriptor to a BPF_MAP_TYPE_USER_RINGBUF map.
+ * @param opts Options for how the ring buffer should be created.
+ * @return A user ring buffer on success; NULL and errno being set on a
+ * failure.
+ */
+LIBBPF_API struct user_ring_buffer *
+user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts);
+
+/**
+ * @brief **user_ring_buffer__reserve()** reserves a pointer to a sample in the
+ * user ring buffer.
+ * @param rb A pointer to a user ring buffer.
+ * @param size The size of the sample, in bytes.
+ * @return A pointer to an 8-byte aligned reserved region of the user ring
+ * buffer; NULL, and errno being set if a sample could not be reserved.
+ *
+ * This function is *not* thread safe, and callers must synchronize accessing
+ * this function if there are multiple producers. If a size is requested that
+ * is larger than the size of the entire ring buffer, errno will be set to
+ * E2BIG and NULL is returned. If the ring buffer could accommodate the size,
+ * but currently does not have enough space, errno is set to ENOSPC and NULL is
+ * returned.
+ *
+ * After initializing the sample, callers must invoke
+ * **user_ring_buffer__submit()** to post the sample to the kernel. Otherwise,
+ * the sample must be freed with **user_ring_buffer__discard()**.
+ */
+LIBBPF_API void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size);
+
+/**
+ * @brief **user_ring_buffer__reserve_blocking()** reserves a record in the
+ * ring buffer, possibly blocking for up to @timeout_ms until a sample becomes
+ * available.
+ * @param rb The user ring buffer.
+ * @param size The size of the sample, in bytes.
+ * @param timeout_ms The amount of time, in milliseconds, for which the caller
+ * should block when waiting for a sample. -1 causes the caller to block
+ * indefinitely.
+ * @return A pointer to an 8-byte aligned reserved region of the user ring
+ * buffer; NULL, and errno being set if a sample could not be reserved.
+ *
+ * This function is *not* thread safe, and callers must synchronize
+ * accessing this function if there are multiple producers.
+ *
+ * If **timeout_ms** is -1, the function will block indefinitely until a sample
+ * becomes available. Otherwise, **timeout_ms** must be non-negative, or errno
+ * is set to EINVAL, and NULL is returned. If **timeout_ms** is 0, no blocking
+ * will occur and the function will return immediately after attempting to
+ * reserve a sample.
+ *
+ * If **size** is larger than the size of the entire ring buffer, errno is set
+ * to E2BIG and NULL is returned. If the ring buffer could accommodate
+ * **size**, but currently does not have enough space, the caller will block
+ * until at most **timeout_ms** has elapsed. If insufficient space is available
+ * at that time, errno is set to ENOSPC, and NULL is returned.
+ *
+ * The kernel guarantees that it will wake up this thread to check if
+ * sufficient space is available in the ring buffer at least once per
+ * invocation of the **bpf_ringbuf_drain()** helper function, provided that at
+ * least one sample is consumed, and the BPF program did not invoke the
+ * function with BPF_RB_NO_WAKEUP. A wakeup may occur sooner than that, but the
+ * kernel does not guarantee this. If the helper function is invoked with
+ * BPF_RB_FORCE_WAKEUP, a wakeup event will be sent even if no sample is
+ * consumed.
+ *
+ * When a region of at least **size** bytes becomes available within
+ * **timeout_ms**, a pointer to the reserved sample is returned. After
+ * initializing the sample, callers must invoke
+ * **user_ring_buffer__submit()** to post the sample to the ring buffer.
+ * Otherwise, the sample must be freed with **user_ring_buffer__discard()**.
+ */
+LIBBPF_API void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb,
+ __u32 size,
+ int timeout_ms);
+
+/**
+ * @brief **user_ring_buffer__submit()** submits a previously reserved sample
+ * into the ring buffer.
+ * @param rb The user ring buffer.
+ * @param sample A reserved sample.
+ *
+ * It is not necessary to synchronize amongst multiple producers when invoking
+ * this function.
+ */
+LIBBPF_API void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample);
+
+/**
+ * @brief **user_ring_buffer__discard()** discards a previously reserved sample.
+ * @param rb The user ring buffer.
+ * @param sample A reserved sample.
+ *
+ * It is not necessary to synchronize amongst multiple producers when invoking
+ * this function.
+ */
+LIBBPF_API void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample);
+
+/**
+ * @brief **user_ring_buffer__free()** frees a ring buffer that was previously
+ * created with **user_ring_buffer__new()**.
+ * @param rb The user ring buffer being freed.
+ */
+LIBBPF_API void user_ring_buffer__free(struct user_ring_buffer *rb);
+
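
A producer-side sketch of the reserve/submit flow documented above; struct my_sample, the 500 ms timeout, and the way rb was obtained (user_ring_buffer__new() on a BPF_MAP_TYPE_USER_RINGBUF map) are illustrative assumptions.

/* assumes <errno.h>, <string.h> and "libbpf.h" are included */
struct my_sample { int kind; char msg[32]; };	/* hypothetical sample layout */

static int send_sample(struct user_ring_buffer *rb)
{
	struct my_sample *s;

	/* block for up to 500 ms waiting for free space in the ring buffer */
	s = user_ring_buffer__reserve_blocking(rb, sizeof(*s), 500);
	if (!s)
		return -errno;	/* E2BIG, ENOSPC, EINVAL, ... */

	s->kind = 1;
	strcpy(s->msg, "hello");

	user_ring_buffer__submit(rb, s);	/* or user_ring_buffer__discard(rb, s) */
	return 0;
}
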
+/* Perf buffer APIs */
struct perf_buffer;
typedef void (*perf_buffer_sample_fn)(void *ctx, int cpu,
@@ -477,16 +1273,27 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
/* common use perf buffer options */
struct perf_buffer_opts {
- /* if specified, sample_cb is called for each sample */
- perf_buffer_sample_fn sample_cb;
- /* if specified, lost_cb is called for each batch of lost samples */
- perf_buffer_lost_fn lost_cb;
- /* ctx is provided to sample_cb and lost_cb */
- void *ctx;
+ size_t sz;
+ __u32 sample_period;
+ size_t :0;
};
-
+#define perf_buffer_opts__last_field sample_period
+
+/**
+ * @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
+ * BPF_PERF_EVENT_ARRAY map
+ * @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF
+ * code to send data over to user-space
+ * @param page_cnt number of memory pages allocated for each per-CPU buffer
+ * @param sample_cb function called on each received data record
+ * @param lost_cb function called when record loss has occurred
+ * @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
+ * @return a new instance of struct perf_buffer on success, NULL on error with
+ * *errno* containing an error code
+ */
LIBBPF_API struct perf_buffer *
perf_buffer__new(int map_fd, size_t page_cnt,
+ perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
const struct perf_buffer_opts *opts);
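
A hedged usage sketch of the new perf_buffer__new() signature, using the LIBBPF_OPTS() helper introduced later in this diff; the callbacks and the 64-page per-CPU buffer size are examples, not requirements.

/* assumes <errno.h> and "libbpf.h" are included */
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* one record emitted by the BPF program via bpf_perf_event_output() */
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	/* cnt records were dropped on this CPU */
}

static int run_perfbuf(int map_fd)
{
	LIBBPF_OPTS(perf_buffer_opts, opts, .sample_period = 8);
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(map_fd, 64 /* pages per CPU */,
			      on_sample, on_lost, NULL /* ctx */, &opts);
	if (!pb)
		return -errno;

	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		;

	perf_buffer__free(pb);
	return err;
}
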
enum bpf_perf_event_ret {
@@ -502,12 +1309,9 @@ typedef enum bpf_perf_event_ret
/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
- /* perf event attrs passed directly into perf_event_open() */
- struct perf_event_attr *attr;
- /* raw event callback */
- perf_buffer_event_fn event_cb;
- /* ctx is provided to event_cb */
- void *ctx;
+ size_t sz;
+ long :0;
+ long :0;
/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
* max_entries of given PERF_EVENT_ARRAY map)
*/
@@ -517,21 +1321,38 @@ struct perf_buffer_raw_opts {
/* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
int *map_keys;
};
+#define perf_buffer_raw_opts__last_field map_keys
+
+struct perf_event_attr;
LIBBPF_API struct perf_buffer *
-perf_buffer__new_raw(int map_fd, size_t page_cnt,
+perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
+ perf_buffer_event_fn event_cb, void *ctx,
const struct perf_buffer_raw_opts *opts);
LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
+LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
-
-typedef enum bpf_perf_event_ret
- (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
- void *private_data);
-LIBBPF_API enum bpf_perf_event_ret
-bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
- void **copy_mem, size_t *copy_size,
- bpf_perf_event_print_t fn, void *private_data);
+LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
+LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
+LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
+LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);
+/**
+ * @brief **perf_buffer__buffer()** returns the per-cpu raw mmap()'ed underlying
+ * memory region of the ring buffer.
+ * This ring buffer can be used to implement a custom events consumer.
+ * The ring buffer starts with the *struct perf_event_mmap_page*, which
+ * holds the ring buffer management fields; when accessing the header
+ * structure it's important to be SMP aware.
+ * You can refer to *perf_event_read_simple* for a simple example.
+ * @param pb the perf buffer structure
+ * @param buf_idx the buffer index to retrieve
+ * @param buf (out) gets the base pointer of the mmap()'ed memory
+ * @param buf_size (out) gets the size of the mmap()'ed region
+ * @return 0 on success, negative error code for failure
+ */
+LIBBPF_API int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf,
+ size_t *buf_size);
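
To make the custom-consumer note concrete, here is a rough sketch that only inspects the head/tail fields of one per-CPU buffer; the acquire-load is one way to honor the SMP caveat above and is an assumption of the sketch, not something this API enforces.

/* assumes <linux/perf_event.h> and "libbpf.h" are included */
static int has_unread_data(struct perf_buffer *pb, int buf_idx)
{
	struct perf_event_mmap_page *header;
	void *base;
	size_t size;
	__u64 head, tail;
	int err;

	err = perf_buffer__buffer(pb, buf_idx, &base, &size);
	if (err)
		return err;

	header = base;
	head = __atomic_load_n(&header->data_head, __ATOMIC_ACQUIRE);
	tail = header->data_tail;	/* only this consumer advances data_tail */
	return head != tail;
}
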
struct bpf_prog_linfo;
struct bpf_prog_info;
@@ -554,79 +1375,53 @@ bpf_prog_linfo__lfind(const struct bpf_prog_linfo *prog_linfo,
* user, causing subsequent probes to fail. In this case, the caller may want
* to adjust that limit with setrlimit().
*/
-LIBBPF_API bool bpf_probe_prog_type(enum bpf_prog_type prog_type,
- __u32 ifindex);
-LIBBPF_API bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex);
-LIBBPF_API bool bpf_probe_helper(enum bpf_func_id id,
- enum bpf_prog_type prog_type, __u32 ifindex);
-LIBBPF_API bool bpf_probe_large_insn_limit(__u32 ifindex);
-/*
- * Get bpf_prog_info in continuous memory
- *
- * struct bpf_prog_info has multiple arrays. The user has option to choose
- * arrays to fetch from kernel. The following APIs provide an uniform way to
- * fetch these data. All arrays in bpf_prog_info are stored in a single
- * continuous memory region. This makes it easy to store the info in a
- * file.
- *
- * Before writing bpf_prog_info_linear to files, it is necessary to
- * translate pointers in bpf_prog_info to offsets. Helper functions
- * bpf_program__bpil_addr_to_offs() and bpf_program__bpil_offs_to_addr()
- * are introduced to switch between pointers and offsets.
- *
- * Examples:
- * # To fetch map_ids and prog_tags:
- * __u64 arrays = (1UL << BPF_PROG_INFO_MAP_IDS) |
- * (1UL << BPF_PROG_INFO_PROG_TAGS);
- * struct bpf_prog_info_linear *info_linear =
- * bpf_program__get_prog_info_linear(fd, arrays);
- *
- * # To save data in file
- * bpf_program__bpil_addr_to_offs(info_linear);
- * write(f, info_linear, sizeof(*info_linear) + info_linear->data_len);
- *
- * # To read data from file
- * read(f, info_linear, <proper_size>);
- * bpf_program__bpil_offs_to_addr(info_linear);
- */
-enum bpf_prog_info_array {
- BPF_PROG_INFO_FIRST_ARRAY = 0,
- BPF_PROG_INFO_JITED_INSNS = 0,
- BPF_PROG_INFO_XLATED_INSNS,
- BPF_PROG_INFO_MAP_IDS,
- BPF_PROG_INFO_JITED_KSYMS,
- BPF_PROG_INFO_JITED_FUNC_LENS,
- BPF_PROG_INFO_FUNC_INFO,
- BPF_PROG_INFO_LINE_INFO,
- BPF_PROG_INFO_JITED_LINE_INFO,
- BPF_PROG_INFO_PROG_TAGS,
- BPF_PROG_INFO_LAST_ARRAY,
-};
-
-struct bpf_prog_info_linear {
- /* size of struct bpf_prog_info, when the tool is compiled */
- __u32 info_len;
- /* total bytes allocated for data, round up to 8 bytes */
- __u32 data_len;
- /* which arrays are included in data */
- __u64 arrays;
- struct bpf_prog_info info;
- __u8 data[];
-};
-
-LIBBPF_API struct bpf_prog_info_linear *
-bpf_program__get_prog_info_linear(int fd, __u64 arrays);
-
-LIBBPF_API void
-bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
-
-LIBBPF_API void
-bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
+/**
+ * @brief **libbpf_probe_bpf_prog_type()** detects if host kernel supports
+ * BPF programs of a given type.
+ * @param prog_type BPF program type to detect kernel support for
+ * @param opts reserved for future extensibility, should be NULL
+ * @return 1, if given program type is supported; 0, if given program type is
+ * not supported; negative error code if feature detection failed or can't be
+ * performed
+ *
+ * Make sure the process has required set of CAP_* permissions (or runs as
+ * root) when performing feature checking.
+ */
+LIBBPF_API int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts);
+/**
+ * @brief **libbpf_probe_bpf_map_type()** detects if host kernel supports
+ * BPF maps of a given type.
+ * @param map_type BPF map type to detect kernel support for
+ * @param opts reserved for future extensibility, should be NULL
+ * @return 1, if given map type is supported; 0, if given map type is
+ * not supported; negative error code if feature detection failed or can't be
+ * performed
+ *
+ * Make sure the process has required set of CAP_* permissions (or runs as
+ * root) when performing feature checking.
+ */
+LIBBPF_API int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts);
+/**
+ * @brief **libbpf_probe_bpf_helper()** detects if host kernel supports the
+ * use of a given BPF helper from specified BPF program type.
+ * @param prog_type BPF program type used to check the support of BPF helper
+ * @param helper_id BPF helper ID (enum bpf_func_id) to check support for
+ * @param opts reserved for future extensibility, should be NULL
+ * @return 1, if given combination of program type and helper is supported; 0,
+ * if the combination is not supported; negative error code if feature
+ * detection for provided input arguments failed or can't be performed
+ *
+ * Make sure the process has required set of CAP_* permissions (or runs as
+ * root) when performing feature checking.
+ */
+LIBBPF_API int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type,
+ enum bpf_func_id helper_id, const void *opts);
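
A short feature-detection sketch using the three probes above; the particular program type, map type, and helper ID are arbitrary examples.

/* assumes <stdio.h>, <linux/bpf.h> and "libbpf.h" are included */
static void report_kernel_features(void)
{
	/* each probe returns 1 (supported), 0 (not supported) or <0 (error) */
	int has_xdp     = libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL);
	int has_ringbuf = libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL);
	int has_ktime   = libbpf_probe_bpf_helper(BPF_PROG_TYPE_XDP,
						  BPF_FUNC_ktime_get_ns, NULL);

	printf("xdp:%d ringbuf:%d ktime_get_ns:%d\n",
	       has_xdp, has_ringbuf, has_ktime);
}
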
-/*
- * A helper function to get the number of possible CPUs before looking up
- * per-CPU maps. Negative errno is returned on failure.
+/**
+ * @brief **libbpf_num_possible_cpus()** is a helper function to get the
+ * number of possible CPUs that the host kernel supports and expects.
+ * @return number of possible CPUs; or error code on failure
*
* Example usage:
*
@@ -636,7 +1431,6 @@ bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);
* }
* long values[ncpus];
* bpf_map_lookup_elem(per_cpu_map_fd, key, values);
- *
*/
LIBBPF_API int libbpf_num_possible_cpus(void);
@@ -656,17 +1450,17 @@ struct bpf_object_skeleton {
size_t sz; /* size of this struct, for forward/backward compatibility */
const char *name;
- void *data;
+ const void *data;
size_t data_sz;
struct bpf_object **obj;
int map_cnt;
- int map_skel_sz; /* sizeof(struct bpf_skeleton_map) */
+ int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
struct bpf_map_skeleton *maps;
int prog_cnt;
- int prog_skel_sz; /* sizeof(struct bpf_skeleton_prog) */
+ int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
struct bpf_prog_skeleton *progs;
};
@@ -678,12 +1472,183 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
+struct bpf_var_skeleton {
+ const char *name;
+ struct bpf_map **map;
+ void **addr;
+};
+
+struct bpf_object_subskeleton {
+ size_t sz; /* size of this struct, for forward/backward compatibility */
+
+ const struct bpf_object *obj;
+
+ int map_cnt;
+ int map_skel_sz; /* sizeof(struct bpf_map_skeleton) */
+ struct bpf_map_skeleton *maps;
+
+ int prog_cnt;
+ int prog_skel_sz; /* sizeof(struct bpf_prog_skeleton) */
+ struct bpf_prog_skeleton *progs;
+
+ int var_cnt;
+ int var_skel_sz; /* sizeof(struct bpf_var_skeleton) */
+ struct bpf_var_skeleton *vars;
+};
+
+LIBBPF_API int
+bpf_object__open_subskeleton(struct bpf_object_subskeleton *s);
+LIBBPF_API void
+bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s);
+
+struct gen_loader_opts {
+ size_t sz; /* size of this struct, for forward/backward compatibility */
+ const char *data;
+ const char *insns;
+ __u32 data_sz;
+ __u32 insns_sz;
+};
+
+#define gen_loader_opts__last_field insns_sz
+LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
+ struct gen_loader_opts *opts);
+
enum libbpf_tristate {
TRI_NO = 0,
TRI_YES = 1,
TRI_MODULE = 2,
};
+struct bpf_linker_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+};
+#define bpf_linker_opts__last_field sz
+
+struct bpf_linker_file_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+};
+#define bpf_linker_file_opts__last_field sz
+
+struct bpf_linker;
+
+LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
+LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
+ const char *filename,
+ const struct bpf_linker_file_opts *opts);
+LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
+LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);
+
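
A minimal sketch of static linking with the bpf_linker API above; the object file names are placeholders.

/* assumes <errno.h> and "libbpf.h" are included */
static int link_objects(void)
{
	struct bpf_linker *linker;
	int err;

	linker = bpf_linker__new("combined.bpf.o", NULL /* opts */);
	if (!linker)
		return -errno;

	err = bpf_linker__add_file(linker, "prog_a.bpf.o", NULL);
	if (!err)
		err = bpf_linker__add_file(linker, "prog_b.bpf.o", NULL);
	if (!err)
		err = bpf_linker__finalize(linker);	/* writes combined.bpf.o */

	bpf_linker__free(linker);
	return err;
}
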
+/*
+ * Custom handling of BPF program's SEC() definitions
+ */
+
+struct bpf_prog_load_opts; /* defined in bpf.h */
+
+/* Called during bpf_object__open() for each recognized BPF program. Callback
+ * can use various bpf_program__set_*() setters to adjust whatever properties
+ * are necessary.
+ */
+typedef int (*libbpf_prog_setup_fn_t)(struct bpf_program *prog, long cookie);
+
+/* Called right before libbpf performs bpf_prog_load() to load BPF program
+ * into the kernel. Callback can adjust opts as necessary.
+ */
+typedef int (*libbpf_prog_prepare_load_fn_t)(struct bpf_program *prog,
+ struct bpf_prog_load_opts *opts, long cookie);
+
+/* Called during skeleton attach or through bpf_program__attach(). If
+ * auto-attach is not supported, callback should return 0 and set link to
+ * NULL (it's not considered an error during skeleton attach, but it will be
+ * an error for bpf_program__attach() calls). On error, error should be
+ * returned directly and link set to NULL. On success, return 0 and set link
+ * to a valid struct bpf_link.
+ */
+typedef int (*libbpf_prog_attach_fn_t)(const struct bpf_program *prog, long cookie,
+ struct bpf_link **link);
+
+struct libbpf_prog_handler_opts {
+ /* size of this struct, for forward/backward compatibility */
+ size_t sz;
+ /* User-provided value that is passed to prog_setup_fn,
+ * prog_prepare_load_fn, and prog_attach_fn callbacks. Allows user to
+ * register one set of callbacks for multiple SEC() definitions and
+ * still be able to distinguish them, if necessary. For example,
+ * libbpf itself is using this to pass necessary flags (e.g.,
+ * sleepable flag) to a common internal SEC() handler.
+ */
+ long cookie;
+ /* BPF program initialization callback (see libbpf_prog_setup_fn_t).
+ * Callback is optional, pass NULL if it's not necessary.
+ */
+ libbpf_prog_setup_fn_t prog_setup_fn;
+ /* BPF program loading callback (see libbpf_prog_prepare_load_fn_t).
+ * Callback is optional, pass NULL if it's not necessary.
+ */
+ libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
+ /* BPF program attach callback (see libbpf_prog_attach_fn_t).
+ * Callback is optional, pass NULL if it's not necessary.
+ */
+ libbpf_prog_attach_fn_t prog_attach_fn;
+};
+#define libbpf_prog_handler_opts__last_field prog_attach_fn
+
+/**
+ * @brief **libbpf_register_prog_handler()** registers a custom BPF program
+ * SEC() handler.
+ * @param sec section prefix for which custom handler is registered
+ * @param prog_type BPF program type associated with specified section
+ * @param exp_attach_type Expected BPF attach type associated with specified section
+ * @param opts optional cookie, callbacks, and other extra options
+ * @return Non-negative handler ID is returned on success. This handler ID has
+ * to be passed to *libbpf_unregister_prog_handler()* to unregister such
+ * custom handler. Negative error code is returned on error.
+ *
+ * *sec* defines which SEC() definitions are handled by this custom handler
+ * registration. *sec* can have a few different forms:
+ * - if *sec* is just a plain string (e.g., "abc"), it will match only
+ * SEC("abc"). If BPF program specifies SEC("abc/whatever") it will result
+ * in an error;
+ * - if *sec* is of the form "abc/", proper SEC() form is
+ * SEC("abc/something"), where acceptable "something" should be checked by
+ * the *prog_setup_fn* callback, if there are additional restrictions;
+ * - if *sec* is of the form "abc+", it will successfully match both
+ * SEC("abc") and SEC("abc/whatever") forms;
+ * - if *sec* is NULL, custom handler is registered for any BPF program that
+ * doesn't match any of the registered (custom or libbpf's own) SEC()
+ * handlers. There could be only one such generic custom handler registered
+ * at any given time.
+ *
+ * All custom handlers (except the one with *sec* == NULL) are processed
+ * before libbpf's own SEC() handlers. It is allowed to "override" libbpf's
+ * SEC() handlers by registering custom ones for the same section prefix
+ * (i.e., it's possible to have custom SEC("perf_event/LLC-load-misses")
+ * handler).
+ *
+ * Note, like most global libbpf APIs (e.g., libbpf_set_print(),
+ * libbpf_set_strict_mode(), etc.) these APIs are not thread-safe. User needs
+ * to ensure synchronization if there is a risk of running this API from
+ * multiple threads simultaneously.
+ */
+LIBBPF_API int libbpf_register_prog_handler(const char *sec,
+ enum bpf_prog_type prog_type,
+ enum bpf_attach_type exp_attach_type,
+ const struct libbpf_prog_handler_opts *opts);
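
A hedged registration sketch for a hypothetical SEC("myprobe+") prefix; the setup callback and its use of bpf_program__set_log_level() are illustrative assumptions, not required by the API.

/* assumes <linux/bpf.h> and "libbpf.h" are included */
static int my_setup(struct bpf_program *prog, long cookie)
{
	/* called from bpf_object__open() for every SEC("myprobe...") program */
	bpf_program__set_log_level(prog, 1);
	return 0;
}

static int register_my_handler(void)
{
	LIBBPF_OPTS(libbpf_prog_handler_opts, opts,
		.cookie = 42,
		.prog_setup_fn = my_setup,
	);

	/* "myprobe+" matches both SEC("myprobe") and SEC("myprobe/whatever") */
	return libbpf_register_prog_handler("myprobe+", BPF_PROG_TYPE_KPROBE,
					    0 /* no expected attach type */, &opts);
}
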
+/**
+ * @brief *libbpf_unregister_prog_handler()* unregisters previously registered
+ * custom BPF program SEC() handler.
+ * @param handler_id handler ID returned by *libbpf_register_prog_handler()*
+ * after successful registration
+ * @return 0 on success, negative error code if handler isn't found
+ *
+ * Note, like most global libbpf APIs (e.g., libbpf_set_print(),
+ * libbpf_set_strict_mode(), etc.) these APIs are not thread-safe. User needs
+ * to ensure synchronization if there is a risk of running this API from
+ * multiple threads simultaneously.
+ */
+LIBBPF_API int libbpf_unregister_prog_handler(int handler_id);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index bb8831605b25..a5aa3a383d69 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -1,29 +1,14 @@
LIBBPF_0.0.1 {
global:
bpf_btf_get_fd_by_id;
- bpf_create_map;
- bpf_create_map_in_map;
- bpf_create_map_in_map_node;
- bpf_create_map_name;
- bpf_create_map_node;
- bpf_create_map_xattr;
- bpf_load_btf;
- bpf_load_program;
- bpf_load_program_xattr;
bpf_map__btf_key_type_id;
bpf_map__btf_value_type_id;
- bpf_map__def;
bpf_map__fd;
- bpf_map__is_offload_neutral;
bpf_map__name;
- bpf_map__next;
bpf_map__pin;
- bpf_map__prev;
- bpf_map__priv;
bpf_map__reuse_fd;
bpf_map__set_ifindex;
bpf_map__set_inner_map_fd;
- bpf_map__set_priv;
bpf_map__unpin;
bpf_map_delete_elem;
bpf_map_get_fd_by_id;
@@ -38,79 +23,37 @@ LIBBPF_0.0.1 {
bpf_object__btf_fd;
bpf_object__close;
bpf_object__find_map_by_name;
- bpf_object__find_map_by_offset;
- bpf_object__find_program_by_title;
bpf_object__kversion;
bpf_object__load;
bpf_object__name;
- bpf_object__next;
bpf_object__open;
- bpf_object__open_buffer;
- bpf_object__open_xattr;
bpf_object__pin;
bpf_object__pin_maps;
bpf_object__pin_programs;
- bpf_object__priv;
- bpf_object__set_priv;
- bpf_object__unload;
bpf_object__unpin_maps;
bpf_object__unpin_programs;
- bpf_perf_event_read_simple;
bpf_prog_attach;
bpf_prog_detach;
bpf_prog_detach2;
bpf_prog_get_fd_by_id;
bpf_prog_get_next_id;
- bpf_prog_load;
- bpf_prog_load_xattr;
bpf_prog_query;
- bpf_prog_test_run;
- bpf_prog_test_run_xattr;
bpf_program__fd;
- bpf_program__is_kprobe;
- bpf_program__is_perf_event;
- bpf_program__is_raw_tracepoint;
- bpf_program__is_sched_act;
- bpf_program__is_sched_cls;
- bpf_program__is_socket_filter;
- bpf_program__is_tracepoint;
- bpf_program__is_xdp;
- bpf_program__load;
- bpf_program__next;
- bpf_program__nth_fd;
bpf_program__pin;
- bpf_program__pin_instance;
- bpf_program__prev;
- bpf_program__priv;
bpf_program__set_expected_attach_type;
bpf_program__set_ifindex;
- bpf_program__set_kprobe;
- bpf_program__set_perf_event;
- bpf_program__set_prep;
- bpf_program__set_priv;
- bpf_program__set_raw_tracepoint;
- bpf_program__set_sched_act;
- bpf_program__set_sched_cls;
- bpf_program__set_socket_filter;
- bpf_program__set_tracepoint;
bpf_program__set_type;
- bpf_program__set_xdp;
- bpf_program__title;
bpf_program__unload;
bpf_program__unpin;
- bpf_program__unpin_instance;
bpf_prog_linfo__free;
bpf_prog_linfo__new;
bpf_prog_linfo__lfind_addr_func;
bpf_prog_linfo__lfind;
bpf_raw_tracepoint_open;
- bpf_set_link_xdp_fd;
bpf_task_fd_query;
- bpf_verify_program;
btf__fd;
btf__find_by_name;
btf__free;
- btf__get_from_id;
btf__name_by_offset;
btf__new;
btf__resolve_size;
@@ -127,48 +70,24 @@ LIBBPF_0.0.1 {
LIBBPF_0.0.2 {
global:
- bpf_probe_helper;
- bpf_probe_map_type;
- bpf_probe_prog_type;
- bpf_map__resize;
bpf_map_lookup_elem_flags;
bpf_object__btf;
bpf_object__find_map_fd_by_name;
- bpf_get_link_xdp_id;
- btf__dedup;
- btf__get_map_kv_tids;
- btf__get_nr_types;
btf__get_raw_data;
- btf__load;
btf_ext__free;
- btf_ext__func_info_rec_size;
btf_ext__get_raw_data;
- btf_ext__line_info_rec_size;
btf_ext__new;
- btf_ext__reloc_func_info;
- btf_ext__reloc_line_info;
- xsk_umem__create;
- xsk_socket__create;
- xsk_umem__delete;
- xsk_socket__delete;
- xsk_umem__fd;
- xsk_socket__fd;
- bpf_program__get_prog_info_linear;
- bpf_program__bpil_addr_to_offs;
- bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;
LIBBPF_0.0.3 {
global:
bpf_map__is_internal;
bpf_map_freeze;
- btf__finalize_data;
} LIBBPF_0.0.2;
LIBBPF_0.0.4 {
global:
bpf_link__destroy;
- bpf_object__load_xattr;
bpf_program__attach_kprobe;
bpf_program__attach_perf_event;
bpf_program__attach_raw_tracepoint;
@@ -176,14 +95,10 @@ LIBBPF_0.0.4 {
bpf_program__attach_uprobe;
btf_dump__dump_type;
btf_dump__free;
- btf_dump__new;
btf__parse_elf;
libbpf_num_possible_cpus;
perf_buffer__free;
- perf_buffer__new;
- perf_buffer__new_raw;
perf_buffer__poll;
- xsk_umem__create;
} LIBBPF_0.0.3;
LIBBPF_0.0.5 {
@@ -193,7 +108,6 @@ LIBBPF_0.0.5 {
LIBBPF_0.0.6 {
global:
- bpf_get_link_xdp_info;
bpf_map__get_pin_path;
bpf_map__is_pinned;
bpf_map__set_pin_path;
@@ -202,9 +116,6 @@ LIBBPF_0.0.6 {
bpf_program__attach_trace;
bpf_program__get_expected_attach_type;
bpf_program__get_type;
- bpf_program__is_tracing;
- bpf_program__set_tracing;
- bpf_program__size;
btf__find_by_name_kind;
libbpf_find_vmlinux_btf_id;
} LIBBPF_0.0.5;
@@ -224,14 +135,8 @@ LIBBPF_0.0.7 {
bpf_object__detach_skeleton;
bpf_object__load_skeleton;
bpf_object__open_skeleton;
- bpf_probe_large_insn_limit;
- bpf_prog_attach_xattr;
bpf_program__attach;
bpf_program__name;
- bpf_program__is_extension;
- bpf_program__is_struct_ops;
- bpf_program__set_extension;
- bpf_program__set_struct_ops;
btf__align_of;
libbpf_find_kernel_btf;
} LIBBPF_0.0.6;
@@ -247,10 +152,242 @@ LIBBPF_0.0.8 {
bpf_link_create;
bpf_link_update;
bpf_map__set_initial_value;
+ bpf_prog_attach_opts;
bpf_program__attach_cgroup;
bpf_program__attach_lsm;
- bpf_program__is_lsm;
bpf_program__set_attach_target;
- bpf_program__set_lsm;
- bpf_set_link_xdp_fd_opts;
} LIBBPF_0.0.7;
+
+LIBBPF_0.0.9 {
+ global:
+ bpf_enable_stats;
+ bpf_iter_create;
+ bpf_link_get_fd_by_id;
+ bpf_link_get_next_id;
+ bpf_program__attach_iter;
+ bpf_program__attach_netns;
+ perf_buffer__consume;
+ ring_buffer__add;
+ ring_buffer__consume;
+ ring_buffer__free;
+ ring_buffer__new;
+ ring_buffer__poll;
+} LIBBPF_0.0.8;
+
+LIBBPF_0.1.0 {
+ global:
+ bpf_link__detach;
+ bpf_link_detach;
+ bpf_map__ifindex;
+ bpf_map__key_size;
+ bpf_map__map_flags;
+ bpf_map__max_entries;
+ bpf_map__numa_node;
+ bpf_map__set_key_size;
+ bpf_map__set_map_flags;
+ bpf_map__set_max_entries;
+ bpf_map__set_numa_node;
+ bpf_map__set_type;
+ bpf_map__set_value_size;
+ bpf_map__type;
+ bpf_map__value_size;
+ bpf_program__attach_xdp;
+ bpf_program__autoload;
+ bpf_program__set_autoload;
+ btf__parse;
+ btf__parse_raw;
+ btf__pointer_size;
+ btf__set_fd;
+ btf__set_pointer_size;
+} LIBBPF_0.0.9;
+
+LIBBPF_0.2.0 {
+ global:
+ bpf_prog_bind_map;
+ bpf_prog_test_run_opts;
+ bpf_program__attach_freplace;
+ bpf_program__section_name;
+ btf__add_array;
+ btf__add_const;
+ btf__add_enum;
+ btf__add_enum_value;
+ btf__add_datasec;
+ btf__add_datasec_var_info;
+ btf__add_field;
+ btf__add_func;
+ btf__add_func_param;
+ btf__add_func_proto;
+ btf__add_fwd;
+ btf__add_int;
+ btf__add_ptr;
+ btf__add_restrict;
+ btf__add_str;
+ btf__add_struct;
+ btf__add_typedef;
+ btf__add_union;
+ btf__add_var;
+ btf__add_volatile;
+ btf__endianness;
+ btf__find_str;
+ btf__new_empty;
+ btf__set_endianness;
+ btf__str_by_offset;
+ perf_buffer__buffer_cnt;
+ perf_buffer__buffer_fd;
+ perf_buffer__epoll_fd;
+ perf_buffer__consume_buffer;
+} LIBBPF_0.1.0;
+
+LIBBPF_0.3.0 {
+ global:
+ btf__base_btf;
+ btf__parse_elf_split;
+ btf__parse_raw_split;
+ btf__parse_split;
+ btf__new_empty_split;
+ btf__new_split;
+ ring_buffer__epoll_fd;
+} LIBBPF_0.2.0;
+
+LIBBPF_0.4.0 {
+ global:
+ btf__add_float;
+ btf__add_type;
+ bpf_linker__add_file;
+ bpf_linker__finalize;
+ bpf_linker__free;
+ bpf_linker__new;
+ bpf_map__inner_map;
+ bpf_object__set_kversion;
+ bpf_tc_attach;
+ bpf_tc_detach;
+ bpf_tc_hook_create;
+ bpf_tc_hook_destroy;
+ bpf_tc_query;
+} LIBBPF_0.3.0;
+
+LIBBPF_0.5.0 {
+ global:
+ bpf_map__initial_value;
+ bpf_map__pin_path;
+ bpf_map_lookup_and_delete_elem_flags;
+ bpf_program__attach_kprobe_opts;
+ bpf_program__attach_perf_event_opts;
+ bpf_program__attach_tracepoint_opts;
+ bpf_program__attach_uprobe_opts;
+ bpf_object__gen_loader;
+ btf__load_from_kernel_by_id;
+ btf__load_from_kernel_by_id_split;
+ btf__load_into_kernel;
+ btf__load_module_btf;
+ btf__load_vmlinux_btf;
+ btf_dump__dump_type_data;
+ libbpf_set_strict_mode;
+} LIBBPF_0.4.0;
+
+LIBBPF_0.6.0 {
+ global:
+ bpf_map__map_extra;
+ bpf_map__set_map_extra;
+ bpf_map_create;
+ bpf_object__next_map;
+ bpf_object__next_program;
+ bpf_object__prev_map;
+ bpf_object__prev_program;
+ bpf_prog_load;
+ bpf_program__flags;
+ bpf_program__insn_cnt;
+ bpf_program__insns;
+ bpf_program__set_flags;
+ btf__add_btf;
+ btf__add_decl_tag;
+ btf__add_type_tag;
+ btf__dedup;
+ btf__raw_data;
+ btf__type_cnt;
+ btf_dump__new;
+ libbpf_major_version;
+ libbpf_minor_version;
+ libbpf_version_string;
+ perf_buffer__new;
+ perf_buffer__new_raw;
+} LIBBPF_0.5.0;
+
+LIBBPF_0.7.0 {
+ global:
+ bpf_btf_load;
+ bpf_program__expected_attach_type;
+ bpf_program__log_buf;
+ bpf_program__log_level;
+ bpf_program__set_log_buf;
+ bpf_program__set_log_level;
+ bpf_program__type;
+ bpf_xdp_attach;
+ bpf_xdp_detach;
+ bpf_xdp_query;
+ bpf_xdp_query_id;
+ btf_ext__raw_data;
+ libbpf_probe_bpf_helper;
+ libbpf_probe_bpf_map_type;
+ libbpf_probe_bpf_prog_type;
+ libbpf_set_memlock_rlim;
+} LIBBPF_0.6.0;
+
+LIBBPF_0.8.0 {
+ global:
+ bpf_map__autocreate;
+ bpf_map__get_next_key;
+ bpf_map__delete_elem;
+ bpf_map__lookup_and_delete_elem;
+ bpf_map__lookup_elem;
+ bpf_map__set_autocreate;
+ bpf_map__update_elem;
+ bpf_map_delete_elem_flags;
+ bpf_object__destroy_subskeleton;
+ bpf_object__open_subskeleton;
+ bpf_program__attach_kprobe_multi_opts;
+ bpf_program__attach_trace_opts;
+ bpf_program__attach_usdt;
+ bpf_program__set_insns;
+ libbpf_register_prog_handler;
+ libbpf_unregister_prog_handler;
+} LIBBPF_0.7.0;
+
+LIBBPF_1.0.0 {
+ global:
+ bpf_obj_get_opts;
+ bpf_prog_query_opts;
+ bpf_program__attach_ksyscall;
+ bpf_program__autoattach;
+ bpf_program__set_autoattach;
+ btf__add_enum64;
+ btf__add_enum64_value;
+ libbpf_bpf_attach_type_str;
+ libbpf_bpf_link_type_str;
+ libbpf_bpf_map_type_str;
+ libbpf_bpf_prog_type_str;
+ perf_buffer__buffer;
+} LIBBPF_0.8.0;
+
+LIBBPF_1.1.0 {
+ global:
+ bpf_btf_get_fd_by_id_opts;
+ bpf_link_get_fd_by_id_opts;
+ bpf_map_get_fd_by_id_opts;
+ bpf_prog_get_fd_by_id_opts;
+ user_ring_buffer__discard;
+ user_ring_buffer__free;
+ user_ring_buffer__new;
+ user_ring_buffer__reserve;
+ user_ring_buffer__reserve_blocking;
+ user_ring_buffer__submit;
+} LIBBPF_1.0.0;
+
+LIBBPF_1.2.0 {
+ global:
+ bpf_btf_get_info_by_fd;
+ bpf_link__update_map;
+ bpf_link_get_info_by_fd;
+ bpf_map_get_info_by_fd;
+ bpf_prog_get_info_by_fd;
+} LIBBPF_1.1.0;
diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h
index a23ae1ac27eb..9a7937f339df 100644
--- a/tools/lib/bpf/libbpf_common.h
+++ b/tools/lib/bpf/libbpf_common.h
@@ -10,11 +10,44 @@
#define __LIBBPF_LIBBPF_COMMON_H
#include <string.h>
+#include "libbpf_version.h"
#ifndef LIBBPF_API
#define LIBBPF_API __attribute__((visibility("default")))
#endif
+#define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg)))
+
+/* Mark a symbol as deprecated when libbpf version is >= {major}.{minor} */
+#define LIBBPF_DEPRECATED_SINCE(major, minor, msg) \
+ __LIBBPF_MARK_DEPRECATED_ ## major ## _ ## minor \
+ (LIBBPF_DEPRECATED("libbpf v" # major "." # minor "+: " msg))
+
+#define __LIBBPF_CURRENT_VERSION_GEQ(major, minor) \
+ (LIBBPF_MAJOR_VERSION > (major) || \
+ (LIBBPF_MAJOR_VERSION == (major) && LIBBPF_MINOR_VERSION >= (minor)))
+
+/* Add checks for other versions below when planning deprecation of API symbols
+ * with the LIBBPF_DEPRECATED_SINCE macro.
+ */
+#if __LIBBPF_CURRENT_VERSION_GEQ(1, 0)
+#define __LIBBPF_MARK_DEPRECATED_1_0(X) X
+#else
+#define __LIBBPF_MARK_DEPRECATED_1_0(X)
+#endif
+
+/* This set of internal macros allows doing "function overloading" based on
+ * the number of arguments provided by the user, in a backwards-compatible way
+ * during the transition to libbpf 1.0.
+ * It's an ugly but necessary evil that will be cleaned up when we get to 1.0.
+ * See bpf_prog_load() overload for example.
+ */
+#define ___libbpf_cat(A, B) A ## B
+#define ___libbpf_select(NAME, NUM) ___libbpf_cat(NAME, NUM)
+#define ___libbpf_nth(_1, _2, _3, _4, _5, _6, N, ...) N
+#define ___libbpf_cnt(...) ___libbpf_nth(__VA_ARGS__, 6, 5, 4, 3, 2, 1)
+#define ___libbpf_overload(NAME, ...) ___libbpf_select(NAME, ___libbpf_cnt(__VA_ARGS__))(__VA_ARGS__)
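
The comment above names bpf_prog_load() as the motivating user; with hypothetical names, the pattern looks roughly like this:

/* dispatch my_api(a, b) vs. my_api(a, b, c) by argument count (sketch) */
int ___my_api2(int a, int b);
int ___my_api3(int a, int b, int c);
#define my_api(...) ___libbpf_overload(___my_api, __VA_ARGS__)

/* my_api(1, 2)    expands to ___my_api2(1, 2)
 * my_api(1, 2, 3) expands to ___my_api3(1, 2, 3)
 */
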
+
/* Helper macro to declare and initialize libbpf options struct
*
* This dance with uninitialized declaration, followed by memset to zero,
@@ -28,7 +61,7 @@
* including any extra padding, it with memset() and then assigns initial
* values provided by users in struct initializer-syntax as varargs.
*/
-#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \
+#define LIBBPF_OPTS(TYPE, NAME, ...) \
struct TYPE NAME = ({ \
memset(&NAME, 0, sizeof(struct TYPE)); \
(struct TYPE) { \
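
Caller-side, the renamed macro is used like this with the opts types added earlier in this diff; .sz is filled in by the macro and any unnamed fields stay zeroed:

/* declare, zero, and partially initialize options structs */
LIBBPF_OPTS(ring_buffer_opts, rb_opts);
LIBBPF_OPTS(perf_buffer_opts, pb_opts, .sample_period = 32);
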
diff --git a/tools/lib/bpf/libbpf_errno.c b/tools/lib/bpf/libbpf_errno.c
index 0afb51f7a919..6b180172ec6b 100644
--- a/tools/lib/bpf/libbpf_errno.c
+++ b/tools/lib/bpf/libbpf_errno.c
@@ -12,6 +12,7 @@
#include <string.h>
#include "libbpf.h"
+#include "libbpf_internal.h"
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
@@ -38,29 +39,37 @@ static const char *libbpf_strerror_table[NR_ERRNO] = {
int libbpf_strerror(int err, char *buf, size_t size)
{
+ int ret;
+
if (!buf || !size)
- return -1;
+ return libbpf_err(-EINVAL);
err = err > 0 ? err : -err;
if (err < __LIBBPF_ERRNO__START) {
- int ret;
-
ret = strerror_r(err, buf, size);
buf[size - 1] = '\0';
- return ret;
+ return libbpf_err_errno(ret);
}
if (err < __LIBBPF_ERRNO__END) {
const char *msg;
msg = libbpf_strerror_table[ERRNO_OFFSET(err)];
- snprintf(buf, size, "%s", msg);
+ ret = snprintf(buf, size, "%s", msg);
buf[size - 1] = '\0';
+ /* The lengths of buf and msg are positive, so snprintf() can return
+ * a negative value only when size exceeds INT_MAX, which is unlikely
+ * to happen here.
+ */
+ if (ret >= size)
+ return libbpf_err(-ERANGE);
return 0;
}
- snprintf(buf, size, "Unknown libbpf error %d", err);
+ ret = snprintf(buf, size, "Unknown libbpf error %d", err);
buf[size - 1] = '\0';
- return -1;
+ if (ret >= size)
+ return libbpf_err(-ERANGE);
+ return libbpf_err(-ENOENT);
}
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 8c3afbd97747..e4d05662a96c 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -9,7 +9,53 @@
#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H
+#include <stdlib.h>
+#include <limits.h>
+#include <errno.h>
+#include <linux/err.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include "relo_core.h"
+
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+/* prevent accidental re-addition of reallocarray() */
+#pragma GCC poison reallocarray
+
#include "libbpf.h"
+#include "btf.h"
+
+#ifndef EM_BPF
+#define EM_BPF 247
+#endif
+
+#ifndef R_BPF_64_64
+#define R_BPF_64_64 1
+#endif
+#ifndef R_BPF_64_ABS64
+#define R_BPF_64_ABS64 2
+#endif
+#ifndef R_BPF_64_ABS32
+#define R_BPF_64_ABS32 3
+#endif
+#ifndef R_BPF_64_32
+#define R_BPF_64_32 10
+#endif
+
+#ifndef SHT_LLVM_ADDRSIG
+#define SHT_LLVM_ADDRSIG 0x6FFF4C03
+#endif
+
+/* if libelf is old and doesn't support mmap(), fall back to read() */
+#ifndef ELF_C_READ_MMAP
+#define ELF_C_READ_MMAP ELF_C_READ
+#endif
+
+/* Older libelf versions all end up in this expression, for both 32 and 64 bit */
+#ifndef ELF64_ST_VISIBILITY
+#define ELF64_ST_VISIBILITY(o) ((o) & 0x03)
+#endif
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
@@ -22,7 +68,19 @@
#define BTF_MEMBER_ENC(name, type, bits_offset) (name), (type), (bits_offset)
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
+#define BTF_TYPE_FLOAT_ENC(name, sz) \
+ BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
+#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \
+ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)
+#define BTF_TYPE_TYPE_TAG_ENC(value, type) \
+ BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type)
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
@@ -33,21 +91,55 @@
# define offsetofend(TYPE, FIELD) \
(offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
#endif
+#ifndef __alias
+#define __alias(symbol) __attribute__((alias(#symbol)))
+#endif
+
+/* Check whether a string `str` has prefix `pfx`, regardless of whether `pfx` is
+ * a string literal known at compilation time or char * pointer known only at
+ * runtime.
+ */
+#define str_has_pfx(str, pfx) \
+ (strncmp(str, pfx, __builtin_constant_p(pfx) ? sizeof(pfx) - 1 : strlen(pfx)) == 0)
+
+/* suffix check */
+static inline bool str_has_sfx(const char *str, const char *sfx)
+{
+ size_t str_len = strlen(str);
+ size_t sfx_len = strlen(sfx);
+
+ if (sfx_len > str_len)
+ return false;
+ return strcmp(str + str_len - sfx_len, sfx) == 0;
+}
/* Symbol versioning is different between static and shared library.
* Properly versioned symbols are needed for shared library, but
* only the symbol of the new version is needed for static library.
+ * Starting with GNU C 10, use symver attribute instead of .symver assembler
+ * directive, which works better with GCC LTO builds.
*/
-#ifdef SHARED
-# define COMPAT_VERSION(internal_name, api_name, version) \
+#if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10
+
+#define DEFAULT_VERSION(internal_name, api_name, version) \
+ __attribute__((symver(#api_name "@@" #version)))
+#define COMPAT_VERSION(internal_name, api_name, version) \
+ __attribute__((symver(#api_name "@" #version)))
+
+#elif defined(SHARED)
+
+#define COMPAT_VERSION(internal_name, api_name, version) \
asm(".symver " #internal_name "," #api_name "@" #version);
-# define DEFAULT_VERSION(internal_name, api_name, version) \
+#define DEFAULT_VERSION(internal_name, api_name, version) \
asm(".symver " #internal_name "," #api_name "@@" #version);
-#else
-# define COMPAT_VERSION(internal_name, api_name, version)
-# define DEFAULT_VERSION(internal_name, api_name, version) \
+
+#else /* !SHARED */
+
+#define COMPAT_VERSION(internal_name, api_name, version)
+#define DEFAULT_VERSION(internal_name, api_name, version) \
extern typeof(internal_name) api_name \
__attribute__((alias(#internal_name)));
+
#endif
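
A hypothetical use of these macros (all names made up): keep the old implementation reachable at its original version node while new links resolve to the default.

COMPAT_VERSION(my_api_v1, my_api, LIBBPF_0.0.9)		/* "my_api@LIBBPF_0.0.9" */
int my_api_v1(int fd) { return fd; }

DEFAULT_VERSION(my_api_v2, my_api, LIBBPF_1.0.0)	/* "my_api@@LIBBPF_1.0.0" */
int my_api_v2(int fd, int flags) { return fd + flags; }
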
extern void libbpf_print(enum libbpf_print_level level,
@@ -63,6 +155,129 @@ do { \
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+struct bpf_link {
+ int (*detach)(struct bpf_link *link);
+ void (*dealloc)(struct bpf_link *link);
+ char *pin_path; /* NULL, if not pinned */
+ int fd; /* hook FD, -1 if not applicable */
+ bool disconnected;
+};
+
+/*
+ * Re-implement glibc's reallocarray() for libbpf internal-only use.
+ * reallocarray(), unfortunately, is not available in all versions of glibc,
+ * so it requires extra feature detection and use of the reallocarray() stub
+ * from <tools/libc_compat.h> and COMPAT_NEED_REALLOCARRAY. All this complicates
+ * the build of libbpf unnecessarily and is just a maintenance burden. Instead,
+ * it's trivial to implement libbpf-specific internal version and use it
+ * throughout libbpf.
+ */
+static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
+{
+ size_t total;
+
+#if __has_builtin(__builtin_mul_overflow)
+ if (unlikely(__builtin_mul_overflow(nmemb, size, &total)))
+ return NULL;
+#else
+ if (size == 0 || nmemb > ULONG_MAX / size)
+ return NULL;
+ total = nmemb * size;
+#endif
+ return realloc(ptr, total);
+}
+
+/* Copy up to sz - 1 bytes from zero-terminated src string and ensure that dst
+ * is a zero-terminated string no matter what (unless sz == 0, in which case
+ * it's a no-op). It's conceptually close to FreeBSD's strlcpy(), but differs
+ * in what is returned. Given this is an internal helper, it's trivial to extend
+ * this, when necessary. Use this instead of strncpy inside libbpf source code.
+ */
+static inline void libbpf_strlcpy(char *dst, const char *src, size_t sz)
+{
+ size_t i;
+
+ if (sz == 0)
+ return;
+
+ sz--;
+ for (i = 0; i < sz && src[i]; i++)
+ dst[i] = src[i];
+ dst[i] = '\0';
+}
+
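
A tiny, hypothetical illustration of the two internal helpers above, as they might be used inside libbpf:

struct entry { int v; };	/* hypothetical element type */

static int grow_entries(struct entry **arr, size_t *cap, char *name, size_t name_sz)
{
	/* overflow-checked equivalent of realloc(*arr, *cap * 2 * sizeof(**arr)) */
	struct entry *tmp = libbpf_reallocarray(*arr, *cap * 2, sizeof(**arr));

	if (!tmp)
		return -ENOMEM;
	*arr = tmp;
	*cap *= 2;

	/* name is always NUL-terminated, truncated if necessary */
	libbpf_strlcpy(name, "default-name", name_sz);
	return 0;
}
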
+__u32 get_kernel_version(void);
+
+struct btf;
+struct btf_type;
+
+struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id);
+const char *btf_kind_str(const struct btf_type *t);
+const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
+
+static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t)
+{
+ return (enum btf_func_linkage)(int)btf_vlen(t);
+}
+
+static inline __u32 btf_type_info(int kind, int vlen, int kflag)
+{
+ return (kflag << 31) | (kind << 24) | vlen;
+}
+
+enum map_def_parts {
+ MAP_DEF_MAP_TYPE = 0x001,
+ MAP_DEF_KEY_TYPE = 0x002,
+ MAP_DEF_KEY_SIZE = 0x004,
+ MAP_DEF_VALUE_TYPE = 0x008,
+ MAP_DEF_VALUE_SIZE = 0x010,
+ MAP_DEF_MAX_ENTRIES = 0x020,
+ MAP_DEF_MAP_FLAGS = 0x040,
+ MAP_DEF_NUMA_NODE = 0x080,
+ MAP_DEF_PINNING = 0x100,
+ MAP_DEF_INNER_MAP = 0x200,
+ MAP_DEF_MAP_EXTRA = 0x400,
+
+ MAP_DEF_ALL = 0x7ff, /* combination of all above */
+};
+
+struct btf_map_def {
+ enum map_def_parts parts;
+ __u32 map_type;
+ __u32 key_type_id;
+ __u32 key_size;
+ __u32 value_type_id;
+ __u32 value_size;
+ __u32 max_entries;
+ __u32 map_flags;
+ __u32 numa_node;
+ __u32 pinning;
+ __u64 map_extra;
+};
+
+int parse_btf_map_def(const char *map_name, struct btf *btf,
+ const struct btf_type *def_t, bool strict,
+ struct btf_map_def *map_def, struct btf_map_def *inner_def);
+
+void *libbpf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
+ size_t cur_cnt, size_t max_cnt, size_t add_cnt);
+int libbpf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
+
+static inline bool libbpf_is_mem_zeroed(const char *p, ssize_t len)
+{
+ while (len > 0) {
+ if (*p)
+ return false;
+ p++;
+ len--;
+ }
+ return true;
+}
+
static inline bool libbpf_validate_opts(const char *opts,
size_t opts_sz, size_t user_sz,
const char *type_name)
@@ -71,16 +286,9 @@ static inline bool libbpf_validate_opts(const char *opts,
pr_warn("%s size (%zu) is too small\n", type_name, user_sz);
return false;
}
- if (user_sz > opts_sz) {
- size_t i;
-
- for (i = opts_sz; i < user_sz; i++) {
- if (opts[i]) {
- pr_warn("%s has non-zero extra bytes\n",
- type_name);
- return false;
- }
- }
+ if (!libbpf_is_mem_zeroed(opts + opts_sz, (ssize_t)user_sz - opts_sz)) {
+ pr_warn("%s has non-zero extra bytes\n", type_name);
+ return false;
}
return true;
}
@@ -94,28 +302,74 @@ static inline bool libbpf_validate_opts(const char *opts,
((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
#define OPTS_GET(opts, field, fallback_value) \
(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
+#define OPTS_SET(opts, field, value) \
+ do { \
+ if (OPTS_HAS(opts, field)) \
+ (opts)->field = value; \
+ } while (0)
+
+#define OPTS_ZEROED(opts, last_nonzero_field) \
+({ \
+ ssize_t __off = offsetofend(typeof(*(opts)), last_nonzero_field); \
+ !(opts) || libbpf_is_mem_zeroed((const void *)opts + __off, \
+ (opts)->sz - __off); \
+})
+
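
Inside libbpf, an opts-taking API typically reads caller options with these macros; the struct and function below are hypothetical and only show the usual pattern.

struct my_api_opts {
	size_t sz;		/* set by the caller, e.g. via LIBBPF_OPTS() */
	__u32 flags;
	int log_level;
};

static int my_api(int fd, const struct my_api_opts *opts)
{
	__u32 flags = OPTS_GET(opts, flags, 0);	/* 0 if caller's struct is older */

	/* reject non-zero bytes past the last field this build knows about */
	if (!OPTS_ZEROED(opts, log_level))
		return libbpf_err(-EINVAL);

	pr_debug("my_api: fd=%d flags=%u\n", fd, flags);
	return 0;
}
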
+enum kern_feature_id {
+ /* v4.14: kernel support for program & map names. */
+ FEAT_PROG_NAME,
+ /* v5.2: kernel support for global data sections. */
+ FEAT_GLOBAL_DATA,
+ /* BTF support */
+ FEAT_BTF,
+ /* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
+ FEAT_BTF_FUNC,
+ /* BTF_KIND_VAR and BTF_KIND_DATASEC support */
+ FEAT_BTF_DATASEC,
+ /* BTF_FUNC_GLOBAL is supported */
+ FEAT_BTF_GLOBAL_FUNC,
+ /* BPF_F_MMAPABLE is supported for arrays */
+ FEAT_ARRAY_MMAP,
+ /* kernel support for expected_attach_type in BPF_PROG_LOAD */
+ FEAT_EXP_ATTACH_TYPE,
+ /* bpf_probe_read_{kernel,user}[_str] helpers */
+ FEAT_PROBE_READ_KERN,
+ /* BPF_PROG_BIND_MAP is supported */
+ FEAT_PROG_BIND_MAP,
+ /* Kernel support for module BTFs */
+ FEAT_MODULE_BTF,
+ /* BTF_KIND_FLOAT support */
+ FEAT_BTF_FLOAT,
+ /* BPF perf link support */
+ FEAT_PERF_LINK,
+ /* BTF_KIND_DECL_TAG support */
+ FEAT_BTF_DECL_TAG,
+ /* BTF_KIND_TYPE_TAG support */
+ FEAT_BTF_TYPE_TAG,
+ /* memcg-based accounting for BPF maps and progs */
+ FEAT_MEMCG_ACCOUNT,
+ /* BPF cookie (bpf_get_attach_cookie() BPF helper) support */
+ FEAT_BPF_COOKIE,
+ /* BTF_KIND_ENUM64 support and BTF_KIND_ENUM kflag support */
+ FEAT_BTF_ENUM64,
+ /* Kernel uses syscall wrapper (CONFIG_ARCH_HAS_SYSCALL_WRAPPER) */
+ FEAT_SYSCALL_WRAPPER,
+ __FEAT_CNT,
+};
+
+int probe_memcg_account(void);
+bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
+int bump_rlimit_memlock(void);
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
+int btf_load_into_kernel(struct btf *btf, char *log_buf, size_t log_sz, __u32 log_level);
-int bpf_object__section_size(const struct bpf_object *obj, const char *name,
- __u32 *size);
-int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
- __u32 *off);
-
-struct nlattr;
-typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
-int libbpf_netlink_open(unsigned int *nl_pid);
-int libbpf_nl_get_link(int sock, unsigned int nl_pid,
- libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
-int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex,
- libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie);
-int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
- libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie);
-int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
- libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie);
+struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
+void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
+ const char **prefix, int *kind);
struct btf_ext_info {
/*
@@ -125,6 +379,13 @@ struct btf_ext_info {
void *info;
__u32 rec_size;
__u32 len;
+ /* optional (maintained internally by libbpf) mapping between .BTF.ext
+ * section and corresponding ELF section. This is used to join
+ * information like CO-RE relocation records with corresponding BPF
+ * programs defined in ELF sections
+ */
+ __u32 *sec_idxs;
+ int sec_cnt;
};
#define for_each_btf_ext_sec(seg, sec) \
@@ -138,6 +399,44 @@ struct btf_ext_info {
i < (sec)->num_info; \
i++, rec = (void *)rec + (seg)->rec_size)
+/*
+ * The .BTF.ext ELF section layout defined as
+ * struct btf_ext_header
+ * func_info subsection
+ *
+ * The func_info subsection layout:
+ * record size for struct bpf_func_info in the func_info subsection
+ * struct btf_sec_func_info for section #1
+ * a list of bpf_func_info records for section #1
+ * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
+ * but may not be identical
+ * struct btf_sec_func_info for section #2
+ * a list of bpf_func_info records for section #2
+ * ......
+ *
+ * Note that the bpf_func_info record size in .BTF.ext may not
+ * be the same as the one defined in include/uapi/linux/bpf.h.
+ * The loader should ensure that record_size meets minimum
+ * requirement and pass the record as is to the kernel. The
+ * kernel will handle the func_info properly based on its contents.
+ */
+struct btf_ext_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 func_info_off;
+ __u32 func_info_len;
+ __u32 line_info_off;
+ __u32 line_info_len;
+
+ /* optional part of .BTF.ext header */
+ __u32 core_relo_off;
+ __u32 core_relo_len;
+};
+
struct btf_ext {
union {
struct btf_ext_header *hdr;
@@ -145,7 +444,7 @@ struct btf_ext {
};
struct btf_ext_info func_info;
struct btf_ext_info line_info;
- struct btf_ext_info field_reloc_info;
+ struct btf_ext_info core_relo_info;
__u32 data_size;
};
@@ -153,7 +452,7 @@ struct btf_ext_info_sec {
__u32 sec_name_off;
__u32 num_info;
/* Followed by num_info * record_size number of bytes */
- __u8 data[0];
+ __u8 data[];
};
/* The minimum bpf_func_info checked by the loader */
@@ -170,67 +469,112 @@ struct bpf_line_info_min {
__u32 line_col;
};
-/* bpf_field_info_kind encodes which aspect of captured field has to be
- * adjusted by relocations. Currently supported values are:
- * - BPF_FIELD_BYTE_OFFSET: field offset (in bytes);
- * - BPF_FIELD_EXISTS: field existence (1, if field exists; 0, otherwise);
+
+typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx);
+typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx);
+int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx);
+int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
+int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
+int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
+__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
+ __u32 kind);
+
+typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
+ const char *sym_name, void *ctx);
+
+int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *arg);
+
+/* handle direct returned errors */
+static inline int libbpf_err(int ret)
+{
+ if (ret < 0)
+ errno = -ret;
+ return ret;
+}
+
+/* handle errno-based (e.g., syscall or libc) errors according to libbpf's
+ * strict mode settings
*/
-enum bpf_field_info_kind {
- BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
- BPF_FIELD_BYTE_SIZE = 1,
- BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
- BPF_FIELD_SIGNED = 3,
- BPF_FIELD_LSHIFT_U64 = 4,
- BPF_FIELD_RSHIFT_U64 = 5,
-};
+static inline int libbpf_err_errno(int ret)
+{
+ /* errno is already assumed to be set on error */
+ return ret < 0 ? -errno : ret;
+}
-/* The minimum bpf_field_reloc checked by the loader
- *
- * Field relocation captures the following data:
- * - insn_off - instruction offset (in bytes) within a BPF program that needs
- * its insn->imm field to be relocated with actual field info;
- * - type_id - BTF type ID of the "root" (containing) entity of a relocatable
- * field;
- * - access_str_off - offset into corresponding .BTF string section. String
- * itself encodes an accessed field using a sequence of field and array
- * indicies, separated by colon (:). It's conceptually very close to LLVM's
- * getelementptr ([0]) instruction's arguments for identifying offset to
- * a field.
- *
- * Example to provide a better feel.
- *
- * struct sample {
- * int a;
- * struct {
- * int b[10];
- * };
- * };
- *
- * struct sample *s = ...;
- * int x = &s->a; // encoded as "0:0" (a is field #0)
- * int y = &s->b[5]; // encoded as "0:1:0:5" (anon struct is field #1,
- * // b is field #0 inside anon struct, accessing elem #5)
- * int z = &s[10]->b; // encoded as "10:1" (ptr is used as an array)
- *
- * type_id for all relocs in this example will capture BTF type id of
- * `struct sample`.
- *
- * Such relocation is emitted when using __builtin_preserve_access_index()
- * Clang built-in, passing expression that captures field address, e.g.:
- *
- * bpf_probe_read(&dst, sizeof(dst),
- * __builtin_preserve_access_index(&src->a.b.c));
- *
- * In this case Clang will emit field relocation recording necessary data to
- * be able to find offset of embedded `a.b.c` field within `src` struct.
- *
- * [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
+/* handle error for pointer-returning APIs, err is assumed to be < 0 always */
+static inline void *libbpf_err_ptr(int err)
+{
+ /* set errno on error, this doesn't break anything */
+ errno = -err;
+ return NULL;
+}
+
+/* handle pointer-returning APIs' error handling */
+static inline void *libbpf_ptr(void *ret)
+{
+ /* set errno on error, this doesn't break anything */
+ if (IS_ERR(ret))
+ errno = -PTR_ERR(ret);
+
+ return IS_ERR(ret) ? NULL : ret;
+}
+
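
Two hypothetical APIs showing how these helpers implement the libbpf 1.0 error convention (negative return or NULL, plus errno):

struct my_obj { int fd; };	/* hypothetical object */

static int my_int_api(int fd)
{
	if (fd < 0)
		return libbpf_err(-EINVAL);	/* sets errno = EINVAL, returns -EINVAL */
	return 0;
}

static struct my_obj *my_ptr_api(void)
{
	struct my_obj *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return libbpf_err_ptr(-ENOMEM);	/* sets errno = ENOMEM, returns NULL */
	return obj;
}
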
+static inline bool str_is_empty(const char *s)
+{
+ return !s || !s[0];
+}
+
+static inline bool is_ldimm64_insn(struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
+/* if fd is stdin, stdout, or stderr, dup to a fd greater than 2
+ * Takes ownership of the fd passed in, and closes it if calling
+ * fcntl(fd, F_DUPFD_CLOEXEC, 3).
*/
-struct bpf_field_reloc {
- __u32 insn_off;
- __u32 type_id;
- __u32 access_str_off;
- enum bpf_field_info_kind kind;
-};
+static inline int ensure_good_fd(int fd)
+{
+ int old_fd = fd, saved_errno;
+
+ if (fd < 0)
+ return fd;
+ if (fd < 3) {
+ fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+ saved_errno = errno;
+ close(old_fd);
+ errno = saved_errno;
+ if (fd < 0) {
+ pr_warn("failed to dup FD %d to FD > 2: %d\n", old_fd, -saved_errno);
+ errno = saved_errno;
+ }
+ }
+ return fd;
+}
+
+/* The following two functions are exposed to bpftool */
+int bpf_core_add_cands(struct bpf_core_cand *local_cand,
+ size_t local_essent_len,
+ const struct btf *targ_btf,
+ const char *targ_btf_name,
+ int targ_start_id,
+ struct bpf_core_cand_list *cands);
+void bpf_core_free_cands(struct bpf_core_cand_list *cands);
+
+struct usdt_manager *usdt_manager_new(struct bpf_object *obj);
+void usdt_manager_free(struct usdt_manager *man);
+struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man,
+ const struct bpf_program *prog,
+ pid_t pid, const char *path,
+ const char *usdt_provider, const char *usdt_name,
+ __u64 usdt_cookie);
+
+static inline bool is_pow_of_2(size_t x)
+{
+ return x && (x & (x - 1)) == 0;
+}
+
+#define PROG_LOAD_ATTEMPTS 5
+int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts);
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/libbpf_legacy.h b/tools/lib/bpf/libbpf_legacy.h
new file mode 100644
index 000000000000..1e1be467bede
--- /dev/null
+++ b/tools/lib/bpf/libbpf_legacy.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/*
+ * Libbpf legacy APIs (either discouraged or deprecated, as mentioned in [0])
+ *
+ * [0] https://docs.google.com/document/d/1UyjTZuPFWiPFyKk1tV5an11_iaRuec6U-ZESZ54nNTY
+ *
+ * Copyright (C) 2021 Facebook
+ */
+#ifndef __LIBBPF_LEGACY_BPF_H
+#define __LIBBPF_LEGACY_BPF_H
+
+#include <linux/bpf.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include "libbpf_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* As of libbpf 1.0 libbpf_set_strict_mode() and enum libbpf_strict_mode have
+ * no effect. But they are left in libbpf_legacy.h so that applications that
+ * prepared for libbpf 1.0 before final release by using
+ * libbpf_set_strict_mode() still work with libbpf 1.0+ without any changes.
+ */
+enum libbpf_strict_mode {
+ /* Turn on all supported strict features of libbpf to simulate libbpf
+ * v1.0 behavior.
+ * This will be the default behavior in libbpf v1.0.
+ */
+ LIBBPF_STRICT_ALL = 0xffffffff,
+
+ /*
+ * Disable any libbpf 1.0 behaviors. This is the default before libbpf
+ * v1.0. It won't be supported anymore in v1.0; please update your
+ * code so that it handles LIBBPF_STRICT_ALL mode before libbpf v1.0.
+ */
+ LIBBPF_STRICT_NONE = 0x00,
+ /*
+ * Return NULL pointers on error, not ERR_PTR(err).
+ * Additionally, libbpf also always sets errno to corresponding Exx
+ * (positive) error code.
+ */
+ LIBBPF_STRICT_CLEAN_PTRS = 0x01,
+ /*
+ * Return actual error codes from low-level APIs directly, not just -1.
+ * Additionally, libbpf also always sets errno to corresponding Exx
+ * (positive) error code.
+ */
+ LIBBPF_STRICT_DIRECT_ERRS = 0x02,
+ /*
+ * Enforce strict BPF program section (SEC()) names.
+ * E.g., while previously SEC("xdp_whatever") or SEC("perf_event_blah") were
+ * allowed, with LIBBPF_STRICT_SEC_NAME this will become
+ * unrecognized by libbpf and would have to be just SEC("xdp") and
+ * SEC("perf_event").
+ *
+ * Note, in this mode the program pin path will be based on the
+ * function name instead of section name.
+ *
+ * Additionally, routines in the .text section are always considered
+ * sub-programs. Legacy behavior allows for a single routine in .text
+ * to be a program.
+ */
+ LIBBPF_STRICT_SEC_NAME = 0x04,
+ /*
+ * Disable the global 'bpf_objects_list'. Maintaining this list adds
+ * a race condition to bpf_object__open() and bpf_object__close().
+ * Clients can maintain it on their own if it is valuable for them.
+ */
+ LIBBPF_STRICT_NO_OBJECT_LIST = 0x08,
+ /*
+ * Automatically bump RLIMIT_MEMLOCK using setrlimit() before the
+ * first BPF program or map creation operation. This is done only if
+ * kernel is too old to support memcg-based memory accounting for BPF
+ * subsystem. By default, RLIMIT_MEMLOCK limit is set to RLIM_INFINITY,
+ * but it can be overridden with libbpf_set_memlock_rlim() API.
+ * Note that libbpf_set_memlock_rlim() needs to be called before
+ * the very first bpf_prog_load(), bpf_map_create() or bpf_object__load()
+ * operation.
+ */
+ LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK = 0x10,
+ /*
+ * Error out on any SEC("maps") map definition, which are deprecated
+ * in favor of BTF-defined map definitions in SEC(".maps").
+ */
+ LIBBPF_STRICT_MAP_DEFINITIONS = 0x20,
+
+ __LIBBPF_STRICT_LAST,
+};
+
+LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
+
+/**
+ * @brief **libbpf_get_error()** extracts the error code from the passed
+ * pointer
+ * @param ptr pointer returned from libbpf API function
+ * @return error code; or 0 if no error occurred
+ *
+ * Note: as of libbpf 1.0 this function is neither necessary nor recommended.
+ * Libbpf no longer returns error codes embedded into the pointer itself.
+ * Instead, NULL is returned on error and the error code is passed through the
+ * thread-local errno variable. **libbpf_get_error()** simply returns -errno
+ * if it receives NULL, which is correct only if errno hasn't been modified
+ * between the libbpf API call and the corresponding **libbpf_get_error()**
+ * call. Prefer checking the return value for NULL and using errno directly.
+ *
+ * This API is left in libbpf 1.0 so that applications which were 1.0-ready
+ * before the final libbpf 1.0 release keep working without changes.
+ */
+LIBBPF_API long libbpf_get_error(const void *ptr);
+
+#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS
+
+/* "Discouraged" APIs which don't follow consistent libbpf naming patterns.
+ * They are normally trivial aliases or wrappers for proper APIs and are
+ * left to minimize unnecessary disruption for users of libbpf. But they
+ * shouldn't be used going forward.
+ */
+
+struct bpf_program;
+struct bpf_map;
+struct btf;
+struct btf_ext;
+
+LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
+
+LIBBPF_API enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
+LIBBPF_API enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
+LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map);
+LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
+LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext, __u32 *size);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* __LIBBPF_LEGACY_BPF_H */
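
For reference, a minimal usage sketch of the strict-mode and error-reporting conventions declared in the header above. This is not part of the patch; it assumes libbpf headers are installed under <bpf/>, uses the regular bpf_object__open()/bpf_object__close() APIs, and "prog.bpf.o" is a hypothetical object file name.

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_legacy.h>

int main(void)
{
	struct bpf_object *obj;

	/* Opt into libbpf 1.0 semantics; becomes a no-op on libbpf 1.0+. */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	/* With LIBBPF_STRICT_CLEAN_PTRS, failures are reported as NULL plus a
	 * thread-local errno, so a plain NULL check replaces libbpf_get_error().
	 */
	obj = bpf_object__open("prog.bpf.o");	/* hypothetical object file */
	if (!obj) {
		fprintf(stderr, "failed to open object: %d\n", -errno);
		return 1;
	}

	bpf_object__close(obj);
	return 0;
}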
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 2c92059c0c90..6065f408a59c 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -12,74 +12,151 @@
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
+#include <linux/version.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
-static bool grep(const char *buffer, const char *pattern)
+/* On Ubuntu LINUX_VERSION_CODE doesn't correspond to info.release,
+ * but Ubuntu provides /proc/version_signature file, as described at
+ * https://ubuntu.com/kernel, with example contents below, which we
+ * can use to get a proper LINUX_VERSION_CODE.
+ *
+ * Ubuntu 5.4.0-12.15-generic 5.4.8
+ *
+ * In the above, 5.4.8 is what kernel is actually expecting, while
+ * uname() call will return 5.4.0 in info.release.
+ */
+static __u32 get_ubuntu_kernel_version(void)
{
- return !!strstr(buffer, pattern);
-}
+ const char *ubuntu_kver_file = "/proc/version_signature";
+ __u32 major, minor, patch;
+ int ret;
+ FILE *f;
-static int get_vendor_id(int ifindex)
-{
- char ifname[IF_NAMESIZE], path[64], buf[8];
- ssize_t len;
- int fd;
+ if (faccessat(AT_FDCWD, ubuntu_kver_file, R_OK, AT_EACCESS) != 0)
+ return 0;
- if (!if_indextoname(ifindex, ifname))
- return -1;
+ f = fopen(ubuntu_kver_file, "r");
+ if (!f)
+ return 0;
- snprintf(path, sizeof(path), "/sys/class/net/%s/device/vendor", ifname);
+ ret = fscanf(f, "%*s %*s %u.%u.%u\n", &major, &minor, &patch);
+ fclose(f);
+ if (ret != 3)
+ return 0;
- fd = open(path, O_RDONLY);
- if (fd < 0)
- return -1;
+ return KERNEL_VERSION(major, minor, patch);
+}
- len = read(fd, buf, sizeof(buf));
- close(fd);
- if (len < 0)
- return -1;
- if (len >= (ssize_t)sizeof(buf))
- return -1;
- buf[len] = '\0';
+/* On Debian LINUX_VERSION_CODE doesn't correspond to info.release.
+ * Instead, it is provided in info.version. An example from a
+ * Debian 10 system is shown below.
+ *
+ * utsname::release 4.19.0-22-amd64
+ * utsname::version #1 SMP Debian 4.19.260-1 (2022-09-29)
+ *
+ * In the above, 4.19.260 is what kernel is actually expecting, while
+ * uname() call will return 4.19.0 in info.release.
+ */
+static __u32 get_debian_kernel_version(struct utsname *info)
+{
+ __u32 major, minor, patch;
+ char *p;
+
+ p = strstr(info->version, "Debian ");
+ if (!p) {
+ /* This is not a Debian kernel. */
+ return 0;
+ }
+
+ if (sscanf(p, "Debian %u.%u.%u", &major, &minor, &patch) != 3)
+ return 0;
- return strtol(buf, NULL, 0);
+ return KERNEL_VERSION(major, minor, patch);
}
-static int get_kernel_version(void)
+__u32 get_kernel_version(void)
{
- int version, subversion, patchlevel;
- struct utsname utsn;
+ __u32 major, minor, patch, version;
+ struct utsname info;
- /* Return 0 on failure, and attempt to probe with empty kversion */
- if (uname(&utsn))
- return 0;
+ /* Check if this is an Ubuntu kernel. */
+ version = get_ubuntu_kernel_version();
+ if (version != 0)
+ return version;
+
+ uname(&info);
- if (sscanf(utsn.release, "%d.%d.%d",
- &version, &subversion, &patchlevel) != 3)
+ /* Check if this is a Debian kernel. */
+ version = get_debian_kernel_version(&info);
+ if (version != 0)
+ return version;
+
+ if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
return 0;
- return (version << 16) + (subversion << 8) + patchlevel;
+ return KERNEL_VERSION(major, minor, patch);
}
-static void
-probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
- size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
+static int probe_prog_load(enum bpf_prog_type prog_type,
+ const struct bpf_insn *insns, size_t insns_cnt,
+ char *log_buf, size_t log_buf_sz)
{
- struct bpf_load_program_attr xattr = {};
- int fd;
+ LIBBPF_OPTS(bpf_prog_load_opts, opts,
+ .log_buf = log_buf,
+ .log_size = log_buf_sz,
+ .log_level = log_buf ? 1 : 0,
+ );
+ int fd, err, exp_err = 0;
+ const char *exp_msg = NULL;
+ char buf[4096];
switch (prog_type) {
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
- xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
+ opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
+ break;
+ case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+ opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
+ break;
+ case BPF_PROG_TYPE_SK_LOOKUP:
+ opts.expected_attach_type = BPF_SK_LOOKUP;
break;
case BPF_PROG_TYPE_KPROBE:
- xattr.kern_version = get_kernel_version();
+ opts.kern_version = get_kernel_version();
+ break;
+ case BPF_PROG_TYPE_LIRC_MODE2:
+ opts.expected_attach_type = BPF_LIRC_MODE2;
+ break;
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_LSM:
+ opts.log_buf = buf;
+ opts.log_size = sizeof(buf);
+ opts.log_level = 1;
+ if (prog_type == BPF_PROG_TYPE_TRACING)
+ opts.expected_attach_type = BPF_TRACE_FENTRY;
+ else
+ opts.expected_attach_type = BPF_MODIFY_RETURN;
+ opts.attach_btf_id = 1;
+
+ exp_err = -EINVAL;
+ exp_msg = "attach_btf_id 1 is not a function";
+ break;
+ case BPF_PROG_TYPE_EXT:
+ opts.log_buf = buf;
+ opts.log_size = sizeof(buf);
+ opts.log_level = 1;
+ opts.attach_btf_id = 1;
+
+ exp_err = -EINVAL;
+ exp_msg = "Cannot replace kernel functions";
+ break;
+ case BPF_PROG_TYPE_SYSCALL:
+ opts.prog_flags = BPF_F_SLEEPABLE;
+ break;
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ exp_err = -524; /* -ENOTSUPP */
break;
case BPF_PROG_TYPE_UNSPEC:
case BPF_PROG_TYPE_SOCKET_FILTER:
@@ -100,45 +177,43 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
case BPF_PROG_TYPE_LWT_SEG6LOCAL:
- case BPF_PROG_TYPE_LIRC_MODE2:
case BPF_PROG_TYPE_SK_REUSEPORT:
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
- case BPF_PROG_TYPE_CGROUP_SOCKOPT:
- case BPF_PROG_TYPE_TRACING:
- case BPF_PROG_TYPE_STRUCT_OPS:
- case BPF_PROG_TYPE_EXT:
- case BPF_PROG_TYPE_LSM:
- default:
+ case BPF_PROG_TYPE_NETFILTER:
break;
+ default:
+ return -EOPNOTSUPP;
}
- xattr.prog_type = prog_type;
- xattr.insns = insns;
- xattr.insns_cnt = insns_cnt;
- xattr.license = "GPL";
- xattr.prog_ifindex = ifindex;
-
- fd = bpf_load_program_xattr(&xattr, buf, buf_len);
+ fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
+ err = -errno;
if (fd >= 0)
close(fd);
+ if (exp_err) {
+ if (fd >= 0 || err != exp_err)
+ return 0;
+ if (exp_msg && !strstr(buf, exp_msg))
+ return 0;
+ return 1;
+ }
+ return fd >= 0 ? 1 : 0;
}
-bool bpf_probe_prog_type(enum bpf_prog_type prog_type, __u32 ifindex)
+int libbpf_probe_bpf_prog_type(enum bpf_prog_type prog_type, const void *opts)
{
- struct bpf_insn insns[2] = {
+ struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN()
};
+ const size_t insn_cnt = ARRAY_SIZE(insns);
+ int ret;
- if (ifindex && prog_type == BPF_PROG_TYPE_SCHED_CLS)
- /* nfp returns -EINVAL on exit(0) with TC offload */
- insns[0].imm = 2;
+ if (opts)
+ return libbpf_err(-EINVAL);
- errno = 0;
- probe_load(prog_type, insns, ARRAY_SIZE(insns), NULL, 0, ifindex);
-
- return errno != EINVAL && errno != EOPNOTSUPP;
+ ret = probe_prog_load(prog_type, insns, insn_cnt, NULL, 0);
+ return libbpf_err(ret);
}
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
@@ -164,13 +239,13 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
memcpy(raw_btf + hdr.hdr_len, raw_types, hdr.type_len);
memcpy(raw_btf + hdr.hdr_len + hdr.type_len, str_sec, hdr.str_len);
- btf_fd = bpf_load_btf(raw_btf, btf_len, NULL, 0, false);
+ btf_fd = bpf_btf_load(raw_btf, btf_len, NULL);
free(raw_btf);
return btf_fd;
}
-static int load_sk_storage_btf(void)
+static int load_local_storage_btf(void)
{
const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
/* struct bpf_spin_lock {
@@ -197,17 +272,16 @@ static int load_sk_storage_btf(void)
strs, sizeof(strs));
}
-bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
+static int probe_map_create(enum bpf_map_type map_type)
{
- int key_size, value_size, max_entries, map_flags;
+ LIBBPF_OPTS(bpf_map_create_opts, opts);
+ int key_size, value_size, max_entries;
__u32 btf_key_type_id = 0, btf_value_type_id = 0;
- struct bpf_create_map_attr attr = {};
- int fd = -1, btf_fd = -1, fd_inner;
+ int fd = -1, btf_fd = -1, fd_inner = -1, exp_err = 0, err = 0;
key_size = sizeof(__u32);
value_size = sizeof(__u32);
max_entries = 1;
- map_flags = 0;
switch (map_type) {
case BPF_MAP_TYPE_STACK_TRACE:
@@ -216,7 +290,7 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
case BPF_MAP_TYPE_LPM_TRIE:
key_size = sizeof(__u64);
value_size = sizeof(__u64);
- map_flags = BPF_F_NO_PREALLOC;
+ opts.map_flags = BPF_F_NO_PREALLOC;
break;
case BPF_MAP_TYPE_CGROUP_STORAGE:
case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE:
@@ -229,16 +303,33 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
key_size = 0;
break;
case BPF_MAP_TYPE_SK_STORAGE:
+ case BPF_MAP_TYPE_INODE_STORAGE:
+ case BPF_MAP_TYPE_TASK_STORAGE:
+ case BPF_MAP_TYPE_CGRP_STORAGE:
btf_key_type_id = 1;
btf_value_type_id = 3;
value_size = 8;
max_entries = 0;
- map_flags = BPF_F_NO_PREALLOC;
- btf_fd = load_sk_storage_btf();
+ opts.map_flags = BPF_F_NO_PREALLOC;
+ btf_fd = load_local_storage_btf();
if (btf_fd < 0)
- return false;
+ return btf_fd;
+ break;
+ case BPF_MAP_TYPE_RINGBUF:
+ case BPF_MAP_TYPE_USER_RINGBUF:
+ key_size = 0;
+ value_size = 0;
+ max_entries = sysconf(_SC_PAGE_SIZE);
+ break;
+ case BPF_MAP_TYPE_STRUCT_OPS:
+ /* we'll get -ENOTSUPP for invalid BTF type ID for struct_ops */
+ opts.btf_vmlinux_value_type_id = 1;
+ exp_err = -524; /* -ENOTSUPP */
+ break;
+ case BPF_MAP_TYPE_BLOOM_FILTER:
+ key_size = 0;
+ max_entries = 1;
break;
- case BPF_MAP_TYPE_UNSPEC:
case BPF_MAP_TYPE_HASH:
case BPF_MAP_TYPE_ARRAY:
case BPF_MAP_TYPE_PROG_ARRAY:
@@ -257,95 +348,101 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
case BPF_MAP_TYPE_XSKMAP:
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
- case BPF_MAP_TYPE_STRUCT_OPS:
- default:
break;
+ case BPF_MAP_TYPE_UNSPEC:
+ default:
+ return -EOPNOTSUPP;
}
if (map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
- /* TODO: probe for device, once libbpf has a function to create
- * map-in-map for offload
- */
- if (ifindex)
- return false;
-
- fd_inner = bpf_create_map(BPF_MAP_TYPE_HASH,
- sizeof(__u32), sizeof(__u32), 1, 0);
+ fd_inner = bpf_map_create(BPF_MAP_TYPE_HASH, NULL,
+ sizeof(__u32), sizeof(__u32), 1, NULL);
if (fd_inner < 0)
- return false;
- fd = bpf_create_map_in_map(map_type, NULL, sizeof(__u32),
- fd_inner, 1, 0);
- close(fd_inner);
- } else {
- /* Note: No other restriction on map type probes for offload */
- attr.map_type = map_type;
- attr.key_size = key_size;
- attr.value_size = value_size;
- attr.max_entries = max_entries;
- attr.map_flags = map_flags;
- attr.map_ifindex = ifindex;
- if (btf_fd >= 0) {
- attr.btf_fd = btf_fd;
- attr.btf_key_type_id = btf_key_type_id;
- attr.btf_value_type_id = btf_value_type_id;
- }
-
- fd = bpf_create_map_xattr(&attr);
+ goto cleanup;
+
+ opts.inner_map_fd = fd_inner;
}
+
+ if (btf_fd >= 0) {
+ opts.btf_fd = btf_fd;
+ opts.btf_key_type_id = btf_key_type_id;
+ opts.btf_value_type_id = btf_value_type_id;
+ }
+
+ fd = bpf_map_create(map_type, NULL, key_size, value_size, max_entries, &opts);
+ err = -errno;
+
+cleanup:
if (fd >= 0)
close(fd);
+ if (fd_inner >= 0)
+ close(fd_inner);
if (btf_fd >= 0)
close(btf_fd);
- return fd >= 0;
+ if (exp_err)
+ return fd < 0 && err == exp_err ? 1 : 0;
+ else
+ return fd >= 0 ? 1 : 0;
}
-bool bpf_probe_helper(enum bpf_func_id id, enum bpf_prog_type prog_type,
- __u32 ifindex)
+int libbpf_probe_bpf_map_type(enum bpf_map_type map_type, const void *opts)
{
- struct bpf_insn insns[2] = {
- BPF_EMIT_CALL(id),
- BPF_EXIT_INSN()
- };
- char buf[4096] = {};
- bool res;
-
- probe_load(prog_type, insns, ARRAY_SIZE(insns), buf, sizeof(buf),
- ifindex);
- res = !grep(buf, "invalid func ") && !grep(buf, "unknown func ");
-
- if (ifindex) {
- switch (get_vendor_id(ifindex)) {
- case 0x19ee: /* Netronome specific */
- res = res && !grep(buf, "not supported by FW") &&
- !grep(buf, "unsupported function id");
- break;
- default:
- break;
- }
- }
+ int ret;
- return res;
+ if (opts)
+ return libbpf_err(-EINVAL);
+
+ ret = probe_map_create(map_type);
+ return libbpf_err(ret);
}
-/*
- * Probe for availability of kernel commit (5.3):
- *
- * c04c0d2b968a ("bpf: increase complexity limit and maximum program size")
- */
-bool bpf_probe_large_insn_limit(__u32 ifindex)
+int libbpf_probe_bpf_helper(enum bpf_prog_type prog_type, enum bpf_func_id helper_id,
+ const void *opts)
{
- struct bpf_insn insns[BPF_MAXINSNS + 1];
- int i;
+ struct bpf_insn insns[] = {
+ BPF_EMIT_CALL((__u32)helper_id),
+ BPF_EXIT_INSN(),
+ };
+ const size_t insn_cnt = ARRAY_SIZE(insns);
+ char buf[4096];
+ int ret;
- for (i = 0; i < BPF_MAXINSNS; i++)
- insns[i] = BPF_MOV64_IMM(BPF_REG_0, 1);
- insns[BPF_MAXINSNS] = BPF_EXIT_INSN();
+ if (opts)
+ return libbpf_err(-EINVAL);
- errno = 0;
- probe_load(BPF_PROG_TYPE_SCHED_CLS, insns, ARRAY_SIZE(insns), NULL, 0,
- ifindex);
+ /* we can't successfully load all prog types to check for BPF helper
+ * support, so bail out with -EOPNOTSUPP error
+ */
+ switch (prog_type) {
+ case BPF_PROG_TYPE_TRACING:
+ case BPF_PROG_TYPE_EXT:
+ case BPF_PROG_TYPE_LSM:
+ case BPF_PROG_TYPE_STRUCT_OPS:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
- return errno != E2BIG && errno != EINVAL;
+ buf[0] = '\0';
+ ret = probe_prog_load(prog_type, insns, insn_cnt, buf, sizeof(buf));
+ if (ret < 0)
+ return libbpf_err(ret);
+
+ /* If BPF verifier doesn't recognize BPF helper ID (enum bpf_func_id)
+ * at all, it will emit something like "invalid func unknown#181".
+ * If BPF verifier recognizes BPF helper but it's not supported for
+ * given BPF program type, it will emit "unknown func bpf_sys_bpf#166".
+ * In both cases, provided combination of BPF program type and BPF
+ * helper is not supported by the kernel.
+ * In all other cases, probe_prog_load() above will either succeed (e.g.,
+ * because BPF helper happens to accept no input arguments or it
+ * accepts one input argument and initial PTR_TO_CTX is fine for
+ * that), or we'll get some more specific BPF verifier error about
+ * some unsatisfied conditions.
+ */
+ if (ret == 0 && (strstr(buf, "invalid func ") || strstr(buf, "unknown func ")))
+ return 0;
+ return 1; /* assume supported */
}
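
The reworked probe API above returns 1 when a feature is supported, 0 when it is not, and a negative error code (with errno set) on failure, replacing the old bool-returning, ifindex-based probes. A small feature-detection sketch, assuming only the declarations added in this patch plus the usual <bpf/libbpf.h> and <linux/bpf.h> headers:

#include <stdio.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

static void report(const char *what, int ret)
{
	if (ret < 0)
		printf("%s: probe failed (%d)\n", what, ret);
	else
		printf("%s: %s\n", what, ret ? "supported" : "not supported");
}

int main(void)
{
	/* the opts argument must be NULL for now, as enforced above */
	report("XDP programs", libbpf_probe_bpf_prog_type(BPF_PROG_TYPE_XDP, NULL));
	report("ringbuf maps", libbpf_probe_bpf_map_type(BPF_MAP_TYPE_RINGBUF, NULL));
	report("bpf_redirect() in XDP",
	       libbpf_probe_bpf_helper(BPF_PROG_TYPE_XDP, BPF_FUNC_redirect, NULL));
	return 0;
}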
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
deleted file mode 100644
index 59c779c5790c..000000000000
--- a/tools/lib/bpf/libbpf_util.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
-/* Copyright (c) 2019 Facebook */
-
-#ifndef __LIBBPF_LIBBPF_UTIL_H
-#define __LIBBPF_LIBBPF_UTIL_H
-
-#include <stdbool.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* Use these barrier functions instead of smp_[rw]mb() when they are
- * used in a libbpf header file. That way they can be built into the
- * application that uses libbpf.
- */
-#if defined(__i386__) || defined(__x86_64__)
-# define libbpf_smp_rmb() asm volatile("" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("" : : : "memory")
-# define libbpf_smp_mb() \
- asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
-/* Hinders stores to be observed before older loads. */
-# define libbpf_smp_rwmb() asm volatile("" : : : "memory")
-#elif defined(__aarch64__)
-# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_rwmb() libbpf_smp_mb()
-#elif defined(__arm__)
-/* These are only valid for armv7 and above */
-# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_rwmb() libbpf_smp_mb()
-#else
-/* Architecture missing native barrier functions. */
-# define libbpf_smp_rmb() __sync_synchronize()
-# define libbpf_smp_wmb() __sync_synchronize()
-# define libbpf_smp_mb() __sync_synchronize()
-# define libbpf_smp_rwmb() __sync_synchronize()
-#endif
-
-#ifdef __cplusplus
-} /* extern "C" */
-#endif
-
-#endif
diff --git a/tools/lib/bpf/libbpf_version.h b/tools/lib/bpf/libbpf_version.h
new file mode 100644
index 000000000000..1fd2eeac5cfc
--- /dev/null
+++ b/tools/lib/bpf/libbpf_version.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (C) 2021 Facebook */
+#ifndef __LIBBPF_VERSION_H
+#define __LIBBPF_VERSION_H
+
+#define LIBBPF_MAJOR_VERSION 1
+#define LIBBPF_MINOR_VERSION 2
+
+#endif /* __LIBBPF_VERSION_H */
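
These macros allow compile-time gating on the libbpf version. A minimal sketch, assuming the header is installed as <bpf/libbpf_version.h> like the other libbpf headers:

#include <bpf/libbpf_version.h>

/* Require at least libbpf 1.2 at build time. */
#if LIBBPF_MAJOR_VERSION < 1 || \
    (LIBBPF_MAJOR_VERSION == 1 && LIBBPF_MINOR_VERSION < 2)
#error "this program needs libbpf >= 1.2"
#endif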
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
new file mode 100644
index 000000000000..5ced96d99f8c
--- /dev/null
+++ b/tools/lib/bpf/linker.c
@@ -0,0 +1,2905 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/*
+ * BPF static linker
+ *
+ * Copyright (c) 2021 Facebook
+ */
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <linux/err.h>
+#include <linux/btf.h>
+#include <elf.h>
+#include <libelf.h>
+#include <fcntl.h>
+#include "libbpf.h"
+#include "btf.h"
+#include "libbpf_internal.h"
+#include "strset.h"
+
+#define BTF_EXTERN_SEC ".extern"
+
+struct src_sec {
+ const char *sec_name;
+ /* positional (not necessarily ELF) index in an array of sections */
+ int id;
+ /* positional (not necessarily ELF) index of a matching section in a final object file */
+ int dst_id;
+ /* section data offset in a matching output section */
+ int dst_off;
+ /* whether section is omitted from the final ELF file */
+ bool skipped;
+ /* whether section is an ephemeral section, not mapped to an ELF section */
+ bool ephemeral;
+
+ /* ELF info */
+ size_t sec_idx;
+ Elf_Scn *scn;
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+
+ /* corresponding BTF DATASEC type ID */
+ int sec_type_id;
+};
+
+struct src_obj {
+ const char *filename;
+ int fd;
+ Elf *elf;
+ /* Section header strings section index */
+ size_t shstrs_sec_idx;
+ /* SYMTAB section index */
+ size_t symtab_sec_idx;
+
+ struct btf *btf;
+ struct btf_ext *btf_ext;
+
+ /* List of sections (including ephemeral). Slot zero is unused. */
+ struct src_sec *secs;
+ int sec_cnt;
+
+ /* mapping of symbol indices from src to dst ELF */
+ int *sym_map;
+ /* mapping from the src BTF type IDs to dst ones */
+ int *btf_type_map;
+};
+
+/* single .BTF.ext data section */
+struct btf_ext_sec_data {
+ size_t rec_cnt;
+ __u32 rec_sz;
+ void *recs;
+};
+
+struct glob_sym {
+ /* ELF symbol index */
+ int sym_idx;
+ /* associated section id for .ksyms, .kconfig, etc, but not .extern */
+ int sec_id;
+ /* extern name offset in STRTAB */
+ int name_off;
+ /* optional associated BTF type ID */
+ int btf_id;
+ /* BTF type ID to which VAR/FUNC type is pointing to; used for
+ * rewriting types when extern VAR/FUNC is resolved to a concrete
+ * definition
+ */
+ int underlying_btf_id;
+ /* sec_var index in the corresponding dst_sec, if exists */
+ int var_idx;
+
+ /* extern or resolved/global symbol */
+ bool is_extern;
+ /* weak or strong symbol, never goes back from strong to weak */
+ bool is_weak;
+};
+
+struct dst_sec {
+ char *sec_name;
+ /* positional (not necessarily ELF) index in an array of sections */
+ int id;
+
+ bool ephemeral;
+
+ /* ELF info */
+ size_t sec_idx;
+ Elf_Scn *scn;
+ Elf64_Shdr *shdr;
+ Elf_Data *data;
+
+ /* final output section size */
+ int sec_sz;
+ /* final output contents of the section */
+ void *raw_data;
+
+ /* corresponding STT_SECTION symbol index in SYMTAB */
+ int sec_sym_idx;
+
+ /* section's DATASEC variable info, emitted on BTF finalization */
+ bool has_btf;
+ int sec_var_cnt;
+ struct btf_var_secinfo *sec_vars;
+
+ /* section's .BTF.ext data */
+ struct btf_ext_sec_data func_info;
+ struct btf_ext_sec_data line_info;
+ struct btf_ext_sec_data core_relo_info;
+};
+
+struct bpf_linker {
+ char *filename;
+ int fd;
+ Elf *elf;
+ Elf64_Ehdr *elf_hdr;
+
+ /* Output sections metadata */
+ struct dst_sec *secs;
+ int sec_cnt;
+
+ struct strset *strtab_strs; /* STRTAB unique strings */
+ size_t strtab_sec_idx; /* STRTAB section index */
+ size_t symtab_sec_idx; /* SYMTAB section index */
+
+ struct btf *btf;
+ struct btf_ext *btf_ext;
+
+ /* global (including extern) ELF symbols */
+ int glob_sym_cnt;
+ struct glob_sym *glob_syms;
+};
+
+#define pr_warn_elf(fmt, ...) \
+ libbpf_print(LIBBPF_WARN, "libbpf: " fmt ": %s\n", ##__VA_ARGS__, elf_errmsg(-1))
+
+static int init_output_elf(struct bpf_linker *linker, const char *file);
+
+static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts,
+ struct src_obj *obj);
+static int linker_sanity_check_elf(struct src_obj *obj);
+static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
+static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec);
+static int linker_sanity_check_btf(struct src_obj *obj);
+static int linker_sanity_check_btf_ext(struct src_obj *obj);
+static int linker_fixup_btf(struct src_obj *obj);
+static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj);
+static int linker_append_elf_syms(struct bpf_linker *linker, struct src_obj *obj);
+static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
+ Elf64_Sym *sym, const char *sym_name, int src_sym_idx);
+static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj);
+static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj);
+static int linker_append_btf_ext(struct bpf_linker *linker, struct src_obj *obj);
+
+static int finalize_btf(struct bpf_linker *linker);
+static int finalize_btf_ext(struct bpf_linker *linker);
+
+void bpf_linker__free(struct bpf_linker *linker)
+{
+ int i;
+
+ if (!linker)
+ return;
+
+ free(linker->filename);
+
+ if (linker->elf)
+ elf_end(linker->elf);
+
+ if (linker->fd >= 0)
+ close(linker->fd);
+
+ strset__free(linker->strtab_strs);
+
+ btf__free(linker->btf);
+ btf_ext__free(linker->btf_ext);
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ free(sec->sec_name);
+ free(sec->raw_data);
+ free(sec->sec_vars);
+
+ free(sec->func_info.recs);
+ free(sec->line_info.recs);
+ free(sec->core_relo_info.recs);
+ }
+ free(linker->secs);
+
+ free(linker->glob_syms);
+ free(linker);
+}
+
+struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts)
+{
+ struct bpf_linker *linker;
+ int err;
+
+ if (!OPTS_VALID(opts, bpf_linker_opts))
+ return errno = EINVAL, NULL;
+
+ if (elf_version(EV_CURRENT) == EV_NONE) {
+ pr_warn_elf("libelf initialization failed");
+ return errno = EINVAL, NULL;
+ }
+
+ linker = calloc(1, sizeof(*linker));
+ if (!linker)
+ return errno = ENOMEM, NULL;
+
+ linker->fd = -1;
+
+ err = init_output_elf(linker, filename);
+ if (err)
+ goto err_out;
+
+ return linker;
+
+err_out:
+ bpf_linker__free(linker);
+ return errno = -err, NULL;
+}
+
+static struct dst_sec *add_dst_sec(struct bpf_linker *linker, const char *sec_name)
+{
+ struct dst_sec *secs = linker->secs, *sec;
+ size_t new_cnt = linker->sec_cnt ? linker->sec_cnt + 1 : 2;
+
+ secs = libbpf_reallocarray(secs, new_cnt, sizeof(*secs));
+ if (!secs)
+ return NULL;
+
+ /* zero out newly allocated memory */
+ memset(secs + linker->sec_cnt, 0, (new_cnt - linker->sec_cnt) * sizeof(*secs));
+
+ linker->secs = secs;
+ linker->sec_cnt = new_cnt;
+
+ sec = &linker->secs[new_cnt - 1];
+ sec->id = new_cnt - 1;
+ sec->sec_name = strdup(sec_name);
+ if (!sec->sec_name)
+ return NULL;
+
+ return sec;
+}
+
+static Elf64_Sym *add_new_sym(struct bpf_linker *linker, size_t *sym_idx)
+{
+ struct dst_sec *symtab = &linker->secs[linker->symtab_sec_idx];
+ Elf64_Sym *syms, *sym;
+ size_t sym_cnt = symtab->sec_sz / sizeof(*sym);
+
+ syms = libbpf_reallocarray(symtab->raw_data, sym_cnt + 1, sizeof(*sym));
+ if (!syms)
+ return NULL;
+
+ sym = &syms[sym_cnt];
+ memset(sym, 0, sizeof(*sym));
+
+ symtab->raw_data = syms;
+ symtab->sec_sz += sizeof(*sym);
+ symtab->shdr->sh_size += sizeof(*sym);
+ symtab->data->d_size += sizeof(*sym);
+
+ if (sym_idx)
+ *sym_idx = sym_cnt;
+
+ return sym;
+}
+
+static int init_output_elf(struct bpf_linker *linker, const char *file)
+{
+ int err, str_off;
+ Elf64_Sym *init_sym;
+ struct dst_sec *sec;
+
+ linker->filename = strdup(file);
+ if (!linker->filename)
+ return -ENOMEM;
+
+ linker->fd = open(file, O_WRONLY | O_CREAT | O_TRUNC | O_CLOEXEC, 0644);
+ if (linker->fd < 0) {
+ err = -errno;
+ pr_warn("failed to create '%s': %d\n", file, err);
+ return err;
+ }
+
+ linker->elf = elf_begin(linker->fd, ELF_C_WRITE, NULL);
+ if (!linker->elf) {
+ pr_warn_elf("failed to create ELF object");
+ return -EINVAL;
+ }
+
+ /* ELF header */
+ linker->elf_hdr = elf64_newehdr(linker->elf);
+ if (!linker->elf_hdr) {
+ pr_warn_elf("failed to create ELF header");
+ return -EINVAL;
+ }
+
+ linker->elf_hdr->e_machine = EM_BPF;
+ linker->elf_hdr->e_type = ET_REL;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ linker->elf_hdr->e_ident[EI_DATA] = ELFDATA2MSB;
+#else
+#error "Unknown __BYTE_ORDER__"
+#endif
+
+ /* STRTAB */
+ /* initialize strset with an empty string to conform to ELF */
+ linker->strtab_strs = strset__new(INT_MAX, "", sizeof(""));
+ if (libbpf_get_error(linker->strtab_strs))
+ return libbpf_get_error(linker->strtab_strs);
+
+ sec = add_dst_sec(linker, ".strtab");
+ if (!sec)
+ return -ENOMEM;
+
+ sec->scn = elf_newscn(linker->elf);
+ if (!sec->scn) {
+ pr_warn_elf("failed to create STRTAB section");
+ return -EINVAL;
+ }
+
+ sec->shdr = elf64_getshdr(sec->scn);
+ if (!sec->shdr)
+ return -EINVAL;
+
+ sec->data = elf_newdata(sec->scn);
+ if (!sec->data) {
+ pr_warn_elf("failed to create STRTAB data");
+ return -EINVAL;
+ }
+
+ str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
+ if (str_off < 0)
+ return str_off;
+
+ sec->sec_idx = elf_ndxscn(sec->scn);
+ linker->elf_hdr->e_shstrndx = sec->sec_idx;
+ linker->strtab_sec_idx = sec->sec_idx;
+
+ sec->shdr->sh_name = str_off;
+ sec->shdr->sh_type = SHT_STRTAB;
+ sec->shdr->sh_flags = SHF_STRINGS;
+ sec->shdr->sh_offset = 0;
+ sec->shdr->sh_link = 0;
+ sec->shdr->sh_info = 0;
+ sec->shdr->sh_addralign = 1;
+ sec->shdr->sh_size = sec->sec_sz = 0;
+ sec->shdr->sh_entsize = 0;
+
+ /* SYMTAB */
+ sec = add_dst_sec(linker, ".symtab");
+ if (!sec)
+ return -ENOMEM;
+
+ sec->scn = elf_newscn(linker->elf);
+ if (!sec->scn) {
+ pr_warn_elf("failed to create SYMTAB section");
+ return -EINVAL;
+ }
+
+ sec->shdr = elf64_getshdr(sec->scn);
+ if (!sec->shdr)
+ return -EINVAL;
+
+ sec->data = elf_newdata(sec->scn);
+ if (!sec->data) {
+ pr_warn_elf("failed to create SYMTAB data");
+ return -EINVAL;
+ }
+
+ str_off = strset__add_str(linker->strtab_strs, sec->sec_name);
+ if (str_off < 0)
+ return str_off;
+
+ sec->sec_idx = elf_ndxscn(sec->scn);
+ linker->symtab_sec_idx = sec->sec_idx;
+
+ sec->shdr->sh_name = str_off;
+ sec->shdr->sh_type = SHT_SYMTAB;
+ sec->shdr->sh_flags = 0;
+ sec->shdr->sh_offset = 0;
+ sec->shdr->sh_link = linker->strtab_sec_idx;
+ /* sh_info should be one greater than the index of the last local
+ * symbol (i.e., binding is STB_LOCAL). But why and who cares?
+ */
+ sec->shdr->sh_info = 0;
+ sec->shdr->sh_addralign = 8;
+ sec->shdr->sh_entsize = sizeof(Elf64_Sym);
+
+ /* .BTF */
+ linker->btf = btf__new_empty();
+ err = libbpf_get_error(linker->btf);
+ if (err)
+ return err;
+
+ /* add the special all-zero symbol */
+ init_sym = add_new_sym(linker, NULL);
+ if (!init_sym)
+ return -EINVAL;
+
+ init_sym->st_name = 0;
+ init_sym->st_info = 0;
+ init_sym->st_other = 0;
+ init_sym->st_shndx = SHN_UNDEF;
+ init_sym->st_value = 0;
+ init_sym->st_size = 0;
+
+ return 0;
+}
+
+int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts)
+{
+ struct src_obj obj = {};
+ int err = 0;
+
+ if (!OPTS_VALID(opts, bpf_linker_file_opts))
+ return libbpf_err(-EINVAL);
+
+ if (!linker->elf)
+ return libbpf_err(-EINVAL);
+
+ err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
+ err = err ?: linker_append_sec_data(linker, &obj);
+ err = err ?: linker_append_elf_syms(linker, &obj);
+ err = err ?: linker_append_elf_relos(linker, &obj);
+ err = err ?: linker_append_btf(linker, &obj);
+ err = err ?: linker_append_btf_ext(linker, &obj);
+
+ /* free up src_obj resources */
+ free(obj.btf_type_map);
+ btf__free(obj.btf);
+ btf_ext__free(obj.btf_ext);
+ free(obj.secs);
+ free(obj.sym_map);
+ if (obj.elf)
+ elf_end(obj.elf);
+ if (obj.fd >= 0)
+ close(obj.fd);
+
+ return libbpf_err(err);
+}
+
+static bool is_dwarf_sec_name(const char *name)
+{
+ /* approximation, but the actual list is too long */
+ return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
+}
+
+static bool is_ignored_sec(struct src_sec *sec)
+{
+ Elf64_Shdr *shdr = sec->shdr;
+ const char *name = sec->sec_name;
+
+ /* no special handling of .strtab */
+ if (shdr->sh_type == SHT_STRTAB)
+ return true;
+
+ /* ignore .llvm_addrsig section as well */
+ if (shdr->sh_type == SHT_LLVM_ADDRSIG)
+ return true;
+
+ /* no subprograms will lead to an empty .text section, ignore it */
+ if (shdr->sh_type == SHT_PROGBITS && shdr->sh_size == 0 &&
+ strcmp(sec->sec_name, ".text") == 0)
+ return true;
+
+ /* DWARF sections */
+ if (is_dwarf_sec_name(sec->sec_name))
+ return true;
+
+ if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
+ name += sizeof(".rel") - 1;
+ /* DWARF section relocations */
+ if (is_dwarf_sec_name(name))
+ return true;
+
+ /* .BTF and .BTF.ext don't need relocations */
+ if (strcmp(name, BTF_ELF_SEC) == 0 ||
+ strcmp(name, BTF_EXT_ELF_SEC) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
+{
+ struct src_sec *secs = obj->secs, *sec;
+ size_t new_cnt = obj->sec_cnt ? obj->sec_cnt + 1 : 2;
+
+ secs = libbpf_reallocarray(secs, new_cnt, sizeof(*secs));
+ if (!secs)
+ return NULL;
+
+ /* zero out newly allocated memory */
+ memset(secs + obj->sec_cnt, 0, (new_cnt - obj->sec_cnt) * sizeof(*secs));
+
+ obj->secs = secs;
+ obj->sec_cnt = new_cnt;
+
+ sec = &obj->secs[new_cnt - 1];
+ sec->id = new_cnt - 1;
+ sec->sec_name = sec_name;
+
+ return sec;
+}
+
+static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
+ const struct bpf_linker_file_opts *opts,
+ struct src_obj *obj)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ const int host_endianness = ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ const int host_endianness = ELFDATA2MSB;
+#else
+#error "Unknown __BYTE_ORDER__"
+#endif
+ int err = 0;
+ Elf_Scn *scn;
+ Elf_Data *data;
+ Elf64_Ehdr *ehdr;
+ Elf64_Shdr *shdr;
+ struct src_sec *sec;
+
+ pr_debug("linker: adding object file '%s'...\n", filename);
+
+ obj->filename = filename;
+
+ obj->fd = open(filename, O_RDONLY | O_CLOEXEC);
+ if (obj->fd < 0) {
+ err = -errno;
+ pr_warn("failed to open file '%s': %d\n", filename, err);
+ return err;
+ }
+ obj->elf = elf_begin(obj->fd, ELF_C_READ_MMAP, NULL);
+ if (!obj->elf) {
+ err = -errno;
+ pr_warn_elf("failed to parse ELF file '%s'", filename);
+ return err;
+ }
+
+ /* Sanity check ELF file high-level properties */
+ ehdr = elf64_getehdr(obj->elf);
+ if (!ehdr) {
+ err = -errno;
+ pr_warn_elf("failed to get ELF header for %s", filename);
+ return err;
+ }
+ if (ehdr->e_ident[EI_DATA] != host_endianness) {
+ err = -EOPNOTSUPP;
+ pr_warn_elf("unsupported byte order of ELF file %s", filename);
+ return err;
+ }
+ if (ehdr->e_type != ET_REL
+ || ehdr->e_machine != EM_BPF
+ || ehdr->e_ident[EI_CLASS] != ELFCLASS64) {
+ err = -EOPNOTSUPP;
+ pr_warn_elf("unsupported kind of ELF file %s", filename);
+ return err;
+ }
+
+ if (elf_getshdrstrndx(obj->elf, &obj->shstrs_sec_idx)) {
+ err = -errno;
+ pr_warn_elf("failed to get SHSTRTAB section index for %s", filename);
+ return err;
+ }
+
+ scn = NULL;
+ while ((scn = elf_nextscn(obj->elf, scn)) != NULL) {
+ size_t sec_idx = elf_ndxscn(scn);
+ const char *sec_name;
+
+ shdr = elf64_getshdr(scn);
+ if (!shdr) {
+ err = -errno;
+ pr_warn_elf("failed to get section #%zu header for %s",
+ sec_idx, filename);
+ return err;
+ }
+
+ sec_name = elf_strptr(obj->elf, obj->shstrs_sec_idx, shdr->sh_name);
+ if (!sec_name) {
+ err = -errno;
+ pr_warn_elf("failed to get section #%zu name for %s",
+ sec_idx, filename);
+ return err;
+ }
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ err = -errno;
+ pr_warn_elf("failed to get section #%zu (%s) data from %s",
+ sec_idx, sec_name, filename);
+ return err;
+ }
+
+ sec = add_src_sec(obj, sec_name);
+ if (!sec)
+ return -ENOMEM;
+
+ sec->scn = scn;
+ sec->shdr = shdr;
+ sec->data = data;
+ sec->sec_idx = elf_ndxscn(scn);
+
+ if (is_ignored_sec(sec)) {
+ sec->skipped = true;
+ continue;
+ }
+
+ switch (shdr->sh_type) {
+ case SHT_SYMTAB:
+ if (obj->symtab_sec_idx) {
+ err = -EOPNOTSUPP;
+ pr_warn("multiple SYMTAB sections found, not supported\n");
+ return err;
+ }
+ obj->symtab_sec_idx = sec_idx;
+ break;
+ case SHT_STRTAB:
+ /* we'll construct our own string table */
+ break;
+ case SHT_PROGBITS:
+ if (strcmp(sec_name, BTF_ELF_SEC) == 0) {
+ obj->btf = btf__new(data->d_buf, shdr->sh_size);
+ err = libbpf_get_error(obj->btf);
+ if (err) {
+ pr_warn("failed to parse .BTF from %s: %d\n", filename, err);
+ return err;
+ }
+ sec->skipped = true;
+ continue;
+ }
+ if (strcmp(sec_name, BTF_EXT_ELF_SEC) == 0) {
+ obj->btf_ext = btf_ext__new(data->d_buf, shdr->sh_size);
+ err = libbpf_get_error(obj->btf_ext);
+ if (err) {
+ pr_warn("failed to parse .BTF.ext from '%s': %d\n", filename, err);
+ return err;
+ }
+ sec->skipped = true;
+ continue;
+ }
+
+ /* data & code */
+ break;
+ case SHT_NOBITS:
+ /* BSS */
+ break;
+ case SHT_REL:
+ /* relocations */
+ break;
+ default:
+ pr_warn("unrecognized section #%zu (%s) in %s\n",
+ sec_idx, sec_name, filename);
+ err = -EINVAL;
+ return err;
+ }
+ }
+
+ err = err ?: linker_sanity_check_elf(obj);
+ err = err ?: linker_sanity_check_btf(obj);
+ err = err ?: linker_sanity_check_btf_ext(obj);
+ err = err ?: linker_fixup_btf(obj);
+
+ return err;
+}
+
+static int linker_sanity_check_elf(struct src_obj *obj)
+{
+ struct src_sec *sec;
+ int i, err;
+
+ if (!obj->symtab_sec_idx) {
+ pr_warn("ELF is missing SYMTAB section in %s\n", obj->filename);
+ return -EINVAL;
+ }
+ if (!obj->shstrs_sec_idx) {
+ pr_warn("ELF is missing section headers STRTAB section in %s\n", obj->filename);
+ return -EINVAL;
+ }
+
+ for (i = 1; i < obj->sec_cnt; i++) {
+ sec = &obj->secs[i];
+
+ if (sec->sec_name[0] == '\0') {
+ pr_warn("ELF section #%zu has empty name in %s\n", sec->sec_idx, obj->filename);
+ return -EINVAL;
+ }
+
+ if (sec->shdr->sh_addralign && !is_pow_of_2(sec->shdr->sh_addralign))
+ return -EINVAL;
+ if (sec->shdr->sh_addralign != sec->data->d_align)
+ return -EINVAL;
+
+ if (sec->shdr->sh_size != sec->data->d_size)
+ return -EINVAL;
+
+ switch (sec->shdr->sh_type) {
+ case SHT_SYMTAB:
+ err = linker_sanity_check_elf_symtab(obj, sec);
+ if (err)
+ return err;
+ break;
+ case SHT_STRTAB:
+ break;
+ case SHT_PROGBITS:
+ if (sec->shdr->sh_flags & SHF_EXECINSTR) {
+ if (sec->shdr->sh_size % sizeof(struct bpf_insn) != 0)
+ return -EINVAL;
+ }
+ break;
+ case SHT_NOBITS:
+ break;
+ case SHT_REL:
+ err = linker_sanity_check_elf_relos(obj, sec);
+ if (err)
+ return err;
+ break;
+ case SHT_LLVM_ADDRSIG:
+ break;
+ default:
+ pr_warn("ELF section #%zu (%s) has unrecognized type %zu in %s\n",
+ sec->sec_idx, sec->sec_name, (size_t)sec->shdr->sh_type, obj->filename);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec)
+{
+ struct src_sec *link_sec;
+ Elf64_Sym *sym;
+ int i, n;
+
+ if (sec->shdr->sh_entsize != sizeof(Elf64_Sym))
+ return -EINVAL;
+ if (sec->shdr->sh_size % sec->shdr->sh_entsize != 0)
+ return -EINVAL;
+
+ if (!sec->shdr->sh_link || sec->shdr->sh_link >= obj->sec_cnt) {
+ pr_warn("ELF SYMTAB section #%zu points to missing STRTAB section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename);
+ return -EINVAL;
+ }
+ link_sec = &obj->secs[sec->shdr->sh_link];
+ if (link_sec->shdr->sh_type != SHT_STRTAB) {
+ pr_warn("ELF SYMTAB section #%zu points to invalid STRTAB section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename);
+ return -EINVAL;
+ }
+
+ n = sec->shdr->sh_size / sec->shdr->sh_entsize;
+ sym = sec->data->d_buf;
+ for (i = 0; i < n; i++, sym++) {
+ int sym_type = ELF64_ST_TYPE(sym->st_info);
+ int sym_bind = ELF64_ST_BIND(sym->st_info);
+ int sym_vis = ELF64_ST_VISIBILITY(sym->st_other);
+
+ if (i == 0) {
+ if (sym->st_name != 0 || sym->st_info != 0
+ || sym->st_other != 0 || sym->st_shndx != 0
+ || sym->st_value != 0 || sym->st_size != 0) {
+ pr_warn("ELF sym #0 is invalid in %s\n", obj->filename);
+ return -EINVAL;
+ }
+ continue;
+ }
+ if (sym_bind != STB_LOCAL && sym_bind != STB_GLOBAL && sym_bind != STB_WEAK) {
+ pr_warn("ELF sym #%d in section #%zu has unsupported symbol binding %d\n",
+ i, sec->sec_idx, sym_bind);
+ return -EINVAL;
+ }
+ if (sym_vis != STV_DEFAULT && sym_vis != STV_HIDDEN) {
+ pr_warn("ELF sym #%d in section #%zu has unsupported symbol visibility %d\n",
+ i, sec->sec_idx, sym_vis);
+ return -EINVAL;
+ }
+ if (sym->st_shndx == 0) {
+ if (sym_type != STT_NOTYPE || sym_bind == STB_LOCAL
+ || sym->st_value != 0 || sym->st_size != 0) {
+ pr_warn("ELF sym #%d is invalid extern symbol in %s\n",
+ i, obj->filename);
+
+ return -EINVAL;
+ }
+ continue;
+ }
+ if (sym->st_shndx < SHN_LORESERVE && sym->st_shndx >= obj->sec_cnt) {
+ pr_warn("ELF sym #%d in section #%zu points to missing section #%zu in %s\n",
+ i, sec->sec_idx, (size_t)sym->st_shndx, obj->filename);
+ return -EINVAL;
+ }
+ if (sym_type == STT_SECTION) {
+ if (sym->st_value != 0)
+ return -EINVAL;
+ continue;
+ }
+ }
+
+ return 0;
+}
+
+static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec)
+{
+ struct src_sec *link_sec, *sym_sec;
+ Elf64_Rel *relo;
+ int i, n;
+
+ if (sec->shdr->sh_entsize != sizeof(Elf64_Rel))
+ return -EINVAL;
+ if (sec->shdr->sh_size % sec->shdr->sh_entsize != 0)
+ return -EINVAL;
+
+ /* SHT_REL's sh_link should point to SYMTAB */
+ if (sec->shdr->sh_link != obj->symtab_sec_idx) {
+ pr_warn("ELF relo section #%zu points to invalid SYMTAB section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_link, obj->filename);
+ return -EINVAL;
+ }
+
+ /* SHT_REL's sh_info points to relocated section */
+ if (!sec->shdr->sh_info || sec->shdr->sh_info >= obj->sec_cnt) {
+ pr_warn("ELF relo section #%zu points to missing section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_info, obj->filename);
+ return -EINVAL;
+ }
+ link_sec = &obj->secs[sec->shdr->sh_info];
+
+ /* .rel<secname> -> <secname> pattern is followed */
+ if (strncmp(sec->sec_name, ".rel", sizeof(".rel") - 1) != 0
+ || strcmp(sec->sec_name + sizeof(".rel") - 1, link_sec->sec_name) != 0) {
+ pr_warn("ELF relo section #%zu name has invalid name in %s\n",
+ sec->sec_idx, obj->filename);
+ return -EINVAL;
+ }
+
+ /* don't further validate relocations for ignored sections */
+ if (link_sec->skipped)
+ return 0;
+
+ /* relocatable section is data or instructions */
+ if (link_sec->shdr->sh_type != SHT_PROGBITS && link_sec->shdr->sh_type != SHT_NOBITS) {
+ pr_warn("ELF relo section #%zu points to invalid section #%zu in %s\n",
+ sec->sec_idx, (size_t)sec->shdr->sh_info, obj->filename);
+ return -EINVAL;
+ }
+
+ /* check sanity of each relocation */
+ n = sec->shdr->sh_size / sec->shdr->sh_entsize;
+ relo = sec->data->d_buf;
+ sym_sec = &obj->secs[obj->symtab_sec_idx];
+ for (i = 0; i < n; i++, relo++) {
+ size_t sym_idx = ELF64_R_SYM(relo->r_info);
+ size_t sym_type = ELF64_R_TYPE(relo->r_info);
+
+ if (sym_type != R_BPF_64_64 && sym_type != R_BPF_64_32 &&
+ sym_type != R_BPF_64_ABS64 && sym_type != R_BPF_64_ABS32) {
+ pr_warn("ELF relo #%d in section #%zu has unexpected type %zu in %s\n",
+ i, sec->sec_idx, sym_type, obj->filename);
+ return -EINVAL;
+ }
+
+ if (!sym_idx || sym_idx * sizeof(Elf64_Sym) >= sym_sec->shdr->sh_size) {
+ pr_warn("ELF relo #%d in section #%zu points to invalid symbol #%zu in %s\n",
+ i, sec->sec_idx, sym_idx, obj->filename);
+ return -EINVAL;
+ }
+
+ if (link_sec->shdr->sh_flags & SHF_EXECINSTR) {
+ if (relo->r_offset % sizeof(struct bpf_insn) != 0) {
+ pr_warn("ELF relo #%d in section #%zu points to missing symbol #%zu in %s\n",
+ i, sec->sec_idx, sym_idx, obj->filename);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int check_btf_type_id(__u32 *type_id, void *ctx)
+{
+ struct btf *btf = ctx;
+
+ if (*type_id >= btf__type_cnt(btf))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int check_btf_str_off(__u32 *str_off, void *ctx)
+{
+ struct btf *btf = ctx;
+ const char *s;
+
+ s = btf__str_by_offset(btf, *str_off);
+
+ if (!s)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int linker_sanity_check_btf(struct src_obj *obj)
+{
+ struct btf_type *t;
+ int i, n, err = 0;
+
+ if (!obj->btf)
+ return 0;
+
+ n = btf__type_cnt(obj->btf);
+ for (i = 1; i < n; i++) {
+ t = btf_type_by_id(obj->btf, i);
+
+ err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf);
+ err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int linker_sanity_check_btf_ext(struct src_obj *obj)
+{
+ int err = 0;
+
+ if (!obj->btf_ext)
+ return 0;
+
+ /* can't use .BTF.ext without .BTF */
+ if (!obj->btf)
+ return -EINVAL;
+
+ err = err ?: btf_ext_visit_type_ids(obj->btf_ext, check_btf_type_id, obj->btf);
+ err = err ?: btf_ext_visit_str_offs(obj->btf_ext, check_btf_str_off, obj->btf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int init_sec(struct bpf_linker *linker, struct dst_sec *dst_sec, struct src_sec *src_sec)
+{
+ Elf_Scn *scn;
+ Elf_Data *data;
+ Elf64_Shdr *shdr;
+ int name_off;
+
+ dst_sec->sec_sz = 0;
+ dst_sec->sec_idx = 0;
+ dst_sec->ephemeral = src_sec->ephemeral;
+
+ /* ephemeral sections are just thin section shells lacking most parts */
+ if (src_sec->ephemeral)
+ return 0;
+
+ scn = elf_newscn(linker->elf);
+ if (!scn)
+ return -ENOMEM;
+ data = elf_newdata(scn);
+ if (!data)
+ return -ENOMEM;
+ shdr = elf64_getshdr(scn);
+ if (!shdr)
+ return -ENOMEM;
+
+ dst_sec->scn = scn;
+ dst_sec->shdr = shdr;
+ dst_sec->data = data;
+ dst_sec->sec_idx = elf_ndxscn(scn);
+
+ name_off = strset__add_str(linker->strtab_strs, src_sec->sec_name);
+ if (name_off < 0)
+ return name_off;
+
+ shdr->sh_name = name_off;
+ shdr->sh_type = src_sec->shdr->sh_type;
+ shdr->sh_flags = src_sec->shdr->sh_flags;
+ shdr->sh_size = 0;
+ /* sh_link and sh_info have different meaning for different types of
+ * sections, so we leave it up to the caller code to fill them in, if
+ * necessary
+ */
+ shdr->sh_link = 0;
+ shdr->sh_info = 0;
+ shdr->sh_addralign = src_sec->shdr->sh_addralign;
+ shdr->sh_entsize = src_sec->shdr->sh_entsize;
+
+ data->d_type = src_sec->data->d_type;
+ data->d_size = 0;
+ data->d_buf = NULL;
+ data->d_align = src_sec->data->d_align;
+ data->d_off = 0;
+
+ return 0;
+}
+
+static struct dst_sec *find_dst_sec_by_name(struct bpf_linker *linker, const char *sec_name)
+{
+ struct dst_sec *sec;
+ int i;
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ sec = &linker->secs[i];
+
+ if (strcmp(sec->sec_name, sec_name) == 0)
+ return sec;
+ }
+
+ return NULL;
+}
+
+static bool secs_match(struct dst_sec *dst, struct src_sec *src)
+{
+ if (dst->ephemeral || src->ephemeral)
+ return true;
+
+ if (dst->shdr->sh_type != src->shdr->sh_type) {
+ pr_warn("sec %s types mismatch\n", dst->sec_name);
+ return false;
+ }
+ if (dst->shdr->sh_flags != src->shdr->sh_flags) {
+ pr_warn("sec %s flags mismatch\n", dst->sec_name);
+ return false;
+ }
+ if (dst->shdr->sh_entsize != src->shdr->sh_entsize) {
+ pr_warn("sec %s entsize mismatch\n", dst->sec_name);
+ return false;
+ }
+
+ return true;
+}
+
+static bool sec_content_is_same(struct dst_sec *dst_sec, struct src_sec *src_sec)
+{
+ if (dst_sec->sec_sz != src_sec->shdr->sh_size)
+ return false;
+ if (memcmp(dst_sec->raw_data, src_sec->data->d_buf, dst_sec->sec_sz) != 0)
+ return false;
+ return true;
+}
+
+static int extend_sec(struct bpf_linker *linker, struct dst_sec *dst, struct src_sec *src)
+{
+ void *tmp;
+ size_t dst_align, src_align;
+ size_t dst_align_sz, dst_final_sz;
+ int err;
+
+ /* Ephemeral source section doesn't contribute anything to ELF
+ * section data.
+ */
+ if (src->ephemeral)
+ return 0;
+
+ /* Some sections (like .maps) can contain both externs (and thus be
+ * ephemeral) and non-externs (map definitions). So it's possible that
+ * such a section has to be "upgraded" from ephemeral to non-ephemeral when
+ * the first non-ephemeral entity appears. In such a case, we add the ELF
+ * section, data, etc.
+ */
+ if (dst->ephemeral) {
+ err = init_sec(linker, dst, src);
+ if (err)
+ return err;
+ }
+
+ dst_align = dst->shdr->sh_addralign;
+ src_align = src->shdr->sh_addralign;
+ if (dst_align == 0)
+ dst_align = 1;
+ if (dst_align < src_align)
+ dst_align = src_align;
+
+ dst_align_sz = (dst->sec_sz + dst_align - 1) / dst_align * dst_align;
+
+ /* no need to re-align final size */
+ dst_final_sz = dst_align_sz + src->shdr->sh_size;
+
+ if (src->shdr->sh_type != SHT_NOBITS) {
+ tmp = realloc(dst->raw_data, dst_final_sz);
+ /* If dst_align_sz == 0, realloc() behaves in a special way:
+ * 1. When dst->raw_data is NULL it returns:
+ * "either NULL or a pointer suitable to be passed to free()" [1].
+ * 2. When dst->raw_data is non-NULL it frees dst->raw_data and returns NULL,
+ * thus invalidating any "pointer suitable to be passed to free()" obtained
+ * at step (1).
+ *
+ * The dst_align_sz > 0 check avoids error exit after (2), otherwise
+ * dst->raw_data would be freed again in bpf_linker__free().
+ *
+ * [1] man 3 realloc
+ */
+ if (!tmp && dst_align_sz > 0)
+ return -ENOMEM;
+ dst->raw_data = tmp;
+
+ /* pad dst section, if its alignment forced a size increase */
+ memset(dst->raw_data + dst->sec_sz, 0, dst_align_sz - dst->sec_sz);
+ /* now copy src data at a properly aligned offset */
+ memcpy(dst->raw_data + dst_align_sz, src->data->d_buf, src->shdr->sh_size);
+ }
+
+ dst->sec_sz = dst_final_sz;
+ dst->shdr->sh_size = dst_final_sz;
+ dst->data->d_size = dst_final_sz;
+
+ dst->shdr->sh_addralign = dst_align;
+ dst->data->d_align = dst_align;
+
+ src->dst_off = dst_align_sz;
+
+ return 0;
+}
+
+static bool is_data_sec(struct src_sec *sec)
+{
+ if (!sec || sec->skipped)
+ return false;
+ /* ephemeral sections are data sections, e.g., .kconfig, .ksyms */
+ if (sec->ephemeral)
+ return true;
+ return sec->shdr->sh_type == SHT_PROGBITS || sec->shdr->sh_type == SHT_NOBITS;
+}
+
+static bool is_relo_sec(struct src_sec *sec)
+{
+ if (!sec || sec->skipped || sec->ephemeral)
+ return false;
+ return sec->shdr->sh_type == SHT_REL;
+}
+
+static int linker_append_sec_data(struct bpf_linker *linker, struct src_obj *obj)
+{
+ int i, err;
+
+ for (i = 1; i < obj->sec_cnt; i++) {
+ struct src_sec *src_sec;
+ struct dst_sec *dst_sec;
+
+ src_sec = &obj->secs[i];
+ if (!is_data_sec(src_sec))
+ continue;
+
+ dst_sec = find_dst_sec_by_name(linker, src_sec->sec_name);
+ if (!dst_sec) {
+ dst_sec = add_dst_sec(linker, src_sec->sec_name);
+ if (!dst_sec)
+ return -ENOMEM;
+ err = init_sec(linker, dst_sec, src_sec);
+ if (err) {
+ pr_warn("failed to init section '%s'\n", src_sec->sec_name);
+ return err;
+ }
+ } else {
+ if (!secs_match(dst_sec, src_sec)) {
+ pr_warn("ELF sections %s are incompatible\n", src_sec->sec_name);
+ return -1;
+ }
+
+ /* "license" and "version" sections are deduped */
+ if (strcmp(src_sec->sec_name, "license") == 0
+ || strcmp(src_sec->sec_name, "version") == 0) {
+ if (!sec_content_is_same(dst_sec, src_sec)) {
+ pr_warn("non-identical contents of section '%s' are not supported\n", src_sec->sec_name);
+ return -EINVAL;
+ }
+ src_sec->skipped = true;
+ src_sec->dst_id = dst_sec->id;
+ continue;
+ }
+ }
+
+ /* record mapped section index */
+ src_sec->dst_id = dst_sec->id;
+
+ err = extend_sec(linker, dst_sec, src_sec);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int linker_append_elf_syms(struct bpf_linker *linker, struct src_obj *obj)
+{
+ struct src_sec *symtab = &obj->secs[obj->symtab_sec_idx];
+ Elf64_Sym *sym = symtab->data->d_buf;
+ int i, n = symtab->shdr->sh_size / symtab->shdr->sh_entsize, err;
+ int str_sec_idx = symtab->shdr->sh_link;
+ const char *sym_name;
+
+ obj->sym_map = calloc(n + 1, sizeof(*obj->sym_map));
+ if (!obj->sym_map)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++, sym++) {
+ /* We already validated all-zero symbol #0 and we already
+ * appended it preventively to the final SYMTAB, so skip it.
+ */
+ if (i == 0)
+ continue;
+
+ sym_name = elf_strptr(obj->elf, str_sec_idx, sym->st_name);
+ if (!sym_name) {
+ pr_warn("can't fetch symbol name for symbol #%d in '%s'\n", i, obj->filename);
+ return -EINVAL;
+ }
+
+ err = linker_append_elf_sym(linker, obj, sym, sym_name, i);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static Elf64_Sym *get_sym_by_idx(struct bpf_linker *linker, size_t sym_idx)
+{
+ struct dst_sec *symtab = &linker->secs[linker->symtab_sec_idx];
+ Elf64_Sym *syms = symtab->raw_data;
+
+ return &syms[sym_idx];
+}
+
+static struct glob_sym *find_glob_sym(struct bpf_linker *linker, const char *sym_name)
+{
+ struct glob_sym *glob_sym;
+ const char *name;
+ int i;
+
+ for (i = 0; i < linker->glob_sym_cnt; i++) {
+ glob_sym = &linker->glob_syms[i];
+ name = strset__data(linker->strtab_strs) + glob_sym->name_off;
+
+ if (strcmp(name, sym_name) == 0)
+ return glob_sym;
+ }
+
+ return NULL;
+}
+
+static struct glob_sym *add_glob_sym(struct bpf_linker *linker)
+{
+ struct glob_sym *syms, *sym;
+
+ syms = libbpf_reallocarray(linker->glob_syms, linker->glob_sym_cnt + 1,
+ sizeof(*linker->glob_syms));
+ if (!syms)
+ return NULL;
+
+ sym = &syms[linker->glob_sym_cnt];
+ memset(sym, 0, sizeof(*sym));
+ sym->var_idx = -1;
+
+ linker->glob_syms = syms;
+ linker->glob_sym_cnt++;
+
+ return sym;
+}
+
+static bool glob_sym_btf_matches(const char *sym_name, bool exact,
+ const struct btf *btf1, __u32 id1,
+ const struct btf *btf2, __u32 id2)
+{
+ const struct btf_type *t1, *t2;
+ bool is_static1, is_static2;
+ const char *n1, *n2;
+ int i, n;
+
+recur:
+ n1 = n2 = NULL;
+ t1 = skip_mods_and_typedefs(btf1, id1, &id1);
+ t2 = skip_mods_and_typedefs(btf2, id2, &id2);
+
+ /* check if only one side is FWD, otherwise handle with common logic */
+ if (!exact && btf_is_fwd(t1) != btf_is_fwd(t2)) {
+ n1 = btf__str_by_offset(btf1, t1->name_off);
+ n2 = btf__str_by_offset(btf2, t2->name_off);
+ if (strcmp(n1, n2) != 0) {
+ pr_warn("global '%s': incompatible forward declaration names '%s' and '%s'\n",
+ sym_name, n1, n2);
+ return false;
+ }
+ /* validate if FWD kind matches concrete kind */
+ if (btf_is_fwd(t1)) {
+ if (btf_kflag(t1) && btf_is_union(t2))
+ return true;
+ if (!btf_kflag(t1) && btf_is_struct(t2))
+ return true;
+ pr_warn("global '%s': incompatible %s forward declaration and concrete kind %s\n",
+ sym_name, btf_kflag(t1) ? "union" : "struct", btf_kind_str(t2));
+ } else {
+ if (btf_kflag(t2) && btf_is_union(t1))
+ return true;
+ if (!btf_kflag(t2) && btf_is_struct(t1))
+ return true;
+ pr_warn("global '%s': incompatible %s forward declaration and concrete kind %s\n",
+ sym_name, btf_kflag(t2) ? "union" : "struct", btf_kind_str(t1));
+ }
+ return false;
+ }
+
+ if (btf_kind(t1) != btf_kind(t2)) {
+ pr_warn("global '%s': incompatible BTF kinds %s and %s\n",
+ sym_name, btf_kind_str(t1), btf_kind_str(t2));
+ return false;
+ }
+
+ switch (btf_kind(t1)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ case BTF_KIND_FWD:
+ case BTF_KIND_FUNC:
+ case BTF_KIND_VAR:
+ n1 = btf__str_by_offset(btf1, t1->name_off);
+ n2 = btf__str_by_offset(btf2, t2->name_off);
+ if (strcmp(n1, n2) != 0) {
+ pr_warn("global '%s': incompatible %s names '%s' and '%s'\n",
+ sym_name, btf_kind_str(t1), n1, n2);
+ return false;
+ }
+ break;
+ default:
+ break;
+ }
+
+ switch (btf_kind(t1)) {
+ case BTF_KIND_UNKN: /* void */
+ case BTF_KIND_FWD:
+ return true;
+ case BTF_KIND_INT:
+ case BTF_KIND_FLOAT:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ /* ignore encoding for int, and enumerator values for enum */
+ if (t1->size != t2->size) {
+ pr_warn("global '%s': incompatible %s '%s' size %u and %u\n",
+ sym_name, btf_kind_str(t1), n1, t1->size, t2->size);
+ return false;
+ }
+ return true;
+ case BTF_KIND_PTR:
+ /* just validate overall shape of the referenced type, so no
+ * contents comparison for struct/union, and allow fwd vs
+ * struct/union
+ */
+ exact = false;
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ case BTF_KIND_ARRAY:
+ /* ignore index type and array size */
+ id1 = btf_array(t1)->type;
+ id2 = btf_array(t2)->type;
+ goto recur;
+ case BTF_KIND_FUNC:
+ /* extern and global linkages are compatible */
+ is_static1 = btf_func_linkage(t1) == BTF_FUNC_STATIC;
+ is_static2 = btf_func_linkage(t2) == BTF_FUNC_STATIC;
+ if (is_static1 != is_static2) {
+ pr_warn("global '%s': incompatible func '%s' linkage\n", sym_name, n1);
+ return false;
+ }
+
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ case BTF_KIND_VAR:
+ /* extern and global linkages are compatible */
+ is_static1 = btf_var(t1)->linkage == BTF_VAR_STATIC;
+ is_static2 = btf_var(t2)->linkage == BTF_VAR_STATIC;
+ if (is_static1 != is_static2) {
+ pr_warn("global '%s': incompatible var '%s' linkage\n", sym_name, n1);
+ return false;
+ }
+
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION: {
+ const struct btf_member *m1, *m2;
+
+ if (!exact)
+ return true;
+
+ if (btf_vlen(t1) != btf_vlen(t2)) {
+ pr_warn("global '%s': incompatible number of %s fields %u and %u\n",
+ sym_name, btf_kind_str(t1), btf_vlen(t1), btf_vlen(t2));
+ return false;
+ }
+
+ n = btf_vlen(t1);
+ m1 = btf_members(t1);
+ m2 = btf_members(t2);
+ for (i = 0; i < n; i++, m1++, m2++) {
+ n1 = btf__str_by_offset(btf1, m1->name_off);
+ n2 = btf__str_by_offset(btf2, m2->name_off);
+ if (strcmp(n1, n2) != 0) {
+ pr_warn("global '%s': incompatible field #%d names '%s' and '%s'\n",
+ sym_name, i, n1, n2);
+ return false;
+ }
+ if (m1->offset != m2->offset) {
+ pr_warn("global '%s': incompatible field #%d ('%s') offsets\n",
+ sym_name, i, n1);
+ return false;
+ }
+ if (!glob_sym_btf_matches(sym_name, exact, btf1, m1->type, btf2, m2->type))
+ return false;
+ }
+
+ return true;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ const struct btf_param *m1, *m2;
+
+ if (btf_vlen(t1) != btf_vlen(t2)) {
+ pr_warn("global '%s': incompatible number of %s params %u and %u\n",
+ sym_name, btf_kind_str(t1), btf_vlen(t1), btf_vlen(t2));
+ return false;
+ }
+
+ n = btf_vlen(t1);
+ m1 = btf_params(t1);
+ m2 = btf_params(t2);
+ for (i = 0; i < n; i++, m1++, m2++) {
+ /* ignore func arg names */
+ if (!glob_sym_btf_matches(sym_name, exact, btf1, m1->type, btf2, m2->type))
+ return false;
+ }
+
+ /* now check return type as well */
+ id1 = t1->type;
+ id2 = t2->type;
+ goto recur;
+ }
+
+ /* skip_mods_and_typedefs() makes this impossible */
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_CONST:
+ case BTF_KIND_RESTRICT:
+ /* DATASECs are never compared with each other */
+ case BTF_KIND_DATASEC:
+ default:
+ pr_warn("global '%s': unsupported BTF kind %s\n",
+ sym_name, btf_kind_str(t1));
+ return false;
+ }
+}
+
+static bool map_defs_match(const char *sym_name,
+ const struct btf *main_btf,
+ const struct btf_map_def *main_def,
+ const struct btf_map_def *main_inner_def,
+ const struct btf *extra_btf,
+ const struct btf_map_def *extra_def,
+ const struct btf_map_def *extra_inner_def)
+{
+ const char *reason;
+
+ if (main_def->map_type != extra_def->map_type) {
+ reason = "type";
+ goto mismatch;
+ }
+
+ /* check key type/size match */
+ if (main_def->key_size != extra_def->key_size) {
+ reason = "key_size";
+ goto mismatch;
+ }
+ if (!!main_def->key_type_id != !!extra_def->key_type_id) {
+ reason = "key type";
+ goto mismatch;
+ }
+ if ((main_def->parts & MAP_DEF_KEY_TYPE)
+ && !glob_sym_btf_matches(sym_name, true /*exact*/,
+ main_btf, main_def->key_type_id,
+ extra_btf, extra_def->key_type_id)) {
+ reason = "key type";
+ goto mismatch;
+ }
+
+ /* validate value type/size match */
+ if (main_def->value_size != extra_def->value_size) {
+ reason = "value_size";
+ goto mismatch;
+ }
+ if (!!main_def->value_type_id != !!extra_def->value_type_id) {
+ reason = "value type";
+ goto mismatch;
+ }
+ if ((main_def->parts & MAP_DEF_VALUE_TYPE)
+ && !glob_sym_btf_matches(sym_name, true /*exact*/,
+ main_btf, main_def->value_type_id,
+ extra_btf, extra_def->value_type_id)) {
+ reason = "key type";
+ goto mismatch;
+ }
+
+ if (main_def->max_entries != extra_def->max_entries) {
+ reason = "max_entries";
+ goto mismatch;
+ }
+ if (main_def->map_flags != extra_def->map_flags) {
+ reason = "map_flags";
+ goto mismatch;
+ }
+ if (main_def->numa_node != extra_def->numa_node) {
+ reason = "numa_node";
+ goto mismatch;
+ }
+ if (main_def->pinning != extra_def->pinning) {
+ reason = "pinning";
+ goto mismatch;
+ }
+
+ if ((main_def->parts & MAP_DEF_INNER_MAP) != (extra_def->parts & MAP_DEF_INNER_MAP)) {
+ reason = "inner map";
+ goto mismatch;
+ }
+
+ if (main_def->parts & MAP_DEF_INNER_MAP) {
+ char inner_map_name[128];
+
+ snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", sym_name);
+
+ return map_defs_match(inner_map_name,
+ main_btf, main_inner_def, NULL,
+ extra_btf, extra_inner_def, NULL);
+ }
+
+ return true;
+
+mismatch:
+ pr_warn("global '%s': map %s mismatch\n", sym_name, reason);
+ return false;
+}
+
+static bool glob_map_defs_match(const char *sym_name,
+ struct bpf_linker *linker, struct glob_sym *glob_sym,
+ struct src_obj *obj, Elf64_Sym *sym, int btf_id)
+{
+ struct btf_map_def dst_def = {}, dst_inner_def = {};
+ struct btf_map_def src_def = {}, src_inner_def = {};
+ const struct btf_type *t;
+ int err;
+
+ t = btf__type_by_id(obj->btf, btf_id);
+ if (!btf_is_var(t)) {
+ pr_warn("global '%s': invalid map definition type [%d]\n", sym_name, btf_id);
+ return false;
+ }
+ t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
+
+ err = parse_btf_map_def(sym_name, obj->btf, t, true /*strict*/, &src_def, &src_inner_def);
+ if (err) {
+ pr_warn("global '%s': invalid map definition\n", sym_name);
+ return false;
+ }
+
+ /* re-parse existing map definition */
+ t = btf__type_by_id(linker->btf, glob_sym->btf_id);
+ t = skip_mods_and_typedefs(linker->btf, t->type, NULL);
+ err = parse_btf_map_def(sym_name, linker->btf, t, true /*strict*/, &dst_def, &dst_inner_def);
+ if (err) {
+ /* this should not happen, because we already validated it */
+ pr_warn("global '%s': invalid dst map definition\n", sym_name);
+ return false;
+ }
+
+ /* Currently an extern map definition has to be complete and match the
+ * concrete map definition exactly. This restriction might be lifted
+ * in the future.
+ */
+ return map_defs_match(sym_name, linker->btf, &dst_def, &dst_inner_def,
+ obj->btf, &src_def, &src_inner_def);
+}
+
+static bool glob_syms_match(const char *sym_name,
+ struct bpf_linker *linker, struct glob_sym *glob_sym,
+ struct src_obj *obj, Elf64_Sym *sym, size_t sym_idx, int btf_id)
+{
+ const struct btf_type *src_t;
+
+ /* if we are dealing with externs, BTF types describing both global
+ * and extern VARs/FUNCs should be completely present in all files
+ */
+ if (!glob_sym->btf_id || !btf_id) {
+ pr_warn("BTF info is missing for global symbol '%s'\n", sym_name);
+ return false;
+ }
+
+ src_t = btf__type_by_id(obj->btf, btf_id);
+ if (!btf_is_var(src_t) && !btf_is_func(src_t)) {
+ pr_warn("only extern variables and functions are supported, but got '%s' for '%s'\n",
+ btf_kind_str(src_t), sym_name);
+ return false;
+ }
+
+ /* deal with .maps definitions specially */
+ if (glob_sym->sec_id && strcmp(linker->secs[glob_sym->sec_id].sec_name, MAPS_ELF_SEC) == 0)
+ return glob_map_defs_match(sym_name, linker, glob_sym, obj, sym, btf_id);
+
+ if (!glob_sym_btf_matches(sym_name, true /*exact*/,
+ linker->btf, glob_sym->btf_id, obj->btf, btf_id))
+ return false;
+
+ return true;
+}
+
+static bool btf_is_non_static(const struct btf_type *t)
+{
+ return (btf_is_var(t) && btf_var(t)->linkage != BTF_VAR_STATIC)
+ || (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_STATIC);
+}
+
+static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,
+ int *out_btf_sec_id, int *out_btf_id)
+{
+ int i, j, n, m, btf_id = 0;
+ const struct btf_type *t;
+ const struct btf_var_secinfo *vi;
+ const char *name;
+
+ if (!obj->btf) {
+ pr_warn("failed to find BTF info for object '%s'\n", obj->filename);
+ return -EINVAL;
+ }
+
+ n = btf__type_cnt(obj->btf);
+ for (i = 1; i < n; i++) {
+ t = btf__type_by_id(obj->btf, i);
+
+ /* some global and extern FUNCs and VARs might not be associated with any
+ * DATASEC, so try to detect them in the same pass
+ */
+ if (btf_is_non_static(t)) {
+ name = btf__str_by_offset(obj->btf, t->name_off);
+ if (strcmp(name, sym_name) != 0)
+ continue;
+
+ /* remember and still try to find DATASEC */
+ btf_id = i;
+ continue;
+ }
+
+ if (!btf_is_datasec(t))
+ continue;
+
+ vi = btf_var_secinfos(t);
+ for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
+ t = btf__type_by_id(obj->btf, vi->type);
+ name = btf__str_by_offset(obj->btf, t->name_off);
+
+ if (strcmp(name, sym_name) != 0)
+ continue;
+ if (btf_is_var(t) && btf_var(t)->linkage == BTF_VAR_STATIC)
+ continue;
+ if (btf_is_func(t) && btf_func_linkage(t) == BTF_FUNC_STATIC)
+ continue;
+
+ if (btf_id && btf_id != vi->type) {
+ pr_warn("global/extern '%s' BTF is ambiguous: both types #%d and #%u match\n",
+ sym_name, btf_id, vi->type);
+ return -EINVAL;
+ }
+
+ *out_btf_sec_id = i;
+ *out_btf_id = vi->type;
+
+ return 0;
+ }
+ }
+
+ /* free-floating extern or global FUNC */
+ if (btf_id) {
+ *out_btf_sec_id = 0;
+ *out_btf_id = btf_id;
+ return 0;
+ }
+
+ pr_warn("failed to find BTF info for global/extern symbol '%s'\n", sym_name);
+ return -ENOENT;
+}
+
+static struct src_sec *find_src_sec_by_name(struct src_obj *obj, const char *sec_name)
+{
+ struct src_sec *sec;
+ int i;
+
+ for (i = 1; i < obj->sec_cnt; i++) {
+ sec = &obj->secs[i];
+
+ if (strcmp(sec->sec_name, sec_name) == 0)
+ return sec;
+ }
+
+ return NULL;
+}
+
+static int complete_extern_btf_info(struct btf *dst_btf, int dst_id,
+ struct btf *src_btf, int src_id)
+{
+ struct btf_type *dst_t = btf_type_by_id(dst_btf, dst_id);
+ struct btf_type *src_t = btf_type_by_id(src_btf, src_id);
+ struct btf_param *src_p, *dst_p;
+ const char *s;
+ int i, n, off;
+
+ /* We already made sure that source and destination types (FUNC or
+ * VAR) match in terms of types and argument names.
+ */
+ if (btf_is_var(dst_t)) {
+ btf_var(dst_t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
+ return 0;
+ }
+
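+ /* the extern FUNC is being resolved, so mark its linkage as global */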
+ dst_t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_GLOBAL, 0);
+
+ /* now onto FUNC_PROTO types */
+ src_t = btf_type_by_id(src_btf, src_t->type);
+ dst_t = btf_type_by_id(dst_btf, dst_t->type);
+
+ /* Fill in all the argument names, which for extern FUNCs are missing.
+ * We'll end up with two copies of FUNCs/VARs for externs, but that
+ * will be taken care of by BTF dedup at the very end.
+ * It might be that BTF types for an extern in one file have less/more BTF
+ * information (e.g., FWD instead of full STRUCT/UNION information),
+ * but that should be (in most cases, subject to BTF dedup rules)
+ * handled and resolved by BTF dedup algorithm as well, so we won't
+ * worry about it. Our only job is to make sure that argument names
+ * are populated on both sides, otherwise BTF dedup will pedantically
+ * consider them different.
+ */
+ src_p = btf_params(src_t);
+ dst_p = btf_params(dst_t);
+ for (i = 0, n = btf_vlen(dst_t); i < n; i++, src_p++, dst_p++) {
+ if (!src_p->name_off)
+ continue;
+
+ /* src_btf has more complete info, so add name to dst_btf */
+ s = btf__str_by_offset(src_btf, src_p->name_off);
+ off = btf__add_str(dst_btf, s);
+ if (off < 0)
+ return off;
+ dst_p->name_off = off;
+ }
+ return 0;
+}
+
+static void sym_update_bind(Elf64_Sym *sym, int sym_bind)
+{
+ sym->st_info = ELF64_ST_INFO(sym_bind, ELF64_ST_TYPE(sym->st_info));
+}
+
+static void sym_update_type(Elf64_Sym *sym, int sym_type)
+{
+ sym->st_info = ELF64_ST_INFO(ELF64_ST_BIND(sym->st_info), sym_type);
+}
+
+static void sym_update_visibility(Elf64_Sym *sym, int sym_vis)
+{
+ /* libelf doesn't provide setters for ST_VISIBILITY,
+ * but it is stored in the lower 2 bits of st_other
+ */
+ sym->st_other &= ~0x03;
+ sym->st_other |= sym_vis;
+}
+
+static int linker_append_elf_sym(struct bpf_linker *linker, struct src_obj *obj,
+ Elf64_Sym *sym, const char *sym_name, int src_sym_idx)
+{
+ struct src_sec *src_sec = NULL;
+ struct dst_sec *dst_sec = NULL;
+ struct glob_sym *glob_sym = NULL;
+ int name_off, sym_type, sym_bind, sym_vis, err;
+ int btf_sec_id = 0, btf_id = 0;
+ size_t dst_sym_idx;
+ Elf64_Sym *dst_sym;
+ bool sym_is_extern;
+
+ sym_type = ELF64_ST_TYPE(sym->st_info);
+ sym_bind = ELF64_ST_BIND(sym->st_info);
+ sym_vis = ELF64_ST_VISIBILITY(sym->st_other);
+ sym_is_extern = sym->st_shndx == SHN_UNDEF;
+
+ if (sym_is_extern) {
+ if (!obj->btf) {
+ pr_warn("externs without BTF info are not supported\n");
+ return -ENOTSUP;
+ }
+ } else if (sym->st_shndx < SHN_LORESERVE) {
+ src_sec = &obj->secs[sym->st_shndx];
+ if (src_sec->skipped)
+ return 0;
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ /* allow only one STT_SECTION symbol per section */
+ if (sym_type == STT_SECTION && dst_sec->sec_sym_idx) {
+ obj->sym_map[src_sym_idx] = dst_sec->sec_sym_idx;
+ return 0;
+ }
+ }
+
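+ /* local symbols are appended as-is and skip global symbol resolution */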
+ if (sym_bind == STB_LOCAL)
+ goto add_sym;
+
+ /* find matching BTF info */
+ err = find_glob_sym_btf(obj, sym, sym_name, &btf_sec_id, &btf_id);
+ if (err)
+ return err;
+
+ if (sym_is_extern && btf_sec_id) {
+ const char *sec_name = NULL;
+ const struct btf_type *t;
+
+ t = btf__type_by_id(obj->btf, btf_sec_id);
+ sec_name = btf__str_by_offset(obj->btf, t->name_off);
+
+ /* Clang puts unannotated extern vars into
+ * '.extern' BTF DATASEC. Treat them the same
+ * as unannotated extern funcs (which are
+ * currently not put into any DATASECs).
+ * Those don't have associated src_sec/dst_sec.
+ */
+ if (strcmp(sec_name, BTF_EXTERN_SEC) != 0) {
+ src_sec = find_src_sec_by_name(obj, sec_name);
+ if (!src_sec) {
+ pr_warn("failed to find matching ELF sec '%s'\n", sec_name);
+ return -ENOENT;
+ }
+ dst_sec = &linker->secs[src_sec->dst_id];
+ }
+ }
+
+ glob_sym = find_glob_sym(linker, sym_name);
+ if (glob_sym) {
+ /* Preventively resolve to existing symbol. This is
+ * needed for further relocation symbol remapping in
+ * the next step of linking.
+ */
+ obj->sym_map[src_sym_idx] = glob_sym->sym_idx;
+
+ /* If both symbols are non-externs, at least one of
+ * them has to be STB_WEAK, otherwise they are in
+ * a conflict with each other.
+ */
+ if (!sym_is_extern && !glob_sym->is_extern
+ && !glob_sym->is_weak && sym_bind != STB_WEAK) {
+ pr_warn("conflicting non-weak symbol #%d (%s) definition in '%s'\n",
+ src_sym_idx, sym_name, obj->filename);
+ return -EINVAL;
+ }
+
+ if (!glob_syms_match(sym_name, linker, glob_sym, obj, sym, src_sym_idx, btf_id))
+ return -EINVAL;
+
+ dst_sym = get_sym_by_idx(linker, glob_sym->sym_idx);
+
+ /* If new symbol is strong, then force dst_sym to be strong as
+ * well; this way a mix of weak and non-weak extern
+ * definitions will end up being strong.
+ */
+ if (sym_bind == STB_GLOBAL) {
+ /* We still need to preserve type (NOTYPE or
+ * OBJECT/FUNC, depending on whether the symbol is
+ * extern or not)
+ */
+ sym_update_bind(dst_sym, STB_GLOBAL);
+ glob_sym->is_weak = false;
+ }
+
+ /* Non-default visibility is "contaminating", with stricter
+ * visibility overwriting more permissive ones, even if more
+ * permissive visibility comes from just an extern definition.
+ * Currently only STV_DEFAULT and STV_HIDDEN are allowed and
+ * ensured by ELF symbol sanity checks above.
+ */
+ if (sym_vis > ELF64_ST_VISIBILITY(dst_sym->st_other))
+ sym_update_visibility(dst_sym, sym_vis);
+
+ /* If the new symbol is extern, then regardless if
+ * existing symbol is extern or resolved global, just
+ * keep the existing one untouched.
+ */
+ if (sym_is_extern)
+ return 0;
+
+ /* If existing symbol is a strong resolved symbol, bail out,
+ * because we lost the resolution battle and have nothing to
+ * contribute. We already checked above that there is no
+ * strong-strong conflict. We also already tightened binding
+ * and visibility, so nothing else to contribute at that point.
+ */
+ if (!glob_sym->is_extern && sym_bind == STB_WEAK)
+ return 0;
+
+ /* At this point, new symbol is strong non-extern,
+ * so overwrite glob_sym with new symbol information.
+ * Preserve binding and visibility.
+ */
+ sym_update_type(dst_sym, sym_type);
+ dst_sym->st_shndx = dst_sec->sec_idx;
+ dst_sym->st_value = src_sec->dst_off + sym->st_value;
+ dst_sym->st_size = sym->st_size;
+
+ /* see comment below about dst_sec->id vs dst_sec->sec_idx */
+ glob_sym->sec_id = dst_sec->id;
+ glob_sym->is_extern = false;
+
+ if (complete_extern_btf_info(linker->btf, glob_sym->btf_id,
+ obj->btf, btf_id))
+ return -EINVAL;
+
+ /* request updating VAR's/FUNC's underlying BTF type when appending BTF type */
+ glob_sym->underlying_btf_id = 0;
+
+ obj->sym_map[src_sym_idx] = glob_sym->sym_idx;
+ return 0;
+ }
+
+add_sym:
+ name_off = strset__add_str(linker->strtab_strs, sym_name);
+ if (name_off < 0)
+ return name_off;
+
+ dst_sym = add_new_sym(linker, &dst_sym_idx);
+ if (!dst_sym)
+ return -ENOMEM;
+
+ dst_sym->st_name = name_off;
+ dst_sym->st_info = sym->st_info;
+ dst_sym->st_other = sym->st_other;
+ dst_sym->st_shndx = dst_sec ? dst_sec->sec_idx : sym->st_shndx;
+ dst_sym->st_value = (src_sec ? src_sec->dst_off : 0) + sym->st_value;
+ dst_sym->st_size = sym->st_size;
+
+ obj->sym_map[src_sym_idx] = dst_sym_idx;
+
+ if (sym_type == STT_SECTION && dst_sym) {
+ dst_sec->sec_sym_idx = dst_sym_idx;
+ dst_sym->st_value = 0;
+ }
+
+ if (sym_bind != STB_LOCAL) {
+ glob_sym = add_glob_sym(linker);
+ if (!glob_sym)
+ return -ENOMEM;
+
+ glob_sym->sym_idx = dst_sym_idx;
+ /* we use dst_sec->id (and not dst_sec->sec_idx), because
+ * ephemeral sections (.kconfig, .ksyms, etc) don't have
+ * sec_idx (as they don't have corresponding ELF section), but
+ * still have id. .extern doesn't even have an ephemeral section
+ * associated with it, so dst_sec->id == dst_sec->sec_idx == 0.
+ */
+ glob_sym->sec_id = dst_sec ? dst_sec->id : 0;
+ glob_sym->name_off = name_off;
+ /* we will fill btf_id in during BTF merging step */
+ glob_sym->btf_id = 0;
+ glob_sym->is_extern = sym_is_extern;
+ glob_sym->is_weak = sym_bind == STB_WEAK;
+ }
+
+ return 0;
+}
+
+static int linker_append_elf_relos(struct bpf_linker *linker, struct src_obj *obj)
+{
+ struct src_sec *src_symtab = &obj->secs[obj->symtab_sec_idx];
+ int i, err;
+
+ for (i = 1; i < obj->sec_cnt; i++) {
+ struct src_sec *src_sec, *src_linked_sec;
+ struct dst_sec *dst_sec, *dst_linked_sec;
+ Elf64_Rel *src_rel, *dst_rel;
+ int j, n;
+
+ src_sec = &obj->secs[i];
+ if (!is_relo_sec(src_sec))
+ continue;
+
+ /* shdr->sh_info points to relocatable section */
+ src_linked_sec = &obj->secs[src_sec->shdr->sh_info];
+ if (src_linked_sec->skipped)
+ continue;
+
+ dst_sec = find_dst_sec_by_name(linker, src_sec->sec_name);
+ if (!dst_sec) {
+ dst_sec = add_dst_sec(linker, src_sec->sec_name);
+ if (!dst_sec)
+ return -ENOMEM;
+ err = init_sec(linker, dst_sec, src_sec);
+ if (err) {
+ pr_warn("failed to init section '%s'\n", src_sec->sec_name);
+ return err;
+ }
+ } else if (!secs_match(dst_sec, src_sec)) {
+ pr_warn("sections %s are not compatible\n", src_sec->sec_name);
+ return -1;
+ }
+
+ /* shdr->sh_link points to SYMTAB */
+ dst_sec->shdr->sh_link = linker->symtab_sec_idx;
+
+ /* shdr->sh_info points to relocated section */
+ dst_linked_sec = &linker->secs[src_linked_sec->dst_id];
+ dst_sec->shdr->sh_info = dst_linked_sec->sec_idx;
+
+ src_sec->dst_id = dst_sec->id;
+ err = extend_sec(linker, dst_sec, src_sec);
+ if (err)
+ return err;
+
+ src_rel = src_sec->data->d_buf;
+ dst_rel = dst_sec->raw_data + src_sec->dst_off;
+ n = src_sec->shdr->sh_size / src_sec->shdr->sh_entsize;
+ for (j = 0; j < n; j++, src_rel++, dst_rel++) {
+ size_t src_sym_idx, dst_sym_idx, sym_type;
+ Elf64_Sym *src_sym;
+
+ src_sym_idx = ELF64_R_SYM(src_rel->r_info);
+ src_sym = src_symtab->data->d_buf + sizeof(*src_sym) * src_sym_idx;
+
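+ /* remap symbol index and shift relocation offset into the merged output section */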
+ dst_sym_idx = obj->sym_map[src_sym_idx];
+ dst_rel->r_offset += src_linked_sec->dst_off;
+ sym_type = ELF64_R_TYPE(src_rel->r_info);
+ dst_rel->r_info = ELF64_R_INFO(dst_sym_idx, sym_type);
+
+ if (ELF64_ST_TYPE(src_sym->st_info) == STT_SECTION) {
+ struct src_sec *sec = &obj->secs[src_sym->st_shndx];
+ struct bpf_insn *insn;
+
+ if (src_linked_sec->shdr->sh_flags & SHF_EXECINSTR) {
+ /* calls to the very first static function inside
+ * the .text section at offset 0 will
+ * reference the section symbol, not the
+ * function symbol. Fix that up,
+ * otherwise it won't be possible to
+ * relocate calls to two different
+ * static functions with the same name
+ * (from two different object files)
+ */
+ insn = dst_linked_sec->raw_data + dst_rel->r_offset;
+ if (insn->code == (BPF_JMP | BPF_CALL))
+ insn->imm += sec->dst_off / sizeof(struct bpf_insn);
+ else
+ insn->imm += sec->dst_off;
+ } else {
+ pr_warn("relocation against STT_SECTION in non-exec section is not supported!\n");
+ return -EINVAL;
+ }
+ }
+
+ }
+ }
+
+ return 0;
+}
+
+static Elf64_Sym *find_sym_by_name(struct src_obj *obj, size_t sec_idx,
+ int sym_type, const char *sym_name)
+{
+ struct src_sec *symtab = &obj->secs[obj->symtab_sec_idx];
+ Elf64_Sym *sym = symtab->data->d_buf;
+ int i, n = symtab->shdr->sh_size / symtab->shdr->sh_entsize;
+ int str_sec_idx = symtab->shdr->sh_link;
+ const char *name;
+
+ for (i = 0; i < n; i++, sym++) {
+ if (sym->st_shndx != sec_idx)
+ continue;
+ if (ELF64_ST_TYPE(sym->st_info) != sym_type)
+ continue;
+
+ name = elf_strptr(obj->elf, str_sec_idx, sym->st_name);
+ if (!name)
+ return NULL;
+
+ if (strcmp(sym_name, name) != 0)
+ continue;
+
+ return sym;
+ }
+
+ return NULL;
+}
+
+static int linker_fixup_btf(struct src_obj *obj)
+{
+ const char *sec_name;
+ struct src_sec *sec;
+ int i, j, n, m;
+
+ if (!obj->btf)
+ return 0;
+
+ n = btf__type_cnt(obj->btf);
+ for (i = 1; i < n; i++) {
+ struct btf_var_secinfo *vi;
+ struct btf_type *t;
+
+ t = btf_type_by_id(obj->btf, i);
+ if (btf_kind(t) != BTF_KIND_DATASEC)
+ continue;
+
+ sec_name = btf__str_by_offset(obj->btf, t->name_off);
+ sec = find_src_sec_by_name(obj, sec_name);
+ if (sec) {
+ /* record actual section size, unless ephemeral */
+ if (sec->shdr)
+ t->size = sec->shdr->sh_size;
+ } else {
+ /* BTF can have some sections that are not represented
+ * in ELF, e.g., .kconfig, .ksyms, .extern, which are used
+ * for special extern variables.
+ *
+ * For all but one of these special (ephemeral)
+ * sections, we pre-create "section shells" to be able
+ * to keep track of extra per-section metadata later
+ * (e.g., those BTF extern variables).
+ *
+ * .extern is even more special, though, because it
+ * contains extern variables that need to be resolved
+ * by static linker, not libbpf and kernel. When such
+ * externs are resolved, we are going to remove them
+ * from .extern BTF section and might end up not
+ * needing it at all. Each resolved extern should have
+ * matching non-extern VAR/FUNC in other sections.
+ *
+ * We do support leaving some of the externs
+ * unresolved, though, to support cases of building
+ * libraries, which will later be linked against final
+ * BPF applications. So if at finalization we still
+ * see unresolved externs, we'll create .extern
+ * section on our own.
+ */
+ if (strcmp(sec_name, BTF_EXTERN_SEC) == 0)
+ continue;
+
+ sec = add_src_sec(obj, sec_name);
+ if (!sec)
+ return -ENOMEM;
+
+ sec->ephemeral = true;
+ sec->sec_idx = 0; /* will match UNDEF shndx in ELF */
+ }
+
+ /* remember ELF section and its BTF type ID match */
+ sec->sec_type_id = i;
+
+ /* fix up variable offsets */
+ vi = btf_var_secinfos(t);
+ for (j = 0, m = btf_vlen(t); j < m; j++, vi++) {
+ const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type);
+ const char *var_name = btf__str_by_offset(obj->btf, vt->name_off);
+ int var_linkage = btf_var(vt)->linkage;
+ Elf64_Sym *sym;
+
+ /* no need to patch up static or extern vars */
+ if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED)
+ continue;
+
+ sym = find_sym_by_name(obj, sec->sec_idx, STT_OBJECT, var_name);
+ if (!sym) {
+ pr_warn("failed to find symbol for variable '%s' in section '%s'\n", var_name, sec_name);
+ return -ENOENT;
+ }
+
+ vi->offset = sym->st_value;
+ }
+ }
+
+ return 0;
+}
+
+static int remap_type_id(__u32 *type_id, void *ctx)
+{
+ int *id_map = ctx;
+ int new_id = id_map[*type_id];
+
+ /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */
+ if (new_id == 0 && *type_id != 0) {
+ pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id);
+ return -EINVAL;
+ }
+
+ *type_id = id_map[*type_id];
+
+ return 0;
+}
+
+static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj)
+{
+ const struct btf_type *t;
+ int i, j, n, start_id, id;
+ const char *name;
+
+ if (!obj->btf)
+ return 0;
+
+ start_id = btf__type_cnt(linker->btf);
+ n = btf__type_cnt(obj->btf);
+
+ obj->btf_type_map = calloc(n + 1, sizeof(int));
+ if (!obj->btf_type_map)
+ return -ENOMEM;
+
+ for (i = 1; i < n; i++) {
+ struct glob_sym *glob_sym = NULL;
+
+ t = btf__type_by_id(obj->btf, i);
+
+ /* DATASECs are handled specially below */
+ if (btf_kind(t) == BTF_KIND_DATASEC)
+ continue;
+
+ if (btf_is_non_static(t)) {
+ /* there should be glob_sym already */
+ name = btf__str_by_offset(obj->btf, t->name_off);
+ glob_sym = find_glob_sym(linker, name);
+
+ /* VARs without corresponding glob_sym are those that
+ * belong to skipped/deduplicated sections (i.e.,
+ * license and version), so just skip them
+ */
+ if (!glob_sym)
+ continue;
+
+ /* linker_append_elf_sym() might have requested
+ * updating underlying type ID, if extern was resolved
+ * to strong symbol or weak got upgraded to non-weak
+ */
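+ /* a negative value marks an ID in obj->btf that still needs remapping below */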
+ if (glob_sym->underlying_btf_id == 0)
+ glob_sym->underlying_btf_id = -t->type;
+
+ /* globals from previous object files that match our
+ * VAR/FUNC already have a corresponding associated
+ * BTF type, so just make sure to use it
+ */
+ if (glob_sym->btf_id) {
+ /* reuse existing BTF type for global var/func */
+ obj->btf_type_map[i] = glob_sym->btf_id;
+ continue;
+ }
+ }
+
+ id = btf__add_type(linker->btf, obj->btf, t);
+ if (id < 0) {
+ pr_warn("failed to append BTF type #%d from file '%s'\n", i, obj->filename);
+ return id;
+ }
+
+ obj->btf_type_map[i] = id;
+
+ /* record just appended BTF type for var/func */
+ if (glob_sym) {
+ glob_sym->btf_id = id;
+ glob_sym->underlying_btf_id = -t->type;
+ }
+ }
+
+ /* remap all the types except DATASECs */
+ n = btf__type_cnt(linker->btf);
+ for (i = start_id; i < n; i++) {
+ struct btf_type *dst_t = btf_type_by_id(linker->btf, i);
+
+ if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map))
+ return -EINVAL;
+ }
+
+ /* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's
+ * actual type), if necessary
+ */
+ for (i = 0; i < linker->glob_sym_cnt; i++) {
+ struct glob_sym *glob_sym = &linker->glob_syms[i];
+ struct btf_type *glob_t;
+
+ if (glob_sym->underlying_btf_id >= 0)
+ continue;
+
+ glob_sym->underlying_btf_id = obj->btf_type_map[-glob_sym->underlying_btf_id];
+
+ glob_t = btf_type_by_id(linker->btf, glob_sym->btf_id);
+ glob_t->type = glob_sym->underlying_btf_id;
+ }
+
+ /* append DATASEC info */
+ for (i = 1; i < obj->sec_cnt; i++) {
+ struct src_sec *src_sec;
+ struct dst_sec *dst_sec;
+ const struct btf_var_secinfo *src_var;
+ struct btf_var_secinfo *dst_var;
+
+ src_sec = &obj->secs[i];
+ if (!src_sec->sec_type_id || src_sec->skipped)
+ continue;
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ /* Mark section as having BTF regardless of the presence of
+ * variables. In some cases the compiler might generate empty BTF
+ * with no variable information, e.g., when promoting local
+ * array/structure variable initial values while the BPF object
+ * file otherwise has no read-only static variables in
+ * .rodata. We need to preserve such empty BTF and just set the
+ * correct section size.
+ */
+ dst_sec->has_btf = true;
+
+ t = btf__type_by_id(obj->btf, src_sec->sec_type_id);
+ src_var = btf_var_secinfos(t);
+ n = btf_vlen(t);
+ for (j = 0; j < n; j++, src_var++) {
+ void *sec_vars = dst_sec->sec_vars;
+ int new_id = obj->btf_type_map[src_var->type];
+ struct glob_sym *glob_sym = NULL;
+
+ t = btf_type_by_id(linker->btf, new_id);
+ if (btf_is_non_static(t)) {
+ name = btf__str_by_offset(linker->btf, t->name_off);
+ glob_sym = find_glob_sym(linker, name);
+ if (glob_sym->sec_id != dst_sec->id) {
+ pr_warn("global '%s': section mismatch %d vs %d\n",
+ name, glob_sym->sec_id, dst_sec->id);
+ return -EINVAL;
+ }
+ }
+
+ /* If there is already a member (VAR or FUNC) mapped
+ * to the same type, don't add a duplicate entry.
+ * This will happen when multiple object files define
+ * the same extern VARs/FUNCs.
+ */
+ if (glob_sym && glob_sym->var_idx >= 0) {
+ __s64 sz;
+
+ dst_var = &dst_sec->sec_vars[glob_sym->var_idx];
+ /* Because the underlying BTF type might have
+ * changed, its size might have changed as well, so
+ * re-calculate and update it in sec_var.
+ */
+ sz = btf__resolve_size(linker->btf, glob_sym->underlying_btf_id);
+ if (sz < 0) {
+ pr_warn("global '%s': failed to resolve size of underlying type: %d\n",
+ name, (int)sz);
+ return -EINVAL;
+ }
+ dst_var->size = sz;
+ continue;
+ }
+
+ sec_vars = libbpf_reallocarray(sec_vars,
+ dst_sec->sec_var_cnt + 1,
+ sizeof(*dst_sec->sec_vars));
+ if (!sec_vars)
+ return -ENOMEM;
+
+ dst_sec->sec_vars = sec_vars;
+ dst_sec->sec_var_cnt++;
+
+ dst_var = &dst_sec->sec_vars[dst_sec->sec_var_cnt - 1];
+ dst_var->type = obj->btf_type_map[src_var->type];
+ dst_var->size = src_var->size;
+ dst_var->offset = src_sec->dst_off + src_var->offset;
+
+ if (glob_sym)
+ glob_sym->var_idx = dst_sec->sec_var_cnt - 1;
+ }
+ }
+
+ return 0;
+}
+
+static void *add_btf_ext_rec(struct btf_ext_sec_data *ext_data, const void *src_rec)
+{
+ void *tmp;
+
+ tmp = libbpf_reallocarray(ext_data->recs, ext_data->rec_cnt + 1, ext_data->rec_sz);
+ if (!tmp)
+ return NULL;
+ ext_data->recs = tmp;
+
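+ /* copy the new record into the slot just past the existing ones */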
+ tmp += ext_data->rec_cnt * ext_data->rec_sz;
+ memcpy(tmp, src_rec, ext_data->rec_sz);
+
+ ext_data->rec_cnt++;
+
+ return tmp;
+}
+
+static int linker_append_btf_ext(struct bpf_linker *linker, struct src_obj *obj)
+{
+ const struct btf_ext_info_sec *ext_sec;
+ const char *sec_name, *s;
+ struct src_sec *src_sec;
+ struct dst_sec *dst_sec;
+ int rec_sz, str_off, i;
+
+ if (!obj->btf_ext)
+ return 0;
+
+ rec_sz = obj->btf_ext->func_info.rec_size;
+ for_each_btf_ext_sec(&obj->btf_ext->func_info, ext_sec) {
+ struct bpf_func_info_min *src_rec, *dst_rec;
+
+ sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off);
+ src_sec = find_src_sec_by_name(obj, sec_name);
+ if (!src_sec) {
+ pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name);
+ return -EINVAL;
+ }
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ if (dst_sec->func_info.rec_sz == 0)
+ dst_sec->func_info.rec_sz = rec_sz;
+ if (dst_sec->func_info.rec_sz != rec_sz) {
+ pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name);
+ return -EINVAL;
+ }
+
+ for_each_btf_ext_rec(&obj->btf_ext->func_info, ext_sec, i, src_rec) {
+ dst_rec = add_btf_ext_rec(&dst_sec->func_info, src_rec);
+ if (!dst_rec)
+ return -ENOMEM;
+
+ dst_rec->insn_off += src_sec->dst_off;
+ dst_rec->type_id = obj->btf_type_map[dst_rec->type_id];
+ }
+ }
+
+ rec_sz = obj->btf_ext->line_info.rec_size;
+ for_each_btf_ext_sec(&obj->btf_ext->line_info, ext_sec) {
+ struct bpf_line_info_min *src_rec, *dst_rec;
+
+ sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off);
+ src_sec = find_src_sec_by_name(obj, sec_name);
+ if (!src_sec) {
+ pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name);
+ return -EINVAL;
+ }
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ if (dst_sec->line_info.rec_sz == 0)
+ dst_sec->line_info.rec_sz = rec_sz;
+ if (dst_sec->line_info.rec_sz != rec_sz) {
+ pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name);
+ return -EINVAL;
+ }
+
+ for_each_btf_ext_rec(&obj->btf_ext->line_info, ext_sec, i, src_rec) {
+ dst_rec = add_btf_ext_rec(&dst_sec->line_info, src_rec);
+ if (!dst_rec)
+ return -ENOMEM;
+
+ dst_rec->insn_off += src_sec->dst_off;
+
+ s = btf__str_by_offset(obj->btf, src_rec->file_name_off);
+ str_off = btf__add_str(linker->btf, s);
+ if (str_off < 0)
+ return -ENOMEM;
+ dst_rec->file_name_off = str_off;
+
+ s = btf__str_by_offset(obj->btf, src_rec->line_off);
+ str_off = btf__add_str(linker->btf, s);
+ if (str_off < 0)
+ return -ENOMEM;
+ dst_rec->line_off = str_off;
+
+ /* dst_rec->line_col is fine */
+ }
+ }
+
+ rec_sz = obj->btf_ext->core_relo_info.rec_size;
+ for_each_btf_ext_sec(&obj->btf_ext->core_relo_info, ext_sec) {
+ struct bpf_core_relo *src_rec, *dst_rec;
+
+ sec_name = btf__name_by_offset(obj->btf, ext_sec->sec_name_off);
+ src_sec = find_src_sec_by_name(obj, sec_name);
+ if (!src_sec) {
+ pr_warn("can't find section '%s' referenced from .BTF.ext\n", sec_name);
+ return -EINVAL;
+ }
+ dst_sec = &linker->secs[src_sec->dst_id];
+
+ if (dst_sec->core_relo_info.rec_sz == 0)
+ dst_sec->core_relo_info.rec_sz = rec_sz;
+ if (dst_sec->core_relo_info.rec_sz != rec_sz) {
+ pr_warn("incompatible .BTF.ext record sizes for section '%s'\n", sec_name);
+ return -EINVAL;
+ }
+
+ for_each_btf_ext_rec(&obj->btf_ext->core_relo_info, ext_sec, i, src_rec) {
+ dst_rec = add_btf_ext_rec(&dst_sec->core_relo_info, src_rec);
+ if (!dst_rec)
+ return -ENOMEM;
+
+ dst_rec->insn_off += src_sec->dst_off;
+ dst_rec->type_id = obj->btf_type_map[dst_rec->type_id];
+
+ s = btf__str_by_offset(obj->btf, src_rec->access_str_off);
+ str_off = btf__add_str(linker->btf, s);
+ if (str_off < 0)
+ return -ENOMEM;
+ dst_rec->access_str_off = str_off;
+
+ /* dst_rec->kind is fine */
+ }
+ }
+
+ return 0;
+}
+
+int bpf_linker__finalize(struct bpf_linker *linker)
+{
+ struct dst_sec *sec;
+ size_t strs_sz;
+ const void *strs;
+ int err, i;
+
+ if (!linker->elf)
+ return libbpf_err(-EINVAL);
+
+ err = finalize_btf(linker);
+ if (err)
+ return libbpf_err(err);
+
+ /* Finalize strings */
+ strs_sz = strset__data_size(linker->strtab_strs);
+ strs = strset__data(linker->strtab_strs);
+
+ sec = &linker->secs[linker->strtab_sec_idx];
+ sec->data->d_align = 1;
+ sec->data->d_off = 0LL;
+ sec->data->d_buf = (void *)strs;
+ sec->data->d_type = ELF_T_BYTE;
+ sec->data->d_size = strs_sz;
+ sec->shdr->sh_size = strs_sz;
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ sec = &linker->secs[i];
+
+ /* STRTAB is handled specially above */
+ if (sec->sec_idx == linker->strtab_sec_idx)
+ continue;
+
+ /* special ephemeral sections (.ksyms, .kconfig, etc) */
+ if (!sec->scn)
+ continue;
+
+ sec->data->d_buf = sec->raw_data;
+ }
+
+ /* Finalize ELF layout */
+ if (elf_update(linker->elf, ELF_C_NULL) < 0) {
+ err = -errno;
+ pr_warn_elf("failed to finalize ELF layout");
+ return libbpf_err(err);
+ }
+
+ /* Write out final ELF contents */
+ if (elf_update(linker->elf, ELF_C_WRITE) < 0) {
+ err = -errno;
+ pr_warn_elf("failed to write ELF contents");
+ return libbpf_err(err);
+ }
+
+ elf_end(linker->elf);
+ close(linker->fd);
+
+ linker->elf = NULL;
+ linker->fd = -1;
+
+ return 0;
+}
+
+static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,
+ size_t align, const void *raw_data, size_t raw_sz)
+{
+ Elf_Scn *scn;
+ Elf_Data *data;
+ Elf64_Shdr *shdr;
+ int name_off;
+
+ name_off = strset__add_str(linker->strtab_strs, sec_name);
+ if (name_off < 0)
+ return name_off;
+
+ scn = elf_newscn(linker->elf);
+ if (!scn)
+ return -ENOMEM;
+ data = elf_newdata(scn);
+ if (!data)
+ return -ENOMEM;
+ shdr = elf64_getshdr(scn);
+ if (!shdr)
+ return -EINVAL;
+
+ shdr->sh_name = name_off;
+ shdr->sh_type = SHT_PROGBITS;
+ shdr->sh_flags = 0;
+ shdr->sh_size = raw_sz;
+ shdr->sh_link = 0;
+ shdr->sh_info = 0;
+ shdr->sh_addralign = align;
+ shdr->sh_entsize = 0;
+
+ data->d_type = ELF_T_BYTE;
+ data->d_size = raw_sz;
+ data->d_buf = (void *)raw_data;
+ data->d_align = align;
+ data->d_off = 0;
+
+ return 0;
+}
+
+static int finalize_btf(struct bpf_linker *linker)
+{
+ LIBBPF_OPTS(btf_dedup_opts, opts);
+ struct btf *btf = linker->btf;
+ const void *raw_data;
+ int i, j, id, err;
+ __u32 raw_sz;
+
+ /* bail out if no BTF data was produced */
+ if (btf__type_cnt(linker->btf) == 1)
+ return 0;
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ if (!sec->has_btf)
+ continue;
+
+ id = btf__add_datasec(btf, sec->sec_name, sec->sec_sz);
+ if (id < 0) {
+ pr_warn("failed to add consolidated BTF type for datasec '%s': %d\n",
+ sec->sec_name, id);
+ return id;
+ }
+
+ for (j = 0; j < sec->sec_var_cnt; j++) {
+ struct btf_var_secinfo *vi = &sec->sec_vars[j];
+
+ if (btf__add_datasec_var_info(btf, vi->type, vi->offset, vi->size))
+ return -EINVAL;
+ }
+ }
+
+ err = finalize_btf_ext(linker);
+ if (err) {
+ pr_warn(".BTF.ext generation failed: %d\n", err);
+ return err;
+ }
+
+ opts.btf_ext = linker->btf_ext;
+ err = btf__dedup(linker->btf, &opts);
+ if (err) {
+ pr_warn("BTF dedup failed: %d\n", err);
+ return err;
+ }
+
+ /* Emit .BTF section */
+ raw_data = btf__raw_data(linker->btf, &raw_sz);
+ if (!raw_data)
+ return -ENOMEM;
+
+ err = emit_elf_data_sec(linker, BTF_ELF_SEC, 8, raw_data, raw_sz);
+ if (err) {
+ pr_warn("failed to write out .BTF ELF section: %d\n", err);
+ return err;
+ }
+
+ /* Emit .BTF.ext section */
+ if (linker->btf_ext) {
+ raw_data = btf_ext__get_raw_data(linker->btf_ext, &raw_sz);
+ if (!raw_data)
+ return -ENOMEM;
+
+ err = emit_elf_data_sec(linker, BTF_EXT_ELF_SEC, 8, raw_data, raw_sz);
+ if (err) {
+ pr_warn("failed to write out .BTF.ext ELF section: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int emit_btf_ext_data(struct bpf_linker *linker, void *output,
+ const char *sec_name, struct btf_ext_sec_data *sec_data)
+{
+ struct btf_ext_info_sec *sec_info;
+ void *cur = output;
+ int str_off;
+ size_t sz;
+
+ if (!sec_data->rec_cnt)
+ return 0;
+
+ str_off = btf__add_str(linker->btf, sec_name);
+ if (str_off < 0)
+ return -ENOMEM;
+
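+ /* write the per-section header (name offset + record count), then the raw records */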
+ sec_info = cur;
+ sec_info->sec_name_off = str_off;
+ sec_info->num_info = sec_data->rec_cnt;
+ cur += sizeof(struct btf_ext_info_sec);
+
+ sz = sec_data->rec_cnt * sec_data->rec_sz;
+ memcpy(cur, sec_data->recs, sz);
+ cur += sz;
+
+ return cur - output;
+}
+
+static int finalize_btf_ext(struct bpf_linker *linker)
+{
+ size_t funcs_sz = 0, lines_sz = 0, core_relos_sz = 0, total_sz = 0;
+ size_t func_rec_sz = 0, line_rec_sz = 0, core_relo_rec_sz = 0;
+ struct btf_ext_header *hdr;
+ void *data, *cur;
+ int i, err, sz;
+
+ /* validate that all sections have the same .BTF.ext record sizes
+ * and calculate total data size for each type of data (func info,
+ * line info, core relos)
+ */
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ if (sec->func_info.rec_cnt) {
+ if (func_rec_sz == 0)
+ func_rec_sz = sec->func_info.rec_sz;
+ if (func_rec_sz != sec->func_info.rec_sz) {
+ pr_warn("mismatch in func_info record size %zu != %u\n",
+ func_rec_sz, sec->func_info.rec_sz);
+ return -EINVAL;
+ }
+
+ funcs_sz += sizeof(struct btf_ext_info_sec) + func_rec_sz * sec->func_info.rec_cnt;
+ }
+ if (sec->line_info.rec_cnt) {
+ if (line_rec_sz == 0)
+ line_rec_sz = sec->line_info.rec_sz;
+ if (line_rec_sz != sec->line_info.rec_sz) {
+ pr_warn("mismatch in line_info record size %zu != %u\n",
+ line_rec_sz, sec->line_info.rec_sz);
+ return -EINVAL;
+ }
+
+ lines_sz += sizeof(struct btf_ext_info_sec) + line_rec_sz * sec->line_info.rec_cnt;
+ }
+ if (sec->core_relo_info.rec_cnt) {
+ if (core_relo_rec_sz == 0)
+ core_relo_rec_sz = sec->core_relo_info.rec_sz;
+ if (core_relo_rec_sz != sec->core_relo_info.rec_sz) {
+ pr_warn("mismatch in core_relo_info record size %zu != %u\n",
+ core_relo_rec_sz, sec->core_relo_info.rec_sz);
+ return -EINVAL;
+ }
+
+ core_relos_sz += sizeof(struct btf_ext_info_sec) + core_relo_rec_sz * sec->core_relo_info.rec_cnt;
+ }
+ }
+
+ if (!funcs_sz && !lines_sz && !core_relos_sz)
+ return 0;
+
+ total_sz += sizeof(struct btf_ext_header);
+ if (funcs_sz) {
+ funcs_sz += sizeof(__u32); /* record size prefix */
+ total_sz += funcs_sz;
+ }
+ if (lines_sz) {
+ lines_sz += sizeof(__u32); /* record size prefix */
+ total_sz += lines_sz;
+ }
+ if (core_relos_sz) {
+ core_relos_sz += sizeof(__u32); /* record size prefix */
+ total_sz += core_relos_sz;
+ }
+
+ cur = data = calloc(1, total_sz);
+ if (!data)
+ return -ENOMEM;
+
+ hdr = cur;
+ hdr->magic = BTF_MAGIC;
+ hdr->version = BTF_VERSION;
+ hdr->flags = 0;
+ hdr->hdr_len = sizeof(struct btf_ext_header);
+ cur += sizeof(struct btf_ext_header);
+
+ /* All offsets are in bytes relative to the end of this header */
+ hdr->func_info_off = 0;
+ hdr->func_info_len = funcs_sz;
+ hdr->line_info_off = funcs_sz;
+ hdr->line_info_len = lines_sz;
+ hdr->core_relo_off = funcs_sz + lines_sz;
+ hdr->core_relo_len = core_relos_sz;
+
+ if (funcs_sz) {
+ *(__u32 *)cur = func_rec_sz;
+ cur += sizeof(__u32);
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->func_info);
+ if (sz < 0) {
+ err = sz;
+ goto out;
+ }
+
+ cur += sz;
+ }
+ }
+
+ if (lines_sz) {
+ *(__u32 *)cur = line_rec_sz;
+ cur += sizeof(__u32);
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->line_info);
+ if (sz < 0) {
+ err = sz;
+ goto out;
+ }
+
+ cur += sz;
+ }
+ }
+
+ if (core_relos_sz) {
+ *(__u32 *)cur = core_relo_rec_sz;
+ cur += sizeof(__u32);
+
+ for (i = 1; i < linker->sec_cnt; i++) {
+ struct dst_sec *sec = &linker->secs[i];
+
+ sz = emit_btf_ext_data(linker, cur, sec->sec_name, &sec->core_relo_info);
+ if (sz < 0) {
+ err = sz;
+ goto out;
+ }
+
+ cur += sz;
+ }
+ }
+
+ linker->btf_ext = btf_ext__new(data, total_sz);
+ err = libbpf_get_error(linker->btf_ext);
+ if (err) {
+ linker->btf_ext = NULL;
+ pr_warn("failed to parse final .BTF.ext data: %d\n", err);
+ goto out;
+ }
+
+out:
+ free(data);
+ return err;
+}
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 18b5319025e1..84dd5fa14905 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -4,8 +4,12 @@
#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
+#include <arpa/inet.h>
#include <linux/bpf.h>
+#include <linux/if_ether.h>
+#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
+#include <linux/netdev.h>
#include <sys/socket.h>
#include <errno.h>
#include <time.h>
@@ -15,23 +19,36 @@
#include "libbpf_internal.h"
#include "nlattr.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
+typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
+
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
void *cookie);
+struct xdp_link_info {
+ __u32 prog_id;
+ __u32 drv_prog_id;
+ __u32 hw_prog_id;
+ __u32 skb_prog_id;
+ __u8 attach_mode;
+};
+
struct xdp_id_md {
int ifindex;
__u32 flags;
struct xdp_link_info info;
+ __u64 feature_flags;
+};
+
+struct xdp_features_md {
+ int ifindex;
+ __u64 flags;
};
-int libbpf_netlink_open(__u32 *nl_pid)
+static int libbpf_netlink_open(__u32 *nl_pid, int proto)
{
struct sockaddr_nl sa;
socklen_t addrlen;
@@ -41,7 +58,7 @@ int libbpf_netlink_open(__u32 *nl_pid)
memset(&sa, 0, sizeof(sa));
sa.nl_family = AF_NETLINK;
- sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, proto);
if (sock < 0)
return -errno;
@@ -74,28 +91,86 @@ cleanup:
return ret;
}
-static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
- __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
- void *cookie)
+static void libbpf_netlink_close(int sock)
+{
+ close(sock);
+}
+
+enum {
+ NL_CONT,
+ NL_NEXT,
+ NL_DONE,
+};
+
+static int netlink_recvmsg(int sock, struct msghdr *mhdr, int flags)
+{
+ int len;
+
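+ /* retry recvmsg() if it is interrupted by a signal or transiently fails */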
+ do {
+ len = recvmsg(sock, mhdr, flags);
+ } while (len < 0 && (errno == EINTR || errno == EAGAIN));
+
+ if (len < 0)
+ return -errno;
+ return len;
+}
+
+static int alloc_iov(struct iovec *iov, int len)
+{
+ void *nbuf;
+
+ nbuf = realloc(iov->iov_base, len);
+ if (!nbuf)
+ return -ENOMEM;
+
+ iov->iov_base = nbuf;
+ iov->iov_len = len;
+ return 0;
+}
+
+static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq,
+ __dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
+ void *cookie)
{
+ struct iovec iov = {};
+ struct msghdr mhdr = {
+ .msg_iov = &iov,
+ .msg_iovlen = 1,
+ };
bool multipart = true;
struct nlmsgerr *err;
struct nlmsghdr *nh;
- char buf[4096];
int len, ret;
+ ret = alloc_iov(&iov, 4096);
+ if (ret)
+ goto done;
+
while (multipart) {
+start:
multipart = false;
- len = recv(sock, buf, sizeof(buf), 0);
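+ /* peek with MSG_TRUNC to learn the full message size before reading it */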
+ len = netlink_recvmsg(sock, &mhdr, MSG_PEEK | MSG_TRUNC);
if (len < 0) {
- ret = -errno;
+ ret = len;
+ goto done;
+ }
+
+ if (len > iov.iov_len) {
+ ret = alloc_iov(&iov, len);
+ if (ret)
+ goto done;
+ }
+
+ len = netlink_recvmsg(sock, &mhdr, 0);
+ if (len < 0) {
+ ret = len;
goto done;
}
if (len == 0)
break;
- for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
+ for (nh = (struct nlmsghdr *)iov.iov_base; NLMSG_OK(nh, len);
nh = NLMSG_NEXT(nh, len)) {
if (nh->nlmsg_pid != nl_pid) {
ret = -LIBBPF_ERRNO__WRNGPID;
@@ -116,111 +191,153 @@ static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
libbpf_nla_dump_errormsg(nh);
goto done;
case NLMSG_DONE:
- return 0;
+ ret = 0;
+ goto done;
default:
break;
}
if (_fn) {
ret = _fn(nh, fn, cookie);
- if (ret)
- return ret;
+ switch (ret) {
+ case NL_CONT:
+ break;
+ case NL_NEXT:
+ goto start;
+ case NL_DONE:
+ ret = 0;
+ goto done;
+ default:
+ goto done;
+ }
}
}
}
ret = 0;
done:
+ free(iov.iov_base);
return ret;
}
-static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
- __u32 flags)
+static int libbpf_netlink_send_recv(struct libbpf_nla_req *req,
+ int proto, __dump_nlmsg_t parse_msg,
+ libbpf_dump_nlmsg_t parse_attr,
+ void *cookie)
{
- int sock, seq = 0, ret;
- struct nlattr *nla, *nla_xdp;
- struct {
- struct nlmsghdr nh;
- struct ifinfomsg ifinfo;
- char attrbuf[64];
- } req;
- __u32 nl_pid;
-
- sock = libbpf_netlink_open(&nl_pid);
+ __u32 nl_pid = 0;
+ int sock, ret;
+
+ sock = libbpf_netlink_open(&nl_pid, proto);
if (sock < 0)
return sock;
+ req->nh.nlmsg_pid = 0;
+ req->nh.nlmsg_seq = time(NULL);
+
+ if (send(sock, req, req->nh.nlmsg_len, 0) < 0) {
+ ret = -errno;
+ goto out;
+ }
+
+ ret = libbpf_netlink_recv(sock, nl_pid, req->nh.nlmsg_seq,
+ parse_msg, parse_attr, cookie);
+out:
+ libbpf_netlink_close(sock);
+ return ret;
+}
+
+static int parse_genl_family_id(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
+{
+ struct genlmsghdr *gnl = NLMSG_DATA(nh);
+ struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN);
+ struct nlattr *tb[CTRL_ATTR_FAMILY_ID + 1];
+ __u16 *id = cookie;
+
+ libbpf_nla_parse(tb, CTRL_ATTR_FAMILY_ID, na,
+ NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL);
+ if (!tb[CTRL_ATTR_FAMILY_ID])
+ return NL_CONT;
+
+ *id = libbpf_nla_getattr_u16(tb[CTRL_ATTR_FAMILY_ID]);
+ return NL_DONE;
+}
+
+static int libbpf_netlink_resolve_genl_family_id(const char *name,
+ __u16 len, __u16 *id)
+{
+ struct libbpf_nla_req req = {
+ .nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN),
+ .nh.nlmsg_type = GENL_ID_CTRL,
+ .nh.nlmsg_flags = NLM_F_REQUEST,
+ .gnl.cmd = CTRL_CMD_GETFAMILY,
+ .gnl.version = 2,
+ };
+ int err;
+
+ err = nlattr_add(&req, CTRL_ATTR_FAMILY_NAME, name, len);
+ if (err < 0)
+ return err;
+
+ return libbpf_netlink_send_recv(&req, NETLINK_GENERIC,
+ parse_genl_family_id, NULL, id);
+}
+
+static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
+ __u32 flags)
+{
+ struct nlattr *nla;
+ int ret;
+ struct libbpf_nla_req req;
+
memset(&req, 0, sizeof(req));
- req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
- req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
- req.nh.nlmsg_type = RTM_SETLINK;
- req.nh.nlmsg_pid = 0;
- req.nh.nlmsg_seq = ++seq;
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_type = RTM_SETLINK;
req.ifinfo.ifi_family = AF_UNSPEC;
- req.ifinfo.ifi_index = ifindex;
-
- /* started nested attribute for XDP */
- nla = (struct nlattr *)(((char *)&req)
- + NLMSG_ALIGN(req.nh.nlmsg_len));
- nla->nla_type = NLA_F_NESTED | IFLA_XDP;
- nla->nla_len = NLA_HDRLEN;
-
- /* add XDP fd */
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_FD;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
- nla->nla_len += nla_xdp->nla_len;
-
- /* if user passed in any flags, add those too */
+ req.ifinfo.ifi_index = ifindex;
+
+ nla = nlattr_begin_nested(&req, IFLA_XDP);
+ if (!nla)
+ return -EMSGSIZE;
+ ret = nlattr_add(&req, IFLA_XDP_FD, &fd, sizeof(fd));
+ if (ret < 0)
+ return ret;
if (flags) {
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_FLAGS;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
- nla->nla_len += nla_xdp->nla_len;
+ ret = nlattr_add(&req, IFLA_XDP_FLAGS, &flags, sizeof(flags));
+ if (ret < 0)
+ return ret;
}
-
if (flags & XDP_FLAGS_REPLACE) {
- nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
- nla_xdp->nla_type = IFLA_XDP_EXPECTED_FD;
- nla_xdp->nla_len = NLA_HDRLEN + sizeof(old_fd);
- memcpy((char *)nla_xdp + NLA_HDRLEN, &old_fd, sizeof(old_fd));
- nla->nla_len += nla_xdp->nla_len;
- }
-
- req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
-
- if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
- ret = -errno;
- goto cleanup;
+ ret = nlattr_add(&req, IFLA_XDP_EXPECTED_FD, &old_fd,
+ sizeof(old_fd));
+ if (ret < 0)
+ return ret;
}
- ret = bpf_netlink_recv(sock, nl_pid, seq, NULL, NULL, NULL);
+ nlattr_end_nested(&req, nla);
-cleanup:
- close(sock);
- return ret;
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
-int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
- const struct bpf_xdp_set_link_opts *opts)
+int bpf_xdp_attach(int ifindex, int prog_fd, __u32 flags, const struct bpf_xdp_attach_opts *opts)
{
- int old_fd = -1;
+ int old_prog_fd, err;
- if (!OPTS_VALID(opts, bpf_xdp_set_link_opts))
- return -EINVAL;
+ if (!OPTS_VALID(opts, bpf_xdp_attach_opts))
+ return libbpf_err(-EINVAL);
- if (OPTS_HAS(opts, old_fd)) {
- old_fd = OPTS_GET(opts, old_fd, -1);
+ old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
+ if (old_prog_fd)
flags |= XDP_FLAGS_REPLACE;
- }
+ else
+ old_prog_fd = -1;
- return __bpf_set_link_xdp_fd_replace(ifindex, fd,
- old_fd,
- flags);
+ err = __bpf_set_link_xdp_fd_replace(ifindex, prog_fd, old_prog_fd, flags);
+ return libbpf_err(err);
}
-int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
+int bpf_xdp_detach(int ifindex, __u32 flags, const struct bpf_xdp_attach_opts *opts)
{
- return __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags);
+ return bpf_xdp_attach(ifindex, -1, flags, opts);
}
static int __dump_link_nlmsg(struct nlmsghdr *nlh,
@@ -232,6 +349,7 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
+
if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
return -LIBBPF_ERRNO__NLPARSE;
@@ -283,204 +401,517 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
return 0;
}
-int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
- size_t info_size, __u32 flags)
+static int parse_xdp_features(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
{
+ struct genlmsghdr *gnl = NLMSG_DATA(nh);
+ struct nlattr *na = (struct nlattr *)((void *)gnl + GENL_HDRLEN);
+ struct nlattr *tb[NETDEV_CMD_MAX + 1];
+ struct xdp_features_md *md = cookie;
+ __u32 ifindex;
+
+ libbpf_nla_parse(tb, NETDEV_CMD_MAX, na,
+ NLMSG_PAYLOAD(nh, sizeof(*gnl)), NULL);
+
+ if (!tb[NETDEV_A_DEV_IFINDEX] || !tb[NETDEV_A_DEV_XDP_FEATURES])
+ return NL_CONT;
+
+ ifindex = libbpf_nla_getattr_u32(tb[NETDEV_A_DEV_IFINDEX]);
+ if (ifindex != md->ifindex)
+ return NL_CONT;
+
+ md->flags = libbpf_nla_getattr_u64(tb[NETDEV_A_DEV_XDP_FEATURES]);
+ return NL_DONE;
+}
+
+int bpf_xdp_query(int ifindex, int xdp_flags, struct bpf_xdp_query_opts *opts)
+{
+ struct libbpf_nla_req req = {
+ .nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nh.nlmsg_type = RTM_GETLINK,
+ .nh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .ifinfo.ifi_family = AF_PACKET,
+ };
struct xdp_id_md xdp_id = {};
- int sock, ret;
- __u32 nl_pid;
- __u32 mask;
+ struct xdp_features_md md = {
+ .ifindex = ifindex,
+ };
+ __u16 id;
+ int err;
- if (flags & ~XDP_FLAGS_MASK || !info_size)
- return -EINVAL;
+ if (!OPTS_VALID(opts, bpf_xdp_query_opts))
+ return libbpf_err(-EINVAL);
- /* Check whether the single {HW,DRV,SKB} mode is set */
- flags &= (XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE);
- mask = flags - 1;
- if (flags && flags & mask)
- return -EINVAL;
+ if (xdp_flags & ~XDP_FLAGS_MASK)
+ return libbpf_err(-EINVAL);
- sock = libbpf_netlink_open(&nl_pid);
- if (sock < 0)
- return sock;
+ /* Check whether the single {HW,DRV,SKB} mode is set */
+ xdp_flags &= XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE;
+ if (xdp_flags & (xdp_flags - 1))
+ return libbpf_err(-EINVAL);
xdp_id.ifindex = ifindex;
- xdp_id.flags = flags;
+ xdp_id.flags = xdp_flags;
+
+ err = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, __dump_link_nlmsg,
+ get_xdp_info, &xdp_id);
+ if (err)
+ return libbpf_err(err);
+
+ OPTS_SET(opts, prog_id, xdp_id.info.prog_id);
+ OPTS_SET(opts, drv_prog_id, xdp_id.info.drv_prog_id);
+ OPTS_SET(opts, hw_prog_id, xdp_id.info.hw_prog_id);
+ OPTS_SET(opts, skb_prog_id, xdp_id.info.skb_prog_id);
+ OPTS_SET(opts, attach_mode, xdp_id.info.attach_mode);
- ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_info, &xdp_id);
- if (!ret) {
- size_t sz = min(info_size, sizeof(xdp_id.info));
+ if (!OPTS_HAS(opts, feature_flags))
+ return 0;
- memcpy(info, &xdp_id.info, sz);
- memset((void *) info + sz, 0, info_size - sz);
+ err = libbpf_netlink_resolve_genl_family_id("netdev", sizeof("netdev"), &id);
+ if (err < 0) {
+ if (err == -ENOENT) {
+ opts->feature_flags = 0;
+ goto skip_feature_flags;
+ }
+ return libbpf_err(err);
}
- close(sock);
- return ret;
-}
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = id;
+ req.gnl.cmd = NETDEV_CMD_DEV_GET;
+ req.gnl.version = 2;
-static __u32 get_xdp_id(struct xdp_link_info *info, __u32 flags)
-{
- if (info->attach_mode != XDP_ATTACHED_MULTI)
- return info->prog_id;
- if (flags & XDP_FLAGS_DRV_MODE)
- return info->drv_prog_id;
- if (flags & XDP_FLAGS_HW_MODE)
- return info->hw_prog_id;
- if (flags & XDP_FLAGS_SKB_MODE)
- return info->skb_prog_id;
+ err = nlattr_add(&req, NETDEV_A_DEV_IFINDEX, &ifindex, sizeof(ifindex));
+ if (err < 0)
+ return libbpf_err(err);
+ err = libbpf_netlink_send_recv(&req, NETLINK_GENERIC,
+ parse_xdp_features, NULL, &md);
+ if (err)
+ return libbpf_err(err);
+
+ opts->feature_flags = md.flags;
+
+skip_feature_flags:
return 0;
}
-int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
+int bpf_xdp_query_id(int ifindex, int flags, __u32 *prog_id)
{
- struct xdp_link_info info;
+ LIBBPF_OPTS(bpf_xdp_query_opts, opts);
int ret;
- ret = bpf_get_link_xdp_info(ifindex, &info, sizeof(info), flags);
- if (!ret)
- *prog_id = get_xdp_id(&info, flags);
+ ret = bpf_xdp_query(ifindex, flags, &opts);
+ if (ret)
+ return libbpf_err(ret);
- return ret;
+ flags &= XDP_FLAGS_MODES;
+
+ if (opts.attach_mode != XDP_ATTACHED_MULTI && !flags)
+ *prog_id = opts.prog_id;
+ else if (flags & XDP_FLAGS_DRV_MODE)
+ *prog_id = opts.drv_prog_id;
+ else if (flags & XDP_FLAGS_HW_MODE)
+ *prog_id = opts.hw_prog_id;
+ else if (flags & XDP_FLAGS_SKB_MODE)
+ *prog_id = opts.skb_prog_id;
+ else
+ *prog_id = 0;
+
+ return 0;
}
-int libbpf_nl_get_link(int sock, unsigned int nl_pid,
- libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
+
+typedef int (*qdisc_config_t)(struct libbpf_nla_req *req);
+
+static int clsact_config(struct libbpf_nla_req *req)
{
- struct {
- struct nlmsghdr nlh;
- struct ifinfomsg ifm;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
- .nlh.nlmsg_type = RTM_GETLINK,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .ifm.ifi_family = AF_PACKET,
- };
- int seq = time(NULL);
+ req->tc.tcm_parent = TC_H_CLSACT;
+ req->tc.tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0);
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
- return -errno;
+ return nlattr_add(req, TCA_KIND, "clsact", sizeof("clsact"));
+}
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
- dump_link_nlmsg, cookie);
+static int attach_point_to_config(struct bpf_tc_hook *hook,
+ qdisc_config_t *config)
+{
+ switch (OPTS_GET(hook, attach_point, 0)) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ case BPF_TC_INGRESS | BPF_TC_EGRESS:
+ if (OPTS_GET(hook, parent, 0))
+ return -EINVAL;
+ *config = &clsact_config;
+ return 0;
+ case BPF_TC_CUSTOM:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
}
-static int __dump_class_nlmsg(struct nlmsghdr *nlh,
- libbpf_dump_nlmsg_t dump_class_nlmsg,
- void *cookie)
+static int tc_get_tcm_parent(enum bpf_tc_attach_point attach_point,
+ __u32 *parent)
{
- struct nlattr *tb[TCA_MAX + 1], *attr;
- struct tcmsg *t = NLMSG_DATA(nlh);
- int len;
+ switch (attach_point) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ if (*parent)
+ return -EINVAL;
+ *parent = TC_H_MAKE(TC_H_CLSACT,
+ attach_point == BPF_TC_INGRESS ?
+ TC_H_MIN_INGRESS : TC_H_MIN_EGRESS);
+ break;
+ case BPF_TC_CUSTOM:
+ if (!*parent)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
- len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
- attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
- if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
- return -LIBBPF_ERRNO__NLPARSE;
+static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
+{
+ qdisc_config_t config;
+ int ret;
+ struct libbpf_nla_req req;
+
+ ret = attach_point_to_config(hook, &config);
+ if (ret < 0)
+ return ret;
- return dump_class_nlmsg(cookie, t, tb);
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags;
+ req.nh.nlmsg_type = cmd;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0);
+
+ ret = config(&req);
+ if (ret < 0)
+ return ret;
+
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
-int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex,
- libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie)
+static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
{
- struct {
- struct nlmsghdr nlh;
- struct tcmsg t;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
- .nlh.nlmsg_type = RTM_GETTCLASS,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .t.tcm_family = AF_UNSPEC,
- .t.tcm_ifindex = ifindex,
- };
- int seq = time(NULL);
+ return tc_qdisc_modify(hook, RTM_NEWQDISC, NLM_F_CREATE | NLM_F_EXCL);
+}
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
- return -errno;
+static int tc_qdisc_delete(struct bpf_tc_hook *hook)
+{
+ return tc_qdisc_modify(hook, RTM_DELQDISC, 0);
+}
+
+int bpf_tc_hook_create(struct bpf_tc_hook *hook)
+{
+ int ret;
+
+ if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
+ OPTS_GET(hook, ifindex, 0) <= 0)
+ return libbpf_err(-EINVAL);
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg,
- dump_class_nlmsg, cookie);
+ ret = tc_qdisc_create_excl(hook);
+ return libbpf_err(ret);
}
-static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh,
- libbpf_dump_nlmsg_t dump_qdisc_nlmsg,
- void *cookie)
+static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts,
+ const bool flush);
+
+int bpf_tc_hook_destroy(struct bpf_tc_hook *hook)
{
- struct nlattr *tb[TCA_MAX + 1], *attr;
- struct tcmsg *t = NLMSG_DATA(nlh);
- int len;
+ if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
+ OPTS_GET(hook, ifindex, 0) <= 0)
+ return libbpf_err(-EINVAL);
+
+ switch (OPTS_GET(hook, attach_point, 0)) {
+ case BPF_TC_INGRESS:
+ case BPF_TC_EGRESS:
+ return libbpf_err(__bpf_tc_detach(hook, NULL, true));
+ case BPF_TC_INGRESS | BPF_TC_EGRESS:
+ return libbpf_err(tc_qdisc_delete(hook));
+ case BPF_TC_CUSTOM:
+ return libbpf_err(-EOPNOTSUPP);
+ default:
+ return libbpf_err(-EINVAL);
+ }
+}
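
A minimal caller-side sketch (not part of this patch; the ifindex handling is illustrative) of the create/destroy pair above:

#include <errno.h>
#include <bpf/libbpf.h>

static int setup_and_teardown_clsact(int ifindex)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS);
	int err;

	err = bpf_tc_hook_create(&hook);
	if (err && err != -EEXIST)	/* clsact may already exist, which is fine */
		return err;

	/* ... attach and detach filters here ... */

	return bpf_tc_hook_destroy(&hook);	/* INGRESS | EGRESS removes the qdisc */
}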
- len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
- attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
- if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
- return -LIBBPF_ERRNO__NLPARSE;
+struct bpf_cb_ctx {
+ struct bpf_tc_opts *opts;
+ bool processed;
+};
+
+static int __get_tc_info(void *cookie, struct tcmsg *tc, struct nlattr **tb,
+ bool unicast)
+{
+ struct nlattr *tbb[TCA_BPF_MAX + 1];
+ struct bpf_cb_ctx *info = cookie;
- return dump_qdisc_nlmsg(cookie, t, tb);
+ if (!info || !info->opts)
+ return -EINVAL;
+ if (unicast && info->processed)
+ return -EINVAL;
+ if (!tb[TCA_OPTIONS])
+ return NL_CONT;
+
+ libbpf_nla_parse_nested(tbb, TCA_BPF_MAX, tb[TCA_OPTIONS], NULL);
+ if (!tbb[TCA_BPF_ID])
+ return -EINVAL;
+
+ OPTS_SET(info->opts, prog_id, libbpf_nla_getattr_u32(tbb[TCA_BPF_ID]));
+ OPTS_SET(info->opts, handle, tc->tcm_handle);
+ OPTS_SET(info->opts, priority, TC_H_MAJ(tc->tcm_info) >> 16);
+
+ info->processed = true;
+ return unicast ? NL_NEXT : NL_DONE;
}
-int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
- libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie)
+static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
+ void *cookie)
{
- struct {
- struct nlmsghdr nlh;
- struct tcmsg t;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
- .nlh.nlmsg_type = RTM_GETQDISC,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .t.tcm_family = AF_UNSPEC,
- .t.tcm_ifindex = ifindex,
- };
- int seq = time(NULL);
+ struct tcmsg *tc = NLMSG_DATA(nh);
+ struct nlattr *tb[TCA_MAX + 1];
+
+ libbpf_nla_parse(tb, TCA_MAX,
+ (struct nlattr *)((void *)tc + NLMSG_ALIGN(sizeof(*tc))),
+ NLMSG_PAYLOAD(nh, sizeof(*tc)), NULL);
+ if (!tb[TCA_KIND])
+ return NL_CONT;
+ return __get_tc_info(cookie, tc, tb, nh->nlmsg_flags & NLM_F_ECHO);
+}
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+static int tc_add_fd_and_name(struct libbpf_nla_req *req, int fd)
+{
+ struct bpf_prog_info info;
+ __u32 info_len = sizeof(info);
+ char name[256];
+ int len, ret;
+
+ memset(&info, 0, info_len);
+ ret = bpf_prog_get_info_by_fd(fd, &info, &info_len);
+ if (ret < 0)
+ return ret;
+
+ ret = nlattr_add(req, TCA_BPF_FD, &fd, sizeof(fd));
+ if (ret < 0)
+ return ret;
+ len = snprintf(name, sizeof(name), "%s:[%u]", info.name, info.id);
+ if (len < 0)
return -errno;
+ if (len >= sizeof(name))
+ return -ENAMETOOLONG;
+ return nlattr_add(req, TCA_BPF_NAME, name, len + 1);
+}
+
+int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
+{
+ __u32 protocol, bpf_flags, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct bpf_cb_ctx info = {};
+ struct libbpf_nla_req req;
+ struct nlattr *nla;
+
+ if (!hook || !opts ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return libbpf_err(-EINVAL);
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || !prog_fd || prog_id)
+ return libbpf_err(-EINVAL);
+ if (priority > UINT16_MAX)
+ return libbpf_err(-EINVAL);
+ if (flags & ~BPF_TC_F_REPLACE)
+ return libbpf_err(-EINVAL);
+
+ flags = (flags & BPF_TC_F_REPLACE) ? NLM_F_REPLACE : NLM_F_EXCL;
+ protocol = ETH_P_ALL;
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg,
- dump_qdisc_nlmsg, cookie);
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE |
+ NLM_F_ECHO | flags;
+ req.nh.nlmsg_type = RTM_NEWTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return libbpf_err(ret);
+ req.tc.tcm_parent = parent;
+
+ ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return libbpf_err(ret);
+ nla = nlattr_begin_nested(&req, TCA_OPTIONS);
+ if (!nla)
+ return libbpf_err(-EMSGSIZE);
+ ret = tc_add_fd_and_name(&req, prog_fd);
+ if (ret < 0)
+ return libbpf_err(ret);
+ bpf_flags = TCA_BPF_FLAG_ACT_DIRECT;
+ ret = nlattr_add(&req, TCA_BPF_FLAGS, &bpf_flags, sizeof(bpf_flags));
+ if (ret < 0)
+ return libbpf_err(ret);
+ nlattr_end_nested(&req, nla);
+
+ info.opts = opts;
+
+ ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+ &info);
+ if (ret < 0)
+ return libbpf_err(ret);
+ if (!info.processed)
+ return libbpf_err(-ENOENT);
+ return ret;
}
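
Correspondingly, a hedged sketch of attaching and then detaching a SCHED_CLS program through this API (prog_fd is assumed to come from a previously loaded program; not part of the patch):

#include <bpf/libbpf.h>

static int attach_then_detach(int ifindex, int prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .prog_fd = prog_fd);
	int err;

	err = bpf_tc_attach(&hook, &opts);
	if (err)
		return err;

	/* the kernel echoed back opts.handle, opts.priority and opts.prog_id */

	opts.prog_fd = 0;	/* must be cleared before detach, see the checks above */
	opts.prog_id = 0;
	return bpf_tc_detach(&hook, &opts);
}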
-static int __dump_filter_nlmsg(struct nlmsghdr *nlh,
- libbpf_dump_nlmsg_t dump_filter_nlmsg,
- void *cookie)
+static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts,
+ const bool flush)
{
- struct nlattr *tb[TCA_MAX + 1], *attr;
- struct tcmsg *t = NLMSG_DATA(nlh);
- int len;
+ __u32 protocol = 0, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct libbpf_nla_req req;
- len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
- attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
- if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
- return -LIBBPF_ERRNO__NLPARSE;
+ if (!hook ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return -EINVAL;
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || flags || prog_fd || prog_id)
+ return -EINVAL;
+ if (priority > UINT16_MAX)
+ return -EINVAL;
+ if (!flush) {
+ if (!handle || !priority)
+ return -EINVAL;
+ protocol = ETH_P_ALL;
+ } else {
+ if (handle || priority)
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_type = RTM_DELTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ if (!flush) {
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+ }
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return ret;
+ req.tc.tcm_parent = parent;
+
+ if (!flush) {
+ ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return ret;
+ }
- return dump_filter_nlmsg(cookie, t, tb);
+ return libbpf_netlink_send_recv(&req, NETLINK_ROUTE, NULL, NULL, NULL);
}
-int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
- libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie)
+int bpf_tc_detach(const struct bpf_tc_hook *hook,
+ const struct bpf_tc_opts *opts)
{
- struct {
- struct nlmsghdr nlh;
- struct tcmsg t;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
- .nlh.nlmsg_type = RTM_GETTFILTER,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .t.tcm_family = AF_UNSPEC,
- .t.tcm_ifindex = ifindex,
- .t.tcm_parent = handle,
- };
- int seq = time(NULL);
+ int ret;
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
- return -errno;
+ if (!opts)
+ return libbpf_err(-EINVAL);
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg,
- dump_filter_nlmsg, cookie);
+ ret = __bpf_tc_detach(hook, opts, false);
+ return libbpf_err(ret);
+}
+
+int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
+{
+ __u32 protocol, handle, priority, parent, prog_id, flags;
+ int ret, ifindex, attach_point, prog_fd;
+ struct bpf_cb_ctx info = {};
+ struct libbpf_nla_req req;
+
+ if (!hook || !opts ||
+ !OPTS_VALID(hook, bpf_tc_hook) ||
+ !OPTS_VALID(opts, bpf_tc_opts))
+ return libbpf_err(-EINVAL);
+
+ ifindex = OPTS_GET(hook, ifindex, 0);
+ parent = OPTS_GET(hook, parent, 0);
+ attach_point = OPTS_GET(hook, attach_point, 0);
+
+ handle = OPTS_GET(opts, handle, 0);
+ priority = OPTS_GET(opts, priority, 0);
+ prog_fd = OPTS_GET(opts, prog_fd, 0);
+ prog_id = OPTS_GET(opts, prog_id, 0);
+ flags = OPTS_GET(opts, flags, 0);
+
+ if (ifindex <= 0 || flags || prog_fd || prog_id ||
+ !handle || !priority)
+ return libbpf_err(-EINVAL);
+ if (priority > UINT16_MAX)
+ return libbpf_err(-EINVAL);
+
+ protocol = ETH_P_ALL;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_type = RTM_GETTFILTER;
+ req.tc.tcm_family = AF_UNSPEC;
+ req.tc.tcm_ifindex = ifindex;
+ req.tc.tcm_handle = handle;
+ req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
+
+ ret = tc_get_tcm_parent(attach_point, &parent);
+ if (ret < 0)
+ return libbpf_err(ret);
+ req.tc.tcm_parent = parent;
+
+ ret = nlattr_add(&req, TCA_KIND, "bpf", sizeof("bpf"));
+ if (ret < 0)
+ return libbpf_err(ret);
+
+ info.opts = opts;
+
+ ret = libbpf_netlink_send_recv(&req, NETLINK_ROUTE, get_tc_info, NULL,
+ &info);
+ if (ret < 0)
+ return libbpf_err(ret);
+ if (!info.processed)
+ return libbpf_err(-ENOENT);
+ return ret;
}
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 0ad41dfea8eb..975e265eab3b 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -7,14 +7,11 @@
*/
#include <errno.h>
-#include "nlattr.h"
-#include "libbpf_internal.h"
-#include <linux/rtnetlink.h>
#include <string.h>
#include <stdio.h>
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+#include <linux/rtnetlink.h>
+#include "nlattr.h"
+#include "libbpf_internal.h"
static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = {
[LIBBPF_NLA_U8] = sizeof(uint8_t),
@@ -30,12 +27,12 @@ static struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
int totlen = NLA_ALIGN(nla->nla_len);
*remaining -= totlen;
- return (struct nlattr *) ((char *) nla + totlen);
+ return (struct nlattr *)((void *)nla + totlen);
}
static int nla_ok(const struct nlattr *nla, int remaining)
{
- return remaining >= sizeof(*nla) &&
+ return remaining >= (int)sizeof(*nla) &&
nla->nla_len >= sizeof(*nla) &&
nla->nla_len <= remaining;
}
@@ -181,7 +178,7 @@ int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh)
hlen += nlmsg_len(&err->msg);
attr = (struct nlattr *) ((void *) err + hlen);
- alen = nlh->nlmsg_len - hlen;
+ alen = (void *)nlh + nlh->nlmsg_len - (void *)attr;
if (libbpf_nla_parse(tb, NLMSGERR_ATTR_MAX, attr, alen,
extack_policy) != 0) {
diff --git a/tools/lib/bpf/nlattr.h b/tools/lib/bpf/nlattr.h
index 6cc3ac91690f..d92d1c1de700 100644
--- a/tools/lib/bpf/nlattr.h
+++ b/tools/lib/bpf/nlattr.h
@@ -10,7 +10,12 @@
#define __LIBBPF_NLATTR_H
#include <stdint.h>
+#include <string.h>
+#include <errno.h>
#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/genetlink.h>
+
/* avoid multiple definition of netlink features */
#define __LINUX_NETLINK_H
@@ -49,6 +54,16 @@ struct libbpf_nla_policy {
uint16_t maxlen;
};
+struct libbpf_nla_req {
+ struct nlmsghdr nh;
+ union {
+ struct ifinfomsg ifinfo;
+ struct tcmsg tc;
+ struct genlmsghdr gnl;
+ };
+ char buf[128];
+};
+
/**
* @ingroup attr
* Iterate over a stream of attributes
@@ -68,7 +83,7 @@ struct libbpf_nla_policy {
*/
static inline void *libbpf_nla_data(const struct nlattr *nla)
{
- return (char *) nla + NLA_HDRLEN;
+ return (void *)nla + NLA_HDRLEN;
}
static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla)
@@ -76,11 +91,21 @@ static inline uint8_t libbpf_nla_getattr_u8(const struct nlattr *nla)
return *(uint8_t *)libbpf_nla_data(nla);
}
+static inline uint16_t libbpf_nla_getattr_u16(const struct nlattr *nla)
+{
+ return *(uint16_t *)libbpf_nla_data(nla);
+}
+
static inline uint32_t libbpf_nla_getattr_u32(const struct nlattr *nla)
{
return *(uint32_t *)libbpf_nla_data(nla);
}
+static inline uint64_t libbpf_nla_getattr_u64(const struct nlattr *nla)
+{
+ return *(uint64_t *)libbpf_nla_data(nla);
+}
+
static inline const char *libbpf_nla_getattr_str(const struct nlattr *nla)
{
return (const char *)libbpf_nla_data(nla);
@@ -103,4 +128,49 @@ int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh);
+static inline struct nlattr *nla_data(struct nlattr *nla)
+{
+ return (struct nlattr *)((void *)nla + NLA_HDRLEN);
+}
+
+static inline struct nlattr *req_tail(struct libbpf_nla_req *req)
+{
+ return (struct nlattr *)((void *)req + NLMSG_ALIGN(req->nh.nlmsg_len));
+}
+
+static inline int nlattr_add(struct libbpf_nla_req *req, int type,
+ const void *data, int len)
+{
+ struct nlattr *nla;
+
+ if (NLMSG_ALIGN(req->nh.nlmsg_len) + NLA_ALIGN(NLA_HDRLEN + len) > sizeof(*req))
+ return -EMSGSIZE;
+ if (!!data != !!len)
+ return -EINVAL;
+
+ nla = req_tail(req);
+ nla->nla_type = type;
+ nla->nla_len = NLA_HDRLEN + len;
+ if (data)
+ memcpy(nla_data(nla), data, len);
+ req->nh.nlmsg_len = NLMSG_ALIGN(req->nh.nlmsg_len) + NLA_ALIGN(nla->nla_len);
+ return 0;
+}
+
+static inline struct nlattr *nlattr_begin_nested(struct libbpf_nla_req *req, int type)
+{
+ struct nlattr *tail;
+
+ tail = req_tail(req);
+ if (nlattr_add(req, type | NLA_F_NESTED, NULL, 0))
+ return NULL;
+ return tail;
+}
+
+static inline void nlattr_end_nested(struct libbpf_nla_req *req,
+ struct nlattr *tail)
+{
+ tail->nla_len = (void *)req_tail(req) - (void *)tail;
+}
+
#endif /* __LIBBPF_NLATTR_H */
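
For illustration only, a sketch of how these request helpers compose (mirroring the TCA_OPTIONS construction in bpf_tc_attach() earlier in this patch); the TCA_BPF_* constants come from <linux/pkt_cls.h>:

static int build_bpf_filter_attrs(struct libbpf_nla_req *req, int prog_fd)
{
	__u32 bpf_flags = TCA_BPF_FLAG_ACT_DIRECT;
	struct nlattr *nest;
	int err;

	err = nlattr_add(req, TCA_KIND, "bpf", sizeof("bpf"));
	if (err < 0)
		return err;

	nest = nlattr_begin_nested(req, TCA_OPTIONS);
	if (!nest)
		return -EMSGSIZE;
	err = nlattr_add(req, TCA_BPF_FD, &prog_fd, sizeof(prog_fd));
	if (err < 0)
		return err;
	err = nlattr_add(req, TCA_BPF_FLAGS, &bpf_flags, sizeof(bpf_flags));
	if (err < 0)
		return err;
	nlattr_end_nested(req, nest);	/* patches nla_len of the nest header */
	return 0;
}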
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
new file mode 100644
index 000000000000..a26b2f5fa0fc
--- /dev/null
+++ b/tools/lib/bpf/relo_core.c
@@ -0,0 +1,1687 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2019 Facebook */
+
+#ifdef __KERNEL__
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/string.h>
+#include <linux/bpf_verifier.h>
+#include "relo_core.h"
+
+static const char *btf_kind_str(const struct btf_type *t)
+{
+ return btf_type_str(t);
+}
+
+static bool is_ldimm64_insn(struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
+static const struct btf_type *
+skip_mods_and_typedefs(const struct btf *btf, u32 id, u32 *res_id)
+{
+ return btf_type_skip_modifiers(btf, id, res_id);
+}
+
+static const char *btf__name_by_offset(const struct btf *btf, u32 offset)
+{
+ return btf_name_by_offset(btf, offset);
+}
+
+static s64 btf__resolve_size(const struct btf *btf, u32 type_id)
+{
+ const struct btf_type *t;
+ int size;
+
+ t = btf_type_by_id(btf, type_id);
+ t = btf_resolve_size(btf, t, &size);
+ if (IS_ERR(t))
+ return PTR_ERR(t);
+ return size;
+}
+
+enum libbpf_print_level {
+ LIBBPF_WARN,
+ LIBBPF_INFO,
+ LIBBPF_DEBUG,
+};
+
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+#define pr_warn(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
+#define pr_info(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
+#define pr_debug(fmt, log, ...) bpf_log((void *)log, fmt, "", ##__VA_ARGS__)
+#define libbpf_print(level, fmt, ...) bpf_log((void *)prog_name, fmt, ##__VA_ARGS__)
+#else
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <ctype.h>
+#include <linux/err.h>
+
+#include "libbpf.h"
+#include "bpf.h"
+#include "btf.h"
+#include "str_error.h"
+#include "libbpf_internal.h"
+#endif
+
+static bool is_flex_arr(const struct btf *btf,
+ const struct bpf_core_accessor *acc,
+ const struct btf_array *arr)
+{
+ const struct btf_type *t;
+
+ /* not a flexible array, if not inside a struct or has non-zero size */
+ if (!acc->name || arr->nelems > 0)
+ return false;
+
+ /* has to be the last member of enclosing struct */
+ t = btf_type_by_id(btf, acc->type_id);
+ return acc->idx == btf_vlen(t) - 1;
+}
+
+static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
+{
+ switch (kind) {
+ case BPF_CORE_FIELD_BYTE_OFFSET: return "byte_off";
+ case BPF_CORE_FIELD_BYTE_SIZE: return "byte_sz";
+ case BPF_CORE_FIELD_EXISTS: return "field_exists";
+ case BPF_CORE_FIELD_SIGNED: return "signed";
+ case BPF_CORE_FIELD_LSHIFT_U64: return "lshift_u64";
+ case BPF_CORE_FIELD_RSHIFT_U64: return "rshift_u64";
+ case BPF_CORE_TYPE_ID_LOCAL: return "local_type_id";
+ case BPF_CORE_TYPE_ID_TARGET: return "target_type_id";
+ case BPF_CORE_TYPE_EXISTS: return "type_exists";
+ case BPF_CORE_TYPE_MATCHES: return "type_matches";
+ case BPF_CORE_TYPE_SIZE: return "type_size";
+ case BPF_CORE_ENUMVAL_EXISTS: return "enumval_exists";
+ case BPF_CORE_ENUMVAL_VALUE: return "enumval_value";
+ default: return "unknown";
+ }
+}
+
+static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
+{
+ switch (kind) {
+ case BPF_CORE_FIELD_BYTE_OFFSET:
+ case BPF_CORE_FIELD_BYTE_SIZE:
+ case BPF_CORE_FIELD_EXISTS:
+ case BPF_CORE_FIELD_SIGNED:
+ case BPF_CORE_FIELD_LSHIFT_U64:
+ case BPF_CORE_FIELD_RSHIFT_U64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
+{
+ switch (kind) {
+ case BPF_CORE_TYPE_ID_LOCAL:
+ case BPF_CORE_TYPE_ID_TARGET:
+ case BPF_CORE_TYPE_EXISTS:
+ case BPF_CORE_TYPE_MATCHES:
+ case BPF_CORE_TYPE_SIZE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
+{
+ switch (kind) {
+ case BPF_CORE_ENUMVAL_EXISTS:
+ case BPF_CORE_ENUMVAL_VALUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id, int level)
+{
+ const struct btf_type *local_type, *targ_type;
+ int depth = 32; /* max recursion depth */
+
+ /* caller made sure that names match (ignoring flavor suffix) */
+ local_type = btf_type_by_id(local_btf, local_id);
+ targ_type = btf_type_by_id(targ_btf, targ_id);
+ if (!btf_kind_core_compat(local_type, targ_type))
+ return 0;
+
+recur:
+ depth--;
+ if (depth < 0)
+ return -EINVAL;
+
+ local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!local_type || !targ_type)
+ return -EINVAL;
+
+ if (!btf_kind_core_compat(local_type, targ_type))
+ return 0;
+
+ switch (btf_kind(local_type)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_FWD:
+ case BTF_KIND_ENUM64:
+ return 1;
+ case BTF_KIND_INT:
+ /* just reject deprecated bitfield-like integers; all other
+ * integers are by default compatible with each other
+ */
+ return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
+ case BTF_KIND_PTR:
+ local_id = local_type->type;
+ targ_id = targ_type->type;
+ goto recur;
+ case BTF_KIND_ARRAY:
+ local_id = btf_array(local_type)->type;
+ targ_id = btf_array(targ_type)->type;
+ goto recur;
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *local_p = btf_params(local_type);
+ struct btf_param *targ_p = btf_params(targ_type);
+ __u16 local_vlen = btf_vlen(local_type);
+ __u16 targ_vlen = btf_vlen(targ_type);
+ int i, err;
+
+ if (local_vlen != targ_vlen)
+ return 0;
+
+ for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
+ if (level <= 0)
+ return -EINVAL;
+
+ skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
+ skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
+ err = __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id,
+ level - 1);
+ if (err <= 0)
+ return err;
+ }
+
+ /* tail recurse for return type check */
+ skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
+ skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
+ goto recur;
+ }
+ default:
+ pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
+ btf_kind_str(local_type), local_id, targ_id);
+ return 0;
+ }
+}
+
+/*
+ * Turn bpf_core_relo into a low- and high-level spec representation,
+ * validating correctness along the way, as well as calculating resulting
+ * field bit offset, specified by accessor string. Low-level spec captures
+ * every single level of nestedness, including traversing anonymous
+ * struct/union members. High-level one only captures semantically meaningful
+ * "turning points": named fields and array indicies.
+ * E.g., for this case:
+ *
+ * struct sample {
+ * int __unimportant;
+ * struct {
+ * int __1;
+ * int __2;
+ * int a[7];
+ * };
+ * };
+ *
+ * struct sample *s = ...;
+ *
+ * int *x = &s->a[3]; // access string = '0:1:2:3'
+ *
+ * Low-level spec has 1:1 mapping with each element of access string (it's
+ * just a parsed access string representation): [0, 1, 2, 3].
+ *
+ * High-level spec will capture only 3 points:
+ * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
+ * - field 'a' access (corresponds to '2' in low-level spec);
+ * - array element #3 access (corresponds to '3' in low-level spec).
+ *
+ * Type-based relocations (TYPE_EXISTS/TYPE_MATCHES/TYPE_SIZE,
+ * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
+ * spec and raw_spec are kept empty.
+ *
+ * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
+ * string to specify the enumerator's value index that needs to be relocated.
+ */
+int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
+ const struct bpf_core_relo *relo,
+ struct bpf_core_spec *spec)
+{
+ int access_idx, parsed_len, i;
+ struct bpf_core_accessor *acc;
+ const struct btf_type *t;
+ const char *name, *spec_str;
+ __u32 id, name_off;
+ __s64 sz;
+
+ spec_str = btf__name_by_offset(btf, relo->access_str_off);
+ if (str_is_empty(spec_str) || *spec_str == ':')
+ return -EINVAL;
+
+ memset(spec, 0, sizeof(*spec));
+ spec->btf = btf;
+ spec->root_type_id = relo->type_id;
+ spec->relo_kind = relo->kind;
+
+ /* type-based relocations don't have a field access string */
+ if (core_relo_is_type_based(relo->kind)) {
+ if (strcmp(spec_str, "0"))
+ return -EINVAL;
+ return 0;
+ }
+
+ /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
+ while (*spec_str) {
+ if (*spec_str == ':')
+ ++spec_str;
+ if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
+ return -EINVAL;
+ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+ spec_str += parsed_len;
+ spec->raw_spec[spec->raw_len++] = access_idx;
+ }
+
+ if (spec->raw_len == 0)
+ return -EINVAL;
+
+ t = skip_mods_and_typedefs(btf, relo->type_id, &id);
+ if (!t)
+ return -EINVAL;
+
+ access_idx = spec->raw_spec[0];
+ acc = &spec->spec[0];
+ acc->type_id = id;
+ acc->idx = access_idx;
+ spec->len++;
+
+ if (core_relo_is_enumval_based(relo->kind)) {
+ if (!btf_is_any_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
+ return -EINVAL;
+
+ /* record enumerator name in a first accessor */
+ name_off = btf_is_enum(t) ? btf_enum(t)[access_idx].name_off
+ : btf_enum64(t)[access_idx].name_off;
+ acc->name = btf__name_by_offset(btf, name_off);
+ return 0;
+ }
+
+ if (!core_relo_is_field_based(relo->kind))
+ return -EINVAL;
+
+ sz = btf__resolve_size(btf, id);
+ if (sz < 0)
+ return sz;
+ spec->bit_offset = access_idx * sz * 8;
+
+ for (i = 1; i < spec->raw_len; i++) {
+ t = skip_mods_and_typedefs(btf, id, &id);
+ if (!t)
+ return -EINVAL;
+
+ access_idx = spec->raw_spec[i];
+ acc = &spec->spec[spec->len];
+
+ if (btf_is_composite(t)) {
+ const struct btf_member *m;
+ __u32 bit_offset;
+
+ if (access_idx >= btf_vlen(t))
+ return -EINVAL;
+
+ bit_offset = btf_member_bit_offset(t, access_idx);
+ spec->bit_offset += bit_offset;
+
+ m = btf_members(t) + access_idx;
+ if (m->name_off) {
+ name = btf__name_by_offset(btf, m->name_off);
+ if (str_is_empty(name))
+ return -EINVAL;
+
+ acc->type_id = id;
+ acc->idx = access_idx;
+ acc->name = name;
+ spec->len++;
+ }
+
+ id = m->type;
+ } else if (btf_is_array(t)) {
+ const struct btf_array *a = btf_array(t);
+ bool flex;
+
+ t = skip_mods_and_typedefs(btf, a->type, &id);
+ if (!t)
+ return -EINVAL;
+
+ flex = is_flex_arr(btf, acc - 1, a);
+ if (!flex && access_idx >= a->nelems)
+ return -EINVAL;
+
+ spec->spec[spec->len].type_id = id;
+ spec->spec[spec->len].idx = access_idx;
+ spec->len++;
+
+ sz = btf__resolve_size(btf, id);
+ if (sz < 0)
+ return sz;
+ spec->bit_offset += access_idx * sz * 8;
+ } else {
+ pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
+ prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
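
To make the access-string format concrete, a hedged BPF-side sketch that would produce such a relocation for the struct sample example in the comment above (clang with CO-RE support assumed; the section name and context pointer are purely illustrative):

#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

struct sample {			/* mirrors the example above */
	int __unimportant;
	struct {
		int __1;
		int __2;
		int a[7];
	};
};

SEC("tp/syscalls/sys_enter_nanosleep")
int read_a3(void *ctx)
{
	struct sample *s = ctx;	/* illustrative only */
	int x;

	/* records a FIELD_BYTE_OFFSET relocation with access string "0:1:2:3" */
	x = BPF_CORE_READ(s, a[3]);
	return x;
}

char LICENSE[] SEC("license") = "GPL";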
+
+/* Check two types for compatibility for the purpose of field access
+ * relocation. const/volatile/restrict and typedefs are skipped to ensure we
+ * are relocating semantically compatible entities:
+ * - any two STRUCTs/UNIONs are compatible and can be mixed;
+ * - any two FWDs are compatible, if their names match (modulo flavor suffix);
+ * - any two PTRs are always compatible;
+ * - for ENUMs, names should be the same (ignoring flavor suffix) or at
+ * least one of enums should be anonymous;
+ * - for ENUMs, check sizes, names are ignored;
+ * - for INT, size and signedness are ignored;
+ * - any two FLOATs are always compatible;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - everything else shouldn't be ever a target of relocation.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ */
+static int bpf_core_fields_are_compat(const struct btf *local_btf,
+ __u32 local_id,
+ const struct btf *targ_btf,
+ __u32 targ_id)
+{
+ const struct btf_type *local_type, *targ_type;
+
+recur:
+ local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!local_type || !targ_type)
+ return -EINVAL;
+
+ if (btf_is_composite(local_type) && btf_is_composite(targ_type))
+ return 1;
+ if (!btf_kind_core_compat(local_type, targ_type))
+ return 0;
+
+ switch (btf_kind(local_type)) {
+ case BTF_KIND_PTR:
+ case BTF_KIND_FLOAT:
+ return 1;
+ case BTF_KIND_FWD:
+ case BTF_KIND_ENUM64:
+ case BTF_KIND_ENUM: {
+ const char *local_name, *targ_name;
+ size_t local_len, targ_len;
+
+ local_name = btf__name_by_offset(local_btf,
+ local_type->name_off);
+ targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
+ local_len = bpf_core_essential_name_len(local_name);
+ targ_len = bpf_core_essential_name_len(targ_name);
+ /* one of them is anonymous or both w/ same flavor-less names */
+ return local_len == 0 || targ_len == 0 ||
+ (local_len == targ_len &&
+ strncmp(local_name, targ_name, local_len) == 0);
+ }
+ case BTF_KIND_INT:
+ /* just reject deprecated bitfield-like integers; all other
+ * integers are by default compatible with each other
+ */
+ return btf_int_offset(local_type) == 0 &&
+ btf_int_offset(targ_type) == 0;
+ case BTF_KIND_ARRAY:
+ local_id = btf_array(local_type)->type;
+ targ_id = btf_array(targ_type)->type;
+ goto recur;
+ default:
+ return 0;
+ }
+}
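
As a concrete, made-up illustration of these rules, every field of local_sample below would be judged compatible with the corresponding field of targ_sample (in practice the two types live in different BTFs: local program BTF vs. target kernel BTF):

struct local_sample {
	struct blob *ptr;			/* any two PTRs are compatible */
	unsigned short counter;			/* INT: size and signedness ignored */
	int values[4];				/* ARRAY: dimensionality ignored */
	enum mode___v1 { MODE_OLD___v1 } m;	/* ENUM: flavor suffix is stripped */
};

struct targ_sample {
	union other_blob *ptr;
	long counter;
	int values[16];
	enum mode { MODE_OLD, MODE_NEW } m;
};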
+
+/*
+ * Given single high-level named field accessor in local type, find
+ * corresponding high-level accessor for a target type. Along the way,
+ * maintain low-level spec for target as well. Also keep updating target
+ * bit offset.
+ *
+ * Searching is performed through recursive exhaustive enumeration of all
+ * fields of a struct/union. If there are any anonymous (embedded)
+ * structs/unions, they are recursively searched as well. If field with
+ * desired name is found, check compatibility between local and target types,
+ * before returning result.
+ *
+ * 1 is returned, if field is found.
+ * 0 is returned if no compatible field is found.
+ * <0 is returned on error.
+ */
+static int bpf_core_match_member(const struct btf *local_btf,
+ const struct bpf_core_accessor *local_acc,
+ const struct btf *targ_btf,
+ __u32 targ_id,
+ struct bpf_core_spec *spec,
+ __u32 *next_targ_id)
+{
+ const struct btf_type *local_type, *targ_type;
+ const struct btf_member *local_member, *m;
+ const char *local_name, *targ_name;
+ __u32 local_id;
+ int i, n, found;
+
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!targ_type)
+ return -EINVAL;
+ if (!btf_is_composite(targ_type))
+ return 0;
+
+ local_id = local_acc->type_id;
+ local_type = btf_type_by_id(local_btf, local_id);
+ local_member = btf_members(local_type) + local_acc->idx;
+ local_name = btf__name_by_offset(local_btf, local_member->name_off);
+
+ n = btf_vlen(targ_type);
+ m = btf_members(targ_type);
+ for (i = 0; i < n; i++, m++) {
+ __u32 bit_offset;
+
+ bit_offset = btf_member_bit_offset(targ_type, i);
+
+ /* too deep struct/union/array nesting */
+ if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+
+ /* speculate this member will be the good one */
+ spec->bit_offset += bit_offset;
+ spec->raw_spec[spec->raw_len++] = i;
+
+ targ_name = btf__name_by_offset(targ_btf, m->name_off);
+ if (str_is_empty(targ_name)) {
+ /* embedded struct/union, we need to go deeper */
+ found = bpf_core_match_member(local_btf, local_acc,
+ targ_btf, m->type,
+ spec, next_targ_id);
+ if (found) /* either found or error */
+ return found;
+ } else if (strcmp(local_name, targ_name) == 0) {
+ /* matching named field */
+ struct bpf_core_accessor *targ_acc;
+
+ targ_acc = &spec->spec[spec->len++];
+ targ_acc->type_id = targ_id;
+ targ_acc->idx = i;
+ targ_acc->name = targ_name;
+
+ *next_targ_id = m->type;
+ found = bpf_core_fields_are_compat(local_btf,
+ local_member->type,
+ targ_btf, m->type);
+ if (!found)
+ spec->len--; /* pop accessor */
+ return found;
+ }
+ /* member turned out not to be what we looked for */
+ spec->bit_offset -= bit_offset;
+ spec->raw_len--;
+ }
+
+ return 0;
+}
+
+/*
+ * Try to match local spec to a target type and, if successful, produce full
+ * target spec (high-level, low-level + bit offset).
+ */
+static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
+ const struct btf *targ_btf, __u32 targ_id,
+ struct bpf_core_spec *targ_spec)
+{
+ const struct btf_type *targ_type;
+ const struct bpf_core_accessor *local_acc;
+ struct bpf_core_accessor *targ_acc;
+ int i, sz, matched;
+ __u32 name_off;
+
+ memset(targ_spec, 0, sizeof(*targ_spec));
+ targ_spec->btf = targ_btf;
+ targ_spec->root_type_id = targ_id;
+ targ_spec->relo_kind = local_spec->relo_kind;
+
+ if (core_relo_is_type_based(local_spec->relo_kind)) {
+ if (local_spec->relo_kind == BPF_CORE_TYPE_MATCHES)
+ return bpf_core_types_match(local_spec->btf,
+ local_spec->root_type_id,
+ targ_btf, targ_id);
+ else
+ return bpf_core_types_are_compat(local_spec->btf,
+ local_spec->root_type_id,
+ targ_btf, targ_id);
+ }
+
+ local_acc = &local_spec->spec[0];
+ targ_acc = &targ_spec->spec[0];
+
+ if (core_relo_is_enumval_based(local_spec->relo_kind)) {
+ size_t local_essent_len, targ_essent_len;
+ const char *targ_name;
+
+ /* has to resolve to an enum */
+ targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
+ if (!btf_is_any_enum(targ_type))
+ return 0;
+
+ local_essent_len = bpf_core_essential_name_len(local_acc->name);
+
+ for (i = 0; i < btf_vlen(targ_type); i++) {
+ if (btf_is_enum(targ_type))
+ name_off = btf_enum(targ_type)[i].name_off;
+ else
+ name_off = btf_enum64(targ_type)[i].name_off;
+
+ targ_name = btf__name_by_offset(targ_spec->btf, name_off);
+ targ_essent_len = bpf_core_essential_name_len(targ_name);
+ if (targ_essent_len != local_essent_len)
+ continue;
+ if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
+ targ_acc->type_id = targ_id;
+ targ_acc->idx = i;
+ targ_acc->name = targ_name;
+ targ_spec->len++;
+ targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
+ targ_spec->raw_len++;
+ return 1;
+ }
+ }
+ return 0;
+ }
+
+ if (!core_relo_is_field_based(local_spec->relo_kind))
+ return -EINVAL;
+
+ for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
+ targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
+ &targ_id);
+ if (!targ_type)
+ return -EINVAL;
+
+ if (local_acc->name) {
+ matched = bpf_core_match_member(local_spec->btf,
+ local_acc,
+ targ_btf, targ_id,
+ targ_spec, &targ_id);
+ if (matched <= 0)
+ return matched;
+ } else {
+ /* for i=0, targ_id is already treated as array element
+ * type (because it's the original struct), for others
+ * we should find array element type first
+ */
+ if (i > 0) {
+ const struct btf_array *a;
+ bool flex;
+
+ if (!btf_is_array(targ_type))
+ return 0;
+
+ a = btf_array(targ_type);
+ flex = is_flex_arr(targ_btf, targ_acc - 1, a);
+ if (!flex && local_acc->idx >= a->nelems)
+ return 0;
+ if (!skip_mods_and_typedefs(targ_btf, a->type,
+ &targ_id))
+ return -EINVAL;
+ }
+
+ /* too deep struct/union/array nesting */
+ if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
+ return -E2BIG;
+
+ targ_acc->type_id = targ_id;
+ targ_acc->idx = local_acc->idx;
+ targ_acc->name = NULL;
+ targ_spec->len++;
+ targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
+ targ_spec->raw_len++;
+
+ sz = btf__resolve_size(targ_btf, targ_id);
+ if (sz < 0)
+ return sz;
+ targ_spec->bit_offset += local_acc->idx * sz * 8;
+ }
+ }
+
+ return 1;
+}
+
+static int bpf_core_calc_field_relo(const char *prog_name,
+ const struct bpf_core_relo *relo,
+ const struct bpf_core_spec *spec,
+ __u64 *val, __u32 *field_sz, __u32 *type_id,
+ bool *validate)
+{
+ const struct bpf_core_accessor *acc;
+ const struct btf_type *t;
+ __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
+ const struct btf_member *m;
+ const struct btf_type *mt;
+ bool bitfield;
+ __s64 sz;
+
+ *field_sz = 0;
+
+ if (relo->kind == BPF_CORE_FIELD_EXISTS) {
+ *val = spec ? 1 : 0;
+ return 0;
+ }
+
+ if (!spec)
+ return -EUCLEAN; /* request instruction poisoning */
+
+ acc = &spec->spec[spec->len - 1];
+ t = btf_type_by_id(spec->btf, acc->type_id);
+
+ /* a[n] accessor needs special handling */
+ if (!acc->name) {
+ if (relo->kind == BPF_CORE_FIELD_BYTE_OFFSET) {
+ *val = spec->bit_offset / 8;
+ /* remember field size for load/store mem size */
+ sz = btf__resolve_size(spec->btf, acc->type_id);
+ if (sz < 0)
+ return -EINVAL;
+ *field_sz = sz;
+ *type_id = acc->type_id;
+ } else if (relo->kind == BPF_CORE_FIELD_BYTE_SIZE) {
+ sz = btf__resolve_size(spec->btf, acc->type_id);
+ if (sz < 0)
+ return -EINVAL;
+ *val = sz;
+ } else {
+ pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
+ prog_name, relo->kind, relo->insn_off / 8);
+ return -EINVAL;
+ }
+ if (validate)
+ *validate = true;
+ return 0;
+ }
+
+ m = btf_members(t) + acc->idx;
+ mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
+ bit_off = spec->bit_offset;
+ bit_sz = btf_member_bitfield_size(t, acc->idx);
+
+ bitfield = bit_sz > 0;
+ if (bitfield) {
+ byte_sz = mt->size;
+ byte_off = bit_off / 8 / byte_sz * byte_sz;
+ /* figure out smallest int size necessary for bitfield load */
+ while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
+ if (byte_sz >= 8) {
+ /* bitfield can't be read with 64-bit read */
+ pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
+ prog_name, relo->kind, relo->insn_off / 8);
+ return -E2BIG;
+ }
+ byte_sz *= 2;
+ byte_off = bit_off / 8 / byte_sz * byte_sz;
+ }
+ } else {
+ sz = btf__resolve_size(spec->btf, field_type_id);
+ if (sz < 0)
+ return -EINVAL;
+ byte_sz = sz;
+ byte_off = spec->bit_offset / 8;
+ bit_sz = byte_sz * 8;
+ }
+
+ /* for bitfields, all the relocatable aspects are ambiguous and we
+ * might disagree with compiler, so turn off validation of expected
+ * value, except for signedness
+ */
+ if (validate)
+ *validate = !bitfield;
+
+ switch (relo->kind) {
+ case BPF_CORE_FIELD_BYTE_OFFSET:
+ *val = byte_off;
+ if (!bitfield) {
+ *field_sz = byte_sz;
+ *type_id = field_type_id;
+ }
+ break;
+ case BPF_CORE_FIELD_BYTE_SIZE:
+ *val = byte_sz;
+ break;
+ case BPF_CORE_FIELD_SIGNED:
+ *val = (btf_is_any_enum(mt) && BTF_INFO_KFLAG(mt->info)) ||
+ (btf_int_encoding(mt) & BTF_INT_SIGNED);
+ if (validate)
+ *validate = true; /* signedness is never ambiguous */
+ break;
+ case BPF_CORE_FIELD_LSHIFT_U64:
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ *val = 64 - (bit_off + bit_sz - byte_off * 8);
+#else
+ *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
+#endif
+ break;
+ case BPF_CORE_FIELD_RSHIFT_U64:
+ *val = 64 - bit_sz;
+ if (validate)
+ *validate = true; /* right shift is never ambiguous */
+ break;
+ case BPF_CORE_FIELD_EXISTS:
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
+ const struct bpf_core_spec *spec,
+ __u64 *val, bool *validate)
+{
+ __s64 sz;
+
+ /* by default, always check expected value in bpf_insn */
+ if (validate)
+ *validate = true;
+
+ /* type-based relos return zero when target type is not found */
+ if (!spec) {
+ *val = 0;
+ return 0;
+ }
+
+ switch (relo->kind) {
+ case BPF_CORE_TYPE_ID_TARGET:
+ *val = spec->root_type_id;
+ /* type ID, embedded in bpf_insn, might change during linking,
+ * so enforcing it is pointless
+ */
+ if (validate)
+ *validate = false;
+ break;
+ case BPF_CORE_TYPE_EXISTS:
+ case BPF_CORE_TYPE_MATCHES:
+ *val = 1;
+ break;
+ case BPF_CORE_TYPE_SIZE:
+ sz = btf__resolve_size(spec->btf, spec->root_type_id);
+ if (sz < 0)
+ return -EINVAL;
+ *val = sz;
+ break;
+ case BPF_CORE_TYPE_ID_LOCAL:
+ /* BPF_CORE_TYPE_ID_LOCAL is handled specially and shouldn't get here */
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
+ const struct bpf_core_spec *spec,
+ __u64 *val)
+{
+ const struct btf_type *t;
+
+ switch (relo->kind) {
+ case BPF_CORE_ENUMVAL_EXISTS:
+ *val = spec ? 1 : 0;
+ break;
+ case BPF_CORE_ENUMVAL_VALUE:
+ if (!spec)
+ return -EUCLEAN; /* request instruction poisoning */
+ t = btf_type_by_id(spec->btf, spec->spec[0].type_id);
+ if (btf_is_enum(t))
+ *val = btf_enum(t)[spec->spec[0].idx].val;
+ else
+ *val = btf_enum64_value(btf_enum64(t) + spec->spec[0].idx);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+/* Calculate original and target relocation values, given local and target
+ * specs and relocation kind. These values are calculated for each candidate.
+ * If there are multiple candidates, resulting values should all be consistent
+ * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
+ * If instruction has to be poisoned, *poison will be set to true.
+ */
+static int bpf_core_calc_relo(const char *prog_name,
+ const struct bpf_core_relo *relo,
+ int relo_idx,
+ const struct bpf_core_spec *local_spec,
+ const struct bpf_core_spec *targ_spec,
+ struct bpf_core_relo_res *res)
+{
+ int err = -EOPNOTSUPP;
+
+ res->orig_val = 0;
+ res->new_val = 0;
+ res->poison = false;
+ res->validate = true;
+ res->fail_memsz_adjust = false;
+ res->orig_sz = res->new_sz = 0;
+ res->orig_type_id = res->new_type_id = 0;
+
+ if (core_relo_is_field_based(relo->kind)) {
+ err = bpf_core_calc_field_relo(prog_name, relo, local_spec,
+ &res->orig_val, &res->orig_sz,
+ &res->orig_type_id, &res->validate);
+ err = err ?: bpf_core_calc_field_relo(prog_name, relo, targ_spec,
+ &res->new_val, &res->new_sz,
+ &res->new_type_id, NULL);
+ if (err)
+ goto done;
+ /* Validate if it's safe to adjust load/store memory size.
+ * Adjustments are performed only if original and new memory
+ * sizes differ.
+ */
+ res->fail_memsz_adjust = false;
+ if (res->orig_sz != res->new_sz) {
+ const struct btf_type *orig_t, *new_t;
+
+ orig_t = btf_type_by_id(local_spec->btf, res->orig_type_id);
+ new_t = btf_type_by_id(targ_spec->btf, res->new_type_id);
+
+ /* There are two use cases in which it's safe to
+ * adjust load/store's mem size:
+ * - reading a 32-bit kernel pointer, while on the BPF
+ * side pointers are always 64-bit; in this case
+ * it's safe to "downsize" the instruction size, since the
+ * pointer is treated as an unsigned integer with
+ * zero-extended upper 32 bits;
+ * - reading unsigned integers, where zero-extension
+ * again preserves the value correctly.
+ *
+ * In all other cases it's incorrect to attempt to
+ * load/store field because read value will be
+ * incorrect, so we poison relocated instruction.
+ */
+ if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
+ goto done;
+ if (btf_is_int(orig_t) && btf_is_int(new_t) &&
+ btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
+ btf_int_encoding(new_t) != BTF_INT_SIGNED)
+ goto done;
+
+ /* mark as invalid mem size adjustment, but this will
+ * only be checked for LDX/STX/ST insns
+ */
+ res->fail_memsz_adjust = true;
+ }
+ } else if (core_relo_is_type_based(relo->kind)) {
+ err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val, &res->validate);
+ err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val, NULL);
+ } else if (core_relo_is_enumval_based(relo->kind)) {
+ err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
+ err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
+ }
+
+done:
+ if (err == -EUCLEAN) {
+ /* EUCLEAN is used to signal instruction poisoning request */
+ res->poison = true;
+ err = 0;
+ } else if (err == -EOPNOTSUPP) {
+ /* EOPNOTSUPP means unknown/unsupported relocation */
+ pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
+ prog_name, relo_idx, core_relo_kind_str(relo->kind),
+ relo->kind, relo->insn_off / 8);
+ }
+
+ return err;
+}
+
+/*
+ * Turn an instruction for which a CO-RE relocation failed into an invalid one
+ * with a distinct signature.
+ */
+static void bpf_core_poison_insn(const char *prog_name, int relo_idx,
+ int insn_idx, struct bpf_insn *insn)
+{
+ pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
+ prog_name, relo_idx, insn_idx);
+ insn->code = BPF_JMP | BPF_CALL;
+ insn->dst_reg = 0;
+ insn->src_reg = 0;
+ insn->off = 0;
+ /* if this instruction is reachable (not a dead code),
+ * verifier will complain with the following message:
+ * invalid func unknown#195896080
+ */
+ insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
+}
+
+static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
+{
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW: return 8;
+ case BPF_W: return 4;
+ case BPF_H: return 2;
+ case BPF_B: return 1;
+ default: return -1;
+ }
+}
+
+static int insn_bytes_to_bpf_size(__u32 sz)
+{
+ switch (sz) {
+ case 8: return BPF_DW;
+ case 4: return BPF_W;
+ case 2: return BPF_H;
+ case 1: return BPF_B;
+ default: return -1;
+ }
+}
+
+/*
+ * Patch relocatable BPF instruction.
+ *
+ * Patched value is determined by relocation kind and target specification.
+ * For existence relocations target spec will be NULL if field/type is not found.
+ * Expected insn->imm value is determined using relocation kind and local
+ * spec, and is checked before patching instruction. If actual insn->imm value
+ * is wrong, bail out with error.
+ *
+ * Currently supported classes of BPF instruction are:
+ * 1. rX = <imm> (assignment with immediate operand);
+ * 2. rX += <imm> (arithmetic operations with immediate operand);
+ * 3. rX = <imm64> (load with 64-bit immediate value);
+ * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
+ * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
+ * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
+ */
+int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
+ int insn_idx, const struct bpf_core_relo *relo,
+ int relo_idx, const struct bpf_core_relo_res *res)
+{
+ __u64 orig_val, new_val;
+ __u8 class;
+
+ class = BPF_CLASS(insn->code);
+
+ if (res->poison) {
+poison:
+ /* poison second part of ldimm64 to avoid confusing error from
+ * verifier about "unknown opcode 00"
+ */
+ if (is_ldimm64_insn(insn))
+ bpf_core_poison_insn(prog_name, relo_idx, insn_idx + 1, insn + 1);
+ bpf_core_poison_insn(prog_name, relo_idx, insn_idx, insn);
+ return 0;
+ }
+
+ orig_val = res->orig_val;
+ new_val = res->new_val;
+
+ switch (class) {
+ case BPF_ALU:
+ case BPF_ALU64:
+ if (BPF_SRC(insn->code) != BPF_K)
+ return -EINVAL;
+ if (res->validate && insn->imm != orig_val) {
+ pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %llu -> %llu\n",
+ prog_name, relo_idx,
+ insn_idx, insn->imm, (unsigned long long)orig_val,
+ (unsigned long long)new_val);
+ return -EINVAL;
+ }
+ orig_val = insn->imm;
+ insn->imm = new_val;
+ pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %llu -> %llu\n",
+ prog_name, relo_idx, insn_idx,
+ (unsigned long long)orig_val, (unsigned long long)new_val);
+ break;
+ case BPF_LDX:
+ case BPF_ST:
+ case BPF_STX:
+ if (res->validate && insn->off != orig_val) {
+ pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %llu -> %llu\n",
+ prog_name, relo_idx, insn_idx, insn->off, (unsigned long long)orig_val,
+ (unsigned long long)new_val);
+ return -EINVAL;
+ }
+ if (new_val > SHRT_MAX) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %llu\n",
+ prog_name, relo_idx, insn_idx, (unsigned long long)new_val);
+ return -ERANGE;
+ }
+ if (res->fail_memsz_adjust) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
+ "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
+ prog_name, relo_idx, insn_idx);
+ goto poison;
+ }
+
+ orig_val = insn->off;
+ insn->off = new_val;
+ pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %llu -> %llu\n",
+ prog_name, relo_idx, insn_idx, (unsigned long long)orig_val,
+ (unsigned long long)new_val);
+
+ if (res->new_sz != res->orig_sz) {
+ int insn_bytes_sz, insn_bpf_sz;
+
+ insn_bytes_sz = insn_bpf_size_to_bytes(insn);
+ if (insn_bytes_sz != res->orig_sz) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
+ prog_name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
+ return -EINVAL;
+ }
+
+ insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
+ if (insn_bpf_sz < 0) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
+ prog_name, relo_idx, insn_idx, res->new_sz);
+ return -EINVAL;
+ }
+
+ insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
+ pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
+ prog_name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
+ }
+ break;
+ case BPF_LD: {
+ __u64 imm;
+
+ if (!is_ldimm64_insn(insn) ||
+ insn[0].src_reg != 0 || insn[0].off != 0 ||
+ insn[1].code != 0 || insn[1].dst_reg != 0 ||
+ insn[1].src_reg != 0 || insn[1].off != 0) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
+ prog_name, relo_idx, insn_idx);
+ return -EINVAL;
+ }
+
+ imm = (__u32)insn[0].imm | ((__u64)insn[1].imm << 32);
+ if (res->validate && imm != orig_val) {
+ pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %llu -> %llu\n",
+ prog_name, relo_idx,
+ insn_idx, (unsigned long long)imm,
+ (unsigned long long)orig_val, (unsigned long long)new_val);
+ return -EINVAL;
+ }
+
+ insn[0].imm = new_val;
+ insn[1].imm = new_val >> 32;
+ pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %llu\n",
+ prog_name, relo_idx, insn_idx,
+ (unsigned long long)imm, (unsigned long long)new_val);
+ break;
+ }
+ default:
+ pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
+ prog_name, relo_idx, insn_idx, insn->code,
+ insn->src_reg, insn->dst_reg, insn->off, insn->imm);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Output spec definition in the format:
+ * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
+ * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
+ */
+int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
+{
+ const struct btf_type *t;
+ const char *s;
+ __u32 type_id;
+ int i, len = 0;
+
+#define append_buf(fmt, args...) \
+ ({ \
+ int r; \
+ r = snprintf(buf, buf_sz, fmt, ##args); \
+ len += r; \
+ if (r >= buf_sz) \
+ r = buf_sz; \
+ buf += r; \
+ buf_sz -= r; \
+ })
+
+ type_id = spec->root_type_id;
+ t = btf_type_by_id(spec->btf, type_id);
+ s = btf__name_by_offset(spec->btf, t->name_off);
+
+ append_buf("<%s> [%u] %s %s",
+ core_relo_kind_str(spec->relo_kind),
+ type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
+
+ if (core_relo_is_type_based(spec->relo_kind))
+ return len;
+
+ if (core_relo_is_enumval_based(spec->relo_kind)) {
+ t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
+ if (btf_is_enum(t)) {
+ const struct btf_enum *e;
+ const char *fmt_str;
+
+ e = btf_enum(t) + spec->raw_spec[0];
+ s = btf__name_by_offset(spec->btf, e->name_off);
+ fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %d" : "::%s = %u";
+ append_buf(fmt_str, s, e->val);
+ } else {
+ const struct btf_enum64 *e;
+ const char *fmt_str;
+
+ e = btf_enum64(t) + spec->raw_spec[0];
+ s = btf__name_by_offset(spec->btf, e->name_off);
+ fmt_str = BTF_INFO_KFLAG(t->info) ? "::%s = %lld" : "::%s = %llu";
+ append_buf(fmt_str, s, (unsigned long long)btf_enum64_value(e));
+ }
+ return len;
+ }
+
+ if (core_relo_is_field_based(spec->relo_kind)) {
+ for (i = 0; i < spec->len; i++) {
+ if (spec->spec[i].name)
+ append_buf(".%s", spec->spec[i].name);
+ else if (i > 0 || spec->spec[i].idx > 0)
+ append_buf("[%u]", spec->spec[i].idx);
+ }
+
+ append_buf(" (");
+ for (i = 0; i < spec->raw_len; i++)
+ append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
+
+ if (spec->bit_offset % 8)
+ append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
+ else
+ append_buf(" @ offset %u)", spec->bit_offset / 8);
+ return len;
+ }
+
+ return len;
+#undef append_buf
+}
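
For the struct sample example earlier, a byte-offset relocation spec formatted by this helper would read roughly as follows (the type ID is arbitrary):

<byte_off> [7] struct sample.a[3] (0:1:2:3 @ offset 24)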
+
+/*
+ * Calculate CO-RE relocation target result.
+ *
+ * The outline and important points of the algorithm:
+ * 1. For given local type, find corresponding candidate target types.
+ * Candidate type is a type with the same "essential" name, ignoring
+ * everything after last triple underscore (___). E.g., `sample`,
+ * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
+ * for each other. Names with triple underscore are referred to as
+ * "flavors" and are useful, among other things, to allow to
+ * specify/support incompatible variations of the same kernel struct, which
+ * might differ between different kernel versions and/or build
+ * configurations.
+ *
+ * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
+ * converter, when deduplicated BTF of a kernel still contains more than
+ * one distinct type with the same name. In that case, ___2, ___3, etc
+ * are appended starting from the second name conflict. But such flavors
+ * are also useful when defined "locally", in the BPF program, to extract
+ * the same data despite incompatible changes between different kernel
+ * versions/configurations. For instance, to handle field renames between
+ * kernel versions, one can use two flavors of the struct name with the
+ * same common name and use conditional relocations to extract that field,
+ * depending on target kernel version.
+ * 2. For each candidate type, try to match local specification to this
+ * candidate target type. Matching involves finding corresponding
+ * high-level spec accessors, meaning that all named fields should match,
+ * as well as all array accesses should be within the actual bounds. Also,
+ * types should be compatible (see bpf_core_fields_are_compat for details).
+ * 3. It is supported and expected that there might be multiple flavors
+ * matching the spec. As long as all the specs resolve to the same set of
+ * offsets across all candidates, there is no error. If there is any
+ * ambiguity, CO-RE relocation will fail. This is necessary to accommodate
+ * imperfection of BTF deduplication, which can cause slight duplication of
+ * the same BTF type, if some directly or indirectly referenced (by
+ * pointer) type gets resolved to different actual types in different
+ * object files. If such a situation occurs, deduplicated BTF will end up
+ * with two (or more) structurally identical types, which differ only in
+ * types they refer to through pointer. This should be OK in most cases and
+ * is not an error.
+ * 4. Candidate types search is performed by linearly scanning through all
+ * types in target BTF. It is anticipated that this is overall more
+ * efficient memory-wise and not significantly worse (if not better)
+ * CPU-wise compared to prebuilding a map from all local type names to
+ * a list of candidate type names. It's also sped up by caching the resolved
+ * list of matching candidates for each local "root" type ID that has at
+ * least one bpf_core_relo associated with it. This list is shared
+ * between multiple relocations for the same type ID and is updated as some
+ * of the candidates are pruned due to structural incompatibility.
+ */
+int bpf_core_calc_relo_insn(const char *prog_name,
+ const struct bpf_core_relo *relo,
+ int relo_idx,
+ const struct btf *local_btf,
+ struct bpf_core_cand_list *cands,
+ struct bpf_core_spec *specs_scratch,
+ struct bpf_core_relo_res *targ_res)
+{
+ struct bpf_core_spec *local_spec = &specs_scratch[0];
+ struct bpf_core_spec *cand_spec = &specs_scratch[1];
+ struct bpf_core_spec *targ_spec = &specs_scratch[2];
+ struct bpf_core_relo_res cand_res;
+ const struct btf_type *local_type;
+ const char *local_name;
+ __u32 local_id;
+ char spec_buf[256];
+ int i, j, err;
+
+ local_id = relo->type_id;
+ local_type = btf_type_by_id(local_btf, local_id);
+ local_name = btf__name_by_offset(local_btf, local_type->name_off);
+ if (!local_name)
+ return -EINVAL;
+
+ err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
+ if (err) {
+ const char *spec_str;
+
+ spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
+ pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
+ prog_name, relo_idx, local_id, btf_kind_str(local_type),
+ str_is_empty(local_name) ? "<anon>" : local_name,
+ spec_str ?: "<?>", err);
+ return -EINVAL;
+ }
+
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
+ pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);
+
+ /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
+ if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
+ /* bpf_insn's imm value could get out of sync during linking */
+ memset(targ_res, 0, sizeof(*targ_res));
+ targ_res->validate = false;
+ targ_res->poison = false;
+ targ_res->orig_val = local_spec->root_type_id;
+ targ_res->new_val = local_spec->root_type_id;
+ return 0;
+ }
+
+ /* libbpf doesn't support candidate search for anonymous types */
+ if (str_is_empty(local_name)) {
+ pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
+ prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0, j = 0; i < cands->len; i++) {
+ err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
+ cands->cands[i].id, cand_spec);
+ if (err < 0) {
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
+ pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
+ prog_name, relo_idx, i, spec_buf, err);
+ return err;
+ }
+
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
+ pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
+ relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);
+
+ if (err == 0)
+ continue;
+
+ err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, cand_spec, &cand_res);
+ if (err)
+ return err;
+
+ if (j == 0) {
+ *targ_res = cand_res;
+ *targ_spec = *cand_spec;
+ } else if (cand_spec->bit_offset != targ_spec->bit_offset) {
+ /* if there are many field relo candidates, they
+ * should all resolve to the same bit offset
+ */
+ pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
+ prog_name, relo_idx, cand_spec->bit_offset,
+ targ_spec->bit_offset);
+ return -EINVAL;
+ } else if (cand_res.poison != targ_res->poison ||
+ cand_res.new_val != targ_res->new_val) {
+ /* all candidates should result in the same relocation
+ * decision and value, otherwise it's dangerous to
+ * proceed due to ambiguity
+ */
+ pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %llu != %s %llu\n",
+ prog_name, relo_idx,
+ cand_res.poison ? "failure" : "success",
+ (unsigned long long)cand_res.new_val,
+ targ_res->poison ? "failure" : "success",
+ (unsigned long long)targ_res->new_val);
+ return -EINVAL;
+ }
+
+ cands->cands[j++] = cands->cands[i];
+ }
+
+ /*
+ * For a BPF_CORE_FIELD_EXISTS relo, or when the BPF program in use has
+ * field existence checks or kernel version/config checks, it's expected
+ * that we might not find any candidates. In that case, if the field
+ * wasn't found in any candidate, the list of candidates shouldn't
+ * change at all; we'll just handle the relocation appropriately,
+ * depending on relo's kind.
+ */
+ if (j > 0)
+ cands->len = j;
+
+ /*
+ * If no candidates were found, it might be either a programmer error or
+ * an expected case, depending on whether the instruction with the
+ * relocation is guarded in some way that makes it unreachable (dead
+ * code) if the relocation can't be resolved. This is handled in
+ * bpf_core_patch_insn() uniformly by replacing that instruction with
+ * BPF helper call insn (using invalid helper ID). If that instruction
+ * is indeed unreachable, then it will be ignored and eliminated by
+ * verifier. If it was an error, then verifier will complain and point
+ * to a specific instruction number in its log.
+ */
+ if (j == 0) {
+ pr_debug("prog '%s': relo #%d: no matching targets found\n",
+ prog_name, relo_idx);
+
+ /* calculate single target relo result explicitly */
+ err = bpf_core_calc_relo(prog_name, relo, relo_idx, local_spec, NULL, targ_res);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static bool bpf_core_names_match(const struct btf *local_btf, size_t local_name_off,
+ const struct btf *targ_btf, size_t targ_name_off)
+{
+ const char *local_n, *targ_n;
+ size_t local_len, targ_len;
+
+ local_n = btf__name_by_offset(local_btf, local_name_off);
+ targ_n = btf__name_by_offset(targ_btf, targ_name_off);
+
+ if (str_is_empty(targ_n))
+ return str_is_empty(local_n);
+
+ targ_len = bpf_core_essential_name_len(targ_n);
+ local_len = bpf_core_essential_name_len(local_n);
+
+ return targ_len == local_len && strncmp(local_n, targ_n, local_len) == 0;
+}
+
+static int bpf_core_enums_match(const struct btf *local_btf, const struct btf_type *local_t,
+ const struct btf *targ_btf, const struct btf_type *targ_t)
+{
+ __u16 local_vlen = btf_vlen(local_t);
+ __u16 targ_vlen = btf_vlen(targ_t);
+ int i, j;
+
+ if (local_t->size != targ_t->size)
+ return 0;
+
+ if (local_vlen > targ_vlen)
+ return 0;
+
+ /* iterate over the local enum's variants and make sure each has
+ * a corresponding symbolic name in the target
+ */
+ for (i = 0; i < local_vlen; i++) {
+ bool matched = false;
+ __u32 local_n_off, targ_n_off;
+
+ local_n_off = btf_is_enum(local_t) ? btf_enum(local_t)[i].name_off :
+ btf_enum64(local_t)[i].name_off;
+
+ for (j = 0; j < targ_vlen; j++) {
+ targ_n_off = btf_is_enum(targ_t) ? btf_enum(targ_t)[j].name_off :
+ btf_enum64(targ_t)[j].name_off;
+
+ if (bpf_core_names_match(local_btf, local_n_off, targ_btf, targ_n_off)) {
+ matched = true;
+ break;
+ }
+ }
+
+ if (!matched)
+ return 0;
+ }
+ return 1;
+}
+
+static int bpf_core_composites_match(const struct btf *local_btf, const struct btf_type *local_t,
+ const struct btf *targ_btf, const struct btf_type *targ_t,
+ bool behind_ptr, int level)
+{
+ const struct btf_member *local_m = btf_members(local_t);
+ __u16 local_vlen = btf_vlen(local_t);
+ __u16 targ_vlen = btf_vlen(targ_t);
+ int i, j, err;
+
+ if (local_vlen > targ_vlen)
+ return 0;
+
+ /* check that all local members have a match in the target */
+ for (i = 0; i < local_vlen; i++, local_m++) {
+ const struct btf_member *targ_m = btf_members(targ_t);
+ bool matched = false;
+
+ for (j = 0; j < targ_vlen; j++, targ_m++) {
+ if (!bpf_core_names_match(local_btf, local_m->name_off,
+ targ_btf, targ_m->name_off))
+ continue;
+
+ err = __bpf_core_types_match(local_btf, local_m->type, targ_btf,
+ targ_m->type, behind_ptr, level - 1);
+ if (err < 0)
+ return err;
+ if (err > 0) {
+ matched = true;
+ break;
+ }
+ }
+
+ if (!matched)
+ return 0;
+ }
+ return 1;
+}
+
+/* Check that two types "match". This function assumes that root types were
+ * already checked for name match.
+ *
+ * The matching relation is defined as follows:
+ * - modifiers and typedefs are stripped (and, hence, effectively ignored)
+ * - generally speaking types need to be of same kind (struct vs. struct, union
+ * vs. union, etc.)
+ * - exceptions are struct/union behind a pointer which could also match a
+ * forward declaration of a struct or union, respectively, and enum vs.
+ * enum64 (see below)
+ * Then, depending on type:
+ * - integers:
+ * - match if size and signedness match
+ * - arrays & pointers:
+ * - target types are recursively matched
+ * - structs & unions:
+ * - local members need to exist in target with the same name
+ * - for each member we recursively check match unless it is already behind a
+ * pointer, in which case we only check matching names and compatible kind
+ * - enums:
+ * - local variants have to have a match in target by symbolic name (but not
+ * numeric value)
+ * - size has to match (but enum may match enum64 and vice versa)
+ * - function pointers:
+ * - number and position of arguments in local type has to match target
+ * - for each argument and the return value we recursively check match
+ */
+int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
+ __u32 targ_id, bool behind_ptr, int level)
+{
+ const struct btf_type *local_t, *targ_t;
+ int depth = 32; /* max recursion depth */
+ __u16 local_k, targ_k;
+
+ if (level <= 0)
+ return -EINVAL;
+
+recur:
+ depth--;
+ if (depth < 0)
+ return -EINVAL;
+
+ local_t = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+ targ_t = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!local_t || !targ_t)
+ return -EINVAL;
+
+ /* While the name check happens after typedefs are skipped, root-level
+ * typedefs would still be name-matched as that's the contract with
+ * callers.
+ */
+ if (!bpf_core_names_match(local_btf, local_t->name_off, targ_btf, targ_t->name_off))
+ return 0;
+
+ local_k = btf_kind(local_t);
+ targ_k = btf_kind(targ_t);
+
+ switch (local_k) {
+ case BTF_KIND_UNKN:
+ return local_k == targ_k;
+ case BTF_KIND_FWD: {
+ bool local_f = BTF_INFO_KFLAG(local_t->info);
+
+ if (behind_ptr) {
+ if (local_k == targ_k)
+ return local_f == BTF_INFO_KFLAG(targ_t->info);
+
+ /* for forward declarations kflag dictates whether the
+ * target is a struct (0) or union (1)
+ */
+ return (targ_k == BTF_KIND_STRUCT && !local_f) ||
+ (targ_k == BTF_KIND_UNION && local_f);
+ } else {
+ if (local_k != targ_k)
+ return 0;
+
+ /* match if the forward declaration is for the same kind */
+ return local_f == BTF_INFO_KFLAG(targ_t->info);
+ }
+ }
+ case BTF_KIND_ENUM:
+ case BTF_KIND_ENUM64:
+ if (!btf_is_any_enum(targ_t))
+ return 0;
+
+ return bpf_core_enums_match(local_btf, local_t, targ_btf, targ_t);
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ if (behind_ptr) {
+ bool targ_f = BTF_INFO_KFLAG(targ_t->info);
+
+ if (local_k == targ_k)
+ return 1;
+
+ if (targ_k != BTF_KIND_FWD)
+ return 0;
+
+ return (local_k == BTF_KIND_UNION) == targ_f;
+ } else {
+ if (local_k != targ_k)
+ return 0;
+
+ return bpf_core_composites_match(local_btf, local_t, targ_btf, targ_t,
+ behind_ptr, level);
+ }
+ case BTF_KIND_INT: {
+ __u8 local_sgn;
+ __u8 targ_sgn;
+
+ if (local_k != targ_k)
+ return 0;
+
+ local_sgn = btf_int_encoding(local_t) & BTF_INT_SIGNED;
+ targ_sgn = btf_int_encoding(targ_t) & BTF_INT_SIGNED;
+
+ return local_t->size == targ_t->size && local_sgn == targ_sgn;
+ }
+ case BTF_KIND_PTR:
+ if (local_k != targ_k)
+ return 0;
+
+ behind_ptr = true;
+
+ local_id = local_t->type;
+ targ_id = targ_t->type;
+ goto recur;
+ case BTF_KIND_ARRAY: {
+ const struct btf_array *local_array = btf_array(local_t);
+ const struct btf_array *targ_array = btf_array(targ_t);
+
+ if (local_k != targ_k)
+ return 0;
+
+ if (local_array->nelems != targ_array->nelems)
+ return 0;
+
+ local_id = local_array->type;
+ targ_id = targ_array->type;
+ goto recur;
+ }
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *local_p = btf_params(local_t);
+ struct btf_param *targ_p = btf_params(targ_t);
+ __u16 local_vlen = btf_vlen(local_t);
+ __u16 targ_vlen = btf_vlen(targ_t);
+ int i, err;
+
+ if (local_k != targ_k)
+ return 0;
+
+ if (local_vlen != targ_vlen)
+ return 0;
+
+ for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
+ err = __bpf_core_types_match(local_btf, local_p->type, targ_btf,
+ targ_p->type, behind_ptr, level - 1);
+ if (err <= 0)
+ return err;
+ }
+
+ /* tail recurse for return type check */
+ local_id = local_t->type;
+ targ_id = targ_t->type;
+ goto recur;
+ }
+ default:
+ pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
+ btf_kind_str(local_t), local_id, targ_id);
+ return 0;
+ }
+}
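
To make the matching rules above concrete, here is a small illustrative pair of type definitions (not part of this patch) that __bpf_core_types_match() would consider a match: target structs may contain extra members in any order, and a struct referenced only through a pointer needs just a name and kind (or forward declaration) match.

/* local (BPF object) side */
struct bar;
struct foo {
	int x;
	struct bar *b;	/* behind a pointer: only name/kind are checked */
};

/* target (kernel BTF) side: extra members, a different member order, and
 * a still-forward-declared 'struct bar' are all acceptable
 */
struct bar;
struct foo {
	long pad;
	struct bar *b;
	int x;
};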
diff --git a/tools/lib/bpf/relo_core.h b/tools/lib/bpf/relo_core.h
new file mode 100644
index 000000000000..1c0566daf8e8
--- /dev/null
+++ b/tools/lib/bpf/relo_core.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2019 Facebook */
+
+#ifndef __RELO_CORE_H
+#define __RELO_CORE_H
+
+#include <linux/bpf.h>
+
+struct bpf_core_cand {
+ const struct btf *btf;
+ __u32 id;
+};
+
+/* dynamically sized list of type IDs and its associated struct btf */
+struct bpf_core_cand_list {
+ struct bpf_core_cand *cands;
+ int len;
+};
+
+#define BPF_CORE_SPEC_MAX_LEN 64
+
+/* represents BPF CO-RE field or array element accessor */
+struct bpf_core_accessor {
+ __u32 type_id; /* struct/union type or array element type */
+ __u32 idx; /* field index or array index */
+ const char *name; /* field name or NULL for array accessor */
+};
+
+struct bpf_core_spec {
+ const struct btf *btf;
+ /* high-level spec: named fields and array indices only */
+ struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
+ /* original unresolved (no skip_mods_or_typedefs) root type ID */
+ __u32 root_type_id;
+ /* CO-RE relocation kind */
+ enum bpf_core_relo_kind relo_kind;
+ /* high-level spec length */
+ int len;
+ /* raw, low-level spec: 1-to-1 with accessor spec string */
+ int raw_spec[BPF_CORE_SPEC_MAX_LEN];
+ /* raw spec length */
+ int raw_len;
+ /* field bit offset represented by spec */
+ __u32 bit_offset;
+};
+
+struct bpf_core_relo_res {
+ /* expected value in the instruction, unless validate == false */
+ __u64 orig_val;
+ /* new value that needs to be patched up to */
+ __u64 new_val;
+ /* relocation unsuccessful, poison instruction, but don't fail load */
+ bool poison;
+ /* some relocations can't be validated against orig_val */
+ bool validate;
+ /* for field byte offset relocations or the forms:
+ * *(T *)(rX + <off>) = rY
+ * rX = *(T *)(rY + <off>),
+ * we remember original and resolved field size to adjust direct
+ * memory loads of pointers and integers; this is necessary for 32-bit
+ * host kernel architectures, but also makes it possible to automatically
+ * relocate fields that were resized from, e.g., u32 to u64, etc.
+ */
+ bool fail_memsz_adjust;
+ __u32 orig_sz;
+ __u32 orig_type_id;
+ __u32 new_sz;
+ __u32 new_type_id;
+};
+
+int __bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id, int level);
+int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id);
+int __bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
+ __u32 targ_id, bool behind_ptr, int level);
+int bpf_core_types_match(const struct btf *local_btf, __u32 local_id, const struct btf *targ_btf,
+ __u32 targ_id);
+
+size_t bpf_core_essential_name_len(const char *name);
+
+int bpf_core_calc_relo_insn(const char *prog_name,
+ const struct bpf_core_relo *relo, int relo_idx,
+ const struct btf *local_btf,
+ struct bpf_core_cand_list *cands,
+ struct bpf_core_spec *specs_scratch,
+ struct bpf_core_relo_res *targ_res);
+
+int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
+ int insn_idx, const struct bpf_core_relo *relo,
+ int relo_idx, const struct bpf_core_relo_res *res);
+
+int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
+ const struct bpf_core_relo *relo,
+ struct bpf_core_spec *spec);
+
+int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec);
+
+#endif
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
new file mode 100644
index 000000000000..02199364db13
--- /dev/null
+++ b/tools/lib/bpf/ringbuf.c
@@ -0,0 +1,587 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/*
+ * Ring buffer operations.
+ *
+ * Copyright (C) 2020 Facebook, Inc.
+ */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <linux/err.h>
+#include <linux/bpf.h>
+#include <asm/barrier.h>
+#include <sys/mman.h>
+#include <sys/epoll.h>
+#include <time.h>
+
+#include "libbpf.h"
+#include "libbpf_internal.h"
+#include "bpf.h"
+
+struct ring {
+ ring_buffer_sample_fn sample_cb;
+ void *ctx;
+ void *data;
+ unsigned long *consumer_pos;
+ unsigned long *producer_pos;
+ unsigned long mask;
+ int map_fd;
+};
+
+struct ring_buffer {
+ struct epoll_event *events;
+ struct ring *rings;
+ size_t page_size;
+ int epoll_fd;
+ int ring_cnt;
+};
+
+struct user_ring_buffer {
+ struct epoll_event event;
+ unsigned long *consumer_pos;
+ unsigned long *producer_pos;
+ void *data;
+ unsigned long mask;
+ size_t page_size;
+ int map_fd;
+ int epoll_fd;
+};
+
+/* 8-byte ring buffer header structure */
+struct ringbuf_hdr {
+ __u32 len;
+ __u32 pad;
+};
+
+static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
+{
+ if (r->consumer_pos) {
+ munmap(r->consumer_pos, rb->page_size);
+ r->consumer_pos = NULL;
+ }
+ if (r->producer_pos) {
+ munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
+ r->producer_pos = NULL;
+ }
+}
+
+/* Add extra RINGBUF maps to this ring buffer manager */
+int ring_buffer__add(struct ring_buffer *rb, int map_fd,
+ ring_buffer_sample_fn sample_cb, void *ctx)
+{
+ struct bpf_map_info info;
+ __u32 len = sizeof(info);
+ struct epoll_event *e;
+ struct ring *r;
+ __u64 mmap_sz;
+ void *tmp;
+ int err;
+
+ memset(&info, 0, sizeof(info));
+
+ err = bpf_map_get_info_by_fd(map_fd, &info, &len);
+ if (err) {
+ err = -errno;
+ pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
+ map_fd, err);
+ return libbpf_err(err);
+ }
+
+ if (info.type != BPF_MAP_TYPE_RINGBUF) {
+ pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n",
+ map_fd);
+ return libbpf_err(-EINVAL);
+ }
+
+ tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
+ if (!tmp)
+ return libbpf_err(-ENOMEM);
+ rb->rings = tmp;
+
+ tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
+ if (!tmp)
+ return libbpf_err(-ENOMEM);
+ rb->events = tmp;
+
+ r = &rb->rings[rb->ring_cnt];
+ memset(r, 0, sizeof(*r));
+
+ r->map_fd = map_fd;
+ r->sample_cb = sample_cb;
+ r->ctx = ctx;
+ r->mask = info.max_entries - 1;
+
+ /* Map writable consumer page */
+ tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
+ if (tmp == MAP_FAILED) {
+ err = -errno;
+ pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
+ map_fd, err);
+ return libbpf_err(err);
+ }
+ r->consumer_pos = tmp;
+
+ /* Map read-only producer page and data pages. We map the data area at
+ * twice its size to allow simple reading of samples that wrap around
+ * the end of the ring buffer. See the kernel implementation for details.
+ */
+ mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
+ if (mmap_sz != (__u64)(size_t)mmap_sz) {
+ pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
+ return libbpf_err(-E2BIG);
+ }
+ tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
+ if (tmp == MAP_FAILED) {
+ err = -errno;
+ ringbuf_unmap_ring(rb, r);
+ pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
+ map_fd, err);
+ return libbpf_err(err);
+ }
+ r->producer_pos = tmp;
+ r->data = tmp + rb->page_size;
+
+ e = &rb->events[rb->ring_cnt];
+ memset(e, 0, sizeof(*e));
+
+ e->events = EPOLLIN;
+ e->data.fd = rb->ring_cnt;
+ if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
+ err = -errno;
+ ringbuf_unmap_ring(rb, r);
+ pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
+ map_fd, err);
+ return libbpf_err(err);
+ }
+
+ rb->ring_cnt++;
+ return 0;
+}
+
+void ring_buffer__free(struct ring_buffer *rb)
+{
+ int i;
+
+ if (!rb)
+ return;
+
+ for (i = 0; i < rb->ring_cnt; ++i)
+ ringbuf_unmap_ring(rb, &rb->rings[i]);
+ if (rb->epoll_fd >= 0)
+ close(rb->epoll_fd);
+
+ free(rb->events);
+ free(rb->rings);
+ free(rb);
+}
+
+struct ring_buffer *
+ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
+ const struct ring_buffer_opts *opts)
+{
+ struct ring_buffer *rb;
+ int err;
+
+ if (!OPTS_VALID(opts, ring_buffer_opts))
+ return errno = EINVAL, NULL;
+
+ rb = calloc(1, sizeof(*rb));
+ if (!rb)
+ return errno = ENOMEM, NULL;
+
+ rb->page_size = getpagesize();
+
+ rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+ if (rb->epoll_fd < 0) {
+ err = -errno;
+ pr_warn("ringbuf: failed to create epoll instance: %d\n", err);
+ goto err_out;
+ }
+
+ err = ring_buffer__add(rb, map_fd, sample_cb, ctx);
+ if (err)
+ goto err_out;
+
+ return rb;
+
+err_out:
+ ring_buffer__free(rb);
+ return errno = -err, NULL;
+}
+
+static inline int roundup_len(__u32 len)
+{
+ /* clear out top 2 bits (discard and busy, if set) */
+ len <<= 2;
+ len >>= 2;
+ /* add length prefix */
+ len += BPF_RINGBUF_HDR_SZ;
+ /* round up to 8 byte alignment */
+ return (len + 7) / 8 * 8;
+}
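+
+/* For example (illustrative numbers): a committed 20-byte sample has its
+ * flag bits cleared (still 20), gains the 8-byte length header (28), and is
+ * rounded up to 8-byte alignment, so roundup_len(20) == 32 bytes consumed.
+ */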
+
+static int64_t ringbuf_process_ring(struct ring *r)
+{
+ int *len_ptr, len, err;
+ /* 64-bit to avoid overflow in case of extreme application behavior */
+ int64_t cnt = 0;
+ unsigned long cons_pos, prod_pos;
+ bool got_new_data;
+ void *sample;
+
+ cons_pos = smp_load_acquire(r->consumer_pos);
+ do {
+ got_new_data = false;
+ prod_pos = smp_load_acquire(r->producer_pos);
+ while (cons_pos < prod_pos) {
+ len_ptr = r->data + (cons_pos & r->mask);
+ len = smp_load_acquire(len_ptr);
+
+ /* sample not committed yet, bail out for now */
+ if (len & BPF_RINGBUF_BUSY_BIT)
+ goto done;
+
+ got_new_data = true;
+ cons_pos += roundup_len(len);
+
+ if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
+ sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
+ err = r->sample_cb(r->ctx, sample, len);
+ if (err < 0) {
+ /* update consumer pos and bail out */
+ smp_store_release(r->consumer_pos,
+ cons_pos);
+ return err;
+ }
+ cnt++;
+ }
+
+ smp_store_release(r->consumer_pos, cons_pos);
+ }
+ } while (got_new_data);
+done:
+ return cnt;
+}
+
+/* Consume available ring buffer(s) data without event polling.
+ * Returns the number of records consumed across all registered ring buffers (or
+ * INT_MAX, whichever is less), or a negative number if any of the callbacks
+ * returns an error.
+ */
+int ring_buffer__consume(struct ring_buffer *rb)
+{
+ int64_t err, res = 0;
+ int i;
+
+ for (i = 0; i < rb->ring_cnt; i++) {
+ struct ring *ring = &rb->rings[i];
+
+ err = ringbuf_process_ring(ring);
+ if (err < 0)
+ return libbpf_err(err);
+ res += err;
+ }
+ if (res > INT_MAX)
+ return INT_MAX;
+ return res;
+}
+
+/* Poll for available data and consume records, if any are available.
+ * Returns the number of records consumed (or INT_MAX, whichever is less), or
+ * a negative number if any of the registered callbacks returned an error.
+ */
+int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
+{
+ int i, cnt;
+ int64_t err, res = 0;
+
+ cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
+ if (cnt < 0)
+ return libbpf_err(-errno);
+
+ for (i = 0; i < cnt; i++) {
+ __u32 ring_id = rb->events[i].data.fd;
+ struct ring *ring = &rb->rings[ring_id];
+
+ err = ringbuf_process_ring(ring);
+ if (err < 0)
+ return libbpf_err(err);
+ res += err;
+ }
+ if (res > INT_MAX)
+ return INT_MAX;
+ return res;
+}
+
+/* Get an fd that can be used to sleep until data is available in the ring(s) */
+int ring_buffer__epoll_fd(const struct ring_buffer *rb)
+{
+ return rb->epoll_fd;
+}
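+
+/* A minimal consumer-side usage sketch (illustrative only; 'map_fd',
+ * 'exiting' and the callback are placeholders, with the fd typically coming
+ * from a skeleton or bpf_object__find_map_fd_by_name()):
+ *
+ *	static int handle_sample(void *ctx, void *data, size_t size)
+ *	{
+ *		// 'data' points at one committed sample of 'size' bytes;
+ *		// a negative return stops consumption and is propagated
+ *		return 0;
+ *	}
+ *
+ *	rb = ring_buffer__new(map_fd, handle_sample, NULL, NULL);
+ *	if (!rb)
+ *		return -errno;
+ *	while (!exiting)
+ *		ring_buffer__poll(rb, 100); // timeout in ms
+ *	ring_buffer__free(rb);
+ */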
+
+static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
+{
+ if (rb->consumer_pos) {
+ munmap(rb->consumer_pos, rb->page_size);
+ rb->consumer_pos = NULL;
+ }
+ if (rb->producer_pos) {
+ munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1));
+ rb->producer_pos = NULL;
+ }
+}
+
+void user_ring_buffer__free(struct user_ring_buffer *rb)
+{
+ if (!rb)
+ return;
+
+ user_ringbuf_unmap_ring(rb);
+
+ if (rb->epoll_fd >= 0)
+ close(rb->epoll_fd);
+
+ free(rb);
+}
+
+static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
+{
+ struct bpf_map_info info;
+ __u32 len = sizeof(info);
+ __u64 mmap_sz;
+ void *tmp;
+ struct epoll_event *rb_epoll;
+ int err;
+
+ memset(&info, 0, sizeof(info));
+
+ err = bpf_map_get_info_by_fd(map_fd, &info, &len);
+ if (err) {
+ err = -errno;
+ pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
+ return err;
+ }
+
+ if (info.type != BPF_MAP_TYPE_USER_RINGBUF) {
+ pr_warn("user ringbuf: map fd=%d is not BPF_MAP_TYPE_USER_RINGBUF\n", map_fd);
+ return -EINVAL;
+ }
+
+ rb->map_fd = map_fd;
+ rb->mask = info.max_entries - 1;
+
+ /* Map read-only consumer page */
+ tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
+ if (tmp == MAP_FAILED) {
+ err = -errno;
+ pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
+ map_fd, err);
+ return err;
+ }
+ rb->consumer_pos = tmp;
+
+ /* Map the producer page and data pages read-write. We map the data
+ * region at twice the total size of the ring buffer to allow simple
+ * reading and writing of samples that wrap around the end of the
+ * buffer. See the kernel implementation for details.
+ */
+ mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
+ if (mmap_sz != (__u64)(size_t)mmap_sz) {
+ pr_warn("user ringbuf: ring buf size (%u) is too big\n", info.max_entries);
+ return -E2BIG;
+ }
+ tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
+ map_fd, rb->page_size);
+ if (tmp == MAP_FAILED) {
+ err = -errno;
+ pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
+ map_fd, err);
+ return err;
+ }
+
+ rb->producer_pos = tmp;
+ rb->data = tmp + rb->page_size;
+
+ rb_epoll = &rb->event;
+ rb_epoll->events = EPOLLOUT;
+ if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
+ err = -errno;
+ pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err);
+ return err;
+ }
+
+ return 0;
+}
+
+struct user_ring_buffer *
+user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts)
+{
+ struct user_ring_buffer *rb;
+ int err;
+
+ if (!OPTS_VALID(opts, user_ring_buffer_opts))
+ return errno = EINVAL, NULL;
+
+ rb = calloc(1, sizeof(*rb));
+ if (!rb)
+ return errno = ENOMEM, NULL;
+
+ rb->page_size = getpagesize();
+
+ rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
+ if (rb->epoll_fd < 0) {
+ err = -errno;
+ pr_warn("user ringbuf: failed to create epoll instance: %d\n", err);
+ goto err_out;
+ }
+
+ err = user_ringbuf_map(rb, map_fd);
+ if (err)
+ goto err_out;
+
+ return rb;
+
+err_out:
+ user_ring_buffer__free(rb);
+ return errno = -err, NULL;
+}
+
+static void user_ringbuf_commit(struct user_ring_buffer *rb, void *sample, bool discard)
+{
+ __u32 new_len;
+ struct ringbuf_hdr *hdr;
+ uintptr_t hdr_offset;
+
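+ /* The sample header lives BPF_RINGBUF_HDR_SZ bytes before the pointer
+ * returned by user_ring_buffer__reserve(). Adding the ring size before
+ * masking avoids underflow of the unsigned offset when the sample
+ * wrapped around to the start of the data area.
+ */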
+ hdr_offset = rb->mask + 1 + (sample - rb->data) - BPF_RINGBUF_HDR_SZ;
+ hdr = rb->data + (hdr_offset & rb->mask);
+
+ new_len = hdr->len & ~BPF_RINGBUF_BUSY_BIT;
+ if (discard)
+ new_len |= BPF_RINGBUF_DISCARD_BIT;
+
+ /* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in
+ * the kernel.
+ */
+ __atomic_exchange_n(&hdr->len, new_len, __ATOMIC_ACQ_REL);
+}
+
+void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample)
+{
+ user_ringbuf_commit(rb, sample, true);
+}
+
+void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample)
+{
+ user_ringbuf_commit(rb, sample, false);
+}
+
+void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size)
+{
+ __u32 avail_size, total_size, max_size;
+ /* 64-bit to avoid overflow in case of extreme application behavior */
+ __u64 cons_pos, prod_pos;
+ struct ringbuf_hdr *hdr;
+
+ /* The top two bits are used as special flags */
+ if (size & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT))
+ return errno = E2BIG, NULL;
+
+ /* Synchronizes with smp_store_release() in __bpf_user_ringbuf_peek() in
+ * the kernel.
+ */
+ cons_pos = smp_load_acquire(rb->consumer_pos);
+ /* Synchronizes with smp_store_release() in user_ringbuf_commit() */
+ prod_pos = smp_load_acquire(rb->producer_pos);
+
+ max_size = rb->mask + 1;
+ avail_size = max_size - (prod_pos - cons_pos);
+ /* Round up total size to a multiple of 8. */
+ total_size = (size + BPF_RINGBUF_HDR_SZ + 7) / 8 * 8;
+
+ if (total_size > max_size)
+ return errno = E2BIG, NULL;
+
+ if (avail_size < total_size)
+ return errno = ENOSPC, NULL;
+
+ hdr = rb->data + (prod_pos & rb->mask);
+ hdr->len = size | BPF_RINGBUF_BUSY_BIT;
+ hdr->pad = 0;
+
+ /* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in
+ * the kernel.
+ */
+ smp_store_release(rb->producer_pos, prod_pos + total_size);
+
+ return (void *)rb->data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & rb->mask);
+}
+
+static __u64 ns_elapsed_timespec(const struct timespec *start, const struct timespec *end)
+{
+ __u64 start_ns, end_ns, ns_per_s = 1000000000;
+
+ start_ns = (__u64)start->tv_sec * ns_per_s + start->tv_nsec;
+ end_ns = (__u64)end->tv_sec * ns_per_s + end->tv_nsec;
+
+ return end_ns - start_ns;
+}
+
+void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms)
+{
+ void *sample;
+ int err, ms_remaining = timeout_ms;
+ struct timespec start;
+
+ if (timeout_ms < 0 && timeout_ms != -1)
+ return errno = EINVAL, NULL;
+
+ if (timeout_ms != -1) {
+ err = clock_gettime(CLOCK_MONOTONIC, &start);
+ if (err)
+ return NULL;
+ }
+
+ do {
+ int cnt, ms_elapsed;
+ struct timespec curr;
+ __u64 ns_per_ms = 1000000;
+
+ sample = user_ring_buffer__reserve(rb, size);
+ if (sample)
+ return sample;
+ else if (errno != ENOSPC)
+ return NULL;
+
+ /* The kernel guarantees at least one event notification
+ * delivery whenever at least one sample is drained from the
+ * ring buffer in an invocation to bpf_ringbuf_drain(). Other
+ * additional events may be delivered at any time, but only one
+ * event is guaranteed per bpf_ringbuf_drain() invocation,
+ * provided that a sample is drained, and the BPF program did
+ * not pass BPF_RB_NO_WAKEUP to bpf_ringbuf_drain(). If
+ * BPF_RB_FORCE_WAKEUP is passed to bpf_ringbuf_drain(), a
+ * wakeup event will be delivered even if no samples are
+ * drained.
+ */
+ cnt = epoll_wait(rb->epoll_fd, &rb->event, 1, ms_remaining);
+ if (cnt < 0)
+ return NULL;
+
+ if (timeout_ms == -1)
+ continue;
+
+ err = clock_gettime(CLOCK_MONOTONIC, &curr);
+ if (err)
+ return NULL;
+
+ ms_elapsed = ns_elapsed_timespec(&start, &curr) / ns_per_ms;
+ ms_remaining = timeout_ms - ms_elapsed;
+ } while (ms_remaining > 0);
+
+ /* Try one more time to reserve a sample after the specified timeout has elapsed. */
+ return user_ring_buffer__reserve(rb, size);
+}
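
A minimal producer-side sketch of the user ring buffer API above (illustrative only; the map fd and the struct my_msg sample layout are hypothetical placeholders):

	struct user_ring_buffer *rb;
	struct my_msg *msg;	/* hypothetical application-defined sample */

	rb = user_ring_buffer__new(map_fd /* BPF_MAP_TYPE_USER_RINGBUF fd */, NULL);
	if (!rb)
		return -errno;

	msg = user_ring_buffer__reserve(rb, sizeof(*msg));
	if (!msg) {
		/* errno is E2BIG or ENOSPC, as documented above */
		user_ring_buffer__free(rb);
		return -errno;
	}
	msg->value = 42;
	/* user_ring_buffer__submit() (or __discard()) clears the busy bit and
	 * makes the sample visible to the draining BPF program
	 */
	user_ring_buffer__submit(rb, msg);
	user_ring_buffer__free(rb);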
diff --git a/tools/lib/bpf/skel_internal.h b/tools/lib/bpf/skel_internal.h
new file mode 100644
index 000000000000..1e82ab06c3eb
--- /dev/null
+++ b/tools/lib/bpf/skel_internal.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2021 Facebook */
+#ifndef __SKEL_INTERNAL_H
+#define __SKEL_INTERNAL_H
+
+#ifdef __KERNEL__
+#include <linux/fdtable.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#else
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/mman.h>
+#include <stdlib.h>
+#include "bpf.h"
+#endif
+
+#ifndef __NR_bpf
+# if defined(__mips__) && defined(_ABIO32)
+# define __NR_bpf 4355
+# elif defined(__mips__) && defined(_ABIN32)
+# define __NR_bpf 6319
+# elif defined(__mips__) && defined(_ABI64)
+# define __NR_bpf 5315
+# endif
+#endif
+
+/* This file is a base header for auto-generated *.lskel.h files.
+ * Its contents will change and may become part of auto-generation in the future.
+ *
+ * The layout of bpf_[map|prog]_desc and bpf_loader_ctx depends on the features
+ * requested during loader program generation and will change from one version
+ * of libbpf to another.
+ */
+struct bpf_map_desc {
+ /* output of the loader prog */
+ int map_fd;
+ /* input for the loader prog */
+ __u32 max_entries;
+ __aligned_u64 initial_value;
+};
+struct bpf_prog_desc {
+ int prog_fd;
+};
+
+enum {
+ BPF_SKEL_KERNEL = (1ULL << 0),
+};
+
+struct bpf_loader_ctx {
+ __u32 sz;
+ __u32 flags;
+ __u32 log_level;
+ __u32 log_size;
+ __u64 log_buf;
+};
+
+struct bpf_load_and_run_opts {
+ struct bpf_loader_ctx *ctx;
+ const void *data;
+ const void *insns;
+ __u32 data_sz;
+ __u32 insns_sz;
+ const char *errstr;
+};
+
+long kern_sys_bpf(__u32 cmd, void *attr, __u32 attr_size);
+
+static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
+ unsigned int size)
+{
+#ifdef __KERNEL__
+ return kern_sys_bpf(cmd, attr, size);
+#else
+ return syscall(__NR_bpf, cmd, attr, size);
+#endif
+}
+
+#ifdef __KERNEL__
+static inline int close(int fd)
+{
+ return close_fd(fd);
+}
+
+static inline void *skel_alloc(size_t size)
+{
+ struct bpf_loader_ctx *ctx = kzalloc(size, GFP_KERNEL);
+
+ if (!ctx)
+ return NULL;
+ ctx->flags |= BPF_SKEL_KERNEL;
+ return ctx;
+}
+
+static inline void skel_free(const void *p)
+{
+ kfree(p);
+}
+
+/* skel->bss/rodata maps are populated the following way:
+ *
+ * For kernel use:
+ * skel_prep_map_data() allocates kernel memory that kernel module can directly access.
+ * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
+ * The loader program will perform probe_read_kernel() from maps.rodata.initial_value.
+ * skel_finalize_map_data() sets skel->rodata to point to actual value in a bpf map and
+ * does maps.rodata.initial_value = ~0ULL to signal skel_free_map_data() that kvfree
+ * is not necessary.
+ *
+ * For user space:
+ * skel_prep_map_data() mmaps anon memory into skel->rodata that can be accessed directly.
+ * Generated lskel stores the pointer in skel->rodata and in skel->maps.rodata.initial_value.
+ * The loader program will perform copy_from_user() from maps.rodata.initial_value.
+ * skel_finalize_map_data() remaps bpf array map value from the kernel memory into
+ * skel->rodata address.
+ *
+ * The "bpftool gen skeleton -L" command generates lskel.h that is suitable for
+ * both kernel and user space. The generated loader program does
+ * either bpf_probe_read_kernel() or bpf_copy_from_user() from initial_value
+ * depending on bpf_loader_ctx->flags.
+ */
+static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
+{
+ if (addr != ~0ULL)
+ kvfree(p);
+ /* When addr == ~0ULL the 'p' points to
+ * ((struct bpf_array *)map)->value. See skel_finalize_map_data.
+ */
+}
+
+static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
+{
+ void *addr;
+
+ addr = kvmalloc(val_sz, GFP_KERNEL);
+ if (!addr)
+ return NULL;
+ memcpy(addr, val, val_sz);
+ return addr;
+}
+
+static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
+{
+ struct bpf_map *map;
+ void *addr = NULL;
+
+ kvfree((void *) (long) *init_val);
+ *init_val = ~0ULL;
+
+ /* At this point bpf_load_and_run() finished without error and
+ * 'fd' is a valid bpf map FD. All sanity checks below should succeed.
+ */
+ map = bpf_map_get(fd);
+ if (IS_ERR(map))
+ return NULL;
+ if (map->map_type != BPF_MAP_TYPE_ARRAY)
+ goto out;
+ addr = ((struct bpf_array *)map)->value;
+ /* the addr stays valid, since FD is not closed */
+out:
+ bpf_map_put(map);
+ return addr;
+}
+
+#else
+
+static inline void *skel_alloc(size_t size)
+{
+ return calloc(1, size);
+}
+
+static inline void skel_free(void *p)
+{
+ free(p);
+}
+
+static inline void skel_free_map_data(void *p, __u64 addr, size_t sz)
+{
+ munmap(p, sz);
+}
+
+static inline void *skel_prep_map_data(const void *val, size_t mmap_sz, size_t val_sz)
+{
+ void *addr;
+
+ addr = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == (void *) -1)
+ return NULL;
+ memcpy(addr, val, val_sz);
+ return addr;
+}
+
+static inline void *skel_finalize_map_data(__u64 *init_val, size_t mmap_sz, int flags, int fd)
+{
+ void *addr;
+
+ addr = mmap((void *) (long) *init_val, mmap_sz, flags, MAP_SHARED | MAP_FIXED, fd, 0);
+ if (addr == (void *) -1)
+ return NULL;
+ return addr;
+}
+#endif
+
+static inline int skel_closenz(int fd)
+{
+ if (fd > 0)
+ return close(fd);
+ return -EINVAL;
+}
+
+#ifndef offsetofend
+#define offsetofend(TYPE, MEMBER) \
+ (offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
+#endif
+
+static inline int skel_map_create(enum bpf_map_type map_type,
+ const char *map_name,
+ __u32 key_size,
+ __u32 value_size,
+ __u32 max_entries)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, map_extra);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_sz);
+
+ attr.map_type = map_type;
+ strncpy(attr.map_name, map_name, sizeof(attr.map_name));
+ attr.key_size = key_size;
+ attr.value_size = value_size;
+ attr.max_entries = max_entries;
+
+ return skel_sys_bpf(BPF_MAP_CREATE, &attr, attr_sz);
+}
+
+static inline int skel_map_update_elem(int fd, const void *key,
+ const void *value, __u64 flags)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_sz);
+ attr.map_fd = fd;
+ attr.key = (long) key;
+ attr.value = (long) value;
+ attr.flags = flags;
+
+ return skel_sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, attr_sz);
+}
+
+static inline int skel_map_delete_elem(int fd, const void *key)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_sz);
+ attr.map_fd = fd;
+ attr.key = (long)key;
+
+ return skel_sys_bpf(BPF_MAP_DELETE_ELEM, &attr, attr_sz);
+}
+
+static inline int skel_map_get_fd_by_id(__u32 id)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, flags);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_sz);
+ attr.map_id = id;
+
+ return skel_sys_bpf(BPF_MAP_GET_FD_BY_ID, &attr, attr_sz);
+}
+
+static inline int skel_raw_tracepoint_open(const char *name, int prog_fd)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, raw_tracepoint.prog_fd);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_sz);
+ attr.raw_tracepoint.name = (long) name;
+ attr.raw_tracepoint.prog_fd = prog_fd;
+
+ return skel_sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, attr_sz);
+}
+
+static inline int skel_link_create(int prog_fd, int target_fd,
+ enum bpf_attach_type attach_type)
+{
+ const size_t attr_sz = offsetofend(union bpf_attr, link_create.iter_info_len);
+ union bpf_attr attr;
+
+ memset(&attr, 0, attr_sz);
+ attr.link_create.prog_fd = prog_fd;
+ attr.link_create.target_fd = target_fd;
+ attr.link_create.attach_type = attach_type;
+
+ return skel_sys_bpf(BPF_LINK_CREATE, &attr, attr_sz);
+}
+
+#ifdef __KERNEL__
+#define set_err
+#else
+#define set_err err = -errno
+#endif
+
+static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
+{
+ const size_t prog_load_attr_sz = offsetofend(union bpf_attr, fd_array);
+ const size_t test_run_attr_sz = offsetofend(union bpf_attr, test);
+ int map_fd = -1, prog_fd = -1, key = 0, err;
+ union bpf_attr attr;
+
+ err = map_fd = skel_map_create(BPF_MAP_TYPE_ARRAY, "__loader.map", 4, opts->data_sz, 1);
+ if (map_fd < 0) {
+ opts->errstr = "failed to create loader map";
+ set_err;
+ goto out;
+ }
+
+ err = skel_map_update_elem(map_fd, &key, opts->data, 0);
+ if (err < 0) {
+ opts->errstr = "failed to update loader map";
+ set_err;
+ goto out;
+ }
+
+ memset(&attr, 0, prog_load_attr_sz);
+ attr.prog_type = BPF_PROG_TYPE_SYSCALL;
+ attr.insns = (long) opts->insns;
+ attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
+ attr.license = (long) "Dual BSD/GPL";
+ memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
+ attr.fd_array = (long) &map_fd;
+ attr.log_level = opts->ctx->log_level;
+ attr.log_size = opts->ctx->log_size;
+ attr.log_buf = opts->ctx->log_buf;
+ attr.prog_flags = BPF_F_SLEEPABLE;
+ err = prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, prog_load_attr_sz);
+ if (prog_fd < 0) {
+ opts->errstr = "failed to load loader prog";
+ set_err;
+ goto out;
+ }
+
+ memset(&attr, 0, test_run_attr_sz);
+ attr.test.prog_fd = prog_fd;
+ attr.test.ctx_in = (long) opts->ctx;
+ attr.test.ctx_size_in = opts->ctx->sz;
+ err = skel_sys_bpf(BPF_PROG_RUN, &attr, test_run_attr_sz);
+ if (err < 0 || (int)attr.test.retval < 0) {
+ opts->errstr = "failed to execute loader prog";
+ if (err < 0) {
+ set_err;
+ } else {
+ err = (int)attr.test.retval;
+#ifndef __KERNEL__
+ errno = -err;
+#endif
+ }
+ goto out;
+ }
+ err = 0;
+out:
+ if (map_fd >= 0)
+ close(map_fd);
+ if (prog_fd >= 0)
+ close(prog_fd);
+ return err;
+}
+
+#endif
diff --git a/tools/lib/bpf/strset.c b/tools/lib/bpf/strset.c
new file mode 100644
index 000000000000..2464bcbd04e0
--- /dev/null
+++ b/tools/lib/bpf/strset.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2021 Facebook */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <linux/err.h>
+#include "hashmap.h"
+#include "libbpf_internal.h"
+#include "strset.h"
+
+struct strset {
+ void *strs_data;
+ size_t strs_data_len;
+ size_t strs_data_cap;
+ size_t strs_data_max_len;
+
+ /* lookup index for each unique string in strings set */
+ struct hashmap *strs_hash;
+};
+
+static size_t strset_hash_fn(long key, void *ctx)
+{
+ const struct strset *s = ctx;
+ const char *str = s->strs_data + key;
+
+ return str_hash(str);
+}
+
+static bool strset_equal_fn(long key1, long key2, void *ctx)
+{
+ const struct strset *s = ctx;
+ const char *str1 = s->strs_data + key1;
+ const char *str2 = s->strs_data + key2;
+
+ return strcmp(str1, str2) == 0;
+}
+
+struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz)
+{
+ struct strset *set = calloc(1, sizeof(*set));
+ struct hashmap *hash;
+ int err = -ENOMEM;
+
+ if (!set)
+ return ERR_PTR(-ENOMEM);
+
+ hash = hashmap__new(strset_hash_fn, strset_equal_fn, set);
+ if (IS_ERR(hash))
+ goto err_out;
+
+ set->strs_data_max_len = max_data_sz;
+ set->strs_hash = hash;
+
+ if (init_data) {
+ long off;
+
+ set->strs_data = malloc(init_data_sz);
+ if (!set->strs_data)
+ goto err_out;
+
+ memcpy(set->strs_data, init_data, init_data_sz);
+ set->strs_data_len = init_data_sz;
+ set->strs_data_cap = init_data_sz;
+
+ for (off = 0; off < set->strs_data_len; off += strlen(set->strs_data + off) + 1) {
+ /* hashmap__add() returns EEXIST if string with the same
+ * content already is in the hash map
+ */
+ err = hashmap__add(hash, off, off);
+ if (err == -EEXIST)
+ continue; /* duplicate */
+ if (err)
+ goto err_out;
+ }
+ }
+
+ return set;
+err_out:
+ strset__free(set);
+ return ERR_PTR(err);
+}
+
+void strset__free(struct strset *set)
+{
+ if (IS_ERR_OR_NULL(set))
+ return;
+
+ hashmap__free(set->strs_hash);
+ free(set->strs_data);
+ free(set);
+}
+
+size_t strset__data_size(const struct strset *set)
+{
+ return set->strs_data_len;
+}
+
+const char *strset__data(const struct strset *set)
+{
+ return set->strs_data;
+}
+
+static void *strset_add_str_mem(struct strset *set, size_t add_sz)
+{
+ return libbpf_add_mem(&set->strs_data, &set->strs_data_cap, 1,
+ set->strs_data_len, set->strs_data_max_len, add_sz);
+}
+
+/* Find string offset that corresponds to a given string *s*.
+ * Returns:
+ * - >0 offset into string data, if string is found;
+ * - -ENOENT, if string is not in the string data;
+ * - <0, on any other error.
+ */
+int strset__find_str(struct strset *set, const char *s)
+{
+ long old_off, new_off, len;
+ void *p;
+
+ /* see strset__add_str() for why we do this */
+ len = strlen(s) + 1;
+ p = strset_add_str_mem(set, len);
+ if (!p)
+ return -ENOMEM;
+
+ new_off = set->strs_data_len;
+ memcpy(p, s, len);
+
+ if (hashmap__find(set->strs_hash, new_off, &old_off))
+ return old_off;
+
+ return -ENOENT;
+}
+
+/* Add a string s to the string data. If the string already exists, return its
+ * offset within string data.
+ * Returns:
+ * - > 0 offset into string data, on success;
+ * - < 0, on error.
+ */
+int strset__add_str(struct strset *set, const char *s)
+{
+ long old_off, new_off, len;
+ void *p;
+ int err;
+
+ /* Hashmap keys are always offsets within set->strs_data, so to even
+ * look up some string from the "outside", we need to first append it
+ * at the end, so that it can be addressed with an offset. Luckily,
+ * until set->strs_data_len is incremented, that string is just a piece
+ * of garbage for the rest of the code, so no harm, no foul. On the
+ * other hand, if the string is unique, it's already appended and
+ * ready to be used, only a simple set->strs_data_len increment away.
+ */
+ len = strlen(s) + 1;
+ p = strset_add_str_mem(set, len);
+ if (!p)
+ return -ENOMEM;
+
+ new_off = set->strs_data_len;
+ memcpy(p, s, len);
+
+ /* Now attempt to add the string, but only if the string with the same
+ * contents doesn't exist already (HASHMAP_ADD strategy). If such
+ * string exists, we'll get its offset in old_off (that's old_key).
+ */
+ err = hashmap__insert(set->strs_hash, new_off, new_off,
+ HASHMAP_ADD, &old_off, NULL);
+ if (err == -EEXIST)
+ return old_off; /* duplicated string, return existing offset */
+ if (err)
+ return err;
+
+ set->strs_data_len += len; /* new unique string, adjust data length */
+ return new_off;
+}
diff --git a/tools/lib/bpf/strset.h b/tools/lib/bpf/strset.h
new file mode 100644
index 000000000000..b6ddf77a83c2
--- /dev/null
+++ b/tools/lib/bpf/strset.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/* Copyright (c) 2021 Facebook */
+#ifndef __LIBBPF_STRSET_H
+#define __LIBBPF_STRSET_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+struct strset;
+
+struct strset *strset__new(size_t max_data_sz, const char *init_data, size_t init_data_sz);
+void strset__free(struct strset *set);
+
+const char *strset__data(const struct strset *set);
+size_t strset__data_size(const struct strset *set);
+
+int strset__find_str(struct strset *set, const char *s);
+int strset__add_str(struct strset *set, const char *s);
+
+#endif /* __LIBBPF_STRSET_H */
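
A brief sketch of how this internal string set API is typically used (illustrative only; the size cap and the strings are arbitrary):

	struct strset *set;
	int off, dup;

	/* offset 0 conventionally holds the empty string, as in BTF string data */
	set = strset__new(64 * 1024 /* arbitrary cap */, "", 1);
	if (IS_ERR(set))
		return PTR_ERR(set);

	off = strset__add_str(set, "task_struct");	/* new string: returns its offset */
	dup = strset__add_str(set, "task_struct");	/* deduplicated: dup == off */
	/* strset__find_str(set, "foo") would return -ENOENT: never added */

	/* strset__data()/strset__data_size() expose the concatenated,
	 * NUL-separated blob, e.g. for emitting a BTF string section
	 */
	strset__free(set);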
diff --git a/tools/lib/bpf/usdt.bpf.h b/tools/lib/bpf/usdt.bpf.h
new file mode 100644
index 000000000000..0bd4c135acc2
--- /dev/null
+++ b/tools/lib/bpf/usdt.bpf.h
@@ -0,0 +1,250 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#ifndef __USDT_BPF_H__
+#define __USDT_BPF_H__
+
+#include <linux/errno.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* Below types and maps are internal implementation details of libbpf's USDT
+ * support and are subject to change. Also, bpf_usdt_xxx() API helpers should
+ * be considered an unstable API as well and might be adjusted based on user
+ * feedback from using libbpf's USDT support in production.
+ */
+
+/* User can override BPF_USDT_MAX_SPEC_CNT to change default size of internal
+ * map that keeps track of USDT argument specifications. This might be
+ * necessary if there are a lot of USDT attachments.
+ */
+#ifndef BPF_USDT_MAX_SPEC_CNT
+#define BPF_USDT_MAX_SPEC_CNT 256
+#endif
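+/* For example (illustrative), a BPF object attaching to very many USDTs
+ * could bump the limit before including this header:
+ *
+ *	#define BPF_USDT_MAX_SPEC_CNT 1024
+ *	#include "usdt.bpf.h"
+ */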
+/* User can override BPF_USDT_MAX_IP_CNT to change default size of internal
+ * map that keeps track of IP (memory address) mapping to USDT argument
+ * specification.
+ * Note, if kernel supports BPF cookies, this map is not used and could be
+ * resized all the way to 1 to save a bit of memory.
+ */
+#ifndef BPF_USDT_MAX_IP_CNT
+#define BPF_USDT_MAX_IP_CNT (4 * BPF_USDT_MAX_SPEC_CNT)
+#endif
+
+enum __bpf_usdt_arg_type {
+ BPF_USDT_ARG_CONST,
+ BPF_USDT_ARG_REG,
+ BPF_USDT_ARG_REG_DEREF,
+};
+
+struct __bpf_usdt_arg_spec {
+ /* u64 scalar interpreted depending on arg_type, see below */
+ __u64 val_off;
+ /* arg location case, see bpf_usdt_arg() for details */
+ enum __bpf_usdt_arg_type arg_type;
+ /* offset of referenced register within struct pt_regs */
+ short reg_off;
+ /* whether arg should be interpreted as signed value */
+ bool arg_signed;
+ /* number of bits that need to be cleared and, optionally,
+ * sign-extended to cast arguments that are 1, 2, or 4 bytes
+ * long into final 8-byte u64/s64 value returned to user
+ */
+ char arg_bitshift;
+};
+
+/* should match USDT_MAX_ARG_CNT in usdt.c exactly */
+#define BPF_USDT_MAX_ARG_CNT 12
+struct __bpf_usdt_spec {
+ struct __bpf_usdt_arg_spec args[BPF_USDT_MAX_ARG_CNT];
+ __u64 usdt_cookie;
+ short arg_cnt;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, BPF_USDT_MAX_SPEC_CNT);
+ __type(key, int);
+ __type(value, struct __bpf_usdt_spec);
+} __bpf_usdt_specs SEC(".maps") __weak;
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, BPF_USDT_MAX_IP_CNT);
+ __type(key, long);
+ __type(value, __u32);
+} __bpf_usdt_ip_to_spec_id SEC(".maps") __weak;
+
+extern const _Bool LINUX_HAS_BPF_COOKIE __kconfig;
+
+static __always_inline
+int __bpf_usdt_spec_id(struct pt_regs *ctx)
+{
+ if (!LINUX_HAS_BPF_COOKIE) {
+ long ip = PT_REGS_IP(ctx);
+ int *spec_id_ptr;
+
+ spec_id_ptr = bpf_map_lookup_elem(&__bpf_usdt_ip_to_spec_id, &ip);
+ return spec_id_ptr ? *spec_id_ptr : -ESRCH;
+ }
+
+ return bpf_get_attach_cookie(ctx);
+}
+
+/* Return number of USDT arguments defined for currently traced USDT. */
+__weak __hidden
+int bpf_usdt_arg_cnt(struct pt_regs *ctx)
+{
+ struct __bpf_usdt_spec *spec;
+ int spec_id;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return -ESRCH;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return -ESRCH;
+
+ return spec->arg_cnt;
+}
+
+/* Fetch USDT argument #*arg_num* (zero-indexed) and put its value into *res.
+ * Returns 0 on success; negative error, otherwise.
+ * On error *res is guaranteed to be set to zero.
+ */
+__weak __hidden
+int bpf_usdt_arg(struct pt_regs *ctx, __u64 arg_num, long *res)
+{
+ struct __bpf_usdt_spec *spec;
+ struct __bpf_usdt_arg_spec *arg_spec;
+ unsigned long val;
+ int err, spec_id;
+
+ *res = 0;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return -ESRCH;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return -ESRCH;
+
+ if (arg_num >= BPF_USDT_MAX_ARG_CNT)
+ return -ENOENT;
+ barrier_var(arg_num);
+ if (arg_num >= spec->arg_cnt)
+ return -ENOENT;
+
+ arg_spec = &spec->args[arg_num];
+ switch (arg_spec->arg_type) {
+ case BPF_USDT_ARG_CONST:
+ /* Arg is just a constant ("-4@$-9" in USDT arg spec).
+ * Its value is recorded in arg_spec->val_off directly.
+ */
+ val = arg_spec->val_off;
+ break;
+ case BPF_USDT_ARG_REG:
+ /* Arg is in a register (e.g., "8@%rax" in USDT arg spec),
+ * so we read the contents of that register directly from
+ * struct pt_regs. To keep things simple user-space parts
+ * record offsetof(struct pt_regs, <regname>) in arg_spec->reg_off.
+ */
+ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
+ if (err)
+ return err;
+ break;
+ case BPF_USDT_ARG_REG_DEREF:
+ /* Arg is in memory addressed by register, plus some offset
+ * (e.g., "-4@-1204(%rbp)" in USDT arg spec). Register is
+ * identified as in the BPF_USDT_ARG_REG case, and the offset
+ * is in arg_spec->val_off. We first fetch the register contents
+ * from pt_regs, then do another user-space probe read to
+ * fetch the argument value itself.
+ */
+ err = bpf_probe_read_kernel(&val, sizeof(val), (void *)ctx + arg_spec->reg_off);
+ if (err)
+ return err;
+ err = bpf_probe_read_user(&val, sizeof(val), (void *)val + arg_spec->val_off);
+ if (err)
+ return err;
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ val >>= arg_spec->arg_bitshift;
+#endif
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* cast arg from 1, 2, or 4 bytes to final 8 byte size clearing
+ * necessary upper arg_bitshift bits, with sign extension if argument
+ * is signed
+ */
+ val <<= arg_spec->arg_bitshift;
+ if (arg_spec->arg_signed)
+ val = ((long)val) >> arg_spec->arg_bitshift;
+ else
+ val = val >> arg_spec->arg_bitshift;
+ *res = val;
+ return 0;
+}
+
+/* Retrieve user-specified cookie value provided during attach as
+ * bpf_usdt_opts.usdt_cookie. This serves the same purpose as BPF cookie
+ * returned by bpf_get_attach_cookie(). Libbpf's support for USDT is itself
+ * utilizing BPF cookies internally, so users can't use the BPF cookie directly
+ * for USDT programs and have to use the bpf_usdt_cookie() API instead.
+ */
+__weak __hidden
+long bpf_usdt_cookie(struct pt_regs *ctx)
+{
+ struct __bpf_usdt_spec *spec;
+ int spec_id;
+
+ spec_id = __bpf_usdt_spec_id(ctx);
+ if (spec_id < 0)
+ return 0;
+
+ spec = bpf_map_lookup_elem(&__bpf_usdt_specs, &spec_id);
+ if (!spec)
+ return 0;
+
+ return spec->usdt_cookie;
+}
+
+/* we rely on ___bpf_apply() and ___bpf_narg() macros already defined in bpf_tracing.h */
+#define ___bpf_usdt_args0() ctx
+#define ___bpf_usdt_args1(x) ___bpf_usdt_args0(), ({ long _x; bpf_usdt_arg(ctx, 0, &_x); (void *)_x; })
+#define ___bpf_usdt_args2(x, args...) ___bpf_usdt_args1(args), ({ long _x; bpf_usdt_arg(ctx, 1, &_x); (void *)_x; })
+#define ___bpf_usdt_args3(x, args...) ___bpf_usdt_args2(args), ({ long _x; bpf_usdt_arg(ctx, 2, &_x); (void *)_x; })
+#define ___bpf_usdt_args4(x, args...) ___bpf_usdt_args3(args), ({ long _x; bpf_usdt_arg(ctx, 3, &_x); (void *)_x; })
+#define ___bpf_usdt_args5(x, args...) ___bpf_usdt_args4(args), ({ long _x; bpf_usdt_arg(ctx, 4, &_x); (void *)_x; })
+#define ___bpf_usdt_args6(x, args...) ___bpf_usdt_args5(args), ({ long _x; bpf_usdt_arg(ctx, 5, &_x); (void *)_x; })
+#define ___bpf_usdt_args7(x, args...) ___bpf_usdt_args6(args), ({ long _x; bpf_usdt_arg(ctx, 6, &_x); (void *)_x; })
+#define ___bpf_usdt_args8(x, args...) ___bpf_usdt_args7(args), ({ long _x; bpf_usdt_arg(ctx, 7, &_x); (void *)_x; })
+#define ___bpf_usdt_args9(x, args...) ___bpf_usdt_args8(args), ({ long _x; bpf_usdt_arg(ctx, 8, &_x); (void *)_x; })
+#define ___bpf_usdt_args10(x, args...) ___bpf_usdt_args9(args), ({ long _x; bpf_usdt_arg(ctx, 9, &_x); (void *)_x; })
+#define ___bpf_usdt_args11(x, args...) ___bpf_usdt_args10(args), ({ long _x; bpf_usdt_arg(ctx, 10, &_x); (void *)_x; })
+#define ___bpf_usdt_args12(x, args...) ___bpf_usdt_args11(args), ({ long _x; bpf_usdt_arg(ctx, 11, &_x); (void *)_x; })
+#define ___bpf_usdt_args(args...) ___bpf_apply(___bpf_usdt_args, ___bpf_narg(args))(args)
+
+/*
+ * BPF_USDT serves the same purpose for USDT handlers as BPF_PROG for
+ * tp_btf/fentry/fexit BPF programs and BPF_KPROBE for kprobes.
+ * Original struct pt_regs * context is preserved as 'ctx' argument.
+ */
+#define BPF_USDT(name, args...) \
+name(struct pt_regs *ctx); \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args); \
+typeof(name(0)) name(struct pt_regs *ctx) \
+{ \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
+ return ____##name(___bpf_usdt_args(args)); \
+ _Pragma("GCC diagnostic pop") \
+} \
+static __always_inline typeof(name(0)) \
+____##name(struct pt_regs *ctx, ##args)
+
+#endif /* __USDT_BPF_H__ */
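
A minimal BPF-side sketch using this header (illustrative only; the probe name, its argument types, and the exact includes are hypothetical and depend on the build setup):

#include "vmlinux.h"		/* or <linux/ptrace.h>, for struct pt_regs */
#include <bpf/bpf_helpers.h>
#include "usdt.bpf.h"

char LICENSE[] SEC("license") = "Dual BSD/GPL";

/* SEC("usdt") without a target leaves the binary/provider/probe to be given
 * at attach time via bpf_program__attach_usdt(); a fixed target could be
 * spelled as SEC("usdt/<path>:<provider>:<probe>") instead.
 */
SEC("usdt")
int BPF_USDT(handle_my_probe, int x, void *y)
{
	/* the original struct pt_regs * is still available as 'ctx' */
	bpf_printk("usdt: arg_cnt=%d x=%d cookie=%ld",
		   bpf_usdt_arg_cnt(ctx), x, bpf_usdt_cookie(ctx));
	return 0;
}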
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
new file mode 100644
index 000000000000..086eef355ab3
--- /dev/null
+++ b/tools/lib/bpf/usdt.c
@@ -0,0 +1,1556 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <libelf.h>
+#include <gelf.h>
+#include <unistd.h>
+#include <linux/ptrace.h>
+#include <linux/kernel.h>
+
+/* s8 will be marked as poison while it's also the name of a register on riscv */
+#if defined(__riscv)
+#define rv_s8 s8
+#endif
+
+#include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_common.h"
+#include "libbpf_internal.h"
+#include "hashmap.h"
+
+/* libbpf's USDT support consists of BPF-side state/code and user-space
+ * state/code working together in concert. BPF-side parts are defined in
+ * usdt.bpf.h header library. User-space state is encapsulated by struct
+ * usdt_manager and all the supporting code centered around usdt_manager.
+ *
+ * usdt.bpf.h defines two BPF maps that usdt_manager expects: USDT spec map
+ * and IP-to-spec-ID map, which is an auxiliary map necessary for kernels that
+ * don't support BPF cookies (see below). These two maps are implicitly
+ * embedded into the user's final BPF object file when the user's code includes
+ * usdt.bpf.h. This means that libbpf doesn't do anything special to create
+ * these USDT support maps. They are created by the normal libbpf logic of
+ * instantiating BPF maps when opening and loading a BPF object.
+ *
+ * As such, libbpf is basically unaware of the need to do anything
+ * USDT-related until the very first call to bpf_program__attach_usdt(), which
+ * can be called by user explicitly or happen automatically during skeleton
+ * attach (or, equivalently, through generic bpf_program__attach() call). At
+ * this point, libbpf will instantiate and initialize struct usdt_manager and
+ * store it in bpf_object. The USDT manager is a per-BPF-object construct, as
+ * each independent BPF object might or might not have USDT programs, and thus
+ * all the expected USDT-related state. There is no coordination between two
+ * bpf_objects when it comes to USDT attachment; they are oblivious of each
+ * other's existence, and libbpf simply deals with each bpf_object's own
+ * USDT state.
+ *
+ * Quick crash course on USDTs.
+ *
+ * From user-space application's point of view, USDT is essentially just
+ * a slightly special function call that normally has zero overhead, unless it
+ * is being traced by some external entity (e.g., a BPF-based tool). Here's how
+ * a typical application can trigger USDT probe:
+ *
+ * #include <sys/sdt.h> // provided by systemtap-sdt-devel package
+ * // folly also provides similar functionality in folly/tracing/StaticTracepoint.h
+ *
+ * STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
+ *
+ * A USDT is identified by its <provider-name>:<probe-name> pair of names. Each
+ * individual USDT has a fixed number of arguments (3 in the above example)
+ * and specifies values of each argument as if it was a function call.
+ *
+ * USDT call is actually not a function call, but is instead replaced by
+ * a single NOP instruction (thus zero overhead, effectively). But in addition
+ * to that, those USDT macros generate special SHT_NOTE ELF records in
+ * .note.stapsdt ELF section. Here's an example USDT definition as emitted by
+ * `readelf -n <binary>`:
+ *
+ * stapsdt 0x00000089 NT_STAPSDT (SystemTap probe descriptors)
+ * Provider: test
+ * Name: usdt12
+ * Location: 0x0000000000549df3, Base: 0x00000000008effa4, Semaphore: 0x0000000000a4606e
+ * Arguments: -4@-1204(%rbp) -4@%edi -8@-1216(%rbp) -8@%r8 -4@$5 -8@%r9 8@%rdx 8@%r10 -4@$-9 -2@%cx -2@%ax -1@%sil
+ *
+ * In this case we have USDT test:usdt12 with 12 arguments.
+ *
+ * Location and base are offsets used to calculate absolute IP address of that
+ * NOP instruction that kernel can replace with an interrupt instruction to
+ * trigger instrumentation code (BPF program for all that we care about).
+ *
+ * The semaphore above is an optional feature. It records the address of a 2-byte
+ * refcount variable (normally in the '.probes' ELF section) used for signaling
+ * whether anything is attached to the USDT. This is useful for user
+ * applications if, for example, they need to prepare some arguments that are
+ * passed only to USDTs and the preparation is expensive. By checking if the USDT
+ * is "activated", an application can avoid paying those costs unnecessarily.
+ * A recent enough kernel has built-in support for automatically managing this
+ * refcount, which libbpf expects and relies on. If a USDT is defined without
+ * an associated semaphore, this value will be zero. See selftests for semaphore
+ * examples.
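+ *
+ * As a rough sketch (the exact semaphore symbol name depends on how the
+ * probes were generated, so treat my_usdt_probe_name_semaphore below as
+ * a hypothetical name), an application could guard expensive argument
+ * preparation like this:
+ *
+ *    extern unsigned short my_usdt_probe_name_semaphore;
+ *
+ *    if (my_usdt_probe_name_semaphore > 0) { // non-zero only when attached
+ *        int x = expensive_to_compute();     // pay the cost only when traced
+ *        STAP_PROBE3(my_usdt_provider, my_usdt_probe_name, 123, x, &y);
+ *    }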
+ *
+ * Arguments is the most interesting part. This USDT specification string
+ * provides information about all the USDT arguments and their locations. The
+ * part before the @ sign defines the byte size of the argument (1, 2, 4, or 8)
+ * and whether the argument is signed or unsigned (negative size means signed).
+ * The part after the @ sign is an assembly-like definition of the argument
+ * location (see [0] for more details). Technically, the assembler can emit
+ * some pretty advanced definitions, but libbpf currently supports the three
+ * most common cases (see the worked example below the reference):
+ * 1) immediate constant, see 5th and 9th args above (-4@$5 and -4@$-9);
+ * 2) register value, e.g., 8@%rdx, which means "unsigned 8-byte integer
+ * whose value is in register %rdx";
+ * 3) memory dereference addressed by register, e.g., -4@-1204(%rbp), which
+ * specifies a signed 32-bit integer stored at offset -1204 bytes from the
+ * memory address stored in %rbp.
+ *
+ * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
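+ *
+ * As a worked example (matching struct usdt_arg_spec defined further below
+ * in this file), on x86-64 the spec -4@-1204(%rbp) is parsed roughly into:
+ *
+ *    arg->arg_type     = USDT_ARG_REG_DEREF;
+ *    arg->val_off      = -1204;
+ *    arg->reg_off      = offsetof(struct pt_regs, rbp);
+ *    arg->arg_signed   = true;       // size was negative
+ *    arg->arg_bitshift = 64 - 4 * 8; // = 32, used for (sign-)extension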
+ *
+ * During attachment, libbpf parses all the relevant USDT specifications and
+ * prepares `struct usdt_spec` (USDT spec), which is then provided to BPF-side
+ * code through the spec map. This allows BPF applications to quickly fetch
+ * actual argument values at runtime using simple BPF-side code.
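+ *
+ * For reference, a minimal BPF-side consumer might look roughly like this
+ * (a sketch built on the usdt.bpf.h helpers, not canonical usage):
+ *
+ *    SEC("usdt")
+ *    int handle_my_probe(struct pt_regs *ctx)
+ *    {
+ *        long x;
+ *
+ *        bpf_usdt_arg(ctx, 1, &x); // fetch 2nd USDT arg using its spec
+ *        return 0;
+ *    }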
+ *
+ * With basics out of the way, let's go over less immediately obvious aspects
+ * of supporting USDTs.
+ *
+ * First, there is no special USDT BPF program type. It is actually just
+ * a uprobe BPF program (which, for the kernel, at least currently, is just
+ * a kprobe program, i.e., the BPF_PROG_TYPE_KPROBE program type), with the
+ * only difference that a uprobe is usually attached at the function entry,
+ * while a USDT will normally be somewhere inside the function. But it should
+ * always point to a NOP instruction, which makes such uprobes the fastest
+ * uprobe kind.
+ *
+ * Second, it's important to realize that such STAP_PROBEn(provider, name, ...)
+ * macro invocations can end up being inlined many, many times, depending on
+ * the specifics of each individual user application. So a single conceptual
+ * USDT (identified by its provider:name pair) generally corresponds to
+ * multiple uprobe locations (USDT call sites) in different places in the user
+ * application. Further, again due to inlining, each USDT call site might end
+ * up having the same argument #N located in a different place: in one call
+ * site it could be a constant, in another it might end up in a register, and
+ * in yet another it could be in some other register or even somewhere on the
+ * stack.
+ *
+ * As such, "attaching to USDT" means (in general case) attaching the same
+ * uprobe BPF program to multiple target locations in user application, each
+ * potentially having a completely different USDT spec associated with it.
+ * To wire all this up together libbpf allocates a unique integer spec ID for
+ * each unique USDT spec. Spec IDs are allocated as sequential small integers
+ * so that they can be used as keys in array BPF map (for performance reasons).
+ * Spec ID allocation and accounting is big part of what usdt_manager is
+ * about. This state has to be maintained per-BPF object and coordinate
+ * between different USDT attachments within the same BPF object.
+ *
+ * The spec ID is the key in the spec BPF map; the value is the actual USDT
+ * spec laid out as struct usdt_spec. Each invocation of the BPF program at
+ * runtime needs to know its associated spec ID. It gets it either through the
+ * BPF cookie, which libbpf sets to the spec ID at attach time, or, if the
+ * kernel is too old to support BPF cookies, through the IP-to-spec-ID map
+ * that libbpf maintains in that case. The latter means that some modes of
+ * operation can't be supported without BPF cookies. One such mode is attaching
+ * to a shared library "generically", without specifying a target process: in
+ * that case it's impossible to calculate absolute IP addresses for the
+ * IP-to-spec-ID map, so that mode requires BPF cookie support.
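+ *
+ * Conceptually, BPF-side spec ID resolution looks roughly like the following
+ * (a simplified pseudocode sketch of what usdt.bpf.h does, not a verbatim
+ * copy):
+ *
+ *    if (kernel supports BPF cookies)
+ *        spec_id = bpf_get_attach_cookie(ctx);
+ *    else
+ *        spec_id = lookup PT_REGS_IP(ctx) in __bpf_usdt_ip_to_spec_id map;
+ *    spec = lookup spec_id in __bpf_usdt_specs map;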
+ *
+ * Note that libbpf is using the BPF cookie functionality for its own internal
+ * needs, so users themselves can't rely on the BPF cookie feature. To that
+ * end, libbpf provides conceptually equivalent USDT cookie support. It's still
+ * a user-provided u64 value that can be associated with a USDT attachment.
+ * Note that this will be the same value for all USDT call sites within the
+ * same single *logical* USDT attachment. This makes sense because, to the
+ * user, attaching to a USDT is a single BPF program triggered for a single
+ * USDT probe. The fact that this is done at multiple actual locations is
+ * a mostly hidden implementation detail. This USDT cookie value can be
+ * fetched with the bpf_usdt_cookie(ctx) API provided by usdt.bpf.h.
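+ *
+ * For example (a sketch of the typical user-space side; paths and names are
+ * made up), the USDT cookie is passed at attach time via bpf_usdt_opts:
+ *
+ *    LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 0xcafe);
+ *    link = bpf_program__attach_usdt(prog, -1, "/path/to/binary",
+ *                                    "my_usdt_provider", "my_usdt_probe_name",
+ *                                    &opts);
+ *
+ * and bpf_usdt_cookie(ctx) on the BPF side would then return 0xcafe for every
+ * call site belonging to this attachment.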
+ *
+ * Lastly, while a single USDT can have tons of USDT call sites, it doesn't
+ * necessarily have that many different USDT specs. It very well might be
+ * that 1000 USDT call sites only need 5 different USDT specs, because all the
+ * arguments are typically contained in a small set of registers or stack
+ * locations. As such, it's wasteful to allocate as many USDT spec IDs as
+ * there are USDT call sites. So libbpf tries to be frugal and performs
+ * on-the-fly deduplication during a single USDT attachment to allocate only
+ * the minimal required number of unique USDT specs (and thus spec IDs). This
+ * is trivially achieved by using the USDT spec string (the Arguments string
+ * from the USDT note) as a lookup key in a hashmap. The USDT spec string
+ * uniquely defines everything about how to fetch USDT arguments, so two USDT
+ * call sites sharing a USDT spec string can safely share the same USDT spec
+ * and spec ID. Note, this spec string deduplication happens only within the
+ * same USDT attachment, so all the deduplicated specs share the same USDT
+ * cookie value. This is not generally true across USDT attachments within the
+ * same BPF object, as even if the USDT spec string is the same, the USDT
+ * cookie value can be different. It was deemed excessive to try to deduplicate
+ * across independent USDT attachments by taking into account the USDT spec
+ * string *and* the USDT cookie value, which would complicate spec ID
+ * accounting significantly for little gain.
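+ *
+ * For instance (a purely illustrative example), three call sites with spec
+ * strings "-4@%edi", "-4@%edi" and "-4@-8(%rbp)" within one attachment would
+ * get only two spec IDs, with both "-4@%edi" sites sharing the same one.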
+ */
+
+#define USDT_BASE_SEC ".stapsdt.base"
+#define USDT_SEMA_SEC ".probes"
+#define USDT_NOTE_SEC ".note.stapsdt"
+#define USDT_NOTE_TYPE 3
+#define USDT_NOTE_NAME "stapsdt"
+
+/* should match exactly enum __bpf_usdt_arg_type from usdt.bpf.h */
+enum usdt_arg_type {
+ USDT_ARG_CONST,
+ USDT_ARG_REG,
+ USDT_ARG_REG_DEREF,
+};
+
+/* should match exactly struct __bpf_usdt_arg_spec from usdt.bpf.h */
+struct usdt_arg_spec {
+ __u64 val_off;
+ enum usdt_arg_type arg_type;
+ short reg_off;
+ bool arg_signed;
+ char arg_bitshift;
+};
+
+/* should match BPF_USDT_MAX_ARG_CNT in usdt.bpf.h */
+#define USDT_MAX_ARG_CNT 12
+
+/* should match struct __bpf_usdt_spec from usdt.bpf.h */
+struct usdt_spec {
+ struct usdt_arg_spec args[USDT_MAX_ARG_CNT];
+ __u64 usdt_cookie;
+ short arg_cnt;
+};
+
+struct usdt_note {
+ const char *provider;
+ const char *name;
+ /* USDT args specification string, e.g.:
+ * "-4@%esi -4@-24(%rbp) -4@%ecx 2@%ax 8@%rdx"
+ */
+ const char *args;
+ long loc_addr;
+ long base_addr;
+ long sema_addr;
+};
+
+struct usdt_target {
+ long abs_ip;
+ long rel_ip;
+ long sema_off;
+ struct usdt_spec spec;
+ const char *spec_str;
+};
+
+struct usdt_manager {
+ struct bpf_map *specs_map;
+ struct bpf_map *ip_to_spec_id_map;
+
+ int *free_spec_ids;
+ size_t free_spec_cnt;
+ size_t next_free_spec_id;
+
+ bool has_bpf_cookie;
+ bool has_sema_refcnt;
+};
+
+struct usdt_manager *usdt_manager_new(struct bpf_object *obj)
+{
+ static const char *ref_ctr_sysfs_path = "/sys/bus/event_source/devices/uprobe/format/ref_ctr_offset";
+ struct usdt_manager *man;
+ struct bpf_map *specs_map, *ip_to_spec_id_map;
+
+ specs_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_specs");
+ ip_to_spec_id_map = bpf_object__find_map_by_name(obj, "__bpf_usdt_ip_to_spec_id");
+ if (!specs_map || !ip_to_spec_id_map) {
+ pr_warn("usdt: failed to find USDT support BPF maps, did you forget to include bpf/usdt.bpf.h?\n");
+ return ERR_PTR(-ESRCH);
+ }
+
+ man = calloc(1, sizeof(*man));
+ if (!man)
+ return ERR_PTR(-ENOMEM);
+
+ man->specs_map = specs_map;
+ man->ip_to_spec_id_map = ip_to_spec_id_map;
+
+ /* Detect if BPF cookie is supported for kprobes.
+ * We don't need IP-to-ID mapping if we can use BPF cookies.
+ * Added in: 7adfc6c9b315 ("bpf: Add bpf_get_attach_cookie() BPF helper to access bpf_cookie value")
+ */
+ man->has_bpf_cookie = kernel_supports(obj, FEAT_BPF_COOKIE);
+
+ /* Detect kernel support for automatic refcounting of USDT semaphore.
+ * If this is not supported, USDTs with semaphores will not be supported.
+ * Added in: a6ca88b241d5 ("trace_uprobe: support reference counter in fd-based uprobe")
+ */
+ man->has_sema_refcnt = faccessat(AT_FDCWD, ref_ctr_sysfs_path, F_OK, AT_EACCESS) == 0;
+
+ return man;
+}
+
+void usdt_manager_free(struct usdt_manager *man)
+{
+ if (IS_ERR_OR_NULL(man))
+ return;
+
+ free(man->free_spec_ids);
+ free(man);
+}
+
+static int sanity_check_usdt_elf(Elf *elf, const char *path)
+{
+ GElf_Ehdr ehdr;
+ int endianness;
+
+ if (elf_kind(elf) != ELF_K_ELF) {
+ pr_warn("usdt: unrecognized ELF kind %d for '%s'\n", elf_kind(elf), path);
+ return -EBADF;
+ }
+
+ switch (gelf_getclass(elf)) {
+ case ELFCLASS64:
+ if (sizeof(void *) != 8) {
+ pr_warn("usdt: attaching to 64-bit ELF binary '%s' is not supported\n", path);
+ return -EBADF;
+ }
+ break;
+ case ELFCLASS32:
+ if (sizeof(void *) != 4) {
+ pr_warn("usdt: attaching to 32-bit ELF binary '%s' is not supported\n", path);
+ return -EBADF;
+ }
+ break;
+ default:
+ pr_warn("usdt: unsupported ELF class for '%s'\n", path);
+ return -EBADF;
+ }
+
+ if (!gelf_getehdr(elf, &ehdr))
+ return -EINVAL;
+
+ if (ehdr.e_type != ET_EXEC && ehdr.e_type != ET_DYN) {
+ pr_warn("usdt: unsupported type of ELF binary '%s' (%d), only ET_EXEC and ET_DYN are supported\n",
+ path, ehdr.e_type);
+ return -EBADF;
+ }
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ endianness = ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ endianness = ELFDATA2MSB;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+ if (endianness != ehdr.e_ident[EI_DATA]) {
+ pr_warn("usdt: ELF endianness mismatch for '%s'\n", path);
+ return -EBADF;
+ }
+
+ return 0;
+}
+
+static int find_elf_sec_by_name(Elf *elf, const char *sec_name, GElf_Shdr *shdr, Elf_Scn **scn)
+{
+ Elf_Scn *sec = NULL;
+ size_t shstrndx;
+
+ if (elf_getshdrstrndx(elf, &shstrndx))
+ return -EINVAL;
+
+ /* check if ELF is corrupted and avoid calling elf_strptr if yes */
+ if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL))
+ return -EINVAL;
+
+ while ((sec = elf_nextscn(elf, sec)) != NULL) {
+ char *name;
+
+ if (!gelf_getshdr(sec, shdr))
+ return -EINVAL;
+
+ name = elf_strptr(elf, shstrndx, shdr->sh_name);
+ if (name && strcmp(sec_name, name) == 0) {
+ *scn = sec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+struct elf_seg {
+ long start;
+ long end;
+ long offset;
+ bool is_exec;
+};
+
+static int cmp_elf_segs(const void *_a, const void *_b)
+{
+ const struct elf_seg *a = _a;
+ const struct elf_seg *b = _b;
+
+ return a->start < b->start ? -1 : 1;
+}
+
+static int parse_elf_segs(Elf *elf, const char *path, struct elf_seg **segs, size_t *seg_cnt)
+{
+ GElf_Phdr phdr;
+ size_t n;
+ int i, err;
+ struct elf_seg *seg;
+ void *tmp;
+
+ *seg_cnt = 0;
+
+ if (elf_getphdrnum(elf, &n)) {
+ err = -errno;
+ return err;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (!gelf_getphdr(elf, i, &phdr)) {
+ err = -errno;
+ return err;
+ }
+
+ pr_debug("usdt: discovered PHDR #%d in '%s': vaddr 0x%lx memsz 0x%lx offset 0x%lx type 0x%lx flags 0x%lx\n",
+ i, path, (long)phdr.p_vaddr, (long)phdr.p_memsz, (long)phdr.p_offset,
+ (long)phdr.p_type, (long)phdr.p_flags);
+ if (phdr.p_type != PT_LOAD)
+ continue;
+
+ tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
+ if (!tmp)
+ return -ENOMEM;
+
+ *segs = tmp;
+ seg = *segs + *seg_cnt;
+ (*seg_cnt)++;
+
+ seg->start = phdr.p_vaddr;
+ seg->end = phdr.p_vaddr + phdr.p_memsz;
+ seg->offset = phdr.p_offset;
+ seg->is_exec = phdr.p_flags & PF_X;
+ }
+
+ if (*seg_cnt == 0) {
+ pr_warn("usdt: failed to find PT_LOAD program headers in '%s'\n", path);
+ return -ESRCH;
+ }
+
+ qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
+ return 0;
+}
+
+static int parse_vma_segs(int pid, const char *lib_path, struct elf_seg **segs, size_t *seg_cnt)
+{
+ char path[PATH_MAX], line[PATH_MAX], mode[16];
+ size_t seg_start, seg_end, seg_off;
+ struct elf_seg *seg;
+ int tmp_pid, i, err;
+ FILE *f;
+
+ *seg_cnt = 0;
+
+ /* Handle containerized binaries only accessible from
+ * /proc/<pid>/root/<path>. They will be reported as just /<path> in
+ * /proc/<pid>/maps.
+ */
+ if (sscanf(lib_path, "/proc/%d/root%s", &tmp_pid, path) == 2 && pid == tmp_pid)
+ goto proceed;
+
+ if (!realpath(lib_path, path)) {
+ pr_warn("usdt: failed to get absolute path of '%s' (err %d), using path as is...\n",
+ lib_path, -errno);
+ libbpf_strlcpy(path, lib_path, sizeof(path));
+ }
+
+proceed:
+ sprintf(line, "/proc/%d/maps", pid);
+ f = fopen(line, "r");
+ if (!f) {
+ err = -errno;
+ pr_warn("usdt: failed to open '%s' to get base addr of '%s': %d\n",
+ line, lib_path, err);
+ return err;
+ }
+
+ /* We need to handle lines with no path at the end:
+ *
+ * 7f5c6f5d1000-7f5c6f5d3000 rw-p 001c7000 08:04 21238613 /usr/lib64/libc-2.17.so
+ * 7f5c6f5d3000-7f5c6f5d8000 rw-p 00000000 00:00 0
+ * 7f5c6f5d8000-7f5c6f5d9000 r-xp 00000000 103:01 362990598 /data/users/andriin/linux/tools/bpf/usdt/libhello_usdt.so
+ */
+ while (fscanf(f, "%zx-%zx %s %zx %*s %*d%[^\n]\n",
+ &seg_start, &seg_end, mode, &seg_off, line) == 5) {
+ void *tmp;
+
+ /* to handle the no-path case (see above) we capture the rest of the
+ * line without skipping any whitespace, so we need to strip
+ * leading whitespace manually here
+ */
+ i = 0;
+ while (isblank(line[i]))
+ i++;
+ if (strcmp(line + i, path) != 0)
+ continue;
+
+ pr_debug("usdt: discovered segment for lib '%s': addrs %zx-%zx mode %s offset %zx\n",
+ path, seg_start, seg_end, mode, seg_off);
+
+ /* ignore non-executable sections for shared libs */
+ if (mode[2] != 'x')
+ continue;
+
+ tmp = libbpf_reallocarray(*segs, *seg_cnt + 1, sizeof(**segs));
+ if (!tmp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ *segs = tmp;
+ seg = *segs + *seg_cnt;
+ *seg_cnt += 1;
+
+ seg->start = seg_start;
+ seg->end = seg_end;
+ seg->offset = seg_off;
+ seg->is_exec = true;
+ }
+
+ if (*seg_cnt == 0) {
+ pr_warn("usdt: failed to find '%s' (resolved to '%s') within PID %d memory mappings\n",
+ lib_path, path, pid);
+ err = -ESRCH;
+ goto err_out;
+ }
+
+ qsort(*segs, *seg_cnt, sizeof(**segs), cmp_elf_segs);
+ err = 0;
+err_out:
+ fclose(f);
+ return err;
+}
+
+static struct elf_seg *find_elf_seg(struct elf_seg *segs, size_t seg_cnt, long virtaddr)
+{
+ struct elf_seg *seg;
+ int i;
+
+ /* for ELF binaries (both executables and shared libraries), we are
+ * given a virtual address (absolute for executables, relative for
+ * libraries) which should fall within the address range of [seg_start, seg_end)
+ */
+ for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
+ if (seg->start <= virtaddr && virtaddr < seg->end)
+ return seg;
+ }
+ return NULL;
+}
+
+static struct elf_seg *find_vma_seg(struct elf_seg *segs, size_t seg_cnt, long offset)
+{
+ struct elf_seg *seg;
+ int i;
+
+ /* for VMA segments from the /proc/<pid>/maps file, the provided "address"
+ * is actually a file offset, so it should fall within the logical
+ * offset-based range of [offset_start, offset_end)
+ */
+ for (i = 0, seg = segs; i < seg_cnt; i++, seg++) {
+ if (seg->offset <= offset && offset < seg->offset + (seg->end - seg->start))
+ return seg;
+ }
+ return NULL;
+}
+
+static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
+ const char *data, size_t name_off, size_t desc_off,
+ struct usdt_note *usdt_note);
+
+static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
+
+static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
+ const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
+ struct usdt_target **out_targets, size_t *out_target_cnt)
+{
+ size_t off, name_off, desc_off, seg_cnt = 0, vma_seg_cnt = 0, target_cnt = 0;
+ struct elf_seg *segs = NULL, *vma_segs = NULL;
+ struct usdt_target *targets = NULL, *target;
+ long base_addr = 0;
+ Elf_Scn *notes_scn, *base_scn;
+ GElf_Shdr base_shdr, notes_shdr;
+ GElf_Ehdr ehdr;
+ GElf_Nhdr nhdr;
+ Elf_Data *data;
+ int err;
+
+ *out_targets = NULL;
+ *out_target_cnt = 0;
+
+ err = find_elf_sec_by_name(elf, USDT_NOTE_SEC, &notes_shdr, &notes_scn);
+ if (err) {
+ pr_warn("usdt: no USDT notes section (%s) found in '%s'\n", USDT_NOTE_SEC, path);
+ return err;
+ }
+
+ if (notes_shdr.sh_type != SHT_NOTE || !gelf_getehdr(elf, &ehdr)) {
+ pr_warn("usdt: invalid USDT notes section (%s) in '%s'\n", USDT_NOTE_SEC, path);
+ return -EINVAL;
+ }
+
+ err = parse_elf_segs(elf, path, &segs, &seg_cnt);
+ if (err) {
+ pr_warn("usdt: failed to process ELF program segments for '%s': %d\n", path, err);
+ goto err_out;
+ }
+
+ /* .stapsdt.base ELF section is optional, but is used for prelink
+ * offset compensation (see a big comment further below)
+ */
+ if (find_elf_sec_by_name(elf, USDT_BASE_SEC, &base_shdr, &base_scn) == 0)
+ base_addr = base_shdr.sh_addr;
+
+ data = elf_getdata(notes_scn, 0);
+ off = 0;
+ while ((off = gelf_getnote(data, off, &nhdr, &name_off, &desc_off)) > 0) {
+ long usdt_abs_ip, usdt_rel_ip, usdt_sema_off = 0;
+ struct usdt_note note;
+ struct elf_seg *seg = NULL;
+ void *tmp;
+
+ err = parse_usdt_note(elf, path, &nhdr, data->d_buf, name_off, desc_off, &note);
+ if (err)
+ goto err_out;
+
+ if (strcmp(note.provider, usdt_provider) != 0 || strcmp(note.name, usdt_name) != 0)
+ continue;
+
+ /* We need to compensate "prelink effect". See [0] for details,
+ * relevant parts quoted here:
+ *
+ * Each SDT probe also expands into a non-allocated ELF note. You can
+ * find this by looking at SHT_NOTE sections and decoding the format;
+ * see below for details. Because the note is non-allocated, it means
+ * there is no runtime cost, and also preserved in both stripped files
+ * and .debug files.
+ *
+ * However, this means that prelink won't adjust the note's contents
+ * for address offsets. Instead, this is done via the .stapsdt.base
+ * section. This is a special section that is added to the text. We
+ * will only ever have one of these sections in a final link and it
+ * will only ever be one byte long. Nothing about this section itself
+ * matters, we just use it as a marker to detect prelink address
+ * adjustments.
+ *
+ * Each probe note records the link-time address of the .stapsdt.base
+ * section alongside the probe PC address. The decoder compares the
+ * base address stored in the note with the .stapsdt.base section's
+ * sh_addr. Initially these are the same, but the section header will
+ * be adjusted by prelink. So the decoder applies the difference to
+ * the probe PC address to get the correct prelinked PC address; the
+ * same adjustment is applied to the semaphore address, if any.
+ *
+ * [0] https://sourceware.org/systemtap/wiki/UserSpaceProbeImplementation
+ */
+ usdt_abs_ip = note.loc_addr;
+ if (base_addr)
+ usdt_abs_ip += base_addr - note.base_addr;
+
+ /* When attaching uprobes (which is what USDTs basically are),
+ * the kernel expects a file offset to be specified, not a relative
+ * virtual address, so we need to translate the virtual address to
+ * a file offset, for both ET_EXEC and ET_DYN binaries.
+ */
+ seg = find_elf_seg(segs, seg_cnt, usdt_abs_ip);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find ELF program segment for '%s:%s' in '%s' at IP 0x%lx\n",
+ usdt_provider, usdt_name, path, usdt_abs_ip);
+ goto err_out;
+ }
+ if (!seg->is_exec) {
+ err = -ESRCH;
+ pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx) for '%s:%s' at IP 0x%lx is not executable\n",
+ path, seg->start, seg->end, usdt_provider, usdt_name,
+ usdt_abs_ip);
+ goto err_out;
+ }
+ /* translate from virtual address to file offset */
+ usdt_rel_ip = usdt_abs_ip - seg->start + seg->offset;
+
+ if (ehdr.e_type == ET_DYN && !man->has_bpf_cookie) {
+ /* If we don't have BPF cookie support but need to
+ * attach to a shared library, we'll need to know and
+ * record absolute addresses of attach points due to
+ * the need to lookup USDT spec by absolute IP of
+ * triggered uprobe. Doing this resolution is only
+ * possible when we have a specific PID of the process
+ * that's using specified shared library. BPF cookie
+ * removes the absolute address limitation as we don't
+ * need to do this lookup (we just use BPF cookie as
+ * an index of USDT spec), so for newer kernels with
+ * BPF cookie support libbpf supports USDT attachment
+ * to shared libraries with no PID filter.
+ */
+ if (pid < 0) {
+ pr_warn("usdt: attaching to shared libraries without specific PID is not supported on current kernel\n");
+ err = -ENOTSUP;
+ goto err_out;
+ }
+
+ /* vma_segs are lazily initialized only if necessary */
+ if (vma_seg_cnt == 0) {
+ err = parse_vma_segs(pid, path, &vma_segs, &vma_seg_cnt);
+ if (err) {
+ pr_warn("usdt: failed to get memory segments in PID %d for shared library '%s': %d\n",
+ pid, path, err);
+ goto err_out;
+ }
+ }
+
+ seg = find_vma_seg(vma_segs, vma_seg_cnt, usdt_rel_ip);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find shared lib memory segment for '%s:%s' in '%s' at relative IP 0x%lx\n",
+ usdt_provider, usdt_name, path, usdt_rel_ip);
+ goto err_out;
+ }
+
+ usdt_abs_ip = seg->start - seg->offset + usdt_rel_ip;
+ }
+
+ pr_debug("usdt: probe for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved abs_ip 0x%lx rel_ip 0x%lx) args '%s' in segment [0x%lx, 0x%lx) at offset 0x%lx\n",
+ usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ", path,
+ note.loc_addr, note.base_addr, usdt_abs_ip, usdt_rel_ip, note.args,
+ seg ? seg->start : 0, seg ? seg->end : 0, seg ? seg->offset : 0);
+
+ /* Adjust semaphore address to be a file offset */
+ if (note.sema_addr) {
+ if (!man->has_sema_refcnt) {
+ pr_warn("usdt: kernel doesn't support USDT semaphore refcounting for '%s:%s' in '%s'\n",
+ usdt_provider, usdt_name, path);
+ err = -ENOTSUP;
+ goto err_out;
+ }
+
+ seg = find_elf_seg(segs, seg_cnt, note.sema_addr);
+ if (!seg) {
+ err = -ESRCH;
+ pr_warn("usdt: failed to find ELF loadable segment with semaphore of '%s:%s' in '%s' at 0x%lx\n",
+ usdt_provider, usdt_name, path, note.sema_addr);
+ goto err_out;
+ }
+ if (seg->is_exec) {
+ err = -ESRCH;
+ pr_warn("usdt: matched ELF binary '%s' segment [0x%lx, 0x%lx] for semaphore of '%s:%s' at 0x%lx is executable\n",
+ path, seg->start, seg->end, usdt_provider, usdt_name,
+ note.sema_addr);
+ goto err_out;
+ }
+
+ usdt_sema_off = note.sema_addr - seg->start + seg->offset;
+
+ pr_debug("usdt: sema for '%s:%s' in %s '%s': addr 0x%lx base 0x%lx (resolved 0x%lx) in segment [0x%lx, 0x%lx] at offset 0x%lx\n",
+ usdt_provider, usdt_name, ehdr.e_type == ET_EXEC ? "exec" : "lib ",
+ path, note.sema_addr, note.base_addr, usdt_sema_off,
+ seg->start, seg->end, seg->offset);
+ }
+
+ /* Record adjusted addresses and offsets and parse USDT spec */
+ tmp = libbpf_reallocarray(targets, target_cnt + 1, sizeof(*targets));
+ if (!tmp) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ targets = tmp;
+
+ target = &targets[target_cnt];
+ memset(target, 0, sizeof(*target));
+
+ target->abs_ip = usdt_abs_ip;
+ target->rel_ip = usdt_rel_ip;
+ target->sema_off = usdt_sema_off;
+
+ /* note.args references strings from the ELF itself, so they can
+ * be referenced safely until the elf_end() call
+ */
+ target->spec_str = note.args;
+
+ err = parse_usdt_spec(&target->spec, &note, usdt_cookie);
+ if (err)
+ goto err_out;
+
+ target_cnt++;
+ }
+
+ *out_targets = targets;
+ *out_target_cnt = target_cnt;
+ err = target_cnt;
+
+err_out:
+ free(segs);
+ free(vma_segs);
+ if (err < 0)
+ free(targets);
+ return err;
+}
+
+struct bpf_link_usdt {
+ struct bpf_link link;
+
+ struct usdt_manager *usdt_man;
+
+ size_t spec_cnt;
+ int *spec_ids;
+
+ size_t uprobe_cnt;
+ struct {
+ long abs_ip;
+ struct bpf_link *link;
+ } *uprobes;
+};
+
+static int bpf_link_usdt_detach(struct bpf_link *link)
+{
+ struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
+ struct usdt_manager *man = usdt_link->usdt_man;
+ int i;
+
+ for (i = 0; i < usdt_link->uprobe_cnt; i++) {
+ /* detach underlying uprobe link */
+ bpf_link__destroy(usdt_link->uprobes[i].link);
+ /* there is no need to update specs map because it will be
+ * unconditionally overwritten on subsequent USDT attaches,
+ * but if BPF cookies are not used we need to remove entry
+ * from ip_to_spec_id map, otherwise we'll run into false
+ * conflicting IP errors
+ */
+ if (!man->has_bpf_cookie) {
+ /* not much we can do about errors here */
+ (void)bpf_map_delete_elem(bpf_map__fd(man->ip_to_spec_id_map),
+ &usdt_link->uprobes[i].abs_ip);
+ }
+ }
+
+ /* try to return the list of previously used spec IDs to usdt_manager
+ * for future reuse for subsequent USDT attaches
+ */
+ if (!man->free_spec_ids) {
+ /* if there were no free spec IDs yet, just transfer our IDs */
+ man->free_spec_ids = usdt_link->spec_ids;
+ man->free_spec_cnt = usdt_link->spec_cnt;
+ usdt_link->spec_ids = NULL;
+ } else {
+ /* otherwise concat IDs */
+ size_t new_cnt = man->free_spec_cnt + usdt_link->spec_cnt;
+ int *new_free_ids;
+
+ new_free_ids = libbpf_reallocarray(man->free_spec_ids, new_cnt,
+ sizeof(*new_free_ids));
+ /* If we couldn't resize free_spec_ids, we'll just leak
+ * a bunch of free IDs; this is very unlikely to happen and, if
+ * the system is that starved for memory, it's probably the least
+ * of the user's concerns.
+ * So just do our best here to return those IDs to usdt_manager.
+ */
+ if (new_free_ids) {
+ memcpy(new_free_ids + man->free_spec_cnt, usdt_link->spec_ids,
+ usdt_link->spec_cnt * sizeof(*usdt_link->spec_ids));
+ man->free_spec_ids = new_free_ids;
+ man->free_spec_cnt = new_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static void bpf_link_usdt_dealloc(struct bpf_link *link)
+{
+ struct bpf_link_usdt *usdt_link = container_of(link, struct bpf_link_usdt, link);
+
+ free(usdt_link->spec_ids);
+ free(usdt_link->uprobes);
+ free(usdt_link);
+}
+
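+/* USDT spec strings are used as hashmap keys to deduplicate identical USDT
+ * specs within a single USDT attachment (see allocate_spec_id() below)
+ */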
+static size_t specs_hash_fn(long key, void *ctx)
+{
+ return str_hash((char *)key);
+}
+
+static bool specs_equal_fn(long key1, long key2, void *ctx)
+{
+ return strcmp((char *)key1, (char *)key2) == 0;
+}
+
+static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash,
+ struct bpf_link_usdt *link, struct usdt_target *target,
+ int *spec_id, bool *is_new)
+{
+ long tmp;
+ void *new_ids;
+ int err;
+
+ /* check if we already allocated spec ID for this spec string */
+ if (hashmap__find(specs_hash, target->spec_str, &tmp)) {
+ *spec_id = tmp;
+ *is_new = false;
+ return 0;
+ }
+
+ /* otherwise it's a new ID that needs to be set up in specs map and
+ * returned back to usdt_manager when USDT link is detached
+ */
+ new_ids = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids));
+ if (!new_ids)
+ return -ENOMEM;
+ link->spec_ids = new_ids;
+
+ /* get next free spec ID, giving preference to free list, if not empty */
+ if (man->free_spec_cnt) {
+ *spec_id = man->free_spec_ids[man->free_spec_cnt - 1];
+
+ /* cache spec ID for current spec string for future lookups */
+ err = hashmap__add(specs_hash, target->spec_str, *spec_id);
+ if (err)
+ return err;
+
+ man->free_spec_cnt--;
+ } else {
+ /* don't allocate spec ID bigger than what fits in specs map */
+ if (man->next_free_spec_id >= bpf_map__max_entries(man->specs_map))
+ return -E2BIG;
+
+ *spec_id = man->next_free_spec_id;
+
+ /* cache spec ID for current spec string for future lookups */
+ err = hashmap__add(specs_hash, target->spec_str, *spec_id);
+ if (err)
+ return err;
+
+ man->next_free_spec_id++;
+ }
+
+ /* remember new spec ID in the link for later return back to free list on detach */
+ link->spec_ids[link->spec_cnt] = *spec_id;
+ link->spec_cnt++;
+ *is_new = true;
+ return 0;
+}
+
+struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
+ pid_t pid, const char *path,
+ const char *usdt_provider, const char *usdt_name,
+ __u64 usdt_cookie)
+{
+ int i, fd, err, spec_map_fd, ip_map_fd;
+ LIBBPF_OPTS(bpf_uprobe_opts, opts);
+ struct hashmap *specs_hash = NULL;
+ struct bpf_link_usdt *link = NULL;
+ struct usdt_target *targets = NULL;
+ size_t target_cnt;
+ Elf *elf;
+
+ spec_map_fd = bpf_map__fd(man->specs_map);
+ ip_map_fd = bpf_map__fd(man->ip_to_spec_id_map);
+
+ /* TODO: perform path resolution similar to uprobe's */
+ fd = open(path, O_RDONLY);
+ if (fd < 0) {
+ err = -errno;
+ pr_warn("usdt: failed to open ELF binary '%s': %d\n", path, err);
+ return libbpf_err_ptr(err);
+ }
+
+ elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
+ if (!elf) {
+ err = -EBADF;
+ pr_warn("usdt: failed to parse ELF binary '%s': %s\n", path, elf_errmsg(-1));
+ goto err_out;
+ }
+
+ err = sanity_check_usdt_elf(elf, path);
+ if (err)
+ goto err_out;
+
+ /* normalize PID filter */
+ if (pid < 0)
+ pid = -1;
+ else if (pid == 0)
+ pid = getpid();
+
+ /* discover USDTs in the given binary, optionally limiting
+ * activations to a given PID, if pid > 0
+ */
+ err = collect_usdt_targets(man, elf, path, pid, usdt_provider, usdt_name,
+ usdt_cookie, &targets, &target_cnt);
+ if (err <= 0) {
+ err = (err == 0) ? -ENOENT : err;
+ goto err_out;
+ }
+
+ specs_hash = hashmap__new(specs_hash_fn, specs_equal_fn, NULL);
+ if (IS_ERR(specs_hash)) {
+ err = PTR_ERR(specs_hash);
+ goto err_out;
+ }
+
+ link = calloc(1, sizeof(*link));
+ if (!link) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ link->usdt_man = man;
+ link->link.detach = &bpf_link_usdt_detach;
+ link->link.dealloc = &bpf_link_usdt_dealloc;
+
+ link->uprobes = calloc(target_cnt, sizeof(*link->uprobes));
+ if (!link->uprobes) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ for (i = 0; i < target_cnt; i++) {
+ struct usdt_target *target = &targets[i];
+ struct bpf_link *uprobe_link;
+ bool is_new;
+ int spec_id;
+
+ /* Spec ID can be either reused or newly allocated. If it is
+ * newly allocated, we'll need to fill out spec map, otherwise
+ * entire spec should be valid and can be just used by a new
+ * uprobe. We reuse spec when USDT arg spec is identical. We
+ * also never share specs between two different USDT
+ * attachments ("links"), so all the reused specs already
+ * share USDT cookie value implicitly.
+ */
+ err = allocate_spec_id(man, specs_hash, link, target, &spec_id, &is_new);
+ if (err)
+ goto err_out;
+
+ if (is_new && bpf_map_update_elem(spec_map_fd, &spec_id, &target->spec, BPF_ANY)) {
+ err = -errno;
+ pr_warn("usdt: failed to set USDT spec #%d for '%s:%s' in '%s': %d\n",
+ spec_id, usdt_provider, usdt_name, path, err);
+ goto err_out;
+ }
+ if (!man->has_bpf_cookie &&
+ bpf_map_update_elem(ip_map_fd, &target->abs_ip, &spec_id, BPF_NOEXIST)) {
+ err = -errno;
+ if (err == -EEXIST) {
+ pr_warn("usdt: IP collision detected for spec #%d for '%s:%s' in '%s'\n",
+ spec_id, usdt_provider, usdt_name, path);
+ } else {
+ pr_warn("usdt: failed to map IP 0x%lx to spec #%d for '%s:%s' in '%s': %d\n",
+ target->abs_ip, spec_id, usdt_provider, usdt_name,
+ path, err);
+ }
+ goto err_out;
+ }
+
+ opts.ref_ctr_offset = target->sema_off;
+ opts.bpf_cookie = man->has_bpf_cookie ? spec_id : 0;
+ uprobe_link = bpf_program__attach_uprobe_opts(prog, pid, path,
+ target->rel_ip, &opts);
+ err = libbpf_get_error(uprobe_link);
+ if (err) {
+ pr_warn("usdt: failed to attach uprobe #%d for '%s:%s' in '%s': %d\n",
+ i, usdt_provider, usdt_name, path, err);
+ goto err_out;
+ }
+
+ link->uprobes[i].link = uprobe_link;
+ link->uprobes[i].abs_ip = target->abs_ip;
+ link->uprobe_cnt++;
+ }
+
+ free(targets);
+ hashmap__free(specs_hash);
+ elf_end(elf);
+ close(fd);
+
+ return &link->link;
+
+err_out:
+ if (link)
+ bpf_link__destroy(&link->link);
+ free(targets);
+ hashmap__free(specs_hash);
+ if (elf)
+ elf_end(elf);
+ close(fd);
+ return libbpf_err_ptr(err);
+}
+
+/* Parse out USDT ELF note from '.note.stapsdt' section.
+ * Logic inspired by perf's code.
+ */
+static int parse_usdt_note(Elf *elf, const char *path, GElf_Nhdr *nhdr,
+ const char *data, size_t name_off, size_t desc_off,
+ struct usdt_note *note)
+{
+ const char *provider, *name, *args;
+ long addrs[3];
+ size_t len;
+
+ /* sanity check USDT note name and type first */
+ if (strncmp(data + name_off, USDT_NOTE_NAME, nhdr->n_namesz) != 0)
+ return -EINVAL;
+ if (nhdr->n_type != USDT_NOTE_TYPE)
+ return -EINVAL;
+
+ /* sanity check USDT note contents ("description" in ELF terminology) */
+ len = nhdr->n_descsz;
+ data = data + desc_off;
+
+ /* +3 is the very minimum required to store three empty strings */
+ if (len < sizeof(addrs) + 3)
+ return -EINVAL;
+
+ /* get location, base, and semaphore addrs */
+ memcpy(&addrs, data, sizeof(addrs));
+
+ /* parse string fields: provider, name, args */
+ provider = data + sizeof(addrs);
+
+ name = (const char *)memchr(provider, '\0', data + len - provider);
+ if (!name) /* non-zero-terminated provider */
+ return -EINVAL;
+ name++;
+ if (name >= data + len || *name == '\0') /* missing or empty name */
+ return -EINVAL;
+
+ args = memchr(name, '\0', data + len - name);
+ if (!args) /* non-zero-terminated name */
+ return -EINVAL;
+ ++args;
+ if (args >= data + len) /* missing arguments spec */
+ return -EINVAL;
+
+ note->provider = provider;
+ note->name = name;
+ if (*args == '\0' || *args == ':')
+ note->args = "";
+ else
+ note->args = args;
+ note->loc_addr = addrs[0];
+ note->base_addr = addrs[1];
+ note->sema_addr = addrs[2];
+
+ return 0;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz);
+
+static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
+{
+ struct usdt_arg_spec *arg;
+ const char *s;
+ int arg_sz, len;
+
+ spec->usdt_cookie = usdt_cookie;
+ spec->arg_cnt = 0;
+
+ s = note->args;
+ while (s[0]) {
+ if (spec->arg_cnt >= USDT_MAX_ARG_CNT) {
+ pr_warn("usdt: too many USDT arguments (> %d) for '%s:%s' with args spec '%s'\n",
+ USDT_MAX_ARG_CNT, note->provider, note->name, note->args);
+ return -E2BIG;
+ }
+
+ arg = &spec->args[spec->arg_cnt];
+ len = parse_usdt_arg(s, spec->arg_cnt, arg, &arg_sz);
+ if (len < 0)
+ return len;
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ spec->arg_cnt, s, arg_sz);
+ return -EINVAL;
+ }
+
+ s += len;
+ spec->arg_cnt++;
+ }
+
+ return 0;
+}
+
+/* Architecture-specific logic for parsing USDT argument location specs */
+
+#if defined(__x86_64__) || defined(__i386__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ static struct {
+ const char *names[4];
+ size_t pt_regs_off;
+ } reg_map[] = {
+#ifdef __x86_64__
+#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg64)
+#else
+#define reg_off(reg64, reg32) offsetof(struct pt_regs, reg32)
+#endif
+ { {"rip", "eip", "", ""}, reg_off(rip, eip) },
+ { {"rax", "eax", "ax", "al"}, reg_off(rax, eax) },
+ { {"rbx", "ebx", "bx", "bl"}, reg_off(rbx, ebx) },
+ { {"rcx", "ecx", "cx", "cl"}, reg_off(rcx, ecx) },
+ { {"rdx", "edx", "dx", "dl"}, reg_off(rdx, edx) },
+ { {"rsi", "esi", "si", "sil"}, reg_off(rsi, esi) },
+ { {"rdi", "edi", "di", "dil"}, reg_off(rdi, edi) },
+ { {"rbp", "ebp", "bp", "bpl"}, reg_off(rbp, ebp) },
+ { {"rsp", "esp", "sp", "spl"}, reg_off(rsp, esp) },
+#undef reg_off
+#ifdef __x86_64__
+ { {"r8", "r8d", "r8w", "r8b"}, offsetof(struct pt_regs, r8) },
+ { {"r9", "r9d", "r9w", "r9b"}, offsetof(struct pt_regs, r9) },
+ { {"r10", "r10d", "r10w", "r10b"}, offsetof(struct pt_regs, r10) },
+ { {"r11", "r11d", "r11w", "r11b"}, offsetof(struct pt_regs, r11) },
+ { {"r12", "r12d", "r12w", "r12b"}, offsetof(struct pt_regs, r12) },
+ { {"r13", "r13d", "r13w", "r13b"}, offsetof(struct pt_regs, r13) },
+ { {"r14", "r14d", "r14w", "r14b"}, offsetof(struct pt_regs, r14) },
+ { {"r15", "r15d", "r15w", "r15b"}, offsetof(struct pt_regs, r15) },
+#endif
+ };
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+ for (j = 0; j < ARRAY_SIZE(reg_map[i].names); j++) {
+ if (strcmp(reg_name, reg_map[i].names[j]) == 0)
+ return reg_map[i].pt_regs_off;
+ }
+ }
+
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
+{
+ char reg_name[16];
+ int len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %%%15[^)] ) %n", arg_sz, &off, reg_name, &len) == 3) {
+ /* Memory dereference case, e.g., -4@-20(%rbp) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ ( %%%15[^)] ) %n", arg_sz, reg_name, &len) == 2) {
+ /* Memory dereference case without offset, e.g., 8@(%rsp) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %%%15s %n", arg_sz, reg_name, &len) == 2) {
+ /* Register read case, e.g., -4@%eax */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ $%ld %n", arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@$71 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__s390x__)
+
+/* Do not support __s390__ for now, since user_pt_regs is broken with -m31. */
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
+{
+ unsigned int reg;
+ int len;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %%r%u ) %n", arg_sz, &off, &reg, &len) == 3) {
+ /* Memory dereference case, e.g., -2@-28(%r15) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ if (reg > 15) {
+ pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
+ return -EINVAL;
+ }
+ arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
+ } else if (sscanf(arg_str, " %d @ %%r%u %n", arg_sz, &reg, &len) == 2) {
+ /* Register read case, e.g., -8@%r0 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ if (reg > 15) {
+ pr_warn("usdt: unrecognized register '%%r%u'\n", reg);
+ return -EINVAL;
+ }
+ arg->reg_off = offsetof(user_pt_regs, gprs[reg]);
+ } else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@71 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__aarch64__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ int reg_num;
+
+ if (sscanf(reg_name, "x%d", &reg_num) == 1) {
+ if (reg_num >= 0 && reg_num < 31)
+ return offsetof(struct user_pt_regs, regs[reg_num]);
+ } else if (strcmp(reg_name, "sp") == 0) {
+ return offsetof(struct user_pt_regs, sp);
+ }
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
+{
+ char reg_name[16];
+ int len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , %ld ] %n", arg_sz, reg_name, &off, &len) == 3) {
+ /* Memory dereference case, e.g., -4@[sp, 96] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
+ /* Memory dereference case, e.g., -4@[sp] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@x4 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__riscv)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ static struct {
+ const char *name;
+ size_t pt_regs_off;
+ } reg_map[] = {
+ { "ra", offsetof(struct user_regs_struct, ra) },
+ { "sp", offsetof(struct user_regs_struct, sp) },
+ { "gp", offsetof(struct user_regs_struct, gp) },
+ { "tp", offsetof(struct user_regs_struct, tp) },
+ { "a0", offsetof(struct user_regs_struct, a0) },
+ { "a1", offsetof(struct user_regs_struct, a1) },
+ { "a2", offsetof(struct user_regs_struct, a2) },
+ { "a3", offsetof(struct user_regs_struct, a3) },
+ { "a4", offsetof(struct user_regs_struct, a4) },
+ { "a5", offsetof(struct user_regs_struct, a5) },
+ { "a6", offsetof(struct user_regs_struct, a6) },
+ { "a7", offsetof(struct user_regs_struct, a7) },
+ { "s0", offsetof(struct user_regs_struct, s0) },
+ { "s1", offsetof(struct user_regs_struct, s1) },
+ { "s2", offsetof(struct user_regs_struct, s2) },
+ { "s3", offsetof(struct user_regs_struct, s3) },
+ { "s4", offsetof(struct user_regs_struct, s4) },
+ { "s5", offsetof(struct user_regs_struct, s5) },
+ { "s6", offsetof(struct user_regs_struct, s6) },
+ { "s7", offsetof(struct user_regs_struct, s7) },
+ { "s8", offsetof(struct user_regs_struct, rv_s8) },
+ { "s9", offsetof(struct user_regs_struct, s9) },
+ { "s10", offsetof(struct user_regs_struct, s10) },
+ { "s11", offsetof(struct user_regs_struct, s11) },
+ { "t0", offsetof(struct user_regs_struct, t0) },
+ { "t1", offsetof(struct user_regs_struct, t1) },
+ { "t2", offsetof(struct user_regs_struct, t2) },
+ { "t3", offsetof(struct user_regs_struct, t3) },
+ { "t4", offsetof(struct user_regs_struct, t4) },
+ { "t5", offsetof(struct user_regs_struct, t5) },
+ { "t6", offsetof(struct user_regs_struct, t6) },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+ if (strcmp(reg_name, reg_map[i].name) == 0)
+ return reg_map[i].pt_regs_off;
+ }
+
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
+{
+ char reg_name[16];
+ int len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %15[a-z0-9] ) %n", arg_sz, &off, reg_name, &len) == 3) {
+ /* Memory dereference case, e.g., -8@-88(s0) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %ld %n", arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@a1 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__arm__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ static struct {
+ const char *name;
+ size_t pt_regs_off;
+ } reg_map[] = {
+ { "r0", offsetof(struct pt_regs, uregs[0]) },
+ { "r1", offsetof(struct pt_regs, uregs[1]) },
+ { "r2", offsetof(struct pt_regs, uregs[2]) },
+ { "r3", offsetof(struct pt_regs, uregs[3]) },
+ { "r4", offsetof(struct pt_regs, uregs[4]) },
+ { "r5", offsetof(struct pt_regs, uregs[5]) },
+ { "r6", offsetof(struct pt_regs, uregs[6]) },
+ { "r7", offsetof(struct pt_regs, uregs[7]) },
+ { "r8", offsetof(struct pt_regs, uregs[8]) },
+ { "r9", offsetof(struct pt_regs, uregs[9]) },
+ { "r10", offsetof(struct pt_regs, uregs[10]) },
+ { "fp", offsetof(struct pt_regs, uregs[11]) },
+ { "ip", offsetof(struct pt_regs, uregs[12]) },
+ { "sp", offsetof(struct pt_regs, uregs[13]) },
+ { "lr", offsetof(struct pt_regs, uregs[14]) },
+ { "pc", offsetof(struct pt_regs, uregs[15]) },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+ if (strcmp(reg_name, reg_map[i].name) == 0)
+ return reg_map[i].pt_regs_off;
+ }
+
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
+{
+ char reg_name[16];
+ int len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] , #%ld ] %n",
+ arg_sz, reg_name, &off, &len) == 3) {
+ /* Memory dereference case, e.g., -4@[fp, #96] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ \[ %15[a-z0-9] ] %n", arg_sz, reg_name, &len) == 2) {
+ /* Memory dereference case, e.g., -4@[sp] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ #%ld %n", arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@#5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %15[a-z0-9] %n", arg_sz, reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@r4 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#else
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg, int *arg_sz)
+{
+ pr_warn("usdt: libbpf doesn't support USDTs on current architecture\n");
+ return -ENOTSUP;
+}
+
+#endif
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
deleted file mode 100644
index f7f4efb70a4c..000000000000
--- a/tools/lib/bpf/xsk.c
+++ /dev/null
@@ -1,812 +0,0 @@
-// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-/*
- * AF_XDP user-space access library.
- *
- * Copyright(c) 2018 - 2019 Intel Corporation.
- *
- * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
- */
-
-#include <errno.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <arpa/inet.h>
-#include <asm/barrier.h>
-#include <linux/compiler.h>
-#include <linux/ethtool.h>
-#include <linux/filter.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-#include <linux/if_xdp.h>
-#include <linux/sockios.h>
-#include <net/if.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include "bpf.h"
-#include "libbpf.h"
-#include "libbpf_internal.h"
-#include "xsk.h"
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
-#ifndef SOL_XDP
- #define SOL_XDP 283
-#endif
-
-#ifndef AF_XDP
- #define AF_XDP 44
-#endif
-
-#ifndef PF_XDP
- #define PF_XDP AF_XDP
-#endif
-
-struct xsk_umem {
- struct xsk_ring_prod *fill;
- struct xsk_ring_cons *comp;
- char *umem_area;
- struct xsk_umem_config config;
- int fd;
- int refcount;
-};
-
-struct xsk_socket {
- struct xsk_ring_cons *rx;
- struct xsk_ring_prod *tx;
- __u64 outstanding_tx;
- struct xsk_umem *umem;
- struct xsk_socket_config config;
- int fd;
- int ifindex;
- int prog_fd;
- int xsks_map_fd;
- __u32 queue_id;
- char ifname[IFNAMSIZ];
-};
-
-struct xsk_nl_info {
- bool xdp_prog_attached;
- int ifindex;
- int fd;
-};
-
-/* Up until and including Linux 5.3 */
-struct xdp_ring_offset_v1 {
- __u64 producer;
- __u64 consumer;
- __u64 desc;
-};
-
-/* Up until and including Linux 5.3 */
-struct xdp_mmap_offsets_v1 {
- struct xdp_ring_offset_v1 rx;
- struct xdp_ring_offset_v1 tx;
- struct xdp_ring_offset_v1 fr;
- struct xdp_ring_offset_v1 cr;
-};
-
-int xsk_umem__fd(const struct xsk_umem *umem)
-{
- return umem ? umem->fd : -EINVAL;
-}
-
-int xsk_socket__fd(const struct xsk_socket *xsk)
-{
- return xsk ? xsk->fd : -EINVAL;
-}
-
-static bool xsk_page_aligned(void *buffer)
-{
- unsigned long addr = (unsigned long)buffer;
-
- return !(addr & (getpagesize() - 1));
-}
-
-static void xsk_set_umem_config(struct xsk_umem_config *cfg,
- const struct xsk_umem_config *usr_cfg)
-{
- if (!usr_cfg) {
- cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
- cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
- cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
- cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
- cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
- return;
- }
-
- cfg->fill_size = usr_cfg->fill_size;
- cfg->comp_size = usr_cfg->comp_size;
- cfg->frame_size = usr_cfg->frame_size;
- cfg->frame_headroom = usr_cfg->frame_headroom;
- cfg->flags = usr_cfg->flags;
-}
-
-static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
- const struct xsk_socket_config *usr_cfg)
-{
- if (!usr_cfg) {
- cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
- cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
- cfg->libbpf_flags = 0;
- cfg->xdp_flags = 0;
- cfg->bind_flags = 0;
- return 0;
- }
-
- if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
- return -EINVAL;
-
- cfg->rx_size = usr_cfg->rx_size;
- cfg->tx_size = usr_cfg->tx_size;
- cfg->libbpf_flags = usr_cfg->libbpf_flags;
- cfg->xdp_flags = usr_cfg->xdp_flags;
- cfg->bind_flags = usr_cfg->bind_flags;
-
- return 0;
-}
-
-static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
-{
- struct xdp_mmap_offsets_v1 off_v1;
-
- /* getsockopt on a kernel <= 5.3 has no flags fields.
- * Copy over the offsets to the correct places in the >=5.4 format
- * and put the flags where they would have been on that kernel.
- */
- memcpy(&off_v1, off, sizeof(off_v1));
-
- off->rx.producer = off_v1.rx.producer;
- off->rx.consumer = off_v1.rx.consumer;
- off->rx.desc = off_v1.rx.desc;
- off->rx.flags = off_v1.rx.consumer + sizeof(__u32);
-
- off->tx.producer = off_v1.tx.producer;
- off->tx.consumer = off_v1.tx.consumer;
- off->tx.desc = off_v1.tx.desc;
- off->tx.flags = off_v1.tx.consumer + sizeof(__u32);
-
- off->fr.producer = off_v1.fr.producer;
- off->fr.consumer = off_v1.fr.consumer;
- off->fr.desc = off_v1.fr.desc;
- off->fr.flags = off_v1.fr.consumer + sizeof(__u32);
-
- off->cr.producer = off_v1.cr.producer;
- off->cr.consumer = off_v1.cr.consumer;
- off->cr.desc = off_v1.cr.desc;
- off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
-}
-
-static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
-{
- socklen_t optlen;
- int err;
-
- optlen = sizeof(*off);
- err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
- if (err)
- return err;
-
- if (optlen == sizeof(*off))
- return 0;
-
- if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
- xsk_mmap_offsets_v1(off);
- return 0;
- }
-
- return -EINVAL;
-}
-
-int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
- __u64 size, struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *usr_config)
-{
- struct xdp_mmap_offsets off;
- struct xdp_umem_reg mr;
- struct xsk_umem *umem;
- void *map;
- int err;
-
- if (!umem_area || !umem_ptr || !fill || !comp)
- return -EFAULT;
- if (!size && !xsk_page_aligned(umem_area))
- return -EINVAL;
-
- umem = calloc(1, sizeof(*umem));
- if (!umem)
- return -ENOMEM;
-
- umem->fd = socket(AF_XDP, SOCK_RAW, 0);
- if (umem->fd < 0) {
- err = -errno;
- goto out_umem_alloc;
- }
-
- umem->umem_area = umem_area;
- xsk_set_umem_config(&umem->config, usr_config);
-
- memset(&mr, 0, sizeof(mr));
- mr.addr = (uintptr_t)umem_area;
- mr.len = size;
- mr.chunk_size = umem->config.frame_size;
- mr.headroom = umem->config.frame_headroom;
- mr.flags = umem->config.flags;
-
- err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
- if (err) {
- err = -errno;
- goto out_socket;
- }
- err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
- &umem->config.fill_size,
- sizeof(umem->config.fill_size));
- if (err) {
- err = -errno;
- goto out_socket;
- }
- err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
- &umem->config.comp_size,
- sizeof(umem->config.comp_size));
- if (err) {
- err = -errno;
- goto out_socket;
- }
-
- err = xsk_get_mmap_offsets(umem->fd, &off);
- if (err) {
- err = -errno;
- goto out_socket;
- }
-
- map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
- XDP_UMEM_PGOFF_FILL_RING);
- if (map == MAP_FAILED) {
- err = -errno;
- goto out_socket;
- }
-
- umem->fill = fill;
- fill->mask = umem->config.fill_size - 1;
- fill->size = umem->config.fill_size;
- fill->producer = map + off.fr.producer;
- fill->consumer = map + off.fr.consumer;
- fill->flags = map + off.fr.flags;
- fill->ring = map + off.fr.desc;
- fill->cached_prod = *fill->producer;
- /* cached_cons is "size" bigger than the real consumer pointer
- * See xsk_prod_nb_free
- */
- fill->cached_cons = *fill->consumer + umem->config.fill_size;
-
- map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
- XDP_UMEM_PGOFF_COMPLETION_RING);
- if (map == MAP_FAILED) {
- err = -errno;
- goto out_mmap;
- }
-
- umem->comp = comp;
- comp->mask = umem->config.comp_size - 1;
- comp->size = umem->config.comp_size;
- comp->producer = map + off.cr.producer;
- comp->consumer = map + off.cr.consumer;
- comp->flags = map + off.cr.flags;
- comp->ring = map + off.cr.desc;
- comp->cached_prod = *comp->producer;
- comp->cached_cons = *comp->consumer;
-
- *umem_ptr = umem;
- return 0;
-
-out_mmap:
- munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
-out_socket:
- close(umem->fd);
-out_umem_alloc:
- free(umem);
- return err;
-}
-
-struct xsk_umem_config_v1 {
- __u32 fill_size;
- __u32 comp_size;
- __u32 frame_size;
- __u32 frame_headroom;
-};
-
-int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
- __u64 size, struct xsk_ring_prod *fill,
- struct xsk_ring_cons *comp,
- const struct xsk_umem_config *usr_config)
-{
- struct xsk_umem_config config;
-
- memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
- config.flags = 0;
-
- return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
- &config);
-}
-COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
-DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
-
-static int xsk_load_xdp_prog(struct xsk_socket *xsk)
-{
- static const int log_buf_size = 16 * 1024;
- char log_buf[log_buf_size];
- int err, prog_fd;
-
- /* This is the C-program:
- * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
- * {
- * int ret, index = ctx->rx_queue_index;
- *
- * // A set entry here means that the correspnding queue_id
- * // has an active AF_XDP socket bound to it.
- * ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
- * if (ret > 0)
- * return ret;
- *
- * // Fallback for pre-5.3 kernels, not supporting default
- * // action in the flags parameter.
- * if (bpf_map_lookup_elem(&xsks_map, &index))
- * return bpf_redirect_map(&xsks_map, index, 0);
- * return XDP_PASS;
- * }
- */
- struct bpf_insn prog[] = {
- /* r2 = *(u32 *)(r1 + 16) */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
- /* *(u32 *)(r10 - 4) = r2 */
- BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
- /* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
- /* r3 = XDP_PASS */
- BPF_MOV64_IMM(BPF_REG_3, 2),
- /* call bpf_redirect_map */
- BPF_EMIT_CALL(BPF_FUNC_redirect_map),
- /* if w0 != 0 goto pc+13 */
- BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
- /* r2 = r10 */
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
- /* r2 += -4 */
- BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
- /* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
- /* call bpf_map_lookup_elem */
- BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- /* r1 = r0 */
- BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
- /* r0 = XDP_PASS */
- BPF_MOV64_IMM(BPF_REG_0, 2),
- /* if r1 == 0 goto pc+5 */
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
- /* r2 = *(u32 *)(r10 - 4) */
- BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
- /* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
- /* r3 = 0 */
- BPF_MOV64_IMM(BPF_REG_3, 0),
- /* call bpf_redirect_map */
- BPF_EMIT_CALL(BPF_FUNC_redirect_map),
- /* The jumps are to this instruction */
- BPF_EXIT_INSN(),
- };
- size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
-
- prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
- "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
- log_buf_size);
- if (prog_fd < 0) {
- pr_warn("BPF log buffer:\n%s", log_buf);
- return prog_fd;
- }
-
- err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
- if (err) {
- close(prog_fd);
- return err;
- }
-
- xsk->prog_fd = prog_fd;
- return 0;
-}
-
-static int xsk_get_max_queues(struct xsk_socket *xsk)
-{
- struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
- struct ifreq ifr = {};
- int fd, err, ret;
-
- fd = socket(AF_INET, SOCK_DGRAM, 0);
- if (fd < 0)
- return -errno;
-
- ifr.ifr_data = (void *)&channels;
- memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
- ifr.ifr_name[IFNAMSIZ - 1] = '\0';
- err = ioctl(fd, SIOCETHTOOL, &ifr);
- if (err && errno != EOPNOTSUPP) {
- ret = -errno;
- goto out;
- }
-
- if (err) {
- /* If the device says it has no channels, then all traffic
- * is sent to a single stream, so max queues = 1.
- */
- ret = 1;
- } else {
- /* Take the max of rx, tx, combined. Drivers return
- * the number of channels in different ways.
- */
- ret = max(channels.max_rx, channels.max_tx);
- ret = max(ret, (int)channels.max_combined);
- }
-
-out:
- close(fd);
- return ret;
-}
-
-static int xsk_create_bpf_maps(struct xsk_socket *xsk)
-{
- int max_queues;
- int fd;
-
- max_queues = xsk_get_max_queues(xsk);
- if (max_queues < 0)
- return max_queues;
-
- fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
- sizeof(int), sizeof(int), max_queues, 0);
- if (fd < 0)
- return fd;
-
- xsk->xsks_map_fd = fd;
-
- return 0;
-}
-
-static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
-{
- bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
- close(xsk->xsks_map_fd);
-}
-
-static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
-{
- __u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
- __u32 map_len = sizeof(struct bpf_map_info);
- struct bpf_prog_info prog_info = {};
- struct bpf_map_info map_info;
- int fd, err;
-
- err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
- if (err)
- return err;
-
- num_maps = prog_info.nr_map_ids;
-
- map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
- if (!map_ids)
- return -ENOMEM;
-
- memset(&prog_info, 0, prog_len);
- prog_info.nr_map_ids = num_maps;
- prog_info.map_ids = (__u64)(unsigned long)map_ids;
-
- err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
- if (err)
- goto out_map_ids;
-
- xsk->xsks_map_fd = -1;
-
- for (i = 0; i < prog_info.nr_map_ids; i++) {
- fd = bpf_map_get_fd_by_id(map_ids[i]);
- if (fd < 0)
- continue;
-
- err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
- if (err) {
- close(fd);
- continue;
- }
-
- if (!strcmp(map_info.name, "xsks_map")) {
- xsk->xsks_map_fd = fd;
- continue;
- }
-
- close(fd);
- }
-
- err = 0;
- if (xsk->xsks_map_fd == -1)
- err = -ENOENT;
-
-out_map_ids:
- free(map_ids);
- return err;
-}
-
-static int xsk_set_bpf_maps(struct xsk_socket *xsk)
-{
- return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
- &xsk->fd, 0);
-}
-
-static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
-{
- __u32 prog_id = 0;
- int err;
-
- err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
- xsk->config.xdp_flags);
- if (err)
- return err;
-
- if (!prog_id) {
- err = xsk_create_bpf_maps(xsk);
- if (err)
- return err;
-
- err = xsk_load_xdp_prog(xsk);
- if (err) {
- xsk_delete_bpf_maps(xsk);
- return err;
- }
- } else {
- xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
- if (xsk->prog_fd < 0)
- return -errno;
- err = xsk_lookup_bpf_maps(xsk);
- if (err) {
- close(xsk->prog_fd);
- return err;
- }
- }
-
- if (xsk->rx)
- err = xsk_set_bpf_maps(xsk);
- if (err) {
- xsk_delete_bpf_maps(xsk);
- close(xsk->prog_fd);
- return err;
- }
-
- return 0;
-}
-
-int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
- __u32 queue_id, struct xsk_umem *umem,
- struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
- const struct xsk_socket_config *usr_config)
-{
- void *rx_map = NULL, *tx_map = NULL;
- struct sockaddr_xdp sxdp = {};
- struct xdp_mmap_offsets off;
- struct xsk_socket *xsk;
- int err;
-
- if (!umem || !xsk_ptr || !(rx || tx))
- return -EFAULT;
-
- xsk = calloc(1, sizeof(*xsk));
- if (!xsk)
- return -ENOMEM;
-
- err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
- if (err)
- goto out_xsk_alloc;
-
- if (umem->refcount &&
- !(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
- pr_warn("Error: shared umems not supported by libbpf supplied XDP program.\n");
- err = -EBUSY;
- goto out_xsk_alloc;
- }
-
- if (umem->refcount++ > 0) {
- xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
- if (xsk->fd < 0) {
- err = -errno;
- goto out_xsk_alloc;
- }
- } else {
- xsk->fd = umem->fd;
- }
-
- xsk->outstanding_tx = 0;
- xsk->queue_id = queue_id;
- xsk->umem = umem;
- xsk->ifindex = if_nametoindex(ifname);
- if (!xsk->ifindex) {
- err = -errno;
- goto out_socket;
- }
- memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
- xsk->ifname[IFNAMSIZ - 1] = '\0';
-
- if (rx) {
- err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
- &xsk->config.rx_size,
- sizeof(xsk->config.rx_size));
- if (err) {
- err = -errno;
- goto out_socket;
- }
- }
- if (tx) {
- err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
- &xsk->config.tx_size,
- sizeof(xsk->config.tx_size));
- if (err) {
- err = -errno;
- goto out_socket;
- }
- }
-
- err = xsk_get_mmap_offsets(xsk->fd, &off);
- if (err) {
- err = -errno;
- goto out_socket;
- }
-
- if (rx) {
- rx_map = mmap(NULL, off.rx.desc +
- xsk->config.rx_size * sizeof(struct xdp_desc),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
- xsk->fd, XDP_PGOFF_RX_RING);
- if (rx_map == MAP_FAILED) {
- err = -errno;
- goto out_socket;
- }
-
- rx->mask = xsk->config.rx_size - 1;
- rx->size = xsk->config.rx_size;
- rx->producer = rx_map + off.rx.producer;
- rx->consumer = rx_map + off.rx.consumer;
- rx->flags = rx_map + off.rx.flags;
- rx->ring = rx_map + off.rx.desc;
- rx->cached_prod = *rx->producer;
- rx->cached_cons = *rx->consumer;
- }
- xsk->rx = rx;
-
- if (tx) {
- tx_map = mmap(NULL, off.tx.desc +
- xsk->config.tx_size * sizeof(struct xdp_desc),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
- xsk->fd, XDP_PGOFF_TX_RING);
- if (tx_map == MAP_FAILED) {
- err = -errno;
- goto out_mmap_rx;
- }
-
- tx->mask = xsk->config.tx_size - 1;
- tx->size = xsk->config.tx_size;
- tx->producer = tx_map + off.tx.producer;
- tx->consumer = tx_map + off.tx.consumer;
- tx->flags = tx_map + off.tx.flags;
- tx->ring = tx_map + off.tx.desc;
- tx->cached_prod = *tx->producer;
- /* cached_cons is r->size bigger than the real consumer pointer
- * See xsk_prod_nb_free
- */
- tx->cached_cons = *tx->consumer + xsk->config.tx_size;
- }
- xsk->tx = tx;
-
- sxdp.sxdp_family = PF_XDP;
- sxdp.sxdp_ifindex = xsk->ifindex;
- sxdp.sxdp_queue_id = xsk->queue_id;
- if (umem->refcount > 1) {
- sxdp.sxdp_flags = XDP_SHARED_UMEM;
- sxdp.sxdp_shared_umem_fd = umem->fd;
- } else {
- sxdp.sxdp_flags = xsk->config.bind_flags;
- }
-
- err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
- if (err) {
- err = -errno;
- goto out_mmap_tx;
- }
-
- xsk->prog_fd = -1;
-
- if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
- err = xsk_setup_xdp_prog(xsk);
- if (err)
- goto out_mmap_tx;
- }
-
- *xsk_ptr = xsk;
- return 0;
-
-out_mmap_tx:
- if (tx)
- munmap(tx_map, off.tx.desc +
- xsk->config.tx_size * sizeof(struct xdp_desc));
-out_mmap_rx:
- if (rx)
- munmap(rx_map, off.rx.desc +
- xsk->config.rx_size * sizeof(struct xdp_desc));
-out_socket:
- if (--umem->refcount)
- close(xsk->fd);
-out_xsk_alloc:
- free(xsk);
- return err;
-}
-
-int xsk_umem__delete(struct xsk_umem *umem)
-{
- struct xdp_mmap_offsets off;
- int err;
-
- if (!umem)
- return 0;
-
- if (umem->refcount)
- return -EBUSY;
-
- err = xsk_get_mmap_offsets(umem->fd, &off);
- if (!err) {
- munmap(umem->fill->ring - off.fr.desc,
- off.fr.desc + umem->config.fill_size * sizeof(__u64));
- munmap(umem->comp->ring - off.cr.desc,
- off.cr.desc + umem->config.comp_size * sizeof(__u64));
- }
-
- close(umem->fd);
- free(umem);
-
- return 0;
-}
-
-void xsk_socket__delete(struct xsk_socket *xsk)
-{
- size_t desc_sz = sizeof(struct xdp_desc);
- struct xdp_mmap_offsets off;
- int err;
-
- if (!xsk)
- return;
-
- if (xsk->prog_fd != -1) {
- xsk_delete_bpf_maps(xsk);
- close(xsk->prog_fd);
- }
-
- err = xsk_get_mmap_offsets(xsk->fd, &off);
- if (!err) {
- if (xsk->rx) {
- munmap(xsk->rx->ring - off.rx.desc,
- off.rx.desc + xsk->config.rx_size * desc_sz);
- }
- if (xsk->tx) {
- munmap(xsk->tx->ring - off.tx.desc,
- off.tx.desc + xsk->config.tx_size * desc_sz);
- }
-
- }
-
- xsk->umem->refcount--;
- /* Do not close an fd that also has an associated umem connected
- * to it.
- */
- if (xsk->fd != xsk->umem->fd)
- close(xsk->fd);
- free(xsk);
-}
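
For context on the legacy AF_XDP API removed above, here is a minimal caller sketch, assuming library defaults for ring and frame sizes; the buffer-pool size, helper name, and error handling are illustrative and not taken from this patch:

/* Hedged sketch of a caller of the removed xsk_umem__create()/xsk_socket__create(). */
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <bpf/xsk.h>

#define NUM_FRAMES 4096	/* assumption: arbitrary buffer-pool size */

/* Ring structures must outlive the socket; kept at file scope in this sketch. */
static struct xsk_ring_prod fill, tx;
static struct xsk_ring_cons comp, rx;

static int open_af_xdp(const char *ifname, __u32 queue_id,
		       struct xsk_socket **out_xsk)
{
	__u64 size = NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
	struct xsk_umem *umem;
	void *bufs;
	int err;

	/* Page-aligned buffer area backing the umem. */
	if (posix_memalign(&bufs, getpagesize(), size))
		return -ENOMEM;

	/* NULL config selects the library defaults (fill/comp ring sizes, etc.). */
	err = xsk_umem__create(&umem, bufs, size, &fill, &comp, NULL);
	if (err) {
		free(bufs);
		return err;
	}

	/* Binds to the queue and, unless inhibited, loads the default XDP program. */
	return xsk_socket__create(out_xsk, ifname, queue_id, umem, &rx, &tx, NULL);
}

Teardown mirrors the delete paths above: xsk_socket__delete() first, then xsk_umem__delete() once the umem refcount has dropped.
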
diff --git a/tools/lib/bpf/zip.c b/tools/lib/bpf/zip.c
new file mode 100644
index 000000000000..3f26d629b2b4
--- /dev/null
+++ b/tools/lib/bpf/zip.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+/*
+ * Routines for dealing with .zip archives.
+ *
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "libbpf_internal.h"
+#include "zip.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+
+/* Specification of ZIP file format can be found here:
+ * https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
+ * For a high level overview of the structure of a ZIP file see
+ * sections 4.3.1 - 4.3.6.
+ *
+ * Data structures appearing in ZIP files do not contain any
+ * padding and they might be misaligned. To allow us to safely
+ * operate on pointers to such structures and their members, we
+ * declare the types as packed.
+ */
+
+#define END_OF_CD_RECORD_MAGIC 0x06054b50
+
+/* See section 4.3.16 of the spec. */
+struct end_of_cd_record {
+ /* Magic value equal to END_OF_CD_RECORD_MAGIC */
+ __u32 magic;
+
+ /* Number of the file containing this structure or 0xFFFF if ZIP64 archive.
+ * A ZIP archive might span multiple files (disks).
+ */
+ __u16 this_disk;
+
+ /* Number of the file containing the beginning of the central directory or
+ * 0xFFFF if ZIP64 archive.
+ */
+ __u16 cd_disk;
+
+ /* Number of central directory records on this disk or 0xFFFF if ZIP64
+ * archive.
+ */
+ __u16 cd_records;
+
+ /* Number of central directory records on all disks or 0xFFFF if ZIP64
+ * archive.
+ */
+ __u16 cd_records_total;
+
+ /* Size of the central directory record or 0xFFFFFFFF if ZIP64 archive. */
+ __u32 cd_size;
+
+ /* Offset of the central directory from the beginning of the archive or
+ * 0xFFFFFFFF if ZIP64 archive.
+ */
+ __u32 cd_offset;
+
+ /* Length of comment data following end of central directory record. */
+ __u16 comment_length;
+
+ /* Up to 64k of arbitrary bytes. */
+ /* uint8_t comment[comment_length] */
+} __attribute__((packed));
+
+#define CD_FILE_HEADER_MAGIC 0x02014b50
+#define FLAG_ENCRYPTED (1 << 0)
+#define FLAG_HAS_DATA_DESCRIPTOR (1 << 3)
+
+/* See section 4.3.12 of the spec. */
+struct cd_file_header {
+ /* Magic value equal to CD_FILE_HEADER_MAGIC. */
+ __u32 magic;
+ __u16 version;
+ /* Minimum zip version needed to extract the file. */
+ __u16 min_version;
+ __u16 flags;
+ __u16 compression;
+ __u16 last_modified_time;
+ __u16 last_modified_date;
+ __u32 crc;
+ __u32 compressed_size;
+ __u32 uncompressed_size;
+ __u16 file_name_length;
+ __u16 extra_field_length;
+ __u16 file_comment_length;
+ /* Number of the disk where the file starts or 0xFFFF if ZIP64 archive. */
+ __u16 disk;
+ __u16 internal_attributes;
+ __u32 external_attributes;
+ /* Offset from the start of the disk containing the local file header to the
+ * start of the local file header.
+ */
+ __u32 offset;
+} __attribute__((packed));
+
+#define LOCAL_FILE_HEADER_MAGIC 0x04034b50
+
+/* See section 4.3.7 of the spec. */
+struct local_file_header {
+ /* Magic value equal to LOCAL_FILE_HEADER_MAGIC. */
+ __u32 magic;
+ /* Minimum zip version needed to extract the file. */
+ __u16 min_version;
+ __u16 flags;
+ __u16 compression;
+ __u16 last_modified_time;
+ __u16 last_modified_date;
+ __u32 crc;
+ __u32 compressed_size;
+ __u32 uncompressed_size;
+ __u16 file_name_length;
+ __u16 extra_field_length;
+} __attribute__((packed));
+
+#pragma GCC diagnostic pop
+
+struct zip_archive {
+ void *data;
+ __u32 size;
+ __u32 cd_offset;
+ __u32 cd_records;
+};
+
+static void *check_access(struct zip_archive *archive, __u32 offset, __u32 size)
+{
+ if (offset + size > archive->size || offset > offset + size)
+ return NULL;
+
+ return archive->data + offset;
+}
+
+/* Returns 0 on success, -EINVAL on error and -ENOTSUP if the eocd indicates the
+ * archive uses features which are not supported.
+ */
+static int try_parse_end_of_cd(struct zip_archive *archive, __u32 offset)
+{
+ __u16 comment_length, cd_records;
+ struct end_of_cd_record *eocd;
+ __u32 cd_offset, cd_size;
+
+ eocd = check_access(archive, offset, sizeof(*eocd));
+ if (!eocd || eocd->magic != END_OF_CD_RECORD_MAGIC)
+ return -EINVAL;
+
+ comment_length = eocd->comment_length;
+ if (offset + sizeof(*eocd) + comment_length != archive->size)
+ return -EINVAL;
+
+ cd_records = eocd->cd_records;
+ if (eocd->this_disk != 0 || eocd->cd_disk != 0 || eocd->cd_records_total != cd_records)
+ /* This is a valid eocd, but we only support single-file non-ZIP64 archives. */
+ return -ENOTSUP;
+
+ cd_offset = eocd->cd_offset;
+ cd_size = eocd->cd_size;
+ if (!check_access(archive, cd_offset, cd_size))
+ return -EINVAL;
+
+ archive->cd_offset = cd_offset;
+ archive->cd_records = cd_records;
+ return 0;
+}
+
+static int find_cd(struct zip_archive *archive)
+{
+ int64_t limit, offset;
+ int rc = -EINVAL;
+
+ if (archive->size <= sizeof(struct end_of_cd_record))
+ return -EINVAL;
+
+ /* Because the end of central directory ends with a variable length array of
+ * up to 0xFFFF bytes we can't know exactly where it starts and need to
+ * search for it at the end of the file, scanning the (limit, offset] range.
+ */
+ offset = archive->size - sizeof(struct end_of_cd_record);
+ limit = (int64_t)offset - (1 << 16);
+
+ for (; offset >= 0 && offset > limit && rc != 0; offset--) {
+ rc = try_parse_end_of_cd(archive, offset);
+ if (rc == -ENOTSUP)
+ break;
+ }
+ return rc;
+}
+
+struct zip_archive *zip_archive_open(const char *path)
+{
+ struct zip_archive *archive;
+ int err, fd;
+ off_t size;
+ void *data;
+
+ fd = open(path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0)
+ return ERR_PTR(-errno);
+
+ size = lseek(fd, 0, SEEK_END);
+ if (size == (off_t)-1 || size > UINT32_MAX) {
+ close(fd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
+ err = -errno;
+ close(fd);
+
+ if (data == MAP_FAILED)
+ return ERR_PTR(err);
+
+ archive = malloc(sizeof(*archive));
+ if (!archive) {
+ munmap(data, size);
+ return ERR_PTR(-ENOMEM);
+ };
+
+ archive->data = data;
+ archive->size = size;
+
+ err = find_cd(archive);
+ if (err) {
+ munmap(data, size);
+ free(archive);
+ return ERR_PTR(err);
+ }
+
+ return archive;
+}
+
+void zip_archive_close(struct zip_archive *archive)
+{
+ munmap(archive->data, archive->size);
+ free(archive);
+}
+
+static struct local_file_header *local_file_header_at_offset(struct zip_archive *archive,
+ __u32 offset)
+{
+ struct local_file_header *lfh;
+
+ lfh = check_access(archive, offset, sizeof(*lfh));
+ if (!lfh || lfh->magic != LOCAL_FILE_HEADER_MAGIC)
+ return NULL;
+
+ return lfh;
+}
+
+static int get_entry_at_offset(struct zip_archive *archive, __u32 offset, struct zip_entry *out)
+{
+ struct local_file_header *lfh;
+ __u32 compressed_size;
+ const char *name;
+ void *data;
+
+ lfh = local_file_header_at_offset(archive, offset);
+ if (!lfh)
+ return -EINVAL;
+
+ offset += sizeof(*lfh);
+ if ((lfh->flags & FLAG_ENCRYPTED) || (lfh->flags & FLAG_HAS_DATA_DESCRIPTOR))
+ return -EINVAL;
+
+ name = check_access(archive, offset, lfh->file_name_length);
+ if (!name)
+ return -EINVAL;
+
+ offset += lfh->file_name_length;
+ if (!check_access(archive, offset, lfh->extra_field_length))
+ return -EINVAL;
+
+ offset += lfh->extra_field_length;
+ compressed_size = lfh->compressed_size;
+ data = check_access(archive, offset, compressed_size);
+ if (!data)
+ return -EINVAL;
+
+ out->compression = lfh->compression;
+ out->name_length = lfh->file_name_length;
+ out->name = name;
+ out->data = data;
+ out->data_length = compressed_size;
+ out->data_offset = offset;
+
+ return 0;
+}
+
+int zip_archive_find_entry(struct zip_archive *archive, const char *file_name,
+ struct zip_entry *out)
+{
+ size_t file_name_length = strlen(file_name);
+ __u32 i, offset = archive->cd_offset;
+
+ for (i = 0; i < archive->cd_records; ++i) {
+ __u16 cdfh_name_length, cdfh_flags;
+ struct cd_file_header *cdfh;
+ const char *cdfh_name;
+
+ cdfh = check_access(archive, offset, sizeof(*cdfh));
+ if (!cdfh || cdfh->magic != CD_FILE_HEADER_MAGIC)
+ return -EINVAL;
+
+ offset += sizeof(*cdfh);
+ cdfh_name_length = cdfh->file_name_length;
+ cdfh_name = check_access(archive, offset, cdfh_name_length);
+ if (!cdfh_name)
+ return -EINVAL;
+
+ cdfh_flags = cdfh->flags;
+ if ((cdfh_flags & FLAG_ENCRYPTED) == 0 &&
+ (cdfh_flags & FLAG_HAS_DATA_DESCRIPTOR) == 0 &&
+ file_name_length == cdfh_name_length &&
+ memcmp(file_name, archive->data + offset, file_name_length) == 0) {
+ return get_entry_at_offset(archive, cdfh->offset, out);
+ }
+
+ offset += cdfh_name_length;
+ offset += cdfh->extra_field_length;
+ offset += cdfh->file_comment_length;
+ }
+
+ return -ENOENT;
+}
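
To make the lookup path above concrete, a minimal internal-caller sketch, assuming the ERR_PTR()-style error convention used by zip_archive_open() and the IS_ERR()/PTR_ERR() helpers from the tools headers; the archive path and member name are placeholders, and only stored (compression == 0) entries are dumped:

/* Illustrative only: open an archive and write one uncompressed member to stdout. */
#include <stdio.h>
#include <linux/err.h>		/* IS_ERR()/PTR_ERR() from tools/include */
#include "zip.h"

static int dump_member(const char *path, const char *member)
{
	struct zip_archive *ar;
	struct zip_entry entry;
	int err;

	ar = zip_archive_open(path);
	if (IS_ERR(ar))
		return PTR_ERR(ar);

	err = zip_archive_find_entry(ar, member, &entry);
	if (!err && entry.compression == 0)
		fwrite(entry.data, 1, entry.data_length, stdout);

	zip_archive_close(ar);
	return err;
}
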
diff --git a/tools/lib/bpf/zip.h b/tools/lib/bpf/zip.h
new file mode 100644
index 000000000000..1c1bb21fba76
--- /dev/null
+++ b/tools/lib/bpf/zip.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+#ifndef __LIBBPF_ZIP_H
+#define __LIBBPF_ZIP_H
+
+#include <linux/types.h>
+
+/* Represents an open zip archive.
+ * Only basic ZIP files are supported, in particular the following are not
+ * supported:
+ * - encryption
+ * - streaming
+ * - multi-part ZIP files
+ * - ZIP64
+ */
+struct zip_archive;
+
+/* Carries information on name, compression method, and data corresponding to a
+ * file in a zip archive.
+ */
+struct zip_entry {
+ /* Compression method as defined in pkzip spec. 0 means data is uncompressed. */
+ __u16 compression;
+
+	/* Name of the file, not null-terminated. */
+ const char *name;
+ /* Length of the file name. */
+ __u16 name_length;
+
+ /* Pointer to the file data. */
+ const void *data;
+ /* Length of the file data. */
+ __u32 data_length;
+ /* Offset of the file data within the archive. */
+ __u32 data_offset;
+};
+
+/* Open a zip archive. Returns NULL in case of an error. */
+struct zip_archive *zip_archive_open(const char *path);
+
+/* Close a zip archive and release resources. */
+void zip_archive_close(struct zip_archive *archive);
+
+/* Look up an entry corresponding to a file in given zip archive. */
+int zip_archive_find_entry(struct zip_archive *archive, const char *name, struct zip_entry *out);
+
+#endif
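
Because zip_entry.name is not null-terminated, callers must pair it with name_length; for example (a hedged one-liner, assuming a populated struct zip_entry named entry):

printf("%.*s: %u bytes at offset %u\n",
       (int)entry.name_length, entry.name, entry.data_length, entry.data_offset);
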
diff --git a/tools/lib/find_bit.c b/tools/lib/find_bit.c
index ac37022e9486..6a3dc167d30e 100644
--- a/tools/lib/find_bit.c
+++ b/tools/lib/find_bit.c
@@ -18,74 +18,74 @@
#include <linux/bitmap.h>
#include <linux/kernel.h>
-#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
- !defined(find_next_and_bit)
-
/*
- * This is a common helper function for find_next_bit, find_next_zero_bit, and
- * find_next_and_bit. The differences are:
- * - The "invert" argument, which is XORed with each fetched word before
- * searching it for one bits.
- * - The optional "addr2", which is anded with "addr1" if present.
+ * Common helper for find_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
*/
-static inline unsigned long _find_next_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long nbits,
- unsigned long start, unsigned long invert)
-{
- unsigned long tmp;
-
- if (unlikely(start >= nbits))
- return nbits;
-
- tmp = addr1[start / BITS_PER_LONG];
- if (addr2)
- tmp &= addr2[start / BITS_PER_LONG];
- tmp ^= invert;
-
- /* Handle 1st word. */
- tmp &= BITMAP_FIRST_WORD_MASK(start);
- start = round_down(start, BITS_PER_LONG);
-
- while (!tmp) {
- start += BITS_PER_LONG;
- if (start >= nbits)
- return nbits;
-
- tmp = addr1[start / BITS_PER_LONG];
- if (addr2)
- tmp &= addr2[start / BITS_PER_LONG];
- tmp ^= invert;
- }
+#define FIND_FIRST_BIT(FETCH, MUNGE, size) \
+({ \
+ unsigned long idx, val, sz = (size); \
+ \
+ for (idx = 0; idx * BITS_PER_LONG < sz; idx++) { \
+ val = (FETCH); \
+ if (val) { \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(val)), sz); \
+ break; \
+ } \
+ } \
+ \
+ sz; \
+})
- return min(start + __ffs(tmp), nbits);
-}
-#endif
+/*
+ * Common helper for find_next_bit() function family
+ * @FETCH: The expression that fetches and pre-processes each word of bitmap(s)
+ * @MUNGE: The expression that post-processes a word containing found bit (may be empty)
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ */
+#define FIND_NEXT_BIT(FETCH, MUNGE, size, start) \
+({ \
+ unsigned long mask, idx, tmp, sz = (size), __start = (start); \
+ \
+ if (unlikely(__start >= sz)) \
+ goto out; \
+ \
+ mask = MUNGE(BITMAP_FIRST_WORD_MASK(__start)); \
+ idx = __start / BITS_PER_LONG; \
+ \
+ for (tmp = (FETCH) & mask; !tmp; tmp = (FETCH)) { \
+ if ((idx + 1) * BITS_PER_LONG >= sz) \
+ goto out; \
+ idx++; \
+ } \
+ \
+ sz = min(idx * BITS_PER_LONG + __ffs(MUNGE(tmp)), sz); \
+out: \
+ sz; \
+})
-#ifndef find_next_bit
+#ifndef find_first_bit
/*
- * Find the next set bit in a memory region.
+ * Find the first set bit in a memory region.
*/
-unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+unsigned long _find_first_bit(const unsigned long *addr, unsigned long size)
{
- return _find_next_bit(addr, NULL, size, offset, 0UL);
+ return FIND_FIRST_BIT(addr[idx], /* nop */, size);
}
#endif
-#ifndef find_first_bit
+#ifndef find_first_and_bit
/*
- * Find the first set bit in a memory region.
+ * Find the first set bit in two memory regions.
*/
-unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
{
- unsigned long idx;
-
- for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
- if (addr[idx])
- return min(idx * BITS_PER_LONG + __ffs(addr[idx]), size);
- }
-
- return size;
+ return FIND_FIRST_BIT(addr1[idx] & addr2[idx], /* nop */, size);
}
#endif
@@ -93,32 +93,31 @@ unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
/*
* Find the first cleared bit in a memory region.
*/
-unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
- unsigned long idx;
-
- for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
- if (addr[idx] != ~0UL)
- return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
- }
-
- return size;
+ return FIND_FIRST_BIT(~addr[idx], /* nop */, size);
}
#endif
-#ifndef find_next_zero_bit
-unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
- unsigned long offset)
+#ifndef find_next_bit
+unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start)
{
- return _find_next_bit(addr, NULL, size, offset, ~0UL);
+ return FIND_NEXT_BIT(addr[idx], /* nop */, nbits, start);
}
#endif
#ifndef find_next_and_bit
-unsigned long find_next_and_bit(const unsigned long *addr1,
- const unsigned long *addr2, unsigned long size,
- unsigned long offset)
+unsigned long _find_next_and_bit(const unsigned long *addr1, const unsigned long *addr2,
+ unsigned long nbits, unsigned long start)
+{
+ return FIND_NEXT_BIT(addr1[idx] & addr2[idx], /* nop */, nbits, start);
+}
+#endif
+
+#ifndef find_next_zero_bit
+unsigned long _find_next_zero_bit(const unsigned long *addr, unsigned long nbits,
+ unsigned long start)
{
- return _find_next_bit(addr1, addr2, size, offset, 0UL);
+ return FIND_NEXT_BIT(~addr[idx], /* nop */, nbits, start);
}
#endif
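
The FIND_FIRST_BIT()/FIND_NEXT_BIT() templates take the per-word FETCH expression as a parameter (with idx as the word index), so new search variants reduce to a one-line wrapper. A hypothetical example, not part of this patch:

/* Hypothetical variant: first bit set in either of two bitmaps. */
static unsigned long _find_first_or_bit(const unsigned long *addr1,
					const unsigned long *addr2,
					unsigned long size)
{
	/* FETCH ORs the two words; MUNGE is a no-op since no inversion is needed. */
	return FIND_FIRST_BIT(addr1[idx] | addr2[idx], /* nop */, size);
}
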
diff --git a/tools/lib/list_sort.c b/tools/lib/list_sort.c
new file mode 100644
index 000000000000..10c067e3a8d2
--- /dev/null
+++ b/tools/lib/list_sort.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/list_sort.h>
+#include <linux/list.h>
+
+/*
+ * Returns a list organized in an intermediate format suited
+ * to chaining of merge() calls: null-terminated, no reserved or
+ * sentinel head node, "prev" links not maintained.
+ */
+__attribute__((nonnull(2,3,4)))
+static struct list_head *merge(void *priv, list_cmp_func_t cmp,
+ struct list_head *a, struct list_head *b)
+{
+ struct list_head *head, **tail = &head;
+
+ for (;;) {
+ /* if equal, take 'a' -- important for sort stability */
+ if (cmp(priv, a, b) <= 0) {
+ *tail = a;
+ tail = &a->next;
+ a = a->next;
+ if (!a) {
+ *tail = b;
+ break;
+ }
+ } else {
+ *tail = b;
+ tail = &b->next;
+ b = b->next;
+ if (!b) {
+ *tail = a;
+ break;
+ }
+ }
+ }
+ return head;
+}
+
+/*
+ * Combine final list merge with restoration of standard doubly-linked
+ * list structure. This approach duplicates code from merge(), but
+ * runs faster than the tidier alternatives of either a separate final
+ * prev-link restoration pass, or maintaining the prev links
+ * throughout.
+ */
+__attribute__((nonnull(2,3,4,5)))
+static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head,
+ struct list_head *a, struct list_head *b)
+{
+ struct list_head *tail = head;
+ u8 count = 0;
+
+ for (;;) {
+ /* if equal, take 'a' -- important for sort stability */
+ if (cmp(priv, a, b) <= 0) {
+ tail->next = a;
+ a->prev = tail;
+ tail = a;
+ a = a->next;
+ if (!a)
+ break;
+ } else {
+ tail->next = b;
+ b->prev = tail;
+ tail = b;
+ b = b->next;
+ if (!b) {
+ b = a;
+ break;
+ }
+ }
+ }
+
+ /* Finish linking remainder of list b on to tail */
+ tail->next = b;
+ do {
+ /*
+ * If the merge is highly unbalanced (e.g. the input is
+ * already sorted), this loop may run many iterations.
+ * Continue callbacks to the client even though no
+ * element comparison is needed, so the client's cmp()
+ * routine can invoke cond_resched() periodically.
+ */
+ if (unlikely(!++count))
+ cmp(priv, b, b);
+ b->prev = tail;
+ tail = b;
+ b = b->next;
+ } while (b);
+
+ /* And the final links to make a circular doubly-linked list */
+ tail->next = head;
+ head->prev = tail;
+}
+
+/**
+ * list_sort - sort a list
+ * @priv: private data, opaque to list_sort(), passed to @cmp
+ * @head: the list to sort
+ * @cmp: the elements comparison function
+ *
+ * The comparison function @cmp must return > 0 if @a should sort after
+ * @b ("@a > @b" if you want an ascending sort), and <= 0 if @a should
+ * sort before @b *or* their original order should be preserved. It is
+ * always called with the element that came first in the input in @a,
+ * and list_sort is a stable sort, so it is not necessary to distinguish
+ * the @a < @b and @a == @b cases.
+ *
+ * This is compatible with two styles of @cmp function:
+ * - The traditional style which returns <0 / =0 / >0, or
+ * - Returning a boolean 0/1.
+ * The latter offers a chance to save a few cycles in the comparison
+ * (which is used by e.g. plug_ctx_cmp() in block/blk-mq.c).
+ *
+ * A good way to write a multi-word comparison is::
+ *
+ * if (a->high != b->high)
+ * return a->high > b->high;
+ * if (a->middle != b->middle)
+ * return a->middle > b->middle;
+ * return a->low > b->low;
+ *
+ *
+ * This mergesort is as eager as possible while always performing at least
+ * 2:1 balanced merges. Given two pending sublists of size 2^k, they are
+ * merged to a size-2^(k+1) list as soon as we have 2^k following elements.
+ *
+ * Thus, it will avoid cache thrashing as long as 3*2^k elements can
+ * fit into the cache. Not quite as good as a fully-eager bottom-up
+ * mergesort, but it does use 0.2*n fewer comparisons, so is faster in
+ * the common case that everything fits into L1.
+ *
+ *
+ * The merging is controlled by "count", the number of elements in the
+ * pending lists. This is beautifully simple code, but rather subtle.
+ *
+ * Each time we increment "count", we set one bit (bit k) and clear
+ * bits k-1 .. 0. Each time this happens (except the very first time
+ * for each bit, when count increments to 2^k), we merge two lists of
+ * size 2^k into one list of size 2^(k+1).
+ *
+ * This merge happens exactly when the count reaches an odd multiple of
+ * 2^k, which is when we have 2^k elements pending in smaller lists,
+ * so it's safe to merge away two lists of size 2^k.
+ *
+ * After this happens twice, we have created two lists of size 2^(k+1),
+ * which will be merged into a list of size 2^(k+2) before we create
+ * a third list of size 2^(k+1), so there are never more than two pending.
+ *
+ * The number of pending lists of size 2^k is determined by the
+ * state of bit k of "count" plus two extra pieces of information:
+ *
+ * - The state of bit k-1 (when k == 0, consider bit -1 always set), and
+ * - Whether the higher-order bits are zero or non-zero (i.e.
+ * is count >= 2^(k+1)).
+ *
+ * There are six states we distinguish. "x" represents some arbitrary
+ * bits, and "y" represents some arbitrary non-zero bits:
+ * 0: 00x: 0 pending of size 2^k; x pending of sizes < 2^k
+ * 1: 01x: 0 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * 2: x10x: 0 pending of size 2^k; 2^k + x pending of sizes < 2^k
+ * 3: x11x: 1 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * 4: y00x: 1 pending of size 2^k; 2^k + x pending of sizes < 2^k
+ * 5: y01x: 2 pending of size 2^k; 2^(k-1) + x pending of sizes < 2^k
+ * (merge and loop back to state 2)
+ *
+ * We gain lists of size 2^k in the 2->3 and 4->5 transitions (because
+ * bit k-1 is set while the more significant bits are non-zero) and
+ * merge them away in the 5->2 transition. Note in particular that just
+ * before the 5->2 transition, all lower-order bits are 11 (state 3),
+ * so there is one list of each smaller size.
+ *
+ * When we reach the end of the input, we merge all the pending
+ * lists, from smallest to largest. If you work through cases 2 to
+ * 5 above, you can see that the number of elements we merge with a list
+ * of size 2^k varies from 2^(k-1) (cases 3 and 5 when x == 0) to
+ * 2^(k+1) - 1 (second merge of case 5 when x == 2^(k-1) - 1).
+ */
+__attribute__((nonnull(2,3)))
+void list_sort(void *priv, struct list_head *head, list_cmp_func_t cmp)
+{
+ struct list_head *list = head->next, *pending = NULL;
+ size_t count = 0; /* Count of pending */
+
+ if (list == head->prev) /* Zero or one elements */
+ return;
+
+ /* Convert to a null-terminated singly-linked list. */
+ head->prev->next = NULL;
+
+ /*
+ * Data structure invariants:
+ * - All lists are singly linked and null-terminated; prev
+ * pointers are not maintained.
+ * - pending is a prev-linked "list of lists" of sorted
+ * sublists awaiting further merging.
+ * - Each of the sorted sublists is power-of-two in size.
+ * - Sublists are sorted by size and age, smallest & newest at front.
+ * - There are zero to two sublists of each size.
+ * - A pair of pending sublists are merged as soon as the number
+ * of following pending elements equals their size (i.e.
+ * each time count reaches an odd multiple of that size).
+ * That ensures each later final merge will be at worst 2:1.
+ * - Each round consists of:
+ * - Merging the two sublists selected by the highest bit
+ * which flips when count is incremented, and
+ * - Adding an element from the input as a size-1 sublist.
+ */
+ do {
+ size_t bits;
+ struct list_head **tail = &pending;
+
+ /* Find the least-significant clear bit in count */
+ for (bits = count; bits & 1; bits >>= 1)
+ tail = &(*tail)->prev;
+ /* Do the indicated merge */
+ if (likely(bits)) {
+ struct list_head *a = *tail, *b = a->prev;
+
+ a = merge(priv, cmp, b, a);
+ /* Install the merged result in place of the inputs */
+ a->prev = b->prev;
+ *tail = a;
+ }
+
+ /* Move one element from input list to pending */
+ list->prev = pending;
+ pending = list;
+ list = list->next;
+ pending->next = NULL;
+ count++;
+ } while (list);
+
+ /* End of input; merge together all the pending lists. */
+ list = pending;
+ pending = pending->prev;
+ for (;;) {
+ struct list_head *next = pending->prev;
+
+ if (!next)
+ break;
+ list = merge(priv, cmp, pending, list);
+ pending = next;
+ }
+ /* The final merge, rebuilding prev links */
+ merge_final(priv, cmp, head, pending, list);
+}
+EXPORT_SYMBOL(list_sort);
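
As a usage sketch of the contract described in the kernel-doc above (boolean-style cmp(), stability when it returns 0), assuming a simple caller-defined element type:

/* Illustrative caller; 'struct item' and item_cmp() are not part of this patch. */
struct item {
	struct list_head node;
	int key;
};

static int item_cmp(void *priv, const struct list_head *a,
		    const struct list_head *b)
{
	const struct item *ia = list_entry(a, struct item, node);
	const struct item *ib = list_entry(b, struct item, node);

	/* Nonzero means "a sorts after b"; returning 0 preserves input order. */
	return ia->key > ib->key;
}

static void sort_items(struct list_head *items)
{
	list_sort(NULL, items, item_cmp);	/* stable, ascending by ->key */
}
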
diff --git a/tools/lib/lockdep/Build b/tools/lib/lockdep/Build
deleted file mode 100644
index 6f667355b068..000000000000
--- a/tools/lib/lockdep/Build
+++ /dev/null
@@ -1 +0,0 @@
-liblockdep-y += common.o lockdep.o preload.o rbtree.o
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
deleted file mode 100644
index 9dafb8cb752f..000000000000
--- a/tools/lib/lockdep/Makefile
+++ /dev/null
@@ -1,162 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# file format version
-FILE_VERSION = 1
-
-LIBLOCKDEP_VERSION=$(shell make --no-print-directory -sC ../../.. kernelversion)
-
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-$(call allow-override,LD,$(CROSS_COMPILE)ld)
-
-INSTALL = install
-
-# Use DESTDIR for installing into a different root directory.
-# This is useful for building a package. The program will be
-# installed in this directory as if it was the root directory.
-# Then the build tool can move it later.
-DESTDIR ?=
-DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
-
-prefix ?= /usr/local
-libdir_relative = lib
-libdir = $(prefix)/$(libdir_relative)
-bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
-
-export DESTDIR DESTDIR_SQ INSTALL
-
-MAKEFLAGS += --no-print-directory
-
-include ../../scripts/Makefile.include
-
-# copy a bit from Linux kbuild
-
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-# Shell quotes
-libdir_SQ = $(subst ','\'',$(libdir))
-bindir_SQ = $(subst ','\'',$(bindir))
-
-LIB_IN := $(OUTPUT)liblockdep-in.o
-
-BIN_FILE = lockdep
-LIB_FILE = $(OUTPUT)liblockdep.a $(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION)
-
-CONFIG_INCLUDES =
-CONFIG_LIBS =
-CONFIG_FLAGS =
-
-OBJ = $@
-N =
-
-export Q VERBOSE
-
-INCLUDES = -I. -I./uinclude -I./include -I../../include $(CONFIG_INCLUDES)
-
-# Set compile option CFLAGS if not set elsewhere
-CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
-CFLAGS += -fPIC
-CFLAGS += -Wall
-
-override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
-
-ifeq ($(VERBOSE),1)
- Q =
- print_shared_lib_compile =
- print_install =
-else
- Q = @
- print_shared_lib_compile = echo ' LD '$(OBJ);
- print_static_lib_build = echo ' LD '$(OBJ);
- print_install = echo ' INSTALL '$1' to $(DESTDIR_SQ)$2';
-endif
-
-all:
-
-export srctree OUTPUT CC LD CFLAGS V
-include $(srctree)/tools/build/Makefile.include
-
-do_compile_shared_library = \
- ($(print_shared_lib_compile) \
- $(CC) $(LDFLAGS) --shared $^ -o $@ -lpthread -ldl -Wl,-soname='$(@F)';$(shell ln -sf $(@F) $(@D)/liblockdep.so))
-
-do_build_static_lib = \
- ($(print_static_lib_build) \
- $(RM) $@; $(AR) rcs $@ $^)
-
-CMD_TARGETS = $(LIB_FILE)
-
-TARGETS = $(CMD_TARGETS)
-
-
-all: fixdep all_cmd
-
-all_cmd: $(CMD_TARGETS)
-
-$(LIB_IN): force
- $(Q)$(MAKE) $(build)=liblockdep
-
-$(OUTPUT)liblockdep.so.$(LIBLOCKDEP_VERSION): $(LIB_IN)
- $(Q)$(do_compile_shared_library)
-
-$(OUTPUT)liblockdep.a: $(LIB_IN)
- $(Q)$(do_build_static_lib)
-
-tags: force
- $(RM) tags
- find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
- --regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'
-
-TAGS: force
- $(RM) TAGS
- find . -name '*.[ch]' | xargs etags \
- --regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'
-
-define do_install
- $(print_install) \
- if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
- fi; \
- $(INSTALL) $1 '$(DESTDIR_SQ)$2'
-endef
-
-install_lib: all_cmd
- $(Q)$(call do_install,$(LIB_FILE),$(libdir_SQ))
- $(Q)$(call do_install,$(BIN_FILE),$(bindir_SQ))
-
-install: install_lib
-
-clean:
- $(RM) $(OUTPUT)*.o *~ $(TARGETS) $(OUTPUT)*.a $(OUTPUT)*liblockdep*.so* $(VERSION_FILES) $(OUTPUT).*.d $(OUTPUT).*.cmd
- $(RM) tags TAGS
-
-PHONY += force
-force:
-
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable so we can use it in if_changed and friends.
-.PHONY: $(PHONY)
diff --git a/tools/lib/lockdep/common.c b/tools/lib/lockdep/common.c
deleted file mode 100644
index 5c3b58cce8a9..000000000000
--- a/tools/lib/lockdep/common.c
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stddef.h>
-#include <stdbool.h>
-#include <linux/compiler.h>
-#include <linux/lockdep.h>
-#include <unistd.h>
-#include <sys/syscall.h>
-
-static __thread struct task_struct current_obj;
-
-/* lockdep wants these */
-bool debug_locks = true;
-bool debug_locks_silent;
-
-__attribute__((destructor)) static void liblockdep_exit(void)
-{
- debug_check_no_locks_held();
-}
-
-struct task_struct *__curr(void)
-{
- if (current_obj.pid == 0) {
- /* Makes lockdep output pretty */
- prctl(PR_GET_NAME, current_obj.comm);
- current_obj.pid = syscall(__NR_gettid);
- }
-
- return &current_obj;
-}
diff --git a/tools/lib/lockdep/include/liblockdep/common.h b/tools/lib/lockdep/include/liblockdep/common.h
deleted file mode 100644
index a6d7ee5f18ba..000000000000
--- a/tools/lib/lockdep/include/liblockdep/common.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_COMMON_H
-#define _LIBLOCKDEP_COMMON_H
-
-#include <pthread.h>
-
-#define NR_LOCKDEP_CACHING_CLASSES 2
-#define MAX_LOCKDEP_SUBCLASSES 8UL
-
-#ifndef CALLER_ADDR0
-#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
-#endif
-
-#ifndef _RET_IP_
-#define _RET_IP_ CALLER_ADDR0
-#endif
-
-#ifndef _THIS_IP_
-#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
-#endif
-
-struct lockdep_subclass_key {
- char __one_byte;
-};
-
-struct lock_class_key {
- struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
-};
-
-struct lockdep_map {
- struct lock_class_key *key;
- struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
- const char *name;
-#ifdef CONFIG_LOCK_STAT
- int cpu;
- unsigned long ip;
-#endif
-};
-
-void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass);
-void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check,
- struct lockdep_map *nest_lock, unsigned long ip);
-void lock_release(struct lockdep_map *lock, unsigned long ip);
-void lockdep_reset_lock(struct lockdep_map *lock);
-void lockdep_register_key(struct lock_class_key *key);
-void lockdep_unregister_key(struct lock_class_key *key);
-extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-
-#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
- { .name = (_name), .key = (void *)(_key), }
-
-#endif
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
deleted file mode 100644
index bd106b82759b..000000000000
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_MUTEX_H
-#define _LIBLOCKDEP_MUTEX_H
-
-#include <pthread.h>
-#include "common.h"
-
-struct liblockdep_pthread_mutex {
- pthread_mutex_t mutex;
- struct lock_class_key key;
- struct lockdep_map dep_map;
-};
-
-typedef struct liblockdep_pthread_mutex liblockdep_pthread_mutex_t;
-
-#define LIBLOCKDEP_PTHREAD_MUTEX_INITIALIZER(mtx) \
- (const struct liblockdep_pthread_mutex) { \
- .mutex = PTHREAD_MUTEX_INITIALIZER, \
- .dep_map = STATIC_LOCKDEP_MAP_INIT(#mtx, &((&(mtx))->dep_map)), \
-}
-
-static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
- const char *name,
- struct lock_class_key *key,
- const pthread_mutexattr_t *__mutexattr)
-{
- lockdep_init_map(&lock->dep_map, name, key, 0);
- return pthread_mutex_init(&lock->mutex, __mutexattr);
-}
-
-#define liblockdep_pthread_mutex_init(mutex, mutexattr) \
-({ \
- lockdep_register_key(&(mutex)->key); \
- __mutex_init((mutex), #mutex, &(mutex)->key, (mutexattr)); \
-})
-
-static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_mutex_lock(&lock->mutex);
-}
-
-static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lock)
-{
- lock_release(&lock->dep_map, (unsigned long)_RET_IP_);
- return pthread_mutex_unlock(&lock->mutex);
-}
-
-static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
-}
-
-static inline int liblockdep_pthread_mutex_destroy(liblockdep_pthread_mutex_t *lock)
-{
- lockdep_reset_lock(&lock->dep_map);
- lockdep_unregister_key(&lock->key);
- return pthread_mutex_destroy(&lock->mutex);
-}
-
-#ifdef __USE_LIBLOCKDEP
-
-#define pthread_mutex_t liblockdep_pthread_mutex_t
-#define pthread_mutex_init liblockdep_pthread_mutex_init
-#define pthread_mutex_lock liblockdep_pthread_mutex_lock
-#define pthread_mutex_unlock liblockdep_pthread_mutex_unlock
-#define pthread_mutex_trylock liblockdep_pthread_mutex_trylock
-#define pthread_mutex_destroy liblockdep_pthread_mutex_destroy
-
-#endif
-
-#endif
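
For reference, a hedged sketch of how these wrappers were consumed: building with -D__USE_LIBLOCKDEP remapped the standard pthread mutex calls to the liblockdep_* variants above, so callers needed no source changes (the include path below assumes the installed liblockdep headers):

#define __USE_LIBLOCKDEP
#include <liblockdep/mutex.h>	/* assumption: installed header location */

static pthread_mutex_t m;	/* expands to liblockdep_pthread_mutex_t */

static void setup(void)
{
	pthread_mutex_init(&m, NULL);	/* registers a lockdep key for 'm' */
}

static void critical_section(void)
{
	pthread_mutex_lock(&m);		/* lock_acquire() before the real lock */
	/* ... */
	pthread_mutex_unlock(&m);	/* lock_release() before the real unlock */
}

Unmodified binaries could get the same checking at run time via the "lockdep" wrapper script removed further down in this patch, which LD_PRELOADed liblockdep.so.
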
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
deleted file mode 100644
index 6d5d2932bf4d..000000000000
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_RWLOCK_H
-#define _LIBLOCKDEP_RWLOCK_H
-
-#include <pthread.h>
-#include "common.h"
-
-struct liblockdep_pthread_rwlock {
- pthread_rwlock_t rwlock;
- struct lockdep_map dep_map;
-};
-
-typedef struct liblockdep_pthread_rwlock liblockdep_pthread_rwlock_t;
-
-#define LIBLOCKDEP_PTHREAD_RWLOCK_INITIALIZER(rwl) \
- (struct liblockdep_pthread_rwlock) { \
- .rwlock = PTHREAD_RWLOCK_INITIALIZER, \
- .dep_map = STATIC_LOCKDEP_MAP_INIT(#rwl, &((&(rwl))->dep_map)), \
-}
-
-static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
- const char *name,
- struct lock_class_key *key,
- const pthread_rwlockattr_t *attr)
-{
- lockdep_init_map(&lock->dep_map, name, key, 0);
-
- return pthread_rwlock_init(&lock->rwlock, attr);
-}
-
-#define liblockdep_pthread_rwlock_init(lock, attr) \
-({ \
- static struct lock_class_key __key; \
- \
- __rwlock_init((lock), #lock, &__key, (attr)); \
-})
-
-static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_rdlock(&lock->rwlock);
-
-}
-
-static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_release(&lock->dep_map, (unsigned long)_RET_IP_);
- return pthread_rwlock_unlock(&lock->rwlock);
-}
-
-static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_wrlock(&lock->rwlock);
-}
-
-static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
-}
-
-static inline int liblockdep_pthread_rwlock_trywrlock(liblockdep_pthread_rwlock_t *lock)
-{
- lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- return pthread_rwlock_trywrlock(&lock->rwlock) == 0 ? 1 : 0;
-}
-
-static inline int liblockdep_rwlock_destroy(liblockdep_pthread_rwlock_t *lock)
-{
- return pthread_rwlock_destroy(&lock->rwlock);
-}
-
-#ifdef __USE_LIBLOCKDEP
-
-#define pthread_rwlock_t liblockdep_pthread_rwlock_t
-#define pthread_rwlock_init liblockdep_pthread_rwlock_init
-#define pthread_rwlock_rdlock liblockdep_pthread_rwlock_rdlock
-#define pthread_rwlock_unlock liblockdep_pthread_rwlock_unlock
-#define pthread_rwlock_wrlock liblockdep_pthread_rwlock_wrlock
-#define pthread_rwlock_tryrdlock liblockdep_pthread_rwlock_tryrdlock
-#define pthread_rwlock_trywrlock liblockdep_pthread_rwlock_trywrlock
-#define pthread_rwlock_destroy liblockdep_rwlock_destroy
-
-#endif
-
-#endif
diff --git a/tools/lib/lockdep/lockdep b/tools/lib/lockdep/lockdep
deleted file mode 100755
index 49af9fe19f5b..000000000000
--- a/tools/lib/lockdep/lockdep
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/bash
-
-LD_PRELOAD="./liblockdep.so $LD_PRELOAD" "$@"
diff --git a/tools/lib/lockdep/lockdep.c b/tools/lib/lockdep/lockdep.c
deleted file mode 100644
index 348a9d0fb766..000000000000
--- a/tools/lib/lockdep/lockdep.c
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/lockdep.h>
-#include <stdlib.h>
-
-/* Trivial API wrappers, we don't (yet) have RCU in user-space: */
-#define hlist_for_each_entry_rcu hlist_for_each_entry
-#define hlist_add_head_rcu hlist_add_head
-#define hlist_del_rcu hlist_del
-#define list_for_each_entry_rcu list_for_each_entry
-#define list_add_tail_rcu list_add_tail
-
-u32 prandom_u32(void)
-{
- /* Used only by lock_pin_lock() which is dead code */
- abort();
-}
-
-void print_irqtrace_events(struct task_struct *curr)
-{
- abort();
-}
-
-static struct new_utsname *init_utsname(void)
-{
- static struct new_utsname n = (struct new_utsname) {
- .release = "liblockdep",
- .version = LIBLOCKDEP_VERSION,
- };
-
- return &n;
-}
-
-#include "../../../kernel/locking/lockdep.c"
diff --git a/tools/lib/lockdep/lockdep_internals.h b/tools/lib/lockdep/lockdep_internals.h
deleted file mode 100644
index 29d0c954cc24..000000000000
--- a/tools/lib/lockdep/lockdep_internals.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../kernel/locking/lockdep_internals.h"
diff --git a/tools/lib/lockdep/lockdep_states.h b/tools/lib/lockdep/lockdep_states.h
deleted file mode 100644
index 248d235efda9..000000000000
--- a/tools/lib/lockdep/lockdep_states.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../kernel/locking/lockdep_states.h"
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
deleted file mode 100644
index 8f1adbe887b2..000000000000
--- a/tools/lib/lockdep/preload.c
+++ /dev/null
@@ -1,443 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#define _GNU_SOURCE
-#include <pthread.h>
-#include <stdio.h>
-#include <dlfcn.h>
-#include <stdlib.h>
-#include <sysexits.h>
-#include <unistd.h>
-#include "include/liblockdep/mutex.h"
-#include "../../include/linux/rbtree.h"
-
-/**
- * struct lock_lookup - liblockdep's view of a single unique lock
- * @orig: pointer to the original pthread lock, used for lookups
- * @dep_map: lockdep's dep_map structure
- * @key: lockdep's key structure
- * @node: rb-tree node used to store the lock in a global tree
- * @name: a unique name for the lock
- */
-struct lock_lookup {
- void *orig; /* Original pthread lock, used for lookups */
- struct lockdep_map dep_map; /* Since all locks are dynamic, we need
- * a dep_map and a key for each lock */
- /*
- * Wait, there's no support for key classes? Yup :(
- * Most big projects wrap the pthread api with their own calls to
- * be compatible with different locking methods. This means that
- * "classes" will be brokes since the function that creates all
- * locks will point to a generic locking function instead of the
- * actual code that wants to do the locking.
- */
- struct lock_class_key key;
- struct rb_node node;
-#define LIBLOCKDEP_MAX_LOCK_NAME 22
- char name[LIBLOCKDEP_MAX_LOCK_NAME];
-};
-
-/* This is where we store our locks */
-static struct rb_root locks = RB_ROOT;
-static pthread_rwlock_t locks_rwlock = PTHREAD_RWLOCK_INITIALIZER;
-
-/* pthread mutex API */
-
-#ifdef __GLIBC__
-extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
-extern int __pthread_mutex_lock(pthread_mutex_t *mutex);
-extern int __pthread_mutex_trylock(pthread_mutex_t *mutex);
-extern int __pthread_mutex_unlock(pthread_mutex_t *mutex);
-extern int __pthread_mutex_destroy(pthread_mutex_t *mutex);
-#else
-#define __pthread_mutex_init NULL
-#define __pthread_mutex_lock NULL
-#define __pthread_mutex_trylock NULL
-#define __pthread_mutex_unlock NULL
-#define __pthread_mutex_destroy NULL
-#endif
-static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *attr) = __pthread_mutex_init;
-static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex) = __pthread_mutex_lock;
-static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex) = __pthread_mutex_trylock;
-static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex) = __pthread_mutex_unlock;
-static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex) = __pthread_mutex_destroy;
-
-/* pthread rwlock API */
-
-#ifdef __GLIBC__
-extern int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
-extern int __pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
-extern int __pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
-#else
-#define __pthread_rwlock_init NULL
-#define __pthread_rwlock_destroy NULL
-#define __pthread_rwlock_wrlock NULL
-#define __pthread_rwlock_trywrlock NULL
-#define __pthread_rwlock_rdlock NULL
-#define __pthread_rwlock_tryrdlock NULL
-#define __pthread_rwlock_unlock NULL
-#endif
-
-static int (*ll_pthread_rwlock_init)(pthread_rwlock_t *rwlock,
- const pthread_rwlockattr_t *attr) = __pthread_rwlock_init;
-static int (*ll_pthread_rwlock_destroy)(pthread_rwlock_t *rwlock) = __pthread_rwlock_destroy;
-static int (*ll_pthread_rwlock_rdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_rdlock;
-static int (*ll_pthread_rwlock_tryrdlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_tryrdlock;
-static int (*ll_pthread_rwlock_trywrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_trywrlock;
-static int (*ll_pthread_rwlock_wrlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_wrlock;
-static int (*ll_pthread_rwlock_unlock)(pthread_rwlock_t *rwlock) = __pthread_rwlock_unlock;
-
-enum { none, prepare, done, } __init_state;
-static void init_preload(void);
-static void try_init_preload(void)
-{
- if (__init_state != done)
- init_preload();
-}
-
-static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
-{
- struct rb_node **node = &locks.rb_node;
- struct lock_lookup *l;
-
- *parent = NULL;
-
- while (*node) {
- l = rb_entry(*node, struct lock_lookup, node);
-
- *parent = *node;
- if (lock < l->orig)
- node = &l->node.rb_left;
- else if (lock > l->orig)
- node = &l->node.rb_right;
- else
- return node;
- }
-
- return node;
-}
-
-#ifndef LIBLOCKDEP_STATIC_ENTRIES
-#define LIBLOCKDEP_STATIC_ENTRIES 1024
-#endif
-
-static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
-static int __locks_nr;
-
-static inline bool is_static_lock(struct lock_lookup *lock)
-{
- return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks);
-}
-
-static struct lock_lookup *alloc_lock(void)
-{
- if (__init_state != done) {
- /*
- * Some programs attempt to initialize and use locks in their
- * allocation path. This means that a call to malloc() would
- * result in locks being initialized and locked.
- *
- * Why is it an issue for us? dlsym() below will try allocating
- * to give us the original function. Since this allocation will
- * result in a locking operation, we have to let pthread deal
- * with it, but we can't! We don't have the pointer to the
- * original API since we're inside dlsym() trying to get it.
- */
-
- int idx = __locks_nr++;
- if (idx >= ARRAY_SIZE(__locks)) {
- dprintf(STDERR_FILENO,
- "LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
- exit(EX_UNAVAILABLE);
- }
- return __locks + idx;
- }
-
- return malloc(sizeof(struct lock_lookup));
-}
-
-static inline void free_lock(struct lock_lookup *lock)
-{
- if (likely(!is_static_lock(lock)))
- free(lock);
-}
-
-/**
- * __get_lock - find or create a lock instance
- * @lock: pointer to a pthread lock function
- *
- * Try to find an existing lock in the rbtree using the provided pointer. If
- * one wasn't found - create it.
- */
-static struct lock_lookup *__get_lock(void *lock)
-{
- struct rb_node **node, *parent;
- struct lock_lookup *l;
-
- ll_pthread_rwlock_rdlock(&locks_rwlock);
- node = __get_lock_node(lock, &parent);
- ll_pthread_rwlock_unlock(&locks_rwlock);
- if (*node) {
- return rb_entry(*node, struct lock_lookup, node);
- }
-
- /* We didn't find the lock, let's create it */
- l = alloc_lock();
- if (l == NULL)
- return NULL;
-
- l->orig = lock;
- /*
- * Currently the name of the lock is the ptr value of the pthread lock;
- * while not optimal, it makes debugging a bit easier.
- *
- * TODO: Get the real name of the lock using libdwarf
- */
- sprintf(l->name, "%p", lock);
- lockdep_init_map(&l->dep_map, l->name, &l->key, 0);
-
- ll_pthread_rwlock_wrlock(&locks_rwlock);
- /* This might have changed since the last time we fetched it */
- node = __get_lock_node(lock, &parent);
- rb_link_node(&l->node, parent, node);
- rb_insert_color(&l->node, &locks);
- ll_pthread_rwlock_unlock(&locks_rwlock);
-
- return l;
-}
-
-static void __del_lock(struct lock_lookup *lock)
-{
- ll_pthread_rwlock_wrlock(&locks_rwlock);
- rb_erase(&lock->node, &locks);
- ll_pthread_rwlock_unlock(&locks_rwlock);
- free_lock(lock);
-}
-
-int pthread_mutex_init(pthread_mutex_t *mutex,
- const pthread_mutexattr_t *attr)
-{
- int r;
-
- /*
- * We keep trying to init our preload module because there might be
- * code in init sections that tries to touch locks before we are
- * initialized; in that case we'll need to manually call preload
- * to get us going.
- *
- * Funny enough, kernel's lockdep had the same issue, and used
- * (almost) the same solution. See look_up_lock_class() in
- * kernel/locking/lockdep.c for details.
- */
- try_init_preload();
-
- r = ll_pthread_mutex_init(mutex, attr);
- if (r == 0)
- /*
- * We do a dummy initialization here so that lockdep could
- * warn us if something fishy is going on - such as
- * initializing a held lock.
- */
- __get_lock(mutex);
-
- return r;
-}
-
-int pthread_mutex_lock(pthread_mutex_t *mutex)
-{
- int r;
-
- try_init_preload();
-
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
- (unsigned long)_RET_IP_);
- /*
- * Here's the thing with pthread mutexes: unlike the kernel variant,
- * they can fail.
- *
- * This means that the behaviour here is a bit different from what's
- * going on in the kernel: there we just tell lockdep that we took the
- * lock before actually taking it, but here we must deal with the case
- * that locking failed.
- *
- * To do that we'll "release" the lock if locking failed - this way
- * we'll get lockdep doing the correct checks when we try to take
- * the lock, and if that fails - we'll be back to the correct
- * state by releasing it.
- */
- r = ll_pthread_mutex_lock(mutex);
- if (r)
- lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_mutex_trylock(pthread_mutex_t *mutex)
-{
- int r;
-
- try_init_preload();
-
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_mutex_trylock(mutex);
- if (r)
- lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_mutex_unlock(pthread_mutex_t *mutex)
-{
- int r;
-
- try_init_preload();
-
- lock_release(&__get_lock(mutex)->dep_map, (unsigned long)_RET_IP_);
- /*
- * Just like taking a lock, only in reverse!
- *
- * If we fail releasing the lock, tell lockdep we're holding it again.
- */
- r = ll_pthread_mutex_unlock(mutex);
- if (r)
- lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_mutex_destroy(pthread_mutex_t *mutex)
-{
- try_init_preload();
-
- /*
- * Let's see if we're releasing a lock that's held.
- *
- * TODO: Hook into free() and add that check there as well.
- */
- debug_check_no_locks_freed(mutex, sizeof(*mutex));
- __del_lock(__get_lock(mutex));
- return ll_pthread_mutex_destroy(mutex);
-}
-
-/* This is the rwlock part, very similar to what happened with mutex above */
-int pthread_rwlock_init(pthread_rwlock_t *rwlock,
- const pthread_rwlockattr_t *attr)
-{
- int r;
-
- try_init_preload();
-
- r = ll_pthread_rwlock_init(rwlock, attr);
- if (r == 0)
- __get_lock(rwlock);
-
- return r;
-}
-
-int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
-{
- try_init_preload();
-
- debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
- __del_lock(__get_lock(rwlock));
- return ll_pthread_rwlock_destroy(rwlock);
-}
-
-int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_rdlock(rwlock);
- if (r)
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_tryrdlock(rwlock);
- if (r)
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_trywrlock(rwlock);
- if (r)
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_wrlock(rwlock);
- if (r)
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
-{
- int r;
-
- init_preload();
-
- lock_release(&__get_lock(rwlock)->dep_map, (unsigned long)_RET_IP_);
- r = ll_pthread_rwlock_unlock(rwlock);
- if (r)
- lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
-
- return r;
-}
-
-__attribute__((constructor)) static void init_preload(void)
-{
- if (__init_state == done)
- return;
-
-#ifndef __GLIBC__
- __init_state = prepare;
-
- ll_pthread_mutex_init = dlsym(RTLD_NEXT, "pthread_mutex_init");
- ll_pthread_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
- ll_pthread_mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
- ll_pthread_mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
- ll_pthread_mutex_destroy = dlsym(RTLD_NEXT, "pthread_mutex_destroy");
-
- ll_pthread_rwlock_init = dlsym(RTLD_NEXT, "pthread_rwlock_init");
- ll_pthread_rwlock_destroy = dlsym(RTLD_NEXT, "pthread_rwlock_destroy");
- ll_pthread_rwlock_rdlock = dlsym(RTLD_NEXT, "pthread_rwlock_rdlock");
- ll_pthread_rwlock_tryrdlock = dlsym(RTLD_NEXT, "pthread_rwlock_tryrdlock");
- ll_pthread_rwlock_wrlock = dlsym(RTLD_NEXT, "pthread_rwlock_wrlock");
- ll_pthread_rwlock_trywrlock = dlsym(RTLD_NEXT, "pthread_rwlock_trywrlock");
- ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
-#endif
-
- __init_state = done;
-}
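
For context on the interposition technique the removed preload.c was built around, here is a minimal, self-contained sketch of the same dlsym(RTLD_NEXT) pattern. It is illustrative only and not part of liblockdep; the wrap_count counter is a made-up stand-in for the lockdep bookkeeping the deleted code performed.

[source,c]
--
/* Minimal sketch of the LD_PRELOAD interposition pattern used by the removed
 * preload.c: resolve the real pthread symbol lazily via dlsym(RTLD_NEXT, ...)
 * and forward to it after doing our own bookkeeping. wrap_count is invented
 * for illustration; liblockdep did lock_acquire()/lock_release() tracking. */
#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>

static int (*real_mutex_lock)(pthread_mutex_t *mutex);
static unsigned long wrap_count;

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
	if (!real_mutex_lock)
		real_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");

	wrap_count++;			/* our bookkeeping before the real call */
	return real_mutex_lock(mutex);	/* forward to the libc implementation */
}
--

Built as a shared object (gcc -shared -fPIC -ldl) and injected with LD_PRELOAD, this shadows the libc symbol for unmodified binaries, which is how the removed lockdep wrapper attached its checks; the comments in alloc_lock() above show the extra care needed because dlsym() itself may allocate and take locks.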
diff --git a/tools/lib/lockdep/rbtree.c b/tools/lib/lockdep/rbtree.c
deleted file mode 100644
index 297c304571f8..000000000000
--- a/tools/lib/lockdep/rbtree.c
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../lib/rbtree.c"
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
deleted file mode 100755
index 11f425662b43..000000000000
--- a/tools/lib/lockdep/run_tests.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#! /bin/bash
-# SPDX-License-Identifier: GPL-2.0
-
-if ! make >/dev/null; then
- echo "Building liblockdep failed."
- echo "FAILED!"
- exit 1
-fi
-
-find tests -name '*.c' | sort | while read -r i; do
- testname=$(basename "$i" .c)
- echo -ne "$testname... "
- if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &&
- timeout 1 "tests/$testname" 2>&1 | /bin/bash "tests/${testname}.sh"; then
- echo "PASSED!"
- else
- echo "FAILED!"
- fi
- rm -f "tests/$testname"
-done
-
-find tests -name '*.c' | sort | while read -r i; do
- testname=$(basename "$i" .c)
- echo -ne "(PRELOAD) $testname... "
- if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
- timeout 1 ./lockdep "tests/$testname" 2>&1 |
- /bin/bash "tests/${testname}.sh"; then
- echo "PASSED!"
- else
- echo "FAILED!"
- fi
- rm -f "tests/$testname"
-done
-
-find tests -name '*.c' | sort | while read -r i; do
- testname=$(basename "$i" .c)
- echo -ne "(PRELOAD + Valgrind) $testname... "
- if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
- { timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } &&
- /bin/bash "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
- ! grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then
- echo "PASSED!"
- else
- echo "FAILED!"
- fi
- rm -f "tests/$testname"
-done
diff --git a/tools/lib/lockdep/tests/AA.c b/tools/lib/lockdep/tests/AA.c
deleted file mode 100644
index 63c7ce97bda3..000000000000
--- a/tools/lib/lockdep/tests/AA.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-
-int main(void)
-{
- pthread_mutex_t a;
-
- pthread_mutex_init(&a, NULL);
-
- pthread_mutex_lock(&a);
- pthread_mutex_lock(&a);
-
- return 0;
-}
diff --git a/tools/lib/lockdep/tests/AA.sh b/tools/lib/lockdep/tests/AA.sh
deleted file mode 100644
index f39b32865074..000000000000
--- a/tools/lib/lockdep/tests/AA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/ABA.c b/tools/lib/lockdep/tests/ABA.c
deleted file mode 100644
index efa39b23f05d..000000000000
--- a/tools/lib/lockdep/tests/ABA.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-
-void main(void)
-{
- pthread_mutex_t a, b;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
-
- pthread_mutex_lock(&a);
- pthread_mutex_lock(&b);
- pthread_mutex_lock(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABA.sh b/tools/lib/lockdep/tests/ABA.sh
deleted file mode 100644
index f39b32865074..000000000000
--- a/tools/lib/lockdep/tests/ABA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/ABBA.c b/tools/lib/lockdep/tests/ABBA.c
deleted file mode 100644
index 543789bc3e37..000000000000
--- a/tools/lib/lockdep/tests/ABBA.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(b, a);
-
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(b, a);
-
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABBA.sh b/tools/lib/lockdep/tests/ABBA.sh
deleted file mode 100644
index fc31c607a5a8..000000000000
--- a/tools/lib/lockdep/tests/ABBA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBA_2threads.c b/tools/lib/lockdep/tests/ABBA_2threads.c
deleted file mode 100644
index 39325ef8a2ac..000000000000
--- a/tools/lib/lockdep/tests/ABBA_2threads.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <pthread.h>
-
-pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
-pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
-pthread_barrier_t bar;
-
-void *ba_lock(void *arg)
-{
- int ret, i;
-
- pthread_mutex_lock(&b);
-
- if (pthread_barrier_wait(&bar) == PTHREAD_BARRIER_SERIAL_THREAD)
- pthread_barrier_destroy(&bar);
-
- pthread_mutex_lock(&a);
-
- pthread_mutex_unlock(&a);
- pthread_mutex_unlock(&b);
-}
-
-int main(void)
-{
- pthread_t t;
-
- pthread_barrier_init(&bar, NULL, 2);
-
- if (pthread_create(&t, NULL, ba_lock, NULL)) {
- fprintf(stderr, "pthread_create() failed\n");
- return 1;
- }
- pthread_mutex_lock(&a);
-
- if (pthread_barrier_wait(&bar) == PTHREAD_BARRIER_SERIAL_THREAD)
- pthread_barrier_destroy(&bar);
-
- pthread_mutex_lock(&b);
-
- pthread_mutex_unlock(&b);
- pthread_mutex_unlock(&a);
-
- pthread_join(t, NULL);
-
- return 0;
-}
diff --git a/tools/lib/lockdep/tests/ABBA_2threads.sh b/tools/lib/lockdep/tests/ABBA_2threads.sh
deleted file mode 100644
index fc31c607a5a8..000000000000
--- a/tools/lib/lockdep/tests/ABBA_2threads.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBCCA.c b/tools/lib/lockdep/tests/ABBCCA.c
deleted file mode 100644
index 48446129d496..000000000000
--- a/tools/lib/lockdep/tests/ABBCCA.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(b, c);
- LOCK_UNLOCK_2(c, a);
-
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABBCCA.sh b/tools/lib/lockdep/tests/ABBCCA.sh
deleted file mode 100644
index fc31c607a5a8..000000000000
--- a/tools/lib/lockdep/tests/ABBCCA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABBCCDDA.c b/tools/lib/lockdep/tests/ABBCCDDA.c
deleted file mode 100644
index 3570bf7b3804..000000000000
--- a/tools/lib/lockdep/tests/ABBCCDDA.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c, d;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
- pthread_mutex_init(&d, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(b, c);
- LOCK_UNLOCK_2(c, d);
- LOCK_UNLOCK_2(d, a);
-
- pthread_mutex_destroy(&d);
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABBCCDDA.sh b/tools/lib/lockdep/tests/ABBCCDDA.sh
deleted file mode 100644
index fc31c607a5a8..000000000000
--- a/tools/lib/lockdep/tests/ABBCCDDA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCABC.c b/tools/lib/lockdep/tests/ABCABC.c
deleted file mode 100644
index a1c4659894cd..000000000000
--- a/tools/lib/lockdep/tests/ABCABC.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(c, a);
- LOCK_UNLOCK_2(b, c);
-
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABCABC.sh b/tools/lib/lockdep/tests/ABCABC.sh
deleted file mode 100644
index fc31c607a5a8..000000000000
--- a/tools/lib/lockdep/tests/ABCABC.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCDBCDA.c b/tools/lib/lockdep/tests/ABCDBCDA.c
deleted file mode 100644
index 335af1c90ab5..000000000000
--- a/tools/lib/lockdep/tests/ABCDBCDA.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c, d;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
- pthread_mutex_init(&d, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(c, d);
- LOCK_UNLOCK_2(b, c);
- LOCK_UNLOCK_2(d, a);
-
- pthread_mutex_destroy(&d);
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABCDBCDA.sh b/tools/lib/lockdep/tests/ABCDBCDA.sh
deleted file mode 100644
index fc31c607a5a8..000000000000
--- a/tools/lib/lockdep/tests/ABCDBCDA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/ABCDBDDA.c b/tools/lib/lockdep/tests/ABCDBDDA.c
deleted file mode 100644
index 3c5972863049..000000000000
--- a/tools/lib/lockdep/tests/ABCDBDDA.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-#include "common.h"
-
-void main(void)
-{
- pthread_mutex_t a, b, c, d;
-
- pthread_mutex_init(&a, NULL);
- pthread_mutex_init(&b, NULL);
- pthread_mutex_init(&c, NULL);
- pthread_mutex_init(&d, NULL);
-
- LOCK_UNLOCK_2(a, b);
- LOCK_UNLOCK_2(c, d);
- LOCK_UNLOCK_2(b, d);
- LOCK_UNLOCK_2(d, a);
-
- pthread_mutex_destroy(&d);
- pthread_mutex_destroy(&c);
- pthread_mutex_destroy(&b);
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/ABCDBDDA.sh b/tools/lib/lockdep/tests/ABCDBDDA.sh
deleted file mode 100644
index fc31c607a5a8..000000000000
--- a/tools/lib/lockdep/tests/ABCDBDDA.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible circular locking dependency detected'
diff --git a/tools/lib/lockdep/tests/WW.c b/tools/lib/lockdep/tests/WW.c
deleted file mode 100644
index eee88df7fc41..000000000000
--- a/tools/lib/lockdep/tests/WW.c
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/rwlock.h>
-
-void main(void)
-{
- pthread_rwlock_t a, b;
-
- pthread_rwlock_init(&a, NULL);
- pthread_rwlock_init(&b, NULL);
-
- pthread_rwlock_wrlock(&a);
- pthread_rwlock_rdlock(&b);
- pthread_rwlock_wrlock(&a);
-}
diff --git a/tools/lib/lockdep/tests/WW.sh b/tools/lib/lockdep/tests/WW.sh
deleted file mode 100644
index f39b32865074..000000000000
--- a/tools/lib/lockdep/tests/WW.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: possible recursive locking detected'
diff --git a/tools/lib/lockdep/tests/common.h b/tools/lib/lockdep/tests/common.h
deleted file mode 100644
index 3026c29ccb5c..000000000000
--- a/tools/lib/lockdep/tests/common.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LIBLOCKDEP_TEST_COMMON_H
-#define _LIBLOCKDEP_TEST_COMMON_H
-
-#define LOCK_UNLOCK_2(a, b) \
- do { \
- pthread_mutex_lock(&(a)); \
- pthread_mutex_lock(&(b)); \
- pthread_mutex_unlock(&(b)); \
- pthread_mutex_unlock(&(a)); \
- } while(0)
-
-#endif
diff --git a/tools/lib/lockdep/tests/unlock_balance.c b/tools/lib/lockdep/tests/unlock_balance.c
deleted file mode 100644
index dba25064b50a..000000000000
--- a/tools/lib/lockdep/tests/unlock_balance.c
+++ /dev/null
@@ -1,15 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <liblockdep/mutex.h>
-
-void main(void)
-{
- pthread_mutex_t a;
-
- pthread_mutex_init(&a, NULL);
-
- pthread_mutex_lock(&a);
- pthread_mutex_unlock(&a);
- pthread_mutex_unlock(&a);
-
- pthread_mutex_destroy(&a);
-}
diff --git a/tools/lib/lockdep/tests/unlock_balance.sh b/tools/lib/lockdep/tests/unlock_balance.sh
deleted file mode 100644
index c6e3952303fe..000000000000
--- a/tools/lib/lockdep/tests/unlock_balance.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/bash
-grep -q 'WARNING: bad unlock balance detected'
diff --git a/tools/lib/perf/Build b/tools/lib/perf/Build
index 2ef9a4ec6d99..e8f5b7fb9973 100644
--- a/tools/lib/perf/Build
+++ b/tools/lib/perf/Build
@@ -11,3 +11,5 @@ libperf-y += lib.o
$(OUTPUT)zalloc.o: ../../lib/zalloc.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
+
+tests-y += tests/
diff --git a/tools/lib/perf/Documentation/libperf-counting.txt b/tools/lib/perf/Documentation/libperf-counting.txt
index cae9757f49c1..8b75efcd67ce 100644
--- a/tools/lib/perf/Documentation/libperf-counting.txt
+++ b/tools/lib/perf/Documentation/libperf-counting.txt
@@ -7,13 +7,13 @@ libperf-counting - counting interface
DESCRIPTION
-----------
-The counting interface provides API to meassure and get count for specific perf events.
+The counting interface provides API to measure and get count for specific perf events.
The following test tries to explain count on `counting.c` example.
It is by no means complete guide to counting, but shows libperf basic API for counting.
-The `counting.c` comes with libbperf package and can be compiled and run like:
+The `counting.c` comes with libperf package and can be compiled and run like:
[source,bash]
--
@@ -26,7 +26,8 @@ count 176242, enabled 176242, run 176242
It requires root access, because of the `PERF_COUNT_SW_CPU_CLOCK` event,
which is available only for root.
-The `counting.c` example monitors two events on the current process and displays their count, in a nutshel it:
+The `counting.c` example monitors two events on the current process and displays
+their count, in a nutshell it:
* creates events
* adds them to the event list
@@ -152,7 +153,7 @@ Configure event list with the thread map and open events:
--
Both events are created as disabled (note the `disabled = 1` assignment above),
-so we need to enable the whole list explicitely (both events).
+so we need to enable the whole list explicitly (both events).
From this moment events are counting and we can do our workload.
@@ -167,7 +168,8 @@ When we are done we disable the events list.
79 perf_evlist__disable(evlist);
--
-Now we need to get the counts from events, following code iterates throught the events list and read counts:
+Now we need to get the counts from events, following code iterates through the
+events list and read counts:
[source,c]
--
@@ -178,7 +180,7 @@ Now we need to get the counts from events, following code iterates throught the
85 }
--
-And finaly cleanup.
+And finally cleanup.
We close the whole events list (both events) and remove it together with the threads map:
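
As a rough outline of the counting flow this document describes (thread map, events, open, enable, workload, disable, read, close), here is a condensed sketch modelled on the counting.c example; the event choice is arbitrary and error handling is trimmed, so treat it as a sketch rather than the shipped example.

[source,c]
--
/* Condensed sketch of the libperf counting flow described above. Modelled on
 * counting.c but abbreviated: one software event, no error handling. */
#include <linux/perf_event.h>
#include <perf/core.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/threadmap.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_SOFTWARE,
		.config		= PERF_COUNT_SW_TASK_CLOCK,
		.read_format	= PERF_FORMAT_TOTAL_TIME_ENABLED |
				  PERF_FORMAT_TOTAL_TIME_RUNNING,
		.disabled	= 1,
	};
	struct perf_thread_map *threads = perf_thread_map__new_dummy();
	struct perf_evlist *evlist = perf_evlist__new();
	struct perf_evsel *evsel = perf_evsel__new(&attr);
	struct perf_counts_values counts;

	perf_thread_map__set_pid(threads, 0, 0);	/* 0 == current process */
	perf_evlist__add(evlist, evsel);
	perf_evlist__set_maps(evlist, NULL, threads);	/* no CPU map: per thread */

	perf_evlist__open(evlist);
	perf_evlist__enable(evlist);
	/* ... the workload being measured runs here ... */
	perf_evlist__disable(evlist);

	perf_evsel__read(evsel, 0, 0, &counts);
	printf("count %llu, enabled %llu, run %llu\n",
	       (unsigned long long)counts.val,
	       (unsigned long long)counts.ena,
	       (unsigned long long)counts.run);

	perf_evlist__close(evlist);
	perf_evlist__delete(evlist);
	perf_thread_map__put(threads);
	return 0;
}
--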
diff --git a/tools/lib/perf/Documentation/libperf-sampling.txt b/tools/lib/perf/Documentation/libperf-sampling.txt
index d71a7b4fcf5f..d6ca24f6ef78 100644
--- a/tools/lib/perf/Documentation/libperf-sampling.txt
+++ b/tools/lib/perf/Documentation/libperf-sampling.txt
@@ -8,13 +8,13 @@ libperf-sampling - sampling interface
DESCRIPTION
-----------
-The sampling interface provides API to meassure and get count for specific perf events.
+The sampling interface provides API to measure and get count for specific perf events.
The following test tries to explain count on `sampling.c` example.
It is by no means complete guide to sampling, but shows libperf basic API for sampling.
-The `sampling.c` comes with libbperf package and can be compiled and run like:
+The `sampling.c` comes with libperf package and can be compiled and run like:
[source,bash]
--
@@ -33,7 +33,8 @@ cpu 0, pid 4465, tid 4470, ip 7f84fe0ebebf, period 176
It requires root access, because it uses hardware cycles event.
-The `sampling.c` example profiles/samples all CPUs with hardware cycles, in a nutshel it:
+The `sampling.c` example profiles/samples all CPUs with hardware cycles, in a
+nutshell it:
- creates events
- adds them to the event list
@@ -90,7 +91,7 @@ Once the setup is complete we start by defining cycles event using the `struct p
36 };
--
-Next step is to prepare cpus map.
+Next step is to prepare CPUs map.
In this case we will monitor all the available CPUs:
@@ -152,7 +153,7 @@ Once the events list is open, we can create memory maps AKA perf ring buffers:
--
The event is created as disabled (note the `disabled = 1` assignment above),
-so we need to enable the events list explicitely.
+so we need to enable the events list explicitly.
From this moment the cycles event is sampling.
@@ -212,7 +213,7 @@ Each sample needs to get parsed:
106 cpu, pid, tid, ip, period);
--
-And finaly cleanup.
+And finally cleanup.
We close the whole events list (both events) and remove it together with the threads map:
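
The ring-buffer consumption step this walkthrough alludes to reduces to a read loop over the evlist's memory maps. The sketch below is only an outline under the assumption that the evlist has already been opened and mmapped; process_event() is a hypothetical caller-supplied callback, not a libperf function.

[source,c]
--
/* Outline of the sampling read loop, assuming the evlist was already opened
 * and mmapped (e.g. perf_evlist__mmap(evlist, 4)). process_event() is a
 * hypothetical callback; libperf only hands back the raw records. */
#include <perf/evlist.h>
#include <perf/event.h>
#include <perf/mmap.h>

static void read_all_mmaps(struct perf_evlist *evlist,
			   void (*process_event)(union perf_event *event))
{
	struct perf_mmap *map;

	perf_evlist__for_each_mmap(evlist, map, false) {
		union perf_event *event;

		if (perf_mmap__read_init(map) < 0)
			continue;	/* nothing new in this ring buffer */

		while ((event = perf_mmap__read_event(map)) != NULL) {
			process_event(event);		/* e.g. parse PERF_RECORD_SAMPLE */
			perf_mmap__consume(map);	/* mark the record as handled */
		}
		perf_mmap__read_done(map);
	}
}
--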
diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt
index 5a6bb512789d..a8f1a237931b 100644
--- a/tools/lib/perf/Documentation/libperf.txt
+++ b/tools/lib/perf/Documentation/libperf.txt
@@ -29,7 +29,7 @@ SYNOPSIS
void libperf_init(libperf_print_fn_t fn);
--
-*API to handle cpu maps:*
+*API to handle CPU maps:*
[source,c]
--
@@ -48,6 +48,7 @@ SYNOPSIS
int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
bool perf_cpu_map__empty(const struct perf_cpu_map *map);
int perf_cpu_map__max(struct perf_cpu_map *map);
+ bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu);
#define perf_cpu_map__for_each_cpu(cpu, idx, cpus)
--
@@ -61,11 +62,12 @@ SYNOPSIS
struct perf_thread_map;
struct perf_thread_map *perf_thread_map__new_dummy(void);
+ struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);
- void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid);
- char *perf_thread_map__comm(struct perf_thread_map *map, int thread);
+ void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid);
+ char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
int perf_thread_map__nr(struct perf_thread_map *threads);
- pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread);
+ pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
void perf_thread_map__put(struct perf_thread_map *map);
@@ -135,13 +137,16 @@ SYNOPSIS
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
void perf_evsel__close(struct perf_evsel *evsel);
- void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
- int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+ void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+ int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
+ void perf_evsel__munmap(struct perf_evsel *evsel);
+ void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread);
+ int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count);
int perf_evsel__enable(struct perf_evsel *evsel);
- int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu);
+ int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
int perf_evsel__disable(struct perf_evsel *evsel);
- int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu);
+ int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
@@ -217,7 +222,7 @@ Following objects are key to the libperf interface:
[horizontal]
-struct perf_cpu_map:: Provides a cpu list abstraction.
+struct perf_cpu_map:: Provides a CPU list abstraction.
struct perf_thread_map:: Provides a thread list abstraction.
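
Because the synopsis above renames the index parameter from thread to idx and adds perf_thread_map__new_array(), a tiny usage sketch may help; the pid values are illustrative only.

[source,c]
--
/* Tiny illustration of the thread-map calls from the synopsis above.
 * Using our own pid keeps the example self-contained. */
#include <perf/threadmap.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t pids[] = { getpid() };
	struct perf_thread_map *threads = perf_thread_map__new_array(1, pids);

	if (threads) {
		printf("nr %d, pid at idx 0: %d\n",
		       perf_thread_map__nr(threads),
		       (int)perf_thread_map__pid(threads, 0));
		perf_thread_map__put(threads);	/* drop our reference */
	}
	return 0;
}
--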
diff --git a/tools/lib/perf/Makefile b/tools/lib/perf/Makefile
index 3718d65cffac..3a9b2140aa04 100644
--- a/tools/lib/perf/Makefile
+++ b/tools/lib/perf/Makefile
@@ -52,6 +52,8 @@ else
Q = @
endif
+TEST_ARGS := $(if $(V),-v)
+
# Set compile option CFLAGS
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
@@ -136,12 +138,30 @@ all: fixdep
clean: $(LIBAPI)-clean
$(call QUIET_CLEAN, libperf) $(RM) $(LIBPERF_A) \
- *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd LIBPERF-CFLAGS $(LIBPERF_PC)
- $(Q)$(MAKE) -C tests clean
+ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBPERF_VERSION) .*.d .*.cmd tests/*.o LIBPERF-CFLAGS $(LIBPERF_PC) \
+ $(TESTS_STATIC) $(TESTS_SHARED)
+
+TESTS_IN = tests-in.o
+
+TESTS_STATIC = $(OUTPUT)tests-static
+TESTS_SHARED = $(OUTPUT)tests-shared
+
+$(TESTS_IN): FORCE
+ $(Q)$(MAKE) $(build)=tests
+
+$(TESTS_STATIC): $(TESTS_IN) $(LIBPERF_A) $(LIBAPI)
+ $(QUIET_LINK)$(CC) -o $@ $^
+
+$(TESTS_SHARED): $(TESTS_IN) $(LIBAPI)
+ $(QUIET_LINK)$(CC) -o $@ -L$(or $(OUTPUT),.) $^ -lperf
-tests: libs
- $(Q)$(MAKE) -C tests
- $(Q)$(MAKE) -C tests run
+make-tests: libs $(TESTS_SHARED) $(TESTS_STATIC)
+
+tests: make-tests
+ @echo "running static:"
+ @./$(TESTS_STATIC) $(TEST_ARGS)
+ @echo "running dynamic:"
+ @LD_LIBRARY_PATH=. ./$(TESTS_SHARED) $(TEST_ARGS)
$(LIBPERF_PC):
$(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
@@ -156,10 +176,10 @@ define do_install_mkdir
endef
define do_install
- if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
- fi; \
- $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+ if [ ! -d '$2' ]; then \
+ $(INSTALL) -d -m 755 '$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$2'
endef
install_lib: libs
@@ -167,19 +187,28 @@ install_lib: libs
$(call do_install_mkdir,$(libdir_SQ)); \
cp -fpR $(LIBPERF_ALL) $(DESTDIR)$(libdir_SQ)
-install_headers:
- $(call QUIET_INSTALL, headers) \
- $(call do_install,include/perf/core.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/cpumap.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/threadmap.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/evlist.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/evsel.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/event.h,$(prefix)/include/perf,644); \
- $(call do_install,include/perf/mmap.h,$(prefix)/include/perf,644);
+HDRS := bpf_perf.h core.h cpumap.h threadmap.h evlist.h evsel.h event.h mmap.h
+INTERNAL_HDRS := cpumap.h evlist.h evsel.h lib.h mmap.h rc_check.h threadmap.h xyarray.h
+
+INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/perf
+INSTALL_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(HDRS))
+INSTALL_INTERNAL_HDRS_PFX := $(DESTDIR)$(prefix)/include/internal
+INSTALL_INTERNAL_HDRS := $(addprefix $(INSTALL_INTERNAL_HDRS_PFX)/, $(INTERNAL_HDRS))
+
+$(INSTALL_HDRS): $(INSTALL_HDRS_PFX)/%.h: include/perf/%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_HDRS_PFX)/,644)
+
+$(INSTALL_INTERNAL_HDRS): $(INSTALL_INTERNAL_HDRS_PFX)/%.h: include/internal/%.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_INTERNAL_HDRS_PFX)/,644)
+
+install_headers: $(INSTALL_HDRS) $(INSTALL_INTERNAL_HDRS)
+ $(call QUIET_INSTALL, libperf_headers)
install_pkgconfig: $(LIBPERF_PC)
$(call QUIET_INSTALL, $(LIBPERF_PC)) \
- $(call do_install,$(LIBPERF_PC),$(libdir_SQ)/pkgconfig,644)
+ $(call do_install,$(LIBPERF_PC),$(DESTDIR_SQ)$(libdir_SQ)/pkgconfig,644)
install_doc:
$(Q)$(MAKE) -C Documentation install-man install-html install-examples
diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c
index f93f4e703e4c..1229b18bcdb1 100644
--- a/tools/lib/perf/cpumap.c
+++ b/tools/lib/perf/cpumap.c
@@ -10,15 +10,29 @@
#include <ctype.h>
#include <limits.h>
-struct perf_cpu_map *perf_cpu_map__dummy_new(void)
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus)
{
- struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
+ RC_CHK_ACCESS(map)->nr = nr_cpus;
+}
- if (cpus != NULL) {
- cpus->nr = 1;
- cpus->map[0] = -1;
+struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus)
+{
+ RC_STRUCT(perf_cpu_map) *cpus = malloc(sizeof(*cpus) + sizeof(struct perf_cpu) * nr_cpus);
+ struct perf_cpu_map *result;
+
+ if (ADD_RC_CHK(result, cpus)) {
+ cpus->nr = nr_cpus;
refcount_set(&cpus->refcnt, 1);
}
+ return result;
+}
+
+struct perf_cpu_map *perf_cpu_map__dummy_new(void)
+{
+ struct perf_cpu_map *cpus = perf_cpu_map__alloc(1);
+
+ if (cpus)
+ RC_CHK_ACCESS(cpus)->map[0].cpu = -1;
return cpus;
}
@@ -26,23 +40,30 @@ struct perf_cpu_map *perf_cpu_map__dummy_new(void)
static void cpu_map__delete(struct perf_cpu_map *map)
{
if (map) {
- WARN_ONCE(refcount_read(&map->refcnt) != 0,
+ WARN_ONCE(refcount_read(perf_cpu_map__refcnt(map)) != 0,
"cpu_map refcnt unbalanced\n");
- free(map);
+ RC_CHK_FREE(map);
}
}
struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map)
{
- if (map)
- refcount_inc(&map->refcnt);
- return map;
+ struct perf_cpu_map *result;
+
+ if (RC_CHK_GET(result, map))
+ refcount_inc(perf_cpu_map__refcnt(map));
+
+ return result;
}
void perf_cpu_map__put(struct perf_cpu_map *map)
{
- if (map && refcount_dec_and_test(&map->refcnt))
- cpu_map__delete(map);
+ if (map) {
+ if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
+ cpu_map__delete(map);
+ else
+ RC_CHK_PUT(map);
+ }
}
static struct perf_cpu_map *cpu_map__default_new(void)
@@ -54,45 +75,48 @@ static struct perf_cpu_map *cpu_map__default_new(void)
if (nr_cpus < 0)
return NULL;
- cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
+ cpus = perf_cpu_map__alloc(nr_cpus);
if (cpus != NULL) {
int i;
for (i = 0; i < nr_cpus; ++i)
- cpus->map[i] = i;
-
- cpus->nr = nr_cpus;
- refcount_set(&cpus->refcnt, 1);
+ RC_CHK_ACCESS(cpus)->map[i].cpu = i;
}
return cpus;
}
-static int cmp_int(const void *a, const void *b)
+struct perf_cpu_map *perf_cpu_map__default_new(void)
+{
+ return cpu_map__default_new();
+}
+
+
+static int cmp_cpu(const void *a, const void *b)
{
- return *(const int *)a - *(const int*)b;
+ const struct perf_cpu *cpu_a = a, *cpu_b = b;
+
+ return cpu_a->cpu - cpu_b->cpu;
}
-static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
+static struct perf_cpu_map *cpu_map__trim_new(int nr_cpus, const struct perf_cpu *tmp_cpus)
{
- size_t payload_size = nr_cpus * sizeof(int);
- struct perf_cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
+ size_t payload_size = nr_cpus * sizeof(struct perf_cpu);
+ struct perf_cpu_map *cpus = perf_cpu_map__alloc(nr_cpus);
int i, j;
if (cpus != NULL) {
- memcpy(cpus->map, tmp_cpus, payload_size);
- qsort(cpus->map, nr_cpus, sizeof(int), cmp_int);
+ memcpy(RC_CHK_ACCESS(cpus)->map, tmp_cpus, payload_size);
+ qsort(RC_CHK_ACCESS(cpus)->map, nr_cpus, sizeof(struct perf_cpu), cmp_cpu);
/* Remove dups */
j = 0;
for (i = 0; i < nr_cpus; i++) {
- if (i == 0 || cpus->map[i] != cpus->map[i - 1])
- cpus->map[j++] = cpus->map[i];
+ if (i == 0 || RC_CHK_ACCESS(cpus)->map[i].cpu != RC_CHK_ACCESS(cpus)->map[i - 1].cpu)
+ RC_CHK_ACCESS(cpus)->map[j++].cpu = RC_CHK_ACCESS(cpus)->map[i].cpu;
}
- cpus->nr = j;
+ perf_cpu_map__set_nr(cpus, j);
assert(j <= nr_cpus);
- refcount_set(&cpus->refcnt, 1);
}
-
return cpus;
}
@@ -100,7 +124,7 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file)
{
struct perf_cpu_map *cpus = NULL;
int nr_cpus = 0;
- int *tmp_cpus = NULL, *tmp;
+ struct perf_cpu *tmp_cpus = NULL, *tmp;
int max_entries = 0;
int n, cpu, prev;
char sep;
@@ -119,24 +143,24 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file)
if (new_max >= max_entries) {
max_entries = new_max + MAX_NR_CPUS / 2;
- tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto out_free_tmp;
tmp_cpus = tmp;
}
while (++prev < cpu)
- tmp_cpus[nr_cpus++] = prev;
+ tmp_cpus[nr_cpus++].cpu = prev;
}
if (nr_cpus == max_entries) {
max_entries += MAX_NR_CPUS;
- tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto out_free_tmp;
tmp_cpus = tmp;
}
- tmp_cpus[nr_cpus++] = cpu;
+ tmp_cpus[nr_cpus++].cpu = cpu;
if (n == 2 && sep == '-')
prev = cpu;
else
@@ -174,7 +198,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
unsigned long start_cpu, end_cpu = 0;
char *p = NULL;
int i, nr_cpus = 0;
- int *tmp_cpus = NULL, *tmp;
+ struct perf_cpu *tmp_cpus = NULL, *tmp;
int max_entries = 0;
if (!cpu_list)
@@ -215,17 +239,17 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list)
for (; start_cpu <= end_cpu; start_cpu++) {
/* check for duplicates */
for (i = 0; i < nr_cpus; i++)
- if (tmp_cpus[i] == (int)start_cpu)
+ if (tmp_cpus[i].cpu == (int)start_cpu)
goto invalid;
if (nr_cpus == max_entries) {
max_entries += MAX_NR_CPUS;
- tmp = realloc(tmp_cpus, max_entries * sizeof(int));
+ tmp = realloc(tmp_cpus, max_entries * sizeof(struct perf_cpu));
if (tmp == NULL)
goto invalid;
tmp_cpus = tmp;
}
- tmp_cpus[nr_cpus++] = (int)start_cpu;
+ tmp_cpus[nr_cpus++].cpu = (int)start_cpu;
}
if (*p)
++p;
@@ -245,46 +269,86 @@ out:
return cpus;
}
-int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
+struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx)
{
- if (idx < cpus->nr)
- return cpus->map[idx];
+ struct perf_cpu result = {
+ .cpu = -1
+ };
- return -1;
+ if (cpus && idx < RC_CHK_ACCESS(cpus)->nr)
+ return RC_CHK_ACCESS(cpus)->map[idx];
+
+ return result;
}
int perf_cpu_map__nr(const struct perf_cpu_map *cpus)
{
- return cpus ? cpus->nr : 1;
+ return cpus ? RC_CHK_ACCESS(cpus)->nr : 1;
}
bool perf_cpu_map__empty(const struct perf_cpu_map *map)
{
- return map ? map->map[0] == -1 : true;
+ return map ? RC_CHK_ACCESS(map)->map[0].cpu == -1 : true;
}
-int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu)
+int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
{
- int i;
+ int low, high;
- for (i = 0; i < cpus->nr; ++i) {
- if (cpus->map[i] == cpu)
- return i;
+ if (!cpus)
+ return -1;
+
+ low = 0;
+ high = RC_CHK_ACCESS(cpus)->nr;
+ while (low < high) {
+ int idx = (low + high) / 2;
+ struct perf_cpu cpu_at_idx = RC_CHK_ACCESS(cpus)->map[idx];
+
+ if (cpu_at_idx.cpu == cpu.cpu)
+ return idx;
+
+ if (cpu_at_idx.cpu > cpu.cpu)
+ high = idx;
+ else
+ low = idx + 1;
}
return -1;
}
-int perf_cpu_map__max(struct perf_cpu_map *map)
+bool perf_cpu_map__has(const struct perf_cpu_map *cpus, struct perf_cpu cpu)
+{
+ return perf_cpu_map__idx(cpus, cpu) != -1;
+}
+
+struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map)
{
- int i, max = -1;
+ struct perf_cpu result = {
+ .cpu = -1
+ };
- for (i = 0; i < map->nr; i++) {
- if (map->map[i] > max)
- max = map->map[i];
- }
+ // cpu_map__trim_new() qsort()s it, cpu_map__default_new() sorts it as well.
+ return RC_CHK_ACCESS(map)->nr > 0 ? RC_CHK_ACCESS(map)->map[RC_CHK_ACCESS(map)->nr - 1] : result;
+}
- return max;
+/** Is 'b' a subset of 'a'. */
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b)
+{
+ if (a == b || !b)
+ return true;
+ if (!a || RC_CHK_ACCESS(b)->nr > RC_CHK_ACCESS(a)->nr)
+ return false;
+
+ for (int i = 0, j = 0; i < RC_CHK_ACCESS(a)->nr; i++) {
+ if (RC_CHK_ACCESS(a)->map[i].cpu > RC_CHK_ACCESS(b)->map[j].cpu)
+ return false;
+ if (RC_CHK_ACCESS(a)->map[i].cpu == RC_CHK_ACCESS(b)->map[j].cpu) {
+ j++;
+ if (j == RC_CHK_ACCESS(b)->nr)
+ return true;
+ }
+ }
+ return false;
}
/*
@@ -298,44 +362,39 @@ int perf_cpu_map__max(struct perf_cpu_map *map)
struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
struct perf_cpu_map *other)
{
- int *tmp_cpus;
+ struct perf_cpu *tmp_cpus;
int tmp_len;
int i, j, k;
struct perf_cpu_map *merged;
- if (!orig && !other)
- return NULL;
- if (!orig) {
- perf_cpu_map__get(other);
- return other;
- }
- if (!other)
- return orig;
- if (orig->nr == other->nr &&
- !memcmp(orig->map, other->map, orig->nr * sizeof(int)))
+ if (perf_cpu_map__is_subset(orig, other))
return orig;
+ if (perf_cpu_map__is_subset(other, orig)) {
+ perf_cpu_map__put(orig);
+ return perf_cpu_map__get(other);
+ }
- tmp_len = orig->nr + other->nr;
- tmp_cpus = malloc(tmp_len * sizeof(int));
+ tmp_len = RC_CHK_ACCESS(orig)->nr + RC_CHK_ACCESS(other)->nr;
+ tmp_cpus = malloc(tmp_len * sizeof(struct perf_cpu));
if (!tmp_cpus)
return NULL;
/* Standard merge algorithm from wikipedia */
i = j = k = 0;
- while (i < orig->nr && j < other->nr) {
- if (orig->map[i] <= other->map[j]) {
- if (orig->map[i] == other->map[j])
+ while (i < RC_CHK_ACCESS(orig)->nr && j < RC_CHK_ACCESS(other)->nr) {
+ if (RC_CHK_ACCESS(orig)->map[i].cpu <= RC_CHK_ACCESS(other)->map[j].cpu) {
+ if (RC_CHK_ACCESS(orig)->map[i].cpu == RC_CHK_ACCESS(other)->map[j].cpu)
j++;
- tmp_cpus[k++] = orig->map[i++];
+ tmp_cpus[k++] = RC_CHK_ACCESS(orig)->map[i++];
} else
- tmp_cpus[k++] = other->map[j++];
+ tmp_cpus[k++] = RC_CHK_ACCESS(other)->map[j++];
}
- while (i < orig->nr)
- tmp_cpus[k++] = orig->map[i++];
+ while (i < RC_CHK_ACCESS(orig)->nr)
+ tmp_cpus[k++] = RC_CHK_ACCESS(orig)->map[i++];
- while (j < other->nr)
- tmp_cpus[k++] = other->map[j++];
+ while (j < RC_CHK_ACCESS(other)->nr)
+ tmp_cpus[k++] = RC_CHK_ACCESS(other)->map[j++];
assert(k <= tmp_len);
merged = cpu_map__trim_new(k, tmp_cpus);
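
To make the struct perf_cpu based interface above concrete from the caller's side, here is a small sketch that parses a CPU list, iterates it, and queries membership; the "0-3" list is an arbitrary example.

[source,c]
--
/* Caller-side sketch of the struct perf_cpu API changed above: parse a CPU
 * list string, iterate it, and query membership. "0-3" is arbitrary. */
#include <perf/cpumap.h>
#include <stdio.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");
	struct perf_cpu cpu, probe = { .cpu = 2 };
	int idx;

	if (!cpus)
		return 1;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("idx %d -> cpu %d\n", idx, cpu.cpu);

	printf("has cpu 2: %d, max cpu: %d\n",
	       perf_cpu_map__has(cpus, probe),
	       perf_cpu_map__max(cpus).cpu);

	perf_cpu_map__put(cpus);
	return 0;
}
--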
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index 5b9f2ca50591..81e8b5fcd8ba 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -11,10 +11,8 @@
#include <internal/mmap.h>
#include <internal/cpumap.h>
#include <internal/threadmap.h>
-#include <internal/xyarray.h>
#include <internal/lib.h>
#include <linux/zalloc.h>
-#include <sys/ioctl.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
@@ -25,16 +23,14 @@
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <api/fd/array.h>
+#include "internal.h"
void perf_evlist__init(struct perf_evlist *evlist)
{
- int i;
-
- for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
- INIT_HLIST_HEAD(&evlist->heads[i]);
INIT_LIST_HEAD(&evlist->entries);
evlist->nr_entries = 0;
fdarray__init(&evlist->pollfd, 64);
+ perf_evlist__reset_id_hash(evlist);
}
static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
@@ -44,16 +40,26 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
* We already have cpus for evsel (via PMU sysfs) so
* keep it, if there's no target cpu list defined.
*/
- if (!evsel->own_cpus || evlist->has_user_cpus) {
+ if (evsel->system_wide) {
+ perf_cpu_map__put(evsel->cpus);
+ evsel->cpus = perf_cpu_map__new(NULL);
+ } else if (!evsel->own_cpus || evlist->has_user_cpus ||
+ (!evsel->requires_cpu && perf_cpu_map__empty(evlist->user_requested_cpus))) {
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__get(evlist->cpus);
+ evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
} else if (evsel->cpus != evsel->own_cpus) {
perf_cpu_map__put(evsel->cpus);
evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
}
- perf_thread_map__put(evsel->threads);
- evsel->threads = perf_thread_map__get(evlist->threads);
+ if (evsel->system_wide) {
+ perf_thread_map__put(evsel->threads);
+ evsel->threads = perf_thread_map__new_dummy();
+ } else {
+ perf_thread_map__put(evsel->threads);
+ evsel->threads = perf_thread_map__get(evlist->threads);
+ }
+
evlist->all_cpus = perf_cpu_map__merge(evlist->all_cpus, evsel->cpus);
}
@@ -61,6 +67,8 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
{
struct perf_evsel *evsel;
+ evlist->needs_map_propagation = true;
+
perf_evlist__for_each_evsel(evlist, evsel)
__perf_evlist__propagate_maps(evlist, evsel);
}
@@ -68,9 +76,12 @@ static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
void perf_evlist__add(struct perf_evlist *evlist,
struct perf_evsel *evsel)
{
+ evsel->idx = evlist->nr_entries;
list_add_tail(&evsel->node, &evlist->entries);
evlist->nr_entries += 1;
- __perf_evlist__propagate_maps(evlist, evsel);
+
+ if (evlist->needs_map_propagation)
+ __perf_evlist__propagate_maps(evlist, evsel);
}
void perf_evlist__remove(struct perf_evlist *evlist,
@@ -124,9 +135,11 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
void perf_evlist__exit(struct perf_evlist *evlist)
{
- perf_cpu_map__put(evlist->cpus);
+ perf_cpu_map__put(evlist->user_requested_cpus);
+ perf_cpu_map__put(evlist->all_cpus);
perf_thread_map__put(evlist->threads);
- evlist->cpus = NULL;
+ evlist->user_requested_cpus = NULL;
+ evlist->all_cpus = NULL;
evlist->threads = NULL;
fdarray__exit(&evlist->pollfd);
}
@@ -154,9 +167,9 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
* original reference count of 1. If that is not the case it is up to
* the caller to increase the reference count.
*/
- if (cpus != evlist->cpus) {
- perf_cpu_map__put(evlist->cpus);
- evlist->cpus = perf_cpu_map__get(cpus);
+ if (cpus != evlist->user_requested_cpus) {
+ perf_cpu_map__put(evlist->user_requested_cpus);
+ evlist->user_requested_cpus = perf_cpu_map__get(cpus);
}
if (threads != evlist->threads) {
@@ -164,9 +177,6 @@ void perf_evlist__set_maps(struct perf_evlist *evlist,
evlist->threads = perf_thread_map__get(threads);
}
- if (!evlist->all_cpus && cpus)
- evlist->all_cpus = perf_cpu_map__get(cpus);
-
perf_evlist__propagate_maps(evlist);
}
@@ -234,6 +244,14 @@ static void perf_evlist__id_hash(struct perf_evlist *evlist,
hlist_add_head(&sid->node, &evlist->heads[hash]);
}
+void perf_evlist__reset_id_hash(struct perf_evlist *evlist)
+{
+ int i;
+
+ for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
+ INIT_HLIST_HEAD(&evlist->heads[i]);
+}
+
void perf_evlist__id_add(struct perf_evlist *evlist,
struct perf_evsel *evsel,
int cpu, int thread, u64 id)
@@ -285,7 +303,7 @@ add:
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
- int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
int nr_threads = perf_thread_map__nr(evlist->threads);
int nfds = 0;
struct perf_evsel *evsel;
@@ -305,9 +323,9 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
}
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
- void *ptr, short revent)
+ void *ptr, short revent, enum fdarray_flags flags)
{
- int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP);
+ int pos = fdarray__add(&evlist->pollfd, fd, revent | POLLERR | POLLHUP, flags);
if (pos >= 0) {
evlist->pollfd.priv[pos].ptr = ptr;
@@ -364,21 +382,13 @@ static struct perf_mmap* perf_evlist__alloc_mmap(struct perf_evlist *evlist, boo
return map;
}
-static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
- struct perf_evsel *evsel, int idx, int cpu,
- int thread)
+static void perf_evsel__set_sid_idx(struct perf_evsel *evsel, int idx, int cpu, int thread)
{
struct perf_sample_id *sid = SID(evsel, cpu, thread);
sid->idx = idx;
- if (evlist->cpus && cpu >= 0)
- sid->cpu = evlist->cpus->map[cpu];
- else
- sid->cpu = -1;
- if (!evsel->system_wide && evlist->threads && thread >= 0)
- sid->tid = perf_thread_map__pid(evlist->threads, thread);
- else
- sid->tid = -1;
+ sid->cpu = perf_cpu_map__cpu(evsel->cpus, cpu);
+ sid->tid = perf_thread_map__pid(evsel->threads, thread);
}
static struct perf_mmap*
@@ -406,7 +416,7 @@ perf_evlist__mmap_cb_get(struct perf_evlist *evlist, bool overwrite, int idx)
static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
- int output, int cpu)
+ int output, struct perf_cpu cpu)
{
return perf_mmap__mmap(map, mp, output, cpu);
}
@@ -423,14 +433,15 @@ static void perf_evlist__set_mmap_first(struct perf_evlist *evlist, struct perf_
static int
mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
int idx, struct perf_mmap_param *mp, int cpu_idx,
- int thread, int *_output, int *_output_overwrite)
+ int thread, int *_output, int *_output_overwrite, int *nr_mmaps)
{
- int evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+ struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->all_cpus, cpu_idx);
struct perf_evsel *evsel;
int revent;
perf_evlist__for_each_entry(evlist, evsel) {
bool overwrite = evsel->attr.write_backward;
+ enum fdarray_flags flgs;
struct perf_mmap *map;
int *output, fd, cpu;
@@ -473,12 +484,21 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
*/
refcount_set(&map->refcnt, 2);
+ if (ops->idx)
+ ops->idx(evlist, evsel, mp, idx);
+
+ /* Debug message used by test scripts */
+ pr_debug("idx %d: mmapping fd %d\n", idx, *output);
if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
return -1;
+ *nr_mmaps += 1;
+
if (!idx)
perf_evlist__set_mmap_first(evlist, map, overwrite);
} else {
+ /* Debug message used by test scripts */
+ pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
return -1;
@@ -487,8 +507,8 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
revent = !overwrite ? POLLIN : 0;
- if (!evsel->system_wide &&
- perf_evlist__add_pollfd(evlist, fd, map, revent) < 0) {
+ flgs = evsel->system_wide ? fdarray_flag__nonfilterable : fdarray_flag__default;
+ if (perf_evlist__add_pollfd(evlist, fd, map, revent, flgs) < 0) {
perf_mmap__put(map);
return -1;
}
@@ -497,8 +517,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
fd) < 0)
return -1;
- perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
- thread);
+ perf_evsel__set_sid_idx(evsel, idx, cpu, thread);
}
}
@@ -509,21 +528,37 @@ static int
mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
- int thread;
int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int cpu, thread, idx = 0;
+ int nr_mmaps = 0;
- for (thread = 0; thread < nr_threads; thread++) {
+ pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
+ __func__, nr_cpus, nr_threads);
+
+ /* per-thread mmaps */
+ for (thread = 0; thread < nr_threads; thread++, idx++) {
int output = -1;
int output_overwrite = -1;
- if (ops->idx)
- ops->idx(evlist, mp, thread, false);
+ if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
+ &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
- if (mmap_per_evsel(evlist, ops, thread, mp, 0, thread,
- &output, &output_overwrite))
+ /* system-wide mmaps i.e. per-cpu */
+ for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
+ &output_overwrite, &nr_mmaps))
goto out_unmap;
}
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
return 0;
out_unmap:
@@ -536,23 +571,26 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
int nr_threads = perf_thread_map__nr(evlist->threads);
- int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int nr_mmaps = 0;
int cpu, thread;
+ pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);
+
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
int output_overwrite = -1;
- if (ops->idx)
- ops->idx(evlist, mp, cpu, true);
-
for (thread = 0; thread < nr_threads; thread++) {
if (mmap_per_evsel(evlist, ops, cpu, mp, cpu,
- thread, &output, &output_overwrite))
+ thread, &output, &output_overwrite, &nr_mmaps))
goto out_unmap;
}
}
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
return 0;
out_unmap:
@@ -564,9 +602,14 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist)
{
int nr_mmaps;
- nr_mmaps = perf_cpu_map__nr(evlist->cpus);
- if (perf_cpu_map__empty(evlist->cpus))
- nr_mmaps = perf_thread_map__nr(evlist->threads);
+ /* One for each CPU */
+ nr_mmaps = perf_cpu_map__nr(evlist->all_cpus);
+ if (perf_cpu_map__empty(evlist->all_cpus)) {
+ /* Plus one for each thread */
+ nr_mmaps += perf_thread_map__nr(evlist->threads);
+ /* Minus the per-thread CPU (-1) */
+ nr_mmaps -= 1;
+ }
return nr_mmaps;
}
@@ -575,9 +618,8 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
+ const struct perf_cpu_map *cpus = evlist->all_cpus;
struct perf_evsel *evsel;
- const struct perf_cpu_map *cpus = evlist->cpus;
- const struct perf_thread_map *threads = evlist->threads;
if (!ops || !ops->get || !ops->mmap)
return -EINVAL;
@@ -589,7 +631,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
perf_evlist__for_each_entry(evlist, evsel) {
if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
evsel->sample_id == NULL &&
- perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
+ perf_evsel__alloc_id(evsel, evsel->fd->max_x, evsel->fd->max_y) < 0)
return -ENOMEM;
}
@@ -642,3 +684,42 @@ perf_evlist__next_mmap(struct perf_evlist *evlist, struct perf_mmap *map,
return overwrite ? evlist->mmap_ovw_first : evlist->mmap_first;
}
+
+void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader)
+{
+ struct perf_evsel *evsel;
+ int n = 0;
+
+ __perf_evlist__for_each_entry(list, evsel) {
+ evsel->leader = leader;
+ n++;
+ }
+ leader->nr_members = n;
+}
+
+void perf_evlist__set_leader(struct perf_evlist *evlist)
+{
+ if (evlist->nr_entries) {
+ struct perf_evsel *first = list_entry(evlist->entries.next,
+ struct perf_evsel, node);
+
+ __perf_evlist__set_leader(&evlist->entries, first);
+ }
+}
+
+int perf_evlist__nr_groups(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ int nr_groups = 0;
+
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ /*
+ * evsels by default have a nr_members of 1, and they are their
+ * own leader. If the nr_members is >1 then this is an
+ * indication of a group.
+ */
+ if (evsel->leader == evsel && evsel->nr_members > 1)
+ nr_groups++;
+ }
+ return nr_groups;
+}
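
The leader/group helpers added at the end of this file are easiest to see with a two-event sketch. In the outline below, attr_a and attr_b are assumed to be configured elsewhere, error handling is trimmed, and the internal/evlist.h include is an assumption about where these helpers are declared, since the declarations are not part of this hunk.

[source,c]
--
/* Sketch of the grouping helpers added above: after perf_evlist__set_leader()
 * the first event leads both entries, so perf_evlist__nr_groups() reports one
 * group (nr_members > 1 on the leader marks a group). */
#include <linux/perf_event.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <internal/evlist.h>	/* assumed home of the set_leader/nr_groups decls */

static int make_one_group(struct perf_event_attr *attr_a,
			  struct perf_event_attr *attr_b)
{
	struct perf_evlist *evlist = perf_evlist__new();
	int nr_groups;

	if (!evlist)
		return -1;

	perf_evlist__add(evlist, perf_evsel__new(attr_a));
	perf_evlist__add(evlist, perf_evsel__new(attr_b));

	perf_evlist__set_leader(evlist);		/* first entry leads both */
	nr_groups = perf_evlist__nr_groups(evlist);	/* 1 for this list */

	perf_evlist__delete(evlist);	/* purges and frees the added evsels */
	return nr_groups;
}
--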
diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c
index 4dc06289f4c7..8b51b008a81f 100644
--- a/tools/lib/perf/evsel.c
+++ b/tools/lib/perf/evsel.c
@@ -11,15 +11,21 @@
#include <stdlib.h>
#include <internal/xyarray.h>
#include <internal/cpumap.h>
+#include <internal/mmap.h>
#include <internal/threadmap.h>
#include <internal/lib.h>
#include <linux/string.h>
#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <asm/bug.h>
-void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr)
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
+ int idx)
{
INIT_LIST_HEAD(&evsel->node);
evsel->attr = *attr;
+ evsel->idx = idx;
+ evsel->leader = evsel;
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
@@ -27,7 +33,7 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
struct perf_evsel *evsel = zalloc(sizeof(*evsel));
if (evsel != NULL)
- perf_evsel__init(evsel, attr);
+ perf_evsel__init(evsel, attr, 0);
return evsel;
}
@@ -37,17 +43,25 @@ void perf_evsel__delete(struct perf_evsel *evsel)
free(evsel);
}
-#define FD(e, x, y) (*(int *) xyarray__entry(e->fd, x, y))
+#define FD(_evsel, _cpu_map_idx, _thread) \
+ ((int *)xyarray__entry(_evsel->fd, _cpu_map_idx, _thread))
+#define MMAP(_evsel, _cpu_map_idx, _thread) \
+ (_evsel->mmap ? ((struct perf_mmap *) xyarray__entry(_evsel->mmap, _cpu_map_idx, _thread)) \
+ : NULL)
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
if (evsel->fd) {
- int cpu, thread;
- for (cpu = 0; cpu < ncpus; cpu++) {
+ int idx, thread;
+
+ for (idx = 0; idx < ncpus; idx++) {
for (thread = 0; thread < nthreads; thread++) {
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, idx, thread);
+
+ if (fd)
+ *fd = -1;
}
}
}
@@ -55,18 +69,52 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
return evsel->fd != NULL ? 0 : -ENOMEM;
}
+static int perf_evsel__alloc_mmap(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ evsel->mmap = xyarray__new(ncpus, nthreads, sizeof(struct perf_mmap));
+
+ return evsel->mmap != NULL ? 0 : -ENOMEM;
+}
+
static int
sys_perf_event_open(struct perf_event_attr *attr,
- pid_t pid, int cpu, int group_fd,
+ pid_t pid, struct perf_cpu cpu, int group_fd,
unsigned long flags)
{
- return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+ return syscall(__NR_perf_event_open, attr, pid, cpu.cpu, group_fd, flags);
+}
+
+static int get_group_fd(struct perf_evsel *evsel, int cpu_map_idx, int thread, int *group_fd)
+{
+ struct perf_evsel *leader = evsel->leader;
+ int *fd;
+
+ if (evsel == leader) {
+ *group_fd = -1;
+ return 0;
+ }
+
+ /*
+ * Leader must be already processed/open,
+ * if not it's a bug.
+ */
+ if (!leader->fd)
+ return -ENOTCONN;
+
+ fd = FD(leader, cpu_map_idx, thread);
+ if (fd == NULL || *fd == -1)
+ return -EBADF;
+
+ *group_fd = *fd;
+
+ return 0;
}
int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
- int cpu, thread, err = 0;
+ struct perf_cpu cpu;
+ int idx, thread, err = 0;
if (cpus == NULL) {
static struct perf_cpu_map *empty_cpu_map;
@@ -93,44 +141,60 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
}
if (evsel->fd == NULL &&
- perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+ perf_evsel__alloc_fd(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
return -ENOMEM;
- for (cpu = 0; cpu < cpus->nr; cpu++) {
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
for (thread = 0; thread < threads->nr; thread++) {
- int fd;
+ int fd, group_fd, *evsel_fd;
+
+ evsel_fd = FD(evsel, idx, thread);
+ if (evsel_fd == NULL) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = get_group_fd(evsel, idx, thread, &group_fd);
+ if (err < 0)
+ goto out;
fd = sys_perf_event_open(&evsel->attr,
threads->map[thread].pid,
- cpus->map[cpu], -1, 0);
+ cpu, group_fd, 0);
- if (fd < 0)
- return -errno;
+ if (fd < 0) {
+ err = -errno;
+ goto out;
+ }
- FD(evsel, cpu, thread) = fd;
+ *evsel_fd = fd;
}
}
+out:
+ if (err)
+ perf_evsel__close(evsel);
return err;
}
-static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu)
+static void perf_evsel__close_fd_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
- if (FD(evsel, cpu, thread) >= 0)
- close(FD(evsel, cpu, thread));
- FD(evsel, cpu, thread) = -1;
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd && *fd >= 0) {
+ close(*fd);
+ *fd = -1;
+ }
}
}
void perf_evsel__close_fd(struct perf_evsel *evsel)
{
- int cpu;
-
- for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
- perf_evsel__close_fd_cpu(evsel, cpu);
+ for (int idx = 0; idx < xyarray__max_x(evsel->fd); idx++)
+ perf_evsel__close_fd_cpu(evsel, idx);
}
void perf_evsel__free_fd(struct perf_evsel *evsel)
@@ -148,12 +212,81 @@ void perf_evsel__close(struct perf_evsel *evsel)
perf_evsel__free_fd(evsel);
}
-void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu)
+void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
if (evsel->fd == NULL)
return;
- perf_evsel__close_fd_cpu(evsel, cpu);
+ perf_evsel__close_fd_cpu(evsel, cpu_map_idx);
+}
+
+void perf_evsel__munmap(struct perf_evsel *evsel)
+{
+ int idx, thread;
+
+ if (evsel->fd == NULL || evsel->mmap == NULL)
+ return;
+
+ for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int *fd = FD(evsel, idx, thread);
+
+ if (fd == NULL || *fd < 0)
+ continue;
+
+ perf_mmap__munmap(MMAP(evsel, idx, thread));
+ }
+ }
+
+ xyarray__delete(evsel->mmap);
+ evsel->mmap = NULL;
+}
+
+int perf_evsel__mmap(struct perf_evsel *evsel, int pages)
+{
+ int ret, idx, thread;
+ struct perf_mmap_param mp = {
+ .prot = PROT_READ | PROT_WRITE,
+ .mask = (pages * page_size) - 1,
+ };
+
+ if (evsel->fd == NULL || evsel->mmap)
+ return -EINVAL;
+
+ if (perf_evsel__alloc_mmap(evsel, xyarray__max_x(evsel->fd), xyarray__max_y(evsel->fd)) < 0)
+ return -ENOMEM;
+
+ for (idx = 0; idx < xyarray__max_x(evsel->fd); idx++) {
+ for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
+ int *fd = FD(evsel, idx, thread);
+ struct perf_mmap *map;
+ struct perf_cpu cpu = perf_cpu_map__cpu(evsel->cpus, idx);
+
+ if (fd == NULL || *fd < 0)
+ continue;
+
+ map = MMAP(evsel, idx, thread);
+ perf_mmap__init(map, NULL, false, NULL);
+
+ ret = perf_mmap__mmap(map, &mp, *fd, cpu);
+ if (ret) {
+ perf_evsel__munmap(evsel);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread)
+{
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd == NULL || *fd < 0 || MMAP(evsel, cpu_map_idx, thread) == NULL)
+ return NULL;
+
+ return MMAP(evsel, cpu_map_idx, thread)->base;
}
int perf_evsel__read_size(struct perf_evsel *evsel)
@@ -172,6 +305,9 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
if (read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
+ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
if (read_format & PERF_FORMAT_GROUP) {
nr = evsel->nr_members;
size += sizeof(u64);
@@ -181,31 +317,120 @@ int perf_evsel__read_size(struct perf_evsel *evsel)
return size;
}
-int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+/* This only reads values for the leader */
+static int perf_evsel__read_group(struct perf_evsel *evsel, int cpu_map_idx,
+ int thread, struct perf_counts_values *count)
+{
+ size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu_map_idx, thread);
+ u64 read_format = evsel->attr.read_format;
+ u64 *data;
+ int idx = 1;
+
+ if (fd == NULL || *fd < 0)
+ return -EINVAL;
+
+ data = calloc(1, size);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (readn(*fd, data, size) <= 0) {
+ free(data);
+ return -errno;
+ }
+
+ /*
+ * This reads only the leader event intentionally since we don't have
+ * perf counts values for sibling events.
+ */
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ count->ena = data[idx++];
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ count->run = data[idx++];
+
+ /* value is always available */
+ count->val = data[idx++];
+ if (read_format & PERF_FORMAT_ID)
+ count->id = data[idx++];
+ if (read_format & PERF_FORMAT_LOST)
+ count->lost = data[idx++];
+
+ free(data);
+ return 0;
+}
+
+/*
+ * The perf read format is very flexible: fill in the perf_counts_values
+ * fields only for the entries that this event's read_format provides.
+ */
+static void perf_evsel__adjust_values(struct perf_evsel *evsel, u64 *buf,
+ struct perf_counts_values *count)
+{
+ u64 read_format = evsel->attr.read_format;
+ int n = 0;
+
+ count->val = buf[n++];
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ count->ena = buf[n++];
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ count->run = buf[n++];
+
+ if (read_format & PERF_FORMAT_ID)
+ count->id = buf[n++];
+
+ if (read_format & PERF_FORMAT_LOST)
+ count->lost = buf[n++];
+}
+
+int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count)
{
size_t size = perf_evsel__read_size(evsel);
+ int *fd = FD(evsel, cpu_map_idx, thread);
+ u64 read_format = evsel->attr.read_format;
+ struct perf_counts_values buf;
memset(count, 0, sizeof(*count));
- if (FD(evsel, cpu, thread) < 0)
+ if (fd == NULL || *fd < 0)
return -EINVAL;
- if (readn(FD(evsel, cpu, thread), count->values, size) <= 0)
+ if (read_format & PERF_FORMAT_GROUP)
+ return perf_evsel__read_group(evsel, cpu_map_idx, thread, count);
+
+ if (MMAP(evsel, cpu_map_idx, thread) &&
+ !(read_format & (PERF_FORMAT_ID | PERF_FORMAT_LOST)) &&
+ !perf_mmap__read_self(MMAP(evsel, cpu_map_idx, thread), count))
+ return 0;
+
+ if (readn(*fd, buf.values, size) <= 0)
return -errno;
+ perf_evsel__adjust_values(evsel, buf.values, count);
return 0;
}
+static int perf_evsel__ioctl(struct perf_evsel *evsel, int ioc, void *arg,
+ int cpu_map_idx, int thread)
+{
+ int *fd = FD(evsel, cpu_map_idx, thread);
+
+ if (fd == NULL || *fd < 0)
+ return -1;
+
+ return ioctl(*fd, ioc, arg);
+}
+
static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
int ioc, void *arg,
- int cpu)
+ int cpu_map_idx)
{
int thread;
for (thread = 0; thread < xyarray__max_y(evsel->fd); thread++) {
- int fd = FD(evsel, cpu, thread),
- err = ioctl(fd, ioc, arg);
+ int err = perf_evsel__ioctl(evsel, ioc, arg, cpu_map_idx, thread);
if (err)
return err;
@@ -214,9 +439,24 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel,
return 0;
}
-int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu)
+int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
+{
+ return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu_map_idx);
+}
+
+int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread)
{
- return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, cpu);
+ struct perf_cpu cpu __maybe_unused;
+ int idx;
+ int err;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, evsel->cpus) {
+ err = perf_evsel__ioctl(evsel, PERF_EVENT_IOC_ENABLE, NULL, idx, thread);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
int perf_evsel__enable(struct perf_evsel *evsel)
@@ -229,9 +469,9 @@ int perf_evsel__enable(struct perf_evsel *evsel)
return err;
}
-int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu)
+int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx)
{
- return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu);
+ return perf_evsel__run_ioctl(evsel, PERF_EVENT_IOC_DISABLE, NULL, cpu_map_idx);
}
int perf_evsel__disable(struct perf_evsel *evsel)
@@ -248,7 +488,7 @@ int perf_evsel__apply_filter(struct perf_evsel *evsel, const char *filter)
{
int err = 0, i;
- for (i = 0; i < evsel->cpus->nr && !err; i++)
+ for (i = 0; i < perf_cpu_map__nr(evsel->cpus) && !err; i++)
err = perf_evsel__run_ioctl(evsel,
PERF_EVENT_IOC_SET_FILTER,
(void *)filter, i);
@@ -275,9 +515,6 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
if (ncpus == 0 || nthreads == 0)
return 0;
- if (evsel->system_wide)
- nthreads = 1;
-
evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
if (evsel->sample_id == NULL)
return -ENOMEM;
@@ -299,3 +536,22 @@ void perf_evsel__free_id(struct perf_evsel *evsel)
zfree(&evsel->id);
evsel->ids = 0;
}
+
+void perf_counts_values__scale(struct perf_counts_values *count,
+ bool scale, __s8 *pscaled)
+{
+ s8 scaled = 0;
+
+ if (scale) {
+ if (count->run == 0) {
+ scaled = -1;
+ count->val = 0;
+ } else if (count->run < count->ena) {
+ scaled = 1;
+ count->val = (u64)((double)count->val * count->ena / count->run);
+ }
+ }
+
+ if (pscaled)
+ *pscaled = scaled;
+}
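
Note on the scaling helper added above: when an event was multiplexed (run < ena) the raw value is extrapolated by ena/run, and when it never ran the value is zeroed and *pscaled reports -1. A minimal caller sketch, assuming an already opened evsel and using only APIs exported by this patch:

#include <stdint.h>
#include <linux/types.h>
#include <perf/evsel.h>

/* Sketch: read cpu_map_idx 0 / thread 0 and extrapolate for multiplexing. */
static uint64_t read_scaled(struct perf_evsel *evsel)
{
	struct perf_counts_values counts = { .val = 0 };
	__s8 scaled;

	perf_evsel__read(evsel, 0, 0, &counts);

	/* e.g. val = 1000, ena = 2ms, run = 1ms  ->  val becomes ~2000. */
	perf_counts_values__scale(&counts, true, &scaled);

	return scaled == -1 ? 0 : counts.val;
}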
diff --git a/tools/lib/perf/include/internal/cpumap.h b/tools/lib/perf/include/internal/cpumap.h
index 840d4032587b..49649eb51ce4 100644
--- a/tools/lib/perf/include/internal/cpumap.h
+++ b/tools/lib/perf/include/internal/cpumap.h
@@ -3,17 +3,36 @@
#define __LIBPERF_INTERNAL_CPUMAP_H
#include <linux/refcount.h>
+#include <perf/cpumap.h>
+#include <internal/rc_check.h>
-struct perf_cpu_map {
+/**
+ * A sized, reference counted, sorted array of integers representing CPU
+ * numbers. This is commonly used to capture which CPUs a PMU is associated
+ * with. Indices into the cpumap are frequently used in preference to raw CPU
+ * numbers, since the indices are dense while CPU numbers may have gaps. For
+ * events associated with a pid, rather than
+ * a CPU, a single dummy map with an entry of -1 is used.
+ */
+DECLARE_RC_STRUCT(perf_cpu_map) {
refcount_t refcnt;
+ /** Length of the map array. */
int nr;
- int map[];
+ /** The CPU values. */
+ struct perf_cpu map[];
};
#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 2048
#endif
-int perf_cpu_map__idx(struct perf_cpu_map *cpus, int cpu);
+struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus);
+int perf_cpu_map__idx(const struct perf_cpu_map *cpus, struct perf_cpu cpu);
+bool perf_cpu_map__is_subset(const struct perf_cpu_map *a, const struct perf_cpu_map *b);
+void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus);
+
+static inline refcount_t *perf_cpu_map__refcnt(struct perf_cpu_map *map)
+{
+ return &RC_CHK_ACCESS(map)->refcnt;
+}
#endif /* __LIBPERF_INTERNAL_CPUMAP_H */
diff --git a/tools/lib/perf/include/internal/evlist.h b/tools/lib/perf/include/internal/evlist.h
index 74dc8c3f0b66..3339bc2f1765 100644
--- a/tools/lib/perf/include/internal/evlist.h
+++ b/tools/lib/perf/include/internal/evlist.h
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <api/fd/array.h>
+#include <internal/cpumap.h>
#include <internal/evsel.h>
#define PERF_EVLIST__HLIST_BITS 8
@@ -17,7 +18,13 @@ struct perf_evlist {
struct list_head entries;
int nr_entries;
bool has_user_cpus;
- struct perf_cpu_map *cpus;
+ bool needs_map_propagation;
+ /**
+ * The cpus passed from the command line or all online CPUs by
+ * default.
+ */
+ struct perf_cpu_map *user_requested_cpus;
+ /** The union of all evsel cpu maps. */
struct perf_cpu_map *all_cpus;
struct perf_thread_map *threads;
int nr_mmaps;
@@ -31,11 +38,12 @@ struct perf_evlist {
};
typedef void
-(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_mmap_param*, int, bool);
+(*perf_evlist_mmap__cb_idx_t)(struct perf_evlist*, struct perf_evsel*,
+ struct perf_mmap_param*, int);
typedef struct perf_mmap*
(*perf_evlist_mmap__cb_get_t)(struct perf_evlist*, bool, int);
typedef int
-(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, int);
+(*perf_evlist_mmap__cb_mmap_t)(struct perf_mmap*, struct perf_mmap_param*, int, struct perf_cpu);
struct perf_evlist_mmap_ops {
perf_evlist_mmap__cb_idx_t idx;
@@ -45,7 +53,7 @@ struct perf_evlist_mmap_ops {
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd,
- void *ptr, short revent);
+ void *ptr, short revent, enum fdarray_flags flags);
int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_evlist_mmap_ops *ops,
@@ -124,4 +132,7 @@ int perf_evlist__id_add_fd(struct perf_evlist *evlist,
struct perf_evsel *evsel,
int cpu, int thread, int fd);
+void perf_evlist__reset_id_hash(struct perf_evlist *evlist);
+
+void __perf_evlist__set_leader(struct list_head *list, struct perf_evsel *leader);
#endif /* __LIBPERF_INTERNAL_EVLIST_H */
diff --git a/tools/lib/perf/include/internal/evsel.h b/tools/lib/perf/include/internal/evsel.h
index 1ffd083b235e..a99a75d9e78f 100644
--- a/tools/lib/perf/include/internal/evsel.h
+++ b/tools/lib/perf/include/internal/evsel.h
@@ -6,8 +6,8 @@
#include <linux/perf_event.h>
#include <stdbool.h>
#include <sys/types.h>
+#include <internal/cpumap.h>
-struct perf_cpu_map;
struct perf_thread_map;
struct xyarray;
@@ -27,9 +27,13 @@ struct perf_sample_id {
* queue number.
*/
int idx;
- int cpu;
+ struct perf_cpu cpu;
pid_t tid;
+ /* Guest machine pid and VCPU, valid only if machine_pid is non-zero */
+ pid_t machine_pid;
+ struct perf_cpu vcpu;
+
/* Holds total ID period value for PERF_SAMPLE_READ processing. */
u64 period;
};
@@ -41,16 +45,31 @@ struct perf_evsel {
struct perf_cpu_map *own_cpus;
struct perf_thread_map *threads;
struct xyarray *fd;
+ struct xyarray *mmap;
struct xyarray *sample_id;
u64 *id;
u32 ids;
+ struct perf_evsel *leader;
/* parse modifier helper */
int nr_members;
+ /*
+ * system_wide is for events that need to be on every CPU, irrespective
+ * of user requested CPUs or threads. Map propagation will set cpus to
+ * this event's own_cpus, whereby they will contribute to evlist
+ * all_cpus.
+ */
bool system_wide;
+ /*
+ * Some events, for example uncore events, require a CPU.
+ * That is, the CPU it is opened on cannot be the 'any CPU' value of -1.
+ */
+ bool requires_cpu;
+ int idx;
};
-void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr);
+void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr,
+ int idx);
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel);
void perf_evsel__free_fd(struct perf_evsel *evsel);
diff --git a/tools/lib/perf/include/internal/lib.h b/tools/lib/perf/include/internal/lib.h
index 5175d491b2d4..85471a4b900f 100644
--- a/tools/lib/perf/include/internal/lib.h
+++ b/tools/lib/perf/include/internal/lib.h
@@ -9,4 +9,6 @@ extern unsigned int page_size;
ssize_t readn(int fd, void *buf, size_t n);
ssize_t writen(int fd, const void *buf, size_t n);
+ssize_t preadn(int fd, void *buf, size_t n, off_t offs);
+
#endif /* __LIBPERF_INTERNAL_CPUMAP_H */
diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h
index be7556e0a2b2..5a062af8e9d8 100644
--- a/tools/lib/perf/include/internal/mmap.h
+++ b/tools/lib/perf/include/internal/mmap.h
@@ -6,11 +6,13 @@
#include <linux/refcount.h>
#include <linux/types.h>
#include <stdbool.h>
+#include <internal/cpumap.h>
/* perf sample has 16 bits size limit */
#define PERF_SAMPLE_MAX_SIZE (1 << 16)
struct perf_mmap;
+struct perf_counts_values;
typedef void (*libperf_unmap_cb_t)(struct perf_mmap *map);
@@ -23,7 +25,7 @@ struct perf_mmap {
void *base;
int mask;
int fd;
- int cpu;
+ struct perf_cpu cpu;
refcount_t refcnt;
u64 prev;
u64 start;
@@ -45,11 +47,13 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map);
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
bool overwrite, libperf_unmap_cb_t unmap_cb);
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
- int fd, int cpu);
+ int fd, struct perf_cpu cpu);
void perf_mmap__munmap(struct perf_mmap *map);
void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);
u64 perf_mmap__read_head(struct perf_mmap *map);
+int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count);
+
#endif /* __LIBPERF_INTERNAL_MMAP_H */
diff --git a/tools/lib/perf/include/internal/rc_check.h b/tools/lib/perf/include/internal/rc_check.h
new file mode 100644
index 000000000000..d5d771ccdc7b
--- /dev/null
+++ b/tools/lib/perf/include/internal/rc_check.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __LIBPERF_INTERNAL_RC_CHECK_H
+#define __LIBPERF_INTERNAL_RC_CHECK_H
+
+#include <stdlib.h>
+#include <linux/zalloc.h>
+
+/*
+ * Enable reference count checking implicitly with leak checking, which is
+ * integrated into address sanitizer.
+ */
+#if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+#define REFCNT_CHECKING 1
+#endif
+
+/*
+ * Shared reference count checking macros.
+ *
+ * Reference count checking is an approach to sanitizing the use of reference
+ * counted structs. It leverages address and leak sanitizers to make sure gets
+ * are paired with a put. Reference count checking adds a malloc-ed layer of
+ * indirection on a get, and frees it on a put. A missed put will be reported as
+ * a memory leak. A double put will be reported as a double free. Accessing
+ * after a put will cause a use-after-free and/or a segfault.
+ */
+
+#ifndef REFCNT_CHECKING
+/* Replaces "struct foo" so that the pointer may be interposed. */
+#define DECLARE_RC_STRUCT(struct_name) \
+ struct struct_name
+
+/* Declare a reference counted struct variable. */
+#define RC_STRUCT(struct_name) struct struct_name
+
+/*
+ * Interpose the indirection. Result will hold the indirection and object is the
+ * reference counted struct.
+ */
+#define ADD_RC_CHK(result, object) (result = object, object)
+
+/* Strip the indirection layer. */
+#define RC_CHK_ACCESS(object) object
+
+/* Frees the object and the indirection layer. */
+#define RC_CHK_FREE(object) free(object)
+
+/* A get operation adding the indirection layer. */
+#define RC_CHK_GET(result, object) ADD_RC_CHK(result, object)
+
+/* A put operation removing the indirection layer. */
+#define RC_CHK_PUT(object) {}
+
+#else
+
+/* Replaces "struct foo" so that the pointer may be interposed. */
+#define DECLARE_RC_STRUCT(struct_name) \
+ struct original_##struct_name; \
+ struct struct_name { \
+ struct original_##struct_name *orig; \
+ }; \
+ struct original_##struct_name
+
+/* Declare a reference counted struct variable. */
+#define RC_STRUCT(struct_name) struct original_##struct_name
+
+/*
+ * Interpose the indirection. Result will hold the indirection and object is the
+ * reference counted struct.
+ */
+#define ADD_RC_CHK(result, object) \
+ ( \
+ object ? (result = malloc(sizeof(*result)), \
+ result ? (result->orig = object, result) \
+ : (result = NULL, NULL)) \
+ : (result = NULL, NULL) \
+ )
+
+/* Strip the indirection layer. */
+#define RC_CHK_ACCESS(object) object->orig
+
+/* Frees the object and the indirection layer. */
+#define RC_CHK_FREE(object) \
+ do { \
+ zfree(&object->orig); \
+ free(object); \
+ } while(0)
+
+/* A get operation adding the indirection layer. */
+#define RC_CHK_GET(result, object) ADD_RC_CHK(result, (object ? object->orig : NULL))
+
+/* A put operation removing the indirection layer. */
+#define RC_CHK_PUT(object) \
+ do { \
+ if (object) { \
+ object->orig = NULL; \
+ free(object); \
+ } \
+ } while(0)
+
+#endif
+
+#endif /* __LIBPERF_INTERNAL_RC_CHECK_H */
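
As a rough illustration of how the macros above compose, here is a hypothetical reference counted type (struct foo and its helpers are not part of libperf); it builds the same way with and without REFCNT_CHECKING:

#include <stdlib.h>
#include <linux/refcount.h>
#include <internal/rc_check.h>

DECLARE_RC_STRUCT(foo) {
	refcount_t refcnt;
	int value;
};

static struct foo *foo__new(void)
{
	struct foo *result;
	RC_STRUCT(foo) *f = calloc(1, sizeof(*f));

	/* Interpose the checking layer (a no-op when checking is disabled). */
	if (ADD_RC_CHK(result, f))
		refcount_set(&RC_CHK_ACCESS(result)->refcnt, 1);
	return result;
}

static void foo__put(struct foo *f)
{
	if (f && refcount_dec_and_test(&RC_CHK_ACCESS(f)->refcnt))
		RC_CHK_FREE(f);		/* last put: free object and wrapper */
	else
		RC_CHK_PUT(f);		/* drop only this reference's wrapper */
}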
diff --git a/tools/lib/perf/include/internal/tests.h b/tools/lib/perf/include/internal/tests.h
index 2093e8868a67..b130a6663ff8 100644
--- a/tools/lib/perf/include/internal/tests.h
+++ b/tools/lib/perf/include/internal/tests.h
@@ -3,11 +3,34 @@
#define __LIBPERF_INTERNAL_TESTS_H
#include <stdio.h>
+#include <unistd.h>
-int tests_failed;
+extern int tests_failed;
+extern int tests_verbose;
+
+static inline int get_verbose(char **argv, int argc)
+{
+ int c;
+ int verbose = 0;
+
+ while ((c = getopt(argc, argv, "v")) != -1) {
+ switch (c)
+ {
+ case 'v':
+ verbose = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ optind = 1;
+
+ return verbose;
+}
#define __T_START \
do { \
+ tests_verbose = get_verbose(argv, argc); \
fprintf(stdout, "- running %s...", __FILE__); \
fflush(NULL); \
tests_failed = 0; \
@@ -30,4 +53,15 @@ do {
} \
} while (0)
+#define __T_VERBOSE(...) \
+do { \
+ if (tests_verbose) { \
+ if (tests_verbose == 1) { \
+ fputc('\n', stderr); \
+ tests_verbose++; \
+ } \
+ fprintf(stderr, ##__VA_ARGS__); \
+ } \
+} while (0)
+
#endif /* __LIBPERF_INTERNAL_TESTS_H */
diff --git a/tools/lib/perf/include/internal/xyarray.h b/tools/lib/perf/include/internal/xyarray.h
index 51e35d6c8ec4..f10af3da7b21 100644
--- a/tools/lib/perf/include/internal/xyarray.h
+++ b/tools/lib/perf/include/internal/xyarray.h
@@ -18,11 +18,18 @@ struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
void xyarray__delete(struct xyarray *xy);
void xyarray__reset(struct xyarray *xy);
-static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
+static inline void *__xyarray__entry(struct xyarray *xy, int x, int y)
{
return &xy->contents[x * xy->row_size + y * xy->entry_size];
}
+static inline void *xyarray__entry(struct xyarray *xy, size_t x, size_t y)
+{
+ if (x >= xy->max_x || y >= xy->max_y)
+ return NULL;
+ return __xyarray__entry(xy, x, y);
+}
+
static inline int xyarray__max_y(struct xyarray *xy)
{
return xy->max_y;
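
The bounds check added to xyarray__entry() above is what lets the FD() and MMAP() accessors in evsel.c hand back NULL for an out-of-range index instead of stray memory; callers test the pointer before dereferencing, roughly as in this sketch (get_fd() is illustrative only):

#include <errno.h>
#include <internal/xyarray.h>

/* Sketch: the pattern the reworked evsel.c uses around xyarray__entry(). */
static int get_fd(struct xyarray *fds, int cpu_map_idx, int thread)
{
	int *fd = xyarray__entry(fds, cpu_map_idx, thread);

	if (fd == NULL || *fd < 0)	/* out of range, or event not opened */
		return -EBADF;

	return *fd;
}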
diff --git a/tools/lib/perf/include/perf/bpf_perf.h b/tools/lib/perf/include/perf/bpf_perf.h
new file mode 100644
index 000000000000..e7cf6ba7b674
--- /dev/null
+++ b/tools/lib/perf/include/perf/bpf_perf.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+#ifndef __LIBPERF_BPF_PERF_H
+#define __LIBPERF_BPF_PERF_H
+
+#include <linux/types.h> /* for __u32 */
+
+/*
+ * bpf_perf uses a hashmap, the attr_map, to track all the leader programs.
+ * The hashmap is pinned in bpffs. flock() on this file is used to ensure
+ * no concurrent access to the attr_map. The key of attr_map is struct
+ * perf_event_attr, and the value is struct perf_event_attr_map_entry.
+ *
+ * struct perf_event_attr_map_entry contains two __u32 IDs, bpf_link of the
+ * leader prog, and the diff_map. Each perf-stat session holds a reference
+ * to the bpf_link to make sure the leader prog is attached to sched_switch
+ * tracepoint.
+ *
+ * Since the hashmap only contains IDs of the bpf_link and diff_map, it
+ * does not hold any references to the leader program. Once all perf-stat
+ * sessions of these events exit, the leader prog, its maps, and the
+ * perf_events will be freed.
+ */
+struct perf_event_attr_map_entry {
+ __u32 link_id;
+ __u32 diff_map_id;
+};
+
+/* default attr_map name */
+#define BPF_PERF_DEFAULT_ATTR_MAP_PATH "perf_attr_map"
+
+#endif /* __LIBPERF_BPF_PERF_H */
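
To make the comment above concrete, a perf-stat style user opens the pinned attr_map, takes the flock() and looks up its perf_event_attr key. A minimal sketch, assuming a bpffs mount at /sys/fs/bpf and the libbpf helpers bpf_obj_get()/bpf_map_lookup_elem():

#include <unistd.h>
#include <sys/file.h>			/* flock() */
#include <bpf/bpf.h>			/* bpf_obj_get(), bpf_map_lookup_elem() */
#include <linux/perf_event.h>
#include <perf/bpf_perf.h>

/* Sketch: fetch the leader's link/diff-map ids for one event attribute. */
static int attr_map_lookup(struct perf_event_attr *attr,
			   struct perf_event_attr_map_entry *entry)
{
	int err, map_fd = bpf_obj_get("/sys/fs/bpf/" BPF_PERF_DEFAULT_ATTR_MAP_PATH);

	if (map_fd < 0)
		return map_fd;

	flock(map_fd, LOCK_EX);		/* serialize access to the attr_map */
	err = bpf_map_lookup_elem(map_fd, attr, entry);
	flock(map_fd, LOCK_UN);
	close(map_fd);
	return err;
}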
diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h
index 6a17ad730cbc..3f43f770cdac 100644
--- a/tools/lib/perf/include/perf/cpumap.h
+++ b/tools/lib/perf/include/perf/cpumap.h
@@ -6,23 +6,33 @@
#include <stdio.h>
#include <stdbool.h>
+/** A wrapper around a CPU to avoid confusion with the perf_cpu_map's map's indices. */
+struct perf_cpu {
+ int cpu;
+};
+
struct perf_cpu_map;
LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void);
+LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map);
LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig,
struct perf_cpu_map *other);
LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map);
-LIBPERF_API int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
+LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx);
LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus);
LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map);
-LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map);
+LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map);
+LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu);
#define perf_cpu_map__for_each_cpu(cpu, idx, cpus) \
for ((idx) = 0, (cpu) = perf_cpu_map__cpu(cpus, idx); \
(idx) < perf_cpu_map__nr(cpus); \
(idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx))
+#define perf_cpu_map__for_each_idx(idx, cpus) \
+ for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++)
+
#endif /* __LIBPERF_CPUMAP_H */
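
Note that perf_cpu_map__cpu() and the iterator above now yield a struct perf_cpu rather than a plain int, so callers read the .cpu member. A minimal usage sketch:

#include <stdio.h>
#include <perf/cpumap.h>

int main(void)
{
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);	/* all online CPUs */
	struct perf_cpu cpu;
	int idx;

	if (!cpus)
		return 1;

	perf_cpu_map__for_each_cpu(cpu, idx, cpus)
		printf("map index %d -> CPU %d\n", idx, cpu.cpu);

	perf_cpu_map__put(cpus);
	return 0;
}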
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 69b44d2cc0f5..51b9338f4c11 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -8,6 +8,8 @@
#include <linux/bpf.h>
#include <sys/types.h> /* pid_t */
+#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
+
struct perf_record_mmap {
struct perf_event_header header;
__u32 pid, tid;
@@ -23,10 +25,20 @@ struct perf_record_mmap2 {
__u64 start;
__u64 len;
__u64 pgoff;
- __u32 maj;
- __u32 min;
- __u64 ino;
- __u64 ino_generation;
+ union {
+ struct {
+ __u32 maj;
+ __u32 min;
+ __u64 ino;
+ __u64 ino_generation;
+ };
+ struct {
+ __u8 build_id_size;
+ __u8 __reserved_1;
+ __u16 __reserved_2;
+ __u8 build_id[20];
+ };
+ };
__u32 prot;
__u32 flags;
char filename[PATH_MAX];
@@ -58,13 +70,15 @@ struct perf_record_lost {
__u64 lost;
};
+#define PERF_RECORD_MISC_LOST_SAMPLES_BPF (1 << 15)
+
struct perf_record_lost_samples {
struct perf_event_header header;
__u64 lost;
};
/*
- * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
+ * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID | PERF_FORMAT_LOST
*/
struct perf_record_read {
struct perf_event_header header;
@@ -73,6 +87,7 @@ struct perf_record_read {
__u64 time_enabled;
__u64 time_running;
__u64 id;
+ __u64 lost;
};
struct perf_record_throttle {
@@ -83,7 +98,7 @@ struct perf_record_throttle {
};
#ifndef KSYM_NAME_LEN
-#define KSYM_NAME_LEN 256
+#define KSYM_NAME_LEN 512
#endif
struct perf_record_ksymbol {
@@ -111,6 +126,14 @@ struct perf_record_cgroup {
char path[PATH_MAX];
};
+struct perf_record_text_poke_event {
+ struct perf_event_header header;
+ __u64 addr;
+ __u16 old_len;
+ __u16 new_len;
+ __u8 bytes[];
+};
+
struct perf_record_sample {
struct perf_event_header header;
__u64 array[];
@@ -131,23 +154,75 @@ struct perf_record_header_attr {
enum {
PERF_CPU_MAP__CPUS = 0,
PERF_CPU_MAP__MASK = 1,
+ PERF_CPU_MAP__RANGE_CPUS = 2,
};
+/*
+ * Array encoding of a perf_cpu_map where nr is the number of entries in cpu[]
+ * and each entry is a value for a CPU in the map.
+ */
struct cpu_map_entries {
__u16 nr;
__u16 cpu[];
};
-struct perf_record_record_cpu_map {
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 32-bit. */
+struct perf_record_mask_cpu_map32 {
+ /* Number of mask values. */
__u16 nr;
+ /* Constant 4. */
__u16 long_size;
- unsigned long mask[];
+ /* Bitmap data. */
+ __u32 mask[];
+};
+
+/* Bitmap encoding of a perf_cpu_map where bitmap entries are 64-bit. */
+struct perf_record_mask_cpu_map64 {
+ /* Number of mask values. */
+ __u16 nr;
+ /* Constant 8. */
+ __u16 long_size;
+ /* Legacy padding. */
+ char __pad[4];
+ /* Bitmap data. */
+ __u64 mask[];
+};
+
+/*
+ * 'struct perf_record_cpu_map_data' is packed as unfortunately an earlier
+ * version had unaligned data and we wish to retain file format compatibility.
+ * -irogers
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpacked"
+#pragma GCC diagnostic ignored "-Wattributes"
+
+/*
+ * An encoding of a CPU map for a range starting at start_cpu through to
+ * end_cpu. If any_cpu is 1, an any CPU (-1) value (aka dummy value) is present.
+ */
+struct perf_record_range_cpu_map {
+ __u8 any_cpu;
+ __u8 __pad;
+ __u16 start_cpu;
+ __u16 end_cpu;
};
struct perf_record_cpu_map_data {
__u16 type;
- char data[];
-};
+ union {
+ /* Used when type == PERF_CPU_MAP__CPUS. */
+ struct cpu_map_entries cpus_data;
+ /* Used when type == PERF_CPU_MAP__MASK and long_size == 4. */
+ struct perf_record_mask_cpu_map32 mask32_data;
+ /* Used when type == PERF_CPU_MAP__MASK and long_size == 8. */
+ struct perf_record_mask_cpu_map64 mask64_data;
+ /* Used when type == PERF_CPU_MAP__RANGE_CPUS. */
+ struct perf_record_range_cpu_map range_cpu_data;
+ };
+} __attribute__((packed));
+
+#pragma GCC diagnostic pop
struct perf_record_cpu_map {
struct perf_event_header header;
@@ -173,7 +248,16 @@ struct perf_record_event_update {
struct perf_event_header header;
__u64 type;
__u64 id;
- char data[];
+ union {
+ /* Used when type == PERF_EVENT_UPDATE__SCALE. */
+ struct perf_record_event_update_scale scale;
+ /* Used when type == PERF_EVENT_UPDATE__UNIT. */
+ char unit[0];
+ /* Used when type == PERF_EVENT_UPDATE__NAME. */
+ char name[0];
+ /* Used when type == PERF_EVENT_UPDATE__CPUS. */
+ struct perf_record_event_update_cpus cpus;
+ };
};
#define MAX_EVENT_NAME 64
@@ -193,10 +277,20 @@ struct perf_record_header_tracing_data {
__u32 size;
};
+#define PERF_RECORD_MISC_BUILD_ID_SIZE (1 << 15)
+
struct perf_record_header_build_id {
struct perf_event_header header;
pid_t pid;
- __u8 build_id[24];
+ union {
+ __u8 build_id[24];
+ struct {
+ __u8 data[20];
+ __u8 size;
+ __u8 reserved1__;
+ __u16 reserved2__;
+ };
+ };
char filename[];
};
@@ -207,10 +301,15 @@ struct id_index_entry {
__u64 tid;
};
+struct id_index_entry_2 {
+ __u64 machine_pid;
+ __u64 vcpu;
+};
+
struct perf_record_id_index {
struct perf_event_header header;
__u64 nr;
- struct id_index_entry entries[0];
+ struct id_index_entry entries[];
};
struct perf_record_auxtrace_info {
@@ -244,6 +343,8 @@ struct perf_record_auxtrace_error {
__u64 ip;
__u64 time;
char msg[MAX_AUXTRACE_ERROR_MSG];
+ __u32 machine_pid;
+ __u32 vcpu;
};
struct perf_record_aux {
@@ -259,6 +360,11 @@ struct perf_record_itrace_start {
__u32 tid;
};
+struct perf_record_aux_output_hw_id {
+ struct perf_event_header header;
+ __u64 hw_id;
+};
+
struct perf_record_thread_map_entry {
__u64 pid;
char comm[16];
@@ -316,6 +422,11 @@ struct perf_record_time_conv {
__u64 time_shift;
__u64 time_mult;
__u64 time_zero;
+ __u64 time_cycles;
+ __u64 time_mask;
+ __u8 cap_user_time_zero;
+ __u8 cap_user_time_short;
+ __u8 reserved[6]; /* For alignment */
};
struct perf_record_header_feature {
@@ -349,6 +460,7 @@ enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_TIME_CONV = 79,
PERF_RECORD_HEADER_FEATURE = 80,
PERF_RECORD_COMPRESSED = 81,
+ PERF_RECORD_FINISHED_INIT = 82,
PERF_RECORD_HEADER_MAX
};
@@ -367,6 +479,7 @@ union perf_event {
struct perf_record_sample sample;
struct perf_record_bpf_event bpf;
struct perf_record_ksymbol ksymbol;
+ struct perf_record_text_poke_event text_poke;
struct perf_record_header_attr attr;
struct perf_record_event_update event_update;
struct perf_record_header_event_type event_type;
@@ -378,6 +491,7 @@ union perf_event {
struct perf_record_auxtrace_error auxtrace_error;
struct perf_record_aux aux;
struct perf_record_itrace_start itrace_start;
+ struct perf_record_aux_output_hw_id aux_output_hw_id;
struct perf_record_switch context_switch;
struct perf_record_thread_map thread_map;
struct perf_record_cpu_map cpu_map;
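
Tying the cpu_map encodings above together, a consumer of PERF_RECORD_CPU_MAP switches on type and reads the matching union member. A rough sketch for the new range encoding only (the helper name is illustrative):

#include <perf/event.h>

/* Sketch: number of entries described by a PERF_CPU_MAP__RANGE_CPUS record. */
static int range_cpu_map_nr(const struct perf_record_cpu_map_data *data)
{
	if (data->type != PERF_CPU_MAP__RANGE_CPUS)
		return -1;

	/* Inclusive range, plus one dummy -1 entry when any_cpu is set. */
	return data->range_cpu_data.end_cpu - data->range_cpu_data.start_cpu + 1 +
	       data->range_cpu_data.any_cpu;
}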
diff --git a/tools/lib/perf/include/perf/evlist.h b/tools/lib/perf/include/perf/evlist.h
index 0a7479dc13bf..e894b770779e 100644
--- a/tools/lib/perf/include/perf/evlist.h
+++ b/tools/lib/perf/include/perf/evlist.h
@@ -46,4 +46,6 @@ LIBPERF_API struct perf_mmap *perf_evlist__next_mmap(struct perf_evlist *evlist,
(pos) != NULL; \
(pos) = perf_evlist__next_mmap((evlist), (pos), overwrite))
+LIBPERF_API void perf_evlist__set_leader(struct perf_evlist *evlist);
+LIBPERF_API int perf_evlist__nr_groups(struct perf_evlist *evlist);
#endif /* __LIBPERF_EVLIST_H */
diff --git a/tools/lib/perf/include/perf/evsel.h b/tools/lib/perf/include/perf/evsel.h
index c82ec39a4ad0..6f92204075c2 100644
--- a/tools/lib/perf/include/perf/evsel.h
+++ b/tools/lib/perf/include/perf/evsel.h
@@ -4,6 +4,8 @@
#include <stdint.h>
#include <perf/core.h>
+#include <stdbool.h>
+#include <linux/types.h>
struct perf_evsel;
struct perf_event_attr;
@@ -16,8 +18,10 @@ struct perf_counts_values {
uint64_t val;
uint64_t ena;
uint64_t run;
+ uint64_t id;
+ uint64_t lost;
};
- uint64_t values[3];
+ uint64_t values[5];
};
};
@@ -26,15 +30,21 @@ LIBPERF_API void perf_evsel__delete(struct perf_evsel *evsel);
LIBPERF_API int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads);
LIBPERF_API void perf_evsel__close(struct perf_evsel *evsel);
-LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu);
-LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+LIBPERF_API void perf_evsel__close_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+LIBPERF_API int perf_evsel__mmap(struct perf_evsel *evsel, int pages);
+LIBPERF_API void perf_evsel__munmap(struct perf_evsel *evsel);
+LIBPERF_API void *perf_evsel__mmap_base(struct perf_evsel *evsel, int cpu_map_idx, int thread);
+LIBPERF_API int perf_evsel__read(struct perf_evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count);
LIBPERF_API int perf_evsel__enable(struct perf_evsel *evsel);
-LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu);
+LIBPERF_API int perf_evsel__enable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
+LIBPERF_API int perf_evsel__enable_thread(struct perf_evsel *evsel, int thread);
LIBPERF_API int perf_evsel__disable(struct perf_evsel *evsel);
-LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu);
+LIBPERF_API int perf_evsel__disable_cpu(struct perf_evsel *evsel, int cpu_map_idx);
LIBPERF_API struct perf_cpu_map *perf_evsel__cpus(struct perf_evsel *evsel);
LIBPERF_API struct perf_thread_map *perf_evsel__threads(struct perf_evsel *evsel);
LIBPERF_API struct perf_event_attr *perf_evsel__attr(struct perf_evsel *evsel);
+LIBPERF_API void perf_counts_values__scale(struct perf_counts_values *count,
+ bool scale, __s8 *pscaled);
#endif /* __LIBPERF_EVSEL_H */
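
Taken together, the new mmap entry points let a task count itself and have perf_evsel__read() served from the user page (rdpmc/rdtsc) when the read format permits. A minimal self-counting sketch with error handling trimmed:

#include <stdint.h>
#include <linux/perf_event.h>
#include <perf/evsel.h>
#include <perf/threadmap.h>

static uint64_t count_own_instructions(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_INSTRUCTIONS,
	};
	struct perf_thread_map *threads = perf_thread_map__new_dummy();
	struct perf_evsel *evsel = perf_evsel__new(&attr);
	struct perf_counts_values counts = { .val = 0 };

	perf_thread_map__set_pid(threads, 0, 0);	/* pid 0 == calling process */
	perf_evsel__open(evsel, NULL, threads);
	perf_evsel__mmap(evsel, 0);		/* user page only, enables the fast path */

	perf_evsel__read(evsel, 0, 0, &counts);	/* cpu_map_idx 0, thread 0 */

	perf_evsel__munmap(evsel);
	perf_evsel__close(evsel);
	perf_evsel__delete(evsel);
	perf_thread_map__put(threads);
	return counts.val;
}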
diff --git a/tools/lib/perf/include/perf/threadmap.h b/tools/lib/perf/include/perf/threadmap.h
index a7c50de8d010..8b40e7777cea 100644
--- a/tools/lib/perf/include/perf/threadmap.h
+++ b/tools/lib/perf/include/perf/threadmap.h
@@ -8,11 +8,12 @@
struct perf_thread_map;
LIBPERF_API struct perf_thread_map *perf_thread_map__new_dummy(void);
+LIBPERF_API struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array);
-LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid);
-LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int thread);
+LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid);
+LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int idx);
LIBPERF_API int perf_thread_map__nr(struct perf_thread_map *threads);
-LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread);
+LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx);
LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map);
LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map);
diff --git a/tools/lib/perf/lib.c b/tools/lib/perf/lib.c
index 18658931fc71..696fb0ea67c6 100644
--- a/tools/lib/perf/lib.c
+++ b/tools/lib/perf/lib.c
@@ -38,6 +38,26 @@ ssize_t readn(int fd, void *buf, size_t n)
return ion(true, fd, buf, n);
}
+ssize_t preadn(int fd, void *buf, size_t n, off_t offs)
+{
+ size_t left = n;
+
+ while (left) {
+ ssize_t ret = pread(fd, buf, left, offs);
+
+ if (ret < 0 && errno == EINTR)
+ continue;
+ if (ret <= 0)
+ return ret;
+
+ left -= ret;
+ buf += ret;
+ offs += ret;
+ }
+
+ return n;
+}
+
/*
* Write exactly 'n' bytes or return an error.
*/
diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map
index 7be1af8a546c..190b56ae923a 100644
--- a/tools/lib/perf/libperf.map
+++ b/tools/lib/perf/libperf.map
@@ -2,6 +2,7 @@ LIBPERF_0.0.1 {
global:
libperf_init;
perf_cpu_map__dummy_new;
+ perf_cpu_map__default_new;
perf_cpu_map__get;
perf_cpu_map__put;
perf_cpu_map__new;
@@ -10,6 +11,8 @@ LIBPERF_0.0.1 {
perf_cpu_map__cpu;
perf_cpu_map__empty;
perf_cpu_map__max;
+ perf_cpu_map__has;
+ perf_thread_map__new_array;
perf_thread_map__new_dummy;
perf_thread_map__set_pid;
perf_thread_map__comm;
@@ -23,6 +26,9 @@ LIBPERF_0.0.1 {
perf_evsel__disable;
perf_evsel__open;
perf_evsel__close;
+ perf_evsel__mmap;
+ perf_evsel__munmap;
+ perf_evsel__mmap_base;
perf_evsel__read;
perf_evsel__cpus;
perf_evsel__threads;
@@ -42,10 +48,12 @@ LIBPERF_0.0.1 {
perf_evlist__munmap;
perf_evlist__filter_pollfd;
perf_evlist__next_mmap;
+ perf_evlist__set_leader;
perf_mmap__consume;
perf_mmap__read_init;
perf_mmap__read_done;
perf_mmap__read_event;
+ perf_counts_values__scale;
local:
*;
};
diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c
index 79d5ed6c38cc..0d1634cedf44 100644
--- a/tools/lib/perf/mmap.c
+++ b/tools/lib/perf/mmap.c
@@ -8,9 +8,12 @@
#include <linux/perf_event.h>
#include <perf/mmap.h>
#include <perf/event.h>
+#include <perf/evsel.h>
#include <internal/mmap.h>
#include <internal/lib.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/stringify.h>
#include "internal.h"
void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
@@ -30,7 +33,7 @@ size_t perf_mmap__mmap_len(struct perf_mmap *map)
}
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
- int fd, int cpu)
+ int fd, struct perf_cpu cpu)
{
map->prev = 0;
map->mask = mp->mask;
@@ -273,3 +276,184 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map)
return event;
}
+
+#if defined(__i386__) || defined(__x86_64__)
+static u64 read_perf_counter(unsigned int counter)
+{
+ unsigned int low, high;
+
+ asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
+
+ return low | ((u64)high) << 32;
+}
+
+static u64 read_timestamp(void)
+{
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((u64)high) << 32;
+}
+#elif defined(__aarch64__)
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+static u64 read_pmccntr(void)
+{
+ return read_sysreg(pmccntr_el0);
+}
+
+#define PMEVCNTR_READ(idx) \
+ static u64 read_pmevcntr_##idx(void) { \
+ return read_sysreg(pmevcntr##idx##_el0); \
+ }
+
+PMEVCNTR_READ(0);
+PMEVCNTR_READ(1);
+PMEVCNTR_READ(2);
+PMEVCNTR_READ(3);
+PMEVCNTR_READ(4);
+PMEVCNTR_READ(5);
+PMEVCNTR_READ(6);
+PMEVCNTR_READ(7);
+PMEVCNTR_READ(8);
+PMEVCNTR_READ(9);
+PMEVCNTR_READ(10);
+PMEVCNTR_READ(11);
+PMEVCNTR_READ(12);
+PMEVCNTR_READ(13);
+PMEVCNTR_READ(14);
+PMEVCNTR_READ(15);
+PMEVCNTR_READ(16);
+PMEVCNTR_READ(17);
+PMEVCNTR_READ(18);
+PMEVCNTR_READ(19);
+PMEVCNTR_READ(20);
+PMEVCNTR_READ(21);
+PMEVCNTR_READ(22);
+PMEVCNTR_READ(23);
+PMEVCNTR_READ(24);
+PMEVCNTR_READ(25);
+PMEVCNTR_READ(26);
+PMEVCNTR_READ(27);
+PMEVCNTR_READ(28);
+PMEVCNTR_READ(29);
+PMEVCNTR_READ(30);
+
+/*
+ * Read a value direct from PMEVCNTR<idx>
+ */
+static u64 read_perf_counter(unsigned int counter)
+{
+ static u64 (* const read_f[])(void) = {
+ read_pmevcntr_0,
+ read_pmevcntr_1,
+ read_pmevcntr_2,
+ read_pmevcntr_3,
+ read_pmevcntr_4,
+ read_pmevcntr_5,
+ read_pmevcntr_6,
+ read_pmevcntr_7,
+ read_pmevcntr_8,
+ read_pmevcntr_9,
+ read_pmevcntr_10,
+ read_pmevcntr_11,
+ read_pmevcntr_12,
+ read_pmevcntr_13,
+ read_pmevcntr_14,
+ read_pmevcntr_15,
+ read_pmevcntr_16,
+ read_pmevcntr_17,
+ read_pmevcntr_18,
+ read_pmevcntr_19,
+ read_pmevcntr_20,
+ read_pmevcntr_21,
+ read_pmevcntr_22,
+ read_pmevcntr_23,
+ read_pmevcntr_24,
+ read_pmevcntr_25,
+ read_pmevcntr_26,
+ read_pmevcntr_27,
+ read_pmevcntr_28,
+ read_pmevcntr_29,
+ read_pmevcntr_30,
+ read_pmccntr
+ };
+
+ if (counter < ARRAY_SIZE(read_f))
+ return (read_f[counter])();
+
+ return 0;
+}
+
+static u64 read_timestamp(void) { return read_sysreg(cntvct_el0); }
+
+#else
+static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
+static u64 read_timestamp(void) { return 0; }
+#endif
+
+int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count)
+{
+ struct perf_event_mmap_page *pc = map->base;
+ u32 seq, idx, time_mult = 0, time_shift = 0;
+ u64 cnt, cyc = 0, time_offset = 0, time_cycles = 0, time_mask = ~0ULL;
+
+ if (!pc || !pc->cap_user_rdpmc)
+ return -1;
+
+ do {
+ seq = READ_ONCE(pc->lock);
+ barrier();
+
+ count->ena = READ_ONCE(pc->time_enabled);
+ count->run = READ_ONCE(pc->time_running);
+
+ if (pc->cap_user_time && count->ena != count->run) {
+ cyc = read_timestamp();
+ time_mult = READ_ONCE(pc->time_mult);
+ time_shift = READ_ONCE(pc->time_shift);
+ time_offset = READ_ONCE(pc->time_offset);
+
+ if (pc->cap_user_time_short) {
+ time_cycles = READ_ONCE(pc->time_cycles);
+ time_mask = READ_ONCE(pc->time_mask);
+ }
+ }
+
+ idx = READ_ONCE(pc->index);
+ cnt = READ_ONCE(pc->offset);
+ if (pc->cap_user_rdpmc && idx) {
+ s64 evcnt = read_perf_counter(idx - 1);
+ u16 width = READ_ONCE(pc->pmc_width);
+
+ evcnt <<= 64 - width;
+ evcnt >>= 64 - width;
+ cnt += evcnt;
+ } else
+ return -1;
+
+ barrier();
+ } while (READ_ONCE(pc->lock) != seq);
+
+ if (count->ena != count->run) {
+ u64 delta;
+
+ /* Adjust for cap_usr_time_short, a nop if not */
+ cyc = time_cycles + ((cyc - time_cycles) & time_mask);
+
+ delta = time_offset + mul_u64_u32_shr(cyc, time_mult, time_shift);
+
+ count->ena += delta;
+ if (idx)
+ count->run += delta;
+ }
+
+ count->val = cnt;
+
+ return 0;
+}
diff --git a/tools/lib/perf/tests/Build b/tools/lib/perf/tests/Build
new file mode 100644
index 000000000000..56e81378d443
--- /dev/null
+++ b/tools/lib/perf/tests/Build
@@ -0,0 +1,5 @@
+tests-y += main.o
+tests-y += test-evsel.o
+tests-y += test-evlist.o
+tests-y += test-cpumap.o
+tests-y += test-threadmap.o
diff --git a/tools/lib/perf/tests/Makefile b/tools/lib/perf/tests/Makefile
deleted file mode 100644
index 96841775feaf..000000000000
--- a/tools/lib/perf/tests/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
-
-TESTS = test-cpumap test-threadmap test-evlist test-evsel
-
-TESTS_SO := $(addsuffix -so,$(TESTS))
-TESTS_A := $(addsuffix -a,$(TESTS))
-
-# Set compile option CFLAGS
-ifdef EXTRA_CFLAGS
- CFLAGS := $(EXTRA_CFLAGS)
-else
- CFLAGS := -g -Wall
-endif
-
-all:
-
-include $(srctree)/tools/scripts/Makefile.include
-
-INCLUDE = -I$(srctree)/tools/lib/perf/include -I$(srctree)/tools/include -I$(srctree)/tools/lib
-
-$(TESTS_A): FORCE
- $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -o $@ $(subst -a,.c,$@) ../libperf.a $(LIBAPI)
-
-$(TESTS_SO): FORCE
- $(QUIET_LINK)$(CC) $(INCLUDE) $(CFLAGS) -L.. -o $@ $(subst -so,.c,$@) $(LIBAPI) -lperf
-
-all: $(TESTS_A) $(TESTS_SO)
-
-run:
- @echo "running static:"
- @for i in $(TESTS_A); do ./$$i; done
- @echo "running dynamic:"
- @for i in $(TESTS_SO); do LD_LIBRARY_PATH=../ ./$$i; done
-
-clean:
- $(call QUIET_CLEAN, tests)$(RM) $(TESTS_A) $(TESTS_SO)
-
-.PHONY: all clean FORCE
diff --git a/tools/lib/perf/tests/main.c b/tools/lib/perf/tests/main.c
new file mode 100644
index 000000000000..56423fd4db19
--- /dev/null
+++ b/tools/lib/perf/tests/main.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <internal/tests.h>
+#include "tests.h"
+
+int tests_failed;
+int tests_verbose;
+
+int main(int argc, char **argv)
+{
+ __T("test cpumap", !test_cpumap(argc, argv));
+ __T("test threadmap", !test_threadmap(argc, argv));
+ __T("test evlist", !test_evlist(argc, argv));
+ __T("test evsel", !test_evsel(argc, argv));
+ return 0;
+}
diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c
index c8d45091e7c2..87b0510a556f 100644
--- a/tools/lib/perf/tests/test-cpumap.c
+++ b/tools/lib/perf/tests/test-cpumap.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <perf/cpumap.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -10,9 +11,11 @@ static int libperf_print(enum libperf_print_level level,
return vfprintf(stderr, fmt, ap);
}
-int main(int argc, char **argv)
+int test_cpumap(int argc, char **argv)
{
struct perf_cpu_map *cpus;
+ struct perf_cpu cpu;
+ int idx;
__T_START;
@@ -26,6 +29,15 @@ int main(int argc, char **argv)
perf_cpu_map__put(cpus);
perf_cpu_map__put(cpus);
+ cpus = perf_cpu_map__default_new();
+ if (!cpus)
+ return -1;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ __T("wrong cpu number", cpu.cpu != -1);
+
+ perf_cpu_map__put(cpus);
+
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c
index 6d8ebe0c2504..ed616fc19b4f 100644
--- a/tools/lib/perf/tests/test-evlist.c
+++ b/tools/lib/perf/tests/test-evlist.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE // needed for sched.h to get sched_[gs]etaffinity and CPU_(ZERO,SET)
+#include <inttypes.h>
#include <sched.h>
#include <stdio.h>
#include <stdarg.h>
@@ -18,6 +19,11 @@
#include <perf/event.h>
#include <internal/tests.h>
#include <api/fs/fs.h>
+#include "tests.h"
+#include <internal/evsel.h>
+
+#define EVENT_NUM 15
+#define WAIT_COUNT 100000000UL
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -29,7 +35,7 @@ static int test_stat_cpu(void)
{
struct perf_cpu_map *cpus;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -38,7 +44,7 @@ static int test_stat_cpu(void)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_TASK_CLOCK,
};
- int err, cpu, tmp;
+ int err, idx;
cpus = perf_cpu_map__new(NULL);
__T("failed to create cpus", cpus);
@@ -46,7 +52,7 @@ static int test_stat_cpu(void)
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -56,18 +62,22 @@ static int test_stat_cpu(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, cpus, NULL);
err = perf_evlist__open(evlist);
- __T("failed to open evsel", err == 0);
+ __T("failed to open evlist", err == 0);
perf_evlist__for_each_evsel(evlist, evsel) {
cpus = perf_evsel__cpus(evsel);
- perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
struct perf_counts_values counts = { .val = 0 };
- perf_evsel__read(evsel, cpu, 0, &counts);
+ perf_evsel__read(evsel, idx, 0, &counts);
__T("failed to read value for evsel", counts.val != 0);
}
}
@@ -84,7 +94,7 @@ static int test_stat_thread(void)
struct perf_counts_values counts = { .val = 0 };
struct perf_thread_map *threads;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -103,7 +113,7 @@ static int test_stat_thread(void)
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -113,10 +123,14 @@ static int test_stat_thread(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, NULL, threads);
err = perf_evlist__open(evlist);
- __T("failed to open evsel", err == 0);
+ __T("failed to open evlist", err == 0);
perf_evlist__for_each_evsel(evlist, evsel) {
perf_evsel__read(evsel, 0, 0, &counts);
@@ -135,7 +149,7 @@ static int test_stat_thread_enable(void)
struct perf_counts_values counts = { .val = 0 };
struct perf_thread_map *threads;
struct perf_evlist *evlist;
- struct perf_evsel *evsel;
+ struct perf_evsel *evsel, *leader;
struct perf_event_attr attr1 = {
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
@@ -156,7 +170,7 @@ static int test_stat_thread_enable(void)
evlist = perf_evlist__new();
__T("failed to create evlist", evlist);
- evsel = perf_evsel__new(&attr1);
+ evsel = leader = perf_evsel__new(&attr1);
__T("failed to create evsel1", evsel);
perf_evlist__add(evlist, evsel);
@@ -166,10 +180,14 @@ static int test_stat_thread_enable(void)
perf_evlist__add(evlist, evsel);
+ perf_evlist__set_leader(evlist);
+ __T("failed to set leader", leader->leader == leader);
+ __T("failed to set leader", evsel->leader == leader);
+
perf_evlist__set_maps(evlist, NULL, threads);
err = perf_evlist__open(evlist);
- __T("failed to open evsel", err == 0);
+ __T("failed to open evlist", err == 0);
perf_evlist__for_each_evsel(evlist, evsel) {
perf_evsel__read(evsel, 0, 0, &counts);
@@ -208,13 +226,13 @@ static int test_mmap_thread(void)
char path[PATH_MAX];
int id, err, pid, go_pipe[2];
union perf_event *event;
- char bf;
int count = 0;
snprintf(path, PATH_MAX, "%s/kernel/debug/tracing/events/syscalls/sys_enter_prctl/id",
sysfs__mountpoint());
if (filename__read_int(path, &id)) {
+ tests_failed++;
fprintf(stderr, "error: failed to get tracepoint id: %s\n", path);
return -1;
}
@@ -229,6 +247,7 @@ static int test_mmap_thread(void)
pid = fork();
if (!pid) {
int i;
+ char bf;
read(go_pipe[0], &bf, 1);
@@ -252,6 +271,7 @@ static int test_mmap_thread(void)
evsel = perf_evsel__new(&attr);
__T("failed to create evsel1", evsel);
+ __T("failed to set leader", evsel->leader == evsel);
perf_evlist__add(evlist, evsel);
@@ -266,7 +286,7 @@ static int test_mmap_thread(void)
perf_evlist__enable(evlist);
/* kick the child and wait for it to finish */
- write(go_pipe[1], &bf, 1);
+ write(go_pipe[1], "A", 1);
waitpid(pid, NULL, 0);
/*
@@ -315,7 +335,8 @@ static int test_mmap_cpus(void)
};
cpu_set_t saved_mask;
char path[PATH_MAX];
- int id, err, cpu, tmp;
+ int id, err, tmp;
+ struct perf_cpu cpu;
union perf_event *event;
int count = 0;
@@ -337,6 +358,7 @@ static int test_mmap_cpus(void)
evsel = perf_evsel__new(&attr);
__T("failed to create evsel1", evsel);
+ __T("failed to set leader", evsel->leader == evsel);
perf_evlist__add(evlist, evsel);
@@ -357,7 +379,7 @@ static int test_mmap_cpus(void)
cpu_set_t mask;
CPU_ZERO(&mask);
- CPU_SET(cpu, &mask);
+ CPU_SET(cpu.cpu, &mask);
err = sched_setaffinity(0, sizeof(mask), &mask);
__T("sched_setaffinity failed", err == 0);
@@ -396,7 +418,160 @@ static int test_mmap_cpus(void)
return 0;
}
-int main(int argc, char **argv)
+static double display_error(long long average,
+ long long high,
+ long long low,
+ long long expected)
+{
+ double error;
+
+ error = (((double)average - expected) / expected) * 100.0;
+
+ __T_VERBOSE(" Expected: %lld\n", expected);
+ __T_VERBOSE(" High: %lld Low: %lld Average: %lld\n",
+ high, low, average);
+
+ __T_VERBOSE(" Average Error = %.2f%%\n", error);
+
+ return error;
+}
+
+static int test_stat_multiplexing(void)
+{
+ struct perf_counts_values expected_counts = { .val = 0 };
+ struct perf_counts_values counts[EVENT_NUM] = {{ .val = 0 },};
+ struct perf_thread_map *threads;
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_INSTRUCTIONS,
+ .read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING,
+ .disabled = 1,
+ };
+ int err, i, nonzero = 0;
+ unsigned long count;
+ long long max = 0, min = 0, avg = 0;
+ double error = 0.0;
+ s8 scaled = 0;
+
+ /* read for non-multiplexing event count */
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ err = perf_evsel__enable(evsel);
+ __T("failed to enable evsel", err == 0);
+
+ /* wait loop */
+ count = WAIT_COUNT;
+ while (count--)
+ ;
+
+ perf_evsel__read(evsel, 0, 0, &expected_counts);
+ __T("failed to read value for evsel", expected_counts.val != 0);
+ __T("failed to read non-multiplexing event count",
+ expected_counts.ena == expected_counts.run);
+
+ err = perf_evsel__disable(evsel);
+ __T("failed to enable evsel", err == 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+
+ /* read for multiplexing event count */
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evlist = perf_evlist__new();
+ __T("failed to create evlist", evlist);
+
+ for (i = 0; i < EVENT_NUM; i++) {
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ perf_evlist__add(evlist, evsel);
+ }
+ perf_evlist__set_maps(evlist, NULL, threads);
+
+ err = perf_evlist__open(evlist);
+ __T("failed to open evlist", err == 0);
+
+ perf_evlist__enable(evlist);
+
+ /* wait loop */
+ count = WAIT_COUNT;
+ while (count--)
+ ;
+
+ i = 0;
+ perf_evlist__for_each_evsel(evlist, evsel) {
+ perf_evsel__read(evsel, 0, 0, &counts[i]);
+ __T("failed to read value for evsel", counts[i].val != 0);
+ i++;
+ }
+
+ perf_evlist__disable(evlist);
+
+ min = counts[0].val;
+ for (i = 0; i < EVENT_NUM; i++) {
+ __T_VERBOSE("Event %2d -- Raw count = %" PRIu64 ", run = %" PRIu64 ", enable = %" PRIu64 "\n",
+ i, counts[i].val, counts[i].run, counts[i].ena);
+
+ perf_counts_values__scale(&counts[i], true, &scaled);
+ if (scaled == 1) {
+ __T_VERBOSE("\t Scaled count = %" PRIu64 " (%.2lf%%, %" PRIu64 "/%" PRIu64 ")\n",
+ counts[i].val,
+ (double)counts[i].run / (double)counts[i].ena * 100.0,
+ counts[i].run, counts[i].ena);
+ } else if (scaled == -1) {
+ __T_VERBOSE("\t Not Running\n");
+ } else {
+ __T_VERBOSE("\t Not Scaling\n");
+ }
+
+ if (counts[i].val > max)
+ max = counts[i].val;
+
+ if (counts[i].val < min)
+ min = counts[i].val;
+
+ avg += counts[i].val;
+
+ if (counts[i].val != 0)
+ nonzero++;
+ }
+
+ if (nonzero != 0)
+ avg = avg / nonzero;
+ else
+ avg = 0;
+
+ error = display_error(avg, max, min, expected_counts.val);
+
+ __T("Error out of range!", ((error <= 1.0) && (error >= -1.0)));
+
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+
+ perf_thread_map__put(threads);
+
+ return 0;
+}
+
+int test_evlist(int argc, char **argv)
{
__T_START;
@@ -407,7 +582,8 @@ int main(int argc, char **argv)
test_stat_thread_enable();
test_mmap_thread();
test_mmap_cpus();
+ test_stat_multiplexing();
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c
index 135722ac965b..a11fc51bfb68 100644
--- a/tools/lib/perf/tests/test-evsel.c
+++ b/tools/lib/perf/tests/test-evsel.c
@@ -1,11 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdarg.h>
#include <stdio.h>
+#include <string.h>
#include <linux/perf_event.h>
+#include <linux/kernel.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>
#include <perf/evsel.h>
+#include <internal/evsel.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -21,7 +25,7 @@ static int test_stat_cpu(void)
.type = PERF_TYPE_SOFTWARE,
.config = PERF_COUNT_SW_CPU_CLOCK,
};
- int err, cpu, tmp;
+ int err, idx;
cpus = perf_cpu_map__new(NULL);
__T("failed to create cpus", cpus);
@@ -32,10 +36,10 @@ static int test_stat_cpu(void)
err = perf_evsel__open(evsel, cpus, NULL);
__T("failed to open evsel", err == 0);
- perf_cpu_map__for_each_cpu(cpu, tmp, cpus) {
+ for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++) {
struct perf_counts_values counts = { .val = 0 };
- perf_evsel__read(evsel, cpu, 0, &counts);
+ perf_evsel__read(evsel, idx, 0, &counts);
__T("failed to read value for evsel", counts.val != 0);
}
@@ -120,7 +124,232 @@ static int test_stat_thread_enable(void)
return 0;
}
-int main(int argc, char **argv)
+static int test_stat_user_read(int event)
+{
+ struct perf_counts_values counts = { .val = 0 };
+ struct perf_thread_map *threads;
+ struct perf_evsel *evsel;
+ struct perf_event_mmap_page *pc;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = event,
+#ifdef __aarch64__
+ .config1 = 0x2, /* Request user access */
+#endif
+ };
+ int err, i;
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ evsel = perf_evsel__new(&attr);
+ __T("failed to create evsel", evsel);
+
+ err = perf_evsel__open(evsel, NULL, threads);
+ __T("failed to open evsel", err == 0);
+
+ err = perf_evsel__mmap(evsel, 0);
+ __T("failed to mmap evsel", err == 0);
+
+ pc = perf_evsel__mmap_base(evsel, 0, 0);
+ __T("failed to get mmapped address", pc);
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+ __T("userspace counter access not supported", pc->cap_user_rdpmc);
+ __T("userspace counter access not enabled", pc->index);
+ __T("userspace counter width not set", pc->pmc_width >= 32);
+#endif
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ __T("failed to read value for evsel", counts.val != 0);
+
+ for (i = 0; i < 5; i++) {
+ volatile int count = 0x10000 << i;
+ __u64 start, end, last = 0;
+
+ __T_VERBOSE("\tloop = %u, ", count);
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ start = counts.val;
+
+ while (count--) ;
+
+ perf_evsel__read(evsel, 0, 0, &counts);
+ end = counts.val;
+
+ __T("invalid counter data", (end - start) > last);
+ last = end - start;
+ __T_VERBOSE("count = %llu\n", end - start);
+ }
+
+ perf_evsel__munmap(evsel);
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+static int test_stat_read_format_single(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+ struct perf_evsel *evsel;
+ struct perf_counts_values counts;
+ volatile int count = 0x100000;
+ int err;
+
+ evsel = perf_evsel__new(attr);
+ __T("failed to create evsel", evsel);
+
+ /* skip old kernels that don't support the format */
+ err = perf_evsel__open(evsel, NULL, threads);
+ if (err < 0)
+ return 0;
+
+ while (count--) ;
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(evsel, 0, 0, &counts);
+
+ __T("failed to read value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read LOST", counts.lost == 0);
+
+ perf_evsel__close(evsel);
+ perf_evsel__delete(evsel);
+ return 0;
+}
+
+static int test_stat_read_format_group(struct perf_event_attr *attr, struct perf_thread_map *threads)
+{
+ struct perf_evsel *leader, *member;
+ struct perf_counts_values counts;
+ volatile int count = 0x100000;
+ int err;
+
+ attr->read_format |= PERF_FORMAT_GROUP;
+ leader = perf_evsel__new(attr);
+ __T("failed to create leader", leader);
+
+ attr->read_format &= ~PERF_FORMAT_GROUP;
+ member = perf_evsel__new(attr);
+ __T("failed to create member", member);
+
+ member->leader = leader;
+ leader->nr_members = 2;
+
+ /* skip old kernels that don't support the format */
+ err = perf_evsel__open(leader, NULL, threads);
+ if (err < 0)
+ return 0;
+ err = perf_evsel__open(member, NULL, threads);
+ if (err < 0)
+ return 0;
+
+ while (count--) ;
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(leader, 0, 0, &counts);
+
+ __T("failed to read leader value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read leader TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read leader TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read leader ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read leader LOST", counts.lost == 0);
+
+ memset(&counts, -1, sizeof(counts));
+ perf_evsel__read(member, 0, 0, &counts);
+
+ __T("failed to read member value", counts.val);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ __T("failed to read member TOTAL_TIME_ENABLED", counts.ena);
+ if (attr->read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ __T("failed to read member TOTAL_TIME_RUNNING", counts.run);
+ if (attr->read_format & PERF_FORMAT_ID)
+ __T("failed to read member ID", counts.id);
+ if (attr->read_format & PERF_FORMAT_LOST)
+ __T("failed to read member LOST", counts.lost == 0);
+
+ perf_evsel__close(member);
+ perf_evsel__close(leader);
+ perf_evsel__delete(member);
+ perf_evsel__delete(leader);
+ return 0;
+}
+
+static int test_stat_read_format(void)
+{
+ struct perf_thread_map *threads;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ };
+ int err, i;
+
+#define FMT(_fmt) PERF_FORMAT_ ## _fmt
+#define FMT_TIME (FMT(TOTAL_TIME_ENABLED) | FMT(TOTAL_TIME_RUNNING))
+
+ uint64_t test_formats [] = {
+ 0,
+ FMT_TIME,
+ FMT(ID),
+ FMT(LOST),
+ FMT_TIME | FMT(ID),
+ FMT_TIME | FMT(LOST),
+ FMT_TIME | FMT(ID) | FMT(LOST),
+ FMT(ID) | FMT(LOST),
+ };
+
+#undef FMT
+#undef FMT_TIME
+
+ threads = perf_thread_map__new_dummy();
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+
+ for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+ attr.read_format = test_formats[i];
+ __T_VERBOSE("testing single read with read_format: %lx\n",
+ (unsigned long)test_formats[i]);
+
+ err = test_stat_read_format_single(&attr, threads);
+ __T("failed to read single format", err == 0);
+ }
+
+ perf_thread_map__put(threads);
+
+ threads = perf_thread_map__new_array(2, NULL);
+ __T("failed to create threads", threads);
+
+ perf_thread_map__set_pid(threads, 0, 0);
+ perf_thread_map__set_pid(threads, 1, 0);
+
+ for (i = 0; i < (int)ARRAY_SIZE(test_formats); i++) {
+ attr.read_format = test_formats[i];
+ __T_VERBOSE("testing group read with read_format: %lx\n",
+ (unsigned long)test_formats[i]);
+
+ err = test_stat_read_format_group(&attr, threads);
+ __T("failed to read group format", err == 0);
+ }
+
+ perf_thread_map__put(threads);
+ return 0;
+}
+
+int test_evsel(int argc, char **argv)
{
__T_START;
@@ -129,7 +358,10 @@ int main(int argc, char **argv)
test_stat_cpu();
test_stat_thread();
test_stat_thread_enable();
+ test_stat_user_read(PERF_COUNT_HW_INSTRUCTIONS);
+ test_stat_user_read(PERF_COUNT_HW_CPU_CYCLES);
+ test_stat_read_format();
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/test-threadmap.c b/tools/lib/perf/tests/test-threadmap.c
index 7dc4d6fbedde..f728ad7002bb 100644
--- a/tools/lib/perf/tests/test-threadmap.c
+++ b/tools/lib/perf/tests/test-threadmap.c
@@ -3,6 +3,7 @@
#include <stdio.h>
#include <perf/threadmap.h>
#include <internal/tests.h>
+#include "tests.h"
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
@@ -10,9 +11,43 @@ static int libperf_print(enum libperf_print_level level,
return vfprintf(stderr, fmt, ap);
}
-int main(int argc, char **argv)
+static int test_threadmap_array(int nr, pid_t *array)
{
struct perf_thread_map *threads;
+ int i;
+
+ threads = perf_thread_map__new_array(nr, array);
+ __T("Failed to allocate new thread map", threads);
+
+ __T("Unexpected number of threads", perf_thread_map__nr(threads) == nr);
+
+ for (i = 0; i < nr; i++) {
+ __T("Unexpected initial value of thread",
+ perf_thread_map__pid(threads, i) == (array ? array[i] : -1));
+ }
+
+ for (i = 1; i < nr; i++)
+ perf_thread_map__set_pid(threads, i, i * 100);
+
+ __T("Unexpected value of thread 0",
+ perf_thread_map__pid(threads, 0) == (array ? array[0] : -1));
+
+ for (i = 1; i < nr; i++) {
+ __T("Unexpected thread value",
+ perf_thread_map__pid(threads, i) == i * 100);
+ }
+
+ perf_thread_map__put(threads);
+
+ return 0;
+}
+
+#define THREADS_NR 10
+int test_threadmap(int argc, char **argv)
+{
+ struct perf_thread_map *threads;
+ pid_t thr_array[THREADS_NR];
+ int i;
__T_START;
@@ -26,6 +61,13 @@ int main(int argc, char **argv)
perf_thread_map__put(threads);
perf_thread_map__put(threads);
+ test_threadmap_array(THREADS_NR, NULL);
+
+ for (i = 0; i < THREADS_NR; i++)
+ thr_array[i] = i + 100;
+
+ test_threadmap_array(THREADS_NR, thr_array);
+
__T_END;
- return 0;
+ return tests_failed == 0 ? 0 : -1;
}
diff --git a/tools/lib/perf/tests/tests.h b/tools/lib/perf/tests/tests.h
new file mode 100644
index 000000000000..604838f21b2b
--- /dev/null
+++ b/tools/lib/perf/tests/tests.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef TESTS_H
+#define TESTS_H
+
+int test_cpumap(int argc, char **argv);
+int test_threadmap(int argc, char **argv);
+int test_evlist(int argc, char **argv);
+int test_evsel(int argc, char **argv);
+
+#endif /* TESTS_H */
diff --git a/tools/lib/perf/threadmap.c b/tools/lib/perf/threadmap.c
index e92c368b0a6c..07968f3ea093 100644
--- a/tools/lib/perf/threadmap.c
+++ b/tools/lib/perf/threadmap.c
@@ -32,28 +32,38 @@ struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, in
#define thread_map__alloc(__nr) perf_thread_map__realloc(NULL, __nr)
-void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid)
+void perf_thread_map__set_pid(struct perf_thread_map *map, int idx, pid_t pid)
{
- map->map[thread].pid = pid;
+ map->map[idx].pid = pid;
}
-char *perf_thread_map__comm(struct perf_thread_map *map, int thread)
+char *perf_thread_map__comm(struct perf_thread_map *map, int idx)
{
- return map->map[thread].comm;
+ return map->map[idx].comm;
}
-struct perf_thread_map *perf_thread_map__new_dummy(void)
+struct perf_thread_map *perf_thread_map__new_array(int nr_threads, pid_t *array)
{
- struct perf_thread_map *threads = thread_map__alloc(1);
+ struct perf_thread_map *threads = thread_map__alloc(nr_threads);
+ int i;
+
+ if (!threads)
+ return NULL;
+
+ for (i = 0; i < nr_threads; i++)
+ perf_thread_map__set_pid(threads, i, array ? array[i] : -1);
+
+ threads->nr = nr_threads;
+ refcount_set(&threads->refcnt, 1);
- if (threads != NULL) {
- perf_thread_map__set_pid(threads, 0, -1);
- threads->nr = 1;
- refcount_set(&threads->refcnt, 1);
- }
return threads;
}
+struct perf_thread_map *perf_thread_map__new_dummy(void)
+{
+ return perf_thread_map__new_array(1, NULL);
+}
+
static void perf_thread_map__delete(struct perf_thread_map *threads)
{
if (threads) {
@@ -85,7 +95,7 @@ int perf_thread_map__nr(struct perf_thread_map *threads)
return threads ? threads->nr : 1;
}
-pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread)
+pid_t perf_thread_map__pid(struct perf_thread_map *map, int idx)
{
- return map->map[thread].pid;
+ return map->map[idx].pid;
}
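
The perf_thread_map__new_array() constructor introduced above pre-fills every slot from an optional pid array (or with -1 when the array is NULL), and perf_thread_map__new_dummy() becomes the one-entry special case. A minimal caller sketch using only the libperf calls shown in this diff; the helper name is illustrative:

    #include <sys/types.h>
    #include <perf/threadmap.h>

    /* Build a two-entry map from known pids; a NULL array would give -1 slots. */
    static struct perf_thread_map *map_two_pids(pid_t a, pid_t b)
    {
            pid_t pids[] = { a, b };
            struct perf_thread_map *threads = perf_thread_map__new_array(2, pids);

            if (!threads)
                    return NULL;

            /* Individual entries can still be overridden afterwards. */
            perf_thread_map__set_pid(threads, 1, b);

            return threads; /* caller releases it with perf_thread_map__put() */
    }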
diff --git a/tools/lib/rbtree.c b/tools/lib/rbtree.c
index 06ac7bd2144b..727396de6be5 100644
--- a/tools/lib/rbtree.c
+++ b/tools/lib/rbtree.c
@@ -13,7 +13,7 @@
#include <linux/export.h>
/*
- * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
+ * red-black trees properties: https://en.wikipedia.org/wiki/Rbtree
*
* 1) A node is either red or black
* 2) The root is black
diff --git a/tools/lib/slab.c b/tools/lib/slab.c
new file mode 100644
index 000000000000..959997fb0652
--- /dev/null
+++ b/tools/lib/slab.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <stdio.h>
+#include <string.h>
+
+#include <urcu/uatomic.h>
+#include <linux/slab.h>
+#include <malloc.h>
+#include <linux/gfp.h>
+
+int kmalloc_nr_allocated;
+int kmalloc_verbose;
+
+void *kmalloc(size_t size, gfp_t gfp)
+{
+ void *ret;
+
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ return NULL;
+
+ ret = malloc(size);
+ uatomic_inc(&kmalloc_nr_allocated);
+ if (kmalloc_verbose)
+ printf("Allocating %p from malloc\n", ret);
+ if (gfp & __GFP_ZERO)
+ memset(ret, 0, size);
+ return ret;
+}
+
+void kfree(void *p)
+{
+ if (!p)
+ return;
+ uatomic_dec(&kmalloc_nr_allocated);
+ if (kmalloc_verbose)
+ printf("Freeing %p to malloc\n", p);
+ free(p);
+}
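
This slab shim backs the userspace copies of kernel data-structure tests: allocations are only honoured when the gfp mask allows direct reclaim, so harnesses can force failure paths, and kmalloc_nr_allocated tracks leaks. A small sketch of that behaviour, assuming the tools copies of linux/slab.h and linux/gfp.h that those test suites already use:

    #include <assert.h>
    #include <linux/gfp.h>
    #include <linux/slab.h>

    static void slab_shim_demo(void)
    {
            /* GFP_KERNEL carries __GFP_DIRECT_RECLAIM, so this succeeds;
             * __GFP_ZERO additionally asks the shim to zero the buffer. */
            void *p = kmalloc(32, GFP_KERNEL | __GFP_ZERO);

            assert(p);
            /* A mask without __GFP_DIRECT_RECLAIM is refused on purpose,
             * letting tests exercise their allocation-failure handling. */
            assert(kmalloc(32, 0) == NULL);
            kfree(p);
    }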
diff --git a/tools/lib/string.c b/tools/lib/string.c
index f645343815de..8b6892f959ab 100644
--- a/tools/lib/string.c
+++ b/tools/lib/string.c
@@ -168,3 +168,61 @@ char *strreplace(char *s, char old, char new)
*s = new;
return s;
}
+
+static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
+{
+ while (bytes) {
+ if (*start != value)
+ return (void *)start;
+ start++;
+ bytes--;
+ }
+ return NULL;
+}
+
+/**
+ * memchr_inv - Find an unmatching character in an area of memory.
+ * @start: The memory area
+ * @c: Find a character other than c
+ * @bytes: The size of the area.
+ *
+ * returns the address of the first character other than @c, or %NULL
+ * if the whole buffer contains just @c.
+ */
+void *memchr_inv(const void *start, int c, size_t bytes)
+{
+ u8 value = c;
+ u64 value64;
+ unsigned int words, prefix;
+
+ if (bytes <= 16)
+ return check_bytes8(start, value, bytes);
+
+ value64 = value;
+ value64 |= value64 << 8;
+ value64 |= value64 << 16;
+ value64 |= value64 << 32;
+
+ prefix = (unsigned long)start % 8;
+ if (prefix) {
+ u8 *r;
+
+ prefix = 8 - prefix;
+ r = check_bytes8(start, value, prefix);
+ if (r)
+ return r;
+ start += prefix;
+ bytes -= prefix;
+ }
+
+ words = bytes / 8;
+
+ while (words) {
+ if (*(u64 *)start != value64)
+ return check_bytes8(start, value, 8);
+ start += 8;
+ words--;
+ }
+
+ return check_bytes8(start, value, bytes % 8);
+}
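
memchr_inv() is the inverse of memchr(): it returns a pointer to the first byte that differs from @c, or NULL when the whole area is uniform, switching to a word-at-a-time scan once the pointer is 8-byte aligned. A short sketch of the usual "is this buffer all zero?" check, assuming the prototype is exported through the tools copy of linux/string.h:

    #include <stddef.h>
    #include <linux/string.h>   /* assumed to declare memchr_inv() */

    /* Returns nonzero when every byte of buf is zero. */
    static int buf_is_zeroed(const void *buf, size_t len)
    {
            return memchr_inv(buf, 0, len) == NULL;
    }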
diff --git a/tools/lib/subcmd/Makefile b/tools/lib/subcmd/Makefile
index 1c777a72bb39..b87213263a5e 100644
--- a/tools/lib/subcmd/Makefile
+++ b/tools/lib/subcmd/Makefile
@@ -17,6 +17,15 @@ RM = rm -f
MAKEFLAGS += --no-print-directory
+INSTALL = install
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
LIBFILE = $(OUTPUT)libsubcmd.a
CFLAGS := -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
@@ -48,6 +57,18 @@ CFLAGS += $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
SUBCMD_IN := $(OUTPUT)libsubcmd-in.o
+ifeq ($(LP64), 1)
+ libdir_relative = lib64
+else
+ libdir_relative = lib
+endif
+
+prefix ?=
+libdir = $(prefix)/$(libdir_relative)
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+
all:
export srctree OUTPUT CC LD CFLAGS V
@@ -61,9 +82,40 @@ $(SUBCMD_IN): FORCE
$(LIBFILE): $(SUBCMD_IN)
$(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(SUBCMD_IN)
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
+define do_install
+ if [ ! -d '$2' ]; then \
+ $(INSTALL) -d -m 755 '$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$2'
+endef
+
+install_lib: $(LIBFILE)
+ $(call QUIET_INSTALL, $(LIBFILE)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIBFILE) $(DESTDIR)$(libdir_SQ)
+
+HDRS := exec-cmd.h help.h pager.h parse-options.h run-command.h
+INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/subcmd
+INSTALL_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(HDRS))
+
+$(INSTALL_HDRS): $(INSTALL_HDRS_PFX)/%.h: %.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_HDRS_PFX)/,644)
+
+install_headers: $(INSTALL_HDRS)
+ $(call QUIET_INSTALL, libsubcmd_headers)
+
+install: install_lib install_headers
+
clean:
$(call QUIET_CLEAN, libsubcmd) $(RM) $(LIBFILE); \
- find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+ find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
FORCE:
diff --git a/tools/lib/subcmd/exec-cmd.c b/tools/lib/subcmd/exec-cmd.c
index 33e94fb83986..5dbea456973e 100644
--- a/tools/lib/subcmd/exec-cmd.c
+++ b/tools/lib/subcmd/exec-cmd.c
@@ -24,6 +24,9 @@ void exec_cmd_init(const char *exec_name, const char *prefix,
subcmd_config.prefix = prefix;
subcmd_config.exec_path = exec_path;
subcmd_config.exec_path_env = exec_path_env;
+
+ /* Setup environment variable for invoked shell script. */
+ setenv("PREFIX", prefix, 1);
}
#define is_dir_sep(c) ((c) == '/')
diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c
index 2859f107abc8..bf02d62a3b2b 100644
--- a/tools/lib/subcmd/help.c
+++ b/tools/lib/subcmd/help.c
@@ -65,12 +65,14 @@ void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes)
ci = cj = ei = 0;
while (ci < cmds->cnt && ei < excludes->cnt) {
cmp = strcmp(cmds->names[ci]->name, excludes->names[ei]->name);
- if (cmp < 0)
+ if (cmp < 0) {
cmds->names[cj++] = cmds->names[ci++];
- else if (cmp == 0)
- ci++, ei++;
- else if (cmp > 0)
+ } else if (cmp == 0) {
+ ci++;
ei++;
+ } else if (cmp > 0) {
+ ei++;
+ }
}
while (ci < cmds->cnt)
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index dbb9efbf718a..9fa75943f2ed 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -237,6 +237,9 @@ static int get_value(struct parse_opt_ctx_t *p,
return err;
case OPTION_CALLBACK:
+ if (opt->set)
+ *(bool *)opt->set = true;
+
if (unset)
return (*opt->callback)(opt, NULL, 1) ? (-1) : 0;
if (opt->flags & PARSE_OPT_NOARG)
@@ -803,9 +806,9 @@ static int option__cmp(const void *va, const void *vb)
static struct option *options__order(const struct option *opts)
{
- int nr_opts = 0, len;
+ int nr_opts = 0, nr_group = 0, len;
const struct option *o = opts;
- struct option *ordered;
+ struct option *opt, *ordered, *group;
for (o = opts; o->type != OPTION_END; o++)
++nr_opts;
@@ -816,7 +819,18 @@ static struct option *options__order(const struct option *opts)
goto out;
memcpy(ordered, opts, len);
- qsort(ordered, nr_opts, sizeof(*o), option__cmp);
+ /* sort each option group individually */
+ for (opt = group = ordered; opt->type != OPTION_END; opt++) {
+ if (opt->type == OPTION_GROUP) {
+ qsort(group, nr_group, sizeof(*opt), option__cmp);
+ group = opt + 1;
+ nr_group = 0;
+ continue;
+ }
+ nr_group++;
+ }
+ qsort(group, nr_group, sizeof(*opt), option__cmp);
+
out:
return ordered;
}
diff --git a/tools/lib/subcmd/parse-options.h b/tools/lib/subcmd/parse-options.h
index af9def589863..41b9b942504d 100644
--- a/tools/lib/subcmd/parse-options.h
+++ b/tools/lib/subcmd/parse-options.h
@@ -133,6 +133,7 @@ struct option {
#define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
#define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, int *), .help = (h) }
#define OPT_UINTEGER(s, l, v, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h) }
+#define OPT_UINTEGER_OPTARG(s, l, v, d, h) { .type = OPTION_UINTEGER, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned int *), .help = (h), .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) }
#define OPT_LONG(s, l, v, h) { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
#define OPT_ULONG(s, l, v, h) { .type = OPTION_ULONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, unsigned long *), .help = (h) }
#define OPT_U64(s, l, v, h) { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
@@ -151,6 +152,8 @@ struct option {
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
#define OPT_CALLBACK(s, l, v, a, h, f) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f) }
+#define OPT_CALLBACK_SET(s, l, v, os, a, h, f) \
+ { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .set = check_vtype(os, bool *)}
#define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
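
With get_value() now recording OPTION_CALLBACK use in *opt->set, the new OPT_CALLBACK_SET() macro pairs a callback option with a bool that says whether the option appeared on the command line at all. A hedged sketch; the option name, variables and callback below are made up for illustration:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <subcmd/parse-options.h>

    static bool latency_set;        /* true only if --latency was passed */
    static unsigned int latency;

    static int parse_latency(const struct option *opt, const char *str, int unset)
    {
            (void)opt;
            if (!unset && str)
                    latency = atoi(str);
            return 0;
    }

    static const struct option options[] = {
            OPT_CALLBACK_SET('l', "latency", &latency, &latency_set, "ms",
                             "requested latency in milliseconds", parse_latency),
            OPT_END()
    };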
diff --git a/tools/lib/subcmd/subcmd-util.h b/tools/lib/subcmd/subcmd-util.h
index 794a375dad36..b2aec04fce8f 100644
--- a/tools/lib/subcmd/subcmd-util.h
+++ b/tools/lib/subcmd/subcmd-util.h
@@ -50,15 +50,8 @@ static NORETURN inline void die(const char *err, ...)
static inline void *xrealloc(void *ptr, size_t size)
{
void *ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret) {
- ret = realloc(ptr, size);
- if (!ret && !size)
- ret = realloc(ptr, 1);
- if (!ret)
- die("Out of memory, realloc failed");
- }
+ if (!ret)
+ die("Out of memory, realloc failed");
return ret;
}
diff --git a/tools/lib/symbol/Build b/tools/lib/symbol/Build
new file mode 100644
index 000000000000..9b9a9c78d3c9
--- /dev/null
+++ b/tools/lib/symbol/Build
@@ -0,0 +1 @@
+libsymbol-y += kallsyms.o
diff --git a/tools/lib/symbol/Makefile b/tools/lib/symbol/Makefile
new file mode 100644
index 000000000000..13d43c6f92b4
--- /dev/null
+++ b/tools/lib/symbol/Makefile
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0
+include ../../scripts/Makefile.include
+include ../../scripts/utilities.mak # QUIET_CLEAN
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+#$(info Determined 'srctree' to be $(srctree))
+endif
+
+CC ?= $(CROSS_COMPILE)gcc
+AR ?= $(CROSS_COMPILE)ar
+LD ?= $(CROSS_COMPILE)ld
+
+MAKEFLAGS += --no-print-directory
+
+INSTALL = install
+
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+LIBFILE = $(OUTPUT)libsymbol.a
+
+CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu11 -U_FORTIFY_SOURCE -fPIC
+
+ifeq ($(DEBUG),0)
+ifeq ($(CC_NO_CLANG), 0)
+ CFLAGS += -O3
+else
+ CFLAGS += -O6
+endif
+endif
+
+ifeq ($(DEBUG),0)
+ CFLAGS += -D_FORTIFY_SOURCE
+endif
+
+# Treat warnings as errors unless directed not to
+ifneq ($(WERROR),0)
+ CFLAGS += -Werror
+endif
+
+CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+
+CFLAGS += -I$(srctree)/tools/lib
+CFLAGS += -I$(srctree)/tools/include
+
+RM = rm -f
+
+SYMBOL_IN := $(OUTPUT)libsymbol-in.o
+
+ifeq ($(LP64), 1)
+ libdir_relative = lib64
+else
+ libdir_relative = lib
+endif
+
+prefix ?=
+libdir = $(prefix)/$(libdir_relative)
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+
+all:
+
+export srctree OUTPUT CC LD CFLAGS V
+include $(srctree)/tools/build/Makefile.include
+include $(srctree)/tools/scripts/Makefile.include
+
+all: fixdep $(LIBFILE)
+
+$(SYMBOL_IN): FORCE
+ @$(MAKE) $(build)=libsymbol
+
+$(LIBFILE): $(SYMBOL_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(SYMBOL_IN)
+
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
+define do_install
+ if [ ! -d '$2' ]; then \
+ $(INSTALL) -d -m 755 '$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$2'
+endef
+
+install_lib: $(LIBFILE)
+ $(call QUIET_INSTALL, $(LIBFILE)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIBFILE) $(DESTDIR)$(libdir_SQ)
+
+HDRS := kallsyms.h
+INSTALL_HDRS_PFX := $(DESTDIR)$(prefix)/include/symbol
+INSTALL_HDRS := $(addprefix $(INSTALL_HDRS_PFX)/, $(HDRS))
+
+$(INSTALL_HDRS): $(INSTALL_HDRS_PFX)/%.h: %.h
+ $(call QUIET_INSTALL, $@) \
+ $(call do_install,$<,$(INSTALL_HDRS_PFX)/,644)
+
+install_headers: $(INSTALL_HDRS)
+ $(call QUIET_INSTALL, libsymbol_headers)
+
+install: install_lib install_headers
+
+clean:
+ $(call QUIET_CLEAN, libsymbol) $(RM) $(LIBFILE); \
+ find $(or $(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
+
+FORCE:
+
+.PHONY: clean FORCE
diff --git a/tools/lib/symbol/kallsyms.c b/tools/lib/symbol/kallsyms.c
index 1a7a9f877095..e335ac2b9e19 100644
--- a/tools/lib/symbol/kallsyms.c
+++ b/tools/lib/symbol/kallsyms.c
@@ -1,7 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include "symbol/kallsyms.h"
+#include "api/io.h"
#include <stdio.h>
-#include <stdlib.h>
+#include <sys/stat.h>
+#include <fcntl.h>
u8 kallsyms2elf_type(char type)
{
@@ -15,74 +17,62 @@ bool kallsyms__is_function(char symbol_type)
return symbol_type == 'T' || symbol_type == 'W';
}
-/*
- * While we find nice hex chars, build a long_val.
- * Return number of chars processed.
- */
-int hex2u64(const char *ptr, u64 *long_val)
+static void read_to_eol(struct io *io)
{
- char *p;
+ int ch;
- *long_val = strtoull(ptr, &p, 16);
-
- return p - ptr;
+ for (;;) {
+ ch = io__get_char(io);
+ if (ch < 0 || ch == '\n')
+ return;
+ }
}
int kallsyms__parse(const char *filename, void *arg,
int (*process_symbol)(void *arg, const char *name,
char type, u64 start))
{
- char *line = NULL;
- size_t n;
- int err = -1;
- FILE *file = fopen(filename, "r");
-
- if (file == NULL)
- goto out_failure;
-
- err = 0;
+ struct io io;
+ char bf[BUFSIZ];
+ int err;
- while (!feof(file)) {
- u64 start;
- int line_len, len;
- char symbol_type;
- char *symbol_name;
+ io.fd = open(filename, O_RDONLY, 0);
- line_len = getline(&line, &n, file);
- if (line_len < 0 || !line)
- break;
+ if (io.fd < 0)
+ return -1;
- line[--line_len] = '\0'; /* \n */
+ io__init(&io, io.fd, bf, sizeof(bf));
- len = hex2u64(line, &start);
+ err = 0;
+ while (!io.eof) {
+ __u64 start;
+ int ch;
+ size_t i;
+ char symbol_type;
+ char symbol_name[KSYM_NAME_LEN + 1];
- /* Skip the line if we failed to parse the address. */
- if (!len)
+ if (io__get_hex(&io, &start) != ' ') {
+ read_to_eol(&io);
continue;
-
- len++;
- if (len + 2 >= line_len)
+ }
+ symbol_type = io__get_char(&io);
+ if (io__get_char(&io) != ' ') {
+ read_to_eol(&io);
continue;
-
- symbol_type = line[len];
- len += 2;
- symbol_name = line + len;
- len = line_len - len;
-
- if (len >= KSYM_NAME_LEN) {
- err = -1;
- break;
}
+ for (i = 0; i < sizeof(symbol_name); i++) {
+ ch = io__get_char(&io);
+ if (ch < 0 || ch == '\n')
+ break;
+ symbol_name[i] = ch;
+ }
+ symbol_name[i] = '\0';
err = process_symbol(arg, symbol_name, symbol_type, start);
if (err)
break;
}
- free(line);
- fclose(file);
+ close(io.fd);
return err;
-
-out_failure:
- return -1;
}
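
The rewritten kallsyms__parse() streams the file through the small io helper instead of getline()/hex2u64(), but its callback contract is unchanged: every symbol line is handed to process_symbol(), and a non-zero return stops the walk. A minimal caller sketch, reusing only what this file already exports:

    #include <stdio.h>
    #include <linux/types.h>
    #include "symbol/kallsyms.h"

    static int print_function(void *arg, const char *name, char type, u64 start)
    {
            (void)arg;
            if (kallsyms__is_function(type))
                    printf("%#llx %c %s\n", (unsigned long long)start, type, name);
            return 0;       /* returning non-zero would abort kallsyms__parse() */
    }

    /* e.g.: kallsyms__parse("/proc/kallsyms", NULL, print_function); */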
diff --git a/tools/lib/symbol/kallsyms.h b/tools/lib/symbol/kallsyms.h
index bd988f7b18d4..542f9b059c3b 100644
--- a/tools/lib/symbol/kallsyms.h
+++ b/tools/lib/symbol/kallsyms.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#ifndef KSYM_NAME_LEN
-#define KSYM_NAME_LEN 256
+#define KSYM_NAME_LEN 512
#endif
static inline u8 kallsyms2elf_binding(char type)
@@ -18,8 +18,6 @@ static inline u8 kallsyms2elf_binding(char type)
return isupper(type) ? STB_GLOBAL : STB_LOCAL;
}
-int hex2u64(const char *ptr, u64 *long_val);
-
u8 kallsyms2elf_type(char type);
bool kallsyms__is_function(char symbol_type);
diff --git a/tools/lib/thermal/.gitignore b/tools/lib/thermal/.gitignore
new file mode 100644
index 000000000000..5d2aeda80fea
--- /dev/null
+++ b/tools/lib/thermal/.gitignore
@@ -0,0 +1,2 @@
+libthermal.so*
+libthermal.pc
diff --git a/tools/lib/thermal/Build b/tools/lib/thermal/Build
new file mode 100644
index 000000000000..4a892d9e24f9
--- /dev/null
+++ b/tools/lib/thermal/Build
@@ -0,0 +1,5 @@
+libthermal-y += commands.o
+libthermal-y += events.o
+libthermal-y += thermal_nl.o
+libthermal-y += sampling.o
+libthermal-y += thermal.o
diff --git a/tools/lib/thermal/Makefile b/tools/lib/thermal/Makefile
new file mode 100644
index 000000000000..2d0d255fd0e1
--- /dev/null
+++ b/tools/lib/thermal/Makefile
@@ -0,0 +1,165 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+# Most of this file is copied from tools/lib/perf/Makefile
+
+LIBTHERMAL_VERSION = 0
+LIBTHERMAL_PATCHLEVEL = 0
+LIBTHERMAL_EXTRAVERSION = 1
+
+MAKEFLAGS += --no-print-directory
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+# $(info Determined 'srctree' to be $(srctree))
+endif
+
+INSTALL = install
+
+# Use DESTDIR for installing into a different root directory.
+# This is useful for building a package. The program will be
+# installed in this directory as if it was the root directory.
+# Then the build tool can move it later.
+DESTDIR ?=
+DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
+
+include $(srctree)/tools/scripts/Makefile.include
+include $(srctree)/tools/scripts/Makefile.arch
+
+ifeq ($(LP64), 1)
+ libdir_relative = lib64
+else
+ libdir_relative = lib
+endif
+
+prefix ?=
+libdir = $(prefix)/$(libdir_relative)
+
+# Shell quotes
+libdir_SQ = $(subst ','\'',$(libdir))
+libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
+
+ifeq ("$(origin V)", "command line")
+ VERBOSE = $(V)
+endif
+ifndef VERBOSE
+ VERBOSE = 0
+endif
+
+ifeq ($(VERBOSE),1)
+ Q =
+else
+ Q = @
+endif
+
+# Set compile option CFLAGS
+ifdef EXTRA_CFLAGS
+ CFLAGS := $(EXTRA_CFLAGS)
+else
+ CFLAGS := -g -Wall
+endif
+
+INCLUDES = \
+-I/usr/include/libnl3 \
+-I$(srctree)/tools/lib/thermal/include \
+-I$(srctree)/tools/lib/ \
+-I$(srctree)/tools/include \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/ \
+-I$(srctree)/tools/arch/$(SRCARCH)/include/uapi \
+-I$(srctree)/tools/include/uapi
+
+# Append required CFLAGS
+override CFLAGS += $(EXTRA_WARNINGS)
+override CFLAGS += -Werror -Wall
+override CFLAGS += -fPIC
+override CFLAGS += $(INCLUDES)
+override CFLAGS += -fvisibility=hidden
+
+all:
+
+export srctree OUTPUT CC LD CFLAGS V
+export DESTDIR DESTDIR_SQ
+
+include $(srctree)/tools/build/Makefile.include
+
+VERSION_SCRIPT := libthermal.map
+
+PATCHLEVEL = $(LIBTHERMAL_PATCHLEVEL)
+EXTRAVERSION = $(LIBTHERMAL_EXTRAVERSION)
+VERSION = $(LIBTHERMAL_VERSION).$(LIBTHERMAL_PATCHLEVEL).$(LIBTHERMAL_EXTRAVERSION)
+
+LIBTHERMAL_SO := $(OUTPUT)libthermal.so.$(VERSION)
+LIBTHERMAL_A := $(OUTPUT)libthermal.a
+LIBTHERMAL_IN := $(OUTPUT)libthermal-in.o
+LIBTHERMAL_PC := $(OUTPUT)libthermal.pc
+LIBTHERMAL_ALL := $(LIBTHERMAL_A) $(OUTPUT)libthermal.so*
+
+THERMAL_UAPI := include/uapi/linux/thermal.h
+
+$(THERMAL_UAPI): FORCE
+ ln -sf $(srctree)/$@ $(srctree)/tools/$@
+
+$(LIBTHERMAL_IN): FORCE
+ $(Q)$(MAKE) $(build)=libthermal
+
+$(LIBTHERMAL_A): $(LIBTHERMAL_IN)
+ $(QUIET_AR)$(RM) $@ && $(AR) rcs $@ $(LIBTHERMAL_IN)
+
+$(LIBTHERMAL_SO): $(LIBTHERMAL_IN)
+ $(QUIET_LINK)$(CC) --shared -Wl,-soname,libthermal.so \
+ -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+ @ln -sf $(@F) $(OUTPUT)libthermal.so
+ @ln -sf $(@F) $(OUTPUT)libthermal.so.$(LIBTHERMAL_VERSION)
+
+
+libs: $(THERMAL_UAPI) $(LIBTHERMAL_A) $(LIBTHERMAL_SO) $(LIBTHERMAL_PC)
+
+all: fixdep
+ $(Q)$(MAKE) libs
+
+clean:
+ $(call QUIET_CLEAN, libthermal) $(RM) $(LIBTHERMAL_A) \
+ *.o *~ *.a *.so *.so.$(VERSION) *.so.$(LIBTHERMAL_VERSION) .*.d .*.cmd LIBTHERMAL-CFLAGS $(LIBTHERMAL_PC)
+
+$(LIBTHERMAL_PC):
+ $(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
+ -e "s|@LIBDIR@|$(libdir_SQ)|" \
+ -e "s|@VERSION@|$(VERSION)|" \
+ < libthermal.pc.template > $@
+
+define do_install_mkdir
+ if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+ fi
+endef
+
+define do_install
+ if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
+ fi; \
+ $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+endef
+
+install_lib: libs
+ $(call QUIET_INSTALL, $(LIBTHERMAL_ALL)) \
+ $(call do_install_mkdir,$(libdir_SQ)); \
+ cp -fpR $(LIBTHERMAL_ALL) $(DESTDIR)$(libdir_SQ)
+
+install_headers:
+ $(call QUIET_INSTALL, headers) \
+ $(call do_install,include/thermal.h,$(prefix)/include/thermal,644)
+
+install_pkgconfig: $(LIBTHERMAL_PC)
+ $(call QUIET_INSTALL, $(LIBTHERMAL_PC)) \
+ $(call do_install,$(LIBTHERMAL_PC),$(libdir_SQ)/pkgconfig,644)
+
+install_doc:
+ $(Q)$(MAKE) -C Documentation install-man install-html install-examples
+
+install: install_lib install_headers install_pkgconfig
+
+FORCE:
+
+.PHONY: all install clean FORCE
diff --git a/tools/lib/thermal/commands.c b/tools/lib/thermal/commands.c
new file mode 100644
index 000000000000..73d4d4e8d6ec
--- /dev/null
+++ b/tools/lib/thermal/commands.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <thermal.h>
+#include "thermal_nl.h"
+
+static struct nla_policy thermal_genl_policy[THERMAL_GENL_ATTR_MAX + 1] = {
+ /* Thermal zone */
+ [THERMAL_GENL_ATTR_TZ] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_TZ_ID] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TEMP] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TRIP] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_TZ_TRIP_ID] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TRIP_TEMP] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TRIP_TYPE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_TRIP_HYST] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_MODE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_CDEV_WEIGHT] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_TZ_NAME] = { .type = NLA_STRING },
+
+ /* Governor(s) */
+ [THERMAL_GENL_ATTR_TZ_GOV] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_TZ_GOV_NAME] = { .type = NLA_STRING },
+
+ /* Cooling devices */
+ [THERMAL_GENL_ATTR_CDEV] = { .type = NLA_NESTED },
+ [THERMAL_GENL_ATTR_CDEV_ID] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CDEV_CUR_STATE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CDEV_MAX_STATE] = { .type = NLA_U32 },
+ [THERMAL_GENL_ATTR_CDEV_NAME] = { .type = NLA_STRING },
+};
+
+static int parse_tz_get(struct genl_info *info, struct thermal_zone **tz)
+{
+ struct nlattr *attr;
+ struct thermal_zone *__tz = NULL;
+ size_t size = 0;
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[THERMAL_GENL_ATTR_TZ], rem) {
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_ID) {
+
+ size++;
+
+ __tz = realloc(__tz, sizeof(*__tz) * (size + 2));
+ if (!__tz)
+ return THERMAL_ERROR;
+
+ __tz[size - 1].id = nla_get_u32(attr);
+ }
+
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_NAME)
+ nla_strlcpy(__tz[size - 1].name, attr,
+ THERMAL_NAME_LENGTH);
+ }
+
+ if (__tz)
+ __tz[size].id = -1;
+
+ *tz = __tz;
+
+ return THERMAL_SUCCESS;
+}
+
+static int parse_cdev_get(struct genl_info *info, struct thermal_cdev **cdev)
+{
+ struct nlattr *attr;
+ struct thermal_cdev *__cdev = NULL;
+ size_t size = 0;
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[THERMAL_GENL_ATTR_CDEV], rem) {
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_CDEV_ID) {
+
+ size++;
+
+ __cdev = realloc(__cdev, sizeof(*__cdev) * (size + 2));
+ if (!__cdev)
+ return THERMAL_ERROR;
+
+ __cdev[size - 1].id = nla_get_u32(attr);
+ }
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_CDEV_NAME) {
+ nla_strlcpy(__cdev[size - 1].name, attr,
+ THERMAL_NAME_LENGTH);
+ }
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_CDEV_CUR_STATE)
+ __cdev[size - 1].cur_state = nla_get_u32(attr);
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_CDEV_MAX_STATE)
+ __cdev[size - 1].max_state = nla_get_u32(attr);
+ }
+
+ if (__cdev)
+ __cdev[size].id = -1;
+
+ *cdev = __cdev;
+
+ return THERMAL_SUCCESS;
+}
+
+static int parse_tz_get_trip(struct genl_info *info, struct thermal_zone *tz)
+{
+ struct nlattr *attr;
+ struct thermal_trip *__tt = NULL;
+ size_t size = 0;
+ int rem;
+
+ nla_for_each_nested(attr, info->attrs[THERMAL_GENL_ATTR_TZ_TRIP], rem) {
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_TRIP_ID) {
+
+ size++;
+
+ __tt = realloc(__tt, sizeof(*__tt) * (size + 2));
+ if (!__tt)
+ return THERMAL_ERROR;
+
+ __tt[size - 1].id = nla_get_u32(attr);
+ }
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_TRIP_TYPE)
+ __tt[size - 1].type = nla_get_u32(attr);
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_TRIP_TEMP)
+ __tt[size - 1].temp = nla_get_u32(attr);
+
+ if (nla_type(attr) == THERMAL_GENL_ATTR_TZ_TRIP_HYST)
+ __tt[size - 1].hyst = nla_get_u32(attr);
+ }
+
+ if (__tt)
+ __tt[size].id = -1;
+
+ tz->trip = __tt;
+
+ return THERMAL_SUCCESS;
+}
+
+static int parse_tz_get_temp(struct genl_info *info, struct thermal_zone *tz)
+{
+ int id = -1;
+
+ if (info->attrs[THERMAL_GENL_ATTR_TZ_ID])
+ id = nla_get_u32(info->attrs[THERMAL_GENL_ATTR_TZ_ID]);
+
+ if (tz->id != id)
+ return THERMAL_ERROR;
+
+ if (info->attrs[THERMAL_GENL_ATTR_TZ_TEMP])
+ tz->temp = nla_get_u32(info->attrs[THERMAL_GENL_ATTR_TZ_TEMP]);
+
+ return THERMAL_SUCCESS;
+}
+
+static int parse_tz_get_gov(struct genl_info *info, struct thermal_zone *tz)
+{
+ int id = -1;
+
+ if (info->attrs[THERMAL_GENL_ATTR_TZ_ID])
+ id = nla_get_u32(info->attrs[THERMAL_GENL_ATTR_TZ_ID]);
+
+ if (tz->id != id)
+ return THERMAL_ERROR;
+
+ if (info->attrs[THERMAL_GENL_ATTR_TZ_GOV_NAME]) {
+ nla_strlcpy(tz->governor,
+ info->attrs[THERMAL_GENL_ATTR_TZ_GOV_NAME],
+ THERMAL_NAME_LENGTH);
+ }
+
+ return THERMAL_SUCCESS;
+}
+
+static int handle_netlink(struct nl_cache_ops *unused,
+ struct genl_cmd *cmd,
+ struct genl_info *info, void *arg)
+{
+ int ret;
+
+ switch (cmd->c_id) {
+
+ case THERMAL_GENL_CMD_TZ_GET_ID:
+ ret = parse_tz_get(info, arg);
+ break;
+
+ case THERMAL_GENL_CMD_CDEV_GET:
+ ret = parse_cdev_get(info, arg);
+ break;
+
+ case THERMAL_GENL_CMD_TZ_GET_TEMP:
+ ret = parse_tz_get_temp(info, arg);
+ break;
+
+ case THERMAL_GENL_CMD_TZ_GET_TRIP:
+ ret = parse_tz_get_trip(info, arg);
+ break;
+
+ case THERMAL_GENL_CMD_TZ_GET_GOV:
+ ret = parse_tz_get_gov(info, arg);
+ break;
+
+ default:
+ return THERMAL_ERROR;
+ }
+
+ return ret;
+}
+
+static struct genl_cmd thermal_cmds[] = {
+ {
+ .c_id = THERMAL_GENL_CMD_TZ_GET_ID,
+ .c_name = (char *)"List thermal zones",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+ {
+ .c_id = THERMAL_GENL_CMD_TZ_GET_GOV,
+ .c_name = (char *)"Get governor",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+ {
+ .c_id = THERMAL_GENL_CMD_TZ_GET_TEMP,
+ .c_name = (char *)"Get thermal zone temperature",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+ {
+ .c_id = THERMAL_GENL_CMD_TZ_GET_TRIP,
+ .c_name = (char *)"Get thermal zone trip points",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+ {
+ .c_id = THERMAL_GENL_CMD_CDEV_GET,
+ .c_name = (char *)"Get cooling devices",
+ .c_msg_parser = handle_netlink,
+ .c_maxattr = THERMAL_GENL_ATTR_MAX,
+ .c_attr_policy = thermal_genl_policy,
+ },
+};
+
+static struct genl_ops thermal_cmd_ops = {
+ .o_name = (char *)"thermal",
+ .o_cmds = thermal_cmds,
+ .o_ncmds = ARRAY_SIZE(thermal_cmds),
+};
+
+static thermal_error_t thermal_genl_auto(struct thermal_handler *th, int id, int cmd,
+ int flags, void *arg)
+{
+ struct nl_msg *msg;
+ void *hdr;
+
+ msg = nlmsg_alloc();
+ if (!msg)
+ return THERMAL_ERROR;
+
+ hdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, thermal_cmd_ops.o_id,
+ 0, flags, cmd, THERMAL_GENL_VERSION);
+ if (!hdr)
+ return THERMAL_ERROR;
+
+ if (id >= 0 && nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_ID, id))
+ return THERMAL_ERROR;
+
+ if (nl_send_msg(th->sk_cmd, th->cb_cmd, msg, genl_handle_msg, arg))
+ return THERMAL_ERROR;
+
+ nlmsg_free(msg);
+
+ return THERMAL_SUCCESS;
+}
+
+thermal_error_t thermal_cmd_get_tz(struct thermal_handler *th, struct thermal_zone **tz)
+{
+ return thermal_genl_auto(th, -1, THERMAL_GENL_CMD_TZ_GET_ID,
+ NLM_F_DUMP | NLM_F_ACK, tz);
+}
+
+thermal_error_t thermal_cmd_get_cdev(struct thermal_handler *th, struct thermal_cdev **tc)
+{
+ return thermal_genl_auto(th, -1, THERMAL_GENL_CMD_CDEV_GET,
+ NLM_F_DUMP | NLM_F_ACK, tc);
+}
+
+thermal_error_t thermal_cmd_get_trip(struct thermal_handler *th, struct thermal_zone *tz)
+{
+ return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_TRIP,
+ 0, tz);
+}
+
+thermal_error_t thermal_cmd_get_governor(struct thermal_handler *th, struct thermal_zone *tz)
+{
+ return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_GOV, 0, tz);
+}
+
+thermal_error_t thermal_cmd_get_temp(struct thermal_handler *th, struct thermal_zone *tz)
+{
+ return thermal_genl_auto(th, tz->id, THERMAL_GENL_CMD_TZ_GET_TEMP, 0, tz);
+}
+
+thermal_error_t thermal_cmd_exit(struct thermal_handler *th)
+{
+ if (genl_unregister_family(&thermal_cmd_ops))
+ return THERMAL_ERROR;
+
+ nl_thermal_disconnect(th->sk_cmd, th->cb_cmd);
+
+ return THERMAL_SUCCESS;
+}
+
+thermal_error_t thermal_cmd_init(struct thermal_handler *th)
+{
+ int ret;
+ int family;
+
+ if (nl_thermal_connect(&th->sk_cmd, &th->cb_cmd))
+ return THERMAL_ERROR;
+
+ ret = genl_register_family(&thermal_cmd_ops);
+ if (ret)
+ return THERMAL_ERROR;
+
+ ret = genl_ops_resolve(th->sk_cmd, &thermal_cmd_ops);
+ if (ret)
+ return THERMAL_ERROR;
+
+ family = genl_ctrl_resolve(th->sk_cmd, "nlctrl");
+ if (family != GENL_ID_CTRL)
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
diff --git a/tools/lib/thermal/events.c b/tools/lib/thermal/events.c
new file mode 100644
index 000000000000..a7a55d1a0c4c
--- /dev/null
+++ b/tools/lib/thermal/events.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <linux/netlink.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+
+#include <thermal.h>
+#include "thermal_nl.h"
+
+/*
+ * Optimization: this array records which events we actually want to pay
+ * attention to. It is filled at init time from the ops structure: each
+ * provided callback enables its event, and the generic handler discards
+ * any event that has no callback associated with it.
+ */
+static int enabled_ops[__THERMAL_GENL_EVENT_MAX];
+
+static int handle_thermal_event(struct nl_msg *n, void *arg)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(n);
+ struct genlmsghdr *genlhdr = genlmsg_hdr(nlh);
+ struct nlattr *attrs[THERMAL_GENL_ATTR_MAX + 1];
+ struct thermal_handler_param *thp = arg;
+ struct thermal_events_ops *ops = &thp->th->ops->events;
+
+ genlmsg_parse(nlh, 0, attrs, THERMAL_GENL_ATTR_MAX, NULL);
+
+ arg = thp->arg;
+
+ /*
+ * This is an event we don't care about, so bail out.
+ */
+ if (!enabled_ops[genlhdr->cmd])
+ return THERMAL_SUCCESS;
+
+ switch (genlhdr->cmd) {
+
+ case THERMAL_GENL_EVENT_TZ_CREATE:
+ return ops->tz_create(nla_get_string(attrs[THERMAL_GENL_ATTR_TZ_NAME]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_DELETE:
+ return ops->tz_delete(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_ENABLE:
+ return ops->tz_enable(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_DISABLE:
+ return ops->tz_disable(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_CHANGE:
+ return ops->trip_change(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_TYPE]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_TEMP]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_HYST]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_ADD:
+ return ops->trip_add(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_TYPE]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_TEMP]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_HYST]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_DELETE:
+ return ops->trip_delete(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_UP:
+ return ops->trip_high(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TEMP]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_TRIP_DOWN:
+ return ops->trip_low(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TRIP_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TEMP]), arg);
+
+ case THERMAL_GENL_EVENT_CDEV_ADD:
+ return ops->cdev_add(nla_get_string(attrs[THERMAL_GENL_ATTR_CDEV_NAME]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_MAX_STATE]), arg);
+
+ case THERMAL_GENL_EVENT_CDEV_DELETE:
+ return ops->cdev_delete(nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_ID]), arg);
+
+ case THERMAL_GENL_EVENT_CDEV_STATE_UPDATE:
+ return ops->cdev_update(nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_CDEV_CUR_STATE]), arg);
+
+ case THERMAL_GENL_EVENT_TZ_GOV_CHANGE:
+ return ops->gov_change(nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_string(attrs[THERMAL_GENL_ATTR_GOV_NAME]), arg);
+ default:
+ return -1;
+ }
+}
+
+static void thermal_events_ops_init(struct thermal_events_ops *ops)
+{
+ enabled_ops[THERMAL_GENL_EVENT_TZ_CREATE] = !!ops->tz_create;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_DELETE] = !!ops->tz_delete;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_DISABLE] = !!ops->tz_disable;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_ENABLE] = !!ops->tz_enable;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_UP] = !!ops->trip_high;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_DOWN] = !!ops->trip_low;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_CHANGE] = !!ops->trip_change;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_ADD] = !!ops->trip_add;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_TRIP_DELETE] = !!ops->trip_delete;
+ enabled_ops[THERMAL_GENL_EVENT_CDEV_ADD] = !!ops->cdev_add;
+ enabled_ops[THERMAL_GENL_EVENT_CDEV_DELETE] = !!ops->cdev_delete;
+ enabled_ops[THERMAL_GENL_EVENT_CDEV_STATE_UPDATE] = !!ops->cdev_update;
+ enabled_ops[THERMAL_GENL_EVENT_TZ_GOV_CHANGE] = !!ops->gov_change;
+}
+
+thermal_error_t thermal_events_handle(struct thermal_handler *th, void *arg)
+{
+ struct thermal_handler_param thp = { .th = th, .arg = arg };
+
+ if (!th)
+ return THERMAL_ERROR;
+
+ if (nl_cb_set(th->cb_event, NL_CB_VALID, NL_CB_CUSTOM,
+ handle_thermal_event, &thp))
+ return THERMAL_ERROR;
+
+ return nl_recvmsgs(th->sk_event, th->cb_event);
+}
+
+int thermal_events_fd(struct thermal_handler *th)
+{
+ if (!th)
+ return -1;
+
+ return nl_socket_get_fd(th->sk_event);
+}
+
+thermal_error_t thermal_events_exit(struct thermal_handler *th)
+{
+ if (nl_unsubscribe_thermal(th->sk_event, th->cb_event,
+ THERMAL_GENL_EVENT_GROUP_NAME))
+ return THERMAL_ERROR;
+
+ nl_thermal_disconnect(th->sk_event, th->cb_event);
+
+ return THERMAL_SUCCESS;
+}
+
+thermal_error_t thermal_events_init(struct thermal_handler *th)
+{
+ thermal_events_ops_init(&th->ops->events);
+
+ if (nl_thermal_connect(&th->sk_event, &th->cb_event))
+ return THERMAL_ERROR;
+
+ if (nl_subscribe_thermal(th->sk_event, th->cb_event,
+ THERMAL_GENL_EVENT_GROUP_NAME))
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
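
Because only events with a registered callback are marked in enabled_ops[], a consumer fills just the subset of thermal_events_ops it cares about, hands it to thermal_init(), and then waits on thermal_events_fd() before letting thermal_events_handle() dispatch. A hedged sketch of that loop, error handling trimmed:

    #include <poll.h>
    #include <stdio.h>
    #include <thermal.h>

    static int on_trip_high(int tz_id, int trip_id, int temp, void *arg)
    {
            (void)arg;
            printf("tz %d: trip %d crossed, temp=%d\n", tz_id, trip_id, temp);
            return 0;
    }

    /* th comes from thermal_init() with .events.trip_high = on_trip_high */
    static void event_loop(struct thermal_handler *th)
    {
            struct pollfd pfd = { .fd = thermal_events_fd(th), .events = POLLIN };

            while (poll(&pfd, 1, -1) > 0)
                    thermal_events_handle(th, NULL);
    }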
diff --git a/tools/lib/thermal/include/thermal.h b/tools/lib/thermal/include/thermal.h
new file mode 100644
index 000000000000..1abc560602cf
--- /dev/null
+++ b/tools/lib/thermal/include/thermal.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org> */
+#ifndef __LIBTHERMAL_H
+#define __LIBTHERMAL_H
+
+#include <linux/thermal.h>
+
+#ifndef LIBTHERMAL_API
+#define LIBTHERMAL_API __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct thermal_sampling_ops {
+ int (*tz_temp)(int tz_id, int temp, void *arg);
+};
+
+struct thermal_events_ops {
+ int (*tz_create)(const char *name, int tz_id, void *arg);
+ int (*tz_delete)(int tz_id, void *arg);
+ int (*tz_enable)(int tz_id, void *arg);
+ int (*tz_disable)(int tz_id, void *arg);
+ int (*trip_high)(int tz_id, int trip_id, int temp, void *arg);
+ int (*trip_low)(int tz_id, int trip_id, int temp, void *arg);
+ int (*trip_add)(int tz_id, int trip_id, int type, int temp, int hyst, void *arg);
+ int (*trip_change)(int tz_id, int trip_id, int type, int temp, int hyst, void *arg);
+ int (*trip_delete)(int tz_id, int trip_id, void *arg);
+ int (*cdev_add)(const char *name, int cdev_id, int max_state, void *arg);
+ int (*cdev_delete)(int cdev_id, void *arg);
+ int (*cdev_update)(int cdev_id, int cur_state, void *arg);
+ int (*gov_change)(int tz_id, const char *gov_name, void *arg);
+};
+
+struct thermal_ops {
+ struct thermal_sampling_ops sampling;
+ struct thermal_events_ops events;
+};
+
+struct thermal_trip {
+ int id;
+ int type;
+ int temp;
+ int hyst;
+};
+
+struct thermal_zone {
+ int id;
+ int temp;
+ char name[THERMAL_NAME_LENGTH];
+ char governor[THERMAL_NAME_LENGTH];
+ struct thermal_trip *trip;
+};
+
+struct thermal_cdev {
+ int id;
+ char name[THERMAL_NAME_LENGTH];
+ int max_state;
+ int min_state;
+ int cur_state;
+};
+
+typedef enum {
+ THERMAL_ERROR = -1,
+ THERMAL_SUCCESS = 0,
+} thermal_error_t;
+
+struct thermal_handler;
+
+typedef int (*cb_tz_t)(struct thermal_zone *, void *);
+
+typedef int (*cb_tt_t)(struct thermal_trip *, void *);
+
+typedef int (*cb_tc_t)(struct thermal_cdev *, void *);
+
+LIBTHERMAL_API int for_each_thermal_zone(struct thermal_zone *tz, cb_tz_t cb, void *arg);
+
+LIBTHERMAL_API int for_each_thermal_trip(struct thermal_trip *tt, cb_tt_t cb, void *arg);
+
+LIBTHERMAL_API int for_each_thermal_cdev(struct thermal_cdev *cdev, cb_tc_t cb, void *arg);
+
+LIBTHERMAL_API struct thermal_zone *thermal_zone_find_by_name(struct thermal_zone *tz,
+ const char *name);
+
+LIBTHERMAL_API struct thermal_zone *thermal_zone_find_by_id(struct thermal_zone *tz, int id);
+
+LIBTHERMAL_API struct thermal_zone *thermal_zone_discover(struct thermal_handler *th);
+
+LIBTHERMAL_API struct thermal_handler *thermal_init(struct thermal_ops *ops);
+
+LIBTHERMAL_API void thermal_exit(struct thermal_handler *th);
+
+/*
+ * Netlink thermal events
+ */
+LIBTHERMAL_API thermal_error_t thermal_events_exit(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_events_init(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_events_handle(struct thermal_handler *th, void *arg);
+
+LIBTHERMAL_API int thermal_events_fd(struct thermal_handler *th);
+
+/*
+ * Netlink thermal commands
+ */
+LIBTHERMAL_API thermal_error_t thermal_cmd_exit(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_init(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_tz(struct thermal_handler *th,
+ struct thermal_zone **tz);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_cdev(struct thermal_handler *th,
+ struct thermal_cdev **tc);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_trip(struct thermal_handler *th,
+ struct thermal_zone *tz);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_governor(struct thermal_handler *th,
+ struct thermal_zone *tz);
+
+LIBTHERMAL_API thermal_error_t thermal_cmd_get_temp(struct thermal_handler *th,
+ struct thermal_zone *tz);
+
+/*
+ * Netlink thermal samples
+ */
+LIBTHERMAL_API thermal_error_t thermal_sampling_exit(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_sampling_init(struct thermal_handler *th);
+
+LIBTHERMAL_API thermal_error_t thermal_sampling_handle(struct thermal_handler *th, void *arg);
+
+LIBTHERMAL_API int thermal_sampling_fd(struct thermal_handler *th);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __LIBTHERMAL_H */
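
Tying the exported pieces together: thermal_init() takes a thermal_ops with whichever callbacks the caller implements, and the command side can then enumerate zones synchronously. A minimal sketch against the API declared above, linked against libnl as the pkg-config template below specifies; the callback body is illustrative only:

    #include <stdio.h>
    #include <thermal.h>

    static int show_tz(struct thermal_zone *tz, void *arg)
    {
            (void)arg;
            printf("zone %d: %s (governor: %s)\n", tz->id, tz->name, tz->governor);
            return 0;
    }

    int main(void)
    {
            static struct thermal_ops ops;  /* no callbacks needed for plain commands */
            struct thermal_handler *th = thermal_init(&ops);
            struct thermal_zone *tz;

            if (!th)
                    return 1;

            tz = thermal_zone_discover(th); /* fills trips and governor per zone */
            if (tz)
                    for_each_thermal_zone(tz, show_tz, NULL);

            thermal_exit(th);
            return 0;
    }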
diff --git a/tools/lib/thermal/libthermal.map b/tools/lib/thermal/libthermal.map
new file mode 100644
index 000000000000..d5e77738c7a4
--- /dev/null
+++ b/tools/lib/thermal/libthermal.map
@@ -0,0 +1,25 @@
+LIBTHERMAL_0.0.1 {
+ global:
+ thermal_init;
+ for_each_thermal_zone;
+ for_each_thermal_trip;
+ for_each_thermal_cdev;
+ thermal_zone_find_by_name;
+ thermal_zone_find_by_id;
+ thermal_zone_discover;
+ thermal_exit;
+ thermal_events_init;
+ thermal_events_handle;
+ thermal_events_fd;
+ thermal_cmd_init;
+ thermal_cmd_get_tz;
+ thermal_cmd_get_cdev;
+ thermal_cmd_get_trip;
+ thermal_cmd_get_governor;
+ thermal_cmd_get_temp;
+ thermal_sampling_init;
+ thermal_sampling_handle;
+ thermal_sampling_fd;
+local:
+ *;
+};
diff --git a/tools/lib/thermal/libthermal.pc.template b/tools/lib/thermal/libthermal.pc.template
new file mode 100644
index 000000000000..ac24d0ab17f5
--- /dev/null
+++ b/tools/lib/thermal/libthermal.pc.template
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+prefix=@PREFIX@
+libdir=@LIBDIR@
+includedir=${prefix}/include
+
+Name: libthermal
+Description: thermal library
+Requires: libnl-3.0 libnl-genl-3.0
+Version: @VERSION@
+Libs: -L${libdir} -lnl-genl-3 -lnl-3
+Cflags: -I${includedir} -I${includedir}/libnl3
diff --git a/tools/lib/thermal/sampling.c b/tools/lib/thermal/sampling.c
new file mode 100644
index 000000000000..70577423a9f0
--- /dev/null
+++ b/tools/lib/thermal/sampling.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <thermal.h>
+#include "thermal_nl.h"
+
+static int handle_thermal_sample(struct nl_msg *n, void *arg)
+{
+ struct nlmsghdr *nlh = nlmsg_hdr(n);
+ struct genlmsghdr *genlhdr = genlmsg_hdr(nlh);
+ struct nlattr *attrs[THERMAL_GENL_ATTR_MAX + 1];
+ struct thermal_handler_param *thp = arg;
+ struct thermal_handler *th = thp->th;
+
+ genlmsg_parse(nlh, 0, attrs, THERMAL_GENL_ATTR_MAX, NULL);
+
+ switch (genlhdr->cmd) {
+
+ case THERMAL_GENL_SAMPLING_TEMP:
+ return th->ops->sampling.tz_temp(
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_ID]),
+ nla_get_u32(attrs[THERMAL_GENL_ATTR_TZ_TEMP]), thp->arg);
+ default:
+ return THERMAL_ERROR;
+ }
+}
+
+thermal_error_t thermal_sampling_handle(struct thermal_handler *th, void *arg)
+{
+ struct thermal_handler_param thp = { .th = th, .arg = arg };
+
+ if (!th)
+ return THERMAL_ERROR;
+
+ if (nl_cb_set(th->cb_sampling, NL_CB_VALID, NL_CB_CUSTOM,
+ handle_thermal_sample, &thp))
+ return THERMAL_ERROR;
+
+ return nl_recvmsgs(th->sk_sampling, th->cb_sampling);
+}
+
+int thermal_sampling_fd(struct thermal_handler *th)
+{
+ if (!th)
+ return -1;
+
+ return nl_socket_get_fd(th->sk_sampling);
+}
+
+thermal_error_t thermal_sampling_exit(struct thermal_handler *th)
+{
+ if (nl_unsubscribe_thermal(th->sk_sampling, th->cb_sampling,
+ THERMAL_GENL_SAMPLING_GROUP_NAME))
+ return THERMAL_ERROR;
+
+ nl_thermal_disconnect(th->sk_sampling, th->cb_sampling);
+
+ return THERMAL_SUCCESS;
+}
+
+thermal_error_t thermal_sampling_init(struct thermal_handler *th)
+{
+ if (nl_thermal_connect(&th->sk_sampling, &th->cb_sampling))
+ return THERMAL_ERROR;
+
+ if (nl_subscribe_thermal(th->sk_sampling, th->cb_sampling,
+ THERMAL_GENL_SAMPLING_GROUP_NAME))
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
diff --git a/tools/lib/thermal/thermal.c b/tools/lib/thermal/thermal.c
new file mode 100644
index 000000000000..72a76dc205bc
--- /dev/null
+++ b/tools/lib/thermal/thermal.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <thermal.h>
+
+#include "thermal_nl.h"
+
+int for_each_thermal_cdev(struct thermal_cdev *cdev, cb_tc_t cb, void *arg)
+{
+ int i, ret = 0;
+
+ if (!cdev)
+ return 0;
+
+ for (i = 0; cdev[i].id != -1; i++)
+ ret |= cb(&cdev[i], arg);
+
+ return ret;
+}
+
+int for_each_thermal_trip(struct thermal_trip *tt, cb_tt_t cb, void *arg)
+{
+ int i, ret = 0;
+
+ if (!tt)
+ return 0;
+
+ for (i = 0; tt[i].id != -1; i++)
+ ret |= cb(&tt[i], arg);
+
+ return ret;
+}
+
+int for_each_thermal_zone(struct thermal_zone *tz, cb_tz_t cb, void *arg)
+{
+ int i, ret = 0;
+
+ if (!tz)
+ return 0;
+
+ for (i = 0; tz[i].id != -1; i++)
+ ret |= cb(&tz[i], arg);
+
+ return ret;
+}
+
+struct thermal_zone *thermal_zone_find_by_name(struct thermal_zone *tz,
+ const char *name)
+{
+ int i;
+
+ if (!tz || !name)
+ return NULL;
+
+ for (i = 0; tz[i].id != -1; i++) {
+ if (!strcmp(tz[i].name, name))
+ return &tz[i];
+ }
+
+ return NULL;
+}
+
+struct thermal_zone *thermal_zone_find_by_id(struct thermal_zone *tz, int id)
+{
+ int i;
+
+ if (!tz || id < 0)
+ return NULL;
+
+ for (i = 0; tz[i].id != -1; i++) {
+ if (tz[i].id == id)
+ return &tz[i];
+ }
+
+ return NULL;
+}
+
+static int __thermal_zone_discover(struct thermal_zone *tz, void *th)
+{
+ if (thermal_cmd_get_trip(th, tz) < 0)
+ return -1;
+
+ if (thermal_cmd_get_governor(th, tz))
+ return -1;
+
+ return 0;
+}
+
+struct thermal_zone *thermal_zone_discover(struct thermal_handler *th)
+{
+ struct thermal_zone *tz;
+
+ if (thermal_cmd_get_tz(th, &tz) < 0)
+ return NULL;
+
+ if (for_each_thermal_zone(tz, __thermal_zone_discover, th))
+ return NULL;
+
+ return tz;
+}
+
+void thermal_exit(struct thermal_handler *th)
+{
+ thermal_cmd_exit(th);
+ thermal_events_exit(th);
+ thermal_sampling_exit(th);
+
+ free(th);
+}
+
+struct thermal_handler *thermal_init(struct thermal_ops *ops)
+{
+ struct thermal_handler *th;
+
+ th = malloc(sizeof(*th));
+ if (!th)
+ return NULL;
+ th->ops = ops;
+
+ if (thermal_events_init(th))
+ goto out_free;
+
+ if (thermal_sampling_init(th))
+ goto out_free;
+
+ if (thermal_cmd_init(th))
+ goto out_free;
+
+ return th;
+
+out_free:
+ free(th);
+
+ return NULL;
+}
diff --git a/tools/lib/thermal/thermal_nl.c b/tools/lib/thermal/thermal_nl.c
new file mode 100644
index 000000000000..b05cf9569858
--- /dev/null
+++ b/tools/lib/thermal/thermal_nl.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: LGPL-2.1+
+// Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <thermal.h>
+#include "thermal_nl.h"
+
+struct handler_args {
+ const char *group;
+ int id;
+};
+
+static __thread int err;
+static __thread int done;
+
+static int nl_seq_check_handler(struct nl_msg *msg, void *arg)
+{
+ return NL_OK;
+}
+
+static int nl_error_handler(struct sockaddr_nl *nla, struct nlmsgerr *nl_err,
+ void *arg)
+{
+ int *ret = arg;
+
+ if (ret)
+ *ret = nl_err->error;
+
+ return NL_STOP;
+}
+
+static int nl_finish_handler(struct nl_msg *msg, void *arg)
+{
+ int *ret = arg;
+
+ if (ret)
+ *ret = 1;
+
+ return NL_OK;
+}
+
+static int nl_ack_handler(struct nl_msg *msg, void *arg)
+{
+ int *ret = arg;
+
+ if (ret)
+ *ret = 1;
+
+ return NL_OK;
+}
+
+int nl_send_msg(struct nl_sock *sock, struct nl_cb *cb, struct nl_msg *msg,
+ int (*rx_handler)(struct nl_msg *, void *), void *data)
+{
+ if (!rx_handler)
+ return THERMAL_ERROR;
+
+ err = nl_send_auto_complete(sock, msg);
+ if (err < 0)
+ return err;
+
+ nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, rx_handler, data);
+
+ err = done = 0;
+
+ while (err == 0 && done == 0)
+ nl_recvmsgs(sock, cb);
+
+ return err;
+}
+
+static int nl_family_handler(struct nl_msg *msg, void *arg)
+{
+ struct handler_args *grp = arg;
+ struct nlattr *tb[CTRL_ATTR_MAX + 1];
+ struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
+ struct nlattr *mcgrp;
+ int rem_mcgrp;
+
+ nla_parse(tb, CTRL_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
+ genlmsg_attrlen(gnlh, 0), NULL);
+
+ if (!tb[CTRL_ATTR_MCAST_GROUPS])
+ return THERMAL_ERROR;
+
+ nla_for_each_nested(mcgrp, tb[CTRL_ATTR_MCAST_GROUPS], rem_mcgrp) {
+
+ struct nlattr *tb_mcgrp[CTRL_ATTR_MCAST_GRP_MAX + 1];
+
+ nla_parse(tb_mcgrp, CTRL_ATTR_MCAST_GRP_MAX,
+ nla_data(mcgrp), nla_len(mcgrp), NULL);
+
+ if (!tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME] ||
+ !tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID])
+ continue;
+
+ if (strncmp(nla_data(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME]),
+ grp->group,
+ nla_len(tb_mcgrp[CTRL_ATTR_MCAST_GRP_NAME])))
+ continue;
+
+ grp->id = nla_get_u32(tb_mcgrp[CTRL_ATTR_MCAST_GRP_ID]);
+
+ break;
+ }
+
+ return THERMAL_SUCCESS;
+}
+
+static int nl_get_multicast_id(struct nl_sock *sock, struct nl_cb *cb,
+ const char *family, const char *group)
+{
+ struct nl_msg *msg;
+ int ret = 0, ctrlid;
+ struct handler_args grp = {
+ .group = group,
+ .id = -ENOENT,
+ };
+
+ msg = nlmsg_alloc();
+ if (!msg)
+ return THERMAL_ERROR;
+
+ ctrlid = genl_ctrl_resolve(sock, "nlctrl");
+
+ genlmsg_put(msg, 0, 0, ctrlid, 0, 0, CTRL_CMD_GETFAMILY, 0);
+
+ nla_put_string(msg, CTRL_ATTR_FAMILY_NAME, family);
+
+ ret = nl_send_msg(sock, cb, msg, nl_family_handler, &grp);
+ if (ret)
+ goto nla_put_failure;
+
+ ret = grp.id;
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return ret;
+}
+
+int nl_thermal_connect(struct nl_sock **nl_sock, struct nl_cb **nl_cb)
+{
+ struct nl_cb *cb;
+ struct nl_sock *sock;
+
+ cb = nl_cb_alloc(NL_CB_DEFAULT);
+ if (!cb)
+ return THERMAL_ERROR;
+
+ sock = nl_socket_alloc();
+ if (!sock)
+ goto out_cb_free;
+
+ if (genl_connect(sock))
+ goto out_socket_free;
+
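+	/*
+	 * Route errors, ACKs and NLMSG_DONE completions into the per-thread
+	 * err/done flags; accept any sequence number.
+	 */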
+ if (nl_cb_err(cb, NL_CB_CUSTOM, nl_error_handler, &err) ||
+ nl_cb_set(cb, NL_CB_FINISH, NL_CB_CUSTOM, nl_finish_handler, &done) ||
+ nl_cb_set(cb, NL_CB_ACK, NL_CB_CUSTOM, nl_ack_handler, &done) ||
+ nl_cb_set(cb, NL_CB_SEQ_CHECK, NL_CB_CUSTOM, nl_seq_check_handler, &done))
+		goto out_socket_free;
+
+ *nl_sock = sock;
+ *nl_cb = cb;
+
+ return THERMAL_SUCCESS;
+
+out_socket_free:
+ nl_socket_free(sock);
+out_cb_free:
+ nl_cb_put(cb);
+ return THERMAL_ERROR;
+}
+
+void nl_thermal_disconnect(struct nl_sock *nl_sock, struct nl_cb *nl_cb)
+{
+ nl_close(nl_sock);
+ nl_socket_free(nl_sock);
+ nl_cb_put(nl_cb);
+}
+
+int nl_unsubscribe_thermal(struct nl_sock *nl_sock, struct nl_cb *nl_cb,
+ const char *group)
+{
+ int mcid;
+
+ mcid = nl_get_multicast_id(nl_sock, nl_cb, THERMAL_GENL_FAMILY_NAME,
+ group);
+ if (mcid < 0)
+ return THERMAL_ERROR;
+
+ if (nl_socket_drop_membership(nl_sock, mcid))
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
+
+int nl_subscribe_thermal(struct nl_sock *nl_sock, struct nl_cb *nl_cb,
+ const char *group)
+{
+ int mcid;
+
+ mcid = nl_get_multicast_id(nl_sock, nl_cb, THERMAL_GENL_FAMILY_NAME,
+ group);
+ if (mcid < 0)
+ return THERMAL_ERROR;
+
+ if (nl_socket_add_membership(nl_sock, mcid))
+ return THERMAL_ERROR;
+
+ return THERMAL_SUCCESS;
+}
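
Taken together, nl_thermal_connect() hands back a ready socket/callback pair, nl_send_msg() drives a single request/response exchange on it, and nl_subscribe_thermal() / nl_unsubscribe_thermal() join or leave one of the thermal generic netlink multicast groups. The per-channel setup routines called from thermal_init() (thermal_events_init() and friends, added elsewhere in this series) are expected to combine them roughly as in this hypothetical sketch, where THERMAL_GENL_EVENT_GROUP_NAME is assumed to come from the kernel's thermal netlink UAPI header:

    /* Sketch only: open the event channel and join its multicast group. */
    static int example_events_init(struct thermal_handler *th)
    {
            if (nl_thermal_connect(&th->sk_event, &th->cb_event))
                    return THERMAL_ERROR;

            if (nl_subscribe_thermal(th->sk_event, th->cb_event,
                                     THERMAL_GENL_EVENT_GROUP_NAME))
                    return THERMAL_ERROR;

            return THERMAL_SUCCESS;
    }
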
diff --git a/tools/lib/thermal/thermal_nl.h b/tools/lib/thermal/thermal_nl.h
new file mode 100644
index 000000000000..ddf635642f07
--- /dev/null
+++ b/tools/lib/thermal/thermal_nl.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/* Copyright (C) 2022, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org> */
+#ifndef __THERMAL_H
+#define __THERMAL_H
+
+#include <netlink/netlink.h>
+#include <netlink/genl/genl.h>
+#include <netlink/genl/mngt.h>
+#include <netlink/genl/ctrl.h>
+
+struct thermal_handler {
+ int done;
+ int error;
+ struct thermal_ops *ops;
+ struct nl_msg *msg;
+ struct nl_sock *sk_event;
+ struct nl_sock *sk_sampling;
+ struct nl_sock *sk_cmd;
+ struct nl_cb *cb_cmd;
+ struct nl_cb *cb_event;
+ struct nl_cb *cb_sampling;
+};
+
+struct thermal_handler_param {
+ struct thermal_handler *th;
+ void *arg;
+};
+
+/*
+ * Low level netlink
+ */
+extern int nl_subscribe_thermal(struct nl_sock *nl_sock, struct nl_cb *nl_cb,
+ const char *group);
+
+extern int nl_unsubscribe_thermal(struct nl_sock *nl_sock, struct nl_cb *nl_cb,
+ const char *group);
+
+extern int nl_thermal_connect(struct nl_sock **nl_sock, struct nl_cb **nl_cb);
+
+extern void nl_thermal_disconnect(struct nl_sock *nl_sock, struct nl_cb *nl_cb);
+
+extern int nl_send_msg(struct nl_sock *sock, struct nl_cb *nl_cb, struct nl_msg *msg,
+ int (*rx_handler)(struct nl_msg *, void *),
+ void *data);
+
+#endif /* __THERMAL_H */
diff --git a/tools/lib/traceevent/.gitignore b/tools/lib/traceevent/.gitignore
deleted file mode 100644
index 7123c70b9ebc..000000000000
--- a/tools/lib/traceevent/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-TRACEEVENT-CFLAGS
-libtraceevent-dynamic-list
-libtraceevent.so.*
diff --git a/tools/lib/traceevent/Build b/tools/lib/traceevent/Build
deleted file mode 100644
index f9a5d79578f5..000000000000
--- a/tools/lib/traceevent/Build
+++ /dev/null
@@ -1,8 +0,0 @@
-libtraceevent-y += event-parse.o
-libtraceevent-y += event-plugin.o
-libtraceevent-y += trace-seq.o
-libtraceevent-y += parse-filter.o
-libtraceevent-y += parse-utils.o
-libtraceevent-y += kbuffer-parse.o
-libtraceevent-y += tep_strerror.o
-libtraceevent-y += event-parse-api.o
diff --git a/tools/lib/traceevent/Documentation/Makefile b/tools/lib/traceevent/Documentation/Makefile
deleted file mode 100644
index aa72ab96c3c1..000000000000
--- a/tools/lib/traceevent/Documentation/Makefile
+++ /dev/null
@@ -1,207 +0,0 @@
-include ../../../scripts/Makefile.include
-include ../../../scripts/utilities.mak
-
-# This Makefile and manpage XSL files were taken from tools/perf/Documentation
-# and modified for libtraceevent.
-
-MAN3_TXT= \
- $(wildcard libtraceevent-*.txt) \
- libtraceevent.txt
-
-MAN_TXT = $(MAN3_TXT)
-_MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT))
-_MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT))
-_DOC_MAN3=$(patsubst %.txt,%.3,$(MAN3_TXT))
-
-MAN_XML=$(addprefix $(OUTPUT),$(_MAN_XML))
-MAN_HTML=$(addprefix $(OUTPUT),$(_MAN_HTML))
-DOC_MAN3=$(addprefix $(OUTPUT),$(_DOC_MAN3))
-
-# Make the path relative to DESTDIR, not prefix
-ifndef DESTDIR
-prefix?=$(HOME)
-endif
-bindir?=$(prefix)/bin
-htmldir?=$(prefix)/share/doc/libtraceevent-doc
-pdfdir?=$(prefix)/share/doc/libtraceevent-doc
-mandir?=$(prefix)/share/man
-man3dir=$(mandir)/man3
-
-ASCIIDOC=asciidoc
-ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf
-ASCIIDOC_HTML = xhtml11
-MANPAGE_XSL = manpage-normal.xsl
-XMLTO_EXTRA =
-INSTALL?=install
-RM ?= rm -f
-
-ifdef USE_ASCIIDOCTOR
-ASCIIDOC = asciidoctor
-ASCIIDOC_EXTRA = -a compat-mode
-ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
-ASCIIDOC_EXTRA += -a mansource="libtraceevent" -a manmanual="libtraceevent Manual"
-ASCIIDOC_HTML = xhtml5
-endif
-
-XMLTO=xmlto
-
-_tmp_tool_path := $(call get-executable,$(ASCIIDOC))
-ifeq ($(_tmp_tool_path),)
- missing_tools = $(ASCIIDOC)
-endif
-
-ifndef USE_ASCIIDOCTOR
-_tmp_tool_path := $(call get-executable,$(XMLTO))
-ifeq ($(_tmp_tool_path),)
- missing_tools += $(XMLTO)
-endif
-endif
-
-#
-# For asciidoc ...
-# -7.1.2, no extra settings are needed.
-# 8.0-, set ASCIIDOC8.
-#
-
-#
-# For docbook-xsl ...
-# -1.68.1, set ASCIIDOC_NO_ROFF? (based on changelog from 1.73.0)
-# 1.69.0, no extra settings are needed?
-# 1.69.1-1.71.0, set DOCBOOK_SUPPRESS_SP?
-# 1.71.1, no extra settings are needed?
-# 1.72.0, set DOCBOOK_XSL_172.
-# 1.73.0-, set ASCIIDOC_NO_ROFF
-#
-
-#
-# If you had been using DOCBOOK_XSL_172 in an attempt to get rid
-# of 'the ".ft C" problem' in your generated manpages, and you
-# instead ended up with weird characters around callouts, try
-# using ASCIIDOC_NO_ROFF instead (it works fine with ASCIIDOC8).
-#
-
-ifdef ASCIIDOC8
-ASCIIDOC_EXTRA += -a asciidoc7compatible
-endif
-ifdef DOCBOOK_XSL_172
-ASCIIDOC_EXTRA += -a libtraceevent-asciidoc-no-roff
-MANPAGE_XSL = manpage-1.72.xsl
-else
- ifdef ASCIIDOC_NO_ROFF
- # docbook-xsl after 1.72 needs the regular XSL, but will not
- # pass-thru raw roff codes from asciidoc.conf, so turn them off.
- ASCIIDOC_EXTRA += -a libtraceevent-asciidoc-no-roff
- endif
-endif
-ifdef MAN_BOLD_LITERAL
-XMLTO_EXTRA += -m manpage-bold-literal.xsl
-endif
-ifdef DOCBOOK_SUPPRESS_SP
-XMLTO_EXTRA += -m manpage-suppress-sp.xsl
-endif
-
-SHELL_PATH ?= $(SHELL)
-# Shell quote;
-SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
-
-DESTDIR ?=
-DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
-
-export DESTDIR DESTDIR_SQ
-
-#
-# Please note that there is a minor bug in asciidoc.
-# The version after 6.0.3 _will_ include the patch found here:
-# http://marc.theaimsgroup.com/?l=libtraceevent&m=111558757202243&w=2
-#
-# Until that version is released you may have to apply the patch
-# yourself - yes, all 6 characters of it!
-#
-QUIET_SUBDIR0 = +$(MAKE) -C # space to separate -C and subdir
-QUIET_SUBDIR1 =
-
-ifneq ($(findstring $(MAKEFLAGS),w),w)
-PRINT_DIR = --no-print-directory
-else # "make -w"
-NO_SUBDIR = :
-endif
-
-ifneq ($(findstring $(MAKEFLAGS),s),s)
-ifneq ($(V),1)
- QUIET_ASCIIDOC = @echo ' ASCIIDOC '$@;
- QUIET_XMLTO = @echo ' XMLTO '$@;
- QUIET_SUBDIR0 = +@subdir=
- QUIET_SUBDIR1 = ;$(NO_SUBDIR) \
- echo ' SUBDIR ' $$subdir; \
- $(MAKE) $(PRINT_DIR) -C $$subdir
- export V
-endif
-endif
-
-all: html man
-
-man: man3
-man3: $(DOC_MAN3)
-
-html: $(MAN_HTML)
-
-$(MAN_HTML) $(DOC_MAN3): asciidoc.conf
-
-install: install-man
-
-check-man-tools:
-ifdef missing_tools
- $(error "You need to install $(missing_tools) for man pages")
-endif
-
-do-install-man: man
- $(call QUIET_INSTALL, Documentation-man) \
- $(INSTALL) -d -m 755 $(DESTDIR)$(man3dir); \
- $(INSTALL) -m 644 $(DOC_MAN3) $(DESTDIR)$(man3dir);
-
-install-man: check-man-tools man do-install-man
-
-uninstall: uninstall-man
-
-uninstall-man:
- $(call QUIET_UNINST, Documentation-man) \
- $(Q)$(RM) $(addprefix $(DESTDIR)$(man3dir)/,$(DOC_MAN3))
-
-
-ifdef missing_tools
- DO_INSTALL_MAN = $(warning Please install $(missing_tools) to have the man pages installed)
-else
- DO_INSTALL_MAN = do-install-man
-endif
-
-CLEAN_FILES = \
- $(MAN_XML) $(addsuffix +,$(MAN_XML)) \
- $(MAN_HTML) $(addsuffix +,$(MAN_HTML)) \
- $(DOC_MAN3) *.3
-
-clean:
- $(call QUIET_CLEAN, Documentation) $(RM) $(CLEAN_FILES)
-
-ifdef USE_ASCIIDOCTOR
-$(OUTPUT)%.3 : $(OUTPUT)%.txt
- $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
- $(ASCIIDOC) -b manpage -d manpage \
- $(ASCIIDOC_EXTRA) -alibtraceevent_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
- mv $@+ $@
-endif
-
-$(OUTPUT)%.3 : $(OUTPUT)%.xml
- $(QUIET_XMLTO)$(RM) $@ && \
- $(XMLTO) -o $(OUTPUT). -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
-
-$(OUTPUT)%.xml : %.txt
- $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
- $(ASCIIDOC) -b docbook -d manpage \
- $(ASCIIDOC_EXTRA) -alibtraceevent_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
- mv $@+ $@
-
-$(MAN_HTML): $(OUTPUT)%.html : %.txt
- $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
- $(ASCIIDOC) -b $(ASCIIDOC_HTML) -d manpage \
- $(ASCIIDOC_EXTRA) -aperf_version=$(EVENT_PARSE_VERSION) -o $@+ $< && \
- mv $@+ $@
diff --git a/tools/lib/traceevent/Documentation/asciidoc.conf b/tools/lib/traceevent/Documentation/asciidoc.conf
deleted file mode 100644
index 07595717f06e..000000000000
--- a/tools/lib/traceevent/Documentation/asciidoc.conf
+++ /dev/null
@@ -1,120 +0,0 @@
-## linktep: macro
-#
-# Usage: linktep:command[manpage-section]
-#
-# Note, {0} is the manpage section, while {target} is the command.
-#
-# Show TEP link as: <command>(<section>); if section is defined, else just show
-# the command.
-
-[macros]
-(?su)[\\]?(?P<name>linktep):(?P<target>\S*?)\[(?P<attrlist>.*?)\]=
-
-[attributes]
-asterisk=&#42;
-plus=&#43;
-caret=&#94;
-startsb=&#91;
-endsb=&#93;
-tilde=&#126;
-
-ifdef::backend-docbook[]
-[linktep-inlinemacro]
-{0%{target}}
-{0#<citerefentry>}
-{0#<refentrytitle>{target}</refentrytitle><manvolnum>{0}</manvolnum>}
-{0#</citerefentry>}
-endif::backend-docbook[]
-
-ifdef::backend-docbook[]
-ifndef::tep-asciidoc-no-roff[]
-# "unbreak" docbook-xsl v1.68 for manpages. v1.69 works with or without this.
-# v1.72 breaks with this because it replaces dots not in roff requests.
-[listingblock]
-<example><title>{title}</title>
-<literallayout>
-ifdef::doctype-manpage[]
-&#10;.ft C&#10;
-endif::doctype-manpage[]
-|
-ifdef::doctype-manpage[]
-&#10;.ft&#10;
-endif::doctype-manpage[]
-</literallayout>
-{title#}</example>
-endif::tep-asciidoc-no-roff[]
-
-ifdef::tep-asciidoc-no-roff[]
-ifdef::doctype-manpage[]
-# The following two small workarounds insert a simple paragraph after screen
-[listingblock]
-<example><title>{title}</title>
-<literallayout>
-|
-</literallayout><simpara></simpara>
-{title#}</example>
-
-[verseblock]
-<formalpara{id? id="{id}"}><title>{title}</title><para>
-{title%}<literallayout{id? id="{id}"}>
-{title#}<literallayout>
-|
-</literallayout>
-{title#}</para></formalpara>
-{title%}<simpara></simpara>
-endif::doctype-manpage[]
-endif::tep-asciidoc-no-roff[]
-endif::backend-docbook[]
-
-ifdef::doctype-manpage[]
-ifdef::backend-docbook[]
-[header]
-template::[header-declarations]
-<refentry>
-<refmeta>
-<refentrytitle>{mantitle}</refentrytitle>
-<manvolnum>{manvolnum}</manvolnum>
-<refmiscinfo class="source">libtraceevent</refmiscinfo>
-<refmiscinfo class="version">{libtraceevent_version}</refmiscinfo>
-<refmiscinfo class="manual">libtraceevent Manual</refmiscinfo>
-</refmeta>
-<refnamediv>
- <refname>{manname1}</refname>
- <refname>{manname2}</refname>
- <refname>{manname3}</refname>
- <refname>{manname4}</refname>
- <refname>{manname5}</refname>
- <refname>{manname6}</refname>
- <refname>{manname7}</refname>
- <refname>{manname8}</refname>
- <refname>{manname9}</refname>
- <refname>{manname10}</refname>
- <refname>{manname11}</refname>
- <refname>{manname12}</refname>
- <refname>{manname13}</refname>
- <refname>{manname14}</refname>
- <refname>{manname15}</refname>
- <refname>{manname16}</refname>
- <refname>{manname17}</refname>
- <refname>{manname18}</refname>
- <refname>{manname19}</refname>
- <refname>{manname20}</refname>
- <refname>{manname21}</refname>
- <refname>{manname22}</refname>
- <refname>{manname23}</refname>
- <refname>{manname24}</refname>
- <refname>{manname25}</refname>
- <refname>{manname26}</refname>
- <refname>{manname27}</refname>
- <refname>{manname28}</refname>
- <refname>{manname29}</refname>
- <refname>{manname30}</refname>
- <refpurpose>{manpurpose}</refpurpose>
-</refnamediv>
-endif::backend-docbook[]
-endif::doctype-manpage[]
-
-ifdef::backend-xhtml11[]
-[linktep-inlinemacro]
-<a href="{target}.html">{target}{0?({0})}</a>
-endif::backend-xhtml11[]
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-commands.txt b/tools/lib/traceevent/Documentation/libtraceevent-commands.txt
deleted file mode 100644
index bec552001f8e..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-commands.txt
+++ /dev/null
@@ -1,153 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_register_comm, tep_override_comm, tep_is_pid_registered,
-tep_data_comm_from_pid, tep_data_pid_from_comm, tep_cmdline_pid -
-Manage pid to process name mappings.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-int *tep_register_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
-int *tep_override_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
-bool *tep_is_pid_registered*(struct tep_handle pass:[*]_tep_, int _pid_);
-const char pass:[*]*tep_data_comm_from_pid*(struct tep_handle pass:[*]_pevent_, int _pid_);
-struct cmdline pass:[*]*tep_data_pid_from_comm*(struct tep_handle pass:[*]_pevent_, const char pass:[*]_comm_, struct cmdline pass:[*]_next_);
-int *tep_cmdline_pid*(struct tep_handle pass:[*]_pevent_, struct cmdline pass:[*]_cmdline_);
---
-
-DESCRIPTION
------------
-These functions can be used to handle the mapping between pid and process name.
-The library builds a cache of these mappings, which is used to display the name
-of the process, instead of its pid. This information can be retrieved from
-the tracefs/saved_cmdlines file.
-
-The _tep_register_comm()_ function registers a _pid_ / process name mapping.
-If a command with the same _pid_ is already registered, an error is returned.
-The _pid_ argument is the process ID, the _comm_ argument is the process name,
-_tep_ is the event context. The _comm_ is duplicated internally.
-
-The _tep_override_comm()_ function registers a _pid_ / process name mapping.
-If a process with the same pid is already registered, the process name string is
-updated with the new one. The _pid_ argument is the process ID, the _comm_
-argument is the process name, _tep_ is the event context. The _comm_ is
-duplicated internally.
-
-The _tep_is_pid_registered()_ function checks if a pid has a process name
-mapping registered. The _pid_ argument is the process ID, _tep_ is the event
-context.
-
-The _tep_data_comm_from_pid()_ function returns the process name for a given
-pid. The _pid_ argument is the process ID, _tep_ is the event context.
-The returned string should not be freed, but will be freed when the _tep_
-handler is closed.
-
-The _tep_data_pid_from_comm()_ function returns a pid for a given process name.
-The _comm_ argument is the process name, _tep_ is the event context.
-The argument _next_ is the cmdline structure to search for the next pid.
-As there may be more than one pid for a given process, the result of this call
-can be passed back into a recurring call in the _next_ parameter, to search for
-the next pid. If _next_ is NULL, it will return the first pid associated with
-the _comm_. The function performs a linear search, so it may be slow.
-
-The _tep_cmdline_pid()_ function returns the pid associated with a given
-_cmdline_. The _tep_ argument is the event context.
-
-RETURN VALUE
-------------
-_tep_register_comm()_ function returns 0 on success. In case of an error -1 is
-returned and errno is set to indicate the cause of the problem: ENOMEM, if there
-is not enough memory to duplicate the _comm_ or EEXIST if a mapping for this
-_pid_ is already registered.
-
-_tep_override_comm()_ function returns 0 on success. In case of an error -1 is
-returned and errno is set to indicate the cause of the problem: ENOMEM, if there
-is not enough memory to duplicate the _comm_.
-
-_tep_is_pid_registered()_ function returns true if the _pid_ has a process name
-mapped to it, false otherwise.
-
-_tep_data_comm_from_pid()_ function returns the process name as string, or the
-string "<...>" if there is no mapping for the given pid.
-
-_tep_data_pid_from_comm()_ function returns a pointer to a struct cmdline, that
-holds a pid for a given process, or NULL if none is found. This result can be
-passed back into a recurring call as the _next_ parameter of the function.
-
-_tep_cmdline_pid()_ function returns the pid for the given cmdline. If _cmdline_
-is NULL, then -1 is returned.
-
-EXAMPLE
--------
-The following example registers pid for command "ls", in context of event _tep_
-and performs various searches for pid / process name mappings:
-[source,c]
---
-#include <event-parse.h>
-...
-int ret;
-int ls_pid = 1021;
-struct tep_handle *tep = tep_alloc();
-...
- ret = tep_register_comm(tep, "ls", ls_pid);
- if (ret != 0 && errno == EEXIST)
- ret = tep_override_comm(tep, "ls", ls_pid);
- if (ret != 0) {
- /* Failed to register pid / command mapping */
- }
-...
- if (tep_is_pid_registered(tep, ls_pid) == 0) {
- /* Command mapping for ls_pid is not registered */
- }
-...
- const char *comm = tep_data_comm_from_pid(tep, ls_pid);
- if (comm) {
- /* Found process name for ls_pid */
- }
-...
- int pid;
- struct cmdline *cmd = tep_data_pid_from_comm(tep, "ls", NULL);
- while (cmd) {
- pid = tep_cmdline_pid(tep, cmd);
- /* Found pid for process "ls" */
- cmd = tep_data_pid_from_comm(tep, "ls", cmd);
- }
---
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-cpus.txt b/tools/lib/traceevent/Documentation/libtraceevent-cpus.txt
deleted file mode 100644
index 5ad70e43b752..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-cpus.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_get_cpus, tep_set_cpus - Get / set the number of CPUs that have a tracing
-buffer representing them. Note that the buffer may be empty.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-int *tep_get_cpus*(struct tep_handle pass:[*]_tep_);
-void *tep_set_cpus*(struct tep_handle pass:[*]_tep_, int _cpus_);
---
-
-DESCRIPTION
------------
-The _tep_get_cpus()_ function gets the number of CPUs that have a tracing
-buffer representing them. The _tep_ argument is the trace event parser context.
-
-The _tep_set_cpus()_ function sets the number of CPUs that have a tracing
-buffer representing them. The _tep_ argument is the trace event parser context.
-The _cpus_ argument is the number of CPUs with tracing data.
-
-RETURN VALUE
-------------
-The _tep_get_cpus()_ function returns the number of CPUs that have tracing
-data recorded.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
- tep_set_cpus(tep, 5);
-...
- printf("We have tracing data for %d CPUs", tep_get_cpus(tep));
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt b/tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt
deleted file mode 100644
index e64851b6e189..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-endian_read.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_read_number - Reads a number from raw data.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-unsigned long long *tep_read_number*(struct tep_handle pass:[*]_tep_, const void pass:[*]_ptr_, int _size_);
---
-
-DESCRIPTION
------------
-The _tep_read_number()_ function reads an integer from raw data, taking into
-account the endianness of the raw data and the current host. The _tep_ argument
-is the trace event parser context. The _ptr_ is a pointer to the raw data, where
-the integer is, and the _size_ is the size of the integer.
-
-RETURN VALUE
-------------
-The _tep_read_number()_ function returns the integer in the byte order of
-the current host. In case of an error, 0 is returned.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-void process_record(struct tep_record *record)
-{
- int offset = 24;
- int data = tep_read_number(tep, record->data + offset, 4);
-
- /* Read the 4 bytes at the offset 24 of data as an integer */
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_find.txt
deleted file mode 100644
index 7bc062c9f76f..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-event_find.txt
+++ /dev/null
@@ -1,103 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_find_event,tep_find_event_by_name,tep_find_event_by_record -
-Find events by given key.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_);
-struct tep_event pass:[*]*tep_find_event_by_name*(struct tep_handle pass:[*]_tep_, const char pass:[*]_sys_, const char pass:[*]_name_);
-struct tep_event pass:[*]*tep_find_event_by_record*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_record_);
---
-
-DESCRIPTION
------------
-This set of functions can be used to search for an event, based on a given
-criteria. All functions require a pointer to a _tep_, trace event parser
-context.
-
-The _tep_find_event()_ function searches for an event by the given event _id_. The
-event ID is assigned dynamically and can be viewed in the event's format file,
-in the "ID" field.
-
-The _tep_find_event_by_name()_ function searches for an event by the given
-event _name_, under the system _sys_. If the _sys_ is NULL (not specified),
-the first event with _name_ is returned.
-
-The _tep_find_event_by_record()_ function searches for an event from a given
-_record_.
-
-RETURN VALUE
-------------
-All these functions return a pointer to the found event, or NULL if there is no
-such event.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-struct tep_event *event;
-
-event = tep_find_event(tep, 1857);
-if (event == NULL) {
- /* There is no event with ID 1857 */
-}
-
-event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
-if (event == NULL) {
- /* There is no kvm_exit event, from kvm system */
-}
-
-void event_from_record(struct tep_record *record)
-{
- struct tep_event *event = tep_find_event_by_record(tep, record);
- if (event == NULL) {
- /* There is no event from given record */
- }
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_get.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_get.txt
deleted file mode 100644
index 6525092fc417..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-event_get.txt
+++ /dev/null
@@ -1,99 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_get_event, tep_get_first_event, tep_get_events_count - Access events.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-struct tep_event pass:[*]*tep_get_event*(struct tep_handle pass:[*]_tep_, int _index_);
-struct tep_event pass:[*]*tep_get_first_event*(struct tep_handle pass:[*]_tep_);
-int *tep_get_events_count*(struct tep_handle pass:[*]_tep_);
---
-
-DESCRIPTION
------------
-The _tep_get_event()_ function returns a pointer to event at the given _index_.
-The _tep_ argument is trace event parser context, the _index_ is the index of
-the requested event.
-
-The _tep_get_first_event()_ function returns a pointer to the first event.
-As events are stored in an array, this function returns the pointer to the
-beginning of the array. The _tep_ argument is trace event parser context.
-
-The _tep_get_events_count()_ function returns the number of the events
-in the array. The _tep_ argument is trace event parser context.
-
-RETURN VALUE
-------------
-The _tep_get_event()_ returns a pointer to the event located at _index_.
-NULL is returned in case of an error, if there are no events, or if _index_ is
-out of range.
-
-The _tep_get_first_event()_ returns a pointer to the first event. NULL is
-returned in case of an error, or if there are no events.
-
-The _tep_get_events_count()_ returns the number of events. 0 is
-returned in case of an error, or if there are no events.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-int i,count = tep_get_events_count(tep);
-struct tep_event *event, *events = tep_get_first_event(tep);
-
-if (events == NULL) {
- /* There are no events */
-} else {
- for (i = 0; i < count; i++) {
- event = (events+i);
- /* process events[i] */
- }
-
- /* Get the last event */
- event = tep_get_event(tep, count-1);
-}
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_list.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_list.txt
deleted file mode 100644
index fba350e5a4cb..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-event_list.txt
+++ /dev/null
@@ -1,122 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_list_events, tep_list_events_copy -
-Get list of events, sorted by given criteria.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-enum *tep_event_sort_type* {
- _TEP_EVENT_SORT_ID_,
- _TEP_EVENT_SORT_NAME_,
- _TEP_EVENT_SORT_SYSTEM_,
-};
-
-struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
-struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
---
-
-DESCRIPTION
------------
-The _tep_list_events()_ function returns an array of pointers to the events,
-sorted by the _sort_type_ criteria. The last element of the array is NULL.
-The returned memory must not be freed, it is managed by the library.
-The function is not thread safe. The _tep_ argument is trace event parser
-context. The _sort_type_ argument is the required sort criteria:
-[verse]
---
- _TEP_EVENT_SORT_ID_ - sort by the event ID.
- _TEP_EVENT_SORT_NAME_ - sort by the event (name, system, id) triplet.
- _TEP_EVENT_SORT_SYSTEM_ - sort by the event (system, name, id) triplet.
---
-
-The _tep_list_events_copy()_ is a thread safe version of _tep_list_events()_.
-It has the same behavior, but the returned array is allocated internally and
-must be freed by the caller. Note that the content of the array must not be
-freed (see the EXAMPLE below).
-
-RETURN VALUE
-------------
-The _tep_list_events()_ function returns an array of pointers to events.
-In case of an error, NULL is returned. The returned array must not be freed,
-it is managed by the library.
-
-The _tep_list_events_copy()_ function returns an array of pointers to events.
-In case of an error, NULL is returned. The returned array must be freed by
-the caller.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-int i;
-struct tep_event **events;
-
-i=0;
-events = tep_list_events(tep, TEP_EVENT_SORT_ID);
-if (events == NULL) {
- /* Failed to get the events, sorted by ID */
-} else {
- while(events[i]) {
- /* walk through the list of the events, sorted by ID */
- i++;
- }
-}
-
-i=0;
-events = tep_list_events_copy(tep, TEP_EVENT_SORT_NAME);
-if (events == NULL) {
- /* Failed to get the events, sorted by name */
-} else {
- while(events[i]) {
- /* walk through the list of the events, sorted by name */
- i++;
- }
- free(events);
-}
-
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-event_print.txt b/tools/lib/traceevent/Documentation/libtraceevent-event_print.txt
deleted file mode 100644
index 2c6a61811118..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-event_print.txt
+++ /dev/null
@@ -1,130 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_print_event - Writes event information into a trace sequence.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-*#include <trace-seq.h>*
-
-void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seqpass:[*]_s_, struct tep_record pass:[*]_record_, const char pass:[*]_fmt_, _..._)
---
-
-DESCRIPTION
------------
-
-The _tep_print_event()_ function parses the event information of the given
-_record_ and writes it into the trace sequence _s_, according to the format
-string _fmt_. The desired information is specified after the format string.
-The _fmt_ is a printf-like format string; the following arguments are supported:
-[verse]
---
- TEP_PRINT_PID, "%d" - PID of the event.
- TEP_PRINT_CPU, "%d" - Event CPU.
- TEP_PRINT_COMM, "%s" - Event command string.
- TEP_PRINT_NAME, "%s" - Event name.
- TEP_PRINT_LATENCY, "%s" - Latency of the event. It prints 4 or more
- fields - interrupt state, scheduling state,
- current context, and preemption count.
- Field 1 is the interrupt enabled state:
- d : Interrupts are disabled
- . : Interrupts are enabled
- X : The architecture does not support this
- information
- Field 2 is the "need resched" state.
- N : The task is set to call the scheduler when
- possible, as another higher priority task
- may need to be scheduled in.
- . : The task is not set to call the scheduler.
- Field 3 is the context state.
- . : Normal context
- s : Soft interrupt context
- h : Hard interrupt context
- H : Hard interrupt context which triggered
- during soft interrupt context.
- z : NMI context
- Z : NMI context which triggered during hard
- interrupt context
- Field 4 is the preemption count.
- . : The preempt count is zero.
- On preemptible kernels (where the task can be scheduled
- out in arbitrary locations while in kernel context), the
- preempt count, when non zero, will prevent the kernel
- from scheduling out the current task. The preempt count
- number is displayed when it is not zero.
- Depending on the kernel, it may show other fields
- (lock depth, or migration disabled, which are unique to
- specialized kernels).
- TEP_PRINT_TIME, %d - event time stamp. A divisor and precision can be
- specified as part of this format string:
- "%precision.divisord". Example:
- "%3.1000d" - divide the time by 1000 and print the first
- 3 digits before the dot. Thus, the time stamp
- "123456000" will be printed as "123.456"
- TEP_PRINT_INFO, "%s" - event information.
- TEP_PRINT_INFO_RAW, "%s" - event information, in raw format.
-
---
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-#include <trace-seq.h>
-...
-struct trace_seq seq;
-trace_seq_init(&seq);
-struct tep_handle *tep = tep_alloc();
-...
-void print_my_event(struct tep_record *record)
-{
- trace_seq_reset(&seq);
- tep_print_event(tep, s, record, "%16s-%-5d [%03d] %s %6.1000d %s %s",
- TEP_PRINT_COMM, TEP_PRINT_PID, TEP_PRINT_CPU,
- TEP_PRINT_LATENCY, TEP_PRINT_TIME, TEP_PRINT_NAME,
- TEP_PRINT_INFO);
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*trace-seq.h*
- Header file to include in order to have access to trace sequences related APIs.
- Trace sequences are used to allow a function to call several other functions
- to create a string of data to use.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_find.txt
deleted file mode 100644
index 0896af5b9eff..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-field_find.txt
+++ /dev/null
@@ -1,118 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_find_common_field, tep_find_field, tep_find_any_field -
-Search for a field in an event.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-struct tep_format_field pass:[*]*tep_find_common_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
-struct tep_format_field pass:[*]*tep_find_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
-struct tep_format_field pass:[*]*tep_find_any_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
---
-
-DESCRIPTION
------------
-These functions search for a field with given name in an event. The field
-returned can be used to find the field content from within a data record.
-
-The _tep_find_common_field()_ function searches for a common field with _name_
-in the _event_.
-
-The _tep_find_field()_ function searches for an event specific field with
-_name_ in the _event_.
-
-The _tep_find_any_field()_ function searches for any field with _name_ in the
-_event_.
-
-RETURN VALUE
-------------
-The _tep_find_common_field()_, _tep_find_field()_ and _tep_find_any_field()_
-functions return a pointer to the found field, or NULL in case there is no field
-with the requested name.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-void get_htimer_info(struct tep_handle *tep, struct tep_record *record)
-{
- struct tep_format_field *field;
- struct tep_event *event;
- long long softexpires;
- int mode;
- int pid;
-
- event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
-
- field = tep_find_common_field(event, "common_pid");
- if (field == NULL) {
- /* Cannot find "common_pid" field in the event */
- } else {
- /* Get pid from the data record */
- pid = tep_read_number(tep, record->data + field->offset,
- field->size);
- }
-
- field = tep_find_field(event, "softexpires");
- if (field == NULL) {
- /* Cannot find "softexpires" event specific field in the event */
- } else {
- /* Get softexpires parameter from the data record */
- softexpires = tep_read_number(tep, record->data + field->offset,
- field->size);
- }
-
- field = tep_find_any_field(event, "mode");
- if (field == NULL) {
- /* Cannot find "mode" field in the event */
- } else
- {
- /* Get mode parameter from the data record */
- mode = tep_read_number(tep, record->data + field->offset,
- field->size);
- }
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt
deleted file mode 100644
index 6324f0d48aeb..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-field_get_val.txt
+++ /dev/null
@@ -1,122 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_get_any_field_val, tep_get_common_field_val, tep_get_field_val,
-tep_get_field_raw - Get value of a field.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-*#include <trace-seq.h>*
-
-int *tep_get_any_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
-int *tep_get_common_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
-int *tep_get_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
-void pass:[*]*tep_get_field_raw*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int pass:[*]_len_, int _err_);
---
-
-DESCRIPTION
------------
-These functions can be used to find a field and retrieve its value.
-
-The _tep_get_any_field_val()_ function searches in the _record_ for a field
-with _name_, part of the _event_. If the field is found, its value is stored in
-_val_. If there is an error and _err_ is not zero, then an error string is
-written into _s_.
-
-The _tep_get_common_field_val()_ function does the same as
-_tep_get_any_field_val()_, but searches only in the common fields. This works
-for any event as all events include the common fields.
-
-The _tep_get_field_val()_ function does the same as _tep_get_any_field_val()_,
-but searches only in the event specific fields.
-
-The _tep_get_field_raw()_ function searches in the _record_ for a field with
-_name_, part of the _event_. If the field is found, a pointer to where the field
-exists in the record's raw data is returned. The size of the data is stored in
-_len_. If there is an error and _err_ is not zero, then an error string is
-written into _s_.
-
-RETURN VALUE
-------------
-The _tep_get_any_field_val()_, _tep_get_common_field_val()_ and
-_tep_get_field_val()_ functions return 0 on success, or -1 in case of an error.
-
-The _tep_get_field_raw()_ function returns a pointer to field's raw data, and
-places the length of this data in _len_. In case of an error NULL is returned.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-#include <trace-seq.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-struct tep_event *event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
-...
-void process_record(struct tep_record *record)
-{
- int len;
- char *comm;
-	struct tep_event *event;
-	unsigned long long val;
-
-	event = tep_find_event_by_record(tep, record);
- if (event != NULL) {
- if (tep_get_common_field_val(NULL, event, "common_type",
- record, &val, 0) == 0) {
- /* Got the value of common type field */
- }
- if (tep_get_field_val(NULL, event, "pid", record, &val, 0) == 0) {
- /* Got the value of pid specific field */
- }
- comm = tep_get_field_raw(NULL, event, "comm", record, &len, 0);
- if (comm != NULL) {
- /* Got a pointer to the comm event specific field */
- }
- }
-}
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*trace-seq.h*
- Header file to include in order to have access to trace sequences
- related APIs. Trace sequences are used to allow a function to call
- several other functions to create a string of data to use.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_print.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_print.txt
deleted file mode 100644
index 9a9df98ac44d..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-field_print.txt
+++ /dev/null
@@ -1,126 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_print_field, tep_print_fields, tep_print_num_field, tep_print_func_field -
-Print the field content.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-*#include <trace-seq.h>*
-
-void *tep_print_field*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, struct tep_format_field pass:[*]_field_);
-void *tep_print_fields*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, int _size_, struct tep_event pass:[*]_event_);
-int *tep_print_num_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
-int *tep_print_func_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
---
-
-DESCRIPTION
------------
-These functions print recorded field's data, according to the field's type.
-
-The _tep_print_field()_ function extracts the value of the _field_ from the
-recorded raw _data_ and prints it into _s_, according to the field type.
-
-The _tep_print_fields()_ prints each field name followed by the record's field
-value according to the field's type:
-[verse]
---
-"field1_name=field1_value field2_name=field2_value ..."
---
-It iterates all fields of the _event_, and calls _tep_print_field()_ for each of
-them.
-
-The _tep_print_num_field()_ function prints a numeric field with a given format
-string. A search is performed in the _event_ for a field with _name_. If such a
-field is found, its value is extracted from the _record_ and printed into
-_s_, according to the given format string _fmt_. If the argument _err_ is
-non-zero and an error occurs, it is printed into _s_.
-
-The _tep_print_func_field()_ function prints a function field with a given format
-string. A search is performed in the _event_ for a field with _name_. If such a
-field is found, its value is extracted from the _record_. The value is assumed
-to be a function address, and a search is performed to find the name of this
-function. The function name (if found) and its address are printed into _s_,
-according to the given format string _fmt_. If the argument _err_ is non-zero
-and an error occurs, it is printed into _s_.
-
-RETURN VALUE
-------------
-The _tep_print_num_field()_ and _tep_print_func_field()_ functions return 1
-on success, -1 in case of an error or 0 if the print buffer _s_ is full.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-#include <trace-seq.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-struct trace_seq seq;
-trace_seq_init(&seq);
-struct tep_event *event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
-...
-void process_record(struct tep_record *record)
-{
- struct tep_format_field *field_pid = tep_find_common_field(event, "common_pid");
-
- trace_seq_reset(&seq);
-
- /* Print the value of "common_pid" */
- tep_print_field(&seq, record->data, field_pid);
-
- /* Print all fields of the "hrtimer_start" event */
- tep_print_fields(&seq, record->data, record->size, event);
-
- /* Print the value of "expires" field with custom format string */
- tep_print_num_field(&seq, " timer expires in %llu ", event, "expires", record, 0);
-
- /* Print the address and the name of "function" field with custom format string */
- tep_print_func_field(&seq, " timer function is %s ", event, "function", record, 0);
- }
- ...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*trace-seq.h*
- Header file to include in order to have access to trace sequences related APIs.
- Trace sequences are used to allow a function to call several other functions
- to create a string of data to use.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-field_read.txt b/tools/lib/traceevent/Documentation/libtraceevent-field_read.txt
deleted file mode 100644
index 64e9e25d3fd9..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-field_read.txt
+++ /dev/null
@@ -1,81 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_read_number_field - Reads a number from raw data.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-int *tep_read_number_field*(struct tep_format_field pass:[*]_field_, const void pass:[*]_data_, unsigned long long pass:[*]_value_);
---
-
-DESCRIPTION
------------
-The _tep_read_number_field()_ function reads the value of the _field_ from the
-raw _data_, converts it according to the endianness of the raw data and the
-current machine, and stores the result in _value_.
-
-RETURN VALUE
-------------
-The _tep_read_number_field()_ function returns 0 in case of success, or -1 in
-case of an error.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-struct tep_event *event = tep_find_event_by_name(tep, "timer", "hrtimer_start");
-...
-void process_record(struct tep_record *record)
-{
- unsigned long long pid;
- struct tep_format_field *field_pid = tep_find_common_field(event, "common_pid");
-
- if (tep_read_number_field(field_pid, record->data, &pid) != 0) {
- /* Failed to get "common_pid" value */
- }
-}
-...
---
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-fields.txt b/tools/lib/traceevent/Documentation/libtraceevent-fields.txt
deleted file mode 100644
index 1ccb531d5114..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-fields.txt
+++ /dev/null
@@ -1,105 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_event_common_fields, tep_event_fields - Get a list of fields for an event.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-struct tep_format_field pass:[*]pass:[*]*tep_event_common_fields*(struct tep_event pass:[*]_event_);
-struct tep_format_field pass:[*]pass:[*]*tep_event_fields*(struct tep_event pass:[*]_event_);
---
-
-DESCRIPTION
------------
-The _tep_event_common_fields()_ function returns an array of pointers to common
-fields for the _event_. The array is allocated in the function and must be freed
-by free(). The last element of the array is NULL.
-
-The _tep_event_fields()_ function returns an array of pointers to event specific
-fields for the _event_. The array is allocated in the function and must be freed
-by free(). The last element of the array is NULL.
-
-RETURN VALUE
-------------
-Both _tep_event_common_fields()_ and _tep_event_fields()_ functions return
-an array of pointers to tep_format_field structures in case of success, or
-NULL in case of an error.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-int i;
-struct tep_format_field **fields;
-struct tep_event *event = tep_find_event_by_name(tep, "kvm", "kvm_exit");
-if (event != NULL) {
- fields = tep_event_common_fields(event);
- if (fields != NULL) {
- i = 0;
- while (fields[i]) {
- /*
- walk through the list of the common fields
- of the kvm_exit event
- */
- i++;
- }
- free(fields);
- }
- fields = tep_event_fields(event);
- if (fields != NULL) {
- i = 0;
- while (fields[i]) {
- /*
- walk through the list of the event specific
- fields of the kvm_exit event
- */
- i++;
- }
- free(fields);
- }
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt b/tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt
deleted file mode 100644
index f401ad311047..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-file_endian.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_is_file_bigendian, tep_set_file_bigendian - Get / set the endianness of the
-raw data being accessed by the tep handler.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-enum *tep_endian* {
- TEP_LITTLE_ENDIAN = 0,
- TEP_BIG_ENDIAN
-};
-
-bool *tep_is_file_bigendian*(struct tep_handle pass:[*]_tep_);
-void *tep_set_file_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
-
---
-DESCRIPTION
------------
-The _tep_is_file_bigendian()_ function gets the endianness of the raw data
-being accessed by the tep handler. The _tep_ argument is the trace event parser
-context.
-
-The _tep_set_file_bigendian()_ function sets the endianness of raw data being
-accessed by the tep handler. The _tep_ argument is trace event parser context.
-[verse]
---
-The _endian_ argument is the endianness:
- _TEP_LITTLE_ENDIAN_ - the raw data is in little endian format,
- _TEP_BIG_ENDIAN_ - the raw data is in big endian format.
---
-RETURN VALUE
-------------
-The _tep_is_file_bigendian()_ function returns true if the data is in big endian
-format, false otherwise.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
- tep_set_file_bigendian(tep, TEP_LITTLE_ENDIAN);
-...
- if (tep_is_file_bigendian(tep)) {
- /* The raw data is in big endian */
- } else {
- /* The raw data is in little endian */
- }
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-filter.txt b/tools/lib/traceevent/Documentation/libtraceevent-filter.txt
deleted file mode 100644
index 4a9962d8cb59..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-filter.txt
+++ /dev/null
@@ -1,209 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_filter_alloc, tep_filter_free, tep_filter_reset, tep_filter_make_string,
-tep_filter_copy, tep_filter_compare, tep_filter_match, tep_event_filtered,
-tep_filter_remove_event, tep_filter_strerror, tep_filter_add_filter_str -
-Event filter related APIs.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-struct tep_event_filter pass:[*]*tep_filter_alloc*(struct tep_handle pass:[*]_tep_);
-void *tep_filter_free*(struct tep_event_filter pass:[*]_filter_);
-void *tep_filter_reset*(struct tep_event_filter pass:[*]_filter_);
-enum tep_errno *tep_filter_add_filter_str*(struct tep_event_filter pass:[*]_filter_, const char pass:[*]_filter_str_);
-int *tep_event_filtered*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
-int *tep_filter_remove_event*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
-enum tep_errno *tep_filter_match*(struct tep_event_filter pass:[*]_filter_, struct tep_record pass:[*]_record_);
-int *tep_filter_copy*(struct tep_event_filter pass:[*]_dest_, struct tep_event_filter pass:[*]_source_);
-int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_);
-char pass:[*]*tep_filter_make_string*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
-int *tep_filter_strerror*(struct tep_event_filter pass:[*]_filter_, enum tep_errno _err_, char pass:[*]buf, size_t _buflen_);
---
-
-DESCRIPTION
------------
-Filters can be attached to traced events. They can be used to filter out various
-events when outputting them. Each event can be filtered based on its parameters,
-described in the event's format file. This set of functions can be used to
-create, delete, modify and attach event filters.
-
-The _tep_filter_alloc()_ function creates a new event filter. The _tep_ argument
-is the trace event parser context.
-
-The _tep_filter_free()_ function frees an event filter and all resources that it
-had used.
-
-The _tep_filter_reset()_ function removes all rules from an event filter and
-resets it.
-
-The _tep_filter_add_filter_str()_ function adds a new rule to the _filter_. The
-_filter_str_ argument is the filter string, that contains the rule.
-
-The _tep_event_filtered()_ function checks if the event with _event_id_ has
-_filter_.
-
-The _tep_filter_remove_event()_ function removes a _filter_ for an event with
-_event_id_.
-
-The _tep_filter_match()_ function tests if a _record_ matches given _filter_.
-
-The _tep_filter_copy()_ function copies a _source_ filter into a _dest_ filter.
-
-The _tep_filter_compare()_ function compares two filters - _filter1_ and _filter2_.
-
-The _tep_filter_make_string()_ function constructs a string, displaying
-the _filter_ contents for given _event_id_.
-
-The _tep_filter_strerror()_ function copies the _filter_ error buffer into the
-given _buf_ of size _buflen_. If the error buffer is empty, a string describing
-the error _err_ is copied into _buf_ instead.
-
-RETURN VALUE
-------------
-The _tep_filter_alloc()_ function returns a pointer to the newly created event
-filter, or NULL in case of an error.
-
-The _tep_filter_add_filter_str()_ function returns 0 if the rule was added
-successfully, or a negative error code otherwise. Use _tep_filter_strerror()_ to
-see the actual error message in case of an error.
-
-The _tep_event_filtered()_ function returns 1 if the filter is found for given
-event, or 0 otherwise.
-
-The _tep_filter_remove_event()_ function returns 1 if the event was removed, or
-0 if the event was not found.
-
-The _tep_filter_match()_ function returns _tep_errno_, according to the result:
-[verse]
---
-_pass:[TEP_ERRNO__FILTER_MATCH]_ - filter found for event, the record matches.
-_pass:[TEP_ERRNO__FILTER_MISS]_ - filter found for event, the record does not match.
-_pass:[TEP_ERRNO__FILTER_NOT_FOUND]_ - no filter found for record's event.
-_pass:[TEP_ERRNO__NO_FILTER]_ - no rules in the filter.
---
-or any other _tep_errno_, if an error occurred during the test.
-
-The _tep_filter_copy()_ function returns 0 on success or -1 if not all rules
-were copied.
-
-The _tep_filter_compare()_ function returns 1 if the two filters hold the same
-content, or 0 if they do not.
-
-The _tep_filter_make_string()_ function returns a string, which must be freed
-with free(), or NULL in case of an error.
-
-The _tep_filter_strerror()_ function returns 0 if message was filled
-successfully, or -1 in case of an error.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-char errstr[200];
-int ret;
-
-struct tep_event_filter *filter = tep_filter_alloc(tep);
-struct tep_event_filter *filter1 = tep_filter_alloc(tep);
-ret = tep_filter_add_filter_str(filter, "sched/sched_wakeup:target_cpu==1");
-if (ret < 0) {
- tep_filter_strerror(filter, ret, errstr, sizeof(errstr));
- /* Failed to add a new rule to the filter, the error string is in errstr */
-}
-if (tep_filter_copy(filter1, filter) != 0) {
- /* Failed to copy filter in filter1 */
-}
-...
-if (tep_filter_compare(filter, filter1) != 1) {
- /* Both filters are different */
-}
-...
-void process_record(struct tep_handle *tep, struct tep_record *record)
-{
- struct tep_event *event;
- char *fstring;
-
- event = tep_find_event_by_record(tep, record);
-
- if (tep_event_filtered(filter, event->id) == 1) {
- /* The event has filter */
- fstring = tep_filter_make_string(filter, event->id);
- if (fstring != NULL) {
- /* The filter for the event is in fstring */
- free(fstring);
- }
- }
-
- switch (tep_filter_match(filter, record)) {
- case TEP_ERRNO__FILTER_MATCH:
- /* The filter matches the record */
- break;
- case TEP_ERRNO__FILTER_MISS:
- /* The filter does not match the record */
- break;
- case TEP_ERRNO__FILTER_NOT_FOUND:
- /* No filter found for record's event */
- break;
- case TEP_ERRNO__NO_FILTER:
- /* There are no rules in the filter */
-		break;
- default:
- /* An error occurred during the test */
- break;
- }
-
- if (tep_filter_remove_event(filter, event->id) == 1) {
- /* The event was removed from the filter */
- }
-}
-
-...
-tep_filter_reset(filter);
-...
-tep_filter_free(filter);
-tep_filter_free(filter1);
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt b/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
deleted file mode 100644
index f6aca0df2151..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-func_apis.txt
+++ /dev/null
@@ -1,183 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_find_function, tep_find_function_address, tep_set_function_resolver,
-tep_reset_function_resolver, tep_register_function, tep_register_print_string -
-function related tep APIs
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-typedef char pass:[*](*tep_func_resolver_t*)(void pass:[*]_priv_, unsigned long long pass:[*]_addrp_, char pass:[**]_modp_);
-int *tep_set_function_resolver*(struct tep_handle pass:[*]_tep_, tep_func_resolver_t pass:[*]_func_, void pass:[*]_priv_);
-void *tep_reset_function_resolver*(struct tep_handle pass:[*]_tep_);
-const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
-unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
-int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_);
-int *tep_register_print_string*(struct tep_handle pass:[*]_tep_, const char pass:[*]_fmt_, unsigned long long _addr_);
---
-
-DESCRIPTION
------------
-Some tools may already have a way to resolve kernel functions. These APIs
-allow them to keep using it instead of duplicating all the entries inside.
-
-The _tep_func_resolver_t_ type is the prototype of the alternative kernel
-functions resolver. This function receives a pointer to its custom context
-(set with the _tep_set_function_resolver()_ call) and the address of a kernel
-function, which has to be resolved. In case of success, it should return
-the name of the function and its module (if any) in _modp_.
-
-The _tep_set_function_resolver()_ function registers _func_ as an alternative
-kernel functions resolver. The _tep_ argument is trace event parser context.
-The _priv_ argument is a custom context of the _func_ function. The function
-resolver is used by the APIs _tep_find_function()_,
-_tep_find_function_address()_, and _tep_print_func_field()_ to resolve
-a function address to a function name.
-
-The _tep_reset_function_resolver()_ function resets the kernel functions
-resolver to the default function. The _tep_ argument is trace event parser
-context.
-
-
-These APIs can be used to find a function's name and start address from a
-given address. The given address does not have to be exact; they will select
-the function that contains it.
-
-The _tep_find_function()_ function returns the function name, which contains the
-given address _addr_. The _tep_ argument is the trace event parser context.
-
-The _tep_find_function_address()_ function returns the function start address,
-by given address _addr_. The _addr_ does not have to be exact, it will select
-the function that would contain it. The _tep_ argument is the trace event
-parser context.
-
-The _tep_register_function()_ function registers a function name mapped to an
-address and an (optional) module. This mapping is used in case the function
-tracer or events have a "%pS" parameter in their format string. It is common to
-pass in the kallsyms function names with their corresponding addresses with this
-function. The _tep_ argument is the trace event parser context. The _name_ is
-the name of the function, the string is copied internally. The _addr_ is the
-start address of the function. The _mod_ is the kernel module the function may
-be in (NULL for none).
-
-The _tep_register_print_string()_ function registers a string by the address
-it was stored in the kernel. Some strings internal to the kernel with static
-address are passed to certain events. A "%s" in the event's format that refers
-to such an address needs to know what string is located there. The
-_tep_register_print_string()_ function supplies the parser with the mapping between
-kernel addresses and those strings. The _tep_ argument is the trace event parser
-context. The _fmt_ is the string to register, it is copied internally.
-The _addr_ is the address the string was located at.
-
-
-RETURN VALUE
-------------
-The _tep_set_function_resolver()_ function returns 0 in case of success, or -1
-in case of an error.
-
-The _tep_find_function()_ function returns the function name, or NULL in case
-it cannot be found.
-
-The _tep_find_function_address()_ function returns the function start address,
-or 0 in case it cannot be found.
-
-The _tep_register_function()_ function returns 0 in case of success. In case of
-an error -1 is returned, and errno is set to the appropriate error number.
-
-The _tep_register_print_string()_ function returns 0 in case of success. In case
-of an error -1 is returned, and errno is set to the appropriate error number.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
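-/* struct db and sql_lookup() are stand-ins for the tool's own symbol database */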
-char *my_resolve_kernel_addr(void *context,
- unsigned long long *addrp, char **modp)
-{
- struct db *function_database = context;
- struct symbol *sym = sql_lookup(function_database, *addrp);
-
- if (!sym)
- return NULL;
-
- *modp = sym->module_name;
- return sym->name;
-}
-
-void show_function(unsigned long long addr)
-{
- unsigned long long fstart;
- const char *fname;
-
- if (tep_set_function_resolver(tep, my_resolve_kernel_addr,
- function_database) != 0) {
- /* failed to register my_resolve_kernel_addr */
- }
-
- /* These APIs use my_resolve_kernel_addr() to resolve the addr */
- fname = tep_find_function(tep, addr);
- fstart = tep_find_function_address(tep, addr);
-
- /*
- addr is in function named fname, starting at fstart address,
- at offset (addr - fstart)
- */
-
- tep_reset_function_resolver(tep);
-
-}
-...
- if (tep_register_function(tep, "kvm_exit",
- (unsigned long long) 0x12345678, "kvm") != 0) {
- /* Failed to register kvm_exit address mapping */
- }
-...
- if (tep_register_print_string(tep, "print string",
- (unsigned long long) 0x87654321, NULL) != 0) {
- /* Failed to register "print string" address mapping */
- }
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-func_find.txt b/tools/lib/traceevent/Documentation/libtraceevent-func_find.txt
deleted file mode 100644
index 04840e244445..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-func_find.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_find_function, tep_find_function_address - Find function name / start address.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
-unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
---
-
-DESCRIPTION
------------
-These functions can be used to find a function's name and start address from a
-given address. The given address does not have to be exact; they will select
-the function that contains it.
-
-The _tep_find_function()_ function returns the function name, which contains the
-given address _addr_. The _tep_ argument is the trace event parser context.
-
-The _tep_find_function_address()_ function returns the function start address,
-by given address _addr_. The _addr_ does not have to be exact, it will select the
-function that would contain it. The _tep_ argument is the trace event parser context.
-
-RETURN VALUE
-------------
-The _tep_find_function()_ function returns the function name, or NULL in case
-it cannot be found.
-
-The _tep_find_function_address()_ function returns the function start address,
-or 0 in case it cannot be found.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-void show_function(unsigned long long addr)
-{
- const char *fname = tep_find_function(tep, addr);
- unsigned long long fstart = tep_find_function_address(tep, addr);
-
- /* addr is in function named fname, starting at fstart address, at offset (addr - fstart) */
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-handle.txt b/tools/lib/traceevent/Documentation/libtraceevent-handle.txt
deleted file mode 100644
index 45b20172e262..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-handle.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_alloc, tep_free, tep_ref, tep_unref, tep_get_ref - Create, destroy, manage
-references of trace event parser context.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-struct tep_handle pass:[*]*tep_alloc*(void);
-void *tep_free*(struct tep_handle pass:[*]_tep_);
-void *tep_ref*(struct tep_handle pass:[*]_tep_);
-void *tep_unref*(struct tep_handle pass:[*]_tep_);
-int *tep_get_ref*(struct tep_handle pass:[*]_tep_);
---
-
-DESCRIPTION
------------
-These are the main functions to create and destroy tep_handle - the main
-structure, representing the trace event parser context. This context is used as
-the input parameter of most library APIs.
-
-The _tep_alloc()_ function allocates and initializes the tep context.
-
-The _tep_free()_ function will decrement the reference count of the _tep_
-handler. When there are no more references, it will free the handler and clean
-up all the resources it used. The argument _tep_ is the pointer to the trace
-event parser context.
-
-The _tep_ref()_ function adds a reference to the _tep_ handler.
-
-The _tep_unref()_ function removes a reference from the _tep_ handler. When
-the last reference is removed, the _tep_ is destroyed, and all resources that
-it had used are cleaned up.
-
-The _tep_get_ref()_ function returns the current reference count of the _tep_ handler.
-
-RETURN VALUE
-------------
-_tep_alloc()_ returns a pointer to a newly created tep_handle structure.
-NULL is returned in case there is not enough free memory to allocate it.
-
-_tep_get_ref()_ returns the current reference count of _tep_.
-If _tep_ is NULL, 0 is returned.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-
-...
-struct tep_handle *tep = tep_alloc();
-...
-int ref = tep_get_ref(tep);
-tep_ref(tep);
-if ( (ref+1) != tep_get_ref(tep)) {
- /* Something wrong happened, the counter is not incremented by 1 */
-}
-tep_unref(tep);
-...
-tep_free(tep);
-...
---
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-header_page.txt b/tools/lib/traceevent/Documentation/libtraceevent-header_page.txt
deleted file mode 100644
index 615d117dc39f..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-header_page.txt
+++ /dev/null
@@ -1,102 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_get_header_page_size, tep_get_header_timestamp_size, tep_is_old_format -
-Get the data stored in the header page, in kernel context.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_);
-int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_);
-bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_);
---
-DESCRIPTION
------------
-These functions retrieve information from kernel context, stored in tracefs
-events/header_page. Old kernels do not have header page info, so default values
-from user space context are used.
-
-The _tep_get_header_page_size()_ function returns the size of a long integer,
-in kernel context. The _tep_ argument is trace event parser context.
-This information is retrieved from tracefs events/header_page, "commit" field.
-
-The _tep_get_header_timestamp_size()_ function returns the size of timestamps,
-in kernel context. The _tep_ argument is trace event parser context. This
-information is retrieved from tracefs events/header_page, "timestamp" field.
-
-The _tep_is_old_format()_ function returns true if the kernel predates
-the addition of events/header_page, otherwise it returns false.
-
-RETURN VALUE
-------------
-The _tep_get_header_page_size()_ function returns the size of a long integer,
-in bytes.
-
-The _tep_get_header_timestamp_size()_ function returns the size of timestamps,
-in bytes.
-
-The _tep_is_old_format()_ function returns true, if an old kernel is used to
-generate the tracing data, which has no events/header_page. If the kernel is new,
-or _tep_ is NULL, false is returned.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
- int longsize;
- int timesize;
- bool old;
-
- longsize = tep_get_header_page_size(tep);
- timesize = tep_get_header_timestamp_size(tep);
- old = tep_is_old_format(tep);
-
- printf ("%s kernel is used to generate the tracing data.\n",
- old?"Old":"New");
- printf("The size of a long integer is %d bytes.\n", longsize);
- printf("The timestamps size is %d bytes.\n", timesize);
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt b/tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt
deleted file mode 100644
index d5d375eb8d1e..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-host_endian.txt
+++ /dev/null
@@ -1,104 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_is_bigendian, tep_is_local_bigendian, tep_set_local_bigendian - Get / set
-the endianness of the local machine.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-enum *tep_endian* {
- TEP_LITTLE_ENDIAN = 0,
- TEP_BIG_ENDIAN
-};
-
-int *tep_is_bigendian*(void);
-bool *tep_is_local_bigendian*(struct tep_handle pass:[*]_tep_);
-void *tep_set_local_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
---
-
-DESCRIPTION
------------
-
-The _tep_is_bigendian()_ function returns the endianness of the machine
-executing the function.
-
-The _tep_is_local_bigendian()_ function gets the endianness of the local
-machine, saved in the _tep_ handler. The _tep_ argument is the trace event
-parser context. This API is a bit faster than _tep_is_bigendian()_, as it
-returns cached endianness of the local machine instead of checking it each time.
-
-The _tep_set_local_bigendian()_ function sets the endianness of the local
-machine in the _tep_ handler. The _tep_ argument is trace event parser context.
-The _endian_ argument is the endianness:
-[verse]
---
- _TEP_LITTLE_ENDIAN_ - the machine is little endian,
- _TEP_BIG_ENDIAN_ - the machine is big endian.
---
-
-RETURN VALUE
-------------
-The _tep_is_bigendian()_ function returns non-zero if the machine executing
-the code is big endian, and zero otherwise.
-
-The _tep_is_local_bigendian()_ function returns true, if the endianness of the
-local machine, saved in the _tep_ handler, is big endian, or false otherwise.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
- if (tep_is_bigendian())
- tep_set_local_bigendian(tep, TEP_BIG_ENDIAN);
- else
- tep_set_local_bigendian(tep, TEP_LITTLE_ENDIAN);
-...
- if (tep_is_local_bigendian(tep))
- printf("This machine you are running on is bigendian\n");
- else
- printf("This machine you are running on is little endian\n");
-
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-long_size.txt b/tools/lib/traceevent/Documentation/libtraceevent-long_size.txt
deleted file mode 100644
index 01d78ea2519a..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-long_size.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_get_long_size, tep_set_long_size - Get / set the size of a long integer on
-the machine, where the trace is generated, in bytes
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-int *tep_get_long_size*(struct tep_handle pass:[*]_tep_);
-void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_);
---
-
-DESCRIPTION
------------
-The _tep_get_long_size()_ function returns the size of a long integer on the machine,
-where the trace is generated. The _tep_ argument is trace event parser context.
-
-The _tep_set_long_size()_ function sets the size of a long integer on the machine,
-where the trace is generated. The _tep_ argument is trace event parser context.
-The _long_size_ is the size of a long integer, in bytes.
-
-RETURN VALUE
-------------
-The _tep_get_long_size()_ function returns the size of a long integer on the machine,
-where the trace is generated, in bytes.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-tep_set_long_size(tep, 4);
-...
-int long_size = tep_get_long_size(tep);
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-page_size.txt b/tools/lib/traceevent/Documentation/libtraceevent-page_size.txt
deleted file mode 100644
index 452c0cfa1822..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-page_size.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_get_page_size, tep_set_page_size - Get / set the size of a memory page on
-the machine, where the trace is generated
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-int *tep_get_page_size*(struct tep_handle pass:[*]_tep_);
-void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_);
---
-
-DESCRIPTION
------------
-The _tep_get_page_size()_ function returns the size of a memory page on
-the machine, where the trace is generated. The _tep_ argument is trace
-event parser context.
-
-The _tep_set_page_size()_ function stores in the _tep_ context the size of a
-memory page on the machine, where the trace is generated.
-The _tep_ argument is trace event parser context.
-The _page_size_ argument is the size of a memory page, in bytes.
-
-RETURN VALUE
-------------
-The _tep_get_page_size()_ function returns size of the memory page, in bytes.
-
-EXAMPLE
--------
-[source,c]
---
-#include <unistd.h>
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
- int page_size = getpagesize();
-
- tep_set_page_size(tep, page_size);
-
- printf("The page size for this machine is %d\n", tep_get_page_size(tep));
-
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt b/tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt
deleted file mode 100644
index f248114ca1ff..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-parse_event.txt
+++ /dev/null
@@ -1,90 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_parse_event, tep_parse_format - Parse the event format information
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-enum tep_errno *tep_parse_event*(struct tep_handle pass:[*]_tep_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
-enum tep_errno *tep_parse_format*(struct tep_handle pass:[*]_tep_, struct tep_event pass:[*]pass:[*]_eventp_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
---
-
-DESCRIPTION
------------
-The _tep_parse_event()_ function parses the event format and creates an event
-structure to quickly parse raw data for a given event. The _tep_ argument is
-the trace event parser context. The created event structure is stored in the
-_tep_ context. The _buf_ argument is a buffer with _size_, where the event
-format data is. The event format data can be taken from
-tracefs/events/.../.../format files. The _sys_ argument is the system of
-the event.
-
-The _tep_parse_format()_ function does the same as _tep_parse_event()_. The only
-difference is in the extra _eventp_ argument, where the newly created event
-structure is returned.
-
-RETURN VALUE
-------------
-Both _tep_parse_event()_ and _tep_parse_format()_ functions return 0 on success,
-or TEP_ERRNO__... in case of an error.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-char *buf;
-int size;
-struct tep_event *event = NULL;
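-/* read_file() is a stand-in that reads the file into a buffer and sets its size */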
-buf = read_file("/sys/kernel/tracing/events/ftrace/print/format", &size);
-if (tep_parse_event(tep, buf, size, "ftrace") != 0) {
- /* Failed to parse the ftrace print format */
-}
-
-if (tep_parse_format(tep, &event, buf, size, "ftrace") != 0) {
- /* Failed to parse the ftrace print format */
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt b/tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt
deleted file mode 100644
index c90f16c7d8e6..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-parse_head.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_parse_header_page - Parses the data stored in the header page.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-int *tep_parse_header_page*(struct tep_handle pass:[*]_tep_, char pass:[*]_buf_, unsigned long _size_, int _long_size_);
---
-
-DESCRIPTION
------------
-The _tep_parse_header_page()_ function parses the header page data from _buf_,
-and initializes the _tep_ trace event parser context with it. The buffer
-_buf_ has size _size_ and is expected to be copied from
-tracefs/events/header_page.
-
-Some old kernels do not have header page info; in this case the
-_tep_parse_header_page()_ function can be called with _size_ equal to 0. The
-_tep_ context is then initialized with default values, and _long_size_ can be
-used to set the size of a long integer to be used.
-
-RETURN VALUE
-------------
-The _tep_parse_header_page()_ function returns 0 in case of success, or -1
-in case of an error.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-char *buf;
-int size;
-buf = read_file("/sys/kernel/tracing/events/header_page", &size);
-if (tep_parse_header_page(tep, buf, size, sizeof(unsigned long)) != 0) {
- /* Failed to parse the header page */
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-plugins.txt b/tools/lib/traceevent/Documentation/libtraceevent-plugins.txt
deleted file mode 100644
index 596032ade31f..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-plugins.txt
+++ /dev/null
@@ -1,99 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_load_plugins, tep_unload_plugins - Load / unload traceevent plugins.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-struct tep_plugin_list pass:[*]*tep_load_plugins*(struct tep_handle pass:[*]_tep_);
-void *tep_unload_plugins*(struct tep_plugin_list pass:[*]_plugin_list_, struct tep_handle pass:[*]_tep_);
---
-
-DESCRIPTION
------------
-The _tep_load_plugins()_ function loads all plugins, located in the plugin
-directories. The _tep_ argument is trace event parser context.
-The plugin directories are:
-[verse]
---
- - System's plugin directory, defined at the library compile time. It
- depends on the library installation prefix and usually is
- _(install_prefix)/lib/traceevent/plugins_
- - Directory, defined by the environment variable _TRACEEVENT_PLUGIN_DIR_
- - User's plugin directory, located at _~/.local/lib/traceevent/plugins_
---
-Loading of plugins can be controlled by the _tep_flags_, using the
-_tep_set_flag()_ API:
-[verse]
---
- _TEP_DISABLE_SYS_PLUGINS_ - do not load plugins, located in
- the system's plugin directory.
- _TEP_DISABLE_PLUGINS_ - do not load any plugins.
---
-The _tep_set_flag()_ API needs to be called before _tep_load_plugins()_, if
-loading of all plugins is not the desired case.
-
-The _tep_unload_plugins()_ function unloads the plugins, previously loaded by
-_tep_load_plugins()_. The _tep_ argument is trace event parser context. The
-_plugin_list_ is the list of loaded plugins, returned by
-the _tep_load_plugins()_ function.
-
-RETURN VALUE
-------------
-The _tep_load_plugins()_ function returns a list of successfully loaded plugins,
-or NULL in case no plugins are loaded.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-struct tep_plugin_list *plugins = tep_load_plugins(tep);
-if (plugins == NULL) {
- /* no plugins are loaded */
-}
-...
-tep_unload_plugins(plugins, tep);
---
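-
-The description above notes that plugin loading can be limited with
-_tep_set_flag()_ before calling _tep_load_plugins()_. A minimal sketch of that
-path, assuming the same _tep_ handle as above:
-[source,c]
---
-tep_set_flag(tep, TEP_DISABLE_SYS_PLUGINS);
-
-/* Plugins from the system plugin directory are now skipped */
-struct tep_plugin_list *user_plugins = tep_load_plugins(tep);
-...
-tep_unload_plugins(user_plugins, tep);
---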
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_, _tep_set_flag(3)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt b/tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt
deleted file mode 100644
index e9a69116c78b..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-record_parse.txt
+++ /dev/null
@@ -1,137 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_data_type, tep_data_pid, tep_data_preempt_count, tep_data_flags -
-Extract common fields from a record.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-enum *trace_flag_type* {
- _TRACE_FLAG_IRQS_OFF_,
- _TRACE_FLAG_IRQS_NOSUPPORT_,
- _TRACE_FLAG_NEED_RESCHED_,
- _TRACE_FLAG_HARDIRQ_,
- _TRACE_FLAG_SOFTIRQ_,
-};
-
-int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
-int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
-int *tep_data_preempt_count*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
-int *tep_data_flags*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
---
-
-DESCRIPTION
------------
-This set of functions can be used to extract common fields from a record.
-
-The _tep_data_type()_ function gets the event id from the record _rec_.
-It reads the "common_type" field. The _tep_ argument is the trace event parser
-context.
-
-The _tep_data_pid()_ function gets the process id from the record _rec_.
-It reads the "common_pid" field. The _tep_ argument is the trace event parser
-context.
-
-The _tep_data_preempt_count()_ function gets the preemption count from the
-record _rec_. It reads the "common_preempt_count" field. The _tep_ argument is
-the trace event parser context.
-
-The _tep_data_flags()_ function gets the latency flags from the record _rec_.
-It reads the "common_flags" field. The _tep_ argument is the trace event parser
-context. Supported latency flags are:
-[verse]
---
- _TRACE_FLAG_IRQS_OFF_ - Interrupts are disabled.
- _TRACE_FLAG_IRQS_NOSUPPORT_ - Reading IRQ flag is not supported by the architecture.
- _TRACE_FLAG_NEED_RESCHED_ - Task needs rescheduling.
- _TRACE_FLAG_HARDIRQ_ - Hard IRQ is running.
- _TRACE_FLAG_SOFTIRQ_ - Soft IRQ is running.
---
-
-RETURN VALUE
-------------
-The _tep_data_type()_ function returns an integer, representing the event id.
-
-The _tep_data_pid()_ function returns an integer, representing the process id.
-
-The _tep_data_preempt_count()_ function returns an integer, representing the
-preemption count.
-
-The _tep_data_flags()_ function returns an integer, representing the latency
-flags. Look at the _trace_flag_type_ enum for supported flags.
-
-All these functions in case of an error return a negative integer.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-void process_record(struct tep_record *record)
-{
- int data;
-
- data = tep_data_type(tep, record);
- if (data >= 0) {
- /* Got the ID of the event */
- }
-
- data = tep_data_pid(tep, record);
- if (data >= 0) {
- /* Got the process ID */
- }
-
- data = tep_data_preempt_count(tep, record);
- if (data >= 0) {
- /* Got the preemption count */
- }
-
- data = tep_data_flags(tep, record);
- if (data >= 0) {
- /* Got the latency flags */
- }
-}
-...
---
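-
-The latency flags returned by _tep_data_flags()_ are bit values (see the
-_trace_flag_type_ definition in event-parse.h), so individual flags can be
-tested with a bitwise AND. A minimal sketch, reusing _record_ from the example
-above:
-[source,c]
---
-int flags = tep_data_flags(tep, record);
-
-if (flags >= 0 && (flags & TRACE_FLAG_HARDIRQ)) {
-	/* The record was taken while a hard IRQ was running */
-}
---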
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt b/tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt
deleted file mode 100644
index 53d37d72a1c1..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-reg_event_handler.txt
+++ /dev/null
@@ -1,156 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_register_event_handler, tep_unregister_event_handler - Register /
-unregister a callback function to parse event information.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-enum *tep_reg_handler* {
- _TEP_REGISTER_SUCCESS_,
- _TEP_REGISTER_SUCCESS_OVERWRITE_,
-};
-
-int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
-int *tep_unregister_event_handler*(struct tep_handle pass:[*]tep, int id, const char pass:[*]sys_name, const char pass:[*]event_name, tep_event_handler_func func, void pass:[*]_context_);
-
-typedef int (*pass:[*]tep_event_handler_func*)(struct trace_seq pass:[*]s, struct tep_record pass:[*]record, struct tep_event pass:[*]event, void pass:[*]context);
---
-
-DESCRIPTION
------------
-The _tep_register_event_handler()_ function registers a handler function,
-which is going to be called to parse the information for a given event.
-The _tep_ argument is the trace event parser context. The _id_ argument is
-the id of the event. The _sys_name_ argument is the name of the system
-the event belongs to. The _event_name_ argument is the name of the event.
-If _id_ is >= 0, it is used to find the event, otherwise _sys_name_ and
-_event_name_ are used. The _func_ is a pointer to the function, which is going
-to be called to parse the event information. The _context_ argument is a pointer
-to the context data, which will be passed to the _func_. If a handler function
-for the same event is already registered, it will be overridden with the new
-one. This mechanism allows a developer to override the parsing of a given event.
-If for some reason the default print format is not sufficient, the developer
-can register a function for an event to be used to parse the data instead.
-
-The _tep_unregister_event_handler()_ function unregisters the handler function,
-previously registered with _tep_register_event_handler()_. The _tep_ argument
-is the trace event parser context. The _id_, _sys_name_, _event_name_, _func_,
-and _context_ are the same arguments, as when the callback function _func_ was
-registered.
-
-The _tep_event_handler_func_ is the type of the custom event handler
-function. The _s_ argument is the trace sequence; it can be used to create a
-custom string describing the event. The _record_ argument is the record to get
-the event from, and _event_ is the handle to the record's event. The
-_context_ is the custom context, set when the custom event handler was registered.
-
-RETURN VALUE
-------------
-The _tep_register_event_handler()_ function returns _TEP_REGISTER_SUCCESS_
-if the new handler is registered successfully or
-_TEP_REGISTER_SUCCESS_OVERWRITE_ if an existing handler is overwritten.
-If there is not enough memory to complete the registration,
-TEP_ERRNO__MEM_ALLOC_FAILED is returned.
-
-The _tep_unregister_event_handler()_ function returns 0 if _func_ was removed
-successfully, or -1 if the event was not found.
-
-The _tep_event_handler_func_ should return -1 in case of an error,
-or 0 otherwise.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-#include <trace-seq.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-int timer_expire_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, void *context)
-{
- trace_seq_printf(s, "hrtimer=");
-
- if (tep_print_num_field(s, "0x%llx", event, "timer", record, 0) == -1)
- tep_print_num_field(s, "0x%llx", event, "hrtimer", record, 1);
-
- trace_seq_printf(s, " now=");
-
- tep_print_num_field(s, "%llu", event, "now", record, 1);
-
- tep_print_func_field(s, " function=%s", event, "function", record, 0);
-
- return 0;
-}
-...
- int ret;
-
- ret = tep_register_event_handler(tep, -1, "timer", "hrtimer_expire_entry",
- timer_expire_handler, NULL);
- if (ret < 0) {
- char buf[32];
-
-		tep_strerror(tep, ret, buf, 32);
- printf("Failed to register handler for hrtimer_expire_entry: %s\n", buf);
- } else {
- switch (ret) {
- case TEP_REGISTER_SUCCESS:
- printf ("Registered handler for hrtimer_expire_entry\n");
- break;
- case TEP_REGISTER_SUCCESS_OVERWRITE:
- printf ("Overwrote handler for hrtimer_expire_entry\n");
- break;
- }
- }
-...
- ret = tep_unregister_event_handler(tep, -1, "timer", "hrtimer_expire_entry",
- timer_expire_handler, NULL);
- if ( ret )
- printf ("Failed to unregister handler for hrtimer_expire_entry\n");
-
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*trace-seq.h*
- Header file to include in order to have access to trace sequences
- related APIs. Trace sequences are used to allow a function to call
- several other functions to create a string of data to use.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt b/tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt
deleted file mode 100644
index 708dce91ebd8..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-reg_print_func.txt
+++ /dev/null
@@ -1,155 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_register_print_function, tep_unregister_print_function -
-Registers / unregisters a helper function.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-enum *tep_func_arg_type* {
- TEP_FUNC_ARG_VOID,
- TEP_FUNC_ARG_INT,
- TEP_FUNC_ARG_LONG,
- TEP_FUNC_ARG_STRING,
- TEP_FUNC_ARG_PTR,
- TEP_FUNC_ARG_MAX_TYPES
-};
-
-typedef unsigned long long (*pass:[*]tep_func_handler*)(struct trace_seq pass:[*]s, unsigned long long pass:[*]args);
-
-int *tep_register_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, enum tep_func_arg_type _ret_type_, char pass:[*]_name_, _..._);
-int *tep_unregister_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, char pass:[*]_name_);
---
-
-DESCRIPTION
------------
-Some events may have helper functions in the print format arguments.
-This allows a plugin to dynamically create a way to process one of
-these functions.
-
-The _tep_register_print_function()_ registers such a helper function. The _tep_
-argument is the trace event parser context. The _func_ argument is a pointer
-to the helper function. The _ret_type_ argument is the return type of the
-helper function, value from the _tep_func_arg_type_ enum. The _name_ is the name
-of the helper function, as seen in the print format arguments. The _..._ is a
-variable list of _tep_func_arg_type_ enums, the _func_ function arguments.
-This list must end with _TEP_FUNC_ARG_VOID_. See 'EXAMPLE' section.
-
-The _tep_unregister_print_function()_ unregisters a helper function, previously
-registered with _tep_register_print_function()_. The _tep_ argument is the
-trace event parser context. The _func_ and _name_ arguments are the same, used
-when the helper function was registered.
-
-The _tep_func_handler_ is the type of the helper function. The _s_ argument is
-the trace sequence, it can be used to create a custom string.
-The _args_ is a list of arguments, defined when the helper function was
-registered.
-
-RETURN VALUE
-------------
-The _tep_register_print_function()_ function returns 0 in case of success.
-In case of an error, TEP_ERRNO_... code is returned.
-
-The _tep_unregister_print_function()_ returns 0 in case of success, or -1 in
-case of an error.
-
-EXAMPLE
--------
-Some events have internal function calls that appear in the print format
-output. For example, "tracefs/events/i915/g4x_wm/format" has:
-[source,c]
---
-print fmt: "pipe %c, frame=%u, scanline=%u, wm %d/%d/%d, sr %s/%d/%d/%d, hpll %s/%d/%d/%d, fbc %s",
- ((REC->pipe) + 'A'), REC->frame, REC->scanline, REC->primary,
- REC->sprite, REC->cursor, yesno(REC->cxsr), REC->sr_plane,
- REC->sr_cursor, REC->sr_fbc, yesno(REC->hpll), REC->hpll_plane,
- REC->hpll_cursor, REC->hpll_fbc, yesno(REC->fbc)
---
-Notice the call to function _yesno()_ in the print arguments. In the kernel
-context, this function has the following implementation:
-[source,c]
---
-static const char *yesno(int x)
-{
- static const char *yes = "yes";
- static const char *no = "no";
-
- return x ? yes : no;
-}
---
-The user space event parser has no idea how to handle this _yesno()_ function.
-The _tep_register_print_function()_ API can be used to register a user space
-helper function, mapped to the kernel's _yesno()_:
-[source,c]
---
-#include <event-parse.h>
-#include <trace-seq.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-static const char *yes_no_helper(int x)
-{
- return x ? "yes" : "no";
-}
-...
- if ( tep_register_print_function(tep,
- yes_no_helper,
- TEP_FUNC_ARG_STRING,
- "yesno",
- TEP_FUNC_ARG_INT,
- TEP_FUNC_ARG_VOID) != 0) {
- /* Failed to register yes_no_helper function */
- }
-
-/*
- Now, when the event parser encounters this yesno() function, it will know
- how to handle it.
-*/
-...
- if (tep_unregister_print_function(tep, yes_no_helper, "yesno") != 0) {
- /* Failed to unregister yes_no_helper function */
- }
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*trace-seq.h*
- Header file to include in order to have access to trace sequences
- related APIs. Trace sequences are used to allow a function to call
- several other functions to create a string of data to use.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt b/tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt
deleted file mode 100644
index b0599780b9a6..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-set_flag.txt
+++ /dev/null
@@ -1,104 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_set_flag, tep_clear_flag, tep_test_flag -
-Manage flags of trace event parser context.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-enum *tep_flag* {
- _TEP_NSEC_OUTPUT_,
- _TEP_DISABLE_SYS_PLUGINS_,
- _TEP_DISABLE_PLUGINS_
-};
-void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
-void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
-bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
---
-
-DESCRIPTION
------------
-Trace event parser context flags are defined in *enum tep_flag*:
-[verse]
---
-_TEP_NSEC_OUTPUT_ - print event's timestamp in nanoseconds, instead of microseconds.
-_TEP_DISABLE_SYS_PLUGINS_ - disable plugins, located in system's plugin
- directory. This directory is defined at library compile
- time, and usually depends on library installation
- prefix: (install_prefix)/lib/traceevent/plugins
-_TEP_DISABLE_PLUGINS_ - disable all library plugins:
- - in system's plugin directory
- - in directory, defined by the environment variable _TRACEEVENT_PLUGIN_DIR_
- - in user's home directory, _~/.traceevent/plugins_
---
-Note: plugin related flags must be set before calling the _tep_load_plugins()_ API.
-
-The _tep_set_flag()_ function sets _flag_ to _tep_ context.
-
-The _tep_clear_flag()_ function clears _flag_ from _tep_ context.
-
-The _tep_test_flag()_ function tests if _flag_ is set to _tep_ context.
-
-RETURN VALUE
-------------
-_tep_test_flag()_ function returns true if _flag_ is set, false otherwise.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-/* Print timestamps in nanoseconds */
-tep_set_flag(tep, TEP_NSEC_OUTPUT);
-...
-if (tep_test_flag(tep, TEP_NSEC_OUTPUT)) {
- /* print timestamps in nanoseconds */
-} else {
- /* print timestamps in microseconds */
-}
-...
-/* Print timestamps in microseconds */
-tep_clear_flag(tep, TEP_NSEC_OUTPUT);
-...
---
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-strerror.txt b/tools/lib/traceevent/Documentation/libtraceevent-strerror.txt
deleted file mode 100644
index ee4062a00c9f..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-strerror.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-tep_strerror - Returns a string describing regular errno and tep error number.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_);
-
---
-DESCRIPTION
------------
-The _tep_strerror()_ function converts a tep error number into a human
-readable string.
-The _tep_ argument is the trace event parser context. The _errnum_ is a regular
-errno, defined in errno.h, or a tep error number. The string describing this
-error number is copied into the _buf_ argument. The _buflen_ argument is
-the size of _buf_.
-
-It is a thread safe wrapper around strerror_r(). That library function has two
-different behaviors - POSIX and GNU specific. The _tep_strerror()_ API always
-behaves as the POSIX version - the error string is copied into the user supplied
-buffer.
-
-RETURN VALUE
-------------
-The _tep_strerror()_ function returns 0 if a valid _errnum_ is passed and the
-string is copied into _buf_. If _errnum_ is not a valid error number,
--1 is returned and _buf_ is not modified.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-...
-struct tep_handle *tep = tep_alloc();
-...
-char buf[32];
-char *pool = calloc(1, 128);
-if (pool == NULL) {
- tep_strerror(tep, TEP_ERRNO__MEM_ALLOC_FAILED, buf, 32);
- printf ("The pool is not initialized, %s", buf);
-}
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
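
Because _tep_strerror()_ accepts both plain errno values and tep error numbers, one helper can report either kind with the POSIX-style copy-into-buffer behavior described above. A small illustrative sketch; the helper name report_error is an assumption, not a library API:

#include <stdio.h>
#include <event-parse.h>

/* Print a regular errno or a tep error number through the same path. */
static void report_error(struct tep_handle *tep, int errnum)
{
	char buf[64];

	if (tep_strerror(tep, errnum, buf, sizeof(buf)) == 0)
		fprintf(stderr, "error: %s\n", buf);
	else
		fprintf(stderr, "unknown error %d\n", errnum);
}

Calling it with ENOMEM (from errno.h) or with TEP_ERRNO__MEM_ALLOC_FAILED then goes through the same path, with the message always copied into the caller-supplied buffer as the RETURN VALUE section describes.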
diff --git a/tools/lib/traceevent/Documentation/libtraceevent-tseq.txt b/tools/lib/traceevent/Documentation/libtraceevent-tseq.txt
deleted file mode 100644
index 8ac6aa174e12..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent-tseq.txt
+++ /dev/null
@@ -1,158 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-trace_seq_init, trace_seq_destroy, trace_seq_reset, trace_seq_terminate,
-trace_seq_putc, trace_seq_puts, trace_seq_printf, trace_seq_vprintf,
-trace_seq_do_fprintf, trace_seq_do_printf -
-Initialize / destroy a trace sequence.
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-*#include <trace-seq.h>*
-
-void *trace_seq_init*(struct trace_seq pass:[*]_s_);
-void *trace_seq_destroy*(struct trace_seq pass:[*]_s_);
-void *trace_seq_reset*(struct trace_seq pass:[*]_s_);
-void *trace_seq_terminate*(struct trace_seq pass:[*]_s_);
-int *trace_seq_putc*(struct trace_seq pass:[*]_s_, unsigned char _c_);
-int *trace_seq_puts*(struct trace_seq pass:[*]_s_, const char pass:[*]_str_);
-int *trace_seq_printf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, _..._);
-int *trace_seq_vprintf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, va_list _args_);
-int *trace_seq_do_printf*(struct trace_seq pass:[*]_s_);
-int *trace_seq_do_fprintf*(struct trace_seq pass:[*]_s_, FILE pass:[*]_fp_);
---
-
-DESCRIPTION
------------
-Trace sequences are used to allow a function to call several other functions
-to create a string of data to use.
-
-The _trace_seq_init()_ function initializes the trace sequence _s_.
-
-The _trace_seq_destroy()_ function destroys the trace sequence _s_ and frees
-all the resources that it has used.
-
-The _trace_seq_reset()_ function re-initializes the trace sequence _s_. All
-characters already written in _s_ will be deleted.
-
-The _trace_seq_terminate()_ function terminates the trace sequence _s_. It puts
-the null character pass:['\0'] at the end of the buffer.
-
-The _trace_seq_putc()_ function puts a single character _c_ in the trace
-sequence _s_.
-
-The _trace_seq_puts()_ function puts a NULL terminated string _str_ in the
-trace sequence _s_.
-
-The _trace_seq_printf()_ function puts a formatted string _fmt_ with
-variable arguments _..._ in the trace sequence _s_.
-
-The _trace_seq_vprintf()_ function puts a formatted string _fmt_ with
-a list of arguments _args_ in the trace sequence _s_.
-
-The _trace_seq_do_printf()_ function prints the buffer of trace sequence _s_ to
-the standard output stdout.
-
-The _trace_seq_do_fprintf()_ function prints the buffer of trace sequence _s_
-to the given file _fp_.
-
-RETURN VALUE
-------------
-Both the _trace_seq_putc()_ and _trace_seq_puts()_ functions return the number of
-characters put in the trace sequence, or 0 in case of an error.
-
-Both the _trace_seq_printf()_ and _trace_seq_vprintf()_ functions return the
-number of characters printed, 0 if the formatted string does not fit in the
-buffer's free space, or a negative value in case of an error.
-
-Both _trace_seq_do_printf()_ and _trace_seq_do_fprintf()_ functions return the
-number of printed characters, or -1 in case of an error.
-
-EXAMPLE
--------
-[source,c]
---
-#include <event-parse.h>
-#include <trace-seq.h>
-...
-struct trace_seq seq;
-trace_seq_init(&seq);
-...
-void foo_seq_print(struct trace_seq *tseq, char *format, ...)
-{
- va_list ap;
- va_start(ap, format);
- if (trace_seq_vprintf(tseq, format, ap) <= 0) {
- /* Failed to print in the trace sequence */
- }
- va_end(ap);
-}
-
-trace_seq_reset(&seq);
-
-char *str = " MAN page example";
-if (trace_seq_puts(&seq, str) != strlen(str)) {
- /* Failed to put str in the trace sequence */
-}
-if (trace_seq_putc(&seq, ':') != 1) {
- /* Failed to put ':' in the trace sequence */
-}
-if (trace_seq_printf(&seq, " trace sequence: %d", 1) <= 0) {
- /* Failed to print in the trace sequence */
-}
-foo_seq_print( &seq, " %d\n", 2);
-
-trace_seq_terminate(&seq);
-...
-
-if (trace_seq_do_printf(&seq) < 0 ) {
- /* Failed to print the sequence buffer to the standard output */
-}
-FILE *fp = fopen("trace.txt", "w");
-if (trace_seq_do_fprintf(&seq, fp) < 0) {
- /* Failed to print the sequence buffer to the trace.txt file */
-}
-
-trace_seq_destroy(&seq);
-...
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*trace-seq.h*
- Header file to include in order to have access to trace sequences related APIs.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_libtraceevent(3)_, _trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
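
In practice a trace sequence is usually paired with the event-formatting APIs from libtraceevent(3): the library writes into the sequence and the caller decides where the buffer goes. A minimal illustrative pairing, assuming a struct tep_record obtained elsewhere and the record's data/size members from event-parse.h; the function name print_record_fields is hypothetical:

#include <event-parse.h>
#include <trace-seq.h>

/* Format all fields of one record and print them to stdout. */
static void print_record_fields(struct tep_handle *tep,
				struct tep_record *record)
{
	struct tep_event *event;
	struct trace_seq seq;

	event = tep_find_event_by_record(tep, record);
	if (!event)
		return;

	trace_seq_init(&seq);
	tep_print_fields(&seq, record->data, record->size, event);
	trace_seq_do_printf(&seq);
	trace_seq_destroy(&seq);
}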
diff --git a/tools/lib/traceevent/Documentation/libtraceevent.txt b/tools/lib/traceevent/Documentation/libtraceevent.txt
deleted file mode 100644
index d530a7ce8fb2..000000000000
--- a/tools/lib/traceevent/Documentation/libtraceevent.txt
+++ /dev/null
@@ -1,192 +0,0 @@
-libtraceevent(3)
-================
-
-NAME
-----
-libtraceevent - Linux kernel trace event library
-
-SYNOPSIS
---------
-[verse]
---
-*#include <event-parse.h>*
-
-Management of tep handler data structure and access of its members:
- struct tep_handle pass:[*]*tep_alloc*(void);
- void *tep_free*(struct tep_handle pass:[*]_tep_);
- void *tep_ref*(struct tep_handle pass:[*]_tep_);
- void *tep_unref*(struct tep_handle pass:[*]_tep_);
- int *tep_get_ref*(struct tep_handle pass:[*]_tep_);
- void *tep_set_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
- void *tep_clear_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flag_);
- bool *tep_test_flag*(struct tep_handle pass:[*]_tep_, enum tep_flag _flags_);
- int *tep_get_cpus*(struct tep_handle pass:[*]_tep_);
- void *tep_set_cpus*(struct tep_handle pass:[*]_tep_, int _cpus_);
- int *tep_get_long_size*(struct tep_handle pass:[*]_tep_);
- void *tep_set_long_size*(struct tep_handle pass:[*]_tep_, int _long_size_);
- int *tep_get_page_size*(struct tep_handle pass:[*]_tep_);
- void *tep_set_page_size*(struct tep_handle pass:[*]_tep_, int _page_size_);
- int *tep_get_header_page_size*(struct tep_handle pass:[*]_tep_);
- int *tep_get_header_timestamp_size*(struct tep_handle pass:[*]_tep_);
- bool *tep_is_old_format*(struct tep_handle pass:[*]_tep_);
- int *tep_strerror*(struct tep_handle pass:[*]_tep_, enum tep_errno _errnum_, char pass:[*]_buf_, size_t _buflen_);
-
-Register / unregister APIs:
- int *tep_register_function*(struct tep_handle pass:[*]_tep_, char pass:[*]_name_, unsigned long long _addr_, char pass:[*]_mod_);
- int *tep_register_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
- int *tep_unregister_event_handler*(struct tep_handle pass:[*]_tep_, int _id_, const char pass:[*]_sys_name_, const char pass:[*]_event_name_, tep_event_handler_func _func_, void pass:[*]_context_);
- int *tep_register_print_string*(struct tep_handle pass:[*]_tep_, const char pass:[*]_fmt_, unsigned long long _addr_);
- int *tep_register_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, enum tep_func_arg_type _ret_type_, char pass:[*]_name_, _..._);
- int *tep_unregister_print_function*(struct tep_handle pass:[*]_tep_, tep_func_handler _func_, char pass:[*]_name_);
-
-Plugins management:
- struct tep_plugin_list pass:[*]*tep_load_plugins*(struct tep_handle pass:[*]_tep_);
- void *tep_unload_plugins*(struct tep_plugin_list pass:[*]_plugin_list_, struct tep_handle pass:[*]_tep_);
- char pass:[*]pass:[*]*tep_plugin_list_options*(void);
- void *tep_plugin_free_options_list*(char pass:[*]pass:[*]_list_);
- int *tep_plugin_add_options*(const char pass:[*]_name_, struct tep_plugin_option pass:[*]_options_);
- void *tep_plugin_remove_options*(struct tep_plugin_option pass:[*]_options_);
- void *tep_print_plugins*(struct trace_seq pass:[*]_s_, const char pass:[*]_prefix_, const char pass:[*]_suffix_, const struct tep_plugin_list pass:[*]_list_);
-
-Event related APIs:
- struct tep_event pass:[*]*tep_get_event*(struct tep_handle pass:[*]_tep_, int _index_);
- struct tep_event pass:[*]*tep_get_first_event*(struct tep_handle pass:[*]_tep_);
- int *tep_get_events_count*(struct tep_handle pass:[*]_tep_);
- struct tep_event pass:[*]pass:[*]*tep_list_events*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
- struct tep_event pass:[*]pass:[*]*tep_list_events_copy*(struct tep_handle pass:[*]_tep_, enum tep_event_sort_type _sort_type_);
- void *tep_print_event*(struct tep_handle pass:[*]_tep_, struct trace_seq pass:[*]_s_, struct tep_record pass:[*]_record_, const char pass:[*]_fmt_, _..._);
-
-Event finding:
- struct tep_event pass:[*]*tep_find_event*(struct tep_handle pass:[*]_tep_, int _id_);
- struct tep_event pass:[*]*tep_find_event_by_name*(struct tep_handle pass:[*]_tep_, const char pass:[*]_sys_, const char pass:[*]_name_);
- struct tep_event pass:[*]*tep_find_event_by_record*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_record_);
-
-Parsing of event files:
- int *tep_parse_header_page*(struct tep_handle pass:[*]_tep_, char pass:[*]_buf_, unsigned long _size_, int _long_size_);
- enum tep_errno *tep_parse_event*(struct tep_handle pass:[*]_tep_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
- enum tep_errno *tep_parse_format*(struct tep_handle pass:[*]_tep_, struct tep_event pass:[*]pass:[*]_eventp_, const char pass:[*]_buf_, unsigned long _size_, const char pass:[*]_sys_);
-
-APIs related to fields from event's format files:
- struct tep_format_field pass:[*]pass:[*]*tep_event_common_fields*(struct tep_event pass:[*]_event_);
- struct tep_format_field pass:[*]pass:[*]*tep_event_fields*(struct tep_event pass:[*]_event_);
- void pass:[*]*tep_get_field_raw*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int pass:[*]_len_, int _err_);
- int *tep_get_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
- int *tep_get_common_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
- int *tep_get_any_field_val*(struct trace_seq pass:[*]_s_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, unsigned long long pass:[*]_val_, int _err_);
- int *tep_read_number_field*(struct tep_format_field pass:[*]_field_, const void pass:[*]_data_, unsigned long long pass:[*]_value_);
-
-Event fields printing:
- void *tep_print_field*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, struct tep_format_field pass:[*]_field_);
- void *tep_print_fields*(struct trace_seq pass:[*]_s_, void pass:[*]_data_, int _size_, struct tep_event pass:[*]_event_);
- int *tep_print_num_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
- int *tep_print_func_field*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, struct tep_event pass:[*]_event_, const char pass:[*]_name_, struct tep_record pass:[*]_record_, int _err_);
-
-Event fields finding:
- struct tep_format_field pass:[*]*tep_find_common_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
- struct tep_format_field pass:[*]*tep_find_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
- struct tep_format_field pass:[*]*tep_find_any_field*(struct tep_event pass:[*]_event_, const char pass:[*]_name_);
-
-Functions resolver:
- int *tep_set_function_resolver*(struct tep_handle pass:[*]_tep_, tep_func_resolver_t pass:[*]_func_, void pass:[*]_priv_);
- void *tep_reset_function_resolver*(struct tep_handle pass:[*]_tep_);
- const char pass:[*]*tep_find_function*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
- unsigned long long *tep_find_function_address*(struct tep_handle pass:[*]_tep_, unsigned long long _addr_);
-
-Filter management:
- struct tep_event_filter pass:[*]*tep_filter_alloc*(struct tep_handle pass:[*]_tep_);
- enum tep_errno *tep_filter_add_filter_str*(struct tep_event_filter pass:[*]_filter_, const char pass:[*]_filter_str_);
- enum tep_errno *tep_filter_match*(struct tep_event_filter pass:[*]_filter_, struct tep_record pass:[*]_record_);
- int *tep_filter_strerror*(struct tep_event_filter pass:[*]_filter_, enum tep_errno _err_, char pass:[*]buf, size_t _buflen_);
- int *tep_event_filtered*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
- void *tep_filter_reset*(struct tep_event_filter pass:[*]_filter_);
- void *tep_filter_free*(struct tep_event_filter pass:[*]_filter_);
- char pass:[*]*tep_filter_make_string*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
- int *tep_filter_remove_event*(struct tep_event_filter pass:[*]_filter_, int _event_id_);
- int *tep_filter_copy*(struct tep_event_filter pass:[*]_dest_, struct tep_event_filter pass:[*]_source_);
- int *tep_filter_compare*(struct tep_event_filter pass:[*]_filter1_, struct tep_event_filter pass:[*]_filter2_);
-
-Parsing various data from the records:
- int *tep_data_type*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
- int *tep_data_pid*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
- int *tep_data_preempt_count*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
- int *tep_data_flags*(struct tep_handle pass:[*]_tep_, struct tep_record pass:[*]_rec_);
-
-Command and task related APIs:
- const char pass:[*]*tep_data_comm_from_pid*(struct tep_handle pass:[*]_tep_, int _pid_);
- struct cmdline pass:[*]*tep_data_pid_from_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, struct cmdline pass:[*]_next_);
- int *tep_register_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
- int *tep_override_comm*(struct tep_handle pass:[*]_tep_, const char pass:[*]_comm_, int _pid_);
- bool *tep_is_pid_registered*(struct tep_handle pass:[*]_tep_, int _pid_);
- int *tep_cmdline_pid*(struct tep_handle pass:[*]_tep_, struct cmdline pass:[*]_cmdline_);
-
-Endian related APIs:
- int *tep_is_bigendian*(void);
- unsigned long long *tep_read_number*(struct tep_handle pass:[*]_tep_, const void pass:[*]_ptr_, int _size_);
- bool *tep_is_file_bigendian*(struct tep_handle pass:[*]_tep_);
- void *tep_set_file_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
- bool *tep_is_local_bigendian*(struct tep_handle pass:[*]_tep_);
- void *tep_set_local_bigendian*(struct tep_handle pass:[*]_tep_, enum tep_endian _endian_);
-
-Trace sequences:
-*#include <trace-seq.h>*
- void *trace_seq_init*(struct trace_seq pass:[*]_s_);
- void *trace_seq_reset*(struct trace_seq pass:[*]_s_);
- void *trace_seq_destroy*(struct trace_seq pass:[*]_s_);
- int *trace_seq_printf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, ...);
- int *trace_seq_vprintf*(struct trace_seq pass:[*]_s_, const char pass:[*]_fmt_, va_list _args_);
- int *trace_seq_puts*(struct trace_seq pass:[*]_s_, const char pass:[*]_str_);
- int *trace_seq_putc*(struct trace_seq pass:[*]_s_, unsigned char _c_);
- void *trace_seq_terminate*(struct trace_seq pass:[*]_s_);
- int *trace_seq_do_fprintf*(struct trace_seq pass:[*]_s_, FILE pass:[*]_fp_);
- int *trace_seq_do_printf*(struct trace_seq pass:[*]_s_);
---
-
-DESCRIPTION
------------
-The libtraceevent(3) library provides APIs to access kernel tracepoint events,
-located in the tracefs file system under the events directory.
-
-ENVIRONMENT
------------
-[verse]
---
-TRACEEVENT_PLUGIN_DIR
- Additional plugin directory. All shared object files located in this directory will be loaded as traceevent plugins.
---
-
-FILES
------
-[verse]
---
-*event-parse.h*
- Header file to include in order to have access to the library APIs.
-*trace-seq.h*
- Header file to include in order to have access to trace sequences related APIs.
- Trace sequences are used to allow a function to call several other functions
- to create a string of data to use.
-*-ltraceevent*
- Linker switch to add when building a program that uses the library.
---
-
-SEE ALSO
---------
-_trace-cmd(1)_
-
-AUTHOR
-------
-[verse]
---
-*Steven Rostedt* <rostedt@goodmis.org>, author of *libtraceevent*.
-*Tzvetomir Stoyanov* <tz.stoyanov@gmail.com>, author of this man page.
---
-REPORTING BUGS
---------------
-Report bugs to <linux-trace-devel@vger.kernel.org>
-
-LICENSE
--------
-libtraceevent is Free Software licensed under the GNU LGPL 2.1
-
-RESOURCES
----------
-https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
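
The overview page above has no EXAMPLE section, so a minimal end-to-end sketch may help tie the synopsis together: allocate a handle, feed it one event format, and look the event up again. The buffer is assumed to hold the text of an events/<system>/<event>/format file read from tracefs by the caller; the "sched"/"sched_switch" names and the parse_one_format helper are placeholders.

#include <stdio.h>
#include <event-parse.h>

/* fmt_buf/fmt_size: contents of a tracefs format file, read by the caller. */
static int parse_one_format(const char *fmt_buf, unsigned long fmt_size)
{
	struct tep_handle *tep = tep_alloc();
	struct tep_event *event;

	if (!tep)
		return -1;

	if (tep_parse_event(tep, fmt_buf, fmt_size, "sched") != 0) {
		tep_free(tep);
		return -1;
	}

	event = tep_find_event_by_name(tep, "sched", "sched_switch");
	if (event)
		printf("parsed %d event(s)\n", tep_get_events_count(tep));

	tep_free(tep);
	return 0;
}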
diff --git a/tools/lib/traceevent/Documentation/manpage-1.72.xsl b/tools/lib/traceevent/Documentation/manpage-1.72.xsl
deleted file mode 100644
index b4d315cb8c47..000000000000
--- a/tools/lib/traceevent/Documentation/manpage-1.72.xsl
+++ /dev/null
@@ -1,14 +0,0 @@
-<!-- manpage-1.72.xsl:
- special settings for manpages rendered from asciidoc+docbook
- handles peculiarities in docbook-xsl 1.72.0 -->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0">
-
-<xsl:import href="manpage-base.xsl"/>
-
-<!-- these are the special values for the roff control characters
- needed for docbook-xsl 1.72.0 -->
-<xsl:param name="git.docbook.backslash">&#x2593;</xsl:param>
-<xsl:param name="git.docbook.dot" >&#x2302;</xsl:param>
-
-</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-base.xsl b/tools/lib/traceevent/Documentation/manpage-base.xsl
deleted file mode 100644
index a264fa616093..000000000000
--- a/tools/lib/traceevent/Documentation/manpage-base.xsl
+++ /dev/null
@@ -1,35 +0,0 @@
-<!-- manpage-base.xsl:
- special formatting for manpages rendered from asciidoc+docbook -->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0">
-
-<!-- these params silence some output from xmlto -->
-<xsl:param name="man.output.quietly" select="1"/>
-<xsl:param name="refentry.meta.get.quietly" select="1"/>
-
-<!-- convert asciidoc callouts to man page format;
- git.docbook.backslash and git.docbook.dot params
- must be supplied by another XSL file or other means -->
-<xsl:template match="co">
- <xsl:value-of select="concat(
- $git.docbook.backslash,'fB(',
- substring-after(@id,'-'),')',
- $git.docbook.backslash,'fR')"/>
-</xsl:template>
-<xsl:template match="calloutlist">
- <xsl:value-of select="$git.docbook.dot"/>
- <xsl:text>sp&#10;</xsl:text>
- <xsl:apply-templates/>
- <xsl:text>&#10;</xsl:text>
-</xsl:template>
-<xsl:template match="callout">
- <xsl:value-of select="concat(
- $git.docbook.backslash,'fB',
- substring-after(@arearefs,'-'),
- '. ',$git.docbook.backslash,'fR')"/>
- <xsl:apply-templates/>
- <xsl:value-of select="$git.docbook.dot"/>
- <xsl:text>br&#10;</xsl:text>
-</xsl:template>
-
-</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-bold-literal.xsl b/tools/lib/traceevent/Documentation/manpage-bold-literal.xsl
deleted file mode 100644
index 608eb5df6281..000000000000
--- a/tools/lib/traceevent/Documentation/manpage-bold-literal.xsl
+++ /dev/null
@@ -1,17 +0,0 @@
-<!-- manpage-bold-literal.xsl:
- special formatting for manpages rendered from asciidoc+docbook -->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0">
-
-<!-- render literal text as bold (instead of plain or monospace);
- this makes literal text easier to distinguish in manpages
- viewed on a tty -->
-<xsl:template match="literal">
- <xsl:value-of select="$git.docbook.backslash"/>
- <xsl:text>fB</xsl:text>
- <xsl:apply-templates/>
- <xsl:value-of select="$git.docbook.backslash"/>
- <xsl:text>fR</xsl:text>
-</xsl:template>
-
-</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-normal.xsl b/tools/lib/traceevent/Documentation/manpage-normal.xsl
deleted file mode 100644
index a48f5b11f3dc..000000000000
--- a/tools/lib/traceevent/Documentation/manpage-normal.xsl
+++ /dev/null
@@ -1,13 +0,0 @@
-<!-- manpage-normal.xsl:
- special settings for manpages rendered from asciidoc+docbook
- handles anything we want to keep away from docbook-xsl 1.72.0 -->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0">
-
-<xsl:import href="manpage-base.xsl"/>
-
-<!-- these are the normal values for the roff control characters -->
-<xsl:param name="git.docbook.backslash">\</xsl:param>
-<xsl:param name="git.docbook.dot" >.</xsl:param>
-
-</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl b/tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl
deleted file mode 100644
index a63c7632a87d..000000000000
--- a/tools/lib/traceevent/Documentation/manpage-suppress-sp.xsl
+++ /dev/null
@@ -1,21 +0,0 @@
-<!-- manpage-suppress-sp.xsl:
- special settings for manpages rendered from asciidoc+docbook
- handles erroneous, inline .sp in manpage output of some
- versions of docbook-xsl -->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0">
-
-<!-- attempt to work around spurious .sp at the tail of the line
- that some versions of docbook stylesheets seem to add -->
-<xsl:template match="simpara">
- <xsl:variable name="content">
- <xsl:apply-templates/>
- </xsl:variable>
- <xsl:value-of select="normalize-space($content)"/>
- <xsl:if test="not(ancestor::authorblurb) and
- not(ancestor::personblurb)">
- <xsl:text>&#10;&#10;</xsl:text>
- </xsl:if>
-</xsl:template>
-
-</xsl:stylesheet>
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
deleted file mode 100644
index c874c017c636..000000000000
--- a/tools/lib/traceevent/Makefile
+++ /dev/null
@@ -1,300 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-# trace-cmd version
-EP_VERSION = 1
-EP_PATCHLEVEL = 1
-EP_EXTRAVERSION = 0
-
-# file format version
-FILE_VERSION = 6
-
-MAKEFLAGS += --no-print-directory
-
-
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-$(call allow-override,NM,$(CROSS_COMPILE)nm)
-$(call allow-override,PKG_CONFIG,pkg-config)
-
-EXT = -std=gnu99
-INSTALL = install
-
-# Use DESTDIR for installing into a different root directory.
-# This is useful for building a package. The program will be
-# installed in this directory as if it was the root directory.
-# Then the build tool can move it later.
-DESTDIR ?=
-DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
-
-LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
-ifeq ($(LP64), 1)
- libdir_relative_temp = lib64
-else
- libdir_relative_temp = lib
-endif
-
-libdir_relative ?= $(libdir_relative_temp)
-prefix ?= /usr/local
-libdir = $(prefix)/$(libdir_relative)
-man_dir = $(prefix)/share/man
-man_dir_SQ = '$(subst ','\'',$(man_dir))'
-pkgconfig_dir ?= $(word 1,$(shell $(PKG_CONFIG) \
- --variable pc_path pkg-config | tr ":" " "))
-includedir_relative = traceevent
-includedir = $(prefix)/include/$(includedir_relative)
-includedir_SQ = '$(subst ','\'',$(includedir))'
-
-export man_dir man_dir_SQ INSTALL
-export DESTDIR DESTDIR_SQ
-export EVENT_PARSE_VERSION
-
-include ../../scripts/Makefile.include
-
-# copy a bit from Linux kbuild
-
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-export prefix libdir src obj
-
-# Shell quotes
-libdir_SQ = $(subst ','\'',$(libdir))
-libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
-
-CONFIG_INCLUDES =
-CONFIG_LIBS =
-CONFIG_FLAGS =
-
-VERSION = $(EP_VERSION)
-PATCHLEVEL = $(EP_PATCHLEVEL)
-EXTRAVERSION = $(EP_EXTRAVERSION)
-
-OBJ = $@
-N =
-
-EVENT_PARSE_VERSION = $(EP_VERSION).$(EP_PATCHLEVEL).$(EP_EXTRAVERSION)
-
-LIB_TARGET = libtraceevent.a libtraceevent.so.$(EVENT_PARSE_VERSION)
-LIB_INSTALL = libtraceevent.a libtraceevent.so*
-LIB_INSTALL := $(addprefix $(OUTPUT),$(LIB_INSTALL))
-
-INCLUDES = -I. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
-
-# Set compile option CFLAGS
-ifdef EXTRA_CFLAGS
- CFLAGS := $(EXTRA_CFLAGS)
-else
- CFLAGS := -g -Wall
-endif
-
-# Append required CFLAGS
-override CFLAGS += -fPIC
-override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
-override CFLAGS += $(udis86-flags) -D_GNU_SOURCE
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
-# Disable command line variables (CFLAGS) override from top
-# level Makefile (perf), otherwise build Makefile will get
-# the same command line setup.
-MAKEOVERRIDES=
-
-export srctree OUTPUT CC LD CFLAGS V
-build := -f $(srctree)/tools/build/Makefile.build dir=. obj
-
-TE_IN := $(OUTPUT)libtraceevent-in.o
-LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
-
-CMD_TARGETS = $(LIB_TARGET)
-
-TARGETS = $(CMD_TARGETS)
-
-all: all_cmd plugins
-
-all_cmd: $(CMD_TARGETS)
-
-$(TE_IN): force
- $(Q)$(MAKE) $(build)=libtraceevent
-
-$(OUTPUT)libtraceevent.so.$(EVENT_PARSE_VERSION): $(TE_IN)
- $(QUIET_LINK)$(CC) --shared $(LDFLAGS) $^ -Wl,-soname,libtraceevent.so.$(EP_VERSION) -o $@
- @ln -sf $(@F) $(OUTPUT)libtraceevent.so
- @ln -sf $(@F) $(OUTPUT)libtraceevent.so.$(EP_VERSION)
-
-$(OUTPUT)libtraceevent.a: $(TE_IN)
- $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
-
-$(OUTPUT)%.so: $(OUTPUT)%-in.o
- $(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^
-
-define make_version.h
- (echo '/* This file is automatically generated. Do not modify. */'; \
- echo \#define VERSION_CODE $(shell \
- expr $(VERSION) \* 256 + $(PATCHLEVEL)); \
- echo '#define EXTRAVERSION ' $(EXTRAVERSION); \
- echo '#define VERSION_STRING "'$(VERSION).$(PATCHLEVEL).$(EXTRAVERSION)'"'; \
- echo '#define FILE_VERSION '$(FILE_VERSION); \
- ) > $1
-endef
-
-define update_version.h
- ($(call make_version.h, $@.tmp); \
- if [ -r $@ ] && cmp -s $@ $@.tmp; then \
- rm -f $@.tmp; \
- else \
- echo ' UPDATE $@'; \
- mv -f $@.tmp $@; \
- fi);
-endef
-
-ep_version.h: force
- $(Q)$(N)$(call update_version.h)
-
-VERSION_FILES = ep_version.h
-
-define update_dir
- (echo $1 > $@.tmp; \
- if [ -r $@ ] && cmp -s $@ $@.tmp; then \
- rm -f $@.tmp; \
- else \
- echo ' UPDATE $@'; \
- mv -f $@.tmp $@; \
- fi);
-endef
-
-tags: force
- $(RM) tags
- find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
- --regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'
-
-TAGS: force
- $(RM) TAGS
- find . -name '*.[ch]' | xargs etags \
- --regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'
-
-define do_install_mkdir
- if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
- fi
-endef
-
-define do_install
- $(call do_install_mkdir,$2); \
- $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
-endef
-
-PKG_CONFIG_SOURCE_FILE = libtraceevent.pc
-PKG_CONFIG_FILE := $(addprefix $(OUTPUT),$(PKG_CONFIG_SOURCE_FILE))
-define do_install_pkgconfig_file
- if [ -n "${pkgconfig_dir}" ]; then \
- cp -f ${PKG_CONFIG_SOURCE_FILE}.template ${PKG_CONFIG_FILE}; \
- sed -i "s|INSTALL_PREFIX|${1}|g" ${PKG_CONFIG_FILE}; \
- sed -i "s|LIB_VERSION|${EVENT_PARSE_VERSION}|g" ${PKG_CONFIG_FILE}; \
- sed -i "s|LIB_DIR|${libdir}|g" ${PKG_CONFIG_FILE}; \
- sed -i "s|HEADER_DIR|$(includedir)|g" ${PKG_CONFIG_FILE}; \
- $(call do_install,$(PKG_CONFIG_FILE),$(pkgconfig_dir),644); \
- else \
- (echo Failed to locate pkg-config directory) 1>&2; \
- fi
-endef
-
-install_lib: all_cmd install_plugins install_headers install_pkgconfig
- $(call QUIET_INSTALL, $(LIB_TARGET)) \
- $(call do_install_mkdir,$(libdir_SQ)); \
- cp -fpR $(LIB_INSTALL) $(DESTDIR)$(libdir_SQ)
-
-install_pkgconfig:
- $(call QUIET_INSTALL, $(PKG_CONFIG_FILE)) \
- $(call do_install_pkgconfig_file,$(prefix))
-
-install_headers:
- $(call QUIET_INSTALL, headers) \
- $(call do_install,event-parse.h,$(includedir_SQ),644); \
- $(call do_install,event-utils.h,$(includedir_SQ),644); \
- $(call do_install,trace-seq.h,$(includedir_SQ),644); \
- $(call do_install,kbuffer.h,$(includedir_SQ),644)
-
-install: install_lib
-
-clean: clean_plugins
- $(call QUIET_CLEAN, libtraceevent) \
- $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \
- $(RM) TRACEEVENT-CFLAGS tags TAGS; \
- $(RM) $(PKG_CONFIG_FILE)
-
-PHONY += doc
-doc:
- $(call descend,Documentation)
-
-PHONY += doc-clean
-doc-clean:
- $(call descend,Documentation,clean)
-
-PHONY += doc-install
-doc-install:
- $(call descend,Documentation,install)
-
-PHONY += doc-uninstall
-doc-uninstall:
- $(call descend,Documentation,uninstall)
-
-PHONY += help
-help:
- @echo 'Possible targets:'
- @echo''
- @echo ' all - default, compile the library and the'\
- 'plugins'
- @echo ' plugins - compile the plugins'
- @echo ' install - install the library, the plugins,'\
- 'the header and pkgconfig files'
- @echo ' clean - clean the library and the plugins object files'
- @echo ' doc - compile the documentation files - man'\
- 'and html pages, in the Documentation directory'
- @echo ' doc-clean - clean the documentation files'
- @echo ' doc-install - install the man pages'
- @echo ' doc-uninstall - uninstall the man pages'
- @echo''
-
-PHONY += plugins
-plugins:
- $(call descend,plugins)
-
-PHONY += install_plugins
-install_plugins:
- $(call descend,plugins,install)
-
-PHONY += clean_plugins
-clean_plugins:
- $(call descend,plugins,clean)
-
-force:
-
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable so we can use it in if_changed and friends.
-.PHONY: $(PHONY)
diff --git a/tools/lib/traceevent/event-parse-api.c b/tools/lib/traceevent/event-parse-api.c
deleted file mode 100644
index 4faf52a65791..000000000000
--- a/tools/lib/traceevent/event-parse-api.c
+++ /dev/null
@@ -1,333 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-
-#include "event-parse.h"
-#include "event-parse-local.h"
-#include "event-utils.h"
-
-/**
- * tep_get_event - returns the event with the given index
- * @tep: a handle to the tep_handle
- * @index: index of the requested event, in the range 0 .. nr_events
- *
- * This returns a pointer to the element of the events array with the given index.
- * If @tep is NULL, or @index is not in the range 0 .. nr_events, NULL is returned.
- */
-struct tep_event *tep_get_event(struct tep_handle *tep, int index)
-{
- if (tep && tep->events && index < tep->nr_events)
- return tep->events[index];
-
- return NULL;
-}
-
-/**
- * tep_get_first_event - returns the first event in the events array
- * @tep: a handle to the tep_handle
- *
- * This returns a pointer to the first element of the events array.
- * If @tep is NULL, NULL is returned.
- */
-struct tep_event *tep_get_first_event(struct tep_handle *tep)
-{
- return tep_get_event(tep, 0);
-}
-
-/**
- * tep_get_events_count - get the number of defined events
- * @tep: a handle to the tep_handle
- *
- * This returns the number of elements in the events array.
- * If @tep is NULL, 0 is returned.
- */
-int tep_get_events_count(struct tep_handle *tep)
-{
- if (tep)
- return tep->nr_events;
- return 0;
-}
-
-/**
- * tep_set_flag - set event parser flag
- * @tep: a handle to the tep_handle
- * @flag: flag, or combination of flags to be set
- * can be any combination from enum tep_flag
- *
- * This sets a flag or combination of flags from enum tep_flag
- */
-void tep_set_flag(struct tep_handle *tep, int flag)
-{
- if (tep)
- tep->flags |= flag;
-}
-
-/**
- * tep_clear_flag - clear event parser flag
- * @tep: a handle to the tep_handle
- * @flag: flag to be cleared
- *
- * This clears a tep flag
- */
-void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag)
-{
- if (tep)
- tep->flags &= ~flag;
-}
-
-/**
- * tep_test_flag - check the state of event parser flag
- * @tep: a handle to the tep_handle
- * @flag: flag to be checked
- *
- * This returns the state of the requested tep flag.
- * Returns: true if the flag is set, false otherwise.
- */
-bool tep_test_flag(struct tep_handle *tep, enum tep_flag flag)
-{
- if (tep)
- return tep->flags & flag;
- return false;
-}
-
-unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data)
-{
- unsigned short swap;
-
- if (!tep || tep->host_bigendian == tep->file_bigendian)
- return data;
-
- swap = ((data & 0xffULL) << 8) |
- ((data & (0xffULL << 8)) >> 8);
-
- return swap;
-}
-
-unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data)
-{
- unsigned int swap;
-
- if (!tep || tep->host_bigendian == tep->file_bigendian)
- return data;
-
- swap = ((data & 0xffULL) << 24) |
- ((data & (0xffULL << 8)) << 8) |
- ((data & (0xffULL << 16)) >> 8) |
- ((data & (0xffULL << 24)) >> 24);
-
- return swap;
-}
-
-unsigned long long
-tep_data2host8(struct tep_handle *tep, unsigned long long data)
-{
- unsigned long long swap;
-
- if (!tep || tep->host_bigendian == tep->file_bigendian)
- return data;
-
- swap = ((data & 0xffULL) << 56) |
- ((data & (0xffULL << 8)) << 40) |
- ((data & (0xffULL << 16)) << 24) |
- ((data & (0xffULL << 24)) << 8) |
- ((data & (0xffULL << 32)) >> 8) |
- ((data & (0xffULL << 40)) >> 24) |
- ((data & (0xffULL << 48)) >> 40) |
- ((data & (0xffULL << 56)) >> 56);
-
- return swap;
-}
-
-/**
- * tep_get_header_page_size - get size of the header page
- * @tep: a handle to the tep_handle
- *
- * This returns the size of the header page.
- * If @tep is NULL, 0 is returned.
- */
-int tep_get_header_page_size(struct tep_handle *tep)
-{
- if (tep)
- return tep->header_page_size_size;
- return 0;
-}
-
-/**
- * tep_get_header_timestamp_size - get size of the timestamp in the header page
- * @tep: a handle to the tep_handle
- *
- * This returns the size of the timestamp in the header page.
- * If @tep is NULL, 0 is returned.
- */
-int tep_get_header_timestamp_size(struct tep_handle *tep)
-{
- if (tep)
- return tep->header_page_ts_size;
- return 0;
-}
-
-/**
- * tep_get_cpus - get the number of CPUs
- * @tep: a handle to the tep_handle
- *
- * This returns the number of CPUs
- * If @tep is NULL, 0 is returned.
- */
-int tep_get_cpus(struct tep_handle *tep)
-{
- if (tep)
- return tep->cpus;
- return 0;
-}
-
-/**
- * tep_set_cpus - set the number of CPUs
- * @tep: a handle to the tep_handle
- *
- * This sets the number of CPUs
- */
-void tep_set_cpus(struct tep_handle *tep, int cpus)
-{
- if (tep)
- tep->cpus = cpus;
-}
-
-/**
- * tep_get_long_size - get the size of a long integer on the traced machine
- * @tep: a handle to the tep_handle
- *
- * This returns the size of a long integer on the traced machine
- * If @tep is NULL, 0 is returned.
- */
-int tep_get_long_size(struct tep_handle *tep)
-{
- if (tep)
- return tep->long_size;
- return 0;
-}
-
-/**
- * tep_set_long_size - set the size of a long integer on the traced machine
- * @tep: a handle to the tep_handle
- * @size: size, in bytes, of a long integer
- *
- * This sets the size of a long integer on the traced machine
- */
-void tep_set_long_size(struct tep_handle *tep, int long_size)
-{
- if (tep)
- tep->long_size = long_size;
-}
-
-/**
- * tep_get_page_size - get the size of a memory page on the traced machine
- * @tep: a handle to the tep_handle
- *
- * This returns the size of a memory page on the traced machine
- * If @tep is NULL, 0 is returned.
- */
-int tep_get_page_size(struct tep_handle *tep)
-{
- if (tep)
- return tep->page_size;
- return 0;
-}
-
-/**
- * tep_set_page_size - set the size of a memory page on the traced machine
- * @tep: a handle to the tep_handle
- * @_page_size: size of a memory page, in bytes
- *
- * This sets the size of a memory page on the traced machine
- */
-void tep_set_page_size(struct tep_handle *tep, int _page_size)
-{
- if (tep)
- tep->page_size = _page_size;
-}
-
-/**
- * tep_is_file_bigendian - return the endian of the file
- * @tep: a handle to the tep_handle
- *
- * This returns true if the file is in big endian order
- * If @tep is NULL, false is returned.
- */
-bool tep_is_file_bigendian(struct tep_handle *tep)
-{
- if (tep)
- return (tep->file_bigendian == TEP_BIG_ENDIAN);
- return false;
-}
-
-/**
- * tep_set_file_bigendian - set if the file is in big endian order
- * @tep: a handle to the tep_handle
- * @endian: non zero, if the file is in big endian order
- *
- * This sets if the file is in big endian order
- */
-void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian)
-{
- if (tep)
- tep->file_bigendian = endian;
-}
-
-/**
- * tep_is_local_bigendian - return the endian of the saved local machine
- * @tep: a handle to the tep_handle
- *
- * This returns true if the saved local machine in @tep is big endian.
- * If @tep is NULL, false is returned.
- */
-bool tep_is_local_bigendian(struct tep_handle *tep)
-{
- if (tep)
- return (tep->host_bigendian == TEP_BIG_ENDIAN);
- return false;
-}
-
-/**
- * tep_set_local_bigendian - set the stored local machine endian order
- * @tep: a handle to the tep_handle
- * @endian: non zero, if the local host has big endian order
- *
- * This sets the endian order for the local machine.
- */
-void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian)
-{
- if (tep)
- tep->host_bigendian = endian;
-}
-
-/**
- * tep_is_old_format - get if an old kernel is used
- * @tep: a handle to the tep_handle
- *
- * This returns true if an old kernel is used to generate the tracing events, or
- * false if a new kernel is used. Old kernels did not have header page info.
- * If @tep is NULL, false is returned.
- */
-bool tep_is_old_format(struct tep_handle *tep)
-{
- if (tep)
- return tep->old_format;
- return false;
-}
-
-/**
- * tep_set_test_filters - set a flag to test a filter string
- * @tep: a handle to the tep_handle
- * @test_filters: the new value of the test_filters flag
- *
- * This sets a flag to test a filter string. If this flag is set, when the
- * tep_filter_add_filter_str() API is called, it will print the filter string
- * instead of adding it.
- */
-void tep_set_test_filters(struct tep_handle *tep, int test_filters)
-{
- if (tep)
- tep->test_filters = test_filters;
-}
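
The accessors implemented above are what a tracing front end calls to describe the traced machine before any records are decoded. A short illustrative configuration sketch; the helper name and the numeric values are placeholders that a real tool would take from its trace file header:

#include <event-parse.h>

/* Describe the traced machine to the parser; the values are placeholders. */
static void describe_traced_machine(struct tep_handle *tep)
{
	tep_set_cpus(tep, 8);		/* CPUs recorded in the trace */
	tep_set_long_size(tep, 8);	/* sizeof(long) on the target */
	tep_set_page_size(tep, 4096);	/* target page size in bytes  */

	/* Byte order of the trace file and of the host parsing it. */
	tep_set_file_bigendian(tep, TEP_LITTLE_ENDIAN);
	tep_set_local_bigendian(tep, tep_is_bigendian() ?
				     TEP_BIG_ENDIAN : TEP_LITTLE_ENDIAN);
}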
diff --git a/tools/lib/traceevent/event-parse-local.h b/tools/lib/traceevent/event-parse-local.h
deleted file mode 100644
index cee469803a34..000000000000
--- a/tools/lib/traceevent/event-parse-local.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-
-#ifndef _PARSE_EVENTS_INT_H
-#define _PARSE_EVENTS_INT_H
-
-struct tep_cmdline;
-struct cmdline_list;
-struct func_map;
-struct func_list;
-struct event_handler;
-struct func_resolver;
-
-struct tep_handle {
- int ref_count;
-
- int header_page_ts_offset;
- int header_page_ts_size;
- int header_page_size_offset;
- int header_page_size_size;
- int header_page_data_offset;
- int header_page_data_size;
- int header_page_overwrite;
-
- enum tep_endian file_bigendian;
- enum tep_endian host_bigendian;
-
- int old_format;
-
- int cpus;
- int long_size;
- int page_size;
-
- struct tep_cmdline *cmdlines;
- struct cmdline_list *cmdlist;
- int cmdline_count;
-
- struct func_map *func_map;
- struct func_resolver *func_resolver;
- struct func_list *funclist;
- unsigned int func_count;
-
- struct printk_map *printk_map;
- struct printk_list *printklist;
- unsigned int printk_count;
-
-
- struct tep_event **events;
- int nr_events;
- struct tep_event **sort_events;
- enum tep_event_sort_type last_type;
-
- int type_offset;
- int type_size;
-
- int pid_offset;
- int pid_size;
-
- int pc_offset;
- int pc_size;
-
- int flags_offset;
- int flags_size;
-
- int ld_offset;
- int ld_size;
-
- int test_filters;
-
- int flags;
-
- struct tep_format_field *bprint_ip_field;
- struct tep_format_field *bprint_fmt_field;
- struct tep_format_field *bprint_buf_field;
-
- struct event_handler *handlers;
- struct tep_function_handler *func_handlers;
-
- /* cache */
- struct tep_event *last_event;
-};
-
-void tep_free_event(struct tep_event *event);
-void tep_free_format_field(struct tep_format_field *field);
-
-unsigned short tep_data2host2(struct tep_handle *tep, unsigned short data);
-unsigned int tep_data2host4(struct tep_handle *tep, unsigned int data);
-unsigned long long tep_data2host8(struct tep_handle *tep, unsigned long long data);
-
-#endif /* _PARSE_EVENTS_INT_H */
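
The tep_data2host*() helpers declared above are the internal byte-swapping counterparts of the public tep_read_number() API, which decides whether to swap based on the file and host endianness stored in the handle. An illustrative use of the public entry point; the field offset is a placeholder the caller would normally take from a struct tep_format_field:

#include <event-parse.h>

/* Read a 4-byte field from raw record data, honoring the trace byte order. */
static unsigned int read_u32_field(struct tep_handle *tep,
				   const void *record_data, int field_offset)
{
	const char *p = (const char *)record_data + field_offset;

	return (unsigned int)tep_read_number(tep, p, 4);
}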
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
deleted file mode 100644
index e1bd2a93c6db..000000000000
--- a/tools/lib/traceevent/event-parse.c
+++ /dev/null
@@ -1,7075 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- *
- * The parts for function graph printing was taken and modified from the
- * Linux Kernel that were written by
- * - Copyright (C) 2009 Frederic Weisbecker,
- * Frederic Weisbecker gave his permission to relicense the code to
- * the Lesser General Public License.
- */
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <ctype.h>
-#include <errno.h>
-#include <stdint.h>
-#include <limits.h>
-#include <linux/time64.h>
-
-#include <netinet/in.h>
-#include "event-parse.h"
-
-#include "event-parse-local.h"
-#include "event-utils.h"
-#include "trace-seq.h"
-
-static const char *input_buf;
-static unsigned long long input_buf_ptr;
-static unsigned long long input_buf_siz;
-
-static int is_flag_field;
-static int is_symbolic_field;
-
-static int show_warning = 1;
-
-#define do_warning(fmt, ...) \
- do { \
- if (show_warning) \
- warning(fmt, ##__VA_ARGS__); \
- } while (0)
-
-#define do_warning_event(event, fmt, ...) \
- do { \
- if (!show_warning) \
- continue; \
- \
- if (event) \
- warning("[%s:%s] " fmt, event->system, \
- event->name, ##__VA_ARGS__); \
- else \
- warning(fmt, ##__VA_ARGS__); \
- } while (0)
-
-static void init_input_buf(const char *buf, unsigned long long size)
-{
- input_buf = buf;
- input_buf_siz = size;
- input_buf_ptr = 0;
-}
-
-const char *tep_get_input_buf(void)
-{
- return input_buf;
-}
-
-unsigned long long tep_get_input_buf_ptr(void)
-{
- return input_buf_ptr;
-}
-
-struct event_handler {
- struct event_handler *next;
- int id;
- const char *sys_name;
- const char *event_name;
- tep_event_handler_func func;
- void *context;
-};
-
-struct func_params {
- struct func_params *next;
- enum tep_func_arg_type type;
-};
-
-struct tep_function_handler {
- struct tep_function_handler *next;
- enum tep_func_arg_type ret_type;
- char *name;
- tep_func_handler func;
- struct func_params *params;
- int nr_args;
-};
-
-static unsigned long long
-process_defined_func(struct trace_seq *s, void *data, int size,
- struct tep_event *event, struct tep_print_arg *arg);
-
-static void free_func_handle(struct tep_function_handler *func);
-
-/**
- * tep_buffer_init - init buffer for parsing
- * @buf: buffer to parse
- * @size: the size of the buffer
- *
- * For use with tep_read_token(), this initializes the internal
- * buffer that tep_read_token() will parse.
- */
-void tep_buffer_init(const char *buf, unsigned long long size)
-{
- init_input_buf(buf, size);
-}
-
-void breakpoint(void)
-{
- static int x;
- x++;
-}
-
-struct tep_print_arg *alloc_arg(void)
-{
- return calloc(1, sizeof(struct tep_print_arg));
-}
-
-struct tep_cmdline {
- char *comm;
- int pid;
-};
-
-static int cmdline_cmp(const void *a, const void *b)
-{
- const struct tep_cmdline *ca = a;
- const struct tep_cmdline *cb = b;
-
- if (ca->pid < cb->pid)
- return -1;
- if (ca->pid > cb->pid)
- return 1;
-
- return 0;
-}
-
-/* Looking for where to place the key */
-static int cmdline_slot_cmp(const void *a, const void *b)
-{
- const struct tep_cmdline *ca = a;
- const struct tep_cmdline *cb = b;
- const struct tep_cmdline *cb1 = cb + 1;
-
- if (ca->pid < cb->pid)
- return -1;
-
- if (ca->pid > cb->pid) {
- if (ca->pid <= cb1->pid)
- return 0;
- return 1;
- }
-
- return 0;
-}
-
-struct cmdline_list {
- struct cmdline_list *next;
- char *comm;
- int pid;
-};
-
-static int cmdline_init(struct tep_handle *tep)
-{
- struct cmdline_list *cmdlist = tep->cmdlist;
- struct cmdline_list *item;
- struct tep_cmdline *cmdlines;
- int i;
-
- cmdlines = malloc(sizeof(*cmdlines) * tep->cmdline_count);
- if (!cmdlines)
- return -1;
-
- i = 0;
- while (cmdlist) {
- cmdlines[i].pid = cmdlist->pid;
- cmdlines[i].comm = cmdlist->comm;
- i++;
- item = cmdlist;
- cmdlist = cmdlist->next;
- free(item);
- }
-
- qsort(cmdlines, tep->cmdline_count, sizeof(*cmdlines), cmdline_cmp);
-
- tep->cmdlines = cmdlines;
- tep->cmdlist = NULL;
-
- return 0;
-}
-
-static const char *find_cmdline(struct tep_handle *tep, int pid)
-{
- const struct tep_cmdline *comm;
- struct tep_cmdline key;
-
- if (!pid)
- return "<idle>";
-
- if (!tep->cmdlines && cmdline_init(tep))
- return "<not enough memory for cmdlines!>";
-
- key.pid = pid;
-
- comm = bsearch(&key, tep->cmdlines, tep->cmdline_count,
- sizeof(*tep->cmdlines), cmdline_cmp);
-
- if (comm)
- return comm->comm;
- return "<...>";
-}
-
-/**
- * tep_is_pid_registered - return if a pid has a cmdline registered
- * @tep: a handle to the trace event parser context
- * @pid: The pid to check if it has a cmdline registered with.
- *
- * Returns true if the pid has a cmdline mapped to it
- * false otherwise.
- */
-bool tep_is_pid_registered(struct tep_handle *tep, int pid)
-{
- const struct tep_cmdline *comm;
- struct tep_cmdline key;
-
- if (!pid)
- return true;
-
- if (!tep->cmdlines && cmdline_init(tep))
- return false;
-
- key.pid = pid;
-
- comm = bsearch(&key, tep->cmdlines, tep->cmdline_count,
- sizeof(*tep->cmdlines), cmdline_cmp);
-
- if (comm)
- return true;
- return false;
-}
-
-/*
- * If the command lines have been converted to an array, then
- * we must add this pid. This is much slower than when cmdlines
- * are added before the array is initialized.
- */
-static int add_new_comm(struct tep_handle *tep,
- const char *comm, int pid, bool override)
-{
- struct tep_cmdline *cmdlines = tep->cmdlines;
- struct tep_cmdline *cmdline;
- struct tep_cmdline key;
- char *new_comm;
- int cnt;
-
- if (!pid)
- return 0;
-
- /* avoid duplicates */
- key.pid = pid;
-
- cmdline = bsearch(&key, tep->cmdlines, tep->cmdline_count,
- sizeof(*tep->cmdlines), cmdline_cmp);
- if (cmdline) {
- if (!override) {
- errno = EEXIST;
- return -1;
- }
- new_comm = strdup(comm);
- if (!new_comm) {
- errno = ENOMEM;
- return -1;
- }
- free(cmdline->comm);
- cmdline->comm = new_comm;
-
- return 0;
- }
-
- cmdlines = realloc(cmdlines, sizeof(*cmdlines) * (tep->cmdline_count + 1));
- if (!cmdlines) {
- errno = ENOMEM;
- return -1;
- }
- tep->cmdlines = cmdlines;
-
- key.comm = strdup(comm);
- if (!key.comm) {
- errno = ENOMEM;
- return -1;
- }
-
- if (!tep->cmdline_count) {
- /* no entries yet */
- tep->cmdlines[0] = key;
- tep->cmdline_count++;
- return 0;
- }
-
- /* Now find where we want to store the new cmdline */
- cmdline = bsearch(&key, tep->cmdlines, tep->cmdline_count - 1,
- sizeof(*tep->cmdlines), cmdline_slot_cmp);
-
- cnt = tep->cmdline_count;
- if (cmdline) {
- /* cmdline points to the one before the spot we want */
- cmdline++;
- cnt -= cmdline - tep->cmdlines;
-
- } else {
- /* The new entry is either before or after the list */
- if (key.pid > tep->cmdlines[tep->cmdline_count - 1].pid) {
- tep->cmdlines[tep->cmdline_count++] = key;
- return 0;
- }
- cmdline = &tep->cmdlines[0];
- }
- memmove(cmdline + 1, cmdline, (cnt * sizeof(*cmdline)));
- *cmdline = key;
-
- tep->cmdline_count++;
-
- return 0;
-}
-
-static int _tep_register_comm(struct tep_handle *tep,
- const char *comm, int pid, bool override)
-{
- struct cmdline_list *item;
-
- if (tep->cmdlines)
- return add_new_comm(tep, comm, pid, override);
-
- item = malloc(sizeof(*item));
- if (!item)
- return -1;
-
- if (comm)
- item->comm = strdup(comm);
- else
- item->comm = strdup("<...>");
- if (!item->comm) {
- free(item);
- return -1;
- }
- item->pid = pid;
- item->next = tep->cmdlist;
-
- tep->cmdlist = item;
- tep->cmdline_count++;
-
- return 0;
-}
-
-/**
- * tep_register_comm - register a pid / comm mapping
- * @tep: a handle to the trace event parser context
- * @comm: the command line to register
- * @pid: the pid to map the command line to
- *
- * This adds a mapping to search for command line names with
- * a given pid. The comm is duplicated. If a command with the same pid
- * already exists, -1 is returned and errno is set to EEXIST.
- */
-int tep_register_comm(struct tep_handle *tep, const char *comm, int pid)
-{
- return _tep_register_comm(tep, comm, pid, false);
-}
-
-/**
- * tep_override_comm - register a pid / comm mapping
- * @tep: a handle to the trace event parser context
- * @comm: the command line to register
- * @pid: the pid to map the command line to
- *
- * This adds a mapping to search for command line names with
- * a given pid. The comm is duplicated. If a command with the same pid
- * already exists, the command string is updated with the new one.
- */
-int tep_override_comm(struct tep_handle *tep, const char *comm, int pid)
-{
- if (!tep->cmdlines && cmdline_init(tep)) {
- errno = ENOMEM;
- return -1;
- }
- return _tep_register_comm(tep, comm, pid, true);
-}
-
-struct func_map {
- unsigned long long addr;
- char *func;
- char *mod;
-};
-
-struct func_list {
- struct func_list *next;
- unsigned long long addr;
- char *func;
- char *mod;
-};
-
-static int func_cmp(const void *a, const void *b)
-{
- const struct func_map *fa = a;
- const struct func_map *fb = b;
-
- if (fa->addr < fb->addr)
- return -1;
- if (fa->addr > fb->addr)
- return 1;
-
- return 0;
-}
-
-/*
- * We are searching for a record in between, not an exact
- * match.
- */
-static int func_bcmp(const void *a, const void *b)
-{
- const struct func_map *fa = a;
- const struct func_map *fb = b;
-
- if ((fa->addr == fb->addr) ||
-
- (fa->addr > fb->addr &&
- fa->addr < (fb+1)->addr))
- return 0;
-
- if (fa->addr < fb->addr)
- return -1;
-
- return 1;
-}
-
-static int func_map_init(struct tep_handle *tep)
-{
- struct func_list *funclist;
- struct func_list *item;
- struct func_map *func_map;
- int i;
-
- func_map = malloc(sizeof(*func_map) * (tep->func_count + 1));
- if (!func_map)
- return -1;
-
- funclist = tep->funclist;
-
- i = 0;
- while (funclist) {
- func_map[i].func = funclist->func;
- func_map[i].addr = funclist->addr;
- func_map[i].mod = funclist->mod;
- i++;
- item = funclist;
- funclist = funclist->next;
- free(item);
- }
-
- qsort(func_map, tep->func_count, sizeof(*func_map), func_cmp);
-
- /*
- * Add a special record at the end.
- */
- func_map[tep->func_count].func = NULL;
- func_map[tep->func_count].addr = 0;
- func_map[tep->func_count].mod = NULL;
-
- tep->func_map = func_map;
- tep->funclist = NULL;
-
- return 0;
-}
-
-static struct func_map *
-__find_func(struct tep_handle *tep, unsigned long long addr)
-{
- struct func_map *func;
- struct func_map key;
-
- if (!tep->func_map)
- func_map_init(tep);
-
- key.addr = addr;
-
- func = bsearch(&key, tep->func_map, tep->func_count,
- sizeof(*tep->func_map), func_bcmp);
-
- return func;
-}
-
-struct func_resolver {
- tep_func_resolver_t *func;
- void *priv;
- struct func_map map;
-};
-
-/**
- * tep_set_function_resolver - set an alternative function resolver
- * @tep: a handle to the trace event parser context
- * @resolver: function to be used
- * @priv: resolver function private state.
- *
- * Some tools may already have a way to resolve kernel functions; allow them to
- * keep using it instead of duplicating all the entries inside tep->funclist.
- */
-int tep_set_function_resolver(struct tep_handle *tep,
- tep_func_resolver_t *func, void *priv)
-{
- struct func_resolver *resolver = malloc(sizeof(*resolver));
-
- if (resolver == NULL)
- return -1;
-
- resolver->func = func;
- resolver->priv = priv;
-
- free(tep->func_resolver);
- tep->func_resolver = resolver;
-
- return 0;
-}
-
-/**
- * tep_reset_function_resolver - reset alternative function resolver
- * @tep: a handle to the trace event parser context
- *
- * Stop using whatever alternative resolver was set, use the default
- * one instead.
- */
-void tep_reset_function_resolver(struct tep_handle *tep)
-{
- free(tep->func_resolver);
- tep->func_resolver = NULL;
-}
-
-static struct func_map *
-find_func(struct tep_handle *tep, unsigned long long addr)
-{
- struct func_map *map;
-
- if (!tep->func_resolver)
- return __find_func(tep, addr);
-
- map = &tep->func_resolver->map;
- map->mod = NULL;
- map->addr = addr;
- map->func = tep->func_resolver->func(tep->func_resolver->priv,
- &map->addr, &map->mod);
- if (map->func == NULL)
- return NULL;
-
- return map;
-}
-
-/**
- * tep_find_function - find a function by a given address
- * @tep: a handle to the trace event parser context
- * @addr: the address to find the function with
- *
- * Returns a pointer to the function stored that has the given
- * address. Note, the address does not have to be exact, it
- * will select the function that would contain the address.
- */
-const char *tep_find_function(struct tep_handle *tep, unsigned long long addr)
-{
- struct func_map *map;
-
- map = find_func(tep, addr);
- if (!map)
- return NULL;
-
- return map->func;
-}
-
-/**
- * tep_find_function_address - find a function address by a given address
- * @tep: a handle to the trace event parser context
- * @addr: the address to find the function with
- *
- * Returns the address the function starts at. This can be used in
- * conjunction with tep_find_function to print both the function
- * name and the function offset.
- */
-unsigned long long
-tep_find_function_address(struct tep_handle *tep, unsigned long long addr)
-{
- struct func_map *map;
-
- map = find_func(tep, addr);
- if (!map)
- return 0;
-
- return map->addr;
-}
-
-/**
- * tep_register_function - register a function with a given address
- * @tep: a handle to the trace event parser context
- * @func: the function name to register
- * @addr: the address the function starts at
- * @mod: the kernel module the function may be in (NULL for none)
- *
- * This registers a function name with an address and module.
- * The @func passed in is duplicated.
- */
-int tep_register_function(struct tep_handle *tep, char *func,
- unsigned long long addr, char *mod)
-{
- struct func_list *item = malloc(sizeof(*item));
-
- if (!item)
- return -1;
-
- item->next = tep->funclist;
- item->func = strdup(func);
- if (!item->func)
- goto out_free;
-
- if (mod) {
- item->mod = strdup(mod);
- if (!item->mod)
- goto out_free_func;
- } else
- item->mod = NULL;
- item->addr = addr;
-
- tep->funclist = item;
- tep->func_count++;
-
- return 0;
-
-out_free_func:
- free(item->func);
- item->func = NULL;
-out_free:
- free(item);
- errno = ENOMEM;
- return -1;
-}
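
A sketch of how a tool feeds its symbol table into the parser; the addresses, names and module below are invented for illustration (the library duplicates the strings, so stack buffers are fine):

	#include "event-parse.h"

	static int register_toy_symbols(struct tep_handle *tep)
	{
		char f1[] = "start_kernel";
		char f2[] = "e1000_probe", m2[] = "e1000";

		if (tep_register_function(tep, f1, 0xffffffff81000000ULL, NULL))
			return -1;
		return tep_register_function(tep, f2, 0xffffffffa0010000ULL, m2);
	}

Once some functions are registered, tep_print_funcs() above can be used to dump the sorted map for debugging.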
-
-/**
- * tep_print_funcs - print out the stored functions
- * @tep: a handle to the trace event parser context
- *
- * This prints out the stored functions.
- */
-void tep_print_funcs(struct tep_handle *tep)
-{
- int i;
-
- if (!tep->func_map)
- func_map_init(tep);
-
- for (i = 0; i < (int)tep->func_count; i++) {
- printf("%016llx %s",
- tep->func_map[i].addr,
- tep->func_map[i].func);
- if (tep->func_map[i].mod)
- printf(" [%s]\n", tep->func_map[i].mod);
- else
- printf("\n");
- }
-}
-
-struct printk_map {
- unsigned long long addr;
- char *printk;
-};
-
-struct printk_list {
- struct printk_list *next;
- unsigned long long addr;
- char *printk;
-};
-
-static int printk_cmp(const void *a, const void *b)
-{
- const struct printk_map *pa = a;
- const struct printk_map *pb = b;
-
- if (pa->addr < pb->addr)
- return -1;
- if (pa->addr > pb->addr)
- return 1;
-
- return 0;
-}
-
-static int printk_map_init(struct tep_handle *tep)
-{
- struct printk_list *printklist;
- struct printk_list *item;
- struct printk_map *printk_map;
- int i;
-
- printk_map = malloc(sizeof(*printk_map) * (tep->printk_count + 1));
- if (!printk_map)
- return -1;
-
- printklist = tep->printklist;
-
- i = 0;
- while (printklist) {
- printk_map[i].printk = printklist->printk;
- printk_map[i].addr = printklist->addr;
- i++;
- item = printklist;
- printklist = printklist->next;
- free(item);
- }
-
- qsort(printk_map, tep->printk_count, sizeof(*printk_map), printk_cmp);
-
- tep->printk_map = printk_map;
- tep->printklist = NULL;
-
- return 0;
-}
-
-static struct printk_map *
-find_printk(struct tep_handle *tep, unsigned long long addr)
-{
- struct printk_map *printk;
- struct printk_map key;
-
- if (!tep->printk_map && printk_map_init(tep))
- return NULL;
-
- key.addr = addr;
-
- printk = bsearch(&key, tep->printk_map, tep->printk_count,
- sizeof(*tep->printk_map), printk_cmp);
-
- return printk;
-}
-
-/**
- * tep_register_print_string - register a string by its address
- * @tep: a handle to the trace event parser context
- * @fmt: the string format to register
- * @addr: the address the string was located at
- *
- * This registers a string by the address at which it was stored in the kernel.
- * The @fmt passed in is duplicated.
- */
-int tep_register_print_string(struct tep_handle *tep, const char *fmt,
- unsigned long long addr)
-{
- struct printk_list *item = malloc(sizeof(*item));
- char *p;
-
- if (!item)
- return -1;
-
- item->next = tep->printklist;
- item->addr = addr;
-
- /* Strip off quotes and '\n' from the end */
- if (fmt[0] == '"')
- fmt++;
- item->printk = strdup(fmt);
- if (!item->printk)
- goto out_free;
-
- p = item->printk + strlen(item->printk) - 1;
- if (*p == '"')
- *p = 0;
-
- p -= 2;
- if (strcmp(p, "\\n") == 0)
- *p = 0;
-
- tep->printklist = item;
- tep->printk_count++;
-
- return 0;
-
-out_free:
- free(item);
- errno = ENOMEM;
- return -1;
-}
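
A corresponding sketch for the printk-format side; the address and format string are illustrative only, standing in for one line of a trace file's saved "printk formats" section:

	#include "event-parse.h"

	static int register_toy_printk(struct tep_handle *tep)
	{
		/* The surrounding quotes and trailing \n are stripped above. */
		return tep_register_print_string(tep, "\"napi poll dev=%s\\n\"",
						 0xffffffff81c012a0ULL);
	}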
-
-/**
- * tep_print_printk - print out the stored strings
- * @tep: a handle to the trace event parser context
- *
- * This prints the string formats that were stored.
- */
-void tep_print_printk(struct tep_handle *tep)
-{
- int i;
-
- if (!tep->printk_map)
- printk_map_init(tep);
-
- for (i = 0; i < (int)tep->printk_count; i++) {
- printf("%016llx %s\n",
- tep->printk_map[i].addr,
- tep->printk_map[i].printk);
- }
-}
-
-static struct tep_event *alloc_event(void)
-{
- return calloc(1, sizeof(struct tep_event));
-}
-
-static int add_event(struct tep_handle *tep, struct tep_event *event)
-{
- int i;
- struct tep_event **events = realloc(tep->events, sizeof(event) *
- (tep->nr_events + 1));
- if (!events)
- return -1;
-
- tep->events = events;
-
- for (i = 0; i < tep->nr_events; i++) {
- if (tep->events[i]->id > event->id)
- break;
- }
- if (i < tep->nr_events)
- memmove(&tep->events[i + 1],
- &tep->events[i],
- sizeof(event) * (tep->nr_events - i));
-
- tep->events[i] = event;
- tep->nr_events++;
-
- event->tep = tep;
-
- return 0;
-}
-
-static int event_item_type(enum tep_event_type type)
-{
- switch (type) {
- case TEP_EVENT_ITEM ... TEP_EVENT_SQUOTE:
- return 1;
- case TEP_EVENT_ERROR ... TEP_EVENT_DELIM:
- default:
- return 0;
- }
-}
-
-static void free_flag_sym(struct tep_print_flag_sym *fsym)
-{
- struct tep_print_flag_sym *next;
-
- while (fsym) {
- next = fsym->next;
- free(fsym->value);
- free(fsym->str);
- free(fsym);
- fsym = next;
- }
-}
-
-static void free_arg(struct tep_print_arg *arg)
-{
- struct tep_print_arg *farg;
-
- if (!arg)
- return;
-
- switch (arg->type) {
- case TEP_PRINT_ATOM:
- free(arg->atom.atom);
- break;
- case TEP_PRINT_FIELD:
- free(arg->field.name);
- break;
- case TEP_PRINT_FLAGS:
- free_arg(arg->flags.field);
- free(arg->flags.delim);
- free_flag_sym(arg->flags.flags);
- break;
- case TEP_PRINT_SYMBOL:
- free_arg(arg->symbol.field);
- free_flag_sym(arg->symbol.symbols);
- break;
- case TEP_PRINT_HEX:
- case TEP_PRINT_HEX_STR:
- free_arg(arg->hex.field);
- free_arg(arg->hex.size);
- break;
- case TEP_PRINT_INT_ARRAY:
- free_arg(arg->int_array.field);
- free_arg(arg->int_array.count);
- free_arg(arg->int_array.el_size);
- break;
- case TEP_PRINT_TYPE:
- free(arg->typecast.type);
- free_arg(arg->typecast.item);
- break;
- case TEP_PRINT_STRING:
- case TEP_PRINT_BSTRING:
- free(arg->string.string);
- break;
- case TEP_PRINT_BITMASK:
- free(arg->bitmask.bitmask);
- break;
- case TEP_PRINT_DYNAMIC_ARRAY:
- case TEP_PRINT_DYNAMIC_ARRAY_LEN:
- free(arg->dynarray.index);
- break;
- case TEP_PRINT_OP:
- free(arg->op.op);
- free_arg(arg->op.left);
- free_arg(arg->op.right);
- break;
- case TEP_PRINT_FUNC:
- while (arg->func.args) {
- farg = arg->func.args;
- arg->func.args = farg->next;
- free_arg(farg);
- }
- break;
-
- case TEP_PRINT_NULL:
- default:
- break;
- }
-
- free(arg);
-}
-
-static enum tep_event_type get_type(int ch)
-{
- if (ch == '\n')
- return TEP_EVENT_NEWLINE;
- if (isspace(ch))
- return TEP_EVENT_SPACE;
- if (isalnum(ch) || ch == '_')
- return TEP_EVENT_ITEM;
- if (ch == '\'')
- return TEP_EVENT_SQUOTE;
- if (ch == '"')
- return TEP_EVENT_DQUOTE;
- if (!isprint(ch))
- return TEP_EVENT_NONE;
- if (ch == '(' || ch == ')' || ch == ',')
- return TEP_EVENT_DELIM;
-
- return TEP_EVENT_OP;
-}
-
-static int __read_char(void)
-{
- if (input_buf_ptr >= input_buf_siz)
- return -1;
-
- return input_buf[input_buf_ptr++];
-}
-
-static int __peek_char(void)
-{
- if (input_buf_ptr >= input_buf_siz)
- return -1;
-
- return input_buf[input_buf_ptr];
-}
-
-/**
- * tep_peek_char - peek at the next character that will be read
- *
- * Returns the next character read, or -1 if end of buffer.
- */
-int tep_peek_char(void)
-{
- return __peek_char();
-}
-
-static int extend_token(char **tok, char *buf, int size)
-{
- char *newtok = realloc(*tok, size);
-
- if (!newtok) {
- free(*tok);
- *tok = NULL;
- return -1;
- }
-
- if (!*tok)
- strcpy(newtok, buf);
- else
- strcat(newtok, buf);
- *tok = newtok;
-
- return 0;
-}
-
-static enum tep_event_type force_token(const char *str, char **tok);
-
-static enum tep_event_type __read_token(char **tok)
-{
- char buf[BUFSIZ];
- int ch, last_ch, quote_ch, next_ch;
- int i = 0;
- int tok_size = 0;
- enum tep_event_type type;
-
- *tok = NULL;
-
-
- ch = __read_char();
- if (ch < 0)
- return TEP_EVENT_NONE;
-
- type = get_type(ch);
- if (type == TEP_EVENT_NONE)
- return type;
-
- buf[i++] = ch;
-
- switch (type) {
- case TEP_EVENT_NEWLINE:
- case TEP_EVENT_DELIM:
- if (asprintf(tok, "%c", ch) < 0)
- return TEP_EVENT_ERROR;
-
- return type;
-
- case TEP_EVENT_OP:
- switch (ch) {
- case '-':
- next_ch = __peek_char();
- if (next_ch == '>') {
- buf[i++] = __read_char();
- break;
- }
- /* fall through */
- case '+':
- case '|':
- case '&':
- case '>':
- case '<':
- last_ch = ch;
- ch = __peek_char();
- if (ch != last_ch)
- goto test_equal;
- buf[i++] = __read_char();
- switch (last_ch) {
- case '>':
- case '<':
- goto test_equal;
- default:
- break;
- }
- break;
- case '!':
- case '=':
- goto test_equal;
- default: /* what should we do instead? */
- break;
- }
- buf[i] = 0;
- *tok = strdup(buf);
- return type;
-
- test_equal:
- ch = __peek_char();
- if (ch == '=')
- buf[i++] = __read_char();
- goto out;
-
- case TEP_EVENT_DQUOTE:
- case TEP_EVENT_SQUOTE:
- /* don't keep quotes */
- i--;
- quote_ch = ch;
- last_ch = 0;
- concat:
- do {
- if (i == (BUFSIZ - 1)) {
- buf[i] = 0;
- tok_size += BUFSIZ;
-
- if (extend_token(tok, buf, tok_size) < 0)
- return TEP_EVENT_NONE;
- i = 0;
- }
- last_ch = ch;
- ch = __read_char();
- buf[i++] = ch;
- /* two consecutive '\' characters cancel each other */
- if (ch == '\\' && last_ch == '\\')
- last_ch = 0;
- } while (ch != quote_ch || last_ch == '\\');
- /* remove the last quote */
- i--;
-
- /*
- * For strings (double quotes) check the next token.
- * If it is another string, concatenate the two.
- */
- if (type == TEP_EVENT_DQUOTE) {
- unsigned long long save_input_buf_ptr = input_buf_ptr;
-
- do {
- ch = __read_char();
- } while (isspace(ch));
- if (ch == '"')
- goto concat;
- input_buf_ptr = save_input_buf_ptr;
- }
-
- goto out;
-
- case TEP_EVENT_ERROR ... TEP_EVENT_SPACE:
- case TEP_EVENT_ITEM:
- default:
- break;
- }
-
- while (get_type(__peek_char()) == type) {
- if (i == (BUFSIZ - 1)) {
- buf[i] = 0;
- tok_size += BUFSIZ;
-
- if (extend_token(tok, buf, tok_size) < 0)
- return TEP_EVENT_NONE;
- i = 0;
- }
- ch = __read_char();
- buf[i++] = ch;
- }
-
- out:
- buf[i] = 0;
- if (extend_token(tok, buf, tok_size + i + 1) < 0)
- return TEP_EVENT_NONE;
-
- if (type == TEP_EVENT_ITEM) {
- /*
- * Older versions of the kernel have a bug that
- * creates invalid symbols and will break the mac80211
- * parsing. This is a workaround for that bug.
- *
- * See Linux kernel commit:
- * 811cb50baf63461ce0bdb234927046131fc7fa8b
- */
- if (strcmp(*tok, "LOCAL_PR_FMT") == 0) {
- free(*tok);
- *tok = NULL;
- return force_token("\"%s\" ", tok);
- } else if (strcmp(*tok, "STA_PR_FMT") == 0) {
- free(*tok);
- *tok = NULL;
- return force_token("\" sta:%pM\" ", tok);
- } else if (strcmp(*tok, "VIF_PR_FMT") == 0) {
- free(*tok);
- *tok = NULL;
- return force_token("\" vif:%p(%d)\" ", tok);
- }
- }
-
- return type;
-}
-
-static enum tep_event_type force_token(const char *str, char **tok)
-{
- const char *save_input_buf;
- unsigned long long save_input_buf_ptr;
- unsigned long long save_input_buf_siz;
- enum tep_event_type type;
-
- /* save off the current input pointers */
- save_input_buf = input_buf;
- save_input_buf_ptr = input_buf_ptr;
- save_input_buf_siz = input_buf_siz;
-
- init_input_buf(str, strlen(str));
-
- type = __read_token(tok);
-
- /* reset back to original token */
- input_buf = save_input_buf;
- input_buf_ptr = save_input_buf_ptr;
- input_buf_siz = save_input_buf_siz;
-
- return type;
-}
-
-static void free_token(char *tok)
-{
- if (tok)
- free(tok);
-}
-
-static enum tep_event_type read_token(char **tok)
-{
- enum tep_event_type type;
-
- for (;;) {
- type = __read_token(tok);
- if (type != TEP_EVENT_SPACE)
- return type;
-
- free_token(*tok);
- }
-
- /* not reached */
- *tok = NULL;
- return TEP_EVENT_NONE;
-}
-
-/**
- * tep_read_token - access to the tep parser's tokenizer
- * @tok: The token to return
- *
- * This will parse tokens from the string given by
- * tep_init_data().
- *
- * Returns the token type.
- */
-enum tep_event_type tep_read_token(char **tok)
-{
- return read_token(tok);
-}
-
-/**
- * tep_free_token - free a token returned by tep_read_token
- * @token: the token to free
- */
-void tep_free_token(char *token)
-{
- free_token(token);
-}
-
-/* no newline */
-static enum tep_event_type read_token_item(char **tok)
-{
- enum tep_event_type type;
-
- for (;;) {
- type = __read_token(tok);
- if (type != TEP_EVENT_SPACE && type != TEP_EVENT_NEWLINE)
- return type;
- free_token(*tok);
- *tok = NULL;
- }
-
- /* not reached */
- *tok = NULL;
- return TEP_EVENT_NONE;
-}
-
-static int test_type(enum tep_event_type type, enum tep_event_type expect)
-{
- if (type != expect) {
- do_warning("Error: expected type %d but read %d",
- expect, type);
- return -1;
- }
- return 0;
-}
-
-static int test_type_token(enum tep_event_type type, const char *token,
- enum tep_event_type expect, const char *expect_tok)
-{
- if (type != expect) {
- do_warning("Error: expected type %d but read %d",
- expect, type);
- return -1;
- }
-
- if (strcmp(token, expect_tok) != 0) {
- do_warning("Error: expected '%s' but read '%s'",
- expect_tok, token);
- return -1;
- }
- return 0;
-}
-
-static int __read_expect_type(enum tep_event_type expect, char **tok, int newline_ok)
-{
- enum tep_event_type type;
-
- if (newline_ok)
- type = read_token(tok);
- else
- type = read_token_item(tok);
- return test_type(type, expect);
-}
-
-static int read_expect_type(enum tep_event_type expect, char **tok)
-{
- return __read_expect_type(expect, tok, 1);
-}
-
-static int __read_expected(enum tep_event_type expect, const char *str,
- int newline_ok)
-{
- enum tep_event_type type;
- char *token;
- int ret;
-
- if (newline_ok)
- type = read_token(&token);
- else
- type = read_token_item(&token);
-
- ret = test_type_token(type, token, expect, str);
-
- free_token(token);
-
- return ret;
-}
-
-static int read_expected(enum tep_event_type expect, const char *str)
-{
- return __read_expected(expect, str, 1);
-}
-
-static int read_expected_item(enum tep_event_type expect, const char *str)
-{
- return __read_expected(expect, str, 0);
-}
-
-static char *event_read_name(void)
-{
- char *token;
-
- if (read_expected(TEP_EVENT_ITEM, "name") < 0)
- return NULL;
-
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- return NULL;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto fail;
-
- return token;
-
- fail:
- free_token(token);
- return NULL;
-}
-
-static int event_read_id(void)
-{
- char *token;
- int id;
-
- if (read_expected_item(TEP_EVENT_ITEM, "ID") < 0)
- return -1;
-
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- return -1;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto fail;
-
- id = strtoul(token, NULL, 0);
- free_token(token);
- return id;
-
- fail:
- free_token(token);
- return -1;
-}
-
-static int field_is_string(struct tep_format_field *field)
-{
- if ((field->flags & TEP_FIELD_IS_ARRAY) &&
- (strstr(field->type, "char") || strstr(field->type, "u8") ||
- strstr(field->type, "s8")))
- return 1;
-
- return 0;
-}
-
-static int field_is_dynamic(struct tep_format_field *field)
-{
- if (strncmp(field->type, "__data_loc", 10) == 0)
- return 1;
-
- return 0;
-}
-
-static int field_is_long(struct tep_format_field *field)
-{
- /* includes long long */
- if (strstr(field->type, "long"))
- return 1;
-
- return 0;
-}
-
-static unsigned int type_size(const char *name)
-{
- /* This covers all TEP_FIELD_IS_STRING types. */
- static struct {
- const char *type;
- unsigned int size;
- } table[] = {
- { "u8", 1 },
- { "u16", 2 },
- { "u32", 4 },
- { "u64", 8 },
- { "s8", 1 },
- { "s16", 2 },
- { "s32", 4 },
- { "s64", 8 },
- { "char", 1 },
- { },
- };
- int i;
-
- for (i = 0; table[i].type; i++) {
- if (!strcmp(table[i].type, name))
- return table[i].size;
- }
-
- return 0;
-}
-
-static int event_read_fields(struct tep_event *event, struct tep_format_field **fields)
-{
- struct tep_format_field *field = NULL;
- enum tep_event_type type;
- char *token;
- char *last_token;
- int count = 0;
-
- do {
- unsigned int size_dynamic = 0;
-
- type = read_token(&token);
- if (type == TEP_EVENT_NEWLINE) {
- free_token(token);
- return count;
- }
-
- count++;
-
- if (test_type_token(type, token, TEP_EVENT_ITEM, "field"))
- goto fail;
- free_token(token);
-
- type = read_token(&token);
- /*
- * The ftrace fields may still use the "special" name.
- * Just ignore it.
- */
- if (event->flags & TEP_EVENT_FL_ISFTRACE &&
- type == TEP_EVENT_ITEM && strcmp(token, "special") == 0) {
- free_token(token);
- type = read_token(&token);
- }
-
- if (test_type_token(type, token, TEP_EVENT_OP, ":") < 0)
- goto fail;
-
- free_token(token);
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto fail;
-
- last_token = token;
-
- field = calloc(1, sizeof(*field));
- if (!field)
- goto fail;
-
- field->event = event;
-
- /* read the rest of the type */
- for (;;) {
- type = read_token(&token);
- if (type == TEP_EVENT_ITEM ||
- (type == TEP_EVENT_OP && strcmp(token, "*") == 0) ||
- /*
- * Some of the ftrace fields are broken and have
- * an illegal "." in them.
- */
- (event->flags & TEP_EVENT_FL_ISFTRACE &&
- type == TEP_EVENT_OP && strcmp(token, ".") == 0)) {
-
- if (strcmp(token, "*") == 0)
- field->flags |= TEP_FIELD_IS_POINTER;
-
- if (field->type) {
- char *new_type;
- new_type = realloc(field->type,
- strlen(field->type) +
- strlen(last_token) + 2);
- if (!new_type) {
- free(last_token);
- goto fail;
- }
- field->type = new_type;
- strcat(field->type, " ");
- strcat(field->type, last_token);
- free(last_token);
- } else
- field->type = last_token;
- last_token = token;
- continue;
- }
-
- break;
- }
-
- if (!field->type) {
- do_warning_event(event, "%s: no type found", __func__);
- goto fail;
- }
- field->name = field->alias = last_token;
-
- if (test_type(type, TEP_EVENT_OP))
- goto fail;
-
- if (strcmp(token, "[") == 0) {
- enum tep_event_type last_type = type;
- char *brackets = token;
- char *new_brackets;
- int len;
-
- field->flags |= TEP_FIELD_IS_ARRAY;
-
- type = read_token(&token);
-
- if (type == TEP_EVENT_ITEM)
- field->arraylen = strtoul(token, NULL, 0);
- else
- field->arraylen = 0;
-
- while (strcmp(token, "]") != 0) {
- if (last_type == TEP_EVENT_ITEM &&
- type == TEP_EVENT_ITEM)
- len = 2;
- else
- len = 1;
- last_type = type;
-
- new_brackets = realloc(brackets,
- strlen(brackets) +
- strlen(token) + len);
- if (!new_brackets) {
- free(brackets);
- goto fail;
- }
- brackets = new_brackets;
- if (len == 2)
- strcat(brackets, " ");
- strcat(brackets, token);
- /* We only care about the last token */
- field->arraylen = strtoul(token, NULL, 0);
- free_token(token);
- type = read_token(&token);
- if (type == TEP_EVENT_NONE) {
- do_warning_event(event, "failed to find token");
- goto fail;
- }
- }
-
- free_token(token);
-
- new_brackets = realloc(brackets, strlen(brackets) + 2);
- if (!new_brackets) {
- free(brackets);
- goto fail;
- }
- brackets = new_brackets;
- strcat(brackets, "]");
-
- /* add brackets to type */
-
- type = read_token(&token);
- /*
- * If the next token is not an OP, then it is of
- * the format: type [] item;
- */
- if (type == TEP_EVENT_ITEM) {
- char *new_type;
- new_type = realloc(field->type,
- strlen(field->type) +
- strlen(field->name) +
- strlen(brackets) + 2);
- if (!new_type) {
- free(brackets);
- goto fail;
- }
- field->type = new_type;
- strcat(field->type, " ");
- strcat(field->type, field->name);
- size_dynamic = type_size(field->name);
- free_token(field->name);
- strcat(field->type, brackets);
- field->name = field->alias = token;
- type = read_token(&token);
- } else {
- char *new_type;
- new_type = realloc(field->type,
- strlen(field->type) +
- strlen(brackets) + 1);
- if (!new_type) {
- free(brackets);
- goto fail;
- }
- field->type = new_type;
- strcat(field->type, brackets);
- }
- free(brackets);
- }
-
- if (field_is_string(field))
- field->flags |= TEP_FIELD_IS_STRING;
- if (field_is_dynamic(field))
- field->flags |= TEP_FIELD_IS_DYNAMIC;
- if (field_is_long(field))
- field->flags |= TEP_FIELD_IS_LONG;
-
- if (test_type_token(type, token, TEP_EVENT_OP, ";"))
- goto fail;
- free_token(token);
-
- if (read_expected(TEP_EVENT_ITEM, "offset") < 0)
- goto fail_expect;
-
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- goto fail_expect;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token))
- goto fail;
- field->offset = strtoul(token, NULL, 0);
- free_token(token);
-
- if (read_expected(TEP_EVENT_OP, ";") < 0)
- goto fail_expect;
-
- if (read_expected(TEP_EVENT_ITEM, "size") < 0)
- goto fail_expect;
-
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- goto fail_expect;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token))
- goto fail;
- field->size = strtoul(token, NULL, 0);
- free_token(token);
-
- if (read_expected(TEP_EVENT_OP, ";") < 0)
- goto fail_expect;
-
- type = read_token(&token);
- if (type != TEP_EVENT_NEWLINE) {
- /* newer versions of the kernel have a "signed" type */
- if (test_type_token(type, token, TEP_EVENT_ITEM, "signed"))
- goto fail;
-
- free_token(token);
-
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- goto fail_expect;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token))
- goto fail;
-
- if (strtoul(token, NULL, 0))
- field->flags |= TEP_FIELD_IS_SIGNED;
-
- free_token(token);
- if (read_expected(TEP_EVENT_OP, ";") < 0)
- goto fail_expect;
-
- if (read_expect_type(TEP_EVENT_NEWLINE, &token))
- goto fail;
- }
-
- free_token(token);
-
- if (field->flags & TEP_FIELD_IS_ARRAY) {
- if (field->arraylen)
- field->elementsize = field->size / field->arraylen;
- else if (field->flags & TEP_FIELD_IS_DYNAMIC)
- field->elementsize = size_dynamic;
- else if (field->flags & TEP_FIELD_IS_STRING)
- field->elementsize = 1;
- else if (field->flags & TEP_FIELD_IS_LONG)
- field->elementsize = event->tep ?
- event->tep->long_size :
- sizeof(long);
- } else
- field->elementsize = field->size;
-
- *fields = field;
- fields = &field->next;
-
- } while (1);
-
- return 0;
-
-fail:
- free_token(token);
-fail_expect:
- if (field) {
- free(field->type);
- free(field->name);
- free(field);
- }
- return -1;
-}
-
-static int event_read_format(struct tep_event *event)
-{
- char *token;
- int ret;
-
- if (read_expected_item(TEP_EVENT_ITEM, "format") < 0)
- return -1;
-
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- return -1;
-
- if (read_expect_type(TEP_EVENT_NEWLINE, &token))
- goto fail;
- free_token(token);
-
- ret = event_read_fields(event, &event->format.common_fields);
- if (ret < 0)
- return ret;
- event->format.nr_common = ret;
-
- ret = event_read_fields(event, &event->format.fields);
- if (ret < 0)
- return ret;
- event->format.nr_fields = ret;
-
- return 0;
-
- fail:
- free_token(token);
- return -1;
-}
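
For reference, the text that event_read_format() and its helpers walk is an event's tracefs "format" file. A trimmed-down, invented example, parsed here through tep_parse_format(), whose signature is assumed from event-parse.h:

	#include "event-parse.h"

	static const char toy_format[] =
		"name: toy_event\n"
		"ID: 1234\n"
		"format:\n"
		"\tfield:unsigned short common_type;\toffset:0;\tsize:2;\tsigned:0;\n"
		"\tfield:int common_pid;\toffset:4;\tsize:4;\tsigned:1;\n"
		"\n"
		"\tfield:unsigned long val;\toffset:8;\tsize:8;\tsigned:0;\n"
		"\n"
		"print fmt: \"val=%lu\", REC->val\n";

	static struct tep_event *parse_toy_event(struct tep_handle *tep)
	{
		struct tep_event *event = NULL;

		if (tep_parse_format(tep, &event, toy_format,
				     sizeof(toy_format) - 1, "toy") != 0)
			return NULL;
		return event;
	}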
-
-static enum tep_event_type
-process_arg_token(struct tep_event *event, struct tep_print_arg *arg,
- char **tok, enum tep_event_type type);
-
-static enum tep_event_type
-process_arg(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- enum tep_event_type type;
- char *token;
-
- type = read_token(&token);
- *tok = token;
-
- return process_arg_token(event, arg, tok, type);
-}
-
-static enum tep_event_type
-process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok);
-
-/*
- * For __print_symbolic() and __print_flags(), we need to completely
- * evaluate the first argument, which defines what to print next.
- */
-static enum tep_event_type
-process_field_arg(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- enum tep_event_type type;
-
- type = process_arg(event, arg, tok);
-
- while (type == TEP_EVENT_OP) {
- type = process_op(event, arg, tok);
- }
-
- return type;
-}
-
-static enum tep_event_type
-process_cond(struct tep_event *event, struct tep_print_arg *top, char **tok)
-{
- struct tep_print_arg *arg, *left, *right;
- enum tep_event_type type;
- char *token = NULL;
-
- arg = alloc_arg();
- left = alloc_arg();
- right = alloc_arg();
-
- if (!arg || !left || !right) {
- do_warning_event(event, "%s: not enough memory!", __func__);
- /* arg will be freed at out_free */
- free_arg(left);
- free_arg(right);
- goto out_free;
- }
-
- arg->type = TEP_PRINT_OP;
- arg->op.left = left;
- arg->op.right = right;
-
- *tok = NULL;
- type = process_arg(event, left, &token);
-
- again:
- if (type == TEP_EVENT_ERROR)
- goto out_free;
-
- /* Handle other operations in the arguments */
- if (type == TEP_EVENT_OP && strcmp(token, ":") != 0) {
- type = process_op(event, left, &token);
- goto again;
- }
-
- if (test_type_token(type, token, TEP_EVENT_OP, ":"))
- goto out_free;
-
- arg->op.op = token;
-
- type = process_arg(event, right, &token);
-
- top->op.right = arg;
-
- *tok = token;
- return type;
-
-out_free:
- /* Top may point to itself */
- top->op.right = NULL;
- free_token(token);
- free_arg(arg);
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_array(struct tep_event *event, struct tep_print_arg *top, char **tok)
-{
- struct tep_print_arg *arg;
- enum tep_event_type type;
- char *token = NULL;
-
- arg = alloc_arg();
- if (!arg) {
- do_warning_event(event, "%s: not enough memory!", __func__);
- /* '*tok' is set to top->op.op. No need to free. */
- *tok = NULL;
- return TEP_EVENT_ERROR;
- }
-
- *tok = NULL;
- type = process_arg(event, arg, &token);
- if (test_type_token(type, token, TEP_EVENT_OP, "]"))
- goto out_free;
-
- top->op.right = arg;
-
- free_token(token);
- type = read_token_item(&token);
- *tok = token;
-
- return type;
-
-out_free:
- free_token(token);
- free_arg(arg);
- return TEP_EVENT_ERROR;
-}
-
-static int get_op_prio(char *op)
-{
- if (!op[1]) {
- switch (op[0]) {
- case '~':
- case '!':
- return 4;
- case '*':
- case '/':
- case '%':
- return 6;
- case '+':
- case '-':
- return 7;
- /* '>>' and '<<' are 8 */
- case '<':
- case '>':
- return 9;
- /* '==' and '!=' are 10 */
- case '&':
- return 11;
- case '^':
- return 12;
- case '|':
- return 13;
- case '?':
- return 16;
- default:
- do_warning("unknown op '%c'", op[0]);
- return -1;
- }
- } else {
- if (strcmp(op, "++") == 0 ||
- strcmp(op, "--") == 0) {
- return 3;
- } else if (strcmp(op, ">>") == 0 ||
- strcmp(op, "<<") == 0) {
- return 8;
- } else if (strcmp(op, ">=") == 0 ||
- strcmp(op, "<=") == 0) {
- return 9;
- } else if (strcmp(op, "==") == 0 ||
- strcmp(op, "!=") == 0) {
- return 10;
- } else if (strcmp(op, "&&") == 0) {
- return 14;
- } else if (strcmp(op, "||") == 0) {
- return 15;
- } else {
- do_warning("unknown op '%s'", op);
- return -1;
- }
- }
-}
-
-static int set_op_prio(struct tep_print_arg *arg)
-{
-
- /* single (unary) ops bind the tightest (priority 0) */
- if (!arg->op.left || arg->op.left->type == TEP_PRINT_NULL)
- arg->op.prio = 0;
- else
- arg->op.prio = get_op_prio(arg->op.op);
-
- return arg->op.prio;
-}
-
-/* Note, *tok does not get freed, but will most likely be saved */
-static enum tep_event_type
-process_op(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- struct tep_print_arg *left, *right = NULL;
- enum tep_event_type type;
- char *token;
-
- /* the op is passed in via tok */
- token = *tok;
-
- if (arg->type == TEP_PRINT_OP && !arg->op.left) {
- /* handle single op */
- if (token[1]) {
- do_warning_event(event, "bad op token %s", token);
- goto out_free;
- }
- switch (token[0]) {
- case '~':
- case '!':
- case '+':
- case '-':
- break;
- default:
- do_warning_event(event, "bad op token %s", token);
- goto out_free;
-
- }
-
- /* make an empty left */
- left = alloc_arg();
- if (!left)
- goto out_warn_free;
-
- left->type = TEP_PRINT_NULL;
- arg->op.left = left;
-
- right = alloc_arg();
- if (!right)
- goto out_warn_free;
-
- arg->op.right = right;
-
- /* do not free the token, it belongs to an op */
- *tok = NULL;
- type = process_arg(event, right, tok);
-
- } else if (strcmp(token, "?") == 0) {
-
- left = alloc_arg();
- if (!left)
- goto out_warn_free;
-
- /* copy the top arg to the left */
- *left = *arg;
-
- arg->type = TEP_PRINT_OP;
- arg->op.op = token;
- arg->op.left = left;
- arg->op.prio = 0;
-
- /* it will set arg->op.right */
- type = process_cond(event, arg, tok);
-
- } else if (strcmp(token, ">>") == 0 ||
- strcmp(token, "<<") == 0 ||
- strcmp(token, "&") == 0 ||
- strcmp(token, "|") == 0 ||
- strcmp(token, "&&") == 0 ||
- strcmp(token, "||") == 0 ||
- strcmp(token, "-") == 0 ||
- strcmp(token, "+") == 0 ||
- strcmp(token, "*") == 0 ||
- strcmp(token, "^") == 0 ||
- strcmp(token, "/") == 0 ||
- strcmp(token, "%") == 0 ||
- strcmp(token, "<") == 0 ||
- strcmp(token, ">") == 0 ||
- strcmp(token, "<=") == 0 ||
- strcmp(token, ">=") == 0 ||
- strcmp(token, "==") == 0 ||
- strcmp(token, "!=") == 0) {
-
- left = alloc_arg();
- if (!left)
- goto out_warn_free;
-
- /* copy the top arg to the left */
- *left = *arg;
-
- arg->type = TEP_PRINT_OP;
- arg->op.op = token;
- arg->op.left = left;
- arg->op.right = NULL;
-
- if (set_op_prio(arg) == -1) {
- event->flags |= TEP_EVENT_FL_FAILED;
- /* arg->op.op (= token) will be freed at out_free */
- arg->op.op = NULL;
- goto out_free;
- }
-
- type = read_token_item(&token);
- *tok = token;
-
- /* could just be a type pointer */
- if ((strcmp(arg->op.op, "*") == 0) &&
- type == TEP_EVENT_DELIM && (strcmp(token, ")") == 0)) {
- char *new_atom;
-
- if (left->type != TEP_PRINT_ATOM) {
- do_warning_event(event, "bad pointer type");
- goto out_free;
- }
- new_atom = realloc(left->atom.atom,
- strlen(left->atom.atom) + 3);
- if (!new_atom)
- goto out_warn_free;
-
- left->atom.atom = new_atom;
- strcat(left->atom.atom, " *");
- free(arg->op.op);
- *arg = *left;
- free(left);
-
- return type;
- }
-
- right = alloc_arg();
- if (!right)
- goto out_warn_free;
-
- type = process_arg_token(event, right, tok, type);
- if (type == TEP_EVENT_ERROR) {
- free_arg(right);
- /* token was freed in process_arg_token() via *tok */
- token = NULL;
- goto out_free;
- }
-
- if (right->type == TEP_PRINT_OP &&
- get_op_prio(arg->op.op) < get_op_prio(right->op.op)) {
- struct tep_print_arg tmp;
-
- /* rotate ops according to the priority */
- arg->op.right = right->op.left;
-
- tmp = *arg;
- *arg = *right;
- *right = tmp;
-
- arg->op.left = right;
- } else {
- arg->op.right = right;
- }
-
- } else if (strcmp(token, "[") == 0) {
-
- left = alloc_arg();
- if (!left)
- goto out_warn_free;
-
- *left = *arg;
-
- arg->type = TEP_PRINT_OP;
- arg->op.op = token;
- arg->op.left = left;
-
- arg->op.prio = 0;
-
- /* it will set arg->op.right */
- type = process_array(event, arg, tok);
-
- } else {
- do_warning_event(event, "unknown op '%s'", token);
- event->flags |= TEP_EVENT_FL_FAILED;
- /* the arg is now the left side */
- goto out_free;
- }
-
- if (type == TEP_EVENT_OP && strcmp(*tok, ":") != 0) {
- int prio;
-
- /* higher prios need to be closer to the root */
- prio = get_op_prio(*tok);
-
- if (prio > arg->op.prio)
- return process_op(event, arg, tok);
-
- return process_op(event, right, tok);
- }
-
- return type;
-
-out_warn_free:
- do_warning_event(event, "%s: not enough memory!", __func__);
-out_free:
- free_token(token);
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_entry(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
- char **tok)
-{
- enum tep_event_type type;
- char *field;
- char *token;
-
- if (read_expected(TEP_EVENT_OP, "->") < 0)
- goto out_err;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto out_free;
- field = token;
-
- arg->type = TEP_PRINT_FIELD;
- arg->field.name = field;
-
- if (is_flag_field) {
- arg->field.field = tep_find_any_field(event, arg->field.name);
- arg->field.field->flags |= TEP_FIELD_IS_FLAG;
- is_flag_field = 0;
- } else if (is_symbolic_field) {
- arg->field.field = tep_find_any_field(event, arg->field.name);
- arg->field.field->flags |= TEP_FIELD_IS_SYMBOLIC;
- is_symbolic_field = 0;
- }
-
- type = read_token(&token);
- *tok = token;
-
- return type;
-
- out_free:
- free_token(token);
- out_err:
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static int alloc_and_process_delim(struct tep_event *event, char *next_token,
- struct tep_print_arg **print_arg)
-{
- struct tep_print_arg *field;
- enum tep_event_type type;
- char *token;
- int ret = 0;
-
- field = alloc_arg();
- if (!field) {
- do_warning_event(event, "%s: not enough memory!", __func__);
- errno = ENOMEM;
- return -1;
- }
-
- type = process_arg(event, field, &token);
-
- if (test_type_token(type, token, TEP_EVENT_DELIM, next_token)) {
- errno = EINVAL;
- ret = -1;
- free_arg(field);
- goto out_free_token;
- }
-
- *print_arg = field;
-
-out_free_token:
- free_token(token);
-
- return ret;
-}
-
-static char *arg_eval (struct tep_print_arg *arg);
-
-static unsigned long long
-eval_type_str(unsigned long long val, const char *type, int pointer)
-{
- int sign = 0;
- char *ref;
- int len;
-
- len = strlen(type);
-
- if (pointer) {
-
- if (type[len-1] != '*') {
- do_warning("pointer expected with non pointer type");
- return val;
- }
-
- ref = malloc(len);
- if (!ref) {
- do_warning("%s: not enough memory!", __func__);
- return val;
- }
- memcpy(ref, type, len);
-
- /* chop off the " *" */
- ref[len - 2] = 0;
-
- val = eval_type_str(val, ref, 0);
- free(ref);
- return val;
- }
-
- /* check if this is a pointer */
- if (type[len - 1] == '*')
- return val;
-
- /* Try to figure out the arg size */
- if (strncmp(type, "struct", 6) == 0)
- /* all bets are off */
- return val;
-
- if (strcmp(type, "u8") == 0)
- return val & 0xff;
-
- if (strcmp(type, "u16") == 0)
- return val & 0xffff;
-
- if (strcmp(type, "u32") == 0)
- return val & 0xffffffff;
-
- if (strcmp(type, "u64") == 0 ||
- strcmp(type, "s64") == 0)
- return val;
-
- if (strcmp(type, "s8") == 0)
- return (unsigned long long)(char)val & 0xff;
-
- if (strcmp(type, "s16") == 0)
- return (unsigned long long)(short)val & 0xffff;
-
- if (strcmp(type, "s32") == 0)
- return (unsigned long long)(int)val & 0xffffffff;
-
- if (strncmp(type, "unsigned ", 9) == 0) {
- sign = 0;
- type += 9;
- }
-
- if (strcmp(type, "char") == 0) {
- if (sign)
- return (unsigned long long)(char)val & 0xff;
- else
- return val & 0xff;
- }
-
- if (strcmp(type, "short") == 0) {
- if (sign)
- return (unsigned long long)(short)val & 0xffff;
- else
- return val & 0xffff;
- }
-
- if (strcmp(type, "int") == 0) {
- if (sign)
- return (unsigned long long)(int)val & 0xffffffff;
- else
- return val & 0xffffffff;
- }
-
- return val;
-}
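
For example, with type "u16" a raw value of 0x12345678 comes back as 0x5678, and with type "s8" the value 0x1234 comes back as 0x34: the cast through the matching C type truncates (and, for signed types, sign-extends) before the final mask is applied.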
-
-/*
- * Try to figure out the type.
- */
-static unsigned long long
-eval_type(unsigned long long val, struct tep_print_arg *arg, int pointer)
-{
- if (arg->type != TEP_PRINT_TYPE) {
- do_warning("expected type argument");
- return 0;
- }
-
- return eval_type_str(val, arg->typecast.type, pointer);
-}
-
-static int arg_num_eval(struct tep_print_arg *arg, long long *val)
-{
- long long left, right;
- int ret = 1;
-
- switch (arg->type) {
- case TEP_PRINT_ATOM:
- *val = strtoll(arg->atom.atom, NULL, 0);
- break;
- case TEP_PRINT_TYPE:
- ret = arg_num_eval(arg->typecast.item, val);
- if (!ret)
- break;
- *val = eval_type(*val, arg, 0);
- break;
- case TEP_PRINT_OP:
- switch (arg->op.op[0]) {
- case '|':
- ret = arg_num_eval(arg->op.left, &left);
- if (!ret)
- break;
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
- if (arg->op.op[1])
- *val = left || right;
- else
- *val = left | right;
- break;
- case '&':
- ret = arg_num_eval(arg->op.left, &left);
- if (!ret)
- break;
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
- if (arg->op.op[1])
- *val = left && right;
- else
- *val = left & right;
- break;
- case '<':
- ret = arg_num_eval(arg->op.left, &left);
- if (!ret)
- break;
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
- switch (arg->op.op[1]) {
- case 0:
- *val = left < right;
- break;
- case '<':
- *val = left << right;
- break;
- case '=':
- *val = left <= right;
- break;
- default:
- do_warning("unknown op '%s'", arg->op.op);
- ret = 0;
- }
- break;
- case '>':
- ret = arg_num_eval(arg->op.left, &left);
- if (!ret)
- break;
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
- switch (arg->op.op[1]) {
- case 0:
- *val = left > right;
- break;
- case '>':
- *val = left >> right;
- break;
- case '=':
- *val = left >= right;
- break;
- default:
- do_warning("unknown op '%s'", arg->op.op);
- ret = 0;
- }
- break;
- case '=':
- ret = arg_num_eval(arg->op.left, &left);
- if (!ret)
- break;
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
-
- if (arg->op.op[1] != '=') {
- do_warning("unknown op '%s'", arg->op.op);
- ret = 0;
- } else
- *val = left == right;
- break;
- case '!':
- ret = arg_num_eval(arg->op.left, &left);
- if (!ret)
- break;
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
-
- switch (arg->op.op[1]) {
- case '=':
- *val = left != right;
- break;
- default:
- do_warning("unknown op '%s'", arg->op.op);
- ret = 0;
- }
- break;
- case '-':
- /* check for negative */
- if (arg->op.left->type == TEP_PRINT_NULL)
- left = 0;
- else
- ret = arg_num_eval(arg->op.left, &left);
- if (!ret)
- break;
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
- *val = left - right;
- break;
- case '+':
- if (arg->op.left->type == TEP_PRINT_NULL)
- left = 0;
- else
- ret = arg_num_eval(arg->op.left, &left);
- if (!ret)
- break;
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
- *val = left + right;
- break;
- case '~':
- ret = arg_num_eval(arg->op.right, &right);
- if (!ret)
- break;
- *val = ~right;
- break;
- default:
- do_warning("unknown op '%s'", arg->op.op);
- ret = 0;
- }
- break;
-
- case TEP_PRINT_NULL:
- case TEP_PRINT_FIELD ... TEP_PRINT_SYMBOL:
- case TEP_PRINT_STRING:
- case TEP_PRINT_BSTRING:
- case TEP_PRINT_BITMASK:
- default:
- do_warning("invalid eval type %d", arg->type);
- ret = 0;
-
- }
- return ret;
-}
-
-static char *arg_eval (struct tep_print_arg *arg)
-{
- long long val;
- static char buf[24];
-
- switch (arg->type) {
- case TEP_PRINT_ATOM:
- return arg->atom.atom;
- case TEP_PRINT_TYPE:
- return arg_eval(arg->typecast.item);
- case TEP_PRINT_OP:
- if (!arg_num_eval(arg, &val))
- break;
- sprintf(buf, "%lld", val);
- return buf;
-
- case TEP_PRINT_NULL:
- case TEP_PRINT_FIELD ... TEP_PRINT_SYMBOL:
- case TEP_PRINT_STRING:
- case TEP_PRINT_BSTRING:
- case TEP_PRINT_BITMASK:
- default:
- do_warning("invalid eval type %d", arg->type);
- break;
- }
-
- return NULL;
-}
-
-static enum tep_event_type
-process_fields(struct tep_event *event, struct tep_print_flag_sym **list, char **tok)
-{
- enum tep_event_type type;
- struct tep_print_arg *arg = NULL;
- struct tep_print_flag_sym *field;
- char *token = *tok;
- char *value;
-
- do {
- free_token(token);
- type = read_token_item(&token);
- if (test_type_token(type, token, TEP_EVENT_OP, "{"))
- break;
-
- arg = alloc_arg();
- if (!arg)
- goto out_free;
-
- free_token(token);
- type = process_arg(event, arg, &token);
-
- if (type == TEP_EVENT_OP)
- type = process_op(event, arg, &token);
-
- if (type == TEP_EVENT_ERROR)
- goto out_free;
-
- if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
- goto out_free;
-
- field = calloc(1, sizeof(*field));
- if (!field)
- goto out_free;
-
- value = arg_eval(arg);
- if (value == NULL)
- goto out_free_field;
- field->value = strdup(value);
- if (field->value == NULL)
- goto out_free_field;
-
- free_arg(arg);
- arg = alloc_arg();
- if (!arg)
- goto out_free;
-
- free_token(token);
- type = process_arg(event, arg, &token);
- if (test_type_token(type, token, TEP_EVENT_OP, "}"))
- goto out_free_field;
-
- value = arg_eval(arg);
- if (value == NULL)
- goto out_free_field;
- field->str = strdup(value);
- if (field->str == NULL)
- goto out_free_field;
- free_arg(arg);
- arg = NULL;
-
- *list = field;
- list = &field->next;
-
- free_token(token);
- type = read_token_item(&token);
- } while (type == TEP_EVENT_DELIM && strcmp(token, ",") == 0);
-
- *tok = token;
- return type;
-
-out_free_field:
- free_flag_sym(field);
-out_free:
- free_arg(arg);
- free_token(token);
- *tok = NULL;
-
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_flags(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- struct tep_print_arg *field;
- enum tep_event_type type;
- char *token = NULL;
-
- memset(arg, 0, sizeof(*arg));
- arg->type = TEP_PRINT_FLAGS;
-
- field = alloc_arg();
- if (!field) {
- do_warning_event(event, "%s: not enough memory!", __func__);
- goto out_free;
- }
-
- type = process_field_arg(event, field, &token);
-
- /* Handle operations in the first argument */
- while (type == TEP_EVENT_OP)
- type = process_op(event, field, &token);
-
- if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
- goto out_free_field;
- free_token(token);
-
- arg->flags.field = field;
-
- type = read_token_item(&token);
- if (event_item_type(type)) {
- arg->flags.delim = token;
- type = read_token_item(&token);
- }
-
- if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
- goto out_free;
-
- type = process_fields(event, &arg->flags.flags, &token);
- if (test_type_token(type, token, TEP_EVENT_DELIM, ")"))
- goto out_free;
-
- free_token(token);
- type = read_token_item(tok);
- return type;
-
-out_free_field:
- free_arg(field);
-out_free:
- free_token(token);
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
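
For orientation, the construct that process_flags() pulls apart looks like this inside an event's print fmt (field name and values invented):

	__print_flags(REC->gfp_flags, "|",
		      {0x01, "GFP_DMA"}, {0x10, "GFP_KERNEL"})

The first argument is handled by process_field_arg(), the delimiter string by the event_item_type() check, and each {value, name} pair by process_fields().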
-
-static enum tep_event_type
-process_symbols(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- struct tep_print_arg *field;
- enum tep_event_type type;
- char *token = NULL;
-
- memset(arg, 0, sizeof(*arg));
- arg->type = TEP_PRINT_SYMBOL;
-
- field = alloc_arg();
- if (!field) {
- do_warning_event(event, "%s: not enough memory!", __func__);
- goto out_free;
- }
-
- type = process_field_arg(event, field, &token);
-
- if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
- goto out_free_field;
-
- arg->symbol.field = field;
-
- type = process_fields(event, &arg->symbol.symbols, &token);
- if (test_type_token(type, token, TEP_EVENT_DELIM, ")"))
- goto out_free;
-
- free_token(token);
- type = read_token_item(tok);
- return type;
-
-out_free_field:
- free_arg(field);
-out_free:
- free_token(token);
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_hex_common(struct tep_event *event, struct tep_print_arg *arg,
- char **tok, enum tep_print_arg_type type)
-{
- memset(arg, 0, sizeof(*arg));
- arg->type = type;
-
- if (alloc_and_process_delim(event, ",", &arg->hex.field))
- goto out;
-
- if (alloc_and_process_delim(event, ")", &arg->hex.size))
- goto free_field;
-
- return read_token_item(tok);
-
-free_field:
- free_arg(arg->hex.field);
- arg->hex.field = NULL;
-out:
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_hex(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- return process_hex_common(event, arg, tok, TEP_PRINT_HEX);
-}
-
-static enum tep_event_type
-process_hex_str(struct tep_event *event, struct tep_print_arg *arg,
- char **tok)
-{
- return process_hex_common(event, arg, tok, TEP_PRINT_HEX_STR);
-}
-
-static enum tep_event_type
-process_int_array(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- memset(arg, 0, sizeof(*arg));
- arg->type = TEP_PRINT_INT_ARRAY;
-
- if (alloc_and_process_delim(event, ",", &arg->int_array.field))
- goto out;
-
- if (alloc_and_process_delim(event, ",", &arg->int_array.count))
- goto free_field;
-
- if (alloc_and_process_delim(event, ")", &arg->int_array.el_size))
- goto free_size;
-
- return read_token_item(tok);
-
-free_size:
- free_arg(arg->int_array.count);
- arg->int_array.count = NULL;
-free_field:
- free_arg(arg->int_array.field);
- arg->int_array.field = NULL;
-out:
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_dynamic_array(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- struct tep_format_field *field;
- enum tep_event_type type;
- char *token;
-
- memset(arg, 0, sizeof(*arg));
- arg->type = TEP_PRINT_DYNAMIC_ARRAY;
-
- /*
- * The item within the parentheses is another field that holds
- * the index to where the array starts.
- */
- type = read_token(&token);
- *tok = token;
- if (type != TEP_EVENT_ITEM)
- goto out_free;
-
- /* Find the field */
-
- field = tep_find_field(event, token);
- if (!field)
- goto out_free;
-
- arg->dynarray.field = field;
- arg->dynarray.index = 0;
-
- if (read_expected(TEP_EVENT_DELIM, ")") < 0)
- goto out_free;
-
- free_token(token);
- type = read_token_item(&token);
- *tok = token;
- if (type != TEP_EVENT_OP || strcmp(token, "[") != 0)
- return type;
-
- free_token(token);
- arg = alloc_arg();
- if (!arg) {
- do_warning_event(event, "%s: not enough memory!", __func__);
- *tok = NULL;
- return TEP_EVENT_ERROR;
- }
-
- type = process_arg(event, arg, &token);
- if (type == TEP_EVENT_ERROR)
- goto out_free_arg;
-
- if (!test_type_token(type, token, TEP_EVENT_OP, "]"))
- goto out_free_arg;
-
- free_token(token);
- type = read_token_item(tok);
- return type;
-
- out_free_arg:
- free_arg(arg);
- out_free:
- free_token(token);
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_dynamic_array_len(struct tep_event *event, struct tep_print_arg *arg,
- char **tok)
-{
- struct tep_format_field *field;
- enum tep_event_type type;
- char *token;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto out_free;
-
- arg->type = TEP_PRINT_DYNAMIC_ARRAY_LEN;
-
- /* Find the field */
- field = tep_find_field(event, token);
- if (!field)
- goto out_free;
-
- arg->dynarray.field = field;
- arg->dynarray.index = 0;
-
- if (read_expected(TEP_EVENT_DELIM, ")") < 0)
- goto out_err;
-
- type = read_token(&token);
- *tok = token;
-
- return type;
-
- out_free:
- free_token(token);
- out_err:
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_paren(struct tep_event *event, struct tep_print_arg *arg, char **tok)
-{
- struct tep_print_arg *item_arg;
- enum tep_event_type type;
- char *token;
-
- type = process_arg(event, arg, &token);
-
- if (type == TEP_EVENT_ERROR)
- goto out_free;
-
- if (type == TEP_EVENT_OP)
- type = process_op(event, arg, &token);
-
- if (type == TEP_EVENT_ERROR)
- goto out_free;
-
- if (test_type_token(type, token, TEP_EVENT_DELIM, ")"))
- goto out_free;
-
- free_token(token);
- type = read_token_item(&token);
-
- /*
- * If the next token is an item or another open paren, then
- * this was a typecast.
- */
- if (event_item_type(type) ||
- (type == TEP_EVENT_DELIM && strcmp(token, "(") == 0)) {
-
- /* make this a typecast and continue */
-
- /* previous must be an atom */
- if (arg->type != TEP_PRINT_ATOM) {
- do_warning_event(event, "previous needed to be TEP_PRINT_ATOM");
- goto out_free;
- }
-
- item_arg = alloc_arg();
- if (!item_arg) {
- do_warning_event(event, "%s: not enough memory!",
- __func__);
- goto out_free;
- }
-
- arg->type = TEP_PRINT_TYPE;
- arg->typecast.type = arg->atom.atom;
- arg->typecast.item = item_arg;
- type = process_arg_token(event, item_arg, &token, type);
-
- }
-
- *tok = token;
- return type;
-
- out_free:
- free_token(token);
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-
-static enum tep_event_type
-process_str(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
- char **tok)
-{
- enum tep_event_type type;
- char *token;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto out_free;
-
- arg->type = TEP_PRINT_STRING;
- arg->string.string = token;
- arg->string.offset = -1;
-
- if (read_expected(TEP_EVENT_DELIM, ")") < 0)
- goto out_err;
-
- type = read_token(&token);
- *tok = token;
-
- return type;
-
- out_free:
- free_token(token);
- out_err:
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_bitmask(struct tep_event *event __maybe_unused, struct tep_print_arg *arg,
- char **tok)
-{
- enum tep_event_type type;
- char *token;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto out_free;
-
- arg->type = TEP_PRINT_BITMASK;
- arg->bitmask.bitmask = token;
- arg->bitmask.offset = -1;
-
- if (read_expected(TEP_EVENT_DELIM, ")") < 0)
- goto out_err;
-
- type = read_token(&token);
- *tok = token;
-
- return type;
-
- out_free:
- free_token(token);
- out_err:
- *tok = NULL;
- return TEP_EVENT_ERROR;
-}
-
-static struct tep_function_handler *
-find_func_handler(struct tep_handle *tep, char *func_name)
-{
- struct tep_function_handler *func;
-
- if (!tep)
- return NULL;
-
- for (func = tep->func_handlers; func; func = func->next) {
- if (strcmp(func->name, func_name) == 0)
- break;
- }
-
- return func;
-}
-
-static void remove_func_handler(struct tep_handle *tep, char *func_name)
-{
- struct tep_function_handler *func;
- struct tep_function_handler **next;
-
- next = &tep->func_handlers;
- while ((func = *next)) {
- if (strcmp(func->name, func_name) == 0) {
- *next = func->next;
- free_func_handle(func);
- break;
- }
- next = &func->next;
- }
-}
-
-static enum tep_event_type
-process_func_handler(struct tep_event *event, struct tep_function_handler *func,
- struct tep_print_arg *arg, char **tok)
-{
- struct tep_print_arg **next_arg;
- struct tep_print_arg *farg;
- enum tep_event_type type;
- char *token;
- int i;
-
- arg->type = TEP_PRINT_FUNC;
- arg->func.func = func;
-
- *tok = NULL;
-
- next_arg = &(arg->func.args);
- for (i = 0; i < func->nr_args; i++) {
- farg = alloc_arg();
- if (!farg) {
- do_warning_event(event, "%s: not enough memory!",
- __func__);
- return TEP_EVENT_ERROR;
- }
-
- type = process_arg(event, farg, &token);
- if (i < (func->nr_args - 1)) {
- if (type != TEP_EVENT_DELIM || strcmp(token, ",") != 0) {
- do_warning_event(event,
- "Error: function '%s()' expects %d arguments but event %s only uses %d",
- func->name, func->nr_args,
- event->name, i + 1);
- goto err;
- }
- } else {
- if (type != TEP_EVENT_DELIM || strcmp(token, ")") != 0) {
- do_warning_event(event,
- "Error: function '%s()' only expects %d arguments but event %s has more",
- func->name, func->nr_args, event->name);
- goto err;
- }
- }
-
- *next_arg = farg;
- next_arg = &(farg->next);
- free_token(token);
- }
-
- type = read_token(&token);
- *tok = token;
-
- return type;
-
-err:
- free_arg(farg);
- free_token(token);
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_function(struct tep_event *event, struct tep_print_arg *arg,
- char *token, char **tok)
-{
- struct tep_function_handler *func;
-
- if (strcmp(token, "__print_flags") == 0) {
- free_token(token);
- is_flag_field = 1;
- return process_flags(event, arg, tok);
- }
- if (strcmp(token, "__print_symbolic") == 0) {
- free_token(token);
- is_symbolic_field = 1;
- return process_symbols(event, arg, tok);
- }
- if (strcmp(token, "__print_hex") == 0) {
- free_token(token);
- return process_hex(event, arg, tok);
- }
- if (strcmp(token, "__print_hex_str") == 0) {
- free_token(token);
- return process_hex_str(event, arg, tok);
- }
- if (strcmp(token, "__print_array") == 0) {
- free_token(token);
- return process_int_array(event, arg, tok);
- }
- if (strcmp(token, "__get_str") == 0) {
- free_token(token);
- return process_str(event, arg, tok);
- }
- if (strcmp(token, "__get_bitmask") == 0) {
- free_token(token);
- return process_bitmask(event, arg, tok);
- }
- if (strcmp(token, "__get_dynamic_array") == 0) {
- free_token(token);
- return process_dynamic_array(event, arg, tok);
- }
- if (strcmp(token, "__get_dynamic_array_len") == 0) {
- free_token(token);
- return process_dynamic_array_len(event, arg, tok);
- }
-
- func = find_func_handler(event->tep, token);
- if (func) {
- free_token(token);
- return process_func_handler(event, func, arg, tok);
- }
-
- do_warning_event(event, "function %s not defined", token);
- free_token(token);
- return TEP_EVENT_ERROR;
-}
-
-static enum tep_event_type
-process_arg_token(struct tep_event *event, struct tep_print_arg *arg,
- char **tok, enum tep_event_type type)
-{
- char *token;
- char *atom;
-
- token = *tok;
-
- switch (type) {
- case TEP_EVENT_ITEM:
- if (strcmp(token, "REC") == 0) {
- free_token(token);
- type = process_entry(event, arg, &token);
- break;
- }
- atom = token;
- /* test the next token */
- type = read_token_item(&token);
-
- /*
- * If the next token is a parenthesis, then this
- * is a function.
- */
- if (type == TEP_EVENT_DELIM && strcmp(token, "(") == 0) {
- free_token(token);
- token = NULL;
- /* this will free atom. */
- type = process_function(event, arg, atom, &token);
- break;
- }
- /* atoms can be more than one token long */
- while (type == TEP_EVENT_ITEM) {
- char *new_atom;
- new_atom = realloc(atom,
- strlen(atom) + strlen(token) + 2);
- if (!new_atom) {
- free(atom);
- *tok = NULL;
- free_token(token);
- return TEP_EVENT_ERROR;
- }
- atom = new_atom;
- strcat(atom, " ");
- strcat(atom, token);
- free_token(token);
- type = read_token_item(&token);
- }
-
- arg->type = TEP_PRINT_ATOM;
- arg->atom.atom = atom;
- break;
-
- case TEP_EVENT_DQUOTE:
- case TEP_EVENT_SQUOTE:
- arg->type = TEP_PRINT_ATOM;
- arg->atom.atom = token;
- type = read_token_item(&token);
- break;
- case TEP_EVENT_DELIM:
- if (strcmp(token, "(") == 0) {
- free_token(token);
- type = process_paren(event, arg, &token);
- break;
- }
- case TEP_EVENT_OP:
- /* handle single ops */
- arg->type = TEP_PRINT_OP;
- arg->op.op = token;
- arg->op.left = NULL;
- type = process_op(event, arg, &token);
-
- /* On error, the op is freed */
- if (type == TEP_EVENT_ERROR)
- arg->op.op = NULL;
-
- /* return error type if errored */
- break;
-
- case TEP_EVENT_ERROR ... TEP_EVENT_NEWLINE:
- default:
- do_warning_event(event, "unexpected type %d", type);
- return TEP_EVENT_ERROR;
- }
- *tok = token;
-
- return type;
-}
-
-static int event_read_print_args(struct tep_event *event, struct tep_print_arg **list)
-{
- enum tep_event_type type = TEP_EVENT_ERROR;
- struct tep_print_arg *arg;
- char *token;
- int args = 0;
-
- do {
- if (type == TEP_EVENT_NEWLINE) {
- type = read_token_item(&token);
- continue;
- }
-
- arg = alloc_arg();
- if (!arg) {
- do_warning_event(event, "%s: not enough memory!",
- __func__);
- return -1;
- }
-
- type = process_arg(event, arg, &token);
-
- if (type == TEP_EVENT_ERROR) {
- free_token(token);
- free_arg(arg);
- return -1;
- }
-
- *list = arg;
- args++;
-
- if (type == TEP_EVENT_OP) {
- type = process_op(event, arg, &token);
- free_token(token);
- if (type == TEP_EVENT_ERROR) {
- *list = NULL;
- free_arg(arg);
- return -1;
- }
- list = &arg->next;
- continue;
- }
-
- if (type == TEP_EVENT_DELIM && strcmp(token, ",") == 0) {
- free_token(token);
- *list = arg;
- list = &arg->next;
- continue;
- }
- break;
- } while (type != TEP_EVENT_NONE);
-
- if (type != TEP_EVENT_NONE && type != TEP_EVENT_ERROR)
- free_token(token);
-
- return args;
-}
-
-static int event_read_print(struct tep_event *event)
-{
- enum tep_event_type type;
- char *token;
- int ret;
-
- if (read_expected_item(TEP_EVENT_ITEM, "print") < 0)
- return -1;
-
- if (read_expected(TEP_EVENT_ITEM, "fmt") < 0)
- return -1;
-
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- return -1;
-
- if (read_expect_type(TEP_EVENT_DQUOTE, &token) < 0)
- goto fail;
-
- concat:
- event->print_fmt.format = token;
- event->print_fmt.args = NULL;
-
- /* ok to have no arg */
- type = read_token_item(&token);
-
- if (type == TEP_EVENT_NONE)
- return 0;
-
- /* Handle concatenation of print lines */
- if (type == TEP_EVENT_DQUOTE) {
- char *cat;
-
- if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0)
- goto fail;
- free_token(token);
- free_token(event->print_fmt.format);
- event->print_fmt.format = NULL;
- token = cat;
- goto concat;
- }
-
- if (test_type_token(type, token, TEP_EVENT_DELIM, ","))
- goto fail;
-
- free_token(token);
-
- ret = event_read_print_args(event, &event->print_fmt.args);
- if (ret < 0)
- return -1;
-
- return ret;
-
- fail:
- free_token(token);
- return -1;
-}
-
-/**
- * tep_find_common_field - return a common field by event
- * @event: handle for the event
- * @name: the name of the common field to return
- *
- * Returns a common field from the event by the given @name.
- * This only searches the common fields and not all fields.
- */
-struct tep_format_field *
-tep_find_common_field(struct tep_event *event, const char *name)
-{
- struct tep_format_field *format;
-
- for (format = event->format.common_fields;
- format; format = format->next) {
- if (strcmp(format->name, name) == 0)
- break;
- }
-
- return format;
-}
-
-/**
- * tep_find_field - find a non-common field
- * @event: handle for the event
- * @name: the name of the non-common field
- *
- * Returns a non-common field by the given @name.
- * This does not search common fields.
- */
-struct tep_format_field *
-tep_find_field(struct tep_event *event, const char *name)
-{
- struct tep_format_field *format;
-
- for (format = event->format.fields;
- format; format = format->next) {
- if (strcmp(format->name, name) == 0)
- break;
- }
-
- return format;
-}
-
-/**
- * tep_find_any_field - find any field by name
- * @event: handle for the event
- * @name: the name of the field
- *
- * Returns a field by the given @name.
- * This searches the common field names first, then
- * the non-common ones if a common one was not found.
- */
-struct tep_format_field *
-tep_find_any_field(struct tep_event *event, const char *name)
-{
- struct tep_format_field *format;
-
- format = tep_find_common_field(event, name);
- if (format)
- return format;
- return tep_find_field(event, name);
-}
-
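A minimal usage sketch of the three lookups above, assuming the libtraceevent headers (event-parse.h) and <stdio.h> are included and that a struct tep_event *event has already been parsed; the field names are only illustrative:

    struct tep_format_field *f;

    f = tep_find_common_field(event, "common_pid"); /* common_* fields only */
    f = tep_find_field(event, "prev_comm");         /* event-specific fields only */
    f = tep_find_any_field(event, "common_pid");    /* common first, then specific */
    if (!f)
            printf("no such field\n");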
-/**
- * tep_read_number - read a number from data
- * @tep: a handle to the trace event parser context
- * @ptr: the raw data
- * @size: the size of the data that holds the number
- *
- * Returns the number (converted to host) from the
- * raw data.
- */
-unsigned long long tep_read_number(struct tep_handle *tep,
- const void *ptr, int size)
-{
- unsigned long long val;
-
- switch (size) {
- case 1:
- return *(unsigned char *)ptr;
- case 2:
- return tep_data2host2(tep, *(unsigned short *)ptr);
- case 4:
- return tep_data2host4(tep, *(unsigned int *)ptr);
- case 8:
- memcpy(&val, (ptr), sizeof(unsigned long long));
- return tep_data2host8(tep, val);
- default:
- /* BUG! */
- return 0;
- }
-}
-
-/**
- * tep_read_number_field - read a number from data
- * @field: a handle to the field
- * @data: the raw data to read
- * @value: the value to place the number in
- *
- * Reads raw data according to a field offset and size,
- * and translates it into @value.
- *
- * Returns 0 on success, -1 otherwise.
- */
-int tep_read_number_field(struct tep_format_field *field, const void *data,
- unsigned long long *value)
-{
- if (!field)
- return -1;
- switch (field->size) {
- case 1:
- case 2:
- case 4:
- case 8:
- *value = tep_read_number(field->event->tep,
- data + field->offset, field->size);
- return 0;
- default:
- return -1;
- }
-}
-
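A sketch of reading a numeric field out of raw record data with tep_read_number_field(), assuming event and record were obtained elsewhere (for example from the tracing buffers); the field name is illustrative:

    struct tep_format_field *field = tep_find_any_field(event, "common_pid");
    unsigned long long pid;

    if (tep_read_number_field(field, record->data, &pid) == 0)
            printf("pid=%llu\n", pid);
    else
            printf("field missing or of unsupported size\n");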
-static int get_common_info(struct tep_handle *tep,
- const char *type, int *offset, int *size)
-{
- struct tep_event *event;
- struct tep_format_field *field;
-
- /*
- * All events should have the same common elements.
- * Pick any event to find where the type is.
- */
- if (!tep->events) {
- do_warning("no event_list!");
- return -1;
- }
-
- event = tep->events[0];
- field = tep_find_common_field(event, type);
- if (!field)
- return -1;
-
- *offset = field->offset;
- *size = field->size;
-
- return 0;
-}
-
-static int __parse_common(struct tep_handle *tep, void *data,
- int *size, int *offset, const char *name)
-{
- int ret;
-
- if (!*size) {
- ret = get_common_info(tep, name, offset, size);
- if (ret < 0)
- return ret;
- }
- return tep_read_number(tep, data + *offset, *size);
-}
-
-static int trace_parse_common_type(struct tep_handle *tep, void *data)
-{
- return __parse_common(tep, data,
- &tep->type_size, &tep->type_offset,
- "common_type");
-}
-
-static int parse_common_pid(struct tep_handle *tep, void *data)
-{
- return __parse_common(tep, data,
- &tep->pid_size, &tep->pid_offset,
- "common_pid");
-}
-
-static int parse_common_pc(struct tep_handle *tep, void *data)
-{
- return __parse_common(tep, data,
- &tep->pc_size, &tep->pc_offset,
- "common_preempt_count");
-}
-
-static int parse_common_flags(struct tep_handle *tep, void *data)
-{
- return __parse_common(tep, data,
- &tep->flags_size, &tep->flags_offset,
- "common_flags");
-}
-
-static int parse_common_lock_depth(struct tep_handle *tep, void *data)
-{
- return __parse_common(tep, data,
- &tep->ld_size, &tep->ld_offset,
- "common_lock_depth");
-}
-
-static int parse_common_migrate_disable(struct tep_handle *tep, void *data)
-{
- return __parse_common(tep, data,
- &tep->ld_size, &tep->ld_offset,
- "common_migrate_disable");
-}
-
-static int events_id_cmp(const void *a, const void *b);
-
-/**
- * tep_find_event - find an event by given id
- * @tep: a handle to the trace event parser context
- * @id: the id of the event
- *
- * Returns an event that has a given @id.
- */
-struct tep_event *tep_find_event(struct tep_handle *tep, int id)
-{
- struct tep_event **eventptr;
- struct tep_event key;
- struct tep_event *pkey = &key;
-
- /* Check cache first */
- if (tep->last_event && tep->last_event->id == id)
- return tep->last_event;
-
- key.id = id;
-
- eventptr = bsearch(&pkey, tep->events, tep->nr_events,
- sizeof(*tep->events), events_id_cmp);
-
- if (eventptr) {
- tep->last_event = *eventptr;
- return *eventptr;
- }
-
- return NULL;
-}
-
-/**
- * tep_find_event_by_name - find an event by given name
- * @tep: a handle to the trace event parser context
- * @sys: the system name to search for
- * @name: the name of the event to search for
- *
- * This returns an event with a given @name and under the system
- * @sys. If @sys is NULL the first event with @name is returned.
- */
-struct tep_event *
-tep_find_event_by_name(struct tep_handle *tep,
- const char *sys, const char *name)
-{
- struct tep_event *event = NULL;
- int i;
-
- if (tep->last_event &&
- strcmp(tep->last_event->name, name) == 0 &&
- (!sys || strcmp(tep->last_event->system, sys) == 0))
- return tep->last_event;
-
- for (i = 0; i < tep->nr_events; i++) {
- event = tep->events[i];
- if (strcmp(event->name, name) == 0) {
- if (!sys)
- break;
- if (strcmp(event->system, sys) == 0)
- break;
- }
- }
- if (i == tep->nr_events)
- event = NULL;
-
- tep->last_event = event;
- return event;
-}
-
-static unsigned long long
-eval_num_arg(void *data, int size, struct tep_event *event, struct tep_print_arg *arg)
-{
- struct tep_handle *tep = event->tep;
- unsigned long long val = 0;
- unsigned long long left, right;
- struct tep_print_arg *typearg = NULL;
- struct tep_print_arg *larg;
- unsigned long offset;
- unsigned int field_size;
-
- switch (arg->type) {
- case TEP_PRINT_NULL:
- /* ?? */
- return 0;
- case TEP_PRINT_ATOM:
- return strtoull(arg->atom.atom, NULL, 0);
- case TEP_PRINT_FIELD:
- if (!arg->field.field) {
- arg->field.field = tep_find_any_field(event, arg->field.name);
- if (!arg->field.field)
- goto out_warning_field;
-
- }
- /* must be a number */
- val = tep_read_number(tep, data + arg->field.field->offset,
- arg->field.field->size);
- break;
- case TEP_PRINT_FLAGS:
- case TEP_PRINT_SYMBOL:
- case TEP_PRINT_INT_ARRAY:
- case TEP_PRINT_HEX:
- case TEP_PRINT_HEX_STR:
- break;
- case TEP_PRINT_TYPE:
- val = eval_num_arg(data, size, event, arg->typecast.item);
- return eval_type(val, arg, 0);
- case TEP_PRINT_STRING:
- case TEP_PRINT_BSTRING:
- case TEP_PRINT_BITMASK:
- return 0;
- case TEP_PRINT_FUNC: {
- struct trace_seq s;
- trace_seq_init(&s);
- val = process_defined_func(&s, data, size, event, arg);
- trace_seq_destroy(&s);
- return val;
- }
- case TEP_PRINT_OP:
- if (strcmp(arg->op.op, "[") == 0) {
- /*
- * Arrays are special, since we don't want
- * to read the arg as is.
- */
- right = eval_num_arg(data, size, event, arg->op.right);
-
- /* handle typecasts */
- larg = arg->op.left;
- while (larg->type == TEP_PRINT_TYPE) {
- if (!typearg)
- typearg = larg;
- larg = larg->typecast.item;
- }
-
- /* Default to long size */
- field_size = tep->long_size;
-
- switch (larg->type) {
- case TEP_PRINT_DYNAMIC_ARRAY:
- offset = tep_read_number(tep,
- data + larg->dynarray.field->offset,
- larg->dynarray.field->size);
- if (larg->dynarray.field->elementsize)
- field_size = larg->dynarray.field->elementsize;
- /*
- * The actual length of the dynamic array is stored
- * in the top half of the field, and the offset
- * is in the bottom half of the 32 bit field.
- */
- offset &= 0xffff;
- offset += right;
- break;
- case TEP_PRINT_FIELD:
- if (!larg->field.field) {
- larg->field.field =
- tep_find_any_field(event, larg->field.name);
- if (!larg->field.field) {
- arg = larg;
- goto out_warning_field;
- }
- }
- field_size = larg->field.field->elementsize;
- offset = larg->field.field->offset +
- right * larg->field.field->elementsize;
- break;
- default:
- goto default_op; /* oops, all bets off */
- }
- val = tep_read_number(tep,
- data + offset, field_size);
- if (typearg)
- val = eval_type(val, typearg, 1);
- break;
- } else if (strcmp(arg->op.op, "?") == 0) {
- left = eval_num_arg(data, size, event, arg->op.left);
- arg = arg->op.right;
- if (left)
- val = eval_num_arg(data, size, event, arg->op.left);
- else
- val = eval_num_arg(data, size, event, arg->op.right);
- break;
- }
- default_op:
- left = eval_num_arg(data, size, event, arg->op.left);
- right = eval_num_arg(data, size, event, arg->op.right);
- switch (arg->op.op[0]) {
- case '!':
- switch (arg->op.op[1]) {
- case 0:
- val = !right;
- break;
- case '=':
- val = left != right;
- break;
- default:
- goto out_warning_op;
- }
- break;
- case '~':
- val = ~right;
- break;
- case '|':
- if (arg->op.op[1])
- val = left || right;
- else
- val = left | right;
- break;
- case '&':
- if (arg->op.op[1])
- val = left && right;
- else
- val = left & right;
- break;
- case '<':
- switch (arg->op.op[1]) {
- case 0:
- val = left < right;
- break;
- case '<':
- val = left << right;
- break;
- case '=':
- val = left <= right;
- break;
- default:
- goto out_warning_op;
- }
- break;
- case '>':
- switch (arg->op.op[1]) {
- case 0:
- val = left > right;
- break;
- case '>':
- val = left >> right;
- break;
- case '=':
- val = left >= right;
- break;
- default:
- goto out_warning_op;
- }
- break;
- case '=':
- if (arg->op.op[1] != '=')
- goto out_warning_op;
-
- val = left == right;
- break;
- case '-':
- val = left - right;
- break;
- case '+':
- val = left + right;
- break;
- case '/':
- val = left / right;
- break;
- case '%':
- val = left % right;
- break;
- case '*':
- val = left * right;
- break;
- default:
- goto out_warning_op;
- }
- break;
- case TEP_PRINT_DYNAMIC_ARRAY_LEN:
- offset = tep_read_number(tep,
- data + arg->dynarray.field->offset,
- arg->dynarray.field->size);
- /*
- * The total allocated length of the dynamic array is
- * stored in the top half of the field, and the offset
- * is in the bottom half of the 32 bit field.
- */
- val = (unsigned long long)(offset >> 16);
- break;
- case TEP_PRINT_DYNAMIC_ARRAY:
- /* Without [], we pass the address of the dynamic data */
- offset = tep_read_number(tep,
- data + arg->dynarray.field->offset,
- arg->dynarray.field->size);
- /*
- * The total allocated length of the dynamic array is
- * stored in the top half of the field, and the offset
- * is in the bottom half of the 32 bit field.
- */
- offset &= 0xffff;
- val = (unsigned long long)((unsigned long)data + offset);
- break;
- default: /* not sure what to do there */
- return 0;
- }
- return val;
-
-out_warning_op:
- do_warning_event(event, "%s: unknown op '%s'", __func__, arg->op.op);
- return 0;
-
-out_warning_field:
- do_warning_event(event, "%s: field %s not found",
- __func__, arg->field.name);
- return 0;
-}
-
-struct flag {
- const char *name;
- unsigned long long value;
-};
-
-static const struct flag flags[] = {
- { "HI_SOFTIRQ", 0 },
- { "TIMER_SOFTIRQ", 1 },
- { "NET_TX_SOFTIRQ", 2 },
- { "NET_RX_SOFTIRQ", 3 },
- { "BLOCK_SOFTIRQ", 4 },
- { "IRQ_POLL_SOFTIRQ", 5 },
- { "TASKLET_SOFTIRQ", 6 },
- { "SCHED_SOFTIRQ", 7 },
- { "HRTIMER_SOFTIRQ", 8 },
- { "RCU_SOFTIRQ", 9 },
-
- { "HRTIMER_NORESTART", 0 },
- { "HRTIMER_RESTART", 1 },
-};
-
-static long long eval_flag(const char *flag)
-{
- int i;
-
- /*
- * Some flags in the format files do not get converted.
- * If the flag is not numeric, see if it is something that
- * we already know about.
- */
- if (isdigit(flag[0]))
- return strtoull(flag, NULL, 0);
-
- for (i = 0; i < (int)(sizeof(flags)/sizeof(flags[0])); i++)
- if (strcmp(flags[i].name, flag) == 0)
- return flags[i].value;
-
- return -1LL;
-}
-
-static void print_str_to_seq(struct trace_seq *s, const char *format,
- int len_arg, const char *str)
-{
- if (len_arg >= 0)
- trace_seq_printf(s, format, len_arg, str);
- else
- trace_seq_printf(s, format, str);
-}
-
-static void print_bitmask_to_seq(struct tep_handle *tep,
- struct trace_seq *s, const char *format,
- int len_arg, const void *data, int size)
-{
- int nr_bits = size * 8;
- int str_size = (nr_bits + 3) / 4;
- int len = 0;
- char buf[3];
- char *str;
- int index;
- int i;
-
- /*
- * The kernel likes to put in commas every 32 bits; we
- * can do the same.
- */
- str_size += (nr_bits - 1) / 32;
-
- str = malloc(str_size + 1);
- if (!str) {
- do_warning("%s: not enough memory!", __func__);
- return;
- }
- str[str_size] = 0;
-
- /* Start out with -2 for the two chars per byte */
- for (i = str_size - 2; i >= 0; i -= 2) {
- /*
- * data points to a bit mask of size bytes.
- * In the kernel, this is an array of long words, thus
- * endianness is very important.
- */
- if (tep->file_bigendian)
- index = size - (len + 1);
- else
- index = len;
-
- snprintf(buf, 3, "%02x", *((unsigned char *)data + index));
- memcpy(str + i, buf, 2);
- len++;
- if (!(len & 3) && i > 0) {
- i--;
- str[i] = ',';
- }
- }
-
- if (len_arg >= 0)
- trace_seq_printf(s, format, len_arg, str);
- else
- trace_seq_printf(s, format, str);
-
- free(str);
-}
-
-static void print_str_arg(struct trace_seq *s, void *data, int size,
- struct tep_event *event, const char *format,
- int len_arg, struct tep_print_arg *arg)
-{
- struct tep_handle *tep = event->tep;
- struct tep_print_flag_sym *flag;
- struct tep_format_field *field;
- struct printk_map *printk;
- long long val, fval;
- unsigned long long addr;
- char *str;
- unsigned char *hex;
- int print;
- int i, len;
-
- switch (arg->type) {
- case TEP_PRINT_NULL:
- /* ?? */
- return;
- case TEP_PRINT_ATOM:
- print_str_to_seq(s, format, len_arg, arg->atom.atom);
- return;
- case TEP_PRINT_FIELD:
- field = arg->field.field;
- if (!field) {
- field = tep_find_any_field(event, arg->field.name);
- if (!field) {
- str = arg->field.name;
- goto out_warning_field;
- }
- arg->field.field = field;
- }
- /* Zero-sized fields mean the rest of the data */
- len = field->size ? : size - field->offset;
-
- /*
- * Some events pass in pointers. If this is not an array
- * and the size is the same as long_size, assume that it
- * is a pointer.
- */
- if (!(field->flags & TEP_FIELD_IS_ARRAY) &&
- field->size == tep->long_size) {
-
- /* Handle heterogeneous recording and processing
- * architectures
- *
- * CASE I:
- * Traces recorded on 32-bit devices (32-bit
- * addressing) and processed on 64-bit devices:
- * In this case, only 32 bits should be read.
- *
- * CASE II:
- * Traces recorded on 64 bit devices and processed
- * on 32-bit devices:
- * In this case, 64 bits must be read.
- */
- addr = (tep->long_size == 8) ?
- *(unsigned long long *)(data + field->offset) :
- (unsigned long long)*(unsigned int *)(data + field->offset);
-
- /* Check if it matches a print format */
- printk = find_printk(tep, addr);
- if (printk)
- trace_seq_puts(s, printk->printk);
- else
- trace_seq_printf(s, "%llx", addr);
- break;
- }
- str = malloc(len + 1);
- if (!str) {
- do_warning_event(event, "%s: not enough memory!",
- __func__);
- return;
- }
- memcpy(str, data + field->offset, len);
- str[len] = 0;
- print_str_to_seq(s, format, len_arg, str);
- free(str);
- break;
- case TEP_PRINT_FLAGS:
- val = eval_num_arg(data, size, event, arg->flags.field);
- print = 0;
- for (flag = arg->flags.flags; flag; flag = flag->next) {
- fval = eval_flag(flag->value);
- if (!val && fval < 0) {
- print_str_to_seq(s, format, len_arg, flag->str);
- break;
- }
- if (fval > 0 && (val & fval) == fval) {
- if (print && arg->flags.delim)
- trace_seq_puts(s, arg->flags.delim);
- print_str_to_seq(s, format, len_arg, flag->str);
- print = 1;
- val &= ~fval;
- }
- }
- if (val) {
- if (print && arg->flags.delim)
- trace_seq_puts(s, arg->flags.delim);
- trace_seq_printf(s, "0x%llx", val);
- }
- break;
- case TEP_PRINT_SYMBOL:
- val = eval_num_arg(data, size, event, arg->symbol.field);
- for (flag = arg->symbol.symbols; flag; flag = flag->next) {
- fval = eval_flag(flag->value);
- if (val == fval) {
- print_str_to_seq(s, format, len_arg, flag->str);
- break;
- }
- }
- if (!flag)
- trace_seq_printf(s, "0x%llx", val);
- break;
- case TEP_PRINT_HEX:
- case TEP_PRINT_HEX_STR:
- if (arg->hex.field->type == TEP_PRINT_DYNAMIC_ARRAY) {
- unsigned long offset;
- offset = tep_read_number(tep,
- data + arg->hex.field->dynarray.field->offset,
- arg->hex.field->dynarray.field->size);
- hex = data + (offset & 0xffff);
- } else {
- field = arg->hex.field->field.field;
- if (!field) {
- str = arg->hex.field->field.name;
- field = tep_find_any_field(event, str);
- if (!field)
- goto out_warning_field;
- arg->hex.field->field.field = field;
- }
- hex = data + field->offset;
- }
- len = eval_num_arg(data, size, event, arg->hex.size);
- for (i = 0; i < len; i++) {
- if (i && arg->type == TEP_PRINT_HEX)
- trace_seq_putc(s, ' ');
- trace_seq_printf(s, "%02x", hex[i]);
- }
- break;
-
- case TEP_PRINT_INT_ARRAY: {
- void *num;
- int el_size;
-
- if (arg->int_array.field->type == TEP_PRINT_DYNAMIC_ARRAY) {
- unsigned long offset;
- struct tep_format_field *field =
- arg->int_array.field->dynarray.field;
- offset = tep_read_number(tep,
- data + field->offset,
- field->size);
- num = data + (offset & 0xffff);
- } else {
- field = arg->int_array.field->field.field;
- if (!field) {
- str = arg->int_array.field->field.name;
- field = tep_find_any_field(event, str);
- if (!field)
- goto out_warning_field;
- arg->int_array.field->field.field = field;
- }
- num = data + field->offset;
- }
- len = eval_num_arg(data, size, event, arg->int_array.count);
- el_size = eval_num_arg(data, size, event,
- arg->int_array.el_size);
- for (i = 0; i < len; i++) {
- if (i)
- trace_seq_putc(s, ' ');
-
- if (el_size == 1) {
- trace_seq_printf(s, "%u", *(uint8_t *)num);
- } else if (el_size == 2) {
- trace_seq_printf(s, "%u", *(uint16_t *)num);
- } else if (el_size == 4) {
- trace_seq_printf(s, "%u", *(uint32_t *)num);
- } else if (el_size == 8) {
- trace_seq_printf(s, "%"PRIu64, *(uint64_t *)num);
- } else {
- trace_seq_printf(s, "BAD SIZE:%d 0x%x",
- el_size, *(uint8_t *)num);
- el_size = 1;
- }
-
- num += el_size;
- }
- break;
- }
- case TEP_PRINT_TYPE:
- break;
- case TEP_PRINT_STRING: {
- int str_offset;
-
- if (arg->string.offset == -1) {
- struct tep_format_field *f;
-
- f = tep_find_any_field(event, arg->string.string);
- arg->string.offset = f->offset;
- }
- str_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->string.offset));
- str_offset &= 0xffff;
- print_str_to_seq(s, format, len_arg, ((char *)data) + str_offset);
- break;
- }
- case TEP_PRINT_BSTRING:
- print_str_to_seq(s, format, len_arg, arg->string.string);
- break;
- case TEP_PRINT_BITMASK: {
- int bitmask_offset;
- int bitmask_size;
-
- if (arg->bitmask.offset == -1) {
- struct tep_format_field *f;
-
- f = tep_find_any_field(event, arg->bitmask.bitmask);
- arg->bitmask.offset = f->offset;
- }
- bitmask_offset = tep_data2host4(tep, *(unsigned int *)(data + arg->bitmask.offset));
- bitmask_size = bitmask_offset >> 16;
- bitmask_offset &= 0xffff;
- print_bitmask_to_seq(tep, s, format, len_arg,
- data + bitmask_offset, bitmask_size);
- break;
- }
- case TEP_PRINT_OP:
- /*
- * The only op for string should be ? :
- */
- if (arg->op.op[0] != '?')
- return;
- val = eval_num_arg(data, size, event, arg->op.left);
- if (val)
- print_str_arg(s, data, size, event,
- format, len_arg, arg->op.right->op.left);
- else
- print_str_arg(s, data, size, event,
- format, len_arg, arg->op.right->op.right);
- break;
- case TEP_PRINT_FUNC:
- process_defined_func(s, data, size, event, arg);
- break;
- default:
- /* well... */
- break;
- }
-
- return;
-
-out_warning_field:
- do_warning_event(event, "%s: field %s not found",
- __func__, arg->field.name);
-}
-
-static unsigned long long
-process_defined_func(struct trace_seq *s, void *data, int size,
- struct tep_event *event, struct tep_print_arg *arg)
-{
- struct tep_function_handler *func_handle = arg->func.func;
- struct func_params *param;
- unsigned long long *args;
- unsigned long long ret;
- struct tep_print_arg *farg;
- struct trace_seq str;
- struct save_str {
- struct save_str *next;
- char *str;
- } *strings = NULL, *string;
- int i;
-
- if (!func_handle->nr_args) {
- ret = (*func_handle->func)(s, NULL);
- goto out;
- }
-
- farg = arg->func.args;
- param = func_handle->params;
-
- ret = ULLONG_MAX;
- args = malloc(sizeof(*args) * func_handle->nr_args);
- if (!args)
- goto out;
-
- for (i = 0; i < func_handle->nr_args; i++) {
- switch (param->type) {
- case TEP_FUNC_ARG_INT:
- case TEP_FUNC_ARG_LONG:
- case TEP_FUNC_ARG_PTR:
- args[i] = eval_num_arg(data, size, event, farg);
- break;
- case TEP_FUNC_ARG_STRING:
- trace_seq_init(&str);
- print_str_arg(&str, data, size, event, "%s", -1, farg);
- trace_seq_terminate(&str);
- string = malloc(sizeof(*string));
- if (!string) {
- do_warning_event(event, "%s(%d): malloc str",
- __func__, __LINE__);
- goto out_free;
- }
- string->next = strings;
- string->str = strdup(str.buffer);
- if (!string->str) {
- free(string);
- do_warning_event(event, "%s(%d): malloc str",
- __func__, __LINE__);
- goto out_free;
- }
- args[i] = (uintptr_t)string->str;
- strings = string;
- trace_seq_destroy(&str);
- break;
- default:
- /*
- * Something went totally wrong, this is not
- * an input error, something in this code broke.
- */
- do_warning_event(event, "Unexpected end of arguments\n");
- goto out_free;
- }
- farg = farg->next;
- param = param->next;
- }
-
- ret = (*func_handle->func)(s, args);
-out_free:
- free(args);
- while (strings) {
- string = strings;
- strings = string->next;
- free(string->str);
- free(string);
- }
-
- out:
- /* TBD : handle return type here */
- return ret;
-}
-
-static void free_args(struct tep_print_arg *args)
-{
- struct tep_print_arg *next;
-
- while (args) {
- next = args->next;
-
- free_arg(args);
- args = next;
- }
-}
-
-static struct tep_print_arg *make_bprint_args(char *fmt, void *data, int size, struct tep_event *event)
-{
- struct tep_handle *tep = event->tep;
- struct tep_format_field *field, *ip_field;
- struct tep_print_arg *args, *arg, **next;
- unsigned long long ip, val;
- char *ptr;
- void *bptr;
- int vsize = 0;
-
- field = tep->bprint_buf_field;
- ip_field = tep->bprint_ip_field;
-
- if (!field) {
- field = tep_find_field(event, "buf");
- if (!field) {
- do_warning_event(event, "can't find buffer field for binary printk");
- return NULL;
- }
- ip_field = tep_find_field(event, "ip");
- if (!ip_field) {
- do_warning_event(event, "can't find ip field for binary printk");
- return NULL;
- }
- tep->bprint_buf_field = field;
- tep->bprint_ip_field = ip_field;
- }
-
- ip = tep_read_number(tep, data + ip_field->offset, ip_field->size);
-
- /*
- * The first arg is the IP pointer.
- */
- args = alloc_arg();
- if (!args) {
- do_warning_event(event, "%s(%d): not enough memory!",
- __func__, __LINE__);
- return NULL;
- }
- arg = args;
- arg->next = NULL;
- next = &arg->next;
-
- arg->type = TEP_PRINT_ATOM;
-
- if (asprintf(&arg->atom.atom, "%lld", ip) < 0)
- goto out_free;
-
- /* skip the first "%ps: " */
- for (ptr = fmt + 5, bptr = data + field->offset;
- bptr < data + size && *ptr; ptr++) {
- int ls = 0;
-
- if (*ptr == '%') {
- process_again:
- ptr++;
- switch (*ptr) {
- case '%':
- break;
- case 'l':
- ls++;
- goto process_again;
- case 'L':
- ls = 2;
- goto process_again;
- case '0' ... '9':
- goto process_again;
- case '.':
- goto process_again;
- case 'z':
- case 'Z':
- ls = 1;
- goto process_again;
- case 'p':
- ls = 1;
- if (isalnum(ptr[1])) {
- ptr++;
- /* Check for special pointers */
- switch (*ptr) {
- case 's':
- case 'S':
- case 'x':
- break;
- case 'f':
- case 'F':
- /*
- * Pre-5.5 kernels use %pf and
- * %pF for printing symbols
- * while kernels since 5.5 use
- * %pfw for fwnodes. So check
- * %p[fF] isn't followed by 'w'.
- */
- if (ptr[1] != 'w')
- break;
- /* fall through */
- default:
- /*
- * Older kernels do not process
- * dereferenced pointers.
- * Only process if the pointer
- * value is printable.
- */
- if (isprint(*(char *)bptr))
- goto process_string;
- }
- }
- /* fall through */
- case 'd':
- case 'u':
- case 'i':
- case 'x':
- case 'X':
- case 'o':
- switch (ls) {
- case 0:
- vsize = 4;
- break;
- case 1:
- vsize = tep->long_size;
- break;
- case 2:
- vsize = 8;
- break;
- default:
- vsize = ls; /* ? */
- break;
- }
- /* fall through */
- case '*':
- if (*ptr == '*')
- vsize = 4;
-
- /* the pointers are always 4 bytes aligned */
- bptr = (void *)(((unsigned long)bptr + 3) &
- ~3);
- val = tep_read_number(tep, bptr, vsize);
- bptr += vsize;
- arg = alloc_arg();
- if (!arg) {
- do_warning_event(event, "%s(%d): not enough memory!",
- __func__, __LINE__);
- goto out_free;
- }
- arg->next = NULL;
- arg->type = TEP_PRINT_ATOM;
- if (asprintf(&arg->atom.atom, "%lld", val) < 0) {
- free(arg);
- goto out_free;
- }
- *next = arg;
- next = &arg->next;
- /*
- * The '*' case means that an arg is used as the length.
- * We need to continue to figure out for what.
- */
- if (*ptr == '*')
- goto process_again;
-
- break;
- case 's':
- process_string:
- arg = alloc_arg();
- if (!arg) {
- do_warning_event(event, "%s(%d): not enough memory!",
- __func__, __LINE__);
- goto out_free;
- }
- arg->next = NULL;
- arg->type = TEP_PRINT_BSTRING;
- arg->string.string = strdup(bptr);
- if (!arg->string.string)
- goto out_free;
- bptr += strlen(bptr) + 1;
- *next = arg;
- next = &arg->next;
- default:
- break;
- }
- }
- }
-
- return args;
-
-out_free:
- free_args(args);
- return NULL;
-}
-
-static char *
-get_bprint_format(void *data, int size __maybe_unused,
- struct tep_event *event)
-{
- struct tep_handle *tep = event->tep;
- unsigned long long addr;
- struct tep_format_field *field;
- struct printk_map *printk;
- char *format;
-
- field = tep->bprint_fmt_field;
-
- if (!field) {
- field = tep_find_field(event, "fmt");
- if (!field) {
- do_warning_event(event, "can't find format field for binary printk");
- return NULL;
- }
- tep->bprint_fmt_field = field;
- }
-
- addr = tep_read_number(tep, data + field->offset, field->size);
-
- printk = find_printk(tep, addr);
- if (!printk) {
- if (asprintf(&format, "%%ps: (NO FORMAT FOUND at %llx)\n", addr) < 0)
- return NULL;
- return format;
- }
-
- if (asprintf(&format, "%s: %s", "%ps", printk->printk) < 0)
- return NULL;
-
- return format;
-}
-
-static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size,
- struct tep_event *event, struct tep_print_arg *arg)
-{
- unsigned char *buf;
- const char *fmt = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x";
-
- if (arg->type == TEP_PRINT_FUNC) {
- process_defined_func(s, data, size, event, arg);
- return;
- }
-
- if (arg->type != TEP_PRINT_FIELD) {
- trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d",
- arg->type);
- return;
- }
-
- if (mac == 'm')
- fmt = "%.2x%.2x%.2x%.2x%.2x%.2x";
- if (!arg->field.field) {
- arg->field.field =
- tep_find_any_field(event, arg->field.name);
- if (!arg->field.field) {
- do_warning_event(event, "%s: field %s not found",
- __func__, arg->field.name);
- return;
- }
- }
- if (arg->field.field->size != 6) {
- trace_seq_printf(s, "INVALIDMAC");
- return;
- }
- buf = data + arg->field.field->offset;
- trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
-}
-
-static void print_ip4_addr(struct trace_seq *s, char i, unsigned char *buf)
-{
- const char *fmt;
-
- if (i == 'i')
- fmt = "%03d.%03d.%03d.%03d";
- else
- fmt = "%d.%d.%d.%d";
-
- trace_seq_printf(s, fmt, buf[0], buf[1], buf[2], buf[3]);
-}
-
-static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
-{
- return ((unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
- (unsigned long)(a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
-}
-
-static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
-{
- return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE);
-}
-
-static void print_ip6c_addr(struct trace_seq *s, unsigned char *addr)
-{
- int i, j, range;
- unsigned char zerolength[8];
- int longest = 1;
- int colonpos = -1;
- uint16_t word;
- uint8_t hi, lo;
- bool needcolon = false;
- bool useIPv4;
- struct in6_addr in6;
-
- memcpy(&in6, addr, sizeof(struct in6_addr));
-
- useIPv4 = ipv6_addr_v4mapped(&in6) || ipv6_addr_is_isatap(&in6);
-
- memset(zerolength, 0, sizeof(zerolength));
-
- if (useIPv4)
- range = 6;
- else
- range = 8;
-
- /* find position of longest 0 run */
- for (i = 0; i < range; i++) {
- for (j = i; j < range; j++) {
- if (in6.s6_addr16[j] != 0)
- break;
- zerolength[i]++;
- }
- }
- for (i = 0; i < range; i++) {
- if (zerolength[i] > longest) {
- longest = zerolength[i];
- colonpos = i;
- }
- }
- if (longest == 1) /* don't compress a single 0 */
- colonpos = -1;
-
- /* emit address */
- for (i = 0; i < range; i++) {
- if (i == colonpos) {
- if (needcolon || i == 0)
- trace_seq_printf(s, ":");
- trace_seq_printf(s, ":");
- needcolon = false;
- i += longest - 1;
- continue;
- }
- if (needcolon) {
- trace_seq_printf(s, ":");
- needcolon = false;
- }
- /* hex u16 without leading 0s */
- word = ntohs(in6.s6_addr16[i]);
- hi = word >> 8;
- lo = word & 0xff;
- if (hi)
- trace_seq_printf(s, "%x%02x", hi, lo);
- else
- trace_seq_printf(s, "%x", lo);
-
- needcolon = true;
- }
-
- if (useIPv4) {
- if (needcolon)
- trace_seq_printf(s, ":");
- print_ip4_addr(s, 'I', &in6.s6_addr[12]);
- }
-
- return;
-}
-
-static void print_ip6_addr(struct trace_seq *s, char i, unsigned char *buf)
-{
- int j;
-
- for (j = 0; j < 16; j += 2) {
- trace_seq_printf(s, "%02x%02x", buf[j], buf[j+1]);
- if (i == 'I' && j < 14)
- trace_seq_printf(s, ":");
- }
-}
-
-/*
- * %pi4 print an IPv4 address with leading zeros
- * %pI4 print an IPv4 address without leading zeros
- * %pi6 print an IPv6 address without colons
- * %pI6 print an IPv6 address with colons
- * %pI6c print an IPv6 address in compressed form with colons
- * %pISpc print an IP address based on sockaddr; p adds port.
- */
-static int print_ipv4_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct tep_event *event,
- struct tep_print_arg *arg)
-{
- unsigned char *buf;
-
- if (arg->type == TEP_PRINT_FUNC) {
- process_defined_func(s, data, size, event, arg);
- return 0;
- }
-
- if (arg->type != TEP_PRINT_FIELD) {
- trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
- return 0;
- }
-
- if (!arg->field.field) {
- arg->field.field =
- tep_find_any_field(event, arg->field.name);
- if (!arg->field.field) {
- do_warning("%s: field %s not found",
- __func__, arg->field.name);
- return 0;
- }
- }
-
- buf = data + arg->field.field->offset;
-
- if (arg->field.field->size != 4) {
- trace_seq_printf(s, "INVALIDIPv4");
- return 0;
- }
- print_ip4_addr(s, i, buf);
-
- return 0;
-}
-
-static int print_ipv6_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct tep_event *event,
- struct tep_print_arg *arg)
-{
- char have_c = 0;
- unsigned char *buf;
- int rc = 0;
-
- /* pI6c */
- if (i == 'I' && *ptr == 'c') {
- have_c = 1;
- ptr++;
- rc++;
- }
-
- if (arg->type == TEP_PRINT_FUNC) {
- process_defined_func(s, data, size, event, arg);
- return rc;
- }
-
- if (arg->type != TEP_PRINT_FIELD) {
- trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
- return rc;
- }
-
- if (!arg->field.field) {
- arg->field.field =
- tep_find_any_field(event, arg->field.name);
- if (!arg->field.field) {
- do_warning("%s: field %s not found",
- __func__, arg->field.name);
- return rc;
- }
- }
-
- buf = data + arg->field.field->offset;
-
- if (arg->field.field->size != 16) {
- trace_seq_printf(s, "INVALIDIPv6");
- return rc;
- }
-
- if (have_c)
- print_ip6c_addr(s, buf);
- else
- print_ip6_addr(s, i, buf);
-
- return rc;
-}
-
-static int print_ipsa_arg(struct trace_seq *s, const char *ptr, char i,
- void *data, int size, struct tep_event *event,
- struct tep_print_arg *arg)
-{
- char have_c = 0, have_p = 0;
- unsigned char *buf;
- struct sockaddr_storage *sa;
- int rc = 0;
-
- /* pISpc */
- if (i == 'I') {
- if (*ptr == 'p') {
- have_p = 1;
- ptr++;
- rc++;
- }
- if (*ptr == 'c') {
- have_c = 1;
- ptr++;
- rc++;
- }
- }
-
- if (arg->type == TEP_PRINT_FUNC) {
- process_defined_func(s, data, size, event, arg);
- return rc;
- }
-
- if (arg->type != TEP_PRINT_FIELD) {
- trace_seq_printf(s, "ARG TYPE NOT FIELD BUT %d", arg->type);
- return rc;
- }
-
- if (!arg->field.field) {
- arg->field.field =
- tep_find_any_field(event, arg->field.name);
- if (!arg->field.field) {
- do_warning("%s: field %s not found",
- __func__, arg->field.name);
- return rc;
- }
- }
-
- sa = (struct sockaddr_storage *) (data + arg->field.field->offset);
-
- if (sa->ss_family == AF_INET) {
- struct sockaddr_in *sa4 = (struct sockaddr_in *) sa;
-
- if (arg->field.field->size < sizeof(struct sockaddr_in)) {
- trace_seq_printf(s, "INVALIDIPv4");
- return rc;
- }
-
- print_ip4_addr(s, i, (unsigned char *) &sa4->sin_addr);
- if (have_p)
- trace_seq_printf(s, ":%d", ntohs(sa4->sin_port));
-
-
- } else if (sa->ss_family == AF_INET6) {
- struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) sa;
-
- if (arg->field.field->size < sizeof(struct sockaddr_in6)) {
- trace_seq_printf(s, "INVALIDIPv6");
- return rc;
- }
-
- if (have_p)
- trace_seq_printf(s, "[");
-
- buf = (unsigned char *) &sa6->sin6_addr;
- if (have_c)
- print_ip6c_addr(s, buf);
- else
- print_ip6_addr(s, i, buf);
-
- if (have_p)
- trace_seq_printf(s, "]:%d", ntohs(sa6->sin6_port));
- }
-
- return rc;
-}
-
-static int print_ip_arg(struct trace_seq *s, const char *ptr,
- void *data, int size, struct tep_event *event,
- struct tep_print_arg *arg)
-{
- char i = *ptr; /* 'i' or 'I' */
- char ver;
- int rc = 0;
-
- ptr++;
- rc++;
-
- ver = *ptr;
- ptr++;
- rc++;
-
- switch (ver) {
- case '4':
- rc += print_ipv4_arg(s, ptr, i, data, size, event, arg);
- break;
- case '6':
- rc += print_ipv6_arg(s, ptr, i, data, size, event, arg);
- break;
- case 'S':
- rc += print_ipsa_arg(s, ptr, i, data, size, event, arg);
- break;
- default:
- return 0;
- }
-
- return rc;
-}
-
-static int is_printable_array(char *p, unsigned int len)
-{
- unsigned int i;
-
- for (i = 0; i < len && p[i]; i++)
- if (!isprint(p[i]) && !isspace(p[i]))
- return 0;
- return 1;
-}
-
-void tep_print_field(struct trace_seq *s, void *data,
- struct tep_format_field *field)
-{
- unsigned long long val;
- unsigned int offset, len, i;
- struct tep_handle *tep = field->event->tep;
-
- if (field->flags & TEP_FIELD_IS_ARRAY) {
- offset = field->offset;
- len = field->size;
- if (field->flags & TEP_FIELD_IS_DYNAMIC) {
- val = tep_read_number(tep, data + offset, len);
- offset = val;
- len = offset >> 16;
- offset &= 0xffff;
- }
- if (field->flags & TEP_FIELD_IS_STRING &&
- is_printable_array(data + offset, len)) {
- trace_seq_printf(s, "%s", (char *)data + offset);
- } else {
- trace_seq_puts(s, "ARRAY[");
- for (i = 0; i < len; i++) {
- if (i)
- trace_seq_puts(s, ", ");
- trace_seq_printf(s, "%02x",
- *((unsigned char *)data + offset + i));
- }
- trace_seq_putc(s, ']');
- field->flags &= ~TEP_FIELD_IS_STRING;
- }
- } else {
- val = tep_read_number(tep, data + field->offset,
- field->size);
- if (field->flags & TEP_FIELD_IS_POINTER) {
- trace_seq_printf(s, "0x%llx", val);
- } else if (field->flags & TEP_FIELD_IS_SIGNED) {
- switch (field->size) {
- case 4:
- /*
- * If field is long then print it in hex.
- * A long usually stores pointers.
- */
- if (field->flags & TEP_FIELD_IS_LONG)
- trace_seq_printf(s, "0x%x", (int)val);
- else
- trace_seq_printf(s, "%d", (int)val);
- break;
- case 2:
- trace_seq_printf(s, "%2d", (short)val);
- break;
- case 1:
- trace_seq_printf(s, "%1d", (char)val);
- break;
- default:
- trace_seq_printf(s, "%lld", val);
- }
- } else {
- if (field->flags & TEP_FIELD_IS_LONG)
- trace_seq_printf(s, "0x%llx", val);
- else
- trace_seq_printf(s, "%llu", val);
- }
- }
-}
-
-void tep_print_fields(struct trace_seq *s, void *data,
- int size __maybe_unused, struct tep_event *event)
-{
- struct tep_format_field *field;
-
- field = event->format.fields;
- while (field) {
- trace_seq_printf(s, " %s=", field->name);
- tep_print_field(s, data, field);
- field = field->next;
- }
-}
-
-static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_event *event)
-{
- struct tep_handle *tep = event->tep;
- struct tep_print_fmt *print_fmt = &event->print_fmt;
- struct tep_print_arg *arg = print_fmt->args;
- struct tep_print_arg *args = NULL;
- const char *ptr = print_fmt->format;
- unsigned long long val;
- struct func_map *func;
- const char *saveptr;
- struct trace_seq p;
- char *bprint_fmt = NULL;
- char format[32];
- int show_func;
- int len_as_arg;
- int len_arg = 0;
- int len;
- int ls;
-
- if (event->flags & TEP_EVENT_FL_FAILED) {
- trace_seq_printf(s, "[FAILED TO PARSE]");
- tep_print_fields(s, data, size, event);
- return;
- }
-
- if (event->flags & TEP_EVENT_FL_ISBPRINT) {
- bprint_fmt = get_bprint_format(data, size, event);
- args = make_bprint_args(bprint_fmt, data, size, event);
- arg = args;
- ptr = bprint_fmt;
- }
-
- for (; *ptr; ptr++) {
- ls = 0;
- if (*ptr == '\\') {
- ptr++;
- switch (*ptr) {
- case 'n':
- trace_seq_putc(s, '\n');
- break;
- case 't':
- trace_seq_putc(s, '\t');
- break;
- case 'r':
- trace_seq_putc(s, '\r');
- break;
- case '\\':
- trace_seq_putc(s, '\\');
- break;
- default:
- trace_seq_putc(s, *ptr);
- break;
- }
-
- } else if (*ptr == '%') {
- saveptr = ptr;
- show_func = 0;
- len_as_arg = 0;
- cont_process:
- ptr++;
- switch (*ptr) {
- case '%':
- trace_seq_putc(s, '%');
- break;
- case '#':
- /* FIXME: need to handle properly */
- goto cont_process;
- case 'h':
- ls--;
- goto cont_process;
- case 'l':
- ls++;
- goto cont_process;
- case 'L':
- ls = 2;
- goto cont_process;
- case '*':
- /* The argument is the length. */
- if (!arg) {
- do_warning_event(event, "no argument match");
- event->flags |= TEP_EVENT_FL_FAILED;
- goto out_failed;
- }
- len_arg = eval_num_arg(data, size, event, arg);
- len_as_arg = 1;
- arg = arg->next;
- goto cont_process;
- case '.':
- case 'z':
- case 'Z':
- case '0' ... '9':
- case '-':
- goto cont_process;
- case 'p':
- if (tep->long_size == 4)
- ls = 1;
- else
- ls = 2;
-
- if (isalnum(ptr[1]))
- ptr++;
-
- if (arg->type == TEP_PRINT_BSTRING) {
- trace_seq_puts(s, arg->string.string);
- arg = arg->next;
- break;
- }
-
- if (*ptr == 'F' || *ptr == 'f' ||
- *ptr == 'S' || *ptr == 's') {
- show_func = *ptr;
- } else if (*ptr == 'M' || *ptr == 'm') {
- print_mac_arg(s, *ptr, data, size, event, arg);
- arg = arg->next;
- break;
- } else if (*ptr == 'I' || *ptr == 'i') {
- int n;
-
- n = print_ip_arg(s, ptr, data, size, event, arg);
- if (n > 0) {
- ptr += n - 1;
- arg = arg->next;
- break;
- }
- }
-
- /* fall through */
- case 'd':
- case 'u':
- case 'i':
- case 'x':
- case 'X':
- case 'o':
- if (!arg) {
- do_warning_event(event, "no argument match");
- event->flags |= TEP_EVENT_FL_FAILED;
- goto out_failed;
- }
-
- len = ((unsigned long)ptr + 1) -
- (unsigned long)saveptr;
-
- /* should never happen */
- if (len > 31) {
- do_warning_event(event, "bad format!");
- event->flags |= TEP_EVENT_FL_FAILED;
- len = 31;
- }
-
- memcpy(format, saveptr, len);
- format[len] = 0;
-
- val = eval_num_arg(data, size, event, arg);
- arg = arg->next;
-
- if (show_func) {
- func = find_func(tep, val);
- if (func) {
- trace_seq_puts(s, func->func);
- if (show_func == 'F')
- trace_seq_printf(s,
- "+0x%llx",
- val - func->addr);
- break;
- }
- }
- if (tep->long_size == 8 && ls == 1 &&
- sizeof(long) != 8) {
- char *p;
-
- /* make %l into %ll */
- if (ls == 1 && (p = strchr(format, 'l')))
- memmove(p+1, p, strlen(p)+1);
- else if (strcmp(format, "%p") == 0)
- strcpy(format, "0x%llx");
- ls = 2;
- }
- switch (ls) {
- case -2:
- if (len_as_arg)
- trace_seq_printf(s, format, len_arg, (char)val);
- else
- trace_seq_printf(s, format, (char)val);
- break;
- case -1:
- if (len_as_arg)
- trace_seq_printf(s, format, len_arg, (short)val);
- else
- trace_seq_printf(s, format, (short)val);
- break;
- case 0:
- if (len_as_arg)
- trace_seq_printf(s, format, len_arg, (int)val);
- else
- trace_seq_printf(s, format, (int)val);
- break;
- case 1:
- if (len_as_arg)
- trace_seq_printf(s, format, len_arg, (long)val);
- else
- trace_seq_printf(s, format, (long)val);
- break;
- case 2:
- if (len_as_arg)
- trace_seq_printf(s, format, len_arg,
- (long long)val);
- else
- trace_seq_printf(s, format, (long long)val);
- break;
- default:
- do_warning_event(event, "bad count (%d)", ls);
- event->flags |= TEP_EVENT_FL_FAILED;
- }
- break;
- case 's':
- if (!arg) {
- do_warning_event(event, "no matching argument");
- event->flags |= TEP_EVENT_FL_FAILED;
- goto out_failed;
- }
-
- len = ((unsigned long)ptr + 1) -
- (unsigned long)saveptr;
-
- /* should never happen */
- if (len > 31) {
- do_warning_event(event, "bad format!");
- event->flags |= TEP_EVENT_FL_FAILED;
- len = 31;
- }
-
- memcpy(format, saveptr, len);
- format[len] = 0;
- if (!len_as_arg)
- len_arg = -1;
- /* Use helper trace_seq */
- trace_seq_init(&p);
- print_str_arg(&p, data, size, event,
- format, len_arg, arg);
- trace_seq_terminate(&p);
- trace_seq_puts(s, p.buffer);
- trace_seq_destroy(&p);
- arg = arg->next;
- break;
- default:
- trace_seq_printf(s, ">%c<", *ptr);
-
- }
- } else
- trace_seq_putc(s, *ptr);
- }
-
- if (event->flags & TEP_EVENT_FL_FAILED) {
-out_failed:
- trace_seq_printf(s, "[FAILED TO PARSE]");
- }
-
- if (args) {
- free_args(args);
- free(bprint_fmt);
- }
-}
-
-/*
- * This parses out the Latency format (interrupts disabled,
- * need rescheduling, in hard/soft interrupt, preempt count
- * and lock depth) and places it into the trace_seq.
- */
-static void data_latency_format(struct tep_handle *tep, struct trace_seq *s,
- char *format, struct tep_record *record)
-{
- static int check_lock_depth = 1;
- static int check_migrate_disable = 1;
- static int lock_depth_exists;
- static int migrate_disable_exists;
- unsigned int lat_flags;
- struct trace_seq sq;
- unsigned int pc;
- int lock_depth = 0;
- int migrate_disable = 0;
- int hardirq;
- int softirq;
- void *data = record->data;
-
- trace_seq_init(&sq);
- lat_flags = parse_common_flags(tep, data);
- pc = parse_common_pc(tep, data);
- /* lock_depth may not always exist */
- if (lock_depth_exists)
- lock_depth = parse_common_lock_depth(tep, data);
- else if (check_lock_depth) {
- lock_depth = parse_common_lock_depth(tep, data);
- if (lock_depth < 0)
- check_lock_depth = 0;
- else
- lock_depth_exists = 1;
- }
-
- /* migrate_disable may not always exist */
- if (migrate_disable_exists)
- migrate_disable = parse_common_migrate_disable(tep, data);
- else if (check_migrate_disable) {
- migrate_disable = parse_common_migrate_disable(tep, data);
- if (migrate_disable < 0)
- check_migrate_disable = 0;
- else
- migrate_disable_exists = 1;
- }
-
- hardirq = lat_flags & TRACE_FLAG_HARDIRQ;
- softirq = lat_flags & TRACE_FLAG_SOFTIRQ;
-
- trace_seq_printf(&sq, "%c%c%c",
- (lat_flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
- (lat_flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
- 'X' : '.',
- (lat_flags & TRACE_FLAG_NEED_RESCHED) ?
- 'N' : '.',
- (hardirq && softirq) ? 'H' :
- hardirq ? 'h' : softirq ? 's' : '.');
-
- if (pc)
- trace_seq_printf(&sq, "%x", pc);
- else
- trace_seq_printf(&sq, ".");
-
- if (migrate_disable_exists) {
- if (migrate_disable < 0)
- trace_seq_printf(&sq, ".");
- else
- trace_seq_printf(&sq, "%d", migrate_disable);
- }
-
- if (lock_depth_exists) {
- if (lock_depth < 0)
- trace_seq_printf(&sq, ".");
- else
- trace_seq_printf(&sq, "%d", lock_depth);
- }
-
- if (sq.state == TRACE_SEQ__MEM_ALLOC_FAILED) {
- s->state = TRACE_SEQ__MEM_ALLOC_FAILED;
- return;
- }
-
- trace_seq_terminate(&sq);
- trace_seq_puts(s, sq.buffer);
- trace_seq_destroy(&sq);
- trace_seq_terminate(s);
-}
-
-/**
- * tep_data_type - parse out the given event type
- * @tep: a handle to the trace event parser context
- * @rec: the record to read from
- *
- * This returns the event id from the @rec.
- */
-int tep_data_type(struct tep_handle *tep, struct tep_record *rec)
-{
- return trace_parse_common_type(tep, rec->data);
-}
-
-/**
- * tep_data_pid - parse the PID from record
- * @tep: a handle to the trace event parser context
- * @rec: the record to parse
- *
- * This returns the PID from a record.
- */
-int tep_data_pid(struct tep_handle *tep, struct tep_record *rec)
-{
- return parse_common_pid(tep, rec->data);
-}
-
-/**
- * tep_data_preempt_count - parse the preempt count from the record
- * @tep: a handle to the trace event parser context
- * @rec: the record to parse
- *
- * This returns the preempt count from a record.
- */
-int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec)
-{
- return parse_common_pc(tep, rec->data);
-}
-
-/**
- * tep_data_flags - parse the latency flags from the record
- * @tep: a handle to the trace event parser context
- * @rec: the record to parse
- *
- * This returns the latency flags from a record.
- *
- * Use trace_flag_type enum for the flags (see event-parse.h).
- */
-int tep_data_flags(struct tep_handle *tep, struct tep_record *rec)
-{
- return parse_common_flags(tep, rec->data);
-}
-
-/**
- * tep_data_comm_from_pid - return the command line from PID
- * @tep: a handle to the trace event parser context
- * @pid: the PID of the task to search for
- *
- * This returns a pointer to the command line that has the given
- * @pid.
- */
-const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid)
-{
- const char *comm;
-
- comm = find_cmdline(tep, pid);
- return comm;
-}
-
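A sketch that pulls the common fields out of a raw record with the helpers above, assuming tep and record come from elsewhere:

    int id    = tep_data_type(tep, record);
    int pid   = tep_data_pid(tep, record);
    int pc    = tep_data_preempt_count(tep, record);
    int flags = tep_data_flags(tep, record);
    const char *comm = tep_data_comm_from_pid(tep, pid);

    printf("%s-%d id=%d pc=%d flags=0x%x\n",
           comm ? comm : "<unknown>", pid, id, pc, flags);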
-static struct tep_cmdline *
-pid_from_cmdlist(struct tep_handle *tep, const char *comm, struct tep_cmdline *next)
-{
- struct cmdline_list *cmdlist = (struct cmdline_list *)next;
-
- if (cmdlist)
- cmdlist = cmdlist->next;
- else
- cmdlist = tep->cmdlist;
-
- while (cmdlist && strcmp(cmdlist->comm, comm) != 0)
- cmdlist = cmdlist->next;
-
- return (struct tep_cmdline *)cmdlist;
-}
-
-/**
- * tep_data_pid_from_comm - return the pid from a given comm
- * @tep: a handle to the trace event parser context
- * @comm: the cmdline to find the pid from
- * @next: the cmdline structure to find the next comm
- *
- * This returns the cmdline structure that holds a pid for a given
- * comm, or NULL if none found. As there may be more than one pid for
- * a given comm, the result of this call can be passed back into
- * a recurring call in the @next parameter, and then it will find the
- * next pid.
- * Also, it does a linear search, so it may be slow.
- */
-struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm,
- struct tep_cmdline *next)
-{
- struct tep_cmdline *cmdline;
-
- /*
- * If the cmdlines have not been converted yet, then use
- * the list.
- */
- if (!tep->cmdlines)
- return pid_from_cmdlist(tep, comm, next);
-
- if (next) {
- /*
- * The next pointer could still be from a previous
- * call made before the cmdlines array was created.
- */
- if (next < tep->cmdlines ||
- next >= tep->cmdlines + tep->cmdline_count)
- next = NULL;
- else
- cmdline = next++;
- }
-
- if (!next)
- cmdline = tep->cmdlines;
-
- while (cmdline < tep->cmdlines + tep->cmdline_count) {
- if (strcmp(cmdline->comm, comm) == 0)
- return cmdline;
- cmdline++;
- }
- return NULL;
-}
-
-/**
- * tep_cmdline_pid - return the pid associated to a given cmdline
- * @tep: a handle to the trace event parser context
- * @cmdline: The cmdline structure to get the pid from
- *
- * Returns the pid for a given cmdline. If @cmdline is NULL, then
- * -1 is returned.
- */
-int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline)
-{
- struct cmdline_list *cmdlist = (struct cmdline_list *)cmdline;
-
- if (!cmdline)
- return -1;
-
- /*
- * If cmdlines have not been created yet, or cmdline is
- * not part of the array, then treat it as a cmdlist instead.
- */
- if (!tep->cmdlines ||
- cmdline < tep->cmdlines ||
- cmdline >= tep->cmdlines + tep->cmdline_count)
- return cmdlist->pid;
-
- return cmdline->pid;
-}
-
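A sketch of the iteration pattern described above for tep_data_pid_from_comm() and tep_cmdline_pid(); the command name is illustrative:

    struct tep_cmdline *cmdline = NULL;

    while ((cmdline = tep_data_pid_from_comm(tep, "bash", cmdline)) != NULL)
            printf("bash pid: %d\n", tep_cmdline_pid(tep, cmdline));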
-/*
- * This parses the raw @data using the given @event information and
- * writes the print format into the trace_seq.
- */
-static void print_event_info(struct trace_seq *s, char *format, bool raw,
- struct tep_event *event, struct tep_record *record)
-{
- int print_pretty = 1;
-
- if (raw || (event->flags & TEP_EVENT_FL_PRINTRAW))
- tep_print_fields(s, record->data, record->size, event);
- else {
-
- if (event->handler && !(event->flags & TEP_EVENT_FL_NOHANDLE))
- print_pretty = event->handler(s, record, event,
- event->context);
-
- if (print_pretty)
- pretty_print(s, record->data, record->size, event);
- }
-
- trace_seq_terminate(s);
-}
-
-/**
- * tep_find_event_by_record - return the event from a given record
- * @tep: a handle to the trace event parser context
- * @record: The record to get the event from
- *
- * Returns the associated event for a given record, or NULL if none
- * is found.
- */
-struct tep_event *
-tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record)
-{
- int type;
-
- if (record->size < 0) {
- do_warning("ug! negative record size %d", record->size);
- return NULL;
- }
-
- type = trace_parse_common_type(tep, record->data);
-
- return tep_find_event(tep, type);
-}
-
-/*
- * Writes the timestamp of the record into @s. Time divisor and precision can be
- * specified as part of the printf @format string. Example:
- * "%3.1000d" - divide the time by 1000 and print the first 3 digits
- * before the dot. Thus, the timestamp "123456000" will be printed as
- * "123.456"
- */
-static void print_event_time(struct tep_handle *tep, struct trace_seq *s,
- char *format, struct tep_event *event,
- struct tep_record *record)
-{
- unsigned long long time;
- char *divstr;
- int prec = 0, pr;
- int div = 0;
- int p10 = 1;
-
- if (isdigit(*(format + 1)))
- prec = atoi(format + 1);
- divstr = strchr(format, '.');
- if (divstr && isdigit(*(divstr + 1)))
- div = atoi(divstr + 1);
- time = record->ts;
- if (div) {
- time += div / 2;
- time /= div;
- }
- pr = prec;
- while (pr--)
- p10 *= 10;
-
- if (p10 > 1 && p10 < time)
- trace_seq_printf(s, "%5llu.%0*llu", time / p10, prec, time % p10);
- else
- trace_seq_printf(s, "%12llu", time);
-}
-
-struct print_event_type {
- enum {
- EVENT_TYPE_INT = 1,
- EVENT_TYPE_STRING,
- EVENT_TYPE_UNKNOWN,
- } type;
- char format[32];
-};
-
-static void print_string(struct tep_handle *tep, struct trace_seq *s,
- struct tep_record *record, struct tep_event *event,
- const char *arg, struct print_event_type *type)
-{
- const char *comm;
- int pid;
-
- if (strncmp(arg, TEP_PRINT_LATENCY, strlen(TEP_PRINT_LATENCY)) == 0) {
- data_latency_format(tep, s, type->format, record);
- } else if (strncmp(arg, TEP_PRINT_COMM, strlen(TEP_PRINT_COMM)) == 0) {
- pid = parse_common_pid(tep, record->data);
- comm = find_cmdline(tep, pid);
- trace_seq_printf(s, type->format, comm);
- } else if (strncmp(arg, TEP_PRINT_INFO_RAW, strlen(TEP_PRINT_INFO_RAW)) == 0) {
- print_event_info(s, type->format, true, event, record);
- } else if (strncmp(arg, TEP_PRINT_INFO, strlen(TEP_PRINT_INFO)) == 0) {
- print_event_info(s, type->format, false, event, record);
- } else if (strncmp(arg, TEP_PRINT_NAME, strlen(TEP_PRINT_NAME)) == 0) {
- trace_seq_printf(s, type->format, event->name);
- } else {
- trace_seq_printf(s, "[UNKNOWN TEP TYPE %s]", arg);
- }
-
-}
-
-static void print_int(struct tep_handle *tep, struct trace_seq *s,
- struct tep_record *record, struct tep_event *event,
- int arg, struct print_event_type *type)
-{
- int param;
-
- switch (arg) {
- case TEP_PRINT_CPU:
- param = record->cpu;
- break;
- case TEP_PRINT_PID:
- param = parse_common_pid(tep, record->data);
- break;
- case TEP_PRINT_TIME:
- return print_event_time(tep, s, type->format, event, record);
- default:
- return;
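A sketch of the two event lookups, assuming the event formats have already been loaded into tep and a record is at hand; the system and event names are illustrative:

    /* By numeric id, for example the id parsed out of a record: */
    struct tep_event *ev = tep_find_event(tep, tep_data_type(tep, record));

    /* By system and name; pass NULL as the system to match any system. */
    if (!ev)
            ev = tep_find_event_by_name(tep, "sched", "sched_switch");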
- }
- trace_seq_printf(s, type->format, param);
-}
-
-static int tep_print_event_param_type(char *format,
- struct print_event_type *type)
-{
- char *str = format + 1;
- int i = 1;
-
- type->type = EVENT_TYPE_UNKNOWN;
- while (*str) {
- switch (*str) {
- case 'd':
- case 'u':
- case 'i':
- case 'x':
- case 'X':
- case 'o':
- type->type = EVENT_TYPE_INT;
- break;
- case 's':
- type->type = EVENT_TYPE_STRING;
- break;
- }
- str++;
- i++;
- if (type->type != EVENT_TYPE_UNKNOWN)
- break;
- }
- memset(type->format, 0, 32);
- memcpy(type->format, format, i < 32 ? i : 31);
- return i;
-}
-
-/**
- * tep_print_event - Write various event information
- * @tep: a handle to the trace event parser context
- * @s: the trace_seq to write to
- * @record: The record to get the event from
- * @fmt: a printf format string. Supported event fields:
- * TEP_PRINT_PID, "%d" - event PID
- * TEP_PRINT_CPU, "%d" - event CPU
- * TEP_PRINT_COMM, "%s" - event command string
- * TEP_PRINT_NAME, "%s" - event name
- * TEP_PRINT_LATENCY, "%s" - event latency
- * TEP_PRINT_TIME, %d - event time stamp. A divisor and precision
- * can be specified as part of this format string:
- * "%precision.divisord". Example:
- * "%3.1000d" - divide the time by 1000 and print the first
- * 3 digits before the dot. Thus, the time stamp
- * "123456000" will be printed as "123.456"
- * TEP_PRINT_INFO, "%s" - event information. If any width is specified in
- * the format string, the event information will be printed
- * in raw format.
- * Writes the specified event information into @s.
- */
-void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
- struct tep_record *record, const char *fmt, ...)
-{
- struct print_event_type type;
- char *format = strdup(fmt);
- char *current = format;
- char *str = format;
- int offset;
- va_list args;
- struct tep_event *event;
-
- if (!format)
- return;
-
- event = tep_find_event_by_record(tep, record);
- va_start(args, fmt);
- while (*current) {
- current = strchr(str, '%');
- if (!current) {
- trace_seq_puts(s, str);
- break;
- }
- memset(&type, 0, sizeof(type));
- offset = tep_print_event_param_type(current, &type);
- *current = '\0';
- trace_seq_puts(s, str);
- current += offset;
- switch (type.type) {
- case EVENT_TYPE_STRING:
- print_string(tep, s, record, event,
- va_arg(args, char*), &type);
- break;
- case EVENT_TYPE_INT:
- print_int(tep, s, record, event,
- va_arg(args, int), &type);
- break;
- case EVENT_TYPE_UNKNOWN:
- default:
- trace_seq_printf(s, "[UNKNOWN TYPE]");
- break;
- }
- str = current;
-
- }
- va_end(args);
- free(format);
-}
-
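A sketch of tep_print_event() using the format helpers documented above, assuming tep and record come from elsewhere; the layout roughly mimics the kernel's default trace output:

    struct trace_seq s;

    trace_seq_init(&s);
    tep_print_event(tep, &s, record, "%16s-%-5d [%03d] %6.1000d: %s: %s\n",
                    TEP_PRINT_COMM, TEP_PRINT_PID, TEP_PRINT_CPU,
                    TEP_PRINT_TIME, TEP_PRINT_NAME, TEP_PRINT_INFO);
    trace_seq_do_printf(&s);
    trace_seq_destroy(&s);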
-static int events_id_cmp(const void *a, const void *b)
-{
- struct tep_event * const * ea = a;
- struct tep_event * const * eb = b;
-
- if ((*ea)->id < (*eb)->id)
- return -1;
-
- if ((*ea)->id > (*eb)->id)
- return 1;
-
- return 0;
-}
-
-static int events_name_cmp(const void *a, const void *b)
-{
- struct tep_event * const * ea = a;
- struct tep_event * const * eb = b;
- int res;
-
- res = strcmp((*ea)->name, (*eb)->name);
- if (res)
- return res;
-
- res = strcmp((*ea)->system, (*eb)->system);
- if (res)
- return res;
-
- return events_id_cmp(a, b);
-}
-
-static int events_system_cmp(const void *a, const void *b)
-{
- struct tep_event * const * ea = a;
- struct tep_event * const * eb = b;
- int res;
-
- res = strcmp((*ea)->system, (*eb)->system);
- if (res)
- return res;
-
- res = strcmp((*ea)->name, (*eb)->name);
- if (res)
- return res;
-
- return events_id_cmp(a, b);
-}
-
-static struct tep_event **list_events_copy(struct tep_handle *tep)
-{
- struct tep_event **events;
-
- if (!tep)
- return NULL;
-
- events = malloc(sizeof(*events) * (tep->nr_events + 1));
- if (!events)
- return NULL;
-
- memcpy(events, tep->events, sizeof(*events) * tep->nr_events);
- events[tep->nr_events] = NULL;
- return events;
-}
-
-static void list_events_sort(struct tep_event **events, int nr_events,
- enum tep_event_sort_type sort_type)
-{
- int (*sort)(const void *a, const void *b);
-
- switch (sort_type) {
- case TEP_EVENT_SORT_ID:
- sort = events_id_cmp;
- break;
- case TEP_EVENT_SORT_NAME:
- sort = events_name_cmp;
- break;
- case TEP_EVENT_SORT_SYSTEM:
- sort = events_system_cmp;
- break;
- default:
- sort = NULL;
- }
-
- if (sort)
- qsort(events, nr_events, sizeof(*events), sort);
-}
-
-/**
- * tep_list_events - Get events, sorted by given criteria.
- * @tep: a handle to the tep context
- * @sort_type: desired sort order of the events in the array
- *
- * Returns an array of pointers to all events, sorted by the given
- * @sort_type criteria. The last element of the array is NULL. The returned
- * memory must not be freed, it is managed by the library.
- * The function is not thread safe.
- */
-struct tep_event **tep_list_events(struct tep_handle *tep,
- enum tep_event_sort_type sort_type)
-{
- struct tep_event **events;
-
- if (!tep)
- return NULL;
-
- events = tep->sort_events;
- if (events && tep->last_type == sort_type)
- return events;
-
- if (!events) {
- events = list_events_copy(tep);
- if (!events)
- return NULL;
-
- tep->sort_events = events;
-
- /* the internal events are sorted by id */
- if (sort_type == TEP_EVENT_SORT_ID) {
- tep->last_type = sort_type;
- return events;
- }
- }
-
- list_events_sort(events, tep->nr_events, sort_type);
- tep->last_type = sort_type;
-
- return events;
-}
-
-
-/**
- * tep_list_events_copy - Thread safe version of tep_list_events()
- * @tep: a handle to the tep context
- * @sort_type: desired sort order of the events in the array
- *
- * Returns an array of pointers to all events, sorted by the given
- * @sort_type criteria. The last element of the array is NULL. The returned
- * array is newly allocated inside the function and must be freed by the caller
- */
-struct tep_event **tep_list_events_copy(struct tep_handle *tep,
- enum tep_event_sort_type sort_type)
-{
- struct tep_event **events;
-
- if (!tep)
- return NULL;
-
- events = list_events_copy(tep);
- if (!events)
- return NULL;
-
- /* the internal events are sorted by id */
- if (sort_type == TEP_EVENT_SORT_ID)
- return events;
-
- list_events_sort(events, tep->nr_events, sort_type);
-
- return events;
-}
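
/*
 * A minimal usage sketch for the two listing APIs above (not part of the
 * deleted file); it assumes "tep" already has events parsed into it.  The
 * copy variant is the one to use from multiple threads and its array must
 * be freed by the caller, while the plain variant's array stays owned by
 * the library.
 */
static void example_list_by_name(struct tep_handle *tep)
{
	struct tep_event **events;
	int i;

	/* Library-owned array: do not free. */
	events = tep_list_events(tep, TEP_EVENT_SORT_NAME);
	for (i = 0; events && events[i]; i++)
		printf("%s:%s (%d)\n", events[i]->system,
		       events[i]->name, events[i]->id);

	/* Caller-owned copy: safe across threads, must be freed. */
	events = tep_list_events_copy(tep, TEP_EVENT_SORT_SYSTEM);
	free(events);
}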
-
-static struct tep_format_field **
-get_event_fields(const char *type, const char *name,
- int count, struct tep_format_field *list)
-{
- struct tep_format_field **fields;
- struct tep_format_field *field;
- int i = 0;
-
- fields = malloc(sizeof(*fields) * (count + 1));
- if (!fields)
- return NULL;
-
- for (field = list; field; field = field->next) {
- fields[i++] = field;
- if (i == count + 1) {
- do_warning("event %s has more %s fields than specified",
- name, type);
- i--;
- break;
- }
- }
-
- if (i != count)
- do_warning("event %s has less %s fields than specified",
- name, type);
-
- fields[i] = NULL;
-
- return fields;
-}
-
-/**
- * tep_event_common_fields - return a list of common fields for an event
- * @event: the event to return the common fields of.
- *
- * Returns an allocated array of fields. The last item in the array is NULL.
- * The array must be freed with free().
- */
-struct tep_format_field **tep_event_common_fields(struct tep_event *event)
-{
- return get_event_fields("common", event->name,
- event->format.nr_common,
- event->format.common_fields);
-}
-
-/**
- * tep_event_fields - return a list of event specific fields for an event
- * @event: the event to return the fields of.
- *
- * Returns an allocated array of fields. The last item in the array is NULL.
- * The array must be freed with free().
- */
-struct tep_format_field **tep_event_fields(struct tep_event *event)
-{
- return get_event_fields("event", event->name,
- event->format.nr_fields,
- event->format.fields);
-}
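
/*
 * A short sketch of walking the field arrays returned above (not part of
 * the deleted file); only the array itself is freed, the fields remain
 * owned by the event.
 */
static void example_dump_fields(struct tep_event *event)
{
	struct tep_format_field **fields;
	int i;

	fields = tep_event_fields(event);
	if (!fields)
		return;

	for (i = 0; fields[i]; i++)
		printf("\t%s %s;\toffset=%d size=%d\n", fields[i]->type,
		       fields[i]->name, fields[i]->offset, fields[i]->size);

	free(fields);	/* free only the array, not the fields */
}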
-
-static void print_fields(struct trace_seq *s, struct tep_print_flag_sym *field)
-{
- trace_seq_printf(s, "{ %s, %s }", field->value, field->str);
- if (field->next) {
- trace_seq_puts(s, ", ");
- print_fields(s, field->next);
- }
-}
-
-/* for debugging */
-static void print_args(struct tep_print_arg *args)
-{
- int print_paren = 1;
- struct trace_seq s;
-
- switch (args->type) {
- case TEP_PRINT_NULL:
- printf("null");
- break;
- case TEP_PRINT_ATOM:
- printf("%s", args->atom.atom);
- break;
- case TEP_PRINT_FIELD:
- printf("REC->%s", args->field.name);
- break;
- case TEP_PRINT_FLAGS:
- printf("__print_flags(");
- print_args(args->flags.field);
- printf(", %s, ", args->flags.delim);
- trace_seq_init(&s);
- print_fields(&s, args->flags.flags);
- trace_seq_do_printf(&s);
- trace_seq_destroy(&s);
- printf(")");
- break;
- case TEP_PRINT_SYMBOL:
- printf("__print_symbolic(");
- print_args(args->symbol.field);
- printf(", ");
- trace_seq_init(&s);
- print_fields(&s, args->symbol.symbols);
- trace_seq_do_printf(&s);
- trace_seq_destroy(&s);
- printf(")");
- break;
- case TEP_PRINT_HEX:
- printf("__print_hex(");
- print_args(args->hex.field);
- printf(", ");
- print_args(args->hex.size);
- printf(")");
- break;
- case TEP_PRINT_HEX_STR:
- printf("__print_hex_str(");
- print_args(args->hex.field);
- printf(", ");
- print_args(args->hex.size);
- printf(")");
- break;
- case TEP_PRINT_INT_ARRAY:
- printf("__print_array(");
- print_args(args->int_array.field);
- printf(", ");
- print_args(args->int_array.count);
- printf(", ");
- print_args(args->int_array.el_size);
- printf(")");
- break;
- case TEP_PRINT_STRING:
- case TEP_PRINT_BSTRING:
- printf("__get_str(%s)", args->string.string);
- break;
- case TEP_PRINT_BITMASK:
- printf("__get_bitmask(%s)", args->bitmask.bitmask);
- break;
- case TEP_PRINT_TYPE:
- printf("(%s)", args->typecast.type);
- print_args(args->typecast.item);
- break;
- case TEP_PRINT_OP:
- if (strcmp(args->op.op, ":") == 0)
- print_paren = 0;
- if (print_paren)
- printf("(");
- print_args(args->op.left);
- printf(" %s ", args->op.op);
- print_args(args->op.right);
- if (print_paren)
- printf(")");
- break;
- default:
- /* we should warn... */
- return;
- }
- if (args->next) {
- printf("\n");
- print_args(args->next);
- }
-}
-
-static void parse_header_field(const char *field,
- int *offset, int *size, int mandatory)
-{
- unsigned long long save_input_buf_ptr;
- unsigned long long save_input_buf_siz;
- char *token;
- int type;
-
- save_input_buf_ptr = input_buf_ptr;
- save_input_buf_siz = input_buf_siz;
-
- if (read_expected(TEP_EVENT_ITEM, "field") < 0)
- return;
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- return;
-
- /* type */
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto fail;
- free_token(token);
-
- /*
- * If this is not a mandatory field, then test it first.
- */
- if (mandatory) {
- if (read_expected(TEP_EVENT_ITEM, field) < 0)
- return;
- } else {
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto fail;
- if (strcmp(token, field) != 0)
- goto discard;
- free_token(token);
- }
-
- if (read_expected(TEP_EVENT_OP, ";") < 0)
- return;
- if (read_expected(TEP_EVENT_ITEM, "offset") < 0)
- return;
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- return;
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto fail;
- *offset = atoi(token);
- free_token(token);
- if (read_expected(TEP_EVENT_OP, ";") < 0)
- return;
- if (read_expected(TEP_EVENT_ITEM, "size") < 0)
- return;
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- return;
- if (read_expect_type(TEP_EVENT_ITEM, &token) < 0)
- goto fail;
- *size = atoi(token);
- free_token(token);
- if (read_expected(TEP_EVENT_OP, ";") < 0)
- return;
- type = read_token(&token);
- if (type != TEP_EVENT_NEWLINE) {
- /* newer versions of the kernel have a "signed" type */
- if (type != TEP_EVENT_ITEM)
- goto fail;
-
- if (strcmp(token, "signed") != 0)
- goto fail;
-
- free_token(token);
-
- if (read_expected(TEP_EVENT_OP, ":") < 0)
- return;
-
- if (read_expect_type(TEP_EVENT_ITEM, &token))
- goto fail;
-
- free_token(token);
- if (read_expected(TEP_EVENT_OP, ";") < 0)
- return;
-
- if (read_expect_type(TEP_EVENT_NEWLINE, &token))
- goto fail;
- }
- fail:
- free_token(token);
- return;
-
- discard:
- input_buf_ptr = save_input_buf_ptr;
- input_buf_siz = save_input_buf_siz;
- *offset = 0;
- *size = 0;
- free_token(token);
-}
-
-/**
- * tep_parse_header_page - parse the data stored in the header page
- * @tep: a handle to the trace event parser context
- * @buf: the buffer storing the header page format string
- * @size: the size of @buf
- * @long_size: the long size to use if there is no header
- *
- * This parses the header page format for information on the
- * ring buffer used. The @buf should be copied from
- *
- * /sys/kernel/debug/tracing/events/header_page
- */
-int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
- int long_size)
-{
- int ignore;
-
- if (!size) {
- /*
- * Old kernels did not have header page info.
- * Sorry but we just use what we find here in user space.
- */
- tep->header_page_ts_size = sizeof(long long);
- tep->header_page_size_size = long_size;
- tep->header_page_data_offset = sizeof(long long) + long_size;
- tep->old_format = 1;
- return -1;
- }
- init_input_buf(buf, size);
-
- parse_header_field("timestamp", &tep->header_page_ts_offset,
- &tep->header_page_ts_size, 1);
- parse_header_field("commit", &tep->header_page_size_offset,
- &tep->header_page_size_size, 1);
- parse_header_field("overwrite", &tep->header_page_overwrite,
- &ignore, 0);
- parse_header_field("data", &tep->header_page_data_offset,
- &tep->header_page_data_size, 1);
-
- return 0;
-}
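
/*
 * A plausible caller of tep_parse_header_page() (illustrative, not part of
 * the deleted file): the buffer is read from the header_page file named in
 * the comment above; the debugfs path and the fixed-size buffer are
 * assumptions, as is using the local machine's long size.
 */
static int example_load_header_page(struct tep_handle *tep)
{
	char buf[BUFSIZ];
	size_t size;
	FILE *fp;

	fp = fopen("/sys/kernel/debug/tracing/events/header_page", "r");
	if (!fp)
		return -1;

	size = fread(buf, 1, sizeof(buf), fp);
	fclose(fp);

	/* long size of the traced machine; here assume the local one */
	return tep_parse_header_page(tep, buf, size, sizeof(long));
}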
-
-static int event_matches(struct tep_event *event,
- int id, const char *sys_name,
- const char *event_name)
-{
- if (id >= 0 && id != event->id)
- return 0;
-
- if (event_name && (strcmp(event_name, event->name) != 0))
- return 0;
-
- if (sys_name && (strcmp(sys_name, event->system) != 0))
- return 0;
-
- return 1;
-}
-
-static void free_handler(struct event_handler *handle)
-{
- free((void *)handle->sys_name);
- free((void *)handle->event_name);
- free(handle);
-}
-
-static int find_event_handle(struct tep_handle *tep, struct tep_event *event)
-{
- struct event_handler *handle, **next;
-
- for (next = &tep->handlers; *next;
- next = &(*next)->next) {
- handle = *next;
- if (event_matches(event, handle->id,
- handle->sys_name,
- handle->event_name))
- break;
- }
-
- if (!(*next))
- return 0;
-
- pr_stat("overriding event (%d) %s:%s with new print handler",
- event->id, event->system, event->name);
-
- event->handler = handle->func;
- event->context = handle->context;
-
- *next = handle->next;
- free_handler(handle);
-
- return 1;
-}
-
-/**
- * __tep_parse_format - parse the event format
- * @eventp: returned event
- * @tep: a handle to the trace event parser context
- * @buf: the buffer storing the event format string
- * @size: the size of @buf
- * @sys: the system the event belongs to
- *
- * This parses the event format and creates an event structure
- * to quickly parse raw data for a given event.
- *
- * These files currently come from:
- *
- * /sys/kernel/debug/tracing/events/.../.../format
- */
-enum tep_errno __tep_parse_format(struct tep_event **eventp,
- struct tep_handle *tep, const char *buf,
- unsigned long size, const char *sys)
-{
- struct tep_event *event;
- int ret;
-
- init_input_buf(buf, size);
-
- *eventp = event = alloc_event();
- if (!event)
- return TEP_ERRNO__MEM_ALLOC_FAILED;
-
- event->name = event_read_name();
- if (!event->name) {
- /* Bad event? */
- ret = TEP_ERRNO__MEM_ALLOC_FAILED;
- goto event_alloc_failed;
- }
-
- if (strcmp(sys, "ftrace") == 0) {
- event->flags |= TEP_EVENT_FL_ISFTRACE;
-
- if (strcmp(event->name, "bprint") == 0)
- event->flags |= TEP_EVENT_FL_ISBPRINT;
- }
-
- event->id = event_read_id();
- if (event->id < 0) {
- ret = TEP_ERRNO__READ_ID_FAILED;
- /*
- * This isn't an allocation error actually.
- * But as the ID is critical, just bail out.
- */
- goto event_alloc_failed;
- }
-
- event->system = strdup(sys);
- if (!event->system) {
- ret = TEP_ERRNO__MEM_ALLOC_FAILED;
- goto event_alloc_failed;
- }
-
- /* Add tep to event so that it can be referenced */
- event->tep = tep;
-
- ret = event_read_format(event);
- if (ret < 0) {
- ret = TEP_ERRNO__READ_FORMAT_FAILED;
- goto event_parse_failed;
- }
-
- /*
- * If the event has an override, don't print warnings if the event
- * print format fails to parse.
- */
- if (tep && find_event_handle(tep, event))
- show_warning = 0;
-
- ret = event_read_print(event);
- show_warning = 1;
-
- if (ret < 0) {
- ret = TEP_ERRNO__READ_PRINT_FAILED;
- goto event_parse_failed;
- }
-
- if (!ret && (event->flags & TEP_EVENT_FL_ISFTRACE)) {
- struct tep_format_field *field;
- struct tep_print_arg *arg, **list;
-
- /* old ftrace had no args */
- list = &event->print_fmt.args;
- for (field = event->format.fields; field; field = field->next) {
- arg = alloc_arg();
- if (!arg) {
- event->flags |= TEP_EVENT_FL_FAILED;
- return TEP_ERRNO__OLD_FTRACE_ARG_FAILED;
- }
- arg->type = TEP_PRINT_FIELD;
- arg->field.name = strdup(field->name);
- if (!arg->field.name) {
- event->flags |= TEP_EVENT_FL_FAILED;
- free_arg(arg);
- return TEP_ERRNO__OLD_FTRACE_ARG_FAILED;
- }
- arg->field.field = field;
- *list = arg;
- list = &arg->next;
- }
- return 0;
- }
-
- return 0;
-
- event_parse_failed:
- event->flags |= TEP_EVENT_FL_FAILED;
- return ret;
-
- event_alloc_failed:
- free(event->system);
- free(event->name);
- free(event);
- *eventp = NULL;
- return ret;
-}
-
-static enum tep_errno
-__parse_event(struct tep_handle *tep,
- struct tep_event **eventp,
- const char *buf, unsigned long size,
- const char *sys)
-{
- int ret = __tep_parse_format(eventp, tep, buf, size, sys);
- struct tep_event *event = *eventp;
-
- if (event == NULL)
- return ret;
-
- if (tep && add_event(tep, event)) {
- ret = TEP_ERRNO__MEM_ALLOC_FAILED;
- goto event_add_failed;
- }
-
-#define PRINT_ARGS 0
- if (PRINT_ARGS && event->print_fmt.args)
- print_args(event->print_fmt.args);
-
- return 0;
-
-event_add_failed:
- tep_free_event(event);
- return ret;
-}
-
-/**
- * tep_parse_format - parse the event format
- * @tep: a handle to the trace event parser context
- * @eventp: pointer through which the parsed event is returned
- * @buf: the buffer storing the event format string
- * @size: the size of @buf
- * @sys: the system the event belongs to
- *
- * This parses the event format and creates an event structure
- * to quickly parse raw data for a given event.
- *
- * These files currently come from:
- *
- * /sys/kernel/debug/tracing/events/.../.../format
- */
-enum tep_errno tep_parse_format(struct tep_handle *tep,
- struct tep_event **eventp,
- const char *buf,
- unsigned long size, const char *sys)
-{
- return __parse_event(tep, eventp, buf, size, sys);
-}
-
-/**
- * tep_parse_event - parse the event format
- * @tep: a handle to the trace event parser context
- * @buf: the buffer storing the event format string
- * @size: the size of @buf
- * @sys: the system the event belongs to
- *
- * This parses the event format and creates an event structure
- * to quickly parse raw data for a given event.
- *
- * These files currently come from:
- *
- * /sys/kernel/debug/tracing/events/.../.../format
- */
-enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf,
- unsigned long size, const char *sys)
-{
- struct tep_event *event = NULL;
- return __parse_event(tep, &event, buf, size, sys);
-}
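
/*
 * A sketch of feeding one format file to the parser (not part of the
 * deleted file).  The buffer is assumed to hold the full contents of a
 * .../events/<sys>/<event>/format file; "sched" is only an example of the
 * system name and the error handling is minimal.
 */
static int example_parse_one_format(struct tep_handle *tep,
				    const char *buf, unsigned long size)
{
	enum tep_errno err;
	char msg[256];

	err = tep_parse_event(tep, buf, size, "sched");
	if (err != TEP_ERRNO__SUCCESS) {
		tep_strerror(tep, err, msg, sizeof(msg));
		fprintf(stderr, "parse failed: %s\n", msg);
		return -1;
	}
	return 0;
}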
-
-int get_field_val(struct trace_seq *s, struct tep_format_field *field,
- const char *name, struct tep_record *record,
- unsigned long long *val, int err)
-{
- if (!field) {
- if (err)
- trace_seq_printf(s, "<CANT FIND FIELD %s>", name);
- return -1;
- }
-
- if (tep_read_number_field(field, record->data, val)) {
- if (err)
- trace_seq_printf(s, " %s=INVALID", name);
- return -1;
- }
-
- return 0;
-}
-
-/**
- * tep_get_field_raw - return the raw pointer into the data field
- * @s: The seq to print to on error
- * @event: the event that the field is for
- * @name: The name of the field
- * @record: The record with the field name.
- * @len: place to store the field length.
- * @err: print default error if failed.
- *
- * Returns a pointer into record->data of the field and places
- * the length of the field in @len.
- *
- * On failure, it returns NULL.
- */
-void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
- const char *name, struct tep_record *record,
- int *len, int err)
-{
- struct tep_format_field *field;
- void *data = record->data;
- unsigned offset;
- int dummy;
-
- if (!event)
- return NULL;
-
- field = tep_find_field(event, name);
-
- if (!field) {
- if (err)
- trace_seq_printf(s, "<CANT FIND FIELD %s>", name);
- return NULL;
- }
-
- /* Allow @len to be NULL */
- if (!len)
- len = &dummy;
-
- offset = field->offset;
- if (field->flags & TEP_FIELD_IS_DYNAMIC) {
- offset = tep_read_number(event->tep,
- data + offset, field->size);
- *len = offset >> 16;
- offset &= 0xffff;
- } else
- *len = field->size;
-
- return data + offset;
-}
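
/*
 * A sketch of pulling a raw (possibly dynamic) field out of a record with
 * the helper above (not part of the deleted file); the "comm" field name
 * is only an example.
 */
static void example_raw_field(struct trace_seq *s, struct tep_event *event,
			      struct tep_record *record)
{
	int len;
	void *data;

	data = tep_get_field_raw(s, event, "comm", record, &len, 1);
	if (data)
		trace_seq_printf(s, "comm=%.*s", len, (char *)data);
}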
-
-/**
- * tep_get_field_val - find a field and return its value
- * @s: The seq to print to on error
- * @event: the event that the field is for
- * @name: The name of the field
- * @record: The record with the field name.
- * @val: place to store the value of the field.
- * @err: print default error if failed.
- *
- * Returns 0 on success, -1 if the field was not found.
- */
-int tep_get_field_val(struct trace_seq *s, struct tep_event *event,
- const char *name, struct tep_record *record,
- unsigned long long *val, int err)
-{
- struct tep_format_field *field;
-
- if (!event)
- return -1;
-
- field = tep_find_field(event, name);
-
- return get_field_val(s, field, name, record, val, err);
-}
-
-/**
- * tep_get_common_field_val - find a common field and return its value
- * @s: The seq to print to on error
- * @event: the event that the field is for
- * @name: The name of the field
- * @record: The record with the field name.
- * @val: place to store the value of the field.
- * @err: print default error if failed.
- *
- * Returns 0 on success, -1 if the field was not found.
- */
-int tep_get_common_field_val(struct trace_seq *s, struct tep_event *event,
- const char *name, struct tep_record *record,
- unsigned long long *val, int err)
-{
- struct tep_format_field *field;
-
- if (!event)
- return -1;
-
- field = tep_find_common_field(event, name);
-
- return get_field_val(s, field, name, record, val, err);
-}
-
-/**
- * tep_get_any_field_val - find any field and return its value
- * @s: The seq to print to on error
- * @event: the event that the field is for
- * @name: The name of the field
- * @record: The record with the field name.
- * @val: place to store the value of the field.
- * @err: print default error if failed.
- *
- * Returns 0 on success, -1 if the field was not found.
- */
-int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
- const char *name, struct tep_record *record,
- unsigned long long *val, int err)
-{
- struct tep_format_field *field;
-
- if (!event)
- return -1;
-
- field = tep_find_any_field(event, name);
-
- return get_field_val(s, field, name, record, val, err);
-}
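
/*
 * A sketch using the value helpers above (not part of the deleted file);
 * "common_pid" is a real common field, while "prev_pid" is just an example
 * of an event-specific one.
 */
static void example_field_vals(struct trace_seq *s, struct tep_event *event,
			       struct tep_record *record)
{
	unsigned long long pid, prev;

	/* searches the common fields shared by every event */
	if (tep_get_common_field_val(s, event, "common_pid", record, &pid, 1) == 0)
		trace_seq_printf(s, " pid=%llu", pid);

	/* searches the event-specific fields only */
	if (tep_get_field_val(s, event, "prev_pid", record, &prev, 0) == 0)
		trace_seq_printf(s, " prev_pid=%llu", prev);
}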
-
-/**
- * tep_print_num_field - print a field and a format
- * @s: The seq to print to
- * @fmt: The printf format to print the field with.
- * @event: the event that the field is for
- * @name: The name of the field
- * @record: The record with the field name.
- * @err: print default error if failed.
- *
- * Returns positive value on success, negative in case of an error,
- * or 0 if buffer is full.
- */
-int tep_print_num_field(struct trace_seq *s, const char *fmt,
- struct tep_event *event, const char *name,
- struct tep_record *record, int err)
-{
- struct tep_format_field *field = tep_find_field(event, name);
- unsigned long long val;
-
- if (!field)
- goto failed;
-
- if (tep_read_number_field(field, record->data, &val))
- goto failed;
-
- return trace_seq_printf(s, fmt, val);
-
- failed:
- if (err)
- trace_seq_printf(s, "CAN'T FIND FIELD \"%s\"", name);
- return -1;
-}
-
-/**
- * tep_print_func_field - print a field and a format for function pointers
- * @s: The seq to print to
- * @fmt: The printf format to print the field with.
- * @event: the event that the field is for
- * @name: The name of the field
- * @record: The record with the field name.
- * @err: print default error if failed.
- *
- * Returns positive value on success, negative in case of an error,
- * or 0 if buffer is full.
- */
-int tep_print_func_field(struct trace_seq *s, const char *fmt,
- struct tep_event *event, const char *name,
- struct tep_record *record, int err)
-{
- struct tep_format_field *field = tep_find_field(event, name);
- struct tep_handle *tep = event->tep;
- unsigned long long val;
- struct func_map *func;
- char tmp[128];
-
- if (!field)
- goto failed;
-
- if (tep_read_number_field(field, record->data, &val))
- goto failed;
-
- func = find_func(tep, val);
-
- if (func)
- snprintf(tmp, 128, "%s/0x%llx", func->func, func->addr - val);
- else
- sprintf(tmp, "0x%08llx", val);
-
- return trace_seq_printf(s, fmt, tmp);
-
- failed:
- if (err)
- trace_seq_printf(s, "CAN'T FIND FIELD \"%s\"", name);
- return -1;
-}
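
/*
 * A sketch of the two printing helpers above (not part of the deleted
 * file); the field names are illustrative and err=1 asks for the default
 * "CAN'T FIND FIELD" message on failure.
 */
static void example_print_fields(struct trace_seq *s, struct tep_event *event,
				 struct tep_record *record)
{
	/* the numeric value is passed to the format as unsigned long long */
	tep_print_num_field(s, " count=%llu", event, "count", record, 1);

	/* the resolved function name is passed to the format as a string */
	tep_print_func_field(s, " caller=%s", event, "ip", record, 1);
}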
-
-static void free_func_handle(struct tep_function_handler *func)
-{
- struct func_params *params;
-
- free(func->name);
-
- while (func->params) {
- params = func->params;
- func->params = params->next;
- free(params);
- }
-
- free(func);
-}
-
-/**
- * tep_register_print_function - register a helper function
- * @tep: a handle to the trace event parser context
- * @func: the function to process the helper function
- * @ret_type: the return type of the helper function
- * @name: the name of the helper function
- * @parameters: A list of enum tep_func_arg_type
- *
- * Some events may have helper functions in the print format arguments.
- * This allows a plugin to dynamically create a way to process one
- * of these functions.
- *
- * The @parameters is a variable list of tep_func_arg_type enums that
- * must end with TEP_FUNC_ARG_VOID.
- */
-int tep_register_print_function(struct tep_handle *tep,
- tep_func_handler func,
- enum tep_func_arg_type ret_type,
- char *name, ...)
-{
- struct tep_function_handler *func_handle;
- struct func_params **next_param;
- struct func_params *param;
- enum tep_func_arg_type type;
- va_list ap;
- int ret;
-
- func_handle = find_func_handler(tep, name);
- if (func_handle) {
- /*
- * This is most likely caused by the user's own
- * plugins updating the function. This overrides the
- * system defaults.
- */
- pr_stat("override of function helper '%s'", name);
- remove_func_handler(tep, name);
- }
-
- func_handle = calloc(1, sizeof(*func_handle));
- if (!func_handle) {
- do_warning("Failed to allocate function handler");
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
-
- func_handle->ret_type = ret_type;
- func_handle->name = strdup(name);
- func_handle->func = func;
- if (!func_handle->name) {
- do_warning("Failed to allocate function name");
- free(func_handle);
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
-
- next_param = &(func_handle->params);
- va_start(ap, name);
- for (;;) {
- type = va_arg(ap, enum tep_func_arg_type);
- if (type == TEP_FUNC_ARG_VOID)
- break;
-
- if (type >= TEP_FUNC_ARG_MAX_TYPES) {
- do_warning("Invalid argument type %d", type);
- ret = TEP_ERRNO__INVALID_ARG_TYPE;
- goto out_free;
- }
-
- param = malloc(sizeof(*param));
- if (!param) {
- do_warning("Failed to allocate function param");
- ret = TEP_ERRNO__MEM_ALLOC_FAILED;
- goto out_free;
- }
- param->type = type;
- param->next = NULL;
-
- *next_param = param;
- next_param = &(param->next);
-
- func_handle->nr_args++;
- }
- va_end(ap);
-
- func_handle->next = tep->func_handlers;
- tep->func_handlers = func_handle;
-
- return 0;
- out_free:
- va_end(ap);
- free_func_handle(func_handle);
- return ret;
-}
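
/*
 * A sketch of registering a print helper (not part of the deleted file).
 * The helper name "process_flags" and its behaviour are made up; what
 * matters is the argument list ending with TEP_FUNC_ARG_VOID.
 */
static unsigned long long example_process_flags(struct trace_seq *s,
						unsigned long long *args)
{
	trace_seq_printf(s, "0x%llx", args[0]);
	return args[0];
}

static int example_register_helper(struct tep_handle *tep)
{
	return tep_register_print_function(tep, example_process_flags,
					   TEP_FUNC_ARG_LONG,
					   "process_flags",
					   TEP_FUNC_ARG_LONG,
					   TEP_FUNC_ARG_VOID);
}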
-
-/**
- * tep_unregister_print_function - unregister a helper function
- * @tep: a handle to the trace event parser context
- * @func: the function to process the helper function
- * @name: the name of the helper function
- *
- * This function removes existing print handler for function @name.
- *
- * Returns 0 if the handler was removed successfully, -1 otherwise.
- */
-int tep_unregister_print_function(struct tep_handle *tep,
- tep_func_handler func, char *name)
-{
- struct tep_function_handler *func_handle;
-
- func_handle = find_func_handler(tep, name);
- if (func_handle && func_handle->func == func) {
- remove_func_handler(tep, name);
- return 0;
- }
- return -1;
-}
-
-static struct tep_event *search_event(struct tep_handle *tep, int id,
- const char *sys_name,
- const char *event_name)
-{
- struct tep_event *event;
-
- if (id >= 0) {
- /* search by id */
- event = tep_find_event(tep, id);
- if (!event)
- return NULL;
- if (event_name && (strcmp(event_name, event->name) != 0))
- return NULL;
- if (sys_name && (strcmp(sys_name, event->system) != 0))
- return NULL;
- } else {
- event = tep_find_event_by_name(tep, sys_name, event_name);
- if (!event)
- return NULL;
- }
- return event;
-}
-
-/**
- * tep_register_event_handler - register a way to parse an event
- * @tep: a handle to the trace event parser context
- * @id: the id of the event to register
- * @sys_name: the system name the event belongs to
- * @event_name: the name of the event
- * @func: the function to call to parse the event information
- * @context: the data to be passed to @func
- *
- * This function allows a developer to override the parsing of
- * a given event. If for some reason the default print format
- * is not sufficient, this function will register a function
- * for an event to be used to parse the data instead.
- *
- * If @id is >= 0, then it is used to find the event.
- * else @sys_name and @event_name are used.
- *
- * Returns:
- * TEP_REGISTER_SUCCESS_OVERWRITE if an existing handler is overwritten
- * TEP_REGISTER_SUCCESS if a new handler is registered successfully
- * negative TEP_ERRNO_... in case of an error
- *
- */
-int tep_register_event_handler(struct tep_handle *tep, int id,
- const char *sys_name, const char *event_name,
- tep_event_handler_func func, void *context)
-{
- struct tep_event *event;
- struct event_handler *handle;
-
- event = search_event(tep, id, sys_name, event_name);
- if (event == NULL)
- goto not_found;
-
- pr_stat("overriding event (%d) %s:%s with new print handler",
- event->id, event->system, event->name);
-
- event->handler = func;
- event->context = context;
- return TEP_REGISTER_SUCCESS_OVERWRITE;
-
- not_found:
- /* Save for later use. */
- handle = calloc(1, sizeof(*handle));
- if (!handle) {
- do_warning("Failed to allocate event handler");
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
-
- handle->id = id;
- if (event_name)
- handle->event_name = strdup(event_name);
- if (sys_name)
- handle->sys_name = strdup(sys_name);
-
- if ((event_name && !handle->event_name) ||
- (sys_name && !handle->sys_name)) {
- do_warning("Failed to allocate event/sys name");
- free((void *)handle->event_name);
- free((void *)handle->sys_name);
- free(handle);
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
-
- handle->func = func;
- handle->next = tep->handlers;
- tep->handlers = handle;
- handle->context = context;
-
- return TEP_REGISTER_SUCCESS;
-}
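
/*
 * A sketch of overriding how one event is printed (not part of the deleted
 * file); the handler body is intentionally trivial and the sched_switch
 * names are just an example of the sys/event pair.  Passing id == -1 makes
 * the lookup go by name, as documented above.
 */
static int example_sched_switch_handler(struct trace_seq *s,
					struct tep_record *record,
					struct tep_event *event, void *context)
{
	unsigned long long next_pid;

	if (tep_get_field_val(s, event, "next_pid", record, &next_pid, 1))
		return -1;

	trace_seq_printf(s, "next_pid=%llu", next_pid);
	return 0;
}

static int example_override_sched_switch(struct tep_handle *tep)
{
	return tep_register_event_handler(tep, -1, "sched", "sched_switch",
					  example_sched_switch_handler, NULL);
}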
-
-static int handle_matches(struct event_handler *handler, int id,
- const char *sys_name, const char *event_name,
- tep_event_handler_func func, void *context)
-{
- if (id >= 0 && id != handler->id)
- return 0;
-
- if (event_name && (strcmp(event_name, handler->event_name) != 0))
- return 0;
-
- if (sys_name && (strcmp(sys_name, handler->sys_name) != 0))
- return 0;
-
- if (func != handler->func || context != handler->context)
- return 0;
-
- return 1;
-}
-
-/**
- * tep_unregister_event_handler - unregister an existing event handler
- * @tep: a handle to the trace event parser context
- * @id: the id of the event to unregister
- * @sys_name: the system name the handler belongs to
- * @event_name: the name of the event handler
- * @func: the function to call to parse the event information
- * @context: the data to be passed to @func
- *
- * This function removes existing event handler (parser).
- *
- * If @id is >= 0, then it is used to find the event.
- * else @sys_name and @event_name are used.
- *
- * Returns 0 if handler was removed successfully, -1 if event was not found.
- */
-int tep_unregister_event_handler(struct tep_handle *tep, int id,
- const char *sys_name, const char *event_name,
- tep_event_handler_func func, void *context)
-{
- struct tep_event *event;
- struct event_handler *handle;
- struct event_handler **next;
-
- event = search_event(tep, id, sys_name, event_name);
- if (event == NULL)
- goto not_found;
-
- if (event->handler == func && event->context == context) {
- pr_stat("removing override handler for event (%d) %s:%s. Going back to default handler.",
- event->id, event->system, event->name);
-
- event->handler = NULL;
- event->context = NULL;
- return 0;
- }
-
-not_found:
- for (next = &tep->handlers; *next; next = &(*next)->next) {
- handle = *next;
- if (handle_matches(handle, id, sys_name, event_name,
- func, context))
- break;
- }
-
- if (!(*next))
- return -1;
-
- *next = handle->next;
- free_handler(handle);
-
- return 0;
-}
-
-/**
- * tep_alloc - create a tep handle
- */
-struct tep_handle *tep_alloc(void)
-{
- struct tep_handle *tep = calloc(1, sizeof(*tep));
-
- if (tep) {
- tep->ref_count = 1;
- tep->host_bigendian = tep_is_bigendian();
- }
-
- return tep;
-}
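
/*
 * Handle lifecycle in one place (a sketch, not part of the deleted file):
 * the handle starts with one reference, tep_ref() adds more, and the last
 * tep_unref()/tep_free() releases the data.
 */
static void example_handle_lifecycle(void)
{
	struct tep_handle *tep = tep_alloc();

	if (!tep)
		return;

	tep_ref(tep);		/* ref_count == 2 */
	tep_unref(tep);		/* back to 1 */
	tep_free(tep);		/* last reference: everything is released */
}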
-
-void tep_ref(struct tep_handle *tep)
-{
- tep->ref_count++;
-}
-
-int tep_get_ref(struct tep_handle *tep)
-{
- if (tep)
- return tep->ref_count;
- return 0;
-}
-
-void tep_free_format_field(struct tep_format_field *field)
-{
- free(field->type);
- if (field->alias != field->name)
- free(field->alias);
- free(field->name);
- free(field);
-}
-
-static void free_format_fields(struct tep_format_field *field)
-{
- struct tep_format_field *next;
-
- while (field) {
- next = field->next;
- tep_free_format_field(field);
- field = next;
- }
-}
-
-static void free_formats(struct tep_format *format)
-{
- free_format_fields(format->common_fields);
- free_format_fields(format->fields);
-}
-
-void tep_free_event(struct tep_event *event)
-{
- free(event->name);
- free(event->system);
-
- free_formats(&event->format);
-
- free(event->print_fmt.format);
- free_args(event->print_fmt.args);
-
- free(event);
-}
-
-/**
- * tep_free - free a tep handle
- * @tep: the tep handle to free
- */
-void tep_free(struct tep_handle *tep)
-{
- struct cmdline_list *cmdlist, *cmdnext;
- struct func_list *funclist, *funcnext;
- struct printk_list *printklist, *printknext;
- struct tep_function_handler *func_handler;
- struct event_handler *handle;
- int i;
-
- if (!tep)
- return;
-
- cmdlist = tep->cmdlist;
- funclist = tep->funclist;
- printklist = tep->printklist;
-
- tep->ref_count--;
- if (tep->ref_count)
- return;
-
- if (tep->cmdlines) {
- for (i = 0; i < tep->cmdline_count; i++)
- free(tep->cmdlines[i].comm);
- free(tep->cmdlines);
- }
-
- while (cmdlist) {
- cmdnext = cmdlist->next;
- free(cmdlist->comm);
- free(cmdlist);
- cmdlist = cmdnext;
- }
-
- if (tep->func_map) {
- for (i = 0; i < (int)tep->func_count; i++) {
- free(tep->func_map[i].func);
- free(tep->func_map[i].mod);
- }
- free(tep->func_map);
- }
-
- while (funclist) {
- funcnext = funclist->next;
- free(funclist->func);
- free(funclist->mod);
- free(funclist);
- funclist = funcnext;
- }
-
- while (tep->func_handlers) {
- func_handler = tep->func_handlers;
- tep->func_handlers = func_handler->next;
- free_func_handle(func_handler);
- }
-
- if (tep->printk_map) {
- for (i = 0; i < (int)tep->printk_count; i++)
- free(tep->printk_map[i].printk);
- free(tep->printk_map);
- }
-
- while (printklist) {
- printknext = printklist->next;
- free(printklist->printk);
- free(printklist);
- printklist = printknext;
- }
-
- for (i = 0; i < tep->nr_events; i++)
- tep_free_event(tep->events[i]);
-
- while (tep->handlers) {
- handle = tep->handlers;
- tep->handlers = handle->next;
- free_handler(handle);
- }
-
- free(tep->events);
- free(tep->sort_events);
- free(tep->func_resolver);
-
- free(tep);
-}
-
-void tep_unref(struct tep_handle *tep)
-{
- tep_free(tep);
-}
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
deleted file mode 100644
index b77837f75a0d..000000000000
--- a/tools/lib/traceevent/event-parse.h
+++ /dev/null
@@ -1,753 +0,0 @@
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#ifndef _PARSE_EVENTS_H
-#define _PARSE_EVENTS_H
-
-#include <stdbool.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <regex.h>
-#include <string.h>
-
-#include "trace-seq.h"
-
-#ifndef __maybe_unused
-#define __maybe_unused __attribute__((unused))
-#endif
-
-#ifndef DEBUG_RECORD
-#define DEBUG_RECORD 0
-#endif
-
-struct tep_record {
- unsigned long long ts;
- unsigned long long offset;
- long long missed_events; /* buffer dropped events before */
- int record_size; /* size of binary record */
- int size; /* size of data */
- void *data;
- int cpu;
- int ref_count;
- int locked; /* Do not free, even if ref_count is zero */
- void *priv;
-#if DEBUG_RECORD
- struct tep_record *prev;
- struct tep_record *next;
- long alloc_addr;
-#endif
-};
-
-/* ----------------------- tep ----------------------- */
-
-struct tep_handle;
-struct tep_event;
-
-typedef int (*tep_event_handler_func)(struct trace_seq *s,
- struct tep_record *record,
- struct tep_event *event,
- void *context);
-
-typedef int (*tep_plugin_load_func)(struct tep_handle *tep);
-typedef int (*tep_plugin_unload_func)(struct tep_handle *tep);
-
-struct tep_plugin_option {
- struct tep_plugin_option *next;
- void *handle;
- char *file;
- char *name;
- char *plugin_alias;
- char *description;
- const char *value;
- void *priv;
- int set;
-};
-
-/*
- * Plugin hooks that can be called:
- *
- * TEP_PLUGIN_LOADER: (required)
- * The function name to initialize the plugin.
- *
- * int TEP_PLUGIN_LOADER(struct tep_handle *tep)
- *
- * TEP_PLUGIN_UNLOADER: (optional)
- * The function called just before unloading
- *
- * int TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
- *
- * TEP_PLUGIN_OPTIONS: (optional)
- * Plugin options that can be set before loading
- *
- * struct tep_plugin_option TEP_PLUGIN_OPTIONS[] = {
- * {
- * .name = "option-name",
- * .plugin_alias = "override-file-name", (optional)
- * .description = "description of option to show users",
- * },
- * {
- * .name = NULL,
- * },
- * };
- *
- * Array must end with .name = NULL;
- *
- *
- * .plugin_alias is used to give a shorter name to access
- * the variable. Useful if a plugin handles more than one event.
- *
- * If .value is not set, then it is considered a boolean and only
- * .set will be processed. If .value is defined, then it is considered
- * a string option and .set will be ignored.
- *
- * TEP_PLUGIN_ALIAS: (optional)
- * The name to use for finding options (uses filename if not defined)
- */
-#define TEP_PLUGIN_LOADER tep_plugin_loader
-#define TEP_PLUGIN_UNLOADER tep_plugin_unloader
-#define TEP_PLUGIN_OPTIONS tep_plugin_options
-#define TEP_PLUGIN_ALIAS tep_plugin_alias
-#define _MAKE_STR(x) #x
-#define MAKE_STR(x) _MAKE_STR(x)
-#define TEP_PLUGIN_LOADER_NAME MAKE_STR(TEP_PLUGIN_LOADER)
-#define TEP_PLUGIN_UNLOADER_NAME MAKE_STR(TEP_PLUGIN_UNLOADER)
-#define TEP_PLUGIN_OPTIONS_NAME MAKE_STR(TEP_PLUGIN_OPTIONS)
-#define TEP_PLUGIN_ALIAS_NAME MAKE_STR(TEP_PLUGIN_ALIAS)
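
/*
 * A minimal plugin skeleton following the hooks described above (a sketch,
 * not shipped with the library); the handler and the "mysys"/"myevent"
 * names are placeholders.
 *
 *	static int my_handler(struct trace_seq *s, struct tep_record *record,
 *			      struct tep_event *event, void *context)
 *	{
 *		trace_seq_puts(s, "my event");
 *		return 0;
 *	}
 *
 *	int TEP_PLUGIN_LOADER(struct tep_handle *tep)
 *	{
 *		return tep_register_event_handler(tep, -1, "mysys", "myevent",
 *						  my_handler, NULL);
 *	}
 *
 *	int TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
 *	{
 *		return tep_unregister_event_handler(tep, -1, "mysys", "myevent",
 *						    my_handler, NULL);
 *	}
 */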
-
-enum tep_format_flags {
- TEP_FIELD_IS_ARRAY = 1,
- TEP_FIELD_IS_POINTER = 2,
- TEP_FIELD_IS_SIGNED = 4,
- TEP_FIELD_IS_STRING = 8,
- TEP_FIELD_IS_DYNAMIC = 16,
- TEP_FIELD_IS_LONG = 32,
- TEP_FIELD_IS_FLAG = 64,
- TEP_FIELD_IS_SYMBOLIC = 128,
-};
-
-struct tep_format_field {
- struct tep_format_field *next;
- struct tep_event *event;
- char *type;
- char *name;
- char *alias;
- int offset;
- int size;
- unsigned int arraylen;
- unsigned int elementsize;
- unsigned long flags;
-};
-
-struct tep_format {
- int nr_common;
- int nr_fields;
- struct tep_format_field *common_fields;
- struct tep_format_field *fields;
-};
-
-struct tep_print_arg_atom {
- char *atom;
-};
-
-struct tep_print_arg_string {
- char *string;
- int offset;
-};
-
-struct tep_print_arg_bitmask {
- char *bitmask;
- int offset;
-};
-
-struct tep_print_arg_field {
- char *name;
- struct tep_format_field *field;
-};
-
-struct tep_print_flag_sym {
- struct tep_print_flag_sym *next;
- char *value;
- char *str;
-};
-
-struct tep_print_arg_typecast {
- char *type;
- struct tep_print_arg *item;
-};
-
-struct tep_print_arg_flags {
- struct tep_print_arg *field;
- char *delim;
- struct tep_print_flag_sym *flags;
-};
-
-struct tep_print_arg_symbol {
- struct tep_print_arg *field;
- struct tep_print_flag_sym *symbols;
-};
-
-struct tep_print_arg_hex {
- struct tep_print_arg *field;
- struct tep_print_arg *size;
-};
-
-struct tep_print_arg_int_array {
- struct tep_print_arg *field;
- struct tep_print_arg *count;
- struct tep_print_arg *el_size;
-};
-
-struct tep_print_arg_dynarray {
- struct tep_format_field *field;
- struct tep_print_arg *index;
-};
-
-struct tep_print_arg;
-
-struct tep_print_arg_op {
- char *op;
- int prio;
- struct tep_print_arg *left;
- struct tep_print_arg *right;
-};
-
-struct tep_function_handler;
-
-struct tep_print_arg_func {
- struct tep_function_handler *func;
- struct tep_print_arg *args;
-};
-
-enum tep_print_arg_type {
- TEP_PRINT_NULL,
- TEP_PRINT_ATOM,
- TEP_PRINT_FIELD,
- TEP_PRINT_FLAGS,
- TEP_PRINT_SYMBOL,
- TEP_PRINT_HEX,
- TEP_PRINT_INT_ARRAY,
- TEP_PRINT_TYPE,
- TEP_PRINT_STRING,
- TEP_PRINT_BSTRING,
- TEP_PRINT_DYNAMIC_ARRAY,
- TEP_PRINT_OP,
- TEP_PRINT_FUNC,
- TEP_PRINT_BITMASK,
- TEP_PRINT_DYNAMIC_ARRAY_LEN,
- TEP_PRINT_HEX_STR,
-};
-
-struct tep_print_arg {
- struct tep_print_arg *next;
- enum tep_print_arg_type type;
- union {
- struct tep_print_arg_atom atom;
- struct tep_print_arg_field field;
- struct tep_print_arg_typecast typecast;
- struct tep_print_arg_flags flags;
- struct tep_print_arg_symbol symbol;
- struct tep_print_arg_hex hex;
- struct tep_print_arg_int_array int_array;
- struct tep_print_arg_func func;
- struct tep_print_arg_string string;
- struct tep_print_arg_bitmask bitmask;
- struct tep_print_arg_op op;
- struct tep_print_arg_dynarray dynarray;
- };
-};
-
-struct tep_print_fmt {
- char *format;
- struct tep_print_arg *args;
-};
-
-struct tep_event {
- struct tep_handle *tep;
- char *name;
- int id;
- int flags;
- struct tep_format format;
- struct tep_print_fmt print_fmt;
- char *system;
- tep_event_handler_func handler;
- void *context;
-};
-
-enum {
- TEP_EVENT_FL_ISFTRACE = 0x01,
- TEP_EVENT_FL_ISPRINT = 0x02,
- TEP_EVENT_FL_ISBPRINT = 0x04,
- TEP_EVENT_FL_ISFUNCENT = 0x10,
- TEP_EVENT_FL_ISFUNCRET = 0x20,
- TEP_EVENT_FL_NOHANDLE = 0x40,
- TEP_EVENT_FL_PRINTRAW = 0x80,
-
- TEP_EVENT_FL_FAILED = 0x80000000
-};
-
-enum tep_event_sort_type {
- TEP_EVENT_SORT_ID,
- TEP_EVENT_SORT_NAME,
- TEP_EVENT_SORT_SYSTEM,
-};
-
-enum tep_event_type {
- TEP_EVENT_ERROR,
- TEP_EVENT_NONE,
- TEP_EVENT_SPACE,
- TEP_EVENT_NEWLINE,
- TEP_EVENT_OP,
- TEP_EVENT_DELIM,
- TEP_EVENT_ITEM,
- TEP_EVENT_DQUOTE,
- TEP_EVENT_SQUOTE,
-};
-
-typedef unsigned long long (*tep_func_handler)(struct trace_seq *s,
- unsigned long long *args);
-
-enum tep_func_arg_type {
- TEP_FUNC_ARG_VOID,
- TEP_FUNC_ARG_INT,
- TEP_FUNC_ARG_LONG,
- TEP_FUNC_ARG_STRING,
- TEP_FUNC_ARG_PTR,
- TEP_FUNC_ARG_MAX_TYPES
-};
-
-enum tep_flag {
- TEP_NSEC_OUTPUT = 1, /* output in NSECS */
- TEP_DISABLE_SYS_PLUGINS = 1 << 1,
- TEP_DISABLE_PLUGINS = 1 << 2,
-};
-
-#define TEP_ERRORS \
- _PE(MEM_ALLOC_FAILED, "failed to allocate memory"), \
- _PE(PARSE_EVENT_FAILED, "failed to parse event"), \
- _PE(READ_ID_FAILED, "failed to read event id"), \
- _PE(READ_FORMAT_FAILED, "failed to read event format"), \
- _PE(READ_PRINT_FAILED, "failed to read event print fmt"), \
- _PE(OLD_FTRACE_ARG_FAILED,"failed to allocate field name for ftrace"),\
- _PE(INVALID_ARG_TYPE, "invalid argument type"), \
- _PE(INVALID_EXP_TYPE, "invalid expression type"), \
- _PE(INVALID_OP_TYPE, "invalid operator type"), \
- _PE(INVALID_EVENT_NAME, "invalid event name"), \
- _PE(EVENT_NOT_FOUND, "no event found"), \
- _PE(SYNTAX_ERROR, "syntax error"), \
- _PE(ILLEGAL_RVALUE, "illegal rvalue"), \
- _PE(ILLEGAL_LVALUE, "illegal lvalue for string comparison"), \
- _PE(INVALID_REGEX, "regex did not compute"), \
- _PE(ILLEGAL_STRING_CMP, "illegal comparison for string"), \
- _PE(ILLEGAL_INTEGER_CMP,"illegal comparison for integer"), \
- _PE(REPARENT_NOT_OP, "cannot reparent other than OP"), \
- _PE(REPARENT_FAILED, "failed to reparent filter OP"), \
- _PE(BAD_FILTER_ARG, "bad arg in filter tree"), \
- _PE(UNEXPECTED_TYPE, "unexpected type (not a value)"), \
- _PE(ILLEGAL_TOKEN, "illegal token"), \
- _PE(INVALID_PAREN, "open parenthesis cannot come here"), \
- _PE(UNBALANCED_PAREN, "unbalanced number of parenthesis"), \
- _PE(UNKNOWN_TOKEN, "unknown token"), \
- _PE(FILTER_NOT_FOUND, "no filter found"), \
- _PE(NOT_A_NUMBER, "must have number field"), \
- _PE(NO_FILTER, "no filters exists"), \
- _PE(FILTER_MISS, "record does not match to filter")
-
-#undef _PE
-#define _PE(__code, __str) TEP_ERRNO__ ## __code
-enum tep_errno {
- TEP_ERRNO__SUCCESS = 0,
- TEP_ERRNO__FILTER_MATCH = TEP_ERRNO__SUCCESS,
-
- /*
- * Choose an arbitrary negative big number not to clash with standard
- * errno since SUS requires the errno has distinct positive values.
- * See 'Issue 6' in the link below.
- *
- * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
- */
- __TEP_ERRNO__START = -100000,
-
- TEP_ERRORS,
-
- __TEP_ERRNO__END,
-};
-#undef _PE
-
-struct tep_plugin_list;
-
-#define INVALID_PLUGIN_LIST_OPTION ((char **)((unsigned long)-1))
-
-struct tep_plugin_list *tep_load_plugins(struct tep_handle *tep);
-void tep_unload_plugins(struct tep_plugin_list *plugin_list,
- struct tep_handle *tep);
-char **tep_plugin_list_options(void);
-void tep_plugin_free_options_list(char **list);
-int tep_plugin_add_options(const char *name,
- struct tep_plugin_option *options);
-void tep_plugin_remove_options(struct tep_plugin_option *options);
-void tep_print_plugins(struct trace_seq *s,
- const char *prefix, const char *suffix,
- const struct tep_plugin_list *list);
-
-/* tep_handle */
-typedef char *(tep_func_resolver_t)(void *priv,
- unsigned long long *addrp, char **modp);
-void tep_set_flag(struct tep_handle *tep, int flag);
-void tep_clear_flag(struct tep_handle *tep, enum tep_flag flag);
-bool tep_test_flag(struct tep_handle *tep, enum tep_flag flags);
-
-static inline int tep_is_bigendian(void)
-{
- unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
- unsigned int val;
-
- memcpy(&val, str, 4);
- return val == 0x01020304;
-}
-
-/* taken from kernel/trace/trace.h */
-enum trace_flag_type {
- TRACE_FLAG_IRQS_OFF = 0x01,
- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
- TRACE_FLAG_NEED_RESCHED = 0x04,
- TRACE_FLAG_HARDIRQ = 0x08,
- TRACE_FLAG_SOFTIRQ = 0x10,
-};
-
-int tep_set_function_resolver(struct tep_handle *tep,
- tep_func_resolver_t *func, void *priv);
-void tep_reset_function_resolver(struct tep_handle *tep);
-int tep_register_comm(struct tep_handle *tep, const char *comm, int pid);
-int tep_override_comm(struct tep_handle *tep, const char *comm, int pid);
-int tep_register_function(struct tep_handle *tep, char *name,
- unsigned long long addr, char *mod);
-int tep_register_print_string(struct tep_handle *tep, const char *fmt,
- unsigned long long addr);
-bool tep_is_pid_registered(struct tep_handle *tep, int pid);
-
-struct tep_event *tep_get_event(struct tep_handle *tep, int index);
-
-#define TEP_PRINT_INFO "INFO"
-#define TEP_PRINT_INFO_RAW "INFO_RAW"
-#define TEP_PRINT_COMM "COMM"
-#define TEP_PRINT_LATENCY "LATENCY"
-#define TEP_PRINT_NAME "NAME"
-#define TEP_PRINT_PID 1U
-#define TEP_PRINT_TIME 2U
-#define TEP_PRINT_CPU 3U
-
-void tep_print_event(struct tep_handle *tep, struct trace_seq *s,
- struct tep_record *record, const char *fmt, ...)
- __attribute__ ((format (printf, 4, 5)));
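
/*
 * One plausible call, mirroring the TEP_PRINT_* markers above (a sketch,
 * not part of the original header): "%s" consumes a string marker such as
 * TEP_PRINT_COMM or TEP_PRINT_INFO, while "%d" consumes an integer marker
 * such as TEP_PRINT_PID or TEP_PRINT_CPU.
 *
 *	tep_print_event(tep, &seq, &record, "%16s-%-5d [%03d] %s: %s\n",
 *			TEP_PRINT_COMM, TEP_PRINT_PID, TEP_PRINT_CPU,
 *			TEP_PRINT_NAME, TEP_PRINT_INFO);
 */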
-
-int tep_parse_header_page(struct tep_handle *tep, char *buf, unsigned long size,
- int long_size);
-
-enum tep_errno tep_parse_event(struct tep_handle *tep, const char *buf,
- unsigned long size, const char *sys);
-enum tep_errno tep_parse_format(struct tep_handle *tep,
- struct tep_event **eventp,
- const char *buf,
- unsigned long size, const char *sys);
-
-void *tep_get_field_raw(struct trace_seq *s, struct tep_event *event,
- const char *name, struct tep_record *record,
- int *len, int err);
-
-int tep_get_field_val(struct trace_seq *s, struct tep_event *event,
- const char *name, struct tep_record *record,
- unsigned long long *val, int err);
-int tep_get_common_field_val(struct trace_seq *s, struct tep_event *event,
- const char *name, struct tep_record *record,
- unsigned long long *val, int err);
-int tep_get_any_field_val(struct trace_seq *s, struct tep_event *event,
- const char *name, struct tep_record *record,
- unsigned long long *val, int err);
-
-int tep_print_num_field(struct trace_seq *s, const char *fmt,
- struct tep_event *event, const char *name,
- struct tep_record *record, int err);
-
-int tep_print_func_field(struct trace_seq *s, const char *fmt,
- struct tep_event *event, const char *name,
- struct tep_record *record, int err);
-
-enum tep_reg_handler {
- TEP_REGISTER_SUCCESS = 0,
- TEP_REGISTER_SUCCESS_OVERWRITE,
-};
-
-int tep_register_event_handler(struct tep_handle *tep, int id,
- const char *sys_name, const char *event_name,
- tep_event_handler_func func, void *context);
-int tep_unregister_event_handler(struct tep_handle *tep, int id,
- const char *sys_name, const char *event_name,
- tep_event_handler_func func, void *context);
-int tep_register_print_function(struct tep_handle *tep,
- tep_func_handler func,
- enum tep_func_arg_type ret_type,
- char *name, ...);
-int tep_unregister_print_function(struct tep_handle *tep,
- tep_func_handler func, char *name);
-
-struct tep_format_field *tep_find_common_field(struct tep_event *event, const char *name);
-struct tep_format_field *tep_find_field(struct tep_event *event, const char *name);
-struct tep_format_field *tep_find_any_field(struct tep_event *event, const char *name);
-
-const char *tep_find_function(struct tep_handle *tep, unsigned long long addr);
-unsigned long long
-tep_find_function_address(struct tep_handle *tep, unsigned long long addr);
-unsigned long long tep_read_number(struct tep_handle *tep, const void *ptr, int size);
-int tep_read_number_field(struct tep_format_field *field, const void *data,
- unsigned long long *value);
-
-struct tep_event *tep_get_first_event(struct tep_handle *tep);
-int tep_get_events_count(struct tep_handle *tep);
-struct tep_event *tep_find_event(struct tep_handle *tep, int id);
-
-struct tep_event *
-tep_find_event_by_name(struct tep_handle *tep, const char *sys, const char *name);
-struct tep_event *
-tep_find_event_by_record(struct tep_handle *tep, struct tep_record *record);
-
-int tep_data_type(struct tep_handle *tep, struct tep_record *rec);
-int tep_data_pid(struct tep_handle *tep, struct tep_record *rec);
-int tep_data_preempt_count(struct tep_handle *tep, struct tep_record *rec);
-int tep_data_flags(struct tep_handle *tep, struct tep_record *rec);
-const char *tep_data_comm_from_pid(struct tep_handle *tep, int pid);
-struct tep_cmdline;
-struct tep_cmdline *tep_data_pid_from_comm(struct tep_handle *tep, const char *comm,
- struct tep_cmdline *next);
-int tep_cmdline_pid(struct tep_handle *tep, struct tep_cmdline *cmdline);
-
-void tep_print_field(struct trace_seq *s, void *data,
- struct tep_format_field *field);
-void tep_print_fields(struct trace_seq *s, void *data,
- int size __maybe_unused, struct tep_event *event);
-int tep_strerror(struct tep_handle *tep, enum tep_errno errnum,
- char *buf, size_t buflen);
-
-struct tep_event **tep_list_events(struct tep_handle *tep, enum tep_event_sort_type);
-struct tep_event **tep_list_events_copy(struct tep_handle *tep,
- enum tep_event_sort_type);
-struct tep_format_field **tep_event_common_fields(struct tep_event *event);
-struct tep_format_field **tep_event_fields(struct tep_event *event);
-
-enum tep_endian {
- TEP_LITTLE_ENDIAN = 0,
- TEP_BIG_ENDIAN
-};
-int tep_get_cpus(struct tep_handle *tep);
-void tep_set_cpus(struct tep_handle *tep, int cpus);
-int tep_get_long_size(struct tep_handle *tep);
-void tep_set_long_size(struct tep_handle *tep, int long_size);
-int tep_get_page_size(struct tep_handle *tep);
-void tep_set_page_size(struct tep_handle *tep, int _page_size);
-bool tep_is_file_bigendian(struct tep_handle *tep);
-void tep_set_file_bigendian(struct tep_handle *tep, enum tep_endian endian);
-bool tep_is_local_bigendian(struct tep_handle *tep);
-void tep_set_local_bigendian(struct tep_handle *tep, enum tep_endian endian);
-int tep_get_header_page_size(struct tep_handle *tep);
-int tep_get_header_timestamp_size(struct tep_handle *tep);
-bool tep_is_old_format(struct tep_handle *tep);
-void tep_set_test_filters(struct tep_handle *tep, int test_filters);
-
-struct tep_handle *tep_alloc(void);
-void tep_free(struct tep_handle *tep);
-void tep_ref(struct tep_handle *tep);
-void tep_unref(struct tep_handle *tep);
-int tep_get_ref(struct tep_handle *tep);
-
-/* access to the internal parser */
-void tep_buffer_init(const char *buf, unsigned long long size);
-enum tep_event_type tep_read_token(char **tok);
-void tep_free_token(char *token);
-int tep_peek_char(void);
-const char *tep_get_input_buf(void);
-unsigned long long tep_get_input_buf_ptr(void);
-
-/* for debugging */
-void tep_print_funcs(struct tep_handle *tep);
-void tep_print_printk(struct tep_handle *tep);
-
-/* ----------------------- filtering ----------------------- */
-
-enum tep_filter_boolean_type {
- TEP_FILTER_FALSE,
- TEP_FILTER_TRUE,
-};
-
-enum tep_filter_op_type {
- TEP_FILTER_OP_AND = 1,
- TEP_FILTER_OP_OR,
- TEP_FILTER_OP_NOT,
-};
-
-enum tep_filter_cmp_type {
- TEP_FILTER_CMP_NONE,
- TEP_FILTER_CMP_EQ,
- TEP_FILTER_CMP_NE,
- TEP_FILTER_CMP_GT,
- TEP_FILTER_CMP_LT,
- TEP_FILTER_CMP_GE,
- TEP_FILTER_CMP_LE,
- TEP_FILTER_CMP_MATCH,
- TEP_FILTER_CMP_NOT_MATCH,
- TEP_FILTER_CMP_REGEX,
- TEP_FILTER_CMP_NOT_REGEX,
-};
-
-enum tep_filter_exp_type {
- TEP_FILTER_EXP_NONE,
- TEP_FILTER_EXP_ADD,
- TEP_FILTER_EXP_SUB,
- TEP_FILTER_EXP_MUL,
- TEP_FILTER_EXP_DIV,
- TEP_FILTER_EXP_MOD,
- TEP_FILTER_EXP_RSHIFT,
- TEP_FILTER_EXP_LSHIFT,
- TEP_FILTER_EXP_AND,
- TEP_FILTER_EXP_OR,
- TEP_FILTER_EXP_XOR,
- TEP_FILTER_EXP_NOT,
-};
-
-enum tep_filter_arg_type {
- TEP_FILTER_ARG_NONE,
- TEP_FILTER_ARG_BOOLEAN,
- TEP_FILTER_ARG_VALUE,
- TEP_FILTER_ARG_FIELD,
- TEP_FILTER_ARG_EXP,
- TEP_FILTER_ARG_OP,
- TEP_FILTER_ARG_NUM,
- TEP_FILTER_ARG_STR,
-};
-
-enum tep_filter_value_type {
- TEP_FILTER_NUMBER,
- TEP_FILTER_STRING,
- TEP_FILTER_CHAR
-};
-
-struct tep_filter_arg;
-
-struct tep_filter_arg_boolean {
- enum tep_filter_boolean_type value;
-};
-
-struct tep_filter_arg_field {
- struct tep_format_field *field;
-};
-
-struct tep_filter_arg_value {
- enum tep_filter_value_type type;
- union {
- char *str;
- unsigned long long val;
- };
-};
-
-struct tep_filter_arg_op {
- enum tep_filter_op_type type;
- struct tep_filter_arg *left;
- struct tep_filter_arg *right;
-};
-
-struct tep_filter_arg_exp {
- enum tep_filter_exp_type type;
- struct tep_filter_arg *left;
- struct tep_filter_arg *right;
-};
-
-struct tep_filter_arg_num {
- enum tep_filter_cmp_type type;
- struct tep_filter_arg *left;
- struct tep_filter_arg *right;
-};
-
-struct tep_filter_arg_str {
- enum tep_filter_cmp_type type;
- struct tep_format_field *field;
- char *val;
- char *buffer;
- regex_t reg;
-};
-
-struct tep_filter_arg {
- enum tep_filter_arg_type type;
- union {
- struct tep_filter_arg_boolean boolean;
- struct tep_filter_arg_field field;
- struct tep_filter_arg_value value;
- struct tep_filter_arg_op op;
- struct tep_filter_arg_exp exp;
- struct tep_filter_arg_num num;
- struct tep_filter_arg_str str;
- };
-};
-
-struct tep_filter_type {
- int event_id;
- struct tep_event *event;
- struct tep_filter_arg *filter;
-};
-
-#define TEP_FILTER_ERROR_BUFSZ 1024
-
-struct tep_event_filter {
- struct tep_handle *tep;
- int filters;
- struct tep_filter_type *event_filters;
- char error_buffer[TEP_FILTER_ERROR_BUFSZ];
-};
-
-struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep);
-
-/* for backward compatibility */
-#define FILTER_NONE TEP_ERRNO__NO_FILTER
-#define FILTER_NOEXIST TEP_ERRNO__FILTER_NOT_FOUND
-#define FILTER_MISS TEP_ERRNO__FILTER_MISS
-#define FILTER_MATCH TEP_ERRNO__FILTER_MATCH
-
-enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
- const char *filter_str);
-
-enum tep_errno tep_filter_match(struct tep_event_filter *filter,
- struct tep_record *record);
-
-int tep_filter_strerror(struct tep_event_filter *filter, enum tep_errno err,
- char *buf, size_t buflen);
-
-int tep_event_filtered(struct tep_event_filter *filter,
- int event_id);
-
-void tep_filter_reset(struct tep_event_filter *filter);
-
-void tep_filter_free(struct tep_event_filter *filter);
-
-char *tep_filter_make_string(struct tep_event_filter *filter, int event_id);
-
-int tep_filter_remove_event(struct tep_event_filter *filter,
- int event_id);
-
-int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *source);
-
-int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter *filter2);
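
/*
 * A rough end-to-end filter sketch (not part of the original header); the
 * exact filter string syntax comes from the library's parser, so treat
 * "sched/sched_switch:next_pid==0" as illustrative only.
 *
 *	struct tep_event_filter *filter = tep_filter_alloc(tep);
 *
 *	tep_filter_add_filter_str(filter, "sched/sched_switch:next_pid==0");
 *	if (tep_filter_match(filter, record) == TEP_ERRNO__FILTER_MATCH)
 *		;	// record passes the filter
 *	tep_filter_free(filter);
 */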
-
-#endif /* _PARSE_EVENTS_H */
diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
deleted file mode 100644
index e1f7ddd5a6cf..000000000000
--- a/tools/lib/traceevent/event-plugin.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-
-#include <ctype.h>
-#include <stdio.h>
-#include <string.h>
-#include <dlfcn.h>
-#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <dirent.h>
-#include "event-parse.h"
-#include "event-parse-local.h"
-#include "event-utils.h"
-#include "trace-seq.h"
-
-#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
-
-static struct registered_plugin_options {
- struct registered_plugin_options *next;
- struct tep_plugin_option *options;
-} *registered_options;
-
-static struct trace_plugin_options {
- struct trace_plugin_options *next;
- char *plugin;
- char *option;
- char *value;
-} *trace_plugin_options;
-
-struct tep_plugin_list {
- struct tep_plugin_list *next;
- char *name;
- void *handle;
-};
-
-static void lower_case(char *str)
-{
- if (!str)
- return;
- for (; *str; str++)
- *str = tolower(*str);
-}
-
-static int update_option_value(struct tep_plugin_option *op, const char *val)
-{
- char *op_val;
-
- if (!val) {
- /* toggle, only if option is boolean */
- if (op->value)
- /* Warn? */
- return 0;
- op->set ^= 1;
- return 0;
- }
-
- /*
- * If the option has a value then it takes a string
- * otherwise the option is a boolean.
- */
- if (op->value) {
- op->value = val;
- return 0;
- }
-
- /* Option is boolean, must be either "1", "0", "true" or "false" */
-
- op_val = strdup(val);
- if (!op_val)
- return -1;
- lower_case(op_val);
-
-	if (strcmp(op_val, "1") == 0 || strcmp(op_val, "true") == 0)
-		op->set = 1;
-	else if (strcmp(op_val, "0") == 0 || strcmp(op_val, "false") == 0)
-		op->set = 0;
- free(op_val);
-
- return 0;
-}
-
-/**
- * tep_plugin_list_options - get list of plugin options
- *
- * Returns an array of char strings that list the currently registered
- * plugin options in the format of <plugin>:<option>. This list can be
- * used to toggle those options.
- *
- * Returns NULL if there's no options registered. On error it returns
- * INVALID_PLUGIN_LIST_OPTION
- *
- * Must be freed with tep_plugin_free_options_list().
- */
-char **tep_plugin_list_options(void)
-{
- struct registered_plugin_options *reg;
- struct tep_plugin_option *op;
- char **list = NULL;
- char *name;
- int count = 0;
-
- for (reg = registered_options; reg; reg = reg->next) {
- for (op = reg->options; op->name; op++) {
- char *alias = op->plugin_alias ? op->plugin_alias : op->file;
- char **temp = list;
- int ret;
-
- ret = asprintf(&name, "%s:%s", alias, op->name);
- if (ret < 0)
- goto err;
-
-			list = realloc(list, sizeof(*list) * (count + 2));
- if (!list) {
- list = temp;
- free(name);
- goto err;
- }
- list[count++] = name;
- list[count] = NULL;
- }
- }
- return list;
-
- err:
- while (--count >= 0)
- free(list[count]);
- free(list);
-
- return INVALID_PLUGIN_LIST_OPTION;
-}
-
-void tep_plugin_free_options_list(char **list)
-{
- int i;
-
- if (!list)
- return;
-
- if (list == INVALID_PLUGIN_LIST_OPTION)
- return;
-
- for (i = 0; list[i]; i++)
- free(list[i]);
-
- free(list);
-}
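
/*
 * A small sketch of consuming the option list above (not part of the
 * deleted file): print every "<plugin>:<option>" string and release the
 * list with the matching free routine.
 */
static void example_show_plugin_options(void)
{
	char **list = tep_plugin_list_options();
	int i;

	if (!list || list == INVALID_PLUGIN_LIST_OPTION)
		return;

	for (i = 0; list[i]; i++)
		printf("%s\n", list[i]);

	tep_plugin_free_options_list(list);
}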
-
-static int
-update_option(const char *file, struct tep_plugin_option *option)
-{
- struct trace_plugin_options *op;
- char *plugin;
- int ret = 0;
-
- if (option->plugin_alias) {
- plugin = strdup(option->plugin_alias);
- if (!plugin)
- return -1;
- } else {
- char *p;
- plugin = strdup(file);
- if (!plugin)
- return -1;
- p = strstr(plugin, ".");
- if (p)
- *p = '\0';
- }
-
- /* first look for named options */
- for (op = trace_plugin_options; op; op = op->next) {
- if (!op->plugin)
- continue;
- if (strcmp(op->plugin, plugin) != 0)
- continue;
- if (strcmp(op->option, option->name) != 0)
- continue;
-
- ret = update_option_value(option, op->value);
- if (ret)
- goto out;
- break;
- }
-
-	/* then look for unnamed options */
- for (op = trace_plugin_options; op; op = op->next) {
- if (op->plugin)
- continue;
- if (strcmp(op->option, option->name) != 0)
- continue;
-
- ret = update_option_value(option, op->value);
- break;
- }
-
- out:
- free(plugin);
- return ret;
-}
-
-/**
- * tep_plugin_add_options - Add a set of options by a plugin
- * @name: The name of the plugin adding the options
- * @options: The set of options being loaded
- *
- * Sets the options with the values that have been added by user.
- */
-int tep_plugin_add_options(const char *name,
- struct tep_plugin_option *options)
-{
- struct registered_plugin_options *reg;
-
- reg = malloc(sizeof(*reg));
- if (!reg)
- return -1;
- reg->next = registered_options;
- reg->options = options;
- registered_options = reg;
-
- while (options->name) {
- update_option(name, options);
- options++;
- }
- return 0;
-}
-
-/**
- * tep_plugin_remove_options - remove plugin options that were registered
- * @options: Options to be removed that were registered with tep_plugin_add_options()
- */
-void tep_plugin_remove_options(struct tep_plugin_option *options)
-{
- struct registered_plugin_options **last;
- struct registered_plugin_options *reg;
-
- for (last = &registered_options; *last; last = &(*last)->next) {
- if ((*last)->options == options) {
- reg = *last;
- *last = reg->next;
- free(reg);
- return;
- }
- }
-}
-
-/**
- * tep_print_plugins - print out the list of plugins loaded
- * @s: the trace_seq descriptor to write to
- * @prefix: The prefix string to add before listing the plugin name
- * @suffix: The suffix string to append after the plugin name
- * @list: The list of plugins (usually returned by tep_load_plugins())
- *
- * Writes to the trace_seq @s the list of plugins (files) that is
- * returned by tep_load_plugins(). Use @prefix and @suffix for formatting:
- * @prefix = " ", @suffix = "\n".
- */
-void tep_print_plugins(struct trace_seq *s,
- const char *prefix, const char *suffix,
- const struct tep_plugin_list *list)
-{
- while (list) {
- trace_seq_printf(s, "%s%s%s", prefix, list->name, suffix);
- list = list->next;
- }
-}
-
-static void
-load_plugin(struct tep_handle *tep, const char *path,
- const char *file, void *data)
-{
- struct tep_plugin_list **plugin_list = data;
- tep_plugin_load_func func;
- struct tep_plugin_list *list;
- const char *alias;
- char *plugin;
- void *handle;
- int ret;
-
- ret = asprintf(&plugin, "%s/%s", path, file);
- if (ret < 0) {
- warning("could not allocate plugin memory\n");
- return;
- }
-
- handle = dlopen(plugin, RTLD_NOW | RTLD_GLOBAL);
- if (!handle) {
- warning("could not load plugin '%s'\n%s\n",
- plugin, dlerror());
- goto out_free;
- }
-
- alias = dlsym(handle, TEP_PLUGIN_ALIAS_NAME);
- if (!alias)
- alias = file;
-
- func = dlsym(handle, TEP_PLUGIN_LOADER_NAME);
- if (!func) {
- warning("could not find func '%s' in plugin '%s'\n%s\n",
- TEP_PLUGIN_LOADER_NAME, plugin, dlerror());
- goto out_free;
- }
-
- list = malloc(sizeof(*list));
- if (!list) {
- warning("could not allocate plugin memory\n");
- goto out_free;
- }
-
- list->next = *plugin_list;
- list->handle = handle;
- list->name = plugin;
- *plugin_list = list;
-
- pr_stat("registering plugin: %s", plugin);
- func(tep);
- return;
-
- out_free:
- free(plugin);
-}
-
-static void
-load_plugins_dir(struct tep_handle *tep, const char *suffix,
- const char *path,
- void (*load_plugin)(struct tep_handle *tep,
- const char *path,
- const char *name,
- void *data),
- void *data)
-{
- struct dirent *dent;
- struct stat st;
- DIR *dir;
- int ret;
-
- ret = stat(path, &st);
- if (ret < 0)
- return;
-
- if (!S_ISDIR(st.st_mode))
- return;
-
- dir = opendir(path);
- if (!dir)
- return;
-
- while ((dent = readdir(dir))) {
- const char *name = dent->d_name;
-
- if (strcmp(name, ".") == 0 ||
- strcmp(name, "..") == 0)
- continue;
-
- /* Only load plugins that end in suffix */
- if (strcmp(name + (strlen(name) - strlen(suffix)), suffix) != 0)
- continue;
-
- load_plugin(tep, path, name, data);
- }
-
- closedir(dir);
-}
-
-static void
-load_plugins(struct tep_handle *tep, const char *suffix,
- void (*load_plugin)(struct tep_handle *tep,
- const char *path,
- const char *name,
- void *data),
- void *data)
-{
- char *home;
- char *path;
- char *envdir;
- int ret;
-
- if (tep->flags & TEP_DISABLE_PLUGINS)
- return;
-
- /*
- * If a system plugin directory was defined,
- * check that first.
- */
-#ifdef PLUGIN_DIR
- if (!(tep->flags & TEP_DISABLE_SYS_PLUGINS))
- load_plugins_dir(tep, suffix, PLUGIN_DIR,
- load_plugin, data);
-#endif
-
- /*
- * Next let the environment-set plugin directory
- * override the system defaults.
- */
- envdir = getenv("TRACEEVENT_PLUGIN_DIR");
- if (envdir)
- load_plugins_dir(tep, suffix, envdir, load_plugin, data);
-
- /*
- * Now let the home directory override the environment
- * or system defaults.
- */
- home = getenv("HOME");
- if (!home)
- return;
-
- ret = asprintf(&path, "%s/%s", home, LOCAL_PLUGIN_DIR);
- if (ret < 0) {
- warning("could not allocate plugin memory\n");
- return;
- }
-
- load_plugins_dir(tep, suffix, path, load_plugin, data);
-
- free(path);
-}
-
-struct tep_plugin_list*
-tep_load_plugins(struct tep_handle *tep)
-{
- struct tep_plugin_list *list = NULL;
-
- load_plugins(tep, ".so", load_plugin, &list);
- return list;
-}
-
-void
-tep_unload_plugins(struct tep_plugin_list *plugin_list, struct tep_handle *tep)
-{
- tep_plugin_unload_func func;
- struct tep_plugin_list *list;
-
- while (plugin_list) {
- list = plugin_list;
- plugin_list = list->next;
- func = dlsym(list->handle, TEP_PLUGIN_UNLOADER_NAME);
- if (func)
- func(tep);
- dlclose(list->handle);
- free(list->name);
- free(list);
- }
-}
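
For reference, a minimal sketch of how the plugin interface removed above is
normally driven from an application, assuming the library's event-parse.h and
trace-seq.h headers are available (this sketch is illustrative only and not
part of the removed sources):

#include "event-parse.h"
#include "trace-seq.h"

int main(void)
{
	struct tep_plugin_list *plugins;
	struct tep_handle *tep = tep_alloc();
	struct trace_seq s;

	if (!tep)
		return 1;

	/* Scan the system, environment-set and $HOME plugin directories */
	plugins = tep_load_plugins(tep);

	/* Print one "  <plugin>\n" line per loaded plugin */
	trace_seq_init(&s);
	tep_print_plugins(&s, "  ", "\n", plugins);
	trace_seq_do_printf(&s);
	trace_seq_destroy(&s);

	tep_unload_plugins(plugins, tep);
	tep_free(tep);
	return 0;
}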
diff --git a/tools/lib/traceevent/event-utils.h b/tools/lib/traceevent/event-utils.h
deleted file mode 100644
index 0560b96a31d1..000000000000
--- a/tools/lib/traceevent/event-utils.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: LGPL-2.1 */
-/*
- * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-#ifndef __UTIL_H
-#define __UTIL_H
-
-#include <ctype.h>
-
-/* Can be overridden */
-void warning(const char *fmt, ...);
-void pr_stat(const char *fmt, ...);
-void vpr_stat(const char *fmt, va_list ap);
-
-/* Always available */
-void __warning(const char *fmt, ...);
-void __pr_stat(const char *fmt, ...);
-
-void __vwarning(const char *fmt, ...);
-void __vpr_stat(const char *fmt, ...);
-
-#define min(x, y) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- (void) (&_min1 == &_min2); \
- _min1 < _min2 ? _min1 : _min2; })
-
-static inline char *strim(char *string)
-{
- char *ret;
-
- if (!string)
- return NULL;
- while (*string) {
- if (!isspace(*string))
- break;
- string++;
- }
- ret = string;
-
- string = ret + strlen(ret) - 1;
- while (string > ret) {
- if (!isspace(*string))
- break;
- string--;
- }
- string[1] = 0;
-
- return ret;
-}
-
-static inline int has_text(const char *text)
-{
- if (!text)
- return 0;
-
- while (*text) {
- if (!isspace(*text))
- return 1;
- text++;
- }
-
- return 0;
-}
-
-#endif
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
deleted file mode 100644
index b887e7437d67..000000000000
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ /dev/null
@@ -1,778 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "kbuffer.h"
-
-#define MISSING_EVENTS (1UL << 31)
-#define MISSING_STORED (1UL << 30)
-
-#define COMMIT_MASK ((1 << 27) - 1)
-
-enum {
- KBUFFER_FL_HOST_BIG_ENDIAN = (1<<0),
- KBUFFER_FL_BIG_ENDIAN = (1<<1),
- KBUFFER_FL_LONG_8 = (1<<2),
- KBUFFER_FL_OLD_FORMAT = (1<<3),
-};
-
-#define ENDIAN_MASK (KBUFFER_FL_HOST_BIG_ENDIAN | KBUFFER_FL_BIG_ENDIAN)
-
-/** kbuffer
- * @timestamp - timestamp of current event
- * @lost_events - # of lost events between this subbuffer and previous
- * @flags - special flags of the kbuffer
- * @subbuffer - pointer to the sub-buffer page
- * @data - pointer to the start of data on the sub-buffer page
- * @index - index from @data to the @curr event data
- * @curr - offset from @data to the start of current event
- * (includes metadata)
- * @next - offset from @data to the start of next event
- * @size - The size of data on @data
- * @start - The offset from @subbuffer where @data lives
- *
- * @read_4 - Function to read 4 raw bytes (may swap)
- * @read_8 - Function to read 8 raw bytes (may swap)
- * @read_long - Function to read a long word (4 or 8 bytes with needed swap)
- */
-struct kbuffer {
- unsigned long long timestamp;
- long long lost_events;
- unsigned long flags;
- void *subbuffer;
- void *data;
- unsigned int index;
- unsigned int curr;
- unsigned int next;
- unsigned int size;
- unsigned int start;
-
- unsigned int (*read_4)(void *ptr);
- unsigned long long (*read_8)(void *ptr);
- unsigned long long (*read_long)(struct kbuffer *kbuf, void *ptr);
- int (*next_event)(struct kbuffer *kbuf);
-};
-
-static void *zmalloc(size_t size)
-{
- return calloc(1, size);
-}
-
-static int host_is_bigendian(void)
-{
- unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
- unsigned int *ptr;
-
- ptr = (unsigned int *)str;
- return *ptr == 0x01020304;
-}
-
-static int do_swap(struct kbuffer *kbuf)
-{
- return ((kbuf->flags & KBUFFER_FL_HOST_BIG_ENDIAN) + kbuf->flags) &
- ENDIAN_MASK;
-}
-
-static unsigned long long __read_8(void *ptr)
-{
- unsigned long long data = *(unsigned long long *)ptr;
-
- return data;
-}
-
-static unsigned long long __read_8_sw(void *ptr)
-{
- unsigned long long data = *(unsigned long long *)ptr;
- unsigned long long swap;
-
- swap = ((data & 0xffULL) << 56) |
- ((data & (0xffULL << 8)) << 40) |
- ((data & (0xffULL << 16)) << 24) |
- ((data & (0xffULL << 24)) << 8) |
- ((data & (0xffULL << 32)) >> 8) |
- ((data & (0xffULL << 40)) >> 24) |
- ((data & (0xffULL << 48)) >> 40) |
- ((data & (0xffULL << 56)) >> 56);
-
- return swap;
-}
-
-static unsigned int __read_4(void *ptr)
-{
- unsigned int data = *(unsigned int *)ptr;
-
- return data;
-}
-
-static unsigned int __read_4_sw(void *ptr)
-{
- unsigned int data = *(unsigned int *)ptr;
- unsigned int swap;
-
- swap = ((data & 0xffULL) << 24) |
- ((data & (0xffULL << 8)) << 8) |
- ((data & (0xffULL << 16)) >> 8) |
- ((data & (0xffULL << 24)) >> 24);
-
- return swap;
-}
-
-static unsigned long long read_8(struct kbuffer *kbuf, void *ptr)
-{
- return kbuf->read_8(ptr);
-}
-
-static unsigned int read_4(struct kbuffer *kbuf, void *ptr)
-{
- return kbuf->read_4(ptr);
-}
-
-static unsigned long long __read_long_8(struct kbuffer *kbuf, void *ptr)
-{
- return kbuf->read_8(ptr);
-}
-
-static unsigned long long __read_long_4(struct kbuffer *kbuf, void *ptr)
-{
- return kbuf->read_4(ptr);
-}
-
-static unsigned long long read_long(struct kbuffer *kbuf, void *ptr)
-{
- return kbuf->read_long(kbuf, ptr);
-}
-
-static int calc_index(struct kbuffer *kbuf, void *ptr)
-{
- return (unsigned long)ptr - (unsigned long)kbuf->data;
-}
-
-static int __next_event(struct kbuffer *kbuf);
-
-/**
- * kbuffer_alloc - allocate a new kbuffer
- * @size: enum to denote size of word
- * @endian: enum to denote endianness
- *
- * Allocates and returns a new kbuffer.
- */
-struct kbuffer *
-kbuffer_alloc(enum kbuffer_long_size size, enum kbuffer_endian endian)
-{
- struct kbuffer *kbuf;
- int flags = 0;
-
- switch (size) {
- case KBUFFER_LSIZE_4:
- break;
- case KBUFFER_LSIZE_8:
- flags |= KBUFFER_FL_LONG_8;
- break;
- default:
- return NULL;
- }
-
- switch (endian) {
- case KBUFFER_ENDIAN_LITTLE:
- break;
- case KBUFFER_ENDIAN_BIG:
- flags |= KBUFFER_FL_BIG_ENDIAN;
- break;
- default:
- return NULL;
- }
-
- kbuf = zmalloc(sizeof(*kbuf));
- if (!kbuf)
- return NULL;
-
- kbuf->flags = flags;
-
- if (host_is_bigendian())
- kbuf->flags |= KBUFFER_FL_HOST_BIG_ENDIAN;
-
- if (do_swap(kbuf)) {
- kbuf->read_8 = __read_8_sw;
- kbuf->read_4 = __read_4_sw;
- } else {
- kbuf->read_8 = __read_8;
- kbuf->read_4 = __read_4;
- }
-
- if (kbuf->flags & KBUFFER_FL_LONG_8)
- kbuf->read_long = __read_long_8;
- else
- kbuf->read_long = __read_long_4;
-
- /* May be changed by kbuffer_set_old_format() */
- kbuf->next_event = __next_event;
-
- return kbuf;
-}
-
-/** kbuffer_free - free an allocated kbuffer
- * @kbuf: The kbuffer to free
- *
- * Can take NULL as a parameter.
- */
-void kbuffer_free(struct kbuffer *kbuf)
-{
- free(kbuf);
-}
-
-static unsigned int type4host(struct kbuffer *kbuf,
- unsigned int type_len_ts)
-{
- if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
- return (type_len_ts >> 29) & 3;
- else
- return type_len_ts & 3;
-}
-
-static unsigned int len4host(struct kbuffer *kbuf,
- unsigned int type_len_ts)
-{
- if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
- return (type_len_ts >> 27) & 7;
- else
- return (type_len_ts >> 2) & 7;
-}
-
-static unsigned int type_len4host(struct kbuffer *kbuf,
- unsigned int type_len_ts)
-{
- if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
- return (type_len_ts >> 27) & ((1 << 5) - 1);
- else
- return type_len_ts & ((1 << 5) - 1);
-}
-
-static unsigned int ts4host(struct kbuffer *kbuf,
- unsigned int type_len_ts)
-{
- if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
- return type_len_ts & ((1 << 27) - 1);
- else
- return type_len_ts >> 5;
-}
-
-/*
- * Linux 2.6.30 and earlier (not much earlier) had a different
- * ring buffer format. It should be obsolete, but we handle it anyway.
- */
-enum old_ring_buffer_type {
- OLD_RINGBUF_TYPE_PADDING,
- OLD_RINGBUF_TYPE_TIME_EXTEND,
- OLD_RINGBUF_TYPE_TIME_STAMP,
- OLD_RINGBUF_TYPE_DATA,
-};
-
-static unsigned int old_update_pointers(struct kbuffer *kbuf)
-{
- unsigned long long extend;
- unsigned int type_len_ts;
- unsigned int type;
- unsigned int len;
- unsigned int delta;
- unsigned int length;
- void *ptr = kbuf->data + kbuf->curr;
-
- type_len_ts = read_4(kbuf, ptr);
- ptr += 4;
-
- type = type4host(kbuf, type_len_ts);
- len = len4host(kbuf, type_len_ts);
- delta = ts4host(kbuf, type_len_ts);
-
- switch (type) {
- case OLD_RINGBUF_TYPE_PADDING:
- kbuf->next = kbuf->size;
- return 0;
-
- case OLD_RINGBUF_TYPE_TIME_EXTEND:
- extend = read_4(kbuf, ptr);
- extend <<= TS_SHIFT;
- extend += delta;
- delta = extend;
- ptr += 4;
- length = 0;
- break;
-
- case OLD_RINGBUF_TYPE_TIME_STAMP:
- /* should never happen! */
- kbuf->curr = kbuf->size;
- kbuf->next = kbuf->size;
- kbuf->index = kbuf->size;
- return -1;
- default:
- if (len)
- length = len * 4;
- else {
- length = read_4(kbuf, ptr);
- length -= 4;
- ptr += 4;
- }
- break;
- }
-
- kbuf->timestamp += delta;
- kbuf->index = calc_index(kbuf, ptr);
- kbuf->next = kbuf->index + length;
-
- return type;
-}
-
-static int __old_next_event(struct kbuffer *kbuf)
-{
- int type;
-
- do {
- kbuf->curr = kbuf->next;
- if (kbuf->next >= kbuf->size)
- return -1;
- type = old_update_pointers(kbuf);
- } while (type == OLD_RINGBUF_TYPE_TIME_EXTEND || type == OLD_RINGBUF_TYPE_PADDING);
-
- return 0;
-}
-
-static unsigned int
-translate_data(struct kbuffer *kbuf, void *data, void **rptr,
- unsigned long long *delta, int *length)
-{
- unsigned long long extend;
- unsigned int type_len_ts;
- unsigned int type_len;
-
- type_len_ts = read_4(kbuf, data);
- data += 4;
-
- type_len = type_len4host(kbuf, type_len_ts);
- *delta = ts4host(kbuf, type_len_ts);
-
- switch (type_len) {
- case KBUFFER_TYPE_PADDING:
- *length = read_4(kbuf, data);
- break;
-
- case KBUFFER_TYPE_TIME_EXTEND:
- extend = read_4(kbuf, data);
- data += 4;
- extend <<= TS_SHIFT;
- extend += *delta;
- *delta = extend;
- *length = 0;
- break;
-
- case KBUFFER_TYPE_TIME_STAMP:
- data += 12;
- *length = 0;
- break;
- case 0:
- *length = read_4(kbuf, data) - 4;
- *length = (*length + 3) & ~3;
- data += 4;
- break;
- default:
- *length = type_len * 4;
- break;
- }
-
- *rptr = data;
-
- return type_len;
-}
-
-static unsigned int update_pointers(struct kbuffer *kbuf)
-{
- unsigned long long delta;
- unsigned int type_len;
- int length;
- void *ptr = kbuf->data + kbuf->curr;
-
- type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
-
- kbuf->timestamp += delta;
- kbuf->index = calc_index(kbuf, ptr);
- kbuf->next = kbuf->index + length;
-
- return type_len;
-}
-
-/**
- * kbuffer_translate_data - read raw data to get a record
- * @swap: Set to 1 if bytes in words need to be swapped when read
- * @data: The raw data to read
- * @size: Address to store the size of the event data.
- *
- * Returns a pointer to the event data. To determine the entire
- * record size (record metadata + data) just add the difference between
- * @data and the returned value to @size.
- */
-void *kbuffer_translate_data(int swap, void *data, unsigned int *size)
-{
- unsigned long long delta;
- struct kbuffer kbuf;
- int type_len;
- int length;
- void *ptr;
-
- if (swap) {
- kbuf.read_8 = __read_8_sw;
- kbuf.read_4 = __read_4_sw;
- kbuf.flags = host_is_bigendian() ? 0 : KBUFFER_FL_BIG_ENDIAN;
- } else {
- kbuf.read_8 = __read_8;
- kbuf.read_4 = __read_4;
- kbuf.flags = host_is_bigendian() ? KBUFFER_FL_BIG_ENDIAN: 0;
- }
-
- type_len = translate_data(&kbuf, data, &ptr, &delta, &length);
- switch (type_len) {
- case KBUFFER_TYPE_PADDING:
- case KBUFFER_TYPE_TIME_EXTEND:
- case KBUFFER_TYPE_TIME_STAMP:
- return NULL;
- };
-
- *size = length;
-
- return ptr;
-}
-
-static int __next_event(struct kbuffer *kbuf)
-{
- int type;
-
- do {
- kbuf->curr = kbuf->next;
- if (kbuf->next >= kbuf->size)
- return -1;
- type = update_pointers(kbuf);
- } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING);
-
- return 0;
-}
-
-static int next_event(struct kbuffer *kbuf)
-{
- return kbuf->next_event(kbuf);
-}
-
-/**
- * kbuffer_next_event - increment the current pointer
- * @kbuf: The kbuffer to read
- * @ts: Address to store the next record's timestamp (may be NULL to ignore)
- *
- * Increments the pointers into the subbuffer of the kbuffer to point to the
- * next event so that the next kbuffer_read_event() will return a
- * new event.
- *
- * Returns the data of the next event if a new event exists on the subbuffer,
- * NULL otherwise.
- */
-void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts)
-{
- int ret;
-
- if (!kbuf || !kbuf->subbuffer)
- return NULL;
-
- ret = next_event(kbuf);
- if (ret < 0)
- return NULL;
-
- if (ts)
- *ts = kbuf->timestamp;
-
- return kbuf->data + kbuf->index;
-}
-
-/**
- * kbuffer_load_subbuffer - load a new subbuffer into the kbuffer
- * @kbuf: The kbuffer to load
- * @subbuffer: The subbuffer to load into @kbuf.
- *
- * Load a new subbuffer (page) into @kbuf. This will reset all
- * the pointers and update the @kbuf timestamp. The next read will
- * return the first event on @subbuffer.
- *
- * Returns 0 on success, -1 otherwise.
- */
-int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer)
-{
- unsigned long long flags;
- void *ptr = subbuffer;
-
- if (!kbuf || !subbuffer)
- return -1;
-
- kbuf->subbuffer = subbuffer;
-
- kbuf->timestamp = read_8(kbuf, ptr);
- ptr += 8;
-
- kbuf->curr = 0;
-
- if (kbuf->flags & KBUFFER_FL_LONG_8)
- kbuf->start = 16;
- else
- kbuf->start = 12;
-
- kbuf->data = subbuffer + kbuf->start;
-
- flags = read_long(kbuf, ptr);
- kbuf->size = (unsigned int)flags & COMMIT_MASK;
-
- if (flags & MISSING_EVENTS) {
- if (flags & MISSING_STORED) {
- ptr = kbuf->data + kbuf->size;
- kbuf->lost_events = read_long(kbuf, ptr);
- } else
- kbuf->lost_events = -1;
- } else
- kbuf->lost_events = 0;
-
- kbuf->index = 0;
- kbuf->next = 0;
-
- next_event(kbuf);
-
- return 0;
-}
-
-/**
- * kbuffer_read_event - read the next event in the kbuffer subbuffer
- * @kbuf: The kbuffer to read from
- * @ts: The address to store the timestamp of the event (may be NULL to ignore)
- *
- * Returns a pointer to the data part of the current event.
- * NULL if no event is left on the subbuffer.
- */
-void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts)
-{
- if (!kbuf || !kbuf->subbuffer)
- return NULL;
-
- if (kbuf->curr >= kbuf->size)
- return NULL;
-
- if (ts)
- *ts = kbuf->timestamp;
- return kbuf->data + kbuf->index;
-}
-
-/**
- * kbuffer_timestamp - Return the timestamp of the current event
- * @kbuf: The kbuffer to read from
- *
- * Returns the timestamp of the current (next) event.
- */
-unsigned long long kbuffer_timestamp(struct kbuffer *kbuf)
-{
- return kbuf->timestamp;
-}
-
-/**
- * kbuffer_read_at_offset - read the event that is at offset
- * @kbuf: The kbuffer to read from
- * @offset: The offset into the subbuffer
- * @ts: The address to store the timestamp of the event (may be NULL to ignore)
- *
- * The @offset must be an index from the @kbuf subbuffer beginning.
- * If @offset is bigger than the stored subbuffer, NULL will be returned.
- *
- * Returns the data of the record that is at @offset. Note, @offset does
- * not need to be the start of the record, the offset just needs to be
- * in the record (or beginning of it).
- *
- * Note, the kbuf timestamp and pointers are updated to the
- * returned record. That is, kbuffer_read_event() will return the same
- * data and timestamp, and kbuffer_next_event() will increment from
- * this record.
- */
-void *kbuffer_read_at_offset(struct kbuffer *kbuf, int offset,
- unsigned long long *ts)
-{
- void *data;
-
- if (offset < kbuf->start)
- offset = 0;
- else
- offset -= kbuf->start;
-
- /* Reset the buffer */
- kbuffer_load_subbuffer(kbuf, kbuf->subbuffer);
- data = kbuffer_read_event(kbuf, ts);
-
- while (kbuf->curr < offset) {
- data = kbuffer_next_event(kbuf, ts);
- if (!data)
- break;
- }
-
- return data;
-}
-
-/**
- * kbuffer_subbuffer_size - the size of the loaded subbuffer
- * @kbuf: The kbuffer to read from
- *
- * Returns the size of the subbuffer. Note, this size is
- * where the last event resides. The stored subbuffer may actually be
- * bigger due to padding and such.
- */
-int kbuffer_subbuffer_size(struct kbuffer *kbuf)
-{
- return kbuf->size;
-}
-
-/**
- * kbuffer_curr_index - Return the index of the record
- * @kbuf: The kbuffer to read from
- *
- * Returns the index from the start of the data part of
- * the subbuffer to the current location. Note this is not
- * from the start of the subbuffer. An index of zero will
- * point to the first record. Use kbuffer_curr_offset() for
- * the actual offset (that can be used by kbuffer_read_at_offset())
- */
-int kbuffer_curr_index(struct kbuffer *kbuf)
-{
- return kbuf->curr;
-}
-
-/**
- * kbuffer_curr_offset - Return the offset of the record
- * @kbuf: The kbuffer to read from
- *
- * Returns the offset from the start of the subbuffer to the
- * current location.
- */
-int kbuffer_curr_offset(struct kbuffer *kbuf)
-{
- return kbuf->curr + kbuf->start;
-}
-
-/**
- * kbuffer_event_size - return the size of the event data
- * @kbuf: The kbuffer to read
- *
- * Returns the size of the event data (the payload not counting
- * the meta data of the record) of the current event.
- */
-int kbuffer_event_size(struct kbuffer *kbuf)
-{
- return kbuf->next - kbuf->index;
-}
-
-/**
- * kbuffer_curr_size - return the size of the entire record
- * @kbuf: The kbuffer to read
- *
- * Returns the size of the entire record (meta data and payload)
- * of the current event.
- */
-int kbuffer_curr_size(struct kbuffer *kbuf)
-{
- return kbuf->next - kbuf->curr;
-}
-
-/**
- * kbuffer_missed_events - return the # of missed events from last event.
- * @kbuf: The kbuffer to read from
- *
- * Returns the # of missed events (if recorded) before the current
- * event. Note, only events on the beginning of a subbuffer can
- * have missed events, all other events within the buffer will be
- * zero.
- */
-int kbuffer_missed_events(struct kbuffer *kbuf)
-{
- /* Only the first event can have missed events */
- if (kbuf->curr)
- return 0;
-
- return kbuf->lost_events;
-}
-
-/**
- * kbuffer_set_old_format - set the kbuffer to use the old format parsing
- * @kbuf: The kbuffer to set
- *
- * This is obsolete (or should be). The first kernels to use the
- * new ring buffer had a slightly different ring buffer format
- * (2.6.30 and earlier). It is still somewhat supported by kbuffer,
- * but should not be counted on in the future.
- */
-void kbuffer_set_old_format(struct kbuffer *kbuf)
-{
- kbuf->flags |= KBUFFER_FL_OLD_FORMAT;
-
- kbuf->next_event = __old_next_event;
-}
-
-/**
- * kbuffer_start_of_data - return offset of where data starts on subbuffer
- * @kbuf: The kbuffer
- *
- * Returns the location on the subbuffer where the data starts.
- */
-int kbuffer_start_of_data(struct kbuffer *kbuf)
-{
- return kbuf->start;
-}
-
-/**
- * kbuffer_raw_get - get raw buffer info
- * @kbuf: The kbuffer
- * @subbuf: Start of mapped subbuffer
- * @info: Info descriptor to fill in
- *
- * For debugging. This can return internals of the ring buffer.
- * Expects to have info->next set to what it will read.
- * The type, length and timestamp delta will be filled in, and
- * @info->next will be updated to the next element.
- * The @subbuf is used to know if the info is past the end of the
- * data, in which case NULL will be returned.
- */
-struct kbuffer_raw_info *
-kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf, struct kbuffer_raw_info *info)
-{
- unsigned long long flags;
- unsigned long long delta;
- unsigned int type_len;
- unsigned int size;
- int start;
- int length;
- void *ptr = info->next;
-
- if (!kbuf || !subbuf)
- return NULL;
-
- if (kbuf->flags & KBUFFER_FL_LONG_8)
- start = 16;
- else
- start = 12;
-
- flags = read_long(kbuf, subbuf + 8);
- size = (unsigned int)flags & COMMIT_MASK;
-
- if (ptr < subbuf || ptr >= subbuf + start + size)
- return NULL;
-
- type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
-
- info->next = ptr + length;
-
- info->type = type_len;
- info->delta = delta;
- info->length = length;
-
- return info;
-}
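
For reference, a minimal sketch of walking one raw ring-buffer page with the
kbuffer API above, assuming the caller has already read a full subbuffer page
into @page (for example from a per-CPU trace_pipe_raw file) and that the data
was recorded on a 64-bit little-endian machine:

#include <stdio.h>

#include "kbuffer.h"

static void dump_page(void *page)
{
	unsigned long long ts;
	struct kbuffer *kbuf;
	void *event;

	kbuf = kbuffer_alloc(KBUFFER_LSIZE_8, KBUFFER_ENDIAN_LITTLE);
	if (!kbuf)
		return;

	if (kbuffer_load_subbuffer(kbuf, page) < 0)
		goto out;

	/* kbuffer_read_event() returns the current event, next_event() advances */
	for (event = kbuffer_read_event(kbuf, &ts); event;
	     event = kbuffer_next_event(kbuf, &ts))
		printf("offset %d, size %d, ts %llu\n",
		       kbuffer_curr_offset(kbuf),
		       kbuffer_event_size(kbuf), ts);
out:
	kbuffer_free(kbuf);
}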
diff --git a/tools/lib/traceevent/kbuffer.h b/tools/lib/traceevent/kbuffer.h
deleted file mode 100644
index ed4d697fc137..000000000000
--- a/tools/lib/traceevent/kbuffer.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2012 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#ifndef _KBUFFER_H
-#define _KBUFFER_H
-
-#ifndef TS_SHIFT
-#define TS_SHIFT 27
-#endif
-
-enum kbuffer_endian {
- KBUFFER_ENDIAN_BIG,
- KBUFFER_ENDIAN_LITTLE,
-};
-
-enum kbuffer_long_size {
- KBUFFER_LSIZE_4,
- KBUFFER_LSIZE_8,
-};
-
-enum {
- KBUFFER_TYPE_PADDING = 29,
- KBUFFER_TYPE_TIME_EXTEND = 30,
- KBUFFER_TYPE_TIME_STAMP = 31,
-};
-
-struct kbuffer;
-
-struct kbuffer *kbuffer_alloc(enum kbuffer_long_size size, enum kbuffer_endian endian);
-void kbuffer_free(struct kbuffer *kbuf);
-int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer);
-void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts);
-void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts);
-unsigned long long kbuffer_timestamp(struct kbuffer *kbuf);
-
-void *kbuffer_translate_data(int swap, void *data, unsigned int *size);
-
-void *kbuffer_read_at_offset(struct kbuffer *kbuf, int offset, unsigned long long *ts);
-
-int kbuffer_curr_index(struct kbuffer *kbuf);
-
-int kbuffer_curr_offset(struct kbuffer *kbuf);
-int kbuffer_curr_size(struct kbuffer *kbuf);
-int kbuffer_event_size(struct kbuffer *kbuf);
-int kbuffer_missed_events(struct kbuffer *kbuf);
-int kbuffer_subbuffer_size(struct kbuffer *kbuf);
-
-void kbuffer_set_old_format(struct kbuffer *kbuf);
-int kbuffer_start_of_data(struct kbuffer *kbuf);
-
-/* Debugging */
-
-struct kbuffer_raw_info {
- int type;
- int length;
- unsigned long long delta;
- void *next;
-};
-
-/* Read raw data */
-struct kbuffer_raw_info *kbuffer_raw_get(struct kbuffer *kbuf, void *subbuf,
- struct kbuffer_raw_info *info);
-
-#endif /* _KBUFFER_H */
diff --git a/tools/lib/traceevent/libtraceevent.pc.template b/tools/lib/traceevent/libtraceevent.pc.template
deleted file mode 100644
index 86384fcd57f1..000000000000
--- a/tools/lib/traceevent/libtraceevent.pc.template
+++ /dev/null
@@ -1,10 +0,0 @@
-prefix=INSTALL_PREFIX
-libdir=LIB_DIR
-includedir=HEADER_DIR
-
-Name: libtraceevent
-URL: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-Description: Linux kernel trace event library
-Version: LIB_VERSION
-Cflags: -I${includedir}
-Libs: -L${libdir} -ltraceevent
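
The INSTALL_PREFIX, LIB_DIR, HEADER_DIR and LIB_VERSION placeholders above are
substituted by the build at install time; a hypothetically rendered file
(paths and version chosen for illustration only) would look like:

prefix=/usr/local
libdir=/usr/local/lib64
includedir=/usr/local/include/traceevent

Name: libtraceevent
URL: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Description: Linux kernel trace event library
Version: 1.1.0
Cflags: -I${includedir}
Libs: -L${libdir} -ltraceevent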
diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
deleted file mode 100644
index 20eed719542e..000000000000
--- a/tools/lib/traceevent/parse-filter.c
+++ /dev/null
@@ -1,2274 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <errno.h>
-#include <sys/types.h>
-
-#include "event-parse.h"
-#include "event-parse-local.h"
-#include "event-utils.h"
-
-#define COMM "COMM"
-#define CPU "CPU"
-
-static struct tep_format_field comm = {
- .name = "COMM",
-};
-
-static struct tep_format_field cpu = {
- .name = "CPU",
-};
-
-struct event_list {
- struct event_list *next;
- struct tep_event *event;
-};
-
-static void show_error(char *error_buf, const char *fmt, ...)
-{
- unsigned long long index;
- const char *input;
- va_list ap;
- int len;
- int i;
-
- input = tep_get_input_buf();
- index = tep_get_input_buf_ptr();
- len = input ? strlen(input) : 0;
-
- if (len) {
- strcpy(error_buf, input);
- error_buf[len] = '\n';
- for (i = 1; i < len && i < index; i++)
- error_buf[len+i] = ' ';
- error_buf[len + i] = '^';
- error_buf[len + i + 1] = '\n';
- len += i+2;
- }
-
- va_start(ap, fmt);
- vsnprintf(error_buf + len, TEP_FILTER_ERROR_BUFSZ - len, fmt, ap);
- va_end(ap);
-}
-
-static void free_token(char *token)
-{
- tep_free_token(token);
-}
-
-static enum tep_event_type read_token(char **tok)
-{
- enum tep_event_type type;
- char *token = NULL;
-
- do {
- free_token(token);
- type = tep_read_token(&token);
- } while (type == TEP_EVENT_NEWLINE || type == TEP_EVENT_SPACE);
-
- /* If token is = or ! check to see if the next char is ~ */
- if (token &&
- (strcmp(token, "=") == 0 || strcmp(token, "!") == 0) &&
- tep_peek_char() == '~') {
- /* append it */
- *tok = malloc(3);
- if (*tok == NULL) {
- free_token(token);
- return TEP_EVENT_ERROR;
- }
- sprintf(*tok, "%c%c", *token, '~');
- free_token(token);
- /* Now remove the '~' from the buffer */
- tep_read_token(&token);
- free_token(token);
- } else
- *tok = token;
-
- return type;
-}
-
-static int filter_cmp(const void *a, const void *b)
-{
- const struct tep_filter_type *ea = a;
- const struct tep_filter_type *eb = b;
-
- if (ea->event_id < eb->event_id)
- return -1;
-
- if (ea->event_id > eb->event_id)
- return 1;
-
- return 0;
-}
-
-static struct tep_filter_type *
-find_filter_type(struct tep_event_filter *filter, int id)
-{
- struct tep_filter_type *filter_type;
- struct tep_filter_type key;
-
- key.event_id = id;
-
- filter_type = bsearch(&key, filter->event_filters,
- filter->filters,
- sizeof(*filter->event_filters),
- filter_cmp);
-
- return filter_type;
-}
-
-static struct tep_filter_type *
-add_filter_type(struct tep_event_filter *filter, int id)
-{
- struct tep_filter_type *filter_type;
- int i;
-
- filter_type = find_filter_type(filter, id);
- if (filter_type)
- return filter_type;
-
- filter_type = realloc(filter->event_filters,
- sizeof(*filter->event_filters) *
- (filter->filters + 1));
- if (!filter_type)
- return NULL;
-
- filter->event_filters = filter_type;
-
- for (i = 0; i < filter->filters; i++) {
- if (filter->event_filters[i].event_id > id)
- break;
- }
-
- if (i < filter->filters)
- memmove(&filter->event_filters[i+1],
- &filter->event_filters[i],
- sizeof(*filter->event_filters) *
- (filter->filters - i));
-
- filter_type = &filter->event_filters[i];
- filter_type->event_id = id;
- filter_type->event = tep_find_event(filter->tep, id);
- filter_type->filter = NULL;
-
- filter->filters++;
-
- return filter_type;
-}
-
-/**
- * tep_filter_alloc - create a new event filter
- * @tep: The tep that this filter is associated with
- */
-struct tep_event_filter *tep_filter_alloc(struct tep_handle *tep)
-{
- struct tep_event_filter *filter;
-
- filter = malloc(sizeof(*filter));
- if (filter == NULL)
- return NULL;
-
- memset(filter, 0, sizeof(*filter));
- filter->tep = tep;
- tep_ref(tep);
-
- return filter;
-}
-
-static struct tep_filter_arg *allocate_arg(void)
-{
- return calloc(1, sizeof(struct tep_filter_arg));
-}
-
-static void free_arg(struct tep_filter_arg *arg)
-{
- if (!arg)
- return;
-
- switch (arg->type) {
- case TEP_FILTER_ARG_NONE:
- case TEP_FILTER_ARG_BOOLEAN:
- break;
-
- case TEP_FILTER_ARG_NUM:
- free_arg(arg->num.left);
- free_arg(arg->num.right);
- break;
-
- case TEP_FILTER_ARG_EXP:
- free_arg(arg->exp.left);
- free_arg(arg->exp.right);
- break;
-
- case TEP_FILTER_ARG_STR:
- free(arg->str.val);
- regfree(&arg->str.reg);
- free(arg->str.buffer);
- break;
-
- case TEP_FILTER_ARG_VALUE:
- if (arg->value.type == TEP_FILTER_STRING ||
- arg->value.type == TEP_FILTER_CHAR)
- free(arg->value.str);
- break;
-
- case TEP_FILTER_ARG_OP:
- free_arg(arg->op.left);
- free_arg(arg->op.right);
- default:
- break;
- }
-
- free(arg);
-}
-
-static int add_event(struct event_list **events,
- struct tep_event *event)
-{
- struct event_list *list;
-
- list = malloc(sizeof(*list));
- if (list == NULL)
- return -1;
-
- list->next = *events;
- *events = list;
- list->event = event;
- return 0;
-}
-
-static int event_match(struct tep_event *event,
- regex_t *sreg, regex_t *ereg)
-{
- if (sreg) {
- return !regexec(sreg, event->system, 0, NULL, 0) &&
- !regexec(ereg, event->name, 0, NULL, 0);
- }
-
- return !regexec(ereg, event->system, 0, NULL, 0) ||
- !regexec(ereg, event->name, 0, NULL, 0);
-}
-
-static enum tep_errno
-find_event(struct tep_handle *tep, struct event_list **events,
- char *sys_name, char *event_name)
-{
- struct tep_event *event;
- regex_t ereg;
- regex_t sreg;
- int match = 0;
- int fail = 0;
- char *reg;
- int ret;
- int i;
-
- if (!event_name) {
- /* if no name is given, then swap sys and name */
- event_name = sys_name;
- sys_name = NULL;
- }
-
- ret = asprintf(&reg, "^%s$", event_name);
- if (ret < 0)
- return TEP_ERRNO__MEM_ALLOC_FAILED;
-
- ret = regcomp(&ereg, reg, REG_ICASE|REG_NOSUB);
- free(reg);
-
- if (ret)
- return TEP_ERRNO__INVALID_EVENT_NAME;
-
- if (sys_name) {
- ret = asprintf(&reg, "^%s$", sys_name);
- if (ret < 0) {
- regfree(&ereg);
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
-
- ret = regcomp(&sreg, reg, REG_ICASE|REG_NOSUB);
- free(reg);
- if (ret) {
- regfree(&ereg);
- return TEP_ERRNO__INVALID_EVENT_NAME;
- }
- }
-
- for (i = 0; i < tep->nr_events; i++) {
- event = tep->events[i];
- if (event_match(event, sys_name ? &sreg : NULL, &ereg)) {
- match = 1;
- if (add_event(events, event) < 0) {
- fail = 1;
- break;
- }
- }
- }
-
- regfree(&ereg);
- if (sys_name)
- regfree(&sreg);
-
- if (!match)
- return TEP_ERRNO__EVENT_NOT_FOUND;
- if (fail)
- return TEP_ERRNO__MEM_ALLOC_FAILED;
-
- return 0;
-}
-
-static void free_events(struct event_list *events)
-{
- struct event_list *event;
-
- while (events) {
- event = events;
- events = events->next;
- free(event);
- }
-}
-
-static enum tep_errno
-create_arg_item(struct tep_event *event, const char *token,
- enum tep_event_type type, struct tep_filter_arg **parg, char *error_str)
-{
- struct tep_format_field *field;
- struct tep_filter_arg *arg;
-
- arg = allocate_arg();
- if (arg == NULL) {
- show_error(error_str, "failed to allocate filter arg");
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
-
- switch (type) {
-
- case TEP_EVENT_SQUOTE:
- case TEP_EVENT_DQUOTE:
- arg->type = TEP_FILTER_ARG_VALUE;
- arg->value.type =
- type == TEP_EVENT_DQUOTE ? TEP_FILTER_STRING : TEP_FILTER_CHAR;
- arg->value.str = strdup(token);
- if (!arg->value.str) {
- free_arg(arg);
- show_error(error_str, "failed to allocate string filter arg");
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
- break;
- case TEP_EVENT_ITEM:
- /* if it is a number, then convert it */
- if (isdigit(token[0])) {
- arg->type = TEP_FILTER_ARG_VALUE;
- arg->value.type = TEP_FILTER_NUMBER;
- arg->value.val = strtoull(token, NULL, 0);
- break;
- }
- /* Consider this a field */
- field = tep_find_any_field(event, token);
- if (!field) {
- /* If token is 'COMM' or 'CPU' then it is special */
- if (strcmp(token, COMM) == 0) {
- field = &comm;
- } else if (strcmp(token, CPU) == 0) {
- field = &cpu;
- } else {
- /* not a field, Make it false */
- arg->type = TEP_FILTER_ARG_BOOLEAN;
- arg->boolean.value = TEP_FILTER_FALSE;
- break;
- }
- }
- arg->type = TEP_FILTER_ARG_FIELD;
- arg->field.field = field;
- break;
- default:
- free_arg(arg);
- show_error(error_str, "expected a value but found %s", token);
- return TEP_ERRNO__UNEXPECTED_TYPE;
- }
- *parg = arg;
- return 0;
-}
-
-static struct tep_filter_arg *
-create_arg_op(enum tep_filter_op_type btype)
-{
- struct tep_filter_arg *arg;
-
- arg = allocate_arg();
- if (!arg)
- return NULL;
-
- arg->type = TEP_FILTER_ARG_OP;
- arg->op.type = btype;
-
- return arg;
-}
-
-static struct tep_filter_arg *
-create_arg_exp(enum tep_filter_exp_type etype)
-{
- struct tep_filter_arg *arg;
-
- arg = allocate_arg();
- if (!arg)
- return NULL;
-
- arg->type = TEP_FILTER_ARG_EXP;
- arg->exp.type = etype;
-
- return arg;
-}
-
-static struct tep_filter_arg *
-create_arg_cmp(enum tep_filter_cmp_type ctype)
-{
- struct tep_filter_arg *arg;
-
- arg = allocate_arg();
- if (!arg)
- return NULL;
-
- /* Use NUM and change if necessary */
- arg->type = TEP_FILTER_ARG_NUM;
- arg->num.type = ctype;
-
- return arg;
-}
-
-static enum tep_errno
-add_right(struct tep_filter_arg *op, struct tep_filter_arg *arg, char *error_str)
-{
- struct tep_filter_arg *left;
- char *str;
- int op_type;
- int ret;
-
- switch (op->type) {
- case TEP_FILTER_ARG_EXP:
- if (op->exp.right)
- goto out_fail;
- op->exp.right = arg;
- break;
-
- case TEP_FILTER_ARG_OP:
- if (op->op.right)
- goto out_fail;
- op->op.right = arg;
- break;
-
- case TEP_FILTER_ARG_NUM:
- if (op->op.right)
- goto out_fail;
- /*
- * The arg must be num, str, or field
- */
- switch (arg->type) {
- case TEP_FILTER_ARG_VALUE:
- case TEP_FILTER_ARG_FIELD:
- break;
- default:
- show_error(error_str, "Illegal rvalue");
- return TEP_ERRNO__ILLEGAL_RVALUE;
- }
-
- /*
- * Depending on the type, we may need to
- * convert this to a string or regex.
- */
- switch (arg->value.type) {
- case TEP_FILTER_CHAR:
- /*
- * A char should be converted to number if
- * the string is 1 byte, and the compare
- * is not a REGEX.
- */
- if (strlen(arg->value.str) == 1 &&
- op->num.type != TEP_FILTER_CMP_REGEX &&
- op->num.type != TEP_FILTER_CMP_NOT_REGEX) {
- arg->value.type = TEP_FILTER_NUMBER;
- goto do_int;
- }
- /* fall through */
- case TEP_FILTER_STRING:
-
- /* convert op to a string arg */
- op_type = op->num.type;
- left = op->num.left;
- str = arg->value.str;
-
- /* reset the op for the new field */
- memset(op, 0, sizeof(*op));
-
- /*
- * If left arg was a field not found then
- * NULL the entire op.
- */
- if (left->type == TEP_FILTER_ARG_BOOLEAN) {
- free_arg(left);
- free_arg(arg);
- op->type = TEP_FILTER_ARG_BOOLEAN;
- op->boolean.value = TEP_FILTER_FALSE;
- break;
- }
-
- /* Left arg must be a field */
- if (left->type != TEP_FILTER_ARG_FIELD) {
- show_error(error_str,
- "Illegal lvalue for string comparison");
- return TEP_ERRNO__ILLEGAL_LVALUE;
- }
-
- /* Make sure this is a valid string compare */
- switch (op_type) {
- case TEP_FILTER_CMP_EQ:
- op_type = TEP_FILTER_CMP_MATCH;
- break;
- case TEP_FILTER_CMP_NE:
- op_type = TEP_FILTER_CMP_NOT_MATCH;
- break;
-
- case TEP_FILTER_CMP_REGEX:
- case TEP_FILTER_CMP_NOT_REGEX:
- ret = regcomp(&op->str.reg, str, REG_ICASE|REG_NOSUB);
- if (ret) {
- show_error(error_str,
- "RegEx '%s' did not compute",
- str);
- return TEP_ERRNO__INVALID_REGEX;
- }
- break;
- default:
- show_error(error_str,
- "Illegal comparison for string");
- return TEP_ERRNO__ILLEGAL_STRING_CMP;
- }
-
- op->type = TEP_FILTER_ARG_STR;
- op->str.type = op_type;
- op->str.field = left->field.field;
- op->str.val = strdup(str);
- if (!op->str.val) {
- show_error(error_str, "Failed to allocate string filter");
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
- /*
- * Need a buffer to copy data for tests
- */
- op->str.buffer = malloc(op->str.field->size + 1);
- if (!op->str.buffer) {
- show_error(error_str, "Failed to allocate string filter");
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
- /* Null terminate this buffer */
- op->str.buffer[op->str.field->size] = 0;
-
- /* We no longer have left or right args */
- free_arg(arg);
- free_arg(left);
-
- break;
-
- case TEP_FILTER_NUMBER:
-
- do_int:
- switch (op->num.type) {
- case TEP_FILTER_CMP_REGEX:
- case TEP_FILTER_CMP_NOT_REGEX:
- show_error(error_str,
- "Op not allowed with integers");
- return TEP_ERRNO__ILLEGAL_INTEGER_CMP;
-
- default:
- break;
- }
-
- /* numeric compare */
- op->num.right = arg;
- break;
- default:
- goto out_fail;
- }
- break;
- default:
- goto out_fail;
- }
-
- return 0;
-
- out_fail:
- show_error(error_str, "Syntax error");
- return TEP_ERRNO__SYNTAX_ERROR;
-}
-
-static struct tep_filter_arg *
-rotate_op_right(struct tep_filter_arg *a, struct tep_filter_arg *b)
-{
- struct tep_filter_arg *arg;
-
- arg = a->op.right;
- a->op.right = b;
- return arg;
-}
-
-static enum tep_errno add_left(struct tep_filter_arg *op, struct tep_filter_arg *arg)
-{
- switch (op->type) {
- case TEP_FILTER_ARG_EXP:
- if (arg->type == TEP_FILTER_ARG_OP)
- arg = rotate_op_right(arg, op);
- op->exp.left = arg;
- break;
-
- case TEP_FILTER_ARG_OP:
- op->op.left = arg;
- break;
- case TEP_FILTER_ARG_NUM:
- if (arg->type == TEP_FILTER_ARG_OP)
- arg = rotate_op_right(arg, op);
-
- /* left arg of compares must be a field */
- if (arg->type != TEP_FILTER_ARG_FIELD &&
- arg->type != TEP_FILTER_ARG_BOOLEAN)
- return TEP_ERRNO__INVALID_ARG_TYPE;
- op->num.left = arg;
- break;
- default:
- return TEP_ERRNO__INVALID_ARG_TYPE;
- }
- return 0;
-}
-
-enum op_type {
- OP_NONE,
- OP_BOOL,
- OP_NOT,
- OP_EXP,
- OP_CMP,
-};
-
-static enum op_type process_op(const char *token,
- enum tep_filter_op_type *btype,
- enum tep_filter_cmp_type *ctype,
- enum tep_filter_exp_type *etype)
-{
- *btype = TEP_FILTER_OP_NOT;
- *etype = TEP_FILTER_EXP_NONE;
- *ctype = TEP_FILTER_CMP_NONE;
-
- if (strcmp(token, "&&") == 0)
- *btype = TEP_FILTER_OP_AND;
- else if (strcmp(token, "||") == 0)
- *btype = TEP_FILTER_OP_OR;
- else if (strcmp(token, "!") == 0)
- return OP_NOT;
-
- if (*btype != TEP_FILTER_OP_NOT)
- return OP_BOOL;
-
- /* Check for value expressions */
- if (strcmp(token, "+") == 0) {
- *etype = TEP_FILTER_EXP_ADD;
- } else if (strcmp(token, "-") == 0) {
- *etype = TEP_FILTER_EXP_SUB;
- } else if (strcmp(token, "*") == 0) {
- *etype = TEP_FILTER_EXP_MUL;
- } else if (strcmp(token, "/") == 0) {
- *etype = TEP_FILTER_EXP_DIV;
- } else if (strcmp(token, "%") == 0) {
- *etype = TEP_FILTER_EXP_MOD;
- } else if (strcmp(token, ">>") == 0) {
- *etype = TEP_FILTER_EXP_RSHIFT;
- } else if (strcmp(token, "<<") == 0) {
- *etype = TEP_FILTER_EXP_LSHIFT;
- } else if (strcmp(token, "&") == 0) {
- *etype = TEP_FILTER_EXP_AND;
- } else if (strcmp(token, "|") == 0) {
- *etype = TEP_FILTER_EXP_OR;
- } else if (strcmp(token, "^") == 0) {
- *etype = TEP_FILTER_EXP_XOR;
- } else if (strcmp(token, "~") == 0)
- *etype = TEP_FILTER_EXP_NOT;
-
- if (*etype != TEP_FILTER_EXP_NONE)
- return OP_EXP;
-
- /* Check for compares */
- if (strcmp(token, "==") == 0)
- *ctype = TEP_FILTER_CMP_EQ;
- else if (strcmp(token, "!=") == 0)
- *ctype = TEP_FILTER_CMP_NE;
- else if (strcmp(token, "<") == 0)
- *ctype = TEP_FILTER_CMP_LT;
- else if (strcmp(token, ">") == 0)
- *ctype = TEP_FILTER_CMP_GT;
- else if (strcmp(token, "<=") == 0)
- *ctype = TEP_FILTER_CMP_LE;
- else if (strcmp(token, ">=") == 0)
- *ctype = TEP_FILTER_CMP_GE;
- else if (strcmp(token, "=~") == 0)
- *ctype = TEP_FILTER_CMP_REGEX;
- else if (strcmp(token, "!~") == 0)
- *ctype = TEP_FILTER_CMP_NOT_REGEX;
- else
- return OP_NONE;
-
- return OP_CMP;
-}
-
-static int check_op_done(struct tep_filter_arg *arg)
-{
- switch (arg->type) {
- case TEP_FILTER_ARG_EXP:
- return arg->exp.right != NULL;
-
- case TEP_FILTER_ARG_OP:
- return arg->op.right != NULL;
-
- case TEP_FILTER_ARG_NUM:
- return arg->num.right != NULL;
-
- case TEP_FILTER_ARG_STR:
- /* A string conversion is always done */
- return 1;
-
- case TEP_FILTER_ARG_BOOLEAN:
- /* field not found, is ok */
- return 1;
-
- default:
- return 0;
- }
-}
-
-enum filter_vals {
- FILTER_VAL_NORM,
- FILTER_VAL_FALSE,
- FILTER_VAL_TRUE,
-};
-
-static enum tep_errno
-reparent_op_arg(struct tep_filter_arg *parent, struct tep_filter_arg *old_child,
- struct tep_filter_arg *arg, char *error_str)
-{
- struct tep_filter_arg *other_child;
- struct tep_filter_arg **ptr;
-
- if (parent->type != TEP_FILTER_ARG_OP &&
- arg->type != TEP_FILTER_ARG_OP) {
- show_error(error_str, "can not reparent other than OP");
- return TEP_ERRNO__REPARENT_NOT_OP;
- }
-
- /* Get the sibling */
- if (old_child->op.right == arg) {
- ptr = &old_child->op.right;
- other_child = old_child->op.left;
- } else if (old_child->op.left == arg) {
- ptr = &old_child->op.left;
- other_child = old_child->op.right;
- } else {
- show_error(error_str, "Error in reparent op, find other child");
- return TEP_ERRNO__REPARENT_FAILED;
- }
-
- /* Detach arg from old_child */
- *ptr = NULL;
-
- /* Check for root */
- if (parent == old_child) {
- free_arg(other_child);
- *parent = *arg;
- /* Free arg without recursion */
- free(arg);
- return 0;
- }
-
- if (parent->op.right == old_child)
- ptr = &parent->op.right;
- else if (parent->op.left == old_child)
- ptr = &parent->op.left;
- else {
- show_error(error_str, "Error in reparent op");
- return TEP_ERRNO__REPARENT_FAILED;
- }
-
- *ptr = arg;
-
- free_arg(old_child);
- return 0;
-}
-
-/* Returns either filter_vals (success) or tep_errno (failure) */
-static int test_arg(struct tep_filter_arg *parent, struct tep_filter_arg *arg,
- char *error_str)
-{
- int lval, rval;
-
- switch (arg->type) {
-
- /* bad case */
- case TEP_FILTER_ARG_BOOLEAN:
- return FILTER_VAL_FALSE + arg->boolean.value;
-
- /* good cases: */
- case TEP_FILTER_ARG_STR:
- case TEP_FILTER_ARG_VALUE:
- case TEP_FILTER_ARG_FIELD:
- return FILTER_VAL_NORM;
-
- case TEP_FILTER_ARG_EXP:
- lval = test_arg(arg, arg->exp.left, error_str);
- if (lval != FILTER_VAL_NORM)
- return lval;
- rval = test_arg(arg, arg->exp.right, error_str);
- if (rval != FILTER_VAL_NORM)
- return rval;
- return FILTER_VAL_NORM;
-
- case TEP_FILTER_ARG_NUM:
- lval = test_arg(arg, arg->num.left, error_str);
- if (lval != FILTER_VAL_NORM)
- return lval;
- rval = test_arg(arg, arg->num.right, error_str);
- if (rval != FILTER_VAL_NORM)
- return rval;
- return FILTER_VAL_NORM;
-
- case TEP_FILTER_ARG_OP:
- if (arg->op.type != TEP_FILTER_OP_NOT) {
- lval = test_arg(arg, arg->op.left, error_str);
- switch (lval) {
- case FILTER_VAL_NORM:
- break;
- case FILTER_VAL_TRUE:
- if (arg->op.type == TEP_FILTER_OP_OR)
- return FILTER_VAL_TRUE;
- rval = test_arg(arg, arg->op.right, error_str);
- if (rval != FILTER_VAL_NORM)
- return rval;
-
- return reparent_op_arg(parent, arg, arg->op.right,
- error_str);
-
- case FILTER_VAL_FALSE:
- if (arg->op.type == TEP_FILTER_OP_AND)
- return FILTER_VAL_FALSE;
- rval = test_arg(arg, arg->op.right, error_str);
- if (rval != FILTER_VAL_NORM)
- return rval;
-
- return reparent_op_arg(parent, arg, arg->op.right,
- error_str);
-
- default:
- return lval;
- }
- }
-
- rval = test_arg(arg, arg->op.right, error_str);
- switch (rval) {
- case FILTER_VAL_NORM:
- default:
- break;
-
- case FILTER_VAL_TRUE:
- if (arg->op.type == TEP_FILTER_OP_OR)
- return FILTER_VAL_TRUE;
- if (arg->op.type == TEP_FILTER_OP_NOT)
- return FILTER_VAL_FALSE;
-
- return reparent_op_arg(parent, arg, arg->op.left,
- error_str);
-
- case FILTER_VAL_FALSE:
- if (arg->op.type == TEP_FILTER_OP_AND)
- return FILTER_VAL_FALSE;
- if (arg->op.type == TEP_FILTER_OP_NOT)
- return FILTER_VAL_TRUE;
-
- return reparent_op_arg(parent, arg, arg->op.left,
- error_str);
- }
-
- return rval;
- default:
- show_error(error_str, "bad arg in filter tree");
- return TEP_ERRNO__BAD_FILTER_ARG;
- }
- return FILTER_VAL_NORM;
-}
-
-/* Remove any unknown event fields */
-static int collapse_tree(struct tep_filter_arg *arg,
- struct tep_filter_arg **arg_collapsed, char *error_str)
-{
- int ret;
-
- ret = test_arg(arg, arg, error_str);
- switch (ret) {
- case FILTER_VAL_NORM:
- break;
-
- case FILTER_VAL_TRUE:
- case FILTER_VAL_FALSE:
- free_arg(arg);
- arg = allocate_arg();
- if (arg) {
- arg->type = TEP_FILTER_ARG_BOOLEAN;
- arg->boolean.value = ret == FILTER_VAL_TRUE;
- } else {
- show_error(error_str, "Failed to allocate filter arg");
- ret = TEP_ERRNO__MEM_ALLOC_FAILED;
- }
- break;
-
- default:
- /* test_arg() already set the error_str */
- free_arg(arg);
- arg = NULL;
- break;
- }
-
- *arg_collapsed = arg;
- return ret;
-}
-
-static enum tep_errno
-process_filter(struct tep_event *event, struct tep_filter_arg **parg,
- char *error_str, int not)
-{
- enum tep_event_type type;
- char *token = NULL;
- struct tep_filter_arg *current_op = NULL;
- struct tep_filter_arg *current_exp = NULL;
- struct tep_filter_arg *left_item = NULL;
- struct tep_filter_arg *arg = NULL;
- enum op_type op_type;
- enum tep_filter_op_type btype;
- enum tep_filter_exp_type etype;
- enum tep_filter_cmp_type ctype;
- enum tep_errno ret;
-
- *parg = NULL;
-
- do {
- free(token);
- type = read_token(&token);
- switch (type) {
- case TEP_EVENT_SQUOTE:
- case TEP_EVENT_DQUOTE:
- case TEP_EVENT_ITEM:
- ret = create_arg_item(event, token, type, &arg, error_str);
- if (ret < 0)
- goto fail;
- if (!left_item)
- left_item = arg;
- else if (current_exp) {
- ret = add_right(current_exp, arg, error_str);
- if (ret < 0)
- goto fail;
- left_item = NULL;
- /* A NOT takes only one expression */
- if (not) {
- arg = NULL;
- if (current_op)
- goto fail_syntax;
- free(token);
- *parg = current_exp;
- return 0;
- }
- } else
- goto fail_syntax;
- arg = NULL;
- break;
-
- case TEP_EVENT_DELIM:
- if (*token == ',') {
- show_error(error_str, "Illegal token ','");
- ret = TEP_ERRNO__ILLEGAL_TOKEN;
- goto fail;
- }
-
- if (*token == '(') {
- if (left_item) {
- show_error(error_str,
- "Open paren can not come after item");
- ret = TEP_ERRNO__INVALID_PAREN;
- goto fail;
- }
- if (current_exp) {
- show_error(error_str,
- "Open paren can not come after expression");
- ret = TEP_ERRNO__INVALID_PAREN;
- goto fail;
- }
-
- ret = process_filter(event, &arg, error_str, 0);
- if (ret != TEP_ERRNO__UNBALANCED_PAREN) {
- if (ret == 0) {
- show_error(error_str,
- "Unbalanced number of '('");
- ret = TEP_ERRNO__UNBALANCED_PAREN;
- }
- goto fail;
- }
- ret = 0;
-
- /* A not wants just one expression */
- if (not) {
- if (current_op)
- goto fail_syntax;
- *parg = arg;
- return 0;
- }
-
- if (current_op)
- ret = add_right(current_op, arg, error_str);
- else
- current_exp = arg;
-
- if (ret < 0)
- goto fail;
-
- } else { /* ')' */
- if (!current_op && !current_exp)
- goto fail_syntax;
-
- /* Make sure everything is finished at this level */
- if (current_exp && !check_op_done(current_exp))
- goto fail_syntax;
- if (current_op && !check_op_done(current_op))
- goto fail_syntax;
-
- if (current_op)
- *parg = current_op;
- else
- *parg = current_exp;
- free(token);
- return TEP_ERRNO__UNBALANCED_PAREN;
- }
- break;
-
- case TEP_EVENT_OP:
- op_type = process_op(token, &btype, &ctype, &etype);
-
- /* All expect a left arg except for NOT */
- switch (op_type) {
- case OP_BOOL:
- /* Logic ops need a left expression */
- if (!current_exp && !current_op)
- goto fail_syntax;
- /* fall through */
- case OP_NOT:
- /* logic only processes ops and exp */
- if (left_item)
- goto fail_syntax;
- break;
- case OP_EXP:
- case OP_CMP:
- if (!left_item)
- goto fail_syntax;
- break;
- case OP_NONE:
- show_error(error_str,
- "Unknown op token %s", token);
- ret = TEP_ERRNO__UNKNOWN_TOKEN;
- goto fail;
- }
-
- ret = 0;
- switch (op_type) {
- case OP_BOOL:
- arg = create_arg_op(btype);
- if (arg == NULL)
- goto fail_alloc;
- if (current_op)
- ret = add_left(arg, current_op);
- else
- ret = add_left(arg, current_exp);
- current_op = arg;
- current_exp = NULL;
- break;
-
- case OP_NOT:
- arg = create_arg_op(btype);
- if (arg == NULL)
- goto fail_alloc;
- if (current_op)
- ret = add_right(current_op, arg, error_str);
- if (ret < 0)
- goto fail;
- current_exp = arg;
- ret = process_filter(event, &arg, error_str, 1);
- if (ret < 0)
- goto fail;
- ret = add_right(current_exp, arg, error_str);
- if (ret < 0)
- goto fail;
- break;
-
- case OP_EXP:
- case OP_CMP:
- if (op_type == OP_EXP)
- arg = create_arg_exp(etype);
- else
- arg = create_arg_cmp(ctype);
- if (arg == NULL)
- goto fail_alloc;
-
- if (current_op)
- ret = add_right(current_op, arg, error_str);
- if (ret < 0)
- goto fail;
- ret = add_left(arg, left_item);
- if (ret < 0) {
- arg = NULL;
- goto fail_syntax;
- }
- current_exp = arg;
- break;
- default:
- break;
- }
- arg = NULL;
- if (ret < 0)
- goto fail_syntax;
- break;
- case TEP_EVENT_NONE:
- break;
- case TEP_EVENT_ERROR:
- goto fail_alloc;
- default:
- goto fail_syntax;
- }
- } while (type != TEP_EVENT_NONE);
-
- if (!current_op && !current_exp)
- goto fail_syntax;
-
- if (!current_op)
- current_op = current_exp;
-
- ret = collapse_tree(current_op, parg, error_str);
- /* collapse_tree() may free current_op, and updates parg accordingly */
- current_op = NULL;
- if (ret < 0)
- goto fail;
-
- free(token);
- return 0;
-
- fail_alloc:
- show_error(error_str, "failed to allocate filter arg");
- ret = TEP_ERRNO__MEM_ALLOC_FAILED;
- goto fail;
- fail_syntax:
- show_error(error_str, "Syntax error");
- ret = TEP_ERRNO__SYNTAX_ERROR;
- fail:
- free_arg(current_op);
- free_arg(current_exp);
- free_arg(arg);
- free(token);
- return ret;
-}
-
-static enum tep_errno
-process_event(struct tep_event *event, const char *filter_str,
- struct tep_filter_arg **parg, char *error_str)
-{
- int ret;
-
- tep_buffer_init(filter_str, strlen(filter_str));
-
- ret = process_filter(event, parg, error_str, 0);
- if (ret < 0)
- return ret;
-
- /* If parg is NULL, then make it into FALSE */
- if (!*parg) {
- *parg = allocate_arg();
- if (*parg == NULL)
- return TEP_ERRNO__MEM_ALLOC_FAILED;
-
- (*parg)->type = TEP_FILTER_ARG_BOOLEAN;
- (*parg)->boolean.value = TEP_FILTER_FALSE;
- }
-
- return 0;
-}
-
-static enum tep_errno
-filter_event(struct tep_event_filter *filter, struct tep_event *event,
- const char *filter_str, char *error_str)
-{
- struct tep_filter_type *filter_type;
- struct tep_filter_arg *arg;
- enum tep_errno ret;
-
- if (filter_str) {
- ret = process_event(event, filter_str, &arg, error_str);
- if (ret < 0)
- return ret;
-
- } else {
- /* just add a TRUE arg */
- arg = allocate_arg();
- if (arg == NULL)
- return TEP_ERRNO__MEM_ALLOC_FAILED;
-
- arg->type = TEP_FILTER_ARG_BOOLEAN;
- arg->boolean.value = TEP_FILTER_TRUE;
- }
-
- filter_type = add_filter_type(filter, event->id);
- if (filter_type == NULL) {
- free_arg(arg);
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
-
- if (filter_type->filter)
- free_arg(filter_type->filter);
- filter_type->filter = arg;
-
- return 0;
-}
-
-static void filter_init_error_buf(struct tep_event_filter *filter)
-{
- /* clear buffer to reset show error */
- tep_buffer_init("", 0);
- filter->error_buffer[0] = '\0';
-}
-
-/**
- * tep_filter_add_filter_str - add a new filter
- * @filter: the event filter to add to
- * @filter_str: the filter string that contains the filter
- *
- * Returns 0 if the filter was successfully added or a
- * negative error code. Use tep_filter_strerror() to see
- * actual error message in case of error.
- */
-enum tep_errno tep_filter_add_filter_str(struct tep_event_filter *filter,
- const char *filter_str)
-{
- struct tep_handle *tep = filter->tep;
- struct event_list *event;
- struct event_list *events = NULL;
- const char *filter_start;
- const char *next_event;
- char *this_event;
- char *event_name = NULL;
- char *sys_name = NULL;
- char *sp;
- enum tep_errno rtn = 0; /* TEP_ERRNO__SUCCESS */
- int len;
- int ret;
-
- filter_init_error_buf(filter);
-
- filter_start = strchr(filter_str, ':');
- if (filter_start)
- len = filter_start - filter_str;
- else
- len = strlen(filter_str);
-
- do {
- next_event = strchr(filter_str, ',');
- if (next_event &&
- (!filter_start || next_event < filter_start))
- len = next_event - filter_str;
- else if (filter_start)
- len = filter_start - filter_str;
- else
- len = strlen(filter_str);
-
- this_event = malloc(len + 1);
- if (this_event == NULL) {
- /* This can only happen when events is NULL, but still */
- free_events(events);
- return TEP_ERRNO__MEM_ALLOC_FAILED;
- }
- memcpy(this_event, filter_str, len);
- this_event[len] = 0;
-
- if (next_event)
- next_event++;
-
- filter_str = next_event;
-
- sys_name = strtok_r(this_event, "/", &sp);
- event_name = strtok_r(NULL, "/", &sp);
-
- if (!sys_name) {
- /* This can only happen when events is NULL, but still */
- free_events(events);
- free(this_event);
- return TEP_ERRNO__FILTER_NOT_FOUND;
- }
-
- /* Find this event */
- ret = find_event(tep, &events, strim(sys_name), strim(event_name));
- if (ret < 0) {
- free_events(events);
- free(this_event);
- return ret;
- }
- free(this_event);
- } while (filter_str);
-
- /* Skip the ':' */
- if (filter_start)
- filter_start++;
-
- /* filter starts here */
- for (event = events; event; event = event->next) {
- ret = filter_event(filter, event->event, filter_start,
- filter->error_buffer);
- /* Failures are returned if a parse error happened */
- if (ret < 0)
- rtn = ret;
-
- if (ret >= 0 && tep->test_filters) {
- char *test;
- test = tep_filter_make_string(filter, event->event->id);
- if (test) {
- printf(" '%s: %s'\n", event->event->name, test);
- free(test);
- }
- }
- }
-
- free_events(events);
-
- return rtn;
-}
-
-static void free_filter_type(struct tep_filter_type *filter_type)
-{
- free_arg(filter_type->filter);
-}
-
-/**
- * tep_filter_strerror - fill error message in a buffer
- * @filter: the event filter that contains the error
- * @err: the error code
- * @buf: the buffer to be filled in
- * @buflen: the size of the buffer
- *
- * Returns 0 if message was filled successfully, -1 if error
- */
-int tep_filter_strerror(struct tep_event_filter *filter, enum tep_errno err,
- char *buf, size_t buflen)
-{
- if (err <= __TEP_ERRNO__START || err >= __TEP_ERRNO__END)
- return -1;
-
- if (strlen(filter->error_buffer) > 0) {
- size_t len = snprintf(buf, buflen, "%s", filter->error_buffer);
-
- if (len > buflen)
- return -1;
- return 0;
- }
-
- return tep_strerror(filter->tep, err, buf, buflen);
-}
-
-/**
- * tep_filter_remove_event - remove a filter for an event
- * @filter: the event filter to remove from
- * @event_id: the event to remove a filter for
- *
- * Removes the filter saved for an event defined by @event_id
- * from the @filter.
- *
- * Returns 1: if an event was removed
- * 0: if the event was not found
- */
-int tep_filter_remove_event(struct tep_event_filter *filter,
- int event_id)
-{
- struct tep_filter_type *filter_type;
- unsigned long len;
-
- if (!filter->filters)
- return 0;
-
- filter_type = find_filter_type(filter, event_id);
-
- if (!filter_type)
- return 0;
-
- free_filter_type(filter_type);
-
- /* The filter_type points into the event_filters array */
- len = (unsigned long)(filter->event_filters + filter->filters) -
- (unsigned long)(filter_type + 1);
-
- memmove(filter_type, filter_type + 1, len);
- filter->filters--;
-
- memset(&filter->event_filters[filter->filters], 0,
- sizeof(*filter_type));
-
- return 1;
-}
-
-/**
- * tep_filter_reset - clear all filters in a filter
- * @filter: the event filter to reset
- *
- * Removes all filters from a filter and resets it.
- */
-void tep_filter_reset(struct tep_event_filter *filter)
-{
- int i;
-
- for (i = 0; i < filter->filters; i++)
- free_filter_type(&filter->event_filters[i]);
-
- free(filter->event_filters);
- filter->filters = 0;
- filter->event_filters = NULL;
-}
-
-void tep_filter_free(struct tep_event_filter *filter)
-{
- tep_unref(filter->tep);
-
- tep_filter_reset(filter);
-
- free(filter);
-}
-
-static char *arg_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg);
-
-static int copy_filter_type(struct tep_event_filter *filter,
- struct tep_event_filter *source,
- struct tep_filter_type *filter_type)
-{
- struct tep_filter_arg *arg;
- struct tep_event *event;
- const char *sys;
- const char *name;
- char *str;
-
- /* Can't assume that the tep's are the same */
- sys = filter_type->event->system;
- name = filter_type->event->name;
- event = tep_find_event_by_name(filter->tep, sys, name);
- if (!event)
- return -1;
-
- str = arg_to_str(source, filter_type->filter);
- if (!str)
- return -1;
-
- if (strcmp(str, "TRUE") == 0 || strcmp(str, "FALSE") == 0) {
- /* Add trivial event */
- arg = allocate_arg();
- if (arg == NULL) {
- free(str);
- return -1;
- }
-
- arg->type = TEP_FILTER_ARG_BOOLEAN;
- if (strcmp(str, "TRUE") == 0)
- arg->boolean.value = 1;
- else
- arg->boolean.value = 0;
-
- filter_type = add_filter_type(filter, event->id);
- if (filter_type == NULL) {
- free(str);
- free_arg(arg);
- return -1;
- }
-
- filter_type->filter = arg;
-
- free(str);
- return 0;
- }
-
- filter_event(filter, event, str, NULL);
- free(str);
-
- return 0;
-}
-
-/**
- * tep_filter_copy - copy a filter using another filter
- * @dest - the filter to copy to
- * @source - the filter to copy from
- *
- * Returns 0 on success and -1 if not all filters were copied
- */
-int tep_filter_copy(struct tep_event_filter *dest, struct tep_event_filter *source)
-{
- int ret = 0;
- int i;
-
- tep_filter_reset(dest);
-
- for (i = 0; i < source->filters; i++) {
- if (copy_filter_type(dest, source, &source->event_filters[i]))
- ret = -1;
- }
- return ret;
-}
-
-static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
- struct tep_record *record, enum tep_errno *err);
-
-static const char *
-get_comm(struct tep_event *event, struct tep_record *record)
-{
- const char *comm;
- int pid;
-
- pid = tep_data_pid(event->tep, record);
- comm = tep_data_comm_from_pid(event->tep, pid);
- return comm;
-}
-
-static unsigned long long
-get_value(struct tep_event *event,
- struct tep_format_field *field, struct tep_record *record)
-{
- unsigned long long val;
-
- /* Handle our dummy "comm" field */
- if (field == &comm) {
- const char *name;
-
- name = get_comm(event, record);
- return (unsigned long)name;
- }
-
- /* Handle our dummy "cpu" field */
- if (field == &cpu)
- return record->cpu;
-
- tep_read_number_field(field, record->data, &val);
-
- if (!(field->flags & TEP_FIELD_IS_SIGNED))
- return val;
-
- switch (field->size) {
- case 1:
- return (char)val;
- case 2:
- return (short)val;
- case 4:
- return (int)val;
- case 8:
- return (long long)val;
- }
- return val;
-}
-
-static unsigned long long
-get_arg_value(struct tep_event *event, struct tep_filter_arg *arg,
- struct tep_record *record, enum tep_errno *err);
-
-static unsigned long long
-get_exp_value(struct tep_event *event, struct tep_filter_arg *arg,
- struct tep_record *record, enum tep_errno *err)
-{
- unsigned long long lval, rval;
-
- lval = get_arg_value(event, arg->exp.left, record, err);
- rval = get_arg_value(event, arg->exp.right, record, err);
-
- if (*err) {
- /*
- * There was an error, no need to process anymore.
- */
- return 0;
- }
-
- switch (arg->exp.type) {
- case TEP_FILTER_EXP_ADD:
- return lval + rval;
-
- case TEP_FILTER_EXP_SUB:
- return lval - rval;
-
- case TEP_FILTER_EXP_MUL:
- return lval * rval;
-
- case TEP_FILTER_EXP_DIV:
- return lval / rval;
-
- case TEP_FILTER_EXP_MOD:
- return lval % rval;
-
- case TEP_FILTER_EXP_RSHIFT:
- return lval >> rval;
-
- case TEP_FILTER_EXP_LSHIFT:
- return lval << rval;
-
- case TEP_FILTER_EXP_AND:
- return lval & rval;
-
- case TEP_FILTER_EXP_OR:
- return lval | rval;
-
- case TEP_FILTER_EXP_XOR:
- return lval ^ rval;
-
- case TEP_FILTER_EXP_NOT:
- default:
- if (!*err)
- *err = TEP_ERRNO__INVALID_EXP_TYPE;
- }
- return 0;
-}
-
-static unsigned long long
-get_arg_value(struct tep_event *event, struct tep_filter_arg *arg,
- struct tep_record *record, enum tep_errno *err)
-{
- switch (arg->type) {
- case TEP_FILTER_ARG_FIELD:
- return get_value(event, arg->field.field, record);
-
- case TEP_FILTER_ARG_VALUE:
- if (arg->value.type != TEP_FILTER_NUMBER) {
- if (!*err)
- *err = TEP_ERRNO__NOT_A_NUMBER;
- }
- return arg->value.val;
-
- case TEP_FILTER_ARG_EXP:
- return get_exp_value(event, arg, record, err);
-
- default:
- if (!*err)
- *err = TEP_ERRNO__INVALID_ARG_TYPE;
- }
- return 0;
-}
-
-static int test_num(struct tep_event *event, struct tep_filter_arg *arg,
- struct tep_record *record, enum tep_errno *err)
-{
- unsigned long long lval, rval;
-
- lval = get_arg_value(event, arg->num.left, record, err);
- rval = get_arg_value(event, arg->num.right, record, err);
-
- if (*err) {
- /*
- * There was an error, no need to process anymore.
- */
- return 0;
- }
-
- switch (arg->num.type) {
- case TEP_FILTER_CMP_EQ:
- return lval == rval;
-
- case TEP_FILTER_CMP_NE:
- return lval != rval;
-
- case TEP_FILTER_CMP_GT:
- return lval > rval;
-
- case TEP_FILTER_CMP_LT:
- return lval < rval;
-
- case TEP_FILTER_CMP_GE:
- return lval >= rval;
-
- case TEP_FILTER_CMP_LE:
- return lval <= rval;
-
- default:
- if (!*err)
- *err = TEP_ERRNO__ILLEGAL_INTEGER_CMP;
- return 0;
- }
-}
-
-static const char *get_field_str(struct tep_filter_arg *arg, struct tep_record *record)
-{
- struct tep_event *event;
- struct tep_handle *tep;
- unsigned long long addr;
- const char *val = NULL;
- unsigned int size;
- char hex[64];
-
-	/* If the field is a string, use it directly; otherwise convert it */
- if (arg->str.field->flags & TEP_FIELD_IS_STRING) {
- val = record->data + arg->str.field->offset;
- size = arg->str.field->size;
-
- if (arg->str.field->flags & TEP_FIELD_IS_DYNAMIC) {
- addr = *(unsigned int *)val;
- val = record->data + (addr & 0xffff);
- size = addr >> 16;
- }
-
- /*
- * We need to copy the data since we can't be sure the field
- * is null terminated.
- */
- if (*(val + size - 1)) {
- /* copy it */
- memcpy(arg->str.buffer, val, arg->str.field->size);
- /* the buffer is already NULL terminated */
- val = arg->str.buffer;
- }
-
- } else {
- event = arg->str.field->event;
- tep = event->tep;
- addr = get_value(event, arg->str.field, record);
-
- if (arg->str.field->flags & (TEP_FIELD_IS_POINTER | TEP_FIELD_IS_LONG))
- /* convert to a kernel symbol */
- val = tep_find_function(tep, addr);
-
- if (val == NULL) {
- /* just use the hex of the string name */
- snprintf(hex, 64, "0x%llx", addr);
- val = hex;
- }
- }
-
- return val;
-}
-
-static int test_str(struct tep_event *event, struct tep_filter_arg *arg,
- struct tep_record *record, enum tep_errno *err)
-{
- const char *val;
-
- if (arg->str.field == &comm)
- val = get_comm(event, record);
- else
- val = get_field_str(arg, record);
-
- switch (arg->str.type) {
- case TEP_FILTER_CMP_MATCH:
- return strcmp(val, arg->str.val) == 0;
-
- case TEP_FILTER_CMP_NOT_MATCH:
- return strcmp(val, arg->str.val) != 0;
-
- case TEP_FILTER_CMP_REGEX:
- /* Returns zero on match */
- return !regexec(&arg->str.reg, val, 0, NULL, 0);
-
- case TEP_FILTER_CMP_NOT_REGEX:
- return regexec(&arg->str.reg, val, 0, NULL, 0);
-
- default:
- if (!*err)
- *err = TEP_ERRNO__ILLEGAL_STRING_CMP;
- return 0;
- }
-}
-
-static int test_op(struct tep_event *event, struct tep_filter_arg *arg,
- struct tep_record *record, enum tep_errno *err)
-{
- switch (arg->op.type) {
- case TEP_FILTER_OP_AND:
- return test_filter(event, arg->op.left, record, err) &&
- test_filter(event, arg->op.right, record, err);
-
- case TEP_FILTER_OP_OR:
- return test_filter(event, arg->op.left, record, err) ||
- test_filter(event, arg->op.right, record, err);
-
- case TEP_FILTER_OP_NOT:
- return !test_filter(event, arg->op.right, record, err);
-
- default:
- if (!*err)
- *err = TEP_ERRNO__INVALID_OP_TYPE;
- return 0;
- }
-}
-
-static int test_filter(struct tep_event *event, struct tep_filter_arg *arg,
- struct tep_record *record, enum tep_errno *err)
-{
- if (*err) {
- /*
- * There was an error, no need to process anymore.
- */
- return 0;
- }
-
- switch (arg->type) {
- case TEP_FILTER_ARG_BOOLEAN:
- /* easy case */
- return arg->boolean.value;
-
- case TEP_FILTER_ARG_OP:
- return test_op(event, arg, record, err);
-
- case TEP_FILTER_ARG_NUM:
- return test_num(event, arg, record, err);
-
- case TEP_FILTER_ARG_STR:
- return test_str(event, arg, record, err);
-
- case TEP_FILTER_ARG_EXP:
- case TEP_FILTER_ARG_VALUE:
- case TEP_FILTER_ARG_FIELD:
- /*
- * Expressions, fields and values evaluate
- * to true if they return non zero
- */
- return !!get_arg_value(event, arg, record, err);
-
- default:
- if (!*err)
- *err = TEP_ERRNO__INVALID_ARG_TYPE;
- return 0;
- }
-}
-
-/**
- * tep_event_filtered - return true if event has filter
- * @filter: filter struct with filter information
- * @event_id: event id to test if filter exists
- *
- * Returns 1 if a filter was found for @event_id,
- * otherwise 0.
- */
-int tep_event_filtered(struct tep_event_filter *filter, int event_id)
-{
- struct tep_filter_type *filter_type;
-
- if (!filter->filters)
- return 0;
-
- filter_type = find_filter_type(filter, event_id);
-
- return filter_type ? 1 : 0;
-}
-
-/**
- * tep_filter_match - test if a record matches a filter
- * @filter: filter struct with filter information
- * @record: the record to test against the filter
- *
- * Returns: match result or error code (prefixed with TEP_ERRNO__)
- * FILTER_MATCH - filter found for event and @record matches
- * FILTER_MISS - filter found for event and @record does not match
- * FILTER_NOT_FOUND - no filter found for @record's event
- * NO_FILTER - if no filters exist
- * otherwise - error occurred during test
- */
-enum tep_errno tep_filter_match(struct tep_event_filter *filter,
- struct tep_record *record)
-{
- struct tep_handle *tep = filter->tep;
- struct tep_filter_type *filter_type;
- int event_id;
- int ret;
- enum tep_errno err = 0;
-
- filter_init_error_buf(filter);
-
- if (!filter->filters)
- return TEP_ERRNO__NO_FILTER;
-
- event_id = tep_data_type(tep, record);
-
- filter_type = find_filter_type(filter, event_id);
- if (!filter_type)
- return TEP_ERRNO__FILTER_NOT_FOUND;
-
- ret = test_filter(filter_type->event, filter_type->filter, record, &err);
- if (err)
- return err;
-
- return ret ? TEP_ERRNO__FILTER_MATCH : TEP_ERRNO__FILTER_MISS;
-}
-
-static char *op_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
-{
- char *str = NULL;
- char *left = NULL;
- char *right = NULL;
- char *op = NULL;
- int left_val = -1;
- int right_val = -1;
- int val;
-
- switch (arg->op.type) {
- case TEP_FILTER_OP_AND:
- op = "&&";
- /* fall through */
- case TEP_FILTER_OP_OR:
- if (!op)
- op = "||";
-
- left = arg_to_str(filter, arg->op.left);
- right = arg_to_str(filter, arg->op.right);
- if (!left || !right)
- break;
-
- /* Try to consolidate boolean values */
- if (strcmp(left, "TRUE") == 0)
- left_val = 1;
- else if (strcmp(left, "FALSE") == 0)
- left_val = 0;
-
- if (strcmp(right, "TRUE") == 0)
- right_val = 1;
- else if (strcmp(right, "FALSE") == 0)
- right_val = 0;
-
- if (left_val >= 0) {
- if ((arg->op.type == TEP_FILTER_OP_AND && !left_val) ||
- (arg->op.type == TEP_FILTER_OP_OR && left_val)) {
- /* Just return left value */
- str = left;
- left = NULL;
- break;
- }
- if (right_val >= 0) {
- /* just evaluate this. */
- val = 0;
- switch (arg->op.type) {
- case TEP_FILTER_OP_AND:
- val = left_val && right_val;
- break;
- case TEP_FILTER_OP_OR:
- val = left_val || right_val;
- break;
- default:
- break;
- }
- asprintf(&str, val ? "TRUE" : "FALSE");
- break;
- }
- }
- if (right_val >= 0) {
- if ((arg->op.type == TEP_FILTER_OP_AND && !right_val) ||
- (arg->op.type == TEP_FILTER_OP_OR && right_val)) {
- /* Just return right value */
- str = right;
- right = NULL;
- break;
- }
- /* The right value is meaningless */
- str = left;
- left = NULL;
- break;
- }
-
- asprintf(&str, "(%s) %s (%s)", left, op, right);
- break;
-
- case TEP_FILTER_OP_NOT:
- op = "!";
- right = arg_to_str(filter, arg->op.right);
- if (!right)
- break;
-
- /* See if we can consolidate */
- if (strcmp(right, "TRUE") == 0)
- right_val = 1;
- else if (strcmp(right, "FALSE") == 0)
- right_val = 0;
- if (right_val >= 0) {
- /* just return the opposite */
- asprintf(&str, right_val ? "FALSE" : "TRUE");
- break;
- }
- asprintf(&str, "%s(%s)", op, right);
- break;
-
- default:
- /* ?? */
- break;
- }
- free(left);
- free(right);
- return str;
-}
-
-static char *val_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
-{
- char *str = NULL;
-
- asprintf(&str, "%lld", arg->value.val);
-
- return str;
-}
-
-static char *field_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
-{
- return strdup(arg->field.field->name);
-}
-
-static char *exp_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
-{
- char *lstr;
- char *rstr;
- char *op;
- char *str = NULL;
-
- lstr = arg_to_str(filter, arg->exp.left);
- rstr = arg_to_str(filter, arg->exp.right);
- if (!lstr || !rstr)
- goto out;
-
- switch (arg->exp.type) {
- case TEP_FILTER_EXP_ADD:
- op = "+";
- break;
- case TEP_FILTER_EXP_SUB:
- op = "-";
- break;
- case TEP_FILTER_EXP_MUL:
- op = "*";
- break;
- case TEP_FILTER_EXP_DIV:
- op = "/";
- break;
- case TEP_FILTER_EXP_MOD:
- op = "%";
- break;
- case TEP_FILTER_EXP_RSHIFT:
- op = ">>";
- break;
- case TEP_FILTER_EXP_LSHIFT:
- op = "<<";
- break;
- case TEP_FILTER_EXP_AND:
- op = "&";
- break;
- case TEP_FILTER_EXP_OR:
- op = "|";
- break;
- case TEP_FILTER_EXP_XOR:
- op = "^";
- break;
- default:
- op = "[ERROR IN EXPRESSION TYPE]";
- break;
- }
-
- asprintf(&str, "%s %s %s", lstr, op, rstr);
-out:
- free(lstr);
- free(rstr);
-
- return str;
-}
-
-static char *num_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
-{
- char *lstr;
- char *rstr;
- char *str = NULL;
- char *op = NULL;
-
- lstr = arg_to_str(filter, arg->num.left);
- rstr = arg_to_str(filter, arg->num.right);
- if (!lstr || !rstr)
- goto out;
-
- switch (arg->num.type) {
- case TEP_FILTER_CMP_EQ:
- op = "==";
- /* fall through */
- case TEP_FILTER_CMP_NE:
- if (!op)
- op = "!=";
- /* fall through */
- case TEP_FILTER_CMP_GT:
- if (!op)
- op = ">";
- /* fall through */
- case TEP_FILTER_CMP_LT:
- if (!op)
- op = "<";
- /* fall through */
- case TEP_FILTER_CMP_GE:
- if (!op)
- op = ">=";
- /* fall through */
- case TEP_FILTER_CMP_LE:
- if (!op)
- op = "<=";
-
- asprintf(&str, "%s %s %s", lstr, op, rstr);
- break;
-
- default:
- /* ?? */
- break;
- }
-
-out:
- free(lstr);
- free(rstr);
- return str;
-}
-
-static char *str_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
-{
- char *str = NULL;
- char *op = NULL;
-
- switch (arg->str.type) {
- case TEP_FILTER_CMP_MATCH:
- op = "==";
- /* fall through */
- case TEP_FILTER_CMP_NOT_MATCH:
- if (!op)
- op = "!=";
- /* fall through */
- case TEP_FILTER_CMP_REGEX:
- if (!op)
- op = "=~";
- /* fall through */
- case TEP_FILTER_CMP_NOT_REGEX:
- if (!op)
- op = "!~";
-
- asprintf(&str, "%s %s \"%s\"",
- arg->str.field->name, op, arg->str.val);
- break;
-
- default:
- /* ?? */
- break;
- }
- return str;
-}
-
-static char *arg_to_str(struct tep_event_filter *filter, struct tep_filter_arg *arg)
-{
- char *str = NULL;
-
- switch (arg->type) {
- case TEP_FILTER_ARG_BOOLEAN:
- asprintf(&str, arg->boolean.value ? "TRUE" : "FALSE");
- return str;
-
- case TEP_FILTER_ARG_OP:
- return op_to_str(filter, arg);
-
- case TEP_FILTER_ARG_NUM:
- return num_to_str(filter, arg);
-
- case TEP_FILTER_ARG_STR:
- return str_to_str(filter, arg);
-
- case TEP_FILTER_ARG_VALUE:
- return val_to_str(filter, arg);
-
- case TEP_FILTER_ARG_FIELD:
- return field_to_str(filter, arg);
-
- case TEP_FILTER_ARG_EXP:
- return exp_to_str(filter, arg);
-
- default:
- /* ?? */
- return NULL;
- }
-
-}
-
-/**
- * tep_filter_make_string - return a string showing the filter
- * @filter: filter struct with filter information
- * @event_id: the event id to return the filter string with
- *
- * Returns a string that displays the filter contents.
- * This string must be freed with free(str).
- * NULL is returned if no filter is found or allocation failed.
- */
-char *
-tep_filter_make_string(struct tep_event_filter *filter, int event_id)
-{
- struct tep_filter_type *filter_type;
-
- if (!filter->filters)
- return NULL;
-
- filter_type = find_filter_type(filter, event_id);
-
- if (!filter_type)
- return NULL;
-
- return arg_to_str(filter, filter_type->filter);
-}
-
-/**
- * tep_filter_compare - compare two filters and return if they are the same
- * @filter1: Filter to compare with @filter2
- * @filter2: Filter to compare with @filter1
- *
- * Returns:
- * 1 if the two filters hold the same content.
- * 0 if they do not.
- */
-int tep_filter_compare(struct tep_event_filter *filter1, struct tep_event_filter *filter2)
-{
- struct tep_filter_type *filter_type1;
- struct tep_filter_type *filter_type2;
- char *str1, *str2;
- int result;
- int i;
-
- /* Do the easy checks first */
- if (filter1->filters != filter2->filters)
- return 0;
- if (!filter1->filters && !filter2->filters)
- return 1;
-
- /*
- * Now take a look at each of the events to see if they have the same
- * filters to them.
- */
- for (i = 0; i < filter1->filters; i++) {
- filter_type1 = &filter1->event_filters[i];
- filter_type2 = find_filter_type(filter2, filter_type1->event_id);
- if (!filter_type2)
- break;
- if (filter_type1->filter->type != filter_type2->filter->type)
- break;
- /* The best way to compare complex filters is with strings */
- str1 = arg_to_str(filter1, filter_type1->filter);
- str2 = arg_to_str(filter2, filter_type2->filter);
- if (str1 && str2)
- result = strcmp(str1, str2) != 0;
- else
- /* bail out if allocation fails */
- result = 1;
-
- free(str1);
- free(str2);
- if (result)
- break;
- }
-
- if (i < filter1->filters)
- return 0;
- return 1;
-}
-
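As context for the filter API being removed above, here is a minimal usage sketch built only from functions defined in this file, plus tep_filter_alloc(), which is assumed here and is not part of this hunk; the tep handle, the record, and the "sched/sched_wakeup: prio < 100" filter string are likewise illustrative.

	#include <stdio.h>
	#include <stdlib.h>
	#include "event-parse.h"

	/* Sketch only: tep, record and tep_filter_alloc() come from elsewhere. */
	static void check_record(struct tep_handle *tep, struct tep_record *record)
	{
		struct tep_event_filter *filter;
		enum tep_errno ret;
		char err[200];
		char *str;

		filter = tep_filter_alloc(tep);		/* assumed constructor, not in this hunk */
		if (!filter)
			return;

		/* "system/event: expression", as parsed by tep_filter_add_filter_str() above */
		ret = tep_filter_add_filter_str(filter, "sched/sched_wakeup: prio < 100");
		if (ret < 0) {
			tep_filter_strerror(filter, ret, err, sizeof(err));
			fprintf(stderr, "filter error: %s\n", err);
			goto out;
		}

		if (tep_filter_match(filter, record) == TEP_ERRNO__FILTER_MATCH) {
			str = tep_filter_make_string(filter, tep_data_type(tep, record));
			if (str) {
				printf("record matched filter '%s'\n", str);
				free(str);
			}
		}
	out:
		tep_filter_free(filter);	/* also drops the tep reference held by the filter */
	}

The strings produced by tep_filter_make_string() are the same ones tep_filter_add_filter_str() prints when tep->test_filters is set.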
diff --git a/tools/lib/traceevent/parse-utils.c b/tools/lib/traceevent/parse-utils.c
deleted file mode 100644
index e99867111387..000000000000
--- a/tools/lib/traceevent/parse-utils.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-#include <errno.h>
-
-#define __weak __attribute__((weak))
-
-void __vwarning(const char *fmt, va_list ap)
-{
- if (errno)
- perror("libtraceevent");
- errno = 0;
-
- fprintf(stderr, " ");
- vfprintf(stderr, fmt, ap);
-
- fprintf(stderr, "\n");
-}
-
-void __warning(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- __vwarning(fmt, ap);
- va_end(ap);
-}
-
-void __weak warning(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- __vwarning(fmt, ap);
- va_end(ap);
-}
-
-void __vpr_stat(const char *fmt, va_list ap)
-{
- vprintf(fmt, ap);
- printf("\n");
-}
-
-void __pr_stat(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- __vpr_stat(fmt, ap);
- va_end(ap);
-}
-
-void __weak vpr_stat(const char *fmt, va_list ap)
-{
- __vpr_stat(fmt, ap);
-}
-
-void __weak pr_stat(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- __vpr_stat(fmt, ap);
- va_end(ap);
-}
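Because warning() and pr_stat() above are declared __weak, a program that links this code can supply strong definitions to reroute the library's diagnostics. A minimal sketch (the "my-tool" prefix is arbitrary):

	#include <stdarg.h>
	#include <stdio.h>

	/* Strong definition; overrides the __weak warning() above at link time. */
	void warning(const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		fprintf(stderr, "my-tool: libtraceevent: ");
		vfprintf(stderr, fmt, ap);
		fprintf(stderr, "\n");
		va_end(ap);
	}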
diff --git a/tools/lib/traceevent/plugins/Build b/tools/lib/traceevent/plugins/Build
deleted file mode 100644
index 210d26910613..000000000000
--- a/tools/lib/traceevent/plugins/Build
+++ /dev/null
@@ -1,10 +0,0 @@
-plugin_jbd2-y += plugin_jbd2.o
-plugin_hrtimer-y += plugin_hrtimer.o
-plugin_kmem-y += plugin_kmem.o
-plugin_kvm-y += plugin_kvm.o
-plugin_mac80211-y += plugin_mac80211.o
-plugin_sched_switch-y += plugin_sched_switch.o
-plugin_function-y += plugin_function.o
-plugin_xen-y += plugin_xen.o
-plugin_scsi-y += plugin_scsi.o
-plugin_cfg80211-y += plugin_cfg80211.o
diff --git a/tools/lib/traceevent/plugins/Makefile b/tools/lib/traceevent/plugins/Makefile
deleted file mode 100644
index 349bb81482ab..000000000000
--- a/tools/lib/traceevent/plugins/Makefile
+++ /dev/null
@@ -1,223 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-#MAKEFLAGS += --no-print-directory
-
-
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-$(call allow-override,NM,$(CROSS_COMPILE)nm)
-$(call allow-override,PKG_CONFIG,pkg-config)
-
-EXT = -std=gnu99
-INSTALL = install
-
-# Use DESTDIR for installing into a different root directory.
-# This is useful for building a package. The program will be
-# installed in this directory as if it was the root directory.
-# Then the build tool can move it later.
-DESTDIR ?=
-DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
-
-LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
-ifeq ($(LP64), 1)
- libdir_relative_tmp = lib64
-else
- libdir_relative_tmp = lib
-endif
-
-libdir_relative ?= $(libdir_relative_tmp)
-prefix ?= /usr/local
-libdir = $(prefix)/$(libdir_relative)
-
-set_plugin_dir := 1
-
-# Set plugin_dir to the preferred global plugin location
-# If we install under $HOME directory we go under
-# $(HOME)/.local/lib/traceevent/plugins
-#
-# We don't set PLUGIN_DIR when we install under the $HOME
-# directory, because by default the code already looks under
-# $(HOME)/.local/lib/traceevent/plugins.
-#
-ifeq ($(plugin_dir),)
-ifeq ($(prefix),$(HOME))
-override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
-set_plugin_dir := 0
-else
-override plugin_dir = $(libdir)/traceevent/plugins
-endif
-endif
-
-ifeq ($(set_plugin_dir),1)
-PLUGIN_DIR = -DPLUGIN_DIR="$(plugin_dir)"
-PLUGIN_DIR_SQ = '$(subst ','\'',$(PLUGIN_DIR))'
-endif
-
-include ../../../scripts/Makefile.include
-
-# copy a bit from Linux kbuild
-
-ifeq ("$(origin V)", "command line")
- VERBOSE = $(V)
-endif
-ifndef VERBOSE
- VERBOSE = 0
-endif
-
-ifeq ($(srctree),)
-srctree := $(patsubst %/,%,$(dir $(CURDIR)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-srctree := $(patsubst %/,%,$(dir $(srctree)))
-#$(info Determined 'srctree' to be $(srctree))
-endif
-
-export prefix libdir src obj
-
-# Shell quotes
-plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
-
-CONFIG_INCLUDES =
-CONFIG_LIBS =
-CONFIG_FLAGS =
-
-OBJ = $@
-N =
-
-INCLUDES = -I. -I.. -I $(srctree)/tools/include $(CONFIG_INCLUDES)
-
-# Set compile option CFLAGS
-ifdef EXTRA_CFLAGS
- CFLAGS := $(EXTRA_CFLAGS)
-else
- CFLAGS := -g -Wall
-endif
-
-# Append required CFLAGS
-override CFLAGS += -fPIC
-override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ)
-override CFLAGS += $(udis86-flags) -D_GNU_SOURCE
-
-ifeq ($(VERBOSE),1)
- Q =
-else
- Q = @
-endif
-
-# Disable command line variables (CFLAGS) override from top
-# level Makefile (perf), otherwise build Makefile will get
-# the same command line setup.
-MAKEOVERRIDES=
-
-export srctree OUTPUT CC LD CFLAGS V
-
-build := -f $(srctree)/tools/build/Makefile.build dir=. obj
-
-DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list
-
-PLUGINS = plugin_jbd2.so
-PLUGINS += plugin_hrtimer.so
-PLUGINS += plugin_kmem.so
-PLUGINS += plugin_kvm.so
-PLUGINS += plugin_mac80211.so
-PLUGINS += plugin_sched_switch.so
-PLUGINS += plugin_function.so
-PLUGINS += plugin_xen.so
-PLUGINS += plugin_scsi.so
-PLUGINS += plugin_cfg80211.so
-
-PLUGINS := $(addprefix $(OUTPUT),$(PLUGINS))
-PLUGINS_IN := $(PLUGINS:.so=-in.o)
-
-plugins: $(PLUGINS) $(DYNAMIC_LIST_FILE)
-
-__plugin_obj = $(notdir $@)
- plugin_obj = $(__plugin_obj:-in.o=)
-
-$(PLUGINS_IN): force
- $(Q)$(MAKE) $(build)=$(plugin_obj)
-
-$(OUTPUT)libtraceevent-dynamic-list: $(PLUGINS)
- $(QUIET_GEN)$(call do_generate_dynamic_list_file, $(PLUGINS), $@)
-
-$(OUTPUT)%.so: $(OUTPUT)%-in.o
- $(QUIET_LINK)$(CC) $(CFLAGS) -shared $(LDFLAGS) -nostartfiles -o $@ $^
-
-define update_dir
- (echo $1 > $@.tmp; \
- if [ -r $@ ] && cmp -s $@ $@.tmp; then \
- rm -f $@.tmp; \
- else \
- echo ' UPDATE $@'; \
- mv -f $@.tmp $@; \
- fi);
-endef
-
-tags: force
- $(RM) tags
- find . -name '*.[ch]' | xargs ctags --extra=+f --c-kinds=+px \
- --regex-c++='/_PE\(([^,)]*).*/TEP_ERRNO__\1/'
-
-TAGS: force
- $(RM) TAGS
- find . -name '*.[ch]' | xargs etags \
- --regex='/_PE(\([^,)]*\).*/TEP_ERRNO__\1/'
-
-define do_install_mkdir
- if [ ! -d '$(DESTDIR_SQ)$1' ]; then \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
- fi
-endef
-
-define do_install
- $(call do_install_mkdir,$2); \
- $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
-endef
-
-define do_install_plugins
- for plugin in $1; do \
- $(call do_install,$$plugin,$(plugin_dir_SQ)); \
- done
-endef
-
-define do_generate_dynamic_list_file
- symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
- xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
- if [ "$$symbol_type" = "U W" ];then \
- (echo '{'; \
- $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
- echo '};'; \
- ) > $2; \
- else \
- (echo Either missing one of [$1] or bad version of $(NM)) 1>&2;\
- fi
-endef
-
-install: $(PLUGINS)
- $(call QUIET_INSTALL, trace_plugins) \
- $(call do_install_plugins, $(PLUGINS))
-
-clean:
- $(call QUIET_CLEAN, trace_plugins) \
- $(RM) *.o *~ $(TARGETS) *.a *.so $(VERSION_FILES) .*.d .*.cmd; \
- $(RM) $(OUTPUT)libtraceevent-dynamic-list \
- $(RM) TRACEEVENT-CFLAGS tags TAGS;
-
-PHONY += force plugins
-force:
-
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable so we can use it in if_changed and friends.
-.PHONY: $(PHONY)
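A few hypothetical invocations, shown only to illustrate the variables this Makefile honours (V, prefix, DESTDIR, plugin_dir); in the kernel tree the plugin build is typically driven from the parent traceevent Makefile, and the paths below are examples rather than defaults:

	make V=1                                      # echo full commands while building the plugin .so files
	make prefix=/usr DESTDIR=/tmp/stage install   # staged install, e.g. for packaging
	make plugin_dir=/opt/trace/plugins install    # install the plugins into a custom directory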
diff --git a/tools/lib/traceevent/plugins/plugin_cfg80211.c b/tools/lib/traceevent/plugins/plugin_cfg80211.c
deleted file mode 100644
index 3d43b56a6c98..000000000000
--- a/tools/lib/traceevent/plugins/plugin_cfg80211.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
-#include <endian.h>
-#include "event-parse.h"
-
-/*
- * From glibc endian.h, for older systems where it is not present, e.g.: RHEL5,
- * Fedora6.
- */
-#ifndef le16toh
-# if __BYTE_ORDER == __LITTLE_ENDIAN
-# define le16toh(x) (x)
-# else
-# define le16toh(x) __bswap_16 (x)
-# endif
-#endif
-
-
-static unsigned long long
-process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
-{
- uint16_t *val = (uint16_t *) (unsigned long) args[0];
- return val ? (long long) le16toh(*val) : 0;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_print_function(tep,
- process___le16_to_cpup,
- TEP_FUNC_ARG_INT,
- "__le16_to_cpup",
- TEP_FUNC_ARG_PTR,
- TEP_FUNC_ARG_VOID);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_print_function(tep, process___le16_to_cpup,
- "__le16_to_cpup");
-}
diff --git a/tools/lib/traceevent/plugins/plugin_function.c b/tools/lib/traceevent/plugins/plugin_function.c
deleted file mode 100644
index 7770fcb78e0f..000000000000
--- a/tools/lib/traceevent/plugins/plugin_function.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "event-parse.h"
-#include "event-utils.h"
-#include "trace-seq.h"
-
-static struct func_stack {
- int size;
- char **stack;
-} *fstack;
-
-static int cpus = -1;
-
-#define STK_BLK 10
-
-struct tep_plugin_option plugin_options[] =
-{
- {
- .name = "parent",
- .plugin_alias = "ftrace",
- .description =
- "Print parent of functions for function events",
- },
- {
- .name = "indent",
- .plugin_alias = "ftrace",
- .description =
- "Try to show function call indents, based on parents",
- .set = 1,
- },
- {
- .name = NULL,
- }
-};
-
-static struct tep_plugin_option *ftrace_parent = &plugin_options[0];
-static struct tep_plugin_option *ftrace_indent = &plugin_options[1];
-
-static void add_child(struct func_stack *stack, const char *child, int pos)
-{
- int i;
-
- if (!child)
- return;
-
- if (pos < stack->size)
- free(stack->stack[pos]);
- else {
- char **ptr;
-
- ptr = realloc(stack->stack, sizeof(char *) *
- (stack->size + STK_BLK));
- if (!ptr) {
- warning("could not allocate plugin memory\n");
- return;
- }
-
- stack->stack = ptr;
-
- for (i = stack->size; i < stack->size + STK_BLK; i++)
- stack->stack[i] = NULL;
- stack->size += STK_BLK;
- }
-
- stack->stack[pos] = strdup(child);
-}
-
-static int add_and_get_index(const char *parent, const char *child, int cpu)
-{
- int i;
-
- if (cpu < 0)
- return 0;
-
- if (cpu > cpus) {
- struct func_stack *ptr;
-
- ptr = realloc(fstack, sizeof(*fstack) * (cpu + 1));
- if (!ptr) {
- warning("could not allocate plugin memory\n");
- return 0;
- }
-
- fstack = ptr;
-
- /* Account for holes in the cpu count */
- for (i = cpus + 1; i <= cpu; i++)
- memset(&fstack[i], 0, sizeof(fstack[i]));
- cpus = cpu;
- }
-
- for (i = 0; i < fstack[cpu].size && fstack[cpu].stack[i]; i++) {
- if (strcmp(parent, fstack[cpu].stack[i]) == 0) {
- add_child(&fstack[cpu], child, i+1);
- return i;
- }
- }
-
- /* Not found */
- add_child(&fstack[cpu], parent, 0);
- add_child(&fstack[cpu], child, 1);
- return 0;
-}
-
-static int function_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, void *context)
-{
- struct tep_handle *tep = event->tep;
- unsigned long long function;
- unsigned long long pfunction;
- const char *func;
- const char *parent;
- int index = 0;
-
- if (tep_get_field_val(s, event, "ip", record, &function, 1))
- return trace_seq_putc(s, '!');
-
- func = tep_find_function(tep, function);
-
- if (tep_get_field_val(s, event, "parent_ip", record, &pfunction, 1))
- return trace_seq_putc(s, '!');
-
- parent = tep_find_function(tep, pfunction);
-
- if (parent && ftrace_indent->set)
- index = add_and_get_index(parent, func, record->cpu);
-
- trace_seq_printf(s, "%*s", index*3, "");
-
- if (func)
- trace_seq_printf(s, "%s", func);
- else
- trace_seq_printf(s, "0x%llx", function);
-
- if (ftrace_parent->set) {
- trace_seq_printf(s, " <-- ");
- if (parent)
- trace_seq_printf(s, "%s", parent);
- else
- trace_seq_printf(s, "0x%llx", pfunction);
- }
-
- return 0;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_event_handler(tep, -1, "ftrace", "function",
- function_handler, NULL);
-
- tep_plugin_add_options("ftrace", plugin_options);
-
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- int i, x;
-
- tep_unregister_event_handler(tep, -1, "ftrace", "function",
- function_handler, NULL);
-
- for (i = 0; i <= cpus; i++) {
- for (x = 0; x < fstack[i].size && fstack[i].stack[x]; x++)
- free(fstack[i].stack[x]);
- free(fstack[i].stack);
- }
-
- tep_plugin_remove_options(plugin_options);
-
- free(fstack);
- fstack = NULL;
- cpus = -1;
-}
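The same registration call used by the function plugin above is also available to applications directly. A minimal sketch of a custom handler using only calls that appear in this file (tep_register_event_handler(), tep_get_field_val(), trace_seq_printf(), trace_seq_putc()); the "sched"/"sched_switch" event and the "next_pid" field are illustrative:

	#include "event-parse.h"
	#include "trace-seq.h"

	static int my_switch_handler(struct trace_seq *s, struct tep_record *record,
				     struct tep_event *event, void *context)
	{
		unsigned long long next_pid;

		/* Same error convention as function_handler() above */
		if (tep_get_field_val(s, event, "next_pid", record, &next_pid, 1))
			return trace_seq_putc(s, '!');

		trace_seq_printf(s, "next_pid=%llu", next_pid);
		return 0;
	}

	/* Registered the same way as the function handler, e.g. from a loader:
	 *   tep_register_event_handler(tep, -1, "sched", "sched_switch",
	 *                              my_switch_handler, NULL);
	 */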
diff --git a/tools/lib/traceevent/plugins/plugin_hrtimer.c b/tools/lib/traceevent/plugins/plugin_hrtimer.c
deleted file mode 100644
index bb434e0ed03a..000000000000
--- a/tools/lib/traceevent/plugins/plugin_hrtimer.c
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- * Copyright (C) 2009 Johannes Berg <johannes@sipsolutions.net>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "event-parse.h"
-#include "trace-seq.h"
-
-static int timer_expire_handler(struct trace_seq *s,
- struct tep_record *record,
- struct tep_event *event, void *context)
-{
- trace_seq_printf(s, "hrtimer=");
-
- if (tep_print_num_field(s, "0x%llx", event, "timer",
- record, 0) == -1)
- tep_print_num_field(s, "0x%llx", event, "hrtimer",
- record, 1);
-
- trace_seq_printf(s, " now=");
-
- tep_print_num_field(s, "%llu", event, "now", record, 1);
-
- tep_print_func_field(s, " function=%s", event, "function",
- record, 0);
- return 0;
-}
-
-static int timer_start_handler(struct trace_seq *s,
- struct tep_record *record,
- struct tep_event *event, void *context)
-{
- trace_seq_printf(s, "hrtimer=");
-
- if (tep_print_num_field(s, "0x%llx", event, "timer",
- record, 0) == -1)
- tep_print_num_field(s, "0x%llx", event, "hrtimer",
- record, 1);
-
- tep_print_func_field(s, " function=%s", event, "function",
- record, 0);
-
- trace_seq_printf(s, " expires=");
- tep_print_num_field(s, "%llu", event, "expires", record, 1);
-
- trace_seq_printf(s, " softexpires=");
- tep_print_num_field(s, "%llu", event, "softexpires", record, 1);
- return 0;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_event_handler(tep, -1,
- "timer", "hrtimer_expire_entry",
- timer_expire_handler, NULL);
-
- tep_register_event_handler(tep, -1, "timer", "hrtimer_start",
- timer_start_handler, NULL);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_event_handler(tep, -1,
- "timer", "hrtimer_expire_entry",
- timer_expire_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "timer", "hrtimer_start",
- timer_start_handler, NULL);
-}
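Both handlers above rely on tep_print_num_field() returning -1 when a field is absent, falling back from the "timer" field name to "hrtimer". A sketch of that fallback factored into a helper (a hypothetical refactoring, not part of the original plugin):

	#include "event-parse.h"
	#include "trace-seq.h"

	static void print_hrtimer_ptr(struct trace_seq *s, struct tep_event *event,
				      struct tep_record *record)
	{
		trace_seq_printf(s, "hrtimer=");

		/* try "timer" quietly first; only warn if "hrtimer" is missing too */
		if (tep_print_num_field(s, "0x%llx", event, "timer", record, 0) == -1)
			tep_print_num_field(s, "0x%llx", event, "hrtimer", record, 1);
	}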
diff --git a/tools/lib/traceevent/plugins/plugin_jbd2.c b/tools/lib/traceevent/plugins/plugin_jbd2.c
deleted file mode 100644
index 04fc125f38cb..000000000000
--- a/tools/lib/traceevent/plugins/plugin_jbd2.c
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright (C) 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "event-parse.h"
-#include "trace-seq.h"
-
-#define MINORBITS 20
-#define MINORMASK ((1U << MINORBITS) - 1)
-
-#define MAJOR(dev) ((unsigned int) ((dev) >> MINORBITS))
-#define MINOR(dev) ((unsigned int) ((dev) & MINORMASK))
-
-static unsigned long long
-process_jbd2_dev_to_name(struct trace_seq *s, unsigned long long *args)
-{
- unsigned int dev = args[0];
-
- trace_seq_printf(s, "%d:%d", MAJOR(dev), MINOR(dev));
- return 0;
-}
-
-static unsigned long long
-process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args)
-{
- unsigned long long jiffies = args[0];
-
- trace_seq_printf(s, "%lld", jiffies);
- return jiffies;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_print_function(tep,
- process_jbd2_dev_to_name,
- TEP_FUNC_ARG_STRING,
- "jbd2_dev_to_name",
- TEP_FUNC_ARG_INT,
- TEP_FUNC_ARG_VOID);
-
- tep_register_print_function(tep,
- process_jiffies_to_msecs,
- TEP_FUNC_ARG_LONG,
- "jiffies_to_msecs",
- TEP_FUNC_ARG_LONG,
- TEP_FUNC_ARG_VOID);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_print_function(tep, process_jbd2_dev_to_name,
- "jbd2_dev_to_name");
-
- tep_unregister_print_function(tep, process_jiffies_to_msecs,
- "jiffies_to_msecs");
-}
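For a concrete reading of the MAJOR()/MINOR() split above: with MINORBITS set to 20, an illustrative dev value of 0x00800001 gives MAJOR = 0x00800001 >> 20 = 8 and MINOR = 0x00800001 & 0xfffff = 1, so process_jbd2_dev_to_name() writes "8:1" into the trace sequence.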
diff --git a/tools/lib/traceevent/plugins/plugin_kmem.c b/tools/lib/traceevent/plugins/plugin_kmem.c
deleted file mode 100644
index edaec5d962c3..000000000000
--- a/tools/lib/traceevent/plugins/plugin_kmem.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "event-parse.h"
-#include "trace-seq.h"
-
-static int call_site_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, void *context)
-{
- struct tep_format_field *field;
- unsigned long long val, addr;
- void *data = record->data;
- const char *func;
-
- field = tep_find_field(event, "call_site");
- if (!field)
- return 1;
-
- if (tep_read_number_field(field, data, &val))
- return 1;
-
- func = tep_find_function(event->tep, val);
- if (!func)
- return 1;
-
- addr = tep_find_function_address(event->tep, val);
-
- trace_seq_printf(s, "(%s+0x%x) ", func, (int)(val - addr));
- return 1;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_event_handler(tep, -1, "kmem", "kfree",
- call_site_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kmem", "kmalloc",
- call_site_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kmem", "kmalloc_node",
- call_site_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kmem", "kmem_cache_alloc",
- call_site_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kmem",
- "kmem_cache_alloc_node",
- call_site_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kmem", "kmem_cache_free",
- call_site_handler, NULL);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_event_handler(tep, -1, "kmem", "kfree",
- call_site_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kmem", "kmalloc",
- call_site_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kmem", "kmalloc_node",
- call_site_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_alloc",
- call_site_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kmem",
- "kmem_cache_alloc_node",
- call_site_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kmem", "kmem_cache_free",
- call_site_handler, NULL);
-}
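To make the call-site formatting above concrete: when the event's call_site value resolves to a known symbol, say an illustrative my_alloc_caller whose start address lies 0x40 bytes below the recorded value, tep_find_function() returns the name, tep_find_function_address() returns the symbol's start, and the handler prefixes the event output with "(my_alloc_caller+0x40) ". When the address cannot be resolved, the handler prints nothing extra.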
diff --git a/tools/lib/traceevent/plugins/plugin_kvm.c b/tools/lib/traceevent/plugins/plugin_kvm.c
deleted file mode 100644
index c8e623065a7e..000000000000
--- a/tools/lib/traceevent/plugins/plugin_kvm.c
+++ /dev/null
@@ -1,523 +0,0 @@
-/*
- * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdint.h>
-
-#include "event-parse.h"
-#include "trace-seq.h"
-
-#ifdef HAVE_UDIS86
-
-#include <udis86.h>
-
-static ud_t ud;
-
-static void init_disassembler(void)
-{
- ud_init(&ud);
- ud_set_syntax(&ud, UD_SYN_ATT);
-}
-
-static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
- int cr0_pe, int eflags_vm,
- int cs_d, int cs_l)
-{
- int mode;
-
- if (!cr0_pe)
- mode = 16;
- else if (eflags_vm)
- mode = 16;
- else if (cs_l)
- mode = 64;
- else if (cs_d)
- mode = 32;
- else
- mode = 16;
-
- ud_set_pc(&ud, rip);
- ud_set_mode(&ud, mode);
- ud_set_input_buffer(&ud, insn, len);
- ud_disassemble(&ud);
- return ud_insn_asm(&ud);
-}
-
-#else
-
-static void init_disassembler(void)
-{
-}
-
-static const char *disassemble(unsigned char *insn, int len, uint64_t rip,
- int cr0_pe, int eflags_vm,
- int cs_d, int cs_l)
-{
- static char out[15*3+1];
- int i;
-
- for (i = 0; i < len; ++i)
- sprintf(out + i * 3, "%02x ", insn[i]);
- out[len*3-1] = '\0';
- return out;
-}
-
-#endif
-
-
-#define VMX_EXIT_REASONS \
- _ER(EXCEPTION_NMI, 0) \
- _ER(EXTERNAL_INTERRUPT, 1) \
- _ER(TRIPLE_FAULT, 2) \
- _ER(PENDING_INTERRUPT, 7) \
- _ER(NMI_WINDOW, 8) \
- _ER(TASK_SWITCH, 9) \
- _ER(CPUID, 10) \
- _ER(HLT, 12) \
- _ER(INVD, 13) \
- _ER(INVLPG, 14) \
- _ER(RDPMC, 15) \
- _ER(RDTSC, 16) \
- _ER(VMCALL, 18) \
- _ER(VMCLEAR, 19) \
- _ER(VMLAUNCH, 20) \
- _ER(VMPTRLD, 21) \
- _ER(VMPTRST, 22) \
- _ER(VMREAD, 23) \
- _ER(VMRESUME, 24) \
- _ER(VMWRITE, 25) \
- _ER(VMOFF, 26) \
- _ER(VMON, 27) \
- _ER(CR_ACCESS, 28) \
- _ER(DR_ACCESS, 29) \
- _ER(IO_INSTRUCTION, 30) \
- _ER(MSR_READ, 31) \
- _ER(MSR_WRITE, 32) \
- _ER(MWAIT_INSTRUCTION, 36) \
- _ER(MONITOR_INSTRUCTION, 39) \
- _ER(PAUSE_INSTRUCTION, 40) \
- _ER(MCE_DURING_VMENTRY, 41) \
- _ER(TPR_BELOW_THRESHOLD, 43) \
- _ER(APIC_ACCESS, 44) \
- _ER(EOI_INDUCED, 45) \
- _ER(EPT_VIOLATION, 48) \
- _ER(EPT_MISCONFIG, 49) \
- _ER(INVEPT, 50) \
- _ER(PREEMPTION_TIMER, 52) \
- _ER(WBINVD, 54) \
- _ER(XSETBV, 55) \
- _ER(APIC_WRITE, 56) \
- _ER(INVPCID, 58) \
- _ER(PML_FULL, 62) \
- _ER(XSAVES, 63) \
- _ER(XRSTORS, 64)
-
-#define SVM_EXIT_REASONS \
- _ER(EXIT_READ_CR0, 0x000) \
- _ER(EXIT_READ_CR3, 0x003) \
- _ER(EXIT_READ_CR4, 0x004) \
- _ER(EXIT_READ_CR8, 0x008) \
- _ER(EXIT_WRITE_CR0, 0x010) \
- _ER(EXIT_WRITE_CR3, 0x013) \
- _ER(EXIT_WRITE_CR4, 0x014) \
- _ER(EXIT_WRITE_CR8, 0x018) \
- _ER(EXIT_READ_DR0, 0x020) \
- _ER(EXIT_READ_DR1, 0x021) \
- _ER(EXIT_READ_DR2, 0x022) \
- _ER(EXIT_READ_DR3, 0x023) \
- _ER(EXIT_READ_DR4, 0x024) \
- _ER(EXIT_READ_DR5, 0x025) \
- _ER(EXIT_READ_DR6, 0x026) \
- _ER(EXIT_READ_DR7, 0x027) \
- _ER(EXIT_WRITE_DR0, 0x030) \
- _ER(EXIT_WRITE_DR1, 0x031) \
- _ER(EXIT_WRITE_DR2, 0x032) \
- _ER(EXIT_WRITE_DR3, 0x033) \
- _ER(EXIT_WRITE_DR4, 0x034) \
- _ER(EXIT_WRITE_DR5, 0x035) \
- _ER(EXIT_WRITE_DR6, 0x036) \
- _ER(EXIT_WRITE_DR7, 0x037) \
- _ER(EXIT_EXCP_BASE, 0x040) \
- _ER(EXIT_INTR, 0x060) \
- _ER(EXIT_NMI, 0x061) \
- _ER(EXIT_SMI, 0x062) \
- _ER(EXIT_INIT, 0x063) \
- _ER(EXIT_VINTR, 0x064) \
- _ER(EXIT_CR0_SEL_WRITE, 0x065) \
- _ER(EXIT_IDTR_READ, 0x066) \
- _ER(EXIT_GDTR_READ, 0x067) \
- _ER(EXIT_LDTR_READ, 0x068) \
- _ER(EXIT_TR_READ, 0x069) \
- _ER(EXIT_IDTR_WRITE, 0x06a) \
- _ER(EXIT_GDTR_WRITE, 0x06b) \
- _ER(EXIT_LDTR_WRITE, 0x06c) \
- _ER(EXIT_TR_WRITE, 0x06d) \
- _ER(EXIT_RDTSC, 0x06e) \
- _ER(EXIT_RDPMC, 0x06f) \
- _ER(EXIT_PUSHF, 0x070) \
- _ER(EXIT_POPF, 0x071) \
- _ER(EXIT_CPUID, 0x072) \
- _ER(EXIT_RSM, 0x073) \
- _ER(EXIT_IRET, 0x074) \
- _ER(EXIT_SWINT, 0x075) \
- _ER(EXIT_INVD, 0x076) \
- _ER(EXIT_PAUSE, 0x077) \
- _ER(EXIT_HLT, 0x078) \
- _ER(EXIT_INVLPG, 0x079) \
- _ER(EXIT_INVLPGA, 0x07a) \
- _ER(EXIT_IOIO, 0x07b) \
- _ER(EXIT_MSR, 0x07c) \
- _ER(EXIT_TASK_SWITCH, 0x07d) \
- _ER(EXIT_FERR_FREEZE, 0x07e) \
- _ER(EXIT_SHUTDOWN, 0x07f) \
- _ER(EXIT_VMRUN, 0x080) \
- _ER(EXIT_VMMCALL, 0x081) \
- _ER(EXIT_VMLOAD, 0x082) \
- _ER(EXIT_VMSAVE, 0x083) \
- _ER(EXIT_STGI, 0x084) \
- _ER(EXIT_CLGI, 0x085) \
- _ER(EXIT_SKINIT, 0x086) \
- _ER(EXIT_RDTSCP, 0x087) \
- _ER(EXIT_ICEBP, 0x088) \
- _ER(EXIT_WBINVD, 0x089) \
- _ER(EXIT_MONITOR, 0x08a) \
- _ER(EXIT_MWAIT, 0x08b) \
- _ER(EXIT_MWAIT_COND, 0x08c) \
- _ER(EXIT_NPF, 0x400) \
- _ER(EXIT_ERR, -1)
-
-#define _ER(reason, val) { #reason, val },
-struct str_values {
- const char *str;
- int val;
-};
-
-static struct str_values vmx_exit_reasons[] = {
- VMX_EXIT_REASONS
- { NULL, -1}
-};
-
-static struct str_values svm_exit_reasons[] = {
- SVM_EXIT_REASONS
- { NULL, -1}
-};
-
-static struct isa_exit_reasons {
- unsigned isa;
- struct str_values *strings;
-} isa_exit_reasons[] = {
- { .isa = 1, .strings = vmx_exit_reasons },
- { .isa = 2, .strings = svm_exit_reasons },
- { }
-};
-
-static const char *find_exit_reason(unsigned isa, int val)
-{
- struct str_values *strings = NULL;
- int i;
-
- for (i = 0; isa_exit_reasons[i].strings; ++i)
- if (isa_exit_reasons[i].isa == isa) {
- strings = isa_exit_reasons[i].strings;
- break;
- }
- if (!strings)
- return "UNKNOWN-ISA";
- for (i = 0; strings[i].val >= 0; i++)
- if (strings[i].val == val)
- break;
-
- return strings[i].str;
-}
-
-static int print_exit_reason(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, const char *field)
-{
- unsigned long long isa;
- unsigned long long val;
- const char *reason;
-
- if (tep_get_field_val(s, event, field, record, &val, 1) < 0)
- return -1;
-
- if (tep_get_field_val(s, event, "isa", record, &isa, 0) < 0)
- isa = 1;
-
- reason = find_exit_reason(isa, val);
- if (reason)
- trace_seq_printf(s, "reason %s", reason);
- else
- trace_seq_printf(s, "reason UNKNOWN (%llu)", val);
- return 0;
-}
-
-static int kvm_exit_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, void *context)
-{
- unsigned long long info1 = 0, info2 = 0;
-
- if (print_exit_reason(s, record, event, "exit_reason") < 0)
- return -1;
-
- tep_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1);
-
- if (tep_get_field_val(s, event, "info1", record, &info1, 0) >= 0
- && tep_get_field_val(s, event, "info2", record, &info2, 0) >= 0)
- trace_seq_printf(s, " info %llx %llx", info1, info2);
-
- return 0;
-}
-
-#define KVM_EMUL_INSN_F_CR0_PE (1 << 0)
-#define KVM_EMUL_INSN_F_EFL_VM (1 << 1)
-#define KVM_EMUL_INSN_F_CS_D (1 << 2)
-#define KVM_EMUL_INSN_F_CS_L (1 << 3)
-
-static int kvm_emulate_insn_handler(struct trace_seq *s,
- struct tep_record *record,
- struct tep_event *event, void *context)
-{
- unsigned long long rip, csbase, len, flags, failed;
- int llen;
- uint8_t *insn;
- const char *disasm;
-
- if (tep_get_field_val(s, event, "rip", record, &rip, 1) < 0)
- return -1;
-
- if (tep_get_field_val(s, event, "csbase", record, &csbase, 1) < 0)
- return -1;
-
- if (tep_get_field_val(s, event, "len", record, &len, 1) < 0)
- return -1;
-
- if (tep_get_field_val(s, event, "flags", record, &flags, 1) < 0)
- return -1;
-
- if (tep_get_field_val(s, event, "failed", record, &failed, 1) < 0)
- return -1;
-
- insn = tep_get_field_raw(s, event, "insn", record, &llen, 1);
- if (!insn)
- return -1;
-
- disasm = disassemble(insn, len, rip,
- flags & KVM_EMUL_INSN_F_CR0_PE,
- flags & KVM_EMUL_INSN_F_EFL_VM,
- flags & KVM_EMUL_INSN_F_CS_D,
- flags & KVM_EMUL_INSN_F_CS_L);
-
- trace_seq_printf(s, "%llx:%llx: %s%s", csbase, rip, disasm,
- failed ? " FAIL" : "");
- return 0;
-}
-
-
-static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, void *context)
-{
- if (print_exit_reason(s, record, event, "exit_code") < 0)
- return -1;
-
- tep_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1);
- tep_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1);
- tep_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1);
- tep_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1);
-
- return 0;
-}
-
-static int kvm_nested_vmexit_handler(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, void *context)
-{
- tep_print_num_field(s, "rip %llx ", event, "rip", record, 1);
-
- return kvm_nested_vmexit_inject_handler(s, record, event, context);
-}
-
-union kvm_mmu_page_role {
- unsigned word;
- struct {
- unsigned level:4;
- unsigned cr4_pae:1;
- unsigned quadrant:2;
- unsigned direct:1;
- unsigned access:3;
- unsigned invalid:1;
- unsigned nxe:1;
- unsigned cr0_wp:1;
- unsigned smep_and_not_wp:1;
- unsigned smap_and_not_wp:1;
- unsigned pad_for_nice_hex_output:8;
- unsigned smm:8;
- };
-};
-
-static int kvm_mmu_print_role(struct trace_seq *s, struct tep_record *record,
- struct tep_event *event, void *context)
-{
- unsigned long long val;
- static const char *access_str[] = {
- "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"
- };
- union kvm_mmu_page_role role;
-
- if (tep_get_field_val(s, event, "role", record, &val, 1) < 0)
- return -1;
-
- role.word = (int)val;
-
- /*
- * We can only use the structure if file is of the same
- * endianness.
- */
- if (tep_is_file_bigendian(event->tep) ==
- tep_is_local_bigendian(event->tep)) {
-
- trace_seq_printf(s, "%u q%u%s %s%s %spae %snxe %swp%s%s%s",
- role.level,
- role.quadrant,
- role.direct ? " direct" : "",
- access_str[role.access],
- role.invalid ? " invalid" : "",
- role.cr4_pae ? "" : "!",
- role.nxe ? "" : "!",
- role.cr0_wp ? "" : "!",
- role.smep_and_not_wp ? " smep" : "",
- role.smap_and_not_wp ? " smap" : "",
- role.smm ? " smm" : "");
- } else
- trace_seq_printf(s, "WORD: %08x", role.word);
-
- tep_print_num_field(s, " root %u ", event,
- "root_count", record, 1);
-
- if (tep_get_field_val(s, event, "unsync", record, &val, 1) < 0)
- return -1;
-
- trace_seq_printf(s, "%s%c", val ? "unsync" : "sync", 0);
- return 0;
-}
-
-static int kvm_mmu_get_page_handler(struct trace_seq *s,
- struct tep_record *record,
- struct tep_event *event, void *context)
-{
- unsigned long long val;
-
- if (tep_get_field_val(s, event, "created", record, &val, 1) < 0)
- return -1;
-
- trace_seq_printf(s, "%s ", val ? "new" : "existing");
-
- if (tep_get_field_val(s, event, "gfn", record, &val, 1) < 0)
- return -1;
-
- trace_seq_printf(s, "sp gfn %llx ", val);
- return kvm_mmu_print_role(s, record, event, context);
-}
-
-#define PT_WRITABLE_SHIFT 1
-#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
-
-static unsigned long long
-process_is_writable_pte(struct trace_seq *s, unsigned long long *args)
-{
- unsigned long pte = args[0];
- return pte & PT_WRITABLE_MASK;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- init_disassembler();
-
- tep_register_event_handler(tep, -1, "kvm", "kvm_exit",
- kvm_exit_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
- kvm_emulate_insn_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
- kvm_nested_vmexit_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
- kvm_nested_vmexit_inject_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
- kvm_mmu_get_page_handler, NULL);
-
- tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
- kvm_mmu_print_role, NULL);
-
- tep_register_event_handler(tep, -1,
- "kvmmmu", "kvm_mmu_unsync_page",
- kvm_mmu_print_role, NULL);
-
- tep_register_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
- kvm_mmu_print_role, NULL);
-
- tep_register_event_handler(tep, -1, "kvmmmu",
- "kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
- NULL);
-
- tep_register_print_function(tep,
- process_is_writable_pte,
- TEP_FUNC_ARG_INT,
- "is_writable_pte",
- TEP_FUNC_ARG_LONG,
- TEP_FUNC_ARG_VOID);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_event_handler(tep, -1, "kvm", "kvm_exit",
- kvm_exit_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kvm", "kvm_emulate_insn",
- kvm_emulate_insn_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit",
- kvm_nested_vmexit_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kvm", "kvm_nested_vmexit_inject",
- kvm_nested_vmexit_inject_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_get_page",
- kvm_mmu_get_page_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_sync_page",
- kvm_mmu_print_role, NULL);
-
- tep_unregister_event_handler(tep, -1,
- "kvmmmu", "kvm_mmu_unsync_page",
- kvm_mmu_print_role, NULL);
-
- tep_unregister_event_handler(tep, -1, "kvmmmu", "kvm_mmu_zap_page",
- kvm_mmu_print_role, NULL);
-
- tep_unregister_event_handler(tep, -1, "kvmmmu",
- "kvm_mmu_prepare_zap_page", kvm_mmu_print_role,
- NULL);
-
- tep_unregister_print_function(tep, process_is_writable_pte,
- "is_writable_pte");
-}
diff --git a/tools/lib/traceevent/plugins/plugin_mac80211.c b/tools/lib/traceevent/plugins/plugin_mac80211.c
deleted file mode 100644
index 884303c26b5c..000000000000
--- a/tools/lib/traceevent/plugins/plugin_mac80211.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (C) 2009 Johannes Berg <johannes@sipsolutions.net>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "event-parse.h"
-#include "trace-seq.h"
-
-#define INDENT 65
-
-static void print_string(struct trace_seq *s, struct tep_event *event,
- const char *name, const void *data)
-{
- struct tep_format_field *f = tep_find_field(event, name);
- int offset;
- int length;
-
- if (!f) {
- trace_seq_printf(s, "NOTFOUND:%s", name);
- return;
- }
-
- offset = f->offset;
- length = f->size;
-
- if (!strncmp(f->type, "__data_loc", 10)) {
- unsigned long long v;
- if (tep_read_number_field(f, data, &v)) {
- trace_seq_printf(s, "invalid_data_loc");
- return;
- }
- offset = v & 0xffff;
- length = v >> 16;
- }
-
- trace_seq_printf(s, "%.*s", length, (char *)data + offset);
-}
-
-#define SF(fn) tep_print_num_field(s, fn ":%d", event, fn, record, 0)
-#define SFX(fn) tep_print_num_field(s, fn ":%#x", event, fn, record, 0)
-#define SP() trace_seq_putc(s, ' ')
-
-static int drv_bss_info_changed(struct trace_seq *s,
- struct tep_record *record,
- struct tep_event *event, void *context)
-{
- void *data = record->data;
-
- print_string(s, event, "wiphy_name", data);
- trace_seq_printf(s, " vif:");
- print_string(s, event, "vif_name", data);
- tep_print_num_field(s, "(%d)", event, "vif_type", record, 1);
-
- trace_seq_printf(s, "\n%*s", INDENT, "");
- SF("assoc"); SP();
- SF("aid"); SP();
- SF("cts"); SP();
- SF("shortpre"); SP();
- SF("shortslot"); SP();
- SF("dtimper"); SP();
- trace_seq_printf(s, "\n%*s", INDENT, "");
- SF("bcnint"); SP();
- SFX("assoc_cap"); SP();
- SFX("basic_rates"); SP();
- SF("enable_beacon");
- trace_seq_printf(s, "\n%*s", INDENT, "");
- SF("ht_operation_mode");
-
- return 0;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_event_handler(tep, -1, "mac80211",
- "drv_bss_info_changed",
- drv_bss_info_changed, NULL);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_event_handler(tep, -1, "mac80211",
- "drv_bss_info_changed",
- drv_bss_info_changed, NULL);
-}
diff --git a/tools/lib/traceevent/plugins/plugin_sched_switch.c b/tools/lib/traceevent/plugins/plugin_sched_switch.c
deleted file mode 100644
index 957389a0ff7a..000000000000
--- a/tools/lib/traceevent/plugins/plugin_sched_switch.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation;
- * version 2.1 of the License (not later!)
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this program; if not, see <http://www.gnu.org/licenses>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "event-parse.h"
-#include "trace-seq.h"
-
-static void write_state(struct trace_seq *s, int val)
-{
- const char states[] = "SDTtZXxW";
- int found = 0;
- int i;
-
- for (i = 0; i < (sizeof(states) - 1); i++) {
- if (!(val & (1 << i)))
- continue;
-
- if (found)
- trace_seq_putc(s, '|');
-
- found = 1;
- trace_seq_putc(s, states[i]);
- }
-
- if (!found)
- trace_seq_putc(s, 'R');
-}
-
-static void write_and_save_comm(struct tep_format_field *field,
- struct tep_record *record,
- struct trace_seq *s, int pid)
-{
- const char *comm;
- int len;
-
- comm = (char *)(record->data + field->offset);
- len = s->len;
- trace_seq_printf(s, "%.*s",
- field->size, comm);
-
- /* make sure the comm has a \0 at the end. */
- trace_seq_terminate(s);
- comm = &s->buffer[len];
-
- /* Help out the comm to ids. This will handle dups */
- tep_register_comm(field->event->tep, comm, pid);
-}
-
-static int sched_wakeup_handler(struct trace_seq *s,
- struct tep_record *record,
- struct tep_event *event, void *context)
-{
- struct tep_format_field *field;
- unsigned long long val;
-
- if (tep_get_field_val(s, event, "pid", record, &val, 1))
- return trace_seq_putc(s, '!');
-
- field = tep_find_any_field(event, "comm");
- if (field) {
- write_and_save_comm(field, record, s, val);
- trace_seq_putc(s, ':');
- }
- trace_seq_printf(s, "%lld", val);
-
- if (tep_get_field_val(s, event, "prio", record, &val, 0) == 0)
- trace_seq_printf(s, " [%lld]", val);
-
- if (tep_get_field_val(s, event, "success", record, &val, 1) == 0)
- trace_seq_printf(s, " success=%lld", val);
-
- if (tep_get_field_val(s, event, "target_cpu", record, &val, 0) == 0)
- trace_seq_printf(s, " CPU:%03llu", val);
-
- return 0;
-}
-
-static int sched_switch_handler(struct trace_seq *s,
- struct tep_record *record,
- struct tep_event *event, void *context)
-{
- struct tep_format_field *field;
- unsigned long long val;
-
- if (tep_get_field_val(s, event, "prev_pid", record, &val, 1))
- return trace_seq_putc(s, '!');
-
- field = tep_find_any_field(event, "prev_comm");
- if (field) {
- write_and_save_comm(field, record, s, val);
- trace_seq_putc(s, ':');
- }
- trace_seq_printf(s, "%lld ", val);
-
- if (tep_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
- trace_seq_printf(s, "[%d] ", (int) val);
-
- if (tep_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
- write_state(s, val);
-
- trace_seq_puts(s, " ==> ");
-
- if (tep_get_field_val(s, event, "next_pid", record, &val, 1))
- return trace_seq_putc(s, '!');
-
- field = tep_find_any_field(event, "next_comm");
- if (field) {
- write_and_save_comm(field, record, s, val);
- trace_seq_putc(s, ':');
- }
- trace_seq_printf(s, "%lld", val);
-
- if (tep_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
- trace_seq_printf(s, " [%d]", (int) val);
-
- return 0;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_event_handler(tep, -1, "sched", "sched_switch",
- sched_switch_handler, NULL);
-
- tep_register_event_handler(tep, -1, "sched", "sched_wakeup",
- sched_wakeup_handler, NULL);
-
- tep_register_event_handler(tep, -1, "sched", "sched_wakeup_new",
- sched_wakeup_handler, NULL);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_event_handler(tep, -1, "sched", "sched_switch",
- sched_switch_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup",
- sched_wakeup_handler, NULL);
-
- tep_unregister_event_handler(tep, -1, "sched", "sched_wakeup_new",
- sched_wakeup_handler, NULL);
-}
diff --git a/tools/lib/traceevent/plugins/plugin_scsi.c b/tools/lib/traceevent/plugins/plugin_scsi.c
deleted file mode 100644
index 5d0387a4b65a..000000000000
--- a/tools/lib/traceevent/plugins/plugin_scsi.c
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <string.h>
-#include <inttypes.h>
-#include "event-parse.h"
-#include "trace-seq.h"
-
-typedef unsigned long sector_t;
-typedef uint64_t u64;
-typedef unsigned int u32;
-
-/*
- * SCSI opcodes
- */
-#define TEST_UNIT_READY 0x00
-#define REZERO_UNIT 0x01
-#define REQUEST_SENSE 0x03
-#define FORMAT_UNIT 0x04
-#define READ_BLOCK_LIMITS 0x05
-#define REASSIGN_BLOCKS 0x07
-#define INITIALIZE_ELEMENT_STATUS 0x07
-#define READ_6 0x08
-#define WRITE_6 0x0a
-#define SEEK_6 0x0b
-#define READ_REVERSE 0x0f
-#define WRITE_FILEMARKS 0x10
-#define SPACE 0x11
-#define INQUIRY 0x12
-#define RECOVER_BUFFERED_DATA 0x14
-#define MODE_SELECT 0x15
-#define RESERVE 0x16
-#define RELEASE 0x17
-#define COPY 0x18
-#define ERASE 0x19
-#define MODE_SENSE 0x1a
-#define START_STOP 0x1b
-#define RECEIVE_DIAGNOSTIC 0x1c
-#define SEND_DIAGNOSTIC 0x1d
-#define ALLOW_MEDIUM_REMOVAL 0x1e
-
-#define READ_FORMAT_CAPACITIES 0x23
-#define SET_WINDOW 0x24
-#define READ_CAPACITY 0x25
-#define READ_10 0x28
-#define WRITE_10 0x2a
-#define SEEK_10 0x2b
-#define POSITION_TO_ELEMENT 0x2b
-#define WRITE_VERIFY 0x2e
-#define VERIFY 0x2f
-#define SEARCH_HIGH 0x30
-#define SEARCH_EQUAL 0x31
-#define SEARCH_LOW 0x32
-#define SET_LIMITS 0x33
-#define PRE_FETCH 0x34
-#define READ_POSITION 0x34
-#define SYNCHRONIZE_CACHE 0x35
-#define LOCK_UNLOCK_CACHE 0x36
-#define READ_DEFECT_DATA 0x37
-#define MEDIUM_SCAN 0x38
-#define COMPARE 0x39
-#define COPY_VERIFY 0x3a
-#define WRITE_BUFFER 0x3b
-#define READ_BUFFER 0x3c
-#define UPDATE_BLOCK 0x3d
-#define READ_LONG 0x3e
-#define WRITE_LONG 0x3f
-#define CHANGE_DEFINITION 0x40
-#define WRITE_SAME 0x41
-#define UNMAP 0x42
-#define READ_TOC 0x43
-#define READ_HEADER 0x44
-#define GET_EVENT_STATUS_NOTIFICATION 0x4a
-#define LOG_SELECT 0x4c
-#define LOG_SENSE 0x4d
-#define XDWRITEREAD_10 0x53
-#define MODE_SELECT_10 0x55
-#define RESERVE_10 0x56
-#define RELEASE_10 0x57
-#define MODE_SENSE_10 0x5a
-#define PERSISTENT_RESERVE_IN 0x5e
-#define PERSISTENT_RESERVE_OUT 0x5f
-#define VARIABLE_LENGTH_CMD 0x7f
-#define REPORT_LUNS 0xa0
-#define SECURITY_PROTOCOL_IN 0xa2
-#define MAINTENANCE_IN 0xa3
-#define MAINTENANCE_OUT 0xa4
-#define MOVE_MEDIUM 0xa5
-#define EXCHANGE_MEDIUM 0xa6
-#define READ_12 0xa8
-#define SERVICE_ACTION_OUT_12 0xa9
-#define WRITE_12 0xaa
-#define SERVICE_ACTION_IN_12 0xab
-#define WRITE_VERIFY_12 0xae
-#define VERIFY_12 0xaf
-#define SEARCH_HIGH_12 0xb0
-#define SEARCH_EQUAL_12 0xb1
-#define SEARCH_LOW_12 0xb2
-#define SECURITY_PROTOCOL_OUT 0xb5
-#define READ_ELEMENT_STATUS 0xb8
-#define SEND_VOLUME_TAG 0xb6
-#define WRITE_LONG_2 0xea
-#define EXTENDED_COPY 0x83
-#define RECEIVE_COPY_RESULTS 0x84
-#define ACCESS_CONTROL_IN 0x86
-#define ACCESS_CONTROL_OUT 0x87
-#define READ_16 0x88
-#define WRITE_16 0x8a
-#define READ_ATTRIBUTE 0x8c
-#define WRITE_ATTRIBUTE 0x8d
-#define VERIFY_16 0x8f
-#define SYNCHRONIZE_CACHE_16 0x91
-#define WRITE_SAME_16 0x93
-#define SERVICE_ACTION_BIDIRECTIONAL 0x9d
-#define SERVICE_ACTION_IN_16 0x9e
-#define SERVICE_ACTION_OUT_16 0x9f
-/* values for service action in */
-#define SAI_READ_CAPACITY_16 0x10
-#define SAI_GET_LBA_STATUS 0x12
-/* values for VARIABLE_LENGTH_CMD service action codes
- * see spc4r17 Section D.3.5, table D.7 and D.8 */
-#define VLC_SA_RECEIVE_CREDENTIAL 0x1800
-/* values for maintenance in */
-#define MI_REPORT_IDENTIFYING_INFORMATION 0x05
-#define MI_REPORT_TARGET_PGS 0x0a
-#define MI_REPORT_ALIASES 0x0b
-#define MI_REPORT_SUPPORTED_OPERATION_CODES 0x0c
-#define MI_REPORT_SUPPORTED_TASK_MANAGEMENT_FUNCTIONS 0x0d
-#define MI_REPORT_PRIORITY 0x0e
-#define MI_REPORT_TIMESTAMP 0x0f
-#define MI_MANAGEMENT_PROTOCOL_IN 0x10
-/* value for MI_REPORT_TARGET_PGS ext header */
-#define MI_EXT_HDR_PARAM_FMT 0x20
-/* values for maintenance out */
-#define MO_SET_IDENTIFYING_INFORMATION 0x06
-#define MO_SET_TARGET_PGS 0x0a
-#define MO_CHANGE_ALIASES 0x0b
-#define MO_SET_PRIORITY 0x0e
-#define MO_SET_TIMESTAMP 0x0f
-#define MO_MANAGEMENT_PROTOCOL_OUT 0x10
-/* values for variable length command */
-#define XDREAD_32 0x03
-#define XDWRITE_32 0x04
-#define XPWRITE_32 0x06
-#define XDWRITEREAD_32 0x07
-#define READ_32 0x09
-#define VERIFY_32 0x0a
-#define WRITE_32 0x0b
-#define WRITE_SAME_32 0x0d
-
-#define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f)
-#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9])
-
-static const char *
-scsi_trace_misc(struct trace_seq *, unsigned char *, int);
-
-static const char *
-scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
-{
- const char *ret = p->buffer + p->len;
- sector_t lba = 0, txlen = 0;
-
- lba |= ((cdb[1] & 0x1F) << 16);
- lba |= (cdb[2] << 8);
- lba |= cdb[3];
- txlen = cdb[4];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu",
- (unsigned long long)lba, (unsigned long long)txlen);
- trace_seq_putc(p, 0);
- return ret;
-}
-
-static const char *
-scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
-{
- const char *ret = p->buffer + p->len;
- sector_t lba = 0, txlen = 0;
-
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[7] << 8);
- txlen |= cdb[8];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
- cdb[1] >> 5);
-
- if (cdb[0] == WRITE_SAME)
- trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
-
- trace_seq_putc(p, 0);
- return ret;
-}
-
-static const char *
-scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
-{
- const char *ret = p->buffer + p->len;
- sector_t lba = 0, txlen = 0;
-
- lba |= (cdb[2] << 24);
- lba |= (cdb[3] << 16);
- lba |= (cdb[4] << 8);
- lba |= cdb[5];
- txlen |= (cdb[6] << 24);
- txlen |= (cdb[7] << 16);
- txlen |= (cdb[8] << 8);
- txlen |= cdb[9];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
- cdb[1] >> 5);
- trace_seq_putc(p, 0);
- return ret;
-}
-
-static const char *
-scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
-{
- const char *ret = p->buffer + p->len;
- sector_t lba = 0, txlen = 0;
-
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- txlen |= (cdb[10] << 24);
- txlen |= (cdb[11] << 16);
- txlen |= (cdb[12] << 8);
- txlen |= cdb[13];
-
- trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u",
- (unsigned long long)lba, (unsigned long long)txlen,
- cdb[1] >> 5);
-
- if (cdb[0] == WRITE_SAME_16)
- trace_seq_printf(p, " unmap=%u", cdb[1] >> 3 & 1);
-
- trace_seq_putc(p, 0);
- return ret;
-}
-
-static const char *
-scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
-{
- const char *ret = p->buffer + p->len, *cmd;
- sector_t lba = 0, txlen = 0;
- u32 ei_lbrt = 0;
-
- switch (SERVICE_ACTION32(cdb)) {
- case READ_32:
- cmd = "READ";
- break;
- case VERIFY_32:
- cmd = "VERIFY";
- break;
- case WRITE_32:
- cmd = "WRITE";
- break;
- case WRITE_SAME_32:
- cmd = "WRITE_SAME";
- break;
- default:
- trace_seq_printf(p, "UNKNOWN");
- goto out;
- }
-
- lba |= ((u64)cdb[12] << 56);
- lba |= ((u64)cdb[13] << 48);
- lba |= ((u64)cdb[14] << 40);
- lba |= ((u64)cdb[15] << 32);
- lba |= (cdb[16] << 24);
- lba |= (cdb[17] << 16);
- lba |= (cdb[18] << 8);
- lba |= cdb[19];
- ei_lbrt |= (cdb[20] << 24);
- ei_lbrt |= (cdb[21] << 16);
- ei_lbrt |= (cdb[22] << 8);
- ei_lbrt |= cdb[23];
- txlen |= (cdb[28] << 24);
- txlen |= (cdb[29] << 16);
- txlen |= (cdb[30] << 8);
- txlen |= cdb[31];
-
- trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u",
- cmd, (unsigned long long)lba,
- (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt);
-
- if (SERVICE_ACTION32(cdb) == WRITE_SAME_32)
- trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1);
-
-out:
- trace_seq_putc(p, 0);
- return ret;
-}
-
-static const char *
-scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
-{
- const char *ret = p->buffer + p->len;
- unsigned int regions = cdb[7] << 8 | cdb[8];
-
- trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
- trace_seq_putc(p, 0);
- return ret;
-}
-
-static const char *
-scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
-{
- const char *ret = p->buffer + p->len, *cmd;
- sector_t lba = 0;
- u32 alloc_len = 0;
-
- switch (SERVICE_ACTION16(cdb)) {
- case SAI_READ_CAPACITY_16:
- cmd = "READ_CAPACITY_16";
- break;
- case SAI_GET_LBA_STATUS:
- cmd = "GET_LBA_STATUS";
- break;
- default:
- trace_seq_printf(p, "UNKNOWN");
- goto out;
- }
-
- lba |= ((u64)cdb[2] << 56);
- lba |= ((u64)cdb[3] << 48);
- lba |= ((u64)cdb[4] << 40);
- lba |= ((u64)cdb[5] << 32);
- lba |= (cdb[6] << 24);
- lba |= (cdb[7] << 16);
- lba |= (cdb[8] << 8);
- lba |= cdb[9];
- alloc_len |= (cdb[10] << 24);
- alloc_len |= (cdb[11] << 16);
- alloc_len |= (cdb[12] << 8);
- alloc_len |= cdb[13];
-
- trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd,
- (unsigned long long)lba, alloc_len);
-
-out:
- trace_seq_putc(p, 0);
- return ret;
-}
-
-static const char *
-scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
-{
- switch (SERVICE_ACTION32(cdb)) {
- case READ_32:
- case VERIFY_32:
- case WRITE_32:
- case WRITE_SAME_32:
- return scsi_trace_rw32(p, cdb, len);
- default:
- return scsi_trace_misc(p, cdb, len);
- }
-}
-
-static const char *
-scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
-{
- const char *ret = p->buffer + p->len;
-
- trace_seq_printf(p, "-");
- trace_seq_putc(p, 0);
- return ret;
-}
-
-const char *
-scsi_trace_parse_cdb(struct trace_seq *p, unsigned char *cdb, int len)
-{
- switch (cdb[0]) {
- case READ_6:
- case WRITE_6:
- return scsi_trace_rw6(p, cdb, len);
- case READ_10:
- case VERIFY:
- case WRITE_10:
- case WRITE_SAME:
- return scsi_trace_rw10(p, cdb, len);
- case READ_12:
- case VERIFY_12:
- case WRITE_12:
- return scsi_trace_rw12(p, cdb, len);
- case READ_16:
- case VERIFY_16:
- case WRITE_16:
- case WRITE_SAME_16:
- return scsi_trace_rw16(p, cdb, len);
- case UNMAP:
- return scsi_trace_unmap(p, cdb, len);
- case SERVICE_ACTION_IN_16:
- return scsi_trace_service_action_in(p, cdb, len);
- case VARIABLE_LENGTH_CMD:
- return scsi_trace_varlen(p, cdb, len);
- default:
- return scsi_trace_misc(p, cdb, len);
- }
-}
-
-unsigned long long process_scsi_trace_parse_cdb(struct trace_seq *s,
- unsigned long long *args)
-{
- scsi_trace_parse_cdb(s, (unsigned char *) (unsigned long) args[1], args[2]);
- return 0;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_print_function(tep,
- process_scsi_trace_parse_cdb,
- TEP_FUNC_ARG_STRING,
- "scsi_trace_parse_cdb",
- TEP_FUNC_ARG_PTR,
- TEP_FUNC_ARG_PTR,
- TEP_FUNC_ARG_INT,
- TEP_FUNC_ARG_VOID);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_print_function(tep, process_scsi_trace_parse_cdb,
- "scsi_trace_parse_cdb");
-}
diff --git a/tools/lib/traceevent/plugins/plugin_xen.c b/tools/lib/traceevent/plugins/plugin_xen.c
deleted file mode 100644
index 993b208d0323..000000000000
--- a/tools/lib/traceevent/plugins/plugin_xen.c
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include "event-parse.h"
-#include "trace-seq.h"
-
-#define __HYPERVISOR_set_trap_table 0
-#define __HYPERVISOR_mmu_update 1
-#define __HYPERVISOR_set_gdt 2
-#define __HYPERVISOR_stack_switch 3
-#define __HYPERVISOR_set_callbacks 4
-#define __HYPERVISOR_fpu_taskswitch 5
-#define __HYPERVISOR_sched_op_compat 6
-#define __HYPERVISOR_dom0_op 7
-#define __HYPERVISOR_set_debugreg 8
-#define __HYPERVISOR_get_debugreg 9
-#define __HYPERVISOR_update_descriptor 10
-#define __HYPERVISOR_memory_op 12
-#define __HYPERVISOR_multicall 13
-#define __HYPERVISOR_update_va_mapping 14
-#define __HYPERVISOR_set_timer_op 15
-#define __HYPERVISOR_event_channel_op_compat 16
-#define __HYPERVISOR_xen_version 17
-#define __HYPERVISOR_console_io 18
-#define __HYPERVISOR_physdev_op_compat 19
-#define __HYPERVISOR_grant_table_op 20
-#define __HYPERVISOR_vm_assist 21
-#define __HYPERVISOR_update_va_mapping_otherdomain 22
-#define __HYPERVISOR_iret 23 /* x86 only */
-#define __HYPERVISOR_vcpu_op 24
-#define __HYPERVISOR_set_segment_base 25 /* x86/64 only */
-#define __HYPERVISOR_mmuext_op 26
-#define __HYPERVISOR_acm_op 27
-#define __HYPERVISOR_nmi_op 28
-#define __HYPERVISOR_sched_op 29
-#define __HYPERVISOR_callback_op 30
-#define __HYPERVISOR_xenoprof_op 31
-#define __HYPERVISOR_event_channel_op 32
-#define __HYPERVISOR_physdev_op 33
-#define __HYPERVISOR_hvm_op 34
-#define __HYPERVISOR_tmem_op 38
-
-/* Architecture-specific hypercall definitions. */
-#define __HYPERVISOR_arch_0 48
-#define __HYPERVISOR_arch_1 49
-#define __HYPERVISOR_arch_2 50
-#define __HYPERVISOR_arch_3 51
-#define __HYPERVISOR_arch_4 52
-#define __HYPERVISOR_arch_5 53
-#define __HYPERVISOR_arch_6 54
-#define __HYPERVISOR_arch_7 55
-
-#define N(x) [__HYPERVISOR_##x] = "("#x")"
-static const char *xen_hypercall_names[] = {
- N(set_trap_table),
- N(mmu_update),
- N(set_gdt),
- N(stack_switch),
- N(set_callbacks),
- N(fpu_taskswitch),
- N(sched_op_compat),
- N(dom0_op),
- N(set_debugreg),
- N(get_debugreg),
- N(update_descriptor),
- N(memory_op),
- N(multicall),
- N(update_va_mapping),
- N(set_timer_op),
- N(event_channel_op_compat),
- N(xen_version),
- N(console_io),
- N(physdev_op_compat),
- N(grant_table_op),
- N(vm_assist),
- N(update_va_mapping_otherdomain),
- N(iret),
- N(vcpu_op),
- N(set_segment_base),
- N(mmuext_op),
- N(acm_op),
- N(nmi_op),
- N(sched_op),
- N(callback_op),
- N(xenoprof_op),
- N(event_channel_op),
- N(physdev_op),
- N(hvm_op),
-
-/* Architecture-specific hypercall definitions. */
- N(arch_0),
- N(arch_1),
- N(arch_2),
- N(arch_3),
- N(arch_4),
- N(arch_5),
- N(arch_6),
- N(arch_7),
-};
-#undef N
-
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
-
-static const char *xen_hypercall_name(unsigned op)
-{
- if (op < ARRAY_SIZE(xen_hypercall_names) &&
- xen_hypercall_names[op] != NULL)
- return xen_hypercall_names[op];
-
- return "";
-}
-
-unsigned long long process_xen_hypercall_name(struct trace_seq *s,
- unsigned long long *args)
-{
- unsigned int op = args[0];
-
- trace_seq_printf(s, "%s", xen_hypercall_name(op));
- return 0;
-}
-
-int TEP_PLUGIN_LOADER(struct tep_handle *tep)
-{
- tep_register_print_function(tep,
- process_xen_hypercall_name,
- TEP_FUNC_ARG_STRING,
- "xen_hypercall_name",
- TEP_FUNC_ARG_INT,
- TEP_FUNC_ARG_VOID);
- return 0;
-}
-
-void TEP_PLUGIN_UNLOADER(struct tep_handle *tep)
-{
- tep_unregister_print_function(tep, process_xen_hypercall_name,
- "xen_hypercall_name");
-}
diff --git a/tools/lib/traceevent/tep_strerror.c b/tools/lib/traceevent/tep_strerror.c
deleted file mode 100644
index 4ac26445b2f6..000000000000
--- a/tools/lib/traceevent/tep_strerror.c
+++ /dev/null
@@ -1,53 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-#undef _GNU_SOURCE
-#include <string.h>
-#include <stdio.h>
-
-#include "event-parse.h"
-
-#undef _PE
-#define _PE(code, str) str
-static const char * const tep_error_str[] = {
- TEP_ERRORS
-};
-#undef _PE
-
-/*
- * The tools so far have been using the strerror_r() GNU variant, that returns
- * a string, be it the buffer passed or something else.
- *
- * But that, besides being tricky in cases where we expect that the function
- * using strerror_r() returns the error formatted in a provided buffer (we have
- * to check if it returned something else and copy that instead), breaks the
- * build on systems not using glibc, like Alpine Linux, where musl libc is
- * used.
- *
- * So, introduce yet another wrapper, str_error_r(), that has the GNU
- * interface, but uses the portable XSI variant of strerror_r(), so that users
- * rest asured that the provided buffer is used and it is what is returned.
- */
-int tep_strerror(struct tep_handle *tep __maybe_unused,
- enum tep_errno errnum, char *buf, size_t buflen)
-{
- const char *msg;
- int idx;
-
- if (!buflen)
- return 0;
-
- if (errnum >= 0) {
- int err = strerror_r(errnum, buf, buflen);
- buf[buflen - 1] = 0;
- return err;
- }
-
- if (errnum <= __TEP_ERRNO__START ||
- errnum >= __TEP_ERRNO__END)
- return -1;
-
- idx = errnum - __TEP_ERRNO__START - 1;
- msg = tep_error_str[idx];
- snprintf(buf, buflen, "%s", msg);
-
- return 0;
-}
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
deleted file mode 100644
index 8d5ecd2bf877..000000000000
--- a/tools/lib/traceevent/trace-seq.c
+++ /dev/null
@@ -1,249 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2009 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-#include "trace-seq.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <stdarg.h>
-
-#include <asm/bug.h>
-#include "event-parse.h"
-#include "event-utils.h"
-
-/*
- * The TRACE_SEQ_POISON is to catch the use of using
- * a trace_seq structure after it was destroyed.
- */
-#define TRACE_SEQ_POISON ((void *)0xdeadbeef)
-#define TRACE_SEQ_CHECK(s) \
-do { \
- if (WARN_ONCE((s)->buffer == TRACE_SEQ_POISON, \
- "Usage of trace_seq after it was destroyed")) \
- (s)->state = TRACE_SEQ__BUFFER_POISONED; \
-} while (0)
-
-#define TRACE_SEQ_CHECK_RET_N(s, n) \
-do { \
- TRACE_SEQ_CHECK(s); \
- if ((s)->state != TRACE_SEQ__GOOD) \
- return n; \
-} while (0)
-
-#define TRACE_SEQ_CHECK_RET(s) TRACE_SEQ_CHECK_RET_N(s, )
-#define TRACE_SEQ_CHECK_RET0(s) TRACE_SEQ_CHECK_RET_N(s, 0)
-
-/**
- * trace_seq_init - initialize the trace_seq structure
- * @s: a pointer to the trace_seq structure to initialize
- */
-void trace_seq_init(struct trace_seq *s)
-{
- s->len = 0;
- s->readpos = 0;
- s->buffer_size = TRACE_SEQ_BUF_SIZE;
- s->buffer = malloc(s->buffer_size);
- if (s->buffer != NULL)
- s->state = TRACE_SEQ__GOOD;
- else
- s->state = TRACE_SEQ__MEM_ALLOC_FAILED;
-}
-
-/**
- * trace_seq_reset - re-initialize the trace_seq structure
- * @s: a pointer to the trace_seq structure to reset
- */
-void trace_seq_reset(struct trace_seq *s)
-{
- if (!s)
- return;
- TRACE_SEQ_CHECK(s);
- s->len = 0;
- s->readpos = 0;
-}
-
-/**
- * trace_seq_destroy - free up memory of a trace_seq
- * @s: a pointer to the trace_seq to free the buffer
- *
- * Only frees the buffer, not the trace_seq struct itself.
- */
-void trace_seq_destroy(struct trace_seq *s)
-{
- if (!s)
- return;
- TRACE_SEQ_CHECK_RET(s);
- free(s->buffer);
- s->buffer = TRACE_SEQ_POISON;
-}
-
-static void expand_buffer(struct trace_seq *s)
-{
- char *buf;
-
- buf = realloc(s->buffer, s->buffer_size + TRACE_SEQ_BUF_SIZE);
- if (WARN_ONCE(!buf, "Can't allocate trace_seq buffer memory")) {
- s->state = TRACE_SEQ__MEM_ALLOC_FAILED;
- return;
- }
-
- s->buffer = buf;
- s->buffer_size += TRACE_SEQ_BUF_SIZE;
-}
-
-/**
- * trace_seq_printf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * It returns 0 if the trace oversizes the buffer's free
- * space, the number of characters printed, or a negative
- * value in case of an error.
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
-{
- va_list ap;
- int len;
- int ret;
-
- try_again:
- TRACE_SEQ_CHECK_RET0(s);
-
- len = (s->buffer_size - 1) - s->len;
-
- va_start(ap, fmt);
- ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
- va_end(ap);
-
- if (ret >= len) {
- expand_buffer(s);
- goto try_again;
- }
-
- if (ret > 0)
- s->len += ret;
-
- return ret;
-}
-
-/**
- * trace_seq_vprintf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * It returns 0 if the trace oversizes the buffer's free
- * space, the number of characters printed, or a negative
- * value in case of an error.
- * *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
-{
- int len;
- int ret;
-
- try_again:
- TRACE_SEQ_CHECK_RET0(s);
-
- len = (s->buffer_size - 1) - s->len;
-
- ret = vsnprintf(s->buffer + s->len, len, fmt, args);
-
- if (ret >= len) {
- expand_buffer(s);
- goto try_again;
- }
-
- if (ret > 0)
- s->len += ret;
-
- return ret;
-}
-
-/**
- * trace_seq_puts - trace sequence printing of simple string
- * @s: trace sequence descriptor
- * @str: simple string to record
- *
- * The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple string
- * into a special buffer (@s) for later retrieval by a sequencer
- * or other mechanism.
- */
-int trace_seq_puts(struct trace_seq *s, const char *str)
-{
- int len;
-
- TRACE_SEQ_CHECK_RET0(s);
-
- len = strlen(str);
-
- while (len > ((s->buffer_size - 1) - s->len))
- expand_buffer(s);
-
- TRACE_SEQ_CHECK_RET0(s);
-
- memcpy(s->buffer + s->len, str, len);
- s->len += len;
-
- return len;
-}
-
-int trace_seq_putc(struct trace_seq *s, unsigned char c)
-{
- TRACE_SEQ_CHECK_RET0(s);
-
- while (s->len >= (s->buffer_size - 1))
- expand_buffer(s);
-
- TRACE_SEQ_CHECK_RET0(s);
-
- s->buffer[s->len++] = c;
-
- return 1;
-}
-
-void trace_seq_terminate(struct trace_seq *s)
-{
- TRACE_SEQ_CHECK_RET(s);
-
- /* There's always one character left on the buffer */
- s->buffer[s->len] = 0;
-}
-
-int trace_seq_do_fprintf(struct trace_seq *s, FILE *fp)
-{
- TRACE_SEQ_CHECK(s);
-
- switch (s->state) {
- case TRACE_SEQ__GOOD:
- return fprintf(fp, "%.*s", s->len, s->buffer);
- case TRACE_SEQ__BUFFER_POISONED:
- fprintf(fp, "%s\n", "Usage of trace_seq after it was destroyed");
- break;
- case TRACE_SEQ__MEM_ALLOC_FAILED:
- fprintf(fp, "%s\n", "Can't allocate trace_seq buffer memory");
- break;
- }
- return -1;
-}
-
-int trace_seq_do_printf(struct trace_seq *s)
-{
- return trace_seq_do_fprintf(s, stdout);
-}
diff --git a/tools/lib/traceevent/trace-seq.h b/tools/lib/traceevent/trace-seq.h
deleted file mode 100644
index d68ec69f8d1a..000000000000
--- a/tools/lib/traceevent/trace-seq.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
- *
- */
-
-#ifndef _TRACE_SEQ_H
-#define _TRACE_SEQ_H
-
-#include <stdarg.h>
-#include <stdio.h>
-
-/* ----------------------- trace_seq ----------------------- */
-
-#ifndef TRACE_SEQ_BUF_SIZE
-#define TRACE_SEQ_BUF_SIZE 4096
-#endif
-
-enum trace_seq_fail {
- TRACE_SEQ__GOOD,
- TRACE_SEQ__BUFFER_POISONED,
- TRACE_SEQ__MEM_ALLOC_FAILED,
-};
-
-/*
- * Trace sequences are used to allow a function to call several other functions
- * to create a string of data to use (up to a max of PAGE_SIZE).
- */
-
-struct trace_seq {
- char *buffer;
- unsigned int buffer_size;
- unsigned int len;
- unsigned int readpos;
- enum trace_seq_fail state;
-};
-
-void trace_seq_init(struct trace_seq *s);
-void trace_seq_reset(struct trace_seq *s);
-void trace_seq_destroy(struct trace_seq *s);
-
-extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
- __attribute__ ((format (printf, 2, 3)));
-extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
- __attribute__ ((format (printf, 2, 0)));
-
-extern int trace_seq_puts(struct trace_seq *s, const char *str);
-extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
-
-extern void trace_seq_terminate(struct trace_seq *s);
-
-extern int trace_seq_do_fprintf(struct trace_seq *s, FILE *fp);
-extern int trace_seq_do_printf(struct trace_seq *s);
-
-#endif /* _TRACE_SEQ_H */
diff --git a/tools/memory-model/Documentation/README b/tools/memory-model/Documentation/README
new file mode 100644
index 000000000000..db90a26dbdf4
--- /dev/null
+++ b/tools/memory-model/Documentation/README
@@ -0,0 +1,76 @@
+It has been said that successful communication requires first identifying
+what your audience knows and then building a bridge from their current
+knowledge to what they need to know. Unfortunately, the expected
+Linux-kernel memory model (LKMM) audience might be anywhere from novice
+to expert both in kernel hacking and in understanding LKMM.
+
+This document therefore points out a number of places to start reading,
+depending on what you know and what you would like to learn. Please note
+that the documents later in this list assume that the reader understands
+the material provided by documents earlier in this list.
+
+o You are new to Linux-kernel concurrency: simple.txt
+
+o You have some background in Linux-kernel concurrency, and would
+ like an overview of the types of low-level concurrency primitives
+ that the Linux kernel provides: ordering.txt
+
+ Here, "low level" means atomic operations to single variables.
+
+o You are familiar with the Linux-kernel concurrency primitives
+ that you need, and just want to get started with LKMM litmus
+ tests: litmus-tests.txt
+
+o You are familiar with Linux-kernel concurrency, and would
+ like a detailed intuitive understanding of LKMM, including
+ situations involving more than two threads: recipes.txt
+
+o You would like a detailed understanding of what your compiler can
+ and cannot do to control dependencies: control-dependencies.txt
+
+o You are familiar with Linux-kernel concurrency and the use of
+ LKMM, and would like a quick reference: cheatsheet.txt
+
+o You are familiar with Linux-kernel concurrency and the use
+ of LKMM, and would like to learn about LKMM's requirements,
+ rationale, and implementation: explanation.txt
+
+o You are interested in the publications related to LKMM, including
+ hardware manuals, academic literature, standards-committee
+ working papers, and LWN articles: references.txt
+
+
+====================
+DESCRIPTION OF FILES
+====================
+
+README
+ This file.
+
+cheatsheet.txt
+ Quick-reference guide to the Linux-kernel memory model.
+
+control-dependencies.txt
+ Guide to preventing compiler optimizations from destroying
+ your control dependencies.
+
+explanation.txt
+ Detailed description of the memory model.
+
+litmus-tests.txt
+ The format, features, capabilities, and limitations of the litmus
+ tests that LKMM can evaluate.
+
+ordering.txt
+ Overview of the Linux kernel's low-level memory-ordering
+ primitives by category.
+
+recipes.txt
+ Common memory-ordering patterns.
+
+references.txt
+ Background information.
+
+simple.txt
+ Starting point for someone new to Linux-kernel concurrency.
+ And also a reminder of the simpler approaches to concurrency!
diff --git a/tools/memory-model/Documentation/access-marking.txt b/tools/memory-model/Documentation/access-marking.txt
new file mode 100644
index 000000000000..65778222183e
--- /dev/null
+++ b/tools/memory-model/Documentation/access-marking.txt
@@ -0,0 +1,598 @@
+MARKING SHARED-MEMORY ACCESSES
+==============================
+
+This document provides guidelines for marking intentionally concurrent
+normal accesses to shared memory, that is "normal" as in accesses that do
+not use read-modify-write atomic operations. It also describes how to
+document these accesses, both with comments and with special assertions
+processed by the Kernel Concurrency Sanitizer (KCSAN). This discussion
+builds on an earlier LWN article [1].
+
+
+ACCESS-MARKING OPTIONS
+======================
+
+The Linux kernel provides the following access-marking options:
+
+1. Plain C-language accesses (unmarked), for example, "a = b;"
+
+2. Data-race marking, for example, "data_race(a = b);"
+
+3. READ_ONCE(), for example, "a = READ_ONCE(b);"
+ The various forms of atomic_read() also fit in here.
+
+4. WRITE_ONCE(), for example, "WRITE_ONCE(a, b);"
+ The various forms of atomic_set() also fit in here.
+
+
+These may be used in combination, as shown in this admittedly improbable
+example:
+
+ WRITE_ONCE(a, b + data_race(c + d) + READ_ONCE(e));
+
+Neither plain C-language accesses nor data_race() (#1 and #2 above) place
+any sort of constraint on the compiler's choice of optimizations [2].
+In contrast, READ_ONCE() and WRITE_ONCE() (#3 and #4 above) restrict the
+compiler's use of code-motion and common-subexpression optimizations.
+Therefore, if a given access is involved in an intentional data race,
+using READ_ONCE() for loads and WRITE_ONCE() for stores is usually
+preferable to data_race(), which in turn is usually preferable to plain
+C-language accesses. It is permissible to combine #2 and #3, for example,
+data_race(READ_ONCE(a)), which will both restrict compiler optimizations
+and disable KCSAN diagnostics.
+
+KCSAN will complain about many types of data races involving plain
+C-language accesses, but marking all accesses involved in a given data
+race with one of data_race(), READ_ONCE(), or WRITE_ONCE() will prevent
+KCSAN from complaining. Of course, lack of KCSAN complaints does not
+imply correct code. Therefore, please take a thoughtful approach
+when responding to KCSAN complaints. Churning the code base with
+ill-considered additions of data_race(), READ_ONCE(), and WRITE_ONCE()
+is unhelpful.
+
+In fact, the following sections describe situations where use of
+data_race() and even plain C-language accesses is preferable to
+READ_ONCE() and WRITE_ONCE().
+
+
+Use of the data_race() Macro
+----------------------------
+
+Here are some situations where data_race() should be used instead of
+READ_ONCE() and WRITE_ONCE():
+
+1. Data-racy loads from shared variables whose values are used only
+ for diagnostic purposes.
+
+2. Data-racy reads whose values are checked against marked reload.
+
+3. Reads whose values feed into error-tolerant heuristics.
+
+4. Writes setting values that feed into error-tolerant heuristics.
+
+
+Data-Racy Reads for Approximate Diagnostics
+
+Approximate diagnostics include lockdep reports, monitoring/statistics
+(including /proc and /sys output), WARN*()/BUG*() checks whose return
+values are ignored, and other situations where reads from shared variables
+are not an integral part of the core concurrency design.
+
+In fact, use of data_race() instead of READ_ONCE() for these diagnostic
+reads can enable better checking of the remaining accesses implementing
+the core concurrency design. For example, suppose that the core design
+prevents any non-diagnostic reads from shared variable x from running
+concurrently with updates to x. Then using plain C-language writes
+to x allows KCSAN to detect reads from x from within regions of code
+that fail to exclude the updates. In this case, it is important to use
+data_race() for the diagnostic reads because otherwise KCSAN would give
+false-positive warnings about these diagnostic reads.
+
+If it is necessary to both restrict compiler optimizations and disable
+KCSAN diagnostics, use both data_race() and READ_ONCE(), for example,
+data_race(READ_ONCE(a)).
+
+In theory, plain C-language loads can also be used for this use case.
+However, in practice this will have the disadvantage of causing KCSAN
+to generate false positives because KCSAN will have no way of knowing
+that the resulting data race was intentional.
+
+
+Data-Racy Reads That Are Checked Against Marked Reload
+
+The values from some reads are not implicitly trusted. They are instead
+fed into some operation that checks the full value against a later marked
+load from memory, which means that the occasional arbitrarily bogus value
+is not a problem. For example, if a bogus value is fed into cmpxchg(),
+all that happens is that this cmpxchg() fails, which normally results
+in a retry. Unless the race condition that resulted in the bogus value
+recurs, this retry will with high probability succeed, so no harm done.
+
+However, please keep in mind that a data_race() load feeding into
+a cmpxchg_relaxed() might still be subject to load fusing on some
+architectures. Therefore, it is best to capture the return value from
+the failing cmpxchg() for the next iteration of the loop, an approach
+that provides the compiler much less scope for mischievous optimizations.
+Capturing the return value from cmpxchg() also saves a memory reference
+in many cases.
+
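+For example, a minimal sketch of this pattern might look as follows; foo
+and the add_to_foo() helper are made-up names, and the xor_shift_foo()
+example later in this document uses the same idiom:
+
+	int foo;
+
+	int add_to_foo(int x)
+	{
+		int old, new, newold;
+
+		newold = data_race(foo);	/* Bogus values are caught by cmpxchg(). */
+		do {
+			old = newold;
+			new = old + x;
+			/* On failure, reuse the value that cmpxchg() returned. */
+			newold = cmpxchg(&foo, old, new);
+		} while (newold != old);
+		return old;
+	}
+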
+In theory, plain C-language loads can also be used for this use case.
+However, in practice this will have the disadvantage of causing KCSAN
+to generate false positives because KCSAN will have no way of knowing
+that the resulting data race was intentional.
+
+
+Reads Feeding Into Error-Tolerant Heuristics
+
+Values from some reads feed into heuristics that can tolerate occasional
+errors. Such reads can use data_race(), thus allowing KCSAN to focus on
+the other accesses to the relevant shared variables. But please note
+that data_race() loads are subject to load fusing, which can result in
+consistent errors, which in turn are quite capable of breaking heuristics.
+Therefore use of data_race() should be limited to cases where some other
+code (such as a barrier() call) will force the occasional reload.
+
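+For example, a hypothetical throttling heuristic might be sketched as
+follows, where FOO_TARGET and do_background_work() are made-up names:
+
+	while (data_race(foo) < FOO_TARGET) {
+		do_background_work();
+		/* barrier() forces foo to be reloaded even if the above is inlined. */
+		barrier();
+	}
+
+An occasional stale or mangled value of foo merely causes a bit more or
+a bit less background work, which this sort of heuristic tolerates.
+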
+Note that this use case requires that the heuristic be able to handle
+any possible error. In contrast, if the heuristics might be fatally
+confused by one or more of the possible erroneous values, use READ_ONCE()
+instead of data_race().
+
+In theory, plain C-language loads can also be used for this use case.
+However, in practice this will have the disadvantage of causing KCSAN
+to generate false positives because KCSAN will have no way of knowing
+that the resulting data race was intentional.
+
+
+Writes Setting Values Feeding Into Error-Tolerant Heuristics
+
+The values read into error-tolerant heuristics come from somewhere,
+for example, from sysfs. This means that some code in sysfs writes
+to this same variable, and these writes can also use data_race().
+After all, if the heuristic can tolerate the occasional bogus value
+due to compiler-mangled reads, it can also tolerate the occasional
+compiler-mangled write, at least assuming that the proper value is in
+place once the write completes.
+
+Plain C-language stores can also be used for this use case. However,
+in kernels built with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n, this
+will have the disadvantage of causing KCSAN to generate false positives
+because KCSAN will have no way of knowing that the resulting data race
+was intentional.
+
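+For example, a store from a hypothetical sysfs-style store callback might
+be sketched as follows, with foo_tunable and set_foo_tunable() being
+made-up names:
+
+	void set_foo_tunable(int newval)
+	{
+		/* Readers are error-tolerant heuristics, so data_race() suffices. */
+		data_race(foo_tunable = newval);
+	}
+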
+
+Use of Plain C-Language Accesses
+--------------------------------
+
+Here are some example situations where plain C-language accesses should be
+used instead of READ_ONCE(), WRITE_ONCE(), and data_race():
+
+1. Accesses protected by mutual exclusion, including strict locking
+ and sequence locking.
+
+2. Initialization-time and cleanup-time accesses. This covers a
+ wide variety of situations, including the uniprocessor phase of
+ system boot, variables to be used by not-yet-spawned kthreads,
+ structures not yet published to reference-counted or RCU-protected
+ data structures, and the cleanup side of any of these situations.
+
+3. Per-CPU variables that are not accessed from other CPUs.
+
+4. Private per-task variables, including on-stack variables, some
+ fields in the task_struct structure, and task-private heap data.
+
+5. Any other loads for which there is not supposed to be a concurrent
+ store to that same variable.
+
+6. Any other stores for which there should be neither concurrent
+ loads nor concurrent stores to that same variable.
+
+	But note that KCSAN makes three explicit exceptions to this rule
+ by default, refraining from flagging plain C-language stores:
+
+ a. No matter what. You can override this default by building
+ with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n.
+
+ b. When the store writes the value already contained in
+ that variable. You can override this default by building
+ with CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n.
+
+ c. When one of the stores is in an interrupt handler and
+ the other in the interrupted code. You can override this
+ default by building with CONFIG_KCSAN_INTERRUPT_WATCHER=y.
+
+Note that it is important to use plain C-language accesses in these cases,
+because doing otherwise prevents KCSAN from detecting violations of your
+code's synchronization rules.
+
+
+ACCESS-DOCUMENTATION OPTIONS
+============================
+
+It is important to comment marked accesses so that people reading your
+code, yourself included, are reminded of the synchronization design.
+However, it is even more important to comment plain C-language accesses
+that are intentionally involved in data races. Such comments are
+needed to remind people reading your code, again, yourself included,
+of how the compiler has been prevented from optimizing those accesses
+into concurrency bugs.
+
+It is also possible to tell KCSAN about your synchronization design.
+For example, ASSERT_EXCLUSIVE_ACCESS(foo) tells KCSAN that any
+concurrent access to variable foo by any other CPU is an error, even
+if that concurrent access is marked with READ_ONCE(). In addition,
+ASSERT_EXCLUSIVE_WRITER(foo) tells KCSAN that although it is OK for there
+to be concurrent reads from foo from other CPUs, it is an error for some
+other CPU to be concurrently writing to foo, even if that concurrent
+write is marked with data_race() or WRITE_ONCE().
+
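+For example, a function that is supposed to be the only writer of foo
+might be sketched as follows, with update_foo_stats() being a made-up
+name:
+
+	void update_foo_stats(int newval)
+	{
+		ASSERT_EXCLUSIVE_WRITER(foo);	/* Any concurrent write is a bug. */
+		WRITE_ONCE(foo, newval);	/* Concurrent marked readers are fine. */
+	}
+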
+Note that although KCSAN will call out data races involving either
+ASSERT_EXCLUSIVE_ACCESS() or ASSERT_EXCLUSIVE_WRITER() on the one hand
+and data_race() writes on the other, KCSAN will not report the location
+of these data_race() writes.
+
+
+EXAMPLES
+========
+
+As noted earlier, the goal is to prevent the compiler from destroying
+your concurrent algorithm, to help the human reader, and to inform
+KCSAN of aspects of your concurrency design. This section looks at a
+few examples showing how this can be done.
+
+
+Lock Protection With Lockless Diagnostic Access
+-----------------------------------------------
+
+For example, suppose a shared variable "foo" is read only while a
+reader-writer spinlock is read-held, written only while that same
+spinlock is write-held, except that it is also read locklessly for
+diagnostic purposes. The code might look as follows:
+
+ int foo;
+ DEFINE_RWLOCK(foo_rwlock);
+
+ void update_foo(int newval)
+ {
+ write_lock(&foo_rwlock);
+ foo = newval;
+ do_something(newval);
+ write_unlock(&foo_rwlock);
+ }
+
+ int read_foo(void)
+ {
+ int ret;
+
+ read_lock(&foo_rwlock);
+ do_something_else();
+ ret = foo;
+ read_unlock(&foo_rwlock);
+ return ret;
+ }
+
+ void read_foo_diagnostic(void)
+ {
+ pr_info("Current value of foo: %d\n", data_race(foo));
+ }
+
+The reader-writer lock prevents the compiler from introducing concurrency
+bugs into any part of the main algorithm using foo, which means that
+the accesses to foo within both update_foo() and read_foo() can (and
+should) be plain C-language accesses. One benefit of making them be
+plain C-language accesses is that KCSAN can detect any erroneous lockless
+reads from or updates to foo. The data_race() in read_foo_diagnostic()
+tells KCSAN that data races are expected, and should be silently
+ignored. This data_race() also tells the human reading the code that
+read_foo_diagnostic() might sometimes return a bogus value.
+
+If it is necessary to suppress compiler optimization and also detect
+buggy lockless writes, read_foo_diagnostic() can be updated as follows:
+
+ void read_foo_diagnostic(void)
+ {
+ pr_info("Current value of foo: %d\n", data_race(READ_ONCE(foo)));
+ }
+
+Alternatively, given that KCSAN is to ignore all accesses in this function,
+this function can be marked __no_kcsan and the data_race() can be dropped:
+
+ void __no_kcsan read_foo_diagnostic(void)
+ {
+ pr_info("Current value of foo: %d\n", READ_ONCE(foo));
+ }
+
+However, in order for KCSAN to detect buggy lockless writes, your kernel
+must be built with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n. If you
+need KCSAN to detect such a write even if that write did not change
+the value of foo, you also need CONFIG_KCSAN_REPORT_VALUE_CHANGE_ONLY=n.
+If you need KCSAN to detect such a write happening in an interrupt handler
+running on the same CPU doing the legitimate lock-protected write, you
+also need CONFIG_KCSAN_INTERRUPT_WATCHER=y. With some or all of these
+Kconfig options set properly, KCSAN can be quite helpful, although
+it is not necessarily a full replacement for hardware watchpoints.
+On the other hand, neither are hardware watchpoints a full replacement
+for KCSAN because it is not always easy to tell a hardware watchpoint to
+conditionally trap on accesses.
+
+
+Lock-Protected Writes With Lockless Reads
+-----------------------------------------
+
+For another example, suppose a shared variable "foo" is updated only
+while holding a spinlock, but is read locklessly. The code might look
+as follows:
+
+ int foo;
+ DEFINE_SPINLOCK(foo_lock);
+
+ void update_foo(int newval)
+ {
+ spin_lock(&foo_lock);
+ WRITE_ONCE(foo, newval);
+ ASSERT_EXCLUSIVE_WRITER(foo);
+ do_something(newval);
+		spin_unlock(&foo_lock);
+ }
+
+ int read_foo(void)
+ {
+ do_something_else();
+ return READ_ONCE(foo);
+ }
+
+Because foo is read locklessly, all accesses are marked. The purpose
+of the ASSERT_EXCLUSIVE_WRITER() is to allow KCSAN to check for a buggy
+concurrent lockless write.
+
+
+Lock-Protected Writes With Heuristic Lockless Reads
+---------------------------------------------------
+
+For another example, suppose that the code can normally make use of
+a per-data-structure lock, but there are times when a global lock
+is required. These times are indicated via a global flag. The code
+might look as follows, and is based loosely on nf_conntrack_lock(),
+nf_conntrack_all_lock(), and nf_conntrack_all_unlock():
+
+ bool global_flag;
+ DEFINE_SPINLOCK(global_lock);
+ struct foo {
+ spinlock_t f_lock;
+ int f_data;
+ };
+
+ /* All foo structures are in the following array. */
+ int nfoo;
+ struct foo *foo_array;
+
+ void do_something_locked(struct foo *fp)
+ {
+ /* This works even if data_race() returns nonsense. */
+ if (!data_race(global_flag)) {
+ spin_lock(&fp->f_lock);
+ if (!smp_load_acquire(&global_flag)) {
+ do_something(fp);
+ spin_unlock(&fp->f_lock);
+ return;
+ }
+ spin_unlock(&fp->f_lock);
+ }
+ spin_lock(&global_lock);
+ /* global_lock held, thus global flag cannot be set. */
+ spin_lock(&fp->f_lock);
+ spin_unlock(&global_lock);
+ /*
+ * global_flag might be set here, but begin_global()
+ * will wait for ->f_lock to be released.
+ */
+ do_something(fp);
+ spin_unlock(&fp->f_lock);
+ }
+
+ void begin_global(void)
+ {
+ int i;
+
+ spin_lock(&global_lock);
+ WRITE_ONCE(global_flag, true);
+ for (i = 0; i < nfoo; i++) {
+ /*
+ * Wait for pre-existing local locks. One at
+ * a time to avoid lockdep limitations.
+ */
+			spin_lock(&foo_array[i].f_lock);
+			spin_unlock(&foo_array[i].f_lock);
+ }
+ }
+
+ void end_global(void)
+ {
+ smp_store_release(&global_flag, false);
+ spin_unlock(&global_lock);
+ }
+
+All code paths leading from the do_something_locked() function's first
+read from global_flag acquire a lock, so endless load fusing cannot
+happen.
+
+If the value initially read from global_flag is false, then global_flag
+is rechecked while holding ->f_lock, and if that recheck also finds the
+flag false, holding ->f_lock prevents begin_global() from completing.
+It is therefore safe to invoke do_something().
+
+Otherwise, if either value read from global_flag is true, then after
+global_lock is acquired global_flag must be false. The acquisition of
+->f_lock will prevent any call to begin_global() from returning, which
+means that it is safe to release global_lock and invoke do_something().
+
+For this to work, only those foo structures in foo_array[] may be passed
+to do_something_locked(). The reason for this is that the synchronization
+with begin_global() relies on momentarily holding the lock of each and
+every foo structure.
+
+The smp_load_acquire() and smp_store_release() are required because
+changes to a foo structure between calls to begin_global() and
+end_global() are carried out without holding that structure's ->f_lock.
+The smp_load_acquire() and smp_store_release() ensure that the next
+invocation of do_something() from do_something_locked() will see those
+changes.
+
+
+Lockless Reads and Writes
+-------------------------
+
+For another example, suppose a shared variable "foo" is both read and
+updated locklessly. The code might look as follows:
+
+ int foo;
+
+ int update_foo(int newval)
+ {
+ int ret;
+
+ ret = xchg(&foo, newval);
+ do_something(newval);
+ return ret;
+ }
+
+ int read_foo(void)
+ {
+ do_something_else();
+ return READ_ONCE(foo);
+ }
+
+Because foo is accessed locklessly, all accesses are marked. It does
+not make sense to use ASSERT_EXCLUSIVE_WRITER() in this case because
+there really can be concurrent lockless writers. KCSAN would
+flag any concurrent plain C-language reads from foo, and given
+CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=n, also any concurrent plain
+C-language writes to foo.
+
+
+Lockless Reads and Writes, But With Single-Threaded Initialization
+------------------------------------------------------------------
+
+For yet another example, suppose that foo is initialized in a
+single-threaded manner, but that a number of kthreads are then created
+that locklessly and concurrently access foo. Some snippets of this code
+might look as follows:
+
+ int foo;
+
+ void initialize_foo(int initval, int nkthreads)
+ {
+ int i;
+
+ foo = initval;
+ ASSERT_EXCLUSIVE_ACCESS(foo);
+ for (i = 0; i < nkthreads; i++)
+ kthread_run(access_foo_concurrently, ...);
+ }
+
+ /* Called from access_foo_concurrently(). */
+ int update_foo(int newval)
+ {
+ int ret;
+
+ ret = xchg(&foo, newval);
+ do_something(newval);
+ return ret;
+ }
+
+ /* Also called from access_foo_concurrently(). */
+ int read_foo(void)
+ {
+ do_something_else();
+ return READ_ONCE(foo);
+ }
+
+The initialize_foo() function uses a plain C-language write to foo because
+there are not supposed to be concurrent accesses during initialization. The
+ASSERT_EXCLUSIVE_ACCESS() allows KCSAN to flag buggy concurrent unmarked
+reads, and the ASSERT_EXCLUSIVE_ACCESS() call further allows KCSAN to
+flag buggy concurrent writes, even if: (1) Those writes are marked or
+(2) The kernel was built with CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.
+
+
+Checking Stress-Test Race Coverage
+----------------------------------
+
+When designing stress tests it is important to ensure that race conditions
+of interest really do occur. For example, consider the following code
+fragment:
+
+ int foo;
+
+ int update_foo(int newval)
+ {
+ return xchg(&foo, newval);
+ }
+
+ int xor_shift_foo(int shift, int mask)
+ {
+ int old, new, newold;
+
+ newold = data_race(foo); /* Checked by cmpxchg(). */
+ do {
+ old = newold;
+ new = (old << shift) ^ mask;
+ newold = cmpxchg(&foo, old, new);
+ } while (newold != old);
+ return old;
+ }
+
+ int read_foo(void)
+ {
+ return READ_ONCE(foo);
+ }
+
+If it is possible for update_foo(), xor_shift_foo(), and read_foo() to be
+invoked concurrently, the stress test should force this concurrency to
+actually happen. KCSAN can evaluate the stress test when the above code
+is modified to read as follows:
+
+ int foo;
+
+ int update_foo(int newval)
+ {
+ ASSERT_EXCLUSIVE_ACCESS(foo);
+ return xchg(&foo, newval);
+ }
+
+ int xor_shift_foo(int shift, int mask)
+ {
+ int old, new, newold;
+
+ newold = data_race(foo); /* Checked by cmpxchg(). */
+ do {
+ old = newold;
+ new = (old << shift) ^ mask;
+ ASSERT_EXCLUSIVE_ACCESS(foo);
+ newold = cmpxchg(&foo, old, new);
+ } while (newold != old);
+ return old;
+ }
+
+ int read_foo(void)
+ {
+ ASSERT_EXCLUSIVE_ACCESS(foo);
+ return READ_ONCE(foo);
+ }
+
+If a given stress-test run does not result in KCSAN complaints from
+each possible pair of ASSERT_EXCLUSIVE_ACCESS() invocations, the
+stress test needs improvement. If the stress test were to be evaluated
+on a regular basis, it would be wise to place the above instances of
+ASSERT_EXCLUSIVE_ACCESS() under #ifdef so that they did not result in
+false positives when not evaluating the stress test.
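+
+For example, one minimal sketch of such an #ifdef wrapper (the Kconfig
+option and macro names here are hypothetical, not anything provided by
+the kernel) might look like this:
+
+ /* Hypothetical wrapper; compiles away outside stress-test builds. */
+ #ifdef CONFIG_FOO_STRESS_COVERAGE
+ #define STRESS_ASSERT_EXCLUSIVE_ACCESS(var) ASSERT_EXCLUSIVE_ACCESS(var)
+ #else
+ #define STRESS_ASSERT_EXCLUSIVE_ACCESS(var) do { } while (0)
+ #endif
+
+ int update_foo(int newval)
+ {
+ STRESS_ASSERT_EXCLUSIVE_ACCESS(foo);
+ return xchg(&foo, newval);
+ }
+
+With the Kconfig option disabled, the assertions vanish at compile time
+and so cannot produce false positives during normal testing.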
+
+
+REFERENCES
+==========
+
+[1] "Concurrency bugs should fear the big bad data-race detector (part 2)"
+ https://lwn.net/Articles/816854/
+
+[2] "Who's afraid of a big bad optimizing compiler?"
+ https://lwn.net/Articles/793253/
diff --git a/tools/memory-model/Documentation/cheatsheet.txt b/tools/memory-model/Documentation/cheatsheet.txt
index 33ba98d72b16..99d00870b160 100644
--- a/tools/memory-model/Documentation/cheatsheet.txt
+++ b/tools/memory-model/Documentation/cheatsheet.txt
@@ -3,9 +3,9 @@
C Self R W RMW Self R W DR DW RMW SV
-- ---- - - --- ---- - - -- -- --- --
-Store, e.g., WRITE_ONCE() Y Y
-Load, e.g., READ_ONCE() Y Y Y Y
-Unsuccessful RMW operation Y Y Y Y
+Relaxed store Y Y
+Relaxed load Y Y Y Y
+Relaxed RMW operation Y Y Y Y
rcu_dereference() Y Y Y Y
Successful *_acquire() R Y Y Y Y Y Y
Successful *_release() C Y Y Y W Y
@@ -17,14 +17,19 @@ smp_mb__before_atomic() CP Y Y Y a a a a Y
smp_mb__after_atomic() CP a a Y Y Y Y Y Y
-Key: C: Ordering is cumulative
- P: Ordering propagates
- R: Read, for example, READ_ONCE(), or read portion of RMW
- W: Write, for example, WRITE_ONCE(), or write portion of RMW
- Y: Provides ordering
- a: Provides ordering given intervening RMW atomic operation
- DR: Dependent read (address dependency)
- DW: Dependent write (address, data, or control dependency)
- RMW: Atomic read-modify-write operation
- SELF: Orders self, as opposed to accesses before and/or after
- SV: Orders later accesses to the same variable
+Key: Relaxed: A relaxed operation is either READ_ONCE(), WRITE_ONCE(),
+ a *_relaxed() RMW operation, an unsuccessful RMW
+ operation, a non-value-returning RMW operation such
+ as atomic_inc(), or one of the atomic*_read() and
+ atomic*_set() family of operations.
+ C: Ordering is cumulative
+ P: Ordering propagates
+ R: Read, for example, READ_ONCE(), or read portion of RMW
+ W: Write, for example, WRITE_ONCE(), or write portion of RMW
+ Y: Provides ordering
+ a: Provides ordering given intervening RMW atomic operation
+ DR: Dependent read (address dependency)
+ DW: Dependent write (address, data, or control dependency)
+ RMW: Atomic read-modify-write operation
+ SELF: Orders self, as opposed to accesses before and/or after
+ SV: Orders later accesses to the same variable
diff --git a/tools/memory-model/Documentation/control-dependencies.txt b/tools/memory-model/Documentation/control-dependencies.txt
new file mode 100644
index 000000000000..8b743d20fe27
--- /dev/null
+++ b/tools/memory-model/Documentation/control-dependencies.txt
@@ -0,0 +1,258 @@
+CONTROL DEPENDENCIES
+====================
+
+A major difficulty with control dependencies is that current compilers
+do not support them. One purpose of this document is therefore to
+help you prevent your compiler from breaking your code. However,
+control dependencies also pose other challenges, which leads to the
+second purpose of this document, namely to help you to avoid breaking
+your own code, even in the absence of help from your compiler.
+
+One such challenge is that control dependencies order only later stores.
+Therefore, a load-load control dependency will not preserve ordering
+unless a read memory barrier is provided. Consider the following code:
+
+ q = READ_ONCE(a);
+ if (q)
+ p = READ_ONCE(b);
+
+This is not guaranteed to provide any ordering because some types of CPUs
+are permitted to predict the result of the load from "b". This prediction
+can cause other CPUs to see this load as having happened before the load
+from "a". This means that an explicit read barrier is required, for example
+as follows:
+
+ q = READ_ONCE(a);
+ if (q) {
+ smp_rmb();
+ p = READ_ONCE(b);
+ }
+
+However, stores are not speculated. This means that ordering is
+(usually) guaranteed for load-store control dependencies, as in the
+following example:
+
+ q = READ_ONCE(a);
+ if (q)
+ WRITE_ONCE(b, 1);
+
+Control dependencies can pair with each other and with other types
+of ordering. But please note that neither the READ_ONCE() nor the
+WRITE_ONCE() are optional. Without the READ_ONCE(), the compiler might
+fuse the load from "a" with other loads. Without the WRITE_ONCE(),
+the compiler might fuse the store to "b" with other stores. Worse yet,
+the compiler might convert the store into a load and a check followed
+by a store, and this compiler-generated load would not be ordered by
+the control dependency.
+
+Furthermore, if the compiler is able to prove that the value of variable
+"a" is always non-zero, it would be well within its rights to optimize
+the original example by eliminating the "if" statement as follows:
+
+ q = a;
+ b = 1; /* BUG: Compiler and CPU can both reorder!!! */
+
+So don't leave out either the READ_ONCE() or the WRITE_ONCE().
+In particular, although READ_ONCE() does force the compiler to emit a
+load, it does *not* force the compiler to actually use the loaded value.
+
+It is tempting to try to use control dependencies to enforce ordering on
+identical stores on both branches of the "if" statement as follows:
+
+ q = READ_ONCE(a);
+ if (q) {
+ barrier();
+ WRITE_ONCE(b, 1);
+ do_something();
+ } else {
+ barrier();
+ WRITE_ONCE(b, 1);
+ do_something_else();
+ }
+
+Unfortunately, current compilers will transform this as follows at high
+optimization levels:
+
+ q = READ_ONCE(a);
+ barrier();
+ WRITE_ONCE(b, 1); /* BUG: No ordering vs. load from a!!! */
+ if (q) {
+ /* WRITE_ONCE(b, 1); -- moved up, BUG!!! */
+ do_something();
+ } else {
+ /* WRITE_ONCE(b, 1); -- moved up, BUG!!! */
+ do_something_else();
+ }
+
+Now there is no conditional between the load from "a" and the store to
+"b", which means that the CPU is within its rights to reorder them: The
+conditional is absolutely required, and must be present in the final
+assembly code, after all of the compiler and link-time optimizations
+have been applied. Therefore, if you need ordering in this example,
+you must use explicit memory ordering, for example, smp_store_release():
+
+ q = READ_ONCE(a);
+ if (q) {
+ smp_store_release(&b, 1);
+ do_something();
+ } else {
+ smp_store_release(&b, 1);
+ do_something_else();
+ }
+
+Without explicit memory ordering, control-dependency-based ordering is
+guaranteed only when the stores differ, for example:
+
+ q = READ_ONCE(a);
+ if (q) {
+ WRITE_ONCE(b, 1);
+ do_something();
+ } else {
+ WRITE_ONCE(b, 2);
+ do_something_else();
+ }
+
+The initial READ_ONCE() is still required to prevent the compiler from
+knowing too much about the value of "a".
+
+But please note that you need to be careful what you do with the local
+variable "q", otherwise the compiler might be able to guess the value
+and again remove the conditional branch that is absolutely required to
+preserve ordering. For example:
+
+ q = READ_ONCE(a);
+ if (q % MAX) {
+ WRITE_ONCE(b, 1);
+ do_something();
+ } else {
+ WRITE_ONCE(b, 2);
+ do_something_else();
+ }
+
+If MAX is compile-time defined to be 1, then the compiler knows that
+(q % MAX) must be equal to zero, regardless of the value of "q".
+The compiler is therefore within its rights to transform the above code
+into the following:
+
+ q = READ_ONCE(a);
+ WRITE_ONCE(b, 2);
+ do_something_else();
+
+Given this transformation, the CPU is not required to respect the ordering
+between the load from variable "a" and the store to variable "b". It is
+tempting to add a barrier(), but this does not help. The conditional
+is gone, and the barrier won't bring it back. Therefore, if you need
+to rely on control dependencies to produce this ordering, you should
+make sure that MAX is greater than one, perhaps as follows:
+
+ q = READ_ONCE(a);
+ BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
+ if (q % MAX) {
+ WRITE_ONCE(b, 1);
+ do_something();
+ } else {
+ WRITE_ONCE(b, 2);
+ do_something_else();
+ }
+
+Please note once again that each leg of the "if" statement absolutely
+must store different values to "b". As in previous examples, if the two
+values were identical, the compiler could pull this store outside of the
+"if" statement, destroying the control dependency's ordering properties.
+
+You must also be careful to avoid relying too much on boolean short-circuit
+evaluation. Consider this example:
+
+ q = READ_ONCE(a);
+ if (q || 1 > 0)
+ WRITE_ONCE(b, 1);
+
+Because the first condition cannot fault and the second condition is
+always true, the compiler can transform this example as follows, again
+destroying the control dependency's ordering:
+
+ q = READ_ONCE(a);
+ WRITE_ONCE(b, 1);
+
+This is yet another example showing the importance of preventing the
+compiler from out-guessing your code. Again, although READ_ONCE() really
+does force the compiler to emit code for a given load, the compiler is
+within its rights to discard the loaded value.
+
+In addition, control dependencies apply only to the then-clause and
+else-clause of the "if" statement in question. In particular, they do
+not necessarily order the code following the entire "if" statement:
+
+ q = READ_ONCE(a);
+ if (q) {
+ WRITE_ONCE(b, 1);
+ } else {
+ WRITE_ONCE(b, 2);
+ }
+ WRITE_ONCE(c, 1); /* BUG: No ordering against the read from "a". */
+
+It is tempting to argue that there in fact is ordering because the
+compiler cannot reorder volatile accesses and also cannot reorder
+the writes to "b" with the condition. Unfortunately for this line
+of reasoning, the compiler might compile the two writes to "b" as
+conditional-move instructions, as in this fanciful pseudo-assembly
+language:
+
+ ld r1,a
+ cmp r1,$0
+ cmov,ne r4,$1
+ cmov,eq r4,$2
+ st r4,b
+ st $1,c
+
+The control dependencies would then extend only to the pair of cmov
+instructions and the store depending on them. This means that a weakly
+ordered CPU would have no dependency of any sort between the load from
+"a" and the store to "c". In short, control dependencies provide ordering
+only to the stores in the then-clause and else-clause of the "if" statement
+in question (including functions invoked by those two clauses), and not
+to code following that "if" statement.
+
+
+In summary:
+
+ (*) Control dependencies can order prior loads against later stores.
+ However, they do *not* guarantee any other sort of ordering:
+ Not prior loads against later loads, nor prior stores against
+ later anything. If you need these other forms of ordering, use
+ smp_load_acquire(), smp_store_release(), or, in the case of prior
+ stores and later loads, smp_mb().
+
+ (*) If both legs of the "if" statement contain identical stores to
+ the same variable, then you must explicitly order those stores,
+ either by preceding both of them with smp_mb() or by using
+ smp_store_release(). Please note that it is *not* sufficient to use
+ barrier() at the beginning and end of each leg of the "if" statement
+ because, as shown by the example above, optimizing compilers can
+ destroy the control dependency while respecting the letter of the
+ barrier() law.
+
+ (*) Control dependencies require at least one run-time conditional
+ between the prior load and the subsequent store, and this
+ conditional must involve the prior load. If the compiler is able
+ to optimize the conditional away, it will have also optimized
+ away the ordering. Careful use of READ_ONCE() and WRITE_ONCE()
+ can help to preserve the needed conditional.
+
+ (*) Control dependencies require that the compiler avoid reordering the
+ dependency into nonexistence. Careful use of READ_ONCE() or
+ atomic{,64}_read() can help to preserve your control dependency.
+
+ (*) Control dependencies apply only to the then-clause and else-clause
+ of the "if" statement containing the control dependency, including
+ any functions that these two clauses call. Control dependencies
+ do *not* apply to code beyond the end of that "if" statement.
+
+ (*) Control dependencies pair normally with other types of barriers.
+
+ (*) Control dependencies do *not* provide multicopy atomicity. If you
+ need all the CPUs to agree on the ordering of a given store against
+ all other accesses, use smp_mb().
+
+ (*) Compilers do not understand control dependencies. It is therefore
+ your job to ensure that they do not break your code.
diff --git a/tools/memory-model/Documentation/explanation.txt b/tools/memory-model/Documentation/explanation.txt
index e91a2eb19592..6dc8b3642458 100644
--- a/tools/memory-model/Documentation/explanation.txt
+++ b/tools/memory-model/Documentation/explanation.txt
@@ -28,9 +28,10 @@ Explanation of the Linux-Kernel Memory Consistency Model
20. THE HAPPENS-BEFORE RELATION: hb
21. THE PROPAGATES-BEFORE RELATION: pb
22. RCU RELATIONS: rcu-link, rcu-gp, rcu-rscsi, rcu-order, rcu-fence, and rb
- 23. LOCKING
- 24. PLAIN ACCESSES AND DATA RACES
- 25. ODDS AND ENDS
+ 23. SRCU READ-SIDE CRITICAL SECTIONS
+ 24. LOCKING
+ 25. PLAIN ACCESSES AND DATA RACES
+ 26. ODDS AND ENDS
@@ -464,9 +465,10 @@ to address dependencies, since the address of a location accessed
through a pointer will depend on the value read earlier from that
pointer.
-Finally, a read event and another memory access event are linked by a
-control dependency if the value obtained by the read affects whether
-the second event is executed at all. Simple example:
+Finally, a read event X and a write event Y are linked by a control
+dependency if Y syntactically lies within an arm of an if statement and
+X affects the evaluation of the if condition via a data or address
+dependency (or similarly for a switch statement). Simple example:
int x, y;
@@ -485,6 +487,57 @@ have R ->po X. It wouldn't make sense for a computation to depend
somehow on a value that doesn't get loaded from shared memory until
later in the code!
+Here's a trick question: When is a dependency not a dependency? Answer:
+When it is purely syntactic rather than semantic. We say a dependency
+between two accesses is purely syntactic if the second access doesn't
+actually depend on the result of the first. Here is a trivial example:
+
+ r1 = READ_ONCE(x);
+ WRITE_ONCE(y, r1 * 0);
+
+There appears to be a data dependency from the load of x to the store
+of y, since the value to be stored is computed from the value that was
+loaded. But in fact, the value stored does not really depend on
+anything since it will always be 0. Thus the data dependency is only
+syntactic (it appears to exist in the code) but not semantic (the
+second access will always be the same, regardless of the value of the
+first access). Given code like this, a compiler could simply discard
+the value returned by the load from x, which would certainly destroy
+any dependency. (The compiler is not permitted to eliminate entirely
+the load generated for a READ_ONCE() -- that's one of the nice
+properties of READ_ONCE() -- but it is allowed to ignore the load's
+value.)
+
+It's natural to object that no one in their right mind would write
+code like the above. However, macro expansions can easily give rise
+to this sort of thing, in ways that often are not apparent to the
+programmer.
+
+Another mechanism that can lead to purely syntactic dependencies is
+related to the notion of "undefined behavior". Certain program
+behaviors are called "undefined" in the C language specification,
+which means that when they occur there are no guarantees at all about
+the outcome. Consider the following example:
+
+ int a[1];
+ int i;
+
+ r1 = READ_ONCE(i);
+ r2 = READ_ONCE(a[r1]);
+
+Access beyond the end or before the beginning of an array is one kind
+of undefined behavior. Therefore the compiler doesn't have to worry
+about what will happen if r1 is nonzero, and it can assume that r1
+will always be zero regardless of the value actually loaded from i.
+(If the assumption turns out to be wrong the resulting behavior will
+be undefined anyway, so the compiler doesn't care!) Thus the value
+from the load can be discarded, breaking the address dependency.
+
+The LKMM is unaware that purely syntactic dependencies are different
+from semantic dependencies and therefore mistakenly predicts that the
+accesses in the two examples above will be ordered. This is another
+example of how the compiler can undermine the memory model. Be warned.
+
THE READS-FROM RELATION: rf, rfi, and rfe
-----------------------------------------
@@ -955,6 +1008,36 @@ order. Equivalently,
where the rmw relation links the read and write events making up each
atomic update. This is what the LKMM's "atomic" axiom says.
+Atomic rmw updates play one more role in the LKMM: They can form "rmw
+sequences". An rmw sequence is simply a bunch of atomic updates where
+each update reads from the previous one. Written using events, it
+looks like this:
+
+ Z0 ->rf Y1 ->rmw Z1 ->rf ... ->rf Yn ->rmw Zn,
+
+where Z0 is some store event and n can be any number (even 0, in the
+degenerate case). We write this relation as: Z0 ->rmw-sequence Zn.
+Note that this implies Z0 and Zn are stores to the same variable.
+
+Rmw sequences have a special property in the LKMM: They can extend the
+cumul-fence relation. That is, if we have:
+
+ U ->cumul-fence X -> rmw-sequence Y
+
+then also U ->cumul-fence Y. Thinking about this in terms of the
+operational model, U ->cumul-fence X says that the store U propagates
+to each CPU before the store X does. Then the fact that X and Y are
+linked by an rmw sequence means that U also propagates to each CPU
+before Y does. In an analogous way, rmw sequences can also extend
+the w-post-bounded relation defined below in the PLAIN ACCESSES AND
+DATA RACES section.
+
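+For example, consider the following sketch (not one of the litmus tests
+shipped with the LKMM), in which x and y are initially zero and the
+xchg_relaxed() provides no ordering of its own:
+
+ int x, y;
+
+ P0()
+ {
+ WRITE_ONCE(x, 1);
+ smp_store_release(&y, 1); /* Z0 */
+ }
+
+ P1()
+ {
+ int r1;
+
+ r1 = xchg_relaxed(&y, 2); /* store is Z1 */
+ }
+
+ P2()
+ {
+ int r2, r3;
+
+ r2 = READ_ONCE(y);
+ smp_rmb();
+ r3 = READ_ONCE(x);
+ }
+
+The smp_store_release() links the store to x to Z0 by cumul-fence.
+If r1 = 1 then the xchg_relaxed() reads from Z0, so that
+Z0 ->rmw-sequence Z1, and the store to x is therefore also cumul-fence
+before Z1. The store to x thus propagates to P2 before Z1 does, and so
+if r2 = 2 then the smp_rmb() guarantees that r3 = 1.
+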
+(The notion of rmw sequences in the LKMM is similar to, but not quite
+the same as, that of release sequences in the C11 memory model. They
+were added to the LKMM to fix an obscure bug; without them, atomic
+updates with full-barrier semantics did not always guarantee ordering
+at least as strong as atomic updates with release-barrier semantics.)
+
THE PRESERVED PROGRAM ORDER RELATION: ppo
-----------------------------------------
@@ -1122,12 +1205,10 @@ maintain at least the appearance of FIFO order.
In practice, this difficulty is solved by inserting a special fence
between P1's two loads when the kernel is compiled for the Alpha
architecture. In fact, as of version 4.15, the kernel automatically
-adds this fence (called smp_read_barrier_depends() and defined as
-nothing at all on non-Alpha builds) after every READ_ONCE() and atomic
-load. The effect of the fence is to cause the CPU not to execute any
-po-later instructions until after the local cache has finished
-processing all the stores it has already received. Thus, if the code
-was changed to:
+adds this fence after every READ_ONCE() and atomic load on Alpha. The
+effect of the fence is to cause the CPU not to execute any po-later
+instructions until after the local cache has finished processing all
+the stores it has already received. Thus, if the code was changed to:
P1()
{
@@ -1146,14 +1227,14 @@ READ_ONCE() or another synchronization primitive rather than accessed
directly.
The LKMM requires that smp_rmb(), acquire fences, and strong fences
-share this property with smp_read_barrier_depends(): They do not allow
-the CPU to execute any po-later instructions (or po-later loads in the
-case of smp_rmb()) until all outstanding stores have been processed by
-the local cache. In the case of a strong fence, the CPU first has to
-wait for all of its po-earlier stores to propagate to every other CPU
-in the system; then it has to wait for the local cache to process all
-the stores received as of that time -- not just the stores received
-when the strong fence began.
+share this property: They do not allow the CPU to execute any po-later
+instructions (or po-later loads in the case of smp_rmb()) until all
+outstanding stores have been processed by the local cache. In the
+case of a strong fence, the CPU first has to wait for all of its
+po-earlier stores to propagate to every other CPU in the system; then
+it has to wait for the local cache to process all the stores received
+as of that time -- not just the stores received when the strong fence
+began.
And of course, none of this matters for any architecture other than
Alpha.
@@ -1768,14 +1849,169 @@ section in P0 both starts before P1's grace period does and ends
before it does, and the critical section in P2 both starts after P1's
grace period does and ends after it does.
-Addendum: The LKMM now supports SRCU (Sleepable Read-Copy-Update) in
-addition to normal RCU. The ideas involved are much the same as
-above, with new relations srcu-gp and srcu-rscsi added to represent
-SRCU grace periods and read-side critical sections. There is a
-restriction on the srcu-gp and srcu-rscsi links that can appear in an
-rcu-order sequence (the srcu-rscsi links must be paired with srcu-gp
-links having the same SRCU domain with proper nesting); the details
-are relatively unimportant.
+The LKMM supports SRCU (Sleepable Read-Copy-Update) in addition to
+normal RCU. The ideas involved are much the same as above, with new
+relations srcu-gp and srcu-rscsi added to represent SRCU grace periods
+and read-side critical sections. However, there are some significant
+differences between RCU read-side critical sections and their SRCU
+counterparts, as described in the next section.
+
+
+SRCU READ-SIDE CRITICAL SECTIONS
+--------------------------------
+
+The LKMM uses the srcu-rscsi relation to model SRCU read-side critical
+sections. They differ from RCU read-side critical sections in the
+following respects:
+
+1. Unlike the analogous RCU primitives, synchronize_srcu(),
+ srcu_read_lock(), and srcu_read_unlock() take a pointer to a
+ struct srcu_struct as an argument. This structure is called
+ an SRCU domain, and calls linked by srcu-rscsi must have the
+ same domain. Read-side critical sections and grace periods
+ associated with different domains are independent of one
+ another; the SRCU version of the RCU Guarantee applies only
+ to pairs of critical sections and grace periods having the
+ same domain.
+
+2. srcu_read_lock() returns a value, called the index, which must
+ be passed to the matching srcu_read_unlock() call. Unlike
+ rcu_read_lock() and rcu_read_unlock(), an srcu_read_lock()
+ call does not always have to match the next unpaired
+ srcu_read_unlock(). In fact, it is possible for two SRCU
+ read-side critical sections to overlap partially, as in the
+ following example (where s is an srcu_struct and idx1 and idx2
+ are integer variables):
+
+ idx1 = srcu_read_lock(&s); // Start of first RSCS
+ idx2 = srcu_read_lock(&s); // Start of second RSCS
+ srcu_read_unlock(&s, idx1); // End of first RSCS
+ srcu_read_unlock(&s, idx2); // End of second RSCS
+
+ The matching is determined entirely by the domain pointer and
+ index value. By contrast, if the calls had been
+ rcu_read_lock() and rcu_read_unlock() then they would have
+ created two nested (fully overlapping) read-side critical
+ sections: an inner one and an outer one.
+
+3. The srcu_down_read() and srcu_up_read() primitives work
+ exactly like srcu_read_lock() and srcu_read_unlock(), except
+ that matching calls don't have to execute on the same CPU.
+ (The names are meant to be suggestive of operations on
+ semaphores.) Since the matching is determined by the domain
+ pointer and index value, these primitives make it possible for
+ an SRCU read-side critical section to start on one CPU and end
+ on another, so to speak.
+
+In order to account for these properties of SRCU, the LKMM models
+srcu_read_lock() as a special type of load event (which is
+appropriate, since it takes a memory location as argument and returns
+a value, just as a load does) and srcu_read_unlock() as a special type
+of store event (again appropriate, since it takes as arguments a
+memory location and a value). These loads and stores are annotated as
+belonging to the "srcu-lock" and "srcu-unlock" event classes
+respectively.
+
+This approach allows the LKMM to tell whether two events are
+associated with the same SRCU domain, simply by checking whether they
+access the same memory location (i.e., they are linked by the loc
+relation). It also gives a way to tell which unlock matches a
+particular lock, by checking for the presence of a data dependency
+from the load (srcu-lock) to the store (srcu-unlock). For example,
+given the situation outlined earlier (with statement labels added):
+
+ A: idx1 = srcu_read_lock(&s);
+ B: idx2 = srcu_read_lock(&s);
+ C: srcu_read_unlock(&s, idx1);
+ D: srcu_read_unlock(&s, idx2);
+
+the LKMM will treat A and B as loads from s yielding values saved in
+idx1 and idx2 respectively. Similarly, it will treat C and D as
+though they stored the values from idx1 and idx2 in s. The end result
+is much as if we had written:
+
+ A: idx1 = READ_ONCE(s);
+ B: idx2 = READ_ONCE(s);
+ C: WRITE_ONCE(s, idx1);
+ D: WRITE_ONCE(s, idx2);
+
+except for the presence of the special srcu-lock and srcu-unlock
+annotations. You can see at once that we have A ->data C and
+B ->data D. These dependencies tell the LKMM that C is the
+srcu-unlock event matching srcu-lock event A, and D is the
+srcu-unlock event matching srcu-lock event B.
+
+This approach is admittedly a hack, and it has the potential to lead
+to problems. For example, in:
+
+ idx1 = srcu_read_lock(&s);
+ srcu_read_unlock(&s, idx1);
+ idx2 = srcu_read_lock(&s);
+ srcu_read_unlock(&s, idx2);
+
+the LKMM will believe that idx2 must have the same value as idx1,
+since it reads from the immediately preceding store of idx1 in s.
+Fortunately this won't matter, assuming that litmus tests never do
+anything with SRCU index values other than pass them to
+srcu_read_unlock() or srcu_up_read() calls.
+
+However, sometimes it is necessary to store an index value in a
+shared variable temporarily. In fact, this is the only way for
+srcu_down_read() to pass the index it gets to an srcu_up_read() call
+on a different CPU. In more detail, we might have something like:
+
+ struct srcu_struct s;
+ int x;
+
+ P0()
+ {
+ int r0;
+
+ A: r0 = srcu_down_read(&s);
+ B: WRITE_ONCE(x, r0);
+ }
+
+ P1()
+ {
+ int r1;
+
+ C: r1 = READ_ONCE(x);
+ D: srcu_up_read(&s, r1);
+ }
+
+Assuming that P1 executes after P0 and does read the index value
+stored in x, we can write this (using brackets to represent event
+annotations) as:
+
+ A[srcu-lock] ->data B[once] ->rf C[once] ->data D[srcu-unlock].
+
+The LKMM defines a carry-srcu-data relation to express this pattern;
+it permits an arbitrarily long sequence of
+
+ data ; rf
+
+pairs (that is, a data link followed by an rf link) to occur between
+an srcu-lock event and the final data dependency leading to the
+matching srcu-unlock event. carry-srcu-data is complicated by the
+need to ensure that none of the intermediate store events in this
+sequence are instances of srcu-unlock. This is necessary because in a
+pattern like the one above:
+
+ A: idx1 = srcu_read_lock(&s);
+ B: srcu_read_unlock(&s, idx1);
+ C: idx2 = srcu_read_lock(&s);
+ D: srcu_read_unlock(&s, idx2);
+
+the LKMM treats B as a store to the variable s and C as a load from
+that variable, creating an undesirable rf link from B to C:
+
+ A ->data B ->rf C ->data D.
+
+This would cause carry-srcu-data to mistakenly extend a data
+dependency from A to D, giving the impression that D was the
+srcu-unlock event matching A's srcu-lock. To avoid such problems,
+carry-srcu-data does not accept sequences in which the end of any of
+the intermediate ->data links (B above) is an srcu-unlock event.
LOCKING
@@ -1815,15 +2051,16 @@ spin_trylock() -- we can call these things lock-releases and
lock-acquires -- have two properties beyond those of ordinary releases
and acquires.
-First, when a lock-acquire reads from a lock-release, the LKMM
-requires that every instruction po-before the lock-release must
-execute before any instruction po-after the lock-acquire. This would
-naturally hold if the release and acquire operations were on different
-CPUs, but the LKMM says it holds even when they are on the same CPU.
-For example:
+First, when a lock-acquire reads from or is po-after a lock-release,
+the LKMM requires that every instruction po-before the lock-release
+must execute before any instruction po-after the lock-acquire. This
+would naturally hold if the release and acquire operations were on
+different CPUs and accessed the same lock variable, but the LKMM says
+it also holds when they are on the same CPU, even if they access
+different lock variables. For example:
int x, y;
- spinlock_t s;
+ spinlock_t s, t;
P0()
{
@@ -1832,9 +2069,9 @@ For example:
spin_lock(&s);
r1 = READ_ONCE(x);
spin_unlock(&s);
- spin_lock(&s);
+ spin_lock(&t);
r2 = READ_ONCE(y);
- spin_unlock(&s);
+ spin_unlock(&t);
}
P1()
@@ -1844,10 +2081,10 @@ For example:
WRITE_ONCE(x, 1);
}
-Here the second spin_lock() reads from the first spin_unlock(), and
-therefore the load of x must execute before the load of y. Thus we
-cannot have r1 = 1 and r2 = 0 at the end (this is an instance of the
-MP pattern).
+Here the second spin_lock() is po-after the first spin_unlock(), and
+therefore the load of x must execute before the load of y, even though
+the two locking operations use different locks. Thus we cannot have
+r1 = 1 and r2 = 0 at the end (this is an instance of the MP pattern).
This requirement does not apply to ordinary release and acquire
fences, only to lock-related operations. For instance, suppose P0()
@@ -1874,13 +2111,13 @@ instructions in the following order:
and thus it could load y before x, obtaining r2 = 0 and r1 = 1.
-Second, when a lock-acquire reads from a lock-release, and some other
-stores W and W' occur po-before the lock-release and po-after the
-lock-acquire respectively, the LKMM requires that W must propagate to
-each CPU before W' does. For example, consider:
+Second, when a lock-acquire reads from or is po-after a lock-release,
+and some other stores W and W' occur po-before the lock-release and
+po-after the lock-acquire respectively, the LKMM requires that W must
+propagate to each CPU before W' does. For example, consider:
int x, y;
- spinlock_t x;
+ spinlock_t s;
P0()
{
@@ -1910,7 +2147,12 @@ each CPU before W' does. For example, consider:
If r1 = 1 at the end then the spin_lock() in P1 must have read from
the spin_unlock() in P0. Hence the store to x must propagate to P2
-before the store to y does, so we cannot have r2 = 1 and r3 = 0.
+before the store to y does, so we cannot have r2 = 1 and r3 = 0. But
+if P1 had used a lock variable different from s, the writes could have
+propagated in either order. (On the other hand, if the code in P0 and
+P1 had all executed on a single CPU, as in the example before this
+one, then the writes would have propagated in order even if the two
+critical sections used different lock variables.)
These two special requirements for lock-release and lock-acquire do
not arise from the operational model. Nevertheless, kernel developers
@@ -1987,28 +2229,36 @@ outcome undefined.
In technical terms, the compiler is allowed to assume that when the
program executes, there will not be any data races. A "data race"
-occurs when two conflicting memory accesses execute concurrently;
-two memory accesses "conflict" if:
+occurs when there are two memory accesses such that:
+
+1. they access the same location,
- they access the same location,
+2. at least one of them is a store,
- they occur on different CPUs (or in different threads on the
- same CPU),
+3. at least one of them is plain,
- at least one of them is a plain access,
+4. they occur on different CPUs (or in different threads on the
+ same CPU), and
- and at least one of them is a store.
+5. they execute concurrently.
-The LKMM tries to determine whether a program contains two conflicting
-accesses which may execute concurrently; if it does then the LKMM says
-there is a potential data race and makes no predictions about the
-program's outcome.
+In the literature, two accesses are said to "conflict" if they satisfy
+1 and 2 above. We'll go a little farther and say that two accesses
+are "race candidates" if they satisfy 1 - 4. Thus, whether or not two
+race candidates actually do race in a given execution depends on
+whether they are concurrent.
-Determining whether two accesses conflict is easy; you can see that
-all the concepts involved in the definition above are already part of
-the memory model. The hard part is telling whether they may execute
-concurrently. The LKMM takes a conservative attitude, assuming that
-accesses may be concurrent unless it can prove they cannot.
+The LKMM tries to determine whether a program contains race candidates
+which may execute concurrently; if it does then the LKMM says there is
+a potential data race and makes no predictions about the program's
+outcome.
+
+Determining whether two accesses are race candidates is easy; you can
+see that all the concepts involved in the definition above are already
+part of the memory model. The hard part is telling whether they may
+execute concurrently. The LKMM takes a conservative attitude,
+assuming that accesses may be concurrent unless it can prove they
+are not.
If two memory accesses aren't concurrent then one must execute before
the other. Therefore the LKMM decides two accesses aren't concurrent
@@ -2171,8 +2421,8 @@ again, now using plain accesses for buf:
}
This program does not contain a data race. Although the U and V
-accesses conflict, the LKMM can prove they are not concurrent as
-follows:
+accesses are race candidates, the LKMM can prove they are not
+concurrent as follows:
The smp_wmb() fence in P0 is both a compiler barrier and a
cumul-fence. It guarantees that no matter what hash of
@@ -2326,12 +2576,11 @@ could now perform the load of x before the load of ptr (there might be
a control dependency but no address dependency at the machine level).
Finally, it turns out there is a situation in which a plain write does
-not need to be w-post-bounded: when it is separated from the
-conflicting access by a fence. At first glance this may seem
-impossible. After all, to be conflicting the second access has to be
-on a different CPU from the first, and fences don't link events on
-different CPUs. Well, normal fences don't -- but rcu-fence can!
-Here's an example:
+not need to be w-post-bounded: when it is separated from the other
+race-candidate access by a fence. At first glance this may seem
+impossible. After all, to be race candidates the two accesses must
+be on different CPUs, and fences don't link events on different CPUs.
+Well, normal fences don't -- but rcu-fence can! Here's an example:
int x, y;
@@ -2367,7 +2616,7 @@ concurrent and there is no race, even though P1's plain store to y
isn't w-post-bounded by any marked accesses.
Putting all this material together yields the following picture. For
-two conflicting stores W and W', where W ->co W', the LKMM says the
+race-candidate stores W and W', where W ->co W', the LKMM says the
stores don't race if W can be linked to W' by a
w-post-bounded ; vis ; w-pre-bounded
@@ -2380,8 +2629,8 @@ sequence, and if W' is plain then they also have to be linked by a
w-post-bounded ; vis ; r-pre-bounded
-sequence. For a conflicting load R and store W, the LKMM says the two
-accesses don't race if R can be linked to W by an
+sequence. For race-candidate load R and store W, the LKMM says the
+two accesses don't race if R can be linked to W by an
r-post-bounded ; xb* ; w-pre-bounded
@@ -2413,20 +2662,20 @@ is, the rules governing the memory subsystem's choice of a store to
satisfy a load request and its determination of where a store will
fall in the coherence order):
- If R and W conflict and it is possible to link R to W by one
- of the xb* sequences listed above, then W ->rfe R is not
- allowed (i.e., a load cannot read from a store that it
+ If R and W are race candidates and it is possible to link R to
+ W by one of the xb* sequences listed above, then W ->rfe R is
+ not allowed (i.e., a load cannot read from a store that it
executes before, even if one or both is plain).
- If W and R conflict and it is possible to link W to R by one
- of the vis sequences listed above, then R ->fre W is not
- allowed (i.e., if a store is visible to a load then the load
- must read from that store or one coherence-after it).
+ If W and R are race candidates and it is possible to link W to
+ R by one of the vis sequences listed above, then R ->fre W is
+ not allowed (i.e., if a store is visible to a load then the
+ load must read from that store or one coherence-after it).
- If W and W' conflict and it is possible to link W to W' by one
- of the vis sequences listed above, then W' ->co W is not
- allowed (i.e., if one store is visible to a second then the
- second must come after the first in the coherence order).
+ If W and W' are race candidates and it is possible to link W
+ to W' by one of the vis sequences listed above, then W' ->co W
+ is not allowed (i.e., if one store is visible to a second then
+ the second must come after the first in the coherence order).
This is the extent to which the LKMM deals with plain accesses.
Perhaps it could say more (for example, plain accesses might
@@ -2482,7 +2731,7 @@ smp_store_release() -- which is basically how the Linux kernel treats
them.
Although we said that plain accesses are not linked by the ppo
-relation, they do contribute to it indirectly. Namely, when there is
+relation, they do contribute to it indirectly. Firstly, when there is
an address dependency from a marked load R to a plain store W,
followed by smp_wmb() and then a marked store W', the LKMM creates a
ppo link from R to W'. The reasoning behind this is perhaps a little
@@ -2491,6 +2740,13 @@ for this source code in which W' could execute before R. Just as with
pre-bounding by address dependencies, it is possible for the compiler
to undermine this relation if sufficient care is not taken.
+Secondly, plain accesses can carry dependencies: If a data dependency
+links a marked load R to a store W, and the store is read by a load R'
+from the same thread, then the data loaded by R' depends on the data
+loaded originally by R. Thus, if R' is linked to any access X by a
+dependency, R is also linked to access X by the same dependency, even
+if W or R' (or both!) are plain.
+
There are a few oddball fences which need special treatment:
smp_mb__before_atomic(), smp_mb__after_atomic(), and
smp_mb__after_spinlock(). The LKMM uses fence events with special
@@ -2505,7 +2761,7 @@ they behave as follows:
smp_mb__after_atomic() orders po-earlier atomic updates and
the events preceding them against all po-later events;
- smp_mb_after_spinlock() orders po-earlier lock acquisition
+ smp_mb__after_spinlock() orders po-earlier lock acquisition
events and the events preceding them against all po-later
events.
diff --git a/tools/memory-model/Documentation/glossary.txt b/tools/memory-model/Documentation/glossary.txt
new file mode 100644
index 000000000000..6f3d16dbf467
--- /dev/null
+++ b/tools/memory-model/Documentation/glossary.txt
@@ -0,0 +1,178 @@
+This document contains brief definitions of LKMM-related terms. Like most
+glossaries, it is not intended to be read front to back (except perhaps
+as a way of confirming a diagnosis of OCD), but rather to be searched
+for specific terms.
+
+
+Address Dependency: When the address of a later memory access is computed
+ based on the value returned by an earlier load, an "address
+ dependency" extends from that load extending to the later access.
+ Address dependencies are quite common in RCU read-side critical
+ sections:
+
+ 1 rcu_read_lock();
+ 2 p = rcu_dereference(gp);
+ 3 do_something(p->a);
+ 4 rcu_read_unlock();
+
+ In this case, because the address of "p->a" on line 3 is computed
+ from the value returned by the rcu_dereference() on line 2, the
+ address dependency extends from that rcu_dereference() to that
+ "p->a". In rare cases, optimizing compilers can destroy address
+ dependencies. Please see Documentation/RCU/rcu_dereference.rst
+ for more information.
+
+ See also "Control Dependency" and "Data Dependency".
+
+Acquire: With respect to a lock, acquiring that lock, for example,
+ using spin_lock(). With respect to a non-lock shared variable,
+ a special operation that includes a load and which orders that
+ load before later memory references running on that same CPU.
+ An example special acquire operation is smp_load_acquire(),
+ but atomic_read_acquire() and atomic_xchg_acquire() also include
+ acquire loads.
+
+ When an acquire load returns the value stored by a release store
+ to that same variable, (in other words, the acquire load "reads
+ from" the release store), then all operations preceding that
+ store "happen before" any operations following that load acquire.
+
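+ For example, consider the following sketch of the message-passing
+ pattern, with "x" carrying the data and "y" acting as the flag:
+
+ CPU 0 CPU 1
+ WRITE_ONCE(x, 1); r0 = smp_load_acquire(&y);
+ smp_store_release(&y, 1); r1 = READ_ONCE(x);
+
+ If the smp_load_acquire() reads from the smp_store_release(), so
+ that r0 == 1, then r1 is guaranteed to also be 1.
+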
+ See also "Happens-Before", "Reads-From", "Relaxed", and "Release".
+
+Coherence (co): When one CPU's store to a given variable overwrites
+ either the value from another CPU's store or some later value,
+ there is said to be a coherence link from the second CPU to
+ the first.
+
+ It is also possible to have a coherence link within a CPU, which
+ is a "coherence internal" (coi) link. The term "coherence
+ external" (coe) link is used when it is necessary to exclude
+ the coi case.
+
+ See also "From-reads" and "Reads-from".
+
+Control Dependency: When a later store's execution depends on a test
+ of a value computed from a value returned by an earlier load,
+ a "control dependency" extends from that load to that store.
+ For example:
+
+ 1 if (READ_ONCE(x))
+ 2 WRITE_ONCE(y, 1);
+
+ Here, the control dependency extends from the READ_ONCE() on
+ line 1 to the WRITE_ONCE() on line 2. Control dependencies are
+ fragile, and can be easily destroyed by optimizing compilers.
+ Please see control-dependencies.txt for more information.
+
+ See also "Address Dependency" and "Data Dependency".
+
+Cycle: Memory-barrier pairing is restricted to a pair of CPUs, as the
+ name suggests. And in a great many cases, a pair of CPUs is all
+ that is required. In other cases, the notion of pairing must be
+ extended to additional CPUs, and the result is called a "cycle".
+ In a cycle, each CPU's ordering interacts with that of the next:
+
+ CPU 0 CPU 1 CPU 2
+ WRITE_ONCE(x, 1); WRITE_ONCE(y, 1); WRITE_ONCE(z, 1);
+ smp_mb(); smp_mb(); smp_mb();
+ r0 = READ_ONCE(y); r1 = READ_ONCE(z); r2 = READ_ONCE(x);
+
+ CPU 0's smp_mb() interacts with that of CPU 1, which interacts
+ with that of CPU 2, which in turn interacts with that of CPU 0
+ to complete the cycle. Because of the smp_mb() calls between
+ each pair of memory accesses, the outcome where r0, r1, and r2
+ are all equal to zero is forbidden by LKMM.
+
+ See also "Pairing".
+
+Data Dependency: When the data written by a later store is computed based
+ on the value returned by an earlier load, a "data dependency"
+ extends from that load to that later store. For example:
+
+ 1 r1 = READ_ONCE(x);
+ 2 WRITE_ONCE(y, r1 + 1);
+
+ In this case, the data dependency extends from the READ_ONCE()
+ on line 1 to the WRITE_ONCE() on line 2. Data dependencies are
+ fragile and can be easily destroyed by optimizing compilers.
+ Because optimizing compilers put a great deal of effort into
+ working out what values integer variables might have, this is
+ especially true in cases where the dependency is carried through
+ an integer.
+
+ See also "Address Dependency" and "Control Dependency".
+
+From-Reads (fr): When one CPU's store to a given variable happened
+ too late to affect the value returned by another CPU's
+ load from that same variable, there is said to be a from-reads
+ link from the load to the store.
+
+ It is also possible to have a from-reads link within a CPU, which
+ is a "from-reads internal" (fri) link. The term "from-reads
+ external" (fre) link is used when it is necessary to exclude
+ the fri case.
+
+ See also "Coherence" and "Reads-from".
+
+Fully Ordered: An operation such as smp_mb() that orders all of
+ its CPU's prior accesses with all of that CPU's subsequent
+ accesses, or a marked access such as atomic_add_return()
+ that orders all of its CPU's prior accesses, itself, and
+ all of its CPU's subsequent accesses.
+
+Happens-Before (hb): A relation between two accesses in which LKMM
+ guarantees the first access precedes the second. For more
+ detail, please see the "THE HAPPENS-BEFORE RELATION: hb"
+ section of explanation.txt.
+
+Marked Access: An access to a variable that uses a special function or
+ macro such as "r1 = READ_ONCE(x)" or "smp_store_release(&a, 1)".
+
+ See also "Unmarked Access".
+
+Pairing: "Memory-barrier pairing" reflects the fact that synchronizing
+ data between two CPUs requires that both CPUs order their accesses.
+ Memory barriers thus tend to come in pairs, one executed by
+ one of the CPUs and the other by the other CPU. Of course,
+ pairing also occurs with other types of operations, so that a
+ smp_store_release() pairs with an smp_load_acquire() that reads
+ the value stored.
+
+ See also "Cycle".
+
+Reads-From (rf): When one CPU's load returns the value stored by some other
+ CPU, there is said to be a reads-from link from the second
+ CPU's store to the first CPU's load. Reads-from links have the
+ nice property that time must advance from the store to the load,
+ which means that algorithms using reads-from links can use lighter
+ weight ordering and synchronization compared to algorithms using
+ coherence and from-reads links.
+
+ It is also possible to have a reads-from link within a CPU, which
+ is a "reads-from internal" (rfi) link. The term "reads-from
+ external" (rfe) link is used when it is necessary to exclude
+ the rfi case.
+
+ See also Coherence" and "From-reads".
+
+Relaxed: A marked access that does not imply ordering, for example, a
+ READ_ONCE(), WRITE_ONCE(), a non-value-returning read-modify-write
+ operation, or a value-returning read-modify-write operation whose
+ name ends in "_relaxed".
+
+ See also "Acquire" and "Release".
+
+Release: With respect to a lock, releasing that lock, for example,
+ using spin_unlock(). With respect to a non-lock shared variable,
+ a special operation that includes a store and which orders that
+ store after earlier memory references that ran on that same CPU.
+ An example special release store is smp_store_release(), but
+ atomic_set_release() and atomic_cmpxchg_release() also include
+ release stores.
+
+ See also "Acquire" and "Relaxed".
+
+Unmarked Access: An access to a variable that uses normal C-language
+ syntax, for example, "a = b[2];".
+
+ See also "Marked Access".
diff --git a/tools/memory-model/Documentation/litmus-tests.txt b/tools/memory-model/Documentation/litmus-tests.txt
new file mode 100644
index 000000000000..acac527328a1
--- /dev/null
+++ b/tools/memory-model/Documentation/litmus-tests.txt
@@ -0,0 +1,1083 @@
+Linux-Kernel Memory Model Litmus Tests
+======================================
+
+This file describes the LKMM litmus-test format by example, describes
+some tricks and traps, and finally outlines LKMM's limitations. Earlier
+versions of this material appeared in a number of LWN articles, including:
+
+https://lwn.net/Articles/720550/
+ A formal kernel memory-ordering model (part 2)
+https://lwn.net/Articles/608550/
+ Axiomatic validation of memory barriers and atomic instructions
+https://lwn.net/Articles/470681/
+ Validating Memory Barriers and Atomic Instructions
+
+This document presents information in decreasing order of applicability,
+so that, where possible, the information that has proven more commonly
+useful is shown near the beginning.
+
+For information on installing LKMM, including the underlying "herd7"
+tool, please see tools/memory-model/README.
+
+
+Copy-Pasta
+==========
+
+As with other software, it is often better (if less macho) to adapt an
+existing litmus test than it is to create one from scratch. A number
+of litmus tests may be found in the kernel source tree:
+
+ tools/memory-model/litmus-tests/
+ Documentation/litmus-tests/
+
+Several thousand more example litmus tests are available on github
+and kernel.org:
+
+ https://github.com/paulmckrcu/litmus
+ https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd
+ https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/litmus
+
+The -l and -L arguments to "git grep" can be quite helpful in identifying
+existing litmus tests that are similar to the one you need. But even if
+you start with an existing litmus test, it is still helpful to have a
+good understanding of the litmus-test format.
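+
+For instance, when run from the top of the kernel source tree, a command
+along the following lines uses the "-l" argument mentioned above to list
+the litmus tests that use smp_store_release():
+
+ git grep -l smp_store_release -- tools/memory-model/litmus-tests
+
+Dropping the "-l" shows the matching lines themselves, which can help in
+judging how closely a given test matches your situation.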
+
+
+Examples and Format
+===================
+
+This section describes the overall format of litmus tests, starting
+with a small example of the message-passing pattern and moving on to
+more complex examples that illustrate explicit initialization and LKMM's
+minimalistic set of flow-control statements.
+
+
+Message-Passing Example
+-----------------------
+
+This section gives an overview of the format of a litmus test using an
+example based on the common message-passing use case. This use case
+appears often in the Linux kernel. For example, a flag (modeled by "y"
+below) indicates that a buffer (modeled by "x" below) is now completely
+filled in and ready for use. It would be very bad if the consumer saw the
+flag set, but, due to memory misordering, saw old values in the buffer.
+
+This example asks whether smp_store_release() and smp_load_acquire()
+suffice to avoid this bad outcome:
+
+ 1 C MP+pooncerelease+poacquireonce
+ 2
+ 3 {}
+ 4
+ 5 P0(int *x, int *y)
+ 6 {
+ 7 WRITE_ONCE(*x, 1);
+ 8 smp_store_release(y, 1);
+ 9 }
+10
+11 P1(int *x, int *y)
+12 {
+13 int r0;
+14 int r1;
+15
+16 r0 = smp_load_acquire(y);
+17 r1 = READ_ONCE(*x);
+18 }
+19
+20 exists (1:r0=1 /\ 1:r1=0)
+
+Line 1 starts with "C", which identifies this file as being in the
+LKMM C-language format (which, as we will see, is a small fragment
+of the full C language). The remainder of line 1 is the name of
+the test, which by convention is the filename with the ".litmus"
+suffix stripped. In this case, the actual test may be found in
+tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus
+in the Linux-kernel source tree.
+
+Mechanically generated litmus tests will often have an optional
+double-quoted comment string on the second line. Such strings are ignored
+when running the test. Yes, you can add your own comments to litmus
+tests, but this is a bit involved due to the use of multiple parsers.
+For now, you can use C-language comments in the C code, and these comments
+may be in either the "/* */" or the "//" style. A later section will
+cover the full litmus-test commenting story.
+
+Line 3 is the initialization section. Because the default initialization
+to zero suffices for this test, the "{}" syntax is used, which means the
+initialization section is empty. Litmus tests requiring non-default
+initialization must have non-empty initialization sections, as in the
+example that will be presented later in this document.
+
+Lines 5-9 show the first process and lines 11-18 the second process. Each
+process corresponds to a Linux-kernel task (or kthread, workqueue, thread,
+and so on; LKMM discussions often use these terms interchangeably).
+The name of the first process is "P0" and that of the second "P1".
+You can name your processes anything you like as long as the names consist
+of a single "P" followed by a number, and as long as the numbers are
+consecutive starting with zero. This can actually be quite helpful:
+for example, a .litmus file matching "^P1(" but not matching "^P2("
+must contain a two-process litmus test.
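+
+For instance, the following shell sketch, run in a directory containing
+.litmus files, lists the two-process tests:
+
+ grep -l '^P1(' *.litmus | xargs grep -L '^P2('
+
+Here "grep -l" selects the files that define a P1() process and
+"grep -L" then keeps only those that do not also define a P2() process.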
+
+The argument list for each function consists of pointers to the global
+variables used by that function. Unlike normal C-language parameters, the
+names are significant. The fact that both P0() and P1() have a formal
+parameter named "x" means that these two processes are working with the
+same global variable, also named "x". So the "int *x, int *y" on P0()
+and P1() mean that both processes are working with two shared global
+variables, "x" and "y". Global variables are always passed to processes
+by reference, hence "P0(int *x, int *y)", but *never* "P0(int x, int y)".
+
+P0() has no local variables, but P1() has two of them named "r0" and "r1".
+These names may be freely chosen, but for historical reasons stemming from
+other litmus-test formats, it is conventional to use names consisting of
+"r" followed by a number as shown here. A common bug in litmus tests
+is forgetting to add a global variable to a process's parameter list.
+This will sometimes result in an error message, but can also cause the
+intended global to instead be silently treated as an undeclared local
+variable.
+
+Each process's code is similar to Linux-kernel C, as can be seen on lines
+7-8 and 13-17. This code may use many of the Linux kernel's atomic
+operations, some of its exclusive-lock functions, and some of its RCU
+and SRCU functions. An approximate list of the currently supported
+functions may be found in the linux-kernel.def file.
+
+The P0() process does "WRITE_ONCE(*x, 1)" on line 7. Because "x" is a
+pointer in P0()'s parameter list, this does an unordered store to global
+variable "x". Line 8 does "smp_store_release(y, 1)", and because "y"
+is also in P0()'s parameter list, this does a release store to global
+variable "y".
+
+The P1() process declares two local variables on lines 13 and 14.
+Line 16 does "r0 = smp_load_acquire(y)" which does an acquire load
+from global variable "y" into local variable "r0". Line 17 does a
+"r1 = READ_ONCE(*x)", which does an unordered load from "*x" into local
+variable "r1". Both "x" and "y" are in P1()'s parameter list, so both
+reference the same global variables that are used by P0().
+
+Line 20 is the "exists" assertion expression to evaluate the final state.
+This final state is evaluated after the dust has settled: both processes
+have completed and all of their memory references and memory barriers
+have propagated to all parts of the system. The references to the local
+variables "r0" and "r1" in line 24 must be prefixed with "1:" to specify
+which process they are local to.
+
+Note that the assertion expression is written in the litmus-test
+language rather than in C. For example, single "=" is an equality
+operator rather than an assignment. The "/\" character combination means
+"and". Similarly, "\/" stands for "or". Both of these are ASCII-art
+representations of the corresponding mathematical symbols. Finally,
+"~" stands for "logical not", which is "!" in C, and not to be confused
+with the C-language "~" operator which instead stands for "bitwise not".
+Parentheses may be used to override precedence.
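+
+As a purely hypothetical illustration of these operators (it does not
+correspond to any test in the tree), the following clause would be
+satisfied when P1's r0 is 1 and either P1's r1 is 0 or P2's r2 is not 2:
+
+ exists (1:r0=1 /\ (1:r1=0 \/ ~(2:r2=2)))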
+
+The "exists" assertion on line 20 is satisfied if the consumer sees the
+flag ("y") set but the buffer ("x") as not yet filled in, that is, if P1()
+loaded a value from "x" that was equal to 1 but loaded a value from "y"
+that was still equal to zero.
+
+This example can be checked by running the following command, which
+absolutely must be run from the tools/memory-model directory and from
+this directory only:
+
+herd7 -conf linux-kernel.cfg litmus-tests/MP+pooncerelease+poacquireonce.litmus
+
+The output is the result of something similar to a full state-space
+search, and is as follows:
+
+ 1 Test MP+pooncerelease+poacquireonce Allowed
+ 2 States 3
+ 3 1:r0=0; 1:r1=0;
+ 4 1:r0=0; 1:r1=1;
+ 5 1:r0=1; 1:r1=1;
+ 6 No
+ 7 Witnesses
+ 8 Positive: 0 Negative: 3
+ 9 Condition exists (1:r0=1 /\ 1:r1=0)
+10 Observation MP+pooncerelease+poacquireonce Never 0 3
+11 Time MP+pooncerelease+poacquireonce 0.00
+12 Hash=579aaa14d8c35a39429b02e698241d09
+
+The most pertinent line is line 10, which contains "Never 0 3", which
+indicates that the bad result flagged by the "exists" clause never
+happens. This line might instead say "Sometimes" to indicate that the
+bad result happened in some but not all executions, or it might say
+"Always" to indicate that the bad result happened in all executions.
+(The herd7 tool doesn't judge, so it is only an LKMM convention that the
+"exists" clause indicates a bad result. To see this, invert the "exists"
+clause's condition and run the test.) The numbers ("0 3") at the end
+of this line indicate the number of end states satisfying the "exists"
+clause (0) and the number not satisfying that clause (3).
+
+Another important part of this output is shown in lines 2-5, repeated here:
+
+ 2 States 3
+ 3 1:r0=0; 1:r1=0;
+ 4 1:r0=0; 1:r1=1;
+ 5 1:r0=1; 1:r1=1;
+
+Line 2 gives the total number of end states, and each of lines 3-5 list
+one of these states, with the first ("1:r0=0; 1:r1=0;") indicating that
+both of P1()'s loads returned the value "0". As expected, given the
+"Never" on line 10, the state flagged by the "exists" clause is not
+listed. This full list of states can be helpful when debugging a new
+litmus test.
+
+The rest of the output is not normally needed, either due to irrelevance
+or due to being redundant with the lines discussed above. However, the
+following paragraph lists them for the benefit of readers possessed of
+an insatiable curiosity. Other readers should feel free to skip ahead.
+
+Line 1 echoes the test name, along with the words "Test" and "Allowed". Line 6's
+"No" says that the "exists" clause was not satisfied by any execution,
+and as such it has the same meaning as line 10's "Never". Line 7 is a
+lead-in to line 8's "Positive: 0 Negative: 3", which lists the number
+of end states satisfying and not satisfying the "exists" clause, just
+like the two numbers at the end of line 10. Line 9 repeats the "exists"
+clause so that you don't have to look it up in the litmus-test file.
+The number at the end of line 11 (which begins with "Time") gives the
+time in seconds required to analyze the litmus test. Small tests such
+as this one complete in a few milliseconds, so "0.00" is quite common.
+Line 12 gives a hash of the contents of the litmus-test file, and is used
+by tooling that manages litmus tests and their output. This tooling is
+used by people modifying LKMM itself, and among other things lets such
+people know which of the several thousand relevant litmus tests were
+affected by a given change to LKMM.
+
+
+Initialization
+--------------
+
+The previous example relied on the default zero initialization for
+"x" and "y", but a similar litmus test could instead initialize them
+to some other value:
+
+ 1 C MP+pooncerelease+poacquireonce
+ 2
+ 3 {
+ 4 x=42;
+ 5 y=42;
+ 6 }
+ 7
+ 8 P0(int *x, int *y)
+ 9 {
+10 WRITE_ONCE(*x, 1);
+11 smp_store_release(y, 1);
+12 }
+13
+14 P1(int *x, int *y)
+15 {
+16 int r0;
+17 int r1;
+18
+19 r0 = smp_load_acquire(y);
+20 r1 = READ_ONCE(*x);
+21 }
+22
+23 exists (1:r0=1 /\ 1:r1=42)
+
+Lines 3-6 now initialize both "x" and "y" to the value 42. This also
+means that the "exists" clause on line 23 must change "1:r1=0" to
+"1:r1=42".
+
+Running the test gives the same overall result as before, but with the
+value 42 appearing in place of the value zero:
+
+ 1 Test MP+pooncerelease+poacquireonce Allowed
+ 2 States 3
+ 3 1:r0=1; 1:r1=1;
+ 4 1:r0=42; 1:r1=1;
+ 5 1:r0=42; 1:r1=42;
+ 6 No
+ 7 Witnesses
+ 8 Positive: 0 Negative: 3
+ 9 Condition exists (1:r0=1 /\ 1:r1=42)
+10 Observation MP+pooncerelease+poacquireonce Never 0 3
+11 Time MP+pooncerelease+poacquireonce 0.02
+12 Hash=ab9a9b7940a75a792266be279a980156
+
+It is tempting to avoid the open-coded repetitions of the value "42"
+by defining another global variable "initval=42" and replacing all
+occurrences of "42" with "initval". This will not, repeat *not*,
+initialize "x" and "y" to 42, but instead to the address of "initval"
+(try it!). See the section below on linked lists to learn more about
+why this approach to initialization can be useful.
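+
+For reference, that variant's initialization block would look like
+this (a sketch, for illustration only):
+
+	{
+	initval=42;
+	x=initval;
+	y=initval;
+	}
+
+With this initialization, "x" and "y" each contain the address of
+"initval" rather than the value 42.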
+
+
+Control Structures
+------------------
+
+LKMM supports the C-language "if" statement, which allows modeling of
+conditional branches. In LKMM, conditional branches can affect ordering,
+but only if you are *very* careful (compilers are surprisingly able
+to optimize away conditional branches). The following example shows
+the "load buffering" (LB) use case that is used in the Linux kernel to
+synchronize between ring-buffer producers and consumers. In the example
+below, P0() is one side checking to see if an operation may proceed and
+P1() is the other side completing its update.
+
+ 1 C LB+fencembonceonce+ctrlonceonce
+ 2
+ 3 {}
+ 4
+ 5 P0(int *x, int *y)
+ 6 {
+ 7 int r0;
+ 8
+ 9 r0 = READ_ONCE(*x);
+10 if (r0)
+11 WRITE_ONCE(*y, 1);
+12 }
+13
+14 P1(int *x, int *y)
+15 {
+16 int r0;
+17
+18 r0 = READ_ONCE(*y);
+19 smp_mb();
+20 WRITE_ONCE(*x, 1);
+21 }
+22
+23 exists (0:r0=1 /\ 1:r0=1)
+
+P0()'s "if" statement on line 10 works as expected, so that line 11 is
+executed only if line 9 loads a non-zero value from "x". Because P1()'s
+write of "1" to "x" happens only after P1()'s read from "y", one would
+hope that the "exists" clause cannot be satisfied. LKMM agrees:
+
+ 1 Test LB+fencembonceonce+ctrlonceonce Allowed
+ 2 States 2
+ 3 0:r0=0; 1:r0=0;
+ 4 0:r0=1; 1:r0=0;
+ 5 No
+ 6 Witnesses
+ 7 Positive: 0 Negative: 2
+ 8 Condition exists (0:r0=1 /\ 1:r0=1)
+ 9 Observation LB+fencembonceonce+ctrlonceonce Never 0 2
+10 Time LB+fencembonceonce+ctrlonceonce 0.00
+11 Hash=e5260556f6de495fd39b556d1b831c3b
+
+In contrast, there is no "while" statement, because full state-space
+search has some difficulty with iteration. However, there
+are tricks that may be used to handle some special cases, which are
+discussed below. In addition, loop-unrolling tricks may be applied,
+albeit sparingly.
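+
+For example (a sketch, not from any litmus test in the tree), a fixed
+two-iteration loop such as
+
+	for (i = 0; i < 2; i++)
+		WRITE_ONCE(*x, i);
+
+cannot be used directly, but can be unrolled by hand into:
+
+	WRITE_ONCE(*x, 0);
+	WRITE_ONCE(*x, 1);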
+
+
+Tricks and Traps
+================
+
+This section covers extracting debug output from herd7, emulating
+spin loops, handling trivial linked lists, adding comments to litmus tests,
+emulating call_rcu(), and finally tricks to improve herd7 performance
+in order to better handle large litmus tests.
+
+
+Debug Output
+------------
+
+By default, the herd7 state output includes all variables mentioned
+in the "exists" clause. But sometimes debugging efforts are greatly
+aided by the values of other variables. Consider this litmus test
+(tools/memory-model/litmus-tests/SB+rfionceonce-poonceonces.litmus but
+slightly modified), which probes an obscure corner of hardware memory
+ordering:
+
+ 1 C SB+rfionceonce-poonceonces
+ 2
+ 3 {}
+ 4
+ 5 P0(int *x, int *y)
+ 6 {
+ 7 int r1;
+ 8 int r2;
+ 9
+10 WRITE_ONCE(*x, 1);
+11 r1 = READ_ONCE(*x);
+12 r2 = READ_ONCE(*y);
+13 }
+14
+15 P1(int *x, int *y)
+16 {
+17 int r3;
+18 int r4;
+19
+20 WRITE_ONCE(*y, 1);
+21 r3 = READ_ONCE(*y);
+22 r4 = READ_ONCE(*x);
+23 }
+24
+25 exists (0:r2=0 /\ 1:r4=0)
+
+The herd7 output is as follows:
+
+ 1 Test SB+rfionceonce-poonceonces Allowed
+ 2 States 4
+ 3 0:r2=0; 1:r4=0;
+ 4 0:r2=0; 1:r4=1;
+ 5 0:r2=1; 1:r4=0;
+ 6 0:r2=1; 1:r4=1;
+ 7 Ok
+ 8 Witnesses
+ 9 Positive: 1 Negative: 3
+10 Condition exists (0:r2=0 /\ 1:r4=0)
+11 Observation SB+rfionceonce-poonceonces Sometimes 1 3
+12 Time SB+rfionceonce-poonceonces 0.01
+13 Hash=c7f30fe0faebb7d565405d55b7318ada
+
+(This output indicates that CPUs are permitted to "snoop their own
+store buffers", which all of Linux's CPU families other than s390 will
+happily do. Such snooping results in disagreement among CPUs on the
+order of stores from different CPUs, which is rarely an issue.)
+
+But the herd7 output shows only the two variables mentioned in the
+"exists" clause. Someone modifying this test might wish to know the
+values of "x", "y", "0:r1", and "1:r3" as well. The "locations"
+statement on line 25 shows how to cause herd7 to display additional
+variables:
+
+ 1 C SB+rfionceonce-poonceonces
+ 2
+ 3 {}
+ 4
+ 5 P0(int *x, int *y)
+ 6 {
+ 7 int r1;
+ 8 int r2;
+ 9
+10 WRITE_ONCE(*x, 1);
+11 r1 = READ_ONCE(*x);
+12 r2 = READ_ONCE(*y);
+13 }
+14
+15 P1(int *x, int *y)
+16 {
+17 int r3;
+18 int r4;
+19
+20 WRITE_ONCE(*y, 1);
+21 r3 = READ_ONCE(*y);
+22 r4 = READ_ONCE(*x);
+23 }
+24
+25 locations [0:r1; 1:r3; x; y]
+26 exists (0:r2=0 /\ 1:r4=0)
+
+The herd7 output then displays the values of all the variables:
+
+ 1 Test SB+rfionceonce-poonceonces Allowed
+ 2 States 4
+ 3 0:r1=1; 0:r2=0; 1:r3=1; 1:r4=0; x=1; y=1;
+ 4 0:r1=1; 0:r2=0; 1:r3=1; 1:r4=1; x=1; y=1;
+ 5 0:r1=1; 0:r2=1; 1:r3=1; 1:r4=0; x=1; y=1;
+ 6 0:r1=1; 0:r2=1; 1:r3=1; 1:r4=1; x=1; y=1;
+ 7 Ok
+ 8 Witnesses
+ 9 Positive: 1 Negative: 3
+10 Condition exists (0:r2=0 /\ 1:r4=0)
+11 Observation SB+rfionceonce-poonceonces Sometimes 1 3
+12 Time SB+rfionceonce-poonceonces 0.01
+13 Hash=40de8418c4b395388f6501cafd1ed38d
+
+What if you would like to know the value of a particular global variable
+at some particular point in a given process's execution? One approach
+is to use a READ_ONCE() to load that global variable into a new local
+variable, then add that local variable to the "locations" clause.
+But be careful: In some litmus tests, adding a READ_ONCE() will change
+the outcome! For one example, please see the C-READ_ONCE.litmus and
+C-READ_ONCE-omitted.litmus tests located here:
+
+ https://github.com/paulmckrcu/litmus/blob/master/manual/kernel/
+
+
+Spin Loops
+----------
+
+The analysis carried out by herd7 explores the full state space, which
+is at best of exponential time complexity. Adding processes and
+increasing the amount of code in a given process can greatly increase
+execution time.
+Potentially infinite loops, such as those used to wait for locks to
+become available, are clearly problematic.
+
+Fortunately, it is possible to avoid state-space explosion by specially
+modeling such loops. For example, the following litmus test emulates
+locking using xchg_acquire(), but instead of enclosing xchg_acquire()
+in a spin loop, it instead excludes executions that fail to acquire the
+lock using a herd7 "filter" clause. Note that for exclusive locking, you
+are better off using the spin_lock() and spin_unlock() that LKMM directly
+models, if for no other reason than that these are much faster. However, the
+techniques illustrated in this section can be used for other purposes,
+such as emulating reader-writer locking, which LKMM does not yet model.
+
+ 1 C C-SB+l-o-o-u+l-o-o-u-X
+ 2
+ 3 {
+ 4 }
+ 5
+ 6 P0(int *sl, int *x0, int *x1)
+ 7 {
+ 8 int r2;
+ 9 int r1;
+10
+11 r2 = xchg_acquire(sl, 1);
+12 WRITE_ONCE(*x0, 1);
+13 r1 = READ_ONCE(*x1);
+14 smp_store_release(sl, 0);
+15 }
+16
+17 P1(int *sl, int *x0, int *x1)
+18 {
+19 int r2;
+20 int r1;
+21
+22 r2 = xchg_acquire(sl, 1);
+23 WRITE_ONCE(*x1, 1);
+24 r1 = READ_ONCE(*x0);
+25 smp_store_release(sl, 0);
+26 }
+27
+28 filter (0:r2=0 /\ 1:r2=0)
+29 exists (0:r1=0 /\ 1:r1=0)
+
+This litmus test may be found here:
+
+https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd/C-SB+l-o-o-u+l-o-o-u-X.litmus
+
+This test uses two global variables, "x0" and "x1", and also emulates a
+single global spinlock named "sl". This spinlock is held by whichever
+process changes the value of "sl" from "0" to "1", and is released when
+that process sets "sl" back to "0". P0()'s lock acquisition is emulated
+on line 11 using xchg_acquire(), which unconditionally stores the value
+"1" to "sl" and stores either "0" or "1" to "r2", depending on whether
+the lock acquisition was successful or unsuccessful (due to "sl" already
+having the value "1"), respectively. P1() operates in a similar manner.
+
+Rather unconventionally, execution appears to proceed to the critical
+section on lines 12 and 13 in either case. Line 14 then uses an
+smp_store_release() to store zero to "sl", thus emulating lock release.
+
+The case where xchg_acquire() fails to acquire the lock is handled by
+the "filter" clause on line 28, which tells herd7 to keep only those
+executions in which both "0:r2" and "1:r2" are zero, that is, to pay
+attention only to those executions in which both processes actually
+acquire the lock. Thus, the bogus executions that would execute the
+critical sections without holding the lock are discarded and any
+effects that they might have had are
+ignored. Note well that the "filter" clause keeps those executions
+for which its expression is satisfied, that is, for which the expression
+evaluates to true. In other words, the "filter" clause says what to
+keep, not what to discard.
+
+The result of running this test is as follows:
+
+ 1 Test C-SB+l-o-o-u+l-o-o-u-X Allowed
+ 2 States 2
+ 3 0:r1=0; 1:r1=1;
+ 4 0:r1=1; 1:r1=0;
+ 5 No
+ 6 Witnesses
+ 7 Positive: 0 Negative: 2
+ 8 Condition exists (0:r1=0 /\ 1:r1=0)
+ 9 Observation C-SB+l-o-o-u+l-o-o-u-X Never 0 2
+10 Time C-SB+l-o-o-u+l-o-o-u-X 0.03
+
+The "Never" on line 9 indicates that this use of xchg_acquire() and
+smp_store_release() really does correctly emulate locking.
+
+Why doesn't the litmus test take the simpler approach of using a spin loop
+to handle failed spinlock acquisitions, like the kernel does? The key
+insight behind this litmus test is that spin loops have no effect on the
+possible "exists"-clause outcomes of program execution in the absence
+of deadlock. In other words, given a high-quality lock-acquisition
+primitive in a deadlock-free program running on high-quality hardware,
+each lock acquisition will eventually succeed. Because herd7 already
+explores the full state space, the length of time required to actually
+acquire the lock does not matter. After all, herd7 already models all
+possible durations of the xchg_acquire() statements.
+
+Why not just fold the "filter" clause's expression into the "exists"
+clause, thus avoiding the "filter" clause entirely? This does work,
+but is slower.
+The reason that the "filter" clause is faster is that (in the common case)
+herd7 knows to abandon an execution as soon as the "filter" expression
+fails to be satisfied. In contrast, the "exists" clause is evaluated
+only at the end of time, thus requiring herd7 to waste time on bogus
+executions in which both critical sections proceed concurrently. In
+addition, some LKMM users like the separation of concerns provided by
+using both the "filter" and "exists" clauses.
+
+Readers lacking a pathological interest in odd corner cases should feel
+free to skip the remainder of this section.
+
+But what if the litmus test were to temporarily set "1:r2" to a non-zero
+value? Wouldn't that cause herd7 to abandon the execution prematurely
+due to an early mismatch of the "filter" clause?
+
+Why not just try it? Line 4 of the following modified litmus test
+introduces a new global variable "x2" that is initialized to "1". Line 23
+of P1() reads that variable into "1:r2" to force an early mismatch with
+the "filter" clause. Line 24 does a known-true "if" condition to defeat
+any static analysis that herd7 might do. Finally, the "exists" clause
+on line 32 is updated to a condition that is always satisfied at the end
+of the test.
+
+ 1 C C-SB+l-o-o-u+l-o-o-u-X
+ 2
+ 3 {
+ 4 x2=1;
+ 5 }
+ 6
+ 7 P0(int *sl, int *x0, int *x1)
+ 8 {
+ 9 int r2;
+10 int r1;
+11
+12 r2 = xchg_acquire(sl, 1);
+13 WRITE_ONCE(*x0, 1);
+14 r1 = READ_ONCE(*x1);
+15 smp_store_release(sl, 0);
+16 }
+17
+18 P1(int *sl, int *x0, int *x1, int *x2)
+19 {
+20 int r2;
+21 int r1;
+22
+23 r2 = READ_ONCE(*x2);
+24 if (r2)
+25 r2 = xchg_acquire(sl, 1);
+26 WRITE_ONCE(*x1, 1);
+27 r1 = READ_ONCE(*x0);
+28 smp_store_release(sl, 0);
+29 }
+30
+31 filter (0:r2=0 /\ 1:r2=0)
+32 exists (x1=1)
+
+If the "filter" clause were to check each variable at each point in the
+execution, running this litmus test would display no executions because
+all executions would be filtered out at line 23. However, the output
+is instead as follows:
+
+ 1 Test C-SB+l-o-o-u+l-o-o-u-X Allowed
+ 2 States 1
+ 3 x1=1;
+ 4 Ok
+ 5 Witnesses
+ 6 Positive: 2 Negative: 0
+ 7 Condition exists (x1=1)
+ 8 Observation C-SB+l-o-o-u+l-o-o-u-X Always 2 0
+ 9 Time C-SB+l-o-o-u+l-o-o-u-X 0.04
+10 Hash=080bc508da7f291e122c6de76c0088e3
+
+Line 3 shows that there is one execution that did not get filtered out,
+so the "filter" clause is evaluated only on the last assignment to
+the variables that it checks. In this case, the "filter" clause is a
+conjunction, so it might be evaluated twice, once at the final (and only)
+assignment to "0:r2" and once at the final assignment to "1:r2".
+
+
+Linked Lists
+------------
+
+LKMM can handle linked lists, but only linked lists in which each node
+contains nothing except a pointer to the next node in the list. This is
+of course quite restrictive, but there is nevertheless quite a bit that
+can be done within these confines, as can be seen in the litmus test
+at tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus:
+
+ 1 C MP+onceassign+derefonce
+ 2
+ 3 {
+ 4 y=z;
+ 5 z=0;
+ 6 }
+ 7
+ 8 P0(int *x, int **y)
+ 9 {
+10 WRITE_ONCE(*x, 1);
+11 rcu_assign_pointer(*y, x);
+12 }
+13
+14 P1(int *x, int **y)
+15 {
+16 int *r0;
+17 int r1;
+18
+19 rcu_read_lock();
+20 r0 = rcu_dereference(*y);
+21 r1 = READ_ONCE(*r0);
+22 rcu_read_unlock();
+23 }
+24
+25 exists (1:r0=x /\ 1:r1=0)
+
+Line 4's "y=z" may seem odd, given that "z" has not yet been initialized.
+But "y=z" does not set the value of "y" to that of "z", but instead
+sets the value of "y" to the *address* of "z". Lines 4 and 5 therefore
+create a simple linked list, with "y" pointing to "z" and "z" having a
+NULL pointer. A much longer linked list could be created if desired,
+and circular singly linked lists can also be created and manipulated.
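+
+For example (a sketch), this initialization creates a three-element
+circular list in which "a" points to "b", "b" points to "c", and "c"
+points back to "a":
+
+	{
+	a=b;
+	b=c;
+	c=a;
+	}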
+
+The "exists" clause works the same way, with the "1:r0=x" comparing P1()'s
+"r0" not to the value of "x", but again to its address. This term of the
+"exists" clause therefore tests whether line 20's load from "y" saw the
+value stored by line 11, which is in fact what is required in this case.
+
+P0()'s line 10 initializes "x" to the value 1, then line 11 links to "x"
+from "y", replacing "z".
+
+P1()'s line 20 loads a pointer from "y", and line 21 dereferences that
+pointer. The RCU read-side critical section spanning lines 19-22 is just
+for show in this example. Note that the address used for line 21's load
+depends on (in this case, "is exactly the same as") the value loaded by
+line 20. This is an example of what is called an "address dependency".
+This particular address dependency extends from the load on line 20 to the
+load on line 21. Address dependencies provide a weak form of ordering.
+
+Running this test results in the following:
+
+ 1 Test MP+onceassign+derefonce Allowed
+ 2 States 2
+ 3 1:r0=x; 1:r1=1;
+ 4 1:r0=z; 1:r1=0;
+ 5 No
+ 6 Witnesses
+ 7 Positive: 0 Negative: 2
+ 8 Condition exists (1:r0=x /\ 1:r1=0)
+ 9 Observation MP+onceassign+derefonce Never 0 2
+10 Time MP+onceassign+derefonce 0.00
+11 Hash=49ef7a741563570102448a256a0c8568
+
+The only possible outcomes feature P1() loading a pointer to "z"
+(which contains zero) on the one hand and P1() loading a pointer to "x"
+(which contains the value one) on the other. This should be reassuring
+because it says that RCU readers cannot see the old preinitialization
+values when accessing a newly inserted list node. This undesirable
+scenario is flagged by the "exists" clause, and would occur if P1()
+loaded a pointer to "x", but obtained the pre-initialization value of
+zero after dereferencing that pointer.
+
+
+Comments
+--------
+
+Different portions of a litmus test are processed by different parsers,
+which has the charming effect of requiring different comment syntax in
+different portions of the litmus test. The C-syntax portions use
+C-language comments (either "/* */" or "//"), while the other portions
+use Ocaml comments "(* *)".
+
+The following litmus test illustrates the comment style corresponding
+to each syntactic unit of the test:
+
+ 1 C MP+onceassign+derefonce (* A *)
+ 2
+ 3 (* B *)
+ 4
+ 5 {
+ 6 y=z; (* C *)
+ 7 z=0;
+ 8 } // D
+ 9
+10 // E
+11
+12 P0(int *x, int **y) // F
+13 {
+14 WRITE_ONCE(*x, 1); // G
+15 rcu_assign_pointer(*y, x);
+16 }
+17
+18 // H
+19
+20 P1(int *x, int **y)
+21 {
+22 int *r0;
+23 int r1;
+24
+25 rcu_read_lock();
+26 r0 = rcu_dereference(*y);
+27 r1 = READ_ONCE(*r0);
+28 rcu_read_unlock();
+29 }
+30
+31 // I
+32
+33 exists (* J *) (1:r0=x /\ (* K *) 1:r1=0) (* L *)
+
+In short, use C-language comments in the C code and Ocaml comments in
+the rest of the litmus test.
+
+On the other hand, if you prefer C-style comments everywhere, the
+C preprocessor is your friend.
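+
+For example (an untested sketch, and the exact invocation and file
+names are assumptions), a litmus test written with C-style comments
+throughout could be run through the preprocessor, which strips those
+comments, before being handed to herd7:
+
+cpp -P mytest.litmus > mytest-cpp.litmus
+herd7 -conf linux-kernel.cfg mytest-cpp.litmus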
+
+
+Asynchronous RCU Grace Periods
+------------------------------
+
+The following litmus test is derived from the example shown in
+Documentation/litmus-tests/rcu/RCU+sync+free.litmus, but converted to
+emulate call_rcu():
+
+ 1 C RCU+sync+free
+ 2
+ 3 {
+ 4 int x = 1;
+ 5 int *y = &x;
+ 6 int z = 1;
+ 7 }
+ 8
+ 9 P0(int *x, int *z, int **y)
+10 {
+11 int *r0;
+12 int r1;
+13
+14 rcu_read_lock();
+15 r0 = rcu_dereference(*y);
+16 r1 = READ_ONCE(*r0);
+17 rcu_read_unlock();
+18 }
+19
+20 P1(int *z, int **y, int *c)
+21 {
+22 rcu_assign_pointer(*y, z);
+23 smp_store_release(*c, 1); // Emulate call_rcu().
+24 }
+25
+26 P2(int *x, int *z, int **y, int *c)
+27 {
+28 int r0;
+29
+30 r0 = smp_load_acquire(*c); // Note call_rcu() request.
+31 synchronize_rcu(); // Wait one grace period.
+32 WRITE_ONCE(*x, 0); // Emulate the RCU callback.
+33 }
+34
+35 filter (2:r0=1) (* Reject too-early starts. *)
+36 exists (0:r0=x /\ 0:r1=0)
+
+Lines 4-6 initialize a linked list headed by "y" that initially contains
+"x". In addition, "z" is pre-initialized to prepare for P1(), which
+will replace "x" with "z" in this list.
+
+P0() on lines 9-18 enters an RCU read-side critical section, loads the
+list header "y" and dereferences it, leaving the node in "0:r0" and
+the node's value in "0:r1".
+
+P1() on lines 20-24 updates the list header to instead reference "z",
+then emulates call_rcu() by doing a release store into "c".
+
+P2() on lines 27-33 emulates the behind-the-scenes effect of doing a
+call_rcu(). Line 30 first does an acquire load from "c", then line 31
+waits for an RCU grace period to elapse, and finally line 32 emulates
+the RCU callback, which in turn emulates a call to kfree().
+
+Of course, it is possible for P2() to start too soon, so that the
+value of "2:r0" is zero rather than the required value of "1".
+The "filter" clause on line 35 handles this possibility, rejecting
+all executions in which "2:r0" is not equal to the value "1".
+
+
+Performance
+-----------
+
+LKMM's exploration of the full state-space can be extremely helpful,
+but it does not come for free. The price is exponential computational
+complexity in terms of the number of processes, the average number
+of statements in each process, and the total number of stores in the
+litmus test.
+
+So it is best to start small and then work up. Where possible, break
+your code down into small pieces each representing a core concurrency
+requirement.
+
+That said, herd7 is quite fast. On an unprepossessing x86 laptop, it
+was able to analyze the following 10-process RCU litmus test in about
+six seconds.
+
+https://github.com/paulmckrcu/litmus/blob/master/auto/C-RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R+RW-R+RW-R.litmus
+
+One way to make herd7 run faster is to use the "-speedcheck true" option.
+This option prevents herd7 from generating all possible end states,
+instead causing it to focus solely on whether or not the "exists"
+clause can be satisfied. With this option, herd7 evaluates the above
+litmus test in about 300 milliseconds, for more than an order of magnitude
+improvement in performance.
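+
+For example, the MP litmus test shown earlier could be run with this
+option as follows:
+
+herd7 -conf linux-kernel.cfg -speedcheck true litmus-tests/MP+pooncerelease+poacquireonce.litmus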
+
+Larger 16-process litmus tests that would normally consume 15 minutes
+of time complete in about 40 seconds with this option. To be fair,
+you do get an extra 65,535 states when you leave off the "-speedcheck
+true" option.
+
+https://github.com/paulmckrcu/litmus/blob/master/auto/C-RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R+RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R.litmus
+
+Nevertheless, litmus-test analysis really is of exponential complexity,
+whether with or without "-speedcheck true". Increasing by just three
+processes to a 19-process litmus test requires 2 hours and 40 minutes
+without, and about 8 minutes with "-speedcheck true". Each of these
+results represents roughly an order of magnitude slowdown compared to the
+16-process litmus test. Again, to be fair, the multi-hour run explores
+no fewer than 524,287 additional states compared to the shorter one.
+
+https://github.com/paulmckrcu/litmus/blob/master/auto/C-RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R+RW-R+RW-R+RW-R+RW-R+RW-G+RW-G+RW-G+RW-G+RW-R+RW-R+RW-R.litmus
+
+If you don't like command-line arguments, you can obtain a similar speedup
+by adding a "filter" clause with exactly the same expression as your
+"exists" clause.
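+
+For example (a sketch based on the MP test shown earlier), the final
+lines of that test would then read:
+
+	filter (1:r0=1 /\ 1:r1=0)
+	exists (1:r0=1 /\ 1:r1=0)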
+
+However, please note that seeing the full set of states can be extremely
+helpful when developing and debugging litmus tests.
+
+
+LIMITATIONS
+===========
+
+Limitations of the Linux-kernel memory model (LKMM) include:
+
+1. Compiler optimizations are not accurately modeled. Of course,
+ the use of READ_ONCE() and WRITE_ONCE() limits the compiler's
+ ability to optimize, but under some circumstances it is possible
+ for the compiler to undermine the memory model. For more
+ information, see Documentation/explanation.txt (in particular,
+ the "THE PROGRAM ORDER RELATION: po AND po-loc" and "A WARNING"
+ sections).
+
+ Note that this limitation in turn limits LKMM's ability to
+ accurately model address, control, and data dependencies.
+ For example, if the compiler can deduce the value of some variable
+ carrying a dependency, then the compiler can break that dependency
+ by substituting a constant of that value.
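+
+	For example (a sketch, not from the kernel sources), consider:
+
+		r1 = READ_ONCE(x);
+		WRITE_ONCE(b[r1], 1);
+
+	A compiler that somehow proves that "x" can contain only zero
+	is within its rights to transform the WRITE_ONCE() into
+	WRITE_ONCE(b[0], 1), at which point the address dependency
+	is gone.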
+
+ Conversely, LKMM will sometimes overestimate the amount of
+ reordering compilers and CPUs can carry out, leading it to miss
+ some pretty obvious cases of ordering. A simple example is:
+
+ r1 = READ_ONCE(x);
+ if (r1 == 0)
+ smp_mb();
+ WRITE_ONCE(y, 1);
+
+ The WRITE_ONCE() does not depend on the READ_ONCE(), and as a
+ result, LKMM does not claim ordering. However, even though no
+ dependency is present, the WRITE_ONCE() will not be executed before
+ the READ_ONCE(). There are two reasons for this:
+
+ The presence of the smp_mb() in one of the branches
+ prevents the compiler from moving the WRITE_ONCE()
+ up before the "if" statement, since the compiler has
+ to assume that r1 will sometimes be 0 (but see the
+ comment below);
+
+ CPUs do not execute stores before po-earlier conditional
+ branches, even in cases where the store occurs after the
+ two arms of the branch have recombined.
+
+ It is clear that it is not dangerous in the slightest for LKMM to
+ make weaker guarantees than architectures. In fact, it is
+ desirable, as it gives compilers room for making optimizations.
+ For instance, suppose that a 0 value in r1 would trigger undefined
+ behavior elsewhere. Then a clever compiler might deduce that r1
+ can never be 0 in the if condition. As a result, said clever
+ compiler might deem it safe to optimize away the smp_mb(),
+ eliminating the branch and any ordering an architecture would
+ guarantee otherwise.
+
+2. Multiple access sizes for a single variable are not supported,
+ and neither are misaligned or partially overlapping accesses.
+
+3. Exceptions and interrupts are not modeled. In some cases,
+ this limitation can be overcome by modeling the interrupt or
+ exception with an additional process.
+
+4. I/O such as MMIO or DMA is not supported.
+
+5. Self-modifying code (such as that found in the kernel's
+ alternatives mechanism, function tracer, Berkeley Packet Filter
+ JIT compiler, and module loader) is not supported.
+
+6. Complete modeling of all variants of atomic read-modify-write
+ operations, locking primitives, and RCU is not provided.
+ For example, call_rcu() and rcu_barrier() are not supported.
+ However, a substantial amount of support is provided for these
+ operations, as shown in the linux-kernel.def file.
+
+ Here are specific limitations:
+
+ a. When rcu_assign_pointer() is passed NULL, the Linux
+ kernel provides no ordering, but LKMM models this
+ case as a store release.
+
+ b. The "unless" RMW operations are not currently modeled:
+ atomic_long_add_unless(), atomic_inc_unless_negative(),
+ and atomic_dec_unless_positive(). These can be emulated
+ in litmus tests, for example, by using atomic_cmpxchg().
+
+		One exception to this limitation is atomic_add_unless(),
+		which is provided directly by herd7, and thus needs no
+		corresponding definition in linux-kernel.def. Because
+		atomic_add_unless() is modeled by herd7, it can be used
+		in litmus tests.
+
+ c. The call_rcu() function is not modeled. As was shown above,
+ it can be emulated in litmus tests by adding another
+ process that invokes synchronize_rcu() and the body of the
+ callback function, with (for example) a release-acquire
+ from the site of the emulated call_rcu() to the beginning
+ of the additional process.
+
+ d. The rcu_barrier() function is not modeled. It can be
+ emulated in litmus tests emulating call_rcu() via
+ (for example) a release-acquire from the end of each
+ additional call_rcu() process to the site of the
+		emulated rcu_barrier().
+
+ e. Reader-writer locking is not modeled. It can be
+ emulated in litmus tests using atomic read-modify-write
+ operations.
+
+The fragment of the C language supported by these litmus tests is quite
+limited and in some ways non-standard:
+
+1. There is no automatic C-preprocessor pass. You can of course
+ run it manually, if you choose.
+
+2. There is no way to create functions other than the Pn() functions
+ that model the concurrent processes.
+
+3. The Pn() functions' formal parameters must be pointers to the
+ global shared variables. Nothing can be passed by value into
+ these functions.
+
+4. The only functions that can be invoked are those built directly
+ into herd7 or that are defined in the linux-kernel.def file.
+
+5. The "switch", "do", "for", "while", and "goto" C statements are
+ not supported. The "switch" statement can be emulated by the
+ "if" statement. The "do", "for", and "while" statements can
+ often be emulated by manually unrolling the loop, or perhaps by
+ enlisting the aid of the C preprocessor to minimize the resulting
+ code duplication. Some uses of "goto" can be emulated by "if",
+ and some others by unrolling.
+
+6. Although you can use a wide variety of types in litmus-test
+ variable declarations, and especially in global-variable
+ declarations, the "herd7" tool understands only int and
+ pointer types. There is no support for floating-point types,
+ enumerations, characters, strings, arrays, or structures.
+
+7. Parsing of variable declarations is very loose, with almost no
+ type checking.
+
+8. Initializers differ from their C-language counterparts.
+ For example, when an initializer contains the name of a shared
+ variable, that name denotes a pointer to that variable, not
+	the current value of that variable. Thus, "int x = y"
+ is interpreted the way "int x = &y" would be in C.
+
+9. Dynamic memory allocation is not supported, although this can
+ be worked around in some cases by supplying multiple statically
+ allocated variables.
+
+Some of these limitations may be overcome in the future, but others are
+more likely to be addressed by incorporating the Linux-kernel memory model
+into other tools.
+
+Finally, please note that LKMM is subject to change as hardware, use cases,
+and compilers evolve.
diff --git a/tools/memory-model/Documentation/locking.txt b/tools/memory-model/Documentation/locking.txt
new file mode 100644
index 000000000000..65c898c64a93
--- /dev/null
+++ b/tools/memory-model/Documentation/locking.txt
@@ -0,0 +1,298 @@
+Locking
+=======
+
+Locking is well-known and the common use cases are straightforward: Any
+CPU holding a given lock sees any changes previously seen or made by any
+CPU before it previously released that same lock. This last sentence
+is the only part of this document that most developers will need to read.
+
+However, developers who would like to also access lock-protected shared
+variables outside of their corresponding locks should continue reading.
+
+
+Locking and Prior Accesses
+--------------------------
+
+The basic rule of locking is worth repeating:
+
+ Any CPU holding a given lock sees any changes previously seen
+ or made by any CPU before it previously released that same lock.
+
+Note that this statement is a bit stronger than "Any CPU holding a
+given lock sees all changes made by any CPU during the time that CPU was
+previously holding this same lock". For example, consider the following
+pair of code fragments:
+
+ /* See MP+polocks.litmus. */
+ void CPU0(void)
+ {
+ WRITE_ONCE(x, 1);
+ spin_lock(&mylock);
+ WRITE_ONCE(y, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&mylock);
+ r0 = READ_ONCE(y);
+ spin_unlock(&mylock);
+ r1 = READ_ONCE(x);
+ }
+
+The basic rule guarantees that if CPU0() acquires mylock before CPU1(),
+then both r0 and r1 must be set to the value 1. This also has the
+consequence that if the final value of r0 is equal to 1, then the final
+value of r1 must also be equal to 1. In contrast, the weaker rule would
+say nothing about the final value of r1.
+
+
+Locking and Subsequent Accesses
+-------------------------------
+
+The converse to the basic rule also holds: Any CPU holding a given
+lock will not see any changes that will be made by any CPU after it
+subsequently acquires this same lock. This converse statement is
+illustrated by the following litmus test:
+
+ /* See MP+porevlocks.litmus. */
+ void CPU0(void)
+ {
+ r0 = READ_ONCE(y);
+ spin_lock(&mylock);
+ r1 = READ_ONCE(x);
+ spin_unlock(&mylock);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&mylock);
+ WRITE_ONCE(x, 1);
+ spin_unlock(&mylock);
+ WRITE_ONCE(y, 1);
+ }
+
+This converse to the basic rule guarantees that if CPU0() acquires
+mylock before CPU1(), then both r0 and r1 must be set to the value 0.
+This also has the consequence that if the final value of r1 is equal
+to 0, then the final value of r0 must also be equal to 0. In contrast,
+the weaker rule would say nothing about the final value of r0.
+
+These examples show only a single pair of CPUs, but the effects of the
+locking basic rule extend across multiple acquisitions of a given lock
+across multiple CPUs.
+
+
+Double-Checked Locking
+----------------------
+
+It is well known that more than just a lock is required to make
+double-checked locking work correctly. This litmus test illustrates
+one incorrect approach:
+
+ /* See Documentation/litmus-tests/locking/DCL-broken.litmus. */
+ void CPU0(void)
+ {
+ r0 = READ_ONCE(flag);
+ if (r0 == 0) {
+ spin_lock(&lck);
+ r1 = READ_ONCE(flag);
+ if (r1 == 0) {
+ WRITE_ONCE(data, 1);
+ WRITE_ONCE(flag, 1);
+ }
+ spin_unlock(&lck);
+ }
+ r2 = READ_ONCE(data);
+ }
+	/* CPU1() is exactly the same as CPU0(). */
+
+There are two problems. First, there is no ordering between the first
+READ_ONCE() of "flag" and the READ_ONCE() of "data". Second, there is
+no ordering between the two WRITE_ONCE() calls. It should therefore be
+no surprise that "r2" can be zero, and a quick herd7 run confirms this.
+
+One way to fix this is to use smp_load_acquire() and smp_store_release()
+as shown in this corrected version:
+
+ /* See Documentation/litmus-tests/locking/DCL-fixed.litmus. */
+ void CPU0(void)
+ {
+ r0 = smp_load_acquire(&flag);
+ if (r0 == 0) {
+ spin_lock(&lck);
+ r1 = READ_ONCE(flag);
+ if (r1 == 0) {
+ WRITE_ONCE(data, 1);
+ smp_store_release(&flag, 1);
+ }
+ spin_unlock(&lck);
+ }
+ r2 = READ_ONCE(data);
+ }
+	/* CPU1() is exactly the same as CPU0(). */
+
+The smp_load_acquire() guarantees that its load from "flag" will
+be ordered before the READ_ONCE() from "data", thus solving the first
+problem. The smp_store_release() guarantees that its store will be
+ordered after the WRITE_ONCE() to "data", solving the second problem.
+The smp_store_release() pairs with the smp_load_acquire(), thus ensuring
+that the ordering provided by each actually takes effect. Again, a
+quick herd7 run confirms this.
+
+In short, if you access a lock-protected variable without holding the
+corresponding lock, you will need to provide additional ordering, in
+this case, via the smp_load_acquire() and the smp_store_release().
+
+
+Ordering Provided by a Lock to CPUs Not Holding That Lock
+---------------------------------------------------------
+
+It is not necessarily the case that accesses ordered by locking will be
+seen as ordered by CPUs not holding that lock. Consider this example:
+
+ /* See Z6.0+pooncelock+pooncelock+pombonce.litmus. */
+ void CPU0(void)
+ {
+ spin_lock(&mylock);
+ WRITE_ONCE(x, 1);
+ WRITE_ONCE(y, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&mylock);
+ r0 = READ_ONCE(y);
+ WRITE_ONCE(z, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU2(void)
+ {
+ WRITE_ONCE(z, 2);
+ smp_mb();
+ r1 = READ_ONCE(x);
+ }
+
+Counter-intuitive though it might be, it is quite possible to have
+the final value of r0 be 1, the final value of z be 2, and the final
+value of r1 be 0. The reason for this surprising outcome is that CPU2()
+never acquired the lock, and thus did not fully benefit from the lock's
+ordering properties.
+
+Ordering can be extended to CPUs not holding the lock by careful use
+of smp_mb__after_spinlock():
+
+ /* See Z6.0+pooncelock+poonceLock+pombonce.litmus. */
+ void CPU0(void)
+ {
+ spin_lock(&mylock);
+ WRITE_ONCE(x, 1);
+ WRITE_ONCE(y, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&mylock);
+ smp_mb__after_spinlock();
+ r0 = READ_ONCE(y);
+ WRITE_ONCE(z, 1);
+ spin_unlock(&mylock);
+ }
+
+ void CPU2(void)
+ {
+ WRITE_ONCE(z, 2);
+ smp_mb();
+ r1 = READ_ONCE(x);
+ }
+
+This addition of smp_mb__after_spinlock() strengthens the lock
+acquisition sufficiently to rule out the counter-intuitive outcome.
+In other words, the addition of the smp_mb__after_spinlock() prohibits
+the counter-intuitive result where the final value of r0 is 1, the final
+value of z is 2, and the final value of r1 is 0.
+
+
+No Roach-Motel Locking!
+-----------------------
+
+This example requires familiarity with the herd7 "filter" clause, so
+please read up on that topic in litmus-tests.txt.
+
+It is tempting to allow memory-reference instructions to be pulled
+into a critical section, but this cannot be allowed in the general case.
+For example, consider a spin loop preceding a lock-based critical section.
+Now, herd7 does not model spin loops, but we can emulate one with two
+loads, with a "filter" clause to constrain the first to return the
+initial value and the second to return the updated value, as shown below:
+
+ /* See Documentation/litmus-tests/locking/RM-fixed.litmus. */
+ void CPU0(void)
+ {
+ spin_lock(&lck);
+ r2 = atomic_inc_return(&y);
+ WRITE_ONCE(x, 1);
+ spin_unlock(&lck);
+ }
+
+ void CPU1(void)
+ {
+ r0 = READ_ONCE(x);
+ r1 = READ_ONCE(x);
+ spin_lock(&lck);
+ r2 = atomic_inc_return(&y);
+ spin_unlock(&lck);
+ }
+
+ filter (1:r0=0 /\ 1:r1=1)
+ exists (1:r2=1)
+
+The variable "x" is the control variable for the emulated spin loop.
+CPU0() sets it to "1" while holding the lock, and CPU1() emulates the
+spin loop by reading it twice, first into "1:r0" (which should get the
+initial value "0") and then into "1:r1" (which should get the updated
+value "1").
+
+The "filter" clause takes this into account, constraining "1:r0" to
+equal "0" and "1:r1" to equal "1".
+
+Then the "exists" clause checks to see if CPU1() acquired its lock first,
+which should not happen given the filter clause because CPU0() updates
+"x" while holding the lock. And herd7 confirms this.
+
+But suppose that the compiler was permitted to reorder the spin loop
+into CPU1()'s critical section, like this:
+
+ /* See Documentation/litmus-tests/locking/RM-broken.litmus. */
+ void CPU0(void)
+ {
+ int r2;
+
+ spin_lock(&lck);
+ r2 = atomic_inc_return(&y);
+ WRITE_ONCE(x, 1);
+ spin_unlock(&lck);
+ }
+
+ void CPU1(void)
+ {
+ spin_lock(&lck);
+ r0 = READ_ONCE(x);
+ r1 = READ_ONCE(x);
+ r2 = atomic_inc_return(&y);
+ spin_unlock(&lck);
+ }
+
+ filter (1:r0=0 /\ 1:r1=1)
+ exists (1:r2=1)
+
+If "1:r0" is equal to "0", "1:r1" can never equal "1" because CPU0()
+cannot update "x" while CPU1() holds the lock. And herd7 confirms this,
+showing zero executions matching the "filter" criteria.
+
+And this is why Linux-kernel lock and unlock primitives must prevent
+code from entering critical sections. It is not sufficient to only
+prevent code from leaving them.
diff --git a/tools/memory-model/Documentation/ordering.txt b/tools/memory-model/Documentation/ordering.txt
new file mode 100644
index 000000000000..9b0949d3f5ec
--- /dev/null
+++ b/tools/memory-model/Documentation/ordering.txt
@@ -0,0 +1,556 @@
+This document gives an overview of the categories of memory-ordering
+operations provided by the Linux-kernel memory model (LKMM).
+
+
+Categories of Ordering
+======================
+
+This section lists LKMM's three top-level categories of memory-ordering
+operations in decreasing order of strength:
+
+1. Barriers (also known as "fences"). A barrier orders some or
+ all of the CPU's prior operations against some or all of its
+ subsequent operations.
+
+2. Ordered memory accesses. These operations order themselves
+ against some or all of the CPU's prior accesses or some or all
+ of the CPU's subsequent accesses, depending on the subcategory
+ of the operation.
+
+3. Unordered accesses, as the name indicates, have no ordering
+ properties except to the extent that they interact with an
+ operation in the previous categories. This being the real world,
+ some of these "unordered" operations provide limited ordering
+ in some special situations.
+
+Each of the above categories is described in more detail by one of the
+following sections.
+
+
+Barriers
+========
+
+Each of the following categories of barriers is described in its own
+subsection below:
+
+a. Full memory barriers.
+
+b. Read-modify-write (RMW) ordering augmentation barriers.
+
+c. Write memory barrier.
+
+d. Read memory barrier.
+
+e. Compiler barrier.
+
+Note well that many of these primitives generate absolutely no code
+in kernels built with CONFIG_SMP=n. Therefore, if you are writing
+a device driver, which must correctly order accesses to a physical
+device even in kernels built with CONFIG_SMP=n, please use the
+ordering primitives provided for that purpose. For example, instead of
+smp_mb(), use mb(). See the "Linux Kernel Device Drivers" book or the
+https://lwn.net/Articles/698014/ article for more information.
+
+
+Full Memory Barriers
+--------------------
+
+The Linux-kernel primitives that provide full ordering include:
+
+o The smp_mb() full memory barrier.
+
+o Value-returning RMW atomic operations whose names do not end in
+ _acquire, _release, or _relaxed.
+
+o RCU's grace-period primitives.
+
+First, the smp_mb() full memory barrier orders all of the CPU's prior
+accesses against all subsequent accesses from the viewpoint of all CPUs.
+In other words, all CPUs will agree that any earlier action taken
+by that CPU happened before any later action taken by that same CPU.
+For example, consider the following:
+
+ WRITE_ONCE(x, 1);
+ smp_mb(); // Order store to x before load from y.
+ r1 = READ_ONCE(y);
+
+All CPUs will agree that the store to "x" happened before the load
+from "y", as indicated by the comment. And yes, please comment your
+memory-ordering primitives. It is surprisingly hard to remember their
+purpose after even a few months.
+
+Second, some RMW atomic operations provide full ordering. These
+operations include value-returning RMW atomic operations (that is, those
+with non-void return types) whose names do not end in _acquire, _release,
+or _relaxed. Examples include atomic_add_return(), atomic_dec_and_test(),
+cmpxchg(), and xchg(). Note that conditional RMW atomic operations such
+as cmpxchg() are only guaranteed to provide ordering when they succeed.
+When RMW atomic operations provide full ordering, they partition the
+CPU's accesses into three groups:
+
+1. All code that executed prior to the RMW atomic operation.
+
+2. The RMW atomic operation itself.
+
+3. All code that executed after the RMW atomic operation.
+
+All CPUs will agree that any operation in a given partition happened
+before any operation in a higher-numbered partition.
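+
+For example, in the following sketch, all CPUs will agree that the
+store to "x" (first group) happened before the atomic_add_return()
+(second group), which in turn happened before the load from "y"
+(third group):
+
+	WRITE_ONCE(x, 1);
+	r0 = atomic_add_return(1, &my_counter); // Fully ordered RMW.
+	r1 = READ_ONCE(y);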
+
+In contrast, non-value-returning RMW atomic operations (that is, those
+with void return types) do not guarantee any ordering whatsoever. Nor do
+value-returning RMW atomic operations whose names end in _relaxed.
+Examples of the former include atomic_inc() and atomic_dec(),
+while examples of the latter include atomic_cmpxchg_relaxed() and
+atomic_xchg_relaxed(). Similarly, value-returning non-RMW atomic
+operations such as atomic_read() do not guarantee full ordering, and
+are covered in the later section on unordered operations.
+
+Value-returning RMW atomic operations whose names end in _acquire or
+_release provide limited ordering, and will be described later in this
+document.
+
+Finally, RCU's grace-period primitives provide full ordering. These
+primitives include synchronize_rcu(), synchronize_rcu_expedited(),
+synchronize_srcu() and so on. However, these primitives have orders
+of magnitude greater overhead than smp_mb(), atomic_xchg(), and so on.
+Furthermore, RCU's grace-period primitives can only be invoked in
+sleepable contexts. Therefore, RCU's grace-period primitives are
+typically instead used to provide ordering against RCU read-side critical
+sections, as documented in their comment headers. But of course if you
+need a synchronize_rcu() to interact with readers, it costs you nothing
+to also rely on its additional full-memory-barrier semantics. Just please
+carefully comment this, otherwise your future self will hate you.
+
+
+RMW Ordering Augmentation Barriers
+----------------------------------
+
+As noted in the previous section, non-value-returning RMW operations
+such as atomic_inc() and atomic_dec() guarantee no ordering whatsoever.
+Nevertheless, a number of popular CPU families, including x86, provide
+full ordering for these primitives. One way to obtain full ordering on
+all architectures is to add a call to smp_mb():
+
+ WRITE_ONCE(x, 1);
+ atomic_inc(&my_counter);
+ smp_mb(); // Inefficient on x86!!!
+ r1 = READ_ONCE(y);
+
+This works, but the added smp_mb() adds needless overhead for
+x86, on which atomic_inc() provides full ordering all by itself.
+The smp_mb__after_atomic() primitive can be used instead:
+
+ WRITE_ONCE(x, 1);
+ atomic_inc(&my_counter);
+ smp_mb__after_atomic(); // Order store to x before load from y.
+ r1 = READ_ONCE(y);
+
+The smp_mb__after_atomic() primitive emits code only on CPUs whose
+atomic_inc() implementations do not guarantee full ordering, thus
+incurring no unnecessary overhead on x86. There are a number of
+variations on the smp_mb__*() theme:
+
+o smp_mb__before_atomic(), which provides full ordering prior
+ to an unordered RMW atomic operation.
+
+o smp_mb__after_atomic(), which, as shown above, provides full
+ ordering subsequent to an unordered RMW atomic operation.
+
+o smp_mb__after_spinlock(), which provides full ordering subsequent
+ to a successful spinlock acquisition. Note that spin_lock() is
+ always successful but spin_trylock() might not be.
+
+o smp_mb__after_srcu_read_unlock(), which provides full ordering
+ subsequent to an srcu_read_unlock().
+
+It is bad practice to place code between the smp_mb__*() primitive and the
+operation whose ordering that it is augmenting. The reason is that the
+ordering of this intervening code will differ from one CPU architecture
+to another.
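+
+For example (a sketch), the WRITE_ONCE() below gets whatever ordering
+the surrounding primitives happen to provide on a given architecture,
+so don't do this:
+
+	atomic_inc(&my_counter);
+	WRITE_ONCE(x, 1); // Bad: ordering of this store varies across architectures.
+	smp_mb__after_atomic();
+	r1 = READ_ONCE(y);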
+
+
+Write Memory Barrier
+--------------------
+
+The Linux kernel's write memory barrier is smp_wmb(). If a CPU executes
+the following code:
+
+ WRITE_ONCE(x, 1);
+ smp_wmb();
+ WRITE_ONCE(y, 1);
+
+Then any given CPU will see the write to "x" as having happened before
+the write to "y". However, you are usually better off using a release
+store, as described in the "Release Operations" section below.
+
+Note that smp_wmb() might fail to provide ordering for unmarked C-language
+stores because profile-driven optimization could determine that the
+value being overwritten is almost always equal to the new value. Such a
+compiler might then reasonably decide to transform "x = 1" and "y = 1"
+as follows:
+
+ if (x != 1)
+ x = 1;
+ smp_wmb(); // BUG: does not order the reads!!!
+ if (y != 1)
+ y = 1;
+
+Therefore, if you need to use smp_wmb() with unmarked C-language writes,
+you will need to make sure that none of the compilers used to build
+the Linux kernel carry out this sort of transformation, both now and in
+the future.
+
+
+Read Memory Barrier
+-------------------
+
+The Linux kernel's read memory barrier is smp_rmb(). If a CPU executes
+the following code:
+
+ r0 = READ_ONCE(y);
+ smp_rmb();
+ r1 = READ_ONCE(x);
+
+Then any given CPU will see the read from "y" as having preceded the read from
+"x". However, you are usually better off using an acquire load, as described
+in the "Acquire Operations" section below.
+
+Compiler Barrier
+----------------
+
+The Linux kernel's compiler barrier is barrier(). This primitive
+prohibits compiler code-motion optimizations that might move memory
+references across the point in the code containing the barrier(), but
+does not constrain hardware memory ordering. For example, this can be
+used to prevent the compiler from moving code across an infinite loop:
+
+ WRITE_ONCE(x, 1);
+ while (dontstop)
+ barrier();
+ r1 = READ_ONCE(y);
+
+Without the barrier(), the compiler would be within its rights to move the
+WRITE_ONCE() to follow the loop. This code motion could be problematic
+in the case where an interrupt handler terminates the loop. Another way
+to handle this is to use READ_ONCE() for the load of "dontstop".
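+
+For example, the READ_ONCE() approach might look like this:
+
+	WRITE_ONCE(x, 1);
+	while (READ_ONCE(dontstop))
+		;
+	r1 = READ_ONCE(y);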
+
+Note that the barriers discussed previously use barrier() or its low-level
+equivalent in their implementations.
+
+
+Ordered Memory Accesses
+=======================
+
+The Linux kernel provides a wide variety of ordered memory accesses:
+
+a. Release operations.
+
+b. Acquire operations.
+
+c. RCU read-side ordering.
+
+d. Control dependencies.
+
+Each of the above categories has its own section below.
+
+
+Release Operations
+------------------
+
+Release operations include smp_store_release(), atomic_set_release(),
+rcu_assign_pointer(), and value-returning RMW operations whose names
+end in _release. These operations order their own store against all
+of the CPU's prior memory accesses. Release operations often provide
+improved readability and performance compared to explicit barriers.
+For example, use of smp_store_release() saves a line compared to the
+smp_wmb() example above:
+
+ WRITE_ONCE(x, 1);
+ smp_store_release(&y, 1);
+
+More important, smp_store_release() makes it easier to connect up the
+different pieces of the concurrent algorithm. The variable stored to
+by the smp_store_release(), in this case "y", will normally be used in
+an acquire operation in other parts of the concurrent algorithm.
+
+To see the performance advantages, suppose that the above example read
+from "x" instead of writing to it. Then an smp_wmb() could not guarantee
+ordering, and an smp_mb() would be needed instead:
+
+ r1 = READ_ONCE(x);
+ smp_mb();
+ WRITE_ONCE(y, 1);
+
+But smp_mb() often incurs much higher overhead than does
+smp_store_release(), which still provides the needed ordering of "x"
+against "y". On x86, the version using smp_store_release() might compile
+to a simple load instruction followed by a simple store instruction.
+In contrast, the smp_mb() compiles to an expensive instruction that
+provides the needed ordering.
+
+There is a wide variety of release operations:
+
+o Store operations, including not only the aforementioned
+ smp_store_release(), but also atomic_set_release(), and
+ atomic_long_set_release().
+
+o RCU's rcu_assign_pointer() operation. This is the same as
+ smp_store_release() except that: (1) It takes the pointer to
+ be assigned to instead of a pointer to that pointer, (2) It
+ is intended to be used in conjunction with rcu_dereference()
+ and similar rather than smp_load_acquire(), and (3) It checks
+ for an RCU-protected pointer in "sparse" runs.
+
+o Value-returning RMW operations whose names end in _release,
+ such as atomic_fetch_add_release() and cmpxchg_release().
+ Note that release ordering is guaranteed only against the
+ memory-store portion of the RMW operation, and not against the
+ memory-load portion. Note also that conditional operations such
+ as cmpxchg_release() are only guaranteed to provide ordering
+ when they succeed.
+
+As mentioned earlier, release operations are often paired with acquire
+operations, which are the subject of the next section.
+
+
+Acquire Operations
+------------------
+
+Acquire operations include smp_load_acquire(), atomic_read_acquire(),
+and value-returning RMW operations whose names end in _acquire. These
+operations order their own load against all of the CPU's subsequent
+memory accesses. Acquire operations often provide improved performance
+and readability compared to explicit barriers. For example, use of
+smp_load_acquire() saves a line compared to the smp_rmb() example above:
+
+ r0 = smp_load_acquire(&y);
+ r1 = READ_ONCE(x);
+
+As with smp_store_release(), this also makes it easier to connect
+the different pieces of the concurrent algorithm by looking for the
+smp_store_release() that stores to "y". In addition, smp_load_acquire()
+improves upon smp_rmb() by ordering against subsequent stores as well
+as against subsequent loads.
+
+There are a couple of categories of acquire operations:
+
+o Load operations, including not only the aforementioned
+ smp_load_acquire(), but also atomic_read_acquire(), and
+ atomic64_read_acquire().
+
+o Value-returning RMW operations whose names end in _acquire,
+ such as atomic_xchg_acquire() and atomic_cmpxchg_acquire().
+ Note that acquire ordering is guaranteed only against the
+ memory-load portion of the RMW operation, and not against the
+ memory-store portion. Note also that conditional operations
+ such as atomic_cmpxchg_acquire() are only guaranteed to provide
+ ordering when they succeed.
+
+Symmetry being what it is, acquire operations are often paired with the
+release operations covered earlier. For example, consider the following
+example, where task0() and task1() execute concurrently:
+
+ void task0(void)
+ {
+ WRITE_ONCE(x, 1);
+ smp_store_release(&y, 1);
+ }
+
+ void task1(void)
+ {
+ r0 = smp_load_acquire(&y);
+ r1 = READ_ONCE(x);
+ }
+
+If "x" and "y" are both initially zero, then either r0's final value
+will be zero or r1's final value will be one, thus providing the required
+ordering.
+
+
+RCU Read-Side Ordering
+----------------------
+
+This category includes read-side markers such as rcu_read_lock()
+and rcu_read_unlock() as well as pointer-traversal primitives such as
+rcu_dereference() and srcu_dereference().
+
+Compared to locking primitives and RMW atomic operations, markers
+for RCU read-side critical sections incur very low overhead because
+they interact only with the corresponding grace-period primitives.
+For example, the rcu_read_lock() and rcu_read_unlock() markers interact
+with synchronize_rcu(), synchronize_rcu_expedited(), and call_rcu().
+The way this works is that if a given call to synchronize_rcu() cannot
+prove that it started before a given call to rcu_read_lock(), then
+that synchronize_rcu() must block until the matching rcu_read_unlock()
+is reached. For more information, please see the synchronize_rcu()
+docbook header comment and the material in Documentation/RCU.
+
+RCU's pointer-traversal primitives, including rcu_dereference() and
+srcu_dereference(), order their load (which must be a pointer) against any
+of the CPU's subsequent memory accesses whose address has been calculated
+from the value loaded. There is said to be an *address dependency*
+from the value returned by the rcu_dereference() or srcu_dereference()
+to that subsequent memory access.
+
+A call to rcu_dereference() for a given RCU-protected pointer is
+usually paired with a call to rcu_assign_pointer() for that
+same pointer in much the same way that a call to smp_load_acquire() is
+paired with a call to smp_store_release(). Calls to rcu_dereference()
+and rcu_assign_pointer() are often buried in other APIs, for example,
+the RCU list API members defined in include/linux/rculist.h. For more
+information, please see the docbook headers in that file, the most
+recent LWN article on the RCU API (https://lwn.net/Articles/777036/),
+and of course the material in Documentation/RCU.
+
+If the pointer value is manipulated between the rcu_dereference()
+that returned it and a later dereference(), please read
+Documentation/RCU/rcu_dereference.rst. It can also be quite helpful to
+review uses in the Linux kernel.
+
+
+Control Dependencies
+--------------------
+
+A control dependency extends from a marked load (READ_ONCE() or stronger)
+through an "if" condition to a marked store (WRITE_ONCE() or stronger)
+that is executed only by one of the legs of that "if" statement.
+Control dependencies are so named because they are mediated by
+control-flow instructions such as comparisons and conditional branches.
+
+In short, you can use a control dependency to enforce ordering between
+a READ_ONCE() and a WRITE_ONCE() when there is an "if" condition
+between them. The canonical example is as follows:
+
+ q = READ_ONCE(a);
+ if (q)
+ WRITE_ONCE(b, 1);
+
+In this case, all CPUs would see the read from "a" as happening before
+the write to "b".
+
+However, control dependencies are easily destroyed by compiler
+optimizations, so any use of control dependencies must take into account
+all of the compilers used to build the Linux kernel. Please see the
+"control-dependencies.txt" file for more information.
+
+
+Unordered Accesses
+==================
+
+Each of these two categories of unordered accesses has a section below:
+
+a. Unordered marked operations.
+
+b. Unmarked C-language accesses.
+
+
+Unordered Marked Operations
+---------------------------
+
+Unordered operations to different variables are just that, unordered.
+However, if a group of CPUs apply these operations to a single variable,
+all the CPUs will agree on the operation order. Of course, the ordering
+of unordered marked accesses can also be constrained using the mechanisms
+described earlier in this document.
+
+These operations come in three categories:
+
+o Marked writes, such as WRITE_ONCE() and atomic_set(). These
+ primitives require the compiler to emit the corresponding store
+ instructions in the expected execution order, thus suppressing
+ a number of destructive optimizations. However, they provide no
+ hardware ordering guarantees, and in fact many CPUs will happily
+ reorder marked writes with each other or with other unordered
+ operations, unless these operations are to the same variable.
+
+o Marked reads, such as READ_ONCE() and atomic_read(). These
+ primitives require the compiler to emit the corresponding load
+ instructions in the expected execution order, thus suppressing
+ a number of destructive optimizations. However, they provide no
+ hardware ordering guarantees, and in fact many CPUs will happily
+ reorder marked reads with each other or with other unordered
+ operations, unless these operations are to the same variable.
+
+o Unordered RMW atomic operations. These are non-value-returning
+ RMW atomic operations whose names do not end in _acquire or
+ _release, and also value-returning RMW operations whose names
+ end in _relaxed. Examples include atomic_add(), atomic_or(),
+ and atomic64_fetch_xor_relaxed(). These operations do carry
+ out the specified RMW operation atomically, for example, five
+ concurrent atomic_inc() operations applied to a given variable
+ will reliably increase the value of that variable by five.
+ However, many CPUs will happily reorder these operations with
+ each other or with other unordered operations.
+
+ This category of operations can be efficiently ordered using
+ smp_mb__before_atomic() and smp_mb__after_atomic(), as was
+ discussed in the "RMW Ordering Augmentation Barriers" section.
+
+In short, these operations can be freely reordered unless they are all
+operating on a single variable or unless they are constrained by one of
+the operations called out earlier in this document.
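+
+For example, the following sketch (with hypothetical variables "x",
+"y", and "cnt") uses these barriers to provide full ordering around an
+otherwise-unordered atomic_inc():
+
+	WRITE_ONCE(x, 1);
+	smp_mb__before_atomic();	/* Orders the above store before... */
+	atomic_inc(&cnt);		/* ...this unordered RMW operation... */
+	smp_mb__after_atomic();		/* ...which is in turn ordered before... */
+	r1 = READ_ONCE(y);		/* ...this load. */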
+
+
+Unmarked C-Language Accesses
+----------------------------
+
+Unmarked C-language accesses are normal variable accesses to normal
+variables, that is, to variables that are not "volatile" and are not
+C11 atomic variables. These operations provide no ordering guarantees,
+and further do not guarantee "atomic" access. For example, the compiler
+might (and sometimes does) split a plain C-language store into multiple
+smaller stores. A load from that same variable running on some other
+CPU while such a store is executing might see a value that is a mashup
+of the old value and the new value.
+
+Unmarked C-language accesses are unordered, and are also subject to
+any number of compiler optimizations, many of which can break your
+concurrent code. It is possible to use unmarked C-language accesses for
+shared variables that are subject to concurrent access, but great care
+is required on an ongoing basis. The compiler-constraining barrier()
+primitive can be helpful, as can the various ordering primitives discussed
+in this document. It nevertheless bears repeating that use of unmarked
+C-language accesses requires careful attention to not just your code,
+but to all the compilers that might be used to build it. Such compilers
+might replace a series of loads with a single load, and might replace
+a series of stores with a single store. Some compilers will even split
+a single store into multiple smaller stores.
+
+But there are some ways of using unmarked C-language accesses for shared
+variables without such worries:
+
+o Guard all accesses to a given variable by a particular lock,
+ so that there are never concurrent conflicting accesses to
+ that variable. (There are "conflicting accesses" when
+ (1) at least one of the concurrent accesses to a variable is an
+ unmarked C-language access and (2) at least one of those
+ accesses is a write, whether marked or not.) A minimal sketch
+ of this approach appears after this list.
+
+o As above, but using other synchronization primitives such
+ as reader-writer locks or sequence locks.
+
+o Use locking or other means to ensure that all concurrent accesses
+ to a given variable are reads.
+
+o Restrict use of a given variable to statistics or heuristics
+ where the occasional bogus value can be tolerated.
+
+o Declare the accessed variables as C11 atomics.
+ https://lwn.net/Articles/691128/
+
+o Declare the accessed variables as "volatile".
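+
+As a minimal sketch of the first option above, consider a hypothetical
+counter "foo_count" whose every access is carried out under "foo_lock":
+
+	static DEFINE_SPINLOCK(foo_lock);
+	static unsigned long foo_count;	/* Accessed only under foo_lock. */
+
+	void foo_count_inc(void)
+	{
+		spin_lock(&foo_lock);
+		foo_count++;	/* Plain C-language access, but there are
+				 * no concurrent conflicting accesses. */
+		spin_unlock(&foo_lock);
+	}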
+
+If you need to live more dangerously, please do take the time to
+understand the compilers. One place to start is these two LWN
+articles:
+
+Who's afraid of a big bad optimizing compiler?
+ https://lwn.net/Articles/793253
+Calibrating your fear of big bad optimizing compilers
+ https://lwn.net/Articles/799218
+
+Used properly, unmarked C-language accesses can reduce overhead on
+fastpaths. However, the price is great care and continual attention
+to your compiler as new versions come out and as new optimizations
+are enabled.
diff --git a/tools/memory-model/Documentation/recipes.txt b/tools/memory-model/Documentation/recipes.txt
index 7fe8d7aa3029..03f58b11c252 100644
--- a/tools/memory-model/Documentation/recipes.txt
+++ b/tools/memory-model/Documentation/recipes.txt
@@ -1,7 +1,7 @@
This document provides "recipes", that is, litmus tests for commonly
occurring situations, as well as a few that illustrate subtly broken but
attractive nuisances. Many of these recipes include example code from
-v4.13 of the Linux kernel.
+v5.7 of the Linux kernel.
The first section covers simple special cases, the second section
takes off the training wheels to cover more involved examples,
@@ -126,7 +126,7 @@ However, it is not necessarily the case that accesses ordered by
locking will be seen as ordered by CPUs not holding that lock.
Consider this example:
- /* See Z6.0+pooncerelease+poacquirerelease+fencembonceonce.litmus. */
+ /* See Z6.0+pooncelock+pooncelock+pombonce.litmus. */
void CPU0(void)
{
spin_lock(&mylock);
@@ -278,7 +278,7 @@ is present if the value loaded determines the address of a later access
first place (control dependency). Note that the term "data dependency"
is sometimes casually used to cover both address and data dependencies.
-In lib/prime_numbers.c, the expand_to_next_prime() function invokes
+In lib/math/prime_numbers.c, the expand_to_next_prime() function invokes
rcu_assign_pointer(), and the next_prime_number() function invokes
rcu_dereference(). This combination mediates access to a bit vector
that is expanded as additional primes are needed.
diff --git a/tools/memory-model/Documentation/references.txt b/tools/memory-model/Documentation/references.txt
index b177f3e4a614..c5fdfd19df24 100644
--- a/tools/memory-model/Documentation/references.txt
+++ b/tools/memory-model/Documentation/references.txt
@@ -73,6 +73,18 @@ o Christopher Pulte, Shaked Flur, Will Deacon, Jon French,
Linux-kernel memory model
=========================
+o Jade Alglave, Will Deacon, Boqun Feng, David Howells, Daniel
+ Lustig, Luc Maranget, Paul E. McKenney, Andrea Parri, Nicholas
+ Piggin, Alan Stern, Akira Yokosawa, and Peter Zijlstra.
+ 2019. "Calibrating your fear of big bad optimizing compilers"
+ Linux Weekly News. https://lwn.net/Articles/799218/
+
+o Jade Alglave, Will Deacon, Boqun Feng, David Howells, Daniel
+ Lustig, Luc Maranget, Paul E. McKenney, Andrea Parri, Nicholas
+ Piggin, Alan Stern, Akira Yokosawa, and Peter Zijlstra.
+ 2019. "Who's afraid of a big bad optimizing compiler?"
+ Linux Weekly News. https://lwn.net/Articles/793253/
+
o Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and
Alan Stern. 2018. "Frightening small children and disconcerting
grown-ups: Concurrency in the Linux kernel". In Proceedings of
@@ -88,6 +100,11 @@ o Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and
Alan Stern. 2017. "A formal kernel memory-ordering model (part 2)"
Linux Weekly News. https://lwn.net/Articles/720550/
+o Jade Alglave, Luc Maranget, Paul E. McKenney, Andrea Parri, and
+ Alan Stern. 2017-2019. "A Formal Model of Linux-Kernel Memory
+ Ordering" (backup material for the LWN articles)
+ https://mirrors.edge.kernel.org/pub/linux/kernel/people/paulmck/LWNLinuxMM/
+
Memory-model tooling
====================
@@ -103,12 +120,12 @@ o Jade Alglave, Luc Maranget, and Michael Tautschnig. 2014. "Herding
o Jade Alglave, Patrick Cousot, and Luc Maranget. 2016. "Syntax and
semantics of the weak consistency model specification language
- cat". CoRR abs/1608.07531 (2016). http://arxiv.org/abs/1608.07531
+ cat". CoRR abs/1608.07531 (2016). https://arxiv.org/abs/1608.07531
Memory-model comparisons
========================
o Paul E. McKenney, Ulrich Weigand, Andrea Parri, and Boqun
- Feng. 2016. "Linux-Kernel Memory Model". (6 June 2016).
- http://open-std.org/JTC1/SC22/WG21/docs/papers/2016/p0124r2.html.
+ Feng. 2018. "Linux-Kernel Memory Model". (27 September 2018).
+ http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2018/p0124r6.html.
diff --git a/tools/memory-model/Documentation/simple.txt b/tools/memory-model/Documentation/simple.txt
new file mode 100644
index 000000000000..4c789ec8334f
--- /dev/null
+++ b/tools/memory-model/Documentation/simple.txt
@@ -0,0 +1,270 @@
+This document provides options for those wishing to keep their
+memory-ordering lives simple, as is necessary for those whose domain
+is complex. After all, there are bugs other than memory-ordering bugs,
+and the time spent gaining memory-ordering knowledge is not available
+for gaining domain knowledge. Furthermore, the Linux-kernel memory model
+(LKMM) is quite complex, with subtle differences in code often having
+dramatic effects on correctness.
+
+The options near the beginning of this list are quite simple. The idea
+is not that kernel hackers don't already know about them, but rather
+that they might need the occasional reminder.
+
+Please note that this is a generic guide, and that specific subsystems
+will often have special requirements or idioms. For example, developers
+of MMIO-based device drivers will often need to use mb(), rmb(), and
+wmb(), and therefore might find smp_mb(), smp_rmb(), and smp_wmb()
+to be more natural than smp_load_acquire() and smp_store_release().
+On the other hand, those coming in from other environments will likely
+be more familiar with these last two.
+
+
+Single-threaded code
+====================
+
+In single-threaded code, there is no reordering, at least assuming
+that your toolchain and hardware are working correctly. In addition,
+it is generally a mistake to assume your code will only run in a single
+threaded context as the kernel can enter the same code path on multiple
+CPUs at the same time. One important exception is a function that makes
+no external data references.
+
+In the general case, you will need to take explicit steps to ensure that
+your code really is executed within a single thread that does not access
+shared variables. A simple way to achieve this is to define a global lock
+that you acquire at the beginning of your code and release at the end,
+taking care to ensure that all references to your code's shared data are
+also carried out under that same lock. Because only one thread can hold
+this lock at a given time, your code will be executed single-threaded.
+This approach is called "code locking".
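+
+For example, here is a minimal code-locking sketch in which the global
+lock and the do_something() helper are hypothetical:
+
+	static DEFINE_SPINLOCK(code_lock);
+
+	void rarely_executed_function(void)
+	{
+		spin_lock(&code_lock);
+		/* All accesses to this code path's shared data go here. */
+		do_something();
+		spin_unlock(&code_lock);
+	}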
+
+Code locking can severely limit both performance and scalability, so it
+should be used with caution, and only on code paths that execute rarely.
+After all, a huge amount of effort was required to remove the Linux
+kernel's old "Big Kernel Lock", so let's please be very careful about
+adding new "little kernel locks".
+
+One of the advantages of locking is that, in happy contrast with the
+year 1981, almost all kernel developers are very familiar with locking.
+The Linux kernel's lockdep (CONFIG_PROVE_LOCKING=y) is very helpful with
+the formerly feared deadlock scenarios.
+
+Please use the standard locking primitives provided by the kernel rather
+than rolling your own. For one thing, the standard primitives interact
+properly with lockdep. For another thing, these primitives have been
+tuned to deal better with high contention. And for one final thing, it is
+surprisingly hard to correctly code production-quality lock acquisition
+and release functions. After all, even simple non-production-quality
+locking functions must carefully prevent both the CPU and the compiler
+from moving code in either direction across the locking function.
+
+Despite the scalability limitations of single-threaded code, RCU
+takes this approach for much of its grace-period processing and also
+for early-boot operation. The reason RCU is able to scale despite
+single-threaded grace-period processing is use of batching, where all
+updates that accumulated during one grace period are handled by the
+next one. In other words, slowing down grace-period processing makes
+it more efficient. Nor is RCU unique: Similar batching optimizations
+are used in many I/O operations.
+
+
+Packaged code
+=============
+
+Even if performance and scalability concerns prevent your code from
+being completely single-threaded, it is often possible to use library
+functions that handle the concurrency nearly or entirely on their own.
+This approach delegates any LKMM worries to the library maintainer.
+
+In the kernel, what is the "library"? Quite a bit. It includes the
+contents of the lib/ directory and much of the include/linux/ directory,
+along with a lot of other heavily used APIs. Examples include
+the list macros (for example, include/linux/{,rcu}list.h), workqueues,
+smp_call_function(), and the various hash tables and search trees.
+
+
+Data locking
+============
+
+With code locking, we use single-threaded code execution to guarantee
+serialized access to the data that the code is accessing. However,
+we can also achieve this by instead associating the lock with specific
+instances of the data structures. This creates a "critical section"
+in the code execution that will execute as though it is single threaded.
+By placing all the accesses and modifications to a shared data structure
+inside a critical section, we ensure that the execution context that
+holds the lock has exclusive access to the shared data.
+
+The poster boy for this approach is the hash table, where placing a lock
+in each hash bucket allows operations on different buckets to proceed
+concurrently. This works because the buckets do not overlap with each
+other, so that an operation on one bucket does not interfere with any
+other bucket.
+
+As the number of buckets increases, data locking scales naturally.
+In particular, if the amount of data increases with the number of CPUs,
+increasing the number of buckets as the number of CPUs increases results
+in a naturally scalable data structure.
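+
+For example, here is a minimal per-bucket-locking sketch. The bucket
+structure, the table, the hash_foo() function, and the "node" member of
+the hypothetical struct foo are all illustrative only:
+
+	struct foo_bucket {
+		spinlock_t lock;	/* Initialized elsewhere via spin_lock_init(). */
+		struct hlist_head chain;
+	};
+	static struct foo_bucket foo_table[FOO_NBUCKETS];
+
+	void foo_insert(struct foo *fp)
+	{
+		struct foo_bucket *b = &foo_table[hash_foo(fp) % FOO_NBUCKETS];
+
+		spin_lock(&b->lock);	/* Only this bucket is locked. */
+		hlist_add_head(&fp->node, &b->chain);
+		spin_unlock(&b->lock);
+	}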
+
+
+Per-CPU processing
+==================
+
+Partitioning processing and data over CPUs allows each CPU to take
+a single-threaded approach while providing excellent performance and
+scalability. Of course, there is no free lunch: The dark side of this
+excellence is substantially increased memory footprint.
+
+In addition, it is sometimes necessary to update some global
+view of this processing and data, in which case something like locking
+must be used to protect this global view. This is the approach taken
+by the percpu_counter infrastructure. In many cases, there are already
+generic/library variants of commonly used per-cpu constructs available.
+Please use them rather than rolling your own.
+
+RCU uses DEFINE_PER_CPU*() declarations to create a number of per-CPU
+data sets. For example, each CPU does private quiescent-state processing
+within its instance of the per-CPU rcu_data structure, and then uses data
+locking to report quiescent states up the grace-period combining tree.
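+
+For example, here is a minimal per-CPU-counter sketch, where
+"foo_count" is a hypothetical statistic whose exact global value is
+needed only occasionally:
+
+	static DEFINE_PER_CPU(unsigned long, foo_count);
+
+	void foo_count_inc(void)
+	{
+		this_cpu_inc(foo_count);	/* Touches only this CPU's instance. */
+	}
+
+	unsigned long foo_count_sum(void)	/* Occasional global view. */
+	{
+		unsigned long sum = 0;
+		int cpu;
+
+		for_each_possible_cpu(cpu)
+			sum += per_cpu(foo_count, cpu);
+		return sum;
+	}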
+
+
+Packaged primitives: Sequence locking
+=====================================
+
+Lockless programming is considered by many to be more difficult than
+lock-based programming, but there are a few lockless design patterns that
+have been built out into an API. One of these APIs is sequence locking.
+Although this API can be used in extremely complex ways, there are simple
+and effective ways of using it that avoid the need to pay attention to
+memory ordering.
+
+The basic keep-things-simple rule for sequence locking is "do not write
+in read-side code". Yes, you can do writes from within sequence-locking
+readers, but it won't be so simple. For example, such writes will be
+lockless and should be idempotent.
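+
+For example, here is a minimal sketch in which readers retry but never
+write. The protected "foo_state" structure is hypothetical:
+
+	static DEFINE_SEQLOCK(foo_seqlock);
+	static struct foo_state foo_state;	/* Protected by foo_seqlock. */
+
+	void foo_update(const struct foo_state *newval)
+	{
+		write_seqlock(&foo_seqlock);	/* Also serializes writers. */
+		foo_state = *newval;
+		write_sequnlock(&foo_seqlock);
+	}
+
+	void foo_read(struct foo_state *snap)
+	{
+		unsigned int seq;
+
+		do {
+			seq = read_seqbegin(&foo_seqlock);
+			*snap = foo_state;	/* Read side does not write foo_state. */
+		} while (read_seqretry(&foo_seqlock, seq));
+	}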
+
+For more sophisticated use cases, LKMM can guide you, including use
+cases involving combining sequence locking with other synchronization
+primitives. (LKMM does not yet know about sequence locking, so it is
+currently necessary to open-code it in your litmus tests.)
+
+Additional information may be found in include/linux/seqlock.h.
+
+Packaged primitives: RCU
+========================
+
+Another lockless design pattern that has been baked into an API
+is RCU. The Linux kernel makes sophisticated use of RCU, but the
+keep-things-simple rules for RCU are "do not write in read-side code",
+"do not update anything that is visible to and accessed by readers",
+and "protect updates with locking".
+
+These rules are illustrated by the functions foo_update_a() and
+foo_get_a() shown in Documentation/RCU/whatisRCU.rst. Additional
+RCU usage patterns may be found in Documentation/RCU and in the
+source code.
+
+
+Packaged primitives: Atomic operations
+======================================
+
+Back in the day, the Linux kernel had three types of atomic operations:
+
+1. Initialization and read-out, such as atomic_set() and atomic_read().
+
+2. Operations that did not return a value and provided no ordering,
+ such as atomic_inc() and atomic_dec().
+
+3. Operations that returned a value and provided full ordering, such as
+ atomic_add_return() and atomic_dec_and_test(). Note that some
+ value-returning operations provide full ordering only conditionally.
+ For example, cmpxchg() provides ordering only upon success.
+
+More recent kernels have operations that return a value but do not
+provide full ordering. These are flagged with either a _relaxed()
+suffix (providing no ordering), or an _acquire() or _release() suffix
+(providing limited ordering).
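+
+For example, given a hypothetical atomic_t variable "a", the available
+orderings can be summarized as follows:
+
+	atomic_set(&a, 0);			/* Initialization, no ordering. */
+	atomic_inc(&a);				/* No value returned, no ordering. */
+	r0 = atomic_add_return(5, &a);		/* Value returned, full ordering. */
+	r1 = atomic_fetch_add_relaxed(5, &a);	/* Value returned, no ordering. */
+	r2 = atomic_fetch_add_acquire(5, &a);	/* Value returned, acquire ordering. */
+	r3 = atomic_fetch_add_release(5, &a);	/* Value returned, release ordering. */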
+
+Additional information may be found in these files:
+
+Documentation/atomic_t.txt
+Documentation/atomic_bitops.txt
+Documentation/core-api/refcount-vs-atomic.rst
+
+Reading code using these primitives is often also quite helpful.
+
+
+Lockless, fully ordered
+=======================
+
+When using locking, there often comes a time when it is necessary
+to access some variable or another without holding the data lock
+that serializes access to that variable.
+
+If you want to keep things simple, use the initialization and read-out
+operations from the previous section only when there are no racing
+accesses. Otherwise, use only fully ordered operations when accessing
+or modifying the variable. This approach guarantees that code prior
+to a given access to that variable will be seen by all CPUs as having
+happened before any code following any later access to that same variable.
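+
+For example, in the following sketch (with hypothetical variables "x"
+and "v", both initially zero), if task1()'s fully ordered RMW operation
+follows task0()'s, then task1() is guaranteed to see task0()'s earlier
+write to "x":
+
+	void task0(void)
+	{
+		WRITE_ONCE(x, 1);
+		atomic_inc_return(&v);		/* Fully ordered. */
+	}
+
+	void task1(void)
+	{
+		r0 = atomic_inc_return(&v);	/* Fully ordered. */
+		r1 = READ_ONCE(x);		/* r0 == 2 implies r1 == 1. */
+	}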
+
+Please note that per-CPU functions are not atomic operations and
+hence they do not provide any ordering guarantees at all.
+
+If the lockless accesses are frequently executed reads that are used
+only for heuristics, or if they are frequently executed writes that
+are used only for statistics, please see the next section.
+
+
+Lockless statistics and heuristics
+==================================
+
+Unordered primitives such as atomic_read(), atomic_set(), READ_ONCE(), and
+WRITE_ONCE() can safely be used in some cases. These primitives provide
+no ordering, but they do prevent the compiler from carrying out a number
+of destructive optimizations (for which please see the next section).
+One example use for these primitives is statistics, such as per-CPU
+counters exemplified by the rt_cache_stat structure's routing-cache
+statistics counters. Another example use case is heuristics, such as
+the jiffies_till_first_fqs and jiffies_till_next_fqs kernel parameters
+controlling how often RCU scans for idle CPUs.
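+
+For example, the following hypothetical sketch updates a statistic and
+consults a heuristic without any ordering, tolerating the occasional
+lost update or stale value:
+
+	/* Statistics: racing updates can occasionally be lost. */
+	WRITE_ONCE(foo_stats, READ_ONCE(foo_stats) + 1);
+
+	/* Heuristics: a slightly stale value is acceptable. */
+	if (READ_ONCE(foo_estimate) > FOO_THRESHOLD)
+		foo_rebalance();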
+
+But be careful. "Unordered" really does mean "unordered". It is all
+too easy to assume ordering, and this assumption must be avoided when
+using these primitives.
+
+
+Don't let the compiler trip you up
+==================================
+
+It can be quite tempting to use plain C-language accesses for lockless
+loads from and stores to shared variables. Although this is both
+possible and quite common in the Linux kernel, it does require a
+surprising amount of analysis, care, and knowledge about the compiler.
+Yes, some decades ago it was not unfair to consider a C compiler to be
+an assembler with added syntax and better portability, but the advent of
+sophisticated optimizing compilers means that those days are long gone.
+Today's optimizing compilers can profoundly rewrite your code during the
+translation process, and have long been ready, willing, and able to do so.
+
+Therefore, if you really need to use C-language assignments instead of
+READ_ONCE(), WRITE_ONCE(), and so on, you will need to have a very good
+understanding of both the C standard and your compiler. Here are some
+introductory references and some tooling to start you on this noble quest:
+
+Who's afraid of a big bad optimizing compiler?
+ https://lwn.net/Articles/793253/
+Calibrating your fear of big bad optimizing compilers
+ https://lwn.net/Articles/799218/
+Concurrency bugs should fear the big bad data-race detector (part 1)
+ https://lwn.net/Articles/816850/
+Concurrency bugs should fear the big bad data-race detector (part 2)
+ https://lwn.net/Articles/816854/
+
+
+More complex use cases
+======================
+
+If the alternatives above do not do what you need, please look at the
+recipes-pairs.txt file to peel off the next layer of the memory-ordering
+onion.
diff --git a/tools/memory-model/README b/tools/memory-model/README
index fc07b52f2028..dab38904206a 100644
--- a/tools/memory-model/README
+++ b/tools/memory-model/README
@@ -28,8 +28,35 @@ downloaded separately:
See "herdtools7/INSTALL.md" for installation instructions.
Note that although these tools usually provide backwards compatibility,
-this is not absolutely guaranteed. Therefore, if a later version does
-not work, please try using the exact version called out above.
+this is not absolutely guaranteed.
+
+For example, a future version of herd7 might not work with the model
+in this release. A compatible model will likely be made available in
+a later release of the Linux kernel.
+
+If you absolutely need to run the model in this particular release,
+please try using the exact version called out above.
+
+klitmus7 is independent of the model provided here. It has its own
+dependency on a target kernel release where converted code is built
+and executed. Any change in kernel APIs essential to klitmus7 will
+necessitate an upgrade of klitmus7.
+
+If you find any compatibility issues in klitmus7, please inform the
+memory model maintainers.
+
+klitmus7 Compatibility Table
+----------------------------
+
+ ============ ==========
+ target Linux herdtools7
+ ------------ ----------
+ -- 4.14 7.48 --
+ 4.15 -- 4.19 7.49 --
+ 4.20 -- 5.5 7.54 --
+ 5.6 -- 5.16 7.56 --
+ 5.17 -- 7.56.1 --
+ ============ ==========
==================
@@ -37,10 +64,32 @@ BASIC USAGE: HERD7
==================
The memory model is used, in conjunction with "herd7", to exhaustively
-explore the state space of small litmus tests.
+explore the state space of small litmus tests. Documentation describing
+the format, features, capabilities and limitations of these litmus
+tests is available in tools/memory-model/Documentation/litmus-tests.txt.
+
+Example litmus tests may be found in the Linux-kernel source tree:
+
+ tools/memory-model/litmus-tests/
+ Documentation/litmus-tests/
+
+Several thousand more example litmus tests are available here:
+
+ https://github.com/paulmckrcu/litmus
+ https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/herd
+ https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/perfbook.git/tree/CodeSamples/formal/litmus
-For example, to run SB+fencembonceonces.litmus against the memory model:
+Documentation describing litmus tests and how to use them may be found
+here:
+ tools/memory-model/Documentation/litmus-tests.txt
+
+The remainder of this section uses the SB+fencembonceonces.litmus test
+located in the tools/memory-model directory.
+
+To run SB+fencembonceonces.litmus against the memory model:
+
+ $ cd $LINUX_SOURCE_TREE/tools/memory-model
$ herd7 -conf linux-kernel.cfg litmus-tests/SB+fencembonceonces.litmus
Here is the corresponding output:
@@ -61,7 +110,11 @@ Here is the corresponding output:
The "Positive: 0 Negative: 3" and the "Never 0 3" each indicate that
this litmus test's "exists" clause can not be satisfied.
-See "herd7 -help" or "herdtools7/doc/" for more information.
+See "herd7 -help" or "herdtools7/doc/" for more information on running the
+tool itself, but please be aware that this documentation is intended for
+people who work on the memory model itself, that is, people making changes
+to the tools/memory-model/linux-kernel.* files. It is not intended for
+people focusing on writing, understanding, and running LKMM litmus tests.
=====================
@@ -98,24 +151,19 @@ that during two million trials, the state specified in this litmus
test's "exists" clause was not reached.
And, as with "herd7", please see "klitmus7 -help" or "herdtools7/doc/"
-for more information.
+for more information. And again, please be aware that this documentation
+is intended for people who work on the memory model itself, that is,
+people making changes to the tools/memory-model/linux-kernel.* files.
+It is not intended for people focusing on writing, understanding, and
+running LKMM litmus tests.
====================
DESCRIPTION OF FILES
====================
-Documentation/cheatsheet.txt
- Quick-reference guide to the Linux-kernel memory model.
-
-Documentation/explanation.txt
- Describes the memory model in detail.
-
-Documentation/recipes.txt
- Lists common memory-ordering patterns.
-
-Documentation/references.txt
- Provides background reading.
+Documentation/README
+ Guide to the other documents in the Documentation/ directory.
linux-kernel.bell
Categorizes the relevant instructions, including memory
@@ -148,6 +196,18 @@ litmus-tests
are listed in litmus-tests/README. A great deal more litmus
tests are available at https://github.com/paulmckrcu/litmus.
+ By "representative", it means the one in the litmus-tests
+ directory is:
+
+ 1) simple, the number of threads should be relatively
+ small and each thread function should be relatively
+ simple.
+ 2) orthogonal, there should be no two litmus tests
+ describing the same aspect of the memory model.
+ 3) textbook, developers can easily copy-paste-modify
+ the litmus tests to use the patterns on their own
+ code.
+
lock.cat
Provides a front-end analysis of lock acquisition and release,
for example, associating a lock acquisition with the preceding
@@ -161,112 +221,3 @@ README
This file.
scripts Various scripts, see scripts/README.
-
-
-===========
-LIMITATIONS
-===========
-
-The Linux-kernel memory model (LKMM) has the following limitations:
-
-1. Compiler optimizations are not accurately modeled. Of course,
- the use of READ_ONCE() and WRITE_ONCE() limits the compiler's
- ability to optimize, but under some circumstances it is possible
- for the compiler to undermine the memory model. For more
- information, see Documentation/explanation.txt (in particular,
- the "THE PROGRAM ORDER RELATION: po AND po-loc" and "A WARNING"
- sections).
-
- Note that this limitation in turn limits LKMM's ability to
- accurately model address, control, and data dependencies.
- For example, if the compiler can deduce the value of some variable
- carrying a dependency, then the compiler can break that dependency
- by substituting a constant of that value.
-
-2. Multiple access sizes for a single variable are not supported,
- and neither are misaligned or partially overlapping accesses.
-
-3. Exceptions and interrupts are not modeled. In some cases,
- this limitation can be overcome by modeling the interrupt or
- exception with an additional process.
-
-4. I/O such as MMIO or DMA is not supported.
-
-5. Self-modifying code (such as that found in the kernel's
- alternatives mechanism, function tracer, Berkeley Packet Filter
- JIT compiler, and module loader) is not supported.
-
-6. Complete modeling of all variants of atomic read-modify-write
- operations, locking primitives, and RCU is not provided.
- For example, call_rcu() and rcu_barrier() are not supported.
- However, a substantial amount of support is provided for these
- operations, as shown in the linux-kernel.def file.
-
- a. When rcu_assign_pointer() is passed NULL, the Linux
- kernel provides no ordering, but LKMM models this
- case as a store release.
-
- b. The "unless" RMW operations are not currently modeled:
- atomic_long_add_unless(), atomic_add_unless(),
- atomic_inc_unless_negative(), and
- atomic_dec_unless_positive(). These can be emulated
- in litmus tests, for example, by using atomic_cmpxchg().
-
- c. The call_rcu() function is not modeled. It can be
- emulated in litmus tests by adding another process that
- invokes synchronize_rcu() and the body of the callback
- function, with (for example) a release-acquire from
- the site of the emulated call_rcu() to the beginning
- of the additional process.
-
- d. The rcu_barrier() function is not modeled. It can be
- emulated in litmus tests emulating call_rcu() via
- (for example) a release-acquire from the end of each
- additional call_rcu() process to the site of the
- emulated rcu-barrier().
-
- e. Although sleepable RCU (SRCU) is now modeled, there
- are some subtle differences between its semantics and
- those in the Linux kernel. For example, the kernel
- might interpret the following sequence as two partially
- overlapping SRCU read-side critical sections:
-
- 1 r1 = srcu_read_lock(&my_srcu);
- 2 do_something_1();
- 3 r2 = srcu_read_lock(&my_srcu);
- 4 do_something_2();
- 5 srcu_read_unlock(&my_srcu, r1);
- 6 do_something_3();
- 7 srcu_read_unlock(&my_srcu, r2);
-
- In contrast, LKMM will interpret this as a nested pair of
- SRCU read-side critical sections, with the outer critical
- section spanning lines 1-7 and the inner critical section
- spanning lines 3-5.
-
- This difference would be more of a concern had anyone
- identified a reasonable use case for partially overlapping
- SRCU read-side critical sections. For more information,
- please see: https://paulmck.livejournal.com/40593.html
-
- f. Reader-writer locking is not modeled. It can be
- emulated in litmus tests using atomic read-modify-write
- operations.
-
-The "herd7" tool has some additional limitations of its own, apart from
-the memory model:
-
-1. Non-trivial data structures such as arrays or structures are
- not supported. However, pointers are supported, allowing trivial
- linked lists to be constructed.
-
-2. Dynamic memory allocation is not supported, although this can
- be worked around in some cases by supplying multiple statically
- allocated variables.
-
-Some of these limitations may be overcome in the future, but others are
-more likely to be addressed by incorporating the Linux-kernel memory model
-into other tools.
-
-Finally, please note that LKMM is subject to change as hardware, use cases,
-and compilers evolve.
diff --git a/tools/memory-model/linux-kernel.bell b/tools/memory-model/linux-kernel.bell
index 5be86b1025e8..ce068700939c 100644
--- a/tools/memory-model/linux-kernel.bell
+++ b/tools/memory-model/linux-kernel.bell
@@ -31,7 +31,8 @@ enum Barriers = 'wmb (*smp_wmb*) ||
'before-atomic (*smp_mb__before_atomic*) ||
'after-atomic (*smp_mb__after_atomic*) ||
'after-spinlock (*smp_mb__after_spinlock*) ||
- 'after-unlock-lock (*smp_mb__after_unlock_lock*)
+ 'after-unlock-lock (*smp_mb__after_unlock_lock*) ||
+ 'after-srcu-read-unlock (*smp_mb__after_srcu_read_unlock*)
instructions F[Barriers]
(* SRCU *)
@@ -53,32 +54,31 @@ let rcu-rscs = let rec
in matched
(* Validate nesting *)
-flag ~empty Rcu-lock \ domain(rcu-rscs) as unbalanced-rcu-locking
-flag ~empty Rcu-unlock \ range(rcu-rscs) as unbalanced-rcu-locking
+flag ~empty Rcu-lock \ domain(rcu-rscs) as unmatched-rcu-lock
+flag ~empty Rcu-unlock \ range(rcu-rscs) as unmatched-rcu-unlock
(* Compute matching pairs of nested Srcu-lock and Srcu-unlock *)
-let srcu-rscs = let rec
- unmatched-locks = Srcu-lock \ domain(matched)
- and unmatched-unlocks = Srcu-unlock \ range(matched)
- and unmatched = unmatched-locks | unmatched-unlocks
- and unmatched-po = ([unmatched] ; po ; [unmatched]) & loc
- and unmatched-locks-to-unlocks =
- ([unmatched-locks] ; po ; [unmatched-unlocks]) & loc
- and matched = matched | (unmatched-locks-to-unlocks \
- (unmatched-po ; unmatched-po))
- in matched
+let carry-srcu-data = (data ; [~ Srcu-unlock] ; rf)*
+let srcu-rscs = ([Srcu-lock] ; carry-srcu-data ; data ; [Srcu-unlock]) & loc
(* Validate nesting *)
-flag ~empty Srcu-lock \ domain(srcu-rscs) as unbalanced-srcu-locking
-flag ~empty Srcu-unlock \ range(srcu-rscs) as unbalanced-srcu-locking
+flag ~empty Srcu-lock \ domain(srcu-rscs) as unmatched-srcu-lock
+flag ~empty Srcu-unlock \ range(srcu-rscs) as unmatched-srcu-unlock
+flag ~empty (srcu-rscs^-1 ; srcu-rscs) \ id as multiple-srcu-matches
(* Check for use of synchronize_srcu() inside an RCU critical section *)
flag ~empty rcu-rscs & (po ; [Sync-srcu] ; po) as invalid-sleep
(* Validate SRCU dynamic match *)
-flag ~empty different-values(srcu-rscs) as srcu-bad-nesting
+flag ~empty different-values(srcu-rscs) as srcu-bad-value-match
(* Compute marked and plain memory accesses *)
let Marked = (~M) | IW | Once | Release | Acquire | domain(rmw) | range(rmw) |
- LKR | LKW | UL | LF | RL | RU
+ LKR | LKW | UL | LF | RL | RU | Srcu-lock | Srcu-unlock
let Plain = M \ Marked
+
+(* Redefine dependencies to include those carried through plain accesses *)
+let carry-dep = (data ; [~ Srcu-unlock] ; rfi)*
+let addr = carry-dep ; addr
+let ctrl = carry-dep ; ctrl
+let data = carry-dep ; data
diff --git a/tools/memory-model/linux-kernel.cat b/tools/memory-model/linux-kernel.cat
index 2a9b4fe4a84e..adf3c4f41229 100644
--- a/tools/memory-model/linux-kernel.cat
+++ b/tools/memory-model/linux-kernel.cat
@@ -27,7 +27,7 @@ include "lock.cat"
(* Release Acquire *)
let acq-po = [Acquire] ; po ; [M]
let po-rel = [M] ; po ; [Release]
-let po-unlock-rf-lock-po = po ; [UL] ; rf ; [LKR] ; po
+let po-unlock-lock-po = po ; [UL] ; (po|rf) ; [LKR] ; po
(* Fences *)
let R4rmb = R \ Noreturn (* Reads for which rmb works *)
@@ -37,8 +37,20 @@ let mb = ([M] ; fencerel(Mb) ; [M]) |
([M] ; fencerel(Before-atomic) ; [RMW] ; po? ; [M]) |
([M] ; po? ; [RMW] ; fencerel(After-atomic) ; [M]) |
([M] ; po? ; [LKW] ; fencerel(After-spinlock) ; [M]) |
- ([M] ; po ; [UL] ; (co | po) ; [LKW] ;
- fencerel(After-unlock-lock) ; [M])
+(*
+ * Note: The po-unlock-lock-po relation only passes the lock to the direct
+ * successor, perhaps giving the impression that the ordering of the
+ * smp_mb__after_unlock_lock() fence only affects a single lock handover.
+ * However, in a longer sequence of lock handovers, the implicit
+ * A-cumulative release fences of lock-release ensure that any stores that
+ * propagate to one of the involved CPUs before it hands over the lock to
+ * the next CPU will also propagate to the final CPU handing over the lock
+ * to the CPU that executes the fence. Therefore, all those stores are
+ * also affected by the fence.
+ *)
+ ([M] ; po-unlock-lock-po ;
+ [After-unlock-lock] ; po ; [M]) |
+ ([M] ; po? ; [Srcu-unlock] ; fencerel(After-srcu-read-unlock) ; [M])
let gp = po ; [Sync-rcu | Sync-srcu] ; po?
let strong-fence = mb | gp
@@ -69,13 +81,14 @@ let dep = addr | data
let rwdep = (dep | ctrl) ; [W]
let overwrite = co | fr
let to-w = rwdep | (overwrite & int) | (addr ; [Plain] ; wmb)
-let to-r = addr | (dep ; [Marked] ; rfi)
-let ppo = to-r | to-w | fence | (po-unlock-rf-lock-po & int)
+let to-r = (addr ; [R]) | (dep ; [Marked] ; rfi)
+let ppo = to-r | to-w | (fence & int) | (po-unlock-lock-po & int)
(* Propagation: Ordering from release operations and strong fences. *)
let A-cumul(r) = (rfe ; [Marked])? ; r
+let rmw-sequence = (rf ; rmw)*
let cumul-fence = [Marked] ; (A-cumul(strong-fence | po-rel) | wmb |
- po-unlock-rf-lock-po) ; [Marked]
+ po-unlock-lock-po) ; [Marked] ; rmw-sequence
let prop = [Marked] ; (overwrite & ext)? ; cumul-fence* ;
[Marked] ; rfe? ; [Marked]
@@ -174,7 +187,7 @@ let vis = cumul-fence* ; rfe? ; [Marked] ;
let w-pre-bounded = [Marked] ; (addr | fence)?
let r-pre-bounded = [Marked] ; (addr | nonrw-fence |
([R4rmb] ; fencerel(Rmb) ; [~Noreturn]))?
-let w-post-bounded = fence? ; [Marked]
+let w-post-bounded = fence? ; [Marked] ; rmw-sequence
let r-post-bounded = (nonrw-fence | ([~Noreturn] ; fencerel(Rmb) ; [R4rmb]))? ;
[Marked]
diff --git a/tools/memory-model/linux-kernel.def b/tools/memory-model/linux-kernel.def
index ef0f3c1850de..88a39601f525 100644
--- a/tools/memory-model/linux-kernel.def
+++ b/tools/memory-model/linux-kernel.def
@@ -24,6 +24,7 @@ smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
+smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }
// Exchange
@@ -49,8 +50,10 @@ synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }
// SRCU
-srcu_read_lock(X) __srcu{srcu-lock}(X)
-srcu_read_unlock(X,Y) { __srcu{srcu-unlock}(X,Y); }
+srcu_read_lock(X) __load{srcu-lock}(*X)
+srcu_read_unlock(X,Y) { __store{srcu-unlock}(*X,Y); }
+srcu_down_read(X) __load{srcu-lock}(*X)
+srcu_up_read(X,Y) { __store{srcu-unlock}(*X,Y); }
synchronize_srcu(X) { __srcu{sync-srcu}(X); }
synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); }
diff --git a/tools/memory-model/litmus-tests/.gitignore b/tools/memory-model/litmus-tests/.gitignore
index c492a1ddad91..19c379cf069d 100644
--- a/tools/memory-model/litmus-tests/.gitignore
+++ b/tools/memory-model/litmus-tests/.gitignore
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-*.litmus.out
+*.litmus.*
diff --git a/tools/memory-model/litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus b/tools/memory-model/litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus
new file mode 100644
index 000000000000..eb34123a6ffe
--- /dev/null
+++ b/tools/memory-model/litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus
@@ -0,0 +1,35 @@
+C LB+unlocklockonceonce+poacquireonce
+
+(*
+ * Result: Never
+ *
+ * If two locked critical sections execute on the same CPU, all accesses
+ * in the first must execute before any accesses in the second, even if the
+ * critical sections are protected by different locks. Note: Even when a
+ * write executes before a read, their memory effects can be reordered from
+ * the viewpoint of another CPU (the kind of reordering allowed by TSO).
+ *)
+
+{}
+
+P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
+{
+ int r1;
+
+ spin_lock(s);
+ r1 = READ_ONCE(*x);
+ spin_unlock(s);
+ spin_lock(t);
+ WRITE_ONCE(*y, 1);
+ spin_unlock(t);
+}
+
+P1(int *x, int *y)
+{
+ int r2;
+
+ r2 = smp_load_acquire(y);
+ WRITE_ONCE(*x, 1);
+}
+
+exists (0:r1=1 /\ 1:r2=1)
diff --git a/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
index a273da9faa6d..f8ca1229857a 100644
--- a/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
+++ b/tools/memory-model/litmus-tests/MP+fencewmbonceonce+fencermbonceonce.litmus
@@ -10,21 +10,21 @@ C MP+fencewmbonceonce+fencermbonceonce
{}
-P0(int *x, int *y)
+P0(int *buf, int *flag) // Producer
{
- WRITE_ONCE(*x, 1);
+ WRITE_ONCE(*buf, 1);
smp_wmb();
- WRITE_ONCE(*y, 1);
+ WRITE_ONCE(*flag, 1);
}
-P1(int *x, int *y)
+P1(int *buf, int *flag) // Consumer
{
int r0;
int r1;
- r0 = READ_ONCE(*y);
+ r0 = READ_ONCE(*flag);
smp_rmb();
- r1 = READ_ONCE(*x);
+ r1 = READ_ONCE(*buf);
}
-exists (1:r0=1 /\ 1:r1=0)
+exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *)
diff --git a/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus b/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus
index 97731b4bbdd8..d84160b9c1ae 100644
--- a/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus
+++ b/tools/memory-model/litmus-tests/MP+onceassign+derefonce.litmus
@@ -10,25 +10,24 @@ C MP+onceassign+derefonce
*)
{
-y=z;
-z=0;
+p=y;
}
-P0(int *x, int **y)
+P0(int *x, int **p) // Producer
{
WRITE_ONCE(*x, 1);
- rcu_assign_pointer(*y, x);
+ rcu_assign_pointer(*p, x);
}
-P1(int *x, int **y)
+P1(int *x, int **p) // Consumer
{
int *r0;
int r1;
rcu_read_lock();
- r0 = rcu_dereference(*y);
+ r0 = rcu_dereference(*p);
r1 = READ_ONCE(*r0);
rcu_read_unlock();
}
-exists (1:r0=x /\ 1:r1=0)
+exists (1:r0=x /\ 1:r1=0) (* Bad outcome. *)
diff --git a/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus b/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
index 50f4d62bbf0e..ba91cc63e148 100644
--- a/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
+++ b/tools/memory-model/litmus-tests/MP+polockmbonce+poacquiresilsil.litmus
@@ -10,10 +10,9 @@ C MP+polockmbonce+poacquiresilsil
* executed before the lock was acquired (loosely speaking).
*)
-{
-}
+{}
-P0(spinlock_t *lo, int *x)
+P0(spinlock_t *lo, int *x) // Producer
{
spin_lock(lo);
smp_mb__after_spinlock();
@@ -21,7 +20,7 @@ P0(spinlock_t *lo, int *x)
spin_unlock(lo);
}
-P1(spinlock_t *lo, int *x)
+P1(spinlock_t *lo, int *x) // Consumer
{
int r1;
int r2;
@@ -32,4 +31,4 @@ P1(spinlock_t *lo, int *x)
r3 = spin_is_locked(lo);
}
-exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)
+exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1) (* Bad outcome. *)
diff --git a/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus b/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
index abf81e7a0895..a5ea3ed8f52e 100644
--- a/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
+++ b/tools/memory-model/litmus-tests/MP+polockonce+poacquiresilsil.litmus
@@ -10,17 +10,16 @@ C MP+polockonce+poacquiresilsil
* speaking).
*)
-{
-}
+{}
-P0(spinlock_t *lo, int *x)
+P0(spinlock_t *lo, int *x) // Producer
{
spin_lock(lo);
WRITE_ONCE(*x, 1);
spin_unlock(lo);
}
-P1(spinlock_t *lo, int *x)
+P1(spinlock_t *lo, int *x) // Consumer
{
int r1;
int r2;
@@ -31,4 +30,4 @@ P1(spinlock_t *lo, int *x)
r3 = spin_is_locked(lo);
}
-exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)
+exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1) (* Bad outcome. *)
diff --git a/tools/memory-model/litmus-tests/MP+polocks.litmus b/tools/memory-model/litmus-tests/MP+polocks.litmus
index 712a4fcdf6ce..e6af05f70069 100644
--- a/tools/memory-model/litmus-tests/MP+polocks.litmus
+++ b/tools/memory-model/litmus-tests/MP+polocks.litmus
@@ -13,23 +13,23 @@ C MP+polocks
{}
-P0(int *x, int *y, spinlock_t *mylock)
+P0(int *buf, int *flag, spinlock_t *mylock) // Producer
{
- WRITE_ONCE(*x, 1);
+ WRITE_ONCE(*buf, 1);
spin_lock(mylock);
- WRITE_ONCE(*y, 1);
+ WRITE_ONCE(*flag, 1);
spin_unlock(mylock);
}
-P1(int *x, int *y, spinlock_t *mylock)
+P1(int *buf, int *flag, spinlock_t *mylock) // Consumer
{
int r0;
int r1;
spin_lock(mylock);
- r0 = READ_ONCE(*y);
+ r0 = READ_ONCE(*flag);
spin_unlock(mylock);
- r1 = READ_ONCE(*x);
+ r1 = READ_ONCE(*buf);
}
-exists (1:r0=1 /\ 1:r1=0)
+exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *)
diff --git a/tools/memory-model/litmus-tests/MP+poonceonces.litmus b/tools/memory-model/litmus-tests/MP+poonceonces.litmus
index 172f0145301c..ba9c99c6cf65 100644
--- a/tools/memory-model/litmus-tests/MP+poonceonces.litmus
+++ b/tools/memory-model/litmus-tests/MP+poonceonces.litmus
@@ -9,19 +9,19 @@ C MP+poonceonces
{}
-P0(int *x, int *y)
+P0(int *buf, int *flag) // Producer
{
- WRITE_ONCE(*x, 1);
- WRITE_ONCE(*y, 1);
+ WRITE_ONCE(*buf, 1);
+ WRITE_ONCE(*flag, 1);
}
-P1(int *x, int *y)
+P1(int *buf, int *flag) // Consumer
{
int r0;
int r1;
- r0 = READ_ONCE(*y);
- r1 = READ_ONCE(*x);
+ r0 = READ_ONCE(*flag);
+ r1 = READ_ONCE(*buf);
}
-exists (1:r0=1 /\ 1:r1=0)
+exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *)
diff --git a/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus b/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus
index d52c68429722..f174bfe61702 100644
--- a/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus
+++ b/tools/memory-model/litmus-tests/MP+pooncerelease+poacquireonce.litmus
@@ -10,19 +10,19 @@ C MP+pooncerelease+poacquireonce
{}
-P0(int *x, int *y)
+P0(int *buf, int *flag) // Producer
{
- WRITE_ONCE(*x, 1);
- smp_store_release(y, 1);
+ WRITE_ONCE(*buf, 1);
+ smp_store_release(flag, 1);
}
-P1(int *x, int *y)
+P1(int *buf, int *flag) // Consumer
{
int r0;
int r1;
- r0 = smp_load_acquire(y);
- r1 = READ_ONCE(*x);
+ r0 = smp_load_acquire(flag);
+ r1 = READ_ONCE(*buf);
}
-exists (1:r0=1 /\ 1:r1=0)
+exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *)
diff --git a/tools/memory-model/litmus-tests/MP+porevlocks.litmus b/tools/memory-model/litmus-tests/MP+porevlocks.litmus
index 72c9276b363e..b9599141160e 100644
--- a/tools/memory-model/litmus-tests/MP+porevlocks.litmus
+++ b/tools/memory-model/litmus-tests/MP+porevlocks.litmus
@@ -13,23 +13,23 @@ C MP+porevlocks
{}
-P0(int *x, int *y, spinlock_t *mylock)
+P0(int *buf, int *flag, spinlock_t *mylock) // Consumer
{
int r0;
int r1;
- r0 = READ_ONCE(*y);
+ r0 = READ_ONCE(*flag);
spin_lock(mylock);
- r1 = READ_ONCE(*x);
+ r1 = READ_ONCE(*buf);
spin_unlock(mylock);
}
-P1(int *x, int *y, spinlock_t *mylock)
+P1(int *buf, int *flag, spinlock_t *mylock) // Producer
{
spin_lock(mylock);
- WRITE_ONCE(*x, 1);
+ WRITE_ONCE(*buf, 1);
spin_unlock(mylock);
- WRITE_ONCE(*y, 1);
+ WRITE_ONCE(*flag, 1);
}
-exists (0:r0=1 /\ 0:r1=0)
+exists (0:r0=1 /\ 0:r1=0) (* Bad outcome. *)
diff --git a/tools/memory-model/litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus b/tools/memory-model/litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus
new file mode 100644
index 000000000000..2feb1398be71
--- /dev/null
+++ b/tools/memory-model/litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus
@@ -0,0 +1,33 @@
+C MP+unlocklockonceonce+fencermbonceonce
+
+(*
+ * Result: Never
+ *
+ * If two locked critical sections execute on the same CPU, stores in the
+ * first must propagate to each CPU before stores in the second do, even if
+ * the critical sections are protected by different locks.
+ *)
+
+{}
+
+P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
+{
+ spin_lock(s);
+ WRITE_ONCE(*x, 1);
+ spin_unlock(s);
+ spin_lock(t);
+ WRITE_ONCE(*y, 1);
+ spin_unlock(t);
+}
+
+P1(int *x, int *y)
+{
+ int r1;
+ int r2;
+
+ r1 = READ_ONCE(*y);
+ smp_rmb();
+ r2 = READ_ONCE(*x);
+}
+
+exists (1:r1=1 /\ 1:r2=0)
diff --git a/tools/memory-model/litmus-tests/README b/tools/memory-model/litmus-tests/README
index 681f9067fa9e..d311a0ff1ae6 100644
--- a/tools/memory-model/litmus-tests/README
+++ b/tools/memory-model/litmus-tests/README
@@ -63,6 +63,10 @@ LB+poonceonces.litmus
As above, but with store-release replaced with WRITE_ONCE()
and load-acquire replaced with READ_ONCE().
+LB+unlocklockonceonce+poacquireonce.litmus
+ Does an unlock+lock pair provide an ordering guarantee between a
+ load and a store?
+
MP+onceassign+derefonce.litmus
As below, but with rcu_assign_pointer() and an rcu_dereference().
@@ -90,6 +94,10 @@ MP+porevlocks.litmus
As below, but with the first access of the writer process
and the second access of reader process protected by a lock.
+MP+unlocklockonceonce+fencermbonceonce.litmus
+ Does an unlock+lock pair provide an ordering guarantee between a
+ store and another store?
+
MP+fencewmbonceonce+fencermbonceonce.litmus
Does a smp_wmb() (between the stores) and an smp_rmb() (between
the loads) suffice for the message-passing litmus test, where one
diff --git a/tools/memory-model/litmus-tests/dep+plain.litmus b/tools/memory-model/litmus-tests/dep+plain.litmus
new file mode 100644
index 000000000000..ebf84daa9a59
--- /dev/null
+++ b/tools/memory-model/litmus-tests/dep+plain.litmus
@@ -0,0 +1,31 @@
+C dep+plain
+
+(*
+ * Result: Never
+ *
+ * This litmus test demonstrates that in LKMM, plain accesses
+ * carry dependencies much like accesses to registers:
+ * The data stored to *z1 and *z2 by P0() originates from P0()'s
+ * READ_ONCE(), and therefore using that data to compute the
+ * conditional of P0()'s if-statement creates a control dependency
+ * from that READ_ONCE() to P0()'s WRITE_ONCE().
+ *)
+
+{}
+
+P0(int *x, int *y, int *z1, int *z2)
+{
+ int a = READ_ONCE(*x);
+ *z1 = a;
+ *z2 = *z1;
+ if (*z2 == 1)
+ WRITE_ONCE(*y, 1);
+}
+
+P1(int *x, int *y)
+{
+ int r = smp_load_acquire(y);
+ smp_store_release(x, r);
+}
+
+exists (x=1 /\ y=1)
diff --git a/tools/memory-model/lock.cat b/tools/memory-model/lock.cat
index 6b52f365d73a..53b5a492739d 100644
--- a/tools/memory-model/lock.cat
+++ b/tools/memory-model/lock.cat
@@ -36,9 +36,9 @@ let RU = try RU with emptyset
(* Treat RL as a kind of LF: a read with no ordering properties *)
let LF = LF | RL
-(* There should be no ordinary R or W accesses to spinlocks *)
-let ALL-LOCKS = LKR | LKW | UL | LF | RU
-flag ~empty [M \ IW] ; loc ; [ALL-LOCKS] as mixed-lock-accesses
+(* There should be no ordinary R or W accesses to spinlocks or SRCU structs *)
+let ALL-LOCKS = LKR | LKW | UL | LF | RU | Srcu-lock | Srcu-unlock | Sync-srcu
+flag ~empty [M \ IW \ ALL-LOCKS] ; loc ; [ALL-LOCKS] as mixed-lock-accesses
(* Link Lock-Reads to their RMW-partner Lock-Writes *)
let lk-rmw = ([LKR] ; po-loc ; [LKW]) \ (po ; po)
diff --git a/tools/memory-model/scripts/README b/tools/memory-model/scripts/README
index 095c7eb36f9f..fb39bd0fd1b9 100644
--- a/tools/memory-model/scripts/README
+++ b/tools/memory-model/scripts/README
@@ -27,6 +27,14 @@ checklitmushist.sh
checklitmus.sh
Check a single litmus test against its "Result:" expected result.
+ Not intended for manual use.
+
+checktheselitmus.sh
+
+ Check the specified list of litmus tests against their "Result:"
+ expected results. This takes optional parseargs.sh arguments,
+ followed by "--" followed by pathnames starting from the current
+ directory.
cmplitmushist.sh
@@ -43,10 +51,10 @@ initlitmushist.sh
judgelitmus.sh
- Given a .litmus file and its .litmus.out herd7 output, check the
- .litmus.out file against the .litmus file's "Result:" comment to
- judge whether the test ran correctly. Not normally run manually,
- provided instead for use by other scripts.
+ Given a .litmus file and its herd7 output, check the output file
+ against the .litmus file's "Result:" comment to judge whether
+ the test ran correctly. Not normally run manually, provided
+ instead for use by other scripts.
newlitmushist.sh
@@ -68,3 +76,35 @@ runlitmushist.sh
README
This file
+
+Testing a change to LKMM might go as follows:
+
+ # Populate expected results without that change; this runs
+ # for about an hour on an 8-CPU x86 system:
+ scripts/initlitmushist.sh --timeout 10m --procs 10
+ # Incorporate the change:
+ git am -s -3 /path/to/patch # Or whatever it takes.
+
+ # Test the new version of LKMM as follows...
+
+ # Runs in seconds, good smoke test:
+ scripts/checkalllitmus.sh
+
+ # Compares results to those produced by initlitmushist.sh,
+ # and runs for about an hour on an 8-CPU x86 system:
+ scripts/checklitmushist.sh --timeout 10m --procs 10
+
+ # Checks results against Result tags, runs in minutes:
+ scripts/checkghlitmus.sh --timeout 10m --procs 10
+
+The checkghlitmus.sh script should not report errors in cases where the
+checklitmushist.sh script did not also report a change. However,
+this check is valuable because it can find errors in the
+original version of LKMM. Note, however, that given the above procedure,
+an error in the original LKMM version that is fixed by the patch will
+be reported both as a mismatch by checklitmushist.sh and as an error
+by checkghlitmus.sh. One exception to this rule of thumb is when the
+test fails completely on the original version of LKMM and passes on the
+new version. In this case, checklitmushist.sh will report a mismatch
+and checkghlitmus.sh will report success. This happens when the change
+to LKMM introduces a new primitive for which litmus tests already existed.
diff --git a/tools/memory-model/scripts/checkalllitmus.sh b/tools/memory-model/scripts/checkalllitmus.sh
index 3c0c7fbbd223..2d3ee850a839 100755
--- a/tools/memory-model/scripts/checkalllitmus.sh
+++ b/tools/memory-model/scripts/checkalllitmus.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
# SPDX-License-Identifier: GPL-2.0+
#
# Run herd7 tests on all .litmus files in the litmus-tests directory
@@ -8,6 +8,11 @@
# "^^^". It also outputs verification results to a file whose name is
# that of the specified litmus test, but with ".out" appended.
#
+# If the --hw argument is specified, this script translates the .litmus
+# C-language file to the specified type of assembly and verifies that.
+# But in this case, litmus tests using complex synchronization (such as
+# locking, RCU, and SRCU) are cheerfully ignored.
+#
# Usage:
# checkalllitmus.sh
#
@@ -17,7 +22,7 @@
#
# Copyright IBM Corporation, 2018
#
-# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
. scripts/parseargs.sh
@@ -30,29 +35,23 @@ else
exit 255
fi
-# Create any new directories that have appeared in the github litmus
-# repo since the last run.
+# Create any new directories that have appeared in the litmus-tests
+# directory since the last run.
if test "$LKMM_DESTDIR" != "."
then
find $litmusdir -type d -print |
( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
fi
-# Find the checklitmus script. If it is not where we expect it, then
-# assume that the caller has the PATH environment variable set
-# appropriately.
-if test -x scripts/checklitmus.sh
-then
- clscript=scripts/checklitmus.sh
-else
- clscript=checklitmus.sh
-fi
-
# Run the script on all the litmus tests in the specified directory
ret=0
for i in $litmusdir/*.litmus
do
- if ! $clscript $i
+ if test -n "$LKMM_HW_MAP_FILE" && ! scripts/simpletest.sh $i
+ then
+ continue
+ fi
+ if ! scripts/checklitmus.sh $i
then
ret=1
fi
diff --git a/tools/memory-model/scripts/checkghlitmus.sh b/tools/memory-model/scripts/checkghlitmus.sh
index 6589fbb6f653..d3dfb321259f 100755
--- a/tools/memory-model/scripts/checkghlitmus.sh
+++ b/tools/memory-model/scripts/checkghlitmus.sh
@@ -10,6 +10,7 @@
# parseargs.sh scripts for arguments.
. scripts/parseargs.sh
+. scripts/hwfnseg.sh
T=/tmp/checkghlitmus.sh.$$
trap 'rm -rf $T' 0
@@ -32,19 +33,19 @@ then
( cd "$LKMM_DESTDIR"; sed -e 's/^/mkdir -p /' | sh )
fi
-# Create a list of the C-language litmus tests previously run.
-( cd $LKMM_DESTDIR; find litmus -name '*.litmus.out' -print ) |
- sed -e 's/\.out$//' |
- xargs -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' |
+# Create a list of the specified litmus tests previously run.
+( cd $LKMM_DESTDIR; find litmus -name "*.litmus${hwfnseg}.out" -print ) |
+ sed -e "s/${hwfnseg}"'\.out$//' |
+ xargs -r grep -E -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' |
xargs -r grep -L "^P${LKMM_PROCS}"> $T/list-C-already
# Create a list of C-language litmus tests with "Result:" commands and
# no more than the specified number of processes.
-find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
-xargs < $T/list-C -r egrep -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result
+find litmus -name '*.litmus' -print | mselect7 -arch C > $T/list-C
+xargs < $T/list-C -r grep -E -l '^ \* Result: (Never|Sometimes|Always|DEADLOCK)' > $T/list-C-result
xargs < $T/list-C-result -r grep -L "^P${LKMM_PROCS}" > $T/list-C-result-short
-# Form list of tests without corresponding .litmus.out files
+# Form list of tests without corresponding .out files
sort $T/list-C-already $T/list-C-result-short | uniq -u > $T/list-C-needed
# Run any needed tests.
diff --git a/tools/memory-model/scripts/checklitmus.sh b/tools/memory-model/scripts/checklitmus.sh
index 11461ed40b5e..4c1d0cf0ddad 100755
--- a/tools/memory-model/scripts/checklitmus.sh
+++ b/tools/memory-model/scripts/checklitmus.sh
@@ -1,10 +1,8 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
-# Run a herd7 test and invokes judgelitmus.sh to check the result against
-# a "Result:" comment within the litmus test. It also outputs verification
-# results to a file whose name is that of the specified litmus test, but
-# with ".out" appended.
+# Invokes runlitmus.sh and judgelitmus.sh on its arguments to run the
+# specified litmus test and pass judgment on the results.
#
# Usage:
# checklitmus.sh file.litmus
@@ -15,20 +13,7 @@
#
# Copyright IBM Corporation, 2018
#
-# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
-litmus=$1
-herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg}
-
-if test -f "$litmus" -a -r "$litmus"
-then
- :
-else
- echo ' --- ' error: \"$litmus\" is not a readable file
- exit 255
-fi
-
-echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out
-/usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1
-
-scripts/judgelitmus.sh $litmus
+scripts/runlitmus.sh $1
+scripts/judgelitmus.sh $1
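
With the herd7 invocation moved into runlitmus.sh, checklitmus.sh is now a thin wrapper. A single-test run might look like the following sketch, assuming the LKMM_* environment normally exported by a parseargs.sh-sourcing caller:

    scripts/checklitmus.sh litmus-tests/MP+pooncerelease+poacquireonce.litmus
    # verification results land in $LKMM_DESTDIR/litmus-tests/MP+pooncerelease+poacquireonce.litmus.out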
diff --git a/tools/memory-model/scripts/checklitmushist.sh b/tools/memory-model/scripts/checklitmushist.sh
index 1d210ffb7c8a..406ecfc0aee4 100755
--- a/tools/memory-model/scripts/checklitmushist.sh
+++ b/tools/memory-model/scripts/checklitmushist.sh
@@ -12,7 +12,7 @@
#
# Copyright IBM Corporation, 2018
#
-# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
. scripts/parseargs.sh
diff --git a/tools/memory-model/scripts/checktheselitmus.sh b/tools/memory-model/scripts/checktheselitmus.sh
new file mode 100755
index 000000000000..10eeb5ecea6d
--- /dev/null
+++ b/tools/memory-model/scripts/checktheselitmus.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Invokes checklitmus.sh on its arguments to run the specified litmus
+# test and pass judgment on the results.
+#
+# Usage:
+# checktheselitmus.sh -- [ file1.litmus [ file2.litmus ... ] ]
+#
+# Run this in the directory containing the memory model, specifying the
+# pathnames of the litmus tests to check. The usual parseargs.sh arguments
+# can be specified prior to the "--".
+#
+# This script is intended for use with pathnames that start from the
+# tools/memory-model directory. If some of the pathnames instead start at
+# the root directory, they all must do so and the "--destdir /" parseargs.sh
+# argument must be specified prior to the "--". Alternatively, some other
+# "--destdir" argument can be supplied as long as the needed subdirectories
+# are populated.
+#
+# Copyright IBM Corporation, 2018
+#
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
+
+. scripts/parseargs.sh
+
+ret=0
+for i in "$@"
+do
+ if scripts/checklitmus.sh $i
+ then
+ :
+ else
+ ret=1
+ fi
+done
+if test "$ret" -ne 0
+then
+ echo " ^^^ VERIFICATION MISMATCHES" 1>&2
+else
+ echo All litmus tests verified as was expected. 1>&2
+fi
+exit $ret
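
Because this script sources parseargs.sh itself, it can be invoked directly from tools/memory-model; for example, picking a couple of the stock tests:

    scripts/checktheselitmus.sh -- litmus-tests/SB+fencembonceonces.litmus litmus-tests/S+poonceonces.litmus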
diff --git a/tools/memory-model/scripts/cmplitmushist.sh b/tools/memory-model/scripts/cmplitmushist.sh
index 0f498aeeccf5..ca1ac8b64614 100755
--- a/tools/memory-model/scripts/cmplitmushist.sh
+++ b/tools/memory-model/scripts/cmplitmushist.sh
@@ -12,12 +12,49 @@ trap 'rm -rf $T' 0
mkdir $T
# comparetest oldpath newpath
+badmacnam=0
+timedout=0
perfect=0
obsline=0
noobsline=0
obsresult=0
badcompare=0
comparetest () {
+ if grep -q ': Unknown macro ' $1 || grep -q ': Unknown macro ' $2
+ then
+ if grep -q ': Unknown macro ' $1
+ then
+ badname=`grep ': Unknown macro ' $1 |
+ sed -e 's/^.*: Unknown macro //' |
+ sed -e 's/ (User error).*$//'`
+ echo 'Current LKMM version does not know "'$badname'"' $1
+ fi
+ if grep -q ': Unknown macro ' $2
+ then
+ badname=`grep ': Unknown macro ' $2 |
+ sed -e 's/^.*: Unknown macro //' |
+ sed -e 's/ (User error).*$//'`
+ echo 'Current LKMM version does not know "'$badname'"' $2
+ fi
+ badmacnam=`expr "$badmacnam" + 1`
+ return 0
+ elif grep -q '^Command exited with non-zero status 124' $1 ||
+ grep -q '^Command exited with non-zero status 124' $2
+ then
+ if grep -q '^Command exited with non-zero status 124' $1 &&
+ grep -q '^Command exited with non-zero status 124' $2
+ then
+ echo Both runs timed out: $2
+ elif grep -q '^Command exited with non-zero status 124' $1
+ then
+ echo Old run timed out: $2
+ elif grep -q '^Command exited with non-zero status 124' $2
+ then
+ echo New run timed out: $2
+ fi
+ timedout=`expr "$timedout" + 1`
+ return 0
+ fi
grep -v 'maxresident)k\|minor)pagefaults\|^Time' $1 > $T/oldout
grep -v 'maxresident)k\|minor)pagefaults\|^Time' $2 > $T/newout
if cmp -s $T/oldout $T/newout && grep -q '^Observation' $1
@@ -38,7 +75,7 @@ comparetest () {
return 0
fi
else
- echo Missing Observation line "(e.g., herd7 timeout)": $2
+ echo Missing Observation line "(e.g., syntax error)": $2
noobsline=`expr "$noobsline" + 1`
return 0
fi
@@ -72,12 +109,20 @@ then
fi
if test "$noobsline" -ne 0
then
- echo Missing Observation line "(e.g., herd7 timeout)": $noobsline 1>&2
+ echo Missing Observation line "(e.g., syntax error)": $noobsline 1>&2
fi
if test "$obsresult" -ne 0
then
echo Matching Observation Always/Sometimes/Never result: $obsresult 1>&2
fi
+if test "$timedout" -ne 0
+then
+ echo "!!!" Timed out: $timedout 1>&2
+fi
+if test "$badmacnam" -ne 0
+then
+ echo "!!!" Unknown primitive: $badmacnam 1>&2
+fi
if test "$badcompare" -ne 0
then
echo "!!!" Result changed: $badcompare 1>&2
diff --git a/tools/memory-model/scripts/hwfnseg.sh b/tools/memory-model/scripts/hwfnseg.sh
new file mode 100755
index 000000000000..580c3281181c
--- /dev/null
+++ b/tools/memory-model/scripts/hwfnseg.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Generate the hardware extension to the litmus-test filename, or the
+# empty string if this is an LKMM run. The extension is placed in
+# the shell variable hwfnseg.
+#
+# Usage:
+# . hwfnseg.sh
+#
+# Copyright IBM Corporation, 2019
+#
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
+
+if test -z "$LKMM_HW_MAP_FILE"
+then
+ hwfnseg=
+else
+ hwfnseg=".$LKMM_HW_MAP_FILE"
+fi
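
A quick sketch of the resulting filename segment (AArch64 is only an example --hw value):

    LKMM_HW_MAP_FILE=AArch64
    . scripts/hwfnseg.sh
    echo "$hwfnseg"     # prints ".AArch64", so hardware results land in foo.litmus.AArch64.out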
diff --git a/tools/memory-model/scripts/initlitmushist.sh b/tools/memory-model/scripts/initlitmushist.sh
index 956b6957484d..31ea782955d3 100755
--- a/tools/memory-model/scripts/initlitmushist.sh
+++ b/tools/memory-model/scripts/initlitmushist.sh
@@ -60,7 +60,7 @@ fi
# Create a list of the C-language litmus tests with no more than the
# specified number of processes (per the --procs argument).
-find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C
+find litmus -name '*.litmus' -print | mselect7 -arch C > $T/list-C
xargs < $T/list-C -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
scripts/runlitmushist.sh < $T/list-C-short
diff --git a/tools/memory-model/scripts/judgelitmus.sh b/tools/memory-model/scripts/judgelitmus.sh
index 0cc63875e395..1ec5d89fcfbb 100755
--- a/tools/memory-model/scripts/judgelitmus.sh
+++ b/tools/memory-model/scripts/judgelitmus.sh
@@ -1,9 +1,22 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
-# Given a .litmus test and the corresponding .litmus.out file, check
-# the .litmus.out file against the "Result:" comment to judge whether
-# the test ran correctly.
+# Given a .litmus test and the corresponding litmus output file, check
+# the .litmus.out file against the "Result:" comment to judge whether the
+# test ran correctly. If the --hw argument is omitted, check against the
+# LKMM output, which is assumed to be in file.litmus.out. If either a
+# "DATARACE" marker in the "Result:" comment or a "Flag data-race" marker
+# in the LKMM output is present, the other must also be as well, at least
+# for litmus tests having a "Result:" comment. In this case, a failure of
+# the Always/Sometimes/Never portion of the "Result:" prediction will be
+# noted, but forgiven.
+#
+# If the --hw argument is provided, this is assumed to be a hardware
+# test, and the output is assumed to be in file.litmus.HW.out, where
+# "HW" is the --hw argument. In addition, non-Sometimes verification
+# results will be noted, but forgiven. Furthermore, if there is no
+# "Result:" comment but there is an LKMM .litmus.out file, the observation
+# in that file will be used to judge the assembly-language verification.
#
# Usage:
# judgelitmus.sh file.litmus
@@ -13,7 +26,7 @@
#
# Copyright IBM Corporation, 2018
#
-# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
litmus=$1
@@ -24,55 +37,120 @@ else
echo ' --- ' error: \"$litmus\" is not a readable file
exit 255
fi
-if test -f "$LKMM_DESTDIR/$litmus".out -a -r "$LKMM_DESTDIR/$litmus".out
+if test -z "$LKMM_HW_MAP_FILE"
+then
+ litmusout=$litmus.out
+ lkmmout=
+else
+ litmusout="`echo $litmus |
+ sed -e 's/\.litmus$/.litmus.'${LKMM_HW_MAP_FILE}'/'`.out"
+ lkmmout=$litmus.out
+fi
+if test -f "$LKMM_DESTDIR/$litmusout" -a -r "$LKMM_DESTDIR/$litmusout"
then
:
else
- echo ' --- ' error: \"$LKMM_DESTDIR/$litmus\".out is not a readable file
+ echo ' --- ' error: \"$LKMM_DESTDIR/$litmusout\" is not a readable file
exit 255
fi
-if grep -q '^ \* Result: ' $litmus
+if grep -q '^Flag data-race$' "$LKMM_DESTDIR/$litmusout"
+then
+ datarace_modeled=1
+fi
+if grep -q '^[( ]\* Result: ' $litmus
+then
+ outcome=`grep -m 1 '^[( ]\* Result: ' $litmus | awk '{ print $3 }'`
+ if grep -m1 '^[( ]\* Result: .* DATARACE' $litmus
+ then
+ datarace_predicted=1
+ fi
+ if test -n "$datarace_predicted" -a -z "$datarace_modeled" -a -z "$LKMM_HW_MAP_FILE"
+ then
+ echo '!!! Predicted data race not modeled' $litmus
+ exit 252
+ elif test -z "$datarace_predicted" -a -n "$datarace_modeled"
+ then
+ # Note that hardware models currently don't model data races
+ echo '!!! Unexpected data race modeled' $litmus
+ exit 253
+ fi
+elif test -n "$LKMM_HW_MAP_FILE" && grep -q '^Observation' $LKMM_DESTDIR/$lkmmout > /dev/null 2>&1
then
- outcome=`grep -m 1 '^ \* Result: ' $litmus | awk '{ print $3 }'`
+ outcome=`grep -m 1 '^Observation ' $LKMM_DESTDIR/$lkmmout | awk '{ print $3 }'`
else
outcome=specified
fi
-grep '^Observation' $LKMM_DESTDIR/$litmus.out
-if grep -q '^Observation' $LKMM_DESTDIR/$litmus.out
+grep '^Observation' $LKMM_DESTDIR/$litmusout
+if grep -q '^Observation' $LKMM_DESTDIR/$litmusout
then
:
+elif grep ': Unknown macro ' $LKMM_DESTDIR/$litmusout
+then
+ badname=`grep ': Unknown macro ' $LKMM_DESTDIR/$litmusout |
+ sed -e 's/^.*: Unknown macro //' |
+ sed -e 's/ (User error).*$//'`
+ badmsg=' !!! Current LKMM version does not know "'$badname'"'" $litmus"
+ echo $badmsg
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmusout
+ then
+ echo ' !!! '$badmsg >> $LKMM_DESTDIR/$litmusout 2>&1
+ fi
+ exit 254
+elif grep '^Command exited with non-zero status 124' $LKMM_DESTDIR/$litmusout
+then
+ echo ' !!! Timeout' $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmusout
+ then
+ echo ' !!! Timeout' >> $LKMM_DESTDIR/$litmusout 2>&1
+ fi
+ exit 124
else
echo ' !!! Verification error' $litmus
- if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmusout
then
- echo ' !!! Verification error' >> $LKMM_DESTDIR/$litmus.out 2>&1
+ echo ' !!! Verification error' >> $LKMM_DESTDIR/$litmusout 2>&1
fi
exit 255
fi
if test "$outcome" = DEADLOCK
then
- if grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q 'Never 0 0$'
+ if grep '^Observation' $LKMM_DESTDIR/$litmusout | grep -q 'Never 0 0$'
then
ret=0
else
echo " !!! Unexpected non-$outcome verification" $litmus
- if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmusout
then
- echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+ echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmusout 2>&1
fi
ret=1
fi
-elif grep '^Observation' $LKMM_DESTDIR/$litmus.out | grep -q $outcome || test "$outcome" = Maybe
+elif grep '^Observation' $LKMM_DESTDIR/$litmusout | grep -q 'Never 0 0$'
+then
+ echo " !!! Unexpected non-$outcome deadlock" $litmus
+ if ! grep -q '!!!' $LKMM_DESTDIR/$litmusout
+ then
+ echo " !!! Unexpected non-$outcome deadlock" $litmus >> $LKMM_DESTDIR/$litmusout 2>&1
+ fi
+ ret=1
+elif grep '^Observation' $LKMM_DESTDIR/$litmusout | grep -q $outcome || test "$outcome" = Maybe
then
ret=0
else
- echo " !!! Unexpected non-$outcome verification" $litmus
- if ! grep -q '!!!' $LKMM_DESTDIR/$litmus.out
+ if test \( -n "$LKMM_HW_MAP_FILE" -a "$outcome" = Sometimes \) -o -n "$datarace_modeled"
then
- echo " !!! Unexpected non-$outcome verification" >> $LKMM_DESTDIR/$litmus.out 2>&1
+ flag="--- Forgiven"
+ ret=0
+ else
+ flag="!!! Unexpected"
+ ret=1
+ fi
+ echo " $flag non-$outcome verification" $litmus
+ if ! grep -qe "$flag" $LKMM_DESTDIR/$litmusout
+ then
+ echo " $flag non-$outcome verification" >> $LKMM_DESTDIR/$litmusout 2>&1
fi
- ret=1
fi
-tail -2 $LKMM_DESTDIR/$litmus.out | head -1
+tail -2 $LKMM_DESTDIR/$litmusout | head -1
exit $ret
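
The filename plumbing above boils down to the following sketch, assuming the LKMM_* environment set up by parseargs.sh and a hypothetical foo.litmus (AArch64 again being only an example --hw value):

    # LKMM run: judge foo.litmus against $LKMM_DESTDIR/foo.litmus.out
    scripts/judgelitmus.sh litmus-tests/foo.litmus
    # Hardware run: judge against foo.litmus.AArch64.out, taking the expected outcome
    # from the "Result:" comment or, failing that, from the LKMM foo.litmus.out
    LKMM_HW_MAP_FILE=AArch64 scripts/judgelitmus.sh litmus-tests/foo.litmus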
diff --git a/tools/memory-model/scripts/newlitmushist.sh b/tools/memory-model/scripts/newlitmushist.sh
index 991f8f814881..25235e2049cf 100755
--- a/tools/memory-model/scripts/newlitmushist.sh
+++ b/tools/memory-model/scripts/newlitmushist.sh
@@ -12,7 +12,7 @@
#
# Copyright IBM Corporation, 2018
#
-# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
. scripts/parseargs.sh
@@ -43,7 +43,7 @@ fi
# Form full list of litmus tests with no more than the specified
# number of processes (per the --procs argument).
-find litmus -name '*.litmus' -exec grep -l -m 1 "^C " {} \; > $T/list-C-all
+find litmus -name '*.litmus' -print | mselect7 -arch C > $T/list-C-all
xargs < $T/list-C-all -r grep -L "^P${LKMM_PROCS}" > $T/list-C-short
# Form list of new tests. Note: This does not handle litmus-test deletion!
diff --git a/tools/memory-model/scripts/parseargs.sh b/tools/memory-model/scripts/parseargs.sh
index 40f52080fdbd..08ded5909860 100755
--- a/tools/memory-model/scripts/parseargs.sh
+++ b/tools/memory-model/scripts/parseargs.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0+
#
-# the corresponding .litmus.out file, and does not judge the result.
+# Parse arguments common to the various scripts.
#
# . scripts/parseargs.sh
#
@@ -9,7 +9,7 @@
#
# Copyright IBM Corporation, 2018
#
-# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
T=/tmp/parseargs.sh.$$
mkdir $T
@@ -27,6 +27,7 @@ initparam () {
initparam LKMM_DESTDIR "."
initparam LKMM_HERD_OPTIONS "-conf linux-kernel.cfg"
+initparam LKMM_HW_MAP_FILE ""
initparam LKMM_JOBS `getconf _NPROCESSORS_ONLN`
initparam LKMM_PROCS "3"
initparam LKMM_TIMEOUT "1m"
@@ -37,10 +38,11 @@ usagehelp () {
echo "Usage $scriptname [ arguments ]"
echo " --destdir path (place for .litmus.out, default by .litmus)"
echo " --herdopts -conf linux-kernel.cfg ..."
+ echo " --hw AArch64"
echo " --jobs N (number of jobs, default one per CPU)"
echo " --procs N (litmus tests with at most this many processes)"
echo " --timeout N (herd7 timeout (e.g., 10s, 1m, 2hr, 1d, '')"
- echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'"
+ echo "Defaults: --destdir '$LKMM_DESTDIR_DEF' --herdopts '$LKMM_HERD_OPTIONS_DEF' --hw '$LKMM_HW_MAP_FILE' --jobs '$LKMM_JOBS_DEF' --procs '$LKMM_PROCS_DEF' --timeout '$LKMM_TIMEOUT_DEF'"
exit 1
}
@@ -81,7 +83,7 @@ do
echo "Cannot create directory --destdir '$LKMM_DESTDIR'"
usage
fi
- if test -d "$LKMM_DESTDIR" -a -w "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR"
+ if test -d "$LKMM_DESTDIR" -a -x "$LKMM_DESTDIR"
then
:
else
@@ -95,6 +97,11 @@ do
LKMM_HERD_OPTIONS="$2"
shift
;;
+ --hw)
+ checkarg --hw "(.map file architecture name)" "$#" "$2" '^[A-Za-z0-9_-]\+' '^--'
+ LKMM_HW_MAP_FILE="$2"
+ shift
+ ;;
-j[1-9]*)
njobs="`echo $1 | sed -e 's/^-j//'`"
trailchars="`echo $njobs | sed -e 's/[0-9]\+\(.*\)$/\1/'`"
@@ -106,7 +113,7 @@ do
LKMM_JOBS="`echo $njobs | sed -e 's/^\([0-9]\+\).*$/\1/'`"
;;
--jobs|--job|-j)
- checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]\+$' '^--'
+ checkarg --jobs "(number)" "$#" "$2" '^[1-9][0-9]*$' '^--'
LKMM_JOBS="$2"
shift
;;
@@ -120,6 +127,10 @@ do
LKMM_TIMEOUT="$2"
shift
;;
+ --)
+ shift
+ break
+ ;;
*)
echo Unknown argument $1
usage
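
Now that --hw is parsed and the --jobs regex accepts single-digit counts, an invocation of one of the parseargs.sh-sourcing scripts might look like this sketch:

    scripts/checkalllitmus.sh --destdir /tmp/litmus --hw AArch64 --jobs 8 --timeout 2m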
diff --git a/tools/memory-model/scripts/runlitmus.sh b/tools/memory-model/scripts/runlitmus.sh
new file mode 100755
index 000000000000..94608d4b6502
--- /dev/null
+++ b/tools/memory-model/scripts/runlitmus.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Without the --hw argument, runs a herd7 test and outputs verification
+# results to a file whose name is that of the specified litmus test,
+# but with ".out" appended.
+#
+# If the --hw argument is specified, this script translates the .litmus
+# C-language file to the specified type of assembly and verifies that.
+# But in this case, litmus tests using complex synchronization (such as
+# locking, RCU, and SRCU) are cheerfully ignored.
+#
+# Either way, return the status of the herd7 command.
+#
+# Usage:
+# runlitmus.sh file.litmus
+#
+# Run this in the directory containing the memory model, specifying the
+# pathname of the litmus test to check. The caller is expected to have
+# properly set up the LKMM environment variables.
+#
+# Copyright IBM Corporation, 2019
+#
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
+
+litmus=$1
+if test -f "$litmus" -a -r "$litmus"
+then
+ :
+else
+ echo ' !!! ' error: \"$litmus\" is not a readable file
+ exit 255
+fi
+
+if test -z "$LKMM_HW_MAP_FILE" -o ! -e $LKMM_DESTDIR/$litmus.out
+then
+ # LKMM run
+ herdoptions=${LKMM_HERD_OPTIONS--conf linux-kernel.cfg}
+ echo Herd options: $herdoptions > $LKMM_DESTDIR/$litmus.out
+ /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $litmus >> $LKMM_DESTDIR/$litmus.out 2>&1
+ ret=$?
+ if test -z "$LKMM_HW_MAP_FILE"
+ then
+ exit $ret
+ fi
+ echo " --- " Automatically generated LKMM output for '"'--hw $LKMM_HW_MAP_FILE'"' run
+fi
+
+# Hardware run
+
+T=/tmp/checklitmushw.sh.$$
+trap 'rm -rf $T' 0 2
+mkdir $T
+
+# Generate filenames
+mapfile="Linux2${LKMM_HW_MAP_FILE}.map"
+themefile="$T/${LKMM_HW_MAP_FILE}.theme"
+herdoptions="-model $LKMM_HW_CAT_FILE"
+hwlitmus=`echo $litmus | sed -e 's/\.litmus$/.litmus.'${LKMM_HW_MAP_FILE}'/'`
+hwlitmusfile=`echo $hwlitmus | sed -e 's,^.*/,,'`
+
+# Don't run on litmus tests with complex synchronization
+if ! scripts/simpletest.sh $litmus
+then
+ echo ' --- ' error: \"$litmus\" contains locking, RCU, or SRCU
+ exit 254
+fi
+
+# Generate the assembly code and run herd7 on it.
+gen_theme7 -n 10 -map $mapfile -call Linux.call > $themefile
+jingle7 -v -theme $themefile $litmus > $LKMM_DESTDIR/$hwlitmus 2> $T/$hwlitmusfile.jingle7.out
+if grep -q "Generated 0 tests" $T/$hwlitmusfile.jingle7.out
+then
+ echo ' !!! ' jingle7 failed, errors in $hwlitmus.err
+ cp $T/$hwlitmusfile.jingle7.out $LKMM_DESTDIR/$hwlitmus.err
+ exit 253
+fi
+/usr/bin/time $LKMM_TIMEOUT_CMD herd7 -unroll 0 $LKMM_DESTDIR/$hwlitmus > $LKMM_DESTDIR/$hwlitmus.out 2>&1
+
+exit $?
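
For a hypothetical foo.litmus, one --hw run produces roughly the following artifacts (paths relative to $LKMM_DESTDIR; gen_theme7 and jingle7 come from herdtools7, and the LKMM_* variables are assumed to be set by the caller):

    LKMM_HW_MAP_FILE=AArch64 scripts/runlitmus.sh litmus-tests/foo.litmus
    # litmus-tests/foo.litmus.out          LKMM (herd7 -conf linux-kernel.cfg) results
    # litmus-tests/foo.litmus.AArch64      jingle7-generated assembly-language litmus test
    # litmus-tests/foo.litmus.AArch64.out  herd7 results for the assembly-language version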
diff --git a/tools/memory-model/scripts/runlitmushist.sh b/tools/memory-model/scripts/runlitmushist.sh
index 6ed376f495bb..c6c2bdc67a50 100755
--- a/tools/memory-model/scripts/runlitmushist.sh
+++ b/tools/memory-model/scripts/runlitmushist.sh
@@ -13,7 +13,9 @@
#
# Copyright IBM Corporation, 2018
#
-# Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
+
+. scripts/hwfnseg.sh
T=/tmp/runlitmushist.sh.$$
trap 'rm -rf $T' 0
@@ -30,15 +32,12 @@ fi
# Prefixes for per-CPU scripts
for ((i=0;i<$LKMM_JOBS;i++))
do
- echo dir="$LKMM_DESTDIR" > $T/$i.sh
echo T=$T >> $T/$i.sh
- echo herdoptions=\"$LKMM_HERD_OPTIONS\" >> $T/$i.sh
cat << '___EOF___' >> $T/$i.sh
runtest () {
- echo ' ... ' /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 '>' $dir/$1.out '2>&1'
- if /usr/bin/time $LKMM_TIMEOUT_CMD herd7 $herdoptions $1 > $dir/$1.out 2>&1
+ if scripts/runlitmus.sh $1
then
- if ! grep -q '^Observation ' $dir/$1.out
+ if ! grep -q '^Observation ' $LKMM_DESTDIR/$1$2.out
then
echo ' !!! Herd failed, no Observation:' $1
fi
@@ -47,10 +46,16 @@ do
if test "$exitcode" -eq 124
then
exitmsg="timed out"
+ elif test "$exitcode" -eq 253
+ then
+ exitmsg=
else
exitmsg="failed, exit code $exitcode"
fi
- echo ' !!! Herd' ${exitmsg}: $1
+ if test -n "$exitmsg"
+ then
+ echo ' !!! Herd' ${exitmsg}: $1
+ fi
fi
}
___EOF___
@@ -59,11 +64,13 @@ done
awk -v q="'" -v b='\\' '
{
print "echo `grep " q "^P[0-9]" b "+(" q " " $0 " | tail -1 | sed -e " q "s/^P" b "([0-9]" b "+" b ")(.*$/" b "1/" q "` " $0
-}' | bash |
-sort -k1n |
-awk -v ncpu=$LKMM_JOBS -v t=$T '
+}' | sh | sort -k1n |
+awk -v dq='"' -v hwfnseg="$hwfnseg" -v ncpu="$LKMM_JOBS" -v t="$T" '
{
- print "runtest " $2 >> t "/" NR % ncpu ".sh";
+ print "if test -z " dq hwfnseg dq " || scripts/simpletest.sh " dq $2 dq
+ print "then"
+ print "\techo runtest " dq $2 dq " " hwfnseg " >> " t "/" NR % ncpu ".sh";
+ print "fi"
}
END {
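
A sketch of feeding this script, mirroring the initlitmushist.sh usage above; it assumes the LKMM_* environment from parseargs.sh and a litmus/ archive in the current directory:

    find litmus -name '*.litmus' -print | mselect7 -arch C | scripts/runlitmushist.sh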
diff --git a/tools/memory-model/scripts/simpletest.sh b/tools/memory-model/scripts/simpletest.sh
new file mode 100755
index 000000000000..7edc5d361665
--- /dev/null
+++ b/tools/memory-model/scripts/simpletest.sh
@@ -0,0 +1,35 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Give zero status if this is a simple test and non-zero otherwise.
+# Simple tests do not contain locking, RCU, or SRCU.
+#
+# Usage:
+# simpletest.sh file.litmus
+#
+# Copyright IBM Corporation, 2019
+#
+# Author: Paul E. McKenney <paulmck@linux.ibm.com>
+
+
+litmus=$1
+
+if test -f "$litmus" -a -r "$litmus"
+then
+ :
+else
+ echo ' --- ' error: \"$litmus\" is not a readable file
+ exit 255
+fi
+exclude="^[[:space:]]*\("
+exclude="${exclude}spin_lock(\|spin_unlock(\|spin_trylock(\|spin_is_locked("
+exclude="${exclude}\|rcu_read_lock(\|rcu_read_unlock("
+exclude="${exclude}\|synchronize_rcu(\|synchronize_rcu_expedited("
+exclude="${exclude}\|srcu_read_lock(\|srcu_read_unlock("
+exclude="${exclude}\|synchronize_srcu(\|synchronize_srcu_expedited("
+exclude="${exclude}\)"
+if grep -q $exclude $litmus
+then
+ exit 255
+fi
+exit 0
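
A sketch of using the new helper as a guard, as checkalllitmus.sh and runlitmushist.sh now do (foo.litmus is hypothetical):

    if scripts/simpletest.sh litmus-tests/foo.litmus
    then
            echo foo.litmus contains no locking, RCU, or SRCU
    fi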
diff --git a/tools/vm/.gitignore b/tools/mm/.gitignore
index 79bb92ae1bb3..922879f93fc8 100644
--- a/tools/vm/.gitignore
+++ b/tools/mm/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
slabinfo
page-types
+page_owner_sort
diff --git a/tools/vm/Makefile b/tools/mm/Makefile
index 20f6cf04377f..6c1da51f4177 100644
--- a/tools/vm/Makefile
+++ b/tools/mm/Makefile
@@ -1,13 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for vm tools
#
+include ../scripts/Makefile.include
+
TARGETS=page-types slabinfo page_owner_sort
LIB_DIR = ../lib/api
LIBS = $(LIB_DIR)/libapi.a
-CFLAGS = -Wall -Wextra -I../lib/
-LDFLAGS = $(LIBS)
+CFLAGS += -Wall -Wextra -I../lib/
+LDFLAGS += $(LIBS)
all: $(TARGETS)
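
Since CFLAGS and LDFLAGS are now appended to rather than assigned, flags inherited from the environment survive; a minimal sketch, assuming a build from the kernel top level:

    CFLAGS='-O2 -g' make -C tools/mm     # -Wall -Wextra -I../lib/ are still appended by the Makefile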
diff --git a/tools/vm/page-types.c b/tools/mm/page-types.c
index 58c0eab71bca..8d5595b6c59f 100644
--- a/tools/vm/page-types.c
+++ b/tools/mm/page-types.c
@@ -78,12 +78,13 @@
#define KPF_ARCH 38
#define KPF_UNCACHED 39
#define KPF_SOFTDIRTY 40
+#define KPF_ARCH_2 41
-/* [48-] take some arbitrary free slots for expanding overloaded flags
+/* [47-] take some arbitrary free slots for expanding overloaded flags
* not part of kernel API
*/
+#define KPF_ANON_EXCLUSIVE 47
#define KPF_READAHEAD 48
-#define KPF_SLOB_FREE 49
#define KPF_SLUB_FROZEN 50
#define KPF_SLUB_DEBUG 51
#define KPF_FILE 61
@@ -135,9 +136,10 @@ static const char * const page_flag_names[] = {
[KPF_ARCH] = "h:arch",
[KPF_UNCACHED] = "c:uncached",
[KPF_SOFTDIRTY] = "f:softdirty",
+ [KPF_ARCH_2] = "H:arch_2",
+ [KPF_ANON_EXCLUSIVE] = "d:anon_exclusive",
[KPF_READAHEAD] = "I:readahead",
- [KPF_SLOB_FREE] = "P:slob_free",
[KPF_SLUB_FROZEN] = "A:slub_frozen",
[KPF_SLUB_DEBUG] = "E:slub_debug",
@@ -388,7 +390,7 @@ static void show_page_range(unsigned long voffset, unsigned long offset,
if (opt_pid)
printf("%lx\t", voff);
if (opt_file)
- printf("%lu\t", voff);
+ printf("%lx\t", voff);
if (opt_list_cgroup)
printf("@%llu\t", (unsigned long long)cgroup0);
if (opt_list_mapcnt)
@@ -416,7 +418,7 @@ static void show_page(unsigned long voffset, unsigned long offset,
if (opt_pid)
printf("%lx\t", voffset);
if (opt_file)
- printf("%lu\t", voffset);
+ printf("%lx\t", voffset);
if (opt_list_cgroup)
printf("@%llu\t", (unsigned long long)cgroup);
if (opt_list_mapcnt)
@@ -470,10 +472,12 @@ static int bit_mask_ok(uint64_t flags)
static uint64_t expand_overloaded_flags(uint64_t flags, uint64_t pme)
{
- /* SLOB/SLUB overload several page flags */
+ /* Anonymous pages overload PG_mappedtodisk */
+ if ((flags & BIT(ANON)) && (flags & BIT(MAPPEDTODISK)))
+ flags ^= BIT(MAPPEDTODISK) | BIT(ANON_EXCLUSIVE);
+
+ /* SLUB overloads several page flags */
if (flags & BIT(SLAB)) {
- if (flags & BIT(PRIVATE))
- flags ^= BIT(PRIVATE) | BIT(SLOB_FREE);
if (flags & BIT(ACTIVE))
flags ^= BIT(ACTIVE) | BIT(SLUB_FROZEN);
if (flags & BIT(ERROR))
@@ -965,22 +969,19 @@ static struct sigaction sigbus_action = {
.sa_flags = SA_SIGINFO,
};
-static void walk_file(const char *name, const struct stat *st)
+static void walk_file_range(const char *name, int fd,
+ unsigned long off, unsigned long end)
{
uint8_t vec[PAGEMAP_BATCH];
uint64_t buf[PAGEMAP_BATCH], flags;
uint64_t cgroup = 0;
uint64_t mapcnt = 0;
unsigned long nr_pages, pfn, i;
- off_t off, end = st->st_size;
- int fd;
ssize_t len;
void *ptr;
int first = 1;
- fd = checked_open(name, O_RDONLY|O_NOATIME|O_NOFOLLOW);
-
- for (off = 0; off < end; off += len) {
+ for (; off < end; off += len) {
nr_pages = (end - off + page_size - 1) / page_size;
if (nr_pages > PAGEMAP_BATCH)
nr_pages = PAGEMAP_BATCH;
@@ -1035,12 +1036,26 @@ got_sigbus:
if (first && opt_list) {
first = 0;
flush_page_range();
- show_file(name, st);
}
add_page(off / page_size + i, pfn,
flags, cgroup, mapcnt, buf[i]);
}
}
+}
+
+static void walk_file(const char *name, const struct stat *st)
+{
+ int i;
+ int fd;
+
+ fd = checked_open(name, O_RDONLY|O_NOATIME|O_NOFOLLOW);
+
+ if (!nr_addr_ranges)
+ add_addr_range(0, st->st_size / page_size);
+
+ for (i = 0; i < nr_addr_ranges; i++)
+ walk_file_range(name, fd, opt_offset[i] * page_size,
+ (opt_offset[i] + opt_size[i]) * page_size);
close(fd);
}
@@ -1060,10 +1075,10 @@ int walk_tree(const char *name, const struct stat *st, int type, struct FTW *f)
return 0;
}
+struct stat st;
+
static void walk_page_cache(void)
{
- struct stat st;
-
kpageflags_fd = checked_open(opt_kpageflags, O_RDONLY);
pagemap_fd = checked_open("/proc/self/pagemap", O_RDONLY);
sigaction(SIGBUS, &sigbus_action, NULL);
@@ -1329,7 +1344,7 @@ int main(int argc, char *argv[])
if (opt_list && opt_list_mapcnt)
kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY);
- if (opt_mark_idle && opt_file)
+ if (opt_mark_idle)
page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR);
if (opt_list && opt_pid)
@@ -1360,6 +1375,11 @@ int main(int argc, char *argv[])
if (opt_list)
printf("\n\n");
+ if (opt_file) {
+ show_file(opt_file, &st);
+ printf("\n");
+ }
+
show_summary();
if (opt_list_mapcnt)
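
A hedged usage sketch of the listing mode touched above; the -f/-l option letters are assumed from page-types' option handling, which is outside this hunk:

    # list each cached page of a file; the file-offset column is now printed in hex
    ./page-types -f /var/log/syslog -l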
diff --git a/tools/mm/page_owner_sort.c b/tools/mm/page_owner_sort.c
new file mode 100644
index 000000000000..99798894b879
--- /dev/null
+++ b/tools/mm/page_owner_sort.c
@@ -0,0 +1,897 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * User-space helper to sort the output of /sys/kernel/debug/page_owner
+ *
+ * Example use:
+ * cat /sys/kernel/debug/page_owner > page_owner_full.txt
+ * ./page_owner_sort page_owner_full.txt sorted_page_owner.txt
+ * Or sort by total memory:
+ * ./page_owner_sort -m page_owner_full.txt sorted_page_owner.txt
+ *
+ * See Documentation/mm/page_owner.rst
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <string.h>
+#include <regex.h>
+#include <errno.h>
+#include <linux/types.h>
+#include <getopt.h>
+
+#define bool int
+#define true 1
+#define false 0
+#define TASK_COMM_LEN 16
+
+struct block_list {
+ char *txt;
+ char *comm; // task command name
+ char *stacktrace;
+ __u64 ts_nsec;
+ __u64 free_ts_nsec;
+ int len;
+ int num;
+ int page_num;
+ pid_t pid;
+ pid_t tgid;
+ int allocator;
+};
+enum FILTER_BIT {
+ FILTER_UNRELEASE = 1<<1,
+ FILTER_PID = 1<<2,
+ FILTER_TGID = 1<<3,
+ FILTER_COMM = 1<<4
+};
+enum CULL_BIT {
+ CULL_UNRELEASE = 1<<1,
+ CULL_PID = 1<<2,
+ CULL_TGID = 1<<3,
+ CULL_COMM = 1<<4,
+ CULL_STACKTRACE = 1<<5,
+ CULL_ALLOCATOR = 1<<6
+};
+enum ALLOCATOR_BIT {
+ ALLOCATOR_CMA = 1<<1,
+ ALLOCATOR_SLAB = 1<<2,
+ ALLOCATOR_VMALLOC = 1<<3,
+ ALLOCATOR_OTHERS = 1<<4
+};
+enum ARG_TYPE {
+ ARG_TXT, ARG_COMM, ARG_STACKTRACE, ARG_ALLOC_TS, ARG_FREE_TS,
+ ARG_CULL_TIME, ARG_PAGE_NUM, ARG_PID, ARG_TGID, ARG_UNKNOWN, ARG_FREE,
+ ARG_ALLOCATOR
+};
+enum SORT_ORDER {
+ SORT_ASC = 1,
+ SORT_DESC = -1,
+};
+struct filter_condition {
+ pid_t *pids;
+ pid_t *tgids;
+ char **comms;
+ int pids_size;
+ int tgids_size;
+ int comms_size;
+};
+struct sort_condition {
+ int (**cmps)(const void *, const void *);
+ int *signs;
+ int size;
+};
+static struct filter_condition fc;
+static struct sort_condition sc;
+static regex_t order_pattern;
+static regex_t pid_pattern;
+static regex_t tgid_pattern;
+static regex_t comm_pattern;
+static regex_t ts_nsec_pattern;
+static regex_t free_ts_nsec_pattern;
+static struct block_list *list;
+static int list_size;
+static int max_size;
+static int cull;
+static int filter;
+static bool debug_on;
+
+static void set_single_cmp(int (*cmp)(const void *, const void *), int sign);
+
+int read_block(char *buf, char *ext_buf, int buf_size, FILE *fin)
+{
+ char *curr = buf, *const buf_end = buf + buf_size;
+
+ while (buf_end - curr > 1 && fgets(curr, buf_end - curr, fin)) {
+ if (*curr == '\n') { /* empty line */
+ return curr - buf;
+ }
+ if (!strncmp(curr, "PFN", 3)) {
+ strcpy(ext_buf, curr);
+ continue;
+ }
+ curr += strlen(curr);
+ }
+
+ return -1; /* EOF or no space left in buf. */
+}
+
+static int compare_txt(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return strcmp(l1->txt, l2->txt);
+}
+
+static int compare_stacktrace(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return strcmp(l1->stacktrace, l2->stacktrace);
+}
+
+static int compare_num(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l1->num - l2->num;
+}
+
+static int compare_page_num(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l1->page_num - l2->page_num;
+}
+
+static int compare_pid(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l1->pid - l2->pid;
+}
+
+static int compare_tgid(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l1->tgid - l2->tgid;
+}
+
+static int compare_allocator(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l1->allocator - l2->allocator;
+}
+
+static int compare_comm(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return strcmp(l1->comm, l2->comm);
+}
+
+static int compare_ts(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l1->ts_nsec < l2->ts_nsec ? -1 : 1;
+}
+
+static int compare_free_ts(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ return l1->free_ts_nsec < l2->free_ts_nsec ? -1 : 1;
+}
+
+static int compare_release(const void *p1, const void *p2)
+{
+ const struct block_list *l1 = p1, *l2 = p2;
+
+ if (!l1->free_ts_nsec && !l2->free_ts_nsec)
+ return 0;
+ if (l1->free_ts_nsec && l2->free_ts_nsec)
+ return 0;
+ return l1->free_ts_nsec ? 1 : -1;
+}
+
+static int compare_cull_condition(const void *p1, const void *p2)
+{
+ if (cull == 0)
+ return compare_txt(p1, p2);
+ if ((cull & CULL_STACKTRACE) && compare_stacktrace(p1, p2))
+ return compare_stacktrace(p1, p2);
+ if ((cull & CULL_PID) && compare_pid(p1, p2))
+ return compare_pid(p1, p2);
+ if ((cull & CULL_TGID) && compare_tgid(p1, p2))
+ return compare_tgid(p1, p2);
+ if ((cull & CULL_COMM) && compare_comm(p1, p2))
+ return compare_comm(p1, p2);
+ if ((cull & CULL_UNRELEASE) && compare_release(p1, p2))
+ return compare_release(p1, p2);
+ if ((cull & CULL_ALLOCATOR) && compare_allocator(p1, p2))
+ return compare_allocator(p1, p2);
+ return 0;
+}
+
+static int compare_sort_condition(const void *p1, const void *p2)
+{
+ int cmp = 0;
+
+ for (int i = 0; i < sc.size; ++i)
+ if (cmp == 0)
+ cmp = sc.signs[i] * sc.cmps[i](p1, p2);
+ return cmp;
+}
+
+static int search_pattern(regex_t *pattern, char *pattern_str, char *buf)
+{
+ int err, val_len;
+ regmatch_t pmatch[2];
+
+ err = regexec(pattern, buf, 2, pmatch, REG_NOTBOL);
+ if (err != 0 || pmatch[1].rm_so == -1) {
+ if (debug_on)
+ fprintf(stderr, "no matching pattern in %s\n", buf);
+ return -1;
+ }
+ val_len = pmatch[1].rm_eo - pmatch[1].rm_so;
+
+ memcpy(pattern_str, buf + pmatch[1].rm_so, val_len);
+
+ return 0;
+}
+
+static bool check_regcomp(regex_t *pattern, const char *regex)
+{
+ int err;
+
+ err = regcomp(pattern, regex, REG_EXTENDED | REG_NEWLINE);
+ if (err != 0 || pattern->re_nsub != 1) {
+ fprintf(stderr, "Invalid pattern %s code %d\n", regex, err);
+ return false;
+ }
+ return true;
+}
+
+static char **explode(char sep, const char *str, int *size)
+{
+ int count = 0, len = strlen(str);
+ int lastindex = -1, j = 0;
+
+ for (int i = 0; i < len; i++)
+ if (str[i] == sep)
+ count++;
+ char **ret = calloc(++count, sizeof(char *));
+
+ for (int i = 0; i < len; i++) {
+ if (str[i] == sep) {
+ ret[j] = calloc(i - lastindex, sizeof(char));
+ memcpy(ret[j++], str + lastindex + 1, i - lastindex - 1);
+ lastindex = i;
+ }
+ }
+ if (lastindex <= len - 1) {
+ ret[j] = calloc(len - lastindex, sizeof(char));
+ memcpy(ret[j++], str + lastindex + 1, strlen(str) - 1 - lastindex);
+ }
+ *size = j;
+ return ret;
+}
+
+static void free_explode(char **arr, int size)
+{
+ for (int i = 0; i < size; i++)
+ free(arr[i]);
+ free(arr);
+}
+
+# define FIELD_BUFF 25
+
+static int get_page_num(char *buf)
+{
+ int order_val;
+ char order_str[FIELD_BUFF] = {0};
+ char *endptr;
+
+ search_pattern(&order_pattern, order_str, buf);
+ errno = 0;
+ order_val = strtol(order_str, &endptr, 10);
+ if (order_val > 64 || errno != 0 || endptr == order_str || *endptr != '\0') {
+ if (debug_on)
+ fprintf(stderr, "wrong order in follow buf:\n%s\n", buf);
+ return 0;
+ }
+
+ return 1 << order_val;
+}
+
+static pid_t get_pid(char *buf)
+{
+ pid_t pid;
+ char pid_str[FIELD_BUFF] = {0};
+ char *endptr;
+
+ search_pattern(&pid_pattern, pid_str, buf);
+ errno = 0;
+ pid = strtol(pid_str, &endptr, 10);
+ if (errno != 0 || endptr == pid_str || *endptr != '\0') {
+ if (debug_on)
+ fprintf(stderr, "wrong/invalid pid in follow buf:\n%s\n", buf);
+ return -1;
+ }
+
+ return pid;
+
+}
+
+static pid_t get_tgid(char *buf)
+{
+ pid_t tgid;
+ char tgid_str[FIELD_BUFF] = {0};
+ char *endptr;
+
+ search_pattern(&tgid_pattern, tgid_str, buf);
+ errno = 0;
+ tgid = strtol(tgid_str, &endptr, 10);
+ if (errno != 0 || endptr == tgid_str || *endptr != '\0') {
+ if (debug_on)
+ fprintf(stderr, "wrong/invalid tgid in follow buf:\n%s\n", buf);
+ return -1;
+ }
+
+ return tgid;
+
+}
+
+static __u64 get_ts_nsec(char *buf)
+{
+ __u64 ts_nsec;
+ char ts_nsec_str[FIELD_BUFF] = {0};
+ char *endptr;
+
+ search_pattern(&ts_nsec_pattern, ts_nsec_str, buf);
+ errno = 0;
+ ts_nsec = strtoull(ts_nsec_str, &endptr, 10);
+ if (errno != 0 || endptr == ts_nsec_str || *endptr != '\0') {
+ if (debug_on)
+ fprintf(stderr, "wrong ts_nsec in follow buf:\n%s\n", buf);
+ return -1;
+ }
+
+ return ts_nsec;
+}
+
+static __u64 get_free_ts_nsec(char *buf)
+{
+ __u64 free_ts_nsec;
+ char free_ts_nsec_str[FIELD_BUFF] = {0};
+ char *endptr;
+
+ search_pattern(&free_ts_nsec_pattern, free_ts_nsec_str, buf);
+ errno = 0;
+ free_ts_nsec = strtoull(free_ts_nsec_str, &endptr, 10);
+ if (errno != 0 || endptr == free_ts_nsec_str || *endptr != '\0') {
+ if (debug_on)
+ fprintf(stderr, "wrong free_ts_nsec in follow buf:\n%s\n", buf);
+ return -1;
+ }
+
+ return free_ts_nsec;
+}
+
+static char *get_comm(char *buf)
+{
+ char *comm_str = malloc(TASK_COMM_LEN);
+
+ memset(comm_str, 0, TASK_COMM_LEN);
+
+ search_pattern(&comm_pattern, comm_str, buf);
+ errno = 0;
+ if (errno != 0) {
+ if (debug_on)
+ fprintf(stderr, "wrong comm in follow buf:\n%s\n", buf);
+ return NULL;
+ }
+
+ return comm_str;
+}
+
+static int get_arg_type(const char *arg)
+{
+ if (!strcmp(arg, "pid") || !strcmp(arg, "p"))
+ return ARG_PID;
+ else if (!strcmp(arg, "tgid") || !strcmp(arg, "tg"))
+ return ARG_TGID;
+ else if (!strcmp(arg, "name") || !strcmp(arg, "n"))
+ return ARG_COMM;
+ else if (!strcmp(arg, "stacktrace") || !strcmp(arg, "st"))
+ return ARG_STACKTRACE;
+ else if (!strcmp(arg, "free") || !strcmp(arg, "f"))
+ return ARG_FREE;
+ else if (!strcmp(arg, "txt") || !strcmp(arg, "T"))
+ return ARG_TXT;
+ else if (!strcmp(arg, "free_ts") || !strcmp(arg, "ft"))
+ return ARG_FREE_TS;
+ else if (!strcmp(arg, "alloc_ts") || !strcmp(arg, "at"))
+ return ARG_ALLOC_TS;
+ else if (!strcmp(arg, "allocator") || !strcmp(arg, "ator"))
+ return ARG_ALLOCATOR;
+ else {
+ return ARG_UNKNOWN;
+ }
+}
+
+static int get_allocator(const char *buf, const char *migrate_info)
+{
+ char *tmp, *first_line, *second_line;
+ int allocator = 0;
+
+ if (strstr(migrate_info, "CMA"))
+ allocator |= ALLOCATOR_CMA;
+ if (strstr(migrate_info, "slab"))
+ allocator |= ALLOCATOR_SLAB;
+ tmp = strstr(buf, "__vmalloc_node_range");
+ if (tmp) {
+ second_line = tmp;
+ while (*tmp != '\n')
+ tmp--;
+ tmp--;
+ while (*tmp != '\n')
+ tmp--;
+ first_line = ++tmp;
+ tmp = strstr(tmp, "alloc_pages");
+ if (tmp && first_line <= tmp && tmp < second_line)
+ allocator |= ALLOCATOR_VMALLOC;
+ }
+ if (allocator == 0)
+ allocator = ALLOCATOR_OTHERS;
+ return allocator;
+}
+
+static bool match_num_list(int num, int *list, int list_size)
+{
+ for (int i = 0; i < list_size; ++i)
+ if (list[i] == num)
+ return true;
+ return false;
+}
+
+static bool match_str_list(const char *str, char **list, int list_size)
+{
+ for (int i = 0; i < list_size; ++i)
+ if (!strcmp(list[i], str))
+ return true;
+ return false;
+}
+
+static bool is_need(char *buf)
+{
+ __u64 ts_nsec, free_ts_nsec;
+
+ ts_nsec = get_ts_nsec(buf);
+ free_ts_nsec = get_free_ts_nsec(buf);
+
+ if ((filter & FILTER_UNRELEASE) && free_ts_nsec != 0 && ts_nsec < free_ts_nsec)
+ return false;
+ if ((filter & FILTER_PID) && !match_num_list(get_pid(buf), fc.pids, fc.pids_size))
+ return false;
+ if ((filter & FILTER_TGID) &&
+ !match_num_list(get_tgid(buf), fc.tgids, fc.tgids_size))
+ return false;
+
+ char *comm = get_comm(buf);
+
+ if ((filter & FILTER_COMM) &&
+ !match_str_list(comm, fc.comms, fc.comms_size)) {
+ free(comm);
+ return false;
+ }
+ free(comm);
+ return true;
+}
+
+static bool add_list(char *buf, int len, char *ext_buf)
+{
+ if (list_size != 0 &&
+ len == list[list_size-1].len &&
+ memcmp(buf, list[list_size-1].txt, len) == 0) {
+ list[list_size-1].num++;
+ list[list_size-1].page_num += get_page_num(buf);
+ return true;
+ }
+ if (list_size == max_size) {
+ fprintf(stderr, "max_size too small??\n");
+ return false;
+ }
+ if (!is_need(buf))
+ return true;
+ list[list_size].pid = get_pid(buf);
+ list[list_size].tgid = get_tgid(buf);
+ list[list_size].comm = get_comm(buf);
+ list[list_size].txt = malloc(len+1);
+ if (!list[list_size].txt) {
+ fprintf(stderr, "Out of memory\n");
+ return false;
+ }
+ memcpy(list[list_size].txt, buf, len);
+ list[list_size].txt[len] = 0;
+ list[list_size].len = len;
+ list[list_size].num = 1;
+ list[list_size].page_num = get_page_num(buf);
+
+ list[list_size].stacktrace = strchr(list[list_size].txt, '\n') ?: "";
+ if (*list[list_size].stacktrace == '\n')
+ list[list_size].stacktrace++;
+ list[list_size].ts_nsec = get_ts_nsec(buf);
+ list[list_size].free_ts_nsec = get_free_ts_nsec(buf);
+ list[list_size].allocator = get_allocator(buf, ext_buf);
+ list_size++;
+ if (list_size % 1000 == 0) {
+ printf("loaded %d\r", list_size);
+ fflush(stdout);
+ }
+ return true;
+}
+
+static bool parse_cull_args(const char *arg_str)
+{
+ int size = 0;
+ char **args = explode(',', arg_str, &size);
+
+ for (int i = 0; i < size; ++i) {
+ int arg_type = get_arg_type(args[i]);
+
+ if (arg_type == ARG_PID)
+ cull |= CULL_PID;
+ else if (arg_type == ARG_TGID)
+ cull |= CULL_TGID;
+ else if (arg_type == ARG_COMM)
+ cull |= CULL_COMM;
+ else if (arg_type == ARG_STACKTRACE)
+ cull |= CULL_STACKTRACE;
+ else if (arg_type == ARG_FREE)
+ cull |= CULL_UNRELEASE;
+ else if (arg_type == ARG_ALLOCATOR)
+ cull |= CULL_ALLOCATOR;
+ else {
+ free_explode(args, size);
+ return false;
+ }
+ }
+ free_explode(args, size);
+ if (sc.size == 0)
+ set_single_cmp(compare_num, SORT_DESC);
+ return true;
+}
+
+static void set_single_cmp(int (*cmp)(const void *, const void *), int sign)
+{
+ if (sc.signs == NULL || sc.size < 1)
+ sc.signs = calloc(1, sizeof(int));
+ sc.signs[0] = sign;
+ if (sc.cmps == NULL || sc.size < 1)
+ sc.cmps = calloc(1, sizeof(int *));
+ sc.cmps[0] = cmp;
+ sc.size = 1;
+}
+
+static bool parse_sort_args(const char *arg_str)
+{
+ int size = 0;
+
+ if (sc.size != 0) { /* reset sort_condition */
+ free(sc.signs);
+ free(sc.cmps);
+ size = 0;
+ }
+
+ char **args = explode(',', arg_str, &size);
+
+ sc.signs = calloc(size, sizeof(int));
+ sc.cmps = calloc(size, sizeof(int *));
+ for (int i = 0; i < size; ++i) {
+ int offset = 0;
+
+ sc.signs[i] = SORT_ASC;
+ if (args[i][0] == '-' || args[i][0] == '+') {
+ if (args[i][0] == '-')
+ sc.signs[i] = SORT_DESC;
+ offset = 1;
+ }
+
+ int arg_type = get_arg_type(args[i]+offset);
+
+ if (arg_type == ARG_PID)
+ sc.cmps[i] = compare_pid;
+ else if (arg_type == ARG_TGID)
+ sc.cmps[i] = compare_tgid;
+ else if (arg_type == ARG_COMM)
+ sc.cmps[i] = compare_comm;
+ else if (arg_type == ARG_STACKTRACE)
+ sc.cmps[i] = compare_stacktrace;
+ else if (arg_type == ARG_ALLOC_TS)
+ sc.cmps[i] = compare_ts;
+ else if (arg_type == ARG_FREE_TS)
+ sc.cmps[i] = compare_free_ts;
+ else if (arg_type == ARG_TXT)
+ sc.cmps[i] = compare_txt;
+ else if (arg_type == ARG_ALLOCATOR)
+ sc.cmps[i] = compare_allocator;
+ else {
+ free_explode(args, size);
+ sc.size = 0;
+ return false;
+ }
+ }
+ sc.size = size;
+ free_explode(args, size);
+ return true;
+}
+
+static int *parse_nums_list(char *arg_str, int *list_size)
+{
+ int size = 0;
+ char **args = explode(',', arg_str, &size);
+ int *list = calloc(size, sizeof(int));
+
+ errno = 0;
+ for (int i = 0; i < size; ++i) {
+ char *endptr = NULL;
+
+ list[i] = strtol(args[i], &endptr, 10);
+ if (errno != 0 || endptr == args[i] || *endptr != '\0') {
+ free(list);
+ return NULL;
+ }
+ }
+ *list_size = size;
+ free_explode(args, size);
+ return list;
+}
+
+static void print_allocator(FILE *out, int allocator)
+{
+ fprintf(out, "allocated by ");
+ if (allocator & ALLOCATOR_CMA)
+ fprintf(out, "CMA ");
+ if (allocator & ALLOCATOR_SLAB)
+ fprintf(out, "SLAB ");
+ if (allocator & ALLOCATOR_VMALLOC)
+ fprintf(out, "VMALLOC ");
+ if (allocator & ALLOCATOR_OTHERS)
+ fprintf(out, "OTHERS ");
+}
+
+#define BUF_SIZE (128 * 1024)
+
+static void usage(void)
+{
+ printf("Usage: ./page_owner_sort [OPTIONS] <input> <output>\n"
+ "-m\t\tSort by total memory.\n"
+ "-s\t\tSort by the stack trace.\n"
+ "-t\t\tSort by times (default).\n"
+ "-p\t\tSort by pid.\n"
+ "-P\t\tSort by tgid.\n"
+ "-n\t\tSort by task command name.\n"
+ "-a\t\tSort by memory allocate time.\n"
+ "-r\t\tSort by memory release time.\n"
+ "-f\t\tFilter out the information of blocks whose memory has been released.\n"
+ "-d\t\tPrint debug information.\n"
+ "--pid <pidlist>\tSelect by pid. This selects the information of blocks whose process ID numbers appear in <pidlist>.\n"
+ "--tgid <tgidlist>\tSelect by tgid. This selects the information of blocks whose Thread Group ID numbers appear in <tgidlist>.\n"
+ "--name <cmdlist>\n\t\tSelect by command name. This selects the information of blocks whose command name appears in <cmdlist>.\n"
+ "--cull <rules>\tCull by user-defined rules.<rules> is a single argument in the form of a comma-separated list with some common fields predefined\n"
+ "--sort <order>\tSpecify sort order as: [+|-]key[,[+|-]key[,...]]\n"
+ );
+}
+
+int main(int argc, char **argv)
+{
+ FILE *fin, *fout;
+ char *buf, *ext_buf;
+ int i, count;
+ struct stat st;
+ int opt;
+ struct option longopts[] = {
+ { "pid", required_argument, NULL, 1 },
+ { "tgid", required_argument, NULL, 2 },
+ { "name", required_argument, NULL, 3 },
+ { "cull", required_argument, NULL, 4 },
+ { "sort", required_argument, NULL, 5 },
+ { 0, 0, 0, 0},
+ };
+
+ while ((opt = getopt_long(argc, argv, "adfmnprstP", longopts, NULL)) != -1)
+ switch (opt) {
+ case 'a':
+ set_single_cmp(compare_ts, SORT_ASC);
+ break;
+ case 'd':
+ debug_on = true;
+ break;
+ case 'f':
+ filter = filter | FILTER_UNRELEASE;
+ break;
+ case 'm':
+ set_single_cmp(compare_page_num, SORT_DESC);
+ break;
+ case 'p':
+ set_single_cmp(compare_pid, SORT_ASC);
+ break;
+ case 'r':
+ set_single_cmp(compare_free_ts, SORT_ASC);
+ break;
+ case 's':
+ set_single_cmp(compare_stacktrace, SORT_ASC);
+ break;
+ case 't':
+ set_single_cmp(compare_num, SORT_DESC);
+ break;
+ case 'P':
+ set_single_cmp(compare_tgid, SORT_ASC);
+ break;
+ case 'n':
+ set_single_cmp(compare_comm, SORT_ASC);
+ break;
+ case 1:
+ filter = filter | FILTER_PID;
+ fc.pids = parse_nums_list(optarg, &fc.pids_size);
+ if (fc.pids == NULL) {
+ fprintf(stderr, "wrong/invalid pid in from the command line:%s\n",
+ optarg);
+ exit(1);
+ }
+ break;
+ case 2:
+ filter = filter | FILTER_TGID;
+ fc.tgids = parse_nums_list(optarg, &fc.tgids_size);
+ if (fc.tgids == NULL) {
+ fprintf(stderr, "wrong/invalid tgid in from the command line:%s\n",
+ optarg);
+ exit(1);
+ }
+ break;
+ case 3:
+ filter = filter | FILTER_COMM;
+ fc.comms = explode(',', optarg, &fc.comms_size);
+ break;
+ case 4:
+ if (!parse_cull_args(optarg)) {
+ fprintf(stderr, "wrong argument after --cull option:%s\n",
+ optarg);
+ exit(1);
+ }
+ break;
+ case 5:
+ if (!parse_sort_args(optarg)) {
+ fprintf(stderr, "wrong argument after --sort option:%s\n",
+ optarg);
+ exit(1);
+ }
+ break;
+ default:
+ usage();
+ exit(1);
+ }
+
+ if (optind >= (argc - 1)) {
+ usage();
+ exit(1);
+ }
+
+ fin = fopen(argv[optind], "r");
+ fout = fopen(argv[optind + 1], "w");
+ if (!fin || !fout) {
+ usage();
+ perror("open: ");
+ exit(1);
+ }
+
+ if (!check_regcomp(&order_pattern, "order\\s*([0-9]*),"))
+ goto out_order;
+ if (!check_regcomp(&pid_pattern, "pid\\s*([0-9]*),"))
+ goto out_pid;
+ if (!check_regcomp(&tgid_pattern, "tgid\\s*([0-9]*) "))
+ goto out_tgid;
+ if (!check_regcomp(&comm_pattern, "tgid\\s*[0-9]*\\s*\\((.*)\\),\\s*ts"))
+ goto out_comm;
+ if (!check_regcomp(&ts_nsec_pattern, "ts\\s*([0-9]*)\\s*ns,"))
+ goto out_ts;
+ if (!check_regcomp(&free_ts_nsec_pattern, "free_ts\\s*([0-9]*)\\s*ns"))
+ goto out_free_ts;
+
+ fstat(fileno(fin), &st);
+ max_size = st.st_size / 100; /* hack ... */
+
+ list = malloc(max_size * sizeof(*list));
+ buf = malloc(BUF_SIZE);
+ ext_buf = malloc(BUF_SIZE);
+ if (!list || !buf || !ext_buf) {
+ fprintf(stderr, "Out of memory\n");
+ goto out_free;
+ }
+
+ for ( ; ; ) {
+ int buf_len = read_block(buf, ext_buf, BUF_SIZE, fin);
+
+ if (buf_len < 0)
+ break;
+ if (!add_list(buf, buf_len, ext_buf))
+ goto out_free;
+ }
+
+ printf("loaded %d\n", list_size);
+
+ printf("sorting ....\n");
+
+ qsort(list, list_size, sizeof(list[0]), compare_cull_condition);
+
+ printf("culling\n");
+
+ for (i = count = 0; i < list_size; i++) {
+ if (count == 0 ||
+ compare_cull_condition((void *)(&list[count-1]), (void *)(&list[i])) != 0) {
+ list[count++] = list[i];
+ } else {
+ list[count-1].num += list[i].num;
+ list[count-1].page_num += list[i].page_num;
+ }
+ }
+
+ qsort(list, count, sizeof(list[0]), compare_sort_condition);
+
+ for (i = 0; i < count; i++) {
+ if (cull == 0) {
+ fprintf(fout, "%d times, %d pages, ", list[i].num, list[i].page_num);
+ print_allocator(fout, list[i].allocator);
+ fprintf(fout, ":\n%s\n", list[i].txt);
+ }
+ else {
+ fprintf(fout, "%d times, %d pages",
+ list[i].num, list[i].page_num);
+ if (cull & CULL_PID || filter & FILTER_PID)
+ fprintf(fout, ", PID %d", list[i].pid);
+ if (cull & CULL_TGID || filter & FILTER_TGID)
+ fprintf(fout, ", TGID %d", list[i].tgid);
+ if (cull & CULL_COMM || filter & FILTER_COMM)
+ fprintf(fout, ", task_comm_name: %s", list[i].comm);
+ if (cull & CULL_ALLOCATOR) {
+ fprintf(fout, ", ");
+ print_allocator(fout, list[i].allocator);
+ }
+ if (cull & CULL_UNRELEASE)
+ fprintf(fout, " (%s)",
+ list[i].free_ts_nsec ? "UNRELEASED" : "RELEASED");
+ if (cull & CULL_STACKTRACE)
+ fprintf(fout, ":\n%s", list[i].stacktrace);
+ fprintf(fout, "\n");
+ }
+ }
+
+out_free:
+ if (ext_buf)
+ free(ext_buf);
+ if (buf)
+ free(buf);
+ if (list)
+ free(list);
+out_free_ts:
+ regfree(&free_ts_nsec_pattern);
+out_ts:
+ regfree(&ts_nsec_pattern);
+out_comm:
+ regfree(&comm_pattern);
+out_tgid:
+ regfree(&tgid_pattern);
+out_pid:
+ regfree(&pid_pattern);
+out_order:
+ regfree(&order_pattern);
+
+ return 0;
+}
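
A usage sketch built from the option names defined above (the first two lines repeat the file's own header example):

    cat /sys/kernel/debug/page_owner > page_owner_full.txt
    ./page_owner_sort -m page_owner_full.txt sorted_page_owner.txt
    # cull by stack trace and pid, then sort by descending allocation timestamp:
    ./page_owner_sort --cull=stacktrace,pid --sort=-alloc_ts page_owner_full.txt sorted_page_owner.txt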
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/mm/slabinfo-gnuplot.sh
index 26e193ffd2a2..873a892147e5 100644
--- a/tools/vm/slabinfo-gnuplot.sh
+++ b/tools/mm/slabinfo-gnuplot.sh
@@ -150,7 +150,7 @@ do_preprocess()
let lines=3
out=`basename "$in"`"-slabs-by-loss"
`cat "$in" | grep -A "$lines" 'Slabs sorted by loss' |\
- egrep -iv '\-\-|Name|Slabs'\
+ grep -E -iv '\-\-|Name|Slabs'\
| awk '{print $1" "$4+$2*$3" "$4}' > "$out"`
if [ $? -eq 0 ]; then
do_slabs_plotting "$out"
@@ -159,7 +159,7 @@ do_preprocess()
let lines=3
out=`basename "$in"`"-slabs-by-size"
`cat "$in" | grep -A "$lines" 'Slabs sorted by size' |\
- egrep -iv '\-\-|Name|Slabs'\
+ grep -E -iv '\-\-|Name|Slabs'\
| awk '{print $1" "$4" "$4-$2*$3}' > "$out"`
if [ $? -eq 0 ]; then
do_slabs_plotting "$out"
diff --git a/tools/vm/slabinfo.c b/tools/mm/slabinfo.c
index 9b68658b6bb8..cfaeaea71042 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/mm/slabinfo.c
@@ -125,7 +125,7 @@ static void usage(void)
"-n|--numa Show NUMA information\n"
"-N|--lines=K Show the first K slabs\n"
"-o|--ops Show kmem_cache_ops\n"
- "-P|--partial Sort by number of partial slabs\n"
+ "-P|--partial Sort by number of partial slabs\n"
"-r|--report Detailed report on single slabs\n"
"-s|--shrink Shrink slabs\n"
"-S|--Size Sort by size\n"
@@ -157,9 +157,11 @@ static unsigned long read_obj(const char *name)
{
FILE *f = fopen(name, "r");
- if (!f)
+ if (!f) {
buffer[0] = 0;
- else {
+ if (errno == EACCES)
+ fatal("%s, Try using superuser\n", strerror(errno));
+ } else {
if (!fgets(buffer, sizeof(buffer), f))
buffer[0] = 0;
fclose(f);
@@ -233,6 +235,24 @@ static unsigned long read_slab_obj(struct slabinfo *s, const char *name)
return l;
}
+static unsigned long read_debug_slab_obj(struct slabinfo *s, const char *name)
+{
+ char x[128];
+ FILE *f;
+ size_t l;
+
+ snprintf(x, 128, "/sys/kernel/debug/slab/%s/%s", s->name, name);
+ f = fopen(x, "r");
+ if (!f) {
+ buffer[0] = 0;
+ l = 0;
+ } else {
+ l = fread(buffer, 1, sizeof(buffer), f);
+ buffer[l] = 0;
+ fclose(f);
+ }
+ return l;
+}
/*
* Put a size string together
@@ -409,14 +429,18 @@ static void show_tracking(struct slabinfo *s)
{
printf("\n%s: Kernel object allocation\n", s->name);
printf("-----------------------------------------------------------------------\n");
- if (read_slab_obj(s, "alloc_calls"))
+ if (read_debug_slab_obj(s, "alloc_traces"))
+ printf("%s", buffer);
+ else if (read_slab_obj(s, "alloc_calls"))
printf("%s", buffer);
else
printf("No Data\n");
printf("\n%s: Kernel object freeing\n", s->name);
printf("------------------------------------------------------------------------\n");
- if (read_slab_obj(s, "free_calls"))
+ if (read_debug_slab_obj(s, "free_traces"))
+ printf("%s", buffer);
+ else if (read_slab_obj(s, "free_calls"))
printf("%s", buffer);
else
printf("No Data\n");
@@ -1045,15 +1069,27 @@ static void sort_slabs(void)
for (s2 = s1 + 1; s2 < slabinfo + slabs; s2++) {
int result;
- if (sort_size)
- result = slab_size(s1) < slab_size(s2);
- else if (sort_active)
- result = slab_activity(s1) < slab_activity(s2);
- else if (sort_loss)
- result = slab_waste(s1) < slab_waste(s2);
- else if (sort_partial)
- result = s1->partial < s2->partial;
- else
+ if (sort_size) {
+ if (slab_size(s1) == slab_size(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_size(s1) < slab_size(s2);
+ } else if (sort_active) {
+ if (slab_activity(s1) == slab_activity(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_activity(s1) < slab_activity(s2);
+ } else if (sort_loss) {
+ if (slab_waste(s1) == slab_waste(s2))
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = slab_waste(s1) < slab_waste(s2);
+ } else if (sort_partial) {
+ if (s1->partial == s2->partial)
+ result = strcasecmp(s1->name, s2->name);
+ else
+ result = s1->partial < s2->partial;
+ } else
result = strcasecmp(s1->name, s2->name);
if (show_inverted)
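
The new fallback reads the debugfs files whose paths are built above; assuming CONFIG_SLUB_DEBUG and root privileges, they can also be inspected directly, e.g. for the kmalloc-64 cache:

    cat /sys/kernel/debug/slab/kmalloc-64/alloc_traces
    cat /sys/kernel/debug/slab/kmalloc-64/free_traces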
diff --git a/tools/net/ynl/cli.py b/tools/net/ynl/cli.py
new file mode 100755
index 000000000000..ffaa8038aa8c
--- /dev/null
+++ b/tools/net/ynl/cli.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+import argparse
+import json
+import pprint
+import time
+
+from lib import YnlFamily
+
+
+def main():
+ parser = argparse.ArgumentParser(description='YNL CLI sample')
+ parser.add_argument('--spec', dest='spec', type=str, required=True)
+ parser.add_argument('--schema', dest='schema', type=str)
+ parser.add_argument('--no-schema', action='store_true')
+ parser.add_argument('--json', dest='json_text', type=str)
+ parser.add_argument('--do', dest='do', type=str)
+ parser.add_argument('--dump', dest='dump', type=str)
+ parser.add_argument('--sleep', dest='sleep', type=int)
+ parser.add_argument('--subscribe', dest='ntf', type=str)
+ args = parser.parse_args()
+
+ if args.no_schema:
+ args.schema = ''
+
+ attrs = {}
+ if args.json_text:
+ attrs = json.loads(args.json_text)
+
+ ynl = YnlFamily(args.spec, args.schema)
+
+ if args.ntf:
+ ynl.ntf_subscribe(args.ntf)
+
+ if args.sleep:
+ time.sleep(args.sleep)
+
+ if args.do:
+ reply = ynl.do(args.do, attrs)
+ pprint.PrettyPrinter().pprint(reply)
+ if args.dump:
+ reply = ynl.dump(args.dump, attrs)
+ pprint.PrettyPrinter().pprint(reply)
+
+ if args.ntf:
+ ynl.check_ntf()
+ pprint.PrettyPrinter().pprint(ynl.async_msg_queue)
+
+
+if __name__ == "__main__":
+ main()
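
A rough invocation sketch; the spec and schema paths mirror the ones hard-coded in ethtool.py below, while the operation name and JSON payload are only placeholders:

    ./cli.py --spec ../../../Documentation/netlink/specs/ethtool.yaml \
             --schema ../../../Documentation/netlink/genetlink-legacy.yaml \
             --dump channels-get --json '{"header": {}}'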
diff --git a/tools/net/ynl/ethtool.py b/tools/net/ynl/ethtool.py
new file mode 100755
index 000000000000..6c9f7e31250c
--- /dev/null
+++ b/tools/net/ynl/ethtool.py
@@ -0,0 +1,424 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+import argparse
+import json
+import pprint
+import sys
+import re
+
+from lib import YnlFamily
+
+def args_to_req(ynl, op_name, args, req):
+ """
+ Verify and convert command-line arguments to the ynl-compatible request.
+ """
+ valid_attrs = ynl.operation_do_attributes(op_name)
+ valid_attrs.remove('header') # not user-provided
+
+ if len(args) == 0:
+ print(f'no attributes, expected: {valid_attrs}')
+ sys.exit(1)
+
+ i = 0
+ while i < len(args):
+ attr = args[i]
+ if i + 1 >= len(args):
+ print(f'expected value for \'{attr}\'')
+ sys.exit(1)
+
+ if attr not in valid_attrs:
+ print(f'invalid attribute \'{attr}\', expected: {valid_attrs}')
+ sys.exit(1)
+
+ val = args[i+1]
+ i += 2
+
+ req[attr] = val
+
+def print_field(reply, *desc):
+ """
+ Pretty-print a set of fields from the reply. desc specifies the
+ fields and the optional type (bool/yn).
+ """
+ if len(desc) == 0:
+ return print_field(reply, *zip(reply.keys(), reply.keys()))
+
+ for spec in desc:
+ try:
+ field, name, tp = spec
+ except:
+ field, name = spec
+ tp = 'int'
+
+ value = reply.get(field, None)
+ if tp == 'yn':
+ value = 'yes' if value else 'no'
+ elif tp == 'bool' or isinstance(value, bool):
+ value = 'on' if value else 'off'
+ else:
+ value = 'n/a' if value is None else value
+
+ print(f'{name}: {value}')
+
+def print_speed(name, value):
+ """
+ Print out the speed-like strings from the value dict.
+ """
+ speed_re = re.compile(r'[0-9]+base[^/]+/.+')
+ speed = [ k for k, v in value.items() if v and speed_re.match(k) ]
+ print(f'{name}: {" ".join(speed)}')
+
+def doit(ynl, args, op_name):
+ """
+ Prepare request header, parse arguments and doit.
+ """
+ req = {
+ 'header': {
+ 'dev-name': args.device,
+ },
+ }
+
+ args_to_req(ynl, op_name, args.args, req)
+ ynl.do(op_name, req)
+
+def dumpit(ynl, args, op_name, extra = {}):
+ """
+ Prepare request header, parse arguments and dumpit (filtering out the
+ devices we're not interested in).
+ """
+ reply = ynl.dump(op_name, { 'header': {} } | extra)
+ if not reply:
+ return {}
+
+ for msg in reply:
+ if msg['header']['dev-name'] == args.device:
+ if args.json:
+ pprint.PrettyPrinter().pprint(msg)
+ sys.exit(0)
+ msg.pop('header', None)
+ return msg
+
+ print(f"Not supported for device {args.device}")
+ sys.exit(1)
+
+def bits_to_dict(attr):
+ """
+ Convert ynl-formatted bitmask to a dict of bit=value.
+ """
+ ret = {}
+ if 'bits' not in attr:
+ return dict()
+ if 'bit' not in attr['bits']:
+ return dict()
+ for bit in attr['bits']['bit']:
+ if bit['name'] == '':
+ continue
+ name = bit['name']
+ value = bit.get('value', False)
+ ret[name] = value
+ return ret
+
+def main():
+ parser = argparse.ArgumentParser(description='ethtool wannabe')
+ parser.add_argument('--json', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--show-priv-flags', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--set-priv-flags', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--show-eee', action=argparse.BooleanOptionalAction)
+ parser.add_argument('--set-eee', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-a', '--show-pause', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-A', '--set-pause', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-c', '--show-coalesce', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-C', '--set-coalesce', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-g', '--show-ring', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-G', '--set-ring', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-k', '--show-features', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-K', '--set-features', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-l', '--show-channels', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-L', '--set-channels', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-T', '--show-time-stamping', action=argparse.BooleanOptionalAction)
+ parser.add_argument('-S', '--statistics', action=argparse.BooleanOptionalAction)
+ # TODO: --show-tunnels tunnel-info-get
+ # TODO: --show-module module-get
+ # TODO: --get-plca-cfg plca-get
+ # TODO: --get-plca-status plca-get-status
+ # TODO: --show-mm mm-get
+ # TODO: --show-fec fec-get
+ # TODO: --dump-module-eeprom module-eeprom-get
+ # TODO: pse-get
+ # TODO: rss-get
+ parser.add_argument('device', metavar='device', type=str)
+ parser.add_argument('args', metavar='args', type=str, nargs='*')
+ global args
+ args = parser.parse_args()
+
+ spec = '../../../Documentation/netlink/specs/ethtool.yaml'
+ schema = '../../../Documentation/netlink/genetlink-legacy.yaml'
+
+ ynl = YnlFamily(spec, schema)
+
+ if args.set_priv_flags:
+ # TODO: parse the bitmask
+ print("not implemented")
+ return
+
+ if args.set_eee:
+ return doit(ynl, args, 'eee-set')
+
+ if args.set_pause:
+ return doit(ynl, args, 'pause-set')
+
+ if args.set_coalesce:
+ return doit(ynl, args, 'coalesce-set')
+
+ if args.set_features:
+ # TODO: parse the bitmask
+ print("not implemented")
+ return
+
+ if args.set_channels:
+ return doit(ynl, args, 'channels-set')
+
+ if args.set_ring:
+ return doit(ynl, args, 'rings-set')
+
+ if args.show_priv_flags:
+ flags = bits_to_dict(dumpit(ynl, args, 'privflags-get')['flags'])
+ print_field(flags)
+ return
+
+ if args.show_eee:
+ eee = dumpit(ynl, args, 'eee-get')
+ ours = bits_to_dict(eee['modes-ours'])
+ peer = bits_to_dict(eee['modes-peer'])
+
+ if 'enabled' in eee:
+ status = 'enabled' if eee['enabled'] else 'disabled'
+ if 'active' in eee and eee['active']:
+ status = status + ' - active'
+ else:
+ status = status + ' - inactive'
+ else:
+ status = 'not supported'
+
+ print(f'EEE status: {status}')
+ print_field(eee, ('tx-lpi-timer', 'Tx LPI'))
+ print_speed('Advertised EEE link modes', ours)
+ print_speed('Link partner advertised EEE link modes', peer)
+
+ return
+
+ if args.show_pause:
+ print_field(dumpit(ynl, args, 'pause-get'),
+ ('autoneg', 'Autonegotiate', 'bool'),
+ ('rx', 'RX', 'bool'),
+ ('tx', 'TX', 'bool'))
+ return
+
+ if args.show_coalesce:
+ print_field(dumpit(ynl, args, 'coalesce-get'))
+ return
+
+ if args.show_features:
+ reply = dumpit(ynl, args, 'features-get')
+ available = bits_to_dict(reply['hw'])
+ requested = bits_to_dict(reply['wanted']).keys()
+ active = bits_to_dict(reply['active']).keys()
+ never_changed = bits_to_dict(reply['nochange']).keys()
+
+ for f in sorted(available):
+ value = "off"
+ if f in active:
+ value = "on"
+
+ fixed = ""
+ if f not in available or f in never_changed:
+ fixed = " [fixed]"
+
+ req = ""
+ if f in requested:
+ if f in active:
+ req = " [requested on]"
+ else:
+ req = " [requested off]"
+
+ print(f'{f}: {value}{fixed}{req}')
+
+ return
+
+ if args.show_channels:
+ reply = dumpit(ynl, args, 'channels-get')
+ print(f'Channel parameters for {args.device}:')
+
+ print(f'Pre-set maximums:')
+ print_field(reply,
+ ('rx-max', 'RX'),
+ ('tx-max', 'TX'),
+ ('other-max', 'Other'),
+ ('combined-max', 'Combined'))
+
+ print(f'Current hardware settings:')
+ print_field(reply,
+ ('rx-count', 'RX'),
+ ('tx-count', 'TX'),
+ ('other-count', 'Other'),
+ ('combined-count', 'Combined'))
+
+ return
+
+ if args.show_ring:
+ reply = dumpit(ynl, args, 'channels-get')
+
+ print(f'Ring parameters for {args.device}:')
+
+ print(f'Pre-set maximums:')
+ print_field(reply,
+ ('rx-max', 'RX'),
+ ('rx-mini-max', 'RX Mini'),
+ ('rx-jumbo-max', 'RX Jumbo'),
+ ('tx-max', 'TX'))
+
+ print(f'Current hardware settings:')
+ print_field(reply,
+ ('rx', 'RX'),
+ ('rx-mini', 'RX Mini'),
+ ('rx-jumbo', 'RX Jumbo'),
+ ('tx', 'TX'))
+
+ print_field(reply,
+ ('rx-buf-len', 'RX Buf Len'),
+ ('cqe-size', 'CQE Size'),
+ ('tx-push', 'TX Push', 'bool'))
+
+ return
+
+ if args.statistics:
+ print(f'NIC statistics:')
+
+ # TODO: pass id?
+ strset = dumpit(ynl, args, 'strset-get')
+ pprint.PrettyPrinter().pprint(strset)
+
+ req = {
+ 'groups': {
+ 'size': 1,
+ 'bits': {
+ 'bit':
+ # TODO: support passing the bitmask
+ #[
+ #{ 'name': 'eth-phy', 'value': True },
+ { 'name': 'eth-mac', 'value': True },
+ #{ 'name': 'eth-ctrl', 'value': True },
+ #{ 'name': 'rmon', 'value': True },
+ #],
+ },
+ },
+ }
+
+ rsp = dumpit(ynl, args, 'stats-get', req)
+ pprint.PrettyPrinter().pprint(rsp)
+ return
+
+ if args.show_time_stamping:
+ tsinfo = dumpit(ynl, args, 'tsinfo-get')
+
+ print(f'Time stamping parameters for {args.device}:')
+
+ print('Capabilities:')
+ [print(f'\t{v}') for v in bits_to_dict(tsinfo['timestamping'])]
+
+ print(f'PTP Hardware Clock: {tsinfo["phc-index"]}')
+
+ print('Hardware Transmit Timestamp Modes:')
+ [print(f'\t{v}') for v in bits_to_dict(tsinfo['tx-types'])]
+
+ print('Hardware Receive Filter Modes:')
+ [print(f'\t{v}') for v in bits_to_dict(tsinfo['rx-filters'])]
+ return
+
+ print(f'Settings for {args.device}:')
+ linkmodes = dumpit(ynl, args, 'linkmodes-get')
+ ours = bits_to_dict(linkmodes['ours'])
+
+ supported_ports = ('TP', 'AUI', 'BNC', 'MII', 'FIBRE', 'Backplane')
+ ports = [ p for p in supported_ports if ours.get(p, False)]
+ print(f'Supported ports: [ {" ".join(ports)} ]')
+
+ print_speed('Supported link modes', ours)
+
+ print_field(ours, ('Pause', 'Supported pause frame use', 'yn'))
+ print_field(ours, ('Autoneg', 'Supports auto-negotiation', 'yn'))
+
+ supported_fec = ('None', 'PS', 'BASER', 'LLRS')
+ fec = [ p for p in supported_fec if ours.get(p, False)]
+ fec_str = " ".join(fec)
+ if len(fec) == 0:
+ fec_str = "Not reported"
+
+ print(f'Supported FEC modes: {fec_str}')
+
+ speed = 'Unknown!'
+ if linkmodes['speed'] > 0 and linkmodes['speed'] < 0xffffffff:
+ speed = f'{linkmodes["speed"]}Mb/s'
+ print(f'Speed: {speed}')
+
+ duplex_modes = {
+ 0: 'Half',
+ 1: 'Full',
+ }
+ duplex = duplex_modes.get(linkmodes["duplex"], None)
+ if not duplex:
+ duplex = f'Unknown! ({linkmodes["duplex"]})'
+ print(f'Duplex: {duplex}')
+
+ autoneg = "off"
+ if linkmodes.get("autoneg", 0) != 0:
+ autoneg = "on"
+ print(f'Auto-negotiation: {autoneg}')
+
+ ports = {
+ 0: 'Twisted Pair',
+ 1: 'AUI',
+ 2: 'MII',
+ 3: 'FIBRE',
+ 4: 'BNC',
+ 5: 'Directly Attached Copper',
+ 0xef: 'None',
+ }
+ linkinfo = dumpit(ynl, args, 'linkinfo-get')
+ print(f'Port: {ports.get(linkinfo["port"], "Other")}')
+
+ print_field(linkinfo, ('phyaddr', 'PHYAD'))
+
+ transceiver = {
+ 0: 'Internal',
+ 1: 'External',
+ }
+ print(f'Transceiver: {transceiver.get(linkinfo["transceiver"], "Unknown")}')
+
+ mdix_ctrl = {
+ 1: 'off',
+ 2: 'on',
+ }
+ mdix = mdix_ctrl.get(linkinfo['tp-mdix-ctrl'], None)
+ if mdix:
+ mdix = mdix + ' (forced)'
+ else:
+ mdix = mdix_ctrl.get(linkinfo['tp-mdix'], 'Unknown (auto)')
+ print(f'MDI-X: {mdix}')
+
+ debug = dumpit(ynl, args, 'debug-get')
+ msgmask = bits_to_dict(debug.get("msgmask", [])).keys()
+ print(f'Current message level: {" ".join(msgmask)}')
+
+ linkstate = dumpit(ynl, args, 'linkstate-get')
+ detected_states = {
+ 0: 'no',
+ 1: 'yes',
+ }
+ # TODO: wol-get
+ detected = detected_states.get(linkstate['link'], 'unknown')
+ print(f'Link detected: {detected}')
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/net/ynl/lib/.gitignore b/tools/net/ynl/lib/.gitignore
new file mode 100644
index 000000000000..c18dd8d83cee
--- /dev/null
+++ b/tools/net/ynl/lib/.gitignore
@@ -0,0 +1 @@
+__pycache__/
diff --git a/tools/net/ynl/lib/__init__.py b/tools/net/ynl/lib/__init__.py
new file mode 100644
index 000000000000..4b3797fe784b
--- /dev/null
+++ b/tools/net/ynl/lib/__init__.py
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+from .nlspec import SpecAttr, SpecAttrSet, SpecEnumEntry, SpecEnumSet, \
+ SpecFamily, SpecOperation
+from .ynl import YnlFamily
+
+__all__ = ["SpecAttr", "SpecAttrSet", "SpecEnumEntry", "SpecEnumSet",
+ "SpecFamily", "SpecOperation", "YnlFamily"]
diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py
new file mode 100644
index 000000000000..a0241add3839
--- /dev/null
+++ b/tools/net/ynl/lib/nlspec.py
@@ -0,0 +1,484 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+import collections
+import importlib
+import os
+import yaml
+
+
+# To be loaded dynamically as needed
+jsonschema = None
+
+
+class SpecElement:
+ """Netlink spec element.
+
+ Abstract element of the Netlink spec. Implements the dictionary interface
+ for access to the raw spec. Supports iterative resolution of dependencies
+ across elements and class inheritance levels. The elements of the spec
+ may refer to each other, and although loops should be very rare, having
+ to maintain correct ordering of instantiation is painful, so the resolve()
+ method should be used to perform parts of init which require access to
+ other parts of the spec.
+
+ Attributes:
+ yaml raw spec as loaded from the spec file
+ family back reference to the full family
+
+ name name of the entity as listed in the spec (optional)
+ ident_name name which can be safely used as identifier in code (optional)
+ """
+ def __init__(self, family, yaml):
+ self.yaml = yaml
+ self.family = family
+
+ if 'name' in self.yaml:
+ self.name = self.yaml['name']
+ self.ident_name = self.name.replace('-', '_')
+
+ self._super_resolved = False
+ family.add_unresolved(self)
+
+ def __getitem__(self, key):
+ return self.yaml[key]
+
+ def __contains__(self, key):
+ return key in self.yaml
+
+ def get(self, key, default=None):
+ return self.yaml.get(key, default)
+
+ def resolve_up(self, up):
+ if not self._super_resolved:
+ up.resolve()
+ self._super_resolved = True
+
+ def resolve(self):
+ pass
+
+
+class SpecEnumEntry(SpecElement):
+ """ Entry within an enum declared in the Netlink spec.
+
+ Attributes:
+ doc documentation string
+ enum_set back reference to the enum
+ value numerical value of this enum (use accessors in most situations!)
+
+ Methods:
+ raw_value raw value, i.e. the id in the enum, unlike user value which is a mask for flags
+ user_value user value, same as raw value for enums, for flags it's the mask
+ """
+ def __init__(self, enum_set, yaml, prev, value_start):
+ if isinstance(yaml, str):
+ yaml = {'name': yaml}
+ super().__init__(enum_set.family, yaml)
+
+ self.doc = yaml.get('doc', '')
+ self.enum_set = enum_set
+
+ if 'value' in yaml:
+ self.value = yaml['value']
+ elif prev:
+ self.value = prev.value + 1
+ else:
+ self.value = value_start
+
+ def has_doc(self):
+ return bool(self.doc)
+
+ def raw_value(self):
+ return self.value
+
+ def user_value(self, as_flags=None):
+ if self.enum_set['type'] == 'flags' or as_flags:
+ return 1 << self.value
+ else:
+ return self.value
+
+
+class SpecEnumSet(SpecElement):
+ """ Enum type
+
+ Represents an enumeration (list of numerical constants)
+ as declared in the "definitions" section of the spec.
+
+ Attributes:
+ type enum or flags
+ entries entries by name
+ entries_by_val entries by value
+ Methods:
+ get_mask for flags compute the mask of all defined values
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.type = yaml['type']
+
+ prev_entry = None
+ value_start = self.yaml.get('value-start', 0)
+ self.entries = dict()
+ self.entries_by_val = dict()
+ for entry in self.yaml['entries']:
+ e = self.new_entry(entry, prev_entry, value_start)
+ self.entries[e.name] = e
+ self.entries_by_val[e.raw_value()] = e
+ prev_entry = e
+
+ def new_entry(self, entry, prev_entry, value_start):
+ return SpecEnumEntry(self, entry, prev_entry, value_start)
+
+ def has_doc(self):
+ if 'doc' in self.yaml:
+ return True
+ for entry in self.entries.values():
+ if entry.has_doc():
+ return True
+ return False
+
+ def get_mask(self, as_flags=None):
+ mask = 0
+ for e in self.entries.values():
+ mask += e.user_value(as_flags)
+ return mask
+
+
+class SpecAttr(SpecElement):
+ """ Single Netlink atttribute type
+
+ Represents a single attribute type within an attr space.
+
+ Attributes:
+ value numerical ID when serialized
+ attr_set Attribute Set containing this attr
+ is_multi bool, attr may repeat multiple times
+ struct_name string, name of struct definition
+ sub_type string, name of sub type
+ """
+ def __init__(self, family, attr_set, yaml, value):
+ super().__init__(family, yaml)
+
+ self.value = value
+ self.attr_set = attr_set
+ self.is_multi = yaml.get('multi-attr', False)
+ self.struct_name = yaml.get('struct')
+ self.sub_type = yaml.get('sub-type')
+ self.byte_order = yaml.get('byte-order')
+
+
+class SpecAttrSet(SpecElement):
+ """ Netlink Attribute Set class.
+
+ Represents an ID space of attributes within Netlink.
+
+ Note that unlike other elements, which expose the contents of the raw spec
+ via the dictionary interface, the Attribute Set exposes attributes by name.
+
+ Attributes:
+ attrs ordered dict of all attributes (indexed by name)
+ attrs_by_val ordered dict of all attributes (indexed by value)
+ subset_of parent set if this is a subset, otherwise None
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.subset_of = self.yaml.get('subset-of', None)
+
+ self.attrs = collections.OrderedDict()
+ self.attrs_by_val = collections.OrderedDict()
+
+ if self.subset_of is None:
+ val = 1
+ for elem in self.yaml['attributes']:
+ if 'value' in elem:
+ val = elem['value']
+
+ attr = self.new_attr(elem, val)
+ self.attrs[attr.name] = attr
+ self.attrs_by_val[attr.value] = attr
+ val += 1
+ else:
+ real_set = family.attr_sets[self.subset_of]
+ for elem in self.yaml['attributes']:
+ attr = real_set[elem['name']]
+ self.attrs[attr.name] = attr
+ self.attrs_by_val[attr.value] = attr
+
+ def new_attr(self, elem, value):
+ return SpecAttr(self.family, self, elem, value)
+
+ def __getitem__(self, key):
+ return self.attrs[key]
+
+ def __contains__(self, key):
+ return key in self.attrs
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def items(self):
+ return self.attrs.items()
+
+
+class SpecStructMember(SpecElement):
+ """Struct member attribute
+
+ Represents a single struct member attribute.
+
+ Attributes:
+ type string, type of the member attribute
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+ self.type = yaml['type']
+
+
+class SpecStruct(SpecElement):
+ """Netlink struct type
+
+ Represents a C struct definition.
+
+ Attributes:
+ members ordered list of struct members
+ """
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ self.members = []
+ for member in yaml.get('members', []):
+ self.members.append(self.new_member(family, member))
+
+ def new_member(self, family, elem):
+ return SpecStructMember(family, elem)
+
+ def __iter__(self):
+ yield from self.members
+
+ def items(self):
+ return self.members.items()
+
+
+class SpecOperation(SpecElement):
+ """Netlink Operation
+
+ Information about a single Netlink operation.
+
+ Attributes:
+ value numerical ID when serialized, None if req/rsp values differ
+
+ req_value numerical ID when serialized, user -> kernel
+ rsp_value numerical ID when serialized, user <- kernel
+ is_call bool, whether the operation is a call
+ is_async bool, whether the operation is a notification
+ is_resv bool, whether the operation does not exist (it's just a reserved ID)
+ attr_set attribute set used by the operation (resolved from its name)
+ fixed_header string, optional name of fixed header struct
+
+ yaml raw spec as loaded from the spec file
+ """
+ def __init__(self, family, yaml, req_value, rsp_value):
+ super().__init__(family, yaml)
+
+ self.value = req_value if req_value == rsp_value else None
+ self.req_value = req_value
+ self.rsp_value = rsp_value
+
+ self.is_call = 'do' in yaml or 'dump' in yaml
+ self.is_async = 'notify' in yaml or 'event' in yaml
+ self.is_resv = not self.is_async and not self.is_call
+ self.fixed_header = self.yaml.get('fixed-header', family.fixed_header)
+
+ # Added by resolve:
+ self.attr_set = None
+ delattr(self, "attr_set")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if 'attribute-set' in self.yaml:
+ attr_set_name = self.yaml['attribute-set']
+ elif 'notify' in self.yaml:
+ msg = self.family.msgs[self.yaml['notify']]
+ attr_set_name = msg['attribute-set']
+ elif self.is_resv:
+ attr_set_name = ''
+ else:
+ raise Exception(f"Can't resolve attribute set for op '{self.name}'")
+ if attr_set_name:
+ self.attr_set = self.family.attr_sets[attr_set_name]
+
+
+class SpecFamily(SpecElement):
+ """ Netlink Family Spec class.
+
+ Netlink family information loaded from a spec (e.g. in YAML).
+ Takes care of unfolding implicit information which can be skipped
+ in the spec itself for brevity.
+
+ The class can be used like a dictionary to access the raw spec
+ elements, but that's usually a bad idea.
+
+ Attributes:
+ proto protocol type (e.g. genetlink)
+ license spec license (loaded from an SPDX tag on the spec)
+
+ attr_sets dict of attribute sets
+ msgs dict of all messages (indexed by name)
+ req_by_value dict of request messages (indexed by request value)
+ rsp_by_value dict of response messages (indexed by response value)
+ ops dict of all valid requests / responses
+ consts dict of all constants/enums
+ fixed_header string, optional name of family default fixed header struct
+ """
+ def __init__(self, spec_path, schema_path=None):
+ with open(spec_path, "r") as stream:
+ prefix = '# SPDX-License-Identifier: '
+ first = stream.readline().strip()
+ if not first.startswith(prefix):
+ raise Exception('SPDX license tag required in the spec')
+ self.license = first[len(prefix):]
+
+ stream.seek(0)
+ spec = yaml.safe_load(stream)
+
+ self._resolution_list = []
+
+ super().__init__(self, spec)
+
+ self.proto = self.yaml.get('protocol', 'genetlink')
+
+ if schema_path is None:
+ schema_path = os.path.dirname(os.path.dirname(spec_path)) + f'/{self.proto}.yaml'
+ if schema_path:
+ global jsonschema
+
+ with open(schema_path, "r") as stream:
+ schema = yaml.safe_load(stream)
+
+ if jsonschema is None:
+ jsonschema = importlib.import_module("jsonschema")
+
+ jsonschema.validate(self.yaml, schema)
+
+ self.attr_sets = collections.OrderedDict()
+ self.msgs = collections.OrderedDict()
+ self.req_by_value = collections.OrderedDict()
+ self.rsp_by_value = collections.OrderedDict()
+ self.ops = collections.OrderedDict()
+ self.consts = collections.OrderedDict()
+
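+ # Resolve all elements iteratively: anything whose resolve() still hits a
+ # missing dependency gets re-queued for the next pass; stop and re-raise the
+ # last error once a full pass makes no progress.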
+ last_exception = None
+ while len(self._resolution_list) > 0:
+ resolved = []
+ unresolved = self._resolution_list
+ self._resolution_list = []
+
+ for elem in unresolved:
+ try:
+ elem.resolve()
+ except (KeyError, AttributeError) as e:
+ self._resolution_list.append(elem)
+ last_exception = e
+ continue
+
+ resolved.append(elem)
+
+ if len(resolved) == 0:
+ raise last_exception
+
+ def new_enum(self, elem):
+ return SpecEnumSet(self, elem)
+
+ def new_attr_set(self, elem):
+ return SpecAttrSet(self, elem)
+
+ def new_struct(self, elem):
+ return SpecStruct(self, elem)
+
+ def new_operation(self, elem, req_val, rsp_val):
+ return SpecOperation(self, elem, req_val, rsp_val)
+
+ def add_unresolved(self, elem):
+ self._resolution_list.append(elem)
+
+ def _dictify_ops_unified(self):
+ self.fixed_header = self.yaml['operations'].get('fixed-header')
+ val = 1
+ for elem in self.yaml['operations']['list']:
+ if 'value' in elem:
+ val = elem['value']
+
+ op = self.new_operation(elem, val, val)
+ val += 1
+
+ self.msgs[op.name] = op
+
+ def _dictify_ops_directional(self):
+ self.fixed_header = self.yaml['operations'].get('fixed-header')
+ req_val = rsp_val = 1
+ for elem in self.yaml['operations']['list']:
+ if 'notify' in elem:
+ if 'value' in elem:
+ rsp_val = elem['value']
+ req_val_next = req_val
+ rsp_val_next = rsp_val + 1
+ req_val = None
+ elif 'do' in elem or 'dump' in elem:
+ mode = elem['do'] if 'do' in elem else elem['dump']
+
+ v = mode.get('request', {}).get('value', None)
+ if v:
+ req_val = v
+ v = mode.get('reply', {}).get('value', None)
+ if v:
+ rsp_val = v
+
+ rsp_inc = 1 if 'reply' in mode else 0
+ req_val_next = req_val + 1
+ rsp_val_next = rsp_val + rsp_inc
+ else:
+ raise Exception("Can't parse directional ops")
+
+ op = self.new_operation(elem, req_val, rsp_val)
+ req_val = req_val_next
+ rsp_val = rsp_val_next
+
+ self.msgs[op.name] = op
+
+ def find_operation(self, name):
+ """
+ For a given operation name, find and return operation spec.
+ """
+ for op in self.yaml['operations']['list']:
+ if name == op['name']:
+ return op
+ return None
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ definitions = self.yaml.get('definitions', [])
+ for elem in definitions:
+ if elem['type'] == 'enum' or elem['type'] == 'flags':
+ self.consts[elem['name']] = self.new_enum(elem)
+ elif elem['type'] == 'struct':
+ self.consts[elem['name']] = self.new_struct(elem)
+ else:
+ self.consts[elem['name']] = elem
+
+ for elem in self.yaml['attribute-sets']:
+ attr_set = self.new_attr_set(elem)
+ self.attr_sets[elem['name']] = attr_set
+
+ msg_id_model = self.yaml['operations'].get('enum-model', 'unified')
+ if msg_id_model == 'unified':
+ self._dictify_ops_unified()
+ elif msg_id_model == 'directional':
+ self._dictify_ops_directional()
+
+ for op in self.msgs.values():
+ if op.req_value is not None:
+ self.req_by_value[op.req_value] = op
+ if op.rsp_value is not None:
+ self.rsp_by_value[op.rsp_value] = op
+ if not op.is_async and 'attribute-set' in op:
+ self.ops[op.name] = op
diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py
new file mode 100644
index 000000000000..aa77bcae4807
--- /dev/null
+++ b/tools/net/ynl/lib/ynl.py
@@ -0,0 +1,607 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+import functools
+import os
+import random
+import socket
+import struct
+import yaml
+
+from .nlspec import SpecFamily
+
+#
+# Generic Netlink code which should really be in some library, but I can't quickly find one.
+#
+
+
+class Netlink:
+ # Netlink socket
+ SOL_NETLINK = 270
+
+ NETLINK_ADD_MEMBERSHIP = 1
+ NETLINK_CAP_ACK = 10
+ NETLINK_EXT_ACK = 11
+
+ # Netlink message
+ NLMSG_ERROR = 2
+ NLMSG_DONE = 3
+
+ NLM_F_REQUEST = 1
+ NLM_F_ACK = 4
+ NLM_F_ROOT = 0x100
+ NLM_F_MATCH = 0x200
+ NLM_F_APPEND = 0x800
+
+ NLM_F_CAPPED = 0x100
+ NLM_F_ACK_TLVS = 0x200
+
+ NLM_F_DUMP = NLM_F_ROOT | NLM_F_MATCH
+
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+
+ NLA_TYPE_MASK = NLA_F_NESTED | NLA_F_NET_BYTEORDER
+
+ # Genetlink defines
+ NETLINK_GENERIC = 16
+
+ GENL_ID_CTRL = 0x10
+
+ # nlctrl
+ CTRL_CMD_GETFAMILY = 3
+
+ CTRL_ATTR_FAMILY_ID = 1
+ CTRL_ATTR_FAMILY_NAME = 2
+ CTRL_ATTR_MAXATTR = 5
+ CTRL_ATTR_MCAST_GROUPS = 7
+
+ CTRL_ATTR_MCAST_GRP_NAME = 1
+ CTRL_ATTR_MCAST_GRP_ID = 2
+
+ # Extack types
+ NLMSGERR_ATTR_MSG = 1
+ NLMSGERR_ATTR_OFFS = 2
+ NLMSGERR_ATTR_COOKIE = 3
+ NLMSGERR_ATTR_POLICY = 4
+ NLMSGERR_ATTR_MISS_TYPE = 5
+ NLMSGERR_ATTR_MISS_NEST = 6
+
+
+class NlError(Exception):
+ def __init__(self, nl_msg):
+ self.nl_msg = nl_msg
+
+ def __str__(self):
+ return f"Netlink error: {os.strerror(-self.nl_msg.error)}\n{self.nl_msg}"
+
+
+class NlAttr:
+ type_formats = { 'u8' : ('B', 1), 's8' : ('b', 1),
+ 'u16': ('H', 2), 's16': ('h', 2),
+ 'u32': ('I', 4), 's32': ('i', 4),
+ 'u64': ('Q', 8), 's64': ('q', 8) }
+
+ def __init__(self, raw, offset):
+ self._len, self._type = struct.unpack("HH", raw[offset:offset + 4])
+ self.type = self._type & ~Netlink.NLA_TYPE_MASK
+ self.payload_len = self._len
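+ # nla_len includes the 4-byte attribute header; round it up to the 4-byte
+ # netlink alignment (NLA_ALIGNTO) to find the start of the next attribute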
+ self.full_len = (self.payload_len + 3) & ~3
+ self.raw = raw[offset + 4:offset + self.payload_len]
+
+ def format_byte_order(byte_order):
+ if byte_order:
+ return ">" if byte_order == "big-endian" else "<"
+ return ""
+
+ def as_u8(self):
+ return struct.unpack("B", self.raw)[0]
+
+ def as_u16(self, byte_order=None):
+ endian = NlAttr.format_byte_order(byte_order)
+ return struct.unpack(f"{endian}H", self.raw)[0]
+
+ def as_u32(self, byte_order=None):
+ endian = NlAttr.format_byte_order(byte_order)
+ return struct.unpack(f"{endian}I", self.raw)[0]
+
+ def as_u64(self, byte_order=None):
+ endian = NlAttr.format_byte_order(byte_order)
+ return struct.unpack(f"{endian}Q", self.raw)[0]
+
+ def as_strz(self):
+ return self.raw.decode('ascii')[:-1]
+
+ def as_bin(self):
+ return self.raw
+
+ def as_c_array(self, type):
+ format, _ = self.type_formats[type]
+ return [x[0] for x in struct.iter_unpack(format, self.raw)]
+
+ def as_struct(self, members):
+ value = dict()
+ offset = 0
+ for m in members:
+ # TODO: handle non-scalar members
+ format, size = self.type_formats[m.type]
+ decoded = struct.unpack_from(format, self.raw, offset)
+ offset += size
+ value[m.name] = decoded[0]
+ return value
+
+ def __repr__(self):
+ return f"[type:{self.type} len:{self._len}] {self.raw}"
+
+
+class NlAttrs:
+ def __init__(self, msg):
+ self.attrs = []
+
+ offset = 0
+ while offset < len(msg):
+ attr = NlAttr(msg, offset)
+ offset += attr.full_len
+ self.attrs.append(attr)
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def __repr__(self):
+ msg = ''
+ for a in self.attrs:
+ if msg:
+ msg += '\n'
+ msg += repr(a)
+ return msg
+
+
+class NlMsg:
+ def __init__(self, msg, offset, attr_space=None):
+ self.hdr = msg[offset:offset + 16]
+
+ self.nl_len, self.nl_type, self.nl_flags, self.nl_seq, self.nl_portid = \
+ struct.unpack("IHHII", self.hdr)
+
+ self.raw = msg[offset + 16:offset + self.nl_len]
+
+ self.error = 0
+ self.done = 0
+
+ extack_off = None
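+ # any extack TLVs follow the nlmsgerr struct (4-byte error code plus the
+ # echoed 16-byte request nlmsghdr) for errors, or the 4-byte return code
+ # for DONE messages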
+ if self.nl_type == Netlink.NLMSG_ERROR:
+ self.error = struct.unpack("i", self.raw[0:4])[0]
+ self.done = 1
+ extack_off = 20
+ elif self.nl_type == Netlink.NLMSG_DONE:
+ self.done = 1
+ extack_off = 4
+
+ self.extack = None
+ if self.nl_flags & Netlink.NLM_F_ACK_TLVS and extack_off:
+ self.extack = dict()
+ extack_attrs = NlAttrs(self.raw[extack_off:])
+ for extack in extack_attrs:
+ if extack.type == Netlink.NLMSGERR_ATTR_MSG:
+ self.extack['msg'] = extack.as_strz()
+ elif extack.type == Netlink.NLMSGERR_ATTR_MISS_TYPE:
+ self.extack['miss-type'] = extack.as_u32()
+ elif extack.type == Netlink.NLMSGERR_ATTR_MISS_NEST:
+ self.extack['miss-nest'] = extack.as_u32()
+ elif extack.type == Netlink.NLMSGERR_ATTR_OFFS:
+ self.extack['bad-attr-offs'] = extack.as_u32()
+ else:
+ if 'unknown' not in self.extack:
+ self.extack['unknown'] = []
+ self.extack['unknown'].append(extack)
+
+ if attr_space:
+ # We don't have the ability to parse nests yet, so only do global
+ if 'miss-type' in self.extack and 'miss-nest' not in self.extack:
+ miss_type = self.extack['miss-type']
+ if miss_type in attr_space.attrs_by_val:
+ spec = attr_space.attrs_by_val[miss_type]
+ desc = spec['name']
+ if 'doc' in spec:
+ desc += f" ({spec['doc']})"
+ self.extack['miss-type'] = desc
+
+ def __repr__(self):
+ msg = f"nl_len = {self.nl_len} ({len(self.raw)}) nl_flags = 0x{self.nl_flags:x} nl_type = {self.nl_type}\n"
+ if self.error:
+ msg += '\terror: ' + str(self.error)
+ if self.extack:
+ msg += '\textack: ' + repr(self.extack)
+ return msg
+
+
+class NlMsgs:
+ def __init__(self, data, attr_space=None):
+ self.msgs = []
+
+ offset = 0
+ while offset < len(data):
+ msg = NlMsg(data, offset, attr_space=attr_space)
+ offset += msg.nl_len
+ self.msgs.append(msg)
+
+ def __iter__(self):
+ yield from self.msgs
+
+
+genl_family_name_to_id = None
+
+
+def _genl_msg(nl_type, nl_flags, genl_cmd, genl_version, seq=None):
+ # we prepend length in _genl_msg_finalize()
+ if seq is None:
+ seq = random.randint(1, 1024)
+ nlmsg = struct.pack("HHII", nl_type, nl_flags, seq, 0)
+ genlmsg = struct.pack("BBH", genl_cmd, genl_version, 0)
+ return nlmsg + genlmsg
+
+
+def _genl_msg_finalize(msg):
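+ # prepend nlmsg_len; the extra 4 accounts for the length field itself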
+ return struct.pack("I", len(msg) + 4) + msg
+
+
+def _genl_load_families():
+ with socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, Netlink.NETLINK_GENERIC) as sock:
+ sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
+
+ msg = _genl_msg(Netlink.GENL_ID_CTRL,
+ Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK | Netlink.NLM_F_DUMP,
+ Netlink.CTRL_CMD_GETFAMILY, 1)
+ msg = _genl_msg_finalize(msg)
+
+ sock.send(msg, 0)
+
+ global genl_family_name_to_id
+ genl_family_name_to_id = dict()
+
+ while True:
+ reply = sock.recv(128 * 1024)
+ nms = NlMsgs(reply)
+ for nl_msg in nms:
+ if nl_msg.error:
+ print("Netlink error:", nl_msg.error)
+ return
+ if nl_msg.done:
+ return
+
+ gm = GenlMsg(nl_msg)
+ fam = dict()
+ for attr in gm.raw_attrs:
+ if attr.type == Netlink.CTRL_ATTR_FAMILY_ID:
+ fam['id'] = attr.as_u16()
+ elif attr.type == Netlink.CTRL_ATTR_FAMILY_NAME:
+ fam['name'] = attr.as_strz()
+ elif attr.type == Netlink.CTRL_ATTR_MAXATTR:
+ fam['maxattr'] = attr.as_u32()
+ elif attr.type == Netlink.CTRL_ATTR_MCAST_GROUPS:
+ fam['mcast'] = dict()
+ for entry in NlAttrs(attr.raw):
+ mcast_name = None
+ mcast_id = None
+ for entry_attr in NlAttrs(entry.raw):
+ if entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_NAME:
+ mcast_name = entry_attr.as_strz()
+ elif entry_attr.type == Netlink.CTRL_ATTR_MCAST_GRP_ID:
+ mcast_id = entry_attr.as_u32()
+ if mcast_name and mcast_id is not None:
+ fam['mcast'][mcast_name] = mcast_id
+ if 'name' in fam and 'id' in fam:
+ genl_family_name_to_id[fam['name']] = fam
+
+
+class GenlMsg:
+ def __init__(self, nl_msg, fixed_header_members=[]):
+ self.nl = nl_msg
+
+ self.hdr = nl_msg.raw[0:4]
+ offset = 4
+
+ self.genl_cmd, self.genl_version, _ = struct.unpack("BBH", self.hdr)
+
+ self.fixed_header_attrs = dict()
+ for m in fixed_header_members:
+ format, size = NlAttr.type_formats[m.type]
+ decoded = struct.unpack_from(format, nl_msg.raw, offset)
+ offset += size
+ self.fixed_header_attrs[m.name] = decoded[0]
+
+ self.raw = nl_msg.raw[offset:]
+ self.raw_attrs = NlAttrs(self.raw)
+
+ def __repr__(self):
+ msg = repr(self.nl)
+ msg += f"\tgenl_cmd = {self.genl_cmd} genl_ver = {self.genl_version}\n"
+ for a in self.raw_attrs:
+ msg += '\t\t' + repr(a) + '\n'
+ return msg
+
+
+class GenlFamily:
+ def __init__(self, family_name):
+ self.family_name = family_name
+
+ global genl_family_name_to_id
+ if genl_family_name_to_id is None:
+ _genl_load_families()
+
+ self.genl_family = genl_family_name_to_id[family_name]
+ self.family_id = genl_family_name_to_id[family_name]['id']
+
+
+#
+# YNL implementation details.
+#
+
+
+class YnlFamily(SpecFamily):
+ def __init__(self, def_path, schema=None):
+ super().__init__(def_path, schema)
+
+ self.include_raw = False
+
+ self.sock = socket.socket(socket.AF_NETLINK, socket.SOCK_RAW, Netlink.NETLINK_GENERIC)
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_CAP_ACK, 1)
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_EXT_ACK, 1)
+
+ self.async_msg_ids = set()
+ self.async_msg_queue = []
+
+ for msg in self.msgs.values():
+ if msg.is_async:
+ self.async_msg_ids.add(msg.rsp_value)
+
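+ # expose each operation from the spec as a method on this object,
+ # e.g. (illustratively) a "rings-get" op becomes ynl.rings_get(vals)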
+ for op_name, op in self.ops.items():
+ bound_f = functools.partial(self._op, op_name)
+ setattr(self, op.ident_name, bound_f)
+
+ try:
+ self.family = GenlFamily(self.yaml['name'])
+ except KeyError:
+ raise Exception(f"Family '{self.yaml['name']}' not supported by the kernel")
+
+ def ntf_subscribe(self, mcast_name):
+ if mcast_name not in self.family.genl_family['mcast']:
+ raise Exception(f'Multicast group "{mcast_name}" not present in the family')
+
+ self.sock.bind((0, 0))
+ self.sock.setsockopt(Netlink.SOL_NETLINK, Netlink.NETLINK_ADD_MEMBERSHIP,
+ self.family.genl_family['mcast'][mcast_name])
+
+ def _add_attr(self, space, name, value):
+ attr = self.attr_sets[space][name]
+ nl_type = attr.value
+ if attr["type"] == 'nest':
+ nl_type |= Netlink.NLA_F_NESTED
+ attr_payload = b''
+ for subname, subvalue in value.items():
+ attr_payload += self._add_attr(attr['nested-attributes'], subname, subvalue)
+ elif attr["type"] == 'flag':
+ attr_payload = b''
+ elif attr["type"] == 'u8':
+ attr_payload = struct.pack("B", int(value))
+ elif attr["type"] == 'u16':
+ endian = NlAttr.format_byte_order(attr.byte_order)
+ attr_payload = struct.pack(f"{endian}H", int(value))
+ elif attr["type"] == 'u32':
+ endian = NlAttr.format_byte_order(attr.byte_order)
+ attr_payload = struct.pack(f"{endian}I", int(value))
+ elif attr["type"] == 'u64':
+ endian = NlAttr.format_byte_order(attr.byte_order)
+ attr_payload = struct.pack(f"{endian}Q", int(value))
+ elif attr["type"] == 'string':
+ attr_payload = str(value).encode('ascii') + b'\x00'
+ elif attr["type"] == 'binary':
+ attr_payload = value
+ else:
+ raise Exception(f'Unknown type at {space} {name} {value} {attr["type"]}')
+
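+ # pad the payload out to a multiple of 4 bytes, as netlink requires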
+ pad = b'\x00' * ((4 - len(attr_payload) % 4) % 4)
+ return struct.pack('HH', len(attr_payload) + 4, nl_type) + attr_payload + pad
+
+ def _decode_enum(self, rsp, attr_spec):
+ raw = rsp[attr_spec['name']]
+ enum = self.consts[attr_spec['enum']]
+ i = attr_spec.get('value-start', 0)
+ if 'enum-as-flags' in attr_spec and attr_spec['enum-as-flags']:
+ value = set()
+ while raw:
+ if raw & 1:
+ value.add(enum.entries_by_val[i].name)
+ raw >>= 1
+ i += 1
+ else:
+ value = enum.entries_by_val[raw - i].name
+ rsp[attr_spec['name']] = value
+
+ def _decode_binary(self, attr, attr_spec):
+ if attr_spec.struct_name:
+ decoded = attr.as_struct(self.consts[attr_spec.struct_name])
+ elif attr_spec.sub_type:
+ decoded = attr.as_c_array(attr_spec.sub_type)
+ else:
+ decoded = attr.as_bin()
+ return decoded
+
+ def _decode(self, attrs, space):
+ attr_space = self.attr_sets[space]
+ rsp = dict()
+ for attr in attrs:
+ attr_spec = attr_space.attrs_by_val[attr.type]
+ if attr_spec["type"] == 'nest':
+ subdict = self._decode(NlAttrs(attr.raw), attr_spec['nested-attributes'])
+ decoded = subdict
+ elif attr_spec['type'] == 'u8':
+ decoded = attr.as_u8()
+ elif attr_spec['type'] == 'u16':
+ decoded = attr.as_u16(attr_spec.byte_order)
+ elif attr_spec['type'] == 'u32':
+ decoded = attr.as_u32(attr_spec.byte_order)
+ elif attr_spec['type'] == 'u64':
+ decoded = attr.as_u64(attr_spec.byte_order)
+ elif attr_spec["type"] == 'string':
+ decoded = attr.as_strz()
+ elif attr_spec["type"] == 'binary':
+ decoded = self._decode_binary(attr, attr_spec)
+ elif attr_spec["type"] == 'flag':
+ decoded = True
+ else:
+ raise Exception(f'Unknown {attr.type} {attr_spec["name"]} {attr_spec["type"]}')
+
+ if not attr_spec.is_multi:
+ rsp[attr_spec['name']] = decoded
+ elif attr_spec.name in rsp:
+ rsp[attr_spec.name].append(decoded)
+ else:
+ rsp[attr_spec.name] = [decoded]
+
+ if 'enum' in attr_spec:
+ self._decode_enum(rsp, attr_spec)
+ return rsp
+
+ def _decode_extack_path(self, attrs, attr_set, offset, target):
+ for attr in attrs:
+ attr_spec = attr_set.attrs_by_val[attr.type]
+ if offset > target:
+ break
+ if offset == target:
+ return '.' + attr_spec.name
+
+ if offset + attr.full_len <= target:
+ offset += attr.full_len
+ continue
+ if attr_spec['type'] != 'nest':
+ raise Exception(f"Can't dive into {attr.type} ({attr_spec['name']}) for extack")
+ offset += 4
+ subpath = self._decode_extack_path(NlAttrs(attr.raw),
+ self.attr_sets[attr_spec['nested-attributes']],
+ offset, target)
+ if subpath is None:
+ return None
+ return '.' + attr_spec.name + subpath
+
+ return None
+
+ def _decode_extack(self, request, attr_space, extack):
+ if 'bad-attr-offs' not in extack:
+ return
+
+ genl_req = GenlMsg(NlMsg(request, 0, attr_space=attr_space))
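+ # attributes start 20 bytes into the request (16-byte netlink header plus
+ # 4-byte genetlink header), the same base bad-attr-offs is counted from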
+ path = self._decode_extack_path(genl_req.raw_attrs, attr_space,
+ 20, extack['bad-attr-offs'])
+ if path:
+ del extack['bad-attr-offs']
+ extack['bad-attr'] = path
+
+ def handle_ntf(self, nl_msg, genl_msg):
+ msg = dict()
+ if self.include_raw:
+ msg['nlmsg'] = nl_msg
+ msg['genlmsg'] = genl_msg
+ op = self.rsp_by_value[genl_msg.genl_cmd]
+ msg['name'] = op['name']
+ msg['msg'] = self._decode(genl_msg.raw_attrs, op.attr_set.name)
+ self.async_msg_queue.append(msg)
+
+ def check_ntf(self):
+ while True:
+ try:
+ reply = self.sock.recv(128 * 1024, socket.MSG_DONTWAIT)
+ except BlockingIOError:
+ return
+
+ nms = NlMsgs(reply)
+ for nl_msg in nms:
+ if nl_msg.error:
+ print("Netlink error in ntf!?", os.strerror(-nl_msg.error))
+ print(nl_msg)
+ continue
+ if nl_msg.done:
+ print("Netlink done while checking for ntf!?")
+ continue
+
+ gm = GenlMsg(nl_msg)
+ if gm.genl_cmd not in self.async_msg_ids:
+ print("Unexpected msg id done while checking for ntf", gm)
+ continue
+
+ self.handle_ntf(nl_msg, gm)
+
+ def operation_do_attributes(self, name):
+ """
+ For a given operation name, find and return a supported
+ set of attributes (as a dict).
+ """
+ op = self.find_operation(name)
+ if not op:
+ return None
+
+ return op['do']['request']['attributes'].copy()
+
+ def _op(self, method, vals, dump=False):
+ op = self.ops[method]
+
+ nl_flags = Netlink.NLM_F_REQUEST | Netlink.NLM_F_ACK
+ if dump:
+ nl_flags |= Netlink.NLM_F_DUMP
+
+ req_seq = random.randint(1024, 65535)
+ msg = _genl_msg(self.family.family_id, nl_flags, op.req_value, 1, req_seq)
+ fixed_header_members = []
+ if op.fixed_header:
+ fixed_header_members = self.consts[op.fixed_header].members
+ for m in fixed_header_members:
+ value = vals.pop(m.name)
+ format, _ = NlAttr.type_formats[m.type]
+ msg += struct.pack(format, value)
+ for name, value in vals.items():
+ msg += self._add_attr(op.attr_set.name, name, value)
+ msg = _genl_msg_finalize(msg)
+
+ self.sock.send(msg, 0)
+
+ done = False
+ rsp = []
+ while not done:
+ reply = self.sock.recv(128 * 1024)
+ nms = NlMsgs(reply, attr_space=op.attr_set)
+ for nl_msg in nms:
+ if nl_msg.extack:
+ self._decode_extack(msg, op.attr_set, nl_msg.extack)
+
+ if nl_msg.error:
+ raise NlError(nl_msg)
+ if nl_msg.done:
+ if nl_msg.extack:
+ print("Netlink warning:")
+ print(nl_msg)
+ done = True
+ break
+
+ gm = GenlMsg(nl_msg, fixed_header_members)
+ # Check if this is a reply to our request
+ if nl_msg.nl_seq != req_seq or gm.genl_cmd != op.rsp_value:
+ if gm.genl_cmd in self.async_msg_ids:
+ self.handle_ntf(nl_msg, gm)
+ continue
+ else:
+ print('Unexpected message: ' + repr(gm))
+ continue
+
+ rsp.append(self._decode(gm.raw_attrs, op.attr_set.name)
+ | gm.fixed_header_attrs)
+
+ if not rsp:
+ return None
+ if not dump and len(rsp) == 1:
+ return rsp[0]
+ return rsp
+
+ def do(self, method, vals):
+ return self._op(method, vals)
+
+ def dump(self, method, vals):
+ return self._op(method, vals, dump=True)
diff --git a/tools/net/ynl/requirements.txt b/tools/net/ynl/requirements.txt
new file mode 100644
index 000000000000..0db6ad0c1b39
--- /dev/null
+++ b/tools/net/ynl/requirements.txt
@@ -0,0 +1,2 @@
+jsonschema==4.*
+PyYAML==6.*
diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py
new file mode 100755
index 000000000000..cc2f8c945340
--- /dev/null
+++ b/tools/net/ynl/ynl-gen-c.py
@@ -0,0 +1,2305 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+
+import argparse
+import collections
+import os
+import yaml
+
+from lib import SpecFamily, SpecAttrSet, SpecAttr, SpecOperation, SpecEnumSet, SpecEnumEntry
+
+
+def c_upper(name):
+ return name.upper().replace('-', '_')
+
+
+def c_lower(name):
+ return name.lower().replace('-', '_')
+
+
+class BaseNlLib:
+ def get_family_id(self):
+ return 'ys->family_id'
+
+ def parse_cb_run(self, cb, data, is_dump=False, indent=1):
+ ind = '\n\t\t' + '\t' * indent + ' '
+ if is_dump:
+ return f"mnl_cb_run2(ys->rx_buf, len, 0, 0, {cb}, {data},{ind}ynl_cb_array, NLMSG_MIN_TYPE)"
+ else:
+ return f"mnl_cb_run2(ys->rx_buf, len, ys->seq, ys->portid,{ind}{cb}, {data},{ind}" + \
+ "ynl_cb_array, NLMSG_MIN_TYPE)"
+
+
+class Type(SpecAttr):
+ def __init__(self, family, attr_set, attr, value):
+ super().__init__(family, attr_set, attr, value)
+
+ self.attr = attr
+ self.attr_set = attr_set
+ self.type = attr['type']
+ self.checks = attr.get('checks', {})
+
+ if 'len' in attr:
+ self.len = attr['len']
+ if 'nested-attributes' in attr:
+ self.nested_attrs = attr['nested-attributes']
+ if self.nested_attrs == family.name:
+ self.nested_render_name = f"{family.name}"
+ else:
+ self.nested_render_name = f"{family.name}_{c_lower(self.nested_attrs)}"
+
+ self.c_name = c_lower(self.name)
+ if self.c_name in _C_KW:
+ self.c_name += '_'
+
+ # Added by resolve():
+ self.enum_name = None
+ delattr(self, "enum_name")
+
+ def resolve(self):
+ self.enum_name = f"{self.attr_set.name_prefix}{self.name}"
+ self.enum_name = c_upper(self.enum_name)
+
+ def is_multi_val(self):
+ return None
+
+ def is_scalar(self):
+ return self.type in {'u8', 'u16', 'u32', 'u64', 's32', 's64'}
+
+ def presence_type(self):
+ return 'bit'
+
+ def presence_member(self, space, type_filter):
+ if self.presence_type() != type_filter:
+ return
+
+ if self.presence_type() == 'bit':
+ pfx = '__' if space == 'user' else ''
+ return f"{pfx}u32 {self.c_name}:1;"
+
+ if self.presence_type() == 'len':
+ pfx = '__' if space == 'user' else ''
+ return f"{pfx}u32 {self.c_name}_len;"
+
+ def _complex_member_type(self, ri):
+ return None
+
+ def free_needs_iter(self):
+ return False
+
+ def free(self, ri, var, ref):
+ if self.is_multi_val() or self.presence_type() == 'len':
+ ri.cw.p(f'free({var}->{ref}{self.c_name});')
+
+ def arg_member(self, ri):
+ member = self._complex_member_type(ri)
+ if member:
+ return [member + ' *' + self.c_name]
+ raise Exception(f"Struct member not implemented for class type {self.type}")
+
+ def struct_member(self, ri):
+ if self.is_multi_val():
+ ri.cw.p(f"unsigned int n_{self.c_name};")
+ member = self._complex_member_type(ri)
+ if member:
+ ptr = '*' if self.is_multi_val() else ''
+ ri.cw.p(f"{member} {ptr}{self.c_name};")
+ return
+ members = self.arg_member(ri)
+ for one in members:
+ ri.cw.p(one + ';')
+
+ def _attr_policy(self, policy):
+ return '{ .type = ' + policy + ', }'
+
+ def attr_policy(self, cw):
+ policy = c_upper('nla-' + self.attr['type'])
+
+ spec = self._attr_policy(policy)
+ cw.p(f"\t[{self.enum_name}] = {spec},")
+
+ def _attr_typol(self):
+ raise Exception(f"Type policy not implemented for class type {self.type}")
+
+ def attr_typol(self, cw):
+ typol = self._attr_typol()
+ cw.p(f'[{self.enum_name}] = {"{"} .name = "{self.name}", {typol}{"}"},')
+
+ def _attr_put_line(self, ri, var, line):
+ if self.presence_type() == 'bit':
+ ri.cw.p(f"if ({var}->_present.{self.c_name})")
+ elif self.presence_type() == 'len':
+ ri.cw.p(f"if ({var}->_present.{self.c_name}_len)")
+ ri.cw.p(f"{line};")
+
+ def _attr_put_simple(self, ri, var, put_type):
+ line = f"mnl_attr_put_{put_type}(nlh, {self.enum_name}, {var}->{self.c_name})"
+ self._attr_put_line(ri, var, line)
+
+ def attr_put(self, ri, var):
+ raise Exception(f"Put not implemented for class type {self.type}")
+
+ def _attr_get(self, ri, var):
+ raise Exception(f"Attr get not implemented for class type {self.type}")
+
+ def attr_get(self, ri, var, first):
+ lines, init_lines, local_vars = self._attr_get(ri, var)
+ if type(lines) is str:
+ lines = [lines]
+ if type(init_lines) is str:
+ init_lines = [init_lines]
+
+ kw = 'if' if first else 'else if'
+ ri.cw.block_start(line=f"{kw} (mnl_attr_get_type(attr) == {self.enum_name})")
+ if local_vars:
+ for local in local_vars:
+ ri.cw.p(local)
+ ri.cw.nl()
+
+ if not self.is_multi_val():
+ ri.cw.p("if (ynl_attr_validate(yarg, attr))")
+ ri.cw.p("return MNL_CB_ERROR;")
+ if self.presence_type() == 'bit':
+ ri.cw.p(f"{var}->_present.{self.c_name} = 1;")
+
+ if init_lines:
+ ri.cw.nl()
+ for line in init_lines:
+ ri.cw.p(line)
+
+ for line in lines:
+ ri.cw.p(line)
+ ri.cw.block_end()
+
+ def _setter_lines(self, ri, member, presence):
+ raise Exception(f"Setter not implemented for class type {self.type}")
+
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ ref = (ref if ref else []) + [self.c_name]
+ var = "req"
+ member = f"{var}->{'.'.join(ref)}"
+
+ code = []
+ presence = ''
+ for i in range(0, len(ref)):
+ presence = f"{var}->{'.'.join(ref[:i] + [''])}_present.{ref[i]}"
+ if self.presence_type() == 'bit':
+ code.append(presence + ' = 1;')
+ code += self._setter_lines(ri, member, presence)
+
+ ri.cw.write_func('static inline void',
+ f"{op_prefix(ri, direction, deref=deref)}_set_{'_'.join(ref)}",
+ body=code,
+ args=[f'{type_name(ri, direction, deref=deref)} *{var}'] + self.arg_member(ri))
+
+
+class TypeUnused(Type):
+ def presence_type(self):
+ return ''
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_REJECT, '
+
+ def attr_policy(self, cw):
+ pass
+
+
+class TypePad(Type):
+ def presence_type(self):
+ return ''
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_REJECT, '
+
+ def attr_policy(self, cw):
+ pass
+
+
+class TypeScalar(Type):
+ def __init__(self, family, attr_set, attr, value):
+ super().__init__(family, attr_set, attr, value)
+
+ self.byte_order_comment = ''
+ if 'byte-order' in attr:
+ self.byte_order_comment = f" /* {attr['byte-order']} */"
+
+ # Added by resolve():
+ self.is_bitfield = None
+ delattr(self, "is_bitfield")
+ self.type_name = None
+ delattr(self, "type_name")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if 'enum-as-flags' in self.attr and self.attr['enum-as-flags']:
+ self.is_bitfield = True
+ elif 'enum' in self.attr:
+ self.is_bitfield = self.family.consts[self.attr['enum']]['type'] == 'flags'
+ else:
+ self.is_bitfield = False
+
+ if 'enum' in self.attr and not self.is_bitfield:
+ self.type_name = f"enum {self.family.name}_{c_lower(self.attr['enum'])}"
+ else:
+ self.type_name = '__' + self.type
+
+ def _mnl_type(self):
+ t = self.type
+ # mnl does not have a helper for signed types
+ if t[0] == 's':
+ t = 'u' + t[1:]
+ return t
+
+ def _attr_policy(self, policy):
+ if 'flags-mask' in self.checks or self.is_bitfield:
+ if self.is_bitfield:
+ enum = self.family.consts[self.attr['enum']]
+ mask = enum.get_mask(as_flags=True)
+ else:
+ flags = self.family.consts[self.checks['flags-mask']]
+ flag_cnt = len(flags['entries'])
+ mask = (1 << flag_cnt) - 1
+ return f"NLA_POLICY_MASK({policy}, 0x{mask:x})"
+ elif 'min' in self.checks:
+ return f"NLA_POLICY_MIN({policy}, {self.checks['min']})"
+ elif 'enum' in self.attr:
+ enum = self.family.consts[self.attr['enum']]
+ cnt = len(enum['entries'])
+ return f"NLA_POLICY_MAX({policy}, {cnt - 1})"
+ return super()._attr_policy(policy)
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_U{self.type[1:]}, '
+
+ def arg_member(self, ri):
+ return [f'{self.type_name} {self.c_name}{self.byte_order_comment}']
+
+ def attr_put(self, ri, var):
+ self._attr_put_simple(ri, var, self._mnl_type())
+
+ def _attr_get(self, ri, var):
+ return f"{var}->{self.c_name} = mnl_attr_get_{self._mnl_type()}(attr);", None, None
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"{member} = {self.c_name};"]
+
+
+class TypeFlag(Type):
+ def arg_member(self, ri):
+ return []
+
+ def _attr_typol(self):
+ return '.type = YNL_PT_FLAG, '
+
+ def attr_put(self, ri, var):
+ self._attr_put_line(ri, var, f"mnl_attr_put(nlh, {self.enum_name}, 0, NULL)")
+
+ def _attr_get(self, ri, var):
+ return [], None, None
+
+ def _setter_lines(self, ri, member, presence):
+ return []
+
+
+class TypeString(Type):
+ def arg_member(self, ri):
+ return [f"const char *{self.c_name}"]
+
+ def presence_type(self):
+ return 'len'
+
+ def struct_member(self, ri):
+ ri.cw.p(f"char *{self.c_name};")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NUL_STR, '
+
+ def _attr_policy(self, policy):
+ mem = '{ .type = ' + policy
+ if 'max-len' in self.checks:
+ mem += ', .len = ' + str(self.checks['max-len'])
+ mem += ', }'
+ return mem
+
+ def attr_policy(self, cw):
+ if self.checks.get('unterminated-ok', False):
+ policy = 'NLA_STRING'
+ else:
+ policy = 'NLA_NUL_STRING'
+
+ spec = self._attr_policy(policy)
+ cw.p(f"\t[{self.enum_name}] = {spec},")
+
+ def attr_put(self, ri, var):
+ self._attr_put_simple(ri, var, 'strz')
+
+ def _attr_get(self, ri, var):
+ len_mem = var + '->_present.' + self.c_name + '_len'
+ return [f"{len_mem} = len;",
+ f"{var}->{self.c_name} = malloc(len + 1);",
+ f"memcpy({var}->{self.c_name}, mnl_attr_get_str(attr), len);",
+ f"{var}->{self.c_name}[len] = 0;"], \
+ ['len = strnlen(mnl_attr_get_str(attr), mnl_attr_get_payload_len(attr));'], \
+ ['unsigned int len;']
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"free({member});",
+ f"{presence}_len = strlen({self.c_name});",
+ f"{member} = malloc({presence}_len + 1);",
+ f'memcpy({member}, {self.c_name}, {presence}_len);',
+ f'{member}[{presence}_len] = 0;']
+
+
+class TypeBinary(Type):
+ def arg_member(self, ri):
+ return [f"const void *{self.c_name}", 'size_t len']
+
+ def presence_type(self):
+ return 'len'
+
+ def struct_member(self, ri):
+ ri.cw.p(f"void *{self.c_name};")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_BINARY,'
+
+ def _attr_policy(self, policy):
+ mem = '{ '
+ if len(self.checks) == 1 and 'min-len' in self.checks:
+ mem += '.len = ' + str(self.checks['min-len'])
+ elif len(self.checks) == 0:
+ mem += '.type = NLA_BINARY'
+ else:
+ raise Exception('One or more binary type checks not implemented yet')
+ mem += ', }'
+ return mem
+
+ def attr_put(self, ri, var):
+ self._attr_put_line(ri, var, f"mnl_attr_put(nlh, {self.enum_name}, " +
+ f"{var}->_present.{self.c_name}_len, {var}->{self.c_name})")
+
+ def _attr_get(self, ri, var):
+ len_mem = var + '->_present.' + self.c_name + '_len'
+ return [f"{len_mem} = len;",
+ f"{var}->{self.c_name} = malloc(len);",
+ f"memcpy({var}->{self.c_name}, mnl_attr_get_payload(attr), len);"], \
+ ['len = mnl_attr_get_payload_len(attr);'], \
+ ['unsigned int len;']
+
+ def _setter_lines(self, ri, member, presence):
+ return [f"free({member});",
+ f"{member} = malloc({presence}_len);",
+ f'memcpy({member}, {self.c_name}, {presence}_len);']
+
+
+class TypeNest(Type):
+ def _complex_member_type(self, ri):
+ return f"struct {self.nested_render_name}"
+
+ def free(self, ri, var, ref):
+ ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name});')
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_policy(self, policy):
+ return 'NLA_POLICY_NESTED(' + self.nested_render_name + '_nl_policy)'
+
+ def attr_put(self, ri, var):
+ self._attr_put_line(ri, var, f"{self.nested_render_name}_put(nlh, " +
+ f"{self.enum_name}, &{var}->{self.c_name})")
+
+ def _attr_get(self, ri, var):
+ get_lines = [f"{self.nested_render_name}_parse(&parg, attr);"]
+ init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
+ f"parg.data = &{var}->{self.c_name};"]
+ return get_lines, init_lines, None
+
+ def setter(self, ri, space, direction, deref=False, ref=None):
+ ref = (ref if ref else []) + [self.c_name]
+
+ for _, attr in ri.family.pure_nested_structs[self.nested_attrs].member_list():
+ attr.setter(ri, self.nested_attrs, direction, deref=deref, ref=ref)
+
+
+class TypeMultiAttr(Type):
+ def is_multi_val(self):
+ return True
+
+ def presence_type(self):
+ return 'count'
+
+ def _complex_member_type(self, ri):
+ if 'type' not in self.attr or self.attr['type'] == 'nest':
+ return f"struct {self.nested_render_name}"
+ elif self.attr['type'] in scalars:
+ scalar_pfx = '__' if ri.ku_space == 'user' else ''
+ return scalar_pfx + self.attr['type']
+ else:
+ raise Exception(f"Sub-type {self.attr['type']} not supported yet")
+
+ def free_needs_iter(self):
+ return 'type' not in self.attr or self.attr['type'] == 'nest'
+
+ def free(self, ri, var, ref):
+ if 'type' not in self.attr or self.attr['type'] == 'nest':
+ ri.cw.p(f"for (i = 0; i < {var}->{ref}n_{self.c_name}; i++)")
+ ri.cw.p(f'{self.nested_render_name}_free(&{var}->{ref}{self.c_name}[i]);')
+
+ def _attr_typol(self):
+ if 'type' not in self.attr or self.attr['type'] == 'nest':
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+ elif self.attr['type'] in scalars:
+ return f".type = YNL_PT_U{self.attr['type'][1:]}, "
+ else:
+ raise Exception(f"Sub-type {self.attr['type']} not supported yet")
+
+ def _attr_get(self, ri, var):
+ return f'{var}->n_{self.c_name}++;', None, None
+
+
+class TypeArrayNest(Type):
+ def is_multi_val(self):
+ return True
+
+ def presence_type(self):
+ return 'count'
+
+ def _complex_member_type(self, ri):
+ if 'sub-type' not in self.attr or self.attr['sub-type'] == 'nest':
+ return f"struct {self.nested_render_name}"
+ elif self.attr['sub-type'] in scalars:
+ scalar_pfx = '__' if ri.ku_space == 'user' else ''
+ return scalar_pfx + self.attr['sub-type']
+ else:
+ raise Exception(f"Sub-type {self.attr['sub-type']} not supported yet")
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_get(self, ri, var):
+ local_vars = ['const struct nlattr *attr2;']
+ get_lines = [f'attr_{self.c_name} = attr;',
+ 'mnl_attr_for_each_nested(attr2, attr)',
+ f'\t{var}->n_{self.c_name}++;']
+ return get_lines, None, local_vars
+
+
+class TypeNestTypeValue(Type):
+ def _complex_member_type(self, ri):
+ return f"struct {self.nested_render_name}"
+
+ def _attr_typol(self):
+ return f'.type = YNL_PT_NEST, .nest = &{self.nested_render_name}_nest, '
+
+ def _attr_get(self, ri, var):
+ prev = 'attr'
+ tv_args = ''
+ get_lines = []
+ local_vars = []
+ init_lines = [f"parg.rsp_policy = &{self.nested_render_name}_nest;",
+ f"parg.data = &{var}->{self.c_name};"]
+ if 'type-value' in self.attr:
+ tv_names = [c_lower(x) for x in self.attr["type-value"]]
+ local_vars += [f'const struct nlattr *attr_{", *attr_".join(tv_names)};']
+ local_vars += [f'__u32 {", ".join(tv_names)};']
+ for level in self.attr["type-value"]:
+ level = c_lower(level)
+ get_lines += [f'attr_{level} = mnl_attr_get_payload({prev});']
+ get_lines += [f'{level} = mnl_attr_get_type(attr_{level});']
+ prev = 'attr_' + level
+
+ tv_args = f", {', '.join(tv_names)}"
+
+ get_lines += [f"{self.nested_render_name}_parse(&parg, {prev}{tv_args});"]
+ return get_lines, init_lines, local_vars
+
+
+class Struct:
+ def __init__(self, family, space_name, type_list=None, inherited=None):
+ self.family = family
+ self.space_name = space_name
+ self.attr_set = family.attr_sets[space_name]
+ # Use list to catch comparisons with empty sets
+ self._inherited = inherited if inherited is not None else []
+ self.inherited = []
+
+ self.nested = type_list is None
+ if family.name == c_lower(space_name):
+ self.render_name = f"{family.name}"
+ else:
+ self.render_name = f"{family.name}_{c_lower(space_name)}"
+ self.struct_name = 'struct ' + self.render_name
+ self.ptr_name = self.struct_name + ' *'
+
+ self.request = False
+ self.reply = False
+
+ self.attr_list = []
+ self.attrs = dict()
+ if type_list:
+ for t in type_list:
+ self.attr_list.append((t, self.attr_set[t]),)
+ else:
+ for t in self.attr_set:
+ self.attr_list.append((t, self.attr_set[t]),)
+
+ max_val = 0
+ self.attr_max_val = None
+ for name, attr in self.attr_list:
+ if attr.value >= max_val:
+ max_val = attr.value
+ self.attr_max_val = attr
+ self.attrs[name] = attr
+
+ def __iter__(self):
+ yield from self.attrs
+
+ def __getitem__(self, key):
+ return self.attrs[key]
+
+ def member_list(self):
+ return self.attr_list
+
+ def set_inherited(self, new_inherited):
+ if self._inherited != new_inherited:
+ raise Exception("Inheriting different members not supported")
+ self.inherited = [c_lower(x) for x in sorted(self._inherited)]
+
+
+class EnumEntry(SpecEnumEntry):
+ def __init__(self, enum_set, yaml, prev, value_start):
+ super().__init__(enum_set, yaml, prev, value_start)
+
+ if prev:
+ self.value_change = (self.value != prev.value + 1)
+ else:
+ self.value_change = (self.value != 0)
+ self.value_change = self.value_change or self.enum_set['type'] == 'flags'
+
+ # Added by resolve:
+ self.c_name = None
+ delattr(self, "c_name")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ self.c_name = c_upper(self.enum_set.value_pfx + self.name)
+
+
+class EnumSet(SpecEnumSet):
+ def __init__(self, family, yaml):
+ self.render_name = c_lower(family.name + '-' + yaml['name'])
+ self.enum_name = 'enum ' + self.render_name
+
+ self.value_pfx = yaml.get('name-prefix', f"{family.name}-{yaml['name']}-")
+
+ super().__init__(family, yaml)
+
+ def new_entry(self, entry, prev_entry, value_start):
+ return EnumEntry(self, entry, prev_entry, value_start)
+
+
+class AttrSet(SpecAttrSet):
+ def __init__(self, family, yaml):
+ super().__init__(family, yaml)
+
+ if self.subset_of is None:
+ if 'name-prefix' in yaml:
+ pfx = yaml['name-prefix']
+ elif self.name == family.name:
+ pfx = family.name + '-a-'
+ else:
+ pfx = f"{family.name}-a-{self.name}-"
+ self.name_prefix = c_upper(pfx)
+ self.max_name = c_upper(self.yaml.get('attr-max-name', f"{self.name_prefix}max"))
+ else:
+ self.name_prefix = family.attr_sets[self.subset_of].name_prefix
+ self.max_name = family.attr_sets[self.subset_of].max_name
+
+ # Added by resolve:
+ self.c_name = None
+ delattr(self, "c_name")
+
+ def resolve(self):
+ self.c_name = c_lower(self.name)
+ if self.c_name in _C_KW:
+ self.c_name += '_'
+ if self.c_name == self.family.c_name:
+ self.c_name = ''
+
+ def new_attr(self, elem, value):
+ if 'multi-attr' in elem and elem['multi-attr']:
+ return TypeMultiAttr(self.family, self, elem, value)
+ elif elem['type'] in scalars:
+ return TypeScalar(self.family, self, elem, value)
+ elif elem['type'] == 'unused':
+ return TypeUnused(self.family, self, elem, value)
+ elif elem['type'] == 'pad':
+ return TypePad(self.family, self, elem, value)
+ elif elem['type'] == 'flag':
+ return TypeFlag(self.family, self, elem, value)
+ elif elem['type'] == 'string':
+ return TypeString(self.family, self, elem, value)
+ elif elem['type'] == 'binary':
+ return TypeBinary(self.family, self, elem, value)
+ elif elem['type'] == 'nest':
+ return TypeNest(self.family, self, elem, value)
+ elif elem['type'] == 'array-nest':
+ return TypeArrayNest(self.family, self, elem, value)
+ elif elem['type'] == 'nest-type-value':
+ return TypeNestTypeValue(self.family, self, elem, value)
+ else:
+ raise Exception(f"No typed class for type {elem['type']}")
+
+
+class Operation(SpecOperation):
+ def __init__(self, family, yaml, req_value, rsp_value):
+ super().__init__(family, yaml, req_value, rsp_value)
+
+ if req_value != rsp_value:
+ raise Exception("Directional messages not supported by codegen")
+
+ self.render_name = family.name + '_' + c_lower(self.name)
+
+ self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \
+ ('dump' in yaml and 'request' in yaml['dump'])
+
+ # Added by resolve:
+ self.enum_name = None
+ delattr(self, "enum_name")
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if not self.is_async:
+ self.enum_name = self.family.op_prefix + c_upper(self.name)
+ else:
+ self.enum_name = self.family.async_op_prefix + c_upper(self.name)
+
+ def add_notification(self, op):
+ if 'notify' not in self.yaml:
+ self.yaml['notify'] = dict()
+ self.yaml['notify']['reply'] = self.yaml['do']['reply']
+ self.yaml['notify']['cmds'] = []
+ self.yaml['notify']['cmds'].append(op)
+
+
+class Family(SpecFamily):
+ def __init__(self, file_name):
+ # Added by resolve:
+ self.c_name = None
+ delattr(self, "c_name")
+ self.op_prefix = None
+ delattr(self, "op_prefix")
+ self.async_op_prefix = None
+ delattr(self, "async_op_prefix")
+ self.mcgrps = None
+ delattr(self, "mcgrps")
+ self.consts = None
+ delattr(self, "consts")
+ self.hooks = None
+ delattr(self, "hooks")
+
+ super().__init__(file_name)
+
+ self.fam_key = c_upper(self.yaml.get('c-family-name', self.yaml["name"] + '_FAMILY_NAME'))
+ self.ver_key = c_upper(self.yaml.get('c-version-name', self.yaml["name"] + '_FAMILY_VERSION'))
+
+ if 'definitions' not in self.yaml:
+ self.yaml['definitions'] = []
+
+ if 'uapi-header' in self.yaml:
+ self.uapi_header = self.yaml['uapi-header']
+ else:
+ self.uapi_header = f"linux/{self.name}.h"
+
+ def resolve(self):
+ self.resolve_up(super())
+
+ if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}:
+ raise Exception("Codegen only supported for genetlink")
+
+ self.c_name = c_lower(self.name)
+ if 'name-prefix' in self.yaml['operations']:
+ self.op_prefix = c_upper(self.yaml['operations']['name-prefix'])
+ else:
+ self.op_prefix = c_upper(self.yaml['name'] + '-cmd-')
+ if 'async-prefix' in self.yaml['operations']:
+ self.async_op_prefix = c_upper(self.yaml['operations']['async-prefix'])
+ else:
+ self.async_op_prefix = self.op_prefix
+
+ self.mcgrps = self.yaml.get('mcast-groups', {'list': []})
+
+ self.hooks = dict()
+ for when in ['pre', 'post']:
+ self.hooks[when] = dict()
+ for op_mode in ['do', 'dump']:
+ self.hooks[when][op_mode] = dict()
+ self.hooks[when][op_mode]['set'] = set()
+ self.hooks[when][op_mode]['list'] = []
+
+ # dict space-name -> 'request': set(attrs), 'reply': set(attrs)
+ self.root_sets = dict()
+ # dict space-name -> Struct (with .request/.reply flags marking how it is used)
+ self.pure_nested_structs = dict()
+ self.all_notify = dict()
+
+ self._mock_up_events()
+
+ self._dictify()
+ self._load_root_sets()
+ self._load_nested_sets()
+ self._load_all_notify()
+ self._load_hooks()
+
+ self.kernel_policy = self.yaml.get('kernel-policy', 'split')
+ if self.kernel_policy == 'global':
+ self._load_global_policy()
+
+ def new_enum(self, elem):
+ return EnumSet(self, elem)
+
+ def new_attr_set(self, elem):
+ return AttrSet(self, elem)
+
+ def new_operation(self, elem, req_value, rsp_value):
+ return Operation(self, elem, req_value, rsp_value)
+
+ # Fake a 'do' equivalent of all events, so that we can render their response parsing
+ def _mock_up_events(self):
+ for op in self.yaml['operations']['list']:
+ if 'event' in op:
+ op['do'] = {
+ 'reply': {
+ 'attributes': op['event']['attributes']
+ }
+ }
+
+ def _dictify(self):
+ ntf = []
+ for msg in self.msgs.values():
+ if 'notify' in msg:
+ ntf.append(msg)
+ for n in ntf:
+ self.ops[n['notify']].add_notification(n)
+
+ def _load_root_sets(self):
+ for op_name, op in self.ops.items():
+ if 'attribute-set' not in op:
+ continue
+
+ req_attrs = set()
+ rsp_attrs = set()
+ for op_mode in ['do', 'dump']:
+ if op_mode in op and 'request' in op[op_mode]:
+ req_attrs.update(set(op[op_mode]['request']['attributes']))
+ if op_mode in op and 'reply' in op[op_mode]:
+ rsp_attrs.update(set(op[op_mode]['reply']['attributes']))
+
+ if op['attribute-set'] not in self.root_sets:
+ self.root_sets[op['attribute-set']] = {'request': req_attrs, 'reply': rsp_attrs}
+ else:
+ self.root_sets[op['attribute-set']]['request'].update(req_attrs)
+ self.root_sets[op['attribute-set']]['reply'].update(rsp_attrs)
+
+ def _load_nested_sets(self):
+ for root_set, rs_members in self.root_sets.items():
+ for attr, spec in self.attr_sets[root_set].items():
+ if 'nested-attributes' in spec:
+ inherit = set()
+ nested = spec['nested-attributes']
+ if nested not in self.root_sets:
+ self.pure_nested_structs[nested] = Struct(self, nested, inherited=inherit)
+ if attr in rs_members['request']:
+ self.pure_nested_structs[nested].request = True
+ if attr in rs_members['reply']:
+ self.pure_nested_structs[nested].reply = True
+
+ if 'type-value' in spec:
+ if nested in self.root_sets:
+ raise Exception("Inheriting members to a space used as root not supported")
+ inherit.update(set(spec['type-value']))
+ elif spec['type'] == 'array-nest':
+ inherit.add('idx')
+ self.pure_nested_structs[nested].set_inherited(inherit)
+
+ def _load_all_notify(self):
+ for op_name, op in self.ops.items():
+ if not op:
+ continue
+
+ if 'notify' in op:
+ self.all_notify[op_name] = op['notify']['cmds']
+
+ def _load_global_policy(self):
+ global_set = set()
+ attr_set_name = None
+ for op_name, op in self.ops.items():
+ if not op:
+ continue
+ if 'attribute-set' not in op:
+ continue
+
+ if attr_set_name is None:
+ attr_set_name = op['attribute-set']
+ if attr_set_name != op['attribute-set']:
+ raise Exception('For a global policy all ops must use the same set')
+
+ for op_mode in ['do', 'dump']:
+ if op_mode in op:
+ global_set.update(op[op_mode].get('request', []))
+
+ self.global_policy = []
+ self.global_policy_set = attr_set_name
+ for attr in self.attr_sets[attr_set_name]:
+ if attr in global_set:
+ self.global_policy.append(attr)
+
+ def _load_hooks(self):
+ for op in self.ops.values():
+ for op_mode in ['do', 'dump']:
+ if op_mode not in op:
+ continue
+ for when in ['pre', 'post']:
+ if when not in op[op_mode]:
+ continue
+ name = op[op_mode][when]
+ if name in self.hooks[when][op_mode]['set']:
+ continue
+ self.hooks[when][op_mode]['set'].add(name)
+ self.hooks[when][op_mode]['list'].append(name)
+
+
+class RenderInfo:
+ def __init__(self, cw, family, ku_space, op, op_name, op_mode, attr_set=None):
+ self.family = family
+ self.nl = cw.nlib
+ self.ku_space = ku_space
+ self.op = op
+ self.op_name = op_name
+ self.op_mode = op_mode
+
+ # 'do' and 'dump' response parsing is identical
+ if op_mode != 'do' and 'dump' in op and 'do' in op and 'reply' in op['do'] and \
+ op["do"]["reply"] == op["dump"]["reply"]:
+ self.type_consistent = True
+ else:
+ self.type_consistent = op_mode == 'event'
+
+ self.attr_set = attr_set
+ if not self.attr_set:
+ self.attr_set = op['attribute-set']
+
+ if op:
+ self.type_name = c_lower(op_name)
+ else:
+ self.type_name = c_lower(attr_set)
+
+ self.cw = cw
+
+ self.struct = dict()
+ for op_dir in ['request', 'reply']:
+ if op and op_dir in op[op_mode]:
+ self.struct[op_dir] = Struct(family, self.attr_set,
+ type_list=op[op_mode][op_dir]['attributes'])
+ if op_mode == 'event':
+ self.struct['reply'] = Struct(family, self.attr_set, type_list=op['event']['attributes'])
+
+
+class CodeWriter:
+ def __init__(self, nlib, out_file):
+ self.nlib = nlib
+
+ self._nl = False
+ self._silent_block = False
+ self._ind = 0
+ self._out = out_file
+
+ @classmethod
+ def _is_cond(cls, line):
+ return line.startswith('if') or line.startswith('while') or line.startswith('for')
+
+ def p(self, line, add_ind=0):
+ if self._nl:
+ self._out.write('\n')
+ self._nl = False
+ ind = self._ind
+ if line[-1] == ':':
+ ind -= 1
+ if self._silent_block:
+ ind += 1
+ self._silent_block = line.endswith(')') and CodeWriter._is_cond(line)
+ if add_ind:
+ ind += add_ind
+ self._out.write('\t' * ind + line + '\n')
+
+ def nl(self):
+ self._nl = True
+
+ def block_start(self, line=''):
+ if line:
+ line = line + ' '
+ self.p(line + '{')
+ self._ind += 1
+
+ def block_end(self, line=''):
+ if line and line[0] not in {';', ','}:
+ line = ' ' + line
+ self._ind -= 1
+ self.p('}' + line)
+
+ def write_doc_line(self, doc, indent=True):
+ words = doc.split()
+ line = ' *'
+ for word in words:
+ if len(line) + len(word) >= 79:
+ self.p(line)
+ line = ' *'
+ if indent:
+ line += ' '
+ line += ' ' + word
+ self.p(line)
+
+ def write_func_prot(self, qual_ret, name, args=None, doc=None, suffix=''):
+ if not args:
+ args = ['void']
+
+ if doc:
+ self.p('/*')
+ self.p(' * ' + doc)
+ self.p(' */')
+
+ oneline = qual_ret
+ if qual_ret[-1] != '*':
+ oneline += ' '
+ oneline += f"{name}({', '.join(args)}){suffix}"
+
+ if len(oneline) < 80:
+ self.p(oneline)
+ return
+
+ v = qual_ret
+ if len(v) > 3:
+ self.p(v)
+ v = ''
+ elif qual_ret[-1] != '*':
+ v += ' '
+ v += name + '('
+ ind = '\t' * (len(v) // 8) + ' ' * (len(v) % 8)
+ delta_ind = len(v) - len(ind)
+ v += args[0]
+ i = 1
+ while i < len(args):
+ next_len = len(v) + len(args[i])
+ if v[0] == '\t':
+ next_len += delta_ind
+ if next_len > 76:
+ self.p(v + ',')
+ v = ind
+ else:
+ v += ', '
+ v += args[i]
+ i += 1
+ self.p(v + ')' + suffix)
+
+ def write_func_lvar(self, local_vars):
+ if not local_vars:
+ return
+
+ if type(local_vars) is str:
+ local_vars = [local_vars]
+
+ local_vars.sort(key=len, reverse=True)
+ for var in local_vars:
+ self.p(var)
+ self.nl()
+
+ def write_func(self, qual_ret, name, body, args=None, local_vars=None):
+ self.write_func_prot(qual_ret=qual_ret, name=name, args=args)
+ self.write_func_lvar(local_vars=local_vars)
+
+ self.block_start()
+ for line in body:
+ self.p(line)
+ self.block_end()
+
+ def writes_defines(self, defines):
+ longest = 0
+ for define in defines:
+ if len(define[0]) > longest:
+ longest = len(define[0])
+ longest = ((longest + 8) // 8) * 8
+ for define in defines:
+ line = '#define ' + define[0]
+ line += '\t' * ((longest - len(define[0]) + 7) // 8)
+ if type(define[1]) is int:
+ line += str(define[1])
+ elif type(define[1]) is str:
+ line += '"' + define[1] + '"'
+ self.p(line)
+
+ def write_struct_init(self, members):
+ longest = max([len(x[0]) for x in members])
+ longest += 1 # because we prepend a .
+ longest = ((longest + 8) // 8) * 8
+ for one in members:
+ line = '.' + one[0]
+ line += '\t' * ((longest - len(one[0]) - 1 + 7) // 8)
+ line += '= ' + one[1] + ','
+ self.p(line)
+
+
+scalars = {'u8', 'u16', 'u32', 'u64', 's32', 's64'}
+
+direction_to_suffix = {
+ 'reply': '_rsp',
+ 'request': '_req',
+ '': ''
+}
+
+op_mode_to_wrapper = {
+ 'do': '',
+ 'dump': '_list',
+ 'notify': '_ntf',
+ 'event': '',
+}
+
+_C_KW = {
+ 'do'
+}
+
+
+def rdir(direction):
+ if direction == 'reply':
+ return 'request'
+ if direction == 'request':
+ return 'reply'
+ return direction
+
+
+def op_prefix(ri, direction, deref=False):
+ suffix = f"_{ri.type_name}"
+
+ if not ri.op_mode or ri.op_mode == 'do':
+ suffix += f"{direction_to_suffix[direction]}"
+ else:
+ if direction == 'request':
+ suffix += '_req_dump'
+ else:
+ if ri.type_consistent:
+ if deref:
+ suffix += f"{direction_to_suffix[direction]}"
+ else:
+ suffix += op_mode_to_wrapper[ri.op_mode]
+ else:
+ suffix += '_rsp'
+ suffix += '_dump' if deref else '_list'
+
+ return f"{ri.family['name']}{suffix}"
+
+
+def type_name(ri, direction, deref=False):
+ return f"struct {op_prefix(ri, direction, deref=deref)}"
+
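Taken together, direction_to_suffix, op_mode_to_wrapper and op_prefix() decide the names of the generated user-space types. As a rough illustration, a hypothetical family "foo" whose "get" op has do, dump and notify variants would yield names along these lines (a sketch only; the real set depends on the spec):

/* Hypothetical type names for family "foo", op "get" (sketch only) */
struct foo_get_req;		/* 'do' request */
struct foo_get_rsp;		/* 'do' reply */
struct foo_get_req_dump;	/* 'dump' request */
struct foo_get_list;		/* 'dump' reply wrapper, a linked list of foo_get_rsp */
struct foo_get_ntf;		/* 'notify' wrapper around foo_get_rsp */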
+
+def print_prototype(ri, direction, terminate=True, doc=None):
+ suffix = ';' if terminate else ''
+
+ fname = ri.op.render_name
+ if ri.op_mode == 'dump':
+ fname += '_dump'
+
+ args = ['struct ynl_sock *ys']
+ if 'request' in ri.op[ri.op_mode]:
+ args.append(f"{type_name(ri, direction)} *" + f"{direction_to_suffix[direction][1:]}")
+
+ ret = 'int'
+ if 'reply' in ri.op[ri.op_mode]:
+ ret = f"{type_name(ri, rdir(direction))} *"
+
+ ri.cw.write_func_prot(ret, fname, args, doc=doc, suffix=suffix)
+
+
+def print_req_prototype(ri):
+ print_prototype(ri, "request", doc=ri.op['doc'])
+
+
+def print_dump_prototype(ri):
+ print_prototype(ri, "request")
+
+
+def put_typol_fwd(cw, struct):
+ cw.p(f'extern struct ynl_policy_nest {struct.render_name}_nest;')
+
+
+def put_typol(cw, struct):
+ type_max = struct.attr_set.max_name
+ cw.block_start(line=f'struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =')
+
+ for _, arg in struct.member_list():
+ arg.attr_typol(cw)
+
+ cw.block_end(line=';')
+ cw.nl()
+
+ cw.block_start(line=f'struct ynl_policy_nest {struct.render_name}_nest =')
+ cw.p(f'.max_attr = {type_max},')
+ cw.p(f'.table = {struct.render_name}_policy,')
+ cw.block_end(line=';')
+ cw.nl()
+
+
+def put_req_nested(ri, struct):
+ func_args = ['struct nlmsghdr *nlh',
+ 'unsigned int attr_type',
+ f'{struct.ptr_name}obj']
+
+ ri.cw.write_func_prot('int', f'{struct.render_name}_put', func_args)
+ ri.cw.block_start()
+ ri.cw.write_func_lvar('struct nlattr *nest;')
+
+ ri.cw.p("nest = mnl_attr_nest_start(nlh, attr_type);")
+
+ for _, arg in struct.member_list():
+ arg.attr_put(ri, "obj")
+
+ ri.cw.p("mnl_attr_nest_end(nlh, nest);")
+
+ ri.cw.nl()
+ ri.cw.p('return 0;')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def _multi_parse(ri, struct, init_lines, local_vars):
+ if struct.nested:
+ iter_line = "mnl_attr_for_each_nested(attr, nested)"
+ else:
+ iter_line = "mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr))"
+
+ array_nests = set()
+ multi_attrs = set()
+ needs_parg = False
+ for arg, aspec in struct.member_list():
+ if aspec['type'] == 'array-nest':
+ local_vars.append(f'const struct nlattr *attr_{aspec.c_name};')
+ array_nests.add(arg)
+ if 'multi-attr' in aspec:
+ multi_attrs.add(arg)
+ needs_parg |= 'nested-attributes' in aspec
+ if array_nests or multi_attrs:
+ local_vars.append('int i;')
+ if needs_parg:
+ local_vars.append('struct ynl_parse_arg parg;')
+ init_lines.append('parg.ys = yarg->ys;')
+
+ ri.cw.block_start()
+ ri.cw.write_func_lvar(local_vars)
+
+ for line in init_lines:
+ ri.cw.p(line)
+ ri.cw.nl()
+
+ for arg in struct.inherited:
+ ri.cw.p(f'dst->{arg} = {arg};')
+
+ ri.cw.nl()
+ ri.cw.block_start(line=iter_line)
+
+ first = True
+ for _, arg in struct.member_list():
+ arg.attr_get(ri, 'dst', first=first)
+ first = False
+
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ for anest in sorted(array_nests):
+ aspec = struct[anest]
+
+ ri.cw.block_start(line=f"if (dst->n_{aspec.c_name})")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(dst->n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p('i = 0;')
+ ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
+ ri.cw.block_start(line=f"mnl_attr_for_each_nested(attr, attr_{aspec.c_name})")
+ ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
+ ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr, mnl_attr_get_type(attr)))")
+ ri.cw.p('return MNL_CB_ERROR;')
+ ri.cw.p('i++;')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ for anest in sorted(multi_attrs):
+ aspec = struct[anest]
+ ri.cw.block_start(line=f"if (dst->n_{aspec.c_name})")
+ ri.cw.p(f"dst->{aspec.c_name} = calloc(dst->n_{aspec.c_name}, sizeof(*dst->{aspec.c_name}));")
+ ri.cw.p('i = 0;')
+ if 'nested-attributes' in aspec:
+ ri.cw.p(f"parg.rsp_policy = &{aspec.nested_render_name}_nest;")
+ ri.cw.block_start(line=iter_line)
+ ri.cw.block_start(line=f"if (mnl_attr_get_type(attr) == {aspec.enum_name})")
+ if 'nested-attributes' in aspec:
+ ri.cw.p(f"parg.data = &dst->{aspec.c_name}[i];")
+ ri.cw.p(f"if ({aspec.nested_render_name}_parse(&parg, attr))")
+ ri.cw.p('return MNL_CB_ERROR;')
+ elif aspec['type'] in scalars:
+ t = aspec['type']
+ if t[0] == 's':
+ t = 'u' + t[1:]
+ ri.cw.p(f"dst->{aspec.c_name}[i] = mnl_attr_get_{t}(attr);")
+ else:
+ raise Exception('Nest parsing type not supported yet')
+ ri.cw.p('i++;')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+ if struct.nested:
+ ri.cw.p('return 0;')
+ else:
+ ri.cw.p('return MNL_CB_OK;')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def parse_rsp_nested(ri, struct):
+ func_args = ['struct ynl_parse_arg *yarg',
+ 'const struct nlattr *nested']
+ for arg in struct.inherited:
+ func_args.append('__u32 ' + arg)
+
+ local_vars = ['const struct nlattr *attr;',
+ f'{struct.ptr_name}dst = yarg->data;']
+ init_lines = []
+
+ ri.cw.write_func_prot('int', f'{struct.render_name}_parse', func_args)
+
+ _multi_parse(ri, struct, init_lines, local_vars)
+
+
+def parse_rsp_msg(ri, deref=False):
+ if 'reply' not in ri.op[ri.op_mode] and ri.op_mode != 'event':
+ return
+
+ func_args = ['const struct nlmsghdr *nlh',
+ 'void *data']
+
+ local_vars = [f'{type_name(ri, "reply", deref=deref)} *dst;',
+ 'struct ynl_parse_arg *yarg = data;',
+ 'const struct nlattr *attr;']
+ init_lines = ['dst = yarg->data;']
+
+ ri.cw.write_func_prot('int', f'{op_prefix(ri, "reply", deref=deref)}_parse', func_args)
+
+ _multi_parse(ri, ri.struct["reply"], init_lines, local_vars)
+
+
+def print_req(ri):
+ ret_ok = '0'
+ ret_err = '-1'
+ direction = "request"
+ local_vars = ['struct nlmsghdr *nlh;',
+ 'int len, err;']
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ret_ok = 'rsp'
+ ret_err = 'NULL'
+ local_vars += [f'{type_name(ri, rdir(direction))} *rsp;',
+ 'struct ynl_parse_arg yarg = { .ys = ys, };']
+
+ print_prototype(ri, direction, terminate=False)
+ ri.cw.block_start()
+ ri.cw.write_func_lvar(local_vars)
+
+ ri.cw.p(f"nlh = ynl_gemsg_start_req(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
+
+ ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.nl()
+ for _, attr in ri.struct["request"].member_list():
+ attr.attr_put(ri, "req")
+ ri.cw.nl()
+
+ ri.cw.p('err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);')
+ ri.cw.p('if (err < 0)')
+ ri.cw.p(f"return {ret_err};")
+ ri.cw.nl()
+ ri.cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
+ ri.cw.p('if (len < 0)')
+ ri.cw.p(f"return {ret_err};")
+ ri.cw.nl()
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p('rsp = calloc(1, sizeof(*rsp));')
+ ri.cw.p('yarg.data = rsp;')
+ ri.cw.nl()
+ ri.cw.p(f"err = {ri.nl.parse_cb_run(op_prefix(ri, 'reply') + '_parse', '&yarg', False)};")
+ ri.cw.p('if (err < 0)')
+ ri.cw.p('goto err_free;')
+ ri.cw.nl()
+
+ ri.cw.p('err = ynl_recv_ack(ys, err);')
+ ri.cw.p('if (err)')
+ ri.cw.p('goto err_free;')
+ ri.cw.nl()
+ ri.cw.p(f"return {ret_ok};")
+ ri.cw.nl()
+ ri.cw.p('err_free:')
+
+ if 'reply' in ri.op[ri.op_mode]:
+ ri.cw.p(f"{call_free(ri, rdir(direction), 'rsp')}")
+ ri.cw.p(f"return {ret_err};")
+ ri.cw.block_end()
+
+
+def print_dump(ri):
+ direction = "request"
+ print_prototype(ri, direction, terminate=False)
+ ri.cw.block_start()
+ local_vars = ['struct ynl_dump_state yds = {};',
+ 'struct nlmsghdr *nlh;',
+ 'int len, err;']
+
+ for var in local_vars:
+ ri.cw.p(f'{var}')
+ ri.cw.nl()
+
+ ri.cw.p('yds.ys = ys;')
+ ri.cw.p(f"yds.alloc_sz = sizeof({type_name(ri, rdir(direction))});")
+ ri.cw.p(f"yds.cb = {op_prefix(ri, 'reply', deref=True)}_parse;")
+ ri.cw.p(f"yds.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ ri.cw.nl()
+ ri.cw.p(f"nlh = ynl_gemsg_start_dump(ys, {ri.nl.get_family_id()}, {ri.op.enum_name}, 1);")
+
+ if "request" in ri.op[ri.op_mode]:
+ ri.cw.p(f"ys->req_policy = &{ri.struct['request'].render_name}_nest;")
+ ri.cw.nl()
+ for _, attr in ri.struct["request"].member_list():
+ attr.attr_put(ri, "req")
+ ri.cw.nl()
+
+ ri.cw.p('err = mnl_socket_sendto(ys->sock, nlh, nlh->nlmsg_len);')
+ ri.cw.p('if (err < 0)')
+ ri.cw.p('return NULL;')
+ ri.cw.nl()
+
+ ri.cw.block_start(line='do')
+ ri.cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
+ ri.cw.p('if (len < 0)')
+ ri.cw.p('goto free_list;')
+ ri.cw.nl()
+ ri.cw.p(f"err = {ri.nl.parse_cb_run('ynl_dump_trampoline', '&yds', False, indent=2)};")
+ ri.cw.p('if (err < 0)')
+ ri.cw.p('goto free_list;')
+ ri.cw.block_end(line='while (err > 0);')
+ ri.cw.nl()
+
+ ri.cw.p('return yds.first;')
+ ri.cw.nl()
+ ri.cw.p('free_list:')
+ ri.cw.p(call_free(ri, rdir(direction), 'yds.first'))
+ ri.cw.p('return NULL;')
+ ri.cw.block_end()
+
+
+def call_free(ri, direction, var):
+ return f"{op_prefix(ri, direction)}_free({var});"
+
+
+def free_arg_name(direction):
+ if direction:
+ return direction_to_suffix[direction][1:]
+ return 'obj'
+
+
+def print_free_prototype(ri, direction, suffix=';'):
+ name = op_prefix(ri, direction)
+ arg = free_arg_name(direction)
+ ri.cw.write_func_prot('void', f"{name}_free", [f"struct {name} *{arg}"], suffix=suffix)
+
+
+def _print_type(ri, direction, struct):
+ suffix = f'_{ri.type_name}{direction_to_suffix[direction]}'
+
+ if ri.op_mode == 'dump':
+ suffix += '_dump'
+
+ ri.cw.block_start(line=f"struct {ri.family['name']}{suffix}")
+
+ meta_started = False
+ for _, attr in struct.member_list():
+ for type_filter in ['len', 'bit']:
+ line = attr.presence_member(ri.ku_space, type_filter)
+ if line:
+ if not meta_started:
+ ri.cw.block_start(line="struct")
+ meta_started = True
+ ri.cw.p(line)
+ if meta_started:
+ ri.cw.block_end(line='_present;')
+ ri.cw.nl()
+
+ for arg in struct.inherited:
+ ri.cw.p(f"__u32 {arg};")
+
+ for _, attr in struct.member_list():
+ attr.struct_member(ri)
+
+ ri.cw.block_end(line=';')
+ ri.cw.nl()
+
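_print_type() emits one plain struct per message direction; a leading anonymous "_present" struct records which optional attributes were actually set or parsed. A sketch of the shape of its output for a hypothetical "foo" get request carrying an id (u32) and a name (string); the exact presence and member layout comes from the Type implementations earlier in this file, so the details below are assumed:

/* Sketch of _print_type() output (hypothetical family and attributes) */
struct foo_get_req {
	struct {
		__u32 id:1;	/* presence layout assumed, see Type.presence_member() */
		__u32 name_len;
	} _present;

	__u32 id;
	char *name;
};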
+
+def print_type(ri, direction):
+ _print_type(ri, direction, ri.struct[direction])
+
+
+def print_type_full(ri, struct):
+ _print_type(ri, "", struct)
+
+
+def print_type_helpers(ri, direction, deref=False):
+ print_free_prototype(ri, direction)
+
+ if ri.ku_space == 'user' and direction == 'request':
+ for _, attr in ri.struct[direction].member_list():
+ attr.setter(ri, ri.attr_set, direction, deref=deref)
+ ri.cw.nl()
+
+
+def print_req_type_helpers(ri):
+ print_type_helpers(ri, "request")
+
+
+def print_rsp_type_helpers(ri):
+ if 'reply' not in ri.op[ri.op_mode]:
+ return
+ print_type_helpers(ri, "reply")
+
+
+def print_parse_prototype(ri, direction, terminate=True):
+ suffix = "_rsp" if direction == "reply" else "_req"
+ term = ';' if terminate else ''
+
+ ri.cw.write_func_prot('void', f"{ri.op.render_name}{suffix}_parse",
+ ['const struct nlattr **tb',
+ f"struct {ri.op.render_name}{suffix} *req"],
+ suffix=term)
+
+
+def print_req_type(ri):
+ print_type(ri, "request")
+
+
+def print_rsp_type(ri):
+ if (ri.op_mode == 'do' or ri.op_mode == 'dump') and 'reply' in ri.op[ri.op_mode]:
+ direction = 'reply'
+ elif ri.op_mode == 'event':
+ direction = 'reply'
+ else:
+ return
+ print_type(ri, direction)
+
+
+def print_wrapped_type(ri):
+ ri.cw.block_start(line=f"{type_name(ri, 'reply')}")
+ if ri.op_mode == 'dump':
+ ri.cw.p(f"{type_name(ri, 'reply')} *next;")
+ elif ri.op_mode == 'notify' or ri.op_mode == 'event':
+ ri.cw.p('__u16 family;')
+ ri.cw.p('__u8 cmd;')
+ ri.cw.p(f"void (*free)({type_name(ri, 'reply')} *ntf);")
+ ri.cw.p(f"{type_name(ri, 'reply', deref=True)} obj __attribute__ ((aligned (8)));")
+ ri.cw.block_end(line=';')
+ ri.cw.nl()
+ print_free_prototype(ri, 'reply')
+ ri.cw.nl()
+
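For notifications and events, print_wrapped_type() wraps the plain reply type with bookkeeping fields; for the same hypothetical op the emitted wrapper looks like this (sketch):

/* Sketch of a notification wrapper emitted by print_wrapped_type() */
struct foo_get_ntf {
	__u16 family;
	__u8 cmd;
	void (*free)(struct foo_get_ntf *ntf);
	struct foo_get_rsp obj __attribute__ ((aligned (8)));
};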
+
+def _free_type_members_iter(ri, struct):
+ for _, attr in struct.member_list():
+ if attr.free_needs_iter():
+ ri.cw.p('unsigned int i;')
+ ri.cw.nl()
+ break
+
+
+def _free_type_members(ri, var, struct, ref=''):
+ for _, attr in struct.member_list():
+ attr.free(ri, var, ref)
+
+
+def _free_type(ri, direction, struct):
+ var = free_arg_name(direction)
+
+ print_free_prototype(ri, direction, suffix='')
+ ri.cw.block_start()
+ _free_type_members_iter(ri, struct)
+ _free_type_members(ri, var, struct)
+ if direction:
+ ri.cw.p(f'free({var});')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def free_rsp_nested(ri, struct):
+ _free_type(ri, "", struct)
+
+
+def print_rsp_free(ri):
+ if 'reply' not in ri.op[ri.op_mode]:
+ return
+ _free_type(ri, 'reply', ri.struct['reply'])
+
+
+def print_dump_type_free(ri):
+ sub_type = type_name(ri, 'reply')
+
+ print_free_prototype(ri, 'reply', suffix='')
+ ri.cw.block_start()
+ ri.cw.p(f"{sub_type} *next = rsp;")
+ ri.cw.nl()
+ ri.cw.block_start(line='while (next)')
+ _free_type_members_iter(ri, ri.struct['reply'])
+ ri.cw.p('rsp = next;')
+ ri.cw.p('next = rsp->next;')
+ ri.cw.nl()
+
+ _free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
+ ri.cw.p('free(rsp);')
+ ri.cw.block_end()
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def print_ntf_type_free(ri):
+ print_free_prototype(ri, 'reply', suffix='')
+ ri.cw.block_start()
+ _free_type_members_iter(ri, ri.struct['reply'])
+ _free_type_members(ri, 'rsp', ri.struct['reply'], ref='obj.')
+ ri.cw.p('free(rsp);')
+ ri.cw.block_end()
+ ri.cw.nl()
+
+
+def print_ntf_parse_prototype(family, cw, suffix=';'):
+ cw.write_func_prot('struct ynl_ntf_base_type *', f"{family['name']}_ntf_parse",
+ ['struct ynl_sock *ys'], suffix=suffix)
+
+
+def print_ntf_type_parse(family, cw, ku_mode):
+ print_ntf_parse_prototype(family, cw, suffix='')
+ cw.block_start()
+ cw.write_func_lvar(['struct genlmsghdr *genlh;',
+ 'struct nlmsghdr *nlh;',
+ 'struct ynl_parse_arg yarg = { .ys = ys, };',
+ 'struct ynl_ntf_base_type *rsp;',
+ 'int len, err;',
+ 'mnl_cb_t parse;'])
+ cw.p('len = mnl_socket_recvfrom(ys->sock, ys->rx_buf, MNL_SOCKET_BUFFER_SIZE);')
+ cw.p('if (len < (ssize_t)(sizeof(*nlh) + sizeof(*genlh)))')
+ cw.p('return NULL;')
+ cw.nl()
+ cw.p('nlh = (struct nlmsghdr *)ys->rx_buf;')
+ cw.p('genlh = mnl_nlmsg_get_payload(nlh);')
+ cw.nl()
+ cw.block_start(line='switch (genlh->cmd)')
+ for ntf_op in sorted(family.all_notify.keys()):
+ op = family.ops[ntf_op]
+ ri = RenderInfo(cw, family, ku_mode, op, ntf_op, "notify")
+ for ntf in op['notify']['cmds']:
+ cw.p(f"case {ntf.enum_name}:")
+ cw.p(f"rsp = calloc(1, sizeof({type_name(ri, 'notify')}));")
+ cw.p(f"parse = {op_prefix(ri, 'reply', deref=True)}_parse;")
+ cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ cw.p(f"rsp->free = (void *){op_prefix(ri, 'notify')}_free;")
+ cw.p('break;')
+ for op_name, op in family.ops.items():
+ if 'event' not in op:
+ continue
+ ri = RenderInfo(cw, family, ku_mode, op, op_name, "event")
+ cw.p(f"case {op.enum_name}:")
+ cw.p(f"rsp = calloc(1, sizeof({type_name(ri, 'event')}));")
+ cw.p(f"parse = {op_prefix(ri, 'reply', deref=True)}_parse;")
+ cw.p(f"yarg.rsp_policy = &{ri.struct['reply'].render_name}_nest;")
+ cw.p(f"rsp->free = (void *){op_prefix(ri, 'notify')}_free;")
+ cw.p('break;')
+ cw.p('default:')
+ cw.p('ynl_error_unknown_notification(ys, genlh->cmd);')
+ cw.p('return NULL;')
+ cw.block_end()
+ cw.nl()
+ cw.p('yarg.data = rsp->data;')
+ cw.nl()
+ cw.p(f"err = {cw.nlib.parse_cb_run('parse', '&yarg', True)};")
+ cw.p('if (err < 0)')
+ cw.p('goto err_free;')
+ cw.nl()
+ cw.p('rsp->family = nlh->nlmsg_type;')
+ cw.p('rsp->cmd = genlh->cmd;')
+ cw.p('return rsp;')
+ cw.nl()
+ cw.p('err_free:')
+ cw.p('free(rsp);')
+ cw.p('return NULL;')
+ cw.block_end()
+ cw.nl()
+
+
+def print_req_policy_fwd(cw, struct, ri=None, terminate=True):
+ if terminate and ri and kernel_can_gen_family_struct(struct.family):
+ return
+
+ if terminate:
+ prefix = 'extern '
+ else:
+ if kernel_can_gen_family_struct(struct.family) and ri:
+ prefix = 'static '
+ else:
+ prefix = ''
+
+ suffix = ';' if terminate else ' = {'
+
+ max_attr = struct.attr_max_val
+ if ri:
+ name = ri.op.render_name
+ if ri.op.dual_policy:
+ name += '_' + ri.op_mode
+ else:
+ name = struct.render_name
+ cw.p(f"{prefix}const struct nla_policy {name}_nl_policy[{max_attr.enum_name} + 1]{suffix}")
+
+
+def print_req_policy(cw, struct, ri=None):
+ print_req_policy_fwd(cw, struct, ri=ri, terminate=False)
+ for _, arg in struct.member_list():
+ arg.attr_policy(cw)
+ cw.p("};")
+
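In kernel mode this renders a netlink policy per request, bounded by the highest attribute actually used (struct.attr_max_val) rather than the attribute-set MAX. Roughly, for a hypothetical op; the per-attribute initializer comes from attr_policy() callbacks not shown here, so the NLA_U32 entry is only an assumed example:

/* Sketch of a rendered request policy (hypothetical family, op and attribute) */
static const struct nla_policy foo_get_nl_policy[FOO_A_ID + 1] = {
	[FOO_A_ID] = { .type = NLA_U32, },	/* entry format assumed */
};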
+
+def kernel_can_gen_family_struct(family):
+ return family.proto == 'genetlink'
+
+
+def print_kernel_op_table_fwd(family, cw, terminate):
+ exported = not kernel_can_gen_family_struct(family)
+
+ if not terminate or exported:
+ cw.p(f"/* Ops table for {family.name} */")
+
+ pol_to_struct = {'global': 'genl_small_ops',
+ 'per-op': 'genl_ops',
+ 'split': 'genl_split_ops'}
+ struct_type = pol_to_struct[family.kernel_policy]
+
+ if not exported:
+ cnt = ""
+ elif family.kernel_policy == 'split':
+ cnt = 0
+ for op in family.ops.values():
+ if 'do' in op:
+ cnt += 1
+ if 'dump' in op:
+ cnt += 1
+ else:
+ cnt = len(family.ops)
+
+ qual = 'static const' if not exported else 'const'
+ line = f"{qual} struct {struct_type} {family.name}_nl_ops[{cnt}]"
+ if terminate:
+ cw.p(f"extern {line};")
+ else:
+ cw.block_start(line=line + ' =')
+
+ if not terminate:
+ return
+
+ cw.nl()
+ for name in family.hooks['pre']['do']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['const struct genl_split_ops *ops',
+ 'struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+ for name in family.hooks['post']['do']['list']:
+ cw.write_func_prot('void', c_lower(name),
+ ['const struct genl_split_ops *ops',
+ 'struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+ for name in family.hooks['pre']['dump']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['struct netlink_callback *cb'], suffix=';')
+ for name in family.hooks['post']['dump']['list']:
+ cw.write_func_prot('int', c_lower(name),
+ ['struct netlink_callback *cb'], suffix=';')
+
+ cw.nl()
+
+ for op_name, op in family.ops.items():
+ if op.is_async:
+ continue
+
+ if 'do' in op:
+ name = c_lower(f"{family.name}-nl-{op_name}-doit")
+ cw.write_func_prot('int', name,
+ ['struct sk_buff *skb', 'struct genl_info *info'], suffix=';')
+
+ if 'dump' in op:
+ name = c_lower(f"{family.name}-nl-{op_name}-dumpit")
+ cw.write_func_prot('int', name,
+ ['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';')
+ cw.nl()
+
+
+def print_kernel_op_table_hdr(family, cw):
+ print_kernel_op_table_fwd(family, cw, terminate=True)
+
+
+def print_kernel_op_table(family, cw):
+ print_kernel_op_table_fwd(family, cw, terminate=False)
+ if family.kernel_policy == 'global' or family.kernel_policy == 'per-op':
+ for op_name, op in family.ops.items():
+ if op.is_async:
+ continue
+
+ cw.block_start()
+ members = [('cmd', op.enum_name)]
+ if 'dont-validate' in op:
+ members.append(('validate',
+ ' | '.join([c_upper('genl-dont-validate-' + x)
+ for x in op['dont-validate']])), )
+ for op_mode in ['do', 'dump']:
+ if op_mode in op:
+ name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
+ members.append((op_mode + 'it', name))
+ if family.kernel_policy == 'per-op':
+ struct = Struct(family, op['attribute-set'],
+ type_list=op['do']['request']['attributes'])
+
+ name = c_lower(f"{family.name}-{op_name}-nl-policy")
+ members.append(('policy', name))
+ members.append(('maxattr', struct.attr_max_val.enum_name))
+ if 'flags' in op:
+ members.append(('flags', ' | '.join([c_upper('genl-' + x) for x in op['flags']])))
+ cw.write_struct_init(members)
+ cw.block_end(line=',')
+ elif family.kernel_policy == 'split':
+ cb_names = {'do': {'pre': 'pre_doit', 'post': 'post_doit'},
+ 'dump': {'pre': 'start', 'post': 'done'}}
+
+ for op_name, op in family.ops.items():
+ for op_mode in ['do', 'dump']:
+ if op.is_async or op_mode not in op:
+ continue
+
+ cw.block_start()
+ members = [('cmd', op.enum_name)]
+ if 'dont-validate' in op:
+ members.append(('validate',
+ ' | '.join([c_upper('genl-dont-validate-' + x)
+ for x in op['dont-validate']])), )
+ name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it")
+ if 'pre' in op[op_mode]:
+ members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre'])))
+ members.append((op_mode + 'it', name))
+ if 'post' in op[op_mode]:
+ members.append((cb_names[op_mode]['post'], c_lower(op[op_mode]['post'])))
+ if 'request' in op[op_mode]:
+ struct = Struct(family, op['attribute-set'],
+ type_list=op[op_mode]['request']['attributes'])
+
+ if op.dual_policy:
+ name = c_lower(f"{family.name}-{op_name}-{op_mode}-nl-policy")
+ else:
+ name = c_lower(f"{family.name}-{op_name}-nl-policy")
+ members.append(('policy', name))
+ members.append(('maxattr', struct.attr_max_val.enum_name))
+ flags = (op['flags'] if 'flags' in op else []) + ['cmd-cap-' + op_mode]
+ members.append(('flags', ' | '.join([c_upper('genl-' + x) for x in flags])))
+ cw.write_struct_init(members)
+ cw.block_end(line=',')
+
+ cw.block_end(line=';')
+ cw.nl()
+
+
+def print_kernel_mcgrp_hdr(family, cw):
+ if not family.mcgrps['list']:
+ return
+
+ cw.block_start('enum')
+ for grp in family.mcgrps['list']:
+ grp_id = c_upper(f"{family.name}-nlgrp-{grp['name']},")
+ cw.p(grp_id)
+ cw.block_end(';')
+ cw.nl()
+
+
+def print_kernel_mcgrp_src(family, cw):
+ if not family.mcgrps['list']:
+ return
+
+ cw.block_start('static const struct genl_multicast_group ' + family.name + '_nl_mcgrps[] =')
+ for grp in family.mcgrps['list']:
+ name = grp['name']
+ grp_id = c_upper(f"{family.name}-nlgrp-{name}")
+ cw.p('[' + grp_id + '] = { "' + name + '", },')
+ cw.block_end(';')
+ cw.nl()
+
+
+def print_kernel_family_struct_hdr(family, cw):
+ if not kernel_can_gen_family_struct(family):
+ return
+
+ cw.p(f"extern struct genl_family {family.name}_nl_family;")
+ cw.nl()
+
+
+def print_kernel_family_struct_src(family, cw):
+ if not kernel_can_gen_family_struct(family):
+ return
+
+ cw.block_start(f"struct genl_family {family.name}_nl_family __ro_after_init =")
+ cw.p('.name\t\t= ' + family.fam_key + ',')
+ cw.p('.version\t= ' + family.ver_key + ',')
+ cw.p('.netnsok\t= true,')
+ cw.p('.parallel_ops\t= true,')
+ cw.p('.module\t\t= THIS_MODULE,')
+ if family.kernel_policy == 'per-op':
+ cw.p(f'.ops\t\t= {family.name}_nl_ops,')
+ cw.p(f'.n_ops\t\t= ARRAY_SIZE({family.name}_nl_ops),')
+ elif family.kernel_policy == 'split':
+ cw.p(f'.split_ops\t= {family.name}_nl_ops,')
+ cw.p(f'.n_split_ops\t= ARRAY_SIZE({family.name}_nl_ops),')
+ if family.mcgrps['list']:
+ cw.p(f'.mcgrps\t\t= {family.name}_nl_mcgrps,')
+ cw.p(f'.n_mcgrps\t= ARRAY_SIZE({family.name}_nl_mcgrps),')
+ cw.block_end(';')
+
+
+def uapi_enum_start(family, cw, obj, ckey='', enum_name='enum-name'):
+ start_line = 'enum'
+ if enum_name in obj:
+ if obj[enum_name]:
+ start_line = 'enum ' + c_lower(obj[enum_name])
+ elif ckey and ckey in obj:
+ start_line = 'enum ' + family.name + '_' + c_lower(obj[ckey])
+ cw.block_start(line=start_line)
+
+
+def render_uapi(family, cw):
+ hdr_prot = f"_UAPI_LINUX_{family.name.upper()}_H"
+ cw.p('#ifndef ' + hdr_prot)
+ cw.p('#define ' + hdr_prot)
+ cw.nl()
+
+ defines = [(family.fam_key, family["name"]),
+ (family.ver_key, family.get('version', 1))]
+ cw.writes_defines(defines)
+ cw.nl()
+
+ defines = []
+ for const in family['definitions']:
+ if const['type'] != 'const':
+ cw.writes_defines(defines)
+ defines = []
+ cw.nl()
+
+ # Write kdoc for enum and flags (one day maybe also structs)
+ if const['type'] == 'enum' or const['type'] == 'flags':
+ enum = family.consts[const['name']]
+
+ if enum.has_doc():
+ cw.p('/**')
+ doc = ''
+ if 'doc' in enum:
+ doc = ' - ' + enum['doc']
+ cw.write_doc_line(enum.enum_name + doc)
+ for entry in enum.entries.values():
+ if entry.has_doc():
+ doc = '@' + entry.c_name + ': ' + entry['doc']
+ cw.write_doc_line(doc)
+ cw.p(' */')
+
+ uapi_enum_start(family, cw, const, 'name')
+ name_pfx = const.get('name-prefix', f"{family.name}-{const['name']}-")
+ for entry in enum.entries.values():
+ suffix = ','
+ if entry.value_change:
+ suffix = f" = {entry.user_value()}" + suffix
+ cw.p(entry.c_name + suffix)
+
+ if const.get('render-max', False):
+ cw.nl()
+ if const['type'] == 'flags':
+ max_name = c_upper(name_pfx + 'mask')
+ max_val = f' = {enum.get_mask()},'
+ cw.p(max_name + max_val)
+ else:
+ max_name = c_upper(name_pfx + 'max')
+ cw.p('__' + max_name + ',')
+ cw.p(max_name + ' = (__' + max_name + ' - 1)')
+ cw.block_end(line=';')
+ cw.nl()
+ elif const['type'] == 'const':
+ defines.append([c_upper(family.get('c-define-name',
+ f"{family.name}-{const['name']}")),
+ const['value']])
+
+ if defines:
+ cw.writes_defines(defines)
+ cw.nl()
+
+ max_by_define = family.get('max-by-define', False)
+
+ for _, attr_set in family.attr_sets.items():
+ if attr_set.subset_of:
+ continue
+
+ cnt_name = c_upper(family.get('attr-cnt-name', f"__{attr_set.name_prefix}MAX"))
+ max_value = f"({cnt_name} - 1)"
+
+ val = 0
+ uapi_enum_start(family, cw, attr_set.yaml, 'enum-name')
+ for _, attr in attr_set.items():
+ suffix = ','
+ if attr.value != val:
+ suffix = f" = {attr.value},"
+ val = attr.value
+ val += 1
+ cw.p(attr.enum_name + suffix)
+ cw.nl()
+ cw.p(cnt_name + ('' if max_by_define else ','))
+ if not max_by_define:
+ cw.p(f"{attr_set.max_name} = {max_value}")
+ cw.block_end(line=';')
+ if max_by_define:
+ cw.p(f"#define {attr_set.max_name} {max_value}")
+ cw.nl()
+
+ # Commands
+ separate_ntf = 'async-prefix' in family['operations']
+
+ max_name = c_upper(family.get('cmd-max-name', f"{family.op_prefix}MAX"))
+ cnt_name = c_upper(family.get('cmd-cnt-name', f"__{family.op_prefix}MAX"))
+ max_value = f"({cnt_name} - 1)"
+
+ uapi_enum_start(family, cw, family['operations'], 'enum-name')
+ val = 0
+ for op in family.msgs.values():
+ if separate_ntf and ('notify' in op or 'event' in op):
+ continue
+
+ suffix = ','
+ if op.value != val:
+ suffix = f" = {op.value},"
+ val = op.value
+ cw.p(op.enum_name + suffix)
+ val += 1
+ cw.nl()
+ cw.p(cnt_name + ('' if max_by_define else ','))
+ if not max_by_define:
+ cw.p(f"{max_name} = {max_value}")
+ cw.block_end(line=';')
+ if max_by_define:
+ cw.p(f"#define {max_name} {max_value}")
+ cw.nl()
+
+ if separate_ntf:
+ uapi_enum_start(family, cw, family['operations'], enum_name='async-enum')
+ for op in family.msgs.values():
+ if separate_ntf and not ('notify' in op or 'event' in op):
+ continue
+
+ suffix = ','
+ if 'value' in op:
+ suffix = f" = {op['value']},"
+ cw.p(op.enum_name + suffix)
+ cw.block_end(line=';')
+ cw.nl()
+
+ # Multicast
+ defines = []
+ for grp in family.mcgrps['list']:
+ name = grp['name']
+ defines.append([c_upper(grp.get('c-define-name', f"{family.name}-mcgrp-{name}")),
+ f'{name}'])
+ cw.nl()
+ if defines:
+ cw.writes_defines(defines)
+ cw.nl()
+
+ cw.p(f'#endif /* {hdr_prot} */')
+
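For a hypothetical family "foo" with a single attribute and a single command, render_uapi() produces a uapi header roughly shaped like the sketch below (not verbatim output; names and values follow the spec and its optional name prefixes):

/* Sketch of a uapi header as rendered for a hypothetical "foo" family */
#ifndef _UAPI_LINUX_FOO_H
#define _UAPI_LINUX_FOO_H

#define FOO_FAMILY_NAME		"foo"
#define FOO_FAMILY_VERSION	1

enum {
	FOO_A_ID = 1,

	__FOO_A_MAX,
	FOO_A_MAX = (__FOO_A_MAX - 1)
};

enum {
	FOO_CMD_GET = 1,

	__FOO_CMD_MAX,
	FOO_CMD_MAX = (__FOO_CMD_MAX - 1)
};

#endif /* _UAPI_LINUX_FOO_H */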
+
+def find_kernel_root(full_path):
+ sub_path = ''
+ while True:
+ sub_path = os.path.join(os.path.basename(full_path), sub_path)
+ full_path = os.path.dirname(full_path)
+ maintainers = os.path.join(full_path, "MAINTAINERS")
+ if os.path.exists(maintainers):
+ return full_path, sub_path[:-1]
+
+
+def main():
+ parser = argparse.ArgumentParser(description='Netlink simple parsing generator')
+ parser.add_argument('--mode', dest='mode', type=str, required=True)
+ parser.add_argument('--spec', dest='spec', type=str, required=True)
+ parser.add_argument('--header', dest='header', action='store_true', default=None)
+ parser.add_argument('--source', dest='header', action='store_false')
+ parser.add_argument('--user-header', nargs='+', default=[])
+ parser.add_argument('-o', dest='out_file', type=str)
+ args = parser.parse_args()
+
+ out_file = open(args.out_file, 'w+') if args.out_file else os.sys.stdout
+
+ if args.header is None:
+ parser.error("--header or --source is required")
+
+ try:
+ parsed = Family(args.spec)
+ if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)':
+ print('Spec license:', parsed.license)
+ print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)')
+ os.sys.exit(1)
+ except yaml.YAMLError as exc:
+ print(exc)
+ os.sys.exit(1)
+ return
+
+ cw = CodeWriter(BaseNlLib(), out_file)
+
+ _, spec_kernel = find_kernel_root(args.spec)
+ if args.mode == 'uapi' or args.header:
+ cw.p(f'/* SPDX-License-Identifier: {parsed.license} */')
+ else:
+ cw.p(f'// SPDX-License-Identifier: {parsed.license}')
+ cw.p("/* Do not edit directly, auto-generated from: */")
+ cw.p(f"/*\t{spec_kernel} */")
+ cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */")
+ cw.nl()
+
+ if args.mode == 'uapi':
+ render_uapi(parsed, cw)
+ return
+
+ hdr_prot = f"_LINUX_{parsed.name.upper()}_GEN_H"
+ if args.header:
+ cw.p('#ifndef ' + hdr_prot)
+ cw.p('#define ' + hdr_prot)
+ cw.nl()
+
+ if args.mode == 'kernel':
+ cw.p('#include <net/netlink.h>')
+ cw.p('#include <net/genetlink.h>')
+ cw.nl()
+ if not args.header:
+ if args.out_file:
+ cw.p(f'#include "{os.path.basename(args.out_file[:-2])}.h"')
+ cw.nl()
+ headers = [parsed.uapi_header]
+ for definition in parsed['definitions']:
+ if 'header' in definition:
+ headers.append(definition['header'])
+ for one in headers:
+ cw.p(f"#include <{one}>")
+ cw.nl()
+
+ if args.mode == "user":
+ if not args.header:
+ cw.p("#include <stdlib.h>")
+ cw.p("#include <stdio.h>")
+ cw.p("#include <string.h>")
+ cw.p("#include <libmnl/libmnl.h>")
+ cw.p("#include <linux/genetlink.h>")
+ cw.nl()
+ for one in args.user_header:
+ cw.p(f'#include "{one}"')
+ else:
+ cw.p('struct ynl_sock;')
+ cw.nl()
+
+ if args.mode == "kernel":
+ if args.header:
+ for _, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ cw.p('/* Common nested types */')
+ break
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ print_req_policy_fwd(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy == 'global':
+ cw.p(f"/* Global operation policy for {parsed.name} */")
+
+ struct = Struct(parsed, parsed.global_policy_set, type_list=parsed.global_policy)
+ print_req_policy_fwd(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy in {'per-op', 'split'}:
+ for op_name, op in parsed.ops.items():
+ if 'do' in op and 'event' not in op:
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ print_req_policy_fwd(cw, ri.struct['request'], ri=ri)
+ cw.nl()
+
+ print_kernel_op_table_hdr(parsed, cw)
+ print_kernel_mcgrp_hdr(parsed, cw)
+ print_kernel_family_struct_hdr(parsed, cw)
+ else:
+ for _, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ cw.p('/* Common nested types */')
+ break
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ if struct.request:
+ print_req_policy(cw, struct)
+ cw.nl()
+
+ if parsed.kernel_policy == 'global':
+ cw.p(f"/* Global operation policy for {parsed.name} */")
+
+ struct = Struct(parsed, parsed.global_policy_set, type_list=parsed.global_policy)
+ print_req_policy(cw, struct)
+ cw.nl()
+
+ for op_name, op in parsed.ops.items():
+ if parsed.kernel_policy in {'per-op', 'split'}:
+ for op_mode in ['do', 'dump']:
+ if op_mode in op and 'request' in op[op_mode]:
+ cw.p(f"/* {op.enum_name} - {op_mode} */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, op_mode)
+ print_req_policy(cw, ri.struct['request'], ri=ri)
+ cw.nl()
+
+ print_kernel_op_table(parsed, cw)
+ print_kernel_mcgrp_src(parsed, cw)
+ print_kernel_family_struct_src(parsed, cw)
+
+ if args.mode == "user":
+ has_ntf = False
+ if args.header:
+ cw.p('/* Common nested types */')
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ ri = RenderInfo(cw, parsed, args.mode, "", "", "", attr_set)
+ print_type_full(ri, struct)
+
+ for op_name, op in parsed.ops.items():
+ cw.p(f"/* ============== {op.enum_name} ============== */")
+
+ if 'do' in op and 'event' not in op:
+ cw.p(f"/* {op.enum_name} - do */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ print_req_type(ri)
+ print_req_type_helpers(ri)
+ cw.nl()
+ print_rsp_type(ri)
+ print_rsp_type_helpers(ri)
+ cw.nl()
+ print_req_prototype(ri)
+ cw.nl()
+
+ if 'dump' in op:
+ cw.p(f"/* {op.enum_name} - dump */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'dump')
+ if 'request' in op['dump']:
+ print_req_type(ri)
+ print_req_type_helpers(ri)
+ if not ri.type_consistent:
+ print_rsp_type(ri)
+ print_wrapped_type(ri)
+ print_dump_prototype(ri)
+ cw.nl()
+
+ if 'notify' in op:
+ cw.p(f"/* {op.enum_name} - notify */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'notify')
+ has_ntf = True
+ if not ri.type_consistent:
+ raise Exception('Only notifications with consistent types supported')
+ print_wrapped_type(ri)
+
+ if 'event' in op:
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'event')
+ cw.p(f"/* {op.enum_name} - event */")
+ print_rsp_type(ri)
+ cw.nl()
+ print_wrapped_type(ri)
+
+ if has_ntf:
+ cw.p('/* --------------- Common notification parsing --------------- */')
+ print_ntf_parse_prototype(parsed, cw)
+ cw.nl()
+ else:
+ cw.p('/* Policies */')
+ for name, _ in parsed.attr_sets.items():
+ struct = Struct(parsed, name)
+ put_typol_fwd(cw, struct)
+ cw.nl()
+
+ for name, _ in parsed.attr_sets.items():
+ struct = Struct(parsed, name)
+ put_typol(cw, struct)
+
+ cw.p('/* Common nested types */')
+ for attr_set, struct in sorted(parsed.pure_nested_structs.items()):
+ ri = RenderInfo(cw, parsed, args.mode, "", "", "", attr_set)
+
+ free_rsp_nested(ri, struct)
+ if struct.request:
+ put_req_nested(ri, struct)
+ if struct.reply:
+ parse_rsp_nested(ri, struct)
+
+ for op_name, op in parsed.ops.items():
+ cw.p(f"/* ============== {op.enum_name} ============== */")
+ if 'do' in op and 'event' not in op:
+ cw.p(f"/* {op.enum_name} - do */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ print_rsp_free(ri)
+ parse_rsp_msg(ri)
+ print_req(ri)
+ cw.nl()
+
+ if 'dump' in op:
+ cw.p(f"/* {op.enum_name} - dump */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "dump")
+ if not ri.type_consistent:
+ parse_rsp_msg(ri, deref=True)
+ print_dump_type_free(ri)
+ print_dump(ri)
+ cw.nl()
+
+ if 'notify' in op:
+ cw.p(f"/* {op.enum_name} - notify */")
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, 'notify')
+ has_ntf = True
+ if not ri.type_consistent:
+ raise Exception('Only notifications with consistent types supported')
+ print_ntf_type_free(ri)
+
+ if 'event' in op:
+ cw.p(f"/* {op.enum_name} - event */")
+ has_ntf = True
+
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "do")
+ parse_rsp_msg(ri)
+
+ ri = RenderInfo(cw, parsed, args.mode, op, op_name, "event")
+ print_ntf_type_free(ri)
+
+ if has_ntf:
+ cw.p('/* --------------- Common notification parsing --------------- */')
+ print_ntf_type_parse(parsed, cw, args.mode)
+
+ if args.header:
+ cw.p(f'#endif /* {hdr_prot} */')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/net/ynl/ynl-regen.sh b/tools/net/ynl/ynl-regen.sh
new file mode 100755
index 000000000000..74f5de1c2399
--- /dev/null
+++ b/tools/net/ynl/ynl-regen.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+TOOL=$(dirname $(realpath $0))/ynl-gen-c.py
+
+force=
+
+while [ ! -z "$1" ]; do
+ case "$1" in
+ -f ) force=yes; shift ;;
+ * ) echo "Unrecognized option '$1'"; exit 1 ;;
+ esac
+done
+
+KDIR=$(dirname $(dirname $(dirname $(dirname $(realpath $0)))))
+
+files=$(git grep --files-with-matches '^/\* YNL-GEN \(kernel\|uapi\)')
+for f in $files; do
+ # params: 0 1 2 3
+ # $YAML YNL-GEN kernel $mode
+ params=( $(git grep -B1 -h '/\* YNL-GEN' $f | sed 's@/\*\(.*\)\*/@\1@') )
+
+ if [ $f -nt ${params[0]} -a -z "$force" ]; then
+ echo -e "\tSKIP $f"
+ continue
+ fi
+
+ echo -e "\tGEN ${params[2]}\t$f"
+ $TOOL --mode ${params[2]} --${params[3]} --spec $KDIR/${params[0]} -o $f
+done
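The script keys off the marker comments that ynl-gen-c.py writes at the top of every generated file (see main() above): the spec path on one line and the YNL-GEN mode and kind on the next. For example (the spec path here is only illustrative):

/* Do not edit directly, auto-generated from: */
/*	Documentation/netlink/specs/foo.yaml */
/* YNL-GEN kernel source */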
diff --git a/tools/nfsd/inject_fault.sh b/tools/nfsd/inject_fault.sh
deleted file mode 100755
index 10ceee64a09a..000000000000
--- a/tools/nfsd/inject_fault.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# Copyright (c) 2011 Bryan Schumaker <bjschuma@netapp.com>
-#
-# Script for easier NFSD fault injection
-
-# Check that debugfs has been mounted
-DEBUGFS=`cat /proc/mounts | grep debugfs`
-if [ "$DEBUGFS" == "" ]; then
- echo "debugfs does not appear to be mounted!"
- echo "Please mount debugfs and try again"
- exit 1
-fi
-
-# Check that the fault injection directory exists
-DEBUGDIR=`echo $DEBUGFS | awk '{print $2}'`/nfsd
-if [ ! -d "$DEBUGDIR" ]; then
- echo "$DEBUGDIR does not exist"
- echo "Check that your .config selects CONFIG_NFSD_FAULT_INJECTION"
- exit 1
-fi
-
-function help()
-{
- echo "Usage $0 injection_type [count]"
- echo ""
- echo "Injection types are:"
- ls $DEBUGDIR
- exit 1
-}
-
-if [ $# == 0 ]; then
- help
-elif [ ! -f $DEBUGDIR/$1 ]; then
- help
-elif [ $# != 2 ]; then
- COUNT=0
-else
- COUNT=$2
-fi
-
-BEFORE=`mktemp`
-AFTER=`mktemp`
-dmesg > $BEFORE
-echo $COUNT > $DEBUGDIR/$1
-dmesg > $AFTER
-# Capture lines that only exist in the $AFTER file
-diff $BEFORE $AFTER | grep ">"
-rm -f $BEFORE $AFTER
diff --git a/tools/objtool/.gitignore b/tools/objtool/.gitignore
index 45cefda24c7b..4faa4dd72f35 100644
--- a/tools/objtool/.gitignore
+++ b/tools/objtool/.gitignore
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
arch/x86/lib/inat-tables.c
-objtool
+/objtool
fixdep
+libsubcmd/
diff --git a/tools/objtool/Build b/tools/objtool/Build
index 66f44f5cd2a6..a3cdf8af6635 100644
--- a/tools/objtool/Build
+++ b/tools/objtool/Build
@@ -1,20 +1,21 @@
objtool-y += arch/$(SRCARCH)/
-objtool-y += builtin-check.o
-objtool-y += builtin-orc.o
+
+objtool-y += weak.o
+
objtool-y += check.o
-objtool-y += orc_gen.o
-objtool-y += orc_dump.o
-objtool-y += elf.o
objtool-y += special.o
+objtool-y += builtin-check.o
+objtool-y += elf.o
objtool-y += objtool.o
+objtool-$(BUILD_ORC) += orc_gen.o
+objtool-$(BUILD_ORC) += orc_dump.o
+
objtool-y += libstring.o
objtool-y += libctype.o
objtool-y += str_error_r.o
objtool-y += librbtree.o
-CFLAGS += -I$(srctree)/tools/lib
-
$(OUTPUT)libstring.o: ../lib/string.c FORCE
$(call rule_mkdir)
$(call if_changed_dep,cc_o_c)
diff --git a/tools/objtool/Documentation/stack-validation.txt b/tools/objtool/Documentation/objtool.txt
index de094670050b..744db4218e7a 100644
--- a/tools/objtool/Documentation/stack-validation.txt
+++ b/tools/objtool/Documentation/objtool.txt
@@ -1,15 +1,103 @@
-Compile-time stack metadata validation
-======================================
+Objtool
+=======
+The kernel CONFIG_OBJTOOL option enables a host tool named 'objtool'
+which runs at compile time. It can do various validations and
+transformations on .o files.
-Overview
+Objtool has become an integral part of the x86-64 kernel toolchain. The
+kernel depends on it for a variety of security and performance features
+(and other types of features as well).
+
+
+Features
--------
-The kernel CONFIG_STACK_VALIDATION option enables a host tool named
-objtool which runs at compile time. It has a "check" subcommand which
-analyzes every .o file and ensures the validity of its stack metadata.
-It enforces a set of rules on asm code and C inline assembly code so
-that stack traces can be reliable.
+Objtool has the following features:
+
+- Stack unwinding metadata validation -- useful for helping to ensure
+ stack traces are reliable for live patching
+
+- ORC unwinder metadata generation -- a faster and more precise
+ alternative to frame pointer based unwinding
+
+- Retpoline validation -- ensures that all indirect calls go through
+ retpoline thunks, for Spectre v2 mitigations
+
+- Retpoline call site annotation -- annotates all retpoline thunk call
+ sites, enabling the kernel to patch them inline, to prevent "thunk
+ funneling" for both security and performance reasons
+
+- Non-instrumentation validation -- validates non-instrumentable
+ ("noinstr") code rules, preventing instrumentation in low-level C
+ entry code
+
+- Static call annotation -- annotates static call sites, enabling the
+ kernel to implement inline static calls, a faster alternative to some
+ indirect branches
+
+- Uaccess validation -- validates uaccess rules for a proper
+ implementation of Supervisor Mode Access Protection (SMAP)
+
+- Straight Line Speculation validation -- validates certain SLS
+ mitigations
+
+- Indirect Branch Tracking validation -- validates Intel CET IBT rules
+ to ensure that all functions referenced by function pointers have
+ corresponding ENDBR instructions
+
+- Indirect Branch Tracking annotation -- annotates unused ENDBR
+ instruction sites, enabling the kernel to "seal" them (replace them
+ with NOPs) to further harden IBT
+
+- Function entry annotation -- annotates function entries, enabling
+ kernel function tracing
+
+- Other toolchain hacks which will go unmentioned at this time...
+
+Each feature can be enabled individually or in combination using the
+objtool cmdline.
+
+
+Objects
+-------
+
+Typically, objtool runs on every translation unit (TU, aka ".o file") in
+the kernel. If a TU is part of a kernel module, the '--module' option
+is added.
+
+However:
+
+- If noinstr validation is enabled, it also runs on vmlinux.o, with all
+ options removed and '--noinstr' added.
+
+- If IBT or LTO is enabled, it doesn't run on TUs at all. Instead it
+ runs on vmlinux.o and linked modules, with all options.
+
+In summary:
+
+ A) Legacy mode:
+ TU: objtool [--module] <options>
+ vmlinux: N/A
+ module: N/A
+
+ B) CONFIG_NOINSTR_VALIDATION=y && !(CONFIG_X86_KERNEL_IBT=y || CONFIG_LTO=y):
+ TU: objtool [--module] <options> // no --noinstr
+ vmlinux: objtool --noinstr // other options removed
+ module: N/A
+
+ C) CONFIG_X86_KERNEL_IBT=y || CONFIG_LTO=y:
+ TU: N/A
+ vmlinux: objtool --noinstr <options>
+ module: objtool --module --noinstr <options>
+
+
+Stack validation
+----------------
+
+Objtool's stack validation feature analyzes every .o file and ensures
+the validity of its stack metadata. It enforces a set of rules on asm
+code and C inline assembly code so that stack traces can be reliable.
For each function, it recursively follows all possible code paths and
validates the correct frame pointer state at each instruction.
@@ -20,14 +108,6 @@ alternative execution paths to a given instruction (or set of
instructions). Similarly, it knows how to follow switch statements, for
which gcc sometimes uses jump tables.
-(Objtool also has an 'orc generate' subcommand which generates debuginfo
-for the ORC unwinder. See Documentation/x86/orc-unwinder.rst in the
-kernel tree for more details.)
-
-
-Why do we need stack metadata validation?
------------------------------------------
-
Here are some of the benefits of validating stack metadata:
a) More reliable stack traces for frame pointer enabled kernels
@@ -101,7 +181,7 @@ b) ORC (Oops Rewind Capability) unwind table generation
band. So it doesn't affect runtime performance and it can be
reliable even when interrupts or exceptions are involved.
- For more details, see Documentation/x86/orc-unwinder.rst.
+ For more details, see Documentation/arch/x86/orc-unwinder.rst.
c) Higher live patching compatibility rate
@@ -113,9 +193,6 @@ c) Higher live patching compatibility rate
For more details, see the livepatch documentation in the Linux kernel
source tree at Documentation/livepatch/livepatch.rst.
-Rules
------
-
To achieve the validation, objtool enforces the following rules:
1. Each callable function must be annotated as such with the ELF
@@ -177,7 +254,8 @@ Another possible cause for errors in C code is if the Makefile removes
-fno-omit-frame-pointer or adds -fomit-frame-pointer to the gcc options.
Here are some examples of common warnings reported by objtool, what
-they mean, and suggestions for how to fix them.
+they mean, and suggestions for how to fix them. When in doubt, ping
+the objtool maintainers.
1. file.o: warning: objtool: func()+0x128: call without frame pointer save/setup
@@ -289,6 +367,57 @@ they mean, and suggestions for how to fix them.
might be corrupt due to a gcc bug. For more details, see:
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=70646
+9. file.o: warning: objtool: funcA() call to funcB() with UACCESS enabled
+
+ This means that an unexpected call to a non-whitelisted function exists
+ outside of arch-specific guards.
+ X86: SMAP (stac/clac): __uaccess_begin()/__uaccess_end()
+ ARM: PAN: uaccess_enable()/uaccess_disable()
+
+ These functions should be called to denote a minimal critical section around
+ access to __user variables. See also: https://lwn.net/Articles/517475/
+
+ The intention of the warning is to prevent funcB() from eventually calling
+ schedule(), which could leak the AC flags state and leave it incorrectly
+ restored.
+
+ It also helps verify that there are no unexpected calls to funcB() which may
+ access user space pages with protections against doing so disabled.
+
+ To fix, either:
+ 1) remove explicit calls to funcB() from funcA().
+ 2) add the correct guards before and after calls to low level functions like
+ __get_user_size()/__put_user_size() (a sketch of such a guarded access
+ follows this list of warnings).
+ 3) add funcB to the uaccess_safe_builtin whitelist in tools/objtool/check.c, if
+ funcB obviously does not call schedule() and is marked notrace (since
+ function tracing inserts additional calls, which is not obvious from the
+ sources).
+
+10. file.o: warning: func()+0x5c: stack layout conflict in alternatives
+
+ This means that in the use of the alternative() or ALTERNATIVE()
+ macro, the code paths have conflicting modifications to the stack.
+ The problem is that there is only one ORC unwind table, which means
+ that the ORC unwind entries must be consistent for all possible
+ instruction boundaries regardless of which code has been patched.
+ This limitation can be overcome by massaging the alternatives with
+ NOPs to shift the stack changes around so they no longer conflict.
+
+11. file.o: warning: unannotated intra-function call
+
+ This warning means that a direct call is done to a destination which
+ is not at the beginning of a function. If this is a legitimate call, you
+ can remove this warning by putting the ANNOTATE_INTRA_FUNCTION_CALL
+ directive right before the call.
+
+12. file.o: warning: func(): not an indirect call target
+
+ This means that objtool is running with --ibt and a function expected
+ to be an indirect call target is not. In particular, this happens for
+ init_module() or cleanup_module() if a module relies on these special
+ names and does not use module_init() / module_exit() macros to create
+ them.
+
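+As an illustration of the guards discussed in warning 9, a minimal sketch
+of a properly guarded user access, using the generic user_access_begin() /
+unsafe_get_user() / user_access_end() helpers (the function name and label
+are made up for the example), could look like:
+
+  int read_user_flag(int __user *uptr, int *flag)
+  {
+          int val;
+
+          if (!user_access_begin(uptr, sizeof(*uptr)))
+                  return -EFAULT;
+          /* no calls to non-whitelisted functions inside the guarded region */
+          unsafe_get_user(val, uptr, Efault);
+          user_access_end();
+
+          *flag = val;
+          return 0;
+
+  Efault:
+          user_access_end();
+          return -EFAULT;
+  }
+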
If the error doesn't seem to make sense, it could be a bug in objtool.
Feel free to ask the objtool maintainer for help.
@@ -315,3 +444,7 @@ ignore it:
OBJECT_FILES_NON_STANDARD := y
to the Makefile.
+
+NOTE: OBJECT_FILES_NON_STANDARD doesn't work for link time validation of
+vmlinux.o or a linked module. So it should only be used for files which
+aren't linked into vmlinux or a module.
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index f591c4d1b6fe..83b100c1e7f6 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -2,67 +2,92 @@
include ../scripts/Makefile.include
include ../scripts/Makefile.arch
-# always use the host compiler
-ifneq ($(LLVM),)
-HOSTAR ?= llvm-ar
-HOSTCC ?= clang
-HOSTLD ?= ld.lld
-else
-HOSTAR ?= ar
-HOSTCC ?= gcc
-HOSTLD ?= ld
-endif
-AR = $(HOSTAR)
-CC = $(HOSTCC)
-LD = $(HOSTLD)
-
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
srctree := $(patsubst %/,%,$(dir $(srctree)))
endif
-SUBCMD_SRCDIR = $(srctree)/tools/lib/subcmd/
-LIBSUBCMD_OUTPUT = $(if $(OUTPUT),$(OUTPUT),$(CURDIR)/)
-LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
+LIBSUBCMD_DIR = $(srctree)/tools/lib/subcmd/
+ifneq ($(OUTPUT),)
+ LIBSUBCMD_OUTPUT = $(abspath $(OUTPUT))/libsubcmd
+else
+ LIBSUBCMD_OUTPUT = $(CURDIR)/libsubcmd
+endif
+LIBSUBCMD = $(LIBSUBCMD_OUTPUT)/libsubcmd.a
OBJTOOL := $(OUTPUT)objtool
OBJTOOL_IN := $(OBJTOOL)-in.o
-LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
-LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
+LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
all: $(OBJTOOL)
INCLUDES := -I$(srctree)/tools/include \
-I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
- -I$(srctree)/tools/arch/$(SRCARCH)/include
-WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
-LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+ -I$(srctree)/tools/arch/$(SRCARCH)/include \
+ -I$(srctree)/tools/objtool/include \
+ -I$(srctree)/tools/objtool/arch/$(SRCARCH)/include \
+ -I$(LIBSUBCMD_OUTPUT)/include
+# Note: EXTRA_WARNINGS here was determined for CC and not HOSTCC; it
+# is passed here to match legacy behavior.
+WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed -Wno-nested-externs
+OBJTOOL_CFLAGS := -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+OBJTOOL_LDFLAGS := $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
# Allow old libelf to be used:
-elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
-CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
+elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(HOSTCC) $(OBJTOOL_CFLAGS) -x c -E - | grep elf_getshdr)
+OBJTOOL_CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
+
+# Always want host compilation.
+HOST_OVERRIDES := CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)"
AWK = awk
+MKDIR = mkdir
+
+ifeq ($(V),1)
+ Q =
+else
+ Q = @
+endif
+
+BUILD_ORC := n
+
+ifeq ($(SRCARCH),x86)
+ BUILD_ORC := y
+endif
+
+export BUILD_ORC
export srctree OUTPUT CFLAGS SRCARCH AWK
include $(srctree)/tools/build/Makefile.include
-$(OBJTOOL_IN): fixdep FORCE
- @$(MAKE) $(build)=objtool
+$(OBJTOOL_IN): fixdep $(LIBSUBCMD) FORCE
+ $(Q)$(CONFIG_SHELL) ./sync-check.sh
+ $(Q)$(MAKE) $(build)=objtool $(HOST_OVERRIDES) CFLAGS="$(OBJTOOL_CFLAGS)" \
+ LDFLAGS="$(OBJTOOL_LDFLAGS)"
+
$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
- @$(CONFIG_SHELL) ./sync-check.sh
- $(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
+ $(QUIET_LINK)$(HOSTCC) $(OBJTOOL_IN) $(OBJTOOL_LDFLAGS) -o $@
+
+
+$(LIBSUBCMD_OUTPUT):
+ $(Q)$(MKDIR) -p $@
+$(LIBSUBCMD): fixdep $(LIBSUBCMD_OUTPUT) FORCE
+ $(Q)$(MAKE) -C $(LIBSUBCMD_DIR) O=$(LIBSUBCMD_OUTPUT) \
+ DESTDIR=$(LIBSUBCMD_OUTPUT) prefix= subdir= \
+ $(HOST_OVERRIDES) EXTRA_CFLAGS="$(OBJTOOL_CFLAGS)" \
+ $@ install_headers
-$(LIBSUBCMD): fixdep FORCE
- $(Q)$(MAKE) -C $(SUBCMD_SRCDIR) OUTPUT=$(LIBSUBCMD_OUTPUT)
+$(LIBSUBCMD)-clean:
+ $(call QUIET_CLEAN, libsubcmd)
+ $(Q)$(RM) -r -- $(LIBSUBCMD_OUTPUT)
-clean:
+clean: $(LIBSUBCMD)-clean
$(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
$(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
- $(Q)$(RM) $(OUTPUT)arch/x86/inat-tables.c $(OUTPUT)fixdep
+ $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
FORCE:
diff --git a/tools/objtool/arch/powerpc/Build b/tools/objtool/arch/powerpc/Build
new file mode 100644
index 000000000000..d24d5636a5b8
--- /dev/null
+++ b/tools/objtool/arch/powerpc/Build
@@ -0,0 +1,2 @@
+objtool-y += decode.o
+objtool-y += special.o
diff --git a/tools/objtool/arch/powerpc/decode.c b/tools/objtool/arch/powerpc/decode.c
new file mode 100644
index 000000000000..53b55690f320
--- /dev/null
+++ b/tools/objtool/arch/powerpc/decode.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <objtool/check.h>
+#include <objtool/elf.h>
+#include <objtool/arch.h>
+#include <objtool/warn.h>
+#include <objtool/builtin.h>
+#include <objtool/endianness.h>
+
+int arch_ftrace_match(char *name)
+{
+ return !strcmp(name, "_mcount");
+}
+
+unsigned long arch_dest_reloc_offset(int addend)
+{
+ return addend;
+}
+
+bool arch_callee_saved_reg(unsigned char reg)
+{
+ return false;
+}
+
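+/*
+ * The hooks below are not (yet) implemented for powerpc; they simply bail
+ * out if objtool ever needs them.
+ */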
+int arch_decode_hint_reg(u8 sp_reg, int *base)
+{
+ exit(-1);
+}
+
+const char *arch_nop_insn(int len)
+{
+ exit(-1);
+}
+
+const char *arch_ret_insn(int len)
+{
+ exit(-1);
+}
+
+int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
+ unsigned long offset, unsigned int maxlen,
+ struct instruction *insn)
+{
+ unsigned int opcode;
+ enum insn_type typ;
+ unsigned long imm;
+ u32 ins;
+
+ ins = bswap_if_needed(file->elf, *(u32 *)(sec->data->d_buf + offset));
+ opcode = ins >> 26;
+ typ = INSN_OTHER;
+ imm = 0;
+
+ switch (opcode) {
+ case 18: /* b[l][a] */
+ if ((ins & 3) == 1) /* bl */
+ typ = INSN_CALL;
+
+ imm = ins & 0x3fffffc;
+ if (imm & 0x2000000)
+ imm -= 0x4000000;
+ break;
+ }
+
+ if (opcode == 1)
+ insn->len = 8;
+ else
+ insn->len = 4;
+
+ insn->type = typ;
+ insn->immediate = imm;
+
+ return 0;
+}
+
+unsigned long arch_jump_destination(struct instruction *insn)
+{
+ return insn->offset + insn->immediate;
+}
+
+bool arch_pc_relative_reloc(struct reloc *reloc)
+{
+ /*
+ * The powerpc build only allows certain relocation types, see
+ * relocs_check.sh, and none of those accepted are PC relative.
+ */
+ return false;
+}
+
+void arch_initial_func_cfi_state(struct cfi_init_state *state)
+{
+ int i;
+
+ for (i = 0; i < CFI_NUM_REGS; i++) {
+ state->regs[i].base = CFI_UNDEFINED;
+ state->regs[i].offset = 0;
+ }
+
+ /* initial CFA (call frame address) */
+ state->cfa.base = CFI_SP;
+ state->cfa.offset = 0;
+
+ /* initial LR (return address) */
+ state->regs[CFI_RA].base = CFI_CFA;
+ state->regs[CFI_RA].offset = 0;
+}
diff --git a/tools/objtool/arch/powerpc/include/arch/cfi_regs.h b/tools/objtool/arch/powerpc/include/arch/cfi_regs.h
new file mode 100644
index 000000000000..59638ebeafc8
--- /dev/null
+++ b/tools/objtool/arch/powerpc/include/arch/cfi_regs.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _OBJTOOL_CFI_REGS_H
+#define _OBJTOOL_CFI_REGS_H
+
+#define CFI_BP 1
+#define CFI_SP CFI_BP
+#define CFI_RA 32
+#define CFI_NUM_REGS 33
+
+#endif
diff --git a/tools/objtool/arch/powerpc/include/arch/elf.h b/tools/objtool/arch/powerpc/include/arch/elf.h
new file mode 100644
index 000000000000..73f9ae172fe5
--- /dev/null
+++ b/tools/objtool/arch/powerpc/include/arch/elf.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _OBJTOOL_ARCH_ELF
+#define _OBJTOOL_ARCH_ELF
+
+#define R_NONE R_PPC_NONE
+#define R_ABS64 R_PPC64_ADDR64
+#define R_ABS32 R_PPC_ADDR32
+
+#endif /* _OBJTOOL_ARCH_ELF */
diff --git a/tools/objtool/arch/powerpc/include/arch/special.h b/tools/objtool/arch/powerpc/include/arch/special.h
new file mode 100644
index 000000000000..ffef9ada7133
--- /dev/null
+++ b/tools/objtool/arch/powerpc/include/arch/special.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _PPC_ARCH_SPECIAL_H
+#define _PPC_ARCH_SPECIAL_H
+
+#define EX_ENTRY_SIZE 8
+#define EX_ORIG_OFFSET 0
+#define EX_NEW_OFFSET 4
+
+#define JUMP_ENTRY_SIZE 16
+#define JUMP_ORIG_OFFSET 0
+#define JUMP_NEW_OFFSET 4
+#define JUMP_KEY_OFFSET 8
+
+#define ALT_ENTRY_SIZE 12
+#define ALT_ORIG_OFFSET 0
+#define ALT_NEW_OFFSET 4
+#define ALT_FEATURE_OFFSET 8
+#define ALT_ORIG_LEN_OFFSET 10
+#define ALT_NEW_LEN_OFFSET 11
+
+#endif /* _PPC_ARCH_SPECIAL_H */
diff --git a/tools/objtool/arch/powerpc/special.c b/tools/objtool/arch/powerpc/special.c
new file mode 100644
index 000000000000..d33868147196
--- /dev/null
+++ b/tools/objtool/arch/powerpc/special.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <string.h>
+#include <stdlib.h>
+#include <objtool/special.h>
+#include <objtool/builtin.h>
+
+
+bool arch_support_alt_relocation(struct special_alt *special_alt,
+ struct instruction *insn,
+ struct reloc *reloc)
+{
+ exit(-1);
+}
+
+struct reloc *arch_find_switch_table(struct objtool_file *file,
+ struct instruction *insn)
+{
+ exit(-1);
+}
diff --git a/tools/objtool/arch/x86/Build b/tools/objtool/arch/x86/Build
index 7c5004008e97..9f7869b5c5e0 100644
--- a/tools/objtool/arch/x86/Build
+++ b/tools/objtool/arch/x86/Build
@@ -1,3 +1,4 @@
+objtool-y += special.o
objtool-y += decode.o
inat_tables_script = ../arch/x86/tools/gen-insn-attr-x86.awk
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index a62e032863a8..9ef024fd648c 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -11,22 +11,24 @@
#include "../../../arch/x86/lib/inat.c"
#include "../../../arch/x86/lib/insn.c"
-#include "../../elf.h"
-#include "../../arch.h"
-#include "../../warn.h"
-
-static unsigned char op_to_cfi_reg[][2] = {
- {CFI_AX, CFI_R8},
- {CFI_CX, CFI_R9},
- {CFI_DX, CFI_R10},
- {CFI_BX, CFI_R11},
- {CFI_SP, CFI_R12},
- {CFI_BP, CFI_R13},
- {CFI_SI, CFI_R14},
- {CFI_DI, CFI_R15},
-};
-
-static int is_x86_64(struct elf *elf)
+#define CONFIG_64BIT 1
+#include <asm/nops.h>
+
+#include <asm/orc_types.h>
+#include <objtool/check.h>
+#include <objtool/elf.h>
+#include <objtool/arch.h>
+#include <objtool/warn.h>
+#include <objtool/endianness.h>
+#include <objtool/builtin.h>
+#include <arch/elf.h>
+
+int arch_ftrace_match(char *name)
+{
+ return !strcmp(name, "__fentry__");
+}
+
+static int is_x86_64(const struct elf *elf)
{
switch (elf->ehdr.e_machine) {
case EM_X86_64:
@@ -66,293 +68,441 @@ bool arch_callee_saved_reg(unsigned char reg)
}
}
-int arch_decode_instruction(struct elf *elf, struct section *sec,
+unsigned long arch_dest_reloc_offset(int addend)
+{
+ return addend + 4;
+}
+
+unsigned long arch_jump_destination(struct instruction *insn)
+{
+ return insn->offset + insn->len + insn->immediate;
+}
+
+bool arch_pc_relative_reloc(struct reloc *reloc)
+{
+ /*
+ * All relocation types where P (the address of the target)
+ * is included in the computation.
+ */
+ switch (reloc->type) {
+ case R_X86_64_PC8:
+ case R_X86_64_PC16:
+ case R_X86_64_PC32:
+ case R_X86_64_PC64:
+
+ case R_X86_64_PLT32:
+ case R_X86_64_GOTPC32:
+ case R_X86_64_GOTPCREL:
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
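+/*
+ * Allocate a new stack_op, append it to the instruction's stack_ops list
+ * and run the block following the macro exactly once with 'op' pointing
+ * at the new entry.
+ */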
+#define ADD_OP(op) \
+ if (!(op = calloc(1, sizeof(*op)))) \
+ return -1; \
+ else for (*ops_list = op, ops_list = &op->next; op; op = NULL)
+
+/*
+ * Helpers to decode ModRM/SIB:
+ *
+ * r/m| AX CX DX BX | SP | BP | SI DI |
+ * | R8 R9 R10 R11 | R12 | R13 | R14 R15 |
+ * Mod+----------------+-----+-----+---------+
+ * 00 | [r/m] |[SIB]|[IP+]| [r/m] |
+ * 01 | [r/m + d8] |[S+d]| [r/m + d8] |
+ * 10 | [r/m + d32] |[S+D]| [r/m + d32] |
+ * 11 | r/ m |
+ */
+
+#define mod_is_mem() (modrm_mod != 3)
+#define mod_is_reg() (modrm_mod == 3)
+
+#define is_RIP() ((modrm_rm & 7) == CFI_BP && modrm_mod == 0)
+#define have_SIB() ((modrm_rm & 7) == CFI_SP && mod_is_mem())
+
+#define rm_is(reg) (have_SIB() ? \
+ sib_base == (reg) && sib_index == CFI_SP : \
+ modrm_rm == (reg))
+
+#define rm_is_mem(reg) (mod_is_mem() && !is_RIP() && rm_is(reg))
+#define rm_is_reg(reg) (mod_is_reg() && modrm_rm == (reg))
+
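+/* The 0x3e (DS segment override) byte doubles as the CET 'notrack' prefix. */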
+static bool has_notrack_prefix(struct insn *insn)
+{
+ int i;
+
+ for (i = 0; i < insn->prefixes.nbytes; i++) {
+ if (insn->prefixes.bytes[i] == 0x3e)
+ return true;
+ }
+
+ return false;
+}
+
+int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
unsigned long offset, unsigned int maxlen,
- unsigned int *len, enum insn_type *type,
- unsigned long *immediate, struct stack_op *op)
+ struct instruction *insn)
{
- struct insn insn;
- int x86_64, sign;
- unsigned char op1, op2, rex = 0, rex_b = 0, rex_r = 0, rex_w = 0,
- rex_x = 0, modrm = 0, modrm_mod = 0, modrm_rm = 0,
- modrm_reg = 0, sib = 0;
+ struct stack_op **ops_list = &insn->stack_ops;
+ const struct elf *elf = file->elf;
+ struct insn ins;
+ int x86_64, ret;
+ unsigned char op1, op2, op3, prefix,
+ rex = 0, rex_b = 0, rex_r = 0, rex_w = 0, rex_x = 0,
+ modrm = 0, modrm_mod = 0, modrm_rm = 0, modrm_reg = 0,
+ sib = 0, /* sib_scale = 0, */ sib_index = 0, sib_base = 0;
+ struct stack_op *op = NULL;
+ struct symbol *sym;
+ u64 imm;
x86_64 = is_x86_64(elf);
if (x86_64 == -1)
return -1;
- insn_init(&insn, sec->data->d_buf + offset, maxlen, x86_64);
- insn_get_length(&insn);
-
- if (!insn_complete(&insn)) {
- WARN_FUNC("can't decode instruction", sec, offset);
+ ret = insn_decode(&ins, sec->data->d_buf + offset, maxlen,
+ x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+ if (ret < 0) {
+ WARN("can't decode instruction at %s:0x%lx", sec->name, offset);
return -1;
}
- *len = insn.length;
- *type = INSN_OTHER;
+ insn->len = ins.length;
+ insn->type = INSN_OTHER;
- if (insn.vex_prefix.nbytes)
+ if (ins.vex_prefix.nbytes)
return 0;
- op1 = insn.opcode.bytes[0];
- op2 = insn.opcode.bytes[1];
+ prefix = ins.prefixes.bytes[0];
+
+ op1 = ins.opcode.bytes[0];
+ op2 = ins.opcode.bytes[1];
+ op3 = ins.opcode.bytes[2];
- if (insn.rex_prefix.nbytes) {
- rex = insn.rex_prefix.bytes[0];
+ if (ins.rex_prefix.nbytes) {
+ rex = ins.rex_prefix.bytes[0];
rex_w = X86_REX_W(rex) >> 3;
rex_r = X86_REX_R(rex) >> 2;
rex_x = X86_REX_X(rex) >> 1;
rex_b = X86_REX_B(rex);
}
- if (insn.modrm.nbytes) {
- modrm = insn.modrm.bytes[0];
+ if (ins.modrm.nbytes) {
+ modrm = ins.modrm.bytes[0];
modrm_mod = X86_MODRM_MOD(modrm);
- modrm_reg = X86_MODRM_REG(modrm);
- modrm_rm = X86_MODRM_RM(modrm);
+ modrm_reg = X86_MODRM_REG(modrm) + 8*rex_r;
+ modrm_rm = X86_MODRM_RM(modrm) + 8*rex_b;
}
- if (insn.sib.nbytes)
- sib = insn.sib.bytes[0];
+ if (ins.sib.nbytes) {
+ sib = ins.sib.bytes[0];
+ /* sib_scale = X86_SIB_SCALE(sib); */
+ sib_index = X86_SIB_INDEX(sib) + 8*rex_x;
+ sib_base = X86_SIB_BASE(sib) + 8*rex_b;
+ }
switch (op1) {
case 0x1:
case 0x29:
- if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+ if (rex_w && rm_is_reg(CFI_SP)) {
/* add/sub reg, %rsp */
- *type = INSN_STACK;
- op->src.type = OP_SRC_ADD;
- op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = modrm_reg;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
}
break;
case 0x50 ... 0x57:
/* push reg */
- *type = INSN_STACK;
- op->src.type = OP_SRC_REG;
- op->src.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
- op->dest.type = OP_DEST_PUSH;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = (op1 & 0x7) + 8*rex_b;
+ op->dest.type = OP_DEST_PUSH;
+ }
break;
case 0x58 ... 0x5f:
/* pop reg */
- *type = INSN_STACK;
- op->src.type = OP_SRC_POP;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[op1 & 0x7][rex_b];
+ ADD_OP(op) {
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = (op1 & 0x7) + 8*rex_b;
+ }
break;
case 0x68:
case 0x6a:
/* push immediate */
- *type = INSN_STACK;
- op->src.type = OP_SRC_CONST;
- op->dest.type = OP_DEST_PUSH;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSH;
+ }
break;
case 0x70 ... 0x7f:
- *type = INSN_JUMP_CONDITIONAL;
+ insn->type = INSN_JUMP_CONDITIONAL;
break;
- case 0x81:
- case 0x83:
- if (rex != 0x48)
+ case 0x80 ... 0x83:
+ /*
+ * 1000 00sw : mod OP r/m : immediate
+ *
+ * s - sign extend immediate
+ * w - imm8 / imm32
+ *
+ * OP: 000 ADD 100 AND
+ * 001 OR 101 SUB
+ * 010 ADC 110 XOR
+ * 011 SBB 111 CMP
+ */
+
+ /* 64bit only */
+ if (!rex_w)
break;
- if (modrm == 0xe4) {
- /* and imm, %rsp */
- *type = INSN_STACK;
- op->src.type = OP_SRC_AND;
- op->src.reg = CFI_SP;
- op->src.offset = insn.immediate.value;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
+ /* %rsp target only */
+ if (!rm_is_reg(CFI_SP))
break;
+
+ imm = ins.immediate.value;
+ if (op1 & 2) { /* sign extend */
+ if (op1 & 1) { /* imm32 */
+ imm <<= 32;
+ imm = (s64)imm >> 32;
+ } else { /* imm8 */
+ imm <<= 56;
+ imm = (s64)imm >> 56;
+ }
}
- if (modrm == 0xc4)
- sign = 1;
- else if (modrm == 0xec)
- sign = -1;
- else
+ switch (modrm_reg & 7) {
+ case 5:
+ imm = -imm;
+ /* fallthrough */
+ case 0:
+ /* add/sub imm, %rsp */
+ ADD_OP(op) {
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = CFI_SP;
+ op->src.offset = imm;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
+ break;
+
+ case 4:
+ /* and imm, %rsp */
+ ADD_OP(op) {
+ op->src.type = OP_SRC_AND;
+ op->src.reg = CFI_SP;
+ op->src.offset = ins.immediate.value;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
break;
- /* add/sub imm, %rsp */
- *type = INSN_STACK;
- op->src.type = OP_SRC_ADD;
- op->src.reg = CFI_SP;
- op->src.offset = insn.immediate.value * sign;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
+ default:
+ /* WARN ? */
+ break;
+ }
+
break;
case 0x89:
- if (rex_w && !rex_r && modrm_mod == 3 && modrm_reg == 4) {
+ if (!rex_w)
+ break;
+
+ if (modrm_reg == CFI_SP) {
+
+ if (mod_is_reg()) {
+ /* mov %rsp, reg */
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = CFI_SP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = modrm_rm;
+ }
+ break;
+
+ } else {
+ /* skip RIP relative displacement */
+ if (is_RIP())
+ break;
+
+ /* skip nontrivial SIB */
+ if (have_SIB()) {
+ modrm_rm = sib_base;
+ if (sib_index != CFI_SP)
+ break;
+ }
+
+ /* mov %rsp, disp(%reg) */
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = CFI_SP;
+ op->dest.type = OP_DEST_REG_INDIRECT;
+ op->dest.reg = modrm_rm;
+ op->dest.offset = ins.displacement.value;
+ }
+ break;
+ }
- /* mov %rsp, reg */
- *type = INSN_STACK;
- op->src.type = OP_SRC_REG;
- op->src.reg = CFI_SP;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[modrm_rm][rex_b];
break;
}
- if (rex_w && !rex_b && modrm_mod == 3 && modrm_rm == 4) {
+ if (rm_is_reg(CFI_SP)) {
/* mov reg, %rsp */
- *type = INSN_STACK;
- op->src.type = OP_SRC_REG;
- op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = modrm_reg;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
break;
}
/* fallthrough */
case 0x88:
- if (!rex_b &&
- (modrm_mod == 1 || modrm_mod == 2) && modrm_rm == 5) {
+ if (!rex_w)
+ break;
+
+ if (rm_is_mem(CFI_BP)) {
/* mov reg, disp(%rbp) */
- *type = INSN_STACK;
- op->src.type = OP_SRC_REG;
- op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
- op->dest.type = OP_DEST_REG_INDIRECT;
- op->dest.reg = CFI_BP;
- op->dest.offset = insn.displacement.value;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = modrm_reg;
+ op->dest.type = OP_DEST_REG_INDIRECT;
+ op->dest.reg = CFI_BP;
+ op->dest.offset = ins.displacement.value;
+ }
+ break;
+ }
- } else if (rex_w && !rex_b && modrm_rm == 4 && sib == 0x24) {
+ if (rm_is_mem(CFI_SP)) {
/* mov reg, disp(%rsp) */
- *type = INSN_STACK;
- op->src.type = OP_SRC_REG;
- op->src.reg = op_to_cfi_reg[modrm_reg][rex_r];
- op->dest.type = OP_DEST_REG_INDIRECT;
- op->dest.reg = CFI_SP;
- op->dest.offset = insn.displacement.value;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = modrm_reg;
+ op->dest.type = OP_DEST_REG_INDIRECT;
+ op->dest.reg = CFI_SP;
+ op->dest.offset = ins.displacement.value;
+ }
+ break;
}
break;
case 0x8b:
- if (rex_w && !rex_b && modrm_mod == 1 && modrm_rm == 5) {
+ if (!rex_w)
+ break;
+
+ if (rm_is_mem(CFI_BP)) {
/* mov disp(%rbp), reg */
- *type = INSN_STACK;
- op->src.type = OP_SRC_REG_INDIRECT;
- op->src.reg = CFI_BP;
- op->src.offset = insn.displacement.value;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG_INDIRECT;
+ op->src.reg = CFI_BP;
+ op->src.offset = ins.displacement.value;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = modrm_reg;
+ }
+ break;
+ }
- } else if (rex_w && !rex_b && sib == 0x24 &&
- modrm_mod != 3 && modrm_rm == 4) {
+ if (rm_is_mem(CFI_SP)) {
/* mov disp(%rsp), reg */
- *type = INSN_STACK;
- op->src.type = OP_SRC_REG_INDIRECT;
- op->src.reg = CFI_SP;
- op->src.offset = insn.displacement.value;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG_INDIRECT;
+ op->src.reg = CFI_SP;
+ op->src.offset = ins.displacement.value;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = modrm_reg;
+ }
+ break;
}
break;
case 0x8d:
- if (sib == 0x24 && rex_w && !rex_b && !rex_x) {
+ if (mod_is_reg()) {
+ WARN("invalid LEA encoding at %s:0x%lx", sec->name, offset);
+ break;
+ }
+
+ /* skip non 64bit ops */
+ if (!rex_w)
+ break;
+
+ /* skip RIP relative displacement */
+ if (is_RIP())
+ break;
+
+ /* skip nontrivial SIB */
+ if (have_SIB()) {
+ modrm_rm = sib_base;
+ if (sib_index != CFI_SP)
+ break;
+ }
- *type = INSN_STACK;
- if (!insn.displacement.value) {
- /* lea (%rsp), reg */
+ /* lea disp(%src), %dst */
+ ADD_OP(op) {
+ op->src.offset = ins.displacement.value;
+ if (!op->src.offset) {
+ /* lea (%src), %dst */
op->src.type = OP_SRC_REG;
} else {
- /* lea disp(%rsp), reg */
+ /* lea disp(%src), %dst */
op->src.type = OP_SRC_ADD;
- op->src.offset = insn.displacement.value;
}
- op->src.reg = CFI_SP;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = op_to_cfi_reg[modrm_reg][rex_r];
-
- } else if (rex == 0x48 && modrm == 0x65) {
-
- /* lea disp(%rbp), %rsp */
- *type = INSN_STACK;
- op->src.type = OP_SRC_ADD;
- op->src.reg = CFI_BP;
- op->src.offset = insn.displacement.value;
+ op->src.reg = modrm_rm;
op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
-
- } else if (rex == 0x49 && modrm == 0x62 &&
- insn.displacement.value == -8) {
-
- /*
- * lea -0x8(%r10), %rsp
- *
- * Restoring rsp back to its original value after a
- * stack realignment.
- */
- *type = INSN_STACK;
- op->src.type = OP_SRC_ADD;
- op->src.reg = CFI_R10;
- op->src.offset = -8;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
-
- } else if (rex == 0x49 && modrm == 0x65 &&
- insn.displacement.value == -16) {
-
- /*
- * lea -0x10(%r13), %rsp
- *
- * Restoring rsp back to its original value after a
- * stack realignment.
- */
- *type = INSN_STACK;
- op->src.type = OP_SRC_ADD;
- op->src.reg = CFI_R13;
- op->src.offset = -16;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
+ op->dest.reg = modrm_reg;
}
-
break;
case 0x8f:
/* pop to mem */
- *type = INSN_STACK;
- op->src.type = OP_SRC_POP;
- op->dest.type = OP_DEST_MEM;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_MEM;
+ }
break;
case 0x90:
- *type = INSN_NOP;
+ insn->type = INSN_NOP;
break;
case 0x9c:
/* pushf */
- *type = INSN_STACK;
- op->src.type = OP_SRC_CONST;
- op->dest.type = OP_DEST_PUSHF;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSHF;
+ }
break;
case 0x9d:
/* popf */
- *type = INSN_STACK;
- op->src.type = OP_SRC_POPF;
- op->dest.type = OP_DEST_MEM;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_POPF;
+ op->dest.type = OP_DEST_MEM;
+ }
break;
case 0x0f:
@@ -360,43 +510,59 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
if (op2 == 0x01) {
if (modrm == 0xca)
- *type = INSN_CLAC;
+ insn->type = INSN_CLAC;
else if (modrm == 0xcb)
- *type = INSN_STAC;
+ insn->type = INSN_STAC;
} else if (op2 >= 0x80 && op2 <= 0x8f) {
- *type = INSN_JUMP_CONDITIONAL;
+ insn->type = INSN_JUMP_CONDITIONAL;
} else if (op2 == 0x05 || op2 == 0x07 || op2 == 0x34 ||
op2 == 0x35) {
/* sysenter, sysret */
- *type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_CONTEXT_SWITCH;
} else if (op2 == 0x0b || op2 == 0xb9) {
/* ud2 */
- *type = INSN_BUG;
+ insn->type = INSN_BUG;
} else if (op2 == 0x0d || op2 == 0x1f) {
/* nopl/nopw */
- *type = INSN_NOP;
+ insn->type = INSN_NOP;
+
+ } else if (op2 == 0x1e) {
+
+ if (prefix == 0xf3 && (modrm == 0xfa || modrm == 0xfb))
+ insn->type = INSN_ENDBR;
+
+
+ } else if (op2 == 0x38 && op3 == 0xf8) {
+ if (ins.prefixes.nbytes == 1 &&
+ ins.prefixes.bytes[0] == 0xf2) {
+ /* ENQCMD cannot be used in the kernel. */
+ WARN("ENQCMD instruction at %s:%lx", sec->name,
+ offset);
+ }
} else if (op2 == 0xa0 || op2 == 0xa8) {
/* push fs/gs */
- *type = INSN_STACK;
- op->src.type = OP_SRC_CONST;
- op->dest.type = OP_DEST_PUSH;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSH;
+ }
} else if (op2 == 0xa1 || op2 == 0xa9) {
/* pop fs/gs */
- *type = INSN_STACK;
- op->src.type = OP_SRC_POP;
- op->dest.type = OP_DEST_MEM;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_MEM;
+ }
}
break;
@@ -409,64 +575,145 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
* mov bp, sp
* pop bp
*/
- *type = INSN_STACK;
- op->dest.type = OP_DEST_LEAVE;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = CFI_BP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
+ ADD_OP(op) {
+ op->src.type = OP_SRC_POP;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_BP;
+ }
+ break;
+ case 0xcc:
+ /* int3 */
+ insn->type = INSN_TRAP;
break;
case 0xe3:
/* jecxz/jrcxz */
- *type = INSN_JUMP_CONDITIONAL;
+ insn->type = INSN_JUMP_CONDITIONAL;
break;
case 0xe9:
case 0xeb:
- *type = INSN_JUMP_UNCONDITIONAL;
+ insn->type = INSN_JUMP_UNCONDITIONAL;
break;
case 0xc2:
case 0xc3:
- *type = INSN_RETURN;
+ insn->type = INSN_RETURN;
+ break;
+
+ case 0xc7: /* mov imm, r/m */
+ if (!opts.noinstr)
+ break;
+
+ if (ins.length == 3+4+4 && !strncmp(sec->name, ".init.text", 10)) {
+ struct reloc *immr, *disp;
+ struct symbol *func;
+ int idx;
+
+ immr = find_reloc_by_dest(elf, (void *)sec, offset+3);
+ disp = find_reloc_by_dest(elf, (void *)sec, offset+7);
+
+ if (!immr || strcmp(immr->sym->name, "pv_ops"))
+ break;
+
+ idx = (immr->addend + 8) / sizeof(void *);
+
+ func = disp->sym;
+ if (disp->sym->type == STT_SECTION)
+ func = find_symbol_by_offset(disp->sym->sec, disp->addend);
+ if (!func) {
+ WARN("no func for pv_ops[]");
+ return -1;
+ }
+
+ objtool_pv_add(file, idx, func);
+ }
+
break;
+ case 0xcf: /* iret */
+ /*
+ * Handle sync_core(), which has an IRET to self.
+ * All other IRET are in STT_NONE entry code.
+ */
+ sym = find_symbol_containing(sec, offset);
+ if (sym && sym->type == STT_FUNC) {
+ ADD_OP(op) {
+ /* add $40, %rsp */
+ op->src.type = OP_SRC_ADD;
+ op->src.reg = CFI_SP;
+ op->src.offset = 5*8;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = CFI_SP;
+ }
+ break;
+ }
+
+ /* fallthrough */
+
case 0xca: /* retf */
case 0xcb: /* retf */
- case 0xcf: /* iret */
- *type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_CONTEXT_SWITCH;
+ break;
+
+ case 0xe0: /* loopne */
+ case 0xe1: /* loope */
+ case 0xe2: /* loop */
+ insn->type = INSN_JUMP_CONDITIONAL;
break;
case 0xe8:
- *type = INSN_CALL;
+ insn->type = INSN_CALL;
+ /*
+ * For the impact on the stack, a CALL behaves like
+ * a PUSH of an immediate value (the return address).
+ */
+ ADD_OP(op) {
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSH;
+ }
break;
case 0xfc:
- *type = INSN_CLD;
+ insn->type = INSN_CLD;
break;
case 0xfd:
- *type = INSN_STD;
+ insn->type = INSN_STD;
break;
case 0xff:
- if (modrm_reg == 2 || modrm_reg == 3)
+ if (modrm_reg == 2 || modrm_reg == 3) {
- *type = INSN_CALL_DYNAMIC;
+ insn->type = INSN_CALL_DYNAMIC;
+ if (has_notrack_prefix(&ins))
+ WARN("notrack prefix found at %s:0x%lx", sec->name, offset);
- else if (modrm_reg == 4)
+ } else if (modrm_reg == 4) {
- *type = INSN_JUMP_DYNAMIC;
+ insn->type = INSN_JUMP_DYNAMIC;
+ if (has_notrack_prefix(&ins))
+ WARN("notrack prefix found at %s:0x%lx", sec->name, offset);
- else if (modrm_reg == 5)
+ } else if (modrm_reg == 5) {
/* jmpf */
- *type = INSN_CONTEXT_SWITCH;
+ insn->type = INSN_CONTEXT_SWITCH;
- else if (modrm_reg == 6) {
+ } else if (modrm_reg == 6) {
/* push from mem */
- *type = INSN_STACK;
- op->src.type = OP_SRC_CONST;
- op->dest.type = OP_DEST_PUSH;
+ ADD_OP(op) {
+ op->src.type = OP_SRC_CONST;
+ op->dest.type = OP_DEST_PUSH;
+ }
}
break;
@@ -475,12 +722,12 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
break;
}
- *immediate = insn.immediate.nbytes ? insn.immediate.value : 0;
+ insn->immediate = ins.immediate.nbytes ? ins.immediate.value : 0;
return 0;
}
-void arch_initial_func_cfi_state(struct cfi_state *state)
+void arch_initial_func_cfi_state(struct cfi_init_state *state)
{
int i;
@@ -494,6 +741,88 @@ void arch_initial_func_cfi_state(struct cfi_state *state)
state->cfa.offset = 8;
/* initial RA (return address) */
- state->regs[16].base = CFI_CFA;
- state->regs[16].offset = -8;
+ state->regs[CFI_RA].base = CFI_CFA;
+ state->regs[CFI_RA].offset = -8;
+}
+
+const char *arch_nop_insn(int len)
+{
+ static const char nops[5][5] = {
+ { BYTES_NOP1 },
+ { BYTES_NOP2 },
+ { BYTES_NOP3 },
+ { BYTES_NOP4 },
+ { BYTES_NOP5 },
+ };
+
+ if (len < 1 || len > 5) {
+ WARN("invalid NOP size: %d\n", len);
+ return NULL;
+ }
+
+ return nops[len-1];
+}
+
+#define BYTE_RET 0xC3
+
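+/* A RET, followed by an INT3 speculation trap and NOP padding as needed. */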
+const char *arch_ret_insn(int len)
+{
+ static const char ret[5][5] = {
+ { BYTE_RET },
+ { BYTE_RET, 0xcc },
+ { BYTE_RET, 0xcc, BYTES_NOP1 },
+ { BYTE_RET, 0xcc, BYTES_NOP2 },
+ { BYTE_RET, 0xcc, BYTES_NOP3 },
+ };
+
+ if (len < 1 || len > 5) {
+ WARN("invalid RET size: %d\n", len);
+ return NULL;
+ }
+
+ return ret[len-1];
+}
+
+int arch_decode_hint_reg(u8 sp_reg, int *base)
+{
+ switch (sp_reg) {
+ case ORC_REG_UNDEFINED:
+ *base = CFI_UNDEFINED;
+ break;
+ case ORC_REG_SP:
+ *base = CFI_SP;
+ break;
+ case ORC_REG_BP:
+ *base = CFI_BP;
+ break;
+ case ORC_REG_SP_INDIRECT:
+ *base = CFI_SP_INDIRECT;
+ break;
+ case ORC_REG_R10:
+ *base = CFI_R10;
+ break;
+ case ORC_REG_R13:
+ *base = CFI_R13;
+ break;
+ case ORC_REG_DI:
+ *base = CFI_DI;
+ break;
+ case ORC_REG_DX:
+ *base = CFI_DX;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+bool arch_is_retpoline(struct symbol *sym)
+{
+ return !strncmp(sym->name, "__x86_indirect_", 15);
+}
+
+bool arch_is_rethunk(struct symbol *sym)
+{
+ return !strcmp(sym->name, "__x86_return_thunk");
}
diff --git a/tools/objtool/arch/x86/include/arch/cfi_regs.h b/tools/objtool/arch/x86/include/arch/cfi_regs.h
new file mode 100644
index 000000000000..0579d22c433c
--- /dev/null
+++ b/tools/objtool/arch/x86/include/arch/cfi_regs.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _OBJTOOL_CFI_REGS_H
+#define _OBJTOOL_CFI_REGS_H
+
+#define CFI_AX 0
+#define CFI_CX 1
+#define CFI_DX 2
+#define CFI_BX 3
+#define CFI_SP 4
+#define CFI_BP 5
+#define CFI_SI 6
+#define CFI_DI 7
+#define CFI_R8 8
+#define CFI_R9 9
+#define CFI_R10 10
+#define CFI_R11 11
+#define CFI_R12 12
+#define CFI_R13 13
+#define CFI_R14 14
+#define CFI_R15 15
+#define CFI_RA 16
+#define CFI_NUM_REGS 17
+
+#endif /* _OBJTOOL_CFI_REGS_H */
diff --git a/tools/objtool/arch/x86/include/arch/elf.h b/tools/objtool/arch/x86/include/arch/elf.h
new file mode 100644
index 000000000000..ac14987cf687
--- /dev/null
+++ b/tools/objtool/arch/x86/include/arch/elf.h
@@ -0,0 +1,8 @@
+#ifndef _OBJTOOL_ARCH_ELF
+#define _OBJTOOL_ARCH_ELF
+
+#define R_NONE R_X86_64_NONE
+#define R_ABS64 R_X86_64_64
+#define R_ABS32 R_X86_64_32
+
+#endif /* _OBJTOOL_ARCH_ELF */
diff --git a/tools/objtool/arch/x86/include/arch/special.h b/tools/objtool/arch/x86/include/arch/special.h
new file mode 100644
index 000000000000..ca8131352994
--- /dev/null
+++ b/tools/objtool/arch/x86/include/arch/special.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _X86_ARCH_SPECIAL_H
+#define _X86_ARCH_SPECIAL_H
+
+#define EX_ENTRY_SIZE 12
+#define EX_ORIG_OFFSET 0
+#define EX_NEW_OFFSET 4
+
+#define JUMP_ENTRY_SIZE 16
+#define JUMP_ORIG_OFFSET 0
+#define JUMP_NEW_OFFSET 4
+#define JUMP_KEY_OFFSET 8
+
+#define ALT_ENTRY_SIZE 14
+#define ALT_ORIG_OFFSET 0
+#define ALT_NEW_OFFSET 4
+#define ALT_FEATURE_OFFSET 8
+#define ALT_ORIG_LEN_OFFSET 12
+#define ALT_NEW_LEN_OFFSET 13
+
+#endif /* _X86_ARCH_SPECIAL_H */
diff --git a/tools/objtool/arch/x86/special.c b/tools/objtool/arch/x86/special.c
new file mode 100644
index 000000000000..7c97b7391279
--- /dev/null
+++ b/tools/objtool/arch/x86/special.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <string.h>
+
+#include <objtool/special.h>
+#include <objtool/builtin.h>
+
+#define X86_FEATURE_POPCNT (4 * 32 + 23)
+#define X86_FEATURE_SMAP (9 * 32 + 20)
+
+void arch_handle_alternative(unsigned short feature, struct special_alt *alt)
+{
+ switch (feature) {
+ case X86_FEATURE_SMAP:
+ /*
+ * If UACCESS validation is enabled; force that alternative;
+ * otherwise force it the other way.
+ *
+ * What we want to avoid is having both the original and the
+ * alternative code flow at the same time, in that case we can
+ * find paths that see the STAC but take the NOP instead of
+ * CLAC and the other way around.
+ */
+ if (opts.uaccess)
+ alt->skip_orig = true;
+ else
+ alt->skip_alt = true;
+ break;
+ case X86_FEATURE_POPCNT:
+ /*
+ * It has been requested that we don't validate the !POPCNT
+ * feature path which is a "very very small percentage of
+ * machines".
+ */
+ alt->skip_orig = true;
+ break;
+ default:
+ break;
+ }
+}
+
+bool arch_support_alt_relocation(struct special_alt *special_alt,
+ struct instruction *insn,
+ struct reloc *reloc)
+{
+ /*
+ * The x86 alternatives code adjusts the offsets only when it
+ * encounters a branch instruction at the very beginning of the
+ * replacement group.
+ */
+ return insn->offset == special_alt->new_off &&
+ (insn->type == INSN_CALL || is_jump(insn));
+}
+
+/*
+ * There are 3 basic jump table patterns:
+ *
+ * 1. jmpq *[rodata addr](,%reg,8)
+ *
+ * This is the most common case by far. It jumps to an address in a simple
+ * jump table which is stored in .rodata.
+ *
+ * 2. jmpq *[rodata addr](%rip)
+ *
+ * This is caused by a rare GCC quirk, currently only seen in three driver
+ * functions in the kernel, only with certain obscure non-distro configs.
+ *
+ * As part of an optimization, GCC makes a copy of an existing switch jump
+ * table, modifies it, and then hard-codes the jump (albeit with an indirect
+ * jump) to use a single entry in the table. The rest of the jump table and
+ * some of its jump targets remain as dead code.
+ *
+ * In such a case we can just crudely ignore all unreachable instruction
+ * warnings for the entire object file. Ideally we would just ignore them
+ * for the function, but that would require redesigning the code quite a
+ * bit. And honestly that's just not worth doing: unreachable instruction
+ * warnings are of questionable value anyway, and this is such a rare issue.
+ *
+ * 3. mov [rodata addr],%reg1
+ * ... some instructions ...
+ * jmpq *(%reg1,%reg2,8)
+ *
+ * This is a fairly uncommon pattern which is new for GCC 6. As of this
+ * writing, there are 11 occurrences of it in the allmodconfig kernel.
+ *
+ * As of GCC 7 there are quite a few more of these and the 'in between' code
+ * is significant. Esp. with KASAN enabled some of the code between the mov
+ * and jmpq uses .rodata itself, which can confuse things.
+ *
+ * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
+ * ensure the same register is used in the mov and jump instructions.
+ *
+ * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
+ */
+struct reloc *arch_find_switch_table(struct objtool_file *file,
+ struct instruction *insn)
+{
+ struct reloc *text_reloc, *rodata_reloc;
+ struct section *table_sec;
+ unsigned long table_offset;
+
+ /* look for a relocation which references .rodata */
+ text_reloc = find_reloc_by_dest_range(file->elf, insn->sec,
+ insn->offset, insn->len);
+ if (!text_reloc || text_reloc->sym->type != STT_SECTION ||
+ !text_reloc->sym->sec->rodata)
+ return NULL;
+
+ table_offset = text_reloc->addend;
+ table_sec = text_reloc->sym->sec;
+
+ if (text_reloc->type == R_X86_64_PC32)
+ table_offset += 4;
+
+ /*
+ * Make sure the .rodata address isn't associated with a
+ * symbol. GCC jump tables are anonymous data.
+ *
+ * Also support C jump tables which are in the same format as
+ * switch jump tables. For objtool to recognize them, they
+ * need to be placed in the C_JUMP_TABLE_SECTION section. They
+ * have symbols associated with them.
+ */
+ if (find_symbol_containing(table_sec, table_offset) &&
+ strcmp(table_sec->name, C_JUMP_TABLE_SECTION))
+ return NULL;
+
+ /*
+ * Each table entry has a rela associated with it. The rela
+ * should reference text in the same function as the original
+ * instruction.
+ */
+ rodata_reloc = find_reloc_by_dest(file->elf, table_sec, table_offset);
+ if (!rodata_reloc)
+ return NULL;
+
+ /*
+ * Use of RIP-relative switch jumps is quite rare, and
+ * indicates a rare GCC quirk/bug which can leave dead
+ * code behind.
+ */
+ if (text_reloc->type == R_X86_64_PC32)
+ file->ignore_unreachables = true;
+
+ return rodata_reloc;
+}
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 10fbe75ab43d..7c175198d09f 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -3,48 +3,231 @@
* Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com>
*/
-/*
- * objtool check:
- *
- * This command analyzes every .o file and ensures the validity of its stack
- * trace metadata. It enforces a set of rules on asm code and C inline
- * assembly code so that stack traces can be reliable.
- *
- * For more information, see tools/objtool/Documentation/stack-validation.txt.
- */
-
#include <subcmd/parse-options.h>
-#include "builtin.h"
-#include "check.h"
+#include <string.h>
+#include <stdlib.h>
+#include <objtool/builtin.h>
+#include <objtool/objtool.h>
+
+#define ERROR(format, ...) \
+ fprintf(stderr, \
+ "error: objtool: " format "\n", \
+ ##__VA_ARGS__)
-bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats;
+struct opts opts;
static const char * const check_usage[] = {
- "objtool check [<options>] file.o",
+ "objtool <actions> [<options>] file.o",
+ NULL,
+};
+
+static const char * const env_usage[] = {
+ "OBJTOOL_ARGS=\"<options>\"",
NULL,
};
-const struct option check_options[] = {
- OPT_BOOLEAN('f', "no-fp", &no_fp, "Skip frame pointer validation"),
- OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"),
- OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
- OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
- OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
- OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"),
- OPT_BOOLEAN('s', "stats", &stats, "print statistics"),
+static int parse_dump(const struct option *opt, const char *str, int unset)
+{
+ if (!str || !strcmp(str, "orc")) {
+ opts.dump_orc = true;
+ return 0;
+ }
+
+ return -1;
+}
+
+static int parse_hacks(const struct option *opt, const char *str, int unset)
+{
+ bool found = false;
+
+ /*
+ * Use strstr() as a lazy method of checking for comma-separated
+ * options.
+ *
+ * No string provided == enable all options.
+ */
+
+ if (!str || strstr(str, "jump_label")) {
+ opts.hack_jump_label = true;
+ found = true;
+ }
+
+ if (!str || strstr(str, "noinstr")) {
+ opts.hack_noinstr = true;
+ found = true;
+ }
+
+ if (!str || strstr(str, "skylake")) {
+ opts.hack_skylake = true;
+ found = true;
+ }
+
+ return found ? 0 : -1;
+}
+
+static const struct option check_options[] = {
+ OPT_GROUP("Actions:"),
+ OPT_CALLBACK_OPTARG('h', "hacks", NULL, NULL, "jump_label,noinstr,skylake", "patch toolchain bugs/limitations", parse_hacks),
+ OPT_BOOLEAN('i', "ibt", &opts.ibt, "validate and annotate IBT"),
+ OPT_BOOLEAN('m', "mcount", &opts.mcount, "annotate mcount/fentry calls for ftrace"),
+ OPT_BOOLEAN('n', "noinstr", &opts.noinstr, "validate noinstr rules"),
+ OPT_BOOLEAN('o', "orc", &opts.orc, "generate ORC metadata"),
+ OPT_BOOLEAN('r', "retpoline", &opts.retpoline, "validate and annotate retpoline usage"),
+ OPT_BOOLEAN(0, "rethunk", &opts.rethunk, "validate and annotate rethunk usage"),
+ OPT_BOOLEAN(0, "unret", &opts.unret, "validate entry unret placement"),
+ OPT_INTEGER(0, "prefix", &opts.prefix, "generate prefix symbols"),
+ OPT_BOOLEAN('l', "sls", &opts.sls, "validate straight-line-speculation mitigations"),
+ OPT_BOOLEAN('s', "stackval", &opts.stackval, "validate frame pointer rules"),
+ OPT_BOOLEAN('t', "static-call", &opts.static_call, "annotate static calls"),
+ OPT_BOOLEAN('u', "uaccess", &opts.uaccess, "validate uaccess rules for SMAP"),
+ OPT_BOOLEAN(0 , "cfi", &opts.cfi, "annotate kernel control flow integrity (kCFI) function preambles"),
+ OPT_CALLBACK_OPTARG(0, "dump", NULL, NULL, "orc", "dump metadata", parse_dump),
+
+ OPT_GROUP("Options:"),
+ OPT_BOOLEAN(0, "backtrace", &opts.backtrace, "unwind on error"),
+ OPT_BOOLEAN(0, "backup", &opts.backup, "create .orig files before modification"),
+ OPT_BOOLEAN(0, "dry-run", &opts.dryrun, "don't write modifications"),
+ OPT_BOOLEAN(0, "link", &opts.link, "object is a linked object"),
+ OPT_BOOLEAN(0, "module", &opts.module, "object is part of a kernel module"),
+ OPT_BOOLEAN(0, "mnop", &opts.mnop, "nop out mcount call sites"),
+ OPT_BOOLEAN(0, "no-unreachable", &opts.no_unreachable, "skip 'unreachable instruction' warnings"),
+ OPT_BOOLEAN(0, "sec-address", &opts.sec_address, "print section addresses in warnings"),
+ OPT_BOOLEAN(0, "stats", &opts.stats, "print statistics"),
+
OPT_END(),
};
-int cmd_check(int argc, const char **argv)
+int cmd_parse_options(int argc, const char **argv, const char * const usage[])
{
- const char *objname;
+ const char *envv[16] = { };
+ char *env;
+ int envc;
+
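+ /*
+ * Allow extra options to be passed in via the OBJTOOL_ARGS environment
+ * variable: split it on spaces, in place, and parse it like a second
+ * argv.
+ */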
+ env = getenv("OBJTOOL_ARGS");
+ if (env) {
+ envv[0] = "OBJTOOL_ARGS";
+ for (envc = 1; envc < ARRAY_SIZE(envv); ) {
+ envv[envc++] = env;
+ env = strchr(env, ' ');
+ if (!env)
+ break;
+ *env = '\0';
+ env++;
+ }
- argc = parse_options(argc, argv, check_options, check_usage, 0);
+ parse_options(envc, envv, check_options, env_usage, 0);
+ }
+ argc = parse_options(argc, argv, check_options, usage, 0);
if (argc != 1)
- usage_with_options(check_usage, check_options);
+ usage_with_options(usage, check_options);
+ return argc;
+}
+
+static bool opts_valid(void)
+{
+ if (opts.hack_jump_label ||
+ opts.hack_noinstr ||
+ opts.ibt ||
+ opts.mcount ||
+ opts.noinstr ||
+ opts.orc ||
+ opts.retpoline ||
+ opts.rethunk ||
+ opts.sls ||
+ opts.stackval ||
+ opts.static_call ||
+ opts.uaccess) {
+ if (opts.dump_orc) {
+ ERROR("--dump can't be combined with other options");
+ return false;
+ }
+
+ return true;
+ }
+ if (opts.unret && !opts.rethunk) {
+ ERROR("--unret requires --rethunk");
+ return false;
+ }
+
+ if (opts.dump_orc)
+ return true;
+
+ ERROR("At least one command required");
+ return false;
+}
+
+static bool mnop_opts_valid(void)
+{
+ if (opts.mnop && !opts.mcount) {
+ ERROR("--mnop requires --mcount");
+ return false;
+ }
+
+ return true;
+}
+
+static bool link_opts_valid(struct objtool_file *file)
+{
+ if (opts.link)
+ return true;
+
+ if (has_multiple_files(file->elf)) {
+ ERROR("Linked object detected, forcing --link");
+ opts.link = true;
+ return true;
+ }
+
+ if (opts.noinstr) {
+ ERROR("--noinstr requires --link");
+ return false;
+ }
+
+ if (opts.ibt) {
+ ERROR("--ibt requires --link");
+ return false;
+ }
+
+ if (opts.unret) {
+ ERROR("--unret requires --link");
+ return false;
+ }
+
+ return true;
+}
+
+int objtool_run(int argc, const char **argv)
+{
+ const char *objname;
+ struct objtool_file *file;
+ int ret;
+
+ argc = cmd_parse_options(argc, argv, check_usage);
objname = argv[0];
- return check(objname, false);
+ if (!opts_valid())
+ return 1;
+
+ if (opts.dump_orc)
+ return orc_dump(objname);
+
+ file = objtool_open_read(objname);
+ if (!file)
+ return 1;
+
+ if (!mnop_opts_valid())
+ return 1;
+
+ if (!link_opts_valid(file))
+ return 1;
+
+ ret = check(file);
+ if (ret)
+ return ret;
+
+ if (file->elf->changed)
+ return elf_write(file->elf);
+
+ return 0;
}
diff --git a/tools/objtool/builtin-orc.c b/tools/objtool/builtin-orc.c
deleted file mode 100644
index 5f7cc6157edd..000000000000
--- a/tools/objtool/builtin-orc.c
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- */
-
-/*
- * objtool orc:
- *
- * This command analyzes a .o file and adds .orc_unwind and .orc_unwind_ip
- * sections to it, which is used by the in-kernel ORC unwinder.
- *
- * This command is a superset of "objtool check".
- */
-
-#include <string.h>
-#include "builtin.h"
-#include "check.h"
-
-
-static const char *orc_usage[] = {
- "objtool orc generate [<options>] file.o",
- "objtool orc dump file.o",
- NULL,
-};
-
-int cmd_orc(int argc, const char **argv)
-{
- const char *objname;
-
- argc--; argv++;
- if (argc <= 0)
- usage_with_options(orc_usage, check_options);
-
- if (!strncmp(argv[0], "gen", 3)) {
- argc = parse_options(argc, argv, check_options, orc_usage, 0);
- if (argc != 1)
- usage_with_options(orc_usage, check_options);
-
- objname = argv[0];
-
- return check(objname, true);
- }
-
- if (!strcmp(argv[0], "dump")) {
- if (argc != 2)
- usage_with_options(orc_usage, check_options);
-
- objname = argv[1];
-
- return orc_dump(objname);
- }
-
- usage_with_options(orc_usage, check_options);
-
- return 0;
-}
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
deleted file mode 100644
index 0b907902ee79..000000000000
--- a/tools/objtool/builtin.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- */
-#ifndef _BUILTIN_H
-#define _BUILTIN_H
-
-#include <subcmd/parse-options.h>
-
-extern const struct option check_options[];
-extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess, stats;
-
-extern int cmd_check(int argc, const char **argv);
-extern int cmd_orc(int argc, const char **argv);
-
-#endif /* _BUILTIN_H */
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a22272c819f3..0fcf99c91400 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -5,63 +5,71 @@
#include <string.h>
#include <stdlib.h>
-
-#include "builtin.h"
-#include "check.h"
-#include "elf.h"
-#include "special.h"
-#include "arch.h"
-#include "warn.h"
-
+#include <inttypes.h>
+#include <sys/mman.h>
+
+#include <arch/elf.h>
+#include <objtool/builtin.h>
+#include <objtool/cfi.h>
+#include <objtool/arch.h>
+#include <objtool/check.h>
+#include <objtool/special.h>
+#include <objtool/warn.h>
+#include <objtool/endianness.h>
+
+#include <linux/objtool_types.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
-
-#define FAKE_JUMP_OFFSET -1
-
-#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table"
+#include <linux/static_call_types.h>
struct alternative {
- struct list_head list;
+ struct alternative *next;
struct instruction *insn;
bool skip_orig;
};
-const char *objname;
-struct cfi_state initial_func_cfi;
+static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache;
+
+static struct cfi_init_state initial_func_cfi;
+static struct cfi_state init_cfi;
+static struct cfi_state func_cfi;
struct instruction *find_insn(struct objtool_file *file,
struct section *sec, unsigned long offset)
{
struct instruction *insn;
- hash_for_each_possible(file->insn_hash, insn, hash, offset)
+ hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) {
if (insn->sec == sec && insn->offset == offset)
return insn;
+ }
return NULL;
}
-static struct instruction *next_insn_same_sec(struct objtool_file *file,
- struct instruction *insn)
+struct instruction *next_insn_same_sec(struct objtool_file *file,
+ struct instruction *insn)
{
- struct instruction *next = list_next_entry(insn, list);
+ if (insn->idx == INSN_CHUNK_MAX)
+ return find_insn(file, insn->sec, insn->offset + insn->len);
- if (!next || &next->list == &file->insn_list || next->sec != insn->sec)
+ insn++;
+ if (!insn->len)
return NULL;
- return next;
+ return insn;
}
static struct instruction *next_insn_same_func(struct objtool_file *file,
struct instruction *insn)
{
- struct instruction *next = list_next_entry(insn, list);
- struct symbol *func = insn->func;
+ struct instruction *next = next_insn_same_sec(file, insn);
+ struct symbol *func = insn_func(insn);
if (!func)
return NULL;
- if (&next->list != &file->insn_list && next->func == func)
+ if (next && insn_func(next) == func)
return next;
/* Check if we're already in the subfunction: */
@@ -72,6 +80,35 @@ static struct instruction *next_insn_same_func(struct objtool_file *file,
return find_insn(file, func->cfunc->sec, func->cfunc->offset);
}
+static struct instruction *prev_insn_same_sec(struct objtool_file *file,
+ struct instruction *insn)
+{
+ if (insn->idx == 0) {
+ if (insn->prev_len)
+ return find_insn(file, insn->sec, insn->offset - insn->prev_len);
+ return NULL;
+ }
+
+ return insn - 1;
+}
+
+static struct instruction *prev_insn_same_sym(struct objtool_file *file,
+ struct instruction *insn)
+{
+ struct instruction *prev = prev_insn_same_sec(file, insn);
+
+ if (prev && insn_func(prev) == insn_func(insn))
+ return prev;
+
+ return NULL;
+}
+
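+/*
+ * Iterate over every instruction in every section; the dummy one-shot
+ * '__fake' loop only exists so the macro can declare '__sec' and still
+ * expand to a single statement.
+ */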
+#define for_each_insn(file, insn) \
+ for (struct section *__sec, *__fake = (struct section *)1; \
+ __fake; __fake = NULL) \
+ for_each_sec(file, __sec) \
+ sec_for_each_insn(file, __sec, insn)
+
#define func_for_each_insn(file, func, insn) \
for (insn = find_insn(file, func->sec, func->offset); \
insn; \
@@ -79,16 +116,13 @@ static struct instruction *next_insn_same_func(struct objtool_file *file,
#define sym_for_each_insn(file, sym, insn) \
for (insn = find_insn(file, sym->sec, sym->offset); \
- insn && &insn->list != &file->insn_list && \
- insn->sec == sym->sec && \
- insn->offset < sym->offset + sym->len; \
- insn = list_next_entry(insn, list))
+ insn && insn->offset < sym->offset + sym->len; \
+ insn = next_insn_same_sec(file, insn))
#define sym_for_each_insn_continue_reverse(file, sym, insn) \
- for (insn = list_prev_entry(insn, list); \
- &insn->list != &file->insn_list && \
- insn->sec == sym->sec && insn->offset >= sym->offset; \
- insn = list_prev_entry(insn, list))
+ for (insn = prev_insn_same_sec(file, insn); \
+ insn && insn->offset >= sym->offset; \
+ insn = prev_insn_same_sec(file, insn))
#define sec_for_each_insn_from(file, insn) \
for (; insn; insn = next_insn_same_sec(file, insn))
@@ -97,23 +131,49 @@ static struct instruction *next_insn_same_func(struct objtool_file *file,
for (insn = next_insn_same_sec(file, insn); insn; \
insn = next_insn_same_sec(file, insn))
-static bool is_static_jump(struct instruction *insn)
+static inline struct symbol *insn_call_dest(struct instruction *insn)
{
- return insn->type == INSN_JUMP_CONDITIONAL ||
- insn->type == INSN_JUMP_UNCONDITIONAL;
+ if (insn->type == INSN_JUMP_DYNAMIC ||
+ insn->type == INSN_CALL_DYNAMIC)
+ return NULL;
+
+ return insn->_call_dest;
}
-static bool is_sibling_call(struct instruction *insn)
+static inline struct reloc *insn_jump_table(struct instruction *insn)
{
- /* An indirect jump is either a sibling call or a jump to a table. */
- if (insn->type == INSN_JUMP_DYNAMIC)
- return list_empty(&insn->alts);
+ if (insn->type == INSN_JUMP_DYNAMIC ||
+ insn->type == INSN_CALL_DYNAMIC)
+ return insn->_jump_table;
- if (!is_static_jump(insn))
- return false;
+ return NULL;
+}
+
+static bool is_jump_table_jump(struct instruction *insn)
+{
+ struct alt_group *alt_group = insn->alt_group;
+
+ if (insn_jump_table(insn))
+ return true;
- /* add_jump_destinations() sets insn->call_dest for sibling calls. */
- return !!insn->call_dest;
+ /* Retpoline alternative for a jump table? */
+ return alt_group && alt_group->orig_group &&
+ insn_jump_table(alt_group->orig_group->first_insn);
+}
+
+static bool is_sibling_call(struct instruction *insn)
+{
+ /*
+ * Assume only STT_FUNC calls have jump-tables.
+ */
+ if (insn_func(insn)) {
+ /* An indirect jump is either a sibling call or a jump to a table. */
+ if (insn->type == INSN_JUMP_DYNAMIC)
+ return !is_jump_table_jump(insn);
+ }
+
+ /* add_jump_destinations() sets insn_call_dest(insn) for sibling calls. */
+ return (is_static_jump(insn) && insn_call_dest(insn));
}
/*
@@ -134,40 +194,66 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
/*
* Unfortunately these have to be hard coded because the noreturn
- * attribute isn't provided in ELF data.
+ * attribute isn't provided in ELF data. Keep 'em sorted.
*/
static const char * const global_noreturns[] = {
+ "__invalid_creds",
+ "__module_put_and_kthread_exit",
+ "__reiserfs_panic",
"__stack_chk_fail",
- "panic",
+ "__ubsan_handle_builtin_unreachable",
+ "arch_call_rest_init",
+ "arch_cpu_idle_dead",
+ "btrfs_assertfail",
+ "cpu_bringup_and_idle",
+ "cpu_startup_entry",
"do_exit",
+ "do_group_exit",
"do_task_dead",
- "__module_put_and_exit",
- "complete_and_exit",
- "__reiserfs_panic",
- "lbug_with_loc",
+ "ex_handler_msr_mce",
"fortify_panic",
- "usercopy_abort",
- "machine_real_restart",
- "rewind_stack_do_exit",
+ "hlt_play_dead",
+ "hv_ghcb_terminate",
+ "kthread_complete_and_exit",
+ "kthread_exit",
"kunit_try_catch_throw",
+ "lbug_with_loc",
+ "machine_real_restart",
+ "make_task_dead",
+ "mpt_halt_firmware",
+ "nmi_panic_self_stop",
+ "panic",
+ "panic_smp_self_stop",
+ "rest_init",
+ "resume_play_dead",
+ "rewind_stack_and_make_dead",
+ "sev_es_terminate",
+ "snp_abort",
+ "start_kernel",
+ "stop_this_cpu",
+ "usercopy_abort",
+ "x86_64_start_kernel",
+ "x86_64_start_reservations",
+ "xen_cpu_bringup_again",
+ "xen_start_kernel",
};
if (!func)
return false;
- if (func->bind == STB_WEAK)
- return false;
-
- if (func->bind == STB_GLOBAL)
+ if (func->bind == STB_GLOBAL || func->bind == STB_WEAK)
for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
if (!strcmp(func->name, global_noreturns[i]))
return true;
+ if (func->bind == STB_WEAK)
+ return false;
+
if (!func->len)
return false;
insn = find_insn(file, func->sec, func->offset);
- if (!insn->func)
+ if (!insn || !insn_func(insn))
return false;
func_for_each_insn(file, func, insn) {
@@ -203,7 +289,7 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
return false;
}
- return __dead_end_function(file, dest->func, recursion+1);
+ return __dead_end_function(file, insn_func(dest), recursion+1);
}
}
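The global_noreturns[] table above is kept sorted by name, but the lookup in __dead_end_function() is still a plain linear strcmp() scan over the whole array. As a minimal, self-contained sketch of how the sorted property could be exploited, here is a bsearch(3)-based lookup over a small, made-up subset of the table; this is only an illustration, not what objtool itself does:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* tiny, sorted subset of the table above -- illustrative only */
static const char * const noreturns[] = {
	"__stack_chk_fail",
	"do_exit",
	"panic",
	"usercopy_abort",
};

static int cmp_name(const void *key, const void *elem)
{
	return strcmp(key, *(const char * const *)elem);
}

static int is_noreturn(const char *name)
{
	return bsearch(name, noreturns,
		       sizeof(noreturns) / sizeof(noreturns[0]),
		       sizeof(noreturns[0]), cmp_name) != NULL;
}

int main(void)
{
	printf("panic:  %d\n", is_noreturn("panic"));	/* 1 */
	printf("printk: %d\n", is_noreturn("printk"));	/* 0 */
	return 0;
}

Either way, the table is only consulted for STB_GLOBAL/STB_WEAK symbols; anything else falls through to the instruction scan below (weak symbols that don't match simply return false).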
@@ -215,20 +301,106 @@ static bool dead_end_function(struct objtool_file *file, struct symbol *func)
return __dead_end_function(file, func, 0);
}
-static void clear_insn_state(struct insn_state *state)
+static void init_cfi_state(struct cfi_state *cfi)
{
int i;
- memset(state, 0, sizeof(*state));
- state->cfa.base = CFI_UNDEFINED;
for (i = 0; i < CFI_NUM_REGS; i++) {
- state->regs[i].base = CFI_UNDEFINED;
- state->vals[i].base = CFI_UNDEFINED;
+ cfi->regs[i].base = CFI_UNDEFINED;
+ cfi->vals[i].base = CFI_UNDEFINED;
}
- state->drap_reg = CFI_UNDEFINED;
- state->drap_offset = -1;
+ cfi->cfa.base = CFI_UNDEFINED;
+ cfi->drap_reg = CFI_UNDEFINED;
+ cfi->drap_offset = -1;
+}
+
+static void init_insn_state(struct objtool_file *file, struct insn_state *state,
+ struct section *sec)
+{
+ memset(state, 0, sizeof(*state));
+ init_cfi_state(&state->cfi);
+
+ /*
+	 * We need the full vmlinux for noinstr validation; otherwise we
+	 * cannot correctly determine insn_call_dest(insn)->sec (external symbols
+ * do not have a section).
+ */
+ if (opts.link && opts.noinstr && sec)
+ state->noinstr = sec->noinstr;
}
+static struct cfi_state *cfi_alloc(void)
+{
+ struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1);
+ if (!cfi) {
+ WARN("calloc failed");
+ exit(1);
+ }
+ nr_cfi++;
+ return cfi;
+}
+
+static int cfi_bits;
+static struct hlist_head *cfi_hash;
+
+static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2)
+{
+ return memcmp((void *)cfi1 + sizeof(cfi1->hash),
+ (void *)cfi2 + sizeof(cfi2->hash),
+ sizeof(struct cfi_state) - sizeof(struct hlist_node));
+}
+
+static inline u32 cfi_key(struct cfi_state *cfi)
+{
+ return jhash((void *)cfi + sizeof(cfi->hash),
+ sizeof(*cfi) - sizeof(cfi->hash), 0);
+}
+
+static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi)
+{
+ struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
+ struct cfi_state *obj;
+
+ hlist_for_each_entry(obj, head, hash) {
+ if (!cficmp(cfi, obj)) {
+ nr_cfi_cache++;
+ return obj;
+ }
+ }
+
+ obj = cfi_alloc();
+ *obj = *cfi;
+ hlist_add_head(&obj->hash, head);
+
+ return obj;
+}
+
+static void cfi_hash_add(struct cfi_state *cfi)
+{
+ struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)];
+
+ hlist_add_head(&cfi->hash, head);
+}
+
+static void *cfi_hash_alloc(unsigned long size)
+{
+ cfi_bits = max(10, ilog2(size));
+ cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits,
+ PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1, 0);
+ if (cfi_hash == (void *)-1L) {
+ WARN("mmap fail cfi_hash");
+ cfi_hash = NULL;
+ } else if (opts.stats) {
+ printf("cfi_bits: %d\n", cfi_bits);
+ }
+
+ return cfi_hash;
+}
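cfi_hash_find_or_add() deduplicates CFI states: both cfi_key() and cficmp() skip the embedded hlist_node so that only the payload is hashed and compared, and callers get back one shared canonical object per distinct state. Below is a simplified, self-contained sketch of that pattern; it assumes a plain FNV-1a hash and a hand-rolled singly linked bucket list in place of the kernel's jhash()/hlist helpers, and all structure names and fields are invented:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct node { struct node *next; };		/* stand-in for hlist_node */

struct cfi_like {
	struct node hash;			/* must stay the first member */
	int cfa_base, cfa_offset;		/* the hashed/compared payload */
	int stack_size;
};

#define NBUCKETS 1024
static struct node *buckets[NBUCKETS];

/* hash everything after the embedded node, like cfi_key() above */
static uint32_t payload_hash(const struct cfi_like *c)
{
	const unsigned char *p = (const unsigned char *)c + sizeof(c->hash);
	size_t n = sizeof(*c) - sizeof(c->hash);
	uint32_t h = 2166136261u;

	while (n--)
		h = (h ^ *p++) * 16777619u;
	return h % NBUCKETS;
}

/* compare everything after the embedded node, like cficmp() above */
static int payload_cmp(const struct cfi_like *a, const struct cfi_like *b)
{
	return memcmp((const char *)a + sizeof(a->hash),
		      (const char *)b + sizeof(b->hash),
		      sizeof(*a) - sizeof(a->hash));
}

/* return the canonical copy, allocating only for states not seen before */
static struct cfi_like *find_or_add(const struct cfi_like *tmp)
{
	uint32_t key = payload_hash(tmp);
	struct cfi_like *obj;
	struct node *n;

	for (n = buckets[key]; n; n = n->next) {
		obj = (struct cfi_like *)n;	/* valid: node is the first member */
		if (!payload_cmp(tmp, obj))
			return obj;		/* cache hit, share it */
	}

	obj = malloc(sizeof(*obj));
	if (!obj)
		return NULL;
	*obj = *tmp;
	obj->hash.next = buckets[key];
	buckets[key] = &obj->hash;
	return obj;
}

int main(void)
{
	struct cfi_like a, b;

	memset(&a, 0, sizeof(a));
	a.cfa_base = 7;
	a.cfa_offset = 8;
	a.stack_size = 8;
	memcpy(&b, &a, sizeof(b));		/* same payload, different object */

	/* both lookups resolve to the same canonical, shared copy */
	return find_or_add(&a) == find_or_add(&b) ? 0 : 1;
}

The offset-past-the-node trick is the same one cficmp()/cfi_key() use above; it only works while the intrusive node stays at the top of the structure.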
+
+static unsigned long nr_insns;
+static unsigned long nr_insns_visited;
+
/*
* Call the arch-specific instruction decoder for all the instructions and add
* them to the global instruction list.
@@ -239,10 +411,12 @@ static int decode_instructions(struct objtool_file *file)
struct symbol *func;
unsigned long offset;
struct instruction *insn;
- unsigned long nr_insns = 0;
int ret;
for_each_sec(file, sec) {
+ struct instruction *insns = NULL;
+ u8 prev_len = 0;
+ u8 idx = 0;
if (!(sec->sh.sh_flags & SHF_EXECINSTR))
continue;
@@ -252,34 +426,76 @@ static int decode_instructions(struct objtool_file *file)
strncmp(sec->name, ".discard.", 9))
sec->text = true;
- for (offset = 0; offset < sec->len; offset += insn->len) {
- insn = malloc(sizeof(*insn));
- if (!insn) {
- WARN("malloc failed");
- return -1;
+ if (!strcmp(sec->name, ".noinstr.text") ||
+ !strcmp(sec->name, ".entry.text") ||
+ !strcmp(sec->name, ".cpuidle.text") ||
+ !strncmp(sec->name, ".text.__x86.", 12))
+ sec->noinstr = true;
+
+ /*
+	 * .init.text code is run before userspace and thus doesn't
+	 * strictly need retpolines, except for modules, which are
+	 * loaded late and very much do need retpolines in their
+	 * .init.text.
+ */
+ if (!strcmp(sec->name, ".init.text") && !opts.module)
+ sec->init = true;
+
+ for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) {
+ if (!insns || idx == INSN_CHUNK_MAX) {
+ insns = calloc(sizeof(*insn), INSN_CHUNK_SIZE);
+ if (!insns) {
+ WARN("malloc failed");
+ return -1;
+ }
+ idx = 0;
+ } else {
+ idx++;
}
- memset(insn, 0, sizeof(*insn));
- INIT_LIST_HEAD(&insn->alts);
- clear_insn_state(&insn->state);
+ insn = &insns[idx];
+ insn->idx = idx;
+ INIT_LIST_HEAD(&insn->call_node);
insn->sec = sec;
insn->offset = offset;
+ insn->prev_len = prev_len;
- ret = arch_decode_instruction(file->elf, sec, offset,
- sec->len - offset,
- &insn->len, &insn->type,
- &insn->immediate,
- &insn->stack_op);
+ ret = arch_decode_instruction(file, sec, offset,
+ sec->sh.sh_size - offset,
+ insn);
if (ret)
- goto err;
+ return ret;
+
+ prev_len = insn->len;
- hash_add(file->insn_hash, &insn->hash, insn->offset);
- list_add_tail(&insn->list, &file->insn_list);
+ /*
+ * By default, "ud2" is a dead end unless otherwise
+ * annotated, because GCC 7 inserts it for certain
+ * divide-by-zero cases.
+ */
+ if (insn->type == INSN_BUG)
+ insn->dead_end = true;
+
+ hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset));
nr_insns++;
}
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC || func->alias != func)
+// printf("%s: last chunk used: %d\n", sec->name, (int)idx);
+
+ sec_for_each_sym(sec, func) {
+ if (func->type != STT_NOTYPE && func->type != STT_FUNC)
+ continue;
+
+ if (func->offset == sec->sh.sh_size) {
+ /* Heuristic: likely an "end" symbol */
+ if (func->type == STT_NOTYPE)
+ continue;
+ WARN("%s(): STT_FUNC at end of section",
+ func->name);
+ return -1;
+ }
+
+ if (func->return_thunk || func->alias != func)
continue;
if (!find_insn(file, sec, func->offset)) {
@@ -288,19 +504,115 @@ static int decode_instructions(struct objtool_file *file)
return -1;
}
- sym_for_each_insn(file, func, insn)
- insn->func = func;
+ sym_for_each_insn(file, func, insn) {
+ insn->sym = func;
+ if (func->type == STT_FUNC &&
+ insn->type == INSN_ENDBR &&
+ list_empty(&insn->call_node)) {
+ if (insn->offset == func->offset) {
+ list_add_tail(&insn->call_node, &file->endbr_list);
+ file->nr_endbr++;
+ } else {
+ file->nr_endbr_int++;
+ }
+ }
+ }
}
}
- if (stats)
+ if (opts.stats)
printf("nr_insns: %lu\n", nr_insns);
return 0;
+}
-err:
- free(insn);
- return ret;
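The rewritten decode_instructions() above no longer malloc()s one struct instruction at a time: instructions are handed out from calloc()'d chunks, and a new chunk is allocated only when the current one fills up. A minimal sketch of that allocation pattern follows; the chunk size and names are assumptions for illustration, not objtool's real INSN_CHUNK_* values:

#include <stdio.h>
#include <stdlib.h>

#define CHUNK_SIZE 256			/* assumed; the real constant may differ */
#define CHUNK_MAX  (CHUNK_SIZE - 1)

struct item {
	unsigned long offset;
	unsigned char idx;		/* index within its chunk, like insn->idx */
};

int main(void)
{
	struct item *chunk = NULL;
	unsigned char idx = 0;
	unsigned long off;

	for (off = 0; off < 1000; off++) {
		struct item *it;

		/* start a new chunk only when none exists or the current one is full */
		if (!chunk || idx == CHUNK_MAX) {
			chunk = calloc(CHUNK_SIZE, sizeof(*chunk));
			if (!chunk)
				return 1;
			idx = 0;
		} else {
			idx++;
		}

		it = &chunk[idx];
		it->idx = idx;
		it->offset = off;
		/* this toy leaks full chunks; the real code keeps reaching them
		 * through the instructions stored in the hash table */
	}

	printf("allocated %lu items in chunks of %d\n", off, CHUNK_SIZE);
	return 0;
}

The per-instruction idx and prev_len fields recorded above are what let neighbouring instructions be located within a chunk without the old per-instruction linked list.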
+/*
+ * Read the pv_ops[] .data table to find the static initialized values.
+ */
+static int add_pv_ops(struct objtool_file *file, const char *symname)
+{
+ struct symbol *sym, *func;
+ unsigned long off, end;
+ struct reloc *rel;
+ int idx;
+
+ sym = find_symbol_by_name(file->elf, symname);
+ if (!sym)
+ return 0;
+
+ off = sym->offset;
+ end = off + sym->len;
+ for (;;) {
+ rel = find_reloc_by_dest_range(file->elf, sym->sec, off, end - off);
+ if (!rel)
+ break;
+
+ func = rel->sym;
+ if (func->type == STT_SECTION)
+ func = find_symbol_by_offset(rel->sym->sec, rel->addend);
+
+ idx = (rel->offset - sym->offset) / sizeof(unsigned long);
+
+ objtool_pv_add(file, idx, func);
+
+ off = rel->offset + 1;
+ if (off > end)
+ break;
+ }
+
+ return 0;
+}
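add_pv_ops() recovers the slot index of each statically initialized pv_ops[] entry from its relocation offset: the distance from the start of the symbol divided by the pointer size. A tiny stand-alone illustration of that arithmetic with invented offsets (pointer size assumed to be 8 bytes, as on x86-64):

#include <stdio.h>

int main(void)
{
	unsigned long sym_offset = 0x1000;			/* start of pv_ops[] */
	unsigned long rel_offsets[] = { 0x1000, 0x1008, 0x1020 };

	for (int i = 0; i < 3; i++) {
		int idx = (rel_offsets[i] - sym_offset) / sizeof(unsigned long);

		/* prints slots 0, 1 and 4 */
		printf("reloc at 0x%lx -> pv_ops[%d]\n", rel_offsets[i], idx);
	}
	return 0;
}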
+
+/*
+ * Allocate and initialize file->pv_ops[].
+ */
+static int init_pv_ops(struct objtool_file *file)
+{
+ static const char *pv_ops_tables[] = {
+ "pv_ops",
+ "xen_cpu_ops",
+ "xen_irq_ops",
+ "xen_mmu_ops",
+ NULL,
+ };
+ const char *pv_ops;
+ struct symbol *sym;
+ int idx, nr;
+
+ if (!opts.noinstr)
+ return 0;
+
+ file->pv_ops = NULL;
+
+ sym = find_symbol_by_name(file->elf, "pv_ops");
+ if (!sym)
+ return 0;
+
+ nr = sym->len / sizeof(unsigned long);
+ file->pv_ops = calloc(sizeof(struct pv_state), nr);
+ if (!file->pv_ops)
+ return -1;
+
+ for (idx = 0; idx < nr; idx++)
+ INIT_LIST_HEAD(&file->pv_ops[idx].targets);
+
+ for (idx = 0; (pv_ops = pv_ops_tables[idx]); idx++)
+ add_pv_ops(file, pv_ops);
+
+ return 0;
+}
+
+static struct instruction *find_last_insn(struct objtool_file *file,
+ struct section *sec)
+{
+ struct instruction *insn = NULL;
+ unsigned int offset;
+ unsigned int end = (sec->sh.sh_size > 10) ? sec->sh.sh_size - 10 : 0;
+
+ for (offset = sec->sh.sh_size - 1; offset >= end && !insn; offset--)
+ insn = find_insn(file, sec, offset);
+
+ return insn;
}
/*
@@ -309,17 +621,8 @@ err:
static int add_dead_ends(struct objtool_file *file)
{
struct section *sec;
- struct rela *rela;
+ struct reloc *reloc;
struct instruction *insn;
- bool found;
-
- /*
- * By default, "ud2" is a dead end unless otherwise annotated, because
- * GCC 7 inserts it for certain divide-by-zero cases.
- */
- for_each_insn(file, insn)
- if (insn->type == INSN_BUG)
- insn->dead_end = true;
/*
* Check for manually annotated dead ends.
@@ -328,31 +631,24 @@ static int add_dead_ends(struct objtool_file *file)
if (!sec)
goto reachable;
- list_for_each_entry(rela, &sec->rela_list, list) {
- if (rela->sym->type != STT_SECTION) {
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", sec->name);
return -1;
}
- insn = find_insn(file, rela->sym->sec, rela->addend);
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (insn)
- insn = list_prev_entry(insn, list);
- else if (rela->addend == rela->sym->sec->len) {
- found = false;
- list_for_each_entry_reverse(insn, &file->insn_list, list) {
- if (insn->sec == rela->sym->sec) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- WARN("can't find unreachable insn at %s+0x%x",
- rela->sym->sec->name, rela->addend);
+ insn = prev_insn_same_sec(file, insn);
+ else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
+ insn = find_last_insn(file, reloc->sym->sec);
+ if (!insn) {
+ WARN("can't find unreachable insn at %s+0x%" PRIx64,
+ reloc->sym->sec->name, reloc->addend);
return -1;
}
} else {
- WARN("can't find unreachable insn at %s+0x%x",
- rela->sym->sec->name, rela->addend);
+ WARN("can't find unreachable insn at %s+0x%" PRIx64,
+ reloc->sym->sec->name, reloc->addend);
return -1;
}
@@ -370,31 +666,24 @@ reachable:
if (!sec)
return 0;
- list_for_each_entry(rela, &sec->rela_list, list) {
- if (rela->sym->type != STT_SECTION) {
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", sec->name);
return -1;
}
- insn = find_insn(file, rela->sym->sec, rela->addend);
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (insn)
- insn = list_prev_entry(insn, list);
- else if (rela->addend == rela->sym->sec->len) {
- found = false;
- list_for_each_entry_reverse(insn, &file->insn_list, list) {
- if (insn->sec == rela->sym->sec) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- WARN("can't find reachable insn at %s+0x%x",
- rela->sym->sec->name, rela->addend);
+ insn = prev_insn_same_sec(file, insn);
+ else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
+ insn = find_last_insn(file, reloc->sym->sec);
+ if (!insn) {
+ WARN("can't find reachable insn at %s+0x%" PRIx64,
+ reloc->sym->sec->name, reloc->addend);
return -1;
}
} else {
- WARN("can't find reachable insn at %s+0x%x",
- rela->sym->sec->name, rela->addend);
+ WARN("can't find reachable insn at %s+0x%" PRIx64,
+ reloc->sym->sec->name, reloc->addend);
return -1;
}
@@ -404,6 +693,387 @@ reachable:
return 0;
}
+static int create_static_call_sections(struct objtool_file *file)
+{
+ struct section *sec;
+ struct static_call_site *site;
+ struct instruction *insn;
+ struct symbol *key_sym;
+ char *key_name, *tmp;
+ int idx;
+
+ sec = find_section_by_name(file->elf, ".static_call_sites");
+ if (sec) {
+ INIT_LIST_HEAD(&file->static_call_list);
+ WARN("file already has .static_call_sites section, skipping");
+ return 0;
+ }
+
+ if (list_empty(&file->static_call_list))
+ return 0;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->static_call_list, call_node)
+ idx++;
+
+ sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE,
+ sizeof(struct static_call_site), idx);
+ if (!sec)
+ return -1;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->static_call_list, call_node) {
+
+ site = (struct static_call_site *)sec->data->d_buf + idx;
+ memset(site, 0, sizeof(struct static_call_site));
+
+ /* populate reloc for 'addr' */
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(struct static_call_site),
+ R_X86_64_PC32,
+ insn->sec, insn->offset))
+ return -1;
+
+ /* find key symbol */
+ key_name = strdup(insn_call_dest(insn)->name);
+ if (!key_name) {
+ perror("strdup");
+ return -1;
+ }
+ if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR,
+ STATIC_CALL_TRAMP_PREFIX_LEN)) {
+ WARN("static_call: trampoline name malformed: %s", key_name);
+ free(key_name);
+ return -1;
+ }
+ tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN;
+ memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN);
+
+ key_sym = find_symbol_by_name(file->elf, tmp);
+ if (!key_sym) {
+ if (!opts.module) {
+ WARN("static_call: can't find static_call_key symbol: %s", tmp);
+ free(key_name);
+ return -1;
+ }
+
+ /*
+			 * For modules, the key might not be exported, which
+ * means the module can make static calls but isn't
+ * allowed to change them.
+ *
+ * In that case we temporarily set the key to be the
+ * trampoline address. This is fixed up in
+ * static_call_add_module().
+ */
+ key_sym = insn_call_dest(insn);
+ }
+ free(key_name);
+
+ /* populate reloc for 'key' */
+ if (elf_add_reloc(file->elf, sec,
+ idx * sizeof(struct static_call_site) + 4,
+ R_X86_64_PC32, key_sym,
+ is_sibling_call(insn) * STATIC_CALL_SITE_TAIL))
+ return -1;
+
+ idx++;
+ }
+
+ return 0;
+}
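create_static_call_sections() derives the static_call_key symbol name from the trampoline name by overwriting the prefix in place; the memcpy() trick works only because the two prefixes have the same length. A stand-alone sketch of that rewrite, where the "__SCT__"/"__SCK__" strings are assumed to match STATIC_CALL_TRAMP_PREFIX_STR/STATIC_CALL_KEY_PREFIX_STR and the symbol name is made up:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TRAMP_PREFIX "__SCT__"	/* assumed trampoline prefix */
#define KEY_PREFIX   "__SCK__"	/* assumed key prefix, same length */

int main(void)
{
	const char *tramp = "__SCT__preempt_schedule";	/* hypothetical symbol */
	size_t tlen = strlen(TRAMP_PREFIX);
	size_t klen = strlen(KEY_PREFIX);
	char *name, *key;

	name = strdup(tramp);
	if (!name || strncmp(name, TRAMP_PREFIX, tlen)) {
		fprintf(stderr, "malformed trampoline name\n");
		free(name);
		return 1;
	}

	/* overwrite the tail of the trampoline prefix with the key prefix */
	key = name + tlen - klen;
	memcpy(key, KEY_PREFIX, klen);

	printf("%s\n", key);		/* __SCK__preempt_schedule */
	free(name);
	return 0;
}

Because the prefixes are equally long, the rewritten key name reuses the strdup()'d buffer, which is why the function above frees key_name only after the symbol lookup.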
+
+static int create_retpoline_sites_sections(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct section *sec;
+ int idx;
+
+ sec = find_section_by_name(file->elf, ".retpoline_sites");
+ if (sec) {
+ WARN("file already has .retpoline_sites, skipping");
+ return 0;
+ }
+
+ idx = 0;
+ list_for_each_entry(insn, &file->retpoline_call_list, call_node)
+ idx++;
+
+ if (!idx)
+ return 0;
+
+ sec = elf_create_section(file->elf, ".retpoline_sites", 0,
+ sizeof(int), idx);
+ if (!sec) {
+ WARN("elf_create_section: .retpoline_sites");
+ return -1;
+ }
+
+ idx = 0;
+ list_for_each_entry(insn, &file->retpoline_call_list, call_node) {
+
+ int *site = (int *)sec->data->d_buf + idx;
+ *site = 0;
+
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(int),
+ R_X86_64_PC32,
+ insn->sec, insn->offset)) {
+ WARN("elf_add_reloc_to_insn: .retpoline_sites");
+ return -1;
+ }
+
+ idx++;
+ }
+
+ return 0;
+}
+
+static int create_return_sites_sections(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct section *sec;
+ int idx;
+
+ sec = find_section_by_name(file->elf, ".return_sites");
+ if (sec) {
+ WARN("file already has .return_sites, skipping");
+ return 0;
+ }
+
+ idx = 0;
+ list_for_each_entry(insn, &file->return_thunk_list, call_node)
+ idx++;
+
+ if (!idx)
+ return 0;
+
+ sec = elf_create_section(file->elf, ".return_sites", 0,
+ sizeof(int), idx);
+ if (!sec) {
+ WARN("elf_create_section: .return_sites");
+ return -1;
+ }
+
+ idx = 0;
+ list_for_each_entry(insn, &file->return_thunk_list, call_node) {
+
+ int *site = (int *)sec->data->d_buf + idx;
+ *site = 0;
+
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(int),
+ R_X86_64_PC32,
+ insn->sec, insn->offset)) {
+ WARN("elf_add_reloc_to_insn: .return_sites");
+ return -1;
+ }
+
+ idx++;
+ }
+
+ return 0;
+}
+
+static int create_ibt_endbr_seal_sections(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct section *sec;
+ int idx;
+
+ sec = find_section_by_name(file->elf, ".ibt_endbr_seal");
+ if (sec) {
+ WARN("file already has .ibt_endbr_seal, skipping");
+ return 0;
+ }
+
+ idx = 0;
+ list_for_each_entry(insn, &file->endbr_list, call_node)
+ idx++;
+
+ if (opts.stats) {
+ printf("ibt: ENDBR at function start: %d\n", file->nr_endbr);
+ printf("ibt: ENDBR inside functions: %d\n", file->nr_endbr_int);
+ printf("ibt: superfluous ENDBR: %d\n", idx);
+ }
+
+ if (!idx)
+ return 0;
+
+ sec = elf_create_section(file->elf, ".ibt_endbr_seal", 0,
+ sizeof(int), idx);
+ if (!sec) {
+ WARN("elf_create_section: .ibt_endbr_seal");
+ return -1;
+ }
+
+ idx = 0;
+ list_for_each_entry(insn, &file->endbr_list, call_node) {
+
+ int *site = (int *)sec->data->d_buf + idx;
+ struct symbol *sym = insn->sym;
+ *site = 0;
+
+ if (opts.module && sym && sym->type == STT_FUNC &&
+ insn->offset == sym->offset &&
+ (!strcmp(sym->name, "init_module") ||
+ !strcmp(sym->name, "cleanup_module")))
+ WARN("%s(): not an indirect call target", sym->name);
+
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(int),
+ R_X86_64_PC32,
+ insn->sec, insn->offset)) {
+ WARN("elf_add_reloc_to_insn: .ibt_endbr_seal");
+ return -1;
+ }
+
+ idx++;
+ }
+
+ return 0;
+}
+
+static int create_cfi_sections(struct objtool_file *file)
+{
+ struct section *sec;
+ struct symbol *sym;
+ unsigned int *loc;
+ int idx;
+
+ sec = find_section_by_name(file->elf, ".cfi_sites");
+ if (sec) {
+ INIT_LIST_HEAD(&file->call_list);
+ WARN("file already has .cfi_sites section, skipping");
+ return 0;
+ }
+
+ idx = 0;
+ for_each_sym(file, sym) {
+ if (sym->type != STT_FUNC)
+ continue;
+
+ if (strncmp(sym->name, "__cfi_", 6))
+ continue;
+
+ idx++;
+ }
+
+ sec = elf_create_section(file->elf, ".cfi_sites", 0, sizeof(unsigned int), idx);
+ if (!sec)
+ return -1;
+
+ idx = 0;
+ for_each_sym(file, sym) {
+ if (sym->type != STT_FUNC)
+ continue;
+
+ if (strncmp(sym->name, "__cfi_", 6))
+ continue;
+
+ loc = (unsigned int *)sec->data->d_buf + idx;
+ memset(loc, 0, sizeof(unsigned int));
+
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(unsigned int),
+ R_X86_64_PC32,
+ sym->sec, sym->offset))
+ return -1;
+
+ idx++;
+ }
+
+ return 0;
+}
+
+static int create_mcount_loc_sections(struct objtool_file *file)
+{
+ int addrsize = elf_class_addrsize(file->elf);
+ struct instruction *insn;
+ struct section *sec;
+ int idx;
+
+ sec = find_section_by_name(file->elf, "__mcount_loc");
+ if (sec) {
+ INIT_LIST_HEAD(&file->mcount_loc_list);
+ WARN("file already has __mcount_loc section, skipping");
+ return 0;
+ }
+
+ if (list_empty(&file->mcount_loc_list))
+ return 0;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->mcount_loc_list, call_node)
+ idx++;
+
+ sec = elf_create_section(file->elf, "__mcount_loc", 0, addrsize, idx);
+ if (!sec)
+ return -1;
+
+ sec->sh.sh_addralign = addrsize;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->mcount_loc_list, call_node) {
+ void *loc;
+
+ loc = sec->data->d_buf + idx;
+ memset(loc, 0, addrsize);
+
+ if (elf_add_reloc_to_insn(file->elf, sec, idx,
+ addrsize == sizeof(u64) ? R_ABS64 : R_ABS32,
+ insn->sec, insn->offset))
+ return -1;
+
+ idx += addrsize;
+ }
+
+ return 0;
+}
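Unlike the other section builders here, create_mcount_loc_sections() advances idx in bytes (idx += addrsize) rather than in entries, because the slot width depends on the ELF class. A conceptual toy of laying out one zeroed, pointer-sized slot per fentry call site and recording where each relocation would go; all values are invented and no real ELF API is used:

#include <stdio.h>
#include <string.h>

struct fake_reloc {			/* (offset-in-section, relocation target) */
	unsigned int sec_offset;
	unsigned long target;
};

int main(void)
{
	unsigned long call_sites[] = { 0x100, 0x140, 0x1a0 };	/* fentry call offsets */
	const unsigned int nr = 3;
	const unsigned int addrsize = sizeof(unsigned long);	/* 8 for ELFCLASS64 */
	unsigned char section[3 * sizeof(unsigned long)];
	struct fake_reloc relocs[3];
	unsigned int idx = 0;

	memset(section, 0, sizeof(section));	/* slots start out zeroed */

	for (unsigned int i = 0; i < nr; i++) {
		relocs[i].sec_offset = idx;		/* where the slot lives */
		relocs[i].target = call_sites[i];	/* what it will point at */
		idx += addrsize;			/* idx counts bytes, not entries */
	}

	printf("__mcount_loc: %u bytes, %u relocs\n", idx, nr);
	return 0;
}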
+
+static int create_direct_call_sections(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct section *sec;
+ unsigned int *loc;
+ int idx;
+
+ sec = find_section_by_name(file->elf, ".call_sites");
+ if (sec) {
+ INIT_LIST_HEAD(&file->call_list);
+ WARN("file already has .call_sites section, skipping");
+ return 0;
+ }
+
+ if (list_empty(&file->call_list))
+ return 0;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->call_list, call_node)
+ idx++;
+
+ sec = elf_create_section(file->elf, ".call_sites", 0, sizeof(unsigned int), idx);
+ if (!sec)
+ return -1;
+
+ idx = 0;
+ list_for_each_entry(insn, &file->call_list, call_node) {
+
+ loc = (unsigned int *)sec->data->d_buf + idx;
+ memset(loc, 0, sizeof(unsigned int));
+
+ if (elf_add_reloc_to_insn(file->elf, sec,
+ idx * sizeof(unsigned int),
+ R_X86_64_PC32,
+ insn->sec, insn->offset))
+ return -1;
+
+ idx++;
+ }
+
+ return 0;
+}
+
/*
* Warnings shouldn't be reported for ignored functions.
*/
@@ -412,26 +1082,26 @@ static void add_ignores(struct objtool_file *file)
struct instruction *insn;
struct section *sec;
struct symbol *func;
- struct rela *rela;
+ struct reloc *reloc;
sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard");
if (!sec)
return;
- list_for_each_entry(rela, &sec->rela_list, list) {
- switch (rela->sym->type) {
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ switch (reloc->sym->type) {
case STT_FUNC:
- func = rela->sym;
+ func = reloc->sym;
break;
case STT_SECTION:
- func = find_func_by_offset(rela->sym->sec, rela->addend);
+ func = find_func_by_offset(reloc->sym->sec, reloc->addend);
if (!func)
continue;
break;
default:
- WARN("unexpected relocation symbol type in %s: %d", sec->name, rela->sym->type);
+ WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type);
continue;
}
@@ -450,7 +1120,7 @@ static void add_ignores(struct objtool_file *file)
static const char *uaccess_safe_builtin[] = {
/* KASAN */
"kasan_report",
- "check_memory_region",
+ "kasan_check_range",
/* KASAN out-of-line */
"__asan_loadN_noabort",
"__asan_load1_noabort",
@@ -464,6 +1134,8 @@ static const char *uaccess_safe_builtin[] = {
"__asan_store4_noabort",
"__asan_store8_noabort",
"__asan_store16_noabort",
+ "__kasan_check_read",
+ "__kasan_check_write",
/* KASAN in-line */
"__asan_report_load_n_noabort",
"__asan_report_load1_noabort",
@@ -479,6 +1151,10 @@ static const char *uaccess_safe_builtin[] = {
"__asan_report_store16_noabort",
/* KCSAN */
"__kcsan_check_access",
+ "__kcsan_mb",
+ "__kcsan_wmb",
+ "__kcsan_rmb",
+ "__kcsan_release",
"kcsan_found_watchpoint",
"kcsan_setup_watchpoint",
"kcsan_check_scoped_accesses",
@@ -499,8 +1175,76 @@ static const char *uaccess_safe_builtin[] = {
"__tsan_write4",
"__tsan_write8",
"__tsan_write16",
+ "__tsan_read_write1",
+ "__tsan_read_write2",
+ "__tsan_read_write4",
+ "__tsan_read_write8",
+ "__tsan_read_write16",
+ "__tsan_volatile_read1",
+ "__tsan_volatile_read2",
+ "__tsan_volatile_read4",
+ "__tsan_volatile_read8",
+ "__tsan_volatile_read16",
+ "__tsan_volatile_write1",
+ "__tsan_volatile_write2",
+ "__tsan_volatile_write4",
+ "__tsan_volatile_write8",
+ "__tsan_volatile_write16",
+ "__tsan_atomic8_load",
+ "__tsan_atomic16_load",
+ "__tsan_atomic32_load",
+ "__tsan_atomic64_load",
+ "__tsan_atomic8_store",
+ "__tsan_atomic16_store",
+ "__tsan_atomic32_store",
+ "__tsan_atomic64_store",
+ "__tsan_atomic8_exchange",
+ "__tsan_atomic16_exchange",
+ "__tsan_atomic32_exchange",
+ "__tsan_atomic64_exchange",
+ "__tsan_atomic8_fetch_add",
+ "__tsan_atomic16_fetch_add",
+ "__tsan_atomic32_fetch_add",
+ "__tsan_atomic64_fetch_add",
+ "__tsan_atomic8_fetch_sub",
+ "__tsan_atomic16_fetch_sub",
+ "__tsan_atomic32_fetch_sub",
+ "__tsan_atomic64_fetch_sub",
+ "__tsan_atomic8_fetch_and",
+ "__tsan_atomic16_fetch_and",
+ "__tsan_atomic32_fetch_and",
+ "__tsan_atomic64_fetch_and",
+ "__tsan_atomic8_fetch_or",
+ "__tsan_atomic16_fetch_or",
+ "__tsan_atomic32_fetch_or",
+ "__tsan_atomic64_fetch_or",
+ "__tsan_atomic8_fetch_xor",
+ "__tsan_atomic16_fetch_xor",
+ "__tsan_atomic32_fetch_xor",
+ "__tsan_atomic64_fetch_xor",
+ "__tsan_atomic8_fetch_nand",
+ "__tsan_atomic16_fetch_nand",
+ "__tsan_atomic32_fetch_nand",
+ "__tsan_atomic64_fetch_nand",
+ "__tsan_atomic8_compare_exchange_strong",
+ "__tsan_atomic16_compare_exchange_strong",
+ "__tsan_atomic32_compare_exchange_strong",
+ "__tsan_atomic64_compare_exchange_strong",
+ "__tsan_atomic8_compare_exchange_weak",
+ "__tsan_atomic16_compare_exchange_weak",
+ "__tsan_atomic32_compare_exchange_weak",
+ "__tsan_atomic64_compare_exchange_weak",
+ "__tsan_atomic8_compare_exchange_val",
+ "__tsan_atomic16_compare_exchange_val",
+ "__tsan_atomic32_compare_exchange_val",
+ "__tsan_atomic64_compare_exchange_val",
+ "__tsan_atomic_thread_fence",
+ "__tsan_atomic_signal_fence",
+ "__tsan_unaligned_read16",
+ "__tsan_unaligned_write16",
/* KCOV */
"write_comp_data",
+ "check_kcov_mode",
"__sanitizer_cov_trace_pc",
"__sanitizer_cov_trace_const_cmp1",
"__sanitizer_cov_trace_const_cmp2",
@@ -511,16 +1255,43 @@ static const char *uaccess_safe_builtin[] = {
"__sanitizer_cov_trace_cmp4",
"__sanitizer_cov_trace_cmp8",
"__sanitizer_cov_trace_switch",
+ /* KMSAN */
+ "kmsan_copy_to_user",
+ "kmsan_report",
+ "kmsan_unpoison_entry_regs",
+ "kmsan_unpoison_memory",
+ "__msan_chain_origin",
+ "__msan_get_context_state",
+ "__msan_instrument_asm_store",
+ "__msan_metadata_ptr_for_load_1",
+ "__msan_metadata_ptr_for_load_2",
+ "__msan_metadata_ptr_for_load_4",
+ "__msan_metadata_ptr_for_load_8",
+ "__msan_metadata_ptr_for_load_n",
+ "__msan_metadata_ptr_for_store_1",
+ "__msan_metadata_ptr_for_store_2",
+ "__msan_metadata_ptr_for_store_4",
+ "__msan_metadata_ptr_for_store_8",
+ "__msan_metadata_ptr_for_store_n",
+ "__msan_poison_alloca",
+ "__msan_warning",
/* UBSAN */
"ubsan_type_mismatch_common",
"__ubsan_handle_type_mismatch",
"__ubsan_handle_type_mismatch_v1",
"__ubsan_handle_shift_out_of_bounds",
+ "__ubsan_handle_load_invalid_value",
+ /* STACKLEAK */
+ "stackleak_track_stack",
/* misc */
"csum_partial_copy_generic",
- "__memcpy_mcsafe",
- "mcsafe_handle_tail",
+ "copy_mc_fragile",
+ "copy_mc_fragile_handle_tail",
+ "copy_mc_enhanced_fast_string",
"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
+ "rep_stos_alternative",
+ "rep_movs_alternative",
+ "__copy_user_nocache",
NULL
};
@@ -529,7 +1300,7 @@ static void add_uaccess_safe(struct objtool_file *file)
struct symbol *func;
const char **name;
- if (!uaccess)
+ if (!opts.uaccess)
return;
for (name = uaccess_safe_builtin; *name; name++) {
@@ -550,20 +1321,20 @@ static void add_uaccess_safe(struct objtool_file *file)
static int add_ignore_alternatives(struct objtool_file *file)
{
struct section *sec;
- struct rela *rela;
+ struct reloc *reloc;
struct instruction *insn;
sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
if (!sec)
return 0;
- list_for_each_entry(rela, &sec->rela_list, list) {
- if (rela->sym->type != STT_SECTION) {
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", sec->name);
return -1;
}
- insn = find_insn(file, rela->sym->sec, rela->addend);
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (!insn) {
WARN("bad .discard.ignore_alts entry");
return -1;
@@ -575,74 +1346,320 @@ static int add_ignore_alternatives(struct objtool_file *file)
return 0;
}
+__weak bool arch_is_retpoline(struct symbol *sym)
+{
+ return false;
+}
+
+__weak bool arch_is_rethunk(struct symbol *sym)
+{
+ return false;
+}
+
+static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
+{
+ struct reloc *reloc;
+
+ if (insn->no_reloc)
+ return NULL;
+
+ if (!file)
+ return NULL;
+
+ reloc = find_reloc_by_dest_range(file->elf, insn->sec,
+ insn->offset, insn->len);
+ if (!reloc) {
+ insn->no_reloc = 1;
+ return NULL;
+ }
+
+ return reloc;
+}
+
+static void remove_insn_ops(struct instruction *insn)
+{
+ struct stack_op *op, *next;
+
+ for (op = insn->stack_ops; op; op = next) {
+ next = op->next;
+ free(op);
+ }
+ insn->stack_ops = NULL;
+}
+
+static void annotate_call_site(struct objtool_file *file,
+ struct instruction *insn, bool sibling)
+{
+ struct reloc *reloc = insn_reloc(file, insn);
+ struct symbol *sym = insn_call_dest(insn);
+
+ if (!sym)
+ sym = reloc->sym;
+
+ /*
+ * Alternative replacement code is just template code which is
+ * sometimes copied to the original instruction. For now, don't
+ * annotate it. (In the future we might consider annotating the
+ * original instruction if/when it ever makes sense to do so.)
+ */
+ if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+ return;
+
+ if (sym->static_call_tramp) {
+ list_add_tail(&insn->call_node, &file->static_call_list);
+ return;
+ }
+
+ if (sym->retpoline_thunk) {
+ list_add_tail(&insn->call_node, &file->retpoline_call_list);
+ return;
+ }
+
+ /*
+ * Many compilers cannot disable KCOV or sanitizer calls with a function
+	 * attribute, so they need a little help: NOP out any such calls from
+ * noinstr text.
+ */
+ if (opts.hack_noinstr && insn->sec->noinstr && sym->profiling_func) {
+ if (reloc) {
+ reloc->type = R_NONE;
+ elf_write_reloc(file->elf, reloc);
+ }
+
+ elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ sibling ? arch_ret_insn(insn->len)
+ : arch_nop_insn(insn->len));
+
+ insn->type = sibling ? INSN_RETURN : INSN_NOP;
+
+ if (sibling) {
+ /*
+			 * We've replaced the tail-call JMP insn with two new
+			 * insns: RET; INT3, except we only have a single struct
+ * insn here. Mark it retpoline_safe to avoid the SLS
+ * warning, instead of adding another insn.
+ */
+ insn->retpoline_safe = true;
+ }
+
+ return;
+ }
+
+ if (opts.mcount && sym->fentry) {
+ if (sibling)
+ WARN_INSN(insn, "tail call to __fentry__ !?!?");
+ if (opts.mnop) {
+ if (reloc) {
+ reloc->type = R_NONE;
+ elf_write_reloc(file->elf, reloc);
+ }
+
+ elf_write_insn(file->elf, insn->sec,
+ insn->offset, insn->len,
+ arch_nop_insn(insn->len));
+
+ insn->type = INSN_NOP;
+ }
+
+ list_add_tail(&insn->call_node, &file->mcount_loc_list);
+ return;
+ }
+
+ if (insn->type == INSN_CALL && !insn->sec->init)
+ list_add_tail(&insn->call_node, &file->call_list);
+
+ if (!sibling && dead_end_function(file, sym))
+ insn->dead_end = true;
+}
+
+static void add_call_dest(struct objtool_file *file, struct instruction *insn,
+ struct symbol *dest, bool sibling)
+{
+ insn->_call_dest = dest;
+ if (!dest)
+ return;
+
+ /*
+ * Whatever stack impact regular CALLs have, should be undone
+ * by the RETURN of the called function.
+ *
+ * Annotated intra-function calls retain the stack_ops but
+ * are converted to JUMP, see read_intra_function_calls().
+ */
+ remove_insn_ops(insn);
+
+ annotate_call_site(file, insn, sibling);
+}
+
+static void add_retpoline_call(struct objtool_file *file, struct instruction *insn)
+{
+ /*
+ * Retpoline calls/jumps are really dynamic calls/jumps in disguise,
+ * so convert them accordingly.
+ */
+ switch (insn->type) {
+ case INSN_CALL:
+ insn->type = INSN_CALL_DYNAMIC;
+ break;
+ case INSN_JUMP_UNCONDITIONAL:
+ insn->type = INSN_JUMP_DYNAMIC;
+ break;
+ case INSN_JUMP_CONDITIONAL:
+ insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
+ break;
+ default:
+ return;
+ }
+
+ insn->retpoline_safe = true;
+
+ /*
+ * Whatever stack impact regular CALLs have, should be undone
+ * by the RETURN of the called function.
+ *
+ * Annotated intra-function calls retain the stack_ops but
+ * are converted to JUMP, see read_intra_function_calls().
+ */
+ remove_insn_ops(insn);
+
+ annotate_call_site(file, insn, false);
+}
+
+static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add)
+{
+ /*
+ * Return thunk tail calls are really just returns in disguise,
+ * so convert them accordingly.
+ */
+ insn->type = INSN_RETURN;
+ insn->retpoline_safe = true;
+
+ if (add)
+ list_add_tail(&insn->call_node, &file->return_thunk_list);
+}
+
+static bool is_first_func_insn(struct objtool_file *file,
+ struct instruction *insn, struct symbol *sym)
+{
+ if (insn->offset == sym->offset)
+ return true;
+
+ /* Allow direct CALL/JMP past ENDBR */
+ if (opts.ibt) {
+ struct instruction *prev = prev_insn_same_sym(file, insn);
+
+ if (prev && prev->type == INSN_ENDBR &&
+ insn->offset == sym->offset + prev->len)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * A sibling call is a tail-call to another symbol -- to differentiate from a
+ * recursive tail-call which is to the same symbol.
+ */
+static bool jump_is_sibling_call(struct objtool_file *file,
+ struct instruction *from, struct instruction *to)
+{
+ struct symbol *fs = from->sym;
+ struct symbol *ts = to->sym;
+
+ /* Not a sibling call if from/to a symbol hole */
+ if (!fs || !ts)
+ return false;
+
+ /* Not a sibling call if not targeting the start of a symbol. */
+ if (!is_first_func_insn(file, to, ts))
+ return false;
+
+ /* Disallow sibling calls into STT_NOTYPE */
+ if (ts->type == STT_NOTYPE)
+ return false;
+
+ /* Must not be self to be a sibling */
+ return fs->pfunc != ts->pfunc;
+}
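jump_is_sibling_call() only treats a tail-call jump as a sibling call when it lands on the first instruction of a different function. The toy model below captures just that core check; it deliberately ignores the ENDBR allowance in is_first_func_insn() and the STT_NOTYPE exclusion, and all structures and offsets are invented:

#include <stdbool.h>
#include <stdio.h>

struct toy_sym  { const char *name; unsigned long offset; };
struct toy_insn { struct toy_sym *sym; unsigned long offset; };

static bool toy_is_sibling(struct toy_insn *from, struct toy_insn *to)
{
	if (!from->sym || !to->sym)
		return false;			/* jump into a symbol hole   */
	if (to->offset != to->sym->offset)
		return false;			/* not the start of a symbol */
	return from->sym != to->sym;		/* self-jump = recursion     */
}

int main(void)
{
	struct toy_sym a = { "foo", 0x000 }, b = { "bar", 0x100 };
	struct toy_insn jmp = { &a, 0x040 };
	struct toy_insn to_bar = { &b, 0x100 };
	struct toy_insn to_foo_start = { &a, 0x000 };
	struct toy_insn to_bar_mid = { &b, 0x120 };

	printf("foo -> bar    : %d\n", toy_is_sibling(&jmp, &to_bar));		/* 1 */
	printf("foo -> foo    : %d\n", toy_is_sibling(&jmp, &to_foo_start));	/* 0 */
	printf("foo -> bar+32 : %d\n", toy_is_sibling(&jmp, &to_bar_mid));	/* 0 */
	return 0;
}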
+
/*
* Find the destination instructions for all jumps.
*/
static int add_jump_destinations(struct objtool_file *file)
{
- struct instruction *insn;
- struct rela *rela;
+ struct instruction *insn, *jump_dest;
+ struct reloc *reloc;
struct section *dest_sec;
unsigned long dest_off;
for_each_insn(file, insn) {
- if (!is_static_jump(insn))
+ if (insn->jump_dest) {
+ /*
+ * handle_group_alt() may have previously set
+ * 'jump_dest' for some alternatives.
+ */
continue;
-
- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
+ }
+ if (!is_static_jump(insn))
continue;
- rela = find_rela_by_dest_range(file->elf, insn->sec,
- insn->offset, insn->len);
- if (!rela) {
+ reloc = insn_reloc(file, insn);
+ if (!reloc) {
dest_sec = insn->sec;
- dest_off = insn->offset + insn->len + insn->immediate;
- } else if (rela->sym->type == STT_SECTION) {
- dest_sec = rela->sym->sec;
- dest_off = rela->addend + 4;
- } else if (rela->sym->sec->idx) {
- dest_sec = rela->sym->sec;
- dest_off = rela->sym->sym.st_value + rela->addend + 4;
- } else if (strstr(rela->sym->name, "_indirect_thunk_")) {
+ dest_off = arch_jump_destination(insn);
+ } else if (reloc->sym->type == STT_SECTION) {
+ dest_sec = reloc->sym->sec;
+ dest_off = arch_dest_reloc_offset(reloc->addend);
+ } else if (reloc->sym->retpoline_thunk) {
+ add_retpoline_call(file, insn);
+ continue;
+ } else if (reloc->sym->return_thunk) {
+ add_return_call(file, insn, true);
+ continue;
+ } else if (insn_func(insn)) {
/*
- * Retpoline jumps are really dynamic jumps in
- * disguise, so convert them accordingly.
+ * External sibling call or internal sibling call with
+ * STT_FUNC reloc.
*/
- if (insn->type == INSN_JUMP_UNCONDITIONAL)
- insn->type = INSN_JUMP_DYNAMIC;
- else
- insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL;
-
- insn->retpoline_safe = true;
+ add_call_dest(file, insn, reloc->sym, true);
continue;
+ } else if (reloc->sym->sec->idx) {
+ dest_sec = reloc->sym->sec;
+ dest_off = reloc->sym->sym.st_value +
+ arch_dest_reloc_offset(reloc->addend);
} else {
- /* external sibling call */
- insn->call_dest = rela->sym;
+ /* non-func asm code jumping to another file */
continue;
}
- insn->jump_dest = find_insn(file, dest_sec, dest_off);
- if (!insn->jump_dest) {
+ jump_dest = find_insn(file, dest_sec, dest_off);
+ if (!jump_dest) {
+ struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
/*
- * This is a special case where an alt instruction
- * jumps past the end of the section. These are
- * handled later in handle_group_alt().
+ * This is a special case for zen_untrain_ret().
+ * It jumps to __x86_return_thunk(), but objtool
+ * can't find the thunk's starting RET
+ * instruction, because the RET is also in the
+ * middle of another instruction. Objtool only
+ * knows about the outer instruction.
*/
- if (!strcmp(insn->sec->name, ".altinstr_replacement"))
+ if (sym && sym->return_thunk) {
+ add_return_call(file, insn, false);
continue;
+ }
- WARN_FUNC("can't find jump dest instruction at %s+0x%lx",
- insn->sec, insn->offset, dest_sec->name,
- dest_off);
+ WARN_INSN(insn, "can't find jump dest instruction at %s+0x%lx",
+ dest_sec->name, dest_off);
return -1;
}
/*
* Cross-function jump.
*/
- if (insn->func && insn->jump_dest->func &&
- insn->func != insn->jump_dest->func) {
+ if (insn_func(insn) && insn_func(jump_dest) &&
+ insn_func(insn) != insn_func(jump_dest)) {
/*
* For GCC 8+, create parent/child links for any cold
@@ -659,23 +1676,39 @@ static int add_jump_destinations(struct objtool_file *file)
* case where the parent function's only reference to a
* subfunction is through a jump table.
*/
- if (!strstr(insn->func->name, ".cold.") &&
- strstr(insn->jump_dest->func->name, ".cold.")) {
- insn->func->cfunc = insn->jump_dest->func;
- insn->jump_dest->func->pfunc = insn->func;
-
- } else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
- insn->jump_dest->offset == insn->jump_dest->func->offset) {
-
- /* internal sibling call */
- insn->call_dest = insn->jump_dest->func;
+ if (!strstr(insn_func(insn)->name, ".cold") &&
+ strstr(insn_func(jump_dest)->name, ".cold")) {
+ insn_func(insn)->cfunc = insn_func(jump_dest);
+ insn_func(jump_dest)->pfunc = insn_func(insn);
}
}
+
+ if (jump_is_sibling_call(file, insn, jump_dest)) {
+ /*
+ * Internal sibling call without reloc or with
+ * STT_SECTION reloc.
+ */
+ add_call_dest(file, insn, insn_func(jump_dest), true);
+ continue;
+ }
+
+ insn->jump_dest = jump_dest;
}
return 0;
}
+static struct symbol *find_call_destination(struct section *sec, unsigned long offset)
+{
+ struct symbol *call_dest;
+
+ call_dest = find_func_by_offset(sec, offset);
+ if (!call_dest)
+ call_dest = find_symbol_by_offset(sec, offset);
+
+ return call_dest;
+}
+
/*
* Find the destination instructions for all calls.
*/
@@ -683,127 +1716,154 @@ static int add_call_destinations(struct objtool_file *file)
{
struct instruction *insn;
unsigned long dest_off;
- struct rela *rela;
+ struct symbol *dest;
+ struct reloc *reloc;
for_each_insn(file, insn) {
if (insn->type != INSN_CALL)
continue;
- rela = find_rela_by_dest_range(file->elf, insn->sec,
- insn->offset, insn->len);
- if (!rela) {
- dest_off = insn->offset + insn->len + insn->immediate;
- insn->call_dest = find_func_by_offset(insn->sec, dest_off);
- if (!insn->call_dest)
- insn->call_dest = find_symbol_by_offset(insn->sec, dest_off);
+ reloc = insn_reloc(file, insn);
+ if (!reloc) {
+ dest_off = arch_jump_destination(insn);
+ dest = find_call_destination(insn->sec, dest_off);
+
+ add_call_dest(file, insn, dest, false);
if (insn->ignore)
continue;
- if (!insn->call_dest) {
- WARN_FUNC("unsupported intra-function call",
- insn->sec, insn->offset);
- if (retpoline)
- WARN("If this is a retpoline, please patch it in with alternatives and annotate it with ANNOTATE_NOSPEC_ALTERNATIVE.");
+ if (!insn_call_dest(insn)) {
+ WARN_INSN(insn, "unannotated intra-function call");
return -1;
}
- if (insn->func && insn->call_dest->type != STT_FUNC) {
- WARN_FUNC("unsupported call to non-function",
- insn->sec, insn->offset);
+ if (insn_func(insn) && insn_call_dest(insn)->type != STT_FUNC) {
+ WARN_INSN(insn, "unsupported call to non-function");
return -1;
}
- } else if (rela->sym->type == STT_SECTION) {
- insn->call_dest = find_func_by_offset(rela->sym->sec,
- rela->addend+4);
- if (!insn->call_dest) {
- WARN_FUNC("can't find call dest symbol at %s+0x%x",
- insn->sec, insn->offset,
- rela->sym->sec->name,
- rela->addend + 4);
+ } else if (reloc->sym->type == STT_SECTION) {
+ dest_off = arch_dest_reloc_offset(reloc->addend);
+ dest = find_call_destination(reloc->sym->sec, dest_off);
+ if (!dest) {
+ WARN_INSN(insn, "can't find call dest symbol at %s+0x%lx",
+ reloc->sym->sec->name, dest_off);
return -1;
}
+
+ add_call_dest(file, insn, dest, false);
+
+ } else if (reloc->sym->retpoline_thunk) {
+ add_retpoline_call(file, insn);
+
} else
- insn->call_dest = rela->sym;
+ add_call_dest(file, insn, reloc->sym, false);
}
return 0;
}
/*
- * The .alternatives section requires some extra special care, over and above
- * what other special sections require:
- *
- * 1. Because alternatives are patched in-place, we need to insert a fake jump
- * instruction at the end so that validate_branch() skips all the original
- * replaced instructions when validating the new instruction path.
- *
- * 2. An added wrinkle is that the new instruction length might be zero. In
- * that case the old instructions are replaced with noops. We simulate that
- * by creating a fake jump as the only new instruction.
- *
- * 3. In some cases, the alternative section includes an instruction which
- * conditionally jumps to the _end_ of the entry. We have to modify these
- * jumps' destinations to point back to .text rather than the end of the
- * entry in .altinstr_replacement.
+ * The .alternatives section requires some extra special care over and above
+ * other special sections because alternatives are patched in place.
*/
static int handle_group_alt(struct objtool_file *file,
struct special_alt *special_alt,
struct instruction *orig_insn,
struct instruction **new_insn)
{
- struct instruction *last_orig_insn, *last_new_insn, *insn, *fake_jump = NULL;
+ struct instruction *last_new_insn = NULL, *insn, *nop = NULL;
+ struct alt_group *orig_alt_group, *new_alt_group;
unsigned long dest_off;
- last_orig_insn = NULL;
- insn = orig_insn;
- sec_for_each_insn_from(file, insn) {
- if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
- break;
+ orig_alt_group = orig_insn->alt_group;
+ if (!orig_alt_group) {
+ struct instruction *last_orig_insn = NULL;
- insn->alt_group = true;
- last_orig_insn = insn;
- }
-
- if (next_insn_same_sec(file, last_orig_insn)) {
- fake_jump = malloc(sizeof(*fake_jump));
- if (!fake_jump) {
+ orig_alt_group = malloc(sizeof(*orig_alt_group));
+ if (!orig_alt_group) {
WARN("malloc failed");
return -1;
}
- memset(fake_jump, 0, sizeof(*fake_jump));
- INIT_LIST_HEAD(&fake_jump->alts);
- clear_insn_state(&fake_jump->state);
+ orig_alt_group->cfi = calloc(special_alt->orig_len,
+ sizeof(struct cfi_state *));
+ if (!orig_alt_group->cfi) {
+ WARN("calloc failed");
+ return -1;
+ }
+
+ insn = orig_insn;
+ sec_for_each_insn_from(file, insn) {
+ if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
+ break;
- fake_jump->sec = special_alt->new_sec;
- fake_jump->offset = FAKE_JUMP_OFFSET;
- fake_jump->type = INSN_JUMP_UNCONDITIONAL;
- fake_jump->jump_dest = list_next_entry(last_orig_insn, list);
- fake_jump->func = orig_insn->func;
+ insn->alt_group = orig_alt_group;
+ last_orig_insn = insn;
+ }
+ orig_alt_group->orig_group = NULL;
+ orig_alt_group->first_insn = orig_insn;
+ orig_alt_group->last_insn = last_orig_insn;
+ orig_alt_group->nop = NULL;
+ } else {
+ if (orig_alt_group->last_insn->offset + orig_alt_group->last_insn->len -
+ orig_alt_group->first_insn->offset != special_alt->orig_len) {
+ WARN_INSN(orig_insn, "weirdly overlapping alternative! %ld != %d",
+ orig_alt_group->last_insn->offset +
+ orig_alt_group->last_insn->len -
+ orig_alt_group->first_insn->offset,
+ special_alt->orig_len);
+ return -1;
+ }
}
- if (!special_alt->new_len) {
- if (!fake_jump) {
- WARN("%s: empty alternative at end of section",
- special_alt->orig_sec->name);
+ new_alt_group = malloc(sizeof(*new_alt_group));
+ if (!new_alt_group) {
+ WARN("malloc failed");
+ return -1;
+ }
+
+ if (special_alt->new_len < special_alt->orig_len) {
+ /*
+ * Insert a fake nop at the end to make the replacement
+ * alt_group the same size as the original. This is needed to
+ * allow propagate_alt_cfi() to do its magic. When the last
+ * instruction affects the stack, the instruction after it (the
+ * nop) will propagate the new state to the shared CFI array.
+ */
+ nop = malloc(sizeof(*nop));
+ if (!nop) {
+ WARN("malloc failed");
return -1;
}
+ memset(nop, 0, sizeof(*nop));
+
+ nop->sec = special_alt->new_sec;
+ nop->offset = special_alt->new_off + special_alt->new_len;
+ nop->len = special_alt->orig_len - special_alt->new_len;
+ nop->type = INSN_NOP;
+ nop->sym = orig_insn->sym;
+ nop->alt_group = new_alt_group;
+ nop->ignore = orig_insn->ignore_alts;
+ }
- *new_insn = fake_jump;
- return 0;
+ if (!special_alt->new_len) {
+ *new_insn = nop;
+ goto end;
}
- last_new_insn = NULL;
insn = *new_insn;
sec_for_each_insn_from(file, insn) {
+ struct reloc *alt_reloc;
+
if (insn->offset >= special_alt->new_off + special_alt->new_len)
break;
last_new_insn = insn;
insn->ignore = orig_insn->ignore_alts;
- insn->func = orig_insn->func;
+ insn->sym = orig_insn->sym;
+ insn->alt_group = new_alt_group;
/*
* Since alternative replacement code is copy/pasted by the
@@ -812,17 +1872,12 @@ static int handle_group_alt(struct objtool_file *file,
* .altinstr_replacement section, unless the arch's
* alternatives code can adjust the relative offsets
* accordingly.
- *
- * The x86 alternatives code adjusts the offsets only when it
- * encounters a branch instruction at the very beginning of the
- * replacement group.
*/
- if ((insn->offset != special_alt->new_off ||
- (insn->type != INSN_CALL && !is_static_jump(insn))) &&
- find_rela_by_dest_range(file->elf, insn->sec, insn->offset, insn->len)) {
+ alt_reloc = insn_reloc(file, insn);
+ if (alt_reloc && arch_pc_relative_reloc(alt_reloc) &&
+ !arch_support_alt_relocation(special_alt, insn, alt_reloc)) {
- WARN_FUNC("unsupported relocation in alternatives section",
- insn->sec, insn->offset);
+ WARN_INSN(insn, "unsupported relocation in alternatives section");
return -1;
}
@@ -832,20 +1887,13 @@ static int handle_group_alt(struct objtool_file *file,
if (!insn->immediate)
continue;
- dest_off = insn->offset + insn->len + insn->immediate;
+ dest_off = arch_jump_destination(insn);
if (dest_off == special_alt->new_off + special_alt->new_len) {
- if (!fake_jump) {
- WARN("%s: alternative jump to end of section",
- special_alt->orig_sec->name);
+ insn->jump_dest = next_insn_same_sec(file, orig_alt_group->last_insn);
+ if (!insn->jump_dest) {
+ WARN_INSN(insn, "can't find alternative jump destination");
return -1;
}
- insn->jump_dest = fake_jump;
- }
-
- if (!insn->jump_dest) {
- WARN_FUNC("can't find alternative jump destination",
- insn->sec, insn->offset);
- return -1;
}
}
@@ -855,9 +1903,12 @@ static int handle_group_alt(struct objtool_file *file,
return -1;
}
- if (fake_jump)
- list_add(&fake_jump->list, &last_new_insn->list);
-
+end:
+ new_alt_group->orig_group = orig_alt_group;
+ new_alt_group->first_insn = *new_insn;
+ new_alt_group->last_insn = last_new_insn;
+ new_alt_group->nop = nop;
+ new_alt_group->cfi = orig_alt_group->cfi;
return 0;
}
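When the replacement sequence is shorter than the original, handle_group_alt() pads the new alt_group with a single synthetic NOP so that both groups span special_alt->orig_len bytes, which propagate_alt_cfi() relies on. The padding arithmetic, reduced to a stand-alone toy with made-up lengths and offsets:

#include <stdio.h>

int main(void)
{
	unsigned int orig_len = 15, new_len = 9;	/* invented sizes  */
	unsigned int new_off = 0x2000;			/* invented offset */

	if (new_len < orig_len) {
		unsigned int nop_off = new_off + new_len;
		unsigned int nop_len = orig_len - new_len;

		/* the fake NOP starts right after the replacement and
		 * covers exactly the missing bytes */
		printf("fake NOP at 0x%x, %u bytes\n", nop_off, nop_len);
	}
	return 0;
}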
@@ -871,16 +1922,41 @@ static int handle_jump_alt(struct objtool_file *file,
struct instruction *orig_insn,
struct instruction **new_insn)
{
- if (orig_insn->type == INSN_NOP)
- return 0;
+ if (orig_insn->type != INSN_JUMP_UNCONDITIONAL &&
+ orig_insn->type != INSN_NOP) {
- if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) {
- WARN_FUNC("unsupported instruction at jump label",
- orig_insn->sec, orig_insn->offset);
+ WARN_INSN(orig_insn, "unsupported instruction at jump label");
return -1;
}
- *new_insn = list_next_entry(orig_insn, list);
+ if (opts.hack_jump_label && special_alt->key_addend & 2) {
+ struct reloc *reloc = insn_reloc(file, orig_insn);
+
+ if (reloc) {
+ reloc->type = R_NONE;
+ elf_write_reloc(file->elf, reloc);
+ }
+ elf_write_insn(file->elf, orig_insn->sec,
+ orig_insn->offset, orig_insn->len,
+ arch_nop_insn(orig_insn->len));
+ orig_insn->type = INSN_NOP;
+ }
+
+ if (orig_insn->type == INSN_NOP) {
+ if (orig_insn->len == 2)
+ file->jl_nop_short++;
+ else
+ file->jl_nop_long++;
+
+ return 0;
+ }
+
+ if (orig_insn->len == 2)
+ file->jl_short++;
+ else
+ file->jl_long++;
+
+ *new_insn = next_insn_same_sec(file, orig_insn);
return 0;
}
@@ -927,6 +2003,11 @@ static int add_special_section_alts(struct objtool_file *file)
}
if (special_alt->group) {
+ if (!special_alt->orig_len) {
+ WARN_INSN(orig_insn, "empty alternative entry");
+ continue;
+ }
+
ret = handle_group_alt(file, special_alt, orig_insn,
&new_insn);
if (ret)
@@ -948,50 +2029,57 @@ static int add_special_section_alts(struct objtool_file *file)
alt->insn = new_insn;
alt->skip_orig = special_alt->skip_orig;
orig_insn->ignore_alts |= special_alt->skip_alt;
- list_add_tail(&alt->list, &orig_insn->alts);
+ alt->next = orig_insn->alts;
+ orig_insn->alts = alt;
list_del(&special_alt->list);
free(special_alt);
}
+ if (opts.stats) {
+ printf("jl\\\tNOP\tJMP\n");
+ printf("short:\t%ld\t%ld\n", file->jl_nop_short, file->jl_short);
+ printf("long:\t%ld\t%ld\n", file->jl_nop_long, file->jl_long);
+ }
+
out:
return ret;
}
static int add_jump_table(struct objtool_file *file, struct instruction *insn,
- struct rela *table)
+ struct reloc *table)
{
- struct rela *rela = table;
+ struct reloc *reloc = table;
struct instruction *dest_insn;
struct alternative *alt;
- struct symbol *pfunc = insn->func->pfunc;
+ struct symbol *pfunc = insn_func(insn)->pfunc;
unsigned int prev_offset = 0;
/*
- * Each @rela is a switch table relocation which points to the target
+ * Each @reloc is a switch table relocation which points to the target
* instruction.
*/
- list_for_each_entry_from(rela, &table->sec->rela_list, list) {
+ list_for_each_entry_from(reloc, &table->sec->reloc_list, list) {
/* Check for the end of the table: */
- if (rela != table && rela->jump_table_start)
+ if (reloc != table && reloc->jump_table_start)
break;
/* Make sure the table entries are consecutive: */
- if (prev_offset && rela->offset != prev_offset + 8)
+ if (prev_offset && reloc->offset != prev_offset + 8)
break;
/* Detect function pointers from contiguous objects: */
- if (rela->sym->sec == pfunc->sec &&
- rela->addend == pfunc->offset)
+ if (reloc->sym->sec == pfunc->sec &&
+ reloc->addend == pfunc->offset)
break;
- dest_insn = find_insn(file, rela->sym->sec, rela->addend);
+ dest_insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (!dest_insn)
break;
/* Make sure the destination is in the same function: */
- if (!dest_insn->func || dest_insn->func->pfunc != pfunc)
+ if (!insn_func(dest_insn) || insn_func(dest_insn)->pfunc != pfunc)
break;
alt = malloc(sizeof(*alt));
@@ -1001,13 +2089,13 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
}
alt->insn = dest_insn;
- list_add_tail(&alt->list, &insn->alts);
- prev_offset = rela->offset;
+ alt->next = insn->alts;
+ insn->alts = alt;
+ prev_offset = reloc->offset;
}
if (!prev_offset) {
- WARN_FUNC("can't find switch jump table",
- insn->sec, insn->offset);
+ WARN_INSN(insn, "can't find switch jump table");
return -1;
}
@@ -1015,56 +2103,15 @@ static int add_jump_table(struct objtool_file *file, struct instruction *insn,
}
/*
- * find_jump_table() - Given a dynamic jump, find the switch jump table in
- * .rodata associated with it.
- *
- * There are 3 basic patterns:
- *
- * 1. jmpq *[rodata addr](,%reg,8)
- *
- * This is the most common case by far. It jumps to an address in a simple
- * jump table which is stored in .rodata.
- *
- * 2. jmpq *[rodata addr](%rip)
- *
- * This is caused by a rare GCC quirk, currently only seen in three driver
- * functions in the kernel, only with certain obscure non-distro configs.
- *
- * As part of an optimization, GCC makes a copy of an existing switch jump
- * table, modifies it, and then hard-codes the jump (albeit with an indirect
- * jump) to use a single entry in the table. The rest of the jump table and
- * some of its jump targets remain as dead code.
- *
- * In such a case we can just crudely ignore all unreachable instruction
- * warnings for the entire object file. Ideally we would just ignore them
- * for the function, but that would require redesigning the code quite a
- * bit. And honestly that's just not worth doing: unreachable instruction
- * warnings are of questionable value anyway, and this is such a rare issue.
- *
- * 3. mov [rodata addr],%reg1
- * ... some instructions ...
- * jmpq *(%reg1,%reg2,8)
- *
- * This is a fairly uncommon pattern which is new for GCC 6. As of this
- * writing, there are 11 occurrences of it in the allmodconfig kernel.
- *
- * As of GCC 7 there are quite a few more of these and the 'in between' code
- * is significant. Esp. with KASAN enabled some of the code between the mov
- * and jmpq uses .rodata itself, which can confuse things.
- *
- * TODO: Once we have DWARF CFI and smarter instruction decoding logic,
- * ensure the same register is used in the mov and jump instructions.
- *
- * NOTE: RETPOLINE made it harder still to decode dynamic jumps.
+ * find_jump_table() - Given a dynamic jump, find the switch jump table
+ * associated with it.
*/
-static struct rela *find_jump_table(struct objtool_file *file,
+static struct reloc *find_jump_table(struct objtool_file *file,
struct symbol *func,
struct instruction *insn)
{
- struct rela *text_rela, *table_rela;
+ struct reloc *table_reloc;
struct instruction *dest_insn, *orig_insn = insn;
- struct section *table_sec;
- unsigned long table_offset;
/*
* Backward search using the @first_jump_src links, these help avoid
@@ -1072,11 +2119,8 @@ static struct rela *find_jump_table(struct objtool_file *file,
* it.
*/
for (;
- &insn->list != &file->insn_list &&
- insn->sec == func->sec &&
- insn->offset >= func->offset;
-
- insn = insn->first_jump_src ?: list_prev_entry(insn, list)) {
+ insn && insn_func(insn) && insn_func(insn)->pfunc == func;
+ insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) {
if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC)
break;
@@ -1088,53 +2132,14 @@ static struct rela *find_jump_table(struct objtool_file *file,
insn->jump_dest->offset > orig_insn->offset))
break;
- /* look for a relocation which references .rodata */
- text_rela = find_rela_by_dest_range(file->elf, insn->sec,
- insn->offset, insn->len);
- if (!text_rela || text_rela->sym->type != STT_SECTION ||
- !text_rela->sym->sec->rodata)
+ table_reloc = arch_find_switch_table(file, insn);
+ if (!table_reloc)
continue;
-
- table_offset = text_rela->addend;
- table_sec = text_rela->sym->sec;
-
- if (text_rela->type == R_X86_64_PC32)
- table_offset += 4;
-
- /*
- * Make sure the .rodata address isn't associated with a
- * symbol. GCC jump tables are anonymous data.
- *
- * Also support C jump tables which are in the same format as
- * switch jump tables. For objtool to recognize them, they
- * need to be placed in the C_JUMP_TABLE_SECTION section. They
- * have symbols associated with them.
- */
- if (find_symbol_containing(table_sec, table_offset) &&
- strcmp(table_sec->name, C_JUMP_TABLE_SECTION))
+ dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend);
+ if (!dest_insn || !insn_func(dest_insn) || insn_func(dest_insn)->pfunc != func)
continue;
- /*
- * Each table entry has a rela associated with it. The rela
- * should reference text in the same function as the original
- * instruction.
- */
- table_rela = find_rela_by_dest(file->elf, table_sec, table_offset);
- if (!table_rela)
- continue;
- dest_insn = find_insn(file, table_rela->sym->sec, table_rela->addend);
- if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func)
- continue;
-
- /*
- * Use of RIP-relative switch jumps is quite rare, and
- * indicates a rare GCC quirk/bug which can leave dead code
- * behind.
- */
- if (text_rela->type == R_X86_64_PC32)
- file->ignore_unreachables = true;
-
- return table_rela;
+ return table_reloc;
}
return NULL;
@@ -1148,7 +2153,7 @@ static void mark_func_jump_tables(struct objtool_file *file,
struct symbol *func)
{
struct instruction *insn, *last = NULL;
- struct rela *rela;
+ struct reloc *reloc;
func_for_each_insn(file, func, insn) {
if (!last)
@@ -1171,10 +2176,10 @@ static void mark_func_jump_tables(struct objtool_file *file,
if (insn->type != INSN_JUMP_DYNAMIC)
continue;
- rela = find_jump_table(file, func, insn);
- if (rela) {
- rela->jump_table_start = true;
- insn->jump_table = rela;
+ reloc = find_jump_table(file, func, insn);
+ if (reloc) {
+ reloc->jump_table_start = true;
+ insn->_jump_table = reloc;
}
}
}
@@ -1186,10 +2191,10 @@ static int add_func_jump_tables(struct objtool_file *file,
int ret;
func_for_each_insn(file, func, insn) {
- if (!insn->jump_table)
+ if (!insn_jump_table(insn))
continue;
- ret = add_jump_table(file, insn, insn->jump_table);
+ ret = add_jump_table(file, insn, insn_jump_table(insn));
if (ret)
return ret;
}
@@ -1204,117 +2209,139 @@ static int add_func_jump_tables(struct objtool_file *file,
*/
static int add_jump_table_alts(struct objtool_file *file)
{
- struct section *sec;
struct symbol *func;
int ret;
if (!file->rodata)
return 0;
- for_each_sec(file, sec) {
- list_for_each_entry(func, &sec->symbol_list, list) {
- if (func->type != STT_FUNC)
- continue;
+ for_each_sym(file, func) {
+ if (func->type != STT_FUNC)
+ continue;
- mark_func_jump_tables(file, func);
- ret = add_func_jump_tables(file, func);
- if (ret)
- return ret;
- }
+ mark_func_jump_tables(file, func);
+ ret = add_func_jump_tables(file, func);
+ if (ret)
+ return ret;
}
return 0;
}
+static void set_func_state(struct cfi_state *state)
+{
+ state->cfa = initial_func_cfi.cfa;
+ memcpy(&state->regs, &initial_func_cfi.regs,
+ CFI_NUM_REGS * sizeof(struct cfi_reg));
+ state->stack_size = initial_func_cfi.cfa.offset;
+ state->type = UNWIND_HINT_TYPE_CALL;
+}
+
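set_func_state() seeds every function with initial_func_cfi. The concrete numbers below are an assumption stated for illustration (the usual x86-64 convention: at entry the CFA is %rsp + 8 and the return address sits at CFA - 8); the types are toy stand-ins, not objtool's.

/* Illustrative sketch only; values assume the x86-64 entry convention. */
#include <stdio.h>
#include <string.h>

enum { TOY_CFI_SP = 1, TOY_CFI_CFA = 2 };

struct toy_cfi_reg { int base; int offset; };
struct toy_cfi_state {
	struct toy_cfi_reg cfa;
	struct toy_cfi_reg ra;		/* return-address slot */
	int stack_size;
};

static void toy_set_func_state(struct toy_cfi_state *s)
{
	memset(s, 0, sizeof(*s));
	s->cfa.base   = TOY_CFI_SP;
	s->cfa.offset = 8;		/* CALL already pushed the RA */
	s->ra.base    = TOY_CFI_CFA;
	s->ra.offset  = -8;		/* RA sits just below the CFA */
	s->stack_size = s->cfa.offset;
}

int main(void)
{
	struct toy_cfi_state s;

	toy_set_func_state(&s);
	printf("CFA = SP%+d, RA at CFA%+d, stack_size=%d\n",
	       s.cfa.offset, s.ra.offset, s.stack_size);
	return 0;
}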
static int read_unwind_hints(struct objtool_file *file)
{
- struct section *sec, *relasec;
- struct rela *rela;
+ struct cfi_state cfi = init_cfi;
+ struct section *sec, *relocsec;
struct unwind_hint *hint;
struct instruction *insn;
- struct cfi_reg *cfa;
+ struct reloc *reloc;
int i;
sec = find_section_by_name(file->elf, ".discard.unwind_hints");
if (!sec)
return 0;
- relasec = sec->rela;
- if (!relasec) {
+ relocsec = sec->reloc;
+ if (!relocsec) {
WARN("missing .rela.discard.unwind_hints section");
return -1;
}
- if (sec->len % sizeof(struct unwind_hint)) {
+ if (sec->sh.sh_size % sizeof(struct unwind_hint)) {
WARN("struct unwind_hint size mismatch");
return -1;
}
file->hints = true;
- for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) {
+ for (i = 0; i < sec->sh.sh_size / sizeof(struct unwind_hint); i++) {
hint = (struct unwind_hint *)sec->data->d_buf + i;
- rela = find_rela_by_dest(file->elf, sec, i * sizeof(*hint));
- if (!rela) {
- WARN("can't find rela for unwind_hints[%d]", i);
+ reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint));
+ if (!reloc) {
+ WARN("can't find reloc for unwind_hints[%d]", i);
return -1;
}
- insn = find_insn(file, rela->sym->sec, rela->addend);
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (!insn) {
WARN("can't find insn for unwind_hints[%d]", i);
return -1;
}
- cfa = &insn->state.cfa;
+ insn->hint = true;
if (hint->type == UNWIND_HINT_TYPE_SAVE) {
+ insn->hint = false;
insn->save = true;
continue;
+ }
- } else if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
+ if (hint->type == UNWIND_HINT_TYPE_RESTORE) {
insn->restore = true;
- insn->hint = true;
continue;
}
- insn->hint = true;
+ if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) {
+ struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset);
- switch (hint->sp_reg) {
- case ORC_REG_UNDEFINED:
- cfa->base = CFI_UNDEFINED;
- break;
- case ORC_REG_SP:
- cfa->base = CFI_SP;
- break;
- case ORC_REG_BP:
- cfa->base = CFI_BP;
- break;
- case ORC_REG_SP_INDIRECT:
- cfa->base = CFI_SP_INDIRECT;
- break;
- case ORC_REG_R10:
- cfa->base = CFI_R10;
- break;
- case ORC_REG_R13:
- cfa->base = CFI_R13;
- break;
- case ORC_REG_DI:
- cfa->base = CFI_DI;
- break;
- case ORC_REG_DX:
- cfa->base = CFI_DX;
- break;
- default:
- WARN_FUNC("unsupported unwind_hint sp base reg %d",
- insn->sec, insn->offset, hint->sp_reg);
+ if (sym && sym->bind == STB_GLOBAL) {
+ if (opts.ibt && insn->type != INSN_ENDBR && !insn->noendbr) {
+ WARN_INSN(insn, "UNWIND_HINT_IRET_REGS without ENDBR");
+ }
+ }
+ }
+
+ if (hint->type == UNWIND_HINT_TYPE_FUNC) {
+ insn->cfi = &func_cfi;
+ continue;
+ }
+
+ if (insn->cfi)
+ cfi = *(insn->cfi);
+
+ if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) {
+ WARN_INSN(insn, "unsupported unwind_hint sp base reg %d", hint->sp_reg);
return -1;
}
- cfa->offset = hint->sp_offset;
- insn->state.type = hint->type;
- insn->state.end = hint->end;
+ cfi.cfa.offset = bswap_if_needed(file->elf, hint->sp_offset);
+ cfi.type = hint->type;
+ cfi.signal = hint->signal;
+
+ insn->cfi = cfi_hash_find_or_add(&cfi);
+ }
+
+ return 0;
+}
+
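read_unwind_hints() treats .discard.unwind_hints as a packed array of entries, with one relocation per entry (at offset i * sizeof(hint)) pointing at the annotated instruction. The sketch below models that indexing; the field layout shown is hypothetical, chosen only to make the example self-contained.

/* Illustrative sketch only; the hint layout here is hypothetical. */
#include <stdint.h>
#include <stdio.h>

struct toy_hint {
	int32_t ip;		/* resolved via a relocation in the real thing */
	int16_t sp_offset;
	uint8_t sp_reg;
	uint8_t type;
};

int main(void)
{
	/* Pretend this is the raw data of ".discard.unwind_hints". */
	struct toy_hint hints[] = {
		{ 0, 8,  1, 0 },
		{ 0, 16, 1, 0 },
	};
	size_t n = sizeof(hints) / sizeof(hints[0]);

	for (size_t i = 0; i < n; i++) {
		/* The matching reloc would live at offset i * sizeof(hint). */
		printf("hint[%zu]: reloc offset %zu, sp_reg=%u sp_offset=%d\n",
		       i, i * sizeof(struct toy_hint),
		       (unsigned int)hints[i].sp_reg, (int)hints[i].sp_offset);
	}
	return 0;
}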
+static int read_noendbr_hints(struct objtool_file *file)
+{
+ struct section *sec;
+ struct instruction *insn;
+ struct reloc *reloc;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.noendbr");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ insn = find_insn(file, reloc->sym->sec, reloc->sym->offset + reloc->addend);
+ if (!insn) {
+ WARN("bad .discard.noendbr entry");
+ return -1;
+ }
+
+ insn->noendbr = 1;
}
return 0;
@@ -1324,28 +2351,29 @@ static int read_retpoline_hints(struct objtool_file *file)
{
struct section *sec;
struct instruction *insn;
- struct rela *rela;
+ struct reloc *reloc;
sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe");
if (!sec)
return 0;
- list_for_each_entry(rela, &sec->rela_list, list) {
- if (rela->sym->type != STT_SECTION) {
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ if (reloc->sym->type != STT_SECTION) {
WARN("unexpected relocation symbol type in %s", sec->name);
return -1;
}
- insn = find_insn(file, rela->sym->sec, rela->addend);
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
if (!insn) {
WARN("bad .discard.retpoline_safe entry");
return -1;
}
if (insn->type != INSN_JUMP_DYNAMIC &&
- insn->type != INSN_CALL_DYNAMIC) {
- WARN_FUNC("retpoline_safe hint not an indirect jump/call",
- insn->sec, insn->offset);
+ insn->type != INSN_CALL_DYNAMIC &&
+ insn->type != INSN_RETURN &&
+ insn->type != INSN_NOP) {
+ WARN_INSN(insn, "retpoline_safe hint not an indirect jump/call/ret/nop");
return -1;
}
@@ -1355,6 +2383,183 @@ static int read_retpoline_hints(struct objtool_file *file)
return 0;
}
+static int read_instr_hints(struct objtool_file *file)
+{
+ struct section *sec;
+ struct instruction *insn;
+ struct reloc *reloc;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.instr_end");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ if (reloc->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
+ if (!insn) {
+ WARN("bad .discard.instr_end entry");
+ return -1;
+ }
+
+ insn->instr--;
+ }
+
+ sec = find_section_by_name(file->elf, ".rela.discard.instr_begin");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ if (reloc->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
+ if (!insn) {
+ WARN("bad .discard.instr_begin entry");
+ return -1;
+ }
+
+ insn->instr++;
+ }
+
+ return 0;
+}
+
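The instr_begin/instr_end annotations behave like a nesting counter: read_instr_hints() bumps insn->instr up or down, and validate_branch() later accumulates it along each path so a return from noinstr code with a positive depth can be flagged. A minimal standalone model of that counter:

/* Illustrative sketch only: the begin/end annotations nest like a counter. */
#include <stdio.h>

int main(void)
{
	/* +1 = instrumentation begin, -1 = instrumentation end */
	const int events[] = { +1, +1, -1, -1, +1, -1 };
	int depth = 0;

	for (unsigned int i = 0; i < sizeof(events) / sizeof(events[0]); i++) {
		depth += events[i];
		printf("after event %u: instr depth = %d\n", i, depth);
	}

	/* A return from noinstr code with depth > 0 would be flagged. */
	return depth != 0;
}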
+static int read_validate_unret_hints(struct objtool_file *file)
+{
+ struct section *sec;
+ struct instruction *insn;
+ struct reloc *reloc;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.validate_unret");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ if (reloc->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s", sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
+ if (!insn) {
+			WARN("bad .discard.validate_unret entry");
+ return -1;
+ }
+ insn->unret = 1;
+ }
+
+ return 0;
+}
+
+
+static int read_intra_function_calls(struct objtool_file *file)
+{
+ struct instruction *insn;
+ struct section *sec;
+ struct reloc *reloc;
+
+ sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls");
+ if (!sec)
+ return 0;
+
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ unsigned long dest_off;
+
+ if (reloc->sym->type != STT_SECTION) {
+ WARN("unexpected relocation symbol type in %s",
+ sec->name);
+ return -1;
+ }
+
+ insn = find_insn(file, reloc->sym->sec, reloc->addend);
+ if (!insn) {
+			WARN("bad .discard.intra_function_calls entry");
+ return -1;
+ }
+
+ if (insn->type != INSN_CALL) {
+ WARN_INSN(insn, "intra_function_call not a direct call");
+ return -1;
+ }
+
+ /*
+ * Treat intra-function CALLs as JMPs, but with a stack_op.
+ * See add_call_destinations(), which strips stack_ops from
+ * normal CALLs.
+ */
+ insn->type = INSN_JUMP_UNCONDITIONAL;
+
+ dest_off = arch_jump_destination(insn);
+ insn->jump_dest = find_insn(file, insn->sec, dest_off);
+ if (!insn->jump_dest) {
+ WARN_INSN(insn, "can't find call dest at %s+0x%lx",
+ insn->sec->name, dest_off);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Return true if name matches an instrumentation function, where calls to that
+ * function from noinstr code can safely be removed, but compilers won't do so.
+ */
+static bool is_profiling_func(const char *name)
+{
+ /*
+ * Many compilers cannot disable KCOV with a function attribute.
+ */
+ if (!strncmp(name, "__sanitizer_cov_", 16))
+ return true;
+
+ /*
+	 * Some compilers currently do not remove __tsan_func_entry/exit nor
+	 * __tsan_atomic_signal_fence (used for barrier instrumentation) even
+	 * with the __no_sanitize_thread attribute, so remove them here. Once
+	 * the kernel's minimum Clang version is 14.0, this can be dropped.
+ */
+ if (!strncmp(name, "__tsan_func_", 12) ||
+ !strcmp(name, "__tsan_atomic_signal_fence"))
+ return true;
+
+ return false;
+}
+
+static int classify_symbols(struct objtool_file *file)
+{
+ struct symbol *func;
+
+ for_each_sym(file, func) {
+ if (func->bind != STB_GLOBAL)
+ continue;
+
+ if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR,
+ strlen(STATIC_CALL_TRAMP_PREFIX_STR)))
+ func->static_call_tramp = true;
+
+ if (arch_is_retpoline(func))
+ func->retpoline_thunk = true;
+
+ if (arch_is_rethunk(func))
+ func->return_thunk = true;
+
+ if (arch_ftrace_match(func->name))
+ func->fentry = true;
+
+ if (is_profiling_func(func->name))
+ func->profiling_func = true;
+ }
+
+ return 0;
+}
+
static void mark_rodata(struct objtool_file *file)
{
struct section *sec;
@@ -1371,8 +2576,8 @@ static void mark_rodata(struct objtool_file *file)
* .rodata.str1.* sections are ignored; they don't contain jump tables.
*/
for_each_sec(file, sec) {
- if ((!strncmp(sec->name, ".rodata", 7) && !strstr(sec->name, ".str1.")) ||
- !strcmp(sec->name, C_JUMP_TABLE_SECTION)) {
+ if (!strncmp(sec->name, ".rodata", 7) &&
+ !strstr(sec->name, ".str1.")) {
sec->rodata = true;
found = true;
}
@@ -1387,11 +2592,18 @@ static int decode_sections(struct objtool_file *file)
mark_rodata(file);
- ret = decode_instructions(file);
+ ret = init_pv_ops(file);
if (ret)
return ret;
- ret = add_dead_ends(file);
+ /*
+	 * Must be before add_jump_destinations() and add_call_destinations().
+ */
+ ret = classify_symbols(file);
+ if (ret)
+ return ret;
+
+ ret = decode_instructions(file);
if (ret)
return ret;
@@ -1402,11 +2614,32 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
+ /*
+ * Must be before read_unwind_hints() since that needs insn->noendbr.
+ */
+ ret = read_noendbr_hints(file);
+ if (ret)
+ return ret;
+
+ /*
+ * Must be before add_jump_destinations(), which depends on 'func'
+ * being set for alternatives, to enable proper sibling call detection.
+ */
+ if (opts.stackval || opts.orc || opts.uaccess || opts.noinstr) {
+ ret = add_special_section_alts(file);
+ if (ret)
+ return ret;
+ }
+
ret = add_jump_destinations(file);
if (ret)
return ret;
- ret = add_special_section_alts(file);
+ /*
+	 * Must be before add_call_destinations(); it changes INSN_CALL to
+	 * INSN_JUMP_UNCONDITIONAL.
+ */
+ ret = read_intra_function_calls(file);
if (ret)
return ret;
@@ -1414,6 +2647,14 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
+ /*
+ * Must be after add_call_destinations() such that it can override
+ * dead_end_function() marks.
+ */
+ ret = add_dead_ends(file);
+ if (ret)
+ return ret;
+
ret = add_jump_table_alts(file);
if (ret)
return ret;
@@ -1426,55 +2667,79 @@ static int decode_sections(struct objtool_file *file)
if (ret)
return ret;
+ ret = read_instr_hints(file);
+ if (ret)
+ return ret;
+
+ ret = read_validate_unret_hints(file);
+ if (ret)
+ return ret;
+
return 0;
}
static bool is_fentry_call(struct instruction *insn)
{
if (insn->type == INSN_CALL &&
- insn->call_dest->type == STT_NOTYPE &&
- !strcmp(insn->call_dest->name, "__fentry__"))
+ insn_call_dest(insn) &&
+ insn_call_dest(insn)->fentry)
return true;
return false;
}
-static bool has_modified_stack_frame(struct insn_state *state)
+static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state)
{
+ struct cfi_state *cfi = &state->cfi;
int i;
- if (state->cfa.base != initial_func_cfi.cfa.base ||
- state->cfa.offset != initial_func_cfi.cfa.offset ||
- state->stack_size != initial_func_cfi.cfa.offset ||
- state->drap)
+ if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap)
return true;
- for (i = 0; i < CFI_NUM_REGS; i++)
- if (state->regs[i].base != initial_func_cfi.regs[i].base ||
- state->regs[i].offset != initial_func_cfi.regs[i].offset)
+ if (cfi->cfa.offset != initial_func_cfi.cfa.offset)
+ return true;
+
+ if (cfi->stack_size != initial_func_cfi.cfa.offset)
+ return true;
+
+ for (i = 0; i < CFI_NUM_REGS; i++) {
+ if (cfi->regs[i].base != initial_func_cfi.regs[i].base ||
+ cfi->regs[i].offset != initial_func_cfi.regs[i].offset)
return true;
+ }
return false;
}
+static bool check_reg_frame_pos(const struct cfi_reg *reg,
+ int expected_offset)
+{
+ return reg->base == CFI_CFA &&
+ reg->offset == expected_offset;
+}
+
static bool has_valid_stack_frame(struct insn_state *state)
{
- if (state->cfa.base == CFI_BP && state->regs[CFI_BP].base == CFI_CFA &&
- state->regs[CFI_BP].offset == -16)
+ struct cfi_state *cfi = &state->cfi;
+
+ if (cfi->cfa.base == CFI_BP &&
+ check_reg_frame_pos(&cfi->regs[CFI_BP], -cfi->cfa.offset) &&
+ check_reg_frame_pos(&cfi->regs[CFI_RA], -cfi->cfa.offset + 8))
return true;
- if (state->drap && state->regs[CFI_BP].base == CFI_BP)
+ if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP)
return true;
return false;
}
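For the common "push %rbp; mov %rsp,%rbp" prologue the frame check above amounts to: the CFA is %rbp-based, the saved %rbp sits at CFA minus the CFA offset, and the return address sits 8 bytes above it. The numbers in this standalone sketch (CFA = %rbp + 16) are an assumed x86-64 layout, used only to make the check concrete.

/* Illustrative sketch only -- toy encoding of the rbp-based frame check. */
#include <stdbool.h>
#include <stdio.h>

enum { TOY_CFI_CFA = 1, TOY_CFI_BP = 2 };

struct toy_reg { int base; int offset; };

static bool toy_check_reg_frame_pos(const struct toy_reg *reg, int expected)
{
	return reg->base == TOY_CFI_CFA && reg->offset == expected;
}

int main(void)
{
	/* After "push %rbp; mov %rsp,%rbp": CFA = %rbp + 16 (assumed). */
	int cfa_base = TOY_CFI_BP, cfa_offset = 16;
	struct toy_reg saved_bp = { TOY_CFI_CFA, -16 };	/* at CFA - 16 */
	struct toy_reg saved_ra = { TOY_CFI_CFA, -8 };	/* at CFA - 8  */

	bool ok = cfa_base == TOY_CFI_BP &&
		  toy_check_reg_frame_pos(&saved_bp, -cfa_offset) &&
		  toy_check_reg_frame_pos(&saved_ra, -cfa_offset + 8);

	printf("frame looks %s\n", ok ? "valid" : "modified");
	return 0;
}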
-static int update_insn_state_regs(struct instruction *insn, struct insn_state *state)
+static int update_cfi_state_regs(struct instruction *insn,
+ struct cfi_state *cfi,
+ struct stack_op *op)
{
- struct cfi_reg *cfa = &state->cfa;
- struct stack_op *op = &insn->stack_op;
+ struct cfi_reg *cfa = &cfi->cfa;
- if (cfa->base != CFI_SP)
+ if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT)
return 0;
/* push */
@@ -1493,20 +2758,19 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
return 0;
}
-static void save_reg(struct insn_state *state, unsigned char reg, int base,
- int offset)
+static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset)
{
if (arch_callee_saved_reg(reg) &&
- state->regs[reg].base == CFI_UNDEFINED) {
- state->regs[reg].base = base;
- state->regs[reg].offset = offset;
+ cfi->regs[reg].base == CFI_UNDEFINED) {
+ cfi->regs[reg].base = base;
+ cfi->regs[reg].offset = offset;
}
}
-static void restore_reg(struct insn_state *state, unsigned char reg)
+static void restore_reg(struct cfi_state *cfi, unsigned char reg)
{
- state->regs[reg].base = CFI_UNDEFINED;
- state->regs[reg].offset = 0;
+ cfi->regs[reg].base = initial_func_cfi.regs[reg].base;
+ cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset;
}
/*
@@ -1562,23 +2826,25 @@ static void restore_reg(struct insn_state *state, unsigned char reg)
* 41 5d pop %r13
* c3 retq
*/
-static int update_insn_state(struct instruction *insn, struct insn_state *state)
+static int update_cfi_state(struct instruction *insn,
+ struct instruction *next_insn,
+ struct cfi_state *cfi, struct stack_op *op)
{
- struct stack_op *op = &insn->stack_op;
- struct cfi_reg *cfa = &state->cfa;
- struct cfi_reg *regs = state->regs;
+ struct cfi_reg *cfa = &cfi->cfa;
+ struct cfi_reg *regs = cfi->regs;
/* stack operations don't make sense with an undefined CFA */
if (cfa->base == CFI_UNDEFINED) {
- if (insn->func) {
- WARN_FUNC("undefined stack state", insn->sec, insn->offset);
+ if (insn_func(insn)) {
+ WARN_INSN(insn, "undefined stack state");
return -1;
}
return 0;
}
- if (state->type == ORC_TYPE_REGS || state->type == ORC_TYPE_REGS_IRET)
- return update_insn_state_regs(insn, state);
+ if (cfi->type == UNWIND_HINT_TYPE_REGS ||
+ cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL)
+ return update_cfi_state_regs(insn, cfi, op);
switch (op->dest.type) {
@@ -1588,21 +2854,20 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
case OP_SRC_REG:
if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP &&
cfa->base == CFI_SP &&
- regs[CFI_BP].base == CFI_CFA &&
- regs[CFI_BP].offset == -cfa->offset) {
+ check_reg_frame_pos(&regs[CFI_BP], -cfa->offset)) {
/* mov %rsp, %rbp */
cfa->base = op->dest.reg;
- state->bp_scratch = false;
+ cfi->bp_scratch = false;
}
else if (op->src.reg == CFI_SP &&
- op->dest.reg == CFI_BP && state->drap) {
+ op->dest.reg == CFI_BP && cfi->drap) {
/* drap: mov %rsp, %rbp */
regs[CFI_BP].base = CFI_BP;
- regs[CFI_BP].offset = -state->stack_size;
- state->bp_scratch = false;
+ regs[CFI_BP].offset = -cfi->stack_size;
+ cfi->bp_scratch = false;
}
else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
@@ -1617,26 +2882,26 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
* ...
* mov %rax, %rsp
*/
- state->vals[op->dest.reg].base = CFI_CFA;
- state->vals[op->dest.reg].offset = -state->stack_size;
+ cfi->vals[op->dest.reg].base = CFI_CFA;
+ cfi->vals[op->dest.reg].offset = -cfi->stack_size;
}
else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP &&
- cfa->base == CFI_BP) {
+ (cfa->base == CFI_BP || cfa->base == cfi->drap_reg)) {
/*
* mov %rbp, %rsp
*
* Restore the original stack pointer (Clang).
*/
- state->stack_size = -state->regs[CFI_BP].offset;
+ cfi->stack_size = -cfi->regs[CFI_BP].offset;
}
else if (op->dest.reg == cfa->base) {
/* mov %reg, %rsp */
if (cfa->base == CFI_SP &&
- state->vals[op->src.reg].base == CFI_CFA) {
+ cfi->vals[op->src.reg].base == CFI_CFA) {
/*
* This is needed for the rare case
@@ -1646,8 +2911,40 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
* ...
* mov %rcx, %rsp
*/
- cfa->offset = -state->vals[op->src.reg].offset;
- state->stack_size = cfa->offset;
+ cfa->offset = -cfi->vals[op->src.reg].offset;
+ cfi->stack_size = cfa->offset;
+
+ } else if (cfa->base == CFI_SP &&
+ cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
+ cfi->vals[op->src.reg].offset == cfa->offset) {
+
+ /*
+ * Stack swizzle:
+ *
+ * 1: mov %rsp, (%[tos])
+ * 2: mov %[tos], %rsp
+ * ...
+ * 3: pop %rsp
+ *
+ * Where:
+ *
+ * 1 - places a pointer to the previous
+ * stack at the Top-of-Stack of the
+ * new stack.
+ *
+ * 2 - switches to the new stack.
+ *
+ * 3 - pops the Top-of-Stack to restore
+ * the original stack.
+ *
+ * Note: we set base to SP_INDIRECT
+ * here and preserve offset. Therefore
+ * when the unwinder reaches ToS it
+ * will dereference SP and then add the
+ * offset to find the next frame, IOW:
+ * (%rsp) + offset.
+ */
+ cfa->base = CFI_SP_INDIRECT;
} else {
cfa->base = CFI_UNDEFINED;
@@ -1655,13 +2952,27 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
}
}
+ else if (op->dest.reg == CFI_SP &&
+ cfi->vals[op->src.reg].base == CFI_SP_INDIRECT &&
+ cfi->vals[op->src.reg].offset == cfa->offset) {
+
+ /*
+ * The same stack swizzle case 2) as above. But
+ * because we can't change cfa->base, case 3)
+ * will become a regular POP. Pretend we're a
+ * PUSH so things don't go unbalanced.
+ */
+ cfi->stack_size += 8;
+ }
+
+
break;
case OP_SRC_ADD:
if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) {
/* add imm, %rsp */
- state->stack_size -= op->src.offset;
+ cfi->stack_size -= op->src.offset;
if (cfa->base == CFI_SP)
cfa->offset -= op->src.offset;
break;
@@ -1670,14 +2981,14 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) {
/* lea disp(%rbp), %rsp */
- state->stack_size = -(op->src.offset + regs[CFI_BP].offset);
+ cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset);
break;
}
if (op->src.reg == CFI_SP && cfa->base == CFI_SP) {
/* drap: lea disp(%rsp), %drap */
- state->drap_reg = op->dest.reg;
+ cfi->drap_reg = op->dest.reg;
/*
* lea disp(%rsp), %reg
@@ -1689,27 +3000,26 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
* ...
* mov %rcx, %rsp
*/
- state->vals[op->dest.reg].base = CFI_CFA;
- state->vals[op->dest.reg].offset = \
- -state->stack_size + op->src.offset;
+ cfi->vals[op->dest.reg].base = CFI_CFA;
+ cfi->vals[op->dest.reg].offset = \
+ -cfi->stack_size + op->src.offset;
break;
}
- if (state->drap && op->dest.reg == CFI_SP &&
- op->src.reg == state->drap_reg) {
+ if (cfi->drap && op->dest.reg == CFI_SP &&
+ op->src.reg == cfi->drap_reg) {
/* drap: lea disp(%drap), %rsp */
cfa->base = CFI_SP;
- cfa->offset = state->stack_size = -op->src.offset;
- state->drap_reg = CFI_UNDEFINED;
- state->drap = false;
+ cfa->offset = cfi->stack_size = -op->src.offset;
+ cfi->drap_reg = CFI_UNDEFINED;
+ cfi->drap = false;
break;
}
- if (op->dest.reg == state->cfa.base) {
- WARN_FUNC("unsupported stack register modification",
- insn->sec, insn->offset);
+ if (op->dest.reg == cfi->cfa.base && !(next_insn && next_insn->hint)) {
+ WARN_INSN(insn, "unsupported stack register modification");
return -1;
}
@@ -1717,18 +3027,17 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
case OP_SRC_AND:
if (op->dest.reg != CFI_SP ||
- (state->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
- (state->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
- WARN_FUNC("unsupported stack pointer realignment",
- insn->sec, insn->offset);
+ (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) ||
+ (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) {
+ WARN_INSN(insn, "unsupported stack pointer realignment");
return -1;
}
- if (state->drap_reg != CFI_UNDEFINED) {
+ if (cfi->drap_reg != CFI_UNDEFINED) {
/* drap: and imm, %rsp */
- cfa->base = state->drap_reg;
- cfa->offset = state->stack_size = 0;
- state->drap = true;
+ cfa->base = cfi->drap_reg;
+ cfa->offset = cfi->stack_size = 0;
+ cfi->drap = true;
}
/*
@@ -1740,64 +3049,82 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
case OP_SRC_POP:
case OP_SRC_POPF:
- if (!state->drap && op->dest.type == OP_DEST_REG &&
- op->dest.reg == cfa->base) {
+ if (op->dest.reg == CFI_SP && cfa->base == CFI_SP_INDIRECT) {
+
+ /* pop %rsp; # restore from a stack swizzle */
+ cfa->base = CFI_SP;
+ break;
+ }
+
+ if (!cfi->drap && op->dest.reg == cfa->base) {
/* pop %rbp */
cfa->base = CFI_SP;
}
- if (state->drap && cfa->base == CFI_BP_INDIRECT &&
- op->dest.type == OP_DEST_REG &&
- op->dest.reg == state->drap_reg &&
- state->drap_offset == -state->stack_size) {
+ if (cfi->drap && cfa->base == CFI_BP_INDIRECT &&
+ op->dest.reg == cfi->drap_reg &&
+ cfi->drap_offset == -cfi->stack_size) {
/* drap: pop %drap */
- cfa->base = state->drap_reg;
+ cfa->base = cfi->drap_reg;
cfa->offset = 0;
- state->drap_offset = -1;
+ cfi->drap_offset = -1;
- } else if (regs[op->dest.reg].offset == -state->stack_size) {
+ } else if (cfi->stack_size == -regs[op->dest.reg].offset) {
/* pop %reg */
- restore_reg(state, op->dest.reg);
+ restore_reg(cfi, op->dest.reg);
}
- state->stack_size -= 8;
+ cfi->stack_size -= 8;
if (cfa->base == CFI_SP)
cfa->offset -= 8;
break;
case OP_SRC_REG_INDIRECT:
- if (state->drap && op->src.reg == CFI_BP &&
- op->src.offset == state->drap_offset) {
+ if (!cfi->drap && op->dest.reg == cfa->base &&
+ op->dest.reg == CFI_BP) {
+
+ /* mov disp(%rsp), %rbp */
+ cfa->base = CFI_SP;
+ cfa->offset = cfi->stack_size;
+ }
+
+ if (cfi->drap && op->src.reg == CFI_BP &&
+ op->src.offset == cfi->drap_offset) {
/* drap: mov disp(%rbp), %drap */
- cfa->base = state->drap_reg;
+ cfa->base = cfi->drap_reg;
cfa->offset = 0;
- state->drap_offset = -1;
+ cfi->drap_offset = -1;
}
- if (state->drap && op->src.reg == CFI_BP &&
+ if (cfi->drap && op->src.reg == CFI_BP &&
op->src.offset == regs[op->dest.reg].offset) {
/* drap: mov disp(%rbp), %reg */
- restore_reg(state, op->dest.reg);
+ restore_reg(cfi, op->dest.reg);
} else if (op->src.reg == cfa->base &&
op->src.offset == regs[op->dest.reg].offset + cfa->offset) {
/* mov disp(%rbp), %reg */
/* mov disp(%rsp), %reg */
- restore_reg(state, op->dest.reg);
+ restore_reg(cfi, op->dest.reg);
+
+ } else if (op->src.reg == CFI_SP &&
+ op->src.offset == regs[op->dest.reg].offset + cfi->stack_size) {
+
+ /* mov disp(%rsp), %reg */
+ restore_reg(cfi, op->dest.reg);
}
break;
default:
- WARN_FUNC("unknown stack-related instruction",
- insn->sec, insn->offset);
+ WARN_INSN(insn, "unknown stack-related instruction");
return -1;
}
@@ -1805,153 +3132,225 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
case OP_DEST_PUSH:
case OP_DEST_PUSHF:
- state->stack_size += 8;
+ cfi->stack_size += 8;
if (cfa->base == CFI_SP)
cfa->offset += 8;
if (op->src.type != OP_SRC_REG)
break;
- if (state->drap) {
- if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+ if (cfi->drap) {
+ if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
/* drap: push %drap */
cfa->base = CFI_BP_INDIRECT;
- cfa->offset = -state->stack_size;
+ cfa->offset = -cfi->stack_size;
/* save drap so we know when to restore it */
- state->drap_offset = -state->stack_size;
+ cfi->drap_offset = -cfi->stack_size;
- } else if (op->src.reg == CFI_BP && cfa->base == state->drap_reg) {
+ } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) {
/* drap: push %rbp */
- state->stack_size = 0;
+ cfi->stack_size = 0;
- } else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+ } else {
/* drap: push %reg */
- save_reg(state, op->src.reg, CFI_BP, -state->stack_size);
+ save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size);
}
} else {
/* push %reg */
- save_reg(state, op->src.reg, CFI_CFA, -state->stack_size);
+ save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size);
}
/* detect when asm code uses rbp as a scratch register */
- if (!no_fp && insn->func && op->src.reg == CFI_BP &&
+ if (opts.stackval && insn_func(insn) && op->src.reg == CFI_BP &&
cfa->base != CFI_BP)
- state->bp_scratch = true;
+ cfi->bp_scratch = true;
break;
case OP_DEST_REG_INDIRECT:
- if (state->drap) {
- if (op->src.reg == cfa->base && op->src.reg == state->drap_reg) {
+ if (cfi->drap) {
+ if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) {
/* drap: mov %drap, disp(%rbp) */
cfa->base = CFI_BP_INDIRECT;
cfa->offset = op->dest.offset;
/* save drap offset so we know when to restore it */
- state->drap_offset = op->dest.offset;
- }
-
- else if (regs[op->src.reg].base == CFI_UNDEFINED) {
+ cfi->drap_offset = op->dest.offset;
+ } else {
/* drap: mov reg, disp(%rbp) */
- save_reg(state, op->src.reg, CFI_BP, op->dest.offset);
+ save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset);
}
} else if (op->dest.reg == cfa->base) {
/* mov reg, disp(%rbp) */
/* mov reg, disp(%rsp) */
- save_reg(state, op->src.reg, CFI_CFA,
- op->dest.offset - state->cfa.offset);
- }
-
- break;
+ save_reg(cfi, op->src.reg, CFI_CFA,
+ op->dest.offset - cfi->cfa.offset);
- case OP_DEST_LEAVE:
- if ((!state->drap && cfa->base != CFI_BP) ||
- (state->drap && cfa->base != state->drap_reg)) {
- WARN_FUNC("leave instruction with modified stack frame",
- insn->sec, insn->offset);
- return -1;
- }
+ } else if (op->dest.reg == CFI_SP) {
- /* leave (mov %rbp, %rsp; pop %rbp) */
+ /* mov reg, disp(%rsp) */
+ save_reg(cfi, op->src.reg, CFI_CFA,
+ op->dest.offset - cfi->stack_size);
- state->stack_size = -state->regs[CFI_BP].offset - 8;
- restore_reg(state, CFI_BP);
+ } else if (op->src.reg == CFI_SP && op->dest.offset == 0) {
- if (!state->drap) {
- cfa->base = CFI_SP;
- cfa->offset -= 8;
+ /* mov %rsp, (%reg); # setup a stack swizzle. */
+ cfi->vals[op->dest.reg].base = CFI_SP_INDIRECT;
+ cfi->vals[op->dest.reg].offset = cfa->offset;
}
break;
case OP_DEST_MEM:
if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
- WARN_FUNC("unknown stack-related memory operation",
- insn->sec, insn->offset);
+ WARN_INSN(insn, "unknown stack-related memory operation");
return -1;
}
/* pop mem */
- state->stack_size -= 8;
+ cfi->stack_size -= 8;
if (cfa->base == CFI_SP)
cfa->offset -= 8;
break;
default:
- WARN_FUNC("unknown stack-related instruction",
- insn->sec, insn->offset);
+ WARN_INSN(insn, "unknown stack-related instruction");
return -1;
}
return 0;
}
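Most of the bookkeeping above boils down to one rule: a push grows stack_size by 8 and, while the CFA is still %rsp-based, grows cfa.offset by 8; a pop does the reverse. A minimal standalone model of that rule (toy names, not objtool's types):

/* Illustrative sketch only: push/pop bookkeeping for an SP-based CFA. */
#include <stdio.h>

struct toy_state { int stack_size; int cfa_offset; };

static void toy_push(struct toy_state *s) { s->stack_size += 8; s->cfa_offset += 8; }
static void toy_pop(struct toy_state *s)  { s->stack_size -= 8; s->cfa_offset -= 8; }

int main(void)
{
	struct toy_state s = { .stack_size = 8, .cfa_offset = 8 };	/* entry */

	toy_push(&s);	/* push %rbp */
	toy_push(&s);	/* push %r12 */
	toy_pop(&s);	/* pop  %r12 */
	toy_pop(&s);	/* pop  %rbp */

	printf("stack_size=%d cfa_offset=%d (back to entry state)\n",
	       s.stack_size, s.cfa_offset);
	return 0;
}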
-static bool insn_state_match(struct instruction *insn, struct insn_state *state)
+/*
+ * The stack layouts of alternatives instructions can sometimes diverge when
+ * they have stack modifications. That's fine as long as the potential stack
+ * layouts don't conflict at any given potential instruction boundary.
+ *
+ * Flatten the CFIs of the different alternative code streams (both original
+ * and replacement) into a single shared CFI array which can be used to detect
+ * conflicts and nicely feed a linear array of ORC entries to the unwinder.
+ */
+static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn)
+{
+ struct cfi_state **alt_cfi;
+ int group_off;
+
+ if (!insn->alt_group)
+ return 0;
+
+ if (!insn->cfi) {
+ WARN("CFI missing");
+ return -1;
+ }
+
+ alt_cfi = insn->alt_group->cfi;
+ group_off = insn->offset - insn->alt_group->first_insn->offset;
+
+ if (!alt_cfi[group_off]) {
+ alt_cfi[group_off] = insn->cfi;
+ } else {
+ if (cficmp(alt_cfi[group_off], insn->cfi)) {
+ struct alt_group *orig_group = insn->alt_group->orig_group ?: insn->alt_group;
+ struct instruction *orig = orig_group->first_insn;
+ char *where = offstr(insn->sec, insn->offset);
+ WARN_INSN(orig, "stack layout conflict in alternatives: %s", where);
+ free(where);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
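The flattening idea can be modeled with a shared per-offset slot array: the first code stream to reach an offset within the alt group records its CFI there, and every later stream must produce an identical CFI at that offset or the layouts conflict. A standalone toy version:

/* Illustrative sketch only: shared per-offset CFI slots across alternatives. */
#include <stdio.h>
#include <string.h>

#define GROUP_BYTES 8

struct toy_cfi { int cfa_base; int cfa_offset; };

static struct toy_cfi *slots[GROUP_BYTES];	/* shared by all streams */

static int record(int group_off, struct toy_cfi *cfi)
{
	if (!slots[group_off]) {
		slots[group_off] = cfi;		/* first stream wins */
		return 0;
	}
	if (memcmp(slots[group_off], cfi, sizeof(*cfi))) {
		printf("stack layout conflict at offset %d\n", group_off);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct toy_cfi orig = { 1, 8 }, repl_same = { 1, 8 }, repl_bad = { 1, 16 };
	int ret = 0;

	ret |= record(0, &orig);
	ret |= record(0, &repl_same);	/* fine: identical layout */
	ret |= record(4, &orig);
	ret |= record(4, &repl_bad);	/* conflict: 8 vs 16      */

	return ret ? 1 : 0;
}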
+static int handle_insn_ops(struct instruction *insn,
+ struct instruction *next_insn,
+ struct insn_state *state)
+{
+ struct stack_op *op;
+
+ for (op = insn->stack_ops; op; op = op->next) {
+
+ if (update_cfi_state(insn, next_insn, &state->cfi, op))
+ return 1;
+
+ if (!insn->alt_group)
+ continue;
+
+ if (op->dest.type == OP_DEST_PUSHF) {
+ if (!state->uaccess_stack) {
+ state->uaccess_stack = 1;
+ } else if (state->uaccess_stack >> 31) {
+ WARN_INSN(insn, "PUSHF stack exhausted");
+ return 1;
+ }
+ state->uaccess_stack <<= 1;
+ state->uaccess_stack |= state->uaccess;
+ }
+
+ if (op->src.type == OP_SRC_POPF) {
+ if (state->uaccess_stack) {
+ state->uaccess = state->uaccess_stack & 1;
+ state->uaccess_stack >>= 1;
+ if (state->uaccess_stack == 1)
+ state->uaccess_stack = 0;
+ }
+ }
+ }
+
+ return 0;
+}
+
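The PUSHF/POPF handling above keeps a tiny "bit-stack" of uaccess states: PUSHF shifts the word left and records the current uaccess bit, POPF restores it, and a sentinel bottom bit marks the stack as non-empty. The same mechanism in isolation, with toy names:

/* Illustrative sketch only: the PUSHF/POPF "uaccess bit-stack". */
#include <stdbool.h>
#include <stdio.h>

struct toy_state { unsigned int uaccess_stack; bool uaccess; };

static void toy_pushf(struct toy_state *s)
{
	if (!s->uaccess_stack)
		s->uaccess_stack = 1;		/* sentinel bottom bit */
	/* (the real code also warns when the 32-bit stack overflows) */
	s->uaccess_stack <<= 1;
	s->uaccess_stack |= s->uaccess;
}

static void toy_popf(struct toy_state *s)
{
	if (!s->uaccess_stack)
		return;
	s->uaccess = s->uaccess_stack & 1;
	s->uaccess_stack >>= 1;
	if (s->uaccess_stack == 1)
		s->uaccess_stack = 0;		/* hit the sentinel: empty */
}

int main(void)
{
	struct toy_state s = { 0, true };	/* uaccess currently enabled */

	toy_pushf(&s);		/* save flags (incl. AC) */
	s.uaccess = false;	/* CLAC                  */
	toy_popf(&s);		/* restore flags         */

	printf("uaccess restored to %d, stack=%u\n", s.uaccess, s.uaccess_stack);
	return 0;
}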
+static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2)
{
- struct insn_state *state1 = &insn->state, *state2 = state;
+ struct cfi_state *cfi1 = insn->cfi;
int i;
- if (memcmp(&state1->cfa, &state2->cfa, sizeof(state1->cfa))) {
- WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
- insn->sec, insn->offset,
- state1->cfa.base, state1->cfa.offset,
- state2->cfa.base, state2->cfa.offset);
+ if (!cfi1) {
+ WARN("CFI missing");
+ return false;
+ }
+
+ if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) {
- } else if (memcmp(&state1->regs, &state2->regs, sizeof(state1->regs))) {
+ WARN_INSN(insn, "stack state mismatch: cfa1=%d%+d cfa2=%d%+d",
+ cfi1->cfa.base, cfi1->cfa.offset,
+ cfi2->cfa.base, cfi2->cfa.offset);
+
+ } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) {
for (i = 0; i < CFI_NUM_REGS; i++) {
- if (!memcmp(&state1->regs[i], &state2->regs[i],
+ if (!memcmp(&cfi1->regs[i], &cfi2->regs[i],
sizeof(struct cfi_reg)))
continue;
- WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
- insn->sec, insn->offset,
- i, state1->regs[i].base, state1->regs[i].offset,
- i, state2->regs[i].base, state2->regs[i].offset);
+ WARN_INSN(insn, "stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d",
+ i, cfi1->regs[i].base, cfi1->regs[i].offset,
+ i, cfi2->regs[i].base, cfi2->regs[i].offset);
break;
}
- } else if (state1->type != state2->type) {
- WARN_FUNC("stack state mismatch: type1=%d type2=%d",
- insn->sec, insn->offset, state1->type, state2->type);
+ } else if (cfi1->type != cfi2->type) {
+
+ WARN_INSN(insn, "stack state mismatch: type1=%d type2=%d",
+ cfi1->type, cfi2->type);
+
+ } else if (cfi1->drap != cfi2->drap ||
+ (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) ||
+ (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) {
- } else if (state1->drap != state2->drap ||
- (state1->drap && state1->drap_reg != state2->drap_reg) ||
- (state1->drap && state1->drap_offset != state2->drap_offset)) {
- WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
- insn->sec, insn->offset,
- state1->drap, state1->drap_reg, state1->drap_offset,
- state2->drap, state2->drap_reg, state2->drap_offset);
+ WARN_INSN(insn, "stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)",
+ cfi1->drap, cfi1->drap_reg, cfi1->drap_offset,
+ cfi2->drap, cfi2->drap_reg, cfi2->drap_offset);
} else
return true;
@@ -1969,129 +3368,248 @@ static inline bool func_uaccess_safe(struct symbol *func)
static inline const char *call_dest_name(struct instruction *insn)
{
- if (insn->call_dest)
- return insn->call_dest->name;
+ static char pvname[19];
+ struct reloc *rel;
+ int idx;
+
+ if (insn_call_dest(insn))
+ return insn_call_dest(insn)->name;
+
+ rel = insn_reloc(NULL, insn);
+ if (rel && !strcmp(rel->sym->name, "pv_ops")) {
+ idx = (rel->addend / sizeof(void *));
+ snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx);
+ return pvname;
+ }
return "{dynamic}";
}
-static int validate_call(struct instruction *insn, struct insn_state *state)
+static bool pv_call_dest(struct objtool_file *file, struct instruction *insn)
{
- if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
- WARN_FUNC("call to %s() with UACCESS enabled",
- insn->sec, insn->offset, call_dest_name(insn));
+ struct symbol *target;
+ struct reloc *rel;
+ int idx;
+
+ rel = insn_reloc(file, insn);
+ if (!rel || strcmp(rel->sym->name, "pv_ops"))
+ return false;
+
+ idx = (arch_dest_reloc_offset(rel->addend) / sizeof(void *));
+
+ if (file->pv_ops[idx].clean)
+ return true;
+
+ file->pv_ops[idx].clean = true;
+
+ list_for_each_entry(target, &file->pv_ops[idx].targets, pv_target) {
+ if (!target->sec->noinstr) {
+ WARN("pv_ops[%d]: %s", idx, target->name);
+ file->pv_ops[idx].clean = false;
+ }
+ }
+
+ return file->pv_ops[idx].clean;
+}
+
+static inline bool noinstr_call_dest(struct objtool_file *file,
+ struct instruction *insn,
+ struct symbol *func)
+{
+ /*
+ * We can't deal with indirect function calls at present;
+ * assume they're instrumented.
+ */
+ if (!func) {
+ if (file->pv_ops)
+ return pv_call_dest(file, insn);
+
+ return false;
+ }
+
+ /*
+	 * If the symbol is from a noinstr section, we're good.
+ */
+ if (func->sec->noinstr)
+ return true;
+
+ /*
+ * If the symbol is a static_call trampoline, we can't tell.
+ */
+ if (func->static_call_tramp)
+ return true;
+
+ /*
+ * The __ubsan_handle_*() calls are like WARN(), they only happen when
+ * something 'BAD' happened. At the risk of taking the machine down,
+ * let them proceed to get the message out.
+ */
+ if (!strncmp(func->name, "__ubsan_handle_", 15))
+ return true;
+
+ return false;
+}
+
+static int validate_call(struct objtool_file *file,
+ struct instruction *insn,
+ struct insn_state *state)
+{
+ if (state->noinstr && state->instr <= 0 &&
+ !noinstr_call_dest(file, insn, insn_call_dest(insn))) {
+ WARN_INSN(insn, "call to %s() leaves .noinstr.text section", call_dest_name(insn));
+ return 1;
+ }
+
+ if (state->uaccess && !func_uaccess_safe(insn_call_dest(insn))) {
+ WARN_INSN(insn, "call to %s() with UACCESS enabled", call_dest_name(insn));
return 1;
}
if (state->df) {
- WARN_FUNC("call to %s() with DF set",
- insn->sec, insn->offset, call_dest_name(insn));
+ WARN_INSN(insn, "call to %s() with DF set", call_dest_name(insn));
return 1;
}
return 0;
}
-static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
+static int validate_sibling_call(struct objtool_file *file,
+ struct instruction *insn,
+ struct insn_state *state)
{
- if (has_modified_stack_frame(state)) {
- WARN_FUNC("sibling call from callable instruction with modified stack frame",
- insn->sec, insn->offset);
+ if (insn_func(insn) && has_modified_stack_frame(insn, state)) {
+ WARN_INSN(insn, "sibling call from callable instruction with modified stack frame");
return 1;
}
- return validate_call(insn, state);
+ return validate_call(file, insn, state);
}
static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state)
{
+ if (state->noinstr && state->instr > 0) {
+ WARN_INSN(insn, "return with instrumentation enabled");
+ return 1;
+ }
+
if (state->uaccess && !func_uaccess_safe(func)) {
- WARN_FUNC("return with UACCESS enabled",
- insn->sec, insn->offset);
+ WARN_INSN(insn, "return with UACCESS enabled");
return 1;
}
if (!state->uaccess && func_uaccess_safe(func)) {
- WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function",
- insn->sec, insn->offset);
+ WARN_INSN(insn, "return with UACCESS disabled from a UACCESS-safe function");
return 1;
}
if (state->df) {
- WARN_FUNC("return with DF set",
- insn->sec, insn->offset);
+ WARN_INSN(insn, "return with DF set");
return 1;
}
- if (func && has_modified_stack_frame(state)) {
- WARN_FUNC("return with modified stack frame",
- insn->sec, insn->offset);
+ if (func && has_modified_stack_frame(insn, state)) {
+ WARN_INSN(insn, "return with modified stack frame");
return 1;
}
- if (state->bp_scratch) {
- WARN("%s uses BP as a scratch register",
- func->name);
+ if (state->cfi.bp_scratch) {
+ WARN_INSN(insn, "BP used as a scratch register");
return 1;
}
return 0;
}
+static struct instruction *next_insn_to_validate(struct objtool_file *file,
+ struct instruction *insn)
+{
+ struct alt_group *alt_group = insn->alt_group;
+
+ /*
+ * Simulate the fact that alternatives are patched in-place. When the
+ * end of a replacement alt_group is reached, redirect objtool flow to
+ * the end of the original alt_group.
+ *
+ * insn->alts->insn -> alt_group->first_insn
+ * ...
+ * alt_group->last_insn
+ * [alt_group->nop] -> next(orig_group->last_insn)
+ */
+ if (alt_group) {
+ if (alt_group->nop) {
+ /* ->nop implies ->orig_group */
+ if (insn == alt_group->last_insn)
+ return alt_group->nop;
+ if (insn == alt_group->nop)
+ goto next_orig;
+ }
+ if (insn == alt_group->last_insn && alt_group->orig_group)
+ goto next_orig;
+ }
+
+ return next_insn_same_sec(file, insn);
+
+next_orig:
+ return next_insn_same_sec(file, alt_group->orig_group->last_insn);
+}
+
/*
* Follow the branch starting at the given instruction, and recursively follow
* any other branches (jumps). Meanwhile, track the frame pointer state at
* each instruction and validate all the rules described in
- * tools/objtool/Documentation/stack-validation.txt.
+ * tools/objtool/Documentation/objtool.txt.
*/
static int validate_branch(struct objtool_file *file, struct symbol *func,
- struct instruction *first, struct insn_state state)
+ struct instruction *insn, struct insn_state state)
{
struct alternative *alt;
- struct instruction *insn, *next_insn;
+ struct instruction *next_insn, *prev_insn = NULL;
struct section *sec;
u8 visited;
int ret;
- insn = first;
sec = insn->sec;
- if (insn->alt_group && list_empty(&insn->alts)) {
- WARN_FUNC("don't know how to handle branch to middle of alternative instruction group",
- sec, insn->offset);
- return 1;
- }
-
while (1) {
- next_insn = next_insn_same_sec(file, insn);
+ next_insn = next_insn_to_validate(file, insn);
+
+ if (func && insn_func(insn) && func != insn_func(insn)->pfunc) {
+ /* Ignore KCFI type preambles, which always fall through */
+ if (!strncmp(func->name, "__cfi_", 6) ||
+ !strncmp(func->name, "__pfx_", 6))
+ return 0;
- if (file->c_file && func && insn->func && func != insn->func->pfunc) {
WARN("%s() falls through to next function %s()",
- func->name, insn->func->name);
+ func->name, insn_func(insn)->name);
return 1;
}
if (func && insn->ignore) {
- WARN_FUNC("BUG: why am I validating an ignored function?",
- sec, insn->offset);
+ WARN_INSN(insn, "BUG: why am I validating an ignored function?");
return 1;
}
- visited = 1 << state.uaccess;
- if (insn->visited) {
- if (!insn->hint && !insn_state_match(insn, &state))
+ visited = VISITED_BRANCH << state.uaccess;
+ if (insn->visited & VISITED_BRANCH_MASK) {
+ if (!insn->hint && !insn_cfi_match(insn, &state.cfi))
return 1;
if (insn->visited & visited)
return 0;
+ } else {
+ nr_insns_visited++;
}
+ if (state.noinstr)
+ state.instr += insn->instr;
+
if (insn->hint) {
if (insn->restore) {
struct instruction *save_insn, *i;
i = insn;
save_insn = NULL;
+
sym_for_each_insn_continue_reverse(file, func, i) {
if (i->save) {
save_insn = i;
@@ -2100,47 +3618,46 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
}
if (!save_insn) {
- WARN_FUNC("no corresponding CFI save for CFI restore",
- sec, insn->offset);
+ WARN_INSN(insn, "no corresponding CFI save for CFI restore");
return 1;
}
if (!save_insn->visited) {
- /*
- * Oops, no state to copy yet.
- * Hopefully we can reach this
- * instruction from another branch
- * after the save insn has been
- * visited.
- */
- if (insn == first)
- return 0;
-
- WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo",
- sec, insn->offset);
+ WARN_INSN(insn, "objtool isn't smart enough to handle this CFI save/restore combo");
return 1;
}
- insn->state = save_insn->state;
+ insn->cfi = save_insn->cfi;
+ nr_cfi_reused++;
}
- state = insn->state;
+ state.cfi = *insn->cfi;
+ } else {
+ /* XXX track if we actually changed state.cfi */
- } else
- insn->state = state;
+ if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) {
+ insn->cfi = prev_insn->cfi;
+ nr_cfi_reused++;
+ } else {
+ insn->cfi = cfi_hash_find_or_add(&state.cfi);
+ }
+ }
insn->visited |= visited;
- if (!insn->ignore_alts) {
+ if (propagate_alt_cfi(file, insn))
+ return 1;
+
+ if (!insn->ignore_alts && insn->alts) {
bool skip_orig = false;
- list_for_each_entry(alt, &insn->alts, list) {
+ for (alt = insn->alts; alt; alt = alt->next) {
if (alt->skip_orig)
skip_orig = true;
ret = validate_branch(file, func, alt->insn, state);
if (ret) {
- if (backtrace)
+ if (opts.backtrace)
BT_FUNC("(alt)", insn);
return ret;
}
@@ -2150,6 +3667,9 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 0;
}
+ if (handle_insn_ops(insn, next_insn, &state))
+ return 1;
+
switch (insn->type) {
case INSN_RETURN:
@@ -2157,26 +3677,25 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
case INSN_CALL:
case INSN_CALL_DYNAMIC:
- ret = validate_call(insn, &state);
+ ret = validate_call(file, insn, &state);
if (ret)
return ret;
- if (!no_fp && func && !is_fentry_call(insn) &&
+ if (opts.stackval && func && !is_fentry_call(insn) &&
!has_valid_stack_frame(&state)) {
- WARN_FUNC("call without frame pointer save/setup",
- sec, insn->offset);
+ WARN_INSN(insn, "call without frame pointer save/setup");
return 1;
}
- if (dead_end_function(file, insn->call_dest))
+ if (insn->dead_end)
return 0;
break;
case INSN_JUMP_CONDITIONAL:
case INSN_JUMP_UNCONDITIONAL:
- if (func && is_sibling_call(insn)) {
- ret = validate_sibling_call(insn, &state);
+ if (is_sibling_call(insn)) {
+ ret = validate_sibling_call(file, insn, &state);
if (ret)
return ret;
@@ -2184,7 +3703,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
ret = validate_branch(file, func,
insn->jump_dest, state);
if (ret) {
- if (backtrace)
+ if (opts.backtrace)
BT_FUNC("(branch)", insn);
return ret;
}
@@ -2197,8 +3716,8 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
case INSN_JUMP_DYNAMIC:
case INSN_JUMP_DYNAMIC_CONDITIONAL:
- if (func && is_sibling_call(insn)) {
- ret = validate_sibling_call(insn, &state);
+ if (is_sibling_call(insn)) {
+ ret = validate_sibling_call(file, insn, &state);
if (ret)
return ret;
}
@@ -2210,41 +3729,14 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
case INSN_CONTEXT_SWITCH:
if (func && (!next_insn || !next_insn->hint)) {
- WARN_FUNC("unsupported instruction in callable function",
- sec, insn->offset);
+ WARN_INSN(insn, "unsupported instruction in callable function");
return 1;
}
return 0;
- case INSN_STACK:
- if (update_insn_state(insn, &state))
- return 1;
-
- if (insn->stack_op.dest.type == OP_DEST_PUSHF) {
- if (!state.uaccess_stack) {
- state.uaccess_stack = 1;
- } else if (state.uaccess_stack >> 31) {
- WARN_FUNC("PUSHF stack exhausted", sec, insn->offset);
- return 1;
- }
- state.uaccess_stack <<= 1;
- state.uaccess_stack |= state.uaccess;
- }
-
- if (insn->stack_op.src.type == OP_SRC_POPF) {
- if (state.uaccess_stack) {
- state.uaccess = state.uaccess_stack & 1;
- state.uaccess_stack >>= 1;
- if (state.uaccess_stack == 1)
- state.uaccess_stack = 0;
- }
- }
-
- break;
-
case INSN_STAC:
if (state.uaccess) {
- WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
+ WARN_INSN(insn, "recursive UACCESS enable");
return 1;
}
@@ -2253,12 +3745,12 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
case INSN_CLAC:
if (!state.uaccess && func) {
- WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
+ WARN_INSN(insn, "redundant UACCESS disable");
return 1;
}
if (func_uaccess_safe(func) && !state.uaccess_stack) {
- WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
+ WARN_INSN(insn, "UACCESS-safe disables UACCESS");
return 1;
}
@@ -2266,15 +3758,19 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
break;
case INSN_STD:
- if (state.df)
- WARN_FUNC("recursive STD", sec, insn->offset);
+ if (state.df) {
+ WARN_INSN(insn, "recursive STD");
+ return 1;
+ }
state.df = true;
break;
case INSN_CLD:
- if (!state.df && func)
- WARN_FUNC("redundant CLD", sec, insn->offset);
+ if (!state.df && func) {
+ WARN_INSN(insn, "redundant CLD");
+ return 1;
+ }
state.df = false;
break;
@@ -2287,36 +3783,188 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 0;
if (!next_insn) {
- if (state.cfa.base == CFI_UNDEFINED)
+ if (state.cfi.cfa.base == CFI_UNDEFINED)
return 0;
WARN("%s: unexpected end of section", sec->name);
return 1;
}
+ prev_insn = insn;
insn = next_insn;
}
return 0;
}
-static int validate_unwind_hints(struct objtool_file *file)
+static int validate_unwind_hint(struct objtool_file *file,
+ struct instruction *insn,
+ struct insn_state *state)
+{
+ if (insn->hint && !insn->visited && !insn->ignore) {
+ int ret = validate_branch(file, insn_func(insn), insn, *state);
+ if (ret && opts.backtrace)
+ BT_FUNC("<=== (hint)", insn);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int validate_unwind_hints(struct objtool_file *file, struct section *sec)
{
struct instruction *insn;
- int ret, warnings = 0;
struct insn_state state;
+ int warnings = 0;
if (!file->hints)
return 0;
- clear_insn_state(&state);
+ init_insn_state(file, &state, sec);
+
+ if (sec) {
+ sec_for_each_insn(file, sec, insn)
+ warnings += validate_unwind_hint(file, insn, &state);
+ } else {
+ for_each_insn(file, insn)
+ warnings += validate_unwind_hint(file, insn, &state);
+ }
+
+ return warnings;
+}
+
+/*
+ * Validate rethunk entry constraint: must untrain RET before the first RET.
+ *
+ * Follow every branch (intra-function) and ensure VALIDATE_UNRET_END comes
+ * before an actual RET instruction.
+ */
+static int validate_unret(struct objtool_file *file, struct instruction *insn)
+{
+ struct instruction *next, *dest;
+ int ret, warnings = 0;
+
+ for (;;) {
+ next = next_insn_to_validate(file, insn);
+
+ if (insn->visited & VISITED_UNRET)
+ return 0;
+
+ insn->visited |= VISITED_UNRET;
+
+ if (!insn->ignore_alts && insn->alts) {
+ struct alternative *alt;
+ bool skip_orig = false;
+
+ for (alt = insn->alts; alt; alt = alt->next) {
+ if (alt->skip_orig)
+ skip_orig = true;
+
+ ret = validate_unret(file, alt->insn);
+ if (ret) {
+ if (opts.backtrace)
+ BT_FUNC("(alt)", insn);
+ return ret;
+ }
+ }
+
+ if (skip_orig)
+ return 0;
+ }
+
+ switch (insn->type) {
+
+ case INSN_CALL_DYNAMIC:
+ case INSN_JUMP_DYNAMIC:
+ case INSN_JUMP_DYNAMIC_CONDITIONAL:
+ WARN_INSN(insn, "early indirect call");
+ return 1;
+
+ case INSN_JUMP_UNCONDITIONAL:
+ case INSN_JUMP_CONDITIONAL:
+ if (!is_sibling_call(insn)) {
+ if (!insn->jump_dest) {
+ WARN_INSN(insn, "unresolved jump target after linking?!?");
+ return -1;
+ }
+ ret = validate_unret(file, insn->jump_dest);
+ if (ret) {
+ if (opts.backtrace) {
+ BT_FUNC("(branch%s)", insn,
+ insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : "");
+ }
+ return ret;
+ }
+
+ if (insn->type == INSN_JUMP_UNCONDITIONAL)
+ return 0;
+
+ break;
+ }
+
+ /* fallthrough */
+ case INSN_CALL:
+ dest = find_insn(file, insn_call_dest(insn)->sec,
+ insn_call_dest(insn)->offset);
+ if (!dest) {
+ WARN("Unresolved function after linking!?: %s",
+ insn_call_dest(insn)->name);
+ return -1;
+ }
+
+ ret = validate_unret(file, dest);
+ if (ret) {
+ if (opts.backtrace)
+ BT_FUNC("(call)", insn);
+ return ret;
+ }
+ /*
+ * If a call returns without error, it must have seen UNTRAIN_RET.
+ * Therefore any non-error return is a success.
+ */
+ return 0;
+
+ case INSN_RETURN:
+ WARN_INSN(insn, "RET before UNTRAIN");
+ return 1;
+
+ case INSN_NOP:
+ if (insn->retpoline_safe)
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+
+ if (!next) {
+			WARN_INSN(insn, "the end!");
+ return -1;
+ }
+ insn = next;
+ }
+
+ return warnings;
+}
+
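Very loosely, validate_unret() is a depth-first walk over every reachable path which must hit the "untrain" point before any RET. The toy control-flow graph below captures only that property (a jump that skips the untrain sequence gets reported); it ignores calls, alternatives and all the other cases the real walk handles.

/* Illustrative sketch only: every path must hit UNTRAIN before RET. */
#include <stdio.h>

enum toy_type { T_OTHER, T_UNTRAIN, T_RET, T_JMP };

struct toy_insn {
	enum toy_type type;
	int jump_to;		/* index, used when type == T_JMP */
	int visited;
};

static struct toy_insn prog[] = {
	[0] = { T_OTHER,   0, 0 },
	[1] = { T_JMP,     3, 0 },	/* skips the untrain sequence: bad */
	[2] = { T_UNTRAIN, 0, 0 },
	[3] = { T_RET,     0, 0 },
};

static int walk(int i)
{
	for (;;) {
		if (prog[i].visited)
			return 0;
		prog[i].visited = 1;

		switch (prog[i].type) {
		case T_UNTRAIN:
			return 0;		/* this path is safe */
		case T_RET:
			printf("RET before UNTRAIN at insn %d\n", i);
			return 1;
		case T_JMP:
			return walk(prog[i].jump_to);
		default:
			break;
		}
		i++;
	}
}

int main(void)
{
	return walk(0);
}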
+/*
+ * Validate that all branches starting at VALIDATE_UNRET_BEGIN encounter
+ * VALIDATE_UNRET_END before RET.
+ */
+static int validate_unrets(struct objtool_file *file)
+{
+ struct instruction *insn;
+ int ret, warnings = 0;
for_each_insn(file, insn) {
- if (insn->hint && !insn->visited) {
- ret = validate_branch(file, insn->func, insn, state);
- if (ret && backtrace)
- BT_FUNC("<=== (hint)", insn);
- warnings += ret;
+ if (!insn->unret)
+ continue;
+
+ ret = validate_unret(file, insn);
+ if (ret < 0) {
+ WARN_INSN(insn, "Failed UNRET validation");
+ return ret;
}
+ warnings += ret;
}
return warnings;
@@ -2329,24 +3977,25 @@ static int validate_retpoline(struct objtool_file *file)
for_each_insn(file, insn) {
if (insn->type != INSN_JUMP_DYNAMIC &&
- insn->type != INSN_CALL_DYNAMIC)
+ insn->type != INSN_CALL_DYNAMIC &&
+ insn->type != INSN_RETURN)
continue;
if (insn->retpoline_safe)
continue;
- /*
- * .init.text code is ran before userspace and thus doesn't
- * strictly need retpolines, except for modules which are
- * loaded late, they very much do need retpoline in their
- * .init.text
- */
- if (!strcmp(insn->sec->name, ".init.text") && !module)
+ if (insn->sec->init)
continue;
- WARN_FUNC("indirect %s found in RETPOLINE build",
- insn->sec, insn->offset,
- insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+ if (insn->type == INSN_RETURN) {
+ if (opts.rethunk) {
+ WARN_INSN(insn, "'naked' return found in RETHUNK build");
+ } else
+ continue;
+ } else {
+ WARN_INSN(insn, "indirect %s found in RETPOLINE build",
+ insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call");
+ }
warnings++;
}
@@ -2357,43 +4006,102 @@ static int validate_retpoline(struct objtool_file *file)
static bool is_kasan_insn(struct instruction *insn)
{
return (insn->type == INSN_CALL &&
- !strcmp(insn->call_dest->name, "__asan_handle_no_return"));
+ !strcmp(insn_call_dest(insn)->name, "__asan_handle_no_return"));
}
static bool is_ubsan_insn(struct instruction *insn)
{
return (insn->type == INSN_CALL &&
- !strcmp(insn->call_dest->name,
+ !strcmp(insn_call_dest(insn)->name,
"__ubsan_handle_builtin_unreachable"));
}
-static bool ignore_unreachable_insn(struct instruction *insn)
+static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn)
{
int i;
+ struct instruction *prev_insn;
- if (insn->ignore || insn->type == INSN_NOP)
+ if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP)
return true;
/*
- * Ignore any unused exceptions. This can happen when a whitelisted
- * function has an exception table entry.
- *
- * Also ignore alternative replacement instructions. This can happen
+ * Ignore alternative replacement instructions. This can happen
* when a whitelisted function uses one of the ALTERNATIVE macros.
*/
- if (!strcmp(insn->sec->name, ".fixup") ||
- !strcmp(insn->sec->name, ".altinstr_replacement") ||
+ if (!strcmp(insn->sec->name, ".altinstr_replacement") ||
!strcmp(insn->sec->name, ".altinstr_aux"))
return true;
/*
+ * Whole archive runs might encounter dead code from weak symbols.
+ * This is where the linker will have dropped the weak symbol in
+ * favour of a regular symbol, but leaves the code in place.
+ *
+	 * In this case we'll find a piece of code (a whole function) that is
+	 * not covered by any non-section symbol. Ignore such instructions.
+ */
+ if (opts.link && !insn_func(insn)) {
+ int size = find_symbol_hole_containing(insn->sec, insn->offset);
+ unsigned long end = insn->offset + size;
+
+ if (!size) /* not a hole */
+ return false;
+
+ if (size < 0) /* hole until the end */
+ return true;
+
+ sec_for_each_insn_continue(file, insn) {
+ /*
+ * If we reach a visited instruction at or before the
+ * end of the hole, ignore the unreachable.
+ */
+ if (insn->visited)
+ return true;
+
+ if (insn->offset >= end)
+ break;
+
+ /*
+ * If this hole jumps to a .cold function, mark it ignore too.
+ */
+ if (insn->jump_dest && insn_func(insn->jump_dest) &&
+ strstr(insn_func(insn->jump_dest)->name, ".cold")) {
+ struct instruction *dest = insn->jump_dest;
+ func_for_each_insn(file, insn_func(dest), dest)
+ dest->ignore = true;
+ }
+ }
+
+ return false;
+ }
+
+ if (!insn_func(insn))
+ return false;
+
+ if (insn_func(insn)->static_call_tramp)
+ return true;
+
+ /*
+ * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
+ * __builtin_unreachable(). The BUG() macro has an unreachable() after
+ * the UD2, which causes GCC's undefined trap logic to emit another UD2
+ * (or occasionally a JMP to UD2).
+ *
+ * It may also insert a UD2 after calling a __noreturn function.
+ */
+ prev_insn = prev_insn_same_sec(file, insn);
+ if (prev_insn->dead_end &&
+ (insn->type == INSN_BUG ||
+ (insn->type == INSN_JUMP_UNCONDITIONAL &&
+ insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
+ return true;
+
+ /*
* Check if this (or a subsequent) instruction is related to
* CONFIG_UBSAN or CONFIG_KASAN.
*
* End the search at 5 instructions to avoid going into the weeds.
*/
- if (!insn->func)
- return false;
for (i = 0; i < 5; i++) {
if (is_kasan_insn(insn) || is_ubsan_insn(insn))
@@ -2401,7 +4109,7 @@ static bool ignore_unreachable_insn(struct instruction *insn)
if (insn->type == INSN_JUMP_UNCONDITIONAL) {
if (insn->jump_dest &&
- insn->jump_dest->func == insn->func) {
+ insn_func(insn->jump_dest) == insn_func(insn)) {
insn = insn->jump_dest;
continue;
}
@@ -2409,64 +4117,396 @@ static bool ignore_unreachable_insn(struct instruction *insn)
break;
}
- if (insn->offset + insn->len >= insn->func->offset + insn->func->len)
+ if (insn->offset + insn->len >= insn_func(insn)->offset + insn_func(insn)->len)
break;
- insn = list_next_entry(insn, list);
+ insn = next_insn_same_sec(file, insn);
}
return false;
}
-static int validate_section(struct objtool_file *file, struct section *sec)
+static int add_prefix_symbol(struct objtool_file *file, struct symbol *func)
+{
+ struct instruction *insn, *prev;
+ struct cfi_state *cfi;
+
+ insn = find_insn(file, func->sec, func->offset);
+ if (!insn)
+ return -1;
+
+ for (prev = prev_insn_same_sec(file, insn);
+ prev;
+ prev = prev_insn_same_sec(file, prev)) {
+ u64 offset;
+
+ if (prev->type != INSN_NOP)
+ return -1;
+
+ offset = func->offset - prev->offset;
+
+ if (offset > opts.prefix)
+ return -1;
+
+ if (offset < opts.prefix)
+ continue;
+
+ elf_create_prefix_symbol(file->elf, func, opts.prefix);
+ break;
+ }
+
+ if (!prev)
+ return -1;
+
+ if (!insn->cfi) {
+ /*
+ * This can happen if stack validation isn't enabled or the
+ * function is annotated with STACK_FRAME_NON_STANDARD.
+ */
+ return 0;
+ }
+
+ /* Propagate insn->cfi to the prefix code */
+ cfi = cfi_hash_find_or_add(insn->cfi);
+ for (; prev != insn; prev = next_insn_same_sec(file, prev))
+ prev->cfi = cfi;
+
+ return 0;
+}
+
+static int add_prefix_symbols(struct objtool_file *file)
{
+ struct section *sec;
struct symbol *func;
+ int warnings = 0;
+
+ for_each_sec(file, sec) {
+ if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ continue;
+
+ sec_for_each_sym(sec, func) {
+ if (func->type != STT_FUNC)
+ continue;
+
+ add_prefix_symbol(file, func);
+ }
+ }
+
+ return warnings;
+}
+
+static int validate_symbol(struct objtool_file *file, struct section *sec,
+ struct symbol *sym, struct insn_state *state)
+{
struct instruction *insn;
- struct insn_state state;
- int ret, warnings = 0;
+ int ret;
- clear_insn_state(&state);
+ if (!sym->len) {
+ WARN("%s() is missing an ELF size annotation", sym->name);
+ return 1;
+ }
- state.cfa = initial_func_cfi.cfa;
- memcpy(&state.regs, &initial_func_cfi.regs,
- CFI_NUM_REGS * sizeof(struct cfi_reg));
- state.stack_size = initial_func_cfi.cfa.offset;
+ if (sym->pfunc != sym || sym->alias != sym)
+ return 0;
- list_for_each_entry(func, &sec->symbol_list, list) {
+ insn = find_insn(file, sec, sym->offset);
+ if (!insn || insn->ignore || insn->visited)
+ return 0;
+
+ state->uaccess = sym->uaccess_safe;
+
+ ret = validate_branch(file, insn_func(insn), insn, *state);
+ if (ret && opts.backtrace)
+ BT_FUNC("<=== (sym)", insn);
+ return ret;
+}
+
+static int validate_section(struct objtool_file *file, struct section *sec)
+{
+ struct insn_state state;
+ struct symbol *func;
+ int warnings = 0;
+
+ sec_for_each_sym(sec, func) {
if (func->type != STT_FUNC)
continue;
- if (!func->len) {
- WARN("%s() is missing an ELF size annotation",
- func->name);
- warnings++;
+ init_insn_state(file, &state, sec);
+ set_func_state(&state.cfi);
+
+ warnings += validate_symbol(file, sec, func, &state);
+ }
+
+ return warnings;
+}
+
+static int validate_noinstr_sections(struct objtool_file *file)
+{
+ struct section *sec;
+ int warnings = 0;
+
+ sec = find_section_by_name(file->elf, ".noinstr.text");
+ if (sec) {
+ warnings += validate_section(file, sec);
+ warnings += validate_unwind_hints(file, sec);
+ }
+
+ sec = find_section_by_name(file->elf, ".entry.text");
+ if (sec) {
+ warnings += validate_section(file, sec);
+ warnings += validate_unwind_hints(file, sec);
+ }
+
+ sec = find_section_by_name(file->elf, ".cpuidle.text");
+ if (sec) {
+ warnings += validate_section(file, sec);
+ warnings += validate_unwind_hints(file, sec);
+ }
+
+ return warnings;
+}
+
+static int validate_functions(struct objtool_file *file)
+{
+ struct section *sec;
+ int warnings = 0;
+
+ for_each_sec(file, sec) {
+ if (!(sec->sh.sh_flags & SHF_EXECINSTR))
+ continue;
+
+ warnings += validate_section(file, sec);
+ }
+
+ return warnings;
+}
+
+static void mark_endbr_used(struct instruction *insn)
+{
+ if (!list_empty(&insn->call_node))
+ list_del_init(&insn->call_node);
+}
+
+static bool noendbr_range(struct objtool_file *file, struct instruction *insn)
+{
+ struct symbol *sym = find_symbol_containing(insn->sec, insn->offset-1);
+ struct instruction *first;
+
+ if (!sym)
+ return false;
+
+ first = find_insn(file, sym->sec, sym->offset);
+ if (!first)
+ return false;
+
+ if (first->type != INSN_ENDBR && !first->noendbr)
+ return false;
+
+ return insn->offset == sym->offset + sym->len;
+}
+
+static int validate_ibt_insn(struct objtool_file *file, struct instruction *insn)
+{
+ struct instruction *dest;
+ struct reloc *reloc;
+ unsigned long off;
+ int warnings = 0;
+
+ /*
+ * Looking for function pointer load relocations. Ignore
+ * direct/indirect branches:
+ */
+ switch (insn->type) {
+ case INSN_CALL:
+ case INSN_CALL_DYNAMIC:
+ case INSN_JUMP_CONDITIONAL:
+ case INSN_JUMP_UNCONDITIONAL:
+ case INSN_JUMP_DYNAMIC:
+ case INSN_JUMP_DYNAMIC_CONDITIONAL:
+ case INSN_RETURN:
+ case INSN_NOP:
+ return 0;
+ default:
+ break;
+ }
+
+ for (reloc = insn_reloc(file, insn);
+ reloc;
+ reloc = find_reloc_by_dest_range(file->elf, insn->sec,
+ reloc->offset + 1,
+ (insn->offset + insn->len) - (reloc->offset + 1))) {
+
+ /*
+ * static_call_update() references the trampoline, which
+ * doesn't have (or need) ENDBR. Skip warning in that case.
+ */
+ if (reloc->sym->static_call_tramp)
+ continue;
+
+ off = reloc->sym->offset;
+ if (reloc->type == R_X86_64_PC32 || reloc->type == R_X86_64_PLT32)
+ off += arch_dest_reloc_offset(reloc->addend);
+ else
+ off += reloc->addend;
+
+ dest = find_insn(file, reloc->sym->sec, off);
+ if (!dest)
+ continue;
+
+ if (dest->type == INSN_ENDBR) {
+ mark_endbr_used(dest);
+ continue;
}
- if (func->pfunc != func || func->alias != func)
+ if (insn_func(dest) && insn_func(dest) == insn_func(insn)) {
+ /*
+ * Anything from->to self is either _THIS_IP_ or
+ * IRET-to-self.
+ *
+ * There is no sane way to annotate _THIS_IP_ since the
+ * compiler treats the relocation as a constant and is
+ * happy to fold in offsets, skewing any annotation we
+ * do, leading to vast amounts of false-positives.
+ *
+ * There's also compiler generated _THIS_IP_ through
+ * KCOV and such which we have no hope of annotating.
+ *
+ * As such, blanket accept self-references without
+ * issue.
+ */
continue;
+ }
- insn = find_insn(file, sec, func->offset);
- if (!insn || insn->ignore || insn->visited)
+ /*
+ * Accept anything ANNOTATE_NOENDBR.
+ */
+ if (dest->noendbr)
continue;
- state.uaccess = func->uaccess_safe;
+ /*
+ * Accept if this is the instruction after a symbol
+ * that is (no)endbr -- typical code-range usage.
+ */
+ if (noendbr_range(file, dest))
+ continue;
- ret = validate_branch(file, func, insn, state);
- if (ret && backtrace)
- BT_FUNC("<=== (func)", insn);
- warnings += ret;
+ WARN_INSN(insn, "relocation to !ENDBR: %s", offstr(dest->sec, dest->offset));
+
+ warnings++;
}
return warnings;
}
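For reference, the ENDBR64 instruction that validate_ibt_insn() expects at indirect-branch targets encodes as the byte sequence f3 0f 1e fa; a small standalone check of those bytes (illustrative only, not objtool's decoder):

#include <stdbool.h>
#include <stdio.h>

static bool is_endbr64(const unsigned char *p)
{
        return p[0] == 0xf3 && p[1] == 0x0f && p[2] == 0x1e && p[3] == 0xfa;
}

int main(void)
{
        unsigned char target[] = { 0xf3, 0x0f, 0x1e, 0xfa, 0x55 }; /* endbr64; push %rbp */

        printf("valid IBT target: %s\n", is_endbr64(target) ? "yes" : "no");
        return 0;
}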
-static int validate_functions(struct objtool_file *file)
+static int validate_ibt_data_reloc(struct objtool_file *file,
+ struct reloc *reloc)
+{
+ struct instruction *dest;
+
+ dest = find_insn(file, reloc->sym->sec,
+ reloc->sym->offset + reloc->addend);
+ if (!dest)
+ return 0;
+
+ if (dest->type == INSN_ENDBR) {
+ mark_endbr_used(dest);
+ return 0;
+ }
+
+ if (dest->noendbr)
+ return 0;
+
+ WARN_FUNC("data relocation to !ENDBR: %s",
+ reloc->sec->base, reloc->offset,
+ offstr(dest->sec, dest->offset));
+
+ return 1;
+}
+
+/*
+ * Validate IBT rules and remove used ENDBR instructions from the seal list.
+ * Unused ENDBR instructions will be annotated for sealing (i.e., replaced with
+ * NOPs) later, in create_ibt_endbr_seal_sections().
+ */
+static int validate_ibt(struct objtool_file *file)
{
struct section *sec;
+ struct reloc *reloc;
+ struct instruction *insn;
int warnings = 0;
- for_each_sec(file, sec)
- warnings += validate_section(file, sec);
+ for_each_insn(file, insn)
+ warnings += validate_ibt_insn(file, insn);
+
+ for_each_sec(file, sec) {
+
+ /* Already done by validate_ibt_insn() */
+ if (sec->sh.sh_flags & SHF_EXECINSTR)
+ continue;
+
+ if (!sec->reloc)
+ continue;
+
+ /*
+ * These sections can reference text addresses, but not with
+ * the intent to indirect branch to them.
+ */
+ if ((!strncmp(sec->name, ".discard", 8) &&
+ strcmp(sec->name, ".discard.ibt_endbr_noseal")) ||
+ !strncmp(sec->name, ".debug", 6) ||
+ !strcmp(sec->name, ".altinstructions") ||
+ !strcmp(sec->name, ".ibt_endbr_seal") ||
+ !strcmp(sec->name, ".orc_unwind_ip") ||
+ !strcmp(sec->name, ".parainstructions") ||
+ !strcmp(sec->name, ".retpoline_sites") ||
+ !strcmp(sec->name, ".smp_locks") ||
+ !strcmp(sec->name, ".static_call_sites") ||
+ !strcmp(sec->name, "_error_injection_whitelist") ||
+ !strcmp(sec->name, "_kprobe_blacklist") ||
+ !strcmp(sec->name, "__bug_table") ||
+ !strcmp(sec->name, "__ex_table") ||
+ !strcmp(sec->name, "__jump_table") ||
+ !strcmp(sec->name, "__mcount_loc") ||
+ !strcmp(sec->name, ".kcfi_traps") ||
+ strstr(sec->name, "__patchable_function_entries"))
+ continue;
+
+ list_for_each_entry(reloc, &sec->reloc->reloc_list, list)
+ warnings += validate_ibt_data_reloc(file, reloc);
+ }
+
+ return warnings;
+}
+
+static int validate_sls(struct objtool_file *file)
+{
+ struct instruction *insn, *next_insn;
+ int warnings = 0;
+
+ for_each_insn(file, insn) {
+ next_insn = next_insn_same_sec(file, insn);
+
+ if (insn->retpoline_safe)
+ continue;
+
+ switch (insn->type) {
+ case INSN_RETURN:
+ if (!next_insn || next_insn->type != INSN_TRAP) {
+ WARN_INSN(insn, "missing int3 after ret");
+ warnings++;
+ }
+
+ break;
+ case INSN_JUMP_DYNAMIC:
+ if (!next_insn || next_insn->type != INSN_TRAP) {
+ WARN_INSN(insn, "missing int3 after indirect jump");
+ warnings++;
+ }
+ break;
+ default:
+ break;
+ }
+ }
return warnings;
}
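A minimal sketch of the straight-line-speculation rule validate_sls() enforces, assuming x86-64 opcodes (0xc3 near RET, 0xcc INT3, 0x90 NOP): every RET or indirect JMP should be immediately followed by an INT3 speculation trap.

#include <stdio.h>

int main(void)
{
        unsigned char good[] = { 0xc3, 0xcc };  /* ret; int3 */
        unsigned char bad[]  = { 0xc3, 0x90 };  /* ret; nop  */

        printf("good: %s\n", good[1] == 0xcc ? "ok" : "missing int3 after ret");
        printf("bad:  %s\n", bad[1]  == 0xcc ? "ok" : "missing int3 after ret");
        return 0;
}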
@@ -2479,91 +4519,173 @@ static int validate_reachable_instructions(struct objtool_file *file)
return 0;
for_each_insn(file, insn) {
- if (insn->visited || ignore_unreachable_insn(insn))
+ if (insn->visited || ignore_unreachable_insn(file, insn))
continue;
- WARN_FUNC("unreachable instruction", insn->sec, insn->offset);
+ WARN_INSN(insn, "unreachable instruction");
return 1;
}
return 0;
}
-static struct objtool_file file;
-
-int check(const char *_objname, bool orc)
+int check(struct objtool_file *file)
{
int ret, warnings = 0;
- objname = _objname;
-
- file.elf = elf_read(objname, orc ? O_RDWR : O_RDONLY);
- if (!file.elf)
- return 1;
+ arch_initial_func_cfi_state(&initial_func_cfi);
+ init_cfi_state(&init_cfi);
+ init_cfi_state(&func_cfi);
+ set_func_state(&func_cfi);
- INIT_LIST_HEAD(&file.insn_list);
- hash_init(file.insn_hash);
- file.c_file = find_section_by_name(file.elf, ".comment");
- file.ignore_unreachables = no_unreachable;
- file.hints = false;
+ if (!cfi_hash_alloc(1UL << (file->elf->symbol_bits - 3)))
+ goto out;
- arch_initial_func_cfi_state(&initial_func_cfi);
+ cfi_hash_add(&init_cfi);
+ cfi_hash_add(&func_cfi);
- ret = decode_sections(&file);
+ ret = decode_sections(file);
if (ret < 0)
goto out;
+
warnings += ret;
- if (list_empty(&file.insn_list))
+ if (!nr_insns)
goto out;
- if (retpoline) {
- ret = validate_retpoline(&file);
+ if (opts.retpoline) {
+ ret = validate_retpoline(file);
if (ret < 0)
return ret;
warnings += ret;
}
- ret = validate_functions(&file);
- if (ret < 0)
- goto out;
- warnings += ret;
+ if (opts.stackval || opts.orc || opts.uaccess) {
+ ret = validate_functions(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
- ret = validate_unwind_hints(&file);
- if (ret < 0)
- goto out;
- warnings += ret;
+ ret = validate_unwind_hints(file, NULL);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
- if (!warnings) {
- ret = validate_reachable_instructions(&file);
+ if (!warnings) {
+ ret = validate_reachable_instructions(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
+ } else if (opts.noinstr) {
+ ret = validate_noinstr_sections(file);
if (ret < 0)
goto out;
warnings += ret;
}
- if (orc) {
- ret = create_orc(&file);
+ if (opts.unret) {
+ /*
+ * Must be after validate_branch() and friends, it plays
+ * further games with insn->visited.
+ */
+ ret = validate_unrets(file);
+ if (ret < 0)
+ return ret;
+ warnings += ret;
+ }
+
+ if (opts.ibt) {
+ ret = validate_ibt(file);
if (ret < 0)
goto out;
+ warnings += ret;
+ }
- ret = create_orc_sections(&file);
+ if (opts.sls) {
+ ret = validate_sls(file);
if (ret < 0)
goto out;
+ warnings += ret;
+ }
- ret = elf_write(file.elf);
+ if (opts.static_call) {
+ ret = create_static_call_sections(file);
if (ret < 0)
goto out;
+ warnings += ret;
}
-out:
- if (ret < 0) {
- /*
- * Fatal error. The binary is corrupt or otherwise broken in
- * some way, or objtool itself is broken. Fail the kernel
- * build.
- */
- return ret;
+ if (opts.retpoline) {
+ ret = create_retpoline_sites_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
+ if (opts.cfi) {
+ ret = create_cfi_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
+ if (opts.rethunk) {
+ ret = create_return_sites_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+
+ if (opts.hack_skylake) {
+ ret = create_direct_call_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+ }
+
+ if (opts.mcount) {
+ ret = create_mcount_loc_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
}
+ if (opts.prefix) {
+ ret = add_prefix_symbols(file);
+ if (ret < 0)
+ return ret;
+ warnings += ret;
+ }
+
+ if (opts.ibt) {
+ ret = create_ibt_endbr_seal_sections(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
+ if (opts.orc && nr_insns) {
+ ret = orc_create(file);
+ if (ret < 0)
+ goto out;
+ warnings += ret;
+ }
+
+
+ if (opts.stats) {
+ printf("nr_insns_visited: %ld\n", nr_insns_visited);
+ printf("nr_cfi: %ld\n", nr_cfi);
+ printf("nr_cfi_reused: %ld\n", nr_cfi_reused);
+ printf("nr_cfi_cache: %ld\n", nr_cfi_cache);
+ }
+
+out:
+ /*
+ * For now, don't fail the kernel build on fatal warnings. These
+ * errors are still fairly common due to the growing matrix of
+ * supported toolchains and their recent pace of change.
+ */
return 0;
}
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
deleted file mode 100644
index f0ce8ffe7135..000000000000
--- a/tools/objtool/check.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- */
-
-#ifndef _CHECK_H
-#define _CHECK_H
-
-#include <stdbool.h>
-#include "elf.h"
-#include "cfi.h"
-#include "arch.h"
-#include "orc.h"
-#include <linux/hashtable.h>
-
-struct insn_state {
- struct cfi_reg cfa;
- struct cfi_reg regs[CFI_NUM_REGS];
- int stack_size;
- unsigned char type;
- bool bp_scratch;
- bool drap, end, uaccess, df;
- unsigned int uaccess_stack;
- int drap_reg, drap_offset;
- struct cfi_reg vals[CFI_NUM_REGS];
-};
-
-struct instruction {
- struct list_head list;
- struct hlist_node hash;
- struct section *sec;
- unsigned long offset;
- unsigned int len;
- enum insn_type type;
- unsigned long immediate;
- bool alt_group, dead_end, ignore, hint, save, restore, ignore_alts;
- bool retpoline_safe;
- u8 visited;
- struct symbol *call_dest;
- struct instruction *jump_dest;
- struct instruction *first_jump_src;
- struct rela *jump_table;
- struct list_head alts;
- struct symbol *func;
- struct stack_op stack_op;
- struct insn_state state;
- struct orc_entry orc;
-};
-
-struct objtool_file {
- struct elf *elf;
- struct list_head insn_list;
- DECLARE_HASHTABLE(insn_hash, 20);
- bool ignore_unreachables, c_file, hints, rodata;
-};
-
-int check(const char *objname, bool orc);
-
-struct instruction *find_insn(struct objtool_file *file,
- struct section *sec, unsigned long offset);
-
-#define for_each_insn(file, insn) \
- list_for_each_entry(insn, &file->insn_list, list)
-
-#define sec_for_each_insn(file, sec, insn) \
- for (insn = find_insn(file, sec, 0); \
- insn && &insn->list != &file->insn_list && \
- insn->sec == sec; \
- insn = list_next_entry(insn, list))
-
-
-#endif /* _CHECK_H */
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 09ddc8f1def3..500e92979a31 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -9,16 +9,18 @@
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/mman.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
-#include "builtin.h"
+#include <linux/interval_tree_generic.h>
+#include <objtool/builtin.h>
-#include "elf.h"
-#include "warn.h"
+#include <objtool/elf.h>
+#include <objtool/warn.h>
#define MAX_NAME_LEN 128
@@ -27,97 +29,78 @@ static inline u32 str_hash(const char *str)
return jhash(str, strlen(str), 0);
}
-static void rb_add(struct rb_root *tree, struct rb_node *node,
- int (*cmp)(struct rb_node *, const struct rb_node *))
+#define __elf_table(name) (elf->name##_hash)
+#define __elf_bits(name) (elf->name##_bits)
+
+#define elf_hash_add(name, node, key) \
+ hlist_add_head(node, &__elf_table(name)[hash_min(key, __elf_bits(name))])
+
+#define elf_hash_for_each_possible(name, obj, member, key) \
+ hlist_for_each_entry(obj, &__elf_table(name)[hash_min(key, __elf_bits(name))], member)
+
+#define elf_alloc_hash(name, size) \
+({ \
+ __elf_bits(name) = max(10, ilog2(size)); \
+ __elf_table(name) = mmap(NULL, sizeof(struct hlist_head) << __elf_bits(name), \
+ PROT_READ|PROT_WRITE, \
+ MAP_PRIVATE|MAP_ANON, -1, 0); \
+ if (__elf_table(name) == (void *)-1L) { \
+ WARN("mmap fail " #name); \
+ __elf_table(name) = NULL; \
+ } \
+ __elf_table(name); \
+})
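The sizing rule behind elf_alloc_hash(), 2^bits buckets with bits = max(10, ilog2(size)), worked through for a few table sizes (standalone sketch; ilog2_ul() is a stand-in for the kernel's ilog2()):

#include <stdio.h>

static int ilog2_ul(unsigned long v)
{
        int l = -1;

        while (v) {
                v >>= 1;
                l++;
        }
        return l;
}

int main(void)
{
        unsigned long sizes[] = { 100, 5000, 1UL << 20 };

        for (int i = 0; i < 3; i++) {
                int bits = ilog2_ul(sizes[i]);

                if (bits < 10)
                        bits = 10;
                printf("%lu entries -> %d bits (%lu buckets)\n",
                       sizes[i], bits, 1UL << bits);
        }
        return 0;
}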
+
+static inline unsigned long __sym_start(struct symbol *s)
{
- struct rb_node **link = &tree->rb_node;
- struct rb_node *parent = NULL;
-
- while (*link) {
- parent = *link;
- if (cmp(node, parent) < 0)
- link = &parent->rb_left;
- else
- link = &parent->rb_right;
- }
-
- rb_link_node(node, parent, link);
- rb_insert_color(node, tree);
+ return s->offset;
}
-static struct rb_node *rb_find_first(struct rb_root *tree, const void *key,
- int (*cmp)(const void *key, const struct rb_node *))
+static inline unsigned long __sym_last(struct symbol *s)
{
- struct rb_node *node = tree->rb_node;
- struct rb_node *match = NULL;
-
- while (node) {
- int c = cmp(key, node);
- if (c <= 0) {
- if (!c)
- match = node;
- node = node->rb_left;
- } else if (c > 0) {
- node = node->rb_right;
- }
- }
-
- return match;
+ return s->offset + s->len - 1;
}
-static struct rb_node *rb_next_match(struct rb_node *node, const void *key,
- int (*cmp)(const void *key, const struct rb_node *))
-{
- node = rb_next(node);
- if (node && cmp(key, node))
- node = NULL;
- return node;
-}
+INTERVAL_TREE_DEFINE(struct symbol, node, unsigned long, __subtree_last,
+ __sym_start, __sym_last, static, __sym)
-#define rb_for_each(tree, node, key, cmp) \
- for ((node) = rb_find_first((tree), (key), (cmp)); \
- (node); (node) = rb_next_match((node), (key), (cmp)))
+#define __sym_for_each(_iter, _tree, _start, _end) \
+ for (_iter = __sym_iter_first((_tree), (_start), (_end)); \
+ _iter; _iter = __sym_iter_next(_iter, (_start), (_end)))
-static int symbol_to_offset(struct rb_node *a, const struct rb_node *b)
-{
- struct symbol *sa = rb_entry(a, struct symbol, node);
- struct symbol *sb = rb_entry(b, struct symbol, node);
+struct symbol_hole {
+ unsigned long key;
+ const struct symbol *sym;
+};
- if (sa->offset < sb->offset)
- return -1;
- if (sa->offset > sb->offset)
- return 1;
-
- if (sa->len < sb->len)
- return -1;
- if (sa->len > sb->len)
- return 1;
-
- sa->alias = sb;
-
- return 0;
-}
-
-static int symbol_by_offset(const void *key, const struct rb_node *node)
+/*
+ * Find !section symbol where @offset is after it.
+ */
+static int symbol_hole_by_offset(const void *key, const struct rb_node *node)
{
const struct symbol *s = rb_entry(node, struct symbol, node);
- const unsigned long *o = key;
+ struct symbol_hole *sh = (void *)key;
- if (*o < s->offset)
+ if (sh->key < s->offset)
return -1;
- if (*o > s->offset + s->len)
+
+ if (sh->key >= s->offset + s->len) {
+ if (s->type != STT_SECTION)
+ sh->sym = s;
return 1;
+ }
return 0;
}
-struct section *find_section_by_name(struct elf *elf, const char *name)
+struct section *find_section_by_name(const struct elf *elf, const char *name)
{
struct section *sec;
- hash_for_each_possible(elf->section_name_hash, sec, name_hash, str_hash(name))
+ elf_hash_for_each_possible(section_name, sec, name_hash, str_hash(name)) {
if (!strcmp(sec->name, name))
return sec;
+ }
return NULL;
}
@@ -127,9 +110,10 @@ static struct section *find_section_by_index(struct elf *elf,
{
struct section *sec;
- hash_for_each_possible(elf->section_hash, sec, hash, idx)
+ elf_hash_for_each_possible(section, sec, hash, idx) {
if (sec->idx == idx)
return sec;
+ }
return NULL;
}
@@ -138,22 +122,22 @@ static struct symbol *find_symbol_by_index(struct elf *elf, unsigned int idx)
{
struct symbol *sym;
- hash_for_each_possible(elf->symbol_hash, sym, hash, idx)
+ elf_hash_for_each_possible(symbol, sym, hash, idx) {
if (sym->idx == idx)
return sym;
+ }
return NULL;
}
struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
{
- struct rb_node *node;
-
- rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
- struct symbol *s = rb_entry(node, struct symbol, node);
+ struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
+ struct symbol *iter;
- if (s->offset == offset && s->type != STT_SECTION)
- return s;
+ __sym_for_each(iter, tree, offset, offset) {
+ if (iter->offset == offset && iter->type != STT_SECTION)
+ return iter;
}
return NULL;
@@ -161,77 +145,110 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
struct symbol *find_func_by_offset(struct section *sec, unsigned long offset)
{
- struct rb_node *node;
+ struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
+ struct symbol *iter;
- rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
- struct symbol *s = rb_entry(node, struct symbol, node);
-
- if (s->offset == offset && s->type == STT_FUNC)
- return s;
+ __sym_for_each(iter, tree, offset, offset) {
+ if (iter->offset == offset && iter->type == STT_FUNC)
+ return iter;
}
return NULL;
}
-struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
+struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset)
{
- struct rb_node *node;
-
- rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
- struct symbol *s = rb_entry(node, struct symbol, node);
+ struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
+ struct symbol *iter;
- if (s->type != STT_SECTION)
- return s;
+ __sym_for_each(iter, tree, offset, offset) {
+ if (iter->type != STT_SECTION)
+ return iter;
}
return NULL;
}
-struct symbol *find_func_containing(struct section *sec, unsigned long offset)
+/*
+ * Returns size of hole starting at @offset.
+ */
+int find_symbol_hole_containing(const struct section *sec, unsigned long offset)
{
- struct rb_node *node;
+ struct symbol_hole hole = {
+ .key = offset,
+ .sym = NULL,
+ };
+ struct rb_node *n;
+ struct symbol *s;
+
+ /*
+ * Find the rightmost symbol for which @offset is after it.
+ */
+ n = rb_find(&hole, &sec->symbol_tree.rb_root, symbol_hole_by_offset);
+
+ /* found a symbol that contains @offset */
+ if (n)
+ return 0; /* not a hole */
+
+ /* didn't find a symbol for which @offset is after it */
+ if (!hole.sym)
+ return 0; /* not a hole */
+
+ /* @offset >= sym->offset + sym->len, find symbol after it */
+ n = rb_next(&hole.sym->node);
+ if (!n)
+ return -1; /* until end of address space */
+
+ /* hole until start of next symbol */
+ s = rb_entry(n, struct symbol, node);
+ return s->offset - offset;
+}
- rb_for_each(&sec->symbol_tree, node, &offset, symbol_by_offset) {
- struct symbol *s = rb_entry(node, struct symbol, node);
+struct symbol *find_func_containing(struct section *sec, unsigned long offset)
+{
+ struct rb_root_cached *tree = (struct rb_root_cached *)&sec->symbol_tree;
+ struct symbol *iter;
- if (s->type == STT_FUNC)
- return s;
+ __sym_for_each(iter, tree, offset, offset) {
+ if (iter->type == STT_FUNC)
+ return iter;
}
return NULL;
}
-struct symbol *find_symbol_by_name(struct elf *elf, const char *name)
+struct symbol *find_symbol_by_name(const struct elf *elf, const char *name)
{
struct symbol *sym;
- hash_for_each_possible(elf->symbol_name_hash, sym, name_hash, str_hash(name))
+ elf_hash_for_each_possible(symbol_name, sym, name_hash, str_hash(name)) {
if (!strcmp(sym->name, name))
return sym;
+ }
return NULL;
}
-struct rela *find_rela_by_dest_range(struct elf *elf, struct section *sec,
+struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len)
{
- struct rela *rela, *r = NULL;
+ struct reloc *reloc, *r = NULL;
unsigned long o;
- if (!sec->rela)
+ if (!sec->reloc)
return NULL;
- sec = sec->rela;
+ sec = sec->reloc;
for_offset_range(o, offset, offset + len) {
- hash_for_each_possible(elf->rela_hash, rela, hash,
- sec_offset_hash(sec, o)) {
- if (rela->sec != sec)
+ elf_hash_for_each_possible(reloc, reloc, hash,
+ sec_offset_hash(sec, o)) {
+ if (reloc->sec != sec)
continue;
- if (rela->offset >= offset && rela->offset < offset + len) {
- if (!r || rela->offset < r->offset)
- r = rela;
+ if (reloc->offset >= offset && reloc->offset < offset + len) {
+ if (!r || reloc->offset < r->offset)
+ r = reloc;
}
}
if (r)
@@ -241,9 +258,9 @@ struct rela *find_rela_by_dest_range(struct elf *elf, struct section *sec,
return NULL;
}
-struct rela *find_rela_by_dest(struct elf *elf, struct section *sec, unsigned long offset)
+struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, unsigned long offset)
{
- return find_rela_by_dest_range(elf, sec, offset, 1);
+ return find_reloc_by_dest_range(elf, sec, offset, 1);
}
static int read_sections(struct elf *elf)
@@ -263,16 +280,20 @@ static int read_sections(struct elf *elf)
return -1;
}
+ if (!elf_alloc_hash(section, sections_nr) ||
+ !elf_alloc_hash(section_name, sections_nr))
+ return -1;
+
+ elf->section_data = calloc(sections_nr, sizeof(*sec));
+ if (!elf->section_data) {
+ perror("calloc");
+ return -1;
+ }
for (i = 0; i < sections_nr; i++) {
- sec = malloc(sizeof(*sec));
- if (!sec) {
- perror("malloc");
- return -1;
- }
- memset(sec, 0, sizeof(*sec));
+ sec = &elf->section_data[i];
INIT_LIST_HEAD(&sec->symbol_list);
- INIT_LIST_HEAD(&sec->rela_list);
+ INIT_LIST_HEAD(&sec->reloc_list);
s = elf_getscn(elf->elf, i);
if (!s) {
@@ -306,15 +327,19 @@ static int read_sections(struct elf *elf)
return -1;
}
}
- sec->len = sec->sh.sh_size;
+
+ if (sec->sh.sh_flags & SHF_EXECINSTR)
+ elf->text_size += sec->sh.sh_size;
list_add_tail(&sec->list, &elf->sections);
- hash_add(elf->section_hash, &sec->hash, sec->idx);
- hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
+ elf_hash_add(section, &sec->hash, sec->idx);
+ elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
}
- if (stats)
+ if (opts.stats) {
printf("nr_sections: %lu\n", (unsigned long)sections_nr);
+ printf("section_bits: %d\n", elf->section_bits);
+ }
/* sanity check, one more call to elf_nextscn() should return NULL */
if (elf_nextscn(elf->elf, s)) {
@@ -325,36 +350,91 @@ static int read_sections(struct elf *elf)
return 0;
}
-static int read_symbols(struct elf *elf)
+static void elf_add_symbol(struct elf *elf, struct symbol *sym)
{
- struct section *symtab, *sec;
- struct symbol *sym, *pfunc;
struct list_head *entry;
struct rb_node *pnode;
+ struct symbol *iter;
+
+ INIT_LIST_HEAD(&sym->reloc_list);
+ INIT_LIST_HEAD(&sym->pv_target);
+ sym->alias = sym;
+
+ sym->type = GELF_ST_TYPE(sym->sym.st_info);
+ sym->bind = GELF_ST_BIND(sym->sym.st_info);
+
+ if (sym->type == STT_FILE)
+ elf->num_files++;
+
+ sym->offset = sym->sym.st_value;
+ sym->len = sym->sym.st_size;
+
+ __sym_for_each(iter, &sym->sec->symbol_tree, sym->offset, sym->offset) {
+ if (iter->offset == sym->offset && iter->type == sym->type)
+ iter->alias = sym;
+ }
+
+ __sym_insert(sym, &sym->sec->symbol_tree);
+ pnode = rb_prev(&sym->node);
+ if (pnode)
+ entry = &rb_entry(pnode, struct symbol, node)->list;
+ else
+ entry = &sym->sec->symbol_list;
+ list_add(&sym->list, entry);
+ elf_hash_add(symbol, &sym->hash, sym->idx);
+ elf_hash_add(symbol_name, &sym->name_hash, str_hash(sym->name));
+
+ /*
+ * Don't store empty STT_NOTYPE symbols in the rbtree. They
+ * can exist within a function, confusing the sorting.
+ */
+ if (!sym->len)
+ __sym_remove(sym, &sym->sec->symbol_tree);
+}
+
+static int read_symbols(struct elf *elf)
+{
+ struct section *symtab, *symtab_shndx, *sec;
+ struct symbol *sym, *pfunc;
int symbols_nr, i;
char *coldstr;
+ Elf_Data *shndx_data = NULL;
+ Elf32_Word shndx;
symtab = find_section_by_name(elf, ".symtab");
- if (!symtab) {
- WARN("missing symbol table");
- return -1;
+ if (symtab) {
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ if (symtab_shndx)
+ shndx_data = symtab_shndx->data;
+
+ symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+ } else {
+ /*
+ * A missing symbol table is actually possible if it's an empty
+ * .o file. This can happen for thunk_64.o. Make sure to at
+ * least allocate the symbol hash tables so we can do symbol
+ * lookups without crashing.
+ */
+ symbols_nr = 0;
}
- symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
+ if (!elf_alloc_hash(symbol, symbols_nr) ||
+ !elf_alloc_hash(symbol_name, symbols_nr))
+ return -1;
+ elf->symbol_data = calloc(symbols_nr, sizeof(*sym));
+ if (!elf->symbol_data) {
+ perror("calloc");
+ return -1;
+ }
for (i = 0; i < symbols_nr; i++) {
- sym = malloc(sizeof(*sym));
- if (!sym) {
- perror("malloc");
- return -1;
- }
- memset(sym, 0, sizeof(*sym));
- sym->alias = sym;
+ sym = &elf->symbol_data[i];
sym->idx = i;
- if (!gelf_getsym(symtab->data, i, &sym->sym)) {
- WARN_ELF("gelf_getsym");
+ if (!gelf_getsymshndx(symtab->data, shndx_data, i, &sym->sym,
+ &shndx)) {
+ WARN_ELF("gelf_getsymshndx");
goto err;
}
@@ -365,50 +445,47 @@ static int read_symbols(struct elf *elf)
goto err;
}
- sym->type = GELF_ST_TYPE(sym->sym.st_info);
- sym->bind = GELF_ST_BIND(sym->sym.st_info);
+ if ((sym->sym.st_shndx > SHN_UNDEF &&
+ sym->sym.st_shndx < SHN_LORESERVE) ||
+ (shndx_data && sym->sym.st_shndx == SHN_XINDEX)) {
+ if (sym->sym.st_shndx != SHN_XINDEX)
+ shndx = sym->sym.st_shndx;
- if (sym->sym.st_shndx > SHN_UNDEF &&
- sym->sym.st_shndx < SHN_LORESERVE) {
- sym->sec = find_section_by_index(elf,
- sym->sym.st_shndx);
+ sym->sec = find_section_by_index(elf, shndx);
if (!sym->sec) {
WARN("couldn't find section for symbol %s",
sym->name);
goto err;
}
- if (sym->type == STT_SECTION) {
+ if (GELF_ST_TYPE(sym->sym.st_info) == STT_SECTION) {
sym->name = sym->sec->name;
sym->sec->sym = sym;
}
} else
sym->sec = find_section_by_index(elf, 0);
- sym->offset = sym->sym.st_value;
- sym->len = sym->sym.st_size;
-
- rb_add(&sym->sec->symbol_tree, &sym->node, symbol_to_offset);
- pnode = rb_prev(&sym->node);
- if (pnode)
- entry = &rb_entry(pnode, struct symbol, node)->list;
- else
- entry = &sym->sec->symbol_list;
- list_add(&sym->list, entry);
- hash_add(elf->symbol_hash, &sym->hash, sym->idx);
- hash_add(elf->symbol_name_hash, &sym->name_hash, str_hash(sym->name));
+ elf_add_symbol(elf, sym);
}
- if (stats)
+ if (opts.stats) {
printf("nr_symbols: %lu\n", (unsigned long)symbols_nr);
+ printf("symbol_bits: %d\n", elf->symbol_bits);
+ }
/* Create parent/child links for any cold subfunctions */
list_for_each_entry(sec, &elf->sections, list) {
- list_for_each_entry(sym, &sec->symbol_list, list) {
+ sec_for_each_sym(sec, sym) {
char pname[MAX_NAME_LEN + 1];
size_t pnamelen;
if (sym->type != STT_FUNC)
continue;
- sym->pfunc = sym->cfunc = sym;
+
+ if (sym->pfunc == NULL)
+ sym->pfunc = sym;
+
+ if (sym->cfunc == NULL)
+ sym->cfunc = sym;
+
coldstr = strstr(sym->name, ".cold");
if (!coldstr)
continue;
@@ -456,70 +533,439 @@ err:
return -1;
}
-static int read_relas(struct elf *elf)
+static struct section *elf_create_reloc_section(struct elf *elf,
+ struct section *base,
+ int reltype);
+
+int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+ unsigned int type, struct symbol *sym, s64 addend)
+{
+ struct reloc *reloc;
+
+ if (!sec->reloc && !elf_create_reloc_section(elf, sec, SHT_RELA))
+ return -1;
+
+ reloc = malloc(sizeof(*reloc));
+ if (!reloc) {
+ perror("malloc");
+ return -1;
+ }
+ memset(reloc, 0, sizeof(*reloc));
+
+ reloc->sec = sec->reloc;
+ reloc->offset = offset;
+ reloc->type = type;
+ reloc->sym = sym;
+ reloc->addend = addend;
+
+ list_add_tail(&reloc->sym_reloc_entry, &sym->reloc_list);
+ list_add_tail(&reloc->list, &sec->reloc->reloc_list);
+ elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
+
+ sec->reloc->sh.sh_size += sec->reloc->sh.sh_entsize;
+ sec->reloc->changed = true;
+
+ return 0;
+}
+
+/*
+ * Ensure that any reloc section containing references to @sym is marked
+ * changed such that it will get re-generated in elf_rebuild_reloc_sections()
+ * with the new symbol index.
+ */
+static void elf_dirty_reloc_sym(struct elf *elf, struct symbol *sym)
+{
+ struct reloc *reloc;
+
+ list_for_each_entry(reloc, &sym->reloc_list, sym_reloc_entry)
+ reloc->sec->changed = true;
+}
+
+/*
+ * The libelf API is terrible; gelf_update_sym*() takes a data block relative
+ * index value, *NOT* the symbol index. As such, iterate the data blocks and
+ * adjust index until it fits.
+ *
+ * If no data block is found, allow adding a new data block provided the index
+ * is only one past the end.
+ */
+static int elf_update_symbol(struct elf *elf, struct section *symtab,
+ struct section *symtab_shndx, struct symbol *sym)
+{
+ Elf32_Word shndx = sym->sec ? sym->sec->idx : SHN_UNDEF;
+ Elf_Data *symtab_data = NULL, *shndx_data = NULL;
+ Elf64_Xword entsize = symtab->sh.sh_entsize;
+ int max_idx, idx = sym->idx;
+ Elf_Scn *s, *t = NULL;
+ bool is_special_shndx = sym->sym.st_shndx >= SHN_LORESERVE &&
+ sym->sym.st_shndx != SHN_XINDEX;
+
+ if (is_special_shndx)
+ shndx = sym->sym.st_shndx;
+
+ s = elf_getscn(elf->elf, symtab->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ if (symtab_shndx) {
+ t = elf_getscn(elf->elf, symtab_shndx->idx);
+ if (!t) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+ }
+
+ for (;;) {
+ /* get next data descriptor for the relevant sections */
+ symtab_data = elf_getdata(s, symtab_data);
+ if (t)
+ shndx_data = elf_getdata(t, shndx_data);
+
+ /* end-of-list */
+ if (!symtab_data) {
+ /*
+ * Over-allocate to avoid O(n^2) symbol creation
+ * behaviour. The down side is that libelf doesn't
+ * like this; see elf_truncate_section() for the fixup.
+ */
+ int num = max(1U, sym->idx/3);
+ void *buf;
+
+ if (idx) {
+ /* we don't do holes in symbol tables */
+ WARN("index out of range");
+ return -1;
+ }
+
+ /* if @idx == 0, it's the next contiguous entry, create it */
+ symtab_data = elf_newdata(s);
+ if (t)
+ shndx_data = elf_newdata(t);
+
+ buf = calloc(num, entsize);
+ if (!buf) {
+ WARN("malloc");
+ return -1;
+ }
+
+ symtab_data->d_buf = buf;
+ symtab_data->d_size = num * entsize;
+ symtab_data->d_align = 1;
+ symtab_data->d_type = ELF_T_SYM;
+
+ symtab->changed = true;
+ symtab->truncate = true;
+
+ if (t) {
+ buf = calloc(num, sizeof(Elf32_Word));
+ if (!buf) {
+ WARN("malloc");
+ return -1;
+ }
+
+ shndx_data->d_buf = buf;
+ shndx_data->d_size = num * sizeof(Elf32_Word);
+ shndx_data->d_align = sizeof(Elf32_Word);
+ shndx_data->d_type = ELF_T_WORD;
+
+ symtab_shndx->changed = true;
+ symtab_shndx->truncate = true;
+ }
+
+ break;
+ }
+
+ /* empty blocks should not happen */
+ if (!symtab_data->d_size) {
+ WARN("zero size data");
+ return -1;
+ }
+
+ /* is this the right block? */
+ max_idx = symtab_data->d_size / entsize;
+ if (idx < max_idx)
+ break;
+
+ /* adjust index and try again */
+ idx -= max_idx;
+ }
+
+ /* something went sideways */
+ if (idx < 0) {
+ WARN("negative index");
+ return -1;
+ }
+
+ /* setup extended section index magic and write the symbol */
+ if ((shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) || is_special_shndx) {
+ sym->sym.st_shndx = shndx;
+ if (!shndx_data)
+ shndx = 0;
+ } else {
+ sym->sym.st_shndx = SHN_XINDEX;
+ if (!shndx_data) {
+ WARN("no .symtab_shndx");
+ return -1;
+ }
+ }
+
+ if (!gelf_update_symshndx(symtab_data, shndx_data, idx, &sym->sym, shndx)) {
+ WARN_ELF("gelf_update_symshndx");
+ return -1;
+ }
+
+ return 0;
+}
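A small sketch of the index walk elf_update_symbol() performs because gelf_update_sym*() wants a data-block-relative index rather than the global symbol index (block sizes here are invented for illustration):

#include <stdio.h>

int main(void)
{
        size_t block_entries[] = { 24, 24, 8 }; /* entries per Elf_Data block (assumed) */
        int idx = 52;                           /* global symbol index */

        for (int b = 0; b < 3; b++) {
                if ((size_t)idx < block_entries[b]) {
                        printf("symbol 52 lives in block %d at local index %d\n", b, idx);
                        return 0;
                }
                idx -= block_entries[b];
        }
        printf("index past the last block: a new data block would be needed\n");
        return 0;
}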
+
+static struct symbol *
+__elf_create_symbol(struct elf *elf, struct symbol *sym)
+{
+ struct section *symtab, *symtab_shndx;
+ Elf32_Word first_non_local, new_idx;
+ struct symbol *old;
+
+ symtab = find_section_by_name(elf, ".symtab");
+ if (symtab) {
+ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+ } else {
+ WARN("no .symtab");
+ return NULL;
+ }
+
+ new_idx = symtab->sh.sh_size / symtab->sh.sh_entsize;
+
+ if (GELF_ST_BIND(sym->sym.st_info) != STB_LOCAL)
+ goto non_local;
+
+ /*
+ * Move the first global symbol, as per sh_info, into a new, higher
+ * symbol index. This frees up a spot for a new local symbol.
+ */
+ first_non_local = symtab->sh.sh_info;
+ old = find_symbol_by_index(elf, first_non_local);
+ if (old) {
+ old->idx = new_idx;
+
+ hlist_del(&old->hash);
+ elf_hash_add(symbol, &old->hash, old->idx);
+
+ elf_dirty_reloc_sym(elf, old);
+
+ if (elf_update_symbol(elf, symtab, symtab_shndx, old)) {
+ WARN("elf_update_symbol move");
+ return NULL;
+ }
+
+ new_idx = first_non_local;
+ }
+
+ /*
+ * Either way, we will add a LOCAL symbol.
+ */
+ symtab->sh.sh_info += 1;
+
+non_local:
+ sym->idx = new_idx;
+ if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) {
+ WARN("elf_update_symbol");
+ return NULL;
+ }
+
+ symtab->sh.sh_size += symtab->sh.sh_entsize;
+ symtab->changed = true;
+
+ if (symtab_shndx) {
+ symtab_shndx->sh.sh_size += sizeof(Elf32_Word);
+ symtab_shndx->changed = true;
+ }
+
+ return sym;
+}
+
+static struct symbol *
+elf_create_section_symbol(struct elf *elf, struct section *sec)
+{
+ struct symbol *sym = calloc(1, sizeof(*sym));
+
+ if (!sym) {
+ perror("malloc");
+ return NULL;
+ }
+
+ sym->name = sec->name;
+ sym->sec = sec;
+
+ // st_name 0
+ sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
+ // st_other 0
+ // st_value 0
+ // st_size 0
+
+ sym = __elf_create_symbol(elf, sym);
+ if (sym)
+ elf_add_symbol(elf, sym);
+
+ return sym;
+}
+
+static int elf_add_string(struct elf *elf, struct section *strtab, char *str);
+
+struct symbol *
+elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size)
+{
+ struct symbol *sym = calloc(1, sizeof(*sym));
+ size_t namelen = strlen(orig->name) + sizeof("__pfx_");
+ char *name = malloc(namelen);
+
+ if (!sym || !name) {
+ perror("malloc");
+ return NULL;
+ }
+
+ snprintf(name, namelen, "__pfx_%s", orig->name);
+
+ sym->name = name;
+ sym->sec = orig->sec;
+
+ sym->sym.st_name = elf_add_string(elf, NULL, name);
+ sym->sym.st_info = orig->sym.st_info;
+ sym->sym.st_value = orig->sym.st_value - size;
+ sym->sym.st_size = size;
+
+ sym = __elf_create_symbol(elf, sym);
+ if (sym)
+ elf_add_symbol(elf, sym);
+
+ return sym;
+}
+
+int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int type,
+ struct section *insn_sec, unsigned long insn_off)
+{
+ struct symbol *sym = insn_sec->sym;
+ int addend = insn_off;
+
+ if (!sym) {
+ /*
+ * Due to how weak functions work, we must use section based
+ * relocations. Symbol based relocations would result in the
+ * weak and non-weak function annotations being overlaid on the
+ * non-weak function after linking.
+ */
+ sym = elf_create_section_symbol(elf, insn_sec);
+ if (!sym)
+ return -1;
+
+ insn_sec->sym = sym;
+ }
+
+ return elf_add_reloc(elf, sec, offset, type, sym, addend);
+}
+
+static int read_rel_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
+{
+ if (!gelf_getrel(sec->data, i, &reloc->rel)) {
+ WARN_ELF("gelf_getrel");
+ return -1;
+ }
+ reloc->type = GELF_R_TYPE(reloc->rel.r_info);
+ reloc->addend = 0;
+ reloc->offset = reloc->rel.r_offset;
+ *symndx = GELF_R_SYM(reloc->rel.r_info);
+ return 0;
+}
+
+static int read_rela_reloc(struct section *sec, int i, struct reloc *reloc, unsigned int *symndx)
+{
+ if (!gelf_getrela(sec->data, i, &reloc->rela)) {
+ WARN_ELF("gelf_getrela");
+ return -1;
+ }
+ reloc->type = GELF_R_TYPE(reloc->rela.r_info);
+ reloc->addend = reloc->rela.r_addend;
+ reloc->offset = reloc->rela.r_offset;
+ *symndx = GELF_R_SYM(reloc->rela.r_info);
+ return 0;
+}
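The practical difference the two readers above handle, shown with hypothetical relocation entries: SHT_RELA records an explicit addend, SHT_REL does not, so read_rel_reloc() sets the addend to 0.

#include <stdio.h>
#include <stdint.h>

struct rel  { uint64_t r_offset, r_info; };
struct rela { uint64_t r_offset, r_info; int64_t r_addend; };

int main(void)
{
        struct rel  r  = { 0x40, 0x2a0000000bULL };       /* made-up entry */
        struct rela ra = { 0x40, 0x2a0000000bULL, -4 };   /* made-up entry */

        printf("REL:  offset=%#llx addend=0 (implicit)\n",
               (unsigned long long)r.r_offset);
        printf("RELA: offset=%#llx addend=%lld\n",
               (unsigned long long)ra.r_offset, (long long)ra.r_addend);
        return 0;
}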
+
+static int read_relocs(struct elf *elf)
{
+ unsigned long nr_reloc, max_reloc = 0, tot_reloc = 0;
struct section *sec;
- struct rela *rela;
- int i;
+ struct reloc *reloc;
unsigned int symndx;
- unsigned long nr_rela, max_rela = 0, tot_rela = 0;
+ struct symbol *sym;
+ int i;
+
+ if (!elf_alloc_hash(reloc, elf->text_size / 16))
+ return -1;
list_for_each_entry(sec, &elf->sections, list) {
- if (sec->sh.sh_type != SHT_RELA)
+ if ((sec->sh.sh_type != SHT_RELA) &&
+ (sec->sh.sh_type != SHT_REL))
continue;
- sec->base = find_section_by_name(elf, sec->name + 5);
+ sec->base = find_section_by_index(elf, sec->sh.sh_info);
if (!sec->base) {
- WARN("can't find base section for rela section %s",
+ WARN("can't find base section for reloc section %s",
sec->name);
return -1;
}
- sec->base->rela = sec;
+ sec->base->reloc = sec;
- nr_rela = 0;
+ nr_reloc = 0;
+ sec->reloc_data = calloc(sec->sh.sh_size / sec->sh.sh_entsize, sizeof(*reloc));
+ if (!sec->reloc_data) {
+ perror("calloc");
+ return -1;
+ }
for (i = 0; i < sec->sh.sh_size / sec->sh.sh_entsize; i++) {
- rela = malloc(sizeof(*rela));
- if (!rela) {
- perror("malloc");
- return -1;
+ reloc = &sec->reloc_data[i];
+ switch (sec->sh.sh_type) {
+ case SHT_REL:
+ if (read_rel_reloc(sec, i, reloc, &symndx))
+ return -1;
+ break;
+ case SHT_RELA:
+ if (read_rela_reloc(sec, i, reloc, &symndx))
+ return -1;
+ break;
+ default: return -1;
}
- memset(rela, 0, sizeof(*rela));
- if (!gelf_getrela(sec->data, i, &rela->rela)) {
- WARN_ELF("gelf_getrela");
- return -1;
- }
-
- rela->type = GELF_R_TYPE(rela->rela.r_info);
- rela->addend = rela->rela.r_addend;
- rela->offset = rela->rela.r_offset;
- symndx = GELF_R_SYM(rela->rela.r_info);
- rela->sym = find_symbol_by_index(elf, symndx);
- rela->sec = sec;
- if (!rela->sym) {
- WARN("can't find rela entry symbol %d for %s",
+ reloc->sec = sec;
+ reloc->idx = i;
+ reloc->sym = sym = find_symbol_by_index(elf, symndx);
+ if (!reloc->sym) {
+ WARN("can't find reloc entry symbol %d for %s",
symndx, sec->name);
return -1;
}
- list_add_tail(&rela->list, &sec->rela_list);
- hash_add(elf->rela_hash, &rela->hash, rela_hash(rela));
- nr_rela++;
+ list_add_tail(&reloc->sym_reloc_entry, &sym->reloc_list);
+ list_add_tail(&reloc->list, &sec->reloc_list);
+ elf_hash_add(reloc, &reloc->hash, reloc_hash(reloc));
+
+ nr_reloc++;
}
- max_rela = max(max_rela, nr_rela);
- tot_rela += nr_rela;
+ max_reloc = max(max_reloc, nr_reloc);
+ tot_reloc += nr_reloc;
}
- if (stats) {
- printf("max_rela: %lu\n", max_rela);
- printf("tot_rela: %lu\n", tot_rela);
+ if (opts.stats) {
+ printf("max_reloc: %lu\n", max_reloc);
+ printf("tot_reloc: %lu\n", tot_reloc);
+ printf("reloc_bits: %d\n", elf->reloc_bits);
}
return 0;
}
-struct elf *elf_read(const char *name, int flags)
+struct elf *elf_open_read(const char *name, int flags)
{
struct elf *elf;
Elf_Cmd cmd;
@@ -531,13 +977,8 @@ struct elf *elf_read(const char *name, int flags)
perror("malloc");
return NULL;
}
- memset(elf, 0, sizeof(*elf));
+ memset(elf, 0, offsetof(struct elf, sections));
- hash_init(elf->symbol_hash);
- hash_init(elf->symbol_name_hash);
- hash_init(elf->section_hash);
- hash_init(elf->section_name_hash);
- hash_init(elf->rela_hash);
INIT_LIST_HEAD(&elf->sections);
elf->fd = open(name, flags);
@@ -571,7 +1012,7 @@ struct elf *elf_read(const char *name, int flags)
if (read_symbols(elf))
goto err;
- if (read_relas(elf))
+ if (read_relocs(elf))
goto err;
return elf;
@@ -581,13 +1022,48 @@ err:
return NULL;
}
+static int elf_add_string(struct elf *elf, struct section *strtab, char *str)
+{
+ Elf_Data *data;
+ Elf_Scn *s;
+ int len;
+
+ if (!strtab)
+ strtab = find_section_by_name(elf, ".strtab");
+ if (!strtab) {
+ WARN("can't find .strtab section");
+ return -1;
+ }
+
+ s = elf_getscn(elf->elf, strtab->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ data = elf_newdata(s);
+ if (!data) {
+ WARN_ELF("elf_newdata");
+ return -1;
+ }
+
+ data->d_buf = str;
+ data->d_size = strlen(str) + 1;
+ data->d_align = 1;
+
+ len = strtab->sh.sh_size;
+ strtab->sh.sh_size += data->d_size;
+ strtab->changed = true;
+
+ return len;
+}
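What elf_add_string() returns is simply the previous sh_size, which becomes the appended string's offset into the string table; a tiny standalone model of that append (buffer and contents invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char strtab[64] = "\0.text";            /* existing table: "\0.text\0" */
        size_t sh_size = 7;                     /* current size of the table   */
        const char *str = "__pfx_my_func";
        size_t name_off = sh_size;              /* what elf_add_string() returns */

        memcpy(strtab + sh_size, str, strlen(str) + 1);
        sh_size += strlen(str) + 1;

        printf("st_name offset %zu -> \"%s\"\n", name_off, strtab + name_off);
        return 0;
}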
+
struct section *elf_create_section(struct elf *elf, const char *name,
- size_t entsize, int nr)
+ unsigned int sh_flags, size_t entsize, int nr)
{
struct section *sec, *shstrtab;
size_t size = entsize * nr;
Elf_Scn *s;
- Elf_Data *data;
sec = malloc(sizeof(*sec));
if (!sec) {
@@ -597,7 +1073,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
memset(sec, 0, sizeof(*sec));
INIT_LIST_HEAD(&sec->symbol_list);
- INIT_LIST_HEAD(&sec->rela_list);
+ INIT_LIST_HEAD(&sec->reloc_list);
s = elf_newscn(elf->elf);
if (!s) {
@@ -612,7 +1088,6 @@ struct section *elf_create_section(struct elf *elf, const char *name,
}
sec->idx = elf_ndxscn(s);
- sec->len = size;
sec->changed = true;
sec->data = elf_newdata(s);
@@ -642,8 +1117,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->sh.sh_entsize = entsize;
sec->sh.sh_type = SHT_PROGBITS;
sec->sh.sh_addralign = 1;
- sec->sh.sh_flags = SHF_ALLOC;
-
+ sec->sh.sh_flags = SHF_ALLOC | sh_flags;
/* Add section name to .shstrtab (or .strtab for Clang) */
shstrtab = find_section_by_name(elf, ".shstrtab");
@@ -653,58 +1127,76 @@ struct section *elf_create_section(struct elf *elf, const char *name,
WARN("can't find .shstrtab or .strtab section");
return NULL;
}
-
- s = elf_getscn(elf->elf, shstrtab->idx);
- if (!s) {
- WARN_ELF("elf_getscn");
+ sec->sh.sh_name = elf_add_string(elf, shstrtab, sec->name);
+ if (sec->sh.sh_name == -1)
return NULL;
- }
- data = elf_newdata(s);
- if (!data) {
- WARN_ELF("elf_newdata");
+ list_add_tail(&sec->list, &elf->sections);
+ elf_hash_add(section, &sec->hash, sec->idx);
+ elf_hash_add(section_name, &sec->name_hash, str_hash(sec->name));
+
+ elf->changed = true;
+
+ return sec;
+}
+
+static struct section *elf_create_rel_reloc_section(struct elf *elf, struct section *base)
+{
+ char *relocname;
+ struct section *sec;
+
+ relocname = malloc(strlen(base->name) + strlen(".rel") + 1);
+ if (!relocname) {
+ perror("malloc");
return NULL;
}
+ strcpy(relocname, ".rel");
+ strcat(relocname, base->name);
- data->d_buf = sec->name;
- data->d_size = strlen(name) + 1;
- data->d_align = 1;
-
- sec->sh.sh_name = shstrtab->len;
+ sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rel), 0);
+ free(relocname);
+ if (!sec)
+ return NULL;
- shstrtab->len += strlen(name) + 1;
- shstrtab->changed = true;
+ base->reloc = sec;
+ sec->base = base;
- list_add_tail(&sec->list, &elf->sections);
- hash_add(elf->section_hash, &sec->hash, sec->idx);
- hash_add(elf->section_name_hash, &sec->name_hash, str_hash(sec->name));
+ sec->sh.sh_type = SHT_REL;
+ sec->sh.sh_addralign = 8;
+ sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
+ sec->sh.sh_info = base->idx;
+ sec->sh.sh_flags = SHF_INFO_LINK;
return sec;
}
-struct section *elf_create_rela_section(struct elf *elf, struct section *base)
+static struct section *elf_create_rela_reloc_section(struct elf *elf, struct section *base)
{
- char *relaname;
+ char *relocname;
struct section *sec;
+ int addrsize = elf_class_addrsize(elf);
- relaname = malloc(strlen(base->name) + strlen(".rela") + 1);
- if (!relaname) {
+ relocname = malloc(strlen(base->name) + strlen(".rela") + 1);
+ if (!relocname) {
perror("malloc");
return NULL;
}
- strcpy(relaname, ".rela");
- strcat(relaname, base->name);
-
- sec = elf_create_section(elf, relaname, sizeof(GElf_Rela), 0);
- free(relaname);
+ strcpy(relocname, ".rela");
+ strcat(relocname, base->name);
+
+ if (addrsize == sizeof(u32))
+ sec = elf_create_section(elf, relocname, 0, sizeof(Elf32_Rela), 0);
+ else
+ sec = elf_create_section(elf, relocname, 0, sizeof(GElf_Rela), 0);
+ free(relocname);
if (!sec)
return NULL;
- base->rela = sec;
+ base->reloc = sec;
sec->base = base;
sec->sh.sh_type = SHT_RELA;
- sec->sh.sh_addralign = 8;
+ sec->sh.sh_addralign = addrsize;
sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx;
sec->sh.sh_info = base->idx;
sec->sh.sh_flags = SHF_INFO_LINK;
@@ -712,46 +1204,203 @@ struct section *elf_create_rela_section(struct elf *elf, struct section *base)
return sec;
}
-int elf_rebuild_rela_section(struct section *sec)
+static struct section *elf_create_reloc_section(struct elf *elf,
+ struct section *base,
+ int reltype)
{
- struct rela *rela;
- int nr, idx = 0, size;
- GElf_Rela *relas;
+ switch (reltype) {
+ case SHT_REL: return elf_create_rel_reloc_section(elf, base);
+ case SHT_RELA: return elf_create_rela_reloc_section(elf, base);
+ default: return NULL;
+ }
+}
- nr = 0;
- list_for_each_entry(rela, &sec->rela_list, list)
- nr++;
+static int elf_rebuild_rel_reloc_section(struct section *sec)
+{
+ struct reloc *reloc;
+ int idx = 0;
+ void *buf;
- size = nr * sizeof(*relas);
- relas = malloc(size);
- if (!relas) {
+ /* Allocate a buffer for relocations */
+ buf = malloc(sec->sh.sh_size);
+ if (!buf) {
perror("malloc");
return -1;
}
- sec->data->d_buf = relas;
- sec->data->d_size = size;
+ sec->data->d_buf = buf;
+ sec->data->d_size = sec->sh.sh_size;
+ sec->data->d_type = ELF_T_REL;
- sec->sh.sh_size = size;
+ idx = 0;
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ reloc->rel.r_offset = reloc->offset;
+ reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
+ if (!gelf_update_rel(sec->data, idx, &reloc->rel)) {
+ WARN_ELF("gelf_update_rel");
+ return -1;
+ }
+ idx++;
+ }
+
+ return 0;
+}
+
+static int elf_rebuild_rela_reloc_section(struct section *sec)
+{
+ struct reloc *reloc;
+ int idx = 0;
+ void *buf;
+
+ /* Allocate a buffer for relocations with addends */
+ buf = malloc(sec->sh.sh_size);
+ if (!buf) {
+ perror("malloc");
+ return -1;
+ }
+
+ sec->data->d_buf = buf;
+ sec->data->d_size = sec->sh.sh_size;
+ sec->data->d_type = ELF_T_RELA;
idx = 0;
- list_for_each_entry(rela, &sec->rela_list, list) {
- relas[idx].r_offset = rela->offset;
- relas[idx].r_addend = rela->addend;
- relas[idx].r_info = GELF_R_INFO(rela->sym->idx, rela->type);
+ list_for_each_entry(reloc, &sec->reloc_list, list) {
+ reloc->rela.r_offset = reloc->offset;
+ reloc->rela.r_addend = reloc->addend;
+ reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
+ if (!gelf_update_rela(sec->data, idx, &reloc->rela)) {
+ WARN_ELF("gelf_update_rela");
+ return -1;
+ }
idx++;
}
return 0;
}
+static int elf_rebuild_reloc_section(struct elf *elf, struct section *sec)
+{
+ switch (sec->sh.sh_type) {
+ case SHT_REL: return elf_rebuild_rel_reloc_section(sec);
+ case SHT_RELA: return elf_rebuild_rela_reloc_section(sec);
+ default: return -1;
+ }
+}
+
+int elf_write_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int len,
+ const char *insn)
+{
+ Elf_Data *data = sec->data;
+
+ if (data->d_type != ELF_T_BYTE || data->d_off) {
+ WARN("write to unexpected data for section: %s", sec->name);
+ return -1;
+ }
+
+ memcpy(data->d_buf + offset, insn, len);
+ elf_flagdata(data, ELF_C_SET, ELF_F_DIRTY);
+
+ elf->changed = true;
+
+ return 0;
+}
+
+int elf_write_reloc(struct elf *elf, struct reloc *reloc)
+{
+ struct section *sec = reloc->sec;
+
+ if (sec->sh.sh_type == SHT_REL) {
+ reloc->rel.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
+ reloc->rel.r_offset = reloc->offset;
+
+ if (!gelf_update_rel(sec->data, reloc->idx, &reloc->rel)) {
+ WARN_ELF("gelf_update_rel");
+ return -1;
+ }
+ } else {
+ reloc->rela.r_info = GELF_R_INFO(reloc->sym->idx, reloc->type);
+ reloc->rela.r_addend = reloc->addend;
+ reloc->rela.r_offset = reloc->offset;
+
+ if (!gelf_update_rela(sec->data, reloc->idx, &reloc->rela)) {
+ WARN_ELF("gelf_update_rela");
+ return -1;
+ }
+ }
+
+ elf->changed = true;
+
+ return 0;
+}
+
+/*
+ * When Elf_Scn::sh_size is smaller than the combined Elf_Data::d_size
+ * do you:
+ *
+ * A) adhere to the section header and truncate the data, or
+ * B) ignore the section header and write out all the data you've got?
+ *
+ * Yes, libelf sucks and we need to manually truncate if we over-allocate data.
+ */
+static int elf_truncate_section(struct elf *elf, struct section *sec)
+{
+ u64 size = sec->sh.sh_size;
+ bool truncated = false;
+ Elf_Data *data = NULL;
+ Elf_Scn *s;
+
+ s = elf_getscn(elf->elf, sec->idx);
+ if (!s) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ for (;;) {
+ /* get next data descriptor for the relevant section */
+ data = elf_getdata(s, data);
+
+ if (!data) {
+ if (size) {
+ WARN("end of section data but non-zero size left\n");
+ return -1;
+ }
+ return 0;
+ }
+
+ if (truncated) {
+ /* when we remove symbols */
+ WARN("truncated; but more data\n");
+ return -1;
+ }
+
+ if (!data->d_size) {
+ WARN("zero size data");
+ return -1;
+ }
+
+ if (data->d_size > size) {
+ truncated = true;
+ data->d_size = size;
+ }
+
+ size -= data->d_size;
+ }
+}
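A compact sketch of the truncation rule elf_truncate_section() applies: walk the Elf_Data blocks, cap the block that crosses sh_size, and treat any data beyond the cap as over-allocation (sizes invented):

#include <stdio.h>

int main(void)
{
        size_t d_sizes[] = { 96, 64 };  /* per-block d_size (assumed) */
        size_t sh_size = 120;           /* the section header's size  */

        for (int i = 0; i < 2; i++) {
                size_t keep = d_sizes[i] > sh_size ? sh_size : d_sizes[i];

                printf("block %d: d_size %zu -> keep %zu\n", i, d_sizes[i], keep);
                sh_size -= keep;
        }
        printf("bytes left unaccounted: %zu\n", sh_size);
        return 0;
}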
+
int elf_write(struct elf *elf)
{
struct section *sec;
Elf_Scn *s;
- /* Update section headers for changed sections: */
+ if (opts.dryrun)
+ return 0;
+
+ /* Update changed relocation sections and section headers: */
list_for_each_entry(sec, &elf->sections, list) {
+ if (sec->truncate)
+ elf_truncate_section(elf, sec);
+
if (sec->changed) {
s = elf_getscn(elf->elf, sec->idx);
if (!s) {
@@ -762,6 +1411,15 @@ int elf_write(struct elf *elf)
WARN_ELF("gelf_update_shdr");
return -1;
}
+
+ if (sec->base &&
+ elf_rebuild_reloc_section(elf, sec)) {
+ WARN("elf_rebuild_reloc_section");
+ return -1;
+ }
+
+ sec->changed = false;
+ elf->changed = true;
}
}
@@ -774,6 +1432,8 @@ int elf_write(struct elf *elf)
return -1;
}
+ elf->changed = false;
+
return 0;
}
@@ -781,7 +1441,7 @@ void elf_close(struct elf *elf)
{
struct section *sec, *tmpsec;
struct symbol *sym, *tmpsym;
- struct rela *rela, *tmprela;
+ struct reloc *reloc, *tmpreloc;
if (elf->elf)
elf_end(elf->elf);
@@ -793,16 +1453,16 @@ void elf_close(struct elf *elf)
list_for_each_entry_safe(sym, tmpsym, &sec->symbol_list, list) {
list_del(&sym->list);
hash_del(&sym->hash);
- free(sym);
}
- list_for_each_entry_safe(rela, tmprela, &sec->rela_list, list) {
- list_del(&rela->list);
- hash_del(&rela->hash);
- free(rela);
+ list_for_each_entry_safe(reloc, tmpreloc, &sec->reloc_list, list) {
+ list_del(&reloc->list);
+ hash_del(&reloc->hash);
}
list_del(&sec->list);
- free(sec);
+ free(sec->reloc_data);
}
+ free(elf->symbol_data);
+ free(elf->section_data);
free(elf);
}
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
deleted file mode 100644
index ebbb10c61e24..000000000000
--- a/tools/objtool/elf.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
- */
-
-#ifndef _OBJTOOL_ELF_H
-#define _OBJTOOL_ELF_H
-
-#include <stdio.h>
-#include <gelf.h>
-#include <linux/list.h>
-#include <linux/hashtable.h>
-#include <linux/rbtree.h>
-#include <linux/jhash.h>
-
-#ifdef LIBELF_USE_DEPRECATED
-# define elf_getshdrnum elf_getshnum
-# define elf_getshdrstrndx elf_getshstrndx
-#endif
-
-/*
- * Fallback for systems without this "read, mmaping if possible" cmd.
- */
-#ifndef ELF_C_READ_MMAP
-#define ELF_C_READ_MMAP ELF_C_READ
-#endif
-
-struct section {
- struct list_head list;
- struct hlist_node hash;
- struct hlist_node name_hash;
- GElf_Shdr sh;
- struct rb_root symbol_tree;
- struct list_head symbol_list;
- struct list_head rela_list;
- struct section *base, *rela;
- struct symbol *sym;
- Elf_Data *data;
- char *name;
- int idx;
- unsigned int len;
- bool changed, text, rodata;
-};
-
-struct symbol {
- struct list_head list;
- struct rb_node node;
- struct hlist_node hash;
- struct hlist_node name_hash;
- GElf_Sym sym;
- struct section *sec;
- char *name;
- unsigned int idx;
- unsigned char bind, type;
- unsigned long offset;
- unsigned int len;
- struct symbol *pfunc, *cfunc, *alias;
- bool uaccess_safe;
-};
-
-struct rela {
- struct list_head list;
- struct hlist_node hash;
- GElf_Rela rela;
- struct section *sec;
- struct symbol *sym;
- unsigned int type;
- unsigned long offset;
- int addend;
- bool jump_table_start;
-};
-
-struct elf {
- Elf *elf;
- GElf_Ehdr ehdr;
- int fd;
- char *name;
- struct list_head sections;
- DECLARE_HASHTABLE(symbol_hash, 20);
- DECLARE_HASHTABLE(symbol_name_hash, 20);
- DECLARE_HASHTABLE(section_hash, 16);
- DECLARE_HASHTABLE(section_name_hash, 16);
- DECLARE_HASHTABLE(rela_hash, 20);
-};
-
-#define OFFSET_STRIDE_BITS 4
-#define OFFSET_STRIDE (1UL << OFFSET_STRIDE_BITS)
-#define OFFSET_STRIDE_MASK (~(OFFSET_STRIDE - 1))
-
-#define for_offset_range(_offset, _start, _end) \
- for (_offset = ((_start) & OFFSET_STRIDE_MASK); \
- _offset <= ((_end) & OFFSET_STRIDE_MASK); \
- _offset += OFFSET_STRIDE)
-
-static inline u32 sec_offset_hash(struct section *sec, unsigned long offset)
-{
- u32 ol, oh, idx = sec->idx;
-
- offset &= OFFSET_STRIDE_MASK;
-
- ol = offset;
- oh = offset >> 32;
-
- __jhash_mix(ol, oh, idx);
-
- return ol;
-}
-
-static inline u32 rela_hash(struct rela *rela)
-{
- return sec_offset_hash(rela->sec, rela->offset);
-}
-
-struct elf *elf_read(const char *name, int flags);
-struct section *find_section_by_name(struct elf *elf, const char *name);
-struct symbol *find_func_by_offset(struct section *sec, unsigned long offset);
-struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
-struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
-struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
-struct rela *find_rela_by_dest(struct elf *elf, struct section *sec, unsigned long offset);
-struct rela *find_rela_by_dest_range(struct elf *elf, struct section *sec,
- unsigned long offset, unsigned int len);
-struct symbol *find_func_containing(struct section *sec, unsigned long offset);
-struct section *elf_create_section(struct elf *elf, const char *name, size_t
- entsize, int nr);
-struct section *elf_create_rela_section(struct elf *elf, struct section *base);
-int elf_rebuild_rela_section(struct section *sec);
-int elf_write(struct elf *elf);
-void elf_close(struct elf *elf);
-
-#define for_each_sec(file, sec) \
- list_for_each_entry(sec, &file->elf->sections, list)
-
-#endif /* _OBJTOOL_ELF_H */
diff --git a/tools/objtool/arch.h b/tools/objtool/include/objtool/arch.h
index ced3765c4f44..2b6d2ce4f9a5 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/include/objtool/arch.h
@@ -8,8 +8,8 @@
#include <stdbool.h>
#include <linux/list.h>
-#include "elf.h"
-#include "cfi.h"
+#include <objtool/objtool.h>
+#include <objtool/cfi.h>
enum insn_type {
INSN_JUMP_CONDITIONAL,
@@ -20,13 +20,14 @@ enum insn_type {
INSN_CALL_DYNAMIC,
INSN_RETURN,
INSN_CONTEXT_SWITCH,
- INSN_STACK,
INSN_BUG,
INSN_NOP,
INSN_STAC,
INSN_CLAC,
INSN_STD,
INSN_CLD,
+ INSN_TRAP,
+ INSN_ENDBR,
INSN_OTHER,
};
@@ -36,7 +37,6 @@ enum op_dest_type {
OP_DEST_MEM,
OP_DEST_PUSH,
OP_DEST_PUSHF,
- OP_DEST_LEAVE,
};
struct op_dest {
@@ -62,17 +62,37 @@ struct op_src {
};
struct stack_op {
+ struct stack_op *next;
struct op_dest dest;
struct op_src src;
};
-void arch_initial_func_cfi_state(struct cfi_state *state);
+struct instruction;
-int arch_decode_instruction(struct elf *elf, struct section *sec,
+int arch_ftrace_match(char *name);
+
+void arch_initial_func_cfi_state(struct cfi_init_state *state);
+
+int arch_decode_instruction(struct objtool_file *file, const struct section *sec,
unsigned long offset, unsigned int maxlen,
- unsigned int *len, enum insn_type *type,
- unsigned long *immediate, struct stack_op *op);
+ struct instruction *insn);
bool arch_callee_saved_reg(unsigned char reg);
+unsigned long arch_jump_destination(struct instruction *insn);
+
+unsigned long arch_dest_reloc_offset(int addend);
+
+const char *arch_nop_insn(int len);
+const char *arch_ret_insn(int len);
+
+int arch_decode_hint_reg(u8 sp_reg, int *base);
+
+bool arch_is_retpoline(struct symbol *sym);
+bool arch_is_rethunk(struct symbol *sym);
+
+int arch_rewrite_retpolines(struct objtool_file *file);
+
+bool arch_pc_relative_reloc(struct reloc *reloc);
+
#endif /* _ARCH_H */
diff --git a/tools/objtool/include/objtool/builtin.h b/tools/objtool/include/objtool/builtin.h
new file mode 100644
index 000000000000..2a108e648b7a
--- /dev/null
+++ b/tools/objtool/include/objtool/builtin.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
+ */
+#ifndef _BUILTIN_H
+#define _BUILTIN_H
+
+#include <subcmd/parse-options.h>
+
+struct opts {
+ /* actions: */
+ bool dump_orc;
+ bool hack_jump_label;
+ bool hack_noinstr;
+ bool hack_skylake;
+ bool ibt;
+ bool mcount;
+ bool noinstr;
+ bool orc;
+ bool retpoline;
+ bool rethunk;
+ bool unret;
+ bool sls;
+ bool stackval;
+ bool static_call;
+ bool uaccess;
+ int prefix;
+ bool cfi;
+
+ /* options: */
+ bool backtrace;
+ bool backup;
+ bool dryrun;
+ bool link;
+ bool mnop;
+ bool module;
+ bool no_unreachable;
+ bool sec_address;
+ bool stats;
+};
+
+extern struct opts opts;
+
+extern int cmd_parse_options(int argc, const char **argv, const char * const usage[]);
+
+extern int objtool_run(int argc, const char **argv);
+
+#endif /* _BUILTIN_H */
diff --git a/tools/objtool/cfi.h b/tools/objtool/include/objtool/cfi.h
index 4427bf8ed686..b1258e79a1b7 100644
--- a/tools/objtool/cfi.h
+++ b/tools/objtool/include/objtool/cfi.h
@@ -6,38 +6,36 @@
#ifndef _OBJTOOL_CFI_H
#define _OBJTOOL_CFI_H
+#include <arch/cfi_regs.h>
+#include <linux/list.h>
+
#define CFI_UNDEFINED -1
#define CFI_CFA -2
#define CFI_SP_INDIRECT -3
#define CFI_BP_INDIRECT -4
-#define CFI_AX 0
-#define CFI_DX 1
-#define CFI_CX 2
-#define CFI_BX 3
-#define CFI_SI 4
-#define CFI_DI 5
-#define CFI_BP 6
-#define CFI_SP 7
-#define CFI_R8 8
-#define CFI_R9 9
-#define CFI_R10 10
-#define CFI_R11 11
-#define CFI_R12 12
-#define CFI_R13 13
-#define CFI_R14 14
-#define CFI_R15 15
-#define CFI_RA 16
-#define CFI_NUM_REGS 17
-
struct cfi_reg {
int base;
int offset;
};
-struct cfi_state {
+struct cfi_init_state {
+ struct cfi_reg regs[CFI_NUM_REGS];
struct cfi_reg cfa;
+};
+
+struct cfi_state {
+ struct hlist_node hash; /* must be first, cficmp() */
struct cfi_reg regs[CFI_NUM_REGS];
+ struct cfi_reg vals[CFI_NUM_REGS];
+ struct cfi_reg cfa;
+ int stack_size;
+ int drap_reg, drap_offset;
+ unsigned char type;
+ bool bp_scratch;
+ bool drap;
+ bool signal;
+ bool end;
};
#endif /* _OBJTOOL_CFI_H */
diff --git a/tools/objtool/include/objtool/check.h b/tools/objtool/include/objtool/check.h
new file mode 100644
index 000000000000..daa46f1f0965
--- /dev/null
+++ b/tools/objtool/include/objtool/check.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ */
+
+#ifndef _CHECK_H
+#define _CHECK_H
+
+#include <stdbool.h>
+#include <objtool/cfi.h>
+#include <objtool/arch.h>
+
+struct insn_state {
+ struct cfi_state cfi;
+ unsigned int uaccess_stack;
+ bool uaccess;
+ bool df;
+ bool noinstr;
+ s8 instr;
+};
+
+struct alt_group {
+ /*
+ * Pointer from a replacement group to the original group. NULL if it
+ * *is* the original group.
+ */
+ struct alt_group *orig_group;
+
+ /* First and last instructions in the group */
+ struct instruction *first_insn, *last_insn, *nop;
+
+ /*
+ * Byte-offset-addressed len-sized array of pointers to CFI structs.
+ * This is shared with the other alt_groups in the same alternative.
+ */
+ struct cfi_state **cfi;
+};
+
+#define INSN_CHUNK_BITS 8
+#define INSN_CHUNK_SIZE (1 << INSN_CHUNK_BITS)
+#define INSN_CHUNK_MAX (INSN_CHUNK_SIZE - 1)
+
+struct instruction {
+ struct hlist_node hash;
+ struct list_head call_node;
+ struct section *sec;
+ unsigned long offset;
+ unsigned long immediate;
+
+ u8 len;
+ u8 prev_len;
+ u8 type;
+ s8 instr;
+
+ u32 idx : INSN_CHUNK_BITS,
+ dead_end : 1,
+ ignore : 1,
+ ignore_alts : 1,
+ hint : 1,
+ save : 1,
+ restore : 1,
+ retpoline_safe : 1,
+ noendbr : 1,
+ unret : 1,
+ visited : 4,
+ no_reloc : 1;
+ /* 10 bit hole */
+
+ struct alt_group *alt_group;
+ struct instruction *jump_dest;
+ struct instruction *first_jump_src;
+ union {
+ struct symbol *_call_dest;
+ struct reloc *_jump_table;
+ };
+ struct alternative *alts;
+ struct symbol *sym;
+ struct stack_op *stack_ops;
+ struct cfi_state *cfi;
+};
+
+static inline struct symbol *insn_func(struct instruction *insn)
+{
+ struct symbol *sym = insn->sym;
+
+ if (sym && sym->type != STT_FUNC)
+ sym = NULL;
+
+ return sym;
+}
+
+#define VISITED_BRANCH 0x01
+#define VISITED_BRANCH_UACCESS 0x02
+#define VISITED_BRANCH_MASK 0x03
+#define VISITED_UNRET 0x04
+
+static inline bool is_static_jump(struct instruction *insn)
+{
+ return insn->type == INSN_JUMP_CONDITIONAL ||
+ insn->type == INSN_JUMP_UNCONDITIONAL;
+}
+
+static inline bool is_dynamic_jump(struct instruction *insn)
+{
+ return insn->type == INSN_JUMP_DYNAMIC ||
+ insn->type == INSN_JUMP_DYNAMIC_CONDITIONAL;
+}
+
+static inline bool is_jump(struct instruction *insn)
+{
+ return is_static_jump(insn) || is_dynamic_jump(insn);
+}
+
+struct instruction *find_insn(struct objtool_file *file,
+ struct section *sec, unsigned long offset);
+
+struct instruction *next_insn_same_sec(struct objtool_file *file, struct instruction *insn);
+
+#define sec_for_each_insn(file, _sec, insn) \
+ for (insn = find_insn(file, _sec, 0); \
+ insn && insn->sec == _sec; \
+ insn = next_insn_same_sec(file, insn))
+
+#endif /* _CHECK_H */
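
A minimal usage sketch of the new iterator and jump helpers above, assuming an
objtool_file whose instructions have already been decoded; count_static_jumps
is a hypothetical name used only for illustration and is not added by this
patch:

#include <stdio.h>
#include <objtool/check.h>
#include <objtool/objtool.h>

/* Count direct (static) jumps in one text section. */
static void count_static_jumps(struct objtool_file *file, struct section *sec)
{
	struct instruction *insn;
	unsigned long nr = 0;

	/* Walk every decoded instruction belonging to this section. */
	sec_for_each_insn(file, sec, insn) {
		if (is_static_jump(insn))
			nr++;
	}

	printf("%s: %lu static jumps\n", sec->name, nr);
}
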
diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
new file mode 100644
index 000000000000..e1ca588eb69d
--- /dev/null
+++ b/tools/objtool/include/objtool/elf.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
+ */
+
+#ifndef _OBJTOOL_ELF_H
+#define _OBJTOOL_ELF_H
+
+#include <stdio.h>
+#include <gelf.h>
+#include <linux/list.h>
+#include <linux/hashtable.h>
+#include <linux/rbtree.h>
+#include <linux/jhash.h>
+
+#ifdef LIBELF_USE_DEPRECATED
+# define elf_getshdrnum elf_getshnum
+# define elf_getshdrstrndx elf_getshstrndx
+#endif
+
+/*
+ * Fallback for systems without this "read, mmaping if possible" cmd.
+ */
+#ifndef ELF_C_READ_MMAP
+#define ELF_C_READ_MMAP ELF_C_READ
+#endif
+
+struct section {
+ struct list_head list;
+ struct hlist_node hash;
+ struct hlist_node name_hash;
+ GElf_Shdr sh;
+ struct rb_root_cached symbol_tree;
+ struct list_head symbol_list;
+ struct list_head reloc_list;
+ struct section *base, *reloc;
+ struct symbol *sym;
+ Elf_Data *data;
+ char *name;
+ int idx;
+ bool changed, text, rodata, noinstr, init, truncate;
+ struct reloc *reloc_data;
+};
+
+struct symbol {
+ struct list_head list;
+ struct rb_node node;
+ struct hlist_node hash;
+ struct hlist_node name_hash;
+ GElf_Sym sym;
+ struct section *sec;
+ char *name;
+ unsigned int idx, len;
+ unsigned long offset;
+ unsigned long __subtree_last;
+ struct symbol *pfunc, *cfunc, *alias;
+ unsigned char bind, type;
+ u8 uaccess_safe : 1;
+ u8 static_call_tramp : 1;
+ u8 retpoline_thunk : 1;
+ u8 return_thunk : 1;
+ u8 fentry : 1;
+ u8 profiling_func : 1;
+ struct list_head pv_target;
+ struct list_head reloc_list;
+};
+
+struct reloc {
+ struct list_head list;
+ struct hlist_node hash;
+ union {
+ GElf_Rela rela;
+ GElf_Rel rel;
+ };
+ struct section *sec;
+ struct symbol *sym;
+ struct list_head sym_reloc_entry;
+ unsigned long offset;
+ unsigned int type;
+ s64 addend;
+ int idx;
+ bool jump_table_start;
+};
+
+#define ELF_HASH_BITS 20
+
+struct elf {
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ int fd;
+ bool changed;
+ char *name;
+ unsigned int text_size, num_files;
+ struct list_head sections;
+
+ int symbol_bits;
+ int symbol_name_bits;
+ int section_bits;
+ int section_name_bits;
+ int reloc_bits;
+
+ struct hlist_head *symbol_hash;
+ struct hlist_head *symbol_name_hash;
+ struct hlist_head *section_hash;
+ struct hlist_head *section_name_hash;
+ struct hlist_head *reloc_hash;
+
+ struct section *section_data;
+ struct symbol *symbol_data;
+};
+
+#define OFFSET_STRIDE_BITS 4
+#define OFFSET_STRIDE (1UL << OFFSET_STRIDE_BITS)
+#define OFFSET_STRIDE_MASK (~(OFFSET_STRIDE - 1))
+
+#define for_offset_range(_offset, _start, _end) \
+ for (_offset = ((_start) & OFFSET_STRIDE_MASK); \
+ _offset >= ((_start) & OFFSET_STRIDE_MASK) && \
+ _offset <= ((_end) & OFFSET_STRIDE_MASK); \
+ _offset += OFFSET_STRIDE)
+
+static inline u32 sec_offset_hash(struct section *sec, unsigned long offset)
+{
+ u32 ol, oh, idx = sec->idx;
+
+ offset &= OFFSET_STRIDE_MASK;
+
+ ol = offset;
+ oh = (offset >> 16) >> 16;
+
+ __jhash_mix(ol, oh, idx);
+
+ return ol;
+}
+
+static inline u32 reloc_hash(struct reloc *reloc)
+{
+ return sec_offset_hash(reloc->sec, reloc->offset);
+}
+
+/*
+ * Try to see if it's a whole archive (vmlinux.o or module).
+ *
+ * Note this will miss the case where a module only has one source file.
+ */
+static inline bool has_multiple_files(struct elf *elf)
+{
+ return elf->num_files > 1;
+}
+
+static inline int elf_class_addrsize(struct elf *elf)
+{
+ if (elf->ehdr.e_ident[EI_CLASS] == ELFCLASS32)
+ return sizeof(u32);
+ else
+ return sizeof(u64);
+}
+
+struct elf *elf_open_read(const char *name, int flags);
+struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
+
+struct symbol *elf_create_prefix_symbol(struct elf *elf, struct symbol *orig, long size);
+
+int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+ unsigned int type, struct symbol *sym, s64 addend);
+int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int type,
+ struct section *insn_sec, unsigned long insn_off);
+
+int elf_write_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int len,
+ const char *insn);
+int elf_write_reloc(struct elf *elf, struct reloc *reloc);
+int elf_write(struct elf *elf);
+void elf_close(struct elf *elf);
+
+struct section *find_section_by_name(const struct elf *elf, const char *name);
+struct symbol *find_func_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
+struct symbol *find_symbol_by_name(const struct elf *elf, const char *name);
+struct symbol *find_symbol_containing(const struct section *sec, unsigned long offset);
+int find_symbol_hole_containing(const struct section *sec, unsigned long offset);
+struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, unsigned long offset);
+struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int len);
+struct symbol *find_func_containing(struct section *sec, unsigned long offset);
+
+#define for_each_sec(file, sec) \
+ list_for_each_entry(sec, &file->elf->sections, list)
+
+#define sec_for_each_sym(sec, sym) \
+ list_for_each_entry(sym, &sec->symbol_list, list)
+
+#define for_each_sym(file, sym) \
+ for (struct section *__sec, *__fake = (struct section *)1; \
+ __fake; __fake = NULL) \
+ for_each_sec(file, __sec) \
+ sec_for_each_sym(__sec, sym)
+
+#endif /* _OBJTOOL_ELF_H */
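
A brief sketch of driving the ELF API above; dump_text_symbols is a
hypothetical helper (not part of the patch), and opening with O_RDONLY is an
assumption that only holds when nothing will be written back (objtool itself
opens its object O_RDWR):

#include <fcntl.h>
#include <stdio.h>
#include <objtool/elf.h>

static int dump_text_symbols(const char *objname)
{
	struct elf *elf;
	struct section *sec;
	struct symbol *sym;

	elf = elf_open_read(objname, O_RDONLY);
	if (!elf)
		return -1;

	/* List the symbols attached to .text, if the section exists. */
	sec = find_section_by_name(elf, ".text");
	if (sec) {
		sec_for_each_sym(sec, sym)
			printf("%s: 0x%lx (+%u)\n", sym->name, sym->offset, sym->len);
	}

	elf_close(elf);
	return 0;
}
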
diff --git a/tools/objtool/include/objtool/endianness.h b/tools/objtool/include/objtool/endianness.h
new file mode 100644
index 000000000000..4d2aa9b0fe2f
--- /dev/null
+++ b/tools/objtool/include/objtool/endianness.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _OBJTOOL_ENDIANNESS_H
+#define _OBJTOOL_ENDIANNESS_H
+
+#include <linux/kernel.h>
+#include <endian.h>
+#include <objtool/elf.h>
+
+/*
+ * Does a byte swap if the target file's endianness doesn't match the host's,
+ * i.e. when cross compiling for little endian on big endian or vice versa.
+ * To be used for converting multi-byte values which have been read from, or
+ * are about to be written to, a target-native-endianness ELF file.
+ */
+static inline bool need_bswap(struct elf *elf)
+{
+ return (__BYTE_ORDER == __LITTLE_ENDIAN) ^
+ (elf->ehdr.e_ident[EI_DATA] == ELFDATA2LSB);
+}
+
+#define bswap_if_needed(elf, val) \
+({ \
+ __typeof__(val) __ret; \
+ bool __need_bswap = need_bswap(elf); \
+ switch (sizeof(val)) { \
+ case 8: \
+ __ret = __need_bswap ? bswap_64(val) : (val); break; \
+ case 4: \
+ __ret = __need_bswap ? bswap_32(val) : (val); break; \
+ case 2: \
+ __ret = __need_bswap ? bswap_16(val) : (val); break; \
+ default: \
+ BUILD_BUG(); break; \
+ } \
+ __ret; \
+})
+
+#endif /* _OBJTOOL_ENDIANNESS_H */
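
A small sketch of bswap_if_needed() in use, mirroring what orc_gen.c does
below for the sp_offset/bp_offset fields; to_target_u32 is a hypothetical
wrapper shown only for illustration:

#include <objtool/endianness.h>

/*
 * Convert a host-order 32-bit value into the target ELF's byte order.
 * This is a no-op when host and target endianness already match.
 */
static inline unsigned int to_target_u32(struct elf *elf, unsigned int host_val)
{
	return bswap_if_needed(elf, host_val);
}
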
diff --git a/tools/objtool/include/objtool/objtool.h b/tools/objtool/include/objtool/objtool.h
new file mode 100644
index 000000000000..94a33ee7b363
--- /dev/null
+++ b/tools/objtool/include/objtool/objtool.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Matt Helsley <mhelsley@vmware.com>
+ */
+
+#ifndef _OBJTOOL_H
+#define _OBJTOOL_H
+
+#include <stdbool.h>
+#include <linux/list.h>
+#include <linux/hashtable.h>
+
+#include <objtool/elf.h>
+
+#define __weak __attribute__((weak))
+
+struct pv_state {
+ bool clean;
+ struct list_head targets;
+};
+
+struct objtool_file {
+ struct elf *elf;
+ DECLARE_HASHTABLE(insn_hash, 20);
+ struct list_head retpoline_call_list;
+ struct list_head return_thunk_list;
+ struct list_head static_call_list;
+ struct list_head mcount_loc_list;
+ struct list_head endbr_list;
+ struct list_head call_list;
+ bool ignore_unreachables, hints, rodata;
+
+ unsigned int nr_endbr;
+ unsigned int nr_endbr_int;
+
+ unsigned long jl_short, jl_long;
+ unsigned long jl_nop_short, jl_nop_long;
+
+ struct pv_state *pv_ops;
+};
+
+struct objtool_file *objtool_open_read(const char *_objname);
+
+void objtool_pv_add(struct objtool_file *file, int idx, struct symbol *func);
+
+int check(struct objtool_file *file);
+int orc_dump(const char *objname);
+int orc_create(struct objtool_file *file);
+
+#endif /* _OBJTOOL_H */
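
A rough sketch of how these entry points fit together for a subcommand
(open, check, generate ORC, write the result back); run_check_and_orc is a
hypothetical function with error handling reduced to the bare minimum, not
the actual builtin code:

#include <objtool/objtool.h>

static int run_check_and_orc(const char *objname)
{
	struct objtool_file *file = objtool_open_read(objname);

	if (!file)
		return 1;

	/* Validate stack metadata, then emit .orc_unwind/.orc_unwind_ip. */
	if (check(file) || orc_create(file))
		return 1;

	/* Write the modified ELF back to disk. */
	return elf_write(file->elf);
}
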
diff --git a/tools/objtool/special.h b/tools/objtool/include/objtool/special.h
index 35061530e46e..86d4af9c5aa9 100644
--- a/tools/objtool/special.h
+++ b/tools/objtool/include/objtool/special.h
@@ -7,7 +7,10 @@
#define _SPECIAL_H
#include <stdbool.h>
-#include "elf.h"
+#include <objtool/check.h>
+#include <objtool/elf.h>
+
+#define C_JUMP_TABLE_SECTION ".rodata..c_jump_table"
struct special_alt {
struct list_head list;
@@ -16,6 +19,7 @@ struct special_alt {
bool skip_orig;
bool skip_alt;
bool jump_or_nop;
+ u8 key_addend;
struct section *orig_sec;
unsigned long orig_off;
@@ -28,4 +32,11 @@ struct special_alt {
int special_get_alts(struct elf *elf, struct list_head *alts);
+void arch_handle_alternative(unsigned short feature, struct special_alt *alt);
+
+bool arch_support_alt_relocation(struct special_alt *special_alt,
+ struct instruction *insn,
+ struct reloc *reloc);
+struct reloc *arch_find_switch_table(struct objtool_file *file,
+ struct instruction *insn);
#endif /* _SPECIAL_H */
diff --git a/tools/objtool/warn.h b/tools/objtool/include/objtool/warn.h
index 7799f60de80a..b1c920dc9516 100644
--- a/tools/objtool/warn.h
+++ b/tools/objtool/include/objtool/warn.h
@@ -11,32 +11,33 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-#include "elf.h"
+#include <objtool/builtin.h>
+#include <objtool/elf.h>
extern const char *objname;
static inline char *offstr(struct section *sec, unsigned long offset)
{
- struct symbol *func;
- char *name, *str;
- unsigned long name_off;
+ bool is_text = (sec->sh.sh_flags & SHF_EXECINSTR);
+ struct symbol *sym = NULL;
+ char *str;
+ int len;
- func = find_func_containing(sec, offset);
- if (func) {
- name = func->name;
- name_off = offset - func->offset;
+ if (is_text)
+ sym = find_func_containing(sec, offset);
+ if (!sym)
+ sym = find_symbol_containing(sec, offset);
+
+ if (sym) {
+ str = malloc(strlen(sym->name) + strlen(sec->name) + 40);
+ len = sprintf(str, "%s+0x%lx", sym->name, offset - sym->offset);
+ if (opts.sec_address)
+ sprintf(str+len, " (%s+0x%lx)", sec->name, offset);
} else {
- name = sec->name;
- name_off = offset;
+ str = malloc(strlen(sec->name) + 20);
+ sprintf(str, "%s+0x%lx", sec->name, offset);
}
- str = malloc(strlen(name) + 20);
-
- if (func)
- sprintf(str, "%s()+0x%lx", name, name_off);
- else
- sprintf(str, "%s+0x%lx", name, name_off);
-
return str;
}
@@ -52,6 +53,11 @@ static inline char *offstr(struct section *sec, unsigned long offset)
free(_str); \
})
+#define WARN_INSN(insn, format, ...) \
+({ \
+ WARN_FUNC(format, insn->sec, insn->offset, ##__VA_ARGS__); \
+})
+
#define BT_FUNC(format, insn, ...) \
({ \
struct instruction *_insn = (insn); \
diff --git a/tools/objtool/objtool.c b/tools/objtool/objtool.c
index 0b3528f05053..c54f7235c5d9 100644
--- a/tools/objtool/objtool.c
+++ b/tools/objtool/objtool.c
@@ -3,102 +3,139 @@
* Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
*/
-/*
- * objtool:
- *
- * The 'check' subcmd analyzes every .o file and ensures the validity of its
- * stack trace metadata. It enforces a set of rules on asm code and C inline
- * assembly code so that stack traces can be reliable.
- *
- * For more information, see tools/objtool/Documentation/stack-validation.txt.
- */
-
#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
+#include <unistd.h>
#include <subcmd/exec-cmd.h>
#include <subcmd/pager.h>
#include <linux/kernel.h>
-#include "builtin.h"
+#include <objtool/builtin.h>
+#include <objtool/objtool.h>
+#include <objtool/warn.h>
-struct cmd_struct {
- const char *name;
- int (*fn)(int, const char **);
- const char *help;
-};
+bool help;
-static const char objtool_usage_string[] =
- "objtool COMMAND [ARGS]";
+const char *objname;
+static struct objtool_file file;
-static struct cmd_struct objtool_cmds[] = {
- {"check", cmd_check, "Perform stack metadata validation on an object file" },
- {"orc", cmd_orc, "Generate in-place ORC unwind tables for an object file" },
-};
+static bool objtool_create_backup(const char *_objname)
+{
+ int len = strlen(_objname);
+ char *buf, *base, *name = malloc(len+6);
+ int s, d, l, t;
-bool help;
+ if (!name) {
+ perror("failed backup name malloc");
+ return false;
+ }
-static void cmd_usage(void)
-{
- unsigned int i, longest = 0;
+ strcpy(name, _objname);
+ strcpy(name + len, ".orig");
- printf("\n usage: %s\n\n", objtool_usage_string);
+ d = open(name, O_CREAT|O_WRONLY|O_TRUNC, 0644);
+ if (d < 0) {
+ perror("failed to create backup file");
+ return false;
+ }
- for (i = 0; i < ARRAY_SIZE(objtool_cmds); i++) {
- if (longest < strlen(objtool_cmds[i].name))
- longest = strlen(objtool_cmds[i].name);
+ s = open(_objname, O_RDONLY);
+ if (s < 0) {
+ perror("failed to open orig file");
+ return false;
}
- puts(" Commands:");
- for (i = 0; i < ARRAY_SIZE(objtool_cmds); i++) {
- printf(" %-*s ", longest, objtool_cmds[i].name);
- puts(objtool_cmds[i].help);
+ buf = malloc(4096);
+ if (!buf) {
+ perror("failed backup data malloc");
+ return false;
}
- printf("\n");
+ while ((l = read(s, buf, 4096)) > 0) {
+ base = buf;
+ do {
+ t = write(d, base, l);
+ if (t < 0) {
+ perror("failed backup write");
+ return false;
+ }
+ base += t;
+ l -= t;
+ } while (l);
+ }
- exit(129);
+ if (l < 0) {
+ perror("failed backup read");
+ return false;
+ }
+
+ free(name);
+ free(buf);
+ close(d);
+ close(s);
+
+ return true;
}
-static void handle_options(int *argc, const char ***argv)
+struct objtool_file *objtool_open_read(const char *_objname)
{
- while (*argc > 0) {
- const char *cmd = (*argv)[0];
-
- if (cmd[0] != '-')
- break;
-
- if (!strcmp(cmd, "--help") || !strcmp(cmd, "-h")) {
- help = true;
- break;
- } else {
- fprintf(stderr, "Unknown option: %s\n", cmd);
- cmd_usage();
+ if (objname) {
+ if (strcmp(objname, _objname)) {
+ WARN("won't handle more than one file at a time");
+ return NULL;
}
+ return &file;
+ }
+ objname = _objname;
- (*argv)++;
- (*argc)--;
+ file.elf = elf_open_read(objname, O_RDWR);
+ if (!file.elf)
+ return NULL;
+
+ if (opts.backup && !objtool_create_backup(objname)) {
+ WARN("can't create backup file");
+ return NULL;
}
+
+ hash_init(file.insn_hash);
+ INIT_LIST_HEAD(&file.retpoline_call_list);
+ INIT_LIST_HEAD(&file.return_thunk_list);
+ INIT_LIST_HEAD(&file.static_call_list);
+ INIT_LIST_HEAD(&file.mcount_loc_list);
+ INIT_LIST_HEAD(&file.endbr_list);
+ INIT_LIST_HEAD(&file.call_list);
+ file.ignore_unreachables = opts.no_unreachable;
+ file.hints = false;
+
+ return &file;
}
-static void handle_internal_command(int argc, const char **argv)
+void objtool_pv_add(struct objtool_file *f, int idx, struct symbol *func)
{
- const char *cmd = argv[0];
- unsigned int i, ret;
+ if (!opts.noinstr)
+ return;
- for (i = 0; i < ARRAY_SIZE(objtool_cmds); i++) {
- struct cmd_struct *p = objtool_cmds+i;
-
- if (strcmp(p->name, cmd))
- continue;
+ if (!f->pv_ops) {
+ WARN("paravirt confusion");
+ return;
+ }
- ret = p->fn(argc, argv);
+ /*
+ * These functions will be patched into native code,
+ * see paravirt_patch().
+ */
+ if (!strcmp(func->name, "_paravirt_nop") ||
+ !strcmp(func->name, "_paravirt_ident_64"))
+ return;
- exit(ret);
- }
+ /* already added this function */
+ if (!list_empty(&func->pv_target))
+ return;
- cmd_usage();
+ list_add(&func->pv_target, &f->pv_ops[idx].targets);
+ f->pv_ops[idx].clean = false;
}
int main(int argc, const char **argv)
@@ -109,14 +146,7 @@ int main(int argc, const char **argv)
exec_cmd_init("objtool", UNUSED, UNUSED, UNUSED);
pager_init(UNUSED);
- argv++;
- argc--;
- handle_options(&argc, &argv);
-
- if (!argc || help)
- cmd_usage();
-
- handle_internal_command(argc, argv);
+ objtool_run(argc, argv);
return 0;
}
diff --git a/tools/objtool/orc.h b/tools/objtool/orc.h
deleted file mode 100644
index ee2832221e62..000000000000
--- a/tools/objtool/orc.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- */
-
-#ifndef _ORC_H
-#define _ORC_H
-
-#include <asm/orc_types.h>
-
-struct objtool_file;
-
-int create_orc(struct objtool_file *file);
-int create_orc_sections(struct objtool_file *file);
-
-int orc_dump(const char *objname);
-
-#endif /* _ORC_H */
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 13ccf775a83a..0e183bb1c720 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -4,8 +4,10 @@
*/
#include <unistd.h>
-#include "orc.h"
-#include "warn.h"
+#include <asm/orc_types.h>
+#include <objtool/objtool.h>
+#include <objtool/warn.h>
+#include <objtool/endianness.h>
static const char *reg_name(unsigned int reg)
{
@@ -36,12 +38,16 @@ static const char *reg_name(unsigned int reg)
static const char *orc_type_name(unsigned int type)
{
switch (type) {
+ case ORC_TYPE_UNDEFINED:
+ return "(und)";
+ case ORC_TYPE_END_OF_STACK:
+ return "end";
case ORC_TYPE_CALL:
return "call";
case ORC_TYPE_REGS:
return "regs";
- case ORC_TYPE_REGS_IRET:
- return "iret";
+ case ORC_TYPE_REGS_PARTIAL:
+ return "regs (partial)";
default:
return "?";
}
@@ -52,7 +58,7 @@ static void print_reg(unsigned int reg, int offset)
if (reg == ORC_REG_BP_INDIRECT)
printf("(bp%+d)", offset);
else if (reg == ORC_REG_SP_INDIRECT)
- printf("(sp%+d)", offset);
+ printf("(sp)%+d", offset);
else if (reg == ORC_REG_UNDEFINED)
printf("(und)");
else
@@ -66,13 +72,14 @@ int orc_dump(const char *_objname)
char *name;
size_t nr_sections;
Elf64_Addr orc_ip_addr = 0;
- size_t shstrtab_idx;
+ size_t shstrtab_idx, strtab_idx = 0;
Elf *elf;
Elf_Scn *scn;
GElf_Shdr sh;
GElf_Rela rela;
GElf_Sym sym;
Elf_Data *data, *symtab = NULL, *rela_orc_ip = NULL;
+ struct elf dummy_elf = {};
objname = _objname;
@@ -91,6 +98,12 @@ int orc_dump(const char *_objname)
return -1;
}
+ if (!elf64_getehdr(elf)) {
+ WARN_ELF("elf64_getehdr");
+ return -1;
+ }
+ memcpy(&dummy_elf.ehdr, elf64_getehdr(elf), sizeof(dummy_elf.ehdr));
+
if (elf_getshdrnum(elf, &nr_sections)) {
WARN_ELF("elf_getshdrnum");
return -1;
@@ -127,6 +140,8 @@ int orc_dump(const char *_objname)
if (!strcmp(name, ".symtab")) {
symtab = data;
+ } else if (!strcmp(name, ".strtab")) {
+ strtab_idx = i;
} else if (!strcmp(name, ".orc_unwind")) {
orc = data->d_buf;
orc_size = sh.sh_size;
@@ -138,7 +153,7 @@ int orc_dump(const char *_objname)
}
}
- if (!symtab || !orc || !orc_ip)
+ if (!symtab || !strtab_idx || !orc || !orc_ip)
return 0;
if (orc_size % sizeof(*orc) != 0) {
@@ -159,21 +174,29 @@ int orc_dump(const char *_objname)
return -1;
}
- scn = elf_getscn(elf, sym.st_shndx);
- if (!scn) {
- WARN_ELF("elf_getscn");
- return -1;
- }
-
- if (!gelf_getshdr(scn, &sh)) {
- WARN_ELF("gelf_getshdr");
- return -1;
- }
-
- name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
- if (!name || !*name) {
- WARN_ELF("elf_strptr");
- return -1;
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
+ scn = elf_getscn(elf, sym.st_shndx);
+ if (!scn) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ if (!gelf_getshdr(scn, &sh)) {
+ WARN_ELF("gelf_getshdr");
+ return -1;
+ }
+
+ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+ if (!name) {
+ WARN_ELF("elf_strptr");
+ return -1;
+ }
+ } else {
+ name = elf_strptr(elf, strtab_idx, sym.st_name);
+ if (!name) {
+ WARN_ELF("elf_strptr");
+ return -1;
+ }
}
printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
@@ -182,17 +205,17 @@ int orc_dump(const char *_objname)
printf("%llx:", (unsigned long long)(orc_ip_addr + (i * sizeof(int)) + orc_ip[i]));
}
+ printf("type:%s", orc_type_name(orc[i].type));
printf(" sp:");
- print_reg(orc[i].sp_reg, orc[i].sp_offset);
+ print_reg(orc[i].sp_reg, bswap_if_needed(&dummy_elf, orc[i].sp_offset));
printf(" bp:");
- print_reg(orc[i].bp_reg, orc[i].bp_offset);
+ print_reg(orc[i].bp_reg, bswap_if_needed(&dummy_elf, orc[i].bp_offset));
- printf(" type:%s end:%d\n",
- orc_type_name(orc[i].type), orc[i].end);
+ printf(" signal:%d\n", orc[i].signal);
}
elf_end(elf);
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index 41e4a2754da4..48efd1e2f00d 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -6,207 +6,253 @@
#include <stdlib.h>
#include <string.h>
-#include "orc.h"
-#include "check.h"
-#include "warn.h"
+#include <linux/objtool_types.h>
+#include <asm/orc_types.h>
-int create_orc(struct objtool_file *file)
-{
- struct instruction *insn;
-
- for_each_insn(file, insn) {
- struct orc_entry *orc = &insn->orc;
- struct cfi_reg *cfa = &insn->state.cfa;
- struct cfi_reg *bp = &insn->state.regs[CFI_BP];
+#include <objtool/check.h>
+#include <objtool/warn.h>
+#include <objtool/endianness.h>
- orc->end = insn->state.end;
-
- if (cfa->base == CFI_UNDEFINED) {
- orc->sp_reg = ORC_REG_UNDEFINED;
- continue;
- }
+static int init_orc_entry(struct orc_entry *orc, struct cfi_state *cfi,
+ struct instruction *insn)
+{
+ struct cfi_reg *bp = &cfi->regs[CFI_BP];
+
+ memset(orc, 0, sizeof(*orc));
+
+ if (!cfi) {
+ /*
+ * This is usually either unreachable nops/traps (which don't
+ * trigger unreachable instruction warnings), or
+ * STACK_FRAME_NON_STANDARD functions.
+ */
+ orc->type = ORC_TYPE_UNDEFINED;
+ return 0;
+ }
- switch (cfa->base) {
- case CFI_SP:
- orc->sp_reg = ORC_REG_SP;
- break;
- case CFI_SP_INDIRECT:
- orc->sp_reg = ORC_REG_SP_INDIRECT;
- break;
- case CFI_BP:
- orc->sp_reg = ORC_REG_BP;
- break;
- case CFI_BP_INDIRECT:
- orc->sp_reg = ORC_REG_BP_INDIRECT;
- break;
- case CFI_R10:
- orc->sp_reg = ORC_REG_R10;
- break;
- case CFI_R13:
- orc->sp_reg = ORC_REG_R13;
- break;
- case CFI_DI:
- orc->sp_reg = ORC_REG_DI;
- break;
- case CFI_DX:
- orc->sp_reg = ORC_REG_DX;
- break;
- default:
- WARN_FUNC("unknown CFA base reg %d",
- insn->sec, insn->offset, cfa->base);
- return -1;
- }
+ switch (cfi->type) {
+ case UNWIND_HINT_TYPE_UNDEFINED:
+ orc->type = ORC_TYPE_UNDEFINED;
+ return 0;
+ case UNWIND_HINT_TYPE_END_OF_STACK:
+ orc->type = ORC_TYPE_END_OF_STACK;
+ return 0;
+ case UNWIND_HINT_TYPE_CALL:
+ orc->type = ORC_TYPE_CALL;
+ break;
+ case UNWIND_HINT_TYPE_REGS:
+ orc->type = ORC_TYPE_REGS;
+ break;
+ case UNWIND_HINT_TYPE_REGS_PARTIAL:
+ orc->type = ORC_TYPE_REGS_PARTIAL;
+ break;
+ default:
+ WARN_INSN(insn, "unknown unwind hint type %d", cfi->type);
+ return -1;
+ }
- switch(bp->base) {
- case CFI_UNDEFINED:
- orc->bp_reg = ORC_REG_UNDEFINED;
- break;
- case CFI_CFA:
- orc->bp_reg = ORC_REG_PREV_SP;
- break;
- case CFI_BP:
- orc->bp_reg = ORC_REG_BP;
- break;
- default:
- WARN_FUNC("unknown BP base reg %d",
- insn->sec, insn->offset, bp->base);
- return -1;
- }
+ orc->signal = cfi->signal;
+
+ switch (cfi->cfa.base) {
+ case CFI_SP:
+ orc->sp_reg = ORC_REG_SP;
+ break;
+ case CFI_SP_INDIRECT:
+ orc->sp_reg = ORC_REG_SP_INDIRECT;
+ break;
+ case CFI_BP:
+ orc->sp_reg = ORC_REG_BP;
+ break;
+ case CFI_BP_INDIRECT:
+ orc->sp_reg = ORC_REG_BP_INDIRECT;
+ break;
+ case CFI_R10:
+ orc->sp_reg = ORC_REG_R10;
+ break;
+ case CFI_R13:
+ orc->sp_reg = ORC_REG_R13;
+ break;
+ case CFI_DI:
+ orc->sp_reg = ORC_REG_DI;
+ break;
+ case CFI_DX:
+ orc->sp_reg = ORC_REG_DX;
+ break;
+ default:
+ WARN_INSN(insn, "unknown CFA base reg %d", cfi->cfa.base);
+ return -1;
+ }
- orc->sp_offset = cfa->offset;
- orc->bp_offset = bp->offset;
- orc->type = insn->state.type;
+ switch (bp->base) {
+ case CFI_UNDEFINED:
+ orc->bp_reg = ORC_REG_UNDEFINED;
+ break;
+ case CFI_CFA:
+ orc->bp_reg = ORC_REG_PREV_SP;
+ break;
+ case CFI_BP:
+ orc->bp_reg = ORC_REG_BP;
+ break;
+ default:
+ WARN_INSN(insn, "unknown BP base reg %d", bp->base);
+ return -1;
}
+ orc->sp_offset = cfi->cfa.offset;
+ orc->bp_offset = bp->offset;
+
return 0;
}
-static int create_orc_entry(struct elf *elf, struct section *u_sec, struct section *ip_relasec,
- unsigned int idx, struct section *insn_sec,
- unsigned long insn_off, struct orc_entry *o)
+static int write_orc_entry(struct elf *elf, struct section *orc_sec,
+ struct section *ip_sec, unsigned int idx,
+ struct section *insn_sec, unsigned long insn_off,
+ struct orc_entry *o)
{
struct orc_entry *orc;
- struct rela *rela;
-
- if (!insn_sec->sym) {
- WARN("missing symbol for section %s", insn_sec->name);
- return -1;
- }
/* populate ORC data */
- orc = (struct orc_entry *)u_sec->data->d_buf + idx;
+ orc = (struct orc_entry *)orc_sec->data->d_buf + idx;
memcpy(orc, o, sizeof(*orc));
+ orc->sp_offset = bswap_if_needed(elf, orc->sp_offset);
+ orc->bp_offset = bswap_if_needed(elf, orc->bp_offset);
- /* populate rela for ip */
- rela = malloc(sizeof(*rela));
- if (!rela) {
- perror("malloc");
+ /* populate reloc for ip */
+ if (elf_add_reloc_to_insn(elf, ip_sec, idx * sizeof(int), R_X86_64_PC32,
+ insn_sec, insn_off))
return -1;
- }
- memset(rela, 0, sizeof(*rela));
-
- rela->sym = insn_sec->sym;
- rela->addend = insn_off;
- rela->type = R_X86_64_PC32;
- rela->offset = idx * sizeof(int);
- rela->sec = ip_relasec;
-
- list_add_tail(&rela->list, &ip_relasec->rela_list);
- hash_add(elf->rela_hash, &rela->hash, rela_hash(rela));
return 0;
}
-int create_orc_sections(struct objtool_file *file)
-{
- struct instruction *insn, *prev_insn;
- struct section *sec, *u_sec, *ip_relasec;
- unsigned int idx;
+struct orc_list_entry {
+ struct list_head list;
+ struct orc_entry orc;
+ struct section *insn_sec;
+ unsigned long insn_off;
+};
- struct orc_entry empty = {
- .sp_reg = ORC_REG_UNDEFINED,
- .bp_reg = ORC_REG_UNDEFINED,
- .type = ORC_TYPE_CALL,
- };
+static int orc_list_add(struct list_head *orc_list, struct orc_entry *orc,
+ struct section *sec, unsigned long offset)
+{
+ struct orc_list_entry *entry = malloc(sizeof(*entry));
- sec = find_section_by_name(file->elf, ".orc_unwind");
- if (sec) {
- WARN("file already has .orc_unwind section, skipping");
+ if (!entry) {
+ WARN("malloc failed");
return -1;
}
- /* count the number of needed orcs */
- idx = 0;
- for_each_sec(file, sec) {
- if (!sec->text)
- continue;
-
- prev_insn = NULL;
- sec_for_each_insn(file, sec, insn) {
- if (!prev_insn ||
- memcmp(&insn->orc, &prev_insn->orc,
- sizeof(struct orc_entry))) {
- idx++;
- }
- prev_insn = insn;
- }
-
- /* section terminator */
- if (prev_insn)
- idx++;
- }
- if (!idx)
- return -1;
+ entry->orc = *orc;
+ entry->insn_sec = sec;
+ entry->insn_off = offset;
+ list_add_tail(&entry->list, orc_list);
+ return 0;
+}
- /* create .orc_unwind_ip and .rela.orc_unwind_ip sections */
- sec = elf_create_section(file->elf, ".orc_unwind_ip", sizeof(int), idx);
- if (!sec)
- return -1;
+static unsigned long alt_group_len(struct alt_group *alt_group)
+{
+ return alt_group->last_insn->offset +
+ alt_group->last_insn->len -
+ alt_group->first_insn->offset;
+}
- ip_relasec = elf_create_rela_section(file->elf, sec);
- if (!ip_relasec)
- return -1;
+int orc_create(struct objtool_file *file)
+{
+ struct section *sec, *orc_sec;
+ unsigned int nr = 0, idx = 0;
+ struct orc_list_entry *entry;
+ struct list_head orc_list;
- /* create .orc_unwind section */
- u_sec = elf_create_section(file->elf, ".orc_unwind",
- sizeof(struct orc_entry), idx);
+ struct orc_entry null = { .type = ORC_TYPE_UNDEFINED };
- /* populate sections */
- idx = 0;
+ /* Build a deduplicated list of ORC entries: */
+ INIT_LIST_HEAD(&orc_list);
for_each_sec(file, sec) {
+ struct orc_entry orc, prev_orc = {0};
+ struct instruction *insn;
+ bool empty = true;
+
if (!sec->text)
continue;
- prev_insn = NULL;
sec_for_each_insn(file, sec, insn) {
- if (!prev_insn || memcmp(&insn->orc, &prev_insn->orc,
- sizeof(struct orc_entry))) {
+ struct alt_group *alt_group = insn->alt_group;
+ int i;
- if (create_orc_entry(file->elf, u_sec, ip_relasec, idx,
- insn->sec, insn->offset,
- &insn->orc))
+ if (!alt_group) {
+ if (init_orc_entry(&orc, insn->cfi, insn))
+ return -1;
+ if (!memcmp(&prev_orc, &orc, sizeof(orc)))
+ continue;
+ if (orc_list_add(&orc_list, &orc, sec,
+ insn->offset))
return -1;
+ nr++;
+ prev_orc = orc;
+ empty = false;
+ continue;
+ }
- idx++;
+ /*
+ * Alternatives can have different stack layout
+ * possibilities (but they shouldn't conflict).
+ * Instead of traversing the instructions, use the
+ * alt_group's flattened byte-offset-addressed CFI
+ * array.
+ */
+ for (i = 0; i < alt_group_len(alt_group); i++) {
+ struct cfi_state *cfi = alt_group->cfi[i];
+ if (!cfi)
+ continue;
+ /* errors are reported on the original insn */
+ if (init_orc_entry(&orc, cfi, insn))
+ return -1;
+ if (!memcmp(&prev_orc, &orc, sizeof(orc)))
+ continue;
+ if (orc_list_add(&orc_list, &orc, insn->sec,
+ insn->offset + i))
+ return -1;
+ nr++;
+ prev_orc = orc;
+ empty = false;
}
- prev_insn = insn;
- }
- /* section terminator */
- if (prev_insn) {
- if (create_orc_entry(file->elf, u_sec, ip_relasec, idx,
- prev_insn->sec,
- prev_insn->offset + prev_insn->len,
- &empty))
- return -1;
+ /* Skip to the end of the alt_group */
+ insn = alt_group->last_insn;
+ }
- idx++;
+ /* Add a section terminator */
+ if (!empty) {
+ orc_list_add(&orc_list, &null, sec, sec->sh.sh_size);
+ nr++;
}
}
+ if (!nr)
+ return 0;
+
+ /* Create .orc_unwind, .orc_unwind_ip and .rela.orc_unwind_ip sections: */
+ sec = find_section_by_name(file->elf, ".orc_unwind");
+ if (sec) {
+ WARN("file already has .orc_unwind section, skipping");
+ return -1;
+ }
+ orc_sec = elf_create_section(file->elf, ".orc_unwind", 0,
+ sizeof(struct orc_entry), nr);
+ if (!orc_sec)
+ return -1;
- if (elf_rebuild_rela_section(ip_relasec))
+ sec = elf_create_section(file->elf, ".orc_unwind_ip", 0, sizeof(int), nr);
+ if (!sec)
return -1;
+ /* Write ORC entries to sections: */
+ list_for_each_entry(entry, &orc_list, list) {
+ if (write_orc_entry(file->elf, orc_sec, sec, idx++,
+ entry->insn_sec, entry->insn_off,
+ &entry->orc))
+ return -1;
+ }
+
return 0;
}
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index e74e0189de22..baa85c31526b 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -11,27 +11,11 @@
#include <stdlib.h>
#include <string.h>
-#include "builtin.h"
-#include "special.h"
-#include "warn.h"
-
-#define EX_ENTRY_SIZE 12
-#define EX_ORIG_OFFSET 0
-#define EX_NEW_OFFSET 4
-
-#define JUMP_ENTRY_SIZE 16
-#define JUMP_ORIG_OFFSET 0
-#define JUMP_NEW_OFFSET 4
-
-#define ALT_ENTRY_SIZE 13
-#define ALT_ORIG_OFFSET 0
-#define ALT_NEW_OFFSET 4
-#define ALT_FEATURE_OFFSET 8
-#define ALT_ORIG_LEN_OFFSET 10
-#define ALT_NEW_LEN_OFFSET 11
-
-#define X86_FEATURE_POPCNT (4*32+23)
-#define X86_FEATURE_SMAP (9*32+20)
+#include <arch/special.h>
+#include <objtool/builtin.h>
+#include <objtool/special.h>
+#include <objtool/warn.h>
+#include <objtool/endianness.h>
struct special_entry {
const char *sec;
@@ -39,9 +23,10 @@ struct special_entry {
unsigned char size, orig, new;
unsigned char orig_len, new_len; /* group only */
unsigned char feature; /* ALTERNATIVE macro CPU feature */
+ unsigned char key; /* jump_label key */
};
-struct special_entry entries[] = {
+static const struct special_entry entries[] = {
{
.sec = ".altinstructions",
.group = true,
@@ -58,6 +43,7 @@ struct special_entry entries[] = {
.size = JUMP_ENTRY_SIZE,
.orig = JUMP_ORIG_OFFSET,
.new = JUMP_NEW_OFFSET,
+ .key = JUMP_KEY_OFFSET,
},
{
.sec = "__ex_table",
@@ -68,11 +54,22 @@ struct special_entry entries[] = {
{},
};
-static int get_alt_entry(struct elf *elf, struct special_entry *entry,
+void __weak arch_handle_alternative(unsigned short feature, struct special_alt *alt)
+{
+}
+
+static void reloc_to_sec_off(struct reloc *reloc, struct section **sec,
+ unsigned long *off)
+{
+ *sec = reloc->sym->sec;
+ *off = reloc->sym->offset + reloc->addend;
+}
+
+static int get_alt_entry(struct elf *elf, const struct special_entry *entry,
struct section *sec, int idx,
struct special_alt *alt)
{
- struct rela *orig_rela, *new_rela;
+ struct reloc *orig_reloc, *new_reloc;
unsigned long offset;
offset = idx * entry->size;
@@ -90,64 +87,48 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
if (entry->feature) {
unsigned short feature;
- feature = *(unsigned short *)(sec->data->d_buf + offset +
- entry->feature);
-
- /*
- * It has been requested that we don't validate the !POPCNT
- * feature path which is a "very very small percentage of
- * machines".
- */
- if (feature == X86_FEATURE_POPCNT)
- alt->skip_orig = true;
-
- /*
- * If UACCESS validation is enabled; force that alternative;
- * otherwise force it the other way.
- *
- * What we want to avoid is having both the original and the
- * alternative code flow at the same time, in that case we can
- * find paths that see the STAC but take the NOP instead of
- * CLAC and the other way around.
- */
- if (feature == X86_FEATURE_SMAP) {
- if (uaccess)
- alt->skip_orig = true;
- else
- alt->skip_alt = true;
- }
+ feature = bswap_if_needed(elf,
+ *(unsigned short *)(sec->data->d_buf +
+ offset +
+ entry->feature));
+ arch_handle_alternative(feature, alt);
}
- orig_rela = find_rela_by_dest(elf, sec, offset + entry->orig);
- if (!orig_rela) {
- WARN_FUNC("can't find orig rela", sec, offset + entry->orig);
- return -1;
- }
- if (orig_rela->sym->type != STT_SECTION) {
- WARN_FUNC("don't know how to handle non-section rela symbol %s",
- sec, offset + entry->orig, orig_rela->sym->name);
+ orig_reloc = find_reloc_by_dest(elf, sec, offset + entry->orig);
+ if (!orig_reloc) {
+ WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
return -1;
}
- alt->orig_sec = orig_rela->sym->sec;
- alt->orig_off = orig_rela->addend;
+ reloc_to_sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off);
if (!entry->group || alt->new_len) {
- new_rela = find_rela_by_dest(elf, sec, offset + entry->new);
- if (!new_rela) {
- WARN_FUNC("can't find new rela",
+ new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
+ if (!new_reloc) {
+ WARN_FUNC("can't find new reloc",
sec, offset + entry->new);
return -1;
}
- alt->new_sec = new_rela->sym->sec;
- alt->new_off = (unsigned int)new_rela->addend;
+ reloc_to_sec_off(new_reloc, &alt->new_sec, &alt->new_off);
/* _ASM_EXTABLE_EX hack */
if (alt->new_off >= 0x7ffffff0)
alt->new_off -= 0x7ffffff0;
}
+ if (entry->key) {
+ struct reloc *key_reloc;
+
+ key_reloc = find_reloc_by_dest(elf, sec, offset + entry->key);
+ if (!key_reloc) {
+ WARN_FUNC("can't find key reloc",
+ sec, offset + entry->key);
+ return -1;
+ }
+ alt->key_addend = key_reloc->addend;
+ }
+
return 0;
}
@@ -158,7 +139,7 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
*/
int special_get_alts(struct elf *elf, struct list_head *alts)
{
- struct special_entry *entry;
+ const struct special_entry *entry;
struct section *sec;
unsigned int nr_entries;
struct special_alt *alt;
@@ -171,13 +152,13 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
if (!sec)
continue;
- if (sec->len % entry->size != 0) {
+ if (sec->sh.sh_size % entry->size != 0) {
WARN("%s size not a multiple of %d",
sec->name, entry->size);
return -1;
}
- nr_entries = sec->len / entry->size;
+ nr_entries = sec->sh.sh_size / entry->size;
for (idx = 0; idx < nr_entries; idx++) {
alt = malloc(sizeof(*alt));
@@ -188,7 +169,9 @@ int special_get_alts(struct elf *elf, struct list_head *alts)
memset(alt, 0, sizeof(*alt));
ret = get_alt_entry(elf, entry, sec, idx, alt);
- if (ret)
+ if (ret > 0)
+ continue;
+ if (ret < 0)
return ret;
list_add_tail(&alt->list, alts);
diff --git a/tools/objtool/sync-check.sh b/tools/objtool/sync-check.sh
index 2a1261bfbb62..81d120d05442 100755
--- a/tools/objtool/sync-check.sh
+++ b/tools/objtool/sync-check.sh
@@ -1,13 +1,31 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-FILES='
+if [ -z "$SRCARCH" ]; then
+ echo 'sync-check.sh: error: missing $SRCARCH environment variable' >&2
+ exit 1
+fi
+
+FILES="include/linux/objtool_types.h"
+
+if [ "$SRCARCH" = "x86" ]; then
+FILES="$FILES
+arch/x86/include/asm/nops.h
arch/x86/include/asm/inat_types.h
arch/x86/include/asm/orc_types.h
arch/x86/include/asm/emulate_prefix.h
arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
+include/linux/static_call_types.h
+"
+
+SYNC_CHECK_FILES='
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/insn.h
+arch/x86/lib/inat.c
+arch/x86/lib/insn.c
'
+fi
check_2 () {
file1=$1
@@ -40,11 +58,18 @@ fi
cd ../..
-for i in $FILES; do
- check $i
-done
+while read -r file_entry; do
+ if [ -z "$file_entry" ]; then
+ continue
+ fi
+
+ check $file_entry
+done <<EOF
+$FILES
+EOF
-check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
-check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
-check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
-check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
+if [ "$SRCARCH" = "x86" ]; then
+ for i in $SYNC_CHECK_FILES; do
+ check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"'
+ done
+fi
diff --git a/tools/objtool/weak.c b/tools/objtool/weak.c
new file mode 100644
index 000000000000..d83f607733b0
--- /dev/null
+++ b/tools/objtool/weak.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 Matt Helsley <mhelsley@vmware.com>
+ * Weak definitions necessary to compile objtool without
+ * some subcommands (e.g. check, orc).
+ */
+
+#include <stdbool.h>
+#include <errno.h>
+#include <objtool/objtool.h>
+
+#define UNSUPPORTED(name) \
+({ \
+ fprintf(stderr, "error: objtool: " name " not implemented\n"); \
+ return ENOSYS; \
+})
+
+int __weak orc_dump(const char *_objname)
+{
+ UNSUPPORTED("ORC");
+}
+
+int __weak orc_create(struct objtool_file *file)
+{
+ UNSUPPORTED("ORC");
+}
diff --git a/tools/pci/Makefile b/tools/pci/Makefile
index 4b95a5176355..57744778b518 100644
--- a/tools/pci/Makefile
+++ b/tools/pci/Makefile
@@ -42,7 +42,7 @@ $(OUTPUT)pcitest: $(PCITEST_IN)
clean:
rm -f $(ALL_PROGRAMS)
rm -rf $(OUTPUT)include/
- find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+ find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
install: $(ALL_PROGRAMS)
install -d -m 755 $(DESTDIR)$(bindir); \
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index 0a1344c45213..441b54234635 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -40,7 +40,7 @@ struct pci_test {
static int run_test(struct pci_test *test)
{
- struct pci_endpoint_test_xfer_param param;
+ struct pci_endpoint_test_xfer_param param = {};
int ret = -EINVAL;
int fd;
diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore
index f3f84781fd74..f533e76fb480 100644
--- a/tools/perf/.gitignore
+++ b/tools/perf/.gitignore
@@ -4,6 +4,7 @@ PERF-GUI-VARS
PERF-VERSION-FILE
FEATURE-DUMP
perf
+!include/perf/
perf-read-vdso32
perf-read-vdsox32
perf-help
@@ -15,13 +16,14 @@ perf*.1
perf*.xml
perf*.html
common-cmds.h
-perf.data
-perf.data.old
+perf*.data
+perf*.data.old
output.svg
perf-archive
-perf-with-kcore
+perf-iostat
tags
TAGS
+stats-*.csv
cscope*
config.mak
config.mak.autogen
@@ -29,12 +31,21 @@ config.mak.autogen
*-flex.*
*.pyc
*.pyo
+*.stdout
.config-detected
util/intel-pt-decoder/inat-tables.c
arch/*/include/generated/
trace/beauty/generated/
pmu-events/pmu-events.c
pmu-events/jevents
+pmu-events/metric_test.log
feature/
+libapi/
+libbpf/
+libperf/
+libsubcmd/
+libsymbol/
+libtraceevent/
+libtraceevent_plugins/
fixdep
-libtraceevent-dynamic-list
+Documentation/doc.dep
diff --git a/tools/perf/Build b/tools/perf/Build
index 5f392dbb88fc..aa7623622834 100644
--- a/tools/perf/Build
+++ b/tools/perf/Build
@@ -5,7 +5,6 @@ perf-y += builtin-diff.o
perf-y += builtin-evlist.o
perf-y += builtin-ftrace.o
perf-y += builtin-help.o
-perf-y += builtin-sched.o
perf-y += builtin-buildid-list.o
perf-y += builtin-buildid-cache.o
perf-y += builtin-kallsyms.o
@@ -13,19 +12,27 @@ perf-y += builtin-list.o
perf-y += builtin-record.o
perf-y += builtin-report.o
perf-y += builtin-stat.o
-perf-y += builtin-timechart.o
perf-y += builtin-top.o
perf-y += builtin-script.o
-perf-y += builtin-kmem.o
-perf-y += builtin-lock.o
perf-y += builtin-kvm.o
perf-y += builtin-inject.o
perf-y += builtin-mem.o
perf-y += builtin-data.o
perf-y += builtin-version.o
perf-y += builtin-c2c.o
+perf-y += builtin-daemon.o
+
+perf-$(CONFIG_LIBTRACEEVENT) += builtin-kmem.o
+perf-$(CONFIG_LIBTRACEEVENT) += builtin-kwork.o
+perf-$(CONFIG_LIBTRACEEVENT) += builtin-lock.o
+perf-$(CONFIG_LIBTRACEEVENT) += builtin-sched.o
+perf-$(CONFIG_LIBTRACEEVENT) += builtin-timechart.o
+
+ifeq ($(CONFIG_LIBTRACEEVENT),y)
+ perf-$(CONFIG_TRACE) += builtin-trace.o
+ perf-$(CONFIG_TRACE) += trace/beauty/
+endif
-perf-$(CONFIG_TRACE) += builtin-trace.o
perf-$(CONFIG_LIBELF) += builtin-probe.o
perf-y += bench/
@@ -50,6 +57,5 @@ perf-y += util/
perf-y += arch/
perf-y += ui/
perf-y += scripts/
-perf-$(CONFIG_TRACE) += trace/beauty/
gtk-y += ui/gtk/
diff --git a/tools/perf/Documentation/Makefile b/tools/perf/Documentation/Makefile
index 31824d5269cc..ba5d942e4c6a 100644
--- a/tools/perf/Documentation/Makefile
+++ b/tools/perf/Documentation/Makefile
@@ -2,6 +2,10 @@
include ../../scripts/Makefile.include
include ../../scripts/utilities.mak
+ARTICLES =
+# with their own formatting rules.
+SP_ARTICLES =
+
MAN1_TXT= \
$(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \
$(wildcard perf-*.txt)) \
@@ -16,13 +20,6 @@ _MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT))
MAN_XML=$(addprefix $(OUTPUT),$(_MAN_XML))
MAN_HTML=$(addprefix $(OUTPUT),$(_MAN_HTML))
-ARTICLES =
-# with their own formatting rules.
-SP_ARTICLES =
-API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technical/api-index.txt, $(wildcard technical/api-*.txt)))
-SP_ARTICLES += $(API_DOCS)
-SP_ARTICLES += technical/api-index
-
_DOC_HTML = $(_MAN_HTML)
_DOC_HTML+=$(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES))
DOC_HTML=$(addprefix $(OUTPUT),$(_DOC_HTML))
@@ -48,7 +45,7 @@ man5dir=$(mandir)/man5
man7dir=$(mandir)/man7
ASCIIDOC=asciidoc
-ASCIIDOC_EXTRA = --unsafe -f asciidoc.conf
+ASCIIDOC_EXTRA += --unsafe -f asciidoc.conf
ASCIIDOC_HTML = xhtml11
MANPAGE_XSL = manpage-normal.xsl
XMLTO_EXTRA =
@@ -59,7 +56,7 @@ HTML_REF = origin/html
ifdef USE_ASCIIDOCTOR
ASCIIDOC = asciidoctor
-ASCIIDOC_EXTRA = -a compat-mode
+ASCIIDOC_EXTRA += -a compat-mode
ASCIIDOC_EXTRA += -I. -rasciidoctor-extensions
ASCIIDOC_EXTRA += -a mansource="perf" -a manmanual="perf Manual"
ASCIIDOC_HTML = xhtml5
@@ -173,7 +170,7 @@ ifneq ($(V),1)
endif
endif
-all: html man
+all: html man info
html: $(DOC_HTML)
@@ -186,8 +183,6 @@ man7: $(DOC_MAN7)
info: $(OUTPUT)perf.info $(OUTPUT)perfman.info
-pdf: $(OUTPUT)user-manual.pdf
-
install: install-man
check-man-tools:
@@ -225,11 +220,6 @@ install-info: info
echo "No directory found in $(DESTDIR)$(infodir)" >&2 ; \
fi
-install-pdf: pdf
- $(call QUIET_INSTALL, Documentation-pdf) \
- $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir); \
- $(INSTALL) -m 644 $(OUTPUT)user-manual.pdf $(DESTDIR)$(pdfdir)
-
#install-html: html
# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
@@ -244,33 +234,13 @@ $(OUTPUT)doc.dep : $(wildcard *.txt) build-docdep.perl
-include $(OUTPUT)doc.dep
-_cmds_txt = cmds-ancillaryinterrogators.txt \
- cmds-ancillarymanipulators.txt \
- cmds-mainporcelain.txt \
- cmds-plumbinginterrogators.txt \
- cmds-plumbingmanipulators.txt \
- cmds-synchingrepositories.txt \
- cmds-synchelpers.txt \
- cmds-purehelpers.txt \
- cmds-foreignscminterface.txt
-cmds_txt=$(addprefix $(OUTPUT),$(_cmds_txt))
-
-$(cmds_txt): $(OUTPUT)cmd-list.made
-
-$(OUTPUT)cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT)
- $(QUIET_GEN)$(RM) $@ && \
- $(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \
- date >$@
-
CLEAN_FILES = \
$(MAN_XML) $(addsuffix +,$(MAN_XML)) \
$(MAN_HTML) $(addsuffix +,$(MAN_HTML)) \
$(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7) \
$(OUTPUT)*.texi $(OUTPUT)*.texi+ $(OUTPUT)*.texi++ \
- $(OUTPUT)perf.info $(OUTPUT)perfman.info \
- $(OUTPUT)howto-index.txt $(OUTPUT)howto/*.html $(OUTPUT)doc.dep \
- $(OUTPUT)technical/api-*.html $(OUTPUT)technical/api-index.txt \
- $(cmds_txt) $(OUTPUT)*.made
+ $(OUTPUT)perf.info $(OUTPUT)perfman.info $(OUTPUT)doc.dep \
+ $(OUTPUT)technical/api-*.html $(OUTPUT)technical/api-index.txt
clean:
$(call QUIET_CLEAN, Documentation) $(RM) $(CLEAN_FILES)
@@ -297,31 +267,13 @@ $(OUTPUT)%.xml : %.txt
$(ASCIIDOC) -b docbook -d manpage \
$(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) \
-aperf_date=$(shell git log -1 --pretty="format:%cd" \
- --date=short $<) \
+ --date=short --no-show-signature $<) \
-o $@+ $< && \
mv $@+ $@
XSLT = docbook.xsl
XSLTOPTS = --xinclude --stringparam html.stylesheet docbook-xsl.css
-$(OUTPUT)user-manual.html: $(OUTPUT)user-manual.xml
- $(QUIET_XSLTPROC)xsltproc $(XSLTOPTS) -o $@ $(XSLT) $<
-
-$(OUTPUT)perf.info: $(OUTPUT)user-manual.texi
- $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ $(OUTPUT)user-manual.texi
-
-$(OUTPUT)user-manual.texi: $(OUTPUT)user-manual.xml
- $(QUIET_DB2TEXI)$(RM) $@+ $@ && \
- $(DOCBOOK2X_TEXI) $(OUTPUT)user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \
- $(PERL_PATH) fix-texi.perl <$@++ >$@+ && \
- rm $@++ && \
- mv $@+ $@
-
-$(OUTPUT)user-manual.pdf: $(OUTPUT)user-manual.xml
- $(QUIET_DBLATEX)$(RM) $@+ $@ && \
- $(DBLATEX) -o $@+ -p /etc/asciidoc/dblatex/asciidoc-dblatex.xsl -s /etc/asciidoc/dblatex/asciidoc-dblatex.sty $< && \
- mv $@+ $@
-
$(OUTPUT)perfman.texi: $(MAN_XML) cat-texi.perl
$(QUIET_DB2TEXI)$(RM) $@+ $@ && \
($(foreach xml,$(MAN_XML),$(DOCBOOK2X_TEXI) --encoding=UTF-8 \
@@ -331,28 +283,18 @@ $(OUTPUT)perfman.texi: $(MAN_XML) cat-texi.perl
mv $@+ $@
$(OUTPUT)perfman.info: $(OUTPUT)perfman.texi
- $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi
+ $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate -o $@ $*.texi
$(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml
$(QUIET_DB2TEXI)$(RM) $@+ $@ && \
$(DOCBOOK2X_TEXI) --to-stdout $*.xml >$@+ && \
mv $@+ $@
-howto-index.txt: howto-index.sh $(wildcard howto/*.txt)
- $(QUIET_GEN)$(RM) $@+ $@ && \
- '$(SHELL_PATH_SQ)' ./howto-index.sh $(wildcard howto/*.txt) >$@+ && \
- mv $@+ $@
-
$(patsubst %,%.html,$(ARTICLES)) : %.html : %.txt
$(QUIET_ASCIIDOC)$(ASCIIDOC) -b $(ASCIIDOC_HTML) $*.txt
WEBDOC_DEST = /pub/software/tools/perf/docs
-$(patsubst %.txt,%.html,$(wildcard howto/*.txt)): %.html : %.txt
- $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
- sed -e '1,/^$$/d' $< | $(ASCIIDOC) -b $(ASCIIDOC_HTML) - >$@+ && \
- mv $@+ $@
-
# UNIMPLEMENTED
#install-webdoc : html
# '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(WEBDOC_DEST)
diff --git a/tools/perf/Documentation/arm-coresight.txt b/tools/perf/Documentation/arm-coresight.txt
new file mode 100644
index 000000000000..c117fc50a2a9
--- /dev/null
+++ b/tools/perf/Documentation/arm-coresight.txt
@@ -0,0 +1,5 @@
+Arm CoreSight Support
+=====================
+
+For full documentation, see Documentation/trace/coresight/coresight-perf.rst
+in the kernel tree.
diff --git a/tools/perf/Documentation/build-docdep.perl b/tools/perf/Documentation/build-docdep.perl
new file mode 100755
index 000000000000..ba4205e0302a
--- /dev/null
+++ b/tools/perf/Documentation/build-docdep.perl
@@ -0,0 +1,46 @@
+#!/usr/bin/perl
+
+my %include = ();
+my %included = ();
+
+for my $text (<*.txt>) {
+ open I, '<', $text || die "cannot read: $text";
+ while (<I>) {
+ if (/^include::/) {
+ chomp;
+ s/^include::\s*//;
+ s/\[\]//;
+ $include{$text}{$_} = 1;
+ $included{$_} = 1;
+ }
+ }
+ close I;
+}
+
+# Do we care about chained includes???
+my $changed = 1;
+while ($changed) {
+ $changed = 0;
+ while (my ($text, $included) = each %include) {
+ for my $i (keys %$included) {
+ # $text has include::$i; if $i includes $j
+ # $text indirectly includes $j.
+ if (exists $include{$i}) {
+ for my $j (keys %{$include{$i}}) {
+ if (!exists $include{$text}{$j}) {
+ $include{$text}{$j} = 1;
+ $included{$j} = 1;
+ $changed = 1;
+ }
+ }
+ }
+ }
+ }
+}
+
+while (my ($text, $included) = each %include) {
+ if (! exists $included{$text} &&
+ (my $base = $text) =~ s/\.txt$//) {
+ print "$base.html $base.xml : ", join(" ", keys %$included), "\n";
+ }
+}
diff --git a/tools/perf/Documentation/cat-texi.perl b/tools/perf/Documentation/cat-texi.perl
new file mode 100755
index 000000000000..14d2f8341517
--- /dev/null
+++ b/tools/perf/Documentation/cat-texi.perl
@@ -0,0 +1,46 @@
+#!/usr/bin/perl -w
+
+use strict;
+use warnings;
+
+my @menu = ();
+my $output = $ARGV[0];
+
+open my $tmp, '>', "$output.tmp";
+
+while (<STDIN>) {
+ next if (/^\\input texinfo/../\@node Top/);
+ next if (/^\@bye/ || /^\.ft/);
+ if (s/^\@top (.*)/\@node $1,,,Top/) {
+ push @menu, $1;
+ }
+ s/\(\@pxref\{\[(URLS|REMOTES)\]}\)//;
+ s/\@anchor\{[^{}]*\}//g;
+ print $tmp $_;
+}
+close $tmp;
+
+print '\input texinfo
+@setfilename gitman.info
+@documentencoding UTF-8
+@dircategory Development
+@direntry
+* Git Man Pages: (gitman). Manual pages for Git revision control system
+@end direntry
+@node Top,,, (dir)
+@top Git Manual Pages
+@documentlanguage en
+@menu
+';
+
+for (@menu) {
+ print "* ${_}::\n";
+}
+print "\@end menu\n";
+open $tmp, '<', "$output.tmp";
+while (<$tmp>) {
+ print;
+}
+close $tmp;
+print "\@bye\n";
+unlink "$output.tmp";
diff --git a/tools/perf/Documentation/examples.txt b/tools/perf/Documentation/examples.txt
index a4e392156488..c0d22fbe9201 100644
--- a/tools/perf/Documentation/examples.txt
+++ b/tools/perf/Documentation/examples.txt
@@ -3,7 +3,7 @@
****** perf by examples ******
------------------------------
-[ From an e-mail by Ingo Molnar, http://lkml.org/lkml/2009/8/4/346 ]
+[ From an e-mail by Ingo Molnar, https://lore.kernel.org/lkml/20090804195717.GA5998@elte.hu ]
First, discovery/enumeration of available counters can be done via
diff --git a/tools/perf/Documentation/guest-files.txt b/tools/perf/Documentation/guest-files.txt
new file mode 100644
index 000000000000..8cc0b092f996
--- /dev/null
+++ b/tools/perf/Documentation/guest-files.txt
@@ -0,0 +1,16 @@
+include::guestmount.txt[]
+
+--guestkallsyms=<path>::
+ Guest OS /proc/kallsyms file copy. perf reads it to get guest
+ kernel symbols. Users copy it out from the guest OS.
+
+--guestmodules=<path>::
+ Guest OS /proc/modules file copy. perf reads it to get guest
+ kernel module information. Users copy it out from the guest OS.
+
+--guestvmlinux=<path>::
+ Guest OS kernel vmlinux.
+
+--guest-code::
+ Indicate that guest code can be found in the hypervisor process,
+ which is a common case for KVM test programs.
diff --git a/tools/perf/Documentation/guestmount.txt b/tools/perf/Documentation/guestmount.txt
new file mode 100644
index 000000000000..6edf12363add
--- /dev/null
+++ b/tools/perf/Documentation/guestmount.txt
@@ -0,0 +1,11 @@
+--guestmount=<path>::
+ Guest OS root file system mount directory. Users mount guest OS
+	root directories under <path> with a specific filesystem access method,
+	typically sshfs.
+	For example, start 2 guest OSes, one with pid 8888 and the other with 9999:
+[verse]
+ $ mkdir \~/guestmount
+ $ cd \~/guestmount
+ $ sshfs -o allow_other,direct_io -p 5551 localhost:/ 8888/
+ $ sshfs -o allow_other,direct_io -p 5552 localhost:/ 9999/
+ $ perf {GMEXAMPLECMD} --guestmount=~/guestmount {GMEXAMPLESUBCMD}
diff --git a/tools/perf/Documentation/intel-hybrid.txt b/tools/perf/Documentation/intel-hybrid.txt
new file mode 100644
index 000000000000..e7a776ad25d7
--- /dev/null
+++ b/tools/perf/Documentation/intel-hybrid.txt
@@ -0,0 +1,204 @@
+Intel hybrid support
+--------------------
+Support for Intel hybrid events within perf tools.
+
+Some Intel platforms, such as AlderLake, are hybrid platforms consisting of
+atom cpus and core cpus. Each cpu has a dedicated event list. Some events are
+available only on the core cpus, some only on the atom cpus, and some on both.
+
+Kernel exports two new cpu pmus via sysfs:
+/sys/devices/cpu_core
+/sys/devices/cpu_atom
+
+The 'cpus' files are created under the directories. For example,
+
+cat /sys/devices/cpu_core/cpus
+0-15
+
+cat /sys/devices/cpu_atom/cpus
+16-23
+
+It indicates cpu0-cpu15 are core cpus and cpu16-cpu23 are atom cpus.
+
+As before, use perf-list to list the symbolic event.
+
+perf list
+
+inst_retired.any
+ [Fixed Counter: Counts the number of instructions retired. Unit: cpu_atom]
+inst_retired.any
+ [Number of instructions retired. Fixed Counter - architectural event. Unit: cpu_core]
+
+The 'Unit: xxx' is added to the brief description to indicate which pmu
+the event belongs to. The same event name can be supported on different
+pmus.
+
+Enable hybrid event with a specific pmu
+
+To enable a core-only event or an atom-only event, the following syntax is supported:
+
+ cpu_core/<event name>/
+or
+ cpu_atom/<event name>/
+
+For example, count the 'cycles' event on core cpus.
+
+ perf stat -e cpu_core/cycles/
+
+Create two events for one hardware event automatically
+
+When creating an event that is available on both atom and core, two events
+are created automatically: one for atom, the other for core. Most hardware
+events and cache events are available on both cpu_core and cpu_atom.
+
+Hardware events have pre-defined configs (e.g. 0 for cycles). But on a
+hybrid platform, the kernel needs to know which pmu the event comes from
+(atom or core). The original perf event type PERF_TYPE_HARDWARE cannot carry
+pmu information, so this type is extended to be a PMU-aware type. The PMU
+type ID is stored in attr.config[63:32].
+
+PMU type ID is retrieved from sysfs.
+/sys/devices/cpu_atom/type
+/sys/devices/cpu_core/type
+
+The new attr.config layout for PERF_TYPE_HARDWARE:
+
+PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
+ AA: hardware event ID
+ EEEEEEEE: PMU type ID
+
+Cache events are similar. The type PERF_TYPE_HW_CACHE is extended to be a
+PMU-aware type. The PMU type ID is stored in attr.config[63:32].
+
+The new attr.config layout for PERF_TYPE_HW_CACHE:
+
+PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
+ BB: hardware cache ID
+ CC: hardware cache op ID
+ DD: hardware cache op result ID
+ EEEEEEEE: PMU type ID
+
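+As an illustration only (not part of the tool itself), the encoding described
+above can be reproduced with a small C sketch that reads the PMU type ID from
+sysfs and shifts it into the upper 32 bits of attr.config; the helper name and
+path below are just for the example:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  /* Read e.g. /sys/devices/cpu_core/type and combine it with a hardware
+   * event ID (0 for cycles) to form attr.config for PERF_TYPE_HARDWARE. */
+  static uint64_t hybrid_hw_config(const char *type_path, uint64_t hw_event_id)
+  {
+      unsigned int pmu_type = 0;
+      FILE *f = fopen(type_path, "r");
+
+      if (f) {
+          if (fscanf(f, "%u", &pmu_type) != 1)
+              pmu_type = 0;
+          fclose(f);
+      }
+      return ((uint64_t)pmu_type << 32) | hw_event_id;
+  }
+
+  int main(void)
+  {
+      /* With a cpu_core type of 4 this prints 0x400000000, matching the
+       * perf_event_attr dumps shown below. */
+      printf("0x%llx\n", (unsigned long long)
+             hybrid_hw_config("/sys/devices/cpu_core/type", 0));
+      return 0;
+  }
+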
+When enabling a hardware event without a specified pmu, such as
+'perf stat -e cycles -a' (system-wide in this example), two events
+are created automatically.
+
+ ------------------------------------------------------------
+ perf_event_attr:
+ size 120
+ config 0x400000000
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ inherit 1
+ exclude_guest 1
+ ------------------------------------------------------------
+
+and
+
+ ------------------------------------------------------------
+ perf_event_attr:
+ size 120
+ config 0x800000000
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ inherit 1
+ exclude_guest 1
+ ------------------------------------------------------------
+
+type 0 is PERF_TYPE_HARDWARE.
+The 0x4 in 0x400000000 indicates the cpu_core pmu.
+The 0x8 in 0x800000000 indicates the cpu_atom pmu (the atom pmu type id is
+not fixed; it is assigned dynamically).
+
+The kernel creates 'cycles' (0x400000000) on cpu0-cpu15 (core cpus),
+and creates 'cycles' (0x800000000) on cpu16-cpu23 (atom cpus).
+
+The perf-stat result displays two events:
+
+ Performance counter stats for 'system wide':
+
+ 6,744,979 cpu_core/cycles/
+ 1,965,552 cpu_atom/cycles/
+
+The first 'cycles' is the core event, the second 'cycles' is the atom event.
+
+Thread mode example:
+
+perf-stat reports the scaled counts for hybrid events, with a percentage
+displayed. The percentage is the event's running time / enabling time.
+
+For example, 'triad_loop' runs on cpu16 (an atom core); the scaled value
+for core cycles is 160,444,092 and the percentage is 0.47%.
+
+perf stat -e cycles \-- taskset -c 16 ./triad_loop
+
+As before, two events are created.
+
+------------------------------------------------------------
+perf_event_attr:
+ size 120
+ config 0x400000000
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ inherit 1
+ enable_on_exec 1
+ exclude_guest 1
+------------------------------------------------------------
+
+and
+
+------------------------------------------------------------
+perf_event_attr:
+ size 120
+ config 0x800000000
+ sample_type IDENTIFIER
+ read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING
+ disabled 1
+ inherit 1
+ enable_on_exec 1
+ exclude_guest 1
+------------------------------------------------------------
+
+ Performance counter stats for 'taskset -c 16 ./triad_loop':
+
+ 233,066,666 cpu_core/cycles/ (0.43%)
+ 604,097,080 cpu_atom/cycles/ (99.57%)
+
+perf-record:
+
+If no '-e' is specified for perf record on a hybrid platform, two
+default 'cycles' events are created and added to the event list: one
+for core, the other for atom.
+
+perf-stat:
+
+If no '-e' is specified for perf stat on a hybrid platform, then besides
+the software events, the following events are created and added to the
+event list in order.
+
+cpu_core/cycles/,
+cpu_atom/cycles/,
+cpu_core/instructions/,
+cpu_atom/instructions/,
+cpu_core/branches/,
+cpu_atom/branches/,
+cpu_core/branch-misses/,
+cpu_atom/branch-misses/
+
+Of course, both perf-stat and perf-record support enabling a
+hybrid event with a specific pmu.
+
+e.g.
+perf stat -e cpu_core/cycles/
+perf stat -e cpu_atom/cycles/
+perf stat -e cpu_core/r1a/
+perf stat -e cpu_atom/L1-icache-loads/
+perf stat -e cpu_core/cycles/,cpu_atom/instructions/
+perf stat -e '{cpu_core/cycles/,cpu_core/instructions/}'
+
+But '{cpu_core/cycles/,cpu_atom/instructions/}' will return a
+warning and disable grouping, because the pmus in the group do
+not match (cpu_core vs. cpu_atom).
diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt
index 82ff7dad40c2..a97f95825b14 100644
--- a/tools/perf/Documentation/itrace.txt
+++ b/tools/perf/Documentation/itrace.txt
@@ -1,19 +1,32 @@
i synthesize instructions events
- b synthesize branches events
+ y synthesize cycles events
+ b synthesize branches events (branch misses for Arm SPE)
c synthesize branches events (calls only)
r synthesize branches events (returns only)
x synthesize transactions events
w synthesize ptwrite events
- p synthesize power events
+ p synthesize power events (incl. PSB events for Intel PT)
o synthesize other events recorded due to the use
of aux-output (refer to perf record)
+ I synthesize interrupt or similar (asynchronous) events
+ (e.g. Intel PT Event Trace)
e synthesize error events
d create a debug log
+ f synthesize first level cache events
+ m synthesize last level cache events
+ M synthesize memory events
+ t synthesize TLB events
+ a synthesize remote access events
g synthesize a call chain (use with i or x)
+ G synthesize a call chain on existing event records
l synthesize last branch entries (use with i or x)
+ L synthesize last branch entries on existing event records
s skip initial number of events
+ q quicker (less detailed) decoding
+ A approximate IPC
+ Z prefer to ignore timestamps (so-called "timeless" decoding)
- The default is all events i.e. the same as --itrace=ibxwpe,
+ The default is all events i.e. the same as --itrace=iybxwpe,
except for perf script where it is --itrace=ce
In addition, the period (default 100000, except for perf script where it is 1)
@@ -31,9 +44,28 @@
Also the number of last branch entries (default 64, max. 1024) for
instructions or transactions events can be specified.
+ Similar to options g and l, size may also be specified for options G and L.
+ On x86, note that G and L work poorly when data has been recorded with
+	large PEBS. Refer to the linkperf:perf-intel-pt[1] man page for details.
+
It is also possible to skip events generated (instructions, branches, transactions,
ptwrite, power) at the beginning. This is useful to ignore initialization code.
--itrace=i0nss1000000
skips the first million instructions.
+
+ The 'e' option may be followed by flags which affect what errors will or
+ will not be reported. Each flag must be preceded by either '+' or '-'.
+ The flags are:
+ o overflow
+ l trace data lost
+
+ If supported, the 'd' option may be followed by flags which affect what
+ debug messages will or will not be logged. Each flag must be preceded
+ by either '+' or '-'. The flags are:
+ a all perf events
+ e output only on errors (size configurable - see linkperf:perf-config[1])
+ o output to stdout
+
+ If supported, the 'q' option may be repeated to increase the effect.
diff --git a/tools/perf/Documentation/jitdump-specification.txt b/tools/perf/Documentation/jitdump-specification.txt
index 52152d156ad9..79936355d819 100644
--- a/tools/perf/Documentation/jitdump-specification.txt
+++ b/tools/perf/Documentation/jitdump-specification.txt
@@ -164,7 +164,7 @@ const char unwinding_data[n]: an array of unwinding data, consisting of the EH F
The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
-The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt
index 1b5042f134a8..fe168e8165c8 100644
--- a/tools/perf/Documentation/perf-annotate.txt
+++ b/tools/perf/Documentation/perf-annotate.txt
@@ -41,7 +41,7 @@ OPTIONS
-q::
--quiet::
- Do not show any message. (Suppress -v)
+ Do not show any warnings or messages. (Suppress -v)
-n::
--show-nr-samples::
@@ -58,6 +58,13 @@ OPTIONS
--ignore-vmlinux::
Ignore vmlinux files.
+--itrace::
+ Options for decoding instruction tracing data. The options are:
+
+include::itrace.txt[]
+
+ To disable decoding entirely, use --no-itrace.
+
-m::
--modules::
Load module symbols. WARNING: use only with -k and LIVE kernel.
@@ -109,6 +116,9 @@ OPTIONS
-M::
--disassembler-style=:: Set disassembler style for objdump.
+--addr2line=<path>::
+ Path to addr2line binary.
+
--objdump=<path>::
Path to objdump binary.
@@ -124,6 +134,13 @@ OPTIONS
--group::
Show event group information together
+--demangle::
+ Demangle symbol names to human readable form. It's enabled by default,
+ disable with --no-demangle.
+
+--demangle-kernel::
+ Demangle kernel symbol names to human readable form (for C++ kernels).
+
--percent-type::
Set annotation percent type from following choices:
global-period, local-period, global-hits, local-hits
@@ -133,6 +150,11 @@ OPTIONS
The period/hits keywords set the base the percentage is computed
on - the samples period or the number of samples (hits).
+--percent-limit::
+ Do not show functions which have an overhead under that percent on
+ stdio or stdio2 (Default: 0). Note that this is about selection of
+ functions to display, not about lines within the function.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1]
diff --git a/tools/perf/Documentation/perf-arm-spe.txt b/tools/perf/Documentation/perf-arm-spe.txt
new file mode 100644
index 000000000000..bf03222e9a68
--- /dev/null
+++ b/tools/perf/Documentation/perf-arm-spe.txt
@@ -0,0 +1,218 @@
+perf-arm-spe(1)
+================
+
+NAME
+----
+perf-arm-spe - Support for Arm Statistical Profiling Extension within Perf tools
+
+SYNOPSIS
+--------
+[verse]
+'perf record' -e arm_spe//
+
+DESCRIPTION
+-----------
+
+The SPE (Statistical Profiling Extension) feature provides accurate attribution of latencies and
+events down to individual instructions. Rather than being interrupt-driven, it picks an
+instruction to sample and then captures data for it during execution. Data includes execution time
+in cycles. For loads and stores it also includes data address, cache miss events, and data origin.
+
+The sampling has 5 stages:
+
+ 1. Choose an operation
+ 2. Collect data about the operation
+ 3. Optionally discard the record based on a filter
+ 4. Write the record to memory
+ 5. Interrupt when the buffer is full
+
+Choose an operation
+~~~~~~~~~~~~~~~~~~~
+
+This is chosen from a sample population; for SPE this is an IMPLEMENTATION DEFINED choice of either
+all architectural instructions or all micro-ops. Sampling happens at a programmable interval. The
+architecture provides a mechanism for the SPE driver to infer the minimum interval at which it should
+sample. This minimum interval is used by the driver if no interval is specified. A pseudo-random
+perturbation is also added to the sampling interval by default.
+
+Collect data about the operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Program counter, PMU events, timings and data addresses related to the operation are recorded.
+Sampling ensures that only one sampled operation is in flight at a time.
+
+Optionally discard the record based on a filter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Based on programmable criteria, choose whether to keep the record or discard it. If the record is
+discarded then the flow stops here for this sample.
+
+Write the record to memory
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The record is appended to a memory buffer.
+
+Interrupt when the buffer is full
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When the buffer fills, an interrupt is sent and the driver signals Perf to collect the records.
+Perf saves the raw data in the perf.data file.
+
+Opening the file
+----------------
+
+Up until this point no decoding of the SPE data was done by either the kernel or Perf. Only when the
+recorded file is opened with 'perf report' or 'perf script' does the decoding happen. When decoding
+the data, Perf generates "synthetic samples" as if these were generated at the time of the
+recording. These samples are the same as if normal sampling was done by Perf without using SPE,
+although they may have more attributes associated with them. For example a normal sample may have
+just the instruction pointer, but an SPE sample can have data addresses and latency attributes.
+
+Why Sampling?
+-------------
+
+ - Sampling, rather than tracing, cuts down the profiling problem to something more manageable for
+ hardware. Only one sampled operation is in flight at a time.
+
+ - Allows precise attribution data, including: Full PC of instruction, data virtual and physical
+ addresses.
+
+ - Allows correlation between an instruction and events, such as TLB and cache miss. (Data source
+ indicates which particular cache was hit, but the meaning is implementation defined because
+ different implementations can have different cache configurations.)
+
+However, SPE does not provide any call-graph information, and relies on statistical methods.
+
+Collisions
+----------
+
+When an operation is sampled while a previous sampled operation has not finished, a collision
+occurs. The new sample is dropped. Collisions affect the integrity of the data, so the sample rate
+should be set to avoid collisions.
+
+The 'sample_collision' PMU event can be used to determine the number of lost samples. However, this
+count is based on collisions _before_ filtering occurs, so it cannot be used as an exact count of
+dropped samples that would have made it through the filter; it can still serve as a rough
+guide.
+
+The effect of microarchitectural sampling
+-----------------------------------------
+
+If an implementation samples micro-operations instead of instructions, the results of sampling must
+be weighted accordingly.
+
+For example, if a given instruction A is always converted into two micro-operations, A0 and A1, it
+becomes twice as likely to appear in the sample population.
+
+The coarse effect of conversions, and, if applicable, sampling of speculative operations, can be
+estimated from the 'sample_pop' and 'inst_retired' PMU events.
+
+Kernel Requirements
+-------------------
+
+The ARM_SPE_PMU config must be set to build as either a module or statically.
+
+Depending on the CPU model, the kernel may need to be booted with page table isolation disabled
+(kpti=off). If KPTI needs to be disabled but is not, recording will fail with the console message
+"profiling buffer inaccessible. Try passing 'kpti=off' on the kernel command line".
+
+Capturing SPE with perf command-line tools
+------------------------------------------
+
+You can record a session with SPE samples:
+
+ perf record -e arm_spe// -- ./mybench
+
+The sample period is set from the -c option, and because the minimum interval is used by default
+it's recommended to set this to a higher value. The value is written to PMSIRR.INTERVAL.
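+
+For example, to request a sampling interval of roughly 10000 operations (the
+value here is only illustrative):
+
+  perf record -e arm_spe// -c 10000 -- ./mybench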
+
+Config parameters
+~~~~~~~~~~~~~~~~~
+
+These are placed between the // in the event and comma separated. For example '-e
+arm_spe/load_filter=1,min_latency=10/'
+
+ branch_filter=1 - collect branches only (PMSFCR.B)
+ event_filter=<mask> - filter on specific events (PMSEVFR) - see bitfield description below
+ jitter=1 - use jitter to avoid resonance when sampling (PMSIRR.RND)
+ load_filter=1 - collect loads only (PMSFCR.LD)
+ min_latency=<n> - collect only samples with this latency or higher* (PMSLATFR)
+ pa_enable=1 - collect physical address (as well as VA) of loads/stores (PMSCR.PA) - requires privilege
+ pct_enable=1 - collect physical timestamp instead of virtual timestamp (PMSCR.PCT) - requires privilege
+ store_filter=1 - collect stores only (PMSFCR.ST)
+ ts_enable=1 - enable timestamping with value of generic timer (PMSCR.TS)
+
++++*+++ Latency is the total latency from the point at which sampling started on that instruction, rather
+than only the execution latency.
+
+Only some events can be filtered on; these include:
+
+ bit 1 - instruction retired (i.e. omit speculative instructions)
+ bit 3 - L1D refill
+ bit 5 - TLB refill
+ bit 7 - mispredict
+ bit 11 - misaligned access
+
+So to sample just retired instructions:
+
+ perf record -e arm_spe/event_filter=2/ -- ./mybench
+
+or just mispredicted branches:
+
+ perf record -e arm_spe/event_filter=0x80/ -- ./mybench
+
+Viewing the data
+~~~~~~~~~~~~~~~~~
+
+By default perf report and perf script will assign samples to separate groups depending on the
+attributes/events of the SPE record. Because instructions can have multiple events associated with
+them, the samples in these groups are not necessarily unique. For example perf report shows these
+groups:
+
+ Available samples
+ 0 arm_spe//
+ 0 dummy:u
+ 21 l1d-miss
+ 897 l1d-access
+ 5 llc-miss
+ 7 llc-access
+ 2 tlb-miss
+ 1K tlb-access
+ 36 branch-miss
+ 0 remote-access
+ 900 memory
+
+The arm_spe// and dummy:u events are implementation details and are expected to be empty.
+
+To get a full list of unique samples that are not sorted into groups, set the itrace option to
+generate 'instruction' samples. The period option is also taken into account, so set it to 1
+instruction unless you want to further downsample the already sampled SPE data:
+
+ perf report --itrace=i1i
+
+Memory access details are also stored on the samples and this can be viewed with:
+
+ perf report --mem-mode
+
+Common errors
+~~~~~~~~~~~~~
+
+ - "Cannot find PMU `arm_spe'. Missing kernel support?"
+
+ Module not built or loaded, KPTI not disabled (see above), or running on a VM
+
+ - "Arm SPE CONTEXT packets not found in the traces."
+
+ Root privilege is required to collect context packets. But these only increase the accuracy of
+ assigning PIDs to kernel samples. For userspace sampling this can be ignored.
+
+ - Excessively large perf.data file size
+
+ Increase sampling interval (see above)
+
+
+SEE ALSO
+--------
+
+linkperf:perf-record[1], linkperf:perf-script[1], linkperf:perf-report[1],
+linkperf:perf-inject[1]
diff --git a/tools/perf/Documentation/perf-bench.txt b/tools/perf/Documentation/perf-bench.txt
index 0921a3c67381..f04f0eaded98 100644
--- a/tools/perf/Documentation/perf-bench.txt
+++ b/tools/perf/Documentation/perf-bench.txt
@@ -18,7 +18,7 @@ COMMON OPTIONS
--------------
-r::
--repeat=::
-Specify amount of times to repeat the run (default 10).
+Specify number of times to repeat the run (default 10).
-f::
--format=::
@@ -49,6 +49,9 @@ SUBSYSTEM
'sched'::
Scheduler and IPC mechanisms.
+'syscall'::
+ System call performance (throughput).
+
'mem'::
Memory access performance.
@@ -61,6 +64,9 @@ SUBSYSTEM
'epoll'::
Eventpoll (epoll) stressing benchmarks.
+'internals'::
+ Benchmark internal perf functionality.
+
'all'::
All benchmark subsystems.
@@ -134,6 +140,14 @@ Example of *pipe*
59004 ops/sec
---------------------
+SUITES FOR 'syscall'
+~~~~~~~~~~~~~~~~~~
+*basic*::
+Suite for evaluating performance of core system call throughput (both usecs/op and ops/sec metrics).
+This uses a single thread simply doing getppid(2), which is a simple syscall where the result is not
+cached by glibc.
+
+
SUITES FOR 'mem'
~~~~~~~~~~~~~~~~
*memcpy*::
@@ -214,6 +228,11 @@ Suite for evaluating concurrent epoll_wait calls.
*ctl*::
Suite for evaluating multiple epoll_ctl calls.
+SUITES FOR 'internals'
+~~~~~~~~~~~~~~~~~~~~~~
+*synthesize*::
+Suite for evaluating perf's event synthesis performance.
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-buildid-cache.txt b/tools/perf/Documentation/perf-buildid-cache.txt
index f6de0952ff3c..7e44b419d301 100644
--- a/tools/perf/Documentation/perf-buildid-cache.txt
+++ b/tools/perf/Documentation/perf-buildid-cache.txt
@@ -57,7 +57,7 @@ OPTIONS
-u::
--update=::
Update specified file of the cache. Note that this doesn't remove
- older entires since those may be still needed for annotating old
+ older entries since those may be still needed for annotating old
(or remote) perf.data. Only if there is already a cache which has
exactly same build-id, that is replaced by new one. It can be used
to update kallsyms and kernel dso to vmlinux in order to support
@@ -74,6 +74,15 @@ OPTIONS
used when creating a uprobe for a process that resides in a
different mount namespace from the perf(1) utility.
+--debuginfod[=URLs]::
+	Specify the debuginfod URL to be used when retrieving perf.data binaries;
+	it follows the same syntax as the DEBUGINFOD_URLS variable, like:
+
+	  buildid-cache.debuginfod=http://192.168.122.174:8002
+
+	If no URLs are specified, the value of the DEBUGINFOD_URLS
+	system environment variable is used.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-buildid-list[1]
diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt
index 25c52efcc7f0..e1e8fdbe06b9 100644
--- a/tools/perf/Documentation/perf-buildid-list.txt
+++ b/tools/perf/Documentation/perf-buildid-list.txt
@@ -33,6 +33,10 @@ OPTIONS
-k::
--kernel::
Show running kernel build id.
+-m::
+--kernel-maps::
+ Show buildid, start/end text address, and path of running kernel and
+ its modules.
-v::
--verbose::
Be more verbose.
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index e6150f21267d..856f0dfb8e5a 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -9,7 +9,7 @@ SYNOPSIS
--------
[verse]
'perf c2c record' [<options>] <command>
-'perf c2c record' [<options>] -- [<record command options>] <command>
+'perf c2c record' [<options>] \-- [<record command options>] <command>
'perf c2c report' [<options>]
DESCRIPTION
@@ -19,9 +19,14 @@ C2C stands for Cache To Cache.
The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows
you to track down the cacheline contentions.
-On x86, the tool is based on load latency and precise store facility events
+On Intel, the tool is based on load latency and precise store facility events
provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling
-with thresholding feature.
+with thresholding feature. On AMD, the tool uses IBS op pmu (due to hardware
+limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses SPE to
+sample load and store operations, therefore hardware and kernel support is
+required. See linkperf:perf-arm-spe[1] for a setup guide. Due to the
+statistical nature of Arm SPE sampling, not every memory operation will be
+sampled.
These events provide:
- memory address of the access
@@ -40,7 +45,7 @@ RECORD OPTIONS
--------------
-e::
--event=::
- Select the PMU event. Use 'perf mem record -e list'
+ Select the PMU event. Use 'perf c2c record -e list'
to list available events.
-v::
@@ -49,7 +54,8 @@ RECORD OPTIONS
-l::
--ldlat::
- Configure mem-loads latency. (x86 only)
+ Configure mem-loads latency. Supported on Intel and Arm64 processors
+ only. Ignored on other archs.
-k::
--all-kernel::
@@ -109,7 +115,26 @@ REPORT OPTIONS
-d::
--display::
- Switch to HITM type (rmt, lcl) to display and sort on. Total HITMs as default.
+ Switch to HITM type (rmt, lcl) or peer snooping type (peer) to display
+ and sort on. Total HITMs (tot) as default, except Arm64 uses peer mode
+ as default.
+
+--stitch-lbr::
+ Show callgraph with stitched LBRs, which may have more complete
+ callgraph. The perf.data file must have been obtained using
+ perf c2c record --call-graph lbr.
+ Disabled by default. In common cases with call stack overflows,
+ it can recreate better call stacks than the default lbr call stack
+ output. But this approach is not foolproof. There can be cases
+ where it creates incorrect call stacks from incorrect matches.
+	Known limitations include exception handling: for example,
+	setjmp/longjmp will result in calls/returns that do not match.
+
+--double-cl::
+ Group the detection of shared cacheline events into double cacheline
+ granularity. Some architectures have an Adjacent Cacheline Prefetch
+ feature, which causes cacheline sharing to behave like the cacheline
+ size is doubled.
C2C RECORD
----------
@@ -122,11 +147,15 @@ Following perf record options are configured by default:
-W,-d,--phys-data,--sample-cpu
Unless specified otherwise with '-e' option, following events are monitored by
-default on x86:
+default on Intel:
cpu/mem-loads,ldlat=30/P
cpu/mem-stores/P
+following on AMD:
+
+ ibs_op//
+
and following on PowerPC:
cpu/mem-loads/
@@ -163,42 +192,57 @@ For each cacheline in the 1) list we display following data:
Cacheline
- cacheline address (hex number)
- Total records
- - sum of all cachelines accesses
-
- Rmt/Lcl Hitm
+ Rmt/Lcl Hitm (Display with HITM types)
- cacheline percentage of all Remote/Local HITM accesses
- LLC Load Hitm - Total, Lcl, Rmt
+ Peer Snoop (Display with peer type)
+ - cacheline percentage of all peer accesses
+
+ LLC Load Hitm - Total, LclHitm, RmtHitm (For display with HITM types)
- count of Total/Local/Remote load HITMs
- Store Reference - Total, L1Hit, L1Miss
- Total - all store accesses
- L1Hit - store accesses that hit L1
- L1Hit - store accesses that missed L1
+ Load Peer - Total, Local, Remote (For display with peer type)
+ - count of Total/Local/Remote load from peer cache or DRAM
- Load Dram
- - count of local and remote DRAM accesses
-
- LLC Ld Miss
- - count of all accesses that missed LLC
+ Total records
+ - sum of all cachelines accesses
- Total Loads
+ Total loads
- sum of all load accesses
+ Total stores
+ - sum of all store accesses
+
+ Store Reference - L1Hit, L1Miss, N/A
+ L1Hit - store accesses that hit L1
+ L1Miss - store accesses that missed L1
+	N/A - store accesses where the memory level is not available
+
Core Load Hit - FB, L1, L2
- count of load hits in FB (Fill Buffer), L1 and L2 cache
- LLC Load Hit - Llc, Rmt
- - count of LLC and Remote load hits
+ LLC Load Hit - LlcHit, LclHitm
+ - count of LLC load accesses, includes LLC hits and LLC HITMs
+
+ RMT Load Hit - RmtHit, RmtHitm
+	- count of remote load accesses, including remote hits and remote HITMs;
+	on Arm Neoverse cores, RmtHit is used to account for remote accesses,
+	including remote DRAM or any upper cache level in the remote node
+
+ Load Dram - Lcl, Rmt
+ - count of local and remote DRAM accesses
For each offset in the 2) list we display following data:
- HITM - Rmt, Lcl
+ HITM - Rmt, Lcl (Display with HITM types)
- % of Remote/Local HITM accesses for given offset within cacheline
- Store Refs - L1 Hit, L1 Miss
- - % of store accesses that hit/missed L1 for given offset within cacheline
+ Peer Snoop - Rmt, Lcl (Display with peer type)
+ - % of Remote/Local peer accesses for given offset within cacheline
+
+ Store Refs - L1 Hit, L1 Miss, N/A
+	- % of store accesses that hit L1, missed L1, or N/A (not available) memory
+	level for the given offset within the cacheline
Data address - Offset
- offset address
@@ -212,9 +256,12 @@ For each offset in the 2) list we display following data:
Code address
- code address responsible for the accesses
- cycles - rmt hitm, lcl hitm, load
+ cycles - rmt hitm, lcl hitm, load (Display with HITM types)
- sum of cycles for given accesses - Remote/Local HITM and generic load
+ cycles - rmt peer, lcl peer, load (Display with peer type)
+ - sum of cycles for given accesses - Remote/Local peer load and generic load
+
cpu cnt
- number of cpus that participated on the access
@@ -236,7 +283,8 @@ The 'Node' field displays nodes that accesses given cacheline
offset. Its output comes in 3 flavors:
- node IDs separated by ','
- node IDs with stats for each ID, in following format:
- Node{cpus %hitms %stores}
+ Node{cpus %hitms %stores} (Display with HITM types)
+ Node{cpus %peers %stores} (Display with peer type)
- node IDs with list of affected CPUs in following format:
Node{cpu list}
@@ -248,7 +296,7 @@ COALESCE
User can specify how to sort offsets for cacheline.
Following fields are available and governs the final
-output fields set for caheline offsets output:
+output fields set for cacheline offsets output:
tid - coalesced by process TIDs
pid - coalesced by process PIDs
@@ -295,4 +343,4 @@ Check Joe's blog on c2c tool for detailed use case explanation:
SEE ALSO
--------
-linkperf:perf-record[1], linkperf:perf-mem[1]
+linkperf:perf-record[1], linkperf:perf-mem[1], linkperf:perf-arm-spe[1]
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index f16d8a71d3f5..e56ae54805a8 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -123,6 +123,7 @@ Given a $HOME/.perfconfig like this:
queue-size = 0
children = true
group = true
+ skip-empty = true
[llvm]
dump-obj = true
@@ -138,7 +139,7 @@ If you want to add or modify several config items, you can do like
To modify the sort order of report functionality in user config file(i.e. `~/.perfconfig`), do
- % perf config --user report sort-order=srcline
+ % perf config --user report.sort-order=srcline
To change colors of selected line to other foreground and background colors
in system config file (i.e. `$(sysconf)/perfconfig`), do
@@ -238,10 +239,28 @@ buildid.*::
cache location, or to disable it altogether. If you want to disable it,
set buildid.dir to /dev/null. The default is $HOME/.debug
+buildid-cache.*::
+ buildid-cache.debuginfod=URLs
+	Specify debuginfod URLs to be used when retrieving perf.data binaries;
+	it follows the same syntax as the DEBUGINFOD_URLS variable, like:
+
+ buildid-cache.debuginfod=http://192.168.122.174:8002
+
annotate.*::
These are in control of addresses, jump function, source code
in lines of assembly code from a specific program.
+ annotate.addr2line::
+ addr2line binary to use for file names and line numbers.
+
+ annotate.objdump::
+ objdump binary to use for disassembly and annotations.
+
+ annotate.disassembler_style::
+ Use this to change the default disassembler style to some other value
+ supported by binutils, such as "intel", see the '-M' option help in the
+ 'objdump' man page.
+
annotate.hide_src_code::
If a program which is analyzed has source code,
this option lets 'annotate' print a list of assembly code with the source code.
@@ -381,6 +400,12 @@ annotate.*::
This option works with tui, stdio2 browsers.
+ annotate.demangle::
+ Demangle symbol names to human readable form. Default is 'true'.
+
+ annotate.demangle_kernel::
+ Demangle kernel symbol names to human readable form. Default is 'true'.
+
hist.*::
hist.percentage::
This option control the way to calculate overhead of filtered entries -
@@ -513,6 +538,10 @@ report.*::
0.07% 0.00% noploop ld-2.15.so [.] strcmp
0.03% 0.00% noploop [kernel.kallsyms] [k] timerqueue_del
+ report.skip-empty::
+		This option can change the default stat behavior with empty results.
+		If it is set to true, 'perf report --stat' will not show 0 stats.
+
top.*::
top.children::
Same as 'report.children'. So if it is enabled, the output of 'top'
@@ -547,11 +576,12 @@ kmem.*::
record.*::
record.build-id::
- This option can be 'cache', 'no-cache' or 'skip'.
+ This option can be 'cache', 'no-cache', 'skip' or 'mmap'.
'cache' is to post-process data and save/update the binaries into
the build-id cache (in ~/.debug). This is the default.
But if this option is 'no-cache', it will not update the build-id cache.
'skip' skips post-processing and does not update the cache.
+ 'mmap' skips post-processing and reads build-ids from MMAP events.
record.call-graph::
This is identical to 'call-graph.record-mode', except it is
@@ -563,6 +593,15 @@ record.*::
Use 'n' control blocks in asynchronous (Posix AIO) trace writing
mode ('n' default: 1, max: 4).
+ record.debuginfod::
+		Specify the debuginfod URL to be used when caching perf.data binaries;
+		it follows the same syntax as the DEBUGINFOD_URLS variable, like:
+
+		  http://192.168.122.174:8002
+
+		If the value is 'system', the value of the DEBUGINFOD_URLS system
+		environment variable is used.
+
diff.*::
diff.order::
This option sets the number of columns to sort the result.
@@ -614,8 +653,9 @@ trace.*::
ftrace.*::
ftrace.tracer::
- Can be used to select the default tracer. Possible values are
- 'function' and 'function_graph'.
+		Can be used to select the default tracer when neither the -G nor
+		the -F option is specified. Possible values are 'function' and
+		'function_graph'.
llvm.*::
llvm.clang-path::
@@ -667,6 +707,11 @@ convert.*::
Limit the size of ordered_events queue, so we could control
allocation size of perf data files without proper finished
round events.
+stat.*::
+
+ stat.big-num::
+ (boolean) Change the default for "--big-num". To make
+ "--no-big-num" the default, set "stat.big-num=false".
intel-pt.*::
@@ -676,6 +721,12 @@ intel-pt.*::
If set, Intel PT decoder will set the mispred flag on all
branches.
+ intel-pt.max-loops::
+ If set and non-zero, the maximum number of unconditional
+ branches decoded without consuming any trace packets. If
+ the maximum is exceeded there will be a "Never-ending loop"
+ error. The default is 100000.
+
auxtrace.*::
auxtrace.dumpdir::
@@ -684,6 +735,27 @@ auxtrace.*::
If the directory does not exist or has the wrong file type,
the current directory is used.
+itrace.*::
+
+ debug-log-buffer-size::
+		Log size in bytes to output when using the option --itrace=d+e.
+		Refer to the 'itrace' option of linkperf:perf-script[1] or
+		linkperf:perf-report[1]. The default is 16384.
+
+daemon.*::
+
+ daemon.base::
+		Base path for daemon data. All session data is stored under
+		this path.
+
+session-<NAME>.*::
+
+ session-<NAME>.run::
+
+		Defines a new record session for the daemon. The value is the record
+		command line without the 'record' keyword.
+
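+		For example, a minimal configuration with one session (the paths and
+		session name follow the EXAMPLES in linkperf:perf-daemon[1]):
+
+		  [daemon]
+		  base=/opt/perfdata
+
+		  [session-cycles]
+		  run = -m 10M -e cycles --overwrite --switch-output -a
+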
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-daemon.txt b/tools/perf/Documentation/perf-daemon.txt
new file mode 100644
index 000000000000..f558f8e4bc9b
--- /dev/null
+++ b/tools/perf/Documentation/perf-daemon.txt
@@ -0,0 +1,208 @@
+perf-daemon(1)
+==============
+
+
+NAME
+----
+perf-daemon - Run record sessions in the background
+
+
+SYNOPSIS
+--------
+[verse]
+'perf daemon'
+'perf daemon' [<options>]
+'perf daemon start' [<options>]
+'perf daemon stop' [<options>]
+'perf daemon signal' [<options>]
+'perf daemon ping' [<options>]
+
+
+DESCRIPTION
+-----------
+This command runs a simple daemon process that starts and
+monitors configured record sessions.
+
+You can think of 'perf daemon' as a background process with several
+'perf record' child tasks, like:
+
+ # ps axjf
+ ...
+ 1 916507 ... perf daemon start
+ 916507 916508 ... \_ perf record --control=fifo:control,ack -m 10M -e cycles --overwrite --switch-output -a
+ 916507 916509 ... \_ perf record --control=fifo:control,ack -m 20M -e sched:* --overwrite --switch-output -a
+
+Not every 'perf record' session is suitable for running under the daemon.
+The user needs a session that either produces data on request, like the
+flight-recorder sessions in the example above, or a session that is
+configured to produce data periodically, like with a --switch-output
+configuration for time or size.
+
+Each session is started with control setup (with perf record --control
+options).
+
+Sessions are configured through the config file; see the CONFIG FILE and
+EXAMPLES sections.
+
+
+OPTIONS
+-------
+-v::
+--verbose::
+ Be more verbose.
+
+--config=<PATH>::
+ Config file path. If not provided, perf will check system and default
+ locations (/etc/perfconfig, $HOME/.perfconfig).
+
+--base=<PATH>::
+	Base directory path. Each daemon instance runs on top of a
+	base directory. Only one server instance can run on top of a
+	given directory at a time.
+
+All generic options are available also under commands.
+
+
+START COMMAND
+-------------
+The start command creates the daemon process.
+
+-f::
+--foreground::
+ Do not put the process in background.
+
+
+STOP COMMAND
+------------
+The stop command stops all the sessions and the daemon process.
+
+
+SIGNAL COMMAND
+--------------
+The signal command sends a signal to the configured sessions.
+
+--session::
+	Send the signal to a specific session only.
+
+
+PING COMMAND
+------------
+The ping command sends a control ping to the configured sessions.
+
+--session::
+	Send the ping to a specific session only.
+
+
+CONFIG FILE
+-----------
+The daemon is configured within the standard perf config file by
+the following new variables:
+
+daemon.base:
+	Base path for daemon data. All session data is
+	stored under this path.
+
+session-<NAME>.run:
+	Defines a new record session. The value is the record command
+	line without the 'record' keyword.
+
+Each perf record session is run in daemon.base/<NAME> directory.
+
+
+EXAMPLES
+--------
+Example with 2 record sessions:
+
+ # cat ~/.perfconfig
+ [daemon]
+ base=/opt/perfdata
+
+ [session-cycles]
+ run = -m 10M -e cycles --overwrite --switch-output -a
+
+ [session-sched]
+ run = -m 20M -e sched:* --overwrite --switch-output -a
+
+
+Starting the daemon:
+
+ # perf daemon start
+
+
+Check sessions:
+
+ # perf daemon
+ [603349:daemon] base: /opt/perfdata
+ [603350:cycles] perf record -m 10M -e cycles --overwrite --switch-output -a
+ [603351:sched] perf record -m 20M -e sched:* --overwrite --switch-output -a
+
+The first line is the daemon process info with the configured daemon base.
+
+
+Check sessions with more info:
+
+ # perf daemon -v
+ [603349:daemon] base: /opt/perfdata
+ output: /opt/perfdata/output
+ lock: /opt/perfdata/lock
+ up: 1 minutes
+ [603350:cycles] perf record -m 10M -e cycles --overwrite --switch-output -a
+ base: /opt/perfdata/session-cycles
+ output: /opt/perfdata/session-cycles/output
+ control: /opt/perfdata/session-cycles/control
+ ack: /opt/perfdata/session-cycles/ack
+ up: 1 minutes
+ [603351:sched] perf record -m 20M -e sched:* --overwrite --switch-output -a
+ base: /opt/perfdata/session-sched
+ output: /opt/perfdata/session-sched/output
+ control: /opt/perfdata/session-sched/control
+ ack: /opt/perfdata/session-sched/ack
+ up: 1 minutes
+
+The 'base' path is the daemon/session base.
+The 'lock' file is the daemon's lock file, guarding against another
+daemon running on top of the same base.
+The 'output' file is the perf record output for the specific session.
+The 'control' and 'ack' files are perf control files.
+The 'up' number shows how many minutes the daemon/session has been running.
+
+
+Make sure control session is online:
+
+ # perf daemon ping
+ OK cycles
+ OK sched
+
+
+Send USR2 signal to session 'cycles' to generate perf.data file:
+
+ # perf daemon signal --session cycles
+ signal 12 sent to session 'cycles [603452]'
+
+ # tail -2 /opt/perfdata/session-cycles/output
+ [ perf record: dump data: Woken up 1 times ]
+ [ perf record: Dump perf.data.2020123017013149 ]
+
+
+Send USR2 signal to all sessions:
+
+ # perf daemon signal
+ signal 12 sent to session 'cycles [603452]'
+ signal 12 sent to session 'sched [603453]'
+
+ # tail -2 /opt/perfdata/session-cycles/output
+ [ perf record: dump data: Woken up 1 times ]
+ [ perf record: Dump perf.data.2020123017024689 ]
+ # tail -2 /opt/perfdata/session-sched/output
+ [ perf record: dump data: Woken up 1 times ]
+ [ perf record: Dump perf.data.2020123017024713 ]
+
+
+Stop daemon:
+
+ # perf daemon stop
+
+
+SEE ALSO
+--------
+linkperf:perf-record[1], linkperf:perf-config[1]
diff --git a/tools/perf/Documentation/perf-data.txt b/tools/perf/Documentation/perf-data.txt
index c87180764829..417bf17e265c 100644
--- a/tools/perf/Documentation/perf-data.txt
+++ b/tools/perf/Documentation/perf-data.txt
@@ -17,7 +17,7 @@ Data file related processing.
COMMANDS
--------
convert::
- Converts perf data file into another format (only CTF [1] format is support by now).
+ Converts perf data file into another format.
It's possible to set data-convert debug variable to get debug messages from conversion,
like:
perf --debug data-convert data convert ...
@@ -27,6 +27,12 @@ OPTIONS for 'convert'
--to-ctf::
Triggers the CTF conversion, specify the path of CTF data directory.
+--to-json::
+ Triggers JSON conversion. Specify the JSON filename to output.
+
+--tod::
+ Convert time to wall clock time.
+
-i::
Specify input perf data file path.
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index f50ca0fef0a4..f3067a4af294 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -75,7 +75,7 @@ OPTIONS
-q::
--quiet::
- Do not show any message. (Suppress -v)
+ Do not show any warnings or messages. (Suppress -v)
-f::
--force::
@@ -182,6 +182,10 @@ OPTIONS
--tid=::
Only diff samples for given thread ID (comma separated list).
+--stream::
+	Enable hot streams comparison. A stream can be a callchain that is
+	aggregated from the branch records in samples.
+
COMPARISON
----------
The comparison is governed by the baseline file. The baseline perf.data
diff --git a/tools/perf/Documentation/perf-dlfilter.txt b/tools/perf/Documentation/perf-dlfilter.txt
new file mode 100644
index 000000000000..fb22e3b31dc5
--- /dev/null
+++ b/tools/perf/Documentation/perf-dlfilter.txt
@@ -0,0 +1,281 @@
+perf-dlfilter(1)
+================
+
+NAME
+----
+perf-dlfilter - Filter sample events using a dynamically loaded shared
+object file
+
+SYNOPSIS
+--------
+[verse]
+'perf script' [--dlfilter file.so ] [ --dlarg arg ]...
+
+DESCRIPTION
+-----------
+
+This option is used to process data through a custom filter provided by a
+dynamically loaded shared object file. Arguments can be passed using --dlarg
+and retrieved using perf_dlfilter_fns.args().
+
+If 'file.so' does not contain "/", then it will be found either in the current
+directory, or the perf tools exec path, which is ~/libexec/perf-core/dlfilters for
+a local build and install (refer to perf --exec-path), or the dynamic linker
+paths.
+
+API
+---
+
+The API for filtering consists of the following:
+
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+
+struct perf_dlfilter_fns perf_dlfilter_fns;
+
+int start(void **data, void *ctx);
+int stop(void *data, void *ctx);
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+const char *filter_description(const char **long_description);
+----
+
+If implemented, 'start' will be called at the beginning, before any
+calls to 'filter_event' or 'filter_event_early'. Return 0 to indicate success,
+or return a negative error code. '*data' can be assigned for use by other
+functions. 'ctx' is needed for calls to perf_dlfilter_fns, but most
+perf_dlfilter_fns are not valid when called from 'start'.
+
+If implemented, 'stop' will be called at the end, after any calls to
+'filter_event' or 'filter_event_early'. Return 0 to indicate success, or
+return a negative error code. 'data' is set by 'start'. 'ctx' is needed
+for calls to perf_dlfilter_fns, but most perf_dlfilter_fns are not valid
+when called from 'stop'.
+
+If implemented, 'filter_event' will be called for each sample event.
+Return 0 to keep the sample event, 1 to filter it out, or return a negative
+error code. 'data' is set by 'start'. 'ctx' is needed for calls to
+'perf_dlfilter_fns'.
+
+'filter_event_early' is the same as 'filter_event' except it is called before
+internal filtering.
+
+If implemented, 'filter_description' should return a one-line description
+of the filter, and optionally a longer description.
+
+The perf_dlfilter_sample structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+'filter_event' and 'filter_event_early' are passed a perf_dlfilter_sample
+structure, which contains the following fields:
+[source,c]
+----
+/*
+ * perf sample event information (as per perf script and <linux/perf_event.h>)
+ */
+struct perf_dlfilter_sample {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 ip;
+ __s32 pid;
+ __s32 tid;
+ __u64 time;
+ __u64 addr;
+ __u64 id;
+ __u64 stream_id;
+ __u64 period;
+ __u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
+ __u64 insn_cnt; /* For instructions-per-cycle (IPC) */
+ __u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
+ __s32 cpu;
+ __u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
+ __u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
+ __u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
+ __u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
+ __u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
+ __u8 addr_correlates_sym; /* True => resolve_addr() can be called */
+ __u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
+ __u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ __u64 brstack_nr; /* Number of brstack entries */
+ const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
+ __u64 raw_callchain_nr; /* Number of raw_callchain entries */
+ const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
+ const char *event;
+ __s32 machine_pid;
+ __s32 vcpu;
+};
+----
+
+Note: 'machine_pid' and 'vcpu' are not original members, but were added together later.
+'size' can be used to determine their presence at run time.
+PERF_DLFILTER_HAS_MACHINE_PID will be defined if they are present at compile time.
+For example:
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+#include <stddef.h>
+#include <stdbool.h>
+
+static inline bool have_machine_pid(const struct perf_dlfilter_sample *sample)
+{
+#ifdef PERF_DLFILTER_HAS_MACHINE_PID
+ return sample->size >= offsetof(struct perf_dlfilter_sample, vcpu) + sizeof(sample->vcpu);
+#else
+ return false;
+#endif
+}
+----
+
+The perf_dlfilter_fns structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'perf_dlfilter_fns' structure is populated with function pointers when the
+file is loaded. The functions can be called by 'filter_event' or
+'filter_event_early'.
+
+[source,c]
+----
+struct perf_dlfilter_fns {
+ const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
+ const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
+ char **(*args)(void *ctx, int *dlargc);
+ __s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ const __u8 *(*insn)(void *ctx, __u32 *length);
+ const char *(*srcline)(void *ctx, __u32 *line_number);
+ struct perf_event_attr *(*attr)(void *ctx);
+ __s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ void *(*reserved[120])(void *);
+};
+----
+
+'resolve_ip' returns information about ip.
+
+'resolve_addr' returns information about addr (if addr_correlates_sym).
+
+'args' returns arguments from --dlarg options.
+
+'resolve_address' provides information about 'address'. al->size must be set
+before calling. Returns 0 on success, -1 otherwise.
+
+'insn' returns instruction bytes and length.
+
+'srcline' returns the source file name and line number.
+
+'attr' returns perf_event_attr, refer <linux/perf_event.h>.
+
+'object_code' reads object code and returns the number of bytes read.
+
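+As a purely illustrative sketch (not taken from the perf sources), a filter
+could use 'args' to inspect the --dlarg arguments the first time it sees a
+sample, while keeping every event:
+
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+#include <stdio.h>
+
+struct perf_dlfilter_fns perf_dlfilter_fns;
+
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+	static int done;
+	int dlargc = 0;
+	char **dlargv;
+	int i;
+
+	if (!done) {
+		dlargv = perf_dlfilter_fns.args(ctx, &dlargc);
+		for (i = 0; dlargv && i < dlargc; i++)
+			fprintf(stderr, "dlarg %d: %s\n", i, dlargv[i]);
+		done = 1;
+	}
+	return 0;	/* keep all sample events */
+}
+----
+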
+The perf_dlfilter_al structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'perf_dlfilter_al' structure contains information about an address.
+
+[source,c]
+----
+/*
+ * Address location (as per perf script)
+ */
+struct perf_dlfilter_al {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u32 symoff;
+ const char *sym;
+ __u64 addr; /* Mapped address (from dso) */
+ __u64 sym_start;
+ __u64 sym_end;
+ const char *dso;
+ __u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
+ __u8 is_64_bit; /* Only valid if dso is not NULL */
+ __u8 is_kernel_ip; /* True if in kernel space */
+ __u32 buildid_size;
+ __u8 *buildid;
+ /* Below members are only populated by resolve_ip() */
+ __u8 filtered; /* true if this sample event will be filtered out */
+ const char *comm;
+};
+----
+
+perf_dlfilter_sample flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The 'flags' member of 'perf_dlfilter_sample' corresponds with the flags field
+of perf script. The bits of the flags are as follows:
+
+[source,c]
+----
+/* Definitions for perf_dlfilter_sample flags */
+enum {
+ PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
+ PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
+ PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
+ PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
+ PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
+ PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
+ PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
+ PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
+ PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
+ PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
+ PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
+ PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
+ PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
+};
+----
+
+EXAMPLE
+-------
+
+Filter out everything except branches from "foo" to "bar":
+
+[source,c]
+----
+#include <perf/perf_dlfilter.h>
+#include <string.h>
+
+struct perf_dlfilter_fns perf_dlfilter_fns;
+
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ const struct perf_dlfilter_al *al;
+ const struct perf_dlfilter_al *addr_al;
+
+ if (!sample->ip || !sample->addr_correlates_sym)
+ return 1;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al || !al->sym || strcmp(al->sym, "foo"))
+ return 1;
+
+ addr_al = perf_dlfilter_fns.resolve_addr(ctx);
+ if (!addr_al || !addr_al->sym || strcmp(addr_al->sym, "bar"))
+ return 1;
+
+ return 0;
+}
+----
+
+To build the shared object, assuming perf has been installed for the local user
+i.e. perf_dlfilter.h is in ~/include/perf :
+
+ gcc -c -I ~/include -fpic dlfilter-example.c
+ gcc -shared -o dlfilter-example.so dlfilter-example.o
+
+To use the filter with perf script:
+
+ perf script --dlfilter dlfilter-example.so
+
+NOTES
+-----
+
+The dlfilter .so file will depend on shared libraries. If those change,
+it may be necessary to rebuild the .so. Also, there may be unexpected results
+if the .so uses different versions of the shared libraries than perf uses.
+Versions can be checked using the ldd command.
+
+SEE ALSO
+--------
+linkperf:perf-script[1]
diff --git a/tools/perf/Documentation/perf-evlist.txt b/tools/perf/Documentation/perf-evlist.txt
index c0a66400a960..9af8b8dfb7b6 100644
--- a/tools/perf/Documentation/perf-evlist.txt
+++ b/tools/perf/Documentation/perf-evlist.txt
@@ -29,7 +29,7 @@ OPTIONS
Show just the sample frequency used for each event.
-v::
---verbose=::
+--verbose::
Show all fields.
-g::
diff --git a/tools/perf/Documentation/perf-ftrace.txt b/tools/perf/Documentation/perf-ftrace.txt
index b80c84307dc9..df4595563801 100644
--- a/tools/perf/Documentation/perf-ftrace.txt
+++ b/tools/perf/Documentation/perf-ftrace.txt
@@ -9,31 +9,32 @@ perf-ftrace - simple wrapper for kernel's ftrace functionality
SYNOPSIS
--------
[verse]
-'perf ftrace' <command>
+'perf ftrace' {trace|latency} <command>
DESCRIPTION
-----------
-The 'perf ftrace' command is a simple wrapper of kernel's ftrace
-functionality. It only supports single thread tracing currently and
-just reads trace_pipe in text and then write it to stdout.
+The 'perf ftrace' command provides a collection of subcommands which use
+the kernel's ftrace infrastructure.
-The following options apply to perf ftrace.
+  'perf ftrace trace' is a simple wrapper around ftrace. It currently supports
+  only single-thread tracing and just reads trace_pipe as text and then
+  writes it to stdout.
-OPTIONS
--------
+  'perf ftrace latency' calculates the execution latency of a given function
+  (optionally with BPF) and displays it as a histogram.
--t::
---tracer=::
- Tracer to use: function_graph or function.
+The following options apply to perf ftrace.
--v::
---verbose=::
- Verbosity level.
+COMMON OPTIONS
+--------------
-p::
--pid=::
Trace on existing process id (comma separated list).
+--tid=::
+ Trace on existing thread id (comma separated list).
+
-a::
--all-cpus::
Force system-wide collection. Scripts run without a <command>
@@ -48,39 +49,99 @@ OPTIONS
Ranges of CPUs are specified with -: 0-2.
Default is to trace on all online CPUs.
+-v::
+--verbose::
+ Increase the verbosity level.
+
+
+OPTIONS for 'perf ftrace trace'
+-------------------------------
+
+-t::
+--tracer=::
+	Tracer to use when neither the -G nor the -F option is
+	specified: function_graph or function.
+
+-F::
+--funcs::
+	List available functions to trace. It accepts a pattern to
+	list only the functions of interest.
+
+-D::
+--delay::
+ Time (ms) to wait before starting tracing after program start.
+
+-m::
+--buffer-size::
+	Set the size of the per-cpu tracing buffer; <size> is expected to
+	be a number with an appended unit character - B/K/M/G.
+
+--inherit::
+	Trace child processes spawned by the target.
+
-T::
--trace-funcs=::
- Only trace functions given by the argument. Multiple functions
- can be given by using this option more than once. The function
- argument also can be a glob pattern. It will be passed to
- 'set_ftrace_filter' in tracefs.
+ Select function tracer and set function filter on the given
+ function (or a glob pattern). Multiple functions can be given
+ by using this option more than once. The function argument also
+ can be a glob pattern. It will be passed to 'set_ftrace_filter'
+ in tracefs.
-N::
--notrace-funcs=::
- Do not trace functions given by the argument. Like -T option,
- this can be used more than once to specify multiple functions
- (or glob patterns). It will be passed to 'set_ftrace_notrace'
- in tracefs.
+ Select function tracer and do not trace functions given by the
+ argument. Like -T option, this can be used more than once to
+ specify multiple functions (or glob patterns). It will be
+ passed to 'set_ftrace_notrace' in tracefs.
+
+--func-opts::
+ List of options allowed to set:
+ call-graph - Display kernel stack trace for function tracer.
+ irq-info - Display irq context info for function tracer.
-G::
--graph-funcs=::
- Set graph filter on the given function (or a glob pattern).
- This is useful for the function_graph tracer only and enables
- tracing for functions executed from the given function.
- This can be used more than once to specify multiple functions.
- It will be passed to 'set_graph_function' in tracefs.
+ Select function_graph tracer and set graph filter on the given
+ function (or a glob pattern). This is useful to trace
+ functions executed from the given function. This can be used more
+ than once to specify multiple functions. It will be passed to
+ 'set_graph_function' in tracefs.
-g::
--nograph-funcs=::
- Set graph notrace filter on the given function (or a glob pattern).
- Like -G option, this is useful for the function_graph tracer only
- and disables tracing for function executed from the given function.
- This can be used more than once to specify multiple functions.
- It will be passed to 'set_graph_notrace' in tracefs.
+ Select function_graph tracer and set graph notrace filter on the
+ given function (or a glob pattern). Like -G option, this is useful
+ for the function_graph tracer only and disables tracing for functions
+ executed from the given function. This can be used more than once to
+ specify multiple functions. It will be passed to 'set_graph_notrace'
+ in tracefs.
+
+--graph-opts::
+ List of options allowed to set:
+ nosleep-time - Measure on-CPU time only for function_graph tracer.
+ noirqs - Ignore functions that happen inside interrupt.
+ verbose - Show process names, PIDs, timestamps, etc.
+ thresh=<n> - Setup trace duration threshold in microseconds.
+ depth=<n> - Set max depth for function graph tracer to follow.
+
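+As an illustrative sketch of the 'perf ftrace trace' options above (the
+function and the graph options are only examples), a function_graph trace
+limited to vfs_read and a depth of 3 might be collected with:
+
+ perf ftrace trace -G vfs_read --graph-opts depth=3 -- ls
+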
+
+OPTIONS for 'perf ftrace latency'
+---------------------------------
+
+-T::
+--trace-funcs=::
+ Set the function name to get the histogram. Unlike perf ftrace trace,
+ it only allows a single function for calculating the histogram.
+
+-b::
+--use-bpf::
+ Use BPF to measure function latency instead of using ftrace (which
+ uses the function_graph tracer internally).
+
+-n::
+--use-nsec::
+ Use nanoseconds instead of microseconds as the base unit of the histogram.
--D::
---graph-depth=::
- Set max depth for function graph tracer to follow
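+
+As a hypothetical invocation based on the options above (the traced function
+is only an illustration):
+
+ perf ftrace latency -T mutex_lock -- sleep 1
+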
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-inject.txt b/tools/perf/Documentation/perf-inject.txt
index 70969ea73e01..c972032f4ca0 100644
--- a/tools/perf/Documentation/perf-inject.txt
+++ b/tools/perf/Documentation/perf-inject.txt
@@ -24,8 +24,19 @@ information could make use of this facility.
OPTIONS
-------
-b::
---build-ids=::
- Inject build-ids into the output stream
+--build-ids::
+ Inject build-ids of DSOs hit by samples into the output stream.
+ This means it needs to process all SAMPLE records to find the DSOs.
+
+--buildid-all::
+ Inject build-ids of all DSOs into the output stream regardless of hits
+ and skip SAMPLE processing.
+
+--known-build-ids=::
+ Override build-ids to inject using these comma-separated pairs of
+ build-id and path. Understands file://filename to read these pairs
+ from a file, which can be generated with perf buildid-list.
+
-v::
--verbose::
Be more verbose.
@@ -41,6 +52,13 @@ OPTIONS
tasks slept. sched_switch contains a callchain where a task slept and
sched_stat contains a timeslice how long a task slept.
+-k::
+--vmlinux=<file>::
+ vmlinux pathname
+
+--ignore-vmlinux::
+ Ignore vmlinux files.
+
--kallsyms=<file>::
kallsyms pathname
@@ -64,6 +82,37 @@ include::itrace.txt[]
--force::
Don't complain, do it.
+--vm-time-correlation[=OPTIONS]::
+ Some architectures may capture AUX area data which contains timestamps
+ affected by virtualization. This option will update those timestamps
+ in place, to correlate with host timestamps. The in-place update means
+ that an output file is not specified, and instead the input file is
+ modified. The options are architecture specific, except that they may
+ start with "dry-run" which will cause the file to be processed but
+ without updating it. Currently this option is supported only by
+ Intel PT; refer to linkperf:perf-intel-pt[1].
+
+--guest-data=<path>,<pid>[,<time offset>[,<time scale>]]::
+ Insert events from a perf.data file recorded in a virtual machine at
+ the same time as the input perf.data file was recorded on the host.
+ The Process ID (PID) of the QEMU hypervisor process must be provided,
+ and the time offset and time scale (multiplier) will likely be needed
+ to convert guest time stamps into host time stamps. For example, for
+ x86 the TSC Offset and Multiplier could be provided for a virtual machine
+ using Linux command line option no-kvmclock.
+ Currently only mmap, mmap2, comm, task, context_switch, ksymbol,
+ and text_poke events are inserted, as well as build ID information.
+ The QEMU option -name debug-threads=on is needed so that thread names
+ can be used to determine which thread is running which VCPU. Note
+ libvirt seems to use this by default.
+ When using perf record in the guest, option --sample-identifier
+ should be used, and also --buildid-all and --switch-events may be
+ useful.
+
+:GMEXAMPLECMD: inject
+:GMEXAMPLESUBCMD:
+include::guestmount.txt[]
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1],
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index 456fdcbf26ac..4c90cc176f81 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -69,22 +69,22 @@ And profiled with 'perf report' e.g.
To also trace kernel space presents a problem, namely kernel self-modifying
code. A fairly good kernel image is available in /proc/kcore but to get an
accurate image a copy of /proc/kcore needs to be made under the same conditions
-as the data capture. A script perf-with-kcore can do that, but beware that the
-script makes use of 'sudo' to copy /proc/kcore. If you have perf installed
-locally from the source tree you can do:
+as the data capture. 'perf record' can make a copy of /proc/kcore if the option
+--kcore is used, but access to /proc/kcore is restricted e.g.
- ~/libexec/perf-core/perf-with-kcore record pt_ls -e intel_pt// -- ls
+ sudo perf record -o pt_ls --kcore -e intel_pt// -- ls
-which will create a directory named 'pt_ls' and put the perf.data file and
-copies of /proc/kcore, /proc/kallsyms and /proc/modules into it. Then to use
-'perf report' becomes:
+which will create a directory named 'pt_ls' and put the perf.data file (named
+simply 'data') and copies of /proc/kcore, /proc/kallsyms and /proc/modules into
+it. The other tools understand the directory format, so to use 'perf report'
+becomes:
- ~/libexec/perf-core/perf-with-kcore report pt_ls
+ sudo perf report -i pt_ls
Because samples are synthesized after-the-fact, the sampling period can be
selected for reporting. e.g. sample every microsecond
- ~/libexec/perf-core/perf-with-kcore report pt_ls --itrace=i1usge
+ sudo perf report pt_ls --itrace=i1usge
See the sections below for more information about the --itrace option.
@@ -101,16 +101,43 @@ data is available you can use the 'perf script' tool with all itrace sampling
options, which will list all the samples.
perf record -e intel_pt//u ls
- perf script --itrace=ibxwpe
+ perf script --itrace=iybxwpe
An interesting field that is not printed by default is 'flags' which can be
displayed as follows:
- perf script --itrace=ibxwpe -F+flags
+ perf script --itrace=iybxwpe -F+flags
-The flags are "bcrosyiABEx" which stand for branch, call, return, conditional,
-system, asynchronous, interrupt, transaction abort, trace begin, trace end, and
-in transaction, respectively.
+The flags are "bcrosyiABExghDt" which stand for branch, call, return, conditional,
+system, asynchronous, interrupt, transaction abort, trace begin, trace end,
+in transaction, VM-entry, VM-exit, interrupt disabled, and interrupt disable
+toggle respectively.
+
+perf script also supports higher level ways to dump instruction traces:
+
+ perf script --insn-trace --xed
+
+Dump all instructions. This requires installing the xed tool (see XED below).
+Dumping all instructions in a long trace can be fairly slow. It is usually better
+to start with higher level decoding, like
+
+ perf script --call-trace
+
+or
+
+ perf script --call-ret-trace
+
+and then select a time range of interest. The time range can then be examined
+in detail with
+
+ perf script --time starttime,stoptime --insn-trace --xed
+
+While examining the trace it's also useful to filter on specific CPUs using
+the -C option
+
+ perf script --time starttime,stoptime --insn-trace --xed -C 1
+
+Dump all instructions in time range on CPU 1.
Another interesting field that is not printed by default is 'ipc' which can be
displayed as follows:
@@ -120,16 +147,28 @@ displayed as follows:
There are two ways that instructions-per-cycle (IPC) can be calculated depending
on the recording.
-If the 'cyc' config term (see config terms section below) was used, then IPC is
-calculated using the cycle count from CYC packets, otherwise MTC packets are
-used - refer to the 'mtc' config term. When MTC is used, however, the values
-are less accurate because the timing is less accurate.
+If the 'cyc' config term (see config terms section below) was used, then IPC
+and cycle events are calculated using the cycle count from CYC packets, otherwise
+MTC packets are used - refer to the 'mtc' config term. When MTC is used, however,
+the values are less accurate because the timing is less accurate.
Because Intel PT does not update the cycle count on every branch or instruction,
the values will often be zero. When there are values, they will be the number
of instructions and number of cycles since the last update, and thus represent
-the average IPC since the last IPC for that event type. Note IPC for "branches"
-events is calculated separately from IPC for "instructions" events.
+the average IPC cycle count since the last IPC for that event type.
+Note IPC for "branches" events is calculated separately from IPC for "instructions"
+events.
+
+Even with the 'cyc' config term, it is possible to produce IPC information for
+every change of timestamp, but at the expense of accuracy. That is selected by
+specifying the itrace 'A' option. Due to the granularity of timestamps, the
+actual number of cycles increases even though the reported cycle count does not.
+The number of instructions is known, but if IPC is reported, cycles can be too
+low and so IPC is too high. Note that inaccuracy decreases as the period of
+sampling increases i.e. if the number of cycles is too low by a small amount,
+that becomes less significant if the number of cycles is large. It may also be
+useful to use the 'A' option in conjunction with dlfilter-show-cycles.so to
+provide higher granularity cycle information.
Also note that the IPC instruction count may or may not include the current
instruction. If the cycle count is associated with an asynchronous branch
@@ -148,7 +187,19 @@ Refer to script export-to-sqlite.py or export-to-postgresql.py for more details,
and to script exported-sql-viewer.py for an example of using the database.
There is also script intel-pt-events.py which provides an example of how to
-unpack the raw data for power events and PTWRITE.
+unpack the raw data for power events and PTWRITE. The script also displays
+branches, and supports 2 additional modes selected by option:
+
+ - --insn-trace - instruction trace
+ - --src-trace - source trace
+
+The intel-pt-events.py script also has options:
+
+ - --all-switch-events - display all switch events, not only the last consecutive.
+ - --interleave [<n>] - interleave sample output for the same timestamp so that
+ no more than n samples for a CPU are displayed in a row. 'n' defaults to 4.
+ Note this only affects the order of output, and only when the timestamp is the
+ same.
As mentioned above, it is easy to capture too much data. One way to limit the
data captured is to use 'snapshot' mode which is explained further below.
@@ -225,7 +276,7 @@ Note that, as with all events, the event is suffixed with event modifiers:
H host
p precise ip
-'h', 'G' and 'H' are for virtualization which is not supported by Intel PT.
+'h', 'G' and 'H' are for virtualization and are not used by Intel PT.
'p' is also not relevant to Intel PT. So only options 'u' and 'k' are
meaningful for Intel PT.
@@ -426,6 +477,8 @@ ptw Enable PTWRITE packets which are produced when a ptwrite instruction
which contains "1" if the feature is supported and
"0" otherwise.
+ As an alternative, refer to "Emulated PTWRITE" further below.
+
fup_on_ptw Enable a FUP packet to follow the PTWRITE packet. The FUP packet
provides the address of the ptwrite instruction. In the absence of
fup_on_ptw, the decoder will use the address of the previous branch
@@ -442,6 +495,30 @@ pwr_evt Enable power events. The power events provide information about
which contains "1" if the feature is supported and
"0" otherwise.
+event Enable Event Trace. The events provide information about asynchronous
+ events.
+
+ Support for this feature is indicated by:
+
+ /sys/bus/event_source/devices/intel_pt/caps/event_trace
+
+ which contains "1" if the feature is supported and
+ "0" otherwise.
+
+notnt Disable TNT packets. Without TNT packets, it is not possible to walk
+ executable code to reconstruct control flow, however FUP, TIP, TIP.PGE
+ and TIP.PGD packets still indicate asynchronous control flow, and (if
+ return compression is disabled - see noretcomp) return statements.
+ The advantage of eliminating TNT packets is reducing the size of the
+ trace and corresponding tracing overhead.
+
+ Support for this feature is indicated by:
+
+ /sys/bus/event_source/devices/intel_pt/caps/tnt_disable
+
+ which contains "1" if the feature is supported and
+ "0" otherwise.
+
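+For example, whether a given capability is present can be checked by reading
+the sysfs file shown above, e.g. for Event Trace:
+
+ cat /sys/bus/event_source/devices/intel_pt/caps/event_trace
+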
AUX area sampling option
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -558,7 +635,7 @@ The mmap size and auxtrace mmap size are displayed if the -vv option is used e.g
Intel PT modes of operation
~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Intel PT can be used in 2 modes:
+Intel PT can be used in 3 modes:
full-trace mode
sample mode
snapshot mode
@@ -571,7 +648,8 @@ Sample mode attaches a Intel PT sample to other events e.g.
perf record --aux-sample -e intel_pt//u -e branch-misses:u
-Snapshot mode captures the available data when a signal is sent e.g.
+Snapshot mode captures the available data when a signal is sent or "snapshot"
+control command is issued. e.g. using a signal
perf record -v -e intel_pt//u -S ./loopy 1000000000 &
[1] 11435
@@ -582,7 +660,23 @@ Note that the signal sent is SIGUSR2.
Note that "Recording AUX area tracing snapshot" is displayed because the -v
option is used.
-The 2 modes cannot be used together.
+The advantage of using "snapshot" control command is that the access is
+controlled by access to a FIFO e.g.
+
+ $ mkfifo perf.control
+ $ mkfifo perf.ack
+ $ cat perf.ack &
+ [1] 15235
+ $ sudo ~/bin/perf record --control fifo:perf.control,perf.ack -S -e intel_pt//u -- sleep 60 &
+ [2] 15243
+ $ ps -e | grep perf
+ 15244 pts/1 00:00:00 perf
+ $ kill -USR2 15244
+ bash: kill: (15244) - Operation not permitted
+ $ echo snapshot > perf.control
+ ack
+
+The 3 Intel PT modes of operation cannot be used together.
Buffer handling
@@ -687,7 +781,7 @@ The v4.2 kernel introduced support for a context switch metadata event,
PERF_RECORD_SWITCH, which allows unprivileged users to see when their processes
are scheduled out and in, just not by whom, which is left for the
PERF_RECORD_SWITCH_CPU_WIDE, that is only accessible in system wide context,
-which in turn requires CAP_SYS_ADMIN.
+which in turn requires CAP_PERFMON or CAP_SYS_ADMIN.
Please see the 45ac1403f564 ("perf: Add PERF_RECORD_SWITCH to indicate context
switches") commit, that introduces these metadata events for further info.
@@ -807,36 +901,54 @@ Having no option is the same as
which, in turn, is the same as
- --itrace=cepwx
+ --itrace=cepwxy
The letters are:
i synthesize "instructions" events
+ y synthesize "cycles" events
b synthesize "branches" events
x synthesize "transactions" events
w synthesize "ptwrite" events
- p synthesize "power" events
+ p synthesize "power" events (incl. PSB events)
c synthesize branches events (calls only)
r synthesize branches events (returns only)
+ o synthesize PEBS-via-PT events
+ I synthesize Event Trace events
e synthesize tracing error events
d create a debug log
g synthesize a call chain (use with i or x)
+ G synthesize a call chain on existing event records
l synthesize last branch entries (use with i or x)
+ L synthesize last branch entries on existing event records
s skip initial number of events
+ q quicker (less detailed) decoding
+ A approximate IPC
+ Z prefer to ignore timestamps (so-called "timeless" decoding)
"Instructions" events look like they were recorded by "perf record -e
instructions".
+"Cycles" events look like they were recorded by "perf record -e cycles"
+(i.e., the default). Note that even with CYC packets enabled and no sampling,
+these are not fully accurate, since CYC packets are not emitted for each
+instruction, only when some other event (like an indirect branch, or a
+TNT packet representing multiple branches) causes a packet to be emitted.
+Thus, it is more effective for attributing cycles to functions (and possibly
+basic blocks) than to individual instructions, although it is not perfect
+even for functions (it becomes better if the noretcomp option is active).
+
"Branches" events look like they were recorded by "perf record -e branches". "c"
and "r" can be combined to get calls and returns.
"Transactions" events correspond to the start or end of transactions. The
'flags' field can be used in perf script to determine whether the event is a
-tranasaction start, commit or abort.
+transaction start, commit or abort.
-Note that "instructions", "branches" and "transactions" events depend on code
-flow packets which can be disabled by using the config term "branch=0". Refer
-to the config terms section above.
+Note that "instructions", "cycles", "branches" and "transactions" events
+depend on code flow packets which can be disabled by using the config term
+"branch=0". Refer to the config terms section above.
"ptwrite" events record the payload of the ptwrite instruction and whether
"fup_on_ptw" was used. "ptwrite" events depend on PTWRITE packets which are
@@ -851,12 +963,15 @@ event packets are recorded only if the "pwr_evt" config term was used. Refer to
the config terms section above. The power events record information about
C-state changes, whereas CBR is indicative of CPU frequency. perf script
"event,synth" fields display information like this:
+
cbr: cbr: 22 freq: 2189 MHz (200%)
mwait: hints: 0x60 extensions: 0x1
pwre: hw: 0 cstate: 2 sub-cstate: 0
exstop: ip: 1
pwrx: deepest cstate: 2 last cstate: 2 wake reason: 0x4
+
Where:
+
"cbr" includes the frequency and the percentage of maximum non-turbo
"mwait" shows mwait hints and extensions
"pwre" shows C-state transitions (to a C-state deeper than C0) and
@@ -864,16 +979,43 @@ Where:
"exstop" indicates execution stopped and whether the IP was recorded
exactly,
"pwrx" indicates return to C0
+
For more details refer to the Intel 64 and IA-32 Architectures Software
Developer Manuals.
+PSB events show when a PSB+ occurred and also the byte-offset in the trace.
+Emitting a PSB+ can cause the CPU a slight delay. When doing timing analysis
+of code with Intel PT, it is useful to know if a timing bubble was caused
+by Intel PT or not.
+
Error events show where the decoder lost the trace. Error events
are quite important. Users must know if what they are seeing is a complete
-picture or not.
+picture or not. The "e" option may be followed by flags which affect what errors
+will or will not be reported. Each flag must be preceded by either '+' or '-'.
+The flags supported by Intel PT are:
+
+ -o Suppress overflow errors
+ -l Suppress trace data lost errors
+
+For example, for errors but not overflow or data lost errors:
+
+ --itrace=e-o-l
The "d" option will cause the creation of a file "intel_pt.log" containing all
decoded packets and instructions. Note that this option slows down the decoder
-and that the resulting file may be very large.
+and that the resulting file may be very large. The "d" option may be followed
+by flags which affect what debug messages will or will not be logged. Each flag
+must be preceded by either '+' or '-'. The flags supported by Intel PT are:
+
+ -a Suppress logging of perf events
+ +a Log all perf events
+ +e Output only on decoding errors (size configurable)
+ +o Output to stdout instead of "intel_pt.log"
+
+By default, logged perf events are filtered by any specified time ranges, but
+flag +a overrides that. The +e flag can be useful for analyzing errors. By
+default, the log size in that case is 16384 bytes, but can be altered by
+linkperf:perf-config[1] e.g. perf config itrace.debug-log-buffer-size=30000
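+
+As an illustrative combination of the flags above, the following would log
+only when decoding errors occur and write the log to stdout instead of
+"intel_pt.log":
+
+ perf script --itrace=d+e+o
+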
In addition, the period of the "instructions" event can be specified. e.g.
@@ -912,6 +1054,39 @@ transactions events can be specified. e.g.
Note that last branch entries are cleared for each sample, so there is no overlap
from one sample to the next.
+The G and L options are designed in particular for sample mode, and work much
+like g and l but add call chain and branch stack to the other selected events
+instead of synthesized events. For example, to record branch-misses events for
+'ls' and then add a call chain derived from the Intel PT trace:
+
+ perf record --aux-sample -e '{intel_pt//u,branch-misses:u}' -- ls
+ perf report --itrace=Ge
+
+Although in fact G is a default for perf report, so that is the same as just:
+
+ perf report
+
+One caveat with the G and L options is that they work poorly with "Large PEBS".
+Large PEBS means PEBS records will be accumulated by hardware and then written
+into the event buffer in one go. That reduces interrupts, but can give very
+late timestamps. Because the Intel PT trace is synchronized by timestamps,
+the PEBS events do not match the trace. Currently, Large PEBS is used only in
+certain circumstances:
+ - hardware supports it
+ - PEBS is used
+ - event period is specified, instead of frequency
+ - the sample type is limited to the following flags:
+ PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR |
+ PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID |
+ PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER |
+ PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR |
+ PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER |
+ PERF_SAMPLE_PERIOD (and sometimes) | PERF_SAMPLE_TIME
+Because Intel PT sample mode uses a different sample type to the list above,
+Large PEBS is not used with Intel PT sample mode. To avoid Large PEBS in other
+cases, avoid specifying the event period i.e. avoid the 'perf record' -c option,
+--count option, or 'period' config term.
+
To disable trace decoding entirely, use the option --no-itrace.
It is also possible to skip events generated (instructions, branches, transactions)
@@ -921,6 +1096,70 @@ at the beginning. This is useful to ignore initialization code.
skips the first million instructions.
+The q option changes the way the trace is decoded. The decoding is much faster
+but much less detailed. Specifically, with the q option, the decoder does not
+decode TNT packets, and does not walk object code, but gets the ip from FUP and
+TIP packets. The q option can be used with the b and i options but the period
+is not used. The q option decodes more quickly, but is useful only if the
+control flow of interest is represented or indicated by FUP, TIP, TIP.PGE, or
+TIP.PGD packets (refer below). However the q option could be used to find time
+ranges that could then be decoded fully using the --time option.
+
+What will *not* be decoded with the (single) q option:
+
+ - direct calls and jmps
+ - conditional branches
+ - non-branch instructions
+
+What *will* be decoded with the (single) q option:
+
+ - asynchronous branches such as interrupts
+ - indirect branches
+ - function return target address *if* the noretcomp config term (refer
+ config terms section) was used
+ - start of (control-flow) tracing
+ - end of (control-flow) tracing, if it is not out of context
+ - power events, ptwrite, transaction start and abort
+ - instruction pointer associated with PSB packets
+
+Note the q option does not specify what events will be synthesized e.g. the p
+option must be used also to show power events.
+
+Repeating the q option (double-q i.e. qq) results in even faster decoding and even
+less detail. The decoder decodes only extended PSB (PSB+) packets, getting the
+instruction pointer if there is a FUP packet within PSB+ (i.e. between PSB and
+PSBEND). Note PSB packets occur regularly in the trace based on the psb_period
+config term (refer config terms section). There will be a FUP packet if the
+PSB+ occurs while control flow is being traced.
+
+What will *not* be decoded with the qq option:
+
+ - everything except instruction pointer associated with PSB packets
+
+What *will* be decoded with the qq option:
+
+ - instruction pointer associated with PSB packets
+
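+For example, quick decoding can be used to find a time range of interest,
+which can then be decoded in full (the times here are placeholders):
+
+ perf script --itrace=qb
+ perf script --time starttime,stoptime --itrace=ibxwpe
+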
+The Z option is equivalent to having recorded a trace without TSC
+(i.e. config term tsc=0). It can be useful to avoid timestamp issues when
+decoding a trace of a virtual machine.
+
+
+dlfilter-show-cycles.so
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Cycles can be displayed using dlfilter-show-cycles.so in which case the itrace A
+option can be useful to provide higher granularity cycle information:
+
+ perf script --itrace=A --call-trace --dlfilter dlfilter-show-cycles.so
+
+To see a list of dlfilters:
+
+ perf script -v --list-dlfilters
+
+See also linkperf:perf-dlfilters[1]
+
+
dump option
~~~~~~~~~~~
@@ -993,12 +1232,644 @@ Recording is selected by using the aux-output config term e.g.
perf record -c 10000 -e '{intel_pt/branch=0/,cycles/aux-output/ppp}' uname
-Note that currently, software only supports redirecting at most one PEBS event.
+Originally, software only supported redirecting at most one PEBS event because it
+was not able to differentiate one event from another. To overcome that, more recent
+kernels and perf tools add support for the PERF_RECORD_AUX_OUTPUT_HW_ID side-band event.
+To check for the presence of that event in a PEBS-via-PT trace:
+
+ perf script -D --no-itrace | grep PERF_RECORD_AUX_OUTPUT_HW_ID
To display PEBS events from the Intel PT trace, use the itrace 'o' option e.g.
perf script --itrace=oe
+XED
+---
+
+include::build-xed.txt[]
+
+
+Tracing Virtual Machines (kernel only)
+--------------------------------------
+
+Currently, kernel tracing is supported with either "timeless" decoding
+(i.e. no TSC timestamps) or VM Time Correlation. VM Time Correlation is an extra step
+using 'perf inject' and requires unchanging VMX TSC Offset and no VMX TSC Scaling.
+
+Other limitations and caveats
+
+ VMX controls may suppress packets needed for decoding resulting in decoding errors
+ VMX controls may block the perf NMI to the host potentially resulting in lost trace data
+ Guest kernel self-modifying code (e.g. jump labels or JIT-compiled eBPF) will result in decoding errors
+ Guest thread information is unknown
+ Guest VCPU is unknown but may be able to be inferred from the host thread
+ Callchains are not supported
+
+Example using "timeless" decoding
+
+Start VM
+
+ $ sudo virsh start kubuntu20.04
+ Domain kubuntu20.04 started
+
+Mount the guest file system. Note sshfs needs -o direct_io to enable reading of proc files. root access is needed to read /proc/kcore.
+
+ $ mkdir vm0
+ $ sshfs -o direct_io root@vm0:/ vm0
+
+Copy the guest /proc/kallsyms, /proc/modules and /proc/kcore
+
+ $ perf buildid-cache -v --kcore vm0/proc/kcore
+ kcore added to build-id cache directory /home/user/.debug/[kernel.kcore]/9600f316a53a0f54278885e8d9710538ec5f6a08/2021021807494306
+ $ KALLSYMS=/home/user/.debug/[kernel.kcore]/9600f316a53a0f54278885e8d9710538ec5f6a08/2021021807494306/kallsyms
+
+Find the VM process
+
+ $ ps -eLl | grep 'KVM\|PID'
+ F S UID PID PPID LWP C PRI NI ADDR SZ WCHAN TTY TIME CMD
+ 3 S 64055 1430 1 1440 1 80 0 - 1921718 - ? 00:02:47 CPU 0/KVM
+ 3 S 64055 1430 1 1441 1 80 0 - 1921718 - ? 00:02:41 CPU 1/KVM
+ 3 S 64055 1430 1 1442 1 80 0 - 1921718 - ? 00:02:38 CPU 2/KVM
+ 3 S 64055 1430 1 1443 2 80 0 - 1921718 - ? 00:03:18 CPU 3/KVM
+
+Start an open-ended perf record, tracing the VM process, do something on the VM, and then ctrl-C to stop.
+TSC is not supported and tsc=0 must be specified. That means mtc is useless, so add mtc=0.
+However, IPC can still be determined, hence cyc=1 can be added.
+Only kernel decoding is supported, so 'k' must be specified.
+Intel PT traces both the host and the guest so --guest and --host need to be specified.
+Without timestamps, --per-thread must be specified to distinguish threads.
+
+ $ sudo perf kvm --guest --host --guestkallsyms $KALLSYMS record --kcore -e intel_pt/tsc=0,mtc=0,cyc=1/k -p 1430 --per-thread
+ ^C
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 5.829 MB ]
+
+perf script can be used to provide an instruction trace
+
+ $ perf script --guestkallsyms $KALLSYMS --insn-trace --xed -F+ipc | grep -C10 vmresume | head -21
+ CPU 0/KVM 1440 ffffffff82133cdd __vmx_vcpu_run+0x3d ([kernel.kallsyms]) movq 0x48(%rax), %r9
+ CPU 0/KVM 1440 ffffffff82133ce1 __vmx_vcpu_run+0x41 ([kernel.kallsyms]) movq 0x50(%rax), %r10
+ CPU 0/KVM 1440 ffffffff82133ce5 __vmx_vcpu_run+0x45 ([kernel.kallsyms]) movq 0x58(%rax), %r11
+ CPU 0/KVM 1440 ffffffff82133ce9 __vmx_vcpu_run+0x49 ([kernel.kallsyms]) movq 0x60(%rax), %r12
+ CPU 0/KVM 1440 ffffffff82133ced __vmx_vcpu_run+0x4d ([kernel.kallsyms]) movq 0x68(%rax), %r13
+ CPU 0/KVM 1440 ffffffff82133cf1 __vmx_vcpu_run+0x51 ([kernel.kallsyms]) movq 0x70(%rax), %r14
+ CPU 0/KVM 1440 ffffffff82133cf5 __vmx_vcpu_run+0x55 ([kernel.kallsyms]) movq 0x78(%rax), %r15
+ CPU 0/KVM 1440 ffffffff82133cf9 __vmx_vcpu_run+0x59 ([kernel.kallsyms]) movq (%rax), %rax
+ CPU 0/KVM 1440 ffffffff82133cfc __vmx_vcpu_run+0x5c ([kernel.kallsyms]) callq 0xffffffff82133c40
+ CPU 0/KVM 1440 ffffffff82133c40 vmx_vmenter+0x0 ([kernel.kallsyms]) jz 0xffffffff82133c46
+ CPU 0/KVM 1440 ffffffff82133c42 vmx_vmenter+0x2 ([kernel.kallsyms]) vmresume IPC: 0.11 (50/445)
+ :1440 1440 ffffffffbb678b06 native_write_msr+0x6 ([guest.kernel.kallsyms]) nopl %eax, (%rax,%rax,1)
+ :1440 1440 ffffffffbb678b0b native_write_msr+0xb ([guest.kernel.kallsyms]) retq IPC: 0.04 (2/41)
+ :1440 1440 ffffffffbb666646 lapic_next_deadline+0x26 ([guest.kernel.kallsyms]) data16 nop
+ :1440 1440 ffffffffbb666648 lapic_next_deadline+0x28 ([guest.kernel.kallsyms]) xor %eax, %eax
+ :1440 1440 ffffffffbb66664a lapic_next_deadline+0x2a ([guest.kernel.kallsyms]) popq %rbp
+ :1440 1440 ffffffffbb66664b lapic_next_deadline+0x2b ([guest.kernel.kallsyms]) retq IPC: 0.16 (4/25)
+ :1440 1440 ffffffffbb74607f clockevents_program_event+0x8f ([guest.kernel.kallsyms]) test %eax, %eax
+ :1440 1440 ffffffffbb746081 clockevents_program_event+0x91 ([guest.kernel.kallsyms]) jz 0xffffffffbb74603c IPC: 0.06 (2/30)
+ :1440 1440 ffffffffbb74603c clockevents_program_event+0x4c ([guest.kernel.kallsyms]) popq %rbx
+ :1440 1440 ffffffffbb74603d clockevents_program_event+0x4d ([guest.kernel.kallsyms]) popq %r12
+
+Example using VM Time Correlation
+
+Start VM
+
+ $ sudo virsh start kubuntu20.04
+ Domain kubuntu20.04 started
+
+Mount the guest file system. Note sshfs needs -o direct_io to enable reading of proc files. root access is needed to read /proc/kcore.
+
+ $ mkdir -p vm0
+ $ sshfs -o direct_io root@vm0:/ vm0
+
+Copy the guest /proc/kallsyms, /proc/modules and /proc/kcore
+
+ $ perf buildid-cache -v --kcore vm0/proc/kcore
+ same kcore found in /home/user/.debug/[kernel.kcore]/cc9c55a98c5e4ec0aeda69302554aabed5cd6491/2021021312450777
+ $ KALLSYMS=/home/user/.debug/\[kernel.kcore\]/cc9c55a98c5e4ec0aeda69302554aabed5cd6491/2021021312450777/kallsyms
+
+Find the VM process
+
+ $ ps -eLl | grep 'KVM\|PID'
+ F S UID PID PPID LWP C PRI NI ADDR SZ WCHAN TTY TIME CMD
+ 3 S 64055 16998 1 17005 13 80 0 - 1818189 - ? 00:00:16 CPU 0/KVM
+ 3 S 64055 16998 1 17006 4 80 0 - 1818189 - ? 00:00:05 CPU 1/KVM
+ 3 S 64055 16998 1 17007 3 80 0 - 1818189 - ? 00:00:04 CPU 2/KVM
+ 3 S 64055 16998 1 17008 4 80 0 - 1818189 - ? 00:00:05 CPU 3/KVM
+
+Start an open-ended perf record, tracing the VM process, do something on the VM, and then ctrl-C to stop.
+IPC can be determined, hence cyc=1 can be added.
+Only kernel decoding is supported, so 'k' must be specified.
+Intel PT traces both the host and the guest so --guest and --host need to be specified.
+
+ $ sudo perf kvm --guest --host --guestkallsyms $KALLSYMS record --kcore -e intel_pt/cyc=1/k -p 16998
+ ^C[ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 9.041 MB perf.data.kvm ]
+
+Now 'perf inject' can be used to determine the VMX TSC Offset. Note, Intel PT TSC packets are
+only 7 bytes, so the TSC Offset might differ from the actual value in the 8th byte. That will
+have no effect i.e. the resulting timestamps will be correct anyway.
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation=dry-run
+ ERROR: Unknown TSC Offset for VMCS 0x1bff6a
+ VMCS: 0x1bff6a TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1cbc08
+ VMCS: 0x1cbc08 TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1c3ce8
+ VMCS: 0x1c3ce8 TSC Offset 0xffffe42722c64c41
+ ERROR: Unknown TSC Offset for VMCS 0x1cbce9
+ VMCS: 0x1cbce9 TSC Offset 0xffffe42722c64c41
+
+Each virtual CPU has a different Virtual Machine Control Structure (VMCS)
+shown above with the calculated TSC Offset. For an unchanging TSC Offset
+they should all be the same for the same virtual machine.
+
+Now that the TSC Offset is known, it can be provided to 'perf inject'
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation="dry-run 0xffffe42722c64c41"
+
+Note the options for 'perf inject' --vm-time-correlation are:
+
+ [ dry-run ] [ <TSC Offset> [ : <VMCS> [ , <VMCS> ]... ] ]...
+
+So it is possible to specify different TSC Offsets for different VMCS.
+The option "dry-run" will cause the file to be processed but without updating it.
+Note it is also possible to get an intel_pt.log file by adding option --itrace=d
+
+There were no errors, so do it for real
+
+ $ perf inject -i perf.data.kvm --vm-time-correlation=0xffffe42722c64c41 --force
+
+'perf script' can be used to see if there are any decoder errors
+
+ $ perf script -i perf.data.kvm --guestkallsyms $KALLSYMS --itrace=e-o
+
+There were none.
+
+'perf script' can be used to provide an instruction trace showing timestamps
+
+ $ perf script -i perf.data.kvm --guestkallsyms $KALLSYMS --insn-trace --xed -F+ipc | grep -C10 vmresume | head -21
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cdd __vmx_vcpu_run+0x3d ([kernel.kallsyms]) movq 0x48(%rax), %r9
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce1 __vmx_vcpu_run+0x41 ([kernel.kallsyms]) movq 0x50(%rax), %r10
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce5 __vmx_vcpu_run+0x45 ([kernel.kallsyms]) movq 0x58(%rax), %r11
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ce9 __vmx_vcpu_run+0x49 ([kernel.kallsyms]) movq 0x60(%rax), %r12
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133ced __vmx_vcpu_run+0x4d ([kernel.kallsyms]) movq 0x68(%rax), %r13
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf1 __vmx_vcpu_run+0x51 ([kernel.kallsyms]) movq 0x70(%rax), %r14
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf5 __vmx_vcpu_run+0x55 ([kernel.kallsyms]) movq 0x78(%rax), %r15
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cf9 __vmx_vcpu_run+0x59 ([kernel.kallsyms]) movq (%rax), %rax
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133cfc __vmx_vcpu_run+0x5c ([kernel.kallsyms]) callq 0xffffffff82133c40
+ CPU 1/KVM 17006 [001] 11500.262865593: ffffffff82133c40 vmx_vmenter+0x0 ([kernel.kallsyms]) jz 0xffffffff82133c46
+ CPU 1/KVM 17006 [001] 11500.262866075: ffffffff82133c42 vmx_vmenter+0x2 ([kernel.kallsyms]) vmresume IPC: 0.05 (40/769)
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb0 asm_sysvec_apic_timer_interrupt+0x0 ([guest.kernel.kallsyms]) clac
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb3 asm_sysvec_apic_timer_interrupt+0x3 ([guest.kernel.kallsyms]) pushq $0xffffffffffffffff
+ :17006 17006 [001] 11500.262869216: ffffffff82200cb5 asm_sysvec_apic_timer_interrupt+0x5 ([guest.kernel.kallsyms]) callq 0xffffffff82201160
+ :17006 17006 [001] 11500.262869216: ffffffff82201160 error_entry+0x0 ([guest.kernel.kallsyms]) cld
+ :17006 17006 [001] 11500.262869216: ffffffff82201161 error_entry+0x1 ([guest.kernel.kallsyms]) pushq %rsi
+ :17006 17006 [001] 11500.262869216: ffffffff82201162 error_entry+0x2 ([guest.kernel.kallsyms]) movq 0x8(%rsp), %rsi
+ :17006 17006 [001] 11500.262869216: ffffffff82201167 error_entry+0x7 ([guest.kernel.kallsyms]) movq %rdi, 0x8(%rsp)
+ :17006 17006 [001] 11500.262869216: ffffffff8220116c error_entry+0xc ([guest.kernel.kallsyms]) pushq %rdx
+ :17006 17006 [001] 11500.262869216: ffffffff8220116d error_entry+0xd ([guest.kernel.kallsyms]) pushq %rcx
+ :17006 17006 [001] 11500.262869216: ffffffff8220116e error_entry+0xe ([guest.kernel.kallsyms]) pushq %rax
+
+
+Tracing Virtual Machines (including user space)
+-----------------------------------------------
+
+It is possible to use perf record to record sideband events within a virtual machine, so that an Intel PT trace on the host can be decoded.
+Sideband events from the guest perf.data file can be injected into the host perf.data file using perf inject.
+
+Here is an example of the steps needed:
+
+On the guest machine:
+
+Check that no-kvmclock kernel command line option was used to boot:
+
+Note, this is essential to enable time correlation between host and guest machines.
+
+ $ cat /proc/cmdline
+ BOOT_IMAGE=/boot/vmlinuz-5.10.0-16-amd64 root=UUID=cb49c910-e573-47e0-bce7-79e293df8e1d ro no-kvmclock
+
+There is no BPF support at present so, if possible, disable JIT compiling:
+
+ $ echo 0 | sudo tee /proc/sys/net/core/bpf_jit_enable
+ 0
+
+Start perf record to collect sideband events:
+
+ $ sudo perf record -o guest-sideband-testing-guest-perf.data --sample-identifier --buildid-all --switch-events --kcore -a -e dummy
+
+On the host machine:
+
+Start perf record to collect Intel PT trace:
+
+Note, the host trace will get very big, very fast, so the steps between starting and stopping the host trace should be completed as quickly as possible.
+
+ $ sudo perf record -o guest-sideband-testing-host-perf.data -m,64M --kcore -a -e intel_pt/cyc/
+
+On the guest machine:
+
+Run a small test case, just 'uname' in this example:
+
+ $ uname
+ Linux
+
+On the host machine:
+
+Stop the Intel PT trace:
+
+ ^C
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 76.122 MB guest-sideband-testing-host-perf.data ]
+
+On the guest machine:
+
+Stop the Intel PT trace:
+
+ ^C
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 1.247 MB guest-sideband-testing-guest-perf.data ]
+
+And then copy guest-sideband-testing-guest-perf.data to the host (not shown here).
+
+On the host machine:
+
+With the 2 perf.data recordings now on the host, and with their ownership changed to the user, proceed as follows.
+
+Identify the TSC Offset:
+
+ $ perf inject -i guest-sideband-testing-host-perf.data --vm-time-correlation=dry-run
+ VMCS: 0x103fc6 TSC Offset 0xfffffa6ae070cb20
+ VMCS: 0x103ff2 TSC Offset 0xfffffa6ae070cb20
+ VMCS: 0x10fdaa TSC Offset 0xfffffa6ae070cb20
+ VMCS: 0x24d57c TSC Offset 0xfffffa6ae070cb20
+
+Correct Intel PT TSC timestamps for the guest machine:
+
+ $ perf inject -i guest-sideband-testing-host-perf.data --vm-time-correlation=0xfffffa6ae070cb20 --force
+
+Identify the guest machine PID:
+
+ $ perf script -i guest-sideband-testing-host-perf.data --no-itrace --show-task-events | grep KVM
+ CPU 0/KVM 0 [000] 0.000000: PERF_RECORD_COMM: CPU 0/KVM:13376/13381
+ CPU 1/KVM 0 [000] 0.000000: PERF_RECORD_COMM: CPU 1/KVM:13376/13382
+ CPU 2/KVM 0 [000] 0.000000: PERF_RECORD_COMM: CPU 2/KVM:13376/13383
+ CPU 3/KVM 0 [000] 0.000000: PERF_RECORD_COMM: CPU 3/KVM:13376/13384
+
+Note, the QEMU option -name debug-threads=on is needed so that thread names
+can be used to determine which thread is running which VCPU as above. libvirt seems to use this by default.
+
+Create a guestmount, assuming the guest machine is 'vm_to_test':
+
+ $ mkdir -p ~/guestmount/13376
+ $ sshfs -o direct_io vm_to_test:/ ~/guestmount/13376
+
+Inject the guest perf.data file into the host perf.data file:
+
+Note, due to the guestmount option, guest object files and debug files will be copied into the build ID cache from the guest machine, with the notable exception of VDSO.
+If needed, VDSO can be copied manually in a fashion similar to that used by the perf-archive script.
+
+ $ perf inject -i guest-sideband-testing-host-perf.data -o inj --guestmount ~/guestmount --guest-data=guest-sideband-testing-guest-perf.data,13376,0xfffffa6ae070cb20
+
+Show an excerpt from the result. In this case the CPU and time range have been chosen to show interaction between guest and host when 'uname' is starting to run on the guest machine:
+
+Notes:
+
+ - the CPU displayed, [002] in this case, is always the host CPU
+ - events happening in the virtual machine start with VM:13376 VCPU:003, which shows the hypervisor PID 13376 and the VCPU number
+ - only calls and errors are displayed i.e. --itrace=ce
+ - branches entering and exiting the virtual machine are split, and show as 2 branches to/from "0 [unknown] ([unknown])"
+
+ $ perf script -i inj --itrace=ce -F+machine_pid,+vcpu,+addr,+pid,+tid,-period --ns --time 7919.408803365,7919.408804631 -C 2
+ CPU 3/KVM 13376/13384 [002] 7919.408803365: branches: ffffffffc0f8ebe0 vmx_vcpu_enter_exit+0xc0 ([kernel.kallsyms]) => ffffffffc0f8edc0 __vmx_vcpu_run+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803365: branches: ffffffffc0f8edd5 __vmx_vcpu_run+0x15 ([kernel.kallsyms]) => ffffffffc0f8eca0 vmx_update_host_rsp+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803365: branches: ffffffffc0f8ee1b __vmx_vcpu_run+0x5b ([kernel.kallsyms]) => ffffffffc0f8ed60 vmx_vmenter+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803461: branches: ffffffffc0f8ed62 vmx_vmenter+0x2 ([kernel.kallsyms]) => 0 [unknown] ([unknown])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408803461: branches: 0 [unknown] ([unknown]) => 7f851c9b5a5c init_cacheinfo+0x3ac (/usr/lib/x86_64-linux-gnu/libc-2.31.so)
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408803567: branches: 7f851c9b5a5a init_cacheinfo+0x3aa (/usr/lib/x86_64-linux-gnu/libc-2.31.so) => 0 [unknown] ([unknown])
+ CPU 3/KVM 13376/13384 [002] 7919.408803567: branches: 0 [unknown] ([unknown]) => ffffffffc0f8ed80 vmx_vmexit+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803596: branches: ffffffffc0f6619a vmx_vcpu_run+0x26a ([kernel.kallsyms]) => ffffffffb2255c60 x86_virt_spec_ctrl+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803801: branches: ffffffffc0f66445 vmx_vcpu_run+0x515 ([kernel.kallsyms]) => ffffffffb2290b30 native_write_msr+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803850: branches: ffffffffc0f661f8 vmx_vcpu_run+0x2c8 ([kernel.kallsyms]) => ffffffffc1092300 kvm_load_host_xsave_state+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803850: branches: ffffffffc1092327 kvm_load_host_xsave_state+0x27 ([kernel.kallsyms]) => ffffffffc1092220 kvm_load_host_xsave_state.part.0+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803862: branches: ffffffffc0f662cf vmx_vcpu_run+0x39f ([kernel.kallsyms]) => ffffffffc0f63f90 vmx_recover_nmi_blocking+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803862: branches: ffffffffc0f662e9 vmx_vcpu_run+0x3b9 ([kernel.kallsyms]) => ffffffffc0f619a0 __vmx_complete_interrupts+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803872: branches: ffffffffc109cfb2 vcpu_enter_guest+0x752 ([kernel.kallsyms]) => ffffffffc0f5f570 vmx_handle_exit_irqoff+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803881: branches: ffffffffc109d028 vcpu_enter_guest+0x7c8 ([kernel.kallsyms]) => ffffffffb234f900 __srcu_read_lock+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803897: branches: ffffffffc109d06f vcpu_enter_guest+0x80f ([kernel.kallsyms]) => ffffffffc0f72e30 vmx_handle_exit+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803897: branches: ffffffffc0f72e3d vmx_handle_exit+0xd ([kernel.kallsyms]) => ffffffffc0f727c0 __vmx_handle_exit+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803897: branches: ffffffffc0f72b15 __vmx_handle_exit+0x355 ([kernel.kallsyms]) => ffffffffc0f60ae0 vmx_flush_pml_buffer+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803903: branches: ffffffffc0f72994 __vmx_handle_exit+0x1d4 ([kernel.kallsyms]) => ffffffffc10b7090 kvm_emulate_cpuid+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803903: branches: ffffffffc10b70f1 kvm_emulate_cpuid+0x61 ([kernel.kallsyms]) => ffffffffc10b6e10 kvm_cpuid+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803941: branches: ffffffffc10b7125 kvm_emulate_cpuid+0x95 ([kernel.kallsyms]) => ffffffffc1093110 kvm_skip_emulated_instruction+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803941: branches: ffffffffc109311f kvm_skip_emulated_instruction+0xf ([kernel.kallsyms]) => ffffffffc0f5e180 vmx_get_rflags+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803951: branches: ffffffffc109312a kvm_skip_emulated_instruction+0x1a ([kernel.kallsyms]) => ffffffffc0f5fd30 vmx_skip_emulated_instruction+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803951: branches: ffffffffc0f5fd79 vmx_skip_emulated_instruction+0x49 ([kernel.kallsyms]) => ffffffffc0f5fb50 skip_emulated_instruction+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803956: branches: ffffffffc0f5fc68 skip_emulated_instruction+0x118 ([kernel.kallsyms]) => ffffffffc0f6a940 vmx_cache_reg+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803964: branches: ffffffffc0f5fc11 skip_emulated_instruction+0xc1 ([kernel.kallsyms]) => ffffffffc0f5f9e0 vmx_set_interrupt_shadow+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803980: branches: ffffffffc109f8b1 vcpu_run+0x71 ([kernel.kallsyms]) => ffffffffc10ad2f0 kvm_cpu_has_pending_timer+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803980: branches: ffffffffc10ad2fb kvm_cpu_has_pending_timer+0xb ([kernel.kallsyms]) => ffffffffc10b0490 apic_has_pending_timer+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803991: branches: ffffffffc109f899 vcpu_run+0x59 ([kernel.kallsyms]) => ffffffffc109c860 vcpu_enter_guest+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803993: branches: ffffffffc109cd4c vcpu_enter_guest+0x4ec ([kernel.kallsyms]) => ffffffffc0f69140 vmx_prepare_switch_to_guest+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803996: branches: ffffffffc109cd7d vcpu_enter_guest+0x51d ([kernel.kallsyms]) => ffffffffb234f930 __srcu_read_unlock+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803996: branches: ffffffffc109cd9c vcpu_enter_guest+0x53c ([kernel.kallsyms]) => ffffffffc0f609b0 vmx_sync_pir_to_irr+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408803996: branches: ffffffffc0f60a6d vmx_sync_pir_to_irr+0xbd ([kernel.kallsyms]) => ffffffffc10adc20 kvm_lapic_find_highest_irr+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804010: branches: ffffffffc0f60abd vmx_sync_pir_to_irr+0x10d ([kernel.kallsyms]) => ffffffffc0f60820 vmx_set_rvi+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804019: branches: ffffffffc109ceca vcpu_enter_guest+0x66a ([kernel.kallsyms]) => ffffffffb2249840 fpregs_assert_state_consistent+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804021: branches: ffffffffc109cf10 vcpu_enter_guest+0x6b0 ([kernel.kallsyms]) => ffffffffc0f65f30 vmx_vcpu_run+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804024: branches: ffffffffc0f6603b vmx_vcpu_run+0x10b ([kernel.kallsyms]) => ffffffffb229bed0 __get_current_cr3_fast+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804024: branches: ffffffffc0f66055 vmx_vcpu_run+0x125 ([kernel.kallsyms]) => ffffffffb2253050 cr4_read_shadow+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804030: branches: ffffffffc0f6608d vmx_vcpu_run+0x15d ([kernel.kallsyms]) => ffffffffc10921e0 kvm_load_guest_xsave_state+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804030: branches: ffffffffc1092207 kvm_load_guest_xsave_state+0x27 ([kernel.kallsyms]) => ffffffffc1092110 kvm_load_guest_xsave_state.part.0+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804032: branches: ffffffffc0f660c6 vmx_vcpu_run+0x196 ([kernel.kallsyms]) => ffffffffb22061a0 perf_guest_get_msrs+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804032: branches: ffffffffb22061a9 perf_guest_get_msrs+0x9 ([kernel.kallsyms]) => ffffffffb220cda0 intel_guest_get_msrs+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804039: branches: ffffffffc0f66109 vmx_vcpu_run+0x1d9 ([kernel.kallsyms]) => ffffffffc0f652c0 clear_atomic_switch_msr+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804040: branches: ffffffffc0f66119 vmx_vcpu_run+0x1e9 ([kernel.kallsyms]) => ffffffffc0f73f60 intel_pmu_lbr_is_enabled+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804042: branches: ffffffffc0f73f81 intel_pmu_lbr_is_enabled+0x21 ([kernel.kallsyms]) => ffffffffc10b68e0 kvm_find_cpuid_entry+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804045: branches: ffffffffc0f66454 vmx_vcpu_run+0x524 ([kernel.kallsyms]) => ffffffffc0f61ff0 vmx_update_hv_timer+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffc0f66142 vmx_vcpu_run+0x212 ([kernel.kallsyms]) => ffffffffc10af100 kvm_wait_lapic_expire+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffc0f66156 vmx_vcpu_run+0x226 ([kernel.kallsyms]) => ffffffffb2255c60 x86_virt_spec_ctrl+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffc0f66161 vmx_vcpu_run+0x231 ([kernel.kallsyms]) => ffffffffc0f8eb20 vmx_vcpu_enter_exit+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffc0f8eb44 vmx_vcpu_enter_exit+0x24 ([kernel.kallsyms]) => ffffffffb2353e10 rcu_note_context_switch+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804057: branches: ffffffffb2353e1c rcu_note_context_switch+0xc ([kernel.kallsyms]) => ffffffffb2353db0 rcu_qs+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804066: branches: ffffffffc0f8ebe0 vmx_vcpu_enter_exit+0xc0 ([kernel.kallsyms]) => ffffffffc0f8edc0 __vmx_vcpu_run+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804066: branches: ffffffffc0f8edd5 __vmx_vcpu_run+0x15 ([kernel.kallsyms]) => ffffffffc0f8eca0 vmx_update_host_rsp+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804066: branches: ffffffffc0f8ee1b __vmx_vcpu_run+0x5b ([kernel.kallsyms]) => ffffffffc0f8ed60 vmx_vmenter+0x0 ([kernel.kallsyms])
+ CPU 3/KVM 13376/13384 [002] 7919.408804162: branches: ffffffffc0f8ed62 vmx_vmenter+0x2 ([kernel.kallsyms]) => 0 [unknown] ([unknown])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804162: branches: 0 [unknown] ([unknown]) => 7f851c9b5a5c init_cacheinfo+0x3ac (/usr/lib/x86_64-linux-gnu/libc-2.31.so)
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804273: branches: 7f851cb7c0e4 _dl_init+0x74 (/usr/lib/x86_64-linux-gnu/ld-2.31.so) => 7f851cb7bf50 call_init.part.0+0x0 (/usr/lib/x86_64-linux-gnu/ld-2.31.so)
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804526: branches: 55e0c00136f0 _start+0x0 (/usr/bin/uname) => ffffffff83200ac0 asm_exc_page_fault+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804526: branches: ffffffff83200ac3 asm_exc_page_fault+0x3 ([kernel.kallsyms]) => ffffffff83201290 error_entry+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804534: branches: ffffffff832012fa error_entry+0x6a ([kernel.kallsyms]) => ffffffff830b59a0 sync_regs+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804631: branches: ffffffff83200ad9 asm_exc_page_fault+0x19 ([kernel.kallsyms]) => ffffffff830b8210 exc_page_fault+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804631: branches: ffffffff830b82a4 exc_page_fault+0x94 ([kernel.kallsyms]) => ffffffff830b80e0 __kvm_handle_async_pf+0x0 ([kernel.kallsyms])
+ VM:13376 VCPU:003 uname 3404/3404 [002] 7919.408804631: branches: ffffffff830b80ed __kvm_handle_async_pf+0xd ([kernel.kallsyms]) => ffffffff830b80c0 kvm_read_and_reset_apf_flags+0x0 ([kernel.kallsyms])
+
+
+Tracing Virtual Machines - Guest Code
+-------------------------------------
+
+A common case for KVM test programs is that the test program acts as the
+hypervisor, creating, running and destroying the virtual machine, and
+providing the guest object code from its own object code. In this case,
+the VM is not running an OS, but only the functions loaded into it by the
+hypervisor test program, and conveniently, loaded at the same virtual
+addresses. To support that, option "--guest-code" has been added to perf script
+and perf kvm report.
+
+Here is an example tracing a test program from the kernel's KVM selftests:
+
+ # perf record --kcore -e intel_pt/cyc/ -- tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.280 MB perf.data ]
+ # perf script --guest-code --itrace=bep --ns -F-period,+addr,+flags
+ [SNIP]
+ tsc_msrs_test 18436 [007] 10897.962087733: branches: call ffffffffc13b2ff5 __vmx_vcpu_run+0x15 (vmlinux) => ffffffffc13b2f50 vmx_update_host_rsp+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962087733: branches: return ffffffffc13b2f5d vmx_update_host_rsp+0xd (vmlinux) => ffffffffc13b2ffa __vmx_vcpu_run+0x1a (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962087733: branches: call ffffffffc13b303b __vmx_vcpu_run+0x5b (vmlinux) => ffffffffc13b2f80 vmx_vmenter+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962087836: branches: vmentry ffffffffc13b2f82 vmx_vmenter+0x2 (vmlinux) => 0 [unknown] ([unknown])
+ [guest/18436] 18436 [007] 10897.962087836: branches: vmentry 0 [unknown] ([unknown]) => 402c81 guest_code+0x131 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962087836: branches: call 402c81 guest_code+0x131 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dba0 ucall+0x0 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962088248: branches: vmexit 40dba0 ucall+0x0 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 0 [unknown] ([unknown])
+ tsc_msrs_test 18436 [007] 10897.962088248: branches: vmexit 0 [unknown] ([unknown]) => ffffffffc13b2fa0 vmx_vmexit+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962088248: branches: jmp ffffffffc13b2fa0 vmx_vmexit+0x0 (vmlinux) => ffffffffc13b2fd2 vmx_vmexit+0x32 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962088256: branches: return ffffffffc13b2fd2 vmx_vmexit+0x32 (vmlinux) => ffffffffc13b3040 __vmx_vcpu_run+0x60 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962088270: branches: return ffffffffc13b30b6 __vmx_vcpu_run+0xd6 (vmlinux) => ffffffffc13b2f2e vmx_vcpu_enter_exit+0x4e (vmlinux)
+ [SNIP]
+ tsc_msrs_test 18436 [007] 10897.962089321: branches: call ffffffffc13b2ff5 __vmx_vcpu_run+0x15 (vmlinux) => ffffffffc13b2f50 vmx_update_host_rsp+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089321: branches: return ffffffffc13b2f5d vmx_update_host_rsp+0xd (vmlinux) => ffffffffc13b2ffa __vmx_vcpu_run+0x1a (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089321: branches: call ffffffffc13b303b __vmx_vcpu_run+0x5b (vmlinux) => ffffffffc13b2f80 vmx_vmenter+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089424: branches: vmentry ffffffffc13b2f82 vmx_vmenter+0x2 (vmlinux) => 0 [unknown] ([unknown])
+ [guest/18436] 18436 [007] 10897.962089424: branches: vmentry 0 [unknown] ([unknown]) => 40dba0 ucall+0x0 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089701: branches: jmp 40dc1b ucall+0x7b (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dc39 ucall+0x99 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089701: branches: jcc 40dc3c ucall+0x9c (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dc20 ucall+0x80 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089701: branches: jcc 40dc3c ucall+0x9c (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dc20 ucall+0x80 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089701: branches: jcc 40dc37 ucall+0x97 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 40dc50 ucall+0xb0 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test)
+ [guest/18436] 18436 [007] 10897.962089878: branches: vmexit 40dc55 ucall+0xb5 (/home/user/git/work/tools/testing/selftests/kselftest_install/kvm/tsc_msrs_test) => 0 [unknown] ([unknown])
+ tsc_msrs_test 18436 [007] 10897.962089878: branches: vmexit 0 [unknown] ([unknown]) => ffffffffc13b2fa0 vmx_vmexit+0x0 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089878: branches: jmp ffffffffc13b2fa0 vmx_vmexit+0x0 (vmlinux) => ffffffffc13b2fd2 vmx_vmexit+0x32 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089887: branches: return ffffffffc13b2fd2 vmx_vmexit+0x32 (vmlinux) => ffffffffc13b3040 __vmx_vcpu_run+0x60 (vmlinux)
+ tsc_msrs_test 18436 [007] 10897.962089901: branches: return ffffffffc13b30b6 __vmx_vcpu_run+0xd6 (vmlinux) => ffffffffc13b2f2e vmx_vcpu_enter_exit+0x4e (vmlinux)
+ [SNIP]
+
+ # perf kvm --guest-code --guest --host report -i perf.data --stdio | head -20
+
+ # To display the perf.data header info, please use --header/--header-only options.
+ #
+ #
+ # Total Lost Samples: 0
+ #
+ # Samples: 12 of event 'instructions'
+ # Event count (approx.): 2274583
+ #
+ # Children Self Command Shared Object Symbol
+ # ........ ........ ............. .................... ...........................................
+ #
+ 54.70% 0.00% tsc_msrs_test [kernel.vmlinux] [k] entry_SYSCALL_64_after_hwframe
+ |
+ ---entry_SYSCALL_64_after_hwframe
+ do_syscall_64
+ |
+ |--29.44%--syscall_exit_to_user_mode
+ | exit_to_user_mode_prepare
+ | task_work_run
+ | __fput
+
+
+Event Trace
+-----------
+
+Event Trace records information about asynchronous events, for example interrupts,
+faults, VM exits and entries. The information is recorded in CFE and EVD packets,
+and also the Interrupt Flag is recorded on the MODE.Exec packet. The CFE packet
+contains a type field to identify one of the following:
+
+ 1 INTR interrupt, fault, exception, NMI
+ 2 IRET interrupt return
+ 3 SMI system management interrupt
+ 4 RSM resume from system management mode
+ 5 SIPI startup interprocessor interrupt
+ 6 INIT INIT signal
+ 7 VMENTRY VM-Entry
+ 8 VMEXIT VM-Exit
+ 9 VMEXIT_INTR VM-Exit due to interrupt
+ 10 SHUTDOWN Shutdown
+
+For more details, refer to the Intel 64 and IA-32 Architectures Software
+Developer Manuals (version 076 or later).
+
+The capability to do Event Trace is indicated by the
+/sys/bus/event_source/devices/intel_pt/caps/event_trace file.
+
+Event trace is selected for recording using the "event" config term. e.g.
+
+ perf record -e intel_pt/event/u uname
+
+Event trace events are output using the --itrace I option. e.g.
+
+ perf script --itrace=Ie
+
+perf script displays events containing CFE type, vector and event data,
+in the form:
+
+ evt: hw int (t) cfe: INTR IP: 1 vector: 3 PFA: 0x8877665544332211
+
+The IP flag indicates if the event binds to an IP, which includes any case where
+flow control packet generation is enabled, as well as when CFE packet IP bit is
+set.
+
+perf script displays events containing changes to the Interrupt Flag in the form:
+
+ iflag: t IFLAG: 1->0 via branch
+
+where "via branch" indicates a branch (interrupt or return from interrupt) and
+"non branch" indicates an instruction such as CFI, STI or POPF).
+
+In addition, the current state of the interrupt flag is indicated by the presence
+or absence of the "D" (interrupt disabled) perf script flag. If the interrupt
+flag is changed, then the "t" flag is also included i.e.
+
+ no flag, interrupts enabled IF=1
+ t interrupts become disabled IF=1 -> IF=0
+ D interrupts are disabled IF=0
+ Dt interrupts become enabled IF=0 -> IF=1
+
+The intel-pt-events.py script illustrates how to access Event Trace information
+using a Python script.
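+
+For example, one possible invocation (an illustrative sketch, assuming a kernel
+source checkout so that the script path below exists) is:
+
+ perf record -e intel_pt/event/u uname
+ perf script --itrace=Ie -s tools/perf/scripts/python/intel-pt-events.py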
+
+
+TNT Disable
+-----------
+
+TNT packets are disabled using the "notnt" config term. e.g.
+
+ perf record -e intel_pt/notnt/u uname
+
+In that case the --itrace q option is forced because walking executable code
+to reconstruct the control flow is not possible.
+
+
+Emulated PTWRITE
+----------------
+
+Later perf tools support a method to emulate the ptwrite instruction, which
+can be useful if hardware does not support the ptwrite instruction.
+
+Instead of using the ptwrite instruction, a function is used which produces
+a trace that encodes the payload data into TNT packets. Here is an example
+of the function:
+
+ #include <stdint.h>
+
+ void perf_emulate_ptwrite(uint64_t x)
+ __attribute__((externally_visible, noipa, no_instrument_function, naked));
+
+ #define PERF_EMULATE_PTWRITE_8_BITS \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n" \
+ "1: shl %rax\n" \
+ " jc 1f\n"
+
+ /* Undefined instruction */
+ #define PERF_EMULATE_PTWRITE_UD2 ".byte 0x0f, 0x0b\n"
+
+ #define PERF_EMULATE_PTWRITE_MAGIC PERF_EMULATE_PTWRITE_UD2 ".ascii \"perf,ptwrite \"\n"
+
+ void perf_emulate_ptwrite(uint64_t x __attribute__ ((__unused__)))
+ {
+ /* Assumes SysV ABI : x passed in rdi */
+ __asm__ volatile (
+ "jmp 1f\n"
+ PERF_EMULATE_PTWRITE_MAGIC
+ "1: mov %rdi, %rax\n"
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ PERF_EMULATE_PTWRITE_8_BITS
+ "1: ret\n"
+ );
+ }
+
+For example, a test program with the function above:
+
+ #include <stdio.h>
+ #include <stdint.h>
+ #include <stdlib.h>
+
+ #include "perf_emulate_ptwrite.h"
+
+ int main(int argc, char *argv[])
+ {
+ uint64_t x = 0;
+
+ if (argc > 1)
+ x = strtoull(argv[1], NULL, 0);
+ perf_emulate_ptwrite(x);
+ return 0;
+ }
+
+Can be compiled and traced:
+
+ $ gcc -Wall -Wextra -O3 -g -o eg_ptw eg_ptw.c
+ $ perf record -e intel_pt//u ./eg_ptw 0x1234567890abcdef
+ [ perf record: Woken up 1 times to write data ]
+ [ perf record: Captured and wrote 0.017 MB perf.data ]
+ $ perf script --itrace=ew
+ eg_ptw 19875 [007] 8061.235912: ptwrite: IP: 0 payload: 0x1234567890abcdef 55701249a196 perf_emulate_ptwrite+0x16 (/home/user/eg_ptw)
+ $
+
+
+Pipe mode
+---------
+Pipe mode is a problem for Intel PT and possibly other auxtrace users.
+It's not recommended to use a pipe as data output with Intel PT because
+of the following reason.
+
+Essentially the auxtrace buffers do not behave like the regular perf
+event buffers. That is because the head and tail are updated by
+software, but in the auxtrace case the data is written by hardware.
+So the head and tail do not get updated as data is written.
+
+In the Intel PT case, the head and tail are updated only when the trace
+is disabled by software, for example:
+ - full-trace, system wide : when buffer passes watermark
+ - full-trace, not system-wide : when buffer passes watermark or
+ context switches
+ - snapshot mode : as above but also when a snapshot is made
+ - sample mode : as above but also when a sample is made
+
+That means finished-round ordering doesn't work. An auxtrace buffer
+can turn up that has data that extends back in time, possibly to the
+very beginning of tracing.
+
+For a perf.data file, that problem is solved by going through the trace
+and queuing up the auxtrace buffers in advance.
+
+For pipe mode, the order of events and timestamps can presumably
+be messed up.
+
+
+EXAMPLE
+-------
+
+Examples can be found on perf wiki page "Perf tools support for Intel® Processor Trace":
+
+https://perf.wiki.kernel.org/index.php/Perf_tools_support_for_Intel%C2%AE_Processor_Trace
+
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-iostat.txt b/tools/perf/Documentation/perf-iostat.txt
new file mode 100644
index 000000000000..04d510364384
--- /dev/null
+++ b/tools/perf/Documentation/perf-iostat.txt
@@ -0,0 +1,88 @@
+perf-iostat(1)
+===============
+
+NAME
+----
+perf-iostat - Show I/O performance metrics
+
+SYNOPSIS
+--------
+[verse]
+'perf iostat' list
+'perf iostat' <ports> \-- <command> [<options>]
+
+DESCRIPTION
+-----------
+Mode is intended to provide four I/O performance metrics for each PCIe root port:
+
+- Inbound Read - I/O devices below root port read from the host memory, in MB
+
+- Inbound Write - I/O devices below root port write to the host memory, in MB
+
+- Outbound Read - CPU reads from I/O devices below root port, in MB
+
+- Outbound Write - CPU writes to I/O devices below root port, in MB
+
+OPTIONS
+-------
+<command>...::
+ Any command you can specify in a shell.
+
+list::
+ List all PCIe root ports.
+
+<ports>::
+ Select the root ports for monitoring. Comma-separated list is supported.
+
+EXAMPLES
+--------
+
+1. List all PCIe root ports (example for 2-S platform):
+
+ $ perf iostat list
+ S0-uncore_iio_0<0000:00>
+ S1-uncore_iio_0<0000:80>
+ S0-uncore_iio_1<0000:17>
+ S1-uncore_iio_1<0000:85>
+ S0-uncore_iio_2<0000:3a>
+ S1-uncore_iio_2<0000:ae>
+ S0-uncore_iio_3<0000:5d>
+ S1-uncore_iio_3<0000:d7>
+
+2. Collect metrics for all PCIe root ports:
+
+ $ perf iostat -- dd if=/dev/zero of=/dev/nvme0n1 bs=1M oflag=direct
+ 357708+0 records in
+ 357707+0 records out
+ 375083606016 bytes (375 GB, 349 GiB) copied, 215.974 s, 1.7 GB/s
+
+ Performance counter stats for 'system wide':
+
+ port Inbound Read(MB) Inbound Write(MB) Outbound Read(MB) Outbound Write(MB)
+ 0000:00 1 0 2 3
+ 0000:80 0 0 0 0
+ 0000:17 352552 43 0 21
+ 0000:85 0 0 0 0
+ 0000:3a 3 0 0 0
+ 0000:ae 0 0 0 0
+ 0000:5d 0 0 0 0
+ 0000:d7 0 0 0 0
+
+3. Collect metrics for comma-separated list of PCIe root ports:
+
+ $ perf iostat 0000:17,0:3a -- dd if=/dev/zero of=/dev/nvme0n1 bs=1M oflag=direct
+ 357708+0 records in
+ 357707+0 records out
+ 375083606016 bytes (375 GB, 349 GiB) copied, 197.08 s, 1.9 GB/s
+
+ Performance counter stats for 'system wide':
+
+ port Inbound Read(MB) Inbound Write(MB) Outbound Read(MB) Outbound Write(MB)
+ 0000:17 358559 44 0 22
+ 0000:3a 3 2 0 0
+
+ 197.081983474 seconds time elapsed
+
+SEE ALSO
+--------
+linkperf:perf-stat[1]
diff --git a/tools/perf/Documentation/perf-kallsyms.txt b/tools/perf/Documentation/perf-kallsyms.txt
index f3c620951f6e..c97527df8ecd 100644
--- a/tools/perf/Documentation/perf-kallsyms.txt
+++ b/tools/perf/Documentation/perf-kallsyms.txt
@@ -20,5 +20,5 @@ modules).
OPTIONS
-------
-v::
---verbose=::
+--verbose::
Increase verbosity level, showing details about symbol table loading, etc.
diff --git a/tools/perf/Documentation/perf-kmem.txt b/tools/perf/Documentation/perf-kmem.txt
index 85b8ac695c87..f378ac59353d 100644
--- a/tools/perf/Documentation/perf-kmem.txt
+++ b/tools/perf/Documentation/perf-kmem.txt
@@ -8,22 +8,25 @@ perf-kmem - Tool to trace/measure kernel memory properties
SYNOPSIS
--------
[verse]
-'perf kmem' {record|stat} [<options>]
+'perf kmem' [<options>] {record|stat}
DESCRIPTION
-----------
There are two variants of perf kmem:
- 'perf kmem record <command>' to record the kmem events
- of an arbitrary workload.
+ 'perf kmem [<options>] record [<perf-record-options>] <command>' to
+ record the kmem events of an arbitrary workload. Additional 'perf
+ record' options may be specified after record, such as '-o' to
+ change the output file name.
- 'perf kmem stat' to report kernel memory statistics.
+ 'perf kmem [<options>] stat' to report kernel memory statistics.
OPTIONS
-------
-i <file>::
--input=<file>::
- Select the input file (default: perf.data unless stdin is a fifo)
+ For stat, select the input file (default: perf.data unless stdin is a
+ fifo)
-f::
--force::
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index cf95baef7b61..b66be66fe836 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -58,7 +58,7 @@ There are a couple of variants of perf kvm:
events.
'perf kvm stat report' reports statistical data which includes events
- handled time, samples, and so on.
+ handled, showing sample, percent_sample, time, percent_time, max_t, min_t and mean_t.
'perf kvm stat live' reports statistical data in a live mode (similar to
record + report but with statistical data updated live at a given display
@@ -77,23 +77,13 @@ OPTIONS
Collect host side performance profile.
--guest::
Collect guest side performance profile.
---guestmount=<path>::
- Guest os root file system mount directory. Users mounts guest os
- root directories under <path> by a specific filesystem access method,
- typically, sshfs. For example, start 2 guest os. The one's pid is 8888
- and the other's is 9999.
- #mkdir ~/guestmount; cd ~/guestmount
- #sshfs -o allow_other,direct_io -p 5551 localhost:/ 8888/
- #sshfs -o allow_other,direct_io -p 5552 localhost:/ 9999/
- #perf kvm --host --guest --guestmount=~/guestmount top
---guestkallsyms=<path>::
- Guest os /proc/kallsyms file copy. 'perf' kvm' reads it to get guest
- kernel symbols. Users copy it out from guest os.
---guestmodules=<path>::
- Guest os /proc/modules file copy. 'perf' kvm' reads it to get guest
- kernel module information. Users copy it out from guest os.
---guestvmlinux=<path>::
- Guest os kernel vmlinux.
+
+:GMEXAMPLECMD: kvm --host --guest
+:GMEXAMPLESUBCMD: top
+include::guest-files.txt[]
+
+--stdio:: Use the stdio interface.
+
-v::
--verbose::
Be more verbose (show counter open errors, etc).
@@ -109,7 +99,10 @@ STAT REPORT OPTIONS
-k::
--key=<value>::
Sorting key. Possible values: sample (default, sort by samples
- number), time (sort by average time).
+ number), percent_sample (sort by sample percentage), time
+ (sort by average time), percent_time (sort by time percentage),
+ max_t (sort by maximum time), min_t (sort by minimum time), mean_t
+ (sort by mean time).
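+
+ For example, assuming a perf.data file already produced by
+ 'perf kvm stat record', exit events could be sorted by maximum
+ handling time with (illustrative):
+
+   # perf kvm stat report -k max_t
+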
-p::
--pid=::
Analyze events only for given process ID(s) (comma separated list).
diff --git a/tools/perf/Documentation/perf-kwork.txt b/tools/perf/Documentation/perf-kwork.txt
new file mode 100644
index 000000000000..3c36324712b6
--- /dev/null
+++ b/tools/perf/Documentation/perf-kwork.txt
@@ -0,0 +1,180 @@
+perf-kwork(1)
+=============
+
+NAME
+----
+perf-kwork - Tool to trace/measure kernel work properties (latencies)
+
+SYNOPSIS
+--------
+[verse]
+'perf kwork' {record|report|latency|timehist}
+
+DESCRIPTION
+-----------
+There are several variants of 'perf kwork':
+
+ 'perf kwork record <command>' to record the kernel work
+ of an arbitrary workload.
+
+ 'perf kwork report' to report the per kwork runtime.
+
+ 'perf kwork latency' to report the per kwork latencies.
+
+ 'perf kwork timehist' provides an analysis of kernel work events.
+
+ Example usage:
+ perf kwork record -- sleep 1
+ perf kwork report
+ perf kwork report -b
+ perf kwork latency
+ perf kwork latency -b
+ perf kwork timehist
+
+ By default it shows the individual work events such as irq, workqueue,
+ including the run time and delay (time between raise and actual entry):
+
+ Runtime start Runtime end Cpu Kwork name Runtime Delaytime
+ (TYPE)NAME:NUM (msec) (msec)
+ ----------------- ----------------- ------ ------------------------- ---------- ----------
+ 1811186.976062 1811186.976327 [0000] (s)RCU:9 0.266 0.114
+ 1811186.978452 1811186.978547 [0000] (s)SCHED:7 0.095 0.171
+ 1811186.980327 1811186.980490 [0000] (s)SCHED:7 0.162 0.083
+ 1811186.981221 1811186.981271 [0000] (s)SCHED:7 0.050 0.077
+ 1811186.984267 1811186.984318 [0000] (s)SCHED:7 0.051 0.075
+ 1811186.987252 1811186.987315 [0000] (s)SCHED:7 0.063 0.081
+ 1811186.987785 1811186.987843 [0006] (s)RCU:9 0.058 0.645
+ 1811186.988319 1811186.988383 [0000] (s)SCHED:7 0.064 0.143
+ 1811186.989404 1811186.989607 [0002] (s)TIMER:1 0.203 0.111
+ 1811186.989660 1811186.989732 [0002] (s)SCHED:7 0.072 0.310
+ 1811186.991295 1811186.991407 [0002] eth0:10 0.112
+ 1811186.991639 1811186.991734 [0002] (s)NET_RX:3 0.095 0.277
+ 1811186.989860 1811186.991826 [0002] (w)vmstat_shepherd 1.966 0.345
+ ...
+
+ Times are in msec.usec.
+
+OPTIONS
+-------
+-D::
+--dump-raw-trace=::
+ Display verbose dump of the sched data.
+
+-f::
+--force::
+ Don't complain, do it.
+
+-k::
+--kwork::
+ List of kwork to profile (irq, softirq, workqueue, etc)
+
+-v::
+--verbose::
+ Be more verbose. (show symbol address, etc)
+
+OPTIONS for 'perf kwork report'
+-------------------------------
+
+-b::
+--use-bpf::
+ Use BPF to measure kwork runtime
+
+-C::
+--cpu::
+ Only show events for the given CPU(s) (comma separated list).
+
+-i::
+--input::
+ Input file name. (default: perf.data unless stdin is a fifo)
+
+-n::
+--name::
+ Only show events for the given name.
+
+-s::
+--sort::
+ Sort by key(s): runtime, max, count
+
+-S::
+--with-summary::
+ Show summary with statistics
+
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+ stop time is not given (i.e, time string is 'x.y,') then analysis goes
+ to end of file.
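+
+ For example (an illustrative sketch; the timestamps mirror the sample
+ output above and are placeholders):
+
+   perf kwork report -s max --time 1811186.97,1811186.99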
+
+OPTIONS for 'perf kwork latency'
+--------------------------------
+
+-b::
+--use-bpf::
+ Use BPF to measure kwork latency
+
+-C::
+--cpu::
+ Only show events for the given CPU(s) (comma separated list).
+
+-i::
+--input::
+ Input file name. (default: perf.data unless stdin is a fifo)
+
+-n::
+--name::
+ Only show events for the given name.
+
+-s::
+--sort::
+ Sort by key(s): avg, max, count
+
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+ stop time is not given (i.e, time string is 'x.y,') then analysis goes
+ to end of file.
+
+OPTIONS for 'perf kwork timehist'
+---------------------------------
+
+-C::
+--cpu::
+ Only show events for the given CPU(s) (comma separated list).
+
+-g::
+--call-graph::
+ Display call chains if present (default off).
+
+-i::
+--input::
+ Input file name. (default: perf.data unless stdin is a fifo)
+
+-k::
+--vmlinux=<file>::
+ Vmlinux pathname
+
+-n::
+--name::
+ Only show events for the given name.
+
+--kallsyms=<file>::
+ Kallsyms pathname
+
+--max-stack::
+ Maximum number of functions to display in backtrace, default 5.
+
+--symfs=<directory>::
+ Look for files with symbols relative to this directory.
+
+--time::
+ Only analyze samples within given time window: <start>,<stop>. Times
+ have the format seconds.microseconds. If start is not given (i.e., time
+ string is ',x.y') then analysis starts at the beginning of the file. If
+ stop time is not given (i.e, time string is 'x.y,') then analysis goes
+ to end of file.
+
+SEE ALSO
+--------
+linkperf:perf-record[1]
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index 6345db33c533..d5f78e125efe 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -39,6 +39,14 @@ any extra expressions computed by perf stat.
--deprecated::
Print deprecated events. By default the deprecated events are hidden.
+--unit::
+Print PMU events and metrics limited to the specific PMU name.
+(e.g. --unit cpu, --unit msr, --unit cpu_core, --unit cpu_atom)
+
+-j::
+--json::
+Output in JSON format.
+
[[EVENT_MODIFIERS]]
EVENT MODIFIERS
---------------
@@ -58,6 +66,7 @@ counted. The following modifiers exist:
S - read sample value (PERF_SAMPLE_READ)
D - pin the event to the PMU
W - group is weak and will fallback to non-group if not schedulable,
+ e - group or event are exclusive and do not share the PMU
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
@@ -76,7 +85,11 @@ On AMD systems it is implemented using IBS (up to precise-level 2).
The precise modifier works with event types 0x76 (cpu-cycles, CPU
clocks not halted) and 0xC1 (micro-ops retired). Both events map to
IBS execution sampling (IBS op) with the IBS Op Counter Control bit
-(IbsOpCntCtl) set respectively (see AMD64 Architecture Programmer’s
+(IbsOpCntCtl) set respectively (see the
+Core Complex (CCX) -> Processor x86 Core -> Instruction Based Sampling (IBS)
+section of the [AMD Processor Programming Reference (PPR)] relevant to the
+family, model and stepping of the processor being used).
+
Manual Volume 2: System Programming, 13.3 Instruction-Based
Sampling). Examples to use IBS:
@@ -89,10 +102,12 @@ RAW HARDWARE EVENT DESCRIPTOR
Even when an event is not available in a symbolic form within perf right now,
it can be encoded in a per processor specific way.
-For instance For x86 CPUs NNN represents the raw register encoding with the
+For instance on x86 CPUs, N is a hexadecimal value that represents the raw register encoding with the
layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide] Figure 30-1 Layout
-of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344,
-Figure 13-7 Performance Event-Select Register (PerfEvtSeln)).
+of IA32_PERFEVTSELx MSRs) or AMD's PERF_CTL MSRs (see the
+Core Complex (CCX) -> Processor x86 Core -> MSR Registers section of the
+[AMD Processor Programming Reference (PPR)] relevant to the family, model
+and stepping of the processor being used).
Note: Only the following bit fields can be set in x86 counter
registers: event, umask, edge, inv, cmask. Esp. guest/host only and
@@ -115,6 +130,44 @@ raw encoding of 0x1A8 can be used:
perf stat -e r1a8 -a sleep 1
perf record -e r1a8 ...
+It's also possible to use pmu syntax:
+
+ perf record -e r1a8 -a sleep 1
+ perf record -e cpu/r1a8/ ...
+ perf record -e cpu/r0x1a8/ ...
+
+Some processors, like those from AMD, support event codes and unit masks
+larger than a byte. In such cases, the bits corresponding to the event
+configuration parameters can be seen with:
+
+ cat /sys/bus/event_source/devices/<pmu>/format/<config>
+
+Example:
+
+If the AMD docs for an EPYC 7713 processor describe an event as:
+
+ Event Umask Event Mask
+ Num. Value Mnemonic Description
+
+ 28FH 03H op_cache_hit_miss.op_cache_hit Counts Op Cache micro-tag
+ hit events.
+
+raw encoding of 0x0328F cannot be used since the upper nibble of the
+EventSelect bits has to be specified via bits 32-35 as can be seen with:
+
+ cat /sys/bus/event_source/devices/cpu/format/event
+
+raw encoding of 0x20000038F should be used instead:
+
+ perf stat -e r20000038f -a sleep 1
+ perf record -e r20000038f ...
+
+It's also possible to use pmu syntax:
+
+ perf record -e r20000038f -a sleep 1
+ perf record -e cpu/r20000038f/ ...
+ perf record -e cpu/r0x20000038f/ ...
+
You should refer to the processor specific documentation for getting these
details. Some of them are referenced in the SEE ALSO section below.
@@ -179,7 +232,7 @@ This can be overridden by setting the kernel.perf_event_paranoid
sysctl to -1, which allows non root to use these events.
For accessing trace point events perf needs to have read access to
-/sys/kernel/debug/tracing, even when perf_event_paranoid is in a relaxed
+/sys/kernel/tracing, even when perf_event_paranoid is in a relaxed
setting.
TRACING
@@ -258,6 +311,9 @@ Normally all events in an event group sample, but with :S only
the first event (the leader) samples, and it only reads the values of the
other events in the group.
+However, in the case of AUX area events (e.g. Intel PT or CoreSight), the AUX
+area event must be the leader, so then the second event samples, not the first.
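+
+A minimal sketch of such a group (assuming an Intel PT capable system):
+
+  perf record -e '{intel_pt//u,cycles:u}:S' -- uname
+
+Here the intel_pt event is the group leader, so it is the cycles event that
+samples.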
+
OPTIONS
-------
@@ -302,4 +358,4 @@ SEE ALSO
linkperf:perf-stat[1], linkperf:perf-top[1],
linkperf:perf-record[1],
http://www.intel.com/sdm/[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide],
-http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming]
+https://bugzilla.kernel.org/show_bug.cgi?id=206537[AMD Processor Programming Reference (PPR)]
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 74d774592196..6e5ba3cd2b72 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -8,7 +8,7 @@ perf-lock - Analyze lock events
SYNOPSIS
--------
[verse]
-'perf lock' {record|report|script|info}
+'perf lock' {record|report|script|info|contention}
DESCRIPTION
-----------
@@ -27,6 +27,8 @@ and statistics with this 'perf lock' command.
'perf lock info' shows metadata like threads or addresses
of lock instances.
+ 'perf lock contention' shows contention statistics.
+
COMMON OPTIONS
--------------
@@ -38,13 +40,24 @@ COMMON OPTIONS
--verbose::
Be more verbose (show symbol address, etc).
+-q::
+--quiet::
+ Do not show any warnings or messages. (Suppress -v)
+
-D::
--dump-raw-trace::
Dump raw trace in ASCII.
-f::
--force::
- Don't complan, do it.
+ Don't complain, do it.
+
+--vmlinux=<file>::
+ vmlinux pathname
+
+--kallsyms=<file>::
+ kallsyms pathname
+
REPORT OPTIONS
--------------
@@ -54,6 +67,42 @@ REPORT OPTIONS
Sorting key. Possible values: acquired (default), contended,
avg_wait, wait_total, wait_max, wait_min.
+-F::
+--field=<value>::
+ Output fields. By default it shows all the fields but users can
+ customize that using this. Possible values: acquired, contended,
+ avg_wait, wait_total, wait_max, wait_min.
+
+-c::
+--combine-locks::
+ Merge lock instances in the same class (based on name).
+
+-t::
+--threads::
+ Show per-thread lock stat like below:
+
+ $ perf lock report -t -F acquired,contended,avg_wait
+
+ Name acquired contended avg wait (ns)
+
+ perf 240569 9 5784
+ swapper 106610 19 543
+ :15789 17370 2 14538
+ ContainerMgr 8981 6 874
+ sleep 5275 1 11281
+ ContainerThread 4416 4 944
+ RootPressureThr 3215 5 1215
+ rcu_preempt 2954 0 0
+ ContainerMgr 2560 0 0
+ unnamed 1873 0 0
+ EventManager_De 1845 1 636
+ futex-default-S 1609 0 0
+
+-E::
+--entries=<value>::
+ Display this many entries.
+
+
INFO OPTIONS
------------
@@ -65,6 +114,93 @@ INFO OPTIONS
--map::
dump map of lock instances (address:name table)
+
+CONTENTION OPTIONS
+------------------
+
+-k::
+--key=<value>::
+ Sorting key. Possible values: contended, wait_total (default),
+ wait_max, wait_min, avg_wait.
+
+-F::
+--field=<value>::
+ Output fields. By default it shows all but the wait_min fields
+ and users can customize that using this. Possible values:
+ contended, wait_total, wait_max, wait_min, avg_wait.
+
+-t::
+--threads::
+ Show per-thread lock contention stat
+
+-b::
+--use-bpf::
+ Use BPF program to collect lock contention stats instead of
+ using the input data.
+
+-a::
+--all-cpus::
+ System-wide collection from all CPUs.
+
+-C::
+--cpu=<value>::
+ Collect samples only on the list of CPUs provided. Multiple CPUs can be
+ provided as a comma-separated list with no space: 0,1. Ranges of CPUs
+ are specified with -: 0-2. Default is to monitor all CPUs.
+
+-p::
+--pid=<value>::
+ Record events on existing process ID (comma separated list).
+
+--tid=<value>::
+ Record events on existing thread ID (comma separated list).
+
+-M::
+--map-nr-entries=<value>::
+ Maximum number of BPF map entries (default: 16384).
+ This will be aligned to a power of 2.
+
+--max-stack=<value>::
+ Maximum stack depth when collecting lock contention (default: 8).
+
+--stack-skip=<value>::
+ Number of stack depth to skip when finding a lock caller (default: 3).
+
+-E::
+--entries=<value>::
+ Display this many entries.
+
+-l::
+--lock-addr::
+ Show lock contention stat by address
+
+-o::
+--lock-owner::
+ Show lock contention stat by owners. Implies --threads and
+ requires --use-bpf.
+
+-Y::
+--type-filter=<value>::
+ Show lock contention only for given lock types (comma separated list).
+ Available values are:
+ semaphore, spinlock, rwlock, rwlock:R, rwlock:W, rwsem, rwsem:R, rwsem:W,
+ rtmutex, rwlock-rt, rwlock-rt:R, rwlock-rt:W, pcpu-sem, pcpu-sem:R, pcpu-sem:W,
+ mutex
+
+ Note that RW-variant locks have :R and :W suffixes. Names without the
+ suffix are shortcuts for both variants. Ex) rwsem = rwsem:R + rwsem:W.
+
+-L::
+--lock-filter=<value>::
+ Show lock contention only for given lock addresses or names (comma separated list).
+
+-S::
+--callstack-filter=<value>::
+ Show lock contention only if the callstack contains the given string.
+ Note that it matches the substring so 'rq' would match both 'raw_spin_rq_lock'
+ and 'irq_enter_rcu'.
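+
+For example, a possible invocation combining the options above (illustrative;
+BPF mode requires root):
+
+  # perf lock contention -a -b -Y rwsem -E 10 -- sleep 3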
+
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt
index 199ea0f0a6c0..19862572e3f2 100644
--- a/tools/perf/Documentation/perf-mem.txt
+++ b/tools/perf/Documentation/perf-mem.txt
@@ -23,6 +23,11 @@ Note that on Intel systems the memory latency reported is the use-latency,
not the pure load (or store latency). Use latency includes any pipeline
queueing delays in addition to the memory subsystem latency.
+On Arm64 this uses SPE to sample load and store operations, therefore hardware
+and kernel support is required. See linkperf:perf-arm-spe[1] for a setup guide.
+Due to the statistical nature of SPE sampling, not every memory operation will
+be sampled.
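+
+As a rough, illustrative round trip (sampling may require root or a relaxed
+perf_event_paranoid setting):
+
+  perf mem record -- dd if=/dev/zero of=/dev/null bs=4k count=100k
+  perf mem report --stdio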
+
OPTIONS
-------
<command>...::
@@ -63,6 +68,9 @@ OPTIONS
--phys-data::
Record/Report sample physical addresses
+--data-page-size::
+ Record/Report sample data address page size
+
RECORD OPTIONS
--------------
-e::
@@ -82,11 +90,12 @@ RECORD OPTIONS
Be more verbose (show counter open errors, etc)
--ldlat <n>::
- Specify desired latency for loads event. (x86 only)
+ Specify desired latency for loads event. Supported on Intel and Arm64
+ processors only. Ignored on other archs.
In addition, for report all perf report options are valid, and for record
all perf record options.
SEE ALSO
--------
-linkperf:perf-record[1], linkperf:perf-report[1]
+linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-arm-spe[1]
diff --git a/tools/perf/Documentation/perf-probe.txt b/tools/perf/Documentation/perf-probe.txt
index ed3ecfa422e1..5c43a6edc0e5 100644
--- a/tools/perf/Documentation/perf-probe.txt
+++ b/tools/perf/Documentation/perf-probe.txt
@@ -57,7 +57,7 @@ OPTIONS
-q::
--quiet::
- Be quiet (do not show any messages including errors).
+ Do not show any warnings or messages.
Can not use with -v.
-a::
@@ -222,11 +222,11 @@ probe syntax, 'SRC' means the source file path, 'ALN' is start line number,
and 'ALN2' is end line number in the file. It is also possible to specify how
many lines to show by using 'NUM'. Moreover, 'FUNC@SRC' combination is good
for searching a specific function when several functions share same name.
-So, "source.c:100-120" shows lines between 100th to l20th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function.
+So, "source.c:100-120" shows lines between 100th to 120th in source.c file. And "func:10+20" shows 20 lines from 10th line of func function.
LAZY MATCHING
-------------
- The lazy line matching is similar to glob matching but ignoring spaces in both of pattern and target. So this accepts wildcards('*', '?') and character classes(e.g. [a-z], [!A-Z]).
+The lazy line matching is similar to glob matching, but ignores spaces in both the pattern and the target. So this accepts wildcards('*', '?') and character classes(e.g. [a-z], [!A-Z]).
e.g.
'a=*' can matches 'a=b', 'a = b', 'a == b' and so on.
@@ -235,8 +235,8 @@ This provides some sort of flexibility and robustness to probe point definitions
FILTER PATTERN
--------------
- The filter pattern is a glob matching pattern(s) to filter variables.
- In addition, you can use "!" for specifying filter-out rule. You also can give several rules combined with "&" or "|", and fold those rules as one rule by using "(" ")".
+The filter pattern is a glob matching pattern(s) to filter variables.
+In addition, you can use "!" for specifying filter-out rule. You also can give several rules combined with "&" or "|", and fold those rules as one rule by using "(" ")".
e.g.
With --filter "foo* | bar*", perf probe -V shows variables which start with "foo" or "bar".
@@ -295,6 +295,19 @@ Add a probe in a source file using special characters by backslash escape
./perf probe -x /opt/test/a.out 'foo\+bar.c:4'
+PERMISSIONS AND SYSCTL
+----------------------
+Since perf probe depends on ftrace (tracefs) and kallsyms (/proc/kallsyms), you have to take care of permissions and some sysctl knobs.
+
+ - Since tracefs and kallsyms require root or a privileged user to access them, the following perf probe commands also require it: --add, --del, --list (except for the --cache option)
+
+ - The system admin can remount the tracefs with 755 (`sudo mount -o remount,mode=755 /sys/kernel/tracing/`) to allow unprivileged user to run the perf probe --list command.
+
+ - /proc/sys/kernel/kptr_restrict = 2 (restrict all users) also prevents perf probe from retrieving the important information from kallsyms. You also need to set it to 1 (restrict non CAP_SYSLOG users) for the above commands. Since the user-space probe doesn't need to access kallsyms, this is only for probing kernel functions (kprobes).
+
+ - Since the perf probe commands read the vmlinux (for kernel) and/or the debuginfo file (including user-space application), you need to ensure that you can read those files.
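+
+As a rough example, the tracefs remount and kptr_restrict settings mentioned
+above could be applied as follows (illustrative; requires root):
+
+  sudo mount -o remount,mode=755 /sys/kernel/tracing/
+  sudo sysctl kernel.kptr_restrict=1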
+
+
SEE ALSO
--------
linkperf:perf-trace[1], linkperf:perf-record[1], linkperf:perf-buildid-cache[1]
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index b3f3b3f1c161..680396c56bd1 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -9,7 +9,7 @@ SYNOPSIS
--------
[verse]
'perf record' [-e <EVENT> | --event=EVENT] [-a] <command>
-'perf record' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
+'perf record' [-e <EVENT> | --event=EVENT] [-a] \-- <command> [<options>]
DESCRIPTION
-----------
@@ -30,8 +30,14 @@ OPTIONS
- a symbolic event name (use 'perf list' to list all events)
- - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
- hexadecimal event descriptor.
+ - a raw PMU event in the form of rN where N is a hexadecimal value
+ that represents the raw register encoding with the layout of the
+ event control registers as described by entries in
+ /sys/bus/event_source/devices/cpu/format/*.
+
+ - a symbolic or raw PMU event followed by an optional colon
+ and a list of event modifiers, e.g., cpu-cycles:p. See the
+ linkperf:perf-list[1] man page for details on event modifiers.
- a symbolically formed PMU event like 'pmu/param1=0x3,param2/' where
'param1', 'param2', etc are defined as formats for the PMU in
@@ -113,9 +119,12 @@ OPTIONS
"perf report" to view group events together.
--filter=<filter>::
- Event filter. This option should follow an event selector (-e) which
- selects either tracepoint event(s) or a hardware trace PMU
- (e.g. Intel PT or CoreSight).
+ Event filter. This option should follow an event selector (-e).
+ If the event is a tracepoint, the filter string will be parsed by
+ the kernel. If the event is a hardware trace PMU (e.g. Intel PT
+ or CoreSight), it'll be processed as an address filter. Otherwise
+ it means a general filter using BPF which can be applied for any
+ kind of event.
- tracepoint filters
@@ -170,6 +179,57 @@ OPTIONS
Multiple filters can be separated with space or comma.
+ - bpf filters
+
+ A BPF filter can access the sample data and make a decision based on the
+ data. Users need to set an appropriate sample type to use the BPF
+ filter. BPF filters need root privilege.
+
+ The sample data field can be specified in lower case letter. Multiple
+ filters can be separated with comma. For example,
+
+ --filter 'period > 1000, cpu == 1'
+ or
+ --filter 'mem_op == load || mem_op == store, mem_lvl > l1'
+
+ The former filter only accepts samples with a period greater than 1000 AND
+ a CPU number of 1. The latter accepts load or store memory operations,
+ but only those with a memory level above L1. Since the
+ mem_op and mem_lvl fields come from the (memory) data_source, it only
+ works with events that set the data_source field.
+
+ Also, the user should request to collect that information (with the -d option
+ in the above case). Otherwise, the following message will be shown.
+
+ $ sudo perf record -e cycles --filter 'mem_op == load'
+ Error: cycles event does not have PERF_SAMPLE_DATA_SRC
+ Hint: please add -d option to perf record.
+ failed to set filter "BPF" on event cycles with 22 (Invalid argument)
+
+ Essentially the BPF filter expression is:
+
+ <term> <operator> <value> (("," | "||") <term> <operator> <value>)*
+
+ The <term> can be one of:
+ ip, id, tid, pid, cpu, time, addr, period, txn, weight, phys_addr,
+ code_pgsz, data_pgsz, weight1, weight2, weight3, ins_lat, retire_lat,
+ p_stage_cyc, mem_op, mem_lvl, mem_snoop, mem_remote, mem_lock,
+ mem_dtlb, mem_blk, mem_hops
+
+ The <operator> can be one of:
+ ==, !=, >, >=, <, <=, &
+
+ The <value> can be one of:
+ <number> (for any term)
+ na, load, store, pfetch, exec (for mem_op)
+ l1, l2, l3, l4, cxl, io, any_cache, lfb, ram, pmem (for mem_lvl)
+ na, none, hit, miss, hitm, fwd, peer (for mem_snoop)
+ remote (for mem_remote)
+ na, locked (for mem_lock)
+ na, l1_hit, l1_miss, l2_hit, l2_miss, any_hit, any_miss, walk, fault (for mem_dtlb)
+ na, by_data, by_addr (for mem_blk)
+ hops0, hops1, hops2, hops3 (for mem_hops)
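+
+ Putting the pieces together, and following the hint above to add -d, a
+ sketch of a working invocation might look like:
+
+   # perf record -d -e cycles --filter 'mem_op == load' -- sleep 1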
+
--exclude-perf::
Don't record events issued by perf itself. This option should follow
an event selector (-e) which selects tracepoint event(s). It adds a
@@ -232,10 +292,6 @@ OPTIONS
Also, by adding a comma, the number of mmap pages for AUX
area tracing can be specified.
---group::
- Put all events in a single event group. This precedes the --event
- option and remains only for backward compatibility. See --event.
-
-g::
Enables call-graph (stack chain/backtrace) recording for both
kernel space and user space.
@@ -269,9 +325,14 @@ OPTIONS
User can change the size by passing the size after comma like
"--call-graph dwarf,4096".
+ When "fp" recording is used, perf tries to save stack enties
+ up to the number specified in sysctl.kernel.perf_event_max_stack
+ by default. User can change the number by passing it after comma
+ like "--call-graph fp,32".
+
-q::
--quiet::
- Don't print any message, useful for scripting.
+ Don't print any warnings or messages, useful for scripting.
-v::
--verbose::
@@ -289,6 +350,12 @@ OPTIONS
--phys-data::
Record the sample physical addresses.
+--data-page-size::
+ Record the sampled data address data page size.
+
+--code-page-size::
+ Record the sampled code address (ip) page size
+
-T::
--timestamp::
Record the sample timestamps. Use it with 'perf report -D' to see the
@@ -301,6 +368,11 @@ OPTIONS
--sample-cpu::
Record the sample cpu.
+--sample-identifier::
+ Record the sample identifier i.e. PERF_SAMPLE_IDENTIFIER bit set in
+ the sample_type member of the struct perf_event_attr argument to the
+ perf_event_open system call.
+
-n::
--no-samples::
Don't sample.
@@ -366,6 +438,7 @@ following filters are defined:
- any_call: any function call or system call
- any_ret: any function return or system call return
- ind_call: any indirect branch
+ - ind_jmp: any indirect jump
- call: direct calls, including far (to/from kernel) calls
- u: only when the branch target is at the user level
- k: only when the branch target is in the kernel
@@ -374,7 +447,15 @@ following filters are defined:
- no_tx: only when the target is not in a hardware transaction
- abort_tx: only when the target is a hardware transaction abort
- cond: conditional branches
+ - call_stack: save call stack
+ - no_flags: don't save branch flags, e.g. prediction, misprediction, etc.
+ - no_cycles: don't save branch cycles
+ - hw_index: save branch hardware index
- save_type: save branch type during sampling in case binary is not available later
+ For the platforms with Intel Arch LBR support (12th-Gen+ client or
+ 4th-Gen Xeon+ server), the save branch type is unconditionally enabled
+ when the taken branch stack sampling is enabled.
+ - priv: save privilege state during sampling in case binary is not available later
+
The option requires at least one branch type among any, any_call, any_ret, ind_call, cond.
@@ -385,6 +466,7 @@ is enabled for all the sampling events. The sampled branch type is the same for
The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k
Note that this feature may not be available on all processors.
+-W::
--weight::
Enable weighted sampling. An additional weight is recorded per sample and can be
displayed with the weight and local_weight sort keys. This currently works for TSX
@@ -407,8 +489,11 @@ if combined with -a or -C options.
-D::
--delay=::
-After starting the program, wait msecs before measuring. This is useful to
-filter out the startup phase of the program, which is often very different.
+After starting the program, wait msecs before measuring (-1: start with events
+disabled), or enable events only for specified ranges of msecs (e.g.
+-D 10-20,30-40 means wait 10 msecs, enable for 10 msecs, wait 10 msecs, enable
+for 10 msecs, then stop). Note, delaying enabling of events is useful to filter
+out the startup phase of the program, which is often very different.
-I::
--intr-regs::
@@ -458,7 +543,9 @@ This option sets the time out limit. The default value is 500 ms.
--switch-events::
Record context switch events i.e. events of type PERF_RECORD_SWITCH or
-PERF_RECORD_SWITCH_CPU_WIDE.
+PERF_RECORD_SWITCH_CPU_WIDE. In some cases (e.g. Intel PT, CoreSight or Arm SPE)
+switch events will be enabled automatically, which can be suppressed by
+the option --no-switch-events.
--clang-path=PATH::
Path to clang binary to use for compiling BPF scriptlets.
@@ -475,6 +562,9 @@ Specify vmlinux path which has debuginfo.
--buildid-all::
Record build-id of all DSOs regardless whether it's actually hit or not.
+--buildid-mmap::
+Record build ids in mmap2 events, disables build id cache (implies --no-buildid).
+
--aio[=n]::
Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default: 1, max: 4).
Asynchronous mode is supported only when linking Perf tool with libc library
@@ -556,6 +646,19 @@ overhead. You can still switch them on with:
--switch-output --no-no-buildid --no-no-buildid-cache
+--switch-output-event::
+Events that will cause the perf.data file to be switched, auto-selecting
+--switch-output=signal. The results are similar, as internally the sideband
+thread will also send a SIGUSR2 to the main one.
+
+It uses the same syntax as --event; it will just not be recorded, serving only to
+switch the perf.data file as soon as the --switch-output event is processed by
+a separate sideband thread.
+
+This sideband thread is also used for other purposes, like processing the
+PERF_RECORD_BPF_EVENT records as they happen, asking the kernel for extra BPF
+information, etc.
+
--switch-max-files=N::
When rotating perf.data with --switch-output, only keep N files.
@@ -567,6 +670,22 @@ options.
'perf record --dry-run -e' can act as a BPF script compiler if llvm.dump-obj
in config file is set to true.
+--synth=TYPE::
+Collect and synthesize given type of events (comma separated). Note that
+this option controls the synthesis from the /proc filesystem which represent
+task status for pre-existing threads.
+
+Kernel (and some other) events are recorded regardless of the
+choice in this option. For example, --synth=no would have MMAP events for
+kernel and modules.
+
+Available types are:
+ 'task' - synthesize FORK and COMM events for each task
+ 'mmap' - synthesize MMAP events for each process (implies 'task')
+ 'cgroup' - synthesize CGROUP events for each cgroup
+ 'all' - synthesize all events (default)
+ 'no' - do not synthesize any of the above events
+
--tail-synthesize::
Instead of collecting non-sample events (for example, fork, comm, mmap) at
the beginning of record, collect them during finalizing an output file.
@@ -596,6 +715,131 @@ Make a copy of /proc/kcore and place it into a directory with the perf data file
Limit the sample data max size, <size> is expected to be a number with
appended unit character - B/K/M/G
+--num-thread-synthesize::
+ The number of threads to run when synthesizing events for existing processes.
+ By default, the number of threads equals 1.
+
+ifdef::HAVE_LIBPFM[]
+--pfm-events events::
+Select a PMU event using libpfm4 syntax (see http://perfmon2.sf.net)
+including support for event filters. For example '--pfm-events
+inst_retired:any_p:u:c=1:i'. More than one event can be passed to the
+option using the comma separator. Hardware events and generic hardware
+events cannot be mixed together. The latter must be used with the -e
+option. The -e option and this one can be mixed and matched. Events
+can be grouped using the {} notation.
+endif::HAVE_LIBPFM[]
+
+--control=fifo:ctl-fifo[,ack-fifo]::
+--control=fd:ctl-fd[,ack-fd]::
+ctl-fifo / ack-fifo are opened and used as ctl-fd / ack-fd as follows.
+Listen on ctl-fd descriptor for command to control measurement.
+
+Available commands:
+ 'enable' : enable events
+ 'disable' : disable events
+ 'enable name' : enable event 'name'
+ 'disable name' : disable event 'name'
+ 'snapshot' : AUX area tracing snapshot.
+ 'stop' : stop perf record
+ 'ping' : ping
+
+ 'evlist [-v|-g|-F]' : display all events
+ -F Show just the sample frequency used for each event.
+ -v Show all fields.
+ -g Show event group information.
+
+Measurements can be started with events disabled using --delay=-1 option. Optionally
+send control command completion ('ack\n') to ack-fd descriptor to synchronize with the
+controlling process. Example of bash shell script to enable and disable events during
+measurements:
+
+ #!/bin/bash
+
+ ctl_dir=/tmp/
+
+ ctl_fifo=${ctl_dir}perf_ctl.fifo
+ test -p ${ctl_fifo} && unlink ${ctl_fifo}
+ mkfifo ${ctl_fifo}
+ exec {ctl_fd}<>${ctl_fifo}
+
+ ctl_ack_fifo=${ctl_dir}perf_ctl_ack.fifo
+ test -p ${ctl_ack_fifo} && unlink ${ctl_ack_fifo}
+ mkfifo ${ctl_ack_fifo}
+ exec {ctl_fd_ack}<>${ctl_ack_fifo}
+
+ perf record -D -1 -e cpu-cycles -a \
+ --control fd:${ctl_fd},${ctl_fd_ack} \
+ -- sleep 30 &
+ perf_pid=$!
+
+ sleep 5 && echo 'enable' >&${ctl_fd} && read -u ${ctl_fd_ack} e1 && echo "enabled(${e1})"
+ sleep 10 && echo 'disable' >&${ctl_fd} && read -u ${ctl_fd_ack} d1 && echo "disabled(${d1})"
+
+ exec {ctl_fd_ack}>&-
+ unlink ${ctl_ack_fifo}
+
+ exec {ctl_fd}>&-
+ unlink ${ctl_fifo}
+
+ wait -n ${perf_pid}
+ exit $?
+
+--threads=<spec>::
+Write collected trace data into several data files using parallel threads.
+<spec> value can be user defined list of masks. Masks separated by colon
+define CPUs to be monitored by a thread and affinity mask of that thread
+is separated by slash:
+
+ <cpus mask 1>/<affinity mask 1>:<cpus mask 2>/<affinity mask 2>:...
+
+CPUs or affinity masks must not overlap with other corresponding masks.
+Invalid CPUs are ignored, but masks containing only invalid CPUs are not
+allowed.
+
+For example user specification like the following:
+
+ 0,2-4/2-4:1,5-7/5-7
+
+specifies parallel threads layout that consists of two threads,
+the first thread monitors CPUs 0 and 2-4 with the affinity mask 2-4,
+the second monitors CPUs 1 and 5-7 with the affinity mask 5-7.
+
+<spec> value can also be a string meaning predefined parallel threads
+layout:
+
+ cpu - create new data streaming thread for every monitored cpu
+ core - create new thread to monitor CPUs grouped by a core
+ package - create new thread to monitor CPUs grouped by a package
+ numa - create new thread to monitor CPUs grouped by a NUMA domain
+
+Predefined layouts can be used on systems with large number of CPUs in
+order not to spawn multiple per-cpu streaming threads but still avoid LOST
+events in data directory files. Option specified with no or empty value
+defaults to CPU layout. Masks defined or provided by the option value are
+filtered through the mask provided by -C option.
+
+--debuginfod[=URLs]::
+ Specify the debuginfod URL to be used when caching perf.data binaries,
+ it follows the same syntax as the DEBUGINFOD_URLS variable, like:
+
+ http://192.168.122.174:8002
+
+ If no URL is specified, the value of the DEBUGINFOD_URLS
+ system environment variable is used.
+
+--off-cpu::
+ Enable off-cpu profiling with BPF. The BPF program will collect
+ task scheduling information with (user) stacktrace and save them
+ as sample data of a software event named "offcpu-time". The
+ sample period will have the time the task slept in nanoseconds.
+
+ Note that BPF can collect stack traces using frame pointer ("fp")
+ only, as of now. So the applications built without the frame
+ pointer might see bogus addresses.
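+
+ For example (illustrative; BPF collection requires root):
+
+   # perf record -a --off-cpu -- sleep 3
+   # perf report --stdio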
+
+include::intel-hybrid.txt[]
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-list[1], linkperf:perf-intel-pt[1]
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index f569b9ea4002..af068b4f1e5a 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -27,7 +27,7 @@ OPTIONS
-q::
--quiet::
- Do not show any message. (Suppress -v)
+ Do not show any warnings or messages. (Suppress -v)
-n::
--show-nr-samples::
@@ -73,7 +73,7 @@ OPTIONS
Sort histogram entries by given key(s) - multiple keys can be specified
in CSV format. Following sort keys are available:
pid, comm, dso, symbol, parent, cpu, socket, srcline, weight,
- local_weight, cgroup_id.
+ local_weight, cgroup_id, addr.
Each key has following meaning:
@@ -108,6 +108,16 @@ OPTIONS
- period: Raw number of event count of sample
- time: Separate the samples by time stamp with the resolution specified by
--time-quantum (default 100ms). Specify with overhead and before it.
+ - code_page_size: the code page size of sampled code address (ip)
+ - ins_lat: Instruction latency in core cycles. This is the global instruction
+ latency
+ - local_ins_lat: Local instruction latency version
+ - p_stage_cyc: Number of cycles spent in a pipeline stage. Currently
+ supported only on powerpc.
+ - addr: (Full) virtual address of the sampled instruction
+ - retire_lat: On X86, this reports pipeline stall of this instruction compared
+ to the previous instruction in cycles. And currently supported only on X86
+ - simd: Flags describing a SIMD operation. "e" for empty Arm SVE predicate. "p" for partial Arm SVE predicate
By default, comm, dso and symbol keys are used.
(i.e. --sort comm,dso,symbol)
@@ -139,7 +149,7 @@ OPTIONS
If the --mem-mode option is used, the following sort keys are also available
(incompatible with --branch-stack):
- symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
+ symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline, blocked.
- symbol_daddr: name of data symbol being executed on at the time of sample
- dso_daddr: name of library or module containing the data being executed
@@ -150,9 +160,12 @@ OPTIONS
- snoop: type of snoop (if any) for the data at the time of the sample
- dcacheline: the cacheline the data address is on at the time of the sample
- phys_daddr: physical address of data being executed on at the time of sample
+ - data_page_size: the data page size of data being executed on at the time of sample
+ - blocked: reason of blocked load access for the data at the time of the sample
And the default sort keys are changed to local_weight, mem, sym, dso,
- symbol_daddr, dso_daddr, snoop, tlb, locked, see '--mem-mode'.
+ symbol_daddr, dso_daddr, snoop, tlb, locked, blocked, local_ins_lat,
+ see '--mem-mode'.
If the data file has tracepoint event(s), following (dynamic) sort keys
are also available:
@@ -217,6 +230,9 @@ OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
+--disable-order::
+ Disable raw trace ordering.
+
-g::
--call-graph=<print_type,threshold[,print_limit],order,sort_key[,branch],value>::
Display call chains using type, min percent threshold, print limit,
@@ -365,6 +381,9 @@ OPTIONS
This allows to examine the path the program took to each sample.
The data collection must have used -b (or -j) and -g.
+--addr2line=<path>::
+ Path to addr2line binary.
+
--objdump=<path>::
Path to objdump binary.
@@ -465,7 +484,7 @@ OPTIONS
but probably we'll make the default not to show the switch-on/off events
on the --group mode and if there is only one event besides the off/on ones,
go straight to the histogram browser, just like 'perf report' with no events
- explicitely specified does.
+ explicitly specified does.
--itrace::
Options for decoding instruction tracing data. The options are:
@@ -488,6 +507,17 @@ include::itrace.txt[]
This option extends the perf report to show reference callgraphs,
which collected by reference event, in no callgraph event.
+--stitch-lbr::
+ Show callgraph with stitched LBRs, which may have more complete
+ callgraph. The perf.data file must have been obtained using
+ perf record --call-graph lbr.
+ Disabled by default. In common cases with call stack overflows,
+ it can recreate better call stacks than the default lbr call stack
+ output. But this approach is not foolproof. There can be cases
+ where it creates incorrect call stacks from incorrect matches.
+ The known limitations include exception handling such as
+ setjmp/longjmp, which will have calls/returns that do not match.
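+
+ A possible (illustrative) pairing with the required recording mode, where
+ ./workload is a placeholder for the profiled command:
+
+   perf record --call-graph lbr -- ./workload
+   perf report --stitch-lbr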
+
--socket-filter::
Only report the samples on the processor socket that match with this filter
@@ -548,6 +578,9 @@ include::itrace.txt[]
sampled cycles
'Avg Cycles' - block average sampled cycles
+--skip-empty::
+ Do not print 0 results in the --stat output.
+
include::callchain-overhead-calculation.txt[]
SEE ALSO
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index 5a1f68122f50..5b479f5e62ff 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -54,8 +54,8 @@ all sched_wakeup events in the system:
Traces meant to be processed using a script should be recorded with
the above option: -a to enable system-wide collection.
-The format file for the sched_wakep event defines the following fields
-(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
+The format file for the sched_wakeup event defines the following fields
+(see /sys/kernel/tracing/events/sched/sched_wakeup/format):
----
format:
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 0fb9eda3cbca..6a8581012e16 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -167,7 +167,7 @@ below).
Following those are the 'event handler' functions generated one for
every event in the 'perf record' output. The handler functions take
-the form subsystem__event_name, and contain named parameters, one for
+the form subsystem\__event_name, and contain named parameters, one for
each field in the event; in this case, there's only one event,
raw_syscalls__sys_enter(). (see the EVENT HANDLERS section below for
more info on event handlers).
@@ -319,7 +319,7 @@ So those are the essential steps in writing and running a script. The
process can be generalized to any tracepoint or set of tracepoints
you're interested in - basically find the tracepoint(s) you're
interested in by looking at the list of available events shown by
-'perf list' and/or look in /sys/kernel/debug/tracing/events/ for
+'perf list' and/or look in /sys/kernel/tracing/events/ for
detailed event and field info, record the corresponding trace data
using 'perf record', passing it the list of interesting events,
generate a skeleton script using 'perf script -g python' and modify the
@@ -448,8 +448,8 @@ all sched_wakeup events in the system:
Traces meant to be processed using a script should be recorded with
the above option: -a to enable system-wide collection.
-The format file for the sched_wakep event defines the following fields
-(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
+The format file for the sched_wakeup event defines the following fields
+(see /sys/kernel/tracing/events/sched/sched_wakeup/format):
----
format:
@@ -550,6 +550,27 @@ def trace_unhandled(event_name, context, event_fields_dict):
pass
----
+*process_event*, if defined, is called for any non-tracepoint event
+
+----
+def process_event(param_dict):
+ pass
+----
+
+*context_switch*, if defined, is called for any context switch
+
+----
+def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x):
+ pass
+----
+
+*auxtrace_error*, if defined, is called for any AUX area tracing error
+
+----
+def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x):
+ pass
+----
+
The remaining sections provide descriptions of each of the available
built-in perf script Python modules and their associated functions.
@@ -592,12 +613,18 @@ common, but need to be made accessible to user scripts nonetheless.
perf_trace_context defines a set of functions that can be used to
access this data in the context of the current event. Each of these
functions expects a context variable, which is the same as the
-context variable passed into every event handler as the second
-argument.
+context variable passed into every tracepoint event handler as the second
+argument. For non-tracepoint events, the context variable is also present
+as perf_trace_context.perf_script_context .
common_pc(context) - returns common_preempt count for the current event
common_flags(context) - returns common_flags for the current event
common_lock_depth(context) - returns common_lock_depth for the current event
+ perf_sample_insn(context) - returns the machine code instruction
+ perf_set_itrace_options(context, itrace_options) - set --itrace options if they have not been set already
+ perf_sample_srcline(context) - returns source_file_name, line_number
+ perf_sample_srccode(context) - returns source_file_name, line_number, source_line
+
Util.py Module
~~~~~~~~~~~~~~
@@ -616,9 +643,20 @@ SUPPORTED FIELDS
Currently supported fields:
ev_name, comm, pid, tid, cpu, ip, time, period, phys_addr, addr,
-symbol, dso, time_enabled, time_running, values, callchain,
+symbol, symoff, dso, time_enabled, time_running, values, callchain,
brstack, brstacksym, datasrc, datasrc_decode, iregs, uregs,
-weight, transaction, raw_buf, attr.
+weight, transaction, raw_buf, attr, cpumode.
+
+Fields that may also be present:
+
+ flags - sample flags
+ flags_disp - sample flags display
+ insn_cnt - instruction count for determining instructions-per-cycle (IPC)
+ cyc_cnt - cycle count for determining IPC
+ addr_correlates_sym - addr can correlate to a symbol
+ addr_dso - addr dso
+ addr_symbol - addr symbol
+ addr_symoff - addr symbol offset
Some fields have sub items:
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 963487e82edc..777a0d8ba7d1 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -79,6 +79,9 @@ OPTIONS
--dump-raw-trace=::
Display verbose dump of the trace data.
+--dump-unsorted-raw-trace=::
+ Same as --dump-raw-trace but not sorted in time order.
+
-L::
--Latency=::
Show latency attributes (irqs/preemption disabled, etc).
@@ -98,6 +101,18 @@ OPTIONS
Generate perf-script.[ext] starter script for given language,
using current perf.data.
+--dlfilter=<file>::
+ Filter sample events using the given shared object file.
+ Refer to linkperf:perf-dlfilter[1].
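+
+ A minimal usage sketch (the filter file name is hypothetical):
+
+   perf script --dlfilter ./my-filter.so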
+
+--dlarg=<arg>::
+ Pass 'arg' as an argument to the dlfilter. --dlarg may be repeated
+ to add more arguments.
+
+--list-dlfilters::
+ Display a list of available dlfilters. Use with option -v (must come
+ before option --list-dlfilters) to show long descriptions.
+
-a::
Force system-wide collection. Scripts run without a <command>
normally use -a by default, while scripts run with a <command>
@@ -116,8 +131,10 @@ OPTIONS
--fields::
Comma separated list of fields to print. Options are:
comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
- srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output, brstackinsn,
- brstackoff, callindent, insn, insnlen, synth, phys_addr, metric, misc, srccode, ipc.
+ srcline, period, iregs, uregs, brstack, brstacksym, flags, bpf-output,
+ brstackinsn, brstackinsnlen, brstackoff, callindent, insn, insnlen, synth,
+ phys_addr, metric, misc, srccode, ipc, data_page_size, code_page_size, ins_lat,
+ machine_pid, vcpu, cgroup, retire_lat.
Field list can be prepended with the type, trace, sw or hw,
to indicate to which event type the field list applies.
e.g., -F sw:comm,tid,time,ip,sym and -F trace:time,cpu,trace
@@ -182,15 +199,19 @@ OPTIONS
At this point usage is displayed, and perf-script exits.
The flags field is synthesized and may have a value when Instruction
- Trace decoding. The flags are "bcrosyiABEx" which stand for branch,
+ Trace decoding. The flags are "bcrosyiABExghDt" which stand for branch,
call, return, conditional, system, asynchronous, interrupt,
- transaction abort, trace begin, trace end, and in transaction,
- respectively. Known combinations of flags are printed more nicely e.g.
+ transaction abort, trace begin, trace end, in transaction, VM-Entry,
+ VM-Exit, interrupt disabled and interrupt disable toggle respectively.
+ Known combinations of flags are printed more nicely e.g.
"call" for "bc", "return" for "br", "jcc" for "bo", "jmp" for "b",
"int" for "bci", "iret" for "bri", "syscall" for "bcs", "sysret" for "brs",
"async" for "by", "hw int" for "bcyi", "tx abrt" for "bA", "tr strt" for "bB",
- "tr end" for "bE". However the "x" flag will be display separately in those
- cases e.g. "jcc (x)" for a condition branch within a transaction.
+ "tr end" for "bE", "vmentry" for "bcg", "vmexit" for "bch".
+ However the "x", "D" and "t" flags will be displayed separately in those
+ cases e.g. "jcc (xD)" for a condition branch within a transaction
+ with interrupts disabled. Note, interrupts becoming disabled is "t",
+ whereas interrupts becoming enabled is "Dt".
The callindent field is synthesized and may have a value when
Instruction Trace decoding. For calls and returns, it will display the
@@ -206,6 +227,13 @@ OPTIONS
The ipc (instructions per cycle) field is synthesized and may have a value when
Instruction Trace decoding.
+ The machine_pid and vcpu fields are derived from data resulting from using
+ perf inject to insert a perf.data file recorded inside a virtual machine into
+ a perf.data file recorded on the host at the same time.
+
+ The cgroup field requires the sample to have the cgroup id, which is saved
+ when the "--all-cgroups" option is passed to 'perf record'.
+
Finally, a user may not set fields to none for all event types.
i.e., -F "" is not allowed.
@@ -224,6 +252,10 @@ OPTIONS
is printed. This is the full execution path leading to the sample. This is only supported when the
sample was recorded with perf record -b or -j any.
+ Use brstackinsnlen to print the brstackinsn length. For example, you
+ cannot know the next sequential instruction after an unconditional branch unless
+ you calculate that based on its length.
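+
+ For example (a sketch; requires branch data recorded with 'perf record -b'):
+
+   perf script -F ip,brstackinsn,brstackinsnlen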
+
The brstackoff field will print an offset into a specific dso/binary.
With the metric option perf script can compute metrics for
@@ -322,6 +354,10 @@ OPTIONS
--show-cgroup-events
Display cgroup events i.e. events of type PERF_RECORD_CGROUP.
+--show-text-poke-events
+ Display text poke events i.e. events of type PERF_RECORD_TEXT_POKE and
+ PERF_RECORD_KSYMBOL.
+
--demangle::
Demangle symbol names to human readable form. It's enabled by default,
disable with --no-demangle.
@@ -417,9 +453,32 @@ include::itrace.txt[]
Only consider the listed symbols. Symbols are typically a name
but they may also be hexadecimal address.
+ The hexadecimal address may be the start address of a symbol or
+ any other address to filter the trace records.
+
For example, to select the symbol noploop or the address 0x4007a0:
perf script --symbols=noploop,0x4007a0
+ Filtering of trace records by symbol name, symbol start address, any
+ hexadecimal address and address range is supported.
+
+ The comparison order is:
+
+ 1. symbol name comparison
+ 2. symbol start address comparison
+ 3. any hexadecimal address comparison
+ 4. address range comparison (see --addr-range)
+
+--addr-range::
+ Use with -S or --symbols to list traced records within address range.
+
+ For example, to list the traced records within the address range
+ [0x4007a0, 0x4007a9]:
+ perf script -S 0x4007a0 --addr-range 10
+
+--dsos=::
+ Only consider symbols in these DSOs.
+
--call-trace::
Show call stream for intel_pt traces. The CPUs are interleaved, but
can be filtered with -C.
@@ -440,7 +499,23 @@ include::itrace.txt[]
--show-on-off-events::
Show the --switch-on/off events too.
+--stitch-lbr::
+ Show callgraph with stitched LBRs, which may produce a more complete
+ callgraph. The perf.data file must have been obtained using
+ perf record --call-graph lbr.
+ Disabled by default. In the common case of call stack overflows,
+ it can recreate better call stacks than the default lbr call stack
+ output. However, this approach is not foolproof: there can be cases
+ where it creates incorrect call stacks from incorrect matches.
+ A known limitation is exception handling such as
+ setjmp/longjmp, whose calls/returns will not match.
+
+:GMEXAMPLECMD: script
+:GMEXAMPLESUBCMD:
+include::guest-files.txt[]
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
-linkperf:perf-script-python[1], linkperf:perf-intel-pt[1]
+linkperf:perf-script-python[1], linkperf:perf-intel-pt[1],
+linkperf:perf-dlfilter[1]
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4d56586b2fb9..29bdcfa93f04 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -9,8 +9,8 @@ SYNOPSIS
--------
[verse]
'perf stat' [-e <EVENT> | --event=EVENT] [-a] <command>
-'perf stat' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>]
-'perf stat' [-e <EVENT> | --event=EVENT] [-a] record [-o file] -- <command> [<options>]
+'perf stat' [-e <EVENT> | --event=EVENT] [-a] \-- <command> [<options>]
+'perf stat' [-e <EVENT> | --event=EVENT] [-a] record [-o file] \-- <command> [<options>]
'perf stat' report [-i file]
DESCRIPTION
@@ -36,8 +36,14 @@ report::
- a symbolic event name (use 'perf list' to list all events)
- - a raw PMU event (eventsel+umask) in the form of rNNN where NNN is a
- hexadecimal event descriptor.
+ - a raw PMU event in the form of rN where N is a hexadecimal value
+ that represents the raw register encoding with the layout of the
+ event control registers as described by entries in
+ /sys/bus/event_source/devices/cpu/format/*.
+
+ - a symbolic or raw PMU event followed by an optional colon
+ and a list of event modifiers, e.g., cpu-cycles:p. See the
+ linkperf:perf-list[1] man page for details on event modifiers.
- a symbolically formed event like 'pmu/param1=0x3,param2/' where
param1 and param2 are defined as formats for the PMU in
@@ -71,6 +77,47 @@ report::
--tid=<tid>::
stat events on existing thread id (comma separated list)
+-b::
+--bpf-prog::
+ stat events on existing bpf program id (comma separated list),
+ requiring root rights. bpftool-prog can be used to find the program
+ ids of all bpf programs in the system. For example:
+
+ # bpftool prog | head -n 1
+ 17247: tracepoint name sys_enter tag 192d548b9d754067 gpl
+
+ # perf stat -e cycles,instructions --bpf-prog 17247 --timeout 1000
+
+ Performance counter stats for 'BPF program(s) 17247':
+
+ 85,967 cycles
+ 28,982 instructions # 0.34 insn per cycle
+
+ 1.102235068 seconds time elapsed
+
+--bpf-counters::
+ Use BPF programs to aggregate readings from perf_events. This
+ allows multiple perf-stat sessions that are counting the same metric (cycles,
+ instructions, etc.) to share hardware counters.
+ To use BPF programs on common events by default, use
+ "perf config stat.bpf-counter-events=<list_of_events>".
+
+--bpf-attr-map::
+ With option "--bpf-counters", different perf-stat sessions share
+ information about shared BPF programs and maps via a pinned hashmap.
+ Use "--bpf-attr-map" to specify the path of this pinned hashmap.
+ The default path is /sys/fs/bpf/perf_attr_map.
+
+ifdef::HAVE_LIBPFM[]
+--pfm-events events::
+Select a PMU event using libpfm4 syntax (see http://perfmon2.sf.net)
+including support for event filters. For example '--pfm-events
+inst_retired:any_p:u:c=1:i'. More than one event can be passed to the
+option using the comma separator. Hardware events and generic hardware
+events cannot be mixed together. The latter must be used with the -e
+option. The -e option and this one can be mixed and matched. Events
+can be grouped using the {} notation.
+endif::HAVE_LIBPFM[]
-a::
--all-cpus::
@@ -93,7 +140,9 @@ report::
-B::
--big-num::
- print large numbers with thousands' separators according to locale
+ print large numbers with thousands' separators according to locale.
+ Enabled by default. Use "--no-big-num" to disable.
+ Default setting can be changed with "perf config stat.big-num=false".
-C::
--cpu=::
@@ -108,7 +157,10 @@ Do not aggregate counts across all monitored CPUs.
-n::
--null::
- null run - don't start any counters
+null run - Don't start any counters.
+
+This can be useful to measure just elapsed wall-clock time - or to assess the
+raw overhead of perf stat itself, without running any counters.
-v::
--verbose::
@@ -150,6 +202,12 @@ use '-e e1 -e e2 -G foo,foo' or just use '-e e1 -e e2 -G foo'.
If wanting to monitor, say, 'cycles' for a cgroup and also for system wide, this
command line can be used: 'perf stat -e cycles -G cgroup_name -a -e cycles'.
+--for-each-cgroup name::
+Expand the event list for each cgroup in "name" (multiple cgroups can be
+separated by commas). It also supports regex patterns to match multiple groups.
+This has the same effect as repeating the -e and -G options for each event x name.
+This option cannot be used with the -G/--cgroup option.
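+
+For example, to count cycles for two cgroups at once (a sketch; the cgroup
+names are only illustrative):
+
+  perf stat -a -e cycles --for-each-cgroup foo,bar \-- sleep 1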
+
-o file::
--output file::
Print the output into the designated file.
@@ -161,14 +219,55 @@ Append to the output file designated with the -o option. Ignored if -o is not sp
Log output to fd, instead of stderr. Complementary to --output, and mutually exclusive
with it. --append may be used here. Examples:
- 3>results perf stat --log-fd 3 -- $cmd
- 3>>results perf stat --log-fd 3 --append -- $cmd
+ 3>results perf stat --log-fd 3 \-- $cmd
+ 3>>results perf stat --log-fd 3 --append \-- $cmd
+
+--control=fifo:ctl-fifo[,ack-fifo]::
+--control=fd:ctl-fd[,ack-fd]::
+ctl-fifo / ack-fifo are opened and used as ctl-fd / ack-fd as follows.
+Listen on ctl-fd descriptor for command to control measurement ('enable': enable events,
+'disable': disable events). Measurements can be started with events disabled using
+--delay=-1 option. Optionally send control command completion ('ack\n') to ack-fd descriptor
+to synchronize with the controlling process. Example of bash shell script to enable and
+disable events during measurements:
+
+ #!/bin/bash
+
+ ctl_dir=/tmp/
+
+ ctl_fifo=${ctl_dir}perf_ctl.fifo
+ test -p ${ctl_fifo} && unlink ${ctl_fifo}
+ mkfifo ${ctl_fifo}
+ exec {ctl_fd}<>${ctl_fifo}
+
+ ctl_ack_fifo=${ctl_dir}perf_ctl_ack.fifo
+ test -p ${ctl_ack_fifo} && unlink ${ctl_ack_fifo}
+ mkfifo ${ctl_ack_fifo}
+ exec {ctl_fd_ack}<>${ctl_ack_fifo}
+
+ perf stat -D -1 -e cpu-cycles -a -I 1000 \
+ --control fd:${ctl_fd},${ctl_fd_ack} \
+ \-- sleep 30 &
+ perf_pid=$!
+
+ sleep 5 && echo 'enable' >&${ctl_fd} && read -u ${ctl_fd_ack} e1 && echo "enabled(${e1})"
+ sleep 10 && echo 'disable' >&${ctl_fd} && read -u ${ctl_fd_ack} d1 && echo "disabled(${d1})"
+
+ exec {ctl_fd_ack}>&-
+ unlink ${ctl_ack_fifo}
+
+ exec {ctl_fd}>&-
+ unlink ${ctl_fifo}
+
+ wait -n ${perf_pid}
+ exit $?
+
--pre::
--post::
Pre and post measurement hooks, e.g.:
-perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- make -s -j64 O=defconfig-build/ bzImage
+perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' \-- make -s -j64 O=defconfig-build/ bzImage
-I msecs::
--interval-print msecs::
@@ -176,6 +275,8 @@ Print count deltas every N milliseconds (minimum: 1ms)
The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution.
example: 'perf stat -I 1000 -e cycles -a sleep 5'
+If a metric exists, it is calculated from the counts generated in this interval and printed after #.
+
--interval-count times::
Print count deltas for fixed number of times.
This option should be used together with "-I" option.
@@ -224,14 +325,38 @@ mode, use --per-node in addition to -a. (system-wide).
-D msecs::
--delay msecs::
-After starting the program, wait msecs before measuring. This is useful to
-filter out the startup phase of the program, which is often very different.
+After starting the program, wait msecs before measuring (-1: start with events
+disabled). This is useful to filter out the startup phase of the program,
+which is often very different.
-T::
--transaction::
Print statistics of transactional execution if supported.
+--metric-no-group::
+By default, events to compute a metric are placed in weak groups. The
+group tries to enforce scheduling all or none of the events. The
+--metric-no-group option places events outside of groups and may
+increase the chance of the event being scheduled - leading to more
+accuracy. However, as events may not be scheduled together, accuracy
+for metrics like instructions per cycle can be lower - as both events
+may no longer be measured at the same time.
+
+--metric-no-merge::
+By default metric events in different weak groups can be shared if one
+group contains all the events needed by another. In such cases one
+group will be eliminated reducing event multiplexing and making it so
+that certain groups of metrics sum to 100%. A downside to sharing a
+group is that the group may require multiplexing and so accuracy for a
+small group that need not have multiplexing is lowered. This option
+forbids the event merging logic from sharing events between groups and
+may be used to increase accuracy in this case.
+
+--quiet::
+Don't print output, warnings or messages. This is useful with perf stat
+record below to only write data to the perf.data file.
+
STAT RECORD
-----------
Stores stat data into perf data file.
@@ -262,17 +387,17 @@ Aggregate counts per physical processor for system-wide mode measurements.
Print metrics or metricgroups specified in a comma separated list.
For a group all metrics from the group are added.
The events from the metrics are automatically measured.
-See perf list output for the possble metrics and metricgroups.
+See perf list output for the possible metrics and metricgroups.
-A::
--no-aggr::
Do not aggregate counts across all monitored CPUs.
--topdown::
-Print top down level 1 metrics if supported by the CPU. This allows to
-determine bottle necks in the CPU pipeline for CPU bound workloads,
-by breaking the cycles consumed down into frontend bound, backend bound,
-bad speculation and retiring.
+Print top-down metrics supported by the CPU. This allows determining
+bottlenecks in the CPU pipeline for CPU bound workloads, by breaking
+down the consumed cycles into frontend bound, backend bound, bad
+speculation and retiring.
Frontend bound means that the CPU cannot fetch and decode instructions fast
enough. Backend bound means that computation or memory access is the bottle
@@ -284,6 +409,11 @@ if the workload is actually bound by the CPU and not by something else.
For best results it is usually a good idea to use it with interval
mode like -I 1000, as the bottleneck of workloads can change often.
+This enables --metric-only, unless overridden with --no-metric-only.
+
+The following restrictions only apply to older Intel CPUs and Atom;
+on newer CPUs (IceLake and later) TopDown can be collected for any thread:
+
The top down metrics are collected per core instead of per
CPU thread. Per core mode is automatically enabled
and -a (global monitoring) is needed, requiring root rights or
@@ -295,12 +425,25 @@ echo 0 > /proc/sys/kernel/nmi_watchdog
for best results. Otherwise the bottlenecks may be inconsistent
on workload with changing phases.
-This enables --metric-only, unless overridden with --no-metric-only.
-
To interpret the results it is usually needed to know on which
CPUs the workload runs on. If needed the CPUs can be forced using
taskset.
+--td-level::
+Print the top-down statistics that equal the input level. It allows
+users to print the top-down metrics level of interest instead of only the
+level 1 top-down metrics.
+
+As the higher levels gather more metrics and use more counters they
+will be less accurate. By convention a metric can be examined by
+appending '_group' to it and this will increase accuracy compared to
+gathering all metrics for a level. For example, level 1 analysis may
+highlight 'tma_frontend_bound'. This metric may be drilled into with
+'tma_frontend_bound_group' with
+'perf stat -M tma_frontend_bound_group...'.
+
+Error out if the input is higher than the supported max level.
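+
+A minimal usage sketch:
+
+  perf stat --topdown --td-level=2 -a \-- sleep 1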
+
--no-merge::
Do not merge results from same PMUs.
@@ -314,6 +457,16 @@ Multiple events are created from a single event specification when:
2. Aliases, which are listed immediately after the Kernel PMU events
by perf list, are used.
+--hybrid-merge::
+Merge the hybrid event counts from all PMUs.
+
+For hybrid events, by default, the stat aggregates and reports the event
+counts per PMU. But sometimes, it's also useful to aggregate event counts
+from all PMUs. This option enables that behavior and reports the counts
+without PMUs.
+
+For non-hybrid events, it has no effect.
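+
+A minimal usage sketch (on a hybrid system):
+
+  perf stat -e cycles --hybrid-merge -a \-- sleep 1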
+
--smi-cost::
Measure SMI cost if msr/aperf/ and msr/smi/ events are supported.
@@ -343,10 +496,26 @@ counts for all hardware threads in a core but show the sum counts per
hardware thread. This is essentially a replacement for the any bit and
convenient for post processing.
+--summary::
+Print summary for interval mode (-I).
+
+--no-csv-summary::
+Don't print 'summary' at the first column for CSV summary output.
+This option must be used with -x and --summary.
+
+This option can be enabled in perf config by setting the variable
+'stat.no-csv-summary'.
+
+$ perf config stat.no-csv-summary=true
+
+--cputype::
+Only enable events on CPUs of this type on a hybrid platform
+(e.g. core or atom).
+
EXAMPLES
--------
-$ perf stat -- make
+$ perf stat \-- make
Performance counter stats for 'make':
@@ -402,6 +571,29 @@ The fields are in this order:
Additional metrics may be printed with all earlier fields being empty.
+include::intel-hybrid.txt[]
+
+JSON FORMAT
+-----------
+
+With -j, perf stat prints its output in JSON format,
+which can be used for parsing.
+
+- timestamp : optional usec time stamp in fractions of second (with -I)
+- optional aggregate options:
+ - core : core identifier (with --per-core)
+ - die : die identifier (with --per-die)
+ - socket : socket identifier (with --per-socket)
+ - node : node identifier (with --per-node)
+ - thread : thread identifier (with --per-thread)
+- counter-value : counter value
+- unit : unit of the counter value or empty
+- event : event name
+- variance : optional variance if multiple values are collected (with -r)
+- runtime : run time of counter
+- metric-value : optional metric value
+- metric-unit : optional unit of metric
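+
+An illustrative line (a sketch; the exact keys present depend on the options
+used and the values shown here are made up):
+
+  $ perf stat -j -e cycles \-- sleep 1
+  {"counter-value" : "1234567.000000", "unit" : "", "event" : "cycles", ...}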
+
SEE ALSO
--------
linkperf:perf-top[1], linkperf:perf-list[1]
diff --git a/tools/perf/Documentation/perf-test.txt b/tools/perf/Documentation/perf-test.txt
index b329c65d7f40..951a2f262872 100644
--- a/tools/perf/Documentation/perf-test.txt
+++ b/tools/perf/Documentation/perf-test.txt
@@ -34,3 +34,6 @@ OPTIONS
-F::
--dont-fork::
Do not fork child for each test, run all tests within single process.
+
+--dso::
+ Specify a DSO for the "Symbols" test.
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 487737a725e9..3c202ec080ba 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -38,9 +38,10 @@ Default is to monitor all CPUS.
-e <event>::
--event=<event>::
Select the PMU event. Selection can be a symbolic event name
- (use 'perf list' to list all events) or a raw PMU
- event (eventsel+umask) in the form of rNNN where NNN is a
- hexadecimal event descriptor.
+ (use 'perf list' to list all events) or a raw PMU event in the form
+ of rN where N is a hexadecimal value that represents the raw register
+ encoding with the layout of the event control registers as described
+ by entries in /sys/bus/event_source/devices/cpu/format/*.
-E <entries>::
--entries=<entries>::
@@ -50,9 +51,6 @@ Default is to monitor all CPUS.
--count-filter=<count>::
Only display functions with more events than this.
---group::
- Put the counters into a counter group.
-
--group-sort-idx::
Sort the output by the event at the index n in group. If n is invalid,
sort by the first event. It can support multiple groups with different
@@ -163,6 +161,12 @@ Default is to monitor all CPUS.
-M::
--disassembler-style=:: Set disassembler style for objdump.
+--addr2line=<path>::
+ Path to addr2line binary.
+
+--objdump=<path>::
+ Path to objdump binary.
+
--prefix=PREFIX::
--prefix-strip=N::
Remove first N entries from source file path names in executables
@@ -250,6 +254,10 @@ Default is to monitor all CPUS.
The various filters must be specified as a comma separated list: --branch-filter any_ret,u,k
Note that this feature may not be available on all processors.
+--branch-history::
+ Add the addresses of sampled taken branches to the callstack.
+ This allows examining the path the program took to each sample.
+
--raw-trace::
When displaying traceevent output, do not use print fmt or plugins.
@@ -277,6 +285,18 @@ Default is to monitor all CPUS.
Record events of type PERF_RECORD_NAMESPACES and display it with the
'cgroup_id' sort key.
+-G name::
+--cgroup name::
+monitor only in the container (cgroup) called "name". This option is available only
+in per-cpu mode. The cgroup filesystem must be mounted. All threads belonging to
+container "name" are monitored when they run on the monitored CPUs. Multiple cgroups
+can be provided. Each cgroup is applied to the corresponding event, i.e., first cgroup
+to first event, second cgroup to second event and so on. It is possible to provide
+an empty cgroup (monitor all the time) using, e.g., -G foo,,bar. Cgroups must have
+corresponding events, i.e., they always refer to events defined earlier on the command
+line. If the user wants to track multiple events for a specific cgroup, the user can
+use '-e e1 -e e2 -G foo,foo' or just use '-e e1 -e e2 -G foo'.
+
--all-cgroups::
Record events of type PERF_RECORD_CGROUP and display it with the
'cgroup' sort key.
@@ -300,10 +320,10 @@ Default is to monitor all CPUS.
perf top -e cycles,probe:icmp_rcv --switch-on=probe:icmp_rcv
- Alternatively one can ask for --group and then two overhead columns
+ Alternatively one can ask for a group and then two overhead columns
will appear, the first for cycles and the second for the switch-on event.
- perf top --group -e cycles,probe:icmp_rcv --switch-on=probe:icmp_rcv
+ perf top -e '{cycles,probe:icmp_rcv}' --switch-on=probe:icmp_rcv
This may be interesting to measure a workload only after some initialization
phase is over, i.e. insert a perf probe at that point and use the above
@@ -317,8 +337,28 @@ Default is to monitor all CPUS.
but probably we'll make the default not to show the switch-on/off events
on the --group mode and if there is only one event besides the off/on ones,
go straight to the histogram browser, just like 'perf top' with no events
- explicitely specified does.
-
+ explicitly specified does.
+
+--stitch-lbr::
+ Show callgraph with stitched LBRs, which may produce a more complete
+ callgraph. The option must be used with --call-graph lbr recording.
+ Disabled by default. In the common case of call stack overflows,
+ it can recreate better call stacks than the default lbr call stack
+ output. However, this approach is not foolproof: there can be cases
+ where it creates incorrect call stacks from incorrect matches.
+ A known limitation is exception handling such as
+ setjmp/longjmp, whose calls/returns will not match.
+
+ifdef::HAVE_LIBPFM[]
+--pfm-events events::
+Select a PMU event using libpfm4 syntax (see http://perfmon2.sf.net)
+including support for event filters. For example '--pfm-events
+inst_retired:any_p:u:c=1:i'. More than one event can be passed to the
+option using the comma separator. Hardware events and generic hardware
+events cannot be mixed together. The latter must be used with the -e
+option. The -e option and this one can be mixed and matched. Events
+can be grouped using the {} notation.
+endif::HAVE_LIBPFM[]
INTERACTIVE PROMPTING KEYS
--------------------------
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index abc9b5d83312..f0da8cf63e9a 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -97,8 +97,8 @@ filter out the startup phase of the program, which is often very different.
Filter out events for these pids and for 'trace' itself (comma separated list).
-v::
---verbose=::
- Verbosity level.
+--verbose::
+ Increase the verbosity level.
--no-inherit::
Child tasks do not inherit counters.
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index b0152e1095c5..635ba043fd7d 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -346,7 +346,7 @@ to special needs.
HEADER_BPF_PROG_INFO = 25,
-struct bpf_prog_info_linear, which contains detailed information about
+struct perf_bpil, which contains detailed information about
a BPF program, including type, id, tag, jited/xlated instructions, etc.
HEADER_BPF_BTF = 26,
@@ -373,6 +373,70 @@ struct {
Indicates that trace contains records of PERF_RECORD_COMPRESSED type
that have perf_events records in compressed form.
+ HEADER_CPU_PMU_CAPS = 28,
+
+ A list of cpu PMU capabilities. The format of data is as below.
+
+struct {
+ u32 nr_cpu_pmu_caps;
+ {
+ char name[];
+ char value[];
+ } [nr_cpu_pmu_caps]
+};
+
+
+Example:
+ cpu pmu capabilities: branches=32, max_precise=3, pmu_name=icelake
+
+ HEADER_CLOCK_DATA = 29,
+
+ Contains clock id and its reference time together with wall clock
+ time taken at the 'same time'; both values are in nanoseconds.
+ The format of data is as below.
+
+struct {
+ u32 version; /* version = 1 */
+ u32 clockid;
+ u64 wall_clock_ns;
+ u64 clockid_time_ns;
+};
+
+ HEADER_HYBRID_TOPOLOGY = 30,
+
+Indicates the hybrid CPUs. The format of data is as below.
+
+struct {
+ u32 nr;
+ struct {
+ char pmu_name[];
+ char cpus[];
+ } [nr]; /* Variable length records */
+};
+
+Example:
+ hybrid cpu system:
+ cpu_core cpu list : 0-15
+ cpu_atom cpu list : 16-23
+
+ HEADER_PMU_CAPS = 31,
+
+ List of pmu capabilities (except cpu pmu which is already
+ covered by HEADER_CPU_PMU_CAPS). Note that hybrid cpu pmu
+ capabilities are also stored here.
+
+struct {
+ u32 nr_pmu;
+ struct {
+ u32 nr_caps;
+ {
+ char name[];
+ char value[];
+ } [nr_caps];
+ char pmu_name[];
+ } [nr_pmu];
+};
+
other bits are reserved and should ignored for now
HEADER_FEAT_BITS = 256,
@@ -545,6 +609,16 @@ struct compressed_event {
char data[];
};
+ PERF_RECORD_FINISHED_INIT = 82,
+
+Marks the end of records for the system, pre-existing threads in system wide
+sessions, etc. Those are the ones prefixed PERF_RECORD_USER_*.
+
+This is used, for instance, to 'perf inject' events after init and before
+regular events, those emitted by the kernel, to support combining guest and
+host records.
+
+
The header is followed by compressed data frame that can be decompressed
into array of perf trace records. The size of the entire compressed event
record including the header is limited by the max value of header.size.
diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt
index 3f37ded13f8c..ba3df49c169d 100644
--- a/tools/perf/Documentation/perf.txt
+++ b/tools/perf/Documentation/perf.txt
@@ -12,32 +12,57 @@ SYNOPSIS
OPTIONS
-------
---debug::
- Setup debug variable (see list below) in value
- range (0, 10). Use like:
- --debug verbose # sets verbose = 1
- --debug verbose=2 # sets verbose = 2
-
- List of debug variables allowed to set:
- verbose - general debug messages
- ordered-events - ordered events object debug messages
- data-convert - data convert command debug messages
- stderr - write debug output (option -v) to stderr
- in browser mode
- perf-event-open - Print perf_event_open() arguments and
- return value
-
---buildid-dir::
- Setup buildid cache directory. It has higher priority than
- buildid.dir config file option.
+-h::
+--help::
+ Run perf help command.
-v::
--version::
- Display perf version.
+ Display perf version.
--h::
---help::
- Run perf help command.
+-vv::
+ Print the compiled-in status of libraries.
+
+--exec-path::
+ Display or set exec path.
+
+--html-path::
+ Display html documentation path.
+
+-p::
+--paginate::
+ Set up pager.
+
+--no-pager::
+ Do not set pager.
+
+--buildid-dir::
+ Setup buildid cache directory. It has higher priority
+ than buildid.dir config file option.
+
+--list-cmds::
+ List the most commonly used perf commands.
+
+--list-opts::
+ List available perf options.
+
+--debugfs-dir::
+ Set debugfs directory or set environment variable PERF_DEBUGFS_DIR.
+
+--debug::
+ Setup debug variable (see list below) in value
+ range (0, 10). Use like:
+ --debug verbose # sets verbose = 1
+ --debug verbose=2 # sets verbose = 2
+
+ List of debug variables allowed to set:
+ verbose - general debug messages
+ ordered-events - ordered events object debug messages
+ data-convert - data convert command debug messages
+ stderr - write debug output (option -v) to stderr
+ in browser mode
+ perf-event-open - Print perf_event_open() arguments and
+ return value
DESCRIPTION
-----------
@@ -51,3 +76,15 @@ SEE ALSO
linkperf:perf-stat[1], linkperf:perf-top[1],
linkperf:perf-record[1], linkperf:perf-report[1],
linkperf:perf-list[1]
+
+linkperf:perf-annotate[1],linkperf:perf-archive[1],linkperf:perf-arm-spe[1],
+linkperf:perf-bench[1], linkperf:perf-buildid-cache[1],
+linkperf:perf-buildid-list[1], linkperf:perf-c2c[1],
+linkperf:perf-config[1], linkperf:perf-data[1], linkperf:perf-diff[1],
+linkperf:perf-evlist[1], linkperf:perf-ftrace[1],
+linkperf:perf-help[1], linkperf:perf-inject[1],
+linkperf:perf-intel-pt[1], linkperf:perf-iostat[1], linkperf:perf-kallsyms[1],
+linkperf:perf-kmem[1], linkperf:perf-kvm[1], linkperf:perf-lock[1],
+linkperf:perf-mem[1], linkperf:perf-probe[1], linkperf:perf-sched[1],
+linkperf:perf-script[1], linkperf:perf-test[1],
+linkperf:perf-trace[1], linkperf:perf-version[1]
diff --git a/tools/perf/Documentation/security.txt b/tools/perf/Documentation/security.txt
new file mode 100644
index 000000000000..4fe3b8b1958f
--- /dev/null
+++ b/tools/perf/Documentation/security.txt
@@ -0,0 +1,237 @@
+Overview
+========
+
+For general security related questions about perf_event_open() syscall usage and
+performance monitoring and observability operations by Perf, see:
+https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html
+
+Enabling LSM based mandatory access control (MAC) to perf_event_open() syscall
+==============================================================================
+
+LSM hooks for mandatory access control for perf_event_open() syscall can be
+used starting from Linux v5.3. Below are the steps to extend Fedora (v31) with
+Targeted policy with perf_event_open() access control capabilities:
+
+1. Download selinux-policy SRPM package (e.g. selinux-policy-3.14.4-48.fc31.src.rpm on FC31)
+ and install it so rpmbuild directory would exist in the current working directory:
+
+ # rpm -Uhv selinux-policy-3.14.4-48.fc31.src.rpm
+
+2. Get into rpmbuild/SPECS directory and unpack the source code:
+
+ # rpmbuild -bp selinux-policy.spec
+
+3. Place patch below at rpmbuild/BUILD/selinux-policy-b86eaaf4dbcf2d51dd4432df7185c0eaf3cbcc02
+ directory and apply it:
+
+ # patch -p1 < selinux-policy-perf-events-perfmon.patch
+ patching file policy/flask/access_vectors
+ patching file policy/flask/security_classes
+ # cat selinux-policy-perf-events-perfmon.patch
+diff -Nura a/policy/flask/access_vectors b/policy/flask/access_vectors
+--- a/policy/flask/access_vectors 2020-02-04 18:19:53.000000000 +0300
++++ b/policy/flask/access_vectors 2020-02-28 23:37:25.000000000 +0300
+@@ -174,6 +174,7 @@
+ wake_alarm
+ block_suspend
+ audit_read
++ perfmon
+ }
+
+ #
+@@ -1099,3 +1100,15 @@
+
+ class xdp_socket
+ inherits socket
++
++class perf_event
++{
++ open
++ cpu
++ kernel
++ tracepoint
++ read
++ write
++}
++
++
+diff -Nura a/policy/flask/security_classes b/policy/flask/security_classes
+--- a/policy/flask/security_classes 2020-02-04 18:19:53.000000000 +0300
++++ b/policy/flask/security_classes 2020-02-28 21:35:17.000000000 +0300
+@@ -200,4 +200,6 @@
+
+ class xdp_socket
+
++class perf_event
++
+ # FLASK
+
+4. Get into rpmbuild/SPECS directory and build policy packages from patched sources:
+
+ # rpmbuild --noclean --noprep -ba selinux-policy.spec
+
+ so you have this:
+
+ # ls -alh rpmbuild/RPMS/noarch/
+ total 33M
+ drwxr-xr-x. 2 root root 4.0K Mar 20 12:16 .
+ drwxr-xr-x. 3 root root 4.0K Mar 20 12:16 ..
+ -rw-r--r--. 1 root root 112K Mar 20 12:16 selinux-policy-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 1.2M Mar 20 12:17 selinux-policy-devel-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 2.3M Mar 20 12:17 selinux-policy-doc-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 12M Mar 20 12:17 selinux-policy-minimum-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 4.5M Mar 20 12:16 selinux-policy-mls-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 111K Mar 20 12:16 selinux-policy-sandbox-3.14.4-48.fc31.noarch.rpm
+ -rw-r--r--. 1 root root 14M Mar 20 12:17 selinux-policy-targeted-3.14.4-48.fc31.noarch.rpm
+
+5. Install SELinux packages from Fedora repo, if not already done so, and
+ update with the patched rpms above:
+
+ # rpm -Uhv rpmbuild/RPMS/noarch/selinux-policy-*
+
+6. Enable SELinux Permissive mode for Targeted policy, if not already done so:
+
+ # cat /etc/selinux/config
+
+ # This file controls the state of SELinux on the system.
+ # SELINUX= can take one of these three values:
+ # enforcing - SELinux security policy is enforced.
+ # permissive - SELinux prints warnings instead of enforcing.
+ # disabled - No SELinux policy is loaded.
+ SELINUX=permissive
+ # SELINUXTYPE= can take one of these three values:
+ # targeted - Targeted processes are protected,
+ # minimum - Modification of targeted policy. Only selected processes are protected.
+ # mls - Multi Level Security protection.
+ SELINUXTYPE=targeted
+
+7. Enable filesystem SELinux labeling at the next reboot:
+
+ # touch /.autorelabel
+
+8. Reboot machine and it will label filesystems and load Targeted policy into the kernel;
+
+9. Login and check that dmesg output doesn't mention that perf_event class is unknown to SELinux subsystem;
+
+10. Check that SELinux is enabled and in Permissive mode
+
+ # getenforce
+ Permissive
+
+11. Turn SELinux into Enforcing mode:
+
+ # setenforce 1
+ # getenforce
+ Enforcing
+
+Opening access to perf_event_open() syscall on Fedora with SELinux
+==================================================================
+
+Access to performance monitoring and observability operations by Perf
+can be limited for superuser or CAP_PERFMON or CAP_SYS_ADMIN privileged
+processes. MAC policy settings (e.g. SELinux) can be loaded into the kernel
+and prevent unauthorized access to the perf_event_open() syscall. In such a case
+the Perf tool provides a message similar to the one below:
+
+ # perf stat
+ Error:
+ Access to performance monitoring and observability operations is limited.
+ Enforced MAC policy settings (SELinux) can limit access to performance
+ monitoring and observability operations. Inspect system audit records for
+ more perf_event access control information and adjusting the policy.
+ Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open
+ access to performance monitoring and observability operations for users
+ without CAP_PERFMON or CAP_SYS_ADMIN Linux capability.
+ perf_event_paranoid setting is -1:
+ -1: Allow use of (almost) all events by all users
+ Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
+ >= 0: Disallow raw and ftrace function tracepoint access
+ >= 1: Disallow CPU event access
+ >= 2: Disallow kernel profiling
+ To make the adjusted perf_event_paranoid setting permanent preserve it
+ in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)
+
+To confirm that access is limited by MAC policy settings, inspect the system
+audit records using the journalctl command or /var/log/audit/audit.log; the
+output will contain AVC denied records related to perf_event:
+
+ # journalctl --reverse --no-pager | grep perf_event
+
+ python3[1318099]: SELinux is preventing perf from open access on the perf_event labeled unconfined_t.
+ If you believe that perf should be allowed open access on perf_event labeled unconfined_t by default.
+ setroubleshoot[1318099]: SELinux is preventing perf from open access on the perf_event labeled unconfined_t. For complete SELinux messages run: sealert -l 4595ce5b-e58f-462c-9d86-3bc2074935de
+ audit[1318098]: AVC avc: denied { open } for pid=1318098 comm="perf" scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tclass=perf_event permissive=0
+
+In order to open access to the perf_event_open() syscall, the MAC policy settings
+may need to be extended. On an SELinux system this can be done by loading a special
+policy module extending the base policy settings. A Perf related policy module can
+be generated using the system audit records about blocked perf_event access.
+Run the command below to generate my-perf.te policy extension file with
+perf_event related rules:
+
+ # ausearch -c 'perf' --raw | audit2allow -M my-perf && cat my-perf.te
+
+ module my-perf 1.0;
+
+ require {
+ type unconfined_t;
+ class perf_event { cpu kernel open read tracepoint write };
+ }
+
+ #============= unconfined_t ==============
+ allow unconfined_t self:perf_event { cpu kernel open read tracepoint write };
+
+Now compile, pack and load my-perf.pp extension module into the kernel:
+
+ # checkmodule -M -m -o my-perf.mod my-perf.te
+ # semodule_package -o my-perf.pp -m my-perf.mod
+ # semodule -X 300 -i my-perf.pp
+
+After the steps above have been taken, access to the perf_event_open() syscall
+should now be allowed by the policy settings. Check access by running Perf like this:
+
+ # perf stat
+ ^C
+ Performance counter stats for 'system wide':
+
+ 36,387.41 msec cpu-clock # 7.999 CPUs utilized
+ 2,629 context-switches # 0.072 K/sec
+ 57 cpu-migrations # 0.002 K/sec
+ 1 page-faults # 0.000 K/sec
+ 263,721,559 cycles # 0.007 GHz
+ 175,746,713 instructions # 0.67 insn per cycle
+ 19,628,798 branches # 0.539 M/sec
+ 1,259,201 branch-misses # 6.42% of all branches
+
+ 4.549061439 seconds time elapsed
+
+The generated my-perf.pp policy extension module can be removed
+from the kernel using this command:
+
+ # semodule -X 300 -r my-perf
+
+Alternatively the module can be temporarily disabled and enabled back using
+these two commands:
+
+ # semodule -d my-perf
+ # semodule -e my-perf
+
+If something went wrong
+=======================
+
+To turn SELinux into Permissive mode:
+ # setenforce 0
+
+To fully disable SELinux during kernel boot [3], set the kernel command line parameter selinux=0
+
+To remove SELinux labeling from local filesystems:
+ # find / -mount -print0 | xargs -0 setfattr -h -x security.selinux
+
+To fully turn off SELinux on a machine, set SELINUX=disabled in the /etc/selinux/config file and reboot;
+
+Links
+=====
+
+[1] https://download-ib01.fedoraproject.org/pub/fedora/linux/updates/31/Everything/SRPMS/Packages/s/selinux-policy-3.14.4-49.fc31.src.rpm
+[2] https://docs.fedoraproject.org/en-US/Fedora/11/html/Security-Enhanced_Linux/sect-Security-Enhanced_Linux-Working_with_SELinux-Enabling_and_Disabling_SELinux.html
+[3] https://danwalsh.livejournal.com/10972.html
diff --git a/tools/perf/Documentation/topdown.txt b/tools/perf/Documentation/topdown.txt
new file mode 100644
index 000000000000..ae0aee86844f
--- /dev/null
+++ b/tools/perf/Documentation/topdown.txt
@@ -0,0 +1,332 @@
+Using TopDown metrics
+---------------------
+
+TopDown metrics break apart performance bottlenecks. Starting at level
+1 it is typical to get metrics on retiring, bad speculation, frontend
+bound, and backend bound. Higher levels provide more detail into the
+level 1 bottlenecks, such as at level 2: core bound, memory bound,
+heavy operations, light operations, branch mispredicts, machine
+clears, fetch latency and fetch bandwidth. For more details see [1][2][3].
+
+perf stat --topdown implements this using available metrics that vary
+per architecture.
+
+% perf stat -a --topdown -I1000
+# time % tma_retiring % tma_backend_bound % tma_frontend_bound % tma_bad_speculation
+ 1.001141351 11.5 34.9 46.9 6.7
+ 2.006141972 13.4 28.1 50.4 8.1
+ 3.010162040 12.9 28.1 51.1 8.0
+ 4.014009311 12.5 28.6 51.8 7.2
+ 5.017838554 11.8 33.0 48.0 7.2
+ 5.704818971 14.0 27.5 51.3 7.3
+...
+
+New Topdown features in Intel Ice Lake
+======================================
+
+With Ice Lake CPUs the TopDown metrics are directly available as
+fixed counters and do not require generic counters. This allows
+TopDown to always be collected in addition to other events.
+
+Using TopDown through RDPMC in applications on Intel Ice Lake
+=============================================================
+
+For more fine grained measurements it can be useful to
+access the new counters directly from user space. This is more complicated,
+but drastically lowers overhead.
+
+On Ice Lake, there is a new fixed counter 3: SLOTS, which reports
+"pipeline SLOTS" (cycles multiplied by core issue width) and a
+metric register that reports slots ratios for the different bottleneck
+categories.
+
+The metrics counter is CPU model specific and is not available on older
+CPUs.
+
+Example code
+============
+
+Library functions that provide the functionality described below
+are also available in libjevents [4].
+
+The application opens a group with fixed counter 3 (SLOTS) and any
+metric event, and allows user programs to read the performance counters.
+
+Fixed counter 3 is mapped to the pseudo event event=0x00, umask=04,
+so the perf_event_attr structure should be initialized with
+{ .config = 0x0400, .type = PERF_TYPE_RAW }.
+The metric events are mapped to the pseudo event event=0x00, umask=0x8X.
+For example, the perf_event_attr structure can be initialized with
+{ .config = 0x8000, .type = PERF_TYPE_RAW } for the Retiring metric event.
+Fixed counter 3 must be the leader of the group.
+
+#include <linux/perf_event.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+/* Provide own perf_event_open stub because glibc doesn't */
+__attribute__((weak))
+int perf_event_open(struct perf_event_attr *attr, pid_t pid,
+ int cpu, int group_fd, unsigned long flags)
+{
+ return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
+}
+
+/* Open slots counter file descriptor for current task. */
+struct perf_event_attr slots = {
+ .type = PERF_TYPE_RAW,
+ .size = sizeof(struct perf_event_attr),
+ .config = 0x400,
+ .exclude_kernel = 1,
+};
+
+int slots_fd = perf_event_open(&slots, 0, -1, -1, 0);
+if (slots_fd < 0)
+ ... error ...
+
+/* Memory mapping the fd permits _rdpmc calls from userspace */
+void *slots_p = mmap(0, getpagesize(), PROT_READ, MAP_SHARED, slots_fd, 0);
+if (slots_p == MAP_FAILED)
+ .... error ...
+
+/*
+ * Open metrics event file descriptor for current task.
+ * Set slots event as the leader of the group.
+ */
+struct perf_event_attr metrics = {
+ .type = PERF_TYPE_RAW,
+ .size = sizeof(struct perf_event_attr),
+ .config = 0x8000,
+ .exclude_kernel = 1,
+};
+
+int metrics_fd = perf_event_open(&metrics, 0, -1, slots_fd, 0);
+if (metrics_fd < 0)
+ ... error ...
+
+/* Memory mapping the fd permits _rdpmc calls from userspace */
+void *metrics_p = mmap(0, getpagesize(), PROT_READ, MAP_SHARED, metrics_fd, 0);
+if (metrics_p == MAP_FAILED)
+ ... error ...
+
+Note: the file descriptors returned by the perf_event_open calls must be memory
+mapped to permit calls to the _rdpmd instruction. Permission may also be granted
+by writing the /sys/devices/cpu/rdpmc sysfs node.
+
+The RDPMC instruction (or _rdpmc compiler intrinsic) can now be used
+to read slots and the topdown metrics at different points of the program:
+
+#include <stdint.h>
+#include <x86intrin.h>
+
+#define RDPMC_FIXED (1 << 30) /* return fixed counters */
+#define RDPMC_METRIC (1 << 29) /* return metric counters */
+
+#define FIXED_COUNTER_SLOTS 3
+#define METRIC_COUNTER_TOPDOWN_L1_L2 0
+
+static inline uint64_t read_slots(void)
+{
+ return _rdpmc(RDPMC_FIXED | FIXED_COUNTER_SLOTS);
+}
+
+static inline uint64_t read_metrics(void)
+{
+ return _rdpmc(RDPMC_METRIC | METRIC_COUNTER_TOPDOWN_L1_L2);
+}
+
+Then the program can be instrumented to read these metrics at different
+points.
+
+It's not a good idea to do this with too short code regions,
+as the parallelism and overlap in the CPU program execution will
+cause too much measurement inaccuracy. For example instrumenting
+individual basic blocks is definitely too fine grained.
+
+_rdpmc calls should not be mixed with reading the metrics and slots counters
+through system calls, as the kernel will reset these counters after each system
+call.
+
+Decoding metrics values
+=======================
+
+The value reported by read_metrics() contains four 8 bit fields
+that represent scaled ratios of the Level 1 bottlenecks.
+All four fields add up to 0xff (= 100%).
+
+The binary ratios in the metric value can be converted to float ratios:
+
+#define GET_METRIC(m, i) (((m) >> (i*8)) & 0xff)
+
+/* L1 Topdown metric events */
+#define TOPDOWN_RETIRING(val) ((float)GET_METRIC(val, 0) / 0xff)
+#define TOPDOWN_BAD_SPEC(val) ((float)GET_METRIC(val, 1) / 0xff)
+#define TOPDOWN_FE_BOUND(val) ((float)GET_METRIC(val, 2) / 0xff)
+#define TOPDOWN_BE_BOUND(val) ((float)GET_METRIC(val, 3) / 0xff)
+
+/*
+ * L2 Topdown metric events.
+ * Available on Sapphire Rapids and later platforms.
+ */
+#define TOPDOWN_HEAVY_OPS(val) ((float)GET_METRIC(val, 4) / 0xff)
+#define TOPDOWN_BR_MISPREDICT(val) ((float)GET_METRIC(val, 5) / 0xff)
+#define TOPDOWN_FETCH_LAT(val) ((float)GET_METRIC(val, 6) / 0xff)
+#define TOPDOWN_MEM_BOUND(val) ((float)GET_METRIC(val, 7) / 0xff)
+
+and then converted to percent for printing.
+
+The ratios in the metric accumulate for the time when the counter
+is enabled. For measuring programs it is often useful to measure
+specific sections. For this it is necessary to compute deltas of the metrics.
+
+This can be done by scaling the metrics with the slots counter
+read at the same time.
+
+Then it's possible to take deltas of these slots counts
+measured at different points, and determine the metrics
+for that time period.
+
+ slots_a = read_slots();
+ metric_a = read_metrics();
+
+ ... larger code region ...
+
+ slots_b = read_slots()
+ metric_b = read_metrics()
+
+ # compute scaled metrics for measurement a
+ retiring_slots_a = GET_METRIC(metric_a, 0) * slots_a
+ bad_spec_slots_a = GET_METRIC(metric_a, 1) * slots_a
+ fe_bound_slots_a = GET_METRIC(metric_a, 2) * slots_a
+ be_bound_slots_a = GET_METRIC(metric_a, 3) * slots_a
+
+ # compute delta scaled metrics between b and a
+ retiring_slots = GET_METRIC(metric_b, 0) * slots_b - retiring_slots_a
+ bad_spec_slots = GET_METRIC(metric_b, 1) * slots_b - bad_spec_slots_a
+ fe_bound_slots = GET_METRIC(metric_b, 2) * slots_b - fe_bound_slots_a
+ be_bound_slots = GET_METRIC(metric_b, 3) * slots_b - be_bound_slots_a
+
+Later the individual ratios of L1 metric events for the measurement period can
+be recreated from these counts.
+
+ slots_delta = slots_b - slots_a
+ retiring_ratio = (float)retiring_slots / slots_delta
+ bad_spec_ratio = (float)bad_spec_slots / slots_delta
+ fe_bound_ratio = (float)fe_bound_slots / slots_delta
+ be_bound_ratio = (float)be_bound_slots / slots_delta
+
+ printf("Retiring %.2f%% Bad Speculation %.2f%% FE Bound %.2f%% BE Bound %.2f%%\n",
+ retiring_ratio * 100.,
+ bad_spec_ratio * 100.,
+ fe_bound_ratio * 100.,
+ be_bound_ratio * 100.);
+
+The individual ratios of L2 metric events for the measurement period can be
+recreated from L1 and L2 metric counters. (Available on Sapphire Rapids and
+later platforms)
+
+ # compute scaled metrics for measurement a
+ heavy_ops_slots_a = GET_METRIC(metric_a, 4) * slots_a
+ br_mispredict_slots_a = GET_METRIC(metric_a, 5) * slots_a
+ fetch_lat_slots_a = GET_METRIC(metric_a, 6) * slots_a
+ mem_bound_slots_a = GET_METRIC(metric_a, 7) * slots_a
+
+ # compute delta scaled metrics between b and a
+ heavy_ops_slots = GET_METRIC(metric_b, 4) * slots_b - heavy_ops_slots_a
+ br_mispredict_slots = GET_METRIC(metric_b, 5) * slots_b - br_mispredict_slots_a
+ fetch_lat_slots = GET_METRIC(metric_b, 6) * slots_b - fetch_lat_slots_a
+ mem_bound_slots = GET_METRIC(metric_b, 7) * slots_b - mem_bound_slots_a
+
+ slots_delta = slots_b - slots_a
+ heavy_ops_ratio = (float)heavy_ops_slots / slots_delta
+ light_ops_ratio = retiring_ratio - heavy_ops_ratio;
+
+ br_mispredict_ratio = (float)br_mispredict_slots / slots_delta
+ machine_clears_ratio = bad_spec_ratio - br_mispredict_ratio;
+
+ fetch_lat_ratio = (float)fetch_lat_slots / slots_delta
+ fetch_bw_ratio = fe_bound_ratio - fetch_lat_ratio;
+
+ mem_bound_ratio = (float)mem_bound_slots / slots_delta
+ core_bound_ratio = be_bound_ratio - mem_bound_ratio;
+
+ printf("Heavy Operations %.2f%% Light Operations %.2f%% "
+ "Branch Mispredict %.2f%% Machine Clears %.2f%% "
+ "Fetch Latency %.2f%% Fetch Bandwidth %.2f%% "
+ "Mem Bound %.2f%% Core Bound %.2f%%\n",
+ heavy_ops_ratio * 100.,
+ light_ops_ratio * 100.,
+ br_mispredict_ratio * 100.,
+ machine_clears_ratio * 100.,
+ fetch_lat_ratio * 100.,
+ fetch_bw_ratio * 100.,
+ mem_bound_ratio * 100.,
+ core_bound_ratio * 100.);
+
+Resetting metrics counters
+==========================
+
+Since the individual metrics are only 8bit they lose precision for
+short regions over time because the number of cycles covered by each
+fraction bit shrinks. So the counters need to be reset regularly.
+
+When using the kernel perf API the kernel resets on every read.
+So as long as the reading is at reasonable intervals (every few
+seconds) the precision is good.
+
+When using perf stat it is recommended to always use the -I option,
+with an interval no longer than a few seconds:
+
+ perf stat -I 1000 --topdown ...
+
+For user programs using RDPMC directly the counter can
+be reset explicitly using ioctl:
+
+ ioctl(perf_fd, PERF_EVENT_IOC_RESET, 0);
+
+This "opens" a new measurement period.
+
+A program using RDPMC for TopDown should schedule such a reset
+regularly, as in every few seconds.
+
+Limits on Intel Ice Lake
+========================
+
+Four pseudo TopDown metric events are exposed for the end-users,
+topdown-retiring, topdown-bad-spec, topdown-fe-bound and topdown-be-bound.
+They can be used to collect the TopDown value under the following
+rules:
+- All the TopDown metric events must be in a group with the SLOTS event.
+- The SLOTS event must be the leader of the group.
+- The PERF_FORMAT_GROUP flag must be applied to each TopDown metric
+ event.
+
+The SLOTS event and the TopDown metric events can be counting members of
+a sampling read group. Since the SLOTS event must be the leader of a TopDown
+group, the second event of the group is the sampling event.
+For example, perf record -e '{slots, $sampling_event, topdown-retiring}:S'
+
+Extension on Intel Sapphire Rapids Server
+=========================================
+The metrics counter is extended to support TMA method level 2 metrics.
+The lower half of the register is the TMA level 1 metrics (legacy).
+The upper half is also divided into four 8-bit fields for the new level 2
+metrics. Four more TopDown metric events are exposed for the end-users,
+topdown-heavy-ops, topdown-br-mispredict, topdown-fetch-lat and
+topdown-mem-bound.
+
+Each of the new level 2 metrics in the upper half is a subset of the
+corresponding level 1 metric in the lower half. Software can deduce the
+other four level 2 metrics by subtracting the corresponding metrics as below:
+
+ Light_Operations = Retiring - Heavy_Operations
+ Machine_Clears = Bad_Speculation - Branch_Mispredicts
+ Fetch_Bandwidth = Frontend_Bound - Fetch_Latency
+ Core_Bound = Backend_Bound - Memory_Bound
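+
+Assuming the same GET_METRIC() style field extraction as in the example
+above (fields 4-7 hold the level 2 metrics and, analogously, fields 0-3
+hold the level 1 metrics), the complete level 2 breakdown of a raw
+PERF_METRICS value can be sketched as:
+
+ retiring      = GET_METRIC(metrics, 0)
+ bad_spec      = GET_METRIC(metrics, 1)
+ fe_bound      = GET_METRIC(metrics, 2)
+ be_bound      = GET_METRIC(metrics, 3)
+ heavy_ops     = GET_METRIC(metrics, 4)
+ br_mispredict = GET_METRIC(metrics, 5)
+ fetch_lat     = GET_METRIC(metrics, 6)
+ mem_bound     = GET_METRIC(metrics, 7)
+
+ # the remaining level 2 metrics follow by subtraction
+ light_ops      = retiring - heavy_ops
+ machine_clears = bad_spec - br_mispredict
+ fetch_bw       = fe_bound - fetch_lat
+ core_bound     = be_bound - mem_bound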
+
+
+[1] https://software.intel.com/en-us/top-down-microarchitecture-analysis-method-win
+[2] https://sites.google.com/site/analysismethods/yasin-pubs
+[3] https://perf.wiki.kernel.org/index.php/Top-Down_Analysis
+[4] https://github.com/andikleen/pmu-tools/tree/master/jevents
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 5d7b947320fb..1da7f4b91b4f 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -3,7 +3,6 @@ tools/arch
tools/scripts
tools/build
tools/include
-tools/lib/traceevent
tools/lib/api
tools/lib/bpf
tools/lib/subcmd
@@ -13,11 +12,14 @@ tools/lib/ctype.c
tools/lib/hweight.c
tools/lib/rbtree.c
tools/lib/string.c
-tools/lib/symbol/kallsyms.c
-tools/lib/symbol/kallsyms.h
+tools/lib/symbol
tools/lib/find_bit.c
tools/lib/bitmap.c
+tools/lib/list_sort.c
tools/lib/str_error_r.c
tools/lib/vsprintf.c
tools/lib/zalloc.c
-scripts/bpf_helpers_doc.py
+scripts/bpf_doc.py
+tools/bpf/bpftool
+kernel/bpf/disasm.c
+kernel/bpf/disasm.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index b8fc7d972be9..75f3f6e0a231 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -25,7 +25,7 @@ unexport MAKEFLAGS
# (To override it, run 'make JOBS=1' and similar.)
#
ifeq ($(JOBS),)
- JOBS := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+ JOBS := $(shell (getconf _NPROCESSORS_ONLN || grep -E -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
ifeq ($(JOBS),0)
JOBS := 1
endif
@@ -100,7 +100,10 @@ clean:
# make -C tools/perf -f tests/make
#
build-test:
- @$(MAKE) SHUF=1 -f tests/make REUSE_FEATURES_DUMP=1 MK=Makefile SET_PARALLEL=1 --no-print-directory tarpkg out
+ @$(MAKE) SHUF=1 -f tests/make REUSE_FEATURES_DUMP=1 MK=Makefile SET_PARALLEL=1 --no-print-directory tarpkg make_static make_with_gtk2 out
+
+build-test-tarball:
+ @$(MAKE) -f tests/make REUSE_FEATURES_DUMP=1 MK=Makefile SET_PARALLEL=1 --no-print-directory out
#
# All other targets get passed through:
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 12a8204d63c6..4884520f954f 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -16,19 +16,41 @@ $(shell printf "" > $(OUTPUT).config-detected)
detected = $(shell echo "$(1)=y" >> $(OUTPUT).config-detected)
detected_var = $(shell echo "$(1)=$($(1))" >> $(OUTPUT).config-detected)
-CFLAGS := $(EXTRA_CFLAGS) $(EXTRA_WARNINGS)
+CFLAGS := $(EXTRA_CFLAGS) $(filter-out -Wnested-externs,$(EXTRA_WARNINGS))
+HOSTCFLAGS := $(filter-out -Wnested-externs,$(EXTRA_WARNINGS))
+
+# Enable -Wthread-safety analysis for clang builds.
+ifeq ($(CC_NO_CLANG), 0)
+ CFLAGS += -Wthread-safety
+endif
include $(srctree)/tools/scripts/Makefile.arch
$(call detected_var,SRCARCH)
NO_PERF_REGS := 1
-NO_SYSCALL_TABLE := 1
+
+ifneq ($(NO_SYSCALL_TABLE),1)
+ NO_SYSCALL_TABLE := 1
+
+ ifeq ($(SRCARCH),x86)
+ ifeq (${IS_64_BIT}, 1)
+ NO_SYSCALL_TABLE := 0
+ endif
+ else
+ ifeq ($(SRCARCH),$(filter $(SRCARCH),powerpc arm64 s390 mips loongarch))
+ NO_SYSCALL_TABLE := 0
+ endif
+ endif
+
+ ifneq ($(NO_SYSCALL_TABLE),1)
+ CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT
+ endif
+endif
# Additional ARCH settings for ppc
ifeq ($(SRCARCH),powerpc)
NO_PERF_REGS := 0
- NO_SYSCALL_TABLE := 0
CFLAGS += -I$(OUTPUT)arch/powerpc/include/generated
LIBUNWIND_LIBS := -lunwind -lunwind-ppc64
endif
@@ -37,7 +59,6 @@ endif
ifeq ($(SRCARCH),x86)
$(call detected,CONFIG_X86)
ifeq (${IS_64_BIT}, 1)
- NO_SYSCALL_TABLE := 0
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT -I$(OUTPUT)arch/x86/include/generated
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind-x86_64 -lunwind -llzma
@@ -55,11 +76,16 @@ endif
ifeq ($(SRCARCH),arm64)
NO_PERF_REGS := 0
- NO_SYSCALL_TABLE := 0
CFLAGS += -I$(OUTPUT)arch/arm64/include/generated
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
+ifeq ($(SRCARCH),loongarch)
+ NO_PERF_REGS := 0
+ CFLAGS += -I$(OUTPUT)arch/loongarch/include/generated
+ LIBUNWIND_LIBS = -lunwind -lunwind-loongarch64
+endif
+
ifeq ($(SRCARCH),riscv)
NO_PERF_REGS := 0
endif
@@ -70,23 +96,24 @@ endif
ifeq ($(ARCH),s390)
NO_PERF_REGS := 0
- NO_SYSCALL_TABLE := 0
CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated
endif
-ifeq ($(NO_PERF_REGS),0)
- $(call detected,CONFIG_PERF_REGS)
+ifeq ($(ARCH),mips)
+ NO_PERF_REGS := 0
+ CFLAGS += -I$(OUTPUT)arch/mips/include/generated
+ LIBUNWIND_LIBS = -lunwind -lunwind-mips
endif
-ifneq ($(NO_SYSCALL_TABLE),1)
- CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT
+ifeq ($(NO_PERF_REGS),0)
+ $(call detected,CONFIG_PERF_REGS)
endif
# So far there's only x86 and arm libdw unwind support merged in perf.
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
# to the check.
-ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc s390 csky riscv))
+ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc s390 csky riscv loongarch))
NO_LIBDW_DWARF_UNWIND := 1
endif
@@ -108,7 +135,7 @@ endef
ifdef LIBUNWIND_DIR
LIBUNWIND_CFLAGS = -I$(LIBUNWIND_DIR)/include
LIBUNWIND_LDFLAGS = -L$(LIBUNWIND_DIR)/lib
- LIBUNWIND_ARCHS = x86 x86_64 arm aarch64 debug-frame-arm debug-frame-aarch64
+ LIBUNWIND_ARCHS = x86 x86_64 arm aarch64 debug-frame-arm debug-frame-aarch64 loongarch
$(foreach libunwind_arch,$(LIBUNWIND_ARCHS),$(call libunwind_arch_set_flags,$(libunwind_arch)))
endif
@@ -118,17 +145,20 @@ FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS) $(LIBUNWIND_LIBS)
-FEATURE_CHECK_LDFLAGS-libunwind-arm = -lunwind -lunwind-arm
-FEATURE_CHECK_LDFLAGS-libunwind-aarch64 = -lunwind -lunwind-aarch64
-FEATURE_CHECK_LDFLAGS-libunwind-x86 = -lunwind -llzma -lunwind-x86
-FEATURE_CHECK_LDFLAGS-libunwind-x86_64 = -lunwind -llzma -lunwind-x86_64
+FEATURE_CHECK_LDFLAGS-libunwind-arm += -lunwind -lunwind-arm
+FEATURE_CHECK_LDFLAGS-libunwind-aarch64 += -lunwind -lunwind-aarch64
+FEATURE_CHECK_LDFLAGS-libunwind-x86 += -lunwind -llzma -lunwind-x86
+FEATURE_CHECK_LDFLAGS-libunwind-x86_64 += -lunwind -llzma -lunwind-x86_64
FEATURE_CHECK_LDFLAGS-libcrypto = -lcrypto
ifdef CSINCLUDES
LIBOPENCSD_CFLAGS := -I$(CSINCLUDES)
endif
-OPENCSDLIBS := -lopencsd_c_api -lopencsd
+OPENCSDLIBS := -lopencsd_c_api
+ifeq ($(findstring -static,${LDFLAGS}),-static)
+ OPENCSDLIBS += -lopencsd -lstdc++
+endif
ifdef CSLIBS
LIBOPENCSD_LDFLAGS := -L$(CSLIBS)
endif
@@ -186,10 +216,17 @@ ifeq ($(call get-executable,$(BISON)),)
dummy := $(error Error: $(BISON) is missing on this system, please install it)
endif
+ifneq ($(OUTPUT),)
+ ifeq ($(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \>\= 371), 1)
+ BISON_FILE_PREFIX_MAP := --file-prefix-map=$(OUTPUT)=
+ endif
+endif
+
# Treat warnings as errors unless directed not to
ifneq ($(WERROR),0)
CORE_CFLAGS += -Werror
CXXFLAGS += -Werror
+ HOSTCFLAGS += -Werror
endif
ifndef DEBUG
@@ -197,6 +234,7 @@ ifndef DEBUG
endif
ifeq ($(DEBUG),0)
+CORE_CFLAGS += -DNDEBUG=1
ifeq ($(CC_NO_CLANG), 0)
CORE_CFLAGS += -O3
else
@@ -213,15 +251,33 @@ ifdef PARSER_DEBUG
endif
# Try different combinations to accommodate systems that only have
-# python[2][-config] in weird combinations but always preferring
-# python2 and python2-config as per pep-0394. If we catch a
-# python[-config] in version 3, the version check will kill it.
-PYTHON2 := $(if $(call get-executable,python2),python2,python)
-override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON2))
-PYTHON2_CONFIG := \
- $(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config)
-override PYTHON_CONFIG := \
- $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON2_CONFIG))
+# python[2][3]-config in weird combinations in the following order of
+# priority from lowest to highest:
+# * python2-config as per pep-0394.
+# * python-config
+# * python3-config
+# * $(PYTHON)-config (If PYTHON is user supplied but PYTHON_CONFIG isn't)
+#
+PYTHON_AUTO := python-config
+PYTHON_AUTO := $(if $(call get-executable,python2-config),python2-config,$(PYTHON_AUTO))
+PYTHON_AUTO := $(if $(call get-executable,python-config),python-config,$(PYTHON_AUTO))
+PYTHON_AUTO := $(if $(call get-executable,python3-config),python3-config,$(PYTHON_AUTO))
+
+# If PYTHON is defined but PYTHON_CONFIG isn't, then take $(PYTHON)-config as if it was the user
+# supplied value for PYTHON_CONFIG. Because it's "user supplied", error out if it doesn't exist.
+ifdef PYTHON
+ ifndef PYTHON_CONFIG
+ PYTHON_CONFIG_AUTO := $(call get-executable,$(PYTHON)-config)
+ PYTHON_CONFIG := $(if $(PYTHON_CONFIG_AUTO),$(PYTHON_CONFIG_AUTO),\
+ $(call $(error $(PYTHON)-config not found)))
+ endif
+endif
+
+# Select either auto detected python and python-config or use user supplied values if they are
+# defined. get-executable-or-default fails with an error if the first argument is supplied but
+# doesn't exist.
+override PYTHON_CONFIG := $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON_AUTO))
+override PYTHON := $(call get-executable-or-default,PYTHON,$(subst -config,,$(PYTHON_CONFIG)))
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
@@ -243,31 +299,33 @@ ifdef PYTHON_CONFIG
PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
+ ifeq ($(CC_NO_CLANG), 0)
+ PYTHON_EMBED_CCOPTS := $(filter-out -ffat-lto-objects, $(PYTHON_EMBED_CCOPTS))
+ endif
endif
FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
-FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
-FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
FEATURE_CHECK_LDFLAGS-libaio = -lrt
-FEATURE_CHECK_LDFLAGS-disassembler-four-args = -lbfd -lopcodes -ldl
-
CORE_CFLAGS += -fno-omit-frame-pointer
CORE_CFLAGS += -ggdb3
CORE_CFLAGS += -funwind-tables
CORE_CFLAGS += -Wall
CORE_CFLAGS += -Wextra
-CORE_CFLAGS += -std=gnu99
+CORE_CFLAGS += -std=gnu11
-CXXFLAGS += -std=gnu++11 -fno-exceptions -fno-rtti
+CXXFLAGS += -std=gnu++14 -fno-exceptions -fno-rtti
CXXFLAGS += -Wall
CXXFLAGS += -fno-omit-frame-pointer
CXXFLAGS += -ggdb3
CXXFLAGS += -funwind-tables
CXXFLAGS += -Wno-strict-aliasing
+HOSTCFLAGS += -Wall
+HOSTCFLAGS += -Wextra
+
# Enforce a non-executable stack, as we may regress (again) in the future by
# adding assembler files missing the .GNU-stack linker note.
LDFLAGS += -Wl,-z,noexecstack
@@ -280,6 +338,9 @@ ifneq ($(TCMALLOC),)
endif
ifeq ($(FEATURES_DUMP),)
+# We will display at the end of this Makefile.config, using $(call feature_display_entries),
+# as we may retry some feature detection here.
+ FEATURE_DISPLAY_DEFERRED := 1
include $(srctree)/tools/build/Makefile.feature
else
include $(FEATURES_DUMP)
@@ -291,11 +352,10 @@ endif
ifeq ($(DEBUG),0)
ifeq ($(feature-fortify-source), 1)
- CORE_CFLAGS += -D_FORTIFY_SOURCE=2
+ CORE_CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2
endif
endif
-INC_FLAGS += -I$(srctree)/tools/lib/perf/include
INC_FLAGS += -I$(src-perf)/util/include
INC_FLAGS += -I$(src-perf)/arch/$(SRCARCH)/include
INC_FLAGS += -I$(srctree)/tools/include/
@@ -313,7 +373,6 @@ endif
INC_FLAGS += -I$(src-perf)/util
INC_FLAGS += -I$(src-perf)
-INC_FLAGS += -I$(srctree)/tools/lib/
CORE_CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
@@ -322,10 +381,6 @@ CXXFLAGS += $(INC_FLAGS)
LIBPERF_CFLAGS := $(CORE_CFLAGS) $(EXTRA_CFLAGS)
-ifeq ($(feature-sync-compare-and-swap), 1)
- CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
-endif
-
ifeq ($(feature-pthread-attr-setaffinity-np), 1)
CFLAGS += -DHAVE_PTHREAD_ATTR_SETAFFINITY_NP
endif
@@ -346,7 +401,7 @@ ifndef NO_BIONIC
endif
ifeq ($(feature-eventfd), 1)
- CFLAGS += -DHAVE_EVENTFD
+ CFLAGS += -DHAVE_EVENTFD_SUPPORT
endif
ifeq ($(feature-get_current_dir_name), 1)
@@ -363,7 +418,6 @@ endif
ifdef NO_LIBELF
NO_DWARF := 1
- NO_DEMANGLE := 1
NO_LIBUNWIND := 1
NO_LIBDW_DWARF_UNWIND := 1
NO_LIBBPF := 1
@@ -377,16 +431,20 @@ else
LIBC_SUPPORT := 1
endif
ifeq ($(LIBC_SUPPORT),1)
- msg := $(warning No libelf found. Disables 'probe' tool, jvmti and BPF support in 'perf record'. Please install libelf-dev, libelf-devel or elfutils-libelf-devel);
-
- NO_LIBELF := 1
- NO_DWARF := 1
- NO_DEMANGLE := 1
- NO_LIBUNWIND := 1
- NO_LIBDW_DWARF_UNWIND := 1
- NO_LIBBPF := 1
- NO_JVMTI := 1
+ msg := $(error ERROR: No libelf found. Disables 'probe' tool, jvmti and BPF support. Please install libelf-dev, libelf-devel, elfutils-libelf-devel or build with NO_LIBELF=1.)
else
+ ifneq ($(filter s% -fsanitize=address%,$(EXTRA_CFLAGS),),)
+ ifneq ($(shell ldconfig -p | grep libasan >/dev/null 2>&1; echo $$?), 0)
+ msg := $(error No libasan found, please install libasan);
+ endif
+ endif
+
+ ifneq ($(filter s% -fsanitize=undefined%,$(EXTRA_CFLAGS),),)
+ ifneq ($(shell ldconfig -p | grep libubsan >/dev/null 2>&1; echo $$?), 0)
+ msg := $(error No libubsan found, please install libubsan);
+ endif
+ endif
+
ifneq ($(filter s% -static%,$(LDFLAGS),),)
msg := $(error No static glibc found, please install glibc-static);
else
@@ -415,10 +473,6 @@ else
endif # libelf support
endif # NO_LIBELF
-ifeq ($(feature-glibc), 1)
- CFLAGS += -DHAVE_GLIBC_SUPPORT
-endif
-
ifeq ($(feature-libaio), 1)
ifndef NO_AIO
CFLAGS += -DHAVE_AIO_SUPPORT
@@ -429,6 +483,10 @@ ifdef NO_DWARF
NO_LIBDW_DWARF_UNWIND := 1
endif
+ifeq ($(feature-scandirat), 1)
+ CFLAGS += -DHAVE_SCANDIRAT_SUPPORT
+endif
+
ifeq ($(feature-sched_getcpu), 1)
CFLAGS += -DHAVE_SCHED_GETCPU_SUPPORT
endif
@@ -454,6 +512,8 @@ ifdef CORESIGHT
CFLAGS += -DCS_RAW_PACKED
endif
endif
+ else
+ dummy := $(error Error: No libopencsd library found or the version is not up-to-date. Please install recent libopencsd to build with CORESIGHT=1)
endif
endif
@@ -462,10 +522,6 @@ ifndef NO_LIBELF
EXTLIBS += -lelf
$(call detected,CONFIG_LIBELF)
- ifeq ($(feature-libelf-mmap), 1)
- CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
- endif
-
ifeq ($(feature-libelf-getphdrnum), 1)
CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
endif
@@ -480,6 +536,14 @@ ifndef NO_LIBELF
CFLAGS += -DHAVE_ELF_GETSHDRSTRNDX_SUPPORT
endif
+ ifndef NO_LIBDEBUGINFOD
+ $(call feature_check,libdebuginfod)
+ ifeq ($(feature-libdebuginfod), 1)
+ CFLAGS += -DHAVE_DEBUGINFOD_SUPPORT
+ EXTLIBS += -ldebuginfod
+ endif
+ endif
+
ifndef NO_DWARF
ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined)
msg := $(warning DWARF register mappings have not been defined for architecture $(SRCARCH), DWARF support disabled);
@@ -499,12 +563,17 @@ ifndef NO_LIBELF
# detecting libbpf without LIBBPF_DYNAMIC, so make VF=1 shows libbpf detection status
$(call feature_check,libbpf)
+
ifdef LIBBPF_DYNAMIC
ifeq ($(feature-libbpf), 1)
EXTLIBS += -lbpf
+ $(call detected,CONFIG_LIBBPF_DYNAMIC)
else
- dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
+ dummy := $(error Error: No libbpf devel library found or older than v1.0, please install/update libbpf-devel);
endif
+ else
+ # Libbpf will be built as a static library from tools/lib/bpf.
+ LIBBPF_STATIC := 1
endif
endif
@@ -593,6 +662,18 @@ ifndef NO_LIBBPF
endif
endif
+ifdef BUILD_BPF_SKEL
+ $(call feature_check,clang-bpf-co-re)
+ ifeq ($(feature-clang-bpf-co-re), 0)
+ dummy := $(error Error: clang too old/not installed. Please install recent clang to build with BUILD_BPF_SKEL)
+ endif
+ ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),)
+ dummy := $(error Error: BPF skeleton support requires libbpf)
+ endif
+ $(call detected,CONFIG_PERF_BPF_SKEL)
+ CFLAGS += -DHAVE_BPF_SKEL
+endif
+
dwarf-post-unwind := 1
dwarf-post-unwind-text := BUG
@@ -647,17 +728,20 @@ ifndef NO_LIBUNWIND
EXTLIBS += $(EXTLIBS_LIBUNWIND)
endif
-ifeq ($(NO_SYSCALL_TABLE),0)
- $(call detected,CONFIG_TRACE)
-else
- ifndef NO_LIBAUDIT
- ifneq ($(feature-libaudit), 1)
- msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
- NO_LIBAUDIT := 1
- else
- CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
- EXTLIBS += -laudit
- $(call detected,CONFIG_TRACE)
+ifneq ($(NO_LIBTRACEEVENT),1)
+ ifeq ($(NO_SYSCALL_TABLE),0)
+ $(call detected,CONFIG_TRACE)
+ else
+ ifndef NO_LIBAUDIT
+ $(call feature_check,libaudit)
+ ifneq ($(feature-libaudit), 1)
+ msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev);
+ NO_LIBAUDIT := 1
+ else
+ CFLAGS += -DHAVE_LIBAUDIT_SUPPORT
+ EXTLIBS += -laudit
+ $(call detected,CONFIG_TRACE)
+ endif
endif
endif
endif
@@ -673,10 +757,6 @@ ifndef NO_LIBCRYPTO
endif
endif
-ifdef NO_NEWT
- NO_SLANG=1
-endif
-
ifndef NO_SLANG
ifneq ($(feature-libslang), 1)
ifneq ($(feature-libslang-include-subdir), 1)
@@ -694,12 +774,14 @@ ifndef NO_SLANG
endif
endif
-ifndef NO_GTK2
+ifdef GTK2
FLAGS_GTK2=$(CFLAGS) $(LDFLAGS) $(EXTLIBS) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
+ $(call feature_check,gtk2)
ifneq ($(feature-gtk2), 1)
msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev);
NO_GTK2 := 1
else
+ $(call feature_check,gtk2-infobar)
ifeq ($(feature-gtk2-infobar), 1)
GTK_CFLAGS := -DHAVE_GTK_INFO_BAR_SUPPORT
endif
@@ -718,6 +800,7 @@ else
PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS))
PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null)
PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS))
+ PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS))
PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS))
FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS)
@@ -729,6 +812,9 @@ else
LDFLAGS += $(PERL_EMBED_LDFLAGS)
EXTLIBS += $(PERL_EMBED_LIBADD)
CFLAGS += -DHAVE_LIBPERL_SUPPORT
+ ifeq ($(CC_NO_CLANG), 0)
+ CFLAGS += -Wno-compound-token-split-by-macro
+ endif
$(call detected,CONFIG_LIBPERL)
endif
endif
@@ -746,6 +832,7 @@ define disable-python_code
NO_LIBPYTHON := 1
endef
+PYTHON_EXTENSION_SUFFIX := '.so'
ifdef NO_LIBPYTHON
$(call disable-python,Python support disabled by user)
else
@@ -760,11 +847,17 @@ else
else
ifneq ($(feature-libpython), 1)
- $(call disable-python,No 'Python.h' (for Python 2.x support) was found: disables Python support - please install python-devel/python-dev)
+ $(call disable-python,No 'Python.h' was found: disables Python support - please install python-devel/python-dev)
else
LDFLAGS += $(PYTHON_EMBED_LDFLAGS)
EXTLIBS += $(PYTHON_EMBED_LIBADD)
- LANG_BINDINGS += $(obj-perf)python/perf.so
+ PYTHON_SETUPTOOLS_INSTALLED := $(shell $(PYTHON) -c 'import setuptools;' 2> /dev/null && echo "yes" || echo "no")
+ ifeq ($(PYTHON_SETUPTOOLS_INSTALLED), yes)
+ PYTHON_EXTENSION_SUFFIX := $(shell $(PYTHON) -c 'from importlib import machinery; print(machinery.EXTENSION_SUFFIXES[0])')
+ LANG_BINDINGS += $(obj-perf)python/perf$(PYTHON_EXTENSION_SUFFIX)
+ else
+ msg := $(warning Missing python setuptools, the python binding won't be built, please install python3-setuptools or equivalent);
+ endif
CFLAGS += -DHAVE_LIBPYTHON_SUPPORT
$(call detected,CONFIG_LIBPYTHON)
endif
@@ -772,56 +865,75 @@ else
endif
endif
-ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd -lopcodes
-else
- # we are on a system that requires -liberty and (maybe) -lz
- # to link against -lbfd; test each case individually here
-
- # call all detections now so we get correct
- # status in VF output
- $(call feature_check,libbfd-liberty)
- $(call feature_check,libbfd-liberty-z)
-
- ifeq ($(feature-libbfd-liberty), 1)
- EXTLIBS += -lbfd -lopcodes -liberty
- FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -ldl
+ifneq ($(NO_JEVENTS),1)
+ ifeq ($(wildcard pmu-events/arch/$(SRCARCH)/mapfile.csv),)
+ NO_JEVENTS := 1
+ endif
+endif
+ifneq ($(NO_JEVENTS),1)
+ NO_JEVENTS := 0
+ ifndef PYTHON
+ $(error ERROR: No python interpreter found, but one is needed for jevents generation. Install python or build with NO_JEVENTS=1.)
else
- ifeq ($(feature-libbfd-liberty-z), 1)
- EXTLIBS += -lbfd -lopcodes -liberty -lz
- FEATURE_CHECK_LDFLAGS-disassembler-four-args += -liberty -lz -ldl
+ # jevents.py uses f-strings present in Python 3.6 released in Dec. 2016.
+ JEVENTS_PYTHON_GOOD := $(shell $(PYTHON) -c 'import sys;print("1" if(sys.version_info.major >= 3 and sys.version_info.minor >= 6) else "0")' 2> /dev/null)
+ ifneq ($(JEVENTS_PYTHON_GOOD), 1)
+ $(error ERROR: Python interpreter for jevents generation is too old (older than 3.6). Install a newer python or build with NO_JEVENTS=1.)
endif
endif
- $(call feature_check,disassembler-four-args)
endif
-ifdef NO_DEMANGLE
- CFLAGS += -DNO_DEMANGLE
-else
- ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
- EXTLIBS += -liberty
+ifdef BUILD_NONDISTRO
+ ifeq ($(feature-libbfd), 1)
+ EXTLIBS += -lbfd -lopcodes
else
- ifeq ($(filter -liberty,$(EXTLIBS)),)
- $(call feature_check,cplus-demangle)
+ # we are on a system that requires -liberty and (maybe) -lz
+ # to link against -lbfd; test each case individually here
- # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
- # or any of 'bfd iberty z' trinity
- ifeq ($(feature-cplus-demangle), 1)
- EXTLIBS += -liberty
- else
- msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
- CFLAGS += -DNO_DEMANGLE
+ # call all detections now so we get correct
+ # status in VF output
+ $(call feature_check,libbfd-liberty)
+ $(call feature_check,libbfd-liberty-z)
+
+ ifeq ($(feature-libbfd-liberty), 1)
+ EXTLIBS += -lbfd -lopcodes -liberty
+ else
+ ifeq ($(feature-libbfd-liberty-z), 1)
+ EXTLIBS += -lbfd -lopcodes -liberty -lz
endif
endif
+ $(call feature_check,disassembler-four-args)
+ $(call feature_check,disassembler-init-styled)
endif
- ifneq ($(filter -liberty,$(EXTLIBS)),)
- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+ CFLAGS += -DHAVE_LIBBFD_SUPPORT
+ CXXFLAGS += -DHAVE_LIBBFD_SUPPORT
+ ifeq ($(feature-libbfd-buildid), 1)
+ CFLAGS += -DHAVE_LIBBFD_BUILDID_SUPPORT
+ else
+ msg := $(warning Old version of libbfd/binutils, things like PE executable profiling will not be available);
endif
endif
-ifneq ($(filter -lbfd,$(EXTLIBS)),)
- CFLAGS += -DHAVE_LIBBFD_SUPPORT
+ifndef NO_DEMANGLE
+ $(call feature_check,cxa-demangle)
+ ifeq ($(feature-cxa-demangle), 1)
+ EXTLIBS += -lstdc++
+ CFLAGS += -DHAVE_CXA_DEMANGLE_SUPPORT
+ CXXFLAGS += -DHAVE_CXA_DEMANGLE_SUPPORT
+ endif
+ ifdef BUILD_NONDISTRO
+ ifeq ($(filter -liberty,$(EXTLIBS)),)
+ $(call feature_check,cplus-demangle)
+ ifeq ($(feature-cplus-demangle), 1)
+ EXTLIBS += -liberty
+ endif
+ endif
+ ifneq ($(filter -liberty,$(EXTLIBS)),)
+ CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+ CXXFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+ endif
+ endif
endif
ifndef NO_ZLIB
@@ -899,6 +1011,10 @@ ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
+ifeq ($(feature-disassembler-init-styled), 1)
+ CFLAGS += -DDISASM_INIT_STYLED
+endif
+
ifeq (${IS_64_BIT}, 1)
ifndef NO_PERF_READ_VDSO32
$(call feature_check,compile-32)
@@ -946,6 +1062,9 @@ ifndef NO_AUXTRACE
ifndef NO_AUXTRACE
$(call detected,CONFIG_AUXTRACE)
CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+ ifeq ($(feature-reallocarray), 0)
+ CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+ endif
endif
endif
@@ -1012,9 +1131,50 @@ ifdef LIBCLANGLLVM
endif
endif
+ifndef NO_LIBPFM4
+ $(call feature_check,libpfm4)
+ ifeq ($(feature-libpfm4), 1)
+ CFLAGS += -DHAVE_LIBPFM
+ EXTLIBS += -lpfm
+ ASCIIDOC_EXTRA = -aHAVE_LIBPFM=1
+ $(call detected,CONFIG_LIBPFM4)
+ else
+ msg := $(warning libpfm4 not found, disables libpfm4 support. Please install libpfm4-dev);
+ endif
+endif
+
+# libtraceevent is a recommended dependency picked up from the system.
+ifneq ($(NO_LIBTRACEEVENT),1)
+ $(call feature_check,libtraceevent)
+ ifeq ($(feature-libtraceevent), 1)
+ CFLAGS += -DHAVE_LIBTRACEEVENT
+ EXTLIBS += -ltraceevent
+ LIBTRACEEVENT_VERSION := $(shell $(PKG_CONFIG) --modversion libtraceevent)
+ LIBTRACEEVENT_VERSION_1 := $(word 1, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
+ LIBTRACEEVENT_VERSION_2 := $(word 2, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
+ LIBTRACEEVENT_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEEVENT_VERSION)))
+ LIBTRACEEVENT_VERSION_CPP := $(shell expr $(LIBTRACEEVENT_VERSION_1) \* 255 \* 255 + $(LIBTRACEEVENT_VERSION_2) \* 255 + $(LIBTRACEEVENT_VERSION_3))
+ CFLAGS += -DLIBTRACEEVENT_VERSION=$(LIBTRACEEVENT_VERSION_CPP)
+ $(call detected,CONFIG_LIBTRACEEVENT)
+ else
+ dummy := $(error ERROR: libtraceevent is missing. Please install libtraceevent-dev/libtraceevent-devel or build with NO_LIBTRACEEVENT=1)
+ endif
+
+ $(call feature_check,libtracefs)
+ ifeq ($(feature-libtracefs), 1)
+ EXTLIBS += -ltracefs
+ LIBTRACEFS_VERSION := $(shell $(PKG_CONFIG) --modversion libtracefs)
+ LIBTRACEFS_VERSION_1 := $(word 1, $(subst ., ,$(LIBTRACEFS_VERSION)))
+ LIBTRACEFS_VERSION_2 := $(word 2, $(subst ., ,$(LIBTRACEFS_VERSION)))
+ LIBTRACEFS_VERSION_3 := $(word 3, $(subst ., ,$(LIBTRACEFS_VERSION)))
+ LIBTRACEFS_VERSION_CPP := $(shell expr $(LIBTRACEFS_VERSION_1) \* 255 \* 255 + $(LIBTRACEFS_VERSION_2) \* 255 + $(LIBTRACEFS_VERSION_3))
+ CFLAGS += -DLIBTRACEFS_VERSION=$(LIBTRACEFS_VERSION_CPP)
+ endif
+endif
+
# Among the variables below, these:
# perfexecdir
-# perf_include_dir
+# libbpf_include_dir
# perf_examples_dir
# template_dir
# mandir
@@ -1032,10 +1192,13 @@ prefix ?= $(HOME)
endif
bindir_relative = bin
bindir = $(abspath $(prefix)/$(bindir_relative))
+includedir_relative = include
+includedir = $(abspath $(prefix)/$(includedir_relative))
mandir = share/man
infodir = share/info
perfexecdir = libexec/perf-core
-perf_include_dir = lib/perf/include
+# FIXME: system's libbpf header directory, where we expect to find bpf/bpf_helpers.h, for instance
+libbpf_include_dir = /usr/include
perf_examples_dir = lib/perf/examples
sharedir = $(prefix)/share
template_dir = share/perf-core/templates
@@ -1064,10 +1227,11 @@ ETC_PERFCONFIG_SQ = $(subst ','\'',$(ETC_PERFCONFIG))
STRACE_GROUPS_DIR_SQ = $(subst ','\'',$(STRACE_GROUPS_DIR))
DESTDIR_SQ = $(subst ','\'',$(DESTDIR))
bindir_SQ = $(subst ','\'',$(bindir))
+includedir_SQ = $(subst ','\'',$(includedir))
mandir_SQ = $(subst ','\'',$(mandir))
infodir_SQ = $(subst ','\'',$(infodir))
perfexecdir_SQ = $(subst ','\'',$(perfexecdir))
-perf_include_dir_SQ = $(subst ','\'',$(perf_include_dir))
+libbpf_include_dir_SQ = $(subst ','\'',$(libbpf_include_dir))
perf_examples_dir_SQ = $(subst ','\'',$(perf_examples_dir))
template_dir_SQ = $(subst ','\'',$(template_dir))
htmldir_SQ = $(subst ','\'',$(htmldir))
@@ -1079,13 +1243,13 @@ srcdir_SQ = $(subst ','\'',$(srcdir))
ifneq ($(filter /%,$(firstword $(perfexecdir))),)
perfexec_instdir = $(perfexecdir)
-perf_include_instdir = $(perf_include_dir)
+perf_include_instdir = $(libbpf_include_dir)
perf_examples_instdir = $(perf_examples_dir)
STRACE_GROUPS_INSTDIR = $(STRACE_GROUPS_DIR)
tip_instdir = $(tipdir)
else
perfexec_instdir = $(prefix)/$(perfexecdir)
-perf_include_instdir = $(prefix)/$(perf_include_dir)
+perf_include_instdir = $(prefix)/$(libbpf_include_dir)
perf_examples_instdir = $(prefix)/$(perf_examples_dir)
STRACE_GROUPS_INSTDIR = $(prefix)/$(STRACE_GROUPS_DIR)
tip_instdir = $(prefix)/$(tipdir)
@@ -1096,24 +1260,22 @@ perf_examples_instdir_SQ = $(subst ','\'',$(perf_examples_instdir))
STRACE_GROUPS_INSTDIR_SQ = $(subst ','\'',$(STRACE_GROUPS_INSTDIR))
tip_instdir_SQ = $(subst ','\'',$(tip_instdir))
-# If we install to $(HOME) we keep the traceevent default:
-# $(HOME)/.traceevent/plugins
-# Otherwise we install plugins into the global $(libdir).
-ifdef DESTDIR
-plugindir=$(libdir)/traceevent/plugins
-plugindir_SQ= $(subst ','\'',$(plugindir))
-endif
+export perfexec_instdir_SQ
print_var = $(eval $(print_var_code)) $(info $(MSG))
define print_var_code
- MSG = $(shell printf '...%30s: %s' $(1) $($(1)))
+ MSG = $(shell printf '...%40s: %s' $(1) $($(1)))
endef
+ifeq ($(feature_display),1)
+ $(call feature_display_entries)
+endif
+
ifeq ($(VF),1)
# Display EXTRA features which are detected manualy
# from here with feature_check call and thus cannot
# be partof global state output.
- $(foreach feat,$(FEATURE_TESTS_EXTRA),$(call feature_print_status,$(feat),))
+ $(foreach feat,$(FEATURE_TESTS_EXTRA),$(call feature_print_status,$(feat),) $(info $(MSG)))
$(call print_var,prefix)
$(call print_var,bindir)
$(call print_var,libdir)
@@ -1123,11 +1285,12 @@ ifeq ($(VF),1)
$(call print_var,JDIR)
ifeq ($(dwarf-post-unwind),1)
- $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text))
+ $(call feature_print_text,"DWARF post unwind library", $(dwarf-post-unwind-text)) $(info $(MSG))
endif
- $(info )
endif
+$(info )
+
$(call detected_var,bindir_SQ)
$(call detected_var,PYTHON_WORD)
ifneq ($(OUTPUT),)
@@ -1140,7 +1303,7 @@ $(call detected_var,ETC_PERFCONFIG_SQ)
$(call detected_var,STRACE_GROUPS_DIR_SQ)
$(call detected_var,prefix_SQ)
$(call detected_var,perfexecdir_SQ)
-$(call detected_var,perf_include_dir_SQ)
+$(call detected_var,libbpf_include_dir_SQ)
$(call detected_var,perf_examples_dir_SQ)
$(call detected_var,tipdir_SQ)
$(call detected_var,srcdir_SQ)
@@ -1148,3 +1311,12 @@ $(call detected_var,LIBDIR)
$(call detected_var,GTK_CFLAGS)
$(call detected_var,PERL_EMBED_CCOPTS)
$(call detected_var,PYTHON_EMBED_CCOPTS)
+ifneq ($(BISON_FILE_PREFIX_MAP),)
+$(call detected_var,BISON_FILE_PREFIX_MAP)
+endif
+
+# re-generate FEATURE-DUMP as we may have called feature_check, found out
+# extra libraries to add to LDFLAGS of some other test and then redo those
+# tests.
+$(shell rm -f $(FEATURE_DUMP_FILENAME))
+$(foreach feat,$(FEATURE_TESTS),$(shell echo "$(call feature_assign,$(feat))" >> $(FEATURE_DUMP_FILENAME)))
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index d15a311408f1..a42a6a99c2bc 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -44,11 +44,9 @@ include ../scripts/utilities.mak
#
# Define WERROR=0 to disable treating any warnings as errors.
#
-# Define NO_NEWT if you do not want TUI support. (deprecated)
-#
# Define NO_SLANG if you do not want TUI support.
#
-# Define NO_GTK2 if you do not want GTK+ GUI support.
+# Define GTK2 if you want GTK+ GUI support.
#
# Define NO_DEMANGLE if you do not want C++ symbol demangling.
#
@@ -118,6 +116,18 @@ include ../scripts/utilities.mak
#
# Define LIBBPF_DYNAMIC to enable libbpf dynamic linking.
#
+# Define NO_SYSCALL_TABLE=1 to disable the use of syscall id to/from name tables
+# generated from the kernel .tbl or unistd.h files and use, if available, libaudit
+# for doing the conversions to/from strings/id.
+#
+# Define NO_LIBPFM4 to disable libpfm4 events extension.
+#
+# Define NO_LIBDEBUGINFOD if you do not want debuginfod support
+#
+# Define BUILD_BPF_SKEL to enable BPF skeletons
+#
+# Define BUILD_NONDISTRO to enable building and linking against libbfd and
+# libiberty, whose licenses are incompatible with distribution.
# As per kernel Makefile, avoid funny character set dependencies
unexport LC_ALL
@@ -170,9 +180,10 @@ LD += $(EXTRA_LDFLAGS)
HOSTCC ?= gcc
HOSTLD ?= ld
HOSTAR ?= ar
+CLANG ?= clang
+LLVM_STRIP ?= llvm-strip
PKG_CONFIG = $(CROSS_COMPILE)pkg-config
-LLVM_CONFIG ?= llvm-config
RM = rm -f
LN = ln -f
@@ -188,7 +199,7 @@ AWK = awk
# non-config cases
config := 1
-NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help install-doc install-man install-html install-info install-pdf doc man html info pdf
+NON_CONFIG_TARGETS := clean python-clean TAGS tags cscope help
ifdef MAKECMDGOALS
ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
@@ -211,7 +222,7 @@ else
endif
export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
-export HOSTCC HOSTLD HOSTAR
+export HOSTCC HOSTLD HOSTAR HOSTCFLAGS
include $(srctree)/tools/build/Makefile.include
@@ -226,10 +237,10 @@ sub-make: fixdep
else # force_fixdep
-LIB_DIR = $(srctree)/tools/lib/api/
-TRACE_EVENT_DIR = $(srctree)/tools/lib/traceevent/
-BPF_DIR = $(srctree)/tools/lib/bpf/
-SUBCMD_DIR = $(srctree)/tools/lib/subcmd/
+LIBAPI_DIR = $(srctree)/tools/lib/api/
+LIBBPF_DIR = $(srctree)/tools/lib/bpf/
+LIBSUBCMD_DIR = $(srctree)/tools/lib/subcmd/
+LIBSYMBOL_DIR = $(srctree)/tools/lib/symbol/
LIBPERF_DIR = $(srctree)/tools/lib/perf/
DOC_DIR = $(srctree)/tools/perf/Documentation/
@@ -271,50 +282,64 @@ PYRF_OBJS =
SCRIPT_SH =
SCRIPT_SH += perf-archive.sh
-SCRIPT_SH += perf-with-kcore.sh
+SCRIPT_SH += perf-iostat.sh
grep-libs = $(filter -l%,$(1))
strip-libs = $(filter-out -l%,$(1))
ifneq ($(OUTPUT),)
- TE_PATH=$(OUTPUT)
- BPF_PATH=$(OUTPUT)
- SUBCMD_PATH=$(OUTPUT)
- LIBPERF_PATH=$(OUTPUT)
-ifneq ($(subdir),)
- API_PATH=$(OUTPUT)/../lib/api/
+ LIBAPI_OUTPUT = $(abspath $(OUTPUT))/libapi
else
- API_PATH=$(OUTPUT)
+ LIBAPI_OUTPUT = $(CURDIR)/libapi
endif
+LIBAPI_DESTDIR = $(LIBAPI_OUTPUT)
+LIBAPI_INCLUDE = $(LIBAPI_DESTDIR)/include
+LIBAPI = $(LIBAPI_OUTPUT)/libapi.a
+export LIBAPI
+CFLAGS += -I$(LIBAPI_OUTPUT)/include
+
+ifneq ($(OUTPUT),)
+ LIBBPF_OUTPUT = $(abspath $(OUTPUT))/libbpf
else
- TE_PATH=$(TRACE_EVENT_DIR)
- API_PATH=$(LIB_DIR)
- BPF_PATH=$(BPF_DIR)
- SUBCMD_PATH=$(SUBCMD_DIR)
- LIBPERF_PATH=$(LIBPERF_DIR)
+ LIBBPF_OUTPUT = $(CURDIR)/libbpf
+endif
+ifdef LIBBPF_STATIC
+ LIBBPF_DESTDIR = $(LIBBPF_OUTPUT)
+ LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include
+ LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a
+ CFLAGS += -I$(LIBBPF_OUTPUT)/include
endif
-LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
-export LIBTRACEEVENT
-
-LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)plugins/libtraceevent-dynamic-list
-
-#
-# The static build has no dynsym table, so this does not work for
-# static build. Looks like linker starts to scream about that now
-# (in Fedora 26) so we need to switch it off for static build.
-DYNAMIC_LIST_LDFLAGS = -Xlinker --dynamic-list=$(LIBTRACEEVENT_DYNAMIC_LIST)
-LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS = $(if $(findstring -static,$(LDFLAGS)),,$(DYNAMIC_LIST_LDFLAGS))
-
-LIBAPI = $(API_PATH)libapi.a
-export LIBAPI
-
-LIBBPF = $(BPF_PATH)libbpf.a
+ifneq ($(OUTPUT),)
+ LIBSUBCMD_OUTPUT = $(abspath $(OUTPUT))/libsubcmd
+else
+ LIBSUBCMD_OUTPUT = $(CURDIR)/libsubcmd
+endif
+LIBSUBCMD_DESTDIR = $(LIBSUBCMD_OUTPUT)
+LIBSUBCMD_INCLUDE = $(LIBSUBCMD_DESTDIR)/include
+LIBSUBCMD = $(LIBSUBCMD_OUTPUT)/libsubcmd.a
+CFLAGS += -I$(LIBSUBCMD_OUTPUT)/include
-LIBSUBCMD = $(SUBCMD_PATH)libsubcmd.a
+ifneq ($(OUTPUT),)
+ LIBSYMBOL_OUTPUT = $(abspath $(OUTPUT))/libsymbol
+else
+ LIBSYMBOL_OUTPUT = $(CURDIR)/libsymbol
+endif
+LIBSYMBOL_DESTDIR = $(LIBSYMBOL_OUTPUT)
+LIBSYMBOL_INCLUDE = $(LIBSYMBOL_DESTDIR)/include
+LIBSYMBOL = $(LIBSYMBOL_OUTPUT)/libsymbol.a
+CFLAGS += -I$(LIBSYMBOL_OUTPUT)/include
-LIBPERF = $(LIBPERF_PATH)libperf.a
+ifneq ($(OUTPUT),)
+ LIBPERF_OUTPUT = $(abspath $(OUTPUT))/libperf
+else
+ LIBPERF_OUTPUT = $(CURDIR)/libperf
+endif
+LIBPERF_DESTDIR = $(LIBPERF_OUTPUT)
+LIBPERF_INCLUDE = $(LIBPERF_DESTDIR)/include
+LIBPERF = $(LIBPERF_OUTPUT)/libperf.a
export LIBPERF
+CFLAGS += -I$(LIBPERF_OUTPUT)/include
# python extension build directories
PYTHON_EXTBUILD := $(OUTPUT)python_ext_build/
@@ -324,8 +349,13 @@ export PYTHON_EXTBUILD_LIB PYTHON_EXTBUILD_TMP
python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT)python/perf*.so
-PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
-PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPI)
+ifeq ($(CONFIG_LIBTRACEEVENT),y)
+ PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
+else
+ PYTHON_EXT_SRCS := $(shell grep -v '^\#\|util/trace-event.c' util/python-ext-sources)
+endif
+
+PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBAPI)
SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH))
@@ -345,8 +375,11 @@ ifndef NO_JVMTI
PROGRAMS += $(OUTPUT)$(LIBJVMTI)
endif
+DLFILTERS := dlfilter-test-api-v0.so dlfilter-show-cycles.so
+DLFILTERS := $(patsubst %,$(OUTPUT)dlfilters/%,$(DLFILTERS))
+
# what 'all' will build and 'install' will install, in perfexecdir
-ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS)
+ALL_PROGRAMS = $(PROGRAMS) $(SCRIPTS) $(DLFILTERS)
# what 'all' will build but not install in perfexecdir
OTHER_PROGRAMS = $(OUTPUT)perf
@@ -361,11 +394,9 @@ endif
export PERL_PATH
-PERFLIBS = $(LIBAPI) $(LIBTRACEEVENT) $(LIBSUBCMD) $(LIBPERF)
-ifndef NO_LIBBPF
- ifndef LIBBPF_DYNAMIC
- PERFLIBS += $(LIBBPF)
- endif
+PERFLIBS = $(LIBAPI) $(LIBPERF) $(LIBSUBCMD) $(LIBSYMBOL)
+ifdef LIBBPF_STATIC
+ PERFLIBS += $(LIBBPF)
endif
# We choose to avoid "if .. else if .. else .. endif endif"
@@ -376,7 +407,7 @@ ifneq ($(OUTPUT),)
CFLAGS += -I$(OUTPUT)
endif
-ifndef NO_GTK2
+ifdef GTK2
ALL_PROGRAMS += $(OUTPUT)libperf-gtk.so
GTK_IN := $(OUTPUT)gtk-in.o
endif
@@ -410,6 +441,7 @@ export INSTALL SHELL_PATH
SHELL = $(SHELL_PATH)
+beauty_linux_dir := $(srctree)/tools/perf/trace/beauty/include/linux/
linux_uapi_dir := $(srctree)/tools/include/uapi/linux
asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
@@ -487,11 +519,17 @@ kvm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/kvm_ioctl.sh
$(kvm_ioctl_array): $(kvm_hdr_dir)/kvm.h $(kvm_ioctl_tbl)
$(Q)$(SHELL) '$(kvm_ioctl_tbl)' $(kvm_hdr_dir) > $@
-socket_ipproto_array := $(beauty_outdir)/socket_ipproto_array.c
-socket_ipproto_tbl := $(srctree)/tools/perf/trace/beauty/socket_ipproto.sh
+socket_arrays := $(beauty_outdir)/socket.c
+socket_tbl := $(srctree)/tools/perf/trace/beauty/socket.sh
-$(socket_ipproto_array): $(linux_uapi_dir)/in.h $(socket_ipproto_tbl)
- $(Q)$(SHELL) '$(socket_ipproto_tbl)' $(linux_uapi_dir) > $@
+$(socket_arrays): $(linux_uapi_dir)/in.h $(beauty_linux_dir)/socket.h $(socket_tbl)
+ $(Q)$(SHELL) '$(socket_tbl)' $(linux_uapi_dir) $(beauty_linux_dir) > $@
+
+sockaddr_arrays := $(beauty_outdir)/sockaddr.c
+sockaddr_tbl := $(srctree)/tools/perf/trace/beauty/sockaddr.sh
+
+$(sockaddr_arrays): $(beauty_linux_dir)/socket.h $(sockaddr_tbl)
+ $(Q)$(SHELL) '$(sockaddr_tbl)' $(beauty_linux_dir) > $@
vhost_virtio_ioctl_array := $(beauty_ioctl_outdir)/vhost_virtio_ioctl_array.c
vhost_virtio_hdr_dir := $(srctree)/tools/include/uapi/linux
@@ -520,6 +558,12 @@ mmap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mmap_flags.sh
$(mmap_flags_array): $(linux_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_flags_tbl)
$(Q)$(SHELL) '$(mmap_flags_tbl)' $(linux_uapi_dir) $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
+mremap_flags_array := $(beauty_outdir)/mremap_flags_array.c
+mremap_flags_tbl := $(srctree)/tools/perf/trace/beauty/mremap_flags.sh
+
+$(mremap_flags_array): $(linux_uapi_dir)/mman.h $(mremap_flags_tbl)
+ $(Q)$(SHELL) '$(mremap_flags_tbl)' $(linux_uapi_dir) > $@
+
mount_flags_array := $(beauty_outdir)/mount_flags_array.c
mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/mount_flags.sh
@@ -532,6 +576,13 @@ move_mount_flags_tbl := $(srctree)/tools/perf/trace/beauty/move_mount_flags.sh
$(move_mount_flags_array): $(linux_uapi_dir)/fs.h $(move_mount_flags_tbl)
$(Q)$(SHELL) '$(move_mount_flags_tbl)' $(linux_uapi_dir) > $@
+
+mmap_prot_array := $(beauty_outdir)/mmap_prot_array.c
+mmap_prot_tbl := $(srctree)/tools/perf/trace/beauty/mmap_prot.sh
+
+$(mmap_prot_array): $(asm_generic_uapi_dir)/mman.h $(asm_generic_uapi_dir)/mman-common.h $(mmap_prot_tbl)
+ $(Q)$(SHELL) '$(mmap_prot_tbl)' $(asm_generic_uapi_dir) $(arch_asm_uapi_dir) > $@
+
prctl_option_array := $(beauty_outdir)/prctl_option_array.c
prctl_hdr_dir := $(srctree)/tools/include/uapi/linux/
prctl_option_tbl := $(srctree)/tools/perf/trace/beauty/prctl_option.sh
@@ -574,7 +625,7 @@ arch_errno_hdr_dir := $(srctree)/tools
arch_errno_tbl := $(srctree)/tools/perf/trace/beauty/arch_errno_names.sh
$(arch_errno_name_array): $(arch_errno_tbl)
- $(Q)$(SHELL) '$(arch_errno_tbl)' $(firstword $(CC)) $(arch_errno_hdr_dir) > $@
+ $(Q)$(SHELL) '$(arch_errno_tbl)' '$(patsubst -%,,$(CC))' $(arch_errno_hdr_dir) > $@
sync_file_range_arrays := $(beauty_outdir)/sync_file_range_arrays.c
sync_file_range_tbls := $(srctree)/tools/perf/trace/beauty/sync_file_range.sh
@@ -582,18 +633,30 @@ sync_file_range_tbls := $(srctree)/tools/perf/trace/beauty/sync_file_range.sh
$(sync_file_range_arrays): $(linux_uapi_dir)/fs.h $(sync_file_range_tbls)
$(Q)$(SHELL) '$(sync_file_range_tbls)' $(linux_uapi_dir) > $@
-all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS)
+TESTS_CORESIGHT_DIR := $(srctree)/tools/perf/tests/shell/coresight
+
+tests-coresight-targets: FORCE
+ $(Q)$(MAKE) -C $(TESTS_CORESIGHT_DIR)
+
+tests-coresight-targets-clean:
+ $(call QUIET_CLEAN, coresight)
+ $(Q)$(MAKE) -C $(TESTS_CORESIGHT_DIR) O=$(OUTPUT) clean >/dev/null
+
+all: shell_compatibility_test $(ALL_PROGRAMS) $(LANG_BINDINGS) $(OTHER_PROGRAMS) tests-coresight-targets
# Create python binding output directory if not already present
_dummy := $(shell [ -d '$(OUTPUT)python' ] || mkdir -p '$(OUTPUT)python')
-$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST) $(LIBPERF)
+$(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX): $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBPERF) $(LIBSUBCMD)
$(QUIET_GEN)LDSHARED="$(CC) -pthread -shared" \
- CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
+ CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS)' \
$(PYTHON_WORD) util/setup.py \
--quiet build_ext; \
cp $(PYTHON_EXTBUILD_LIB)perf*.so $(OUTPUT)python/
+python_perf_target:
+ @echo "Target is: $(OUTPUT)python/perf$(PYTHON_EXTENSION_SUFFIX)"
+
please_set_SHELL_PATH_to_a_more_modern_shell:
$(Q)$$(:)
@@ -604,32 +667,22 @@ strip: $(PROGRAMS) $(OUTPUT)perf
PERF_IN := $(OUTPUT)perf-in.o
-JEVENTS := $(OUTPUT)pmu-events/jevents
-JEVENTS_IN := $(OUTPUT)pmu-events/jevents-in.o
-
PMU_EVENTS_IN := $(OUTPUT)pmu-events/pmu-events-in.o
-
-export JEVENTS
+export NO_JEVENTS
build := -f $(srctree)/tools/build/Makefile.build dir=. obj
$(PERF_IN): prepare FORCE
$(Q)$(MAKE) $(build)=perf
-$(JEVENTS_IN): FORCE
- $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=jevents
-
-$(JEVENTS): $(JEVENTS_IN)
- $(QUIET_LINK)$(HOSTCC) $(JEVENTS_IN) -o $@
-
-$(PMU_EVENTS_IN): $(JEVENTS) FORCE
+$(PMU_EVENTS_IN): FORCE prepare
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events
-$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
- $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) \
$(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@
-$(GTK_IN): FORCE
+$(GTK_IN): FORCE prepare
$(Q)$(MAKE) $(build)=gtk
$(OUTPUT)libperf-gtk.so: $(GTK_IN) $(PERFLIBS)
@@ -643,9 +696,8 @@ $(OUTPUT)common-cmds.h: $(wildcard Documentation/perf-*.txt)
$(SCRIPTS) : % : %.sh
$(QUIET_GEN)$(INSTALL) '$@.sh' '$(OUTPUT)$@'
-$(OUTPUT)PERF-VERSION-FILE: ../../.git/HEAD
+$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE
$(Q)$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT)
- $(Q)touch $(OUTPUT)PERF-VERSION-FILE
# These can record PERF_VERSION
perf.spec $(SCRIPTS) \
@@ -676,7 +728,7 @@ endif
# get relative building directory (to $(OUTPUT))
# and '.' if it's $(OUTPUT) itself
__build-dir = $(subst $(OUTPUT),,$(dir $@))
-build-dir = $(if $(__build-dir),$(__build-dir),.)
+build-dir = $(or $(__build-dir),.)
prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioctl_array) \
$(fadvise_advice_array) \
@@ -688,10 +740,13 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(sndrv_ctl_ioctl_array) \
$(kcmp_type_array) \
$(kvm_ioctl_array) \
- $(socket_ipproto_array) \
+ $(socket_arrays) \
+ $(sockaddr_arrays) \
$(vhost_virtio_ioctl_array) \
$(madvise_behavior_array) \
$(mmap_flags_array) \
+ $(mmap_prot_array) \
+ $(mremap_flags_array) \
$(mount_flags_array) \
$(move_mount_flags_array) \
$(perf_ioctl_array) \
@@ -702,7 +757,16 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
$(x86_arch_prctl_code_array) \
$(rename_flags_array) \
$(arch_errno_name_array) \
- $(sync_file_range_arrays)
+ $(sync_file_range_arrays) \
+ $(LIBAPI) \
+ $(LIBPERF) \
+ $(LIBSUBCMD) \
+ $(LIBSYMBOL) \
+ bpf-skel
+
+ifdef LIBBPF_STATIC
+prepare: $(LIBBPF)
+endif
$(OUTPUT)%.o: %.c prepare FORCE
$(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
@@ -738,6 +802,15 @@ $(OUTPUT)perf-read-vdsox32: perf-read-vdso.c util/find-map.c
$(QUIET_CC)$(CC) -mx32 $(filter -static,$(LDFLAGS)) -Wall -Werror -o $@ perf-read-vdso.c
endif
+$(OUTPUT)dlfilters/%.o: dlfilters/%.c include/perf/perf_dlfilter.h
+ $(Q)$(MKDIR) -p $(OUTPUT)dlfilters
+ $(QUIET_CC)$(CC) -c -Iinclude $(EXTRA_CFLAGS) -o $@ -fpic $<
+
+.SECONDARY: $(DLFILTERS:.so=.o)
+
+$(OUTPUT)dlfilters/%.so: $(OUTPUT)dlfilters/%.o
+ $(QUIET_LINK)$(CC) $(EXTRA_CFLAGS) -shared -o $@ $<
+
ifndef NO_JVMTI
LIBJVMTI_IN := $(OUTPUT)jvmti/jvmti-in.o
@@ -750,50 +823,50 @@ endif
$(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
-LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ) 'EXTRA_CFLAGS=$(EXTRA_CFLAGS)' 'LDFLAGS=$(LDFLAGS)'
-
-$(LIBTRACEEVENT): FORCE
- $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
-
-libtraceevent_plugins: FORCE
- $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
-
-$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
- $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)plugins/libtraceevent-dynamic-list
-
-$(LIBTRACEEVENT)-clean:
- $(call QUIET_CLEAN, libtraceevent)
- $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
-
-install-traceevent-plugins: libtraceevent_plugins
- $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
-
-$(LIBAPI): FORCE
- $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) $(OUTPUT)libapi.a
+$(LIBAPI): FORCE | $(LIBAPI_OUTPUT)
+ $(Q)$(MAKE) -C $(LIBAPI_DIR) O=$(LIBAPI_OUTPUT) \
+ DESTDIR=$(LIBAPI_DESTDIR) prefix= subdir= \
+ $@ install_headers
$(LIBAPI)-clean:
$(call QUIET_CLEAN, libapi)
- $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
+ $(Q)$(RM) -r -- $(LIBAPI_OUTPUT)
-$(LIBBPF): FORCE
- $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
+$(LIBBPF): FORCE | $(LIBBPF_OUTPUT)
+ $(Q)$(MAKE) -C $(LIBBPF_DIR) FEATURES_DUMP=$(FEATURE_DUMP_EXPORT) \
+ O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= subdir= \
+ $@ install_headers
$(LIBBPF)-clean:
$(call QUIET_CLEAN, libbpf)
- $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) clean >/dev/null
+ $(Q)$(RM) -r -- $(LIBBPF_OUTPUT)
-$(LIBPERF): FORCE
- $(Q)$(MAKE) -C $(LIBPERF_DIR) EXTRA_CFLAGS="$(LIBPERF_CFLAGS)" O=$(OUTPUT) $(OUTPUT)libperf.a
+$(LIBPERF): FORCE | $(LIBPERF_OUTPUT)
+ $(Q)$(MAKE) -C $(LIBPERF_DIR) O=$(LIBPERF_OUTPUT) \
+ DESTDIR=$(LIBPERF_DESTDIR) prefix= subdir= \
+ $@ install_headers
$(LIBPERF)-clean:
$(call QUIET_CLEAN, libperf)
- $(Q)$(MAKE) -C $(LIBPERF_DIR) O=$(OUTPUT) clean >/dev/null
+ $(Q)$(RM) -r -- $(LIBPERF_OUTPUT)
-$(LIBSUBCMD): FORCE
- $(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) $(OUTPUT)libsubcmd.a
+$(LIBSUBCMD): FORCE | $(LIBSUBCMD_OUTPUT)
+ $(Q)$(MAKE) -C $(LIBSUBCMD_DIR) O=$(LIBSUBCMD_OUTPUT) \
+ DESTDIR=$(LIBSUBCMD_DESTDIR) prefix= subdir= \
+ $@ install_headers
$(LIBSUBCMD)-clean:
- $(Q)$(MAKE) -C $(SUBCMD_DIR) O=$(OUTPUT) clean
+ $(call QUIET_CLEAN, libsubcmd)
+ $(Q)$(RM) -r -- $(LIBSUBCMD_OUTPUT)
+
+$(LIBSYMBOL): FORCE | $(LIBSYMBOL_OUTPUT)
+ $(Q)$(MAKE) -C $(LIBSYMBOL_DIR) O=$(LIBSYMBOL_OUTPUT) \
+ DESTDIR=$(LIBSYMBOL_DESTDIR) prefix= subdir= \
+ $@ install_headers
+
+$(LIBSYMBOL)-clean:
+ $(call QUIET_CLEAN, libsymbol)
+ $(Q)$(RM) -r -- $(LIBSYMBOL_OUTPUT)
help:
@echo 'Perf make targets:'
@@ -832,7 +905,7 @@ INSTALL_DOC_TARGETS += quick-install-doc quick-install-man quick-install-html
# 'make doc' should call 'make -C Documentation all'
$(DOC_TARGETS):
- $(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:doc=all)
+ $(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:doc=all) ASCIIDOC_EXTRA=$(ASCIIDOC_EXTRA)
TAG_FOLDERS= . ../lib ../include
TAG_FILES= ../../include/uapi/linux/perf_event.h
@@ -868,7 +941,7 @@ check: $(OUTPUT)common-cmds.h
### Installation rules
-ifndef NO_GTK2
+ifdef GTK2
install-gtk: $(OUTPUT)libperf-gtk.so
$(call QUIET_INSTALL, 'GTK UI') \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(libdir_SQ)'; \
@@ -881,7 +954,9 @@ install-tools: all install-gtk
$(call QUIET_INSTALL, binaries) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \
$(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \
- $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'
+ $(LN) '$(DESTDIR_SQ)$(bindir_SQ)/perf' '$(DESTDIR_SQ)$(bindir_SQ)/trace'; \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(includedir_SQ)/perf'; \
+ $(INSTALL) -m 644 include/perf/perf_dlfilter.h -t '$(DESTDIR_SQ)$(includedir_SQ)/perf'
ifndef NO_PERF_READ_VDSO32
$(call QUIET_INSTALL, perf-read-vdso32) \
$(INSTALL) $(OUTPUT)perf-read-vdso32 '$(DESTDIR_SQ)$(bindir_SQ)';
@@ -898,29 +973,24 @@ endif
$(call QUIET_INSTALL, libexec) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
ifndef NO_LIBBPF
- $(call QUIET_INSTALL, bpf-headers) \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
- $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'; \
- $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
- $(INSTALL) include/bpf/linux/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'
$(call QUIET_INSTALL, bpf-examples) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
- $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+ $(INSTALL) examples/bpf/*.c -m 644 -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
endif
$(call QUIET_INSTALL, perf-archive) \
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
- $(call QUIET_INSTALL, perf-with-kcore) \
- $(INSTALL) $(OUTPUT)perf-with-kcore -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ $(call QUIET_INSTALL, perf-iostat) \
+ $(INSTALL) $(OUTPUT)perf-iostat -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
ifndef NO_LIBAUDIT
$(call QUIET_INSTALL, strace/groups) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'; \
- $(INSTALL) trace/strace/groups/* -t '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'
+ $(INSTALL) trace/strace/groups/* -m 644 -t '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'
endif
ifndef NO_LIBPERL
$(call QUIET_INSTALL, perl-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
- $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
- $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'; \
+ $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/perl/*.pl -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'; \
$(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
endif
@@ -932,25 +1002,33 @@ ifndef NO_LIBPYTHON
$(INSTALL) scripts/python/*.py -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'; \
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
endif
+ $(call QUIET_INSTALL, dlfilters) \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/dlfilters'; \
+ $(INSTALL) $(DLFILTERS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/dlfilters';
$(call QUIET_INSTALL, perf_completion-script) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
- $(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
+ $(INSTALL) perf-completion.sh -m 644 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
$(call QUIET_INSTALL, perf-tip) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(tip_instdir_SQ)'; \
- $(INSTALL) Documentation/tips.txt -t '$(DESTDIR_SQ)$(tip_instdir_SQ)'
+ $(INSTALL) Documentation/tips.txt -m 644 -t '$(DESTDIR_SQ)$(tip_instdir_SQ)'
install-tests: all install-gtk
$(call QUIET_INSTALL, tests) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
- $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
+ $(INSTALL) tests/attr.py -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
+ $(INSTALL) tests/pe-file.exe* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
- $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
+ $(INSTALL) tests/attr/* -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
- $(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
+ $(INSTALL) tests/shell/lib/*.sh -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
+ $(INSTALL) tests/shell/lib/*.py -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
+ $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight' ; \
+ $(INSTALL) tests/shell/coresight/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/coresight'
+ $(Q)$(MAKE) -C tests/shell/coresight install-tests
-install-bin: install-tools install-tests install-traceevent-plugins
+install-bin: install-tools install-tests
install: install-bin try-install-man
@@ -959,36 +1037,70 @@ install-python_ext:
# 'make install-doc' should call 'make -C Documentation install'
$(INSTALL_DOC_TARGETS):
- $(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:-doc=)
+ $(Q)$(MAKE) -C $(DOC_DIR) O=$(OUTPUT) $(@:-doc=) ASCIIDOC_EXTRA=$(ASCIIDOC_EXTRA)
### Cleaning rules
-#
-# This is here, not in Makefile.config, because Makefile.config does
-# not get included for the clean target:
-#
-config-clean:
- $(call QUIET_CLEAN, config)
- $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ $(if $(OUTPUT),OUTPUT=$(OUTPUT)feature/,) clean >/dev/null
-
python-clean:
$(python-clean)
-clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBPERF)-clean config-clean fixdep-clean python-clean
- $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-with-kcore $(LANG_BINDINGS)
- $(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
+SKEL_OUT := $(abspath $(OUTPUT)util/bpf_skel)
+SKEL_TMP_OUT := $(abspath $(SKEL_OUT)/.tmp)
+SKELETONS := $(SKEL_OUT)/bpf_prog_profiler.skel.h
+SKELETONS += $(SKEL_OUT)/bperf_leader.skel.h $(SKEL_OUT)/bperf_follower.skel.h
+SKELETONS += $(SKEL_OUT)/bperf_cgroup.skel.h $(SKEL_OUT)/func_latency.skel.h
+SKELETONS += $(SKEL_OUT)/off_cpu.skel.h $(SKEL_OUT)/lock_contention.skel.h
+SKELETONS += $(SKEL_OUT)/kwork_trace.skel.h $(SKEL_OUT)/sample_filter.skel.h
+
+$(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_OUTPUT) $(LIBSYMBOL_OUTPUT):
+ $(Q)$(MKDIR) -p $@
+
+ifdef BUILD_BPF_SKEL
+BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool
+BPF_INCLUDE := -I$(SKEL_TMP_OUT)/.. -I$(LIBBPF_INCLUDE)
+
+$(BPFTOOL): | $(SKEL_TMP_OUT)
+ $(Q)CFLAGS= $(MAKE) -C ../bpf/bpftool \
+ OUTPUT=$(SKEL_TMP_OUT)/ bootstrap
+
+$(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT)
+ $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) \
+ -c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ && $(LLVM_STRIP) -g $@
+
+$(SKEL_OUT)/%.skel.h: $(SKEL_TMP_OUT)/%.bpf.o | $(BPFTOOL)
+ $(QUIET_GENSKEL)$(BPFTOOL) gen skeleton $< > $@
+
+bpf-skel: $(SKELETONS)
+
+.PRECIOUS: $(SKEL_TMP_OUT)/%.bpf.o
+
+else # BUILD_BPF_SKEL
+
+bpf-skel:
+
+endif # BUILD_BPF_SKEL
+
+bpf-skel-clean:
+ $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS)
+
+clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean
+ $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS)
+ $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)$(RM) $(OUTPUT).config-detected
- $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)pmu-events/jevents $(OUTPUT)$(LIBJVMTI).so
+ $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)$(LIBJVMTI).so
$(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
$(OUTPUT)util/intel-pt-decoder/inat-tables.c \
$(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c \
$(OUTPUT)pmu-events/pmu-events.c \
+ $(OUTPUT)pmu-events/metric_test.log \
$(OUTPUT)$(fadvise_advice_array) \
$(OUTPUT)$(fsconfig_arrays) \
$(OUTPUT)$(fsmount_arrays) \
$(OUTPUT)$(fspick_arrays) \
$(OUTPUT)$(madvise_behavior_array) \
$(OUTPUT)$(mmap_flags_array) \
+ $(OUTPUT)$(mmap_prot_array) \
+ $(OUTPUT)$(mremap_flags_array) \
$(OUTPUT)$(mount_flags_array) \
$(OUTPUT)$(move_mount_flags_array) \
$(OUTPUT)$(drm_ioctl_array) \
@@ -997,7 +1109,8 @@ clean:: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clea
$(OUTPUT)$(sndrv_pcm_ioctl_array) \
$(OUTPUT)$(kvm_ioctl_array) \
$(OUTPUT)$(kcmp_type_array) \
- $(OUTPUT)$(socket_ipproto_array) \
+ $(OUTPUT)$(socket_arrays) \
+ $(OUTPUT)$(sockaddr_arrays) \
$(OUTPUT)$(vhost_virtio_ioctl_array) \
$(OUTPUT)$(perf_ioctl_array) \
$(OUTPUT)$(prctl_option_array) \
@@ -1022,21 +1135,15 @@ else
@echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP"
endif
-#
-# Trick: if ../../.git does not exist - we are building out of tree for example,
-# then force version regeneration:
-#
-ifeq ($(wildcard ../../.git/HEAD),)
- GIT-HEAD-PHONY = ../../.git/HEAD
-else
- GIT-HEAD-PHONY =
-endif
FORCE:
.PHONY: all install clean config-clean strip install-gtk
.PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
-.PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE prepare
-.PHONY: libtraceevent_plugins archheaders
+.PHONY: .FORCE-PERF-VERSION-FILE TAGS tags cscope FORCE prepare
+.PHONY: archheaders python_perf_target
endif # force_fixdep
+
+# Delete partially updated (corrupted) files on error
+.DELETE_ON_ERROR:
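For context on the BUILD_BPF_SKEL rules above: each util/bpf_skel/*.bpf.c is compiled with clang into a .bpf.o, and the bootstrap bpftool turns that object into a *.skel.h header. As a hedged sketch of how such a generated skeleton is normally consumed ("counter" is a hypothetical object name used only for illustration, not one of the skeletons added here; the counter_bpf prefix follows bpftool's naming convention for counter.bpf.o):

/* Minimal sketch: consuming a bpftool-generated skeleton header. */
#include <stdio.h>
#include "counter.skel.h"	/* hypothetical output of 'bpftool gen skeleton' */

int main(void)
{
	struct counter_bpf *skel;

	skel = counter_bpf__open_and_load();	/* open the object and load it into the kernel */
	if (!skel) {
		fprintf(stderr, "failed to open/load BPF skeleton\n");
		return 1;
	}

	if (counter_bpf__attach(skel)) {	/* attach all programs in the object */
		fprintf(stderr, "failed to attach BPF skeleton\n");
		counter_bpf__destroy(skel);
		return 1;
	}

	/* ... read results via skel->maps.* or skel->bss ... */

	counter_bpf__destroy(skel);
	return 0;
}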
diff --git a/tools/perf/arch/arm/include/arch-tests.h b/tools/perf/arch/arm/include/arch-tests.h
index 90ec4c8cb880..452b3d904521 100644
--- a/tools/perf/arch/arm/include/arch-tests.h
+++ b/tools/perf/arch/arm/include/arch-tests.h
@@ -2,11 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-#endif
-
-extern struct test arch_tests[];
+extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/arm/include/perf_regs.h b/tools/perf/arch/arm/include/perf_regs.h
index ed20e0253e25..99a06550e25d 100644
--- a/tools/perf/arch/arm/include/perf_regs.h
+++ b/tools/perf/arch/arm/include/perf_regs.h
@@ -15,46 +15,4 @@ void perf_regs_load(u64 *regs);
#define PERF_REG_IP PERF_REG_ARM_PC
#define PERF_REG_SP PERF_REG_ARM_SP
-static inline const char *perf_reg_name(int id)
-{
- switch (id) {
- case PERF_REG_ARM_R0:
- return "r0";
- case PERF_REG_ARM_R1:
- return "r1";
- case PERF_REG_ARM_R2:
- return "r2";
- case PERF_REG_ARM_R3:
- return "r3";
- case PERF_REG_ARM_R4:
- return "r4";
- case PERF_REG_ARM_R5:
- return "r5";
- case PERF_REG_ARM_R6:
- return "r6";
- case PERF_REG_ARM_R7:
- return "r7";
- case PERF_REG_ARM_R8:
- return "r8";
- case PERF_REG_ARM_R9:
- return "r9";
- case PERF_REG_ARM_R10:
- return "r10";
- case PERF_REG_ARM_FP:
- return "fp";
- case PERF_REG_ARM_IP:
- return "ip";
- case PERF_REG_ARM_SP:
- return "sp";
- case PERF_REG_ARM_LR:
- return "lr";
- case PERF_REG_ARM_PC:
- return "pc";
- default:
- return NULL;
- }
-
- return NULL;
-}
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c
index 6848101a855f..69561111cc6f 100644
--- a/tools/perf/arch/arm/tests/arch-tests.c
+++ b/tools/perf/arch/arm/tests/arch-tests.c
@@ -3,18 +3,10 @@
#include "tests/tests.h"
#include "arch-tests.h"
-struct test arch_tests[] = {
+struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- {
- .desc = "DWARF unwind",
- .func = test__dwarf_unwind,
- },
+ &suite__dwarf_unwind,
#endif
- {
- .desc = "Vectors page",
- .func = test__vectors_page,
- },
- {
- .func = NULL,
- },
+ &suite__vectors_page,
+ NULL,
};
diff --git a/tools/perf/arch/arm/tests/dwarf-unwind.c b/tools/perf/arch/arm/tests/dwarf-unwind.c
index ccfa87055c4a..566fb6c0eae7 100644
--- a/tools/perf/arch/arm/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm/tests/dwarf-unwind.c
@@ -33,7 +33,7 @@ static int sample_ustack(struct perf_sample *sample,
return -1;
}
- stack_size = map->end - sp;
+ stack_size = map__end(map) - sp;
stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
memcpy(buf, (void *) sp, stack_size);
diff --git a/tools/perf/arch/arm/tests/vectors-page.c b/tools/perf/arch/arm/tests/vectors-page.c
index 7ffdd79971c8..55a835837466 100644
--- a/tools/perf/arch/arm/tests/vectors-page.c
+++ b/tools/perf/arch/arm/tests/vectors-page.c
@@ -9,8 +9,7 @@
#define VECTORS__MAP_NAME "[vectors]"
-int test__vectors_page(struct test *test __maybe_unused,
- int subtest __maybe_unused)
+static int test__vectors_page(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
void *start, *end;
@@ -22,3 +21,5 @@ int test__vectors_page(struct test *test __maybe_unused,
return TEST_OK;
}
+
+DEFINE_SUITE("Vectors page", vectors_page);
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index 0a6e75b8777a..adec6c9ee11d 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -4,16 +4,19 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <dirent.h>
#include <stdbool.h>
#include <linux/coresight-pmu.h>
#include <linux/zalloc.h>
+#include <api/fs/fs.h>
-#include "../../util/auxtrace.h"
-#include "../../util/debug.h"
-#include "../../util/evlist.h"
-#include "../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/debug.h"
+#include "../../../util/evlist.h"
+#include "../../../util/pmu.h"
#include "cs-etm.h"
#include "arm-spe.h"
+#include "hisi-ptt.h"
static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
{
@@ -50,43 +53,113 @@ static struct perf_pmu **find_all_arm_spe_pmus(int *nr_spes, int *err)
return arm_spe_pmus;
}
+static struct perf_pmu **find_all_hisi_ptt_pmus(int *nr_ptts, int *err)
+{
+ struct perf_pmu **hisi_ptt_pmus = NULL;
+ struct dirent *dent;
+ char path[PATH_MAX];
+ DIR *dir = NULL;
+ int idx = 0;
+
+ perf_pmu__event_source_devices_scnprintf(path, sizeof(path));
+ dir = opendir(path);
+ if (!dir) {
+ pr_err("can't read directory '%s'\n", path);
+ *err = -EINVAL;
+ return NULL;
+ }
+
+ while ((dent = readdir(dir))) {
+ if (strstr(dent->d_name, HISI_PTT_PMU_NAME))
+ (*nr_ptts)++;
+ }
+
+ if (!(*nr_ptts))
+ goto out;
+
+ hisi_ptt_pmus = zalloc(sizeof(struct perf_pmu *) * (*nr_ptts));
+ if (!hisi_ptt_pmus) {
+ pr_err("hisi_ptt alloc failed\n");
+ *err = -ENOMEM;
+ goto out;
+ }
+
+ rewinddir(dir);
+ while ((dent = readdir(dir))) {
+ if (strstr(dent->d_name, HISI_PTT_PMU_NAME) && idx < *nr_ptts) {
+ hisi_ptt_pmus[idx] = perf_pmu__find(dent->d_name);
+ if (hisi_ptt_pmus[idx])
+ idx++;
+ }
+ }
+
+out:
+ closedir(dir);
+ return hisi_ptt_pmus;
+}
+
+static struct perf_pmu *find_pmu_for_event(struct perf_pmu **pmus,
+ int pmu_nr, struct evsel *evsel)
+{
+ int i;
+
+ if (!pmus)
+ return NULL;
+
+ for (i = 0; i < pmu_nr; i++) {
+ if (evsel->core.attr.type == pmus[i]->type)
+ return pmus[i];
+ }
+
+ return NULL;
+}
+
struct auxtrace_record
*auxtrace_record__init(struct evlist *evlist, int *err)
{
- struct perf_pmu *cs_etm_pmu;
+ struct perf_pmu *cs_etm_pmu = NULL;
+ struct perf_pmu **arm_spe_pmus = NULL;
+ struct perf_pmu **hisi_ptt_pmus = NULL;
struct evsel *evsel;
- bool found_etm = false;
- bool found_spe = false;
- static struct perf_pmu **arm_spe_pmus = NULL;
- static int nr_spes = 0;
- int i = 0;
+ struct perf_pmu *found_etm = NULL;
+ struct perf_pmu *found_spe = NULL;
+ struct perf_pmu *found_ptt = NULL;
+ int auxtrace_event_cnt = 0;
+ int nr_spes = 0;
+ int nr_ptts = 0;
if (!evlist)
return NULL;
cs_etm_pmu = perf_pmu__find(CORESIGHT_ETM_PMU_NAME);
-
- if (!arm_spe_pmus)
- arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
+ arm_spe_pmus = find_all_arm_spe_pmus(&nr_spes, err);
+ hisi_ptt_pmus = find_all_hisi_ptt_pmus(&nr_ptts, err);
evlist__for_each_entry(evlist, evsel) {
- if (cs_etm_pmu &&
- evsel->core.attr.type == cs_etm_pmu->type)
- found_etm = true;
-
- if (!nr_spes)
- continue;
-
- for (i = 0; i < nr_spes; i++) {
- if (evsel->core.attr.type == arm_spe_pmus[i]->type) {
- found_spe = true;
- break;
- }
- }
+ if (cs_etm_pmu && !found_etm)
+ found_etm = find_pmu_for_event(&cs_etm_pmu, 1, evsel);
+
+ if (arm_spe_pmus && !found_spe)
+ found_spe = find_pmu_for_event(arm_spe_pmus, nr_spes, evsel);
+
+ if (hisi_ptt_pmus && !found_ptt)
+ found_ptt = find_pmu_for_event(hisi_ptt_pmus, nr_ptts, evsel);
}
- if (found_etm && found_spe) {
- pr_err("Concurrent ARM Coresight ETM and SPE operation not currently supported\n");
+ free(arm_spe_pmus);
+ free(hisi_ptt_pmus);
+
+ if (found_etm)
+ auxtrace_event_cnt++;
+
+ if (found_spe)
+ auxtrace_event_cnt++;
+
+ if (found_ptt)
+ auxtrace_event_cnt++;
+
+ if (auxtrace_event_cnt > 1) {
+ pr_err("Concurrent AUX trace operation not currently supported\n");
*err = -EOPNOTSUPP;
return NULL;
}
@@ -96,7 +169,10 @@ struct auxtrace_record
#if defined(__aarch64__)
if (found_spe)
- return arm_spe_recording_init(err, arm_spe_pmus[i]);
+ return arm_spe_recording_init(err, found_spe);
+
+ if (found_ptt)
+ return hisi_ptt_recording_init(err, found_ptt);
#endif
/*
@@ -108,3 +184,35 @@ struct auxtrace_record
*err = 0;
return NULL;
}
+
+#if defined(__arm__)
+u64 compat_auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
+{
+ struct perf_event_mmap_page *pc = mm->userpg;
+ u64 result;
+
+ __asm__ __volatile__(
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&pc->aux_head), "Qo" (pc->aux_head)
+ );
+
+ return result;
+}
+
+int compat_auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
+{
+ struct perf_event_mmap_page *pc = mm->userpg;
+
+ /* Ensure all reads are done before we write the tail out */
+ smp_mb();
+
+ __asm__ __volatile__(
+" strd %2, %H2, [%1]"
+ : "=Qo" (pc->aux_tail)
+ : "r" (&pc->aux_tail), "r" (tail)
+ );
+
+ return 0;
+}
+#endif
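The compat helpers above let 32-bit Arm read and update the 64-bit aux_head/aux_tail fields atomically (ldrd/strd), with smp_mb() ensuring every read of the AUX data completes before the new tail is published. As a hedged, self-contained sketch of the same consume-then-publish ordering (a toy ring using C11 atomics, not the perf implementation):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

/* Toy single-producer/single-consumer ring mirroring the aux_head/aux_tail
 * protocol: read head with acquire semantics, consume, then publish tail
 * with release semantics (the smp_mb() above plays that same role). */
struct ring {
	_Atomic uint64_t head;		/* advanced by the producer */
	_Atomic uint64_t tail;		/* advanced by the consumer */
	unsigned char data[RING_SIZE];
};

static void consume(struct ring *r)
{
	uint64_t head = atomic_load_explicit(&r->head, memory_order_acquire);
	uint64_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	while (tail != head) {
		printf("consumed %d\n", r->data[tail % RING_SIZE]);
		tail++;
	}

	/* All reads of data[] are done before the tail store becomes visible. */
	atomic_store_explicit(&r->tail, tail, memory_order_release);
}

int main(void)
{
	struct ring r = { .head = 3, .tail = 0, .data = { 1, 2, 3 } };

	consume(&r);
	return 0;
}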
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 941f814820b8..77cb03e6ff87 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -16,18 +16,19 @@
#include <linux/zalloc.h>
#include "cs-etm.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/auxtrace.h"
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/evsel_config.h"
-#include "../../util/pmu.h"
-#include "../../util/cs-etm.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/perf_api_probe.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/pmu.h"
+#include "../../../util/cs-etm.h"
#include <internal/lib.h> // page_size
-#include "../../util/session.h"
+#include "../../../util/session.h"
#include <errno.h>
#include <stdlib.h>
@@ -37,8 +38,6 @@ struct cs_etm_recording {
struct auxtrace_record itr;
struct perf_pmu *cs_etm_pmu;
struct evlist *evlist;
- int wrapped_cnt;
- bool *wrapped;
bool snapshot_mode;
size_t snapshot_size;
};
@@ -48,79 +47,119 @@ static const char *metadata_etmv3_ro[CS_ETM_PRIV_MAX] = {
[CS_ETM_ETMIDR] = "mgmt/etmidr",
};
-static const char *metadata_etmv4_ro[CS_ETMV4_PRIV_MAX] = {
+static const char * const metadata_etmv4_ro[] = {
[CS_ETMV4_TRCIDR0] = "trcidr/trcidr0",
[CS_ETMV4_TRCIDR1] = "trcidr/trcidr1",
[CS_ETMV4_TRCIDR2] = "trcidr/trcidr2",
[CS_ETMV4_TRCIDR8] = "trcidr/trcidr8",
[CS_ETMV4_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
+ [CS_ETMV4_TS_SOURCE] = "ts_source",
+};
+
+static const char * const metadata_ete_ro[] = {
+ [CS_ETE_TRCIDR0] = "trcidr/trcidr0",
+ [CS_ETE_TRCIDR1] = "trcidr/trcidr1",
+ [CS_ETE_TRCIDR2] = "trcidr/trcidr2",
+ [CS_ETE_TRCIDR8] = "trcidr/trcidr8",
+ [CS_ETE_TRCAUTHSTATUS] = "mgmt/trcauthstatus",
+ [CS_ETE_TRCDEVARCH] = "mgmt/trcdevarch",
+ [CS_ETE_TS_SOURCE] = "ts_source",
};
static bool cs_etm_is_etmv4(struct auxtrace_record *itr, int cpu);
+static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu);
-static int cs_etm_set_context_id(struct auxtrace_record *itr,
- struct evsel *evsel, int cpu)
+static int cs_etm_validate_context_id(struct auxtrace_record *itr,
+ struct evsel *evsel, int cpu)
{
- struct cs_etm_recording *ptr;
- struct perf_pmu *cs_etm_pmu;
+ struct cs_etm_recording *ptr =
+ container_of(itr, struct cs_etm_recording, itr);
+ struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
char path[PATH_MAX];
- int err = -EINVAL;
+ int err;
u32 val;
+ u64 contextid =
+ evsel->core.attr.config &
+ (perf_pmu__format_bits(&cs_etm_pmu->format, "contextid1") |
+ perf_pmu__format_bits(&cs_etm_pmu->format, "contextid2"));
- ptr = container_of(itr, struct cs_etm_recording, itr);
- cs_etm_pmu = ptr->cs_etm_pmu;
+ if (!contextid)
+ return 0;
- if (!cs_etm_is_etmv4(itr, cpu))
- goto out;
+ /* Not supported in etmv3 */
+ if (!cs_etm_is_etmv4(itr, cpu)) {
+ pr_err("%s: contextid not supported in ETMv3, disable with %s/contextid=0/\n",
+ CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
+ return -EINVAL;
+ }
- /* Get a handle on TRCIRD2 */
+ /* Get a handle on TRCIDR2 */
snprintf(path, PATH_MAX, "cpu%d/%s",
cpu, metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
err = perf_pmu__scan_file(cs_etm_pmu, path, "%x", &val);
/* There was a problem reading the file, bailing out */
if (err != 1) {
- pr_err("%s: can't read file %s\n",
- CORESIGHT_ETM_PMU_NAME, path);
- goto out;
+ pr_err("%s: can't read file %s\n", CORESIGHT_ETM_PMU_NAME,
+ path);
+ return err;
}
- /*
- * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID tracing
- * is supported:
- * 0b00000 Context ID tracing is not supported.
- * 0b00100 Maximum of 32-bit Context ID size.
- * All other values are reserved.
- */
- val = BMVAL(val, 5, 9);
- if (!val || val != 0x4) {
- err = -EINVAL;
- goto out;
+ if (contextid &
+ perf_pmu__format_bits(&cs_etm_pmu->format, "contextid1")) {
+ /*
+ * TRCIDR2.CIDSIZE, bit [9-5], indicates whether contextID
+ * tracing is supported:
+ * 0b00000 Context ID tracing is not supported.
+ * 0b00100 Maximum of 32-bit Context ID size.
+ * All other values are reserved.
+ */
+ val = BMVAL(val, 5, 9);
+ if (!val || val != 0x4) {
+ pr_err("%s: CONTEXTIDR_EL1 isn't supported, disable with %s/contextid1=0/\n",
+ CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
+ return -EINVAL;
+ }
}
- /* All good, let the kernel know */
- evsel->core.attr.config |= (1 << ETM_OPT_CTXTID);
- err = 0;
-
-out:
+ if (contextid &
+ perf_pmu__format_bits(&cs_etm_pmu->format, "contextid2")) {
+ /*
+ * TRCIDR2.VMIDOPT[30:29] != 0 and
+ * TRCIDR2.VMIDSIZE[14:10] == 0b00100 (32bit virtual contextid)
+ * We can't support CONTEXTIDR in VMID if the size of the
+ * virtual context id is < 32bit.
+ * Any value of VMIDSIZE >= 4 (i.e., >= 32bit) is fine for us.
+ */
+ if (!BMVAL(val, 29, 30) || BMVAL(val, 10, 14) < 4) {
+ pr_err("%s: CONTEXTIDR_EL2 isn't supported, disable with %s/contextid2=0/\n",
+ CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
+ return -EINVAL;
+ }
+ }
- return err;
+ return 0;
}
-static int cs_etm_set_timestamp(struct auxtrace_record *itr,
- struct evsel *evsel, int cpu)
+static int cs_etm_validate_timestamp(struct auxtrace_record *itr,
+ struct evsel *evsel, int cpu)
{
- struct cs_etm_recording *ptr;
- struct perf_pmu *cs_etm_pmu;
+ struct cs_etm_recording *ptr =
+ container_of(itr, struct cs_etm_recording, itr);
+ struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
char path[PATH_MAX];
- int err = -EINVAL;
+ int err;
u32 val;
- ptr = container_of(itr, struct cs_etm_recording, itr);
- cs_etm_pmu = ptr->cs_etm_pmu;
+ if (!(evsel->core.attr.config &
+ perf_pmu__format_bits(&cs_etm_pmu->format, "timestamp")))
+ return 0;
- if (!cs_etm_is_etmv4(itr, cpu))
- goto out;
+ if (!cs_etm_is_etmv4(itr, cpu)) {
+ pr_err("%s: timestamp not supported in ETMv3, disable with %s/timestamp=0/\n",
+ CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
+ return -EINVAL;
+ }
/* Get a handle on TRCIRD0 */
snprintf(path, PATH_MAX, "cpu%d/%s",
@@ -131,7 +170,7 @@ static int cs_etm_set_timestamp(struct auxtrace_record *itr,
if (err != 1) {
pr_err("%s: can't read file %s\n",
CORESIGHT_ETM_PMU_NAME, path);
- goto out;
+ return err;
}
/*
@@ -143,43 +182,39 @@ static int cs_etm_set_timestamp(struct auxtrace_record *itr,
*/
val &= GENMASK(28, 24);
if (!val) {
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- /* All good, let the kernel know */
- evsel->core.attr.config |= (1 << ETM_OPT_TS);
- err = 0;
-
-out:
- return err;
+ return 0;
}
-static int cs_etm_set_option(struct auxtrace_record *itr,
- struct evsel *evsel, u32 option)
+/*
+ * Check whether the requested timestamp and contextid options should be
+ * available on all requested CPUs and if not, tell the user how to override.
+ * The kernel will silently disable any unavailable options so a warning here
+ * first is better. In theory the kernel could still disable the option for
+ * some other reason so this is best effort only.
+ */
+static int cs_etm_validate_config(struct auxtrace_record *itr,
+ struct evsel *evsel)
{
int i, err = -EINVAL;
- struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
+ struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
/* Set option of each CPU we have */
- for (i = 0; i < cpu__max_cpu(); i++) {
- if (!cpu_map__has(event_cpus, i) ||
- !cpu_map__has(online_cpus, i))
+ for (i = 0; i < cpu__max_cpu().cpu; i++) {
+ struct perf_cpu cpu = { .cpu = i, };
+
+ if (!perf_cpu_map__has(event_cpus, cpu) ||
+ !perf_cpu_map__has(online_cpus, cpu))
continue;
- if (option & ETM_OPT_CTXTID) {
- err = cs_etm_set_context_id(itr, evsel, i);
- if (err)
- goto out;
- }
- if (option & ETM_OPT_TS) {
- err = cs_etm_set_timestamp(itr, evsel, i);
- if (err)
- goto out;
- }
- if (option & ~(ETM_OPT_CTXTID | ETM_OPT_TS))
- /* Nothing else is currently supported */
+ err = cs_etm_validate_context_id(itr, evsel, i);
+ if (err)
+ goto out;
+ err = cs_etm_validate_timestamp(itr, evsel, i);
+ if (err)
goto out;
}
@@ -215,7 +250,7 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
struct evsel *evsel)
{
char msg[BUFSIZ], path[PATH_MAX], *sink;
- struct perf_evsel_config_term *term;
+ struct evsel_config_term *term;
int ret = -EINVAL;
u32 hash;
@@ -223,7 +258,7 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
return 0;
list_for_each_entry(term, &evsel->config_terms, list) {
- if (term->type != PERF_EVSEL__CONFIG_TERM_DRV_CFG)
+ if (term->type != EVSEL__CONFIG_TERM_DRV_CFG)
continue;
sink = term->val.str;
@@ -231,9 +266,15 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
ret = perf_pmu__scan_file(pmu, path, "%x", &hash);
if (ret != 1) {
- pr_err("failed to set sink \"%s\" on event %s with %d (%s)\n",
- sink, perf_evsel__name(evsel), errno,
- str_error_r(errno, msg, sizeof(msg)));
+ if (errno == ENOENT)
+ pr_err("Couldn't find sink \"%s\" on event %s\n"
+ "Missing kernel or device support?\n\n"
+ "Hint: An appropriate sink will be picked automatically if one isn't specified.\n",
+ sink, evsel__name(evsel));
+ else
+ pr_err("Failed to set sink \"%s\" on event %s with %d (%s)\n",
+ sink, evsel__name(evsel), errno,
+ str_error_r(errno, msg, sizeof(msg)));
return ret;
}
@@ -242,10 +283,10 @@ static int cs_etm_set_sink_attr(struct perf_pmu *pmu,
}
/*
- * No sink was provided on the command line - for _now_ treat
- * this as an error.
+ * No sink was provided on the command line - allow the CoreSight
+ * system to look for a default
*/
- return ret;
+ return 0;
}
static int cs_etm_recording_options(struct auxtrace_record *itr,
@@ -257,16 +298,10 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct evsel *evsel, *cs_etm_evsel = NULL;
- struct perf_cpu_map *cpus = evlist->core.cpus;
+ struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
int err = 0;
- ptr->evlist = evlist;
- ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
-
- if (perf_can_record_switch_events())
- opts->record_switch_events = true;
-
evlist__for_each_entry(evlist, evsel) {
if (evsel->core.attr.type == cs_etm_pmu->type) {
if (cs_etm_evsel) {
@@ -274,10 +309,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
CORESIGHT_ETM_PMU_NAME);
return -EINVAL;
}
- evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = 1;
cs_etm_evsel = evsel;
- opts->full_auxtrace = true;
}
}
@@ -285,6 +317,16 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
if (!cs_etm_evsel)
return 0;
+ ptr->evlist = evlist;
+ ptr->snapshot_mode = opts->auxtrace_snapshot_mode;
+
+ if (!record_opts__no_switch_events(opts) &&
+ perf_can_record_switch_events())
+ opts->record_switch_events = true;
+
+ cs_etm_evsel->needs_auxtrace_mmap = true;
+ opts->full_auxtrace = true;
+
ret = cs_etm_set_sink_attr(cs_etm_pmu, cs_etm_evsel);
if (ret)
return ret;
@@ -337,7 +379,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
}
- /* Snapshost size can't be bigger than the auxtrace area */
+ /* Snapshot size can't be bigger than the auxtrace area */
if (opts->auxtrace_snapshot_size >
opts->auxtrace_mmap_pages * (size_t)page_size) {
pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
@@ -354,8 +396,8 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
}
}
- /* We are in full trace mode but '-m,xyz' wasn't specified */
- if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ /* Buffer sizes weren't specified with '-m,xyz' so give some defaults */
+ if (!opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
@@ -363,26 +405,6 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
if (opts->mmap_pages == UINT_MAX)
opts->mmap_pages = KiB(256) / page_size;
}
-
- }
-
- /* Validate auxtrace_mmap_pages provided by user */
- if (opts->auxtrace_mmap_pages) {
- unsigned int max_page = (KiB(128) / page_size);
- size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
-
- if (!privileged &&
- opts->auxtrace_mmap_pages > max_page) {
- opts->auxtrace_mmap_pages = max_page;
- pr_err("auxtrace too big, truncating to %d\n",
- max_page);
- }
-
- if (!is_power_of_2(sz)) {
- pr_err("Invalid mmap size for %s: must be a power of 2\n",
- CORESIGHT_ETM_PMU_NAME);
- return -EINVAL;
- }
}
if (opts->auxtrace_snapshot_mode)
@@ -393,41 +415,39 @@ static int cs_etm_recording_options(struct auxtrace_record *itr,
* To obtain the auxtrace buffer file descriptor, the auxtrace
* event must come first.
*/
- perf_evlist__to_front(evlist, cs_etm_evsel);
+ evlist__to_front(evlist, cs_etm_evsel);
/*
- * In the case of per-cpu mmaps, we need the CPU on the
- * AUX event. We also need the contextID in order to be notified
+ * Get the CPU on the sample - needed to associate the trace ID in the
+ * AUX_OUTPUT_HW_ID event, and for the AUX event with per-cpu mmaps.
+ */
+ evsel__set_sample_bit(cs_etm_evsel, CPU);
+
+ /*
+ * Also, in the case of per-cpu mmaps, the contextID is needed in order to be notified
* when a context switch happened.
*/
if (!perf_cpu_map__empty(cpus)) {
- perf_evsel__set_sample_bit(cs_etm_evsel, CPU);
-
- err = cs_etm_set_option(itr, cs_etm_evsel,
- ETM_OPT_CTXTID | ETM_OPT_TS);
- if (err)
- goto out;
+ evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
+ "timestamp", 1);
+ evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel,
+ "contextid", 1);
}
/* Add dummy event to keep tracking */
- if (opts->full_auxtrace) {
- struct evsel *tracking_evsel;
-
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- goto out;
-
- tracking_evsel = evlist__last(evlist);
- perf_evlist__set_tracking_event(evlist, tracking_evsel);
-
- tracking_evsel->core.attr.freq = 0;
- tracking_evsel->core.attr.sample_period = 1;
+ err = parse_event(evlist, "dummy:u");
+ if (err)
+ goto out;
+ evsel = evlist__last(evlist);
+ evlist__set_tracking_event(evlist, evsel);
+ evsel->core.attr.freq = 0;
+ evsel->core.attr.sample_period = 1;
- /* In per-cpu case, always need the time of mmap events etc */
- if (!perf_cpu_map__empty(cpus))
- perf_evsel__set_sample_bit(tracking_evsel, TIME);
- }
+ /* In per-cpu case, always need the time of mmap events etc */
+ if (!perf_cpu_map__empty(cpus))
+ evsel__set_sample_bit(evsel, TIME);
+ err = cs_etm_validate_config(itr, cs_etm_evsel);
out:
return err;
}
@@ -483,6 +503,11 @@ static u64 cs_etmv4_get_config(struct auxtrace_record *itr)
config |= BIT(ETM4_CFG_BIT_TS);
if (config_opts & BIT(ETM_OPT_RETSTK))
config |= BIT(ETM4_CFG_BIT_RETSTK);
+ if (config_opts & BIT(ETM_OPT_CTXTID2))
+ config |= BIT(ETM4_CFG_BIT_VMID) |
+ BIT(ETM4_CFG_BIT_VMID_OPT);
+ if (config_opts & BIT(ETM_OPT_BRANCH_BROADCAST))
+ config |= BIT(ETM4_CFG_BIT_BB);
return config;
}
@@ -492,29 +517,37 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
struct evlist *evlist __maybe_unused)
{
int i;
- int etmv3 = 0, etmv4 = 0;
- struct perf_cpu_map *event_cpus = evlist->core.cpus;
+ int etmv3 = 0, etmv4 = 0, ete = 0;
+ struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
/* cpu map is not empty, we have specific CPUs to work with */
if (!perf_cpu_map__empty(event_cpus)) {
- for (i = 0; i < cpu__max_cpu(); i++) {
- if (!cpu_map__has(event_cpus, i) ||
- !cpu_map__has(online_cpus, i))
+ for (i = 0; i < cpu__max_cpu().cpu; i++) {
+ struct perf_cpu cpu = { .cpu = i, };
+
+ if (!perf_cpu_map__has(event_cpus, cpu) ||
+ !perf_cpu_map__has(online_cpus, cpu))
continue;
- if (cs_etm_is_etmv4(itr, i))
+ if (cs_etm_is_ete(itr, i))
+ ete++;
+ else if (cs_etm_is_etmv4(itr, i))
etmv4++;
else
etmv3++;
}
} else {
/* get configuration for all CPUs in the system */
- for (i = 0; i < cpu__max_cpu(); i++) {
- if (!cpu_map__has(online_cpus, i))
+ for (i = 0; i < cpu__max_cpu().cpu; i++) {
+ struct perf_cpu cpu = { .cpu = i, };
+
+ if (!perf_cpu_map__has(online_cpus, cpu))
continue;
- if (cs_etm_is_etmv4(itr, i))
+ if (cs_etm_is_ete(itr, i))
+ ete++;
+ else if (cs_etm_is_etmv4(itr, i))
etmv4++;
else
etmv3++;
@@ -524,6 +557,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused,
perf_cpu_map__put(online_cpus);
return (CS_ETM_HEADER_SIZE +
+ (ete * CS_ETE_PRIV_SIZE) +
(etmv4 * CS_ETMV4_PRIV_SIZE) +
(etmv3 * CS_ETMV3_PRIV_SIZE));
}
@@ -566,52 +600,160 @@ static int cs_etm_get_ro(struct perf_pmu *pmu, int cpu, const char *path)
return val;
}
+static int cs_etm_get_ro_signed(struct perf_pmu *pmu, int cpu, const char *path)
+{
+ char pmu_path[PATH_MAX];
+ int scan;
+ int val = 0;
+
+ /* Get RO metadata from sysfs */
+ snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
+
+ scan = perf_pmu__scan_file(pmu, pmu_path, "%d", &val);
+ if (scan != 1)
+ pr_err("%s: error reading: %s\n", __func__, pmu_path);
+
+ return val;
+}
+
+static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, int cpu, const char *path)
+{
+ char pmu_path[PATH_MAX];
+
+ /* Get RO metadata from sysfs */
+ snprintf(pmu_path, PATH_MAX, "cpu%d/%s", cpu, path);
+
+ return perf_pmu__file_exists(pmu, pmu_path);
+}
+
+#define TRCDEVARCH_ARCHPART_SHIFT 0
+#define TRCDEVARCH_ARCHPART_MASK GENMASK(11, 0)
+#define TRCDEVARCH_ARCHPART(x) (((x) & TRCDEVARCH_ARCHPART_MASK) >> TRCDEVARCH_ARCHPART_SHIFT)
+
+#define TRCDEVARCH_ARCHVER_SHIFT 12
+#define TRCDEVARCH_ARCHVER_MASK GENMASK(15, 12)
+#define TRCDEVARCH_ARCHVER(x) (((x) & TRCDEVARCH_ARCHVER_MASK) >> TRCDEVARCH_ARCHVER_SHIFT)
+
+static bool cs_etm_is_ete(struct auxtrace_record *itr, int cpu)
+{
+ struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
+ struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
+ int trcdevarch;
+
+ if (!cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]))
+ return false;
+
+ trcdevarch = cs_etm_get_ro(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TRCDEVARCH]);
+ /*
+ * ETE if ARCHVER is 5 (ARCHVER is 4 for ETM) and ARCHPART is 0xA13.
+ * See ETM_DEVARCH_ETE_ARCH in coresight-etm4x.h
+ */
+ return TRCDEVARCH_ARCHVER(trcdevarch) == 5 && TRCDEVARCH_ARCHPART(trcdevarch) == 0xA13;
+}
+
+static void cs_etm_save_etmv4_header(__u64 data[], struct auxtrace_record *itr, int cpu)
+{
+ struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
+ struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
+
+ /* Get trace configuration register */
+ data[CS_ETMV4_TRCCONFIGR] = cs_etmv4_get_config(itr);
+ /* traceID set to legacy version, in case a newer perf runs on an older system */
+ data[CS_ETMV4_TRCTRACEIDR] =
+ CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
+
+ /* Get read-only information from sysFS */
+ data[CS_ETMV4_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
+ data[CS_ETMV4_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
+ data[CS_ETMV4_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
+ data[CS_ETMV4_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
+ data[CS_ETMV4_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_etmv4_ro[CS_ETMV4_TRCAUTHSTATUS]);
+
+ /* Kernels older than 5.19 may not expose ts_source */
+ if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]))
+ data[CS_ETMV4_TS_SOURCE] = (__u64) cs_etm_get_ro_signed(cs_etm_pmu, cpu,
+ metadata_etmv4_ro[CS_ETMV4_TS_SOURCE]);
+ else {
+ pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
+ cpu);
+ data[CS_ETMV4_TS_SOURCE] = (__u64) -1;
+ }
+}
+
+static void cs_etm_save_ete_header(__u64 data[], struct auxtrace_record *itr, int cpu)
+{
+ struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr);
+ struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
+
+ /* Get trace configuration register */
+ data[CS_ETE_TRCCONFIGR] = cs_etmv4_get_config(itr);
+ /* traceID set to legacy version, in case a newer perf runs on an older system */
+ data[CS_ETE_TRCTRACEIDR] =
+ CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
+
+ /* Get read-only information from sysFS */
+ data[CS_ETE_TRCIDR0] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_ete_ro[CS_ETE_TRCIDR0]);
+ data[CS_ETE_TRCIDR1] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_ete_ro[CS_ETE_TRCIDR1]);
+ data[CS_ETE_TRCIDR2] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_ete_ro[CS_ETE_TRCIDR2]);
+ data[CS_ETE_TRCIDR8] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_ete_ro[CS_ETE_TRCIDR8]);
+ data[CS_ETE_TRCAUTHSTATUS] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_ete_ro[CS_ETE_TRCAUTHSTATUS]);
+ /* ETE uses the same registers as ETMv4 plus TRCDEVARCH */
+ data[CS_ETE_TRCDEVARCH] = cs_etm_get_ro(cs_etm_pmu, cpu,
+ metadata_ete_ro[CS_ETE_TRCDEVARCH]);
+
+ /* Kernels older than 5.19 may not expose ts_source */
+ if (cs_etm_pmu_path_exists(cs_etm_pmu, cpu, metadata_ete_ro[CS_ETE_TS_SOURCE]))
+ data[CS_ETE_TS_SOURCE] = (__u64) cs_etm_get_ro_signed(cs_etm_pmu, cpu,
+ metadata_ete_ro[CS_ETE_TS_SOURCE]);
+ else {
+ pr_debug3("[%03d] pmu file 'ts_source' not found. Fallback to safe value (-1)\n",
+ cpu);
+ data[CS_ETE_TS_SOURCE] = (__u64) -1;
+ }
+}
+
static void cs_etm_get_metadata(int cpu, u32 *offset,
struct auxtrace_record *itr,
struct perf_record_auxtrace_info *info)
{
- u32 increment;
+ u32 increment, nr_trc_params;
u64 magic;
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
/* first see what kind of tracer this cpu is affined to */
- if (cs_etm_is_etmv4(itr, cpu)) {
+ if (cs_etm_is_ete(itr, cpu)) {
+ magic = __perf_cs_ete_magic;
+ cs_etm_save_ete_header(&info->priv[*offset], itr, cpu);
+
+ /* How much space was used */
+ increment = CS_ETE_PRIV_MAX;
+ nr_trc_params = CS_ETE_PRIV_MAX - CS_ETM_COMMON_BLK_MAX_V1;
+ } else if (cs_etm_is_etmv4(itr, cpu)) {
magic = __perf_cs_etmv4_magic;
- /* Get trace configuration register */
- info->priv[*offset + CS_ETMV4_TRCCONFIGR] =
- cs_etmv4_get_config(itr);
- /* Get traceID from the framework */
- info->priv[*offset + CS_ETMV4_TRCTRACEIDR] =
- coresight_get_trace_id(cpu);
- /* Get read-only information from sysFS */
- info->priv[*offset + CS_ETMV4_TRCIDR0] =
- cs_etm_get_ro(cs_etm_pmu, cpu,
- metadata_etmv4_ro[CS_ETMV4_TRCIDR0]);
- info->priv[*offset + CS_ETMV4_TRCIDR1] =
- cs_etm_get_ro(cs_etm_pmu, cpu,
- metadata_etmv4_ro[CS_ETMV4_TRCIDR1]);
- info->priv[*offset + CS_ETMV4_TRCIDR2] =
- cs_etm_get_ro(cs_etm_pmu, cpu,
- metadata_etmv4_ro[CS_ETMV4_TRCIDR2]);
- info->priv[*offset + CS_ETMV4_TRCIDR8] =
- cs_etm_get_ro(cs_etm_pmu, cpu,
- metadata_etmv4_ro[CS_ETMV4_TRCIDR8]);
- info->priv[*offset + CS_ETMV4_TRCAUTHSTATUS] =
- cs_etm_get_ro(cs_etm_pmu, cpu,
- metadata_etmv4_ro
- [CS_ETMV4_TRCAUTHSTATUS]);
+ cs_etm_save_etmv4_header(&info->priv[*offset], itr, cpu);
/* How much space was used */
increment = CS_ETMV4_PRIV_MAX;
+ nr_trc_params = CS_ETMV4_PRIV_MAX - CS_ETMV4_TRCCONFIGR;
} else {
magic = __perf_cs_etmv3_magic;
/* Get configuration register */
info->priv[*offset + CS_ETM_ETMCR] = cs_etm_get_config(itr);
- /* Get traceID from the framework */
+ /* traceID set to legacy value, in case a newer perf runs on an older system */
info->priv[*offset + CS_ETM_ETMTRACEIDR] =
- coresight_get_trace_id(cpu);
+ CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) | CORESIGHT_TRACE_ID_UNUSED_FLAG;
/* Get read-only information from sysFS */
info->priv[*offset + CS_ETM_ETMCCER] =
cs_etm_get_ro(cs_etm_pmu, cpu,
@@ -622,11 +764,13 @@ static void cs_etm_get_metadata(int cpu, u32 *offset,
/* How much space was used */
increment = CS_ETM_PRIV_MAX;
+ nr_trc_params = CS_ETM_PRIV_MAX - CS_ETM_ETMCR;
}
/* Build generic header portion */
info->priv[*offset + CS_ETM_MAGIC] = magic;
info->priv[*offset + CS_ETM_CPU] = cpu;
+ info->priv[*offset + CS_ETM_NR_TRC_PARAMS] = nr_trc_params;
/* Where the next CPU entry should start from */
*offset += increment;
}
@@ -640,7 +784,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
u32 offset;
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
- struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
+ struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
@@ -658,8 +802,10 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
} else {
/* Make sure all specified CPUs are online */
for (i = 0; i < perf_cpu_map__nr(event_cpus); i++) {
- if (cpu_map__has(event_cpus, i) &&
- !cpu_map__has(online_cpus, i))
+ struct perf_cpu cpu = { .cpu = i, };
+
+ if (perf_cpu_map__has(event_cpus, cpu) &&
+ !perf_cpu_map__has(online_cpus, cpu))
return -EINVAL;
}
@@ -672,147 +818,21 @@ static int cs_etm_info_fill(struct auxtrace_record *itr,
/* First fill out the session header */
info->type = PERF_AUXTRACE_CS_ETM;
- info->priv[CS_HEADER_VERSION_0] = 0;
+ info->priv[CS_HEADER_VERSION] = CS_HEADER_CURRENT_VERSION;
info->priv[CS_PMU_TYPE_CPUS] = type << 32;
info->priv[CS_PMU_TYPE_CPUS] |= nr_cpu;
info->priv[CS_ETM_SNAPSHOT] = ptr->snapshot_mode;
offset = CS_ETM_SNAPSHOT + 1;
- for (i = 0; i < cpu__max_cpu() && offset < priv_size; i++)
- if (cpu_map__has(cpu_map, i))
- cs_etm_get_metadata(i, &offset, itr, info);
-
- perf_cpu_map__put(online_cpus);
-
- return 0;
-}
-
-static int cs_etm_alloc_wrapped_array(struct cs_etm_recording *ptr, int idx)
-{
- bool *wrapped;
- int cnt = ptr->wrapped_cnt;
-
- /* Make @ptr->wrapped as big as @idx */
- while (cnt <= idx)
- cnt++;
-
- /*
- * Free'ed in cs_etm_recording_free(). Using realloc() to avoid
- * cross compilation problems where the host's system supports
- * reallocarray() but not the target.
- */
- wrapped = realloc(ptr->wrapped, cnt * sizeof(bool));
- if (!wrapped)
- return -ENOMEM;
-
- wrapped[cnt - 1] = false;
- ptr->wrapped_cnt = cnt;
- ptr->wrapped = wrapped;
+ for (i = 0; i < cpu__max_cpu().cpu && offset < priv_size; i++) {
+ struct perf_cpu cpu = { .cpu = i, };
- return 0;
-}
-
-static bool cs_etm_buffer_has_wrapped(unsigned char *buffer,
- size_t buffer_size, u64 head)
-{
- u64 i, watermark;
- u64 *buf = (u64 *)buffer;
- size_t buf_size = buffer_size;
-
- /*
- * We want to look the very last 512 byte (chosen arbitrarily) in
- * the ring buffer.
- */
- watermark = buf_size - 512;
-
- /*
- * @head is continuously increasing - if its value is equal or greater
- * than the size of the ring buffer, it has wrapped around.
- */
- if (head >= buffer_size)
- return true;
-
- /*
- * The value of @head is somewhere within the size of the ring buffer.
- * This can be that there hasn't been enough data to fill the ring
- * buffer yet or the trace time was so long that @head has numerically
- * wrapped around. To find we need to check if we have data at the very
- * end of the ring buffer. We can reliably do this because mmap'ed
- * pages are zeroed out and there is a fresh mapping with every new
- * session.
- */
-
- /* @head is less than 512 byte from the end of the ring buffer */
- if (head > watermark)
- watermark = head;
-
- /*
- * Speed things up by using 64 bit transactions (see "u64 *buf" above)
- */
- watermark >>= 3;
- buf_size >>= 3;
-
- /*
- * If we find trace data at the end of the ring buffer, @head has
- * been there and has numerically wrapped around at least once.
- */
- for (i = watermark; i < buf_size; i++)
- if (buf[i])
- return true;
-
- return false;
-}
-
-static int cs_etm_find_snapshot(struct auxtrace_record *itr,
- int idx, struct auxtrace_mmap *mm,
- unsigned char *data,
- u64 *head, u64 *old)
-{
- int err;
- bool wrapped;
- struct cs_etm_recording *ptr =
- container_of(itr, struct cs_etm_recording, itr);
-
- /*
- * Allocate memory to keep track of wrapping if this is the first
- * time we deal with this *mm.
- */
- if (idx >= ptr->wrapped_cnt) {
- err = cs_etm_alloc_wrapped_array(ptr, idx);
- if (err)
- return err;
- }
-
- /*
- * Check to see if *head has wrapped around. If it hasn't only the
- * amount of data between *head and *old is snapshot'ed to avoid
- * bloating the perf.data file with zeros. But as soon as *head has
- * wrapped around the entire size of the AUX ring buffer it taken.
- */
- wrapped = ptr->wrapped[idx];
- if (!wrapped && cs_etm_buffer_has_wrapped(data, mm->len, *head)) {
- wrapped = true;
- ptr->wrapped[idx] = true;
+ if (perf_cpu_map__has(cpu_map, cpu))
+ cs_etm_get_metadata(i, &offset, itr, info);
}
- pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
- __func__, idx, (size_t)*old, (size_t)*head, mm->len);
-
- /* No wrap has occurred, we can just use *head and *old. */
- if (!wrapped)
- return 0;
-
- /*
- * *head has wrapped around - adjust *head and *old to pickup the
- * entire content of the AUX buffer.
- */
- if (*head >= mm->len) {
- *old = *head - mm->len;
- } else {
- *head += mm->len;
- *old = *head - mm->len;
- }
+ perf_cpu_map__put(online_cpus);
return 0;
}
@@ -854,7 +874,6 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
- zfree(&ptr->wrapped);
free(ptr);
}
@@ -882,7 +901,6 @@ struct auxtrace_record *cs_etm_record_init(int *err)
ptr->itr.recording_options = cs_etm_recording_options;
ptr->itr.info_priv_size = cs_etm_info_priv_size;
ptr->itr.info_fill = cs_etm_info_fill;
- ptr->itr.find_snapshot = cs_etm_find_snapshot;
ptr->itr.snapshot_start = cs_etm_snapshot_start;
ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
ptr->itr.reference = cs_etm_reference;
@@ -894,3 +912,22 @@ struct auxtrace_record *cs_etm_record_init(int *err)
out:
return NULL;
}
+
+/*
+ * Set a default config to enable the user changed config tracking mechanism
+ * (CFG_CHG and evsel__set_config_if_unset()). If no default is set then user
+ * changes aren't tracked.
+ */
+struct perf_event_attr *
+cs_etm_get_default_config(struct perf_pmu *pmu __maybe_unused)
+{
+ struct perf_event_attr *attr;
+
+ attr = zalloc(sizeof(struct perf_event_attr));
+ if (!attr)
+ return NULL;
+
+ attr->sample_period = 1;
+
+ return attr;
+}
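To make the ETE detection above concrete: cs_etm_is_ete() keys purely off TRCDEVARCH, requiring ARCHVER (bits [15:12]) to be 5 and ARCHPART (bits [11:0]) to be 0xA13. A small standalone check with a made-up register value (0x47705a13 is illustrative only, not read from hardware) exercises the same bit slicing:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same field layout as the TRCDEVARCH_* macros in the patch above. */
#define ARCHPART(x)	((x) & 0xfffu)		/* bits [11:0]  */
#define ARCHVER(x)	(((x) >> 12) & 0xfu)	/* bits [15:12] */

static bool is_ete(uint32_t trcdevarch)
{
	/* ETE: ARCHVER == 5 and ARCHPART == 0xA13 (ETMv4 reports ARCHVER == 4). */
	return ARCHVER(trcdevarch) == 5 && ARCHPART(trcdevarch) == 0xA13;
}

int main(void)
{
	uint32_t val = 0x47705a13;	/* hypothetical TRCDEVARCH value */

	printf("ARCHVER=%u ARCHPART=0x%x ete=%d\n",
	       ARCHVER(val), ARCHPART(val), is_ete(val));
	return 0;
}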
diff --git a/tools/perf/arch/arm/util/perf_regs.c b/tools/perf/arch/arm/util/perf_regs.c
index 2864e2e3776d..2833e101a7c6 100644
--- a/tools/perf/arch/arm/util/perf_regs.c
+++ b/tools/perf/arch/arm/util/perf_regs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index bbc297a7e2e3..860a8b42b4b5 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -10,7 +10,9 @@
#include <linux/string.h>
#include "arm-spe.h"
-#include "../../util/pmu.h"
+#include "hisi-ptt.h"
+#include "../../../util/pmu.h"
+#include "../cs-etm.h"
struct perf_event_attr
*perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
@@ -19,9 +21,12 @@ struct perf_event_attr
if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
/* add ETM default config here */
pmu->selectable = true;
+ return cs_etm_get_default_config(pmu);
#if defined(__aarch64__)
} else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
return arm_spe_pmu_default_config(pmu);
+ } else if (strstarts(pmu->name, HISI_PTT_PMU_NAME)) {
+ pmu->selectable = true;
#endif
}
diff --git a/tools/perf/arch/arm/util/unwind-libdw.c b/tools/perf/arch/arm/util/unwind-libdw.c
index 36ba4c69c3c5..1834a0cd9ce3 100644
--- a/tools/perf/arch/arm/util/unwind-libdw.c
+++ b/tools/perf/arch/arm/util/unwind-libdw.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/sample.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
diff --git a/tools/perf/arch/arm/util/unwind-libunwind.c b/tools/perf/arch/arm/util/unwind-libunwind.c
index 3a550225dfaf..438906bf0014 100644
--- a/tools/perf/arch/arm/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm/util/unwind-libunwind.c
@@ -3,8 +3,8 @@
#include <errno.h>
#include <libunwind.h>
#include "perf_regs.h"
-#include "../../util/unwind.h"
-#include "../../util/debug.h"
+#include "../../../util/unwind.h"
+#include "../../../util/debug.h"
int libunwind__arch_reg_id(int regnum)
{
diff --git a/tools/perf/arch/arm64/Makefile b/tools/perf/arch/arm64/Makefile
index dbef716a1913..fab3095fb5d0 100644
--- a/tools/perf/arch/arm64/Makefile
+++ b/tools/perf/arch/arm64/Makefile
@@ -4,6 +4,7 @@ PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_JITDUMP := 1
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+HAVE_KVM_STAT_SUPPORT := 1
#
# Syscall table generation for perf
diff --git a/tools/perf/arch/arm64/annotate/instructions.c b/tools/perf/arch/arm64/annotate/instructions.c
index 037e292ecd8e..4af0c3a0f86e 100644
--- a/tools/perf/arch/arm64/annotate/instructions.c
+++ b/tools/perf/arch/arm64/annotate/instructions.c
@@ -102,7 +102,7 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
if (err)
goto out_free_arm;
/* b, b.cond, br, cbz/cbnz, tbz/tbnz */
- err = regcomp(&arm->jump_insn, "^[ct]?br?\\.?(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl)?n?z?$",
+ err = regcomp(&arm->jump_insn, "^[ct]?br?\\.?(cc|cs|eq|ge|gt|hi|hs|le|lo|ls|lt|mi|ne|pl|vc|vs)?n?z?$",
REG_EXTENDED);
if (err)
goto out_free_call;
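The widened regular expression above now also accepts the hs/lo and vc/vs condition codes on arm64 branches. A small standalone harness (not perf code, just an illustration of what the pattern classifies as a jump) can be used to check individual mnemonics:

#include <regex.h>
#include <stdio.h>

int main(void)
{
	/* Same extended regex as the arm64 annotate change above. */
	const char *pat =
		"^[ct]?br?\\.?(cc|cs|eq|ge|gt|hi|hs|le|lo|ls|lt|mi|ne|pl|vc|vs)?n?z?$";
	const char *insns[] = { "b", "b.hs", "b.lo", "cbnz", "tbz", "br", "bl" };
	regex_t re;

	if (regcomp(&re, pat, REG_EXTENDED))
		return 1;

	for (unsigned int i = 0; i < sizeof(insns) / sizeof(insns[0]); i++)
		printf("%-5s -> %s\n", insns[i],
		       regexec(&re, insns[i], 0, NULL, 0) ? "no match" : "jump");

	regfree(&re);	/* "bl" is a call, not a jump, so it should not match */
	return 0;
}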
diff --git a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
index 459469b7222c..22cdf911dd9a 100755
--- a/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
+++ b/tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
@@ -23,34 +23,17 @@ create_table_from_c()
{
local sc nr last_sc
- create_table_exe=`mktemp ${TMPDIR:-/tmp}/create-table-XXXXXX`
-
- {
-
- cat <<-_EoHEADER
- #include <stdio.h>
- #include "$input"
- int main(int argc, char *argv[])
- {
- _EoHEADER
-
while read sc nr; do
- printf "%s\n" " printf(\"\\t[%d] = \\\"$sc\\\",\\n\", __NR_$sc);"
+ printf "%s\n" " [$nr] = \"$sc\","
last_sc=$sc
done
- printf "%s\n" " printf(\"#define SYSCALLTBL_ARM64_MAX_ID %d\\n\", __NR_$last_sc);"
- printf "}\n"
-
- } | $hostcc -I $incpath/include/uapi -o $create_table_exe -x c -
-
- $create_table_exe
-
- rm -f $create_table_exe
+ printf "%s\n" "#define SYSCALLTBL_ARM64_MAX_ID __NR_$last_sc"
}
create_table()
{
+ echo "#include \"$input\""
echo "static const char *syscalltbl_arm64[] = {"
create_table_from_c
echo "};"
@@ -58,5 +41,5 @@ create_table()
$gcc -E -dM -x c -I $incpath/include/uapi $input \
|sed -ne 's/^#define __NR_//p' \
- |sort -t' ' -k2 -nu \
+ |sort -t' ' -k2 -n \
|create_table
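With the helper-binary step removed, mksyscalltbl now emits the table text directly from the preprocessor output. The generated file ends up with roughly the following shape (syscall names and numbers below are illustrative; the real output is driven by the __NR_* macros found in the included uapi header):

#include <stdio.h>

#define __NR_faccessat2 439	/* stands in for the '#include "$input"' line the script emits */

static const char *syscalltbl_arm64[] = {
	[0]   = "io_setup",
	[1]   = "io_destroy",
	/* ... one "[nr] = \"name\"," entry per __NR_* macro ... */
	[439] = "faccessat2",
/* the script prints this define before the closing brace; cpp handles it fine */
#define SYSCALLTBL_ARM64_MAX_ID __NR_faccessat2
};

int main(void)
{
	printf("%d -> %s\n", SYSCALLTBL_ARM64_MAX_ID,
	       syscalltbl_arm64[SYSCALLTBL_ARM64_MAX_ID]);
	return 0;
}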
diff --git a/tools/perf/arch/arm64/include/arch-tests.h b/tools/perf/arch/arm64/include/arch-tests.h
index 90ec4c8cb880..452b3d904521 100644
--- a/tools/perf/arch/arm64/include/arch-tests.h
+++ b/tools/perf/arch/arm64/include/arch-tests.h
@@ -2,11 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-#endif
-
-extern struct test arch_tests[];
+extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/arm64/include/perf_regs.h b/tools/perf/arch/arm64/include/perf_regs.h
index baaa5e64a3fb..35a3cc775b39 100644
--- a/tools/perf/arch/arm64/include/perf_regs.h
+++ b/tools/perf/arch/arm64/include/perf_regs.h
@@ -4,7 +4,9 @@
#include <stdlib.h>
#include <linux/types.h>
+#define perf_event_arm_regs perf_event_arm64_regs
#include <asm/perf_regs.h>
+#undef perf_event_arm_regs
void perf_regs_load(u64 *regs);
@@ -15,80 +17,4 @@ void perf_regs_load(u64 *regs);
#define PERF_REG_IP PERF_REG_ARM64_PC
#define PERF_REG_SP PERF_REG_ARM64_SP
-static inline const char *perf_reg_name(int id)
-{
- switch (id) {
- case PERF_REG_ARM64_X0:
- return "x0";
- case PERF_REG_ARM64_X1:
- return "x1";
- case PERF_REG_ARM64_X2:
- return "x2";
- case PERF_REG_ARM64_X3:
- return "x3";
- case PERF_REG_ARM64_X4:
- return "x4";
- case PERF_REG_ARM64_X5:
- return "x5";
- case PERF_REG_ARM64_X6:
- return "x6";
- case PERF_REG_ARM64_X7:
- return "x7";
- case PERF_REG_ARM64_X8:
- return "x8";
- case PERF_REG_ARM64_X9:
- return "x9";
- case PERF_REG_ARM64_X10:
- return "x10";
- case PERF_REG_ARM64_X11:
- return "x11";
- case PERF_REG_ARM64_X12:
- return "x12";
- case PERF_REG_ARM64_X13:
- return "x13";
- case PERF_REG_ARM64_X14:
- return "x14";
- case PERF_REG_ARM64_X15:
- return "x15";
- case PERF_REG_ARM64_X16:
- return "x16";
- case PERF_REG_ARM64_X17:
- return "x17";
- case PERF_REG_ARM64_X18:
- return "x18";
- case PERF_REG_ARM64_X19:
- return "x19";
- case PERF_REG_ARM64_X20:
- return "x20";
- case PERF_REG_ARM64_X21:
- return "x21";
- case PERF_REG_ARM64_X22:
- return "x22";
- case PERF_REG_ARM64_X23:
- return "x23";
- case PERF_REG_ARM64_X24:
- return "x24";
- case PERF_REG_ARM64_X25:
- return "x25";
- case PERF_REG_ARM64_X26:
- return "x26";
- case PERF_REG_ARM64_X27:
- return "x27";
- case PERF_REG_ARM64_X28:
- return "x28";
- case PERF_REG_ARM64_X29:
- return "x29";
- case PERF_REG_ARM64_SP:
- return "sp";
- case PERF_REG_ARM64_LR:
- return "lr";
- case PERF_REG_ARM64_PC:
- return "pc";
- default:
- return NULL;
- }
-
- return NULL;
-}
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/arm64/tests/arch-tests.c b/tools/perf/arch/arm64/tests/arch-tests.c
index 5b1543c98022..ad16b4f8f63e 100644
--- a/tools/perf/arch/arm64/tests/arch-tests.c
+++ b/tools/perf/arch/arm64/tests/arch-tests.c
@@ -3,14 +3,9 @@
#include "tests/tests.h"
#include "arch-tests.h"
-struct test arch_tests[] = {
+struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- {
- .desc = "DWARF unwind",
- .func = test__dwarf_unwind,
- },
+ &suite__dwarf_unwind,
#endif
- {
- .func = NULL,
- },
+ NULL,
};
diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c
index 46147a483049..90a7ef293ce7 100644
--- a/tools/perf/arch/arm64/tests/dwarf-unwind.c
+++ b/tools/perf/arch/arm64/tests/dwarf-unwind.c
@@ -33,7 +33,7 @@ static int sample_ustack(struct perf_sample *sample,
return -1;
}
- stack_size = map->end - sp;
+ stack_size = map__end(map) - sp;
stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
memcpy(buf, (void *) sp, stack_size);
diff --git a/tools/perf/arch/arm64/util/Build b/tools/perf/arch/arm64/util/Build
index 5c13438c7bd4..78ef7115be3d 100644
--- a/tools/perf/arch/arm64/util/Build
+++ b/tools/perf/arch/arm64/util/Build
@@ -1,6 +1,9 @@
perf-y += header.o
perf-y += machine.o
perf-y += perf_regs.o
+perf-y += tsc.o
+perf-y += pmu.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
@@ -8,4 +11,4 @@ perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
perf-$(CONFIG_AUXTRACE) += ../../arm/util/pmu.o \
../../arm/util/auxtrace.o \
../../arm/util/cs-etm.o \
- arm-spe.o
+ arm-spe.o mem-events.o hisi-ptt.o
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index 27653be24447..3b1676ff03f9 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -14,6 +14,7 @@
#include "../../../util/cpumap.h"
#include "../../../util/event.h"
#include "../../../util/evsel.h"
+#include "../../../util/evsel_config.h"
#include "../../../util/evlist.h"
#include "../../../util/session.h"
#include <internal/lib.h> // page_size
@@ -22,6 +23,7 @@
#include "../../../util/auxtrace.h"
#include "../../../util/record.h"
#include "../../../util/arm-spe.h"
+#include <tools/libc_compat.h> // reallocarray
#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
@@ -30,6 +32,8 @@ struct arm_spe_recording {
struct auxtrace_record itr;
struct perf_pmu *arm_spe_pmu;
struct evlist *evlist;
+ int wrapped_cnt;
+ bool *wrapped;
};
static size_t
@@ -60,6 +64,55 @@ static int arm_spe_info_fill(struct auxtrace_record *itr,
return 0;
}
+static void
+arm_spe_snapshot_resolve_auxtrace_defaults(struct record_opts *opts,
+ bool privileged)
+{
+ /*
+ * The default snapshot size is the auxtrace mmap size. If neither auxtrace mmap size nor
+ * snapshot size is specified, then the default is 4MiB for privileged users, 128KiB for
+ * unprivileged users.
+ *
+ * The default auxtrace mmap size is 4MiB/page_size for privileged users, 128KiB for
+ * unprivileged users. If an unprivileged user does not specify mmap pages, the mmap pages
+ * will be reduced from the default 512KiB/page_size to 256KiB/page_size; otherwise the
+ * user is likely to get an error as they exceed their mlock limit.
+ */
+
+ /*
+ * No size was given to '-S' or '-m,', so go with the default
+ */
+ if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(4) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ } else if (!opts->auxtrace_mmap_pages && !privileged && opts->mmap_pages == UINT_MAX) {
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+
+ /*
+ * '-m,xyz' was specified but no snapshot size, so make the snapshot size as big as the
+ * auxtrace mmap area.
+ */
+ if (!opts->auxtrace_snapshot_size)
+ opts->auxtrace_snapshot_size = opts->auxtrace_mmap_pages * (size_t)page_size;
+
+ /*
+ * '-Sxyz' was specified but no auxtrace mmap area, so make the auxtrace mmap area big
+ * enough to fit the requested snapshot size.
+ */
+ if (!opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_snapshot_size;
+
+ sz = round_up(sz, page_size) / page_size;
+ opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
+ }
+}
+
static int arm_spe_recording_options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
@@ -68,9 +121,11 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
container_of(itr, struct arm_spe_recording, itr);
struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
struct evsel *evsel, *arm_spe_evsel = NULL;
+ struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
struct evsel *tracking_evsel;
int err;
+ u64 bit;
sper->evlist = evlist;
@@ -81,7 +136,8 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
return -EINVAL;
}
evsel->core.attr.freq = 0;
- evsel->core.attr.sample_period = 1;
+ evsel->core.attr.sample_period = arm_spe_pmu->default_config->sample_period;
+ evsel->needs_auxtrace_mmap = true;
arm_spe_evsel = evsel;
opts->full_auxtrace = true;
}
@@ -90,8 +146,38 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
if (!opts->full_auxtrace)
return 0;
+ /*
+ * we are in snapshot mode.
+ */
+ if (opts->auxtrace_snapshot_mode) {
+ /*
+ * Command arguments '-Sxyz' and/or '-m,xyz' are missing, so fill those in with
+ * default values.
+ */
+ if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages)
+ arm_spe_snapshot_resolve_auxtrace_defaults(opts, privileged);
+
+ /*
+ * Snapshot size can't be bigger than the auxtrace area.
+ */
+ if (opts->auxtrace_snapshot_size > opts->auxtrace_mmap_pages * (size_t)page_size) {
+ pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
+ opts->auxtrace_snapshot_size,
+ opts->auxtrace_mmap_pages * (size_t)page_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Something went wrong somewhere - this shouldn't happen.
+ */
+ if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
+ pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
+ return -EINVAL;
+ }
+ }
+
/* We are in full trace mode but '-m,xyz' wasn't specified */
- if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (!opts->auxtrace_mmap_pages) {
if (privileged) {
opts->auxtrace_mmap_pages = MiB(4) / page_size;
} else {
@@ -113,30 +199,246 @@ static int arm_spe_recording_options(struct auxtrace_record *itr,
}
}
+ if (opts->auxtrace_snapshot_mode)
+ pr_debug2("%sx snapshot size: %zu\n", ARM_SPE_PMU_NAME,
+ opts->auxtrace_snapshot_size);
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace event
* must come first.
*/
- perf_evlist__to_front(evlist, arm_spe_evsel);
+ evlist__to_front(evlist, arm_spe_evsel);
- perf_evsel__set_sample_bit(arm_spe_evsel, CPU);
- perf_evsel__set_sample_bit(arm_spe_evsel, TIME);
- perf_evsel__set_sample_bit(arm_spe_evsel, TID);
+ /*
+ * In the case of per-cpu mmaps, sample CPU for AUX event;
+ * also enable timestamp tracing for sample correlation.
+ */
+ if (!perf_cpu_map__empty(cpus)) {
+ evsel__set_sample_bit(arm_spe_evsel, CPU);
+ evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel,
+ "ts_enable", 1);
+ }
+
+ /*
+ * Set this only so that perf report knows that SPE generates memory info. It has no effect
+ * on the opening of the event or the SPE data produced.
+ */
+ evsel__set_sample_bit(arm_spe_evsel, DATA_SRC);
+
+ /*
+ * The PHYS_ADDR flag does not affect the driver behaviour; it is used to
+ * indicate that the resulting output's SPE samples contain physical addresses
+ * where applicable.
+ */
+ bit = perf_pmu__format_bits(&arm_spe_pmu->format, "pa_enable");
+ if (arm_spe_evsel->core.attr.config & bit)
+ evsel__set_sample_bit(arm_spe_evsel, PHYS_ADDR);
/* Add dummy event to keep tracking */
- err = parse_events(evlist, "dummy:u", NULL);
+ err = parse_event(evlist, "dummy:u");
if (err)
return err;
tracking_evsel = evlist__last(evlist);
- perf_evlist__set_tracking_event(evlist, tracking_evsel);
+ evlist__set_tracking_event(evlist, tracking_evsel);
tracking_evsel->core.attr.freq = 0;
tracking_evsel->core.attr.sample_period = 1;
- perf_evsel__set_sample_bit(tracking_evsel, TIME);
- perf_evsel__set_sample_bit(tracking_evsel, CPU);
- perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+
+ /* In per-cpu case, always need the time of mmap events etc */
+ if (!perf_cpu_map__empty(cpus)) {
+ evsel__set_sample_bit(tracking_evsel, TIME);
+ evsel__set_sample_bit(tracking_evsel, CPU);
+
+ /* also track task context switch */
+ if (!record_opts__no_switch_events(opts))
+ tracking_evsel->core.attr.context_switch = 1;
+ }
+
+ return 0;
+}
+
+static int arm_spe_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
+ struct record_opts *opts,
+ const char *str)
+{
+ unsigned long long snapshot_size = 0;
+ char *endptr;
+
+ if (str) {
+ snapshot_size = strtoull(str, &endptr, 0);
+ if (*endptr || snapshot_size > SIZE_MAX)
+ return -1;
+ }
+
+ opts->auxtrace_snapshot_mode = true;
+ opts->auxtrace_snapshot_size = snapshot_size;
+
+ return 0;
+}
+
+static int arm_spe_snapshot_start(struct auxtrace_record *itr)
+{
+ struct arm_spe_recording *ptr =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct evsel *evsel;
+
+ evlist__for_each_entry(ptr->evlist, evsel) {
+ if (evsel->core.attr.type == ptr->arm_spe_pmu->type)
+ return evsel__disable(evsel);
+ }
+ return -EINVAL;
+}
+
+static int arm_spe_snapshot_finish(struct auxtrace_record *itr)
+{
+ struct arm_spe_recording *ptr =
+ container_of(itr, struct arm_spe_recording, itr);
+ struct evsel *evsel;
+
+ evlist__for_each_entry(ptr->evlist, evsel) {
+ if (evsel->core.attr.type == ptr->arm_spe_pmu->type)
+ return evsel__enable(evsel);
+ }
+ return -EINVAL;
+}
+
+static int arm_spe_alloc_wrapped_array(struct arm_spe_recording *ptr, int idx)
+{
+ bool *wrapped;
+ int cnt = ptr->wrapped_cnt, new_cnt, i;
+
+ /*
+ * No need to allocate, so return early.
+ */
+ if (idx < cnt)
+ return 0;
+
+ /*
+ * Make ptr->wrapped big enough to hold index idx.
+ */
+ new_cnt = idx + 1;
+
+ /*
+ * Freed in arm_spe_recording_free().
+ */
+ wrapped = reallocarray(ptr->wrapped, new_cnt, sizeof(bool));
+ if (!wrapped)
+ return -ENOMEM;
+
+ /*
+ * Initialize the newly allocated entries.
+ */
+ for (i = cnt; i < new_cnt; i++)
+ wrapped[i] = false;
+
+ ptr->wrapped_cnt = new_cnt;
+ ptr->wrapped = wrapped;
+
+ return 0;
+}
+
+static bool arm_spe_buffer_has_wrapped(unsigned char *buffer,
+ size_t buffer_size, u64 head)
+{
+ u64 i, watermark;
+ u64 *buf = (u64 *)buffer;
+ size_t buf_size = buffer_size;
+
+ /*
+ * Defensively handle the case where head might be continually increasing - if its value is
+ * equal to or greater than the size of the ring buffer, then we can safely determine it has
+ * wrapped around. Otherwise, continue to detect if head might have wrapped.
+ */
+ if (head >= buffer_size)
+ return true;
+
+ /*
+ * We want to look at the very last 512 bytes (chosen arbitrarily) in the ring buffer.
+ */
+ watermark = buf_size - 512;
+
+ /*
+ * The value of head is somewhere within the size of the ring buffer. This can mean either that
+ * there hasn't been enough data to fill the ring buffer yet, or that the trace time was so long
+ * that head has numerically wrapped around. To tell which, we need to check whether there is data at the
+ * very end of the ring buffer. We can reliably do this because mmap'ed pages are zeroed
+ * out and there is a fresh mapping with every new session.
+ */
+
+ /*
+ * head is less than 512 bytes from the end of the ring buffer.
+ */
+ if (head > watermark)
+ watermark = head;
+
+ /*
+ * Speed things up by using 64-bit transactions (see "u64 *buf" above).
+ */
+ watermark /= sizeof(u64);
+ buf_size /= sizeof(u64);
+
+ /*
+ * If we find trace data at the end of the ring buffer, head has been there and has
+ * numerically wrapped around at least once.
+ */
+ for (i = watermark; i < buf_size; i++)
+ if (buf[i])
+ return true;
+
+ return false;
+}
+
+static int arm_spe_find_snapshot(struct auxtrace_record *itr, int idx,
+ struct auxtrace_mmap *mm, unsigned char *data,
+ u64 *head, u64 *old)
+{
+ int err;
+ bool wrapped;
+ struct arm_spe_recording *ptr =
+ container_of(itr, struct arm_spe_recording, itr);
+
+ /*
+ * Allocate memory to keep track of wrapping if this is the first
+ * time we deal with this *mm.
+ */
+ if (idx >= ptr->wrapped_cnt) {
+ err = arm_spe_alloc_wrapped_array(ptr, idx);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Check to see if *head has wrapped around. If it hasn't, only the
+ * amount of data between *head and *old is snapshotted, to avoid
+ * bloating the perf.data file with zeros. But as soon as *head has
+ * wrapped around, the entire size of the AUX ring buffer is taken.
+ */
+ wrapped = ptr->wrapped[idx];
+ if (!wrapped && arm_spe_buffer_has_wrapped(data, mm->len, *head)) {
+ wrapped = true;
+ ptr->wrapped[idx] = true;
+ }
+
+ pr_debug3("%s: mmap index %d old head %zu new head %zu size %zu\n",
+ __func__, idx, (size_t)*old, (size_t)*head, mm->len);
+
+ /*
+ * No wrap has occurred, we can just use *head and *old.
+ */
+ if (!wrapped)
+ return 0;
+
+ /*
+ * *head has wrapped around - adjust *head and *old to pick up the
+ * entire content of the AUX buffer.
+ */
+ if (*head >= mm->len) {
+ *old = *head - mm->len;
+ } else {
+ *head += mm->len;
+ *old = *head - mm->len;
+ }
return 0;
}
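
A worked example of the adjustment above, with illustrative numbers only (mm->len = 4 MiB):

	*head = 9 MiB (>= mm->len):   *old = 9 MiB - 4 MiB = 5 MiB,
	                              so the snapshot covers the last 4 MiB up to *head;
	*head = 1 MiB, buffer wrapped: *head becomes 1 MiB + 4 MiB = 5 MiB, *old = 1 MiB,
	                              again exactly one full buffer is captured.
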
@@ -155,6 +457,7 @@ static void arm_spe_recording_free(struct auxtrace_record *itr)
struct arm_spe_recording *sper =
container_of(itr, struct arm_spe_recording, itr);
+ zfree(&sper->wrapped);
free(sper);
}
@@ -176,6 +479,10 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
sper->arm_spe_pmu = arm_spe_pmu;
sper->itr.pmu = arm_spe_pmu;
+ sper->itr.snapshot_start = arm_spe_snapshot_start;
+ sper->itr.snapshot_finish = arm_spe_snapshot_finish;
+ sper->itr.find_snapshot = arm_spe_find_snapshot;
+ sper->itr.parse_snapshot_options = arm_spe_parse_snapshot_options;
sper->itr.recording_options = arm_spe_recording_options;
sper->itr.info_priv_size = arm_spe_info_priv_size;
sper->itr.info_fill = arm_spe_info_fill;
diff --git a/tools/perf/arch/arm64/util/arm64_exception_types.h b/tools/perf/arch/arm64/util/arm64_exception_types.h
new file mode 100644
index 000000000000..27c981ebe401
--- /dev/null
+++ b/tools/perf/arch/arm64/util/arm64_exception_types.h
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef ARCH_PERF_ARM64_EXCEPTION_TYPES_H
+#define ARCH_PERF_ARM64_EXCEPTION_TYPES_H
+
+/* Per asm/virt.h */
+#define HVC_STUB_ERR 0xbadca11
+
+/* Per asm/kvm_asm.h */
+#define ARM_EXCEPTION_IRQ 0
+#define ARM_EXCEPTION_EL1_SERROR 1
+#define ARM_EXCEPTION_TRAP 2
+#define ARM_EXCEPTION_IL 3
+/* The hyp-stub will return this for any kvm_call_hyp() call */
+#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
+
+#define kvm_arm_exception_type \
+ {ARM_EXCEPTION_IRQ, "IRQ" }, \
+ {ARM_EXCEPTION_EL1_SERROR, "SERROR" }, \
+ {ARM_EXCEPTION_TRAP, "TRAP" }, \
+ {ARM_EXCEPTION_IL, "ILLEGAL" }, \
+ {ARM_EXCEPTION_HYP_GONE, "HYP_GONE" }
+
+/* Per asm/esr.h */
+#define ESR_ELx_EC_UNKNOWN (0x00)
+#define ESR_ELx_EC_WFx (0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32 (0x03)
+#define ESR_ELx_EC_CP15_64 (0x04)
+#define ESR_ELx_EC_CP14_MR (0x05)
+#define ESR_ELx_EC_CP14_LS (0x06)
+#define ESR_ELx_EC_FP_ASIMD (0x07)
+#define ESR_ELx_EC_CP10_ID (0x08) /* EL2 only */
+#define ESR_ELx_EC_PAC (0x09) /* EL2 and above */
+/* Unallocated EC: 0x0A - 0x0B */
+#define ESR_ELx_EC_CP14_64 (0x0C)
+/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_ILL (0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32 (0x11)
+#define ESR_ELx_EC_HVC32 (0x12) /* EL2 only */
+#define ESR_ELx_EC_SMC32 (0x13) /* EL2 and above */
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64 (0x15)
+#define ESR_ELx_EC_HVC64 (0x16) /* EL2 and above */
+#define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */
+#define ESR_ELx_EC_SYS64 (0x18)
+#define ESR_ELx_EC_SVE (0x19)
+#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */
+/* Unallocated EC: 0x1b - 0x1E */
+#define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */
+#define ESR_ELx_EC_IABT_LOW (0x20)
+#define ESR_ELx_EC_IABT_CUR (0x21)
+#define ESR_ELx_EC_PC_ALIGN (0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW (0x24)
+#define ESR_ELx_EC_DABT_CUR (0x25)
+#define ESR_ELx_EC_SP_ALIGN (0x26)
+/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_FP_EXC32 (0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64 (0x2C)
+/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_SERROR (0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW (0x30)
+#define ESR_ELx_EC_BREAKPT_CUR (0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
+#define ESR_ELx_EC_WATCHPT_LOW (0x34)
+#define ESR_ELx_EC_WATCHPT_CUR (0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32 (0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32 (0x3A) /* EL2 only */
+/* Unallocated EC: 0x3B */
+#define ESR_ELx_EC_BRK64 (0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX (0x3F)
+
+#define ECN(x) { ESR_ELx_EC_##x, #x }
+
+#define kvm_arm_exception_class \
+ ECN(UNKNOWN), ECN(WFx), ECN(CP15_32), ECN(CP15_64), ECN(CP14_MR), \
+ ECN(CP14_LS), ECN(FP_ASIMD), ECN(CP10_ID), ECN(PAC), ECN(CP14_64), \
+ ECN(SVC64), ECN(HVC64), ECN(SMC64), ECN(SYS64), ECN(SVE), \
+ ECN(IMP_DEF), ECN(IABT_LOW), ECN(IABT_CUR), \
+ ECN(PC_ALIGN), ECN(DABT_LOW), ECN(DABT_CUR), \
+ ECN(SP_ALIGN), ECN(FP_EXC32), ECN(FP_EXC64), ECN(SERROR), \
+ ECN(BREAKPT_LOW), ECN(BREAKPT_CUR), ECN(SOFTSTP_LOW), \
+ ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
+ ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
+
+#endif /* ARCH_PERF_ARM64_EXCEPTION_TYPES_H */
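
For reference, ECN() merely pairs an exception-class constant with its stringified name, so the class table above expands to entries such as:

	ECN(SVC64)	/* -> { ESR_ELx_EC_SVC64, "SVC64" },       i.e. { 0x15, "SVC64" } */
	ECN(DABT_LOW)	/* -> { ESR_ELx_EC_DABT_LOW, "DABT_LOW" }, i.e. { 0x24, "DABT_LOW" } */
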
diff --git a/tools/perf/arch/arm64/util/hisi-ptt.c b/tools/perf/arch/arm64/util/hisi-ptt.c
new file mode 100644
index 000000000000..ba97c8a562a0
--- /dev/null
+++ b/tools/perf/arch/arm64/util/hisi-ptt.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * HiSilicon PCIe Trace and Tuning (PTT) support
+ * Copyright (c) 2022 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/zalloc.h>
+#include <time.h>
+
+#include <internal/lib.h> // page_size
+#include "../../../util/auxtrace.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/debug.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/hisi-ptt.h"
+#include "../../../util/pmu.h"
+#include "../../../util/record.h"
+#include "../../../util/session.h"
+#include "../../../util/tsc.h"
+
+#define KiB(x) ((x) * 1024)
+#define MiB(x) ((x) * 1024 * 1024)
+
+struct hisi_ptt_recording {
+ struct auxtrace_record itr;
+ struct perf_pmu *hisi_ptt_pmu;
+ struct evlist *evlist;
+};
+
+static size_t
+hisi_ptt_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+ struct evlist *evlist __maybe_unused)
+{
+ return HISI_PTT_AUXTRACE_PRIV_SIZE;
+}
+
+static int hisi_ptt_info_fill(struct auxtrace_record *itr,
+ struct perf_session *session,
+ struct perf_record_auxtrace_info *auxtrace_info,
+ size_t priv_size)
+{
+ struct hisi_ptt_recording *pttr =
+ container_of(itr, struct hisi_ptt_recording, itr);
+ struct perf_pmu *hisi_ptt_pmu = pttr->hisi_ptt_pmu;
+
+ if (priv_size != HISI_PTT_AUXTRACE_PRIV_SIZE)
+ return -EINVAL;
+
+ if (!session->evlist->core.nr_mmaps)
+ return -EINVAL;
+
+ auxtrace_info->type = PERF_AUXTRACE_HISI_PTT;
+ auxtrace_info->priv[0] = hisi_ptt_pmu->type;
+
+ return 0;
+}
+
+static int hisi_ptt_set_auxtrace_mmap_page(struct record_opts *opts)
+{
+ bool privileged = perf_event_paranoid_check(-1);
+
+ if (!opts->full_auxtrace)
+ return 0;
+
+ if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
+ if (privileged) {
+ opts->auxtrace_mmap_pages = MiB(16) / page_size;
+ } else {
+ opts->auxtrace_mmap_pages = KiB(128) / page_size;
+ if (opts->mmap_pages == UINT_MAX)
+ opts->mmap_pages = KiB(256) / page_size;
+ }
+ }
+
+ /* Validate auxtrace_mmap_pages */
+ if (opts->auxtrace_mmap_pages) {
+ size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
+ size_t min_sz = KiB(8);
+
+ if (sz < min_sz || !is_power_of_2(sz)) {
+ pr_err("Invalid mmap size for HISI PTT: must be at least %zuKiB and a power of 2\n",
+ min_sz / 1024);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int hisi_ptt_recording_options(struct auxtrace_record *itr,
+ struct evlist *evlist,
+ struct record_opts *opts)
+{
+ struct hisi_ptt_recording *pttr =
+ container_of(itr, struct hisi_ptt_recording, itr);
+ struct perf_pmu *hisi_ptt_pmu = pttr->hisi_ptt_pmu;
+ struct evsel *evsel, *hisi_ptt_evsel = NULL;
+ struct evsel *tracking_evsel;
+ int err;
+
+ pttr->evlist = evlist;
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.type == hisi_ptt_pmu->type) {
+ if (hisi_ptt_evsel) {
+ pr_err("There may be only one " HISI_PTT_PMU_NAME "x event\n");
+ return -EINVAL;
+ }
+ evsel->core.attr.freq = 0;
+ evsel->core.attr.sample_period = 1;
+ evsel->needs_auxtrace_mmap = true;
+ hisi_ptt_evsel = evsel;
+ opts->full_auxtrace = true;
+ }
+ }
+
+ err = hisi_ptt_set_auxtrace_mmap_page(opts);
+ if (err)
+ return err;
+ /*
+ * To obtain the auxtrace buffer file descriptor, the auxtrace event
+ * must come first.
+ */
+ evlist__to_front(evlist, hisi_ptt_evsel);
+ evsel__set_sample_bit(hisi_ptt_evsel, TIME);
+
+ /* Add dummy event to keep tracking */
+ err = parse_event(evlist, "dummy:u");
+ if (err)
+ return err;
+
+ tracking_evsel = evlist__last(evlist);
+ evlist__set_tracking_event(evlist, tracking_evsel);
+
+ tracking_evsel->core.attr.freq = 0;
+ tracking_evsel->core.attr.sample_period = 1;
+ evsel__set_sample_bit(tracking_evsel, TIME);
+
+ return 0;
+}
+
+static u64 hisi_ptt_reference(struct auxtrace_record *itr __maybe_unused)
+{
+ return rdtsc();
+}
+
+static void hisi_ptt_recording_free(struct auxtrace_record *itr)
+{
+ struct hisi_ptt_recording *pttr =
+ container_of(itr, struct hisi_ptt_recording, itr);
+
+ free(pttr);
+}
+
+struct auxtrace_record *hisi_ptt_recording_init(int *err,
+ struct perf_pmu *hisi_ptt_pmu)
+{
+ struct hisi_ptt_recording *pttr;
+
+ if (!hisi_ptt_pmu) {
+ *err = -ENODEV;
+ return NULL;
+ }
+
+ pttr = zalloc(sizeof(*pttr));
+ if (!pttr) {
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ pttr->hisi_ptt_pmu = hisi_ptt_pmu;
+ pttr->itr.pmu = hisi_ptt_pmu;
+ pttr->itr.recording_options = hisi_ptt_recording_options;
+ pttr->itr.info_priv_size = hisi_ptt_info_priv_size;
+ pttr->itr.info_fill = hisi_ptt_info_fill;
+ pttr->itr.free = hisi_ptt_recording_free;
+ pttr->itr.reference = hisi_ptt_reference;
+ pttr->itr.read_finish = auxtrace_record__read_finish;
+ pttr->itr.alignment = 0;
+
+ *err = 0;
+ return &pttr->itr;
+}
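
To put the sizing in hisi_ptt_set_auxtrace_mmap_page() into numbers, assuming a 4 KiB page size (page_size is taken from the running system, so these figures are illustrative):

	MiB(16) / 4096 = 4096 auxtrace mmap pages   (privileged default)
	KiB(128) / 4096 =  32 auxtrace mmap pages   (unprivileged default)
	KiB(256) / 4096 =  64 mmap pages            (unprivileged, when mmap_pages was unset)

The resulting AUX buffer size must be at least 8 KiB and a power of two, otherwise -EINVAL is returned.
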
diff --git a/tools/perf/arch/arm64/util/kvm-stat.c b/tools/perf/arch/arm64/util/kvm-stat.c
new file mode 100644
index 000000000000..6611aa21cba9
--- /dev/null
+++ b/tools/perf/arch/arm64/util/kvm-stat.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <memory.h>
+#include "../../../util/evsel.h"
+#include "../../../util/kvm-stat.h"
+#include "arm64_exception_types.h"
+#include "debug.h"
+
+define_exit_reasons_table(arm64_exit_reasons, kvm_arm_exception_type);
+define_exit_reasons_table(arm64_trap_exit_reasons, kvm_arm_exception_class);
+
+const char *kvm_trap_exit_reason = "esr_ec";
+const char *vcpu_id_str = "id";
+const char *kvm_exit_reason = "ret";
+const char *kvm_entry_trace = "kvm:kvm_entry";
+const char *kvm_exit_trace = "kvm:kvm_exit";
+
+const char *kvm_events_tp[] = {
+ "kvm:kvm_entry",
+ "kvm:kvm_exit",
+ NULL,
+};
+
+static void event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->info = 0;
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason);
+ key->exit_reasons = arm64_exit_reasons;
+
+ /*
+ * TRAP exceptions carry the exception class in the esr_ec field
+ * and, hence, we need to use a different exit_reasons table to
+ * properly decode the event's esr_ec.
+ */
+ if (key->key == ARM_EXCEPTION_TRAP) {
+ key->key = evsel__intval(evsel, sample, kvm_trap_exit_reason);
+ key->exit_reasons = arm64_trap_exit_reasons;
+ }
+}
+
+static bool event_begin(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return evsel__name_is(evsel, kvm_entry_trace);
+}
+
+static bool event_end(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (evsel__name_is(evsel, kvm_exit_trace)) {
+ event_get_key(evsel, sample, key);
+ return true;
+ }
+ return false;
+}
+
+static struct kvm_events_ops exit_events = {
+ .is_begin_event = event_begin,
+ .is_end_event = event_end,
+ .decode_key = exit_event_decode_key,
+ .name = "VM-EXIT"
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+ {
+ .name = "vmexit",
+ .ops = &exit_events,
+ },
+ { NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+ NULL,
+};
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+{
+ kvm->exit_reasons_isa = "arm64";
+ return 0;
+}
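
As an illustration of how the two tables combine (values taken from the exception-types header above): a kvm:kvm_exit whose ret field is ARM_EXCEPTION_TRAP is re-keyed on esr_ec and decoded through arm64_trap_exit_reasons, so perf kvm stat reports the exception class instead of a generic "TRAP":

	ret = ARM_EXCEPTION_IRQ                            -> "IRQ"
	ret = ARM_EXCEPTION_TRAP, esr_ec = 0x15 (SVC64)    -> "SVC64"
	ret = ARM_EXCEPTION_TRAP, esr_ec = 0x24 (DABT_LOW) -> "DABT_LOW"
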
diff --git a/tools/perf/arch/arm64/util/machine.c b/tools/perf/arch/arm64/util/machine.c
index d41b27e781d3..235a0a1e1ec7 100644
--- a/tools/perf/arch/arm64/util/machine.c
+++ b/tools/perf/arch/arm64/util/machine.c
@@ -1,27 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include "debug.h"
#include "symbol.h"
+#include "callchain.h"
+#include "record.h"
+#include "util/perf_regs.h"
-/* On arm64, kernel text segment start at high memory address,
- * for example 0xffff 0000 8xxx xxxx. Modules start at a low memory
- * address, like 0xffff 0000 00ax xxxx. When only samll amount of
- * memory is used by modules, gap between end of module's text segment
- * and start of kernel text segment may be reach 2G.
- * Therefore do not fill this gap and do not assign it to the kernel dso map.
- */
-
-#define SYMBOL_LIMIT (1 << 12) /* 4K */
-
-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+void arch__add_leaf_frame_record_opts(struct record_opts *opts)
{
- if ((strchr(p->name, '[') && strchr(c->name, '[') == NULL) ||
- (strchr(p->name, '[') == NULL && strchr(c->name, '[')))
- /* Limit range of last symbol in module and kernel */
- p->end += SYMBOL_LIMIT;
- else
- p->end = c->start;
- pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
+ opts->sample_user_regs |= sample_reg_masks[PERF_REG_ARM64_LR].mask;
}
diff --git a/tools/perf/arch/arm64/util/mem-events.c b/tools/perf/arch/arm64/util/mem-events.c
new file mode 100644
index 000000000000..df817d1f9f3e
--- /dev/null
+++ b/tools/perf/arch/arm64/util/mem-events.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "map_symbol.h"
+#include "mem-events.h"
+
+#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
+
+static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
+ E("spe-load", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=%u/", "arm_spe_0"),
+ E("spe-store", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=0,store_filter=1/", "arm_spe_0"),
+ E("spe-ldst", "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=1,min_latency=%u/", "arm_spe_0"),
+};
+
+static char mem_ev_name[100];
+
+struct perf_mem_event *perf_mem_events__ptr(int i)
+{
+ if (i >= PERF_MEM_EVENTS__MAX)
+ return NULL;
+
+ return &perf_mem_events[i];
+}
+
+char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
+{
+ struct perf_mem_event *e = perf_mem_events__ptr(i);
+
+ if (i >= PERF_MEM_EVENTS__MAX)
+ return NULL;
+
+ if (i == PERF_MEM_EVENTS__LOAD || i == PERF_MEM_EVENTS__LOAD_STORE)
+ scnprintf(mem_ev_name, sizeof(mem_ev_name),
+ e->name, perf_mem_events__loads_ldlat);
+ else /* PERF_MEM_EVENTS__STORE */
+ scnprintf(mem_ev_name, sizeof(mem_ev_name), e->name);
+
+ return mem_ev_name;
+}
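
For illustration, this is roughly what perf_mem_events__name() returns; the min_latency value comes from perf_mem_events__loads_ldlat at call time (30 below is a hypothetical setting, e.g. from a --ldlat option):

	perf_mem_events__name(PERF_MEM_EVENTS__LOAD, NULL)
	  -> "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=1,store_filter=0,min_latency=30/"
	perf_mem_events__name(PERF_MEM_EVENTS__STORE, NULL)
	  -> "arm_spe_0/ts_enable=1,pa_enable=1,load_filter=0,store_filter=1/"
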
diff --git a/tools/perf/arch/arm64/util/perf_regs.c b/tools/perf/arch/arm64/util/perf_regs.c
index 2833e101a7c6..006692c9b040 100644
--- a/tools/perf/arch/arm64/util/perf_regs.c
+++ b/tools/perf/arch/arm64/util/perf_regs.c
@@ -1,6 +1,171 @@
// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+#include <regex.h>
+#include <string.h>
+#include <sys/auxv.h>
+#include <linux/kernel.h>
+#include <linux/zalloc.h>
+
+#include "../../../perf-sys.h"
+#include "../../../util/debug.h"
+#include "../../../util/event.h"
#include "../../../util/perf_regs.h"
+#ifndef HWCAP_SVE
+#define HWCAP_SVE (1 << 22)
+#endif
+
const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG(x0, PERF_REG_ARM64_X0),
+ SMPL_REG(x1, PERF_REG_ARM64_X1),
+ SMPL_REG(x2, PERF_REG_ARM64_X2),
+ SMPL_REG(x3, PERF_REG_ARM64_X3),
+ SMPL_REG(x4, PERF_REG_ARM64_X4),
+ SMPL_REG(x5, PERF_REG_ARM64_X5),
+ SMPL_REG(x6, PERF_REG_ARM64_X6),
+ SMPL_REG(x7, PERF_REG_ARM64_X7),
+ SMPL_REG(x8, PERF_REG_ARM64_X8),
+ SMPL_REG(x9, PERF_REG_ARM64_X9),
+ SMPL_REG(x10, PERF_REG_ARM64_X10),
+ SMPL_REG(x11, PERF_REG_ARM64_X11),
+ SMPL_REG(x12, PERF_REG_ARM64_X12),
+ SMPL_REG(x13, PERF_REG_ARM64_X13),
+ SMPL_REG(x14, PERF_REG_ARM64_X14),
+ SMPL_REG(x15, PERF_REG_ARM64_X15),
+ SMPL_REG(x16, PERF_REG_ARM64_X16),
+ SMPL_REG(x17, PERF_REG_ARM64_X17),
+ SMPL_REG(x18, PERF_REG_ARM64_X18),
+ SMPL_REG(x19, PERF_REG_ARM64_X19),
+ SMPL_REG(x20, PERF_REG_ARM64_X20),
+ SMPL_REG(x21, PERF_REG_ARM64_X21),
+ SMPL_REG(x22, PERF_REG_ARM64_X22),
+ SMPL_REG(x23, PERF_REG_ARM64_X23),
+ SMPL_REG(x24, PERF_REG_ARM64_X24),
+ SMPL_REG(x25, PERF_REG_ARM64_X25),
+ SMPL_REG(x26, PERF_REG_ARM64_X26),
+ SMPL_REG(x27, PERF_REG_ARM64_X27),
+ SMPL_REG(x28, PERF_REG_ARM64_X28),
+ SMPL_REG(x29, PERF_REG_ARM64_X29),
+ SMPL_REG(lr, PERF_REG_ARM64_LR),
+ SMPL_REG(sp, PERF_REG_ARM64_SP),
+ SMPL_REG(pc, PERF_REG_ARM64_PC),
+ SMPL_REG(vg, PERF_REG_ARM64_VG),
SMPL_REG_END
};
+
+/* %xNUM */
+#define SDT_OP_REGEX1 "^(x[1-2]?[0-9]|3[0-1])$"
+
+/* [sp], [sp, NUM] */
+#define SDT_OP_REGEX2 "^\\[sp(, )?([0-9]+)?\\]$"
+
+static regex_t sdt_op_regex1, sdt_op_regex2;
+
+static int sdt_init_op_regex(void)
+{
+ static int initialized;
+ int ret = 0;
+
+ if (initialized)
+ return 0;
+
+ ret = regcomp(&sdt_op_regex1, SDT_OP_REGEX1, REG_EXTENDED);
+ if (ret)
+ goto error;
+
+ ret = regcomp(&sdt_op_regex2, SDT_OP_REGEX2, REG_EXTENDED);
+ if (ret)
+ goto free_regex1;
+
+ initialized = 1;
+ return 0;
+
+free_regex1:
+ regfree(&sdt_op_regex1);
+error:
+ pr_debug4("Regex compilation error.\n");
+ return ret;
+}
+
+/*
+ * SDT marker arguments on Arm64 use %xREG or [sp, NUM]; currently
+ * only these two formats are supported.
+ */
+int arch_sdt_arg_parse_op(char *old_op, char **new_op)
+{
+ int ret, new_len;
+ regmatch_t rm[5];
+
+ ret = sdt_init_op_regex();
+ if (ret < 0)
+ return ret;
+
+ if (!regexec(&sdt_op_regex1, old_op, 3, rm, 0)) {
+ /* Extract xNUM */
+ new_len = 2; /* % NULL */
+ new_len += (int)(rm[1].rm_eo - rm[1].rm_so);
+
+ *new_op = zalloc(new_len);
+ if (!*new_op)
+ return -ENOMEM;
+
+ scnprintf(*new_op, new_len, "%%%.*s",
+ (int)(rm[1].rm_eo - rm[1].rm_so), old_op + rm[1].rm_so);
+ } else if (!regexec(&sdt_op_regex2, old_op, 5, rm, 0)) {
+ /* [sp], [sp, NUM] or [sp,NUM] */
+ new_len = 7; /* + ( % s p ) NULL */
+
+ /* If the argument is [sp], need to fill offset '0' */
+ if (rm[2].rm_so == -1)
+ new_len += 1;
+ else
+ new_len += (int)(rm[2].rm_eo - rm[2].rm_so);
+
+ *new_op = zalloc(new_len);
+ if (!*new_op)
+ return -ENOMEM;
+
+ if (rm[2].rm_so == -1)
+ scnprintf(*new_op, new_len, "+0(%%sp)");
+ else
+ scnprintf(*new_op, new_len, "+%.*s(%%sp)",
+ (int)(rm[2].rm_eo - rm[2].rm_so),
+ old_op + rm[2].rm_so);
+ } else {
+ pr_debug4("Skipping unsupported SDT argument: %s\n", old_op);
+ return SDT_ARG_SKIP;
+ }
+
+ return SDT_ARG_VALID;
+}
+
+uint64_t arch__user_reg_mask(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .sample_type = PERF_SAMPLE_REGS_USER,
+ .disabled = 1,
+ .exclude_kernel = 1,
+ .sample_period = 1,
+ .sample_regs_user = PERF_REGS_MASK
+ };
+ int fd;
+
+ if (getauxval(AT_HWCAP) & HWCAP_SVE)
+ attr.sample_regs_user |= SMPL_REG_MASK(PERF_REG_ARM64_VG);
+
+ /*
+ * Check if the pmu supports perf extended regs, before
+ * returning the register mask to sample.
+ */
+ if (attr.sample_regs_user != PERF_REGS_MASK) {
+ event_attr_init(&attr);
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ if (fd != -1) {
+ close(fd);
+ return attr.sample_regs_user;
+ }
+ }
+ return PERF_REGS_MASK;
+}
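
A few concrete rewrites performed by arch_sdt_arg_parse_op() above, mapping SDT operand strings onto the probe argument syntax:

	"x3"        -> "%x3"
	"[sp]"      -> "+0(%sp)"
	"[sp, 16]"  -> "+16(%sp)"
	"[x1, 8]"   -> SDT_ARG_SKIP   (only sp-based memory operands are handled)
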
diff --git a/tools/perf/arch/arm64/util/pmu.c b/tools/perf/arch/arm64/util/pmu.c
new file mode 100644
index 000000000000..fa143acb4c8d
--- /dev/null
+++ b/tools/perf/arch/arm64/util/pmu.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <internal/cpumap.h>
+#include "../../../util/cpumap.h"
+#include "../../../util/pmu.h"
+#include <api/fs/fs.h>
+#include <math.h>
+
+static struct perf_pmu *pmu__find_core_pmu(void)
+{
+ struct perf_pmu *pmu = NULL;
+
+ while ((pmu = perf_pmu__scan(pmu))) {
+ if (!is_pmu_core(pmu->name))
+ continue;
+
+ /*
+ * The cpumap should cover all CPUs. Otherwise, some CPUs may
+ * not support some events or have different event IDs.
+ */
+ if (pmu->cpus->nr != cpu__max_cpu().cpu)
+ return NULL;
+
+ return pmu;
+ }
+ return NULL;
+}
+
+const struct pmu_metrics_table *pmu_metrics_table__find(void)
+{
+ struct perf_pmu *pmu = pmu__find_core_pmu();
+
+ if (pmu)
+ return perf_pmu__find_metrics_table(pmu);
+
+ return NULL;
+}
+
+const struct pmu_events_table *pmu_events_table__find(void)
+{
+ struct perf_pmu *pmu = pmu__find_core_pmu();
+
+ if (pmu)
+ return perf_pmu__find_events_table(pmu);
+
+ return NULL;
+}
+
+double perf_pmu__cpu_slots_per_cycle(void)
+{
+ char path[PATH_MAX];
+ unsigned long long slots = 0;
+ struct perf_pmu *pmu = pmu__find_core_pmu();
+
+ if (pmu) {
+ perf_pmu__pathname_scnprintf(path, sizeof(path),
+ pmu->name, "caps/slots");
+ /*
+ * The value of slots fits in 32 bits, but sysfs__read_int
+ * can't read a value with a 0x prefix, so use sysfs__read_ull instead.
+ */
+ sysfs__read_ull(path, &slots);
+ }
+
+ return slots ? (double)slots : NAN;
+}
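
perf_pmu__cpu_slots_per_cycle() ends up reading the core PMU's caps/slots attribute from sysfs; a sketch of the lookup, where the device name is only an example and varies by system:

	/sys/bus/event_source/devices/armv8_pmuv3_0/caps/slots  ->  e.g. "0x5"
	perf_pmu__cpu_slots_per_cycle()                         ->  5.0
	no caps/slots file (or a zero value)                    ->  NAN
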
diff --git a/tools/perf/arch/arm64/util/tsc.c b/tools/perf/arch/arm64/util/tsc.c
new file mode 100644
index 000000000000..cc85bd9e73f1
--- /dev/null
+++ b/tools/perf/arch/arm64/util/tsc.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+
+#include "../../../util/tsc.h"
+
+u64 rdtsc(void)
+{
+ u64 val;
+
+ /*
+ * According to ARM DDI 0487F.c, from Armv8.0 to Armv8.5 inclusive, the
+ * system counter is at least 56 bits wide; from Armv8.6, the counter
+ * must be 64 bits wide. So the system counter could be less than 64
+ * bits wide, which is indicated to userspace by the
+ * 'cap_user_time_short' flag being true.
+ */
+ asm volatile("mrs %0, cntvct_el0" : "=r" (val));
+
+ return val;
+}
diff --git a/tools/perf/arch/arm64/util/unwind-libdw.c b/tools/perf/arch/arm64/util/unwind-libdw.c
index 7623d85e77f3..09385081bb03 100644
--- a/tools/perf/arch/arm64/util/unwind-libdw.c
+++ b/tools/perf/arch/arm64/util/unwind-libdw.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/sample.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
diff --git a/tools/perf/arch/arm64/util/unwind-libunwind.c b/tools/perf/arch/arm64/util/unwind-libunwind.c
index 1495a9523a23..871af5992298 100644
--- a/tools/perf/arch/arm64/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm64/util/unwind-libunwind.c
@@ -4,83 +4,14 @@
#ifndef REMOTE_UNWIND_LIBUNWIND
#include <libunwind.h>
#include "perf_regs.h"
-#include "../../util/unwind.h"
+#include "../../../util/unwind.h"
#endif
-#include "../../util/debug.h"
+#include "../../../util/debug.h"
int LIBUNWIND__ARCH_REG_ID(int regnum)
{
- switch (regnum) {
- case UNW_AARCH64_X0:
- return PERF_REG_ARM64_X0;
- case UNW_AARCH64_X1:
- return PERF_REG_ARM64_X1;
- case UNW_AARCH64_X2:
- return PERF_REG_ARM64_X2;
- case UNW_AARCH64_X3:
- return PERF_REG_ARM64_X3;
- case UNW_AARCH64_X4:
- return PERF_REG_ARM64_X4;
- case UNW_AARCH64_X5:
- return PERF_REG_ARM64_X5;
- case UNW_AARCH64_X6:
- return PERF_REG_ARM64_X6;
- case UNW_AARCH64_X7:
- return PERF_REG_ARM64_X7;
- case UNW_AARCH64_X8:
- return PERF_REG_ARM64_X8;
- case UNW_AARCH64_X9:
- return PERF_REG_ARM64_X9;
- case UNW_AARCH64_X10:
- return PERF_REG_ARM64_X10;
- case UNW_AARCH64_X11:
- return PERF_REG_ARM64_X11;
- case UNW_AARCH64_X12:
- return PERF_REG_ARM64_X12;
- case UNW_AARCH64_X13:
- return PERF_REG_ARM64_X13;
- case UNW_AARCH64_X14:
- return PERF_REG_ARM64_X14;
- case UNW_AARCH64_X15:
- return PERF_REG_ARM64_X15;
- case UNW_AARCH64_X16:
- return PERF_REG_ARM64_X16;
- case UNW_AARCH64_X17:
- return PERF_REG_ARM64_X17;
- case UNW_AARCH64_X18:
- return PERF_REG_ARM64_X18;
- case UNW_AARCH64_X19:
- return PERF_REG_ARM64_X19;
- case UNW_AARCH64_X20:
- return PERF_REG_ARM64_X20;
- case UNW_AARCH64_X21:
- return PERF_REG_ARM64_X21;
- case UNW_AARCH64_X22:
- return PERF_REG_ARM64_X22;
- case UNW_AARCH64_X23:
- return PERF_REG_ARM64_X23;
- case UNW_AARCH64_X24:
- return PERF_REG_ARM64_X24;
- case UNW_AARCH64_X25:
- return PERF_REG_ARM64_X25;
- case UNW_AARCH64_X26:
- return PERF_REG_ARM64_X26;
- case UNW_AARCH64_X27:
- return PERF_REG_ARM64_X27;
- case UNW_AARCH64_X28:
- return PERF_REG_ARM64_X28;
- case UNW_AARCH64_X29:
- return PERF_REG_ARM64_X29;
- case UNW_AARCH64_X30:
- return PERF_REG_ARM64_LR;
- case UNW_AARCH64_SP:
- return PERF_REG_ARM64_SP;
- case UNW_AARCH64_PC:
- return PERF_REG_ARM64_PC;
- default:
- pr_err("unwind: invalid reg id %d\n", regnum);
+ if (regnum < 0 || regnum >= PERF_REG_ARM64_EXTENDED_MAX)
return -EINVAL;
- }
- return -EINVAL;
+ return regnum;
}
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 59dd875fd5e4..b951374bc49d 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -51,9 +51,7 @@ const char *const s390_triplets[] = {
const char *const sh_triplets[] = {
"sh-unknown-linux-gnu-",
- "sh64-unknown-linux-gnu-",
"sh-linux-gnu-",
- "sh64-linux-gnu-",
NULL
};
@@ -130,7 +128,7 @@ static int lookup_triplets(const char *const *triplets, const char *name)
}
static int perf_env__lookup_binutils_path(struct perf_env *env,
- const char *name, const char **path)
+ const char *name, char **path)
{
int idx;
const char *arch = perf_env__arch(env), *cross_env;
@@ -202,7 +200,7 @@ out_error:
return -1;
}
-int perf_env__lookup_objdump(struct perf_env *env, const char **path)
+int perf_env__lookup_objdump(struct perf_env *env, char **path)
{
/*
* For live mode, env->arch will be NULL and we can use
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index e965ed8bb328..4224c299cc70 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -6,7 +6,7 @@
struct perf_env;
-int perf_env__lookup_objdump(struct perf_env *env, const char **path);
+int perf_env__lookup_objdump(struct perf_env *env, char **path);
bool perf_env__single_address_space(struct perf_env *env);
#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/arch/csky/include/perf_regs.h b/tools/perf/arch/csky/include/perf_regs.h
index 8f336ea1161a..1afcc0e916c2 100644
--- a/tools/perf/arch/csky/include/perf_regs.h
+++ b/tools/perf/arch/csky/include/perf_regs.h
@@ -15,86 +15,4 @@
#define PERF_REG_IP PERF_REG_CSKY_PC
#define PERF_REG_SP PERF_REG_CSKY_SP
-static inline const char *perf_reg_name(int id)
-{
- switch (id) {
- case PERF_REG_CSKY_A0:
- return "a0";
- case PERF_REG_CSKY_A1:
- return "a1";
- case PERF_REG_CSKY_A2:
- return "a2";
- case PERF_REG_CSKY_A3:
- return "a3";
- case PERF_REG_CSKY_REGS0:
- return "regs0";
- case PERF_REG_CSKY_REGS1:
- return "regs1";
- case PERF_REG_CSKY_REGS2:
- return "regs2";
- case PERF_REG_CSKY_REGS3:
- return "regs3";
- case PERF_REG_CSKY_REGS4:
- return "regs4";
- case PERF_REG_CSKY_REGS5:
- return "regs5";
- case PERF_REG_CSKY_REGS6:
- return "regs6";
- case PERF_REG_CSKY_REGS7:
- return "regs7";
- case PERF_REG_CSKY_REGS8:
- return "regs8";
- case PERF_REG_CSKY_REGS9:
- return "regs9";
- case PERF_REG_CSKY_SP:
- return "sp";
- case PERF_REG_CSKY_LR:
- return "lr";
- case PERF_REG_CSKY_PC:
- return "pc";
-#if defined(__CSKYABIV2__)
- case PERF_REG_CSKY_EXREGS0:
- return "exregs0";
- case PERF_REG_CSKY_EXREGS1:
- return "exregs1";
- case PERF_REG_CSKY_EXREGS2:
- return "exregs2";
- case PERF_REG_CSKY_EXREGS3:
- return "exregs3";
- case PERF_REG_CSKY_EXREGS4:
- return "exregs4";
- case PERF_REG_CSKY_EXREGS5:
- return "exregs5";
- case PERF_REG_CSKY_EXREGS6:
- return "exregs6";
- case PERF_REG_CSKY_EXREGS7:
- return "exregs7";
- case PERF_REG_CSKY_EXREGS8:
- return "exregs8";
- case PERF_REG_CSKY_EXREGS9:
- return "exregs9";
- case PERF_REG_CSKY_EXREGS10:
- return "exregs10";
- case PERF_REG_CSKY_EXREGS11:
- return "exregs11";
- case PERF_REG_CSKY_EXREGS12:
- return "exregs12";
- case PERF_REG_CSKY_EXREGS13:
- return "exregs13";
- case PERF_REG_CSKY_EXREGS14:
- return "exregs14";
- case PERF_REG_CSKY_TLS:
- return "tls";
- case PERF_REG_CSKY_HI:
- return "hi";
- case PERF_REG_CSKY_LO:
- return "lo";
-#endif
- default:
- return NULL;
- }
-
- return NULL;
-}
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/nds32/Build b/tools/perf/arch/loongarch/Build
index e4e5f33c84d8..e4e5f33c84d8 100644
--- a/tools/perf/arch/nds32/Build
+++ b/tools/perf/arch/loongarch/Build
diff --git a/tools/perf/arch/loongarch/Makefile b/tools/perf/arch/loongarch/Makefile
new file mode 100644
index 000000000000..c392e7af4743
--- /dev/null
+++ b/tools/perf/arch/loongarch/Makefile
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+endif
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+PERF_HAVE_JITDUMP := 1
+
+#
+# Syscall table generation for perf
+#
+
+out := $(OUTPUT)arch/loongarch/include/generated/asm
+header := $(out)/syscalls.c
+incpath := $(srctree)/tools
+sysdef := $(srctree)/tools/arch/loongarch/include/uapi/asm/unistd.h
+sysprf := $(srctree)/tools/perf/arch/loongarch/entry/syscalls/
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+ $(Q)$(SHELL) '$(systbl)' '$(CC)' '$(HOSTCC)' $(incpath) $(sysdef) > $@
+
+clean::
+ $(call QUIET_CLEAN, loongarch) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/loongarch/annotate/instructions.c b/tools/perf/arch/loongarch/annotate/instructions.c
new file mode 100644
index 000000000000..ab21bf122135
--- /dev/null
+++ b/tools/perf/arch/loongarch/annotate/instructions.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Perf annotate functions.
+ *
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+static
+struct ins_ops *loongarch__associate_ins_ops(struct arch *arch, const char *name)
+{
+ struct ins_ops *ops = NULL;
+
+ if (!strncmp(name, "beqz", 4) ||
+ !strncmp(name, "bnez", 4) ||
+ !strncmp(name, "beq", 3) ||
+ !strncmp(name, "bne", 3) ||
+ !strncmp(name, "blt", 3) ||
+ !strncmp(name, "bge", 3) ||
+ !strncmp(name, "bltu", 4) ||
+ !strncmp(name, "bgeu", 4) ||
+ !strncmp(name, "bl", 2))
+ ops = &call_ops;
+ else if (!strncmp(name, "jirl", 4))
+ ops = &ret_ops;
+ else if (name[0] == 'b')
+ ops = &jump_ops;
+ else
+ return NULL;
+
+ arch__associate_ins_ops(arch, name, ops);
+
+ return ops;
+}
+
+static
+int loongarch__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
+{
+ if (!arch->initialized) {
+ arch->associate_instruction_ops = loongarch__associate_ins_ops;
+ arch->initialized = true;
+ arch->objdump.comment_char = '#';
+ }
+
+ return 0;
+}
diff --git a/tools/perf/arch/loongarch/entry/syscalls/mksyscalltbl b/tools/perf/arch/loongarch/entry/syscalls/mksyscalltbl
new file mode 100755
index 000000000000..c52156f7204d
--- /dev/null
+++ b/tools/perf/arch/loongarch/entry/syscalls/mksyscalltbl
@@ -0,0 +1,61 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf. Derived from
+# powerpc script.
+#
+# Author(s): Ming Wang <wangming01@loongson.cn>
+# Author(s): Huacai Chen <chenhuacai@loongson.cn>
+# Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+
+gcc=$1
+hostcc=$2
+incpath=$3
+input=$4
+
+if ! test -r $input; then
+ echo "Could not read input file" >&2
+ exit 1
+fi
+
+create_table_from_c()
+{
+ local sc nr last_sc
+
+ create_table_exe=`mktemp ${TMPDIR:-/tmp}/create-table-XXXXXX`
+
+ {
+
+ cat <<-_EoHEADER
+ #include <stdio.h>
+ #include "$input"
+ int main(int argc, char *argv[])
+ {
+ _EoHEADER
+
+ while read sc nr; do
+ printf "%s\n" " printf(\"\\t[%d] = \\\"$sc\\\",\\n\", $nr);"
+ last_sc=$nr
+ done
+
+ printf "%s\n" " printf(\"#define SYSCALLTBL_LOONGARCH_MAX_ID %d\\n\", $last_sc);"
+ printf "}\n"
+
+ } | $hostcc -I $incpath/include/uapi -o $create_table_exe -x c -
+
+ $create_table_exe
+
+ rm -f $create_table_exe
+}
+
+create_table()
+{
+ echo "static const char *syscalltbl_loongarch[] = {"
+ create_table_from_c
+ echo "};"
+}
+
+$gcc -E -dM -x c -I $incpath/include/uapi $input \
+ |sed -ne 's/^#define __NR_//p' \
+ |sort -t' ' -k2 -n \
+ |create_table
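
The generated syscalls.c is simply a name-by-number array plus a max-ID define; schematically (the concrete entries and the final number depend on the unistd.h being processed):

	static const char *syscalltbl_loongarch[] = {
		[0] = "io_setup",
		[1] = "io_destroy",
		/* ... one entry per __NR_* definition ... */
	};
	#define SYSCALLTBL_LOONGARCH_MAX_ID <highest syscall number seen>
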
diff --git a/tools/perf/arch/loongarch/include/dwarf-regs-table.h b/tools/perf/arch/loongarch/include/dwarf-regs-table.h
new file mode 100644
index 000000000000..bb3944f5764a
--- /dev/null
+++ b/tools/perf/arch/loongarch/include/dwarf-regs-table.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dwarf-regs-table.h : Mapping of DWARF debug register numbers into
+ * register names.
+ *
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifdef DEFINE_DWARF_REGSTR_TABLE
+static const char * const loongarch_regstr_tbl[] = {
+ "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
+ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+ "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
+ "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
+};
+#endif
diff --git a/tools/perf/arch/loongarch/include/perf_regs.h b/tools/perf/arch/loongarch/include/perf_regs.h
new file mode 100644
index 000000000000..7833c7dbd38d
--- /dev/null
+++ b/tools/perf/arch/loongarch/include/perf_regs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include <linux/types.h>
+#include <asm/perf_regs.h>
+
+#define PERF_REGS_MAX PERF_REG_LOONGARCH_MAX
+#define PERF_REG_IP PERF_REG_LOONGARCH_PC
+#define PERF_REG_SP PERF_REG_LOONGARCH_R3
+
+#define PERF_REGS_MASK ((1ULL << PERF_REG_LOONGARCH_MAX) - 1)
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/loongarch/util/Build b/tools/perf/arch/loongarch/util/Build
new file mode 100644
index 000000000000..d776125a2d06
--- /dev/null
+++ b/tools/perf/arch/loongarch/util/Build
@@ -0,0 +1,5 @@
+perf-y += perf_regs.o
+
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/loongarch/util/dwarf-regs.c b/tools/perf/arch/loongarch/util/dwarf-regs.c
new file mode 100644
index 000000000000..0f6ebc387463
--- /dev/null
+++ b/tools/perf/arch/loongarch/util/dwarf-regs.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwarf-regs.c : Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#include <stdio.h>
+#include <errno.h> /* for EINVAL */
+#include <string.h> /* for strcmp */
+#include <dwarf-regs.h>
+
+struct pt_regs_dwarfnum {
+ const char *name;
+ unsigned int dwarfnum;
+};
+
+static struct pt_regs_dwarfnum loongarch_gpr_table[] = {
+ {"%r0", 0}, {"%r1", 1}, {"%r2", 2}, {"%r3", 3},
+ {"%r4", 4}, {"%r5", 5}, {"%r6", 6}, {"%r7", 7},
+ {"%r8", 8}, {"%r9", 9}, {"%r10", 10}, {"%r11", 11},
+ {"%r12", 12}, {"%r13", 13}, {"%r14", 14}, {"%r15", 15},
+ {"%r16", 16}, {"%r17", 17}, {"%r18", 18}, {"%r19", 19},
+ {"%r20", 20}, {"%r21", 21}, {"%r22", 22}, {"%r23", 23},
+ {"%r24", 24}, {"%r25", 25}, {"%r26", 26}, {"%r27", 27},
+ {"%r28", 28}, {"%r29", 29}, {"%r30", 30}, {"%r31", 31},
+ {NULL, 0}
+};
+
+const char *get_arch_regstr(unsigned int n)
+{
+ n %= 32;
+ return loongarch_gpr_table[n].name;
+}
+
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_dwarfnum *roff;
+
+ for (roff = loongarch_gpr_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->dwarfnum;
+ return -EINVAL;
+}
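
A quick illustration of the two lookups above:

	get_arch_regstr(4)                 -> "%r4"
	get_arch_regstr(36)                -> "%r4"      (36 % 32 == 4)
	regs_query_register_offset("%r4")  ->  4
	regs_query_register_offset("%t0")  -> -EINVAL    (not in the table)
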
diff --git a/tools/perf/arch/loongarch/util/perf_regs.c b/tools/perf/arch/loongarch/util/perf_regs.c
new file mode 100644
index 000000000000..2833e101a7c6
--- /dev/null
+++ b/tools/perf/arch/loongarch/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG_END
+};
diff --git a/tools/perf/arch/loongarch/util/unwind-libdw.c b/tools/perf/arch/loongarch/util/unwind-libdw.c
new file mode 100644
index 000000000000..a9415385230a
--- /dev/null
+++ b/tools/perf/arch/loongarch/util/unwind-libdw.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020-2023 Loongson Technology Corporation Limited */
+
+#include <elfutils/libdwfl.h>
+#include "../../util/unwind-libdw.h"
+#include "../../util/perf_regs.h"
+#include "../../util/sample.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
+{
+ struct unwind_info *ui = arg;
+ struct regs_dump *user_regs = &ui->sample->user_regs;
+ Dwarf_Word dwarf_regs[PERF_REG_LOONGARCH_MAX];
+
+#define REG(r) ({ \
+ Dwarf_Word val = 0; \
+ perf_reg_value(&val, user_regs, PERF_REG_LOONGARCH_##r); \
+ val; \
+})
+
+ dwarf_regs[0] = 0;
+ dwarf_regs[1] = REG(R1);
+ dwarf_regs[2] = REG(R2);
+ dwarf_regs[3] = REG(R3);
+ dwarf_regs[4] = REG(R4);
+ dwarf_regs[5] = REG(R5);
+ dwarf_regs[6] = REG(R6);
+ dwarf_regs[7] = REG(R7);
+ dwarf_regs[8] = REG(R8);
+ dwarf_regs[9] = REG(R9);
+ dwarf_regs[10] = REG(R10);
+ dwarf_regs[11] = REG(R11);
+ dwarf_regs[12] = REG(R12);
+ dwarf_regs[13] = REG(R13);
+ dwarf_regs[14] = REG(R14);
+ dwarf_regs[15] = REG(R15);
+ dwarf_regs[16] = REG(R16);
+ dwarf_regs[17] = REG(R17);
+ dwarf_regs[18] = REG(R18);
+ dwarf_regs[19] = REG(R19);
+ dwarf_regs[20] = REG(R20);
+ dwarf_regs[21] = REG(R21);
+ dwarf_regs[22] = REG(R22);
+ dwarf_regs[23] = REG(R23);
+ dwarf_regs[24] = REG(R24);
+ dwarf_regs[25] = REG(R25);
+ dwarf_regs[26] = REG(R26);
+ dwarf_regs[27] = REG(R27);
+ dwarf_regs[28] = REG(R28);
+ dwarf_regs[29] = REG(R29);
+ dwarf_regs[30] = REG(R30);
+ dwarf_regs[31] = REG(R31);
+ dwfl_thread_state_register_pc(thread, REG(PC));
+
+ return dwfl_thread_state_registers(thread, 0, PERF_REG_LOONGARCH_MAX, dwarf_regs);
+}
diff --git a/tools/perf/arch/loongarch/util/unwind-libunwind.c b/tools/perf/arch/loongarch/util/unwind-libunwind.c
new file mode 100644
index 000000000000..f693167b86ef
--- /dev/null
+++ b/tools/perf/arch/loongarch/util/unwind-libunwind.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+#include "util/debug.h"
+
+int libunwind__arch_reg_id(int regnum)
+{
+ switch (regnum) {
+ case UNW_LOONGARCH64_R1:
+ return PERF_REG_LOONGARCH_R1;
+ case UNW_LOONGARCH64_R2:
+ return PERF_REG_LOONGARCH_R2;
+ case UNW_LOONGARCH64_R3:
+ return PERF_REG_LOONGARCH_R3;
+ case UNW_LOONGARCH64_R4:
+ return PERF_REG_LOONGARCH_R4;
+ case UNW_LOONGARCH64_R5:
+ return PERF_REG_LOONGARCH_R5;
+ case UNW_LOONGARCH64_R6:
+ return PERF_REG_LOONGARCH_R6;
+ case UNW_LOONGARCH64_R7:
+ return PERF_REG_LOONGARCH_R7;
+ case UNW_LOONGARCH64_R8:
+ return PERF_REG_LOONGARCH_R8;
+ case UNW_LOONGARCH64_R9:
+ return PERF_REG_LOONGARCH_R9;
+ case UNW_LOONGARCH64_R10:
+ return PERF_REG_LOONGARCH_R10;
+ case UNW_LOONGARCH64_R11:
+ return PERF_REG_LOONGARCH_R11;
+ case UNW_LOONGARCH64_R12:
+ return PERF_REG_LOONGARCH_R12;
+ case UNW_LOONGARCH64_R13:
+ return PERF_REG_LOONGARCH_R13;
+ case UNW_LOONGARCH64_R14:
+ return PERF_REG_LOONGARCH_R14;
+ case UNW_LOONGARCH64_R15:
+ return PERF_REG_LOONGARCH_R15;
+ case UNW_LOONGARCH64_R16:
+ return PERF_REG_LOONGARCH_R16;
+ case UNW_LOONGARCH64_R17:
+ return PERF_REG_LOONGARCH_R17;
+ case UNW_LOONGARCH64_R18:
+ return PERF_REG_LOONGARCH_R18;
+ case UNW_LOONGARCH64_R19:
+ return PERF_REG_LOONGARCH_R19;
+ case UNW_LOONGARCH64_R20:
+ return PERF_REG_LOONGARCH_R20;
+ case UNW_LOONGARCH64_R21:
+ return PERF_REG_LOONGARCH_R21;
+ case UNW_LOONGARCH64_R22:
+ return PERF_REG_LOONGARCH_R22;
+ case UNW_LOONGARCH64_R23:
+ return PERF_REG_LOONGARCH_R23;
+ case UNW_LOONGARCH64_R24:
+ return PERF_REG_LOONGARCH_R24;
+ case UNW_LOONGARCH64_R25:
+ return PERF_REG_LOONGARCH_R25;
+ case UNW_LOONGARCH64_R26:
+ return PERF_REG_LOONGARCH_R26;
+ case UNW_LOONGARCH64_R27:
+ return PERF_REG_LOONGARCH_R27;
+ case UNW_LOONGARCH64_R28:
+ return PERF_REG_LOONGARCH_R28;
+ case UNW_LOONGARCH64_R29:
+ return PERF_REG_LOONGARCH_R29;
+ case UNW_LOONGARCH64_R30:
+ return PERF_REG_LOONGARCH_R30;
+ case UNW_LOONGARCH64_R31:
+ return PERF_REG_LOONGARCH_R31;
+ case UNW_LOONGARCH64_PC:
+ return PERF_REG_LOONGARCH_PC;
+ default:
+ pr_err("unwind: invalid reg id %d\n", regnum);
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
diff --git a/tools/perf/arch/mips/Build b/tools/perf/arch/mips/Build
index 1bb8bf6d7fd4..e4e5f33c84d8 100644
--- a/tools/perf/arch/mips/Build
+++ b/tools/perf/arch/mips/Build
@@ -1 +1 @@
-# empty
+perf-y += util/
diff --git a/tools/perf/arch/mips/Makefile b/tools/perf/arch/mips/Makefile
new file mode 100644
index 000000000000..8bc09072e3d6
--- /dev/null
+++ b/tools/perf/arch/mips/Makefile
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+endif
+
+# Syscall table generation for perf
+out := $(OUTPUT)arch/mips/include/generated/asm
+header := $(out)/syscalls_n64.c
+sysprf := $(srctree)/tools/perf/arch/mips/entry/syscalls
+sysdef := $(sysprf)/syscall_n64.tbl
+systbl := $(sysprf)/mksyscalltbl
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+$(header): $(sysdef) $(systbl)
+ $(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
+
+clean::
+ $(call QUIET_CLEAN, mips) $(RM) $(header)
+
+archheaders: $(header)
diff --git a/tools/perf/arch/mips/annotate/instructions.c b/tools/perf/arch/mips/annotate/instructions.c
new file mode 100644
index 000000000000..340993f2a897
--- /dev/null
+++ b/tools/perf/arch/mips/annotate/instructions.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+static
+struct ins_ops *mips__associate_ins_ops(struct arch *arch, const char *name)
+{
+ struct ins_ops *ops = NULL;
+
+ if (!strncmp(name, "bal", 3) ||
+ !strncmp(name, "bgezal", 6) ||
+ !strncmp(name, "bltzal", 6) ||
+ !strncmp(name, "bgtzal", 6) ||
+ !strncmp(name, "blezal", 6) ||
+ !strncmp(name, "beqzal", 6) ||
+ !strncmp(name, "bnezal", 6) ||
+ !strncmp(name, "bgtzl", 5) ||
+ !strncmp(name, "bltzl", 5) ||
+ !strncmp(name, "bgezl", 5) ||
+ !strncmp(name, "blezl", 5) ||
+ !strncmp(name, "jialc", 5) ||
+ !strncmp(name, "beql", 4) ||
+ !strncmp(name, "bnel", 4) ||
+ !strncmp(name, "jal", 3))
+ ops = &call_ops;
+ else if (!strncmp(name, "jr", 2))
+ ops = &ret_ops;
+ else if (name[0] == 'j' || name[0] == 'b')
+ ops = &jump_ops;
+ else
+ return NULL;
+
+ arch__associate_ins_ops(arch, name, ops);
+
+ return ops;
+}
+
+static
+int mips__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
+{
+ if (!arch->initialized) {
+ arch->associate_instruction_ops = mips__associate_ins_ops;
+ arch->initialized = true;
+ arch->objdump.comment_char = '#';
+ }
+
+ return 0;
+}
diff --git a/tools/perf/arch/mips/entry/syscalls/mksyscalltbl b/tools/perf/arch/mips/entry/syscalls/mksyscalltbl
new file mode 100644
index 000000000000..fb1f49451af6
--- /dev/null
+++ b/tools/perf/arch/mips/entry/syscalls/mksyscalltbl
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Generate system call table for perf. Derived from
+# s390 script.
+#
+# Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+# Changed by: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+SYSCALL_TBL=$1
+
+if ! test -r $SYSCALL_TBL; then
+ echo "Could not read input file" >&2
+ exit 1
+fi
+
+create_table()
+{
+ local max_nr nr abi sc discard
+
+ echo 'static const char *syscalltbl_mips_n64[] = {'
+ while read nr abi sc discard; do
+ printf '\t[%d] = "%s",\n' $nr $sc
+ max_nr=$nr
+ done
+ echo '};'
+ echo "#define SYSCALLTBL_MIPS_N64_MAX_ID $max_nr"
+}
+
+grep -E "^[[:digit:]]+[[:space:]]+(n64)" $SYSCALL_TBL \
+ |sort -k1 -n \
+ |create_table
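
Run against the n64 table that follows, the script produces output along these lines; only n64-tagged rows are picked up by the grep, so the final "common" entry is not included:

	static const char *syscalltbl_mips_n64[] = {
		[0] = "read",
		[1] = "write",
		/* ... */
		[449] = "futex_waitv",
	};
	#define SYSCALLTBL_MIPS_N64_MAX_ID 449
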
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
new file mode 100644
index 000000000000..3f1886ad9d80
--- /dev/null
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -0,0 +1,367 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "n64" for this file.
+#
+0 n64 read sys_read
+1 n64 write sys_write
+2 n64 open sys_open
+3 n64 close sys_close
+4 n64 stat sys_newstat
+5 n64 fstat sys_newfstat
+6 n64 lstat sys_newlstat
+7 n64 poll sys_poll
+8 n64 lseek sys_lseek
+9 n64 mmap sys_mips_mmap
+10 n64 mprotect sys_mprotect
+11 n64 munmap sys_munmap
+12 n64 brk sys_brk
+13 n64 rt_sigaction sys_rt_sigaction
+14 n64 rt_sigprocmask sys_rt_sigprocmask
+15 n64 ioctl sys_ioctl
+16 n64 pread64 sys_pread64
+17 n64 pwrite64 sys_pwrite64
+18 n64 readv sys_readv
+19 n64 writev sys_writev
+20 n64 access sys_access
+21 n64 pipe sysm_pipe
+22 n64 _newselect sys_select
+23 n64 sched_yield sys_sched_yield
+24 n64 mremap sys_mremap
+25 n64 msync sys_msync
+26 n64 mincore sys_mincore
+27 n64 madvise sys_madvise
+28 n64 shmget sys_shmget
+29 n64 shmat sys_shmat
+30 n64 shmctl sys_old_shmctl
+31 n64 dup sys_dup
+32 n64 dup2 sys_dup2
+33 n64 pause sys_pause
+34 n64 nanosleep sys_nanosleep
+35 n64 getitimer sys_getitimer
+36 n64 setitimer sys_setitimer
+37 n64 alarm sys_alarm
+38 n64 getpid sys_getpid
+39 n64 sendfile sys_sendfile64
+40 n64 socket sys_socket
+41 n64 connect sys_connect
+42 n64 accept sys_accept
+43 n64 sendto sys_sendto
+44 n64 recvfrom sys_recvfrom
+45 n64 sendmsg sys_sendmsg
+46 n64 recvmsg sys_recvmsg
+47 n64 shutdown sys_shutdown
+48 n64 bind sys_bind
+49 n64 listen sys_listen
+50 n64 getsockname sys_getsockname
+51 n64 getpeername sys_getpeername
+52 n64 socketpair sys_socketpair
+53 n64 setsockopt sys_setsockopt
+54 n64 getsockopt sys_getsockopt
+55 n64 clone __sys_clone
+56 n64 fork __sys_fork
+57 n64 execve sys_execve
+58 n64 exit sys_exit
+59 n64 wait4 sys_wait4
+60 n64 kill sys_kill
+61 n64 uname sys_newuname
+62 n64 semget sys_semget
+63 n64 semop sys_semop
+64 n64 semctl sys_old_semctl
+65 n64 shmdt sys_shmdt
+66 n64 msgget sys_msgget
+67 n64 msgsnd sys_msgsnd
+68 n64 msgrcv sys_msgrcv
+69 n64 msgctl sys_old_msgctl
+70 n64 fcntl sys_fcntl
+71 n64 flock sys_flock
+72 n64 fsync sys_fsync
+73 n64 fdatasync sys_fdatasync
+74 n64 truncate sys_truncate
+75 n64 ftruncate sys_ftruncate
+76 n64 getdents sys_getdents
+77 n64 getcwd sys_getcwd
+78 n64 chdir sys_chdir
+79 n64 fchdir sys_fchdir
+80 n64 rename sys_rename
+81 n64 mkdir sys_mkdir
+82 n64 rmdir sys_rmdir
+83 n64 creat sys_creat
+84 n64 link sys_link
+85 n64 unlink sys_unlink
+86 n64 symlink sys_symlink
+87 n64 readlink sys_readlink
+88 n64 chmod sys_chmod
+89 n64 fchmod sys_fchmod
+90 n64 chown sys_chown
+91 n64 fchown sys_fchown
+92 n64 lchown sys_lchown
+93 n64 umask sys_umask
+94 n64 gettimeofday sys_gettimeofday
+95 n64 getrlimit sys_getrlimit
+96 n64 getrusage sys_getrusage
+97 n64 sysinfo sys_sysinfo
+98 n64 times sys_times
+99 n64 ptrace sys_ptrace
+100 n64 getuid sys_getuid
+101 n64 syslog sys_syslog
+102 n64 getgid sys_getgid
+103 n64 setuid sys_setuid
+104 n64 setgid sys_setgid
+105 n64 geteuid sys_geteuid
+106 n64 getegid sys_getegid
+107 n64 setpgid sys_setpgid
+108 n64 getppid sys_getppid
+109 n64 getpgrp sys_getpgrp
+110 n64 setsid sys_setsid
+111 n64 setreuid sys_setreuid
+112 n64 setregid sys_setregid
+113 n64 getgroups sys_getgroups
+114 n64 setgroups sys_setgroups
+115 n64 setresuid sys_setresuid
+116 n64 getresuid sys_getresuid
+117 n64 setresgid sys_setresgid
+118 n64 getresgid sys_getresgid
+119 n64 getpgid sys_getpgid
+120 n64 setfsuid sys_setfsuid
+121 n64 setfsgid sys_setfsgid
+122 n64 getsid sys_getsid
+123 n64 capget sys_capget
+124 n64 capset sys_capset
+125 n64 rt_sigpending sys_rt_sigpending
+126 n64 rt_sigtimedwait sys_rt_sigtimedwait
+127 n64 rt_sigqueueinfo sys_rt_sigqueueinfo
+128 n64 rt_sigsuspend sys_rt_sigsuspend
+129 n64 sigaltstack sys_sigaltstack
+130 n64 utime sys_utime
+131 n64 mknod sys_mknod
+132 n64 personality sys_personality
+133 n64 ustat sys_ustat
+134 n64 statfs sys_statfs
+135 n64 fstatfs sys_fstatfs
+136 n64 sysfs sys_sysfs
+137 n64 getpriority sys_getpriority
+138 n64 setpriority sys_setpriority
+139 n64 sched_setparam sys_sched_setparam
+140 n64 sched_getparam sys_sched_getparam
+141 n64 sched_setscheduler sys_sched_setscheduler
+142 n64 sched_getscheduler sys_sched_getscheduler
+143 n64 sched_get_priority_max sys_sched_get_priority_max
+144 n64 sched_get_priority_min sys_sched_get_priority_min
+145 n64 sched_rr_get_interval sys_sched_rr_get_interval
+146 n64 mlock sys_mlock
+147 n64 munlock sys_munlock
+148 n64 mlockall sys_mlockall
+149 n64 munlockall sys_munlockall
+150 n64 vhangup sys_vhangup
+151 n64 pivot_root sys_pivot_root
+152 n64 _sysctl sys_ni_syscall
+153 n64 prctl sys_prctl
+154 n64 adjtimex sys_adjtimex
+155 n64 setrlimit sys_setrlimit
+156 n64 chroot sys_chroot
+157 n64 sync sys_sync
+158 n64 acct sys_acct
+159 n64 settimeofday sys_settimeofday
+160 n64 mount sys_mount
+161 n64 umount2 sys_umount
+162 n64 swapon sys_swapon
+163 n64 swapoff sys_swapoff
+164 n64 reboot sys_reboot
+165 n64 sethostname sys_sethostname
+166 n64 setdomainname sys_setdomainname
+167 n64 create_module sys_ni_syscall
+168 n64 init_module sys_init_module
+169 n64 delete_module sys_delete_module
+170 n64 get_kernel_syms sys_ni_syscall
+171 n64 query_module sys_ni_syscall
+172 n64 quotactl sys_quotactl
+173 n64 nfsservctl sys_ni_syscall
+174 n64 getpmsg sys_ni_syscall
+175 n64 putpmsg sys_ni_syscall
+176 n64 afs_syscall sys_ni_syscall
+# 177 reserved for security
+177 n64 reserved177 sys_ni_syscall
+178 n64 gettid sys_gettid
+179 n64 readahead sys_readahead
+180 n64 setxattr sys_setxattr
+181 n64 lsetxattr sys_lsetxattr
+182 n64 fsetxattr sys_fsetxattr
+183 n64 getxattr sys_getxattr
+184 n64 lgetxattr sys_lgetxattr
+185 n64 fgetxattr sys_fgetxattr
+186 n64 listxattr sys_listxattr
+187 n64 llistxattr sys_llistxattr
+188 n64 flistxattr sys_flistxattr
+189 n64 removexattr sys_removexattr
+190 n64 lremovexattr sys_lremovexattr
+191 n64 fremovexattr sys_fremovexattr
+192 n64 tkill sys_tkill
+193 n64 reserved193 sys_ni_syscall
+194 n64 futex sys_futex
+195 n64 sched_setaffinity sys_sched_setaffinity
+196 n64 sched_getaffinity sys_sched_getaffinity
+197 n64 cacheflush sys_cacheflush
+198 n64 cachectl sys_cachectl
+199 n64 sysmips __sys_sysmips
+200 n64 io_setup sys_io_setup
+201 n64 io_destroy sys_io_destroy
+202 n64 io_getevents sys_io_getevents
+203 n64 io_submit sys_io_submit
+204 n64 io_cancel sys_io_cancel
+205 n64 exit_group sys_exit_group
+206 n64 lookup_dcookie sys_lookup_dcookie
+207 n64 epoll_create sys_epoll_create
+208 n64 epoll_ctl sys_epoll_ctl
+209 n64 epoll_wait sys_epoll_wait
+210 n64 remap_file_pages sys_remap_file_pages
+211 n64 rt_sigreturn sys_rt_sigreturn
+212 n64 set_tid_address sys_set_tid_address
+213 n64 restart_syscall sys_restart_syscall
+214 n64 semtimedop sys_semtimedop
+215 n64 fadvise64 sys_fadvise64_64
+216 n64 timer_create sys_timer_create
+217 n64 timer_settime sys_timer_settime
+218 n64 timer_gettime sys_timer_gettime
+219 n64 timer_getoverrun sys_timer_getoverrun
+220 n64 timer_delete sys_timer_delete
+221 n64 clock_settime sys_clock_settime
+222 n64 clock_gettime sys_clock_gettime
+223 n64 clock_getres sys_clock_getres
+224 n64 clock_nanosleep sys_clock_nanosleep
+225 n64 tgkill sys_tgkill
+226 n64 utimes sys_utimes
+227 n64 mbind sys_mbind
+228 n64 get_mempolicy sys_get_mempolicy
+229 n64 set_mempolicy sys_set_mempolicy
+230 n64 mq_open sys_mq_open
+231 n64 mq_unlink sys_mq_unlink
+232 n64 mq_timedsend sys_mq_timedsend
+233 n64 mq_timedreceive sys_mq_timedreceive
+234 n64 mq_notify sys_mq_notify
+235 n64 mq_getsetattr sys_mq_getsetattr
+236 n64 vserver sys_ni_syscall
+237 n64 waitid sys_waitid
+# 238 was sys_setaltroot
+239 n64 add_key sys_add_key
+240 n64 request_key sys_request_key
+241 n64 keyctl sys_keyctl
+242 n64 set_thread_area sys_set_thread_area
+243 n64 inotify_init sys_inotify_init
+244 n64 inotify_add_watch sys_inotify_add_watch
+245 n64 inotify_rm_watch sys_inotify_rm_watch
+246 n64 migrate_pages sys_migrate_pages
+247 n64 openat sys_openat
+248 n64 mkdirat sys_mkdirat
+249 n64 mknodat sys_mknodat
+250 n64 fchownat sys_fchownat
+251 n64 futimesat sys_futimesat
+252 n64 newfstatat sys_newfstatat
+253 n64 unlinkat sys_unlinkat
+254 n64 renameat sys_renameat
+255 n64 linkat sys_linkat
+256 n64 symlinkat sys_symlinkat
+257 n64 readlinkat sys_readlinkat
+258 n64 fchmodat sys_fchmodat
+259 n64 faccessat sys_faccessat
+260 n64 pselect6 sys_pselect6
+261 n64 ppoll sys_ppoll
+262 n64 unshare sys_unshare
+263 n64 splice sys_splice
+264 n64 sync_file_range sys_sync_file_range
+265 n64 tee sys_tee
+266 n64 vmsplice sys_vmsplice
+267 n64 move_pages sys_move_pages
+268 n64 set_robust_list sys_set_robust_list
+269 n64 get_robust_list sys_get_robust_list
+270 n64 kexec_load sys_kexec_load
+271 n64 getcpu sys_getcpu
+272 n64 epoll_pwait sys_epoll_pwait
+273 n64 ioprio_set sys_ioprio_set
+274 n64 ioprio_get sys_ioprio_get
+275 n64 utimensat sys_utimensat
+276 n64 signalfd sys_signalfd
+277 n64 timerfd sys_ni_syscall
+278 n64 eventfd sys_eventfd
+279 n64 fallocate sys_fallocate
+280 n64 timerfd_create sys_timerfd_create
+281 n64 timerfd_gettime sys_timerfd_gettime
+282 n64 timerfd_settime sys_timerfd_settime
+283 n64 signalfd4 sys_signalfd4
+284 n64 eventfd2 sys_eventfd2
+285 n64 epoll_create1 sys_epoll_create1
+286 n64 dup3 sys_dup3
+287 n64 pipe2 sys_pipe2
+288 n64 inotify_init1 sys_inotify_init1
+289 n64 preadv sys_preadv
+290 n64 pwritev sys_pwritev
+291 n64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+292 n64 perf_event_open sys_perf_event_open
+293 n64 accept4 sys_accept4
+294 n64 recvmmsg sys_recvmmsg
+295 n64 fanotify_init sys_fanotify_init
+296 n64 fanotify_mark sys_fanotify_mark
+297 n64 prlimit64 sys_prlimit64
+298 n64 name_to_handle_at sys_name_to_handle_at
+299 n64 open_by_handle_at sys_open_by_handle_at
+300 n64 clock_adjtime sys_clock_adjtime
+301 n64 syncfs sys_syncfs
+302 n64 sendmmsg sys_sendmmsg
+303 n64 setns sys_setns
+304 n64 process_vm_readv sys_process_vm_readv
+305 n64 process_vm_writev sys_process_vm_writev
+306 n64 kcmp sys_kcmp
+307 n64 finit_module sys_finit_module
+308 n64 getdents64 sys_getdents64
+309 n64 sched_setattr sys_sched_setattr
+310 n64 sched_getattr sys_sched_getattr
+311 n64 renameat2 sys_renameat2
+312 n64 seccomp sys_seccomp
+313 n64 getrandom sys_getrandom
+314 n64 memfd_create sys_memfd_create
+315 n64 bpf sys_bpf
+316 n64 execveat sys_execveat
+317 n64 userfaultfd sys_userfaultfd
+318 n64 membarrier sys_membarrier
+319 n64 mlock2 sys_mlock2
+320 n64 copy_file_range sys_copy_file_range
+321 n64 preadv2 sys_preadv2
+322 n64 pwritev2 sys_pwritev2
+323 n64 pkey_mprotect sys_pkey_mprotect
+324 n64 pkey_alloc sys_pkey_alloc
+325 n64 pkey_free sys_pkey_free
+326 n64 statx sys_statx
+327 n64 rseq sys_rseq
+328 n64 io_pgetevents sys_io_pgetevents
+# 329 through 423 are reserved to sync up with other architectures
+424 n64 pidfd_send_signal sys_pidfd_send_signal
+425 n64 io_uring_setup sys_io_uring_setup
+426 n64 io_uring_enter sys_io_uring_enter
+427 n64 io_uring_register sys_io_uring_register
+428 n64 open_tree sys_open_tree
+429 n64 move_mount sys_move_mount
+430 n64 fsopen sys_fsopen
+431 n64 fsconfig sys_fsconfig
+432 n64 fsmount sys_fsmount
+433 n64 fspick sys_fspick
+434 n64 pidfd_open sys_pidfd_open
+435 n64 clone3 __sys_clone3
+436 n64 close_range sys_close_range
+437 n64 openat2 sys_openat2
+438 n64 pidfd_getfd sys_pidfd_getfd
+439 n64 faccessat2 sys_faccessat2
+440 n64 process_madvise sys_process_madvise
+441 n64 epoll_pwait2 sys_epoll_pwait2
+442 n64 mount_setattr sys_mount_setattr
+443 n64 quotactl_fd sys_quotactl_fd
+444 n64 landlock_create_ruleset sys_landlock_create_ruleset
+445 n64 landlock_add_rule sys_landlock_add_rule
+446 n64 landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 n64 process_mrelease sys_process_mrelease
+449 n64 futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/tools/perf/arch/mips/include/dwarf-regs-table.h b/tools/perf/arch/mips/include/dwarf-regs-table.h
new file mode 100644
index 000000000000..5badbcd3c5ec
--- /dev/null
+++ b/tools/perf/arch/mips/include/dwarf-regs-table.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * dwarf-regs-table.h : Mapping of DWARF debug register numbers into
+ * register names.
+ *
+ * Copyright (C) 2013 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifdef DEFINE_DWARF_REGSTR_TABLE
+#undef REG_DWARFNUM_NAME
+#define REG_DWARFNUM_NAME(reg, idx) [idx] = "$" #reg
+static const char * const mips_regstr_tbl[] = {
+ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9",
+ "$10", "$11", "$12", "$13", "$14", "$15", "$16", "$17", "$18", "$19",
+ "$20", "$21", "$22", "$23", "$24", "$25", "$26", "$27", "$28", "%29",
+ "$30", "$31",
+ REG_DWARFNUM_NAME(hi, 64),
+ REG_DWARFNUM_NAME(lo, 65),
+};
+#endif
diff --git a/tools/perf/arch/mips/include/perf_regs.h b/tools/perf/arch/mips/include/perf_regs.h
new file mode 100644
index 000000000000..b8cd8bbb37ba
--- /dev/null
+++ b/tools/perf/arch/mips/include/perf_regs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include <linux/types.h>
+#include <asm/perf_regs.h>
+
+#define PERF_REGS_MAX PERF_REG_MIPS_MAX
+#define PERF_REG_IP PERF_REG_MIPS_PC
+#define PERF_REG_SP PERF_REG_MIPS_R29
+
+#define PERF_REGS_MASK ((1ULL << PERF_REG_MIPS_MAX) - 1)
+
+#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/mips/util/Build b/tools/perf/arch/mips/util/Build
new file mode 100644
index 000000000000..51c8900a9a10
--- /dev/null
+++ b/tools/perf/arch/mips/util/Build
@@ -0,0 +1,3 @@
+perf-y += perf_regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
diff --git a/tools/perf/arch/mips/util/dwarf-regs.c b/tools/perf/arch/mips/util/dwarf-regs.c
new file mode 100644
index 000000000000..25c13a91c2a7
--- /dev/null
+++ b/tools/perf/arch/mips/util/dwarf-regs.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwarf-regs.c : Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2013 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <stdio.h>
+#include <dwarf-regs.h>
+
+static const char *mips_gpr_names[32] = {
+ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9",
+ "$10", "$11", "$12", "$13", "$14", "$15", "$16", "$17", "$18", "$19",
+ "$20", "$21", "$22", "$23", "$24", "$25", "$26", "$27", "$28", "$29",
+ "$30", "$31"
+};
+
+const char *get_arch_regstr(unsigned int n)
+{
+ if (n < 32)
+ return mips_gpr_names[n];
+ if (n == 64)
+ return "hi";
+ if (n == 65)
+ return "lo";
+ return NULL;
+}
diff --git a/tools/perf/arch/mips/util/perf_regs.c b/tools/perf/arch/mips/util/perf_regs.c
new file mode 100644
index 000000000000..2864e2e3776d
--- /dev/null
+++ b/tools/perf/arch/mips/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+ SMPL_REG_END
+};
diff --git a/tools/perf/arch/mips/util/unwind-libunwind.c b/tools/perf/arch/mips/util/unwind-libunwind.c
new file mode 100644
index 000000000000..0d8c99c29da6
--- /dev/null
+++ b/tools/perf/arch/mips/util/unwind-libunwind.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <errno.h>
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+#include "util/debug.h"
+
+int libunwind__arch_reg_id(int regnum)
+{
+ switch (regnum) {
+ case UNW_MIPS_R1 ... UNW_MIPS_R25:
+ return regnum - UNW_MIPS_R1 + PERF_REG_MIPS_R1;
+ case UNW_MIPS_R28 ... UNW_MIPS_R31:
+ return regnum - UNW_MIPS_R28 + PERF_REG_MIPS_R28;
+ case UNW_MIPS_PC:
+ return PERF_REG_MIPS_PC;
+ default:
+ pr_err("unwind: invalid reg id %d\n", regnum);
+ return -EINVAL;
+ }
+}
diff --git a/tools/perf/arch/nds32/util/Build b/tools/perf/arch/nds32/util/Build
deleted file mode 100644
index d0bc205fe49a..000000000000
--- a/tools/perf/arch/nds32/util/Build
+++ /dev/null
@@ -1 +0,0 @@
-perf-y += header.o
diff --git a/tools/perf/arch/nds32/util/header.c b/tools/perf/arch/nds32/util/header.c
deleted file mode 100644
index ef9dbdbe7968..000000000000
--- a/tools/perf/arch/nds32/util/header.c
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <api/fs/fs.h>
-#include "header.h"
-
-#define STR_LEN 1024
-
-char *get_cpuid_str(struct perf_pmu *pmu)
-{
- /* In nds32, we only have one cpu */
- char *buf = NULL;
- struct cpu_map *cpus;
- const char *sysfs = sysfs__mountpoint();
-
- if (!sysfs || !pmu || !pmu->cpus)
- return NULL;
-
- buf = malloc(STR_LEN);
- if (!buf)
- return NULL;
-
- cpus = cpu_map__get(pmu->cpus);
- sprintf(buf, "0x%x", cpus->nr - 1);
- cpu_map__put(cpus);
- return buf;
-}
diff --git a/tools/perf/arch/powerpc/Makefile b/tools/perf/arch/powerpc/Makefile
index e58d00d62f02..840ea0e59287 100644
--- a/tools/perf/arch/powerpc/Makefile
+++ b/tools/perf/arch/powerpc/Makefile
@@ -14,7 +14,6 @@ PERF_HAVE_JITDUMP := 1
out := $(OUTPUT)arch/powerpc/include/generated/asm
header32 := $(out)/syscalls_32.c
header64 := $(out)/syscalls_64.c
-syskrn := $(srctree)/arch/powerpc/kernel/syscalls/syscall.tbl
sysprf := $(srctree)/tools/perf/arch/powerpc/entry/syscalls
sysdef := $(sysprf)/syscall.tbl
systbl := $(sysprf)/mksyscalltbl
@@ -23,15 +22,9 @@ systbl := $(sysprf)/mksyscalltbl
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
$(header64): $(sysdef) $(systbl)
- @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
- (diff -B $(sysdef) $(syskrn) >/dev/null) \
- || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
$(Q)$(SHELL) '$(systbl)' '64' $(sysdef) > $@
$(header32): $(sysdef) $(systbl)
- @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
- (diff -B $(sysdef) $(syskrn) >/dev/null) \
- || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
$(Q)$(SHELL) '$(systbl)' '32' $(sysdef) > $@
clean::
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 35b61bfc1b1a..a0be127475b1 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -9,7 +9,7 @@
#
0 nospu restart_syscall sys_restart_syscall
1 nospu exit sys_exit
-2 nospu fork ppc_fork
+2 nospu fork sys_fork
3 common read sys_read
4 common write sys_write
5 common open sys_open compat_sys_open
@@ -32,7 +32,7 @@
18 spu oldstat sys_ni_syscall
19 common lseek sys_lseek compat_sys_lseek
20 common getpid sys_getpid
-21 nospu mount sys_mount compat_sys_mount
+21 nospu mount sys_mount
22 32 umount sys_oldumount
22 64 umount sys_ni_syscall
22 spu umount sys_ni_syscall
@@ -110,7 +110,7 @@
79 common settimeofday sys_settimeofday compat_sys_settimeofday
80 common getgroups sys_getgroups
81 common setgroups sys_setgroups
-82 32 select ppc_select sys_ni_syscall
+82 32 select sys_old_select compat_sys_old_select
82 64 select sys_ni_syscall
82 spu select sys_ni_syscall
83 common symlink sys_symlink
@@ -158,7 +158,7 @@
119 32 sigreturn sys_sigreturn compat_sys_sigreturn
119 64 sigreturn sys_ni_syscall
119 spu sigreturn sys_ni_syscall
-120 nospu clone ppc_clone
+120 nospu clone sys_clone
121 common setdomainname sys_setdomainname
122 common uname sys_newuname
123 common modify_ldt sys_ni_syscall
@@ -176,11 +176,11 @@
131 nospu quotactl sys_quotactl
132 common getpgid sys_getpgid
133 common fchdir sys_fchdir
-134 common bdflush sys_bdflush
+134 common bdflush sys_ni_syscall
135 common sysfs sys_sysfs
-136 32 personality sys_personality ppc64_personality
-136 64 personality ppc64_personality
-136 spu personality ppc64_personality
+136 32 personality sys_personality compat_sys_ppc64_personality
+136 64 personality sys_ppc64_personality
+136 spu personality sys_ppc64_personality
137 common afs_syscall sys_ni_syscall
138 common setfsuid sys_setfsuid
139 common setfsgid sys_setfsgid
@@ -189,11 +189,11 @@
142 common _newselect sys_select compat_sys_select
143 common flock sys_flock
144 common msync sys_msync
-145 common readv sys_readv compat_sys_readv
-146 common writev sys_writev compat_sys_writev
+145 common readv sys_readv
+146 common writev sys_writev
147 common getsid sys_getsid
148 common fdatasync sys_fdatasync
-149 nospu _sysctl sys_sysctl compat_sys_sysctl
+149 nospu _sysctl sys_ni_syscall
150 common mlock sys_mlock
151 common munlock sys_munlock
152 common mlockall sys_mlockall
@@ -228,8 +228,10 @@
176 64 rt_sigtimedwait sys_rt_sigtimedwait
177 nospu rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
178 nospu rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
-179 common pread64 sys_pread64 compat_sys_pread64
-180 common pwrite64 sys_pwrite64 compat_sys_pwrite64
+179 32 pread64 sys_ppc_pread64 compat_sys_ppc_pread64
+179 64 pread64 sys_pread64
+180 32 pwrite64 sys_ppc_pwrite64 compat_sys_ppc_pwrite64
+180 64 pwrite64 sys_pwrite64
181 common chown sys_chown
182 common getcwd sys_getcwd
183 common capget sys_capget
@@ -240,12 +242,13 @@
186 spu sendfile sys_sendfile64
187 common getpmsg sys_ni_syscall
188 common putpmsg sys_ni_syscall
-189 nospu vfork ppc_vfork
+189 nospu vfork sys_vfork
190 common ugetrlimit sys_getrlimit compat_sys_getrlimit
-191 common readahead sys_readahead compat_sys_readahead
+191 32 readahead sys_ppc_readahead compat_sys_ppc_readahead
+191 64 readahead sys_readahead
192 32 mmap2 sys_mmap2 compat_sys_mmap2
-193 32 truncate64 sys_truncate64 compat_sys_truncate64
-194 32 ftruncate64 sys_ftruncate64 compat_sys_ftruncate64
+193 32 truncate64 sys_ppc_truncate64 compat_sys_ppc_truncate64
+194 32 ftruncate64 sys_ppc_ftruncate64 compat_sys_ppc_ftruncate64
195 32 stat64 sys_stat64
196 32 lstat64 sys_lstat64
197 32 fstat64 sys_fstat64
@@ -288,7 +291,8 @@
230 common io_submit sys_io_submit compat_sys_io_submit
231 common io_cancel sys_io_cancel
232 nospu set_tid_address sys_set_tid_address
-233 common fadvise64 sys_fadvise64 ppc32_fadvise64
+233 32 fadvise64 sys_ppc32_fadvise64 compat_sys_ppc32_fadvise64
+233 64 fadvise64 sys_fadvise64
234 nospu exit_group sys_exit_group
235 nospu lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
236 common epoll_create sys_epoll_create
@@ -316,26 +320,24 @@
248 32 clock_nanosleep sys_clock_nanosleep_time32
248 64 clock_nanosleep sys_clock_nanosleep
248 spu clock_nanosleep sys_clock_nanosleep
-249 32 swapcontext ppc_swapcontext ppc32_swapcontext
-249 64 swapcontext ppc64_swapcontext
-249 spu swapcontext sys_ni_syscall
+249 nospu swapcontext sys_swapcontext compat_sys_swapcontext
250 common tgkill sys_tgkill
251 32 utimes sys_utimes_time32
251 64 utimes sys_utimes
251 spu utimes sys_utimes
252 common statfs64 sys_statfs64 compat_sys_statfs64
253 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
-254 32 fadvise64_64 ppc_fadvise64_64
+254 32 fadvise64_64 sys_ppc_fadvise64_64
254 spu fadvise64_64 sys_ni_syscall
255 common rtas sys_rtas
256 32 sys_debug_setcontext sys_debug_setcontext sys_ni_syscall
256 64 sys_debug_setcontext sys_ni_syscall
256 spu sys_debug_setcontext sys_ni_syscall
# 257 reserved for vserver
-258 nospu migrate_pages sys_migrate_pages compat_sys_migrate_pages
-259 nospu mbind sys_mbind compat_sys_mbind
-260 nospu get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-261 nospu set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+258 nospu migrate_pages sys_migrate_pages
+259 nospu mbind sys_mbind
+260 nospu get_mempolicy sys_get_mempolicy
+261 nospu set_mempolicy sys_set_mempolicy
262 nospu mq_open sys_mq_open compat_sys_mq_open
263 nospu mq_unlink sys_mq_unlink
264 32 mq_timedsend sys_mq_timedsend_time32
@@ -363,7 +365,7 @@
282 common unshare sys_unshare
283 common splice sys_splice
284 common tee sys_tee
-285 common vmsplice sys_vmsplice compat_sys_vmsplice
+285 common vmsplice sys_vmsplice
286 common openat sys_openat compat_sys_openat
287 common mkdirat sys_mkdirat
288 common mknodat sys_mknodat
@@ -383,7 +385,7 @@
298 common faccessat sys_faccessat
299 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
300 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
-301 common move_pages sys_move_pages compat_sys_move_pages
+301 common move_pages sys_move_pages
302 common getcpu sys_getcpu
303 nospu epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
304 32 utimensat sys_utimensat_time32
@@ -392,8 +394,11 @@
305 common signalfd sys_signalfd compat_sys_signalfd
306 common timerfd_create sys_timerfd_create
307 common eventfd sys_eventfd
-308 common sync_file_range2 sys_sync_file_range2 compat_sys_sync_file_range2
-309 nospu fallocate sys_fallocate compat_sys_fallocate
+308 32 sync_file_range2 sys_ppc_sync_file_range2 compat_sys_ppc_sync_file_range2
+308 64 sync_file_range2 sys_sync_file_range2
+308 spu sync_file_range2 sys_sync_file_range2
+309 32 fallocate sys_ppc_fallocate compat_sys_fallocate
+309 64 fallocate sys_fallocate
310 nospu subpage_prot sys_subpage_prot
311 32 timerfd_settime sys_timerfd_settime32
311 64 timerfd_settime sys_timerfd_settime
@@ -427,8 +432,8 @@
336 common recv sys_recv compat_sys_recv
337 common recvfrom sys_recvfrom compat_sys_recvfrom
338 common shutdown sys_shutdown
-339 common setsockopt sys_setsockopt compat_sys_setsockopt
-340 common getsockopt sys_getsockopt compat_sys_getsockopt
+339 common setsockopt sys_setsockopt sys_setsockopt
+340 common getsockopt sys_getsockopt sys_getsockopt
341 common sendmsg sys_sendmsg compat_sys_sendmsg
342 common recvmsg sys_recvmsg compat_sys_recvmsg
343 32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
@@ -443,8 +448,8 @@
348 common syncfs sys_syncfs
349 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
350 common setns sys_setns
-351 nospu process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-352 nospu process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+351 nospu process_vm_readv sys_process_vm_readv
+352 nospu process_vm_writev sys_process_vm_writev
353 nospu finit_module sys_finit_module
354 nospu kcmp sys_kcmp
355 common sched_setattr sys_sched_setattr
@@ -456,7 +461,7 @@
361 common bpf sys_bpf
362 nospu execveat sys_execveat compat_sys_execveat
363 32 switch_endian sys_ni_syscall
-363 64 switch_endian ppc_switch_endian
+363 64 switch_endian sys_switch_endian
363 spu switch_endian sys_ni_syscall
364 common userfaultfd sys_userfaultfd
365 common membarrier sys_membarrier
@@ -516,6 +521,19 @@
432 common fsmount sys_fsmount
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
-435 nospu clone3 ppc_clone3
+435 nospu clone3 sys_clone3
+436 common close_range sys_close_range
437 common openat2 sys_openat2
438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/tools/perf/arch/powerpc/include/arch-tests.h b/tools/perf/arch/powerpc/include/arch-tests.h
index 1c7be75cbc78..452b3d904521 100644
--- a/tools/perf/arch/powerpc/include/arch-tests.h
+++ b/tools/perf/arch/powerpc/include/arch-tests.h
@@ -2,13 +2,6 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-int test__arch_unwind_sample(struct perf_sample *sample,
- struct thread *thread);
-#endif
-
-extern struct test arch_tests[];
+extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/powerpc/include/perf_regs.h b/tools/perf/arch/powerpc/include/perf_regs.h
index e18a3556f5e3..9bb17c3f370b 100644
--- a/tools/perf/arch/powerpc/include/perf_regs.h
+++ b/tools/perf/arch/powerpc/include/perf_regs.h
@@ -19,56 +19,4 @@ void perf_regs_load(u64 *regs);
#define PERF_REG_IP PERF_REG_POWERPC_NIP
#define PERF_REG_SP PERF_REG_POWERPC_R1
-static const char *reg_names[] = {
- [PERF_REG_POWERPC_R0] = "r0",
- [PERF_REG_POWERPC_R1] = "r1",
- [PERF_REG_POWERPC_R2] = "r2",
- [PERF_REG_POWERPC_R3] = "r3",
- [PERF_REG_POWERPC_R4] = "r4",
- [PERF_REG_POWERPC_R5] = "r5",
- [PERF_REG_POWERPC_R6] = "r6",
- [PERF_REG_POWERPC_R7] = "r7",
- [PERF_REG_POWERPC_R8] = "r8",
- [PERF_REG_POWERPC_R9] = "r9",
- [PERF_REG_POWERPC_R10] = "r10",
- [PERF_REG_POWERPC_R11] = "r11",
- [PERF_REG_POWERPC_R12] = "r12",
- [PERF_REG_POWERPC_R13] = "r13",
- [PERF_REG_POWERPC_R14] = "r14",
- [PERF_REG_POWERPC_R15] = "r15",
- [PERF_REG_POWERPC_R16] = "r16",
- [PERF_REG_POWERPC_R17] = "r17",
- [PERF_REG_POWERPC_R18] = "r18",
- [PERF_REG_POWERPC_R19] = "r19",
- [PERF_REG_POWERPC_R20] = "r20",
- [PERF_REG_POWERPC_R21] = "r21",
- [PERF_REG_POWERPC_R22] = "r22",
- [PERF_REG_POWERPC_R23] = "r23",
- [PERF_REG_POWERPC_R24] = "r24",
- [PERF_REG_POWERPC_R25] = "r25",
- [PERF_REG_POWERPC_R26] = "r26",
- [PERF_REG_POWERPC_R27] = "r27",
- [PERF_REG_POWERPC_R28] = "r28",
- [PERF_REG_POWERPC_R29] = "r29",
- [PERF_REG_POWERPC_R30] = "r30",
- [PERF_REG_POWERPC_R31] = "r31",
- [PERF_REG_POWERPC_NIP] = "nip",
- [PERF_REG_POWERPC_MSR] = "msr",
- [PERF_REG_POWERPC_ORIG_R3] = "orig_r3",
- [PERF_REG_POWERPC_CTR] = "ctr",
- [PERF_REG_POWERPC_LINK] = "link",
- [PERF_REG_POWERPC_XER] = "xer",
- [PERF_REG_POWERPC_CCR] = "ccr",
- [PERF_REG_POWERPC_SOFTE] = "softe",
- [PERF_REG_POWERPC_TRAP] = "trap",
- [PERF_REG_POWERPC_DAR] = "dar",
- [PERF_REG_POWERPC_DSISR] = "dsisr",
- [PERF_REG_POWERPC_SIER] = "sier",
- [PERF_REG_POWERPC_MMCRA] = "mmcra"
-};
-
-static inline const char *perf_reg_name(int id)
-{
- return reg_names[id];
-}
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/powerpc/tests/arch-tests.c b/tools/perf/arch/powerpc/tests/arch-tests.c
index 8c3fbd4af817..eb98c57b5aeb 100644
--- a/tools/perf/arch/powerpc/tests/arch-tests.c
+++ b/tools/perf/arch/powerpc/tests/arch-tests.c
@@ -3,14 +3,10 @@
#include "tests/tests.h"
#include "arch-tests.h"
-struct test arch_tests[] = {
+
+struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- {
- .desc = "Test dwarf unwind",
- .func = test__dwarf_unwind,
- },
+ &suite__dwarf_unwind,
#endif
- {
- .func = NULL,
- },
+ NULL,
};
diff --git a/tools/perf/arch/powerpc/tests/dwarf-unwind.c b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
index 8efd9ed9e9db..32fffb593fbf 100644
--- a/tools/perf/arch/powerpc/tests/dwarf-unwind.c
+++ b/tools/perf/arch/powerpc/tests/dwarf-unwind.c
@@ -7,7 +7,6 @@
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
-#include "arch-tests.h"
#define STACK_SIZE 8192
@@ -34,7 +33,7 @@ static int sample_ustack(struct perf_sample *sample,
return -1;
}
- stack_size = map->end - sp;
+ stack_size = map__end(map) - sp;
stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
memcpy(buf, (void *) sp, stack_size);
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build
index e5c9504f8586..9889245c555c 100644
--- a/tools/perf/arch/powerpc/util/Build
+++ b/tools/perf/arch/powerpc/util/Build
@@ -1,7 +1,10 @@
perf-y += header.o
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-y += perf_regs.o
perf-y += mem-events.o
+perf-y += sym-handling.o
+perf-y += evsel.o
+perf-y += event.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/book3s_hcalls.h b/tools/perf/arch/powerpc/util/book3s_hcalls.h
index 54cfa0530e86..488f4339b83c 100644
--- a/tools/perf/arch/powerpc/util/book3s_hcalls.h
+++ b/tools/perf/arch/powerpc/util/book3s_hcalls.h
@@ -84,7 +84,7 @@
{0x1a4, "H_CREATE_RPT"}, \
{0x1a8, "H_REMOVE_RPT"}, \
{0x1ac, "H_REGISTER_RPAGES"}, \
- {0x1b0, "H_DISABLE_AND_GETC"}, \
+ {0x1b0, "H_DISABLE_AND_GET"}, \
{0x1b4, "H_ERROR_DATA"}, \
{0x1b8, "H_GET_HCA_INFO"}, \
{0x1bc, "H_GET_PERF_COUNT"}, \
diff --git a/tools/perf/arch/powerpc/util/event.c b/tools/perf/arch/powerpc/util/event.c
new file mode 100644
index 000000000000..77d8cc2b5691
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/event.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+
+#include "../../../util/event.h"
+#include "../../../util/synthetic-events.h"
+#include "../../../util/machine.h"
+#include "../../../util/tool.h"
+#include "../../../util/map.h"
+#include "../../../util/debug.h"
+#include "../../../util/sample.h"
+
+void arch_perf_parse_sample_weight(struct perf_sample *data,
+ const __u64 *array, u64 type)
+{
+ union perf_sample_weight weight;
+
+ weight.full = *array;
+ if (type & PERF_SAMPLE_WEIGHT)
+ data->weight = weight.full;
+ else {
+ data->weight = weight.var1_dw;
+ data->ins_lat = weight.var2_w;
+ data->p_stage_cyc = weight.var3_w;
+ }
+}
+
+void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
+ __u64 *array, u64 type)
+{
+ *array = data->weight;
+
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ *array &= 0xffffffff;
+ *array |= ((u64)data->ins_lat << 32);
+ }
+}
+
+const char *arch_perf_header_entry(const char *se_header)
+{
+ if (!strcmp(se_header, "Local INSTR Latency"))
+ return "Finish Cyc";
+ else if (!strcmp(se_header, "INSTR Latency"))
+ return "Global Finish_cyc";
+ else if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
+ return "Dispatch Cyc";
+ else if (!strcmp(se_header, "Pipeline Stage Cycle"))
+ return "Global Dispatch_cyc";
+ return se_header;
+}
+
+int arch_support_sort_key(const char *sort_key)
+{
+ if (!strcmp(sort_key, "p_stage_cyc"))
+ return 1;
+ if (!strcmp(sort_key, "local_p_stage_cyc"))
+ return 1;
+ return 0;
+}
diff --git a/tools/perf/arch/powerpc/util/evsel.c b/tools/perf/arch/powerpc/util/evsel.c
new file mode 100644
index 000000000000..2f733cdc8dbb
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/evsel.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include "util/evsel.h"
+
+void arch_evsel__set_sample_weight(struct evsel *evsel)
+{
+ evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
+}
diff --git a/tools/perf/arch/powerpc/util/header.c b/tools/perf/arch/powerpc/util/header.c
index 3b4cdfc5efd6..c8d0dc775e5d 100644
--- a/tools/perf/arch/powerpc/util/header.c
+++ b/tools/perf/arch/powerpc/util/header.c
@@ -7,14 +7,9 @@
#include <string.h>
#include <linux/stringify.h>
#include "header.h"
-
-#define mfspr(rn) ({unsigned long rval; \
- asm volatile("mfspr %0," __stringify(rn) \
- : "=r" (rval)); rval; })
-
-#define SPRN_PVR 0x11F /* Processor Version Register */
-#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
-#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revison field */
+#include "utils_header.h"
+#include "metricgroup.h"
+#include <api/fs/fs.h>
int
get_cpuid(char *buffer, size_t sz)
@@ -44,3 +39,12 @@ get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
return bufp;
}
+
+int arch_get_runtimeparam(const struct pmu_metric *pm)
+{
+ int count;
+ char path[PATH_MAX] = "/devices/hv_24x7/interface/";
+
+ strcat(path, pm->aggr_mode == PerChip ? "sockets" : "coresperchip");
+ return sysfs__read_int(path, &count) < 0 ? 1 : count;
+}
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
index 16807269317c..ea1220d66b67 100644
--- a/tools/perf/arch/powerpc/util/kvm-stat.c
+++ b/tools/perf/arch/powerpc/util/kvm-stat.c
@@ -14,7 +14,6 @@
#define NR_TPS 4
const char *vcpu_id_str = "vcpu_id";
-const int decode_str_len = 40;
const char *kvm_entry_trace = "kvm_hv:kvm_guest_enter";
const char *kvm_exit_trace = "kvm_hv:kvm_guest_exit";
@@ -39,7 +38,7 @@ static void hcall_event_get_key(struct evsel *evsel,
struct event_key *key)
{
key->info = 0;
- key->key = perf_evsel__intval(evsel, sample, "req");
+ key->key = evsel__intval(evsel, sample, "req");
}
static const char *get_hcall_exit_reason(u64 exit_code)
@@ -61,13 +60,13 @@ static bool hcall_event_end(struct evsel *evsel,
struct perf_sample *sample __maybe_unused,
struct event_key *key __maybe_unused)
{
- return (!strcmp(evsel->name, kvm_events_tp[3]));
+ return (evsel__name_is(evsel, kvm_events_tp[3]));
}
static bool hcall_event_begin(struct evsel *evsel,
struct perf_sample *sample, struct event_key *key)
{
- if (!strcmp(evsel->name, kvm_events_tp[2])) {
+ if (evsel__name_is(evsel, kvm_events_tp[2])) {
hcall_event_get_key(evsel, sample, key);
return true;
}
@@ -80,7 +79,7 @@ static void hcall_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
{
const char *hcall_reason = get_hcall_exit_reason(key->key);
- scnprintf(decode, decode_str_len, "%s", hcall_reason);
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", hcall_reason);
}
static struct kvm_events_ops hcall_events = {
@@ -113,10 +112,11 @@ static int is_tracepoint_available(const char *str, struct evlist *evlist)
struct parse_events_error err;
int ret;
- bzero(&err, sizeof(err));
+ parse_events_error__init(&err);
ret = parse_events(evlist, str, &err);
if (err.str)
- parse_events_print_error(&err, "tracepoint");
+ parse_events_error__print(&err, "tracepoint");
+ parse_events_error__exit(&err);
return ret;
}
@@ -176,7 +176,7 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
}
/*
- * Incase of powerpc architecture, pmu registers are programmable
+ * In case of powerpc architecture, pmu registers are programmable
* by guest kernel. So monitoring guest via host may not provide
* valid samples with default 'cycles' event. It is better to use
* 'trace_imc/trace_cycles' event for guest profiling, since it
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c
index 07fb5e049488..4120fafe0be4 100644
--- a/tools/perf/arch/powerpc/util/mem-events.c
+++ b/tools/perf/arch/powerpc/util/mem-events.c
@@ -3,7 +3,7 @@
#include "mem-events.h"
/* PowerPC does not support 'ldlat' parameter. */
-char *perf_mem_events__name(int i)
+char *perf_mem_events__name(int i, char *pmu_name __maybe_unused)
{
if (i == PERF_MEM_EVENTS__LOAD)
return (char *) "cpu/mem-loads/";
diff --git a/tools/perf/arch/powerpc/util/perf_regs.c b/tools/perf/arch/powerpc/util/perf_regs.c
index 0a5242900248..8d07a78e742a 100644
--- a/tools/perf/arch/powerpc/util/perf_regs.c
+++ b/tools/perf/arch/powerpc/util/perf_regs.c
@@ -6,9 +6,16 @@
#include "../../../util/perf_regs.h"
#include "../../../util/debug.h"
+#include "../../../util/event.h"
+#include "../../../util/header.h"
+#include "../../../perf-sys.h"
+#include "utils_header.h"
#include <linux/kernel.h>
+#define PVR_POWER9 0x004E
+#define PVR_POWER10 0x0080
+
const struct sample_reg sample_reg_masks[] = {
SMPL_REG(r0, PERF_REG_POWERPC_R0),
SMPL_REG(r1, PERF_REG_POWERPC_R1),
@@ -55,6 +62,20 @@ const struct sample_reg sample_reg_masks[] = {
SMPL_REG(dsisr, PERF_REG_POWERPC_DSISR),
SMPL_REG(sier, PERF_REG_POWERPC_SIER),
SMPL_REG(mmcra, PERF_REG_POWERPC_MMCRA),
+ SMPL_REG(mmcr0, PERF_REG_POWERPC_MMCR0),
+ SMPL_REG(mmcr1, PERF_REG_POWERPC_MMCR1),
+ SMPL_REG(mmcr2, PERF_REG_POWERPC_MMCR2),
+ SMPL_REG(mmcr3, PERF_REG_POWERPC_MMCR3),
+ SMPL_REG(sier2, PERF_REG_POWERPC_SIER2),
+ SMPL_REG(sier3, PERF_REG_POWERPC_SIER3),
+ SMPL_REG(pmc1, PERF_REG_POWERPC_PMC1),
+ SMPL_REG(pmc2, PERF_REG_POWERPC_PMC2),
+ SMPL_REG(pmc3, PERF_REG_POWERPC_PMC3),
+ SMPL_REG(pmc4, PERF_REG_POWERPC_PMC4),
+ SMPL_REG(pmc5, PERF_REG_POWERPC_PMC5),
+ SMPL_REG(pmc6, PERF_REG_POWERPC_PMC6),
+ SMPL_REG(sdar, PERF_REG_POWERPC_SDAR),
+ SMPL_REG(siar, PERF_REG_POWERPC_SIAR),
SMPL_REG_END
};
@@ -163,3 +184,45 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
return SDT_ARG_VALID;
}
+
+uint64_t arch__intr_reg_mask(void)
+{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .sample_type = PERF_SAMPLE_REGS_INTR,
+ .precise_ip = 1,
+ .disabled = 1,
+ .exclude_kernel = 1,
+ };
+ int fd;
+ u32 version;
+ u64 extended_mask = 0, mask = PERF_REGS_MASK;
+
+ /*
+ * Get the PVR value to set the extended
+ * mask specific to platform.
+ */
+ version = (((mfspr(SPRN_PVR)) >> 16) & 0xFFFF);
+ if (version == PVR_POWER9)
+ extended_mask = PERF_REG_PMU_MASK_300;
+ else if (version == PVR_POWER10)
+ extended_mask = PERF_REG_PMU_MASK_31;
+ else
+ return mask;
+
+ attr.sample_regs_intr = extended_mask;
+ attr.sample_period = 1;
+ event_attr_init(&attr);
+
+ /*
+ * check if the pmu supports perf extended regs, before
+ * returning the register mask to sample.
+ */
+ fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+ if (fd != -1) {
+ close(fd);
+ mask |= extended_mask;
+ }
+ return mask;
+}
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 3018a054526a..b7223feec770 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -45,7 +45,7 @@ static const Dwfl_Callbacks offline_callbacks = {
*/
static int check_return_reg(int ra_regno, Dwarf_Frame *frame)
{
- Dwarf_Op ops_mem[2];
+ Dwarf_Op ops_mem[3];
Dwarf_Op dummy;
Dwarf_Op *ops = &dummy;
size_t nops;
@@ -255,14 +255,14 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
thread__find_symbol(thread, PERF_RECORD_MISC_USER, ip, &al);
if (al.map)
- dso = al.map->dso;
+ dso = map__dso(al.map);
if (!dso) {
pr_debug("%" PRIx64 " dso is NULL\n", ip);
return skip_slot;
}
- rc = check_return_addr(dso, al.map->start, ip);
+ rc = check_return_addr(dso, map__start(al.map), ip);
pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n",
dso->long_name, al.sym->name, ip, rc);
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index 0856b32f9e08..947bfad7aa59 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -104,7 +104,7 @@ void arch__fix_tev_from_maps(struct perf_probe_event *pev,
lep_offset = PPC64_LOCAL_ENTRY_OFFSET(sym->arch_sym);
- if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS)
+ if (map__dso(map)->symtab_type == DSO_BINARY_TYPE__KALLSYMS)
tev->point.offset += PPC64LE_LEP_OFFSET;
else if (lep_offset) {
if (pev->uprobes)
@@ -131,7 +131,7 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
for (i = 0; i < ntevs; i++) {
tev = &pev->tevs[i];
map__for_each_symbol(map, sym, tmp) {
- if (map->unmap_ip(map, sym->start) == tev->point.address) {
+ if (map__unmap_ip(map, sym->start) == tev->point.address) {
arch__fix_tev_from_maps(pev, tev, map, sym);
break;
}
diff --git a/tools/perf/arch/powerpc/util/unwind-libdw.c b/tools/perf/arch/powerpc/util/unwind-libdw.c
index abf2dbc7f829..e616642c754c 100644
--- a/tools/perf/arch/powerpc/util/unwind-libdw.c
+++ b/tools/perf/arch/powerpc/util/unwind-libdw.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
#include <linux/kernel.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/sample.h"
/* See backends/ppc_initreg.c and backends/ppc_regs.c in elfutils. */
static const int special_regs[3][2] = {
diff --git a/tools/perf/arch/powerpc/util/utils_header.h b/tools/perf/arch/powerpc/util/utils_header.h
new file mode 100644
index 000000000000..2baeb1c1ae85
--- /dev/null
+++ b/tools/perf/arch/powerpc/util/utils_header.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_UTIL_HEADER_H
+#define __PERF_UTIL_HEADER_H
+
+#include <linux/stringify.h>
+
+#define mfspr(rn) ({unsigned long rval; \
+ asm volatile("mfspr %0," __stringify(rn) \
+ : "=r" (rval)); rval; })
+
+#define SPRN_PVR 0x11F /* Processor Version Register */
+#define PVR_VER(pvr) (((pvr) >> 16) & 0xFFFF) /* Version field */
+#define PVR_REV(pvr) (((pvr) >> 0) & 0xFFFF) /* Revision field */
+
+#endif /* __PERF_UTIL_HEADER_H */
diff --git a/tools/perf/arch/riscv/Makefile b/tools/perf/arch/riscv/Makefile
index 1aa9dd772489..a8d25d005207 100644
--- a/tools/perf/arch/riscv/Makefile
+++ b/tools/perf/arch/riscv/Makefile
@@ -2,3 +2,4 @@ ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
+PERF_HAVE_JITDUMP := 1
diff --git a/tools/perf/arch/riscv/include/perf_regs.h b/tools/perf/arch/riscv/include/perf_regs.h
index 7a8bcde7a2b1..6944bf0de53e 100644
--- a/tools/perf/arch/riscv/include/perf_regs.h
+++ b/tools/perf/arch/riscv/include/perf_regs.h
@@ -19,78 +19,4 @@
#define PERF_REG_IP PERF_REG_RISCV_PC
#define PERF_REG_SP PERF_REG_RISCV_SP
-static inline const char *perf_reg_name(int id)
-{
- switch (id) {
- case PERF_REG_RISCV_PC:
- return "pc";
- case PERF_REG_RISCV_RA:
- return "ra";
- case PERF_REG_RISCV_SP:
- return "sp";
- case PERF_REG_RISCV_GP:
- return "gp";
- case PERF_REG_RISCV_TP:
- return "tp";
- case PERF_REG_RISCV_T0:
- return "t0";
- case PERF_REG_RISCV_T1:
- return "t1";
- case PERF_REG_RISCV_T2:
- return "t2";
- case PERF_REG_RISCV_S0:
- return "s0";
- case PERF_REG_RISCV_S1:
- return "s1";
- case PERF_REG_RISCV_A0:
- return "a0";
- case PERF_REG_RISCV_A1:
- return "a1";
- case PERF_REG_RISCV_A2:
- return "a2";
- case PERF_REG_RISCV_A3:
- return "a3";
- case PERF_REG_RISCV_A4:
- return "a4";
- case PERF_REG_RISCV_A5:
- return "a5";
- case PERF_REG_RISCV_A6:
- return "a6";
- case PERF_REG_RISCV_A7:
- return "a7";
- case PERF_REG_RISCV_S2:
- return "s2";
- case PERF_REG_RISCV_S3:
- return "s3";
- case PERF_REG_RISCV_S4:
- return "s4";
- case PERF_REG_RISCV_S5:
- return "s5";
- case PERF_REG_RISCV_S6:
- return "s6";
- case PERF_REG_RISCV_S7:
- return "s7";
- case PERF_REG_RISCV_S8:
- return "s8";
- case PERF_REG_RISCV_S9:
- return "s9";
- case PERF_REG_RISCV_S10:
- return "s10";
- case PERF_REG_RISCV_S11:
- return "s11";
- case PERF_REG_RISCV_T3:
- return "t3";
- case PERF_REG_RISCV_T4:
- return "t4";
- case PERF_REG_RISCV_T5:
- return "t5";
- case PERF_REG_RISCV_T6:
- return "t6";
- default:
- return NULL;
- }
-
- return NULL;
-}
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/riscv/util/Build b/tools/perf/arch/riscv/util/Build
index 7d3050134ae0..603dbb5ae4dc 100644
--- a/tools/perf/arch/riscv/util/Build
+++ b/tools/perf/arch/riscv/util/Build
@@ -1,4 +1,5 @@
perf-y += perf_regs.o
+perf-y += header.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
diff --git a/tools/perf/arch/riscv/util/header.c b/tools/perf/arch/riscv/util/header.c
new file mode 100644
index 000000000000..4a41856938a8
--- /dev/null
+++ b/tools/perf/arch/riscv/util/header.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implementation of get_cpuid().
+ *
+ * Author: Nikita Shubin <n.shubin@yadro.com>
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <api/fs/fs.h>
+#include <errno.h>
+#include "../../util/debug.h"
+#include "../../util/header.h"
+
+#define CPUINFO_MVEN "mvendorid"
+#define CPUINFO_MARCH "marchid"
+#define CPUINFO_MIMP "mimpid"
+#define CPUINFO "/proc/cpuinfo"
+
+static char *_get_field(const char *line)
+{
+ char *line2, *nl;
+
+ line2 = strrchr(line, ' ');
+ if (!line2)
+ return NULL;
+
+ line2++;
+ nl = strrchr(line, '\n');
+ if (!nl)
+ return NULL;
+
+ return strndup(line2, nl - line2);
+}
+
+static char *_get_cpuid(void)
+{
+ char *line = NULL;
+ char *mvendorid = NULL;
+ char *marchid = NULL;
+ char *mimpid = NULL;
+ char *cpuid = NULL;
+ int read;
+ unsigned long line_sz;
+ FILE *cpuinfo;
+
+ cpuinfo = fopen(CPUINFO, "r");
+ if (cpuinfo == NULL)
+ return cpuid;
+
+ while ((read = getline(&line, &line_sz, cpuinfo)) != -1) {
+ if (!strncmp(line, CPUINFO_MVEN, strlen(CPUINFO_MVEN))) {
+ mvendorid = _get_field(line);
+ if (!mvendorid)
+ goto free;
+ } else if (!strncmp(line, CPUINFO_MARCH, strlen(CPUINFO_MARCH))) {
+ marchid = _get_field(line);
+ if (!marchid)
+ goto free;
+ } else if (!strncmp(line, CPUINFO_MIMP, strlen(CPUINFO_MIMP))) {
+ mimpid = _get_field(line);
+ if (!mimpid)
+ goto free;
+
+ break;
+ }
+ }
+
+ if (!mvendorid || !marchid || !mimpid)
+ goto free;
+
+ if (asprintf(&cpuid, "%s-%s-%s", mvendorid, marchid, mimpid) < 0)
+ cpuid = NULL;
+
+free:
+ fclose(cpuinfo);
+ free(mvendorid);
+ free(marchid);
+ free(mimpid);
+
+ return cpuid;
+}
+
+int get_cpuid(char *buffer, size_t sz)
+{
+ char *cpuid = _get_cpuid();
+ int ret = 0;
+
+ if (sz < strlen(cpuid)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ scnprintf(buffer, sz, "%s", cpuid);
+free:
+ free(cpuid);
+ return ret;
+}
+
+char *
+get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
+{
+ return _get_cpuid();
+}
diff --git a/tools/perf/arch/riscv/util/unwind-libdw.c b/tools/perf/arch/riscv/util/unwind-libdw.c
index 19536e172850..54a198714eb8 100644
--- a/tools/perf/arch/riscv/util/unwind-libdw.c
+++ b/tools/perf/arch/riscv/util/unwind-libdw.c
@@ -4,7 +4,7 @@
#include <elfutils/libdwfl.h>
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../util/sample.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
diff --git a/tools/perf/arch/riscv64/annotate/instructions.c b/tools/perf/arch/riscv64/annotate/instructions.c
new file mode 100644
index 000000000000..869a0eb28953
--- /dev/null
+++ b/tools/perf/arch/riscv64/annotate/instructions.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+static
+struct ins_ops *riscv64__associate_ins_ops(struct arch *arch, const char *name)
+{
+ struct ins_ops *ops = NULL;
+
+ if (!strncmp(name, "jal", 3) ||
+ !strncmp(name, "jr", 2) ||
+ !strncmp(name, "call", 4))
+ ops = &call_ops;
+ else if (!strncmp(name, "ret", 3))
+ ops = &ret_ops;
+ else if (name[0] == 'j' || name[0] == 'b')
+ ops = &jump_ops;
+ else
+ return NULL;
+
+ arch__associate_ins_ops(arch, name, ops);
+
+ return ops;
+}
+
+static
+int riscv64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
+{
+ if (!arch->initialized) {
+ arch->associate_instruction_ops = riscv64__associate_ins_ops;
+ arch->initialized = true;
+ arch->objdump.comment_char = '#';
+ }
+
+ return 0;
+}
diff --git a/tools/perf/arch/s390/Makefile b/tools/perf/arch/s390/Makefile
index 6ac8887be7c9..74bffbea03e2 100644
--- a/tools/perf/arch/s390/Makefile
+++ b/tools/perf/arch/s390/Makefile
@@ -12,7 +12,6 @@ PERF_HAVE_JITDUMP := 1
out := $(OUTPUT)arch/s390/include/generated/asm
header := $(out)/syscalls_64.c
-syskrn := $(srctree)/arch/s390/kernel/syscalls/syscall.tbl
sysprf := $(srctree)/tools/perf/arch/s390/entry/syscalls
sysdef := $(sysprf)/syscall.tbl
systbl := $(sysprf)/mksyscalltbl
@@ -21,9 +20,6 @@ systbl := $(sysprf)/mksyscalltbl
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
$(header): $(sysdef) $(systbl)
- @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \
- (diff -B $(sysdef) $(syskrn) >/dev/null) \
- || echo "Warning: Kernel ABI header at '$(sysdef)' differs from latest version at '$(syskrn)'" >&2 )) || true
$(Q)$(SHELL) '$(systbl)' $(sysdef) > $@
clean::
diff --git a/tools/perf/arch/s390/annotate/instructions.c b/tools/perf/arch/s390/annotate/instructions.c
index 0e136630659e..de925b0e35ce 100644
--- a/tools/perf/arch/s390/annotate/instructions.c
+++ b/tools/perf/arch/s390/annotate/instructions.c
@@ -39,7 +39,7 @@ static int s390_call__parse(struct arch *arch, struct ins_operands *ops,
target.addr = map__objdump_2mem(map, ops->target.addr);
if (maps__find_ams(ms->maps, &target) == 0 &&
- map__rip_2objdump(target.ms.map, map->map_ip(target.ms.map, target.addr)) == ops->target.addr)
+ map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
ops->target.sym = target.ms.sym;
return 0;
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index b38d48464368..799147658dee 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -15,86 +15,86 @@
5 common open sys_open compat_sys_open
6 common close sys_close sys_close
7 common restart_syscall sys_restart_syscall sys_restart_syscall
-8 common creat sys_creat compat_sys_creat
-9 common link sys_link compat_sys_link
-10 common unlink sys_unlink compat_sys_unlink
+8 common creat sys_creat sys_creat
+9 common link sys_link sys_link
+10 common unlink sys_unlink sys_unlink
11 common execve sys_execve compat_sys_execve
-12 common chdir sys_chdir compat_sys_chdir
-13 32 time - compat_sys_time
-14 common mknod sys_mknod compat_sys_mknod
-15 common chmod sys_chmod compat_sys_chmod
-16 32 lchown - compat_sys_s390_lchown16
+12 common chdir sys_chdir sys_chdir
+13 32 time - sys_time32
+14 common mknod sys_mknod sys_mknod
+15 common chmod sys_chmod sys_chmod
+16 32 lchown - sys_lchown16
19 common lseek sys_lseek compat_sys_lseek
20 common getpid sys_getpid sys_getpid
-21 common mount sys_mount compat_sys_mount
-22 common umount sys_oldumount compat_sys_oldumount
-23 32 setuid - compat_sys_s390_setuid16
-24 32 getuid - compat_sys_s390_getuid16
-25 32 stime - compat_sys_stime
+21 common mount sys_mount sys_mount
+22 common umount sys_oldumount sys_oldumount
+23 32 setuid - sys_setuid16
+24 32 getuid - sys_getuid16
+25 32 stime - sys_stime32
26 common ptrace sys_ptrace compat_sys_ptrace
27 common alarm sys_alarm sys_alarm
29 common pause sys_pause sys_pause
-30 common utime sys_utime compat_sys_utime
-33 common access sys_access compat_sys_access
+30 common utime sys_utime sys_utime32
+33 common access sys_access sys_access
34 common nice sys_nice sys_nice
36 common sync sys_sync sys_sync
37 common kill sys_kill sys_kill
-38 common rename sys_rename compat_sys_rename
-39 common mkdir sys_mkdir compat_sys_mkdir
-40 common rmdir sys_rmdir compat_sys_rmdir
+38 common rename sys_rename sys_rename
+39 common mkdir sys_mkdir sys_mkdir
+40 common rmdir sys_rmdir sys_rmdir
41 common dup sys_dup sys_dup
-42 common pipe sys_pipe compat_sys_pipe
+42 common pipe sys_pipe sys_pipe
43 common times sys_times compat_sys_times
-45 common brk sys_brk compat_sys_brk
-46 32 setgid - compat_sys_s390_setgid16
-47 32 getgid - compat_sys_s390_getgid16
-48 common signal sys_signal compat_sys_signal
-49 32 geteuid - compat_sys_s390_geteuid16
-50 32 getegid - compat_sys_s390_getegid16
-51 common acct sys_acct compat_sys_acct
-52 common umount2 sys_umount compat_sys_umount
+45 common brk sys_brk sys_brk
+46 32 setgid - sys_setgid16
+47 32 getgid - sys_getgid16
+48 common signal sys_signal sys_signal
+49 32 geteuid - sys_geteuid16
+50 32 getegid - sys_getegid16
+51 common acct sys_acct sys_acct
+52 common umount2 sys_umount sys_umount
54 common ioctl sys_ioctl compat_sys_ioctl
55 common fcntl sys_fcntl compat_sys_fcntl
57 common setpgid sys_setpgid sys_setpgid
60 common umask sys_umask sys_umask
-61 common chroot sys_chroot compat_sys_chroot
+61 common chroot sys_chroot sys_chroot
62 common ustat sys_ustat compat_sys_ustat
63 common dup2 sys_dup2 sys_dup2
64 common getppid sys_getppid sys_getppid
65 common getpgrp sys_getpgrp sys_getpgrp
66 common setsid sys_setsid sys_setsid
67 common sigaction sys_sigaction compat_sys_sigaction
-70 32 setreuid - compat_sys_s390_setreuid16
-71 32 setregid - compat_sys_s390_setregid16
-72 common sigsuspend sys_sigsuspend compat_sys_sigsuspend
+70 32 setreuid - sys_setreuid16
+71 32 setregid - sys_setregid16
+72 common sigsuspend sys_sigsuspend sys_sigsuspend
73 common sigpending sys_sigpending compat_sys_sigpending
-74 common sethostname sys_sethostname compat_sys_sethostname
+74 common sethostname sys_sethostname sys_sethostname
75 common setrlimit sys_setrlimit compat_sys_setrlimit
76 32 getrlimit - compat_sys_old_getrlimit
77 common getrusage sys_getrusage compat_sys_getrusage
78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
79 common settimeofday sys_settimeofday compat_sys_settimeofday
-80 32 getgroups - compat_sys_s390_getgroups16
-81 32 setgroups - compat_sys_s390_setgroups16
-83 common symlink sys_symlink compat_sys_symlink
-85 common readlink sys_readlink compat_sys_readlink
-86 common uselib sys_uselib compat_sys_uselib
-87 common swapon sys_swapon compat_sys_swapon
-88 common reboot sys_reboot compat_sys_reboot
+80 32 getgroups - sys_getgroups16
+81 32 setgroups - sys_setgroups16
+83 common symlink sys_symlink sys_symlink
+85 common readlink sys_readlink sys_readlink
+86 common uselib sys_uselib sys_uselib
+87 common swapon sys_swapon sys_swapon
+88 common reboot sys_reboot sys_reboot
89 common readdir - compat_sys_old_readdir
90 common mmap sys_old_mmap compat_sys_s390_old_mmap
-91 common munmap sys_munmap compat_sys_munmap
+91 common munmap sys_munmap sys_munmap
92 common truncate sys_truncate compat_sys_truncate
93 common ftruncate sys_ftruncate compat_sys_ftruncate
94 common fchmod sys_fchmod sys_fchmod
-95 32 fchown - compat_sys_s390_fchown16
+95 32 fchown - sys_fchown16
96 common getpriority sys_getpriority sys_getpriority
97 common setpriority sys_setpriority sys_setpriority
99 common statfs sys_statfs compat_sys_statfs
100 common fstatfs sys_fstatfs compat_sys_fstatfs
101 32 ioperm - -
102 common socketcall sys_socketcall compat_sys_socketcall
-103 common syslog sys_syslog compat_sys_syslog
+103 common syslog sys_syslog sys_syslog
104 common setitimer sys_setitimer compat_sys_setitimer
105 common getitimer sys_getitimer compat_sys_getitimer
106 common stat sys_newstat compat_sys_newstat
@@ -104,76 +104,76 @@
111 common vhangup sys_vhangup sys_vhangup
112 common idle - -
114 common wait4 sys_wait4 compat_sys_wait4
-115 common swapoff sys_swapoff compat_sys_swapoff
+115 common swapoff sys_swapoff sys_swapoff
116 common sysinfo sys_sysinfo compat_sys_sysinfo
117 common ipc sys_s390_ipc compat_sys_s390_ipc
118 common fsync sys_fsync sys_fsync
119 common sigreturn sys_sigreturn compat_sys_sigreturn
-120 common clone sys_clone compat_sys_clone
-121 common setdomainname sys_setdomainname compat_sys_setdomainname
-122 common uname sys_newuname compat_sys_newuname
-124 common adjtimex sys_adjtimex compat_sys_adjtimex
-125 common mprotect sys_mprotect compat_sys_mprotect
+120 common clone sys_clone sys_clone
+121 common setdomainname sys_setdomainname sys_setdomainname
+122 common uname sys_newuname sys_newuname
+124 common adjtimex sys_adjtimex sys_adjtimex_time32
+125 common mprotect sys_mprotect sys_mprotect
126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
127 common create_module - -
-128 common init_module sys_init_module compat_sys_init_module
-129 common delete_module sys_delete_module compat_sys_delete_module
+128 common init_module sys_init_module sys_init_module
+129 common delete_module sys_delete_module sys_delete_module
130 common get_kernel_syms - -
-131 common quotactl sys_quotactl compat_sys_quotactl
+131 common quotactl sys_quotactl sys_quotactl
132 common getpgid sys_getpgid sys_getpgid
133 common fchdir sys_fchdir sys_fchdir
-134 common bdflush sys_bdflush compat_sys_bdflush
-135 common sysfs sys_sysfs compat_sys_sysfs
+134 common bdflush sys_ni_syscall sys_ni_syscall
+135 common sysfs sys_sysfs sys_sysfs
136 common personality sys_s390_personality sys_s390_personality
137 common afs_syscall - -
-138 32 setfsuid - compat_sys_s390_setfsuid16
-139 32 setfsgid - compat_sys_s390_setfsgid16
-140 32 _llseek - compat_sys_llseek
+138 32 setfsuid - sys_setfsuid16
+139 32 setfsgid - sys_setfsgid16
+140 32 _llseek - sys_llseek
141 common getdents sys_getdents compat_sys_getdents
142 32 _newselect - compat_sys_select
142 64 select sys_select -
143 common flock sys_flock sys_flock
-144 common msync sys_msync compat_sys_msync
-145 common readv sys_readv compat_sys_readv
-146 common writev sys_writev compat_sys_writev
+144 common msync sys_msync sys_msync
+145 common readv sys_readv sys_readv
+146 common writev sys_writev sys_writev
147 common getsid sys_getsid sys_getsid
148 common fdatasync sys_fdatasync sys_fdatasync
-149 common _sysctl sys_sysctl compat_sys_sysctl
-150 common mlock sys_mlock compat_sys_mlock
-151 common munlock sys_munlock compat_sys_munlock
+149 common _sysctl - -
+150 common mlock sys_mlock sys_mlock
+151 common munlock sys_munlock sys_munlock
152 common mlockall sys_mlockall sys_mlockall
153 common munlockall sys_munlockall sys_munlockall
-154 common sched_setparam sys_sched_setparam compat_sys_sched_setparam
-155 common sched_getparam sys_sched_getparam compat_sys_sched_getparam
-156 common sched_setscheduler sys_sched_setscheduler compat_sys_sched_setscheduler
+154 common sched_setparam sys_sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler sys_sched_setscheduler
157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler
158 common sched_yield sys_sched_yield sys_sched_yield
159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max
160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
-162 common nanosleep sys_nanosleep compat_sys_nanosleep
-163 common mremap sys_mremap compat_sys_mremap
-164 32 setresuid - compat_sys_s390_setresuid16
-165 32 getresuid - compat_sys_s390_getresuid16
+161 common sched_rr_get_interval sys_sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep sys_nanosleep_time32
+163 common mremap sys_mremap sys_mremap
+164 32 setresuid - sys_setresuid16
+165 32 getresuid - sys_getresuid16
167 common query_module - -
-168 common poll sys_poll compat_sys_poll
+168 common poll sys_poll sys_poll
169 common nfsservctl - -
-170 32 setresgid - compat_sys_s390_setresgid16
-171 32 getresgid - compat_sys_s390_getresgid16
-172 common prctl sys_prctl compat_sys_prctl
+170 32 setresgid - sys_setresgid16
+171 32 getresgid - sys_getresgid16
+172 common prctl sys_prctl sys_prctl
173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait
+177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time32
178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
180 common pread64 sys_pread64 compat_sys_s390_pread64
181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64
-182 32 chown - compat_sys_s390_chown16
-183 common getcwd sys_getcwd compat_sys_getcwd
-184 common capget sys_capget compat_sys_capget
-185 common capset sys_capset compat_sys_capset
+182 32 chown - sys_chown16
+183 common getcwd sys_getcwd sys_getcwd
+184 common capget sys_capget sys_capget
+185 common capset sys_capset sys_capset
186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
187 common sendfile sys_sendfile64 compat_sys_sendfile
188 common getpmsg - -
@@ -187,7 +187,7 @@
195 32 stat64 - compat_sys_s390_stat64
196 32 lstat64 - compat_sys_s390_lstat64
197 32 fstat64 - compat_sys_s390_fstat64
-198 32 lchown32 - compat_sys_lchown
+198 32 lchown32 - sys_lchown
198 64 lchown sys_lchown -
199 32 getuid32 - sys_getuid
199 64 getuid sys_getuid -
@@ -201,21 +201,21 @@
203 64 setreuid sys_setreuid -
204 32 setregid32 - sys_setregid
204 64 setregid sys_setregid -
-205 32 getgroups32 - compat_sys_getgroups
+205 32 getgroups32 - sys_getgroups
205 64 getgroups sys_getgroups -
-206 32 setgroups32 - compat_sys_setgroups
+206 32 setgroups32 - sys_setgroups
206 64 setgroups sys_setgroups -
207 32 fchown32 - sys_fchown
207 64 fchown sys_fchown -
208 32 setresuid32 - sys_setresuid
208 64 setresuid sys_setresuid -
-209 32 getresuid32 - compat_sys_getresuid
+209 32 getresuid32 - sys_getresuid
209 64 getresuid sys_getresuid -
210 32 setresgid32 - sys_setresgid
210 64 setresgid sys_setresgid -
-211 32 getresgid32 - compat_sys_getresgid
+211 32 getresgid32 - sys_getresgid
211 64 getresgid sys_getresgid -
-212 32 chown32 - compat_sys_chown
+212 32 chown32 - sys_chown
212 64 chown sys_chown -
213 32 setuid32 - sys_setuid
213 64 setuid sys_setuid -
@@ -225,166 +225,231 @@
215 64 setfsuid sys_setfsuid -
216 32 setfsgid32 - sys_setfsgid
216 64 setfsgid sys_setfsgid -
-217 common pivot_root sys_pivot_root compat_sys_pivot_root
-218 common mincore sys_mincore compat_sys_mincore
-219 common madvise sys_madvise compat_sys_madvise
-220 common getdents64 sys_getdents64 compat_sys_getdents64
+217 common pivot_root sys_pivot_root sys_pivot_root
+218 common mincore sys_mincore sys_mincore
+219 common madvise sys_madvise sys_madvise
+220 common getdents64 sys_getdents64 sys_getdents64
221 32 fcntl64 - compat_sys_fcntl64
222 common readahead sys_readahead compat_sys_s390_readahead
223 32 sendfile64 - compat_sys_sendfile64
-224 common setxattr sys_setxattr compat_sys_setxattr
-225 common lsetxattr sys_lsetxattr compat_sys_lsetxattr
-226 common fsetxattr sys_fsetxattr compat_sys_fsetxattr
-227 common getxattr sys_getxattr compat_sys_getxattr
-228 common lgetxattr sys_lgetxattr compat_sys_lgetxattr
-229 common fgetxattr sys_fgetxattr compat_sys_fgetxattr
-230 common listxattr sys_listxattr compat_sys_listxattr
-231 common llistxattr sys_llistxattr compat_sys_llistxattr
-232 common flistxattr sys_flistxattr compat_sys_flistxattr
-233 common removexattr sys_removexattr compat_sys_removexattr
-234 common lremovexattr sys_lremovexattr compat_sys_lremovexattr
-235 common fremovexattr sys_fremovexattr compat_sys_fremovexattr
+224 common setxattr sys_setxattr sys_setxattr
+225 common lsetxattr sys_lsetxattr sys_lsetxattr
+226 common fsetxattr sys_fsetxattr sys_fsetxattr
+227 common getxattr sys_getxattr sys_getxattr
+228 common lgetxattr sys_lgetxattr sys_lgetxattr
+229 common fgetxattr sys_fgetxattr sys_fgetxattr
+230 common listxattr sys_listxattr sys_listxattr
+231 common llistxattr sys_llistxattr sys_llistxattr
+232 common flistxattr sys_flistxattr sys_flistxattr
+233 common removexattr sys_removexattr sys_removexattr
+234 common lremovexattr sys_lremovexattr sys_lremovexattr
+235 common fremovexattr sys_fremovexattr sys_fremovexattr
236 common gettid sys_gettid sys_gettid
237 common tkill sys_tkill sys_tkill
-238 common futex sys_futex compat_sys_futex
+238 common futex sys_futex sys_futex_time32
239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
241 common tgkill sys_tgkill sys_tgkill
243 common io_setup sys_io_setup compat_sys_io_setup
-244 common io_destroy sys_io_destroy compat_sys_io_destroy
-245 common io_getevents sys_io_getevents compat_sys_io_getevents
+244 common io_destroy sys_io_destroy sys_io_destroy
+245 common io_getevents sys_io_getevents sys_io_getevents_time32
246 common io_submit sys_io_submit compat_sys_io_submit
-247 common io_cancel sys_io_cancel compat_sys_io_cancel
+247 common io_cancel sys_io_cancel sys_io_cancel
248 common exit_group sys_exit_group sys_exit_group
249 common epoll_create sys_epoll_create sys_epoll_create
-250 common epoll_ctl sys_epoll_ctl compat_sys_epoll_ctl
-251 common epoll_wait sys_epoll_wait compat_sys_epoll_wait
-252 common set_tid_address sys_set_tid_address compat_sys_set_tid_address
+250 common epoll_ctl sys_epoll_ctl sys_epoll_ctl
+251 common epoll_wait sys_epoll_wait sys_epoll_wait
+252 common set_tid_address sys_set_tid_address sys_set_tid_address
253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64
254 common timer_create sys_timer_create compat_sys_timer_create
-255 common timer_settime sys_timer_settime compat_sys_timer_settime
-256 common timer_gettime sys_timer_gettime compat_sys_timer_gettime
+255 common timer_settime sys_timer_settime sys_timer_settime32
+256 common timer_gettime sys_timer_gettime sys_timer_gettime32
257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun
258 common timer_delete sys_timer_delete sys_timer_delete
-259 common clock_settime sys_clock_settime compat_sys_clock_settime
-260 common clock_gettime sys_clock_gettime compat_sys_clock_gettime
-261 common clock_getres sys_clock_getres compat_sys_clock_getres
-262 common clock_nanosleep sys_clock_nanosleep compat_sys_clock_nanosleep
+259 common clock_settime sys_clock_settime sys_clock_settime32
+260 common clock_gettime sys_clock_gettime sys_clock_gettime32
+261 common clock_getres sys_clock_getres sys_clock_getres_time32
+262 common clock_nanosleep sys_clock_nanosleep sys_clock_nanosleep_time32
264 32 fadvise64_64 - compat_sys_s390_fadvise64_64
265 common statfs64 sys_statfs64 compat_sys_statfs64
266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
-267 common remap_file_pages sys_remap_file_pages compat_sys_remap_file_pages
-268 common mbind sys_mbind compat_sys_mbind
-269 common get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy
-270 common set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy
+267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages
+268 common mbind sys_mbind sys_mbind
+269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy
271 common mq_open sys_mq_open compat_sys_mq_open
-272 common mq_unlink sys_mq_unlink compat_sys_mq_unlink
-273 common mq_timedsend sys_mq_timedsend compat_sys_mq_timedsend
-274 common mq_timedreceive sys_mq_timedreceive compat_sys_mq_timedreceive
+272 common mq_unlink sys_mq_unlink sys_mq_unlink
+273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32
+274 common mq_timedreceive sys_mq_timedreceive sys_mq_timedreceive_time32
275 common mq_notify sys_mq_notify compat_sys_mq_notify
276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
277 common kexec_load sys_kexec_load compat_sys_kexec_load
-278 common add_key sys_add_key compat_sys_add_key
-279 common request_key sys_request_key compat_sys_request_key
+278 common add_key sys_add_key sys_add_key
+279 common request_key sys_request_key sys_request_key
280 common keyctl sys_keyctl compat_sys_keyctl
281 common waitid sys_waitid compat_sys_waitid
282 common ioprio_set sys_ioprio_set sys_ioprio_set
283 common ioprio_get sys_ioprio_get sys_ioprio_get
284 common inotify_init sys_inotify_init sys_inotify_init
-285 common inotify_add_watch sys_inotify_add_watch compat_sys_inotify_add_watch
+285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch
286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
-287 common migrate_pages sys_migrate_pages compat_sys_migrate_pages
+287 common migrate_pages sys_migrate_pages sys_migrate_pages
288 common openat sys_openat compat_sys_openat
-289 common mkdirat sys_mkdirat compat_sys_mkdirat
-290 common mknodat sys_mknodat compat_sys_mknodat
-291 common fchownat sys_fchownat compat_sys_fchownat
-292 common futimesat sys_futimesat compat_sys_futimesat
+289 common mkdirat sys_mkdirat sys_mkdirat
+290 common mknodat sys_mknodat sys_mknodat
+291 common fchownat sys_fchownat sys_fchownat
+292 common futimesat sys_futimesat sys_futimesat_time32
293 32 fstatat64 - compat_sys_s390_fstatat64
293 64 newfstatat sys_newfstatat -
-294 common unlinkat sys_unlinkat compat_sys_unlinkat
-295 common renameat sys_renameat compat_sys_renameat
-296 common linkat sys_linkat compat_sys_linkat
-297 common symlinkat sys_symlinkat compat_sys_symlinkat
-298 common readlinkat sys_readlinkat compat_sys_readlinkat
-299 common fchmodat sys_fchmodat compat_sys_fchmodat
-300 common faccessat sys_faccessat compat_sys_faccessat
-301 common pselect6 sys_pselect6 compat_sys_pselect6
-302 common ppoll sys_ppoll compat_sys_ppoll
-303 common unshare sys_unshare compat_sys_unshare
+294 common unlinkat sys_unlinkat sys_unlinkat
+295 common renameat sys_renameat sys_renameat
+296 common linkat sys_linkat sys_linkat
+297 common symlinkat sys_symlinkat sys_symlinkat
+298 common readlinkat sys_readlinkat sys_readlinkat
+299 common fchmodat sys_fchmodat sys_fchmodat
+300 common faccessat sys_faccessat sys_faccessat
+301 common pselect6 sys_pselect6 compat_sys_pselect6_time32
+302 common ppoll sys_ppoll compat_sys_ppoll_time32
+303 common unshare sys_unshare sys_unshare
304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
-306 common splice sys_splice compat_sys_splice
+306 common splice sys_splice sys_splice
307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
-308 common tee sys_tee compat_sys_tee
-309 common vmsplice sys_vmsplice compat_sys_vmsplice
-310 common move_pages sys_move_pages compat_sys_move_pages
-311 common getcpu sys_getcpu compat_sys_getcpu
+308 common tee sys_tee sys_tee
+309 common vmsplice sys_vmsplice sys_vmsplice
+310 common move_pages sys_move_pages sys_move_pages
+311 common getcpu sys_getcpu sys_getcpu
312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
-313 common utimes sys_utimes compat_sys_utimes
+313 common utimes sys_utimes sys_utimes_time32
314 common fallocate sys_fallocate compat_sys_s390_fallocate
-315 common utimensat sys_utimensat compat_sys_utimensat
+315 common utimensat sys_utimensat sys_utimensat_time32
316 common signalfd sys_signalfd compat_sys_signalfd
317 common timerfd - -
318 common eventfd sys_eventfd sys_eventfd
319 common timerfd_create sys_timerfd_create sys_timerfd_create
-320 common timerfd_settime sys_timerfd_settime compat_sys_timerfd_settime
-321 common timerfd_gettime sys_timerfd_gettime compat_sys_timerfd_gettime
+320 common timerfd_settime sys_timerfd_settime sys_timerfd_settime32
+321 common timerfd_gettime sys_timerfd_gettime sys_timerfd_gettime32
322 common signalfd4 sys_signalfd4 compat_sys_signalfd4
323 common eventfd2 sys_eventfd2 sys_eventfd2
324 common inotify_init1 sys_inotify_init1 sys_inotify_init1
-325 common pipe2 sys_pipe2 compat_sys_pipe2
+325 common pipe2 sys_pipe2 sys_pipe2
326 common dup3 sys_dup3 sys_dup3
327 common epoll_create1 sys_epoll_create1 sys_epoll_create1
328 common preadv sys_preadv compat_sys_preadv
329 common pwritev sys_pwritev compat_sys_pwritev
330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
-331 common perf_event_open sys_perf_event_open compat_sys_perf_event_open
+331 common perf_event_open sys_perf_event_open sys_perf_event_open
332 common fanotify_init sys_fanotify_init sys_fanotify_init
333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
-334 common prlimit64 sys_prlimit64 compat_sys_prlimit64
-335 common name_to_handle_at sys_name_to_handle_at compat_sys_name_to_handle_at
+334 common prlimit64 sys_prlimit64 sys_prlimit64
+335 common name_to_handle_at sys_name_to_handle_at sys_name_to_handle_at
336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
-337 common clock_adjtime sys_clock_adjtime compat_sys_clock_adjtime
+337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32
338 common syncfs sys_syncfs sys_syncfs
339 common setns sys_setns sys_setns
-340 common process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
-341 common process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
+340 common process_vm_readv sys_process_vm_readv sys_process_vm_readv
+341 common process_vm_writev sys_process_vm_writev sys_process_vm_writev
342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr
-343 common kcmp sys_kcmp compat_sys_kcmp
-344 common finit_module sys_finit_module compat_sys_finit_module
-345 common sched_setattr sys_sched_setattr compat_sys_sched_setattr
-346 common sched_getattr sys_sched_getattr compat_sys_sched_getattr
-347 common renameat2 sys_renameat2 compat_sys_renameat2
-348 common seccomp sys_seccomp compat_sys_seccomp
-349 common getrandom sys_getrandom compat_sys_getrandom
-350 common memfd_create sys_memfd_create compat_sys_memfd_create
-351 common bpf sys_bpf compat_sys_bpf
-352 common s390_pci_mmio_write sys_s390_pci_mmio_write compat_sys_s390_pci_mmio_write
-353 common s390_pci_mmio_read sys_s390_pci_mmio_read compat_sys_s390_pci_mmio_read
+343 common kcmp sys_kcmp sys_kcmp
+344 common finit_module sys_finit_module sys_finit_module
+345 common sched_setattr sys_sched_setattr sys_sched_setattr
+346 common sched_getattr sys_sched_getattr sys_sched_getattr
+347 common renameat2 sys_renameat2 sys_renameat2
+348 common seccomp sys_seccomp sys_seccomp
+349 common getrandom sys_getrandom sys_getrandom
+350 common memfd_create sys_memfd_create sys_memfd_create
+351 common bpf sys_bpf sys_bpf
+352 common s390_pci_mmio_write sys_s390_pci_mmio_write sys_s390_pci_mmio_write
+353 common s390_pci_mmio_read sys_s390_pci_mmio_read sys_s390_pci_mmio_read
354 common execveat sys_execveat compat_sys_execveat
355 common userfaultfd sys_userfaultfd sys_userfaultfd
356 common membarrier sys_membarrier sys_membarrier
-357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg
+357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg_time32
358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
359 common socket sys_socket sys_socket
-360 common socketpair sys_socketpair compat_sys_socketpair
-361 common bind sys_bind compat_sys_bind
-362 common connect sys_connect compat_sys_connect
+360 common socketpair sys_socketpair sys_socketpair
+361 common bind sys_bind sys_bind
+362 common connect sys_connect sys_connect
363 common listen sys_listen sys_listen
-364 common accept4 sys_accept4 compat_sys_accept4
-365 common getsockopt sys_getsockopt compat_sys_getsockopt
-366 common setsockopt sys_setsockopt compat_sys_setsockopt
-367 common getsockname sys_getsockname compat_sys_getsockname
-368 common getpeername sys_getpeername compat_sys_getpeername
-369 common sendto sys_sendto compat_sys_sendto
+364 common accept4 sys_accept4 sys_accept4
+365 common getsockopt sys_getsockopt sys_getsockopt
+366 common setsockopt sys_setsockopt sys_setsockopt
+367 common getsockname sys_getsockname sys_getsockname
+368 common getpeername sys_getpeername sys_getpeername
+369 common sendto sys_sendto sys_sendto
370 common sendmsg sys_sendmsg compat_sys_sendmsg
371 common recvfrom sys_recvfrom compat_sys_recvfrom
372 common recvmsg sys_recvmsg compat_sys_recvmsg
373 common shutdown sys_shutdown sys_shutdown
-374 common mlock2 sys_mlock2 compat_sys_mlock2
-375 common copy_file_range sys_copy_file_range compat_sys_copy_file_range
+374 common mlock2 sys_mlock2 sys_mlock2
+375 common copy_file_range sys_copy_file_range sys_copy_file_range
376 common preadv2 sys_preadv2 compat_sys_preadv2
377 common pwritev2 sys_pwritev2 compat_sys_pwritev2
-378 common s390_guarded_storage sys_s390_guarded_storage compat_sys_s390_guarded_storage
-379 common statx sys_statx compat_sys_statx
-380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
+378 common s390_guarded_storage sys_s390_guarded_storage sys_s390_guarded_storage
+379 common statx sys_statx sys_statx
+380 common s390_sthyi sys_s390_sthyi sys_s390_sthyi
+381 common kexec_file_load sys_kexec_file_load sys_kexec_file_load
+382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
+383 common rseq sys_rseq sys_rseq
+384 common pkey_mprotect sys_pkey_mprotect sys_pkey_mprotect
+385 common pkey_alloc sys_pkey_alloc sys_pkey_alloc
+386 common pkey_free sys_pkey_free sys_pkey_free
+# room for arch specific syscalls
+392 64 semtimedop sys_semtimedop -
+393 common semget sys_semget sys_semget
+394 common semctl sys_semctl compat_sys_semctl
+395 common shmget sys_shmget sys_shmget
+396 common shmctl sys_shmctl compat_sys_shmctl
+397 common shmat sys_shmat compat_sys_shmat
+398 common shmdt sys_shmdt sys_shmdt
+399 common msgget sys_msgget sys_msgget
+400 common msgsnd sys_msgsnd compat_sys_msgsnd
+401 common msgrcv sys_msgrcv compat_sys_msgrcv
+402 common msgctl sys_msgctl compat_sys_msgctl
+403 32 clock_gettime64 - sys_clock_gettime
+404 32 clock_settime64 - sys_clock_settime
+405 32 clock_adjtime64 - sys_clock_adjtime
+406 32 clock_getres_time64 - sys_clock_getres
+407 32 clock_nanosleep_time64 - sys_clock_nanosleep
+408 32 timer_gettime64 - sys_timer_gettime
+409 32 timer_settime64 - sys_timer_settime
+410 32 timerfd_gettime64 - sys_timerfd_gettime
+411 32 timerfd_settime64 - sys_timerfd_settime
+412 32 utimensat_time64 - sys_utimensat
+413 32 pselect6_time64 - compat_sys_pselect6_time64
+414 32 ppoll_time64 - compat_sys_ppoll_time64
+416 32 io_pgetevents_time64 - sys_io_pgetevents
+417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
+418 32 mq_timedsend_time64 - sys_mq_timedsend
+419 32 mq_timedreceive_time64 - sys_mq_timedreceive
+420 32 semtimedop_time64 - sys_semtimedop
+421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64
+422 32 futex_time64 - sys_futex
+423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree sys_open_tree
+429 common move_mount sys_move_mount sys_move_mount
+430 common fsopen sys_fsopen sys_fsopen
+431 common fsconfig sys_fsconfig sys_fsconfig
+432 common fsmount sys_fsmount sys_fsmount
+433 common fspick sys_fspick sys_fspick
+434 common pidfd_open sys_pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3 sys_clone3
+436 common close_range sys_close_range sys_close_range
+437 common openat2 sys_openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 common process_mrelease sys_process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
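
Each row in the s390 table above has five columns: syscall number, ABI (common, 32 or 64), name, the 64-bit entry point, and the entry point used for 32-bit compat tasks ("-" where a column does not apply); the hunks above mostly swap compat_sys_*() wrappers for plain sys_*() or *_time32 entry points in that last column. As an illustration only — not the kernel's or perf's actual table-generation scripts, which are shell (e.g. the syscalltbl.sh referenced in the x86 Makefile further down) — a minimal C sketch of reading one such row:

#include <stdio.h>
#include <string.h>

struct tbl_row {
	int nr;
	char abi[16];
	char name[64];
	char entry[64];
	char compat[64];
};

/* Parse one whitespace-separated row; "#" comments and blank lines are skipped. */
static int parse_tbl_row(const char *line, struct tbl_row *row)
{
	memset(row, 0, sizeof(*row));
	if (line[0] == '#' || line[0] == '\n' || line[0] == '\0')
		return 0;
	return sscanf(line, "%d %15s %63s %63s %63s",
		      &row->nr, row->abi, row->name,
		      row->entry, row->compat) >= 3;
}

int main(void)
{
	struct tbl_row row;
	const char *sample =
		"450 common set_mempolicy_home_node "
		"sys_set_mempolicy_home_node sys_set_mempolicy_home_node";

	if (parse_tbl_row(sample, &row))
		printf("%d (%s): 64-bit %s, compat %s\n",
		       row.nr, row.name, row.entry, row.compat);
	return 0;
}
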
diff --git a/tools/perf/arch/s390/include/perf_regs.h b/tools/perf/arch/s390/include/perf_regs.h
index bcfbaed78cc2..52fcc0891da6 100644
--- a/tools/perf/arch/s390/include/perf_regs.h
+++ b/tools/perf/arch/s390/include/perf_regs.h
@@ -14,82 +14,4 @@ void perf_regs_load(u64 *regs);
#define PERF_REG_IP PERF_REG_S390_PC
#define PERF_REG_SP PERF_REG_S390_R15
-static inline const char *perf_reg_name(int id)
-{
- switch (id) {
- case PERF_REG_S390_R0:
- return "R0";
- case PERF_REG_S390_R1:
- return "R1";
- case PERF_REG_S390_R2:
- return "R2";
- case PERF_REG_S390_R3:
- return "R3";
- case PERF_REG_S390_R4:
- return "R4";
- case PERF_REG_S390_R5:
- return "R5";
- case PERF_REG_S390_R6:
- return "R6";
- case PERF_REG_S390_R7:
- return "R7";
- case PERF_REG_S390_R8:
- return "R8";
- case PERF_REG_S390_R9:
- return "R9";
- case PERF_REG_S390_R10:
- return "R10";
- case PERF_REG_S390_R11:
- return "R11";
- case PERF_REG_S390_R12:
- return "R12";
- case PERF_REG_S390_R13:
- return "R13";
- case PERF_REG_S390_R14:
- return "R14";
- case PERF_REG_S390_R15:
- return "R15";
- case PERF_REG_S390_FP0:
- return "FP0";
- case PERF_REG_S390_FP1:
- return "FP1";
- case PERF_REG_S390_FP2:
- return "FP2";
- case PERF_REG_S390_FP3:
- return "FP3";
- case PERF_REG_S390_FP4:
- return "FP4";
- case PERF_REG_S390_FP5:
- return "FP5";
- case PERF_REG_S390_FP6:
- return "FP6";
- case PERF_REG_S390_FP7:
- return "FP7";
- case PERF_REG_S390_FP8:
- return "FP8";
- case PERF_REG_S390_FP9:
- return "FP9";
- case PERF_REG_S390_FP10:
- return "FP10";
- case PERF_REG_S390_FP11:
- return "FP11";
- case PERF_REG_S390_FP12:
- return "FP12";
- case PERF_REG_S390_FP13:
- return "FP13";
- case PERF_REG_S390_FP14:
- return "FP14";
- case PERF_REG_S390_FP15:
- return "FP15";
- case PERF_REG_S390_MASK:
- return "MASK";
- case PERF_REG_S390_PC:
- return "PC";
- default:
- return NULL;
- }
-
- return NULL;
-}
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/s390/util/Build b/tools/perf/arch/s390/util/Build
index 3d9d0f4f72ca..fa66f15a14ec 100644
--- a/tools/perf/arch/s390/util/Build
+++ b/tools/perf/arch/s390/util/Build
@@ -1,10 +1,11 @@
perf-y += header.o
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-y += perf_regs.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
perf-y += machine.o
+perf-y += pmu.o
perf-$(CONFIG_AUXTRACE) += auxtrace.o
diff --git a/tools/perf/arch/s390/util/auxtrace.c b/tools/perf/arch/s390/util/auxtrace.c
index 0db5c58c98e8..5068baa3e092 100644
--- a/tools/perf/arch/s390/util/auxtrace.c
+++ b/tools/perf/arch/s390/util/auxtrace.c
@@ -98,6 +98,7 @@ struct auxtrace_record *auxtrace_record__init(struct evlist *evlist,
evlist__for_each_entry(evlist, pos) {
if (pos->core.attr.config == PERF_EVENT_CPUM_SF_DIAG) {
diagnose = 1;
+ pos->needs_auxtrace_mmap = true;
break;
}
}
diff --git a/tools/perf/arch/s390/util/dwarf-regs.c b/tools/perf/arch/s390/util/dwarf-regs.c
index a8ace5cc6301..dfddb3099bfa 100644
--- a/tools/perf/arch/s390/util/dwarf-regs.c
+++ b/tools/perf/arch/s390/util/dwarf-regs.c
@@ -3,8 +3,7 @@
* Mapping of DWARF debug register numbers into register names.
*
* Copyright IBM Corp. 2010, 2017
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
- * Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
*
*/
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
index 0fd4e9f49ed0..0aed92df51ba 100644
--- a/tools/perf/arch/s390/util/kvm-stat.c
+++ b/tools/perf/arch/s390/util/kvm-stat.c
@@ -19,7 +19,6 @@ define_exit_reasons_table(sie_diagnose_codes, diagnose_codes);
define_exit_reasons_table(sie_icpt_prog_codes, icpt_prog_codes);
const char *vcpu_id_str = "id";
-const int decode_str_len = 40;
const char *kvm_exit_reason = "icptcode";
const char *kvm_entry_trace = "kvm:kvm_s390_sie_enter";
const char *kvm_exit_trace = "kvm:kvm_s390_sie_exit";
@@ -30,7 +29,7 @@ static void event_icpt_insn_get_key(struct evsel *evsel,
{
unsigned long insn;
- insn = perf_evsel__intval(evsel, sample, "instruction");
+ insn = evsel__intval(evsel, sample, "instruction");
key->key = icpt_insn_decoder(insn);
key->exit_reasons = sie_icpt_insn_codes;
}
@@ -39,7 +38,7 @@ static void event_sigp_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
- key->key = perf_evsel__intval(evsel, sample, "order_code");
+ key->key = evsel__intval(evsel, sample, "order_code");
key->exit_reasons = sie_sigp_order_codes;
}
@@ -47,7 +46,7 @@ static void event_diag_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
- key->key = perf_evsel__intval(evsel, sample, "code");
+ key->key = evsel__intval(evsel, sample, "code");
key->exit_reasons = sie_diagnose_codes;
}
@@ -55,7 +54,7 @@ static void event_icpt_prog_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
- key->key = perf_evsel__intval(evsel, sample, "code");
+ key->key = evsel__intval(evsel, sample, "code");
key->exit_reasons = sie_icpt_prog_codes;
}
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index 724efb2d842d..98bc3f39d5f3 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
@@ -34,19 +35,3 @@ int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
return 0;
}
-
-/* On s390 kernel text segment start is located at very low memory addresses,
- * for example 0x10000. Modules are located at very high memory addresses,
- * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
- * and beginning of first module's text segment is very big.
- * Therefore do not fill this gap and do not assign it to the kernel dso map.
- */
-void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
-{
- if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
- /* Last kernel symbol mapped to end of page */
- p->end = roundup(p->end, page_size);
- else
- p->end = c->start;
- pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
-}
diff --git a/tools/perf/arch/s390/util/pmu.c b/tools/perf/arch/s390/util/pmu.c
new file mode 100644
index 000000000000..11f03f32e3fd
--- /dev/null
+++ b/tools/perf/arch/s390/util/pmu.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright IBM Corp. 2023
+ * Author(s): Thomas Richter <tmricht@linux.ibm.com>
+ */
+
+#include <string.h>
+
+#include "../../../util/pmu.h"
+
+#define S390_PMUPAI_CRYPTO "pai_crypto"
+#define S390_PMUPAI_EXT "pai_ext"
+#define S390_PMUCPUM_CF "cpum_cf"
+
+struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu)
+{
+ if (!strcmp(pmu->name, S390_PMUPAI_CRYPTO) ||
+ !strcmp(pmu->name, S390_PMUPAI_EXT) ||
+ !strcmp(pmu->name, S390_PMUCPUM_CF))
+ pmu->selectable = true;
+ return NULL;
+}
diff --git a/tools/perf/arch/s390/util/unwind-libdw.c b/tools/perf/arch/s390/util/unwind-libdw.c
index 387c698cdd1b..7d92452d5287 100644
--- a/tools/perf/arch/s390/util/unwind-libdw.c
+++ b/tools/perf/arch/s390/util/unwind-libdw.c
@@ -3,6 +3,7 @@
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
#include "../../util/event.h"
+#include "../../util/sample.h"
#include "dwarf-regs-table.h"
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 8cc6642fce7a..5a9f9a7bf07d 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -10,10 +10,11 @@ PERF_HAVE_JITDUMP := 1
# Syscall table generation
#
-out := $(OUTPUT)arch/x86/include/generated/asm
-header := $(out)/syscalls_64.c
-sys := $(srctree)/tools/perf/arch/x86/entry/syscalls
-systbl := $(sys)/syscalltbl.sh
+generated := $(OUTPUT)arch/x86/include/generated
+out := $(generated)/asm
+header := $(out)/syscalls_64.c
+sys := $(srctree)/tools/perf/arch/x86/entry/syscalls
+systbl := $(sys)/syscalltbl.sh
# Create output directory if not already present
_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
@@ -22,6 +23,6 @@ $(header): $(sys)/syscall_64.tbl $(systbl)
$(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@
clean::
- $(call QUIET_CLEAN, x86) $(RM) $(header)
+ $(call QUIET_CLEAN, x86) $(RM) -r $(header) $(generated)
archheaders: $(header)
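
The rule above runs syscalltbl.sh over syscall_64.tbl to emit $(out)/syscalls_64.c, a generated mapping from syscall number to name; the clean:: change also removes the whole generated directory rather than just the single header. A rough, hand-written illustration of the kind of table that generation produces (the identifiers and layout here are assumptions, not the real generated file):

#include <stdio.h>

/*
 * Illustrative shape of a generated number -> name table, similar in
 * spirit to the syscalls_64.c built from syscall_64.tbl.  Names are
 * taken from the table above; gaps are left NULL.
 */
static const char *syscalltbl_x86_64[] = {
	[0]   = "read",
	[1]   = "write",
	[2]   = "open",
	[435] = "clone3",
	[450] = "set_mempolicy_home_node",
};

int main(void)
{
	printf("syscall 435 is %s\n", syscalltbl_x86_64[435]);
	return 0;
}
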
diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c
index 7eb5621c021d..305872692bfd 100644
--- a/tools/perf/arch/x86/annotate/instructions.c
+++ b/tools/perf/arch/x86/annotate/instructions.c
@@ -110,6 +110,7 @@ static struct ins x86__instructions[] = {
{ .name = "por", .ops = &mov_ops, },
{ .name = "rclb", .ops = &mov_ops, },
{ .name = "rcll", .ops = &mov_ops, },
+ { .name = "ret", .ops = &ret_ops, },
{ .name = "retq", .ops = &ret_ops, },
{ .name = "sbb", .ops = &mov_ops, },
{ .name = "sbbl", .ops = &mov_ops, },
@@ -143,9 +144,32 @@ static struct ins x86__instructions[] = {
{ .name = "xorps", .ops = &mov_ops, },
};
-static bool x86__ins_is_fused(struct arch *arch, const char *ins1,
+static bool amd__ins_is_fused(struct arch *arch, const char *ins1,
const char *ins2)
{
+ if (strstr(ins2, "jmp"))
+ return false;
+
+ /* Family >= 15h supports cmp/test + branch fusion */
+ if (arch->family >= 0x15 && (strstarts(ins1, "test") ||
+ (strstarts(ins1, "cmp") && !strstr(ins1, "xchg")))) {
+ return true;
+ }
+
+ /* Family >= 19h supports some ALU + branch fusion */
+ if (arch->family >= 0x19 && (strstarts(ins1, "add") ||
+ strstarts(ins1, "sub") || strstarts(ins1, "and") ||
+ strstarts(ins1, "inc") || strstarts(ins1, "dec") ||
+ strstarts(ins1, "or") || strstarts(ins1, "xor"))) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool intel__ins_is_fused(struct arch *arch, const char *ins1,
+ const char *ins2)
+{
if (arch->family != 6 || arch->model < 0x1e || strstr(ins2, "jmp"))
return false;
@@ -183,6 +207,9 @@ static int x86__cpuid_parse(struct arch *arch, char *cpuid)
if (ret == 3) {
arch->family = family;
arch->model = model;
+ arch->ins_is_fused = strstarts(cpuid, "AuthenticAMD") ?
+ amd__ins_is_fused :
+ intel__ins_is_fused;
return 0;
}
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 44d510bc9b78..c84d12608cd2 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -8,397 +8,412 @@
#
# The abi is "common", "64" or "x32" for this file.
#
-0 common read __x64_sys_read
-1 common write __x64_sys_write
-2 common open __x64_sys_open
-3 common close __x64_sys_close
-4 common stat __x64_sys_newstat
-5 common fstat __x64_sys_newfstat
-6 common lstat __x64_sys_newlstat
-7 common poll __x64_sys_poll
-8 common lseek __x64_sys_lseek
-9 common mmap __x64_sys_mmap
-10 common mprotect __x64_sys_mprotect
-11 common munmap __x64_sys_munmap
-12 common brk __x64_sys_brk
-13 64 rt_sigaction __x64_sys_rt_sigaction
-14 common rt_sigprocmask __x64_sys_rt_sigprocmask
-15 64 rt_sigreturn __x64_sys_rt_sigreturn/ptregs
-16 64 ioctl __x64_sys_ioctl
-17 common pread64 __x64_sys_pread64
-18 common pwrite64 __x64_sys_pwrite64
-19 64 readv __x64_sys_readv
-20 64 writev __x64_sys_writev
-21 common access __x64_sys_access
-22 common pipe __x64_sys_pipe
-23 common select __x64_sys_select
-24 common sched_yield __x64_sys_sched_yield
-25 common mremap __x64_sys_mremap
-26 common msync __x64_sys_msync
-27 common mincore __x64_sys_mincore
-28 common madvise __x64_sys_madvise
-29 common shmget __x64_sys_shmget
-30 common shmat __x64_sys_shmat
-31 common shmctl __x64_sys_shmctl
-32 common dup __x64_sys_dup
-33 common dup2 __x64_sys_dup2
-34 common pause __x64_sys_pause
-35 common nanosleep __x64_sys_nanosleep
-36 common getitimer __x64_sys_getitimer
-37 common alarm __x64_sys_alarm
-38 common setitimer __x64_sys_setitimer
-39 common getpid __x64_sys_getpid
-40 common sendfile __x64_sys_sendfile64
-41 common socket __x64_sys_socket
-42 common connect __x64_sys_connect
-43 common accept __x64_sys_accept
-44 common sendto __x64_sys_sendto
-45 64 recvfrom __x64_sys_recvfrom
-46 64 sendmsg __x64_sys_sendmsg
-47 64 recvmsg __x64_sys_recvmsg
-48 common shutdown __x64_sys_shutdown
-49 common bind __x64_sys_bind
-50 common listen __x64_sys_listen
-51 common getsockname __x64_sys_getsockname
-52 common getpeername __x64_sys_getpeername
-53 common socketpair __x64_sys_socketpair
-54 64 setsockopt __x64_sys_setsockopt
-55 64 getsockopt __x64_sys_getsockopt
-56 common clone __x64_sys_clone/ptregs
-57 common fork __x64_sys_fork/ptregs
-58 common vfork __x64_sys_vfork/ptregs
-59 64 execve __x64_sys_execve/ptregs
-60 common exit __x64_sys_exit
-61 common wait4 __x64_sys_wait4
-62 common kill __x64_sys_kill
-63 common uname __x64_sys_newuname
-64 common semget __x64_sys_semget
-65 common semop __x64_sys_semop
-66 common semctl __x64_sys_semctl
-67 common shmdt __x64_sys_shmdt
-68 common msgget __x64_sys_msgget
-69 common msgsnd __x64_sys_msgsnd
-70 common msgrcv __x64_sys_msgrcv
-71 common msgctl __x64_sys_msgctl
-72 common fcntl __x64_sys_fcntl
-73 common flock __x64_sys_flock
-74 common fsync __x64_sys_fsync
-75 common fdatasync __x64_sys_fdatasync
-76 common truncate __x64_sys_truncate
-77 common ftruncate __x64_sys_ftruncate
-78 common getdents __x64_sys_getdents
-79 common getcwd __x64_sys_getcwd
-80 common chdir __x64_sys_chdir
-81 common fchdir __x64_sys_fchdir
-82 common rename __x64_sys_rename
-83 common mkdir __x64_sys_mkdir
-84 common rmdir __x64_sys_rmdir
-85 common creat __x64_sys_creat
-86 common link __x64_sys_link
-87 common unlink __x64_sys_unlink
-88 common symlink __x64_sys_symlink
-89 common readlink __x64_sys_readlink
-90 common chmod __x64_sys_chmod
-91 common fchmod __x64_sys_fchmod
-92 common chown __x64_sys_chown
-93 common fchown __x64_sys_fchown
-94 common lchown __x64_sys_lchown
-95 common umask __x64_sys_umask
-96 common gettimeofday __x64_sys_gettimeofday
-97 common getrlimit __x64_sys_getrlimit
-98 common getrusage __x64_sys_getrusage
-99 common sysinfo __x64_sys_sysinfo
-100 common times __x64_sys_times
-101 64 ptrace __x64_sys_ptrace
-102 common getuid __x64_sys_getuid
-103 common syslog __x64_sys_syslog
-104 common getgid __x64_sys_getgid
-105 common setuid __x64_sys_setuid
-106 common setgid __x64_sys_setgid
-107 common geteuid __x64_sys_geteuid
-108 common getegid __x64_sys_getegid
-109 common setpgid __x64_sys_setpgid
-110 common getppid __x64_sys_getppid
-111 common getpgrp __x64_sys_getpgrp
-112 common setsid __x64_sys_setsid
-113 common setreuid __x64_sys_setreuid
-114 common setregid __x64_sys_setregid
-115 common getgroups __x64_sys_getgroups
-116 common setgroups __x64_sys_setgroups
-117 common setresuid __x64_sys_setresuid
-118 common getresuid __x64_sys_getresuid
-119 common setresgid __x64_sys_setresgid
-120 common getresgid __x64_sys_getresgid
-121 common getpgid __x64_sys_getpgid
-122 common setfsuid __x64_sys_setfsuid
-123 common setfsgid __x64_sys_setfsgid
-124 common getsid __x64_sys_getsid
-125 common capget __x64_sys_capget
-126 common capset __x64_sys_capset
-127 64 rt_sigpending __x64_sys_rt_sigpending
-128 64 rt_sigtimedwait __x64_sys_rt_sigtimedwait
-129 64 rt_sigqueueinfo __x64_sys_rt_sigqueueinfo
-130 common rt_sigsuspend __x64_sys_rt_sigsuspend
-131 64 sigaltstack __x64_sys_sigaltstack
-132 common utime __x64_sys_utime
-133 common mknod __x64_sys_mknod
+0 common read sys_read
+1 common write sys_write
+2 common open sys_open
+3 common close sys_close
+4 common stat sys_newstat
+5 common fstat sys_newfstat
+6 common lstat sys_newlstat
+7 common poll sys_poll
+8 common lseek sys_lseek
+9 common mmap sys_mmap
+10 common mprotect sys_mprotect
+11 common munmap sys_munmap
+12 common brk sys_brk
+13 64 rt_sigaction sys_rt_sigaction
+14 common rt_sigprocmask sys_rt_sigprocmask
+15 64 rt_sigreturn sys_rt_sigreturn
+16 64 ioctl sys_ioctl
+17 common pread64 sys_pread64
+18 common pwrite64 sys_pwrite64
+19 64 readv sys_readv
+20 64 writev sys_writev
+21 common access sys_access
+22 common pipe sys_pipe
+23 common select sys_select
+24 common sched_yield sys_sched_yield
+25 common mremap sys_mremap
+26 common msync sys_msync
+27 common mincore sys_mincore
+28 common madvise sys_madvise
+29 common shmget sys_shmget
+30 common shmat sys_shmat
+31 common shmctl sys_shmctl
+32 common dup sys_dup
+33 common dup2 sys_dup2
+34 common pause sys_pause
+35 common nanosleep sys_nanosleep
+36 common getitimer sys_getitimer
+37 common alarm sys_alarm
+38 common setitimer sys_setitimer
+39 common getpid sys_getpid
+40 common sendfile sys_sendfile64
+41 common socket sys_socket
+42 common connect sys_connect
+43 common accept sys_accept
+44 common sendto sys_sendto
+45 64 recvfrom sys_recvfrom
+46 64 sendmsg sys_sendmsg
+47 64 recvmsg sys_recvmsg
+48 common shutdown sys_shutdown
+49 common bind sys_bind
+50 common listen sys_listen
+51 common getsockname sys_getsockname
+52 common getpeername sys_getpeername
+53 common socketpair sys_socketpair
+54 64 setsockopt sys_setsockopt
+55 64 getsockopt sys_getsockopt
+56 common clone sys_clone
+57 common fork sys_fork
+58 common vfork sys_vfork
+59 64 execve sys_execve
+60 common exit sys_exit
+61 common wait4 sys_wait4
+62 common kill sys_kill
+63 common uname sys_newuname
+64 common semget sys_semget
+65 common semop sys_semop
+66 common semctl sys_semctl
+67 common shmdt sys_shmdt
+68 common msgget sys_msgget
+69 common msgsnd sys_msgsnd
+70 common msgrcv sys_msgrcv
+71 common msgctl sys_msgctl
+72 common fcntl sys_fcntl
+73 common flock sys_flock
+74 common fsync sys_fsync
+75 common fdatasync sys_fdatasync
+76 common truncate sys_truncate
+77 common ftruncate sys_ftruncate
+78 common getdents sys_getdents
+79 common getcwd sys_getcwd
+80 common chdir sys_chdir
+81 common fchdir sys_fchdir
+82 common rename sys_rename
+83 common mkdir sys_mkdir
+84 common rmdir sys_rmdir
+85 common creat sys_creat
+86 common link sys_link
+87 common unlink sys_unlink
+88 common symlink sys_symlink
+89 common readlink sys_readlink
+90 common chmod sys_chmod
+91 common fchmod sys_fchmod
+92 common chown sys_chown
+93 common fchown sys_fchown
+94 common lchown sys_lchown
+95 common umask sys_umask
+96 common gettimeofday sys_gettimeofday
+97 common getrlimit sys_getrlimit
+98 common getrusage sys_getrusage
+99 common sysinfo sys_sysinfo
+100 common times sys_times
+101 64 ptrace sys_ptrace
+102 common getuid sys_getuid
+103 common syslog sys_syslog
+104 common getgid sys_getgid
+105 common setuid sys_setuid
+106 common setgid sys_setgid
+107 common geteuid sys_geteuid
+108 common getegid sys_getegid
+109 common setpgid sys_setpgid
+110 common getppid sys_getppid
+111 common getpgrp sys_getpgrp
+112 common setsid sys_setsid
+113 common setreuid sys_setreuid
+114 common setregid sys_setregid
+115 common getgroups sys_getgroups
+116 common setgroups sys_setgroups
+117 common setresuid sys_setresuid
+118 common getresuid sys_getresuid
+119 common setresgid sys_setresgid
+120 common getresgid sys_getresgid
+121 common getpgid sys_getpgid
+122 common setfsuid sys_setfsuid
+123 common setfsgid sys_setfsgid
+124 common getsid sys_getsid
+125 common capget sys_capget
+126 common capset sys_capset
+127 64 rt_sigpending sys_rt_sigpending
+128 64 rt_sigtimedwait sys_rt_sigtimedwait
+129 64 rt_sigqueueinfo sys_rt_sigqueueinfo
+130 common rt_sigsuspend sys_rt_sigsuspend
+131 64 sigaltstack sys_sigaltstack
+132 common utime sys_utime
+133 common mknod sys_mknod
134 64 uselib
-135 common personality __x64_sys_personality
-136 common ustat __x64_sys_ustat
-137 common statfs __x64_sys_statfs
-138 common fstatfs __x64_sys_fstatfs
-139 common sysfs __x64_sys_sysfs
-140 common getpriority __x64_sys_getpriority
-141 common setpriority __x64_sys_setpriority
-142 common sched_setparam __x64_sys_sched_setparam
-143 common sched_getparam __x64_sys_sched_getparam
-144 common sched_setscheduler __x64_sys_sched_setscheduler
-145 common sched_getscheduler __x64_sys_sched_getscheduler
-146 common sched_get_priority_max __x64_sys_sched_get_priority_max
-147 common sched_get_priority_min __x64_sys_sched_get_priority_min
-148 common sched_rr_get_interval __x64_sys_sched_rr_get_interval
-149 common mlock __x64_sys_mlock
-150 common munlock __x64_sys_munlock
-151 common mlockall __x64_sys_mlockall
-152 common munlockall __x64_sys_munlockall
-153 common vhangup __x64_sys_vhangup
-154 common modify_ldt __x64_sys_modify_ldt
-155 common pivot_root __x64_sys_pivot_root
-156 64 _sysctl __x64_sys_sysctl
-157 common prctl __x64_sys_prctl
-158 common arch_prctl __x64_sys_arch_prctl
-159 common adjtimex __x64_sys_adjtimex
-160 common setrlimit __x64_sys_setrlimit
-161 common chroot __x64_sys_chroot
-162 common sync __x64_sys_sync
-163 common acct __x64_sys_acct
-164 common settimeofday __x64_sys_settimeofday
-165 common mount __x64_sys_mount
-166 common umount2 __x64_sys_umount
-167 common swapon __x64_sys_swapon
-168 common swapoff __x64_sys_swapoff
-169 common reboot __x64_sys_reboot
-170 common sethostname __x64_sys_sethostname
-171 common setdomainname __x64_sys_setdomainname
-172 common iopl __x64_sys_iopl/ptregs
-173 common ioperm __x64_sys_ioperm
+135 common personality sys_personality
+136 common ustat sys_ustat
+137 common statfs sys_statfs
+138 common fstatfs sys_fstatfs
+139 common sysfs sys_sysfs
+140 common getpriority sys_getpriority
+141 common setpriority sys_setpriority
+142 common sched_setparam sys_sched_setparam
+143 common sched_getparam sys_sched_getparam
+144 common sched_setscheduler sys_sched_setscheduler
+145 common sched_getscheduler sys_sched_getscheduler
+146 common sched_get_priority_max sys_sched_get_priority_max
+147 common sched_get_priority_min sys_sched_get_priority_min
+148 common sched_rr_get_interval sys_sched_rr_get_interval
+149 common mlock sys_mlock
+150 common munlock sys_munlock
+151 common mlockall sys_mlockall
+152 common munlockall sys_munlockall
+153 common vhangup sys_vhangup
+154 common modify_ldt sys_modify_ldt
+155 common pivot_root sys_pivot_root
+156 64 _sysctl sys_ni_syscall
+157 common prctl sys_prctl
+158 common arch_prctl sys_arch_prctl
+159 common adjtimex sys_adjtimex
+160 common setrlimit sys_setrlimit
+161 common chroot sys_chroot
+162 common sync sys_sync
+163 common acct sys_acct
+164 common settimeofday sys_settimeofday
+165 common mount sys_mount
+166 common umount2 sys_umount
+167 common swapon sys_swapon
+168 common swapoff sys_swapoff
+169 common reboot sys_reboot
+170 common sethostname sys_sethostname
+171 common setdomainname sys_setdomainname
+172 common iopl sys_iopl
+173 common ioperm sys_ioperm
174 64 create_module
-175 common init_module __x64_sys_init_module
-176 common delete_module __x64_sys_delete_module
+175 common init_module sys_init_module
+176 common delete_module sys_delete_module
177 64 get_kernel_syms
178 64 query_module
-179 common quotactl __x64_sys_quotactl
+179 common quotactl sys_quotactl
180 64 nfsservctl
181 common getpmsg
182 common putpmsg
183 common afs_syscall
184 common tuxcall
185 common security
-186 common gettid __x64_sys_gettid
-187 common readahead __x64_sys_readahead
-188 common setxattr __x64_sys_setxattr
-189 common lsetxattr __x64_sys_lsetxattr
-190 common fsetxattr __x64_sys_fsetxattr
-191 common getxattr __x64_sys_getxattr
-192 common lgetxattr __x64_sys_lgetxattr
-193 common fgetxattr __x64_sys_fgetxattr
-194 common listxattr __x64_sys_listxattr
-195 common llistxattr __x64_sys_llistxattr
-196 common flistxattr __x64_sys_flistxattr
-197 common removexattr __x64_sys_removexattr
-198 common lremovexattr __x64_sys_lremovexattr
-199 common fremovexattr __x64_sys_fremovexattr
-200 common tkill __x64_sys_tkill
-201 common time __x64_sys_time
-202 common futex __x64_sys_futex
-203 common sched_setaffinity __x64_sys_sched_setaffinity
-204 common sched_getaffinity __x64_sys_sched_getaffinity
+186 common gettid sys_gettid
+187 common readahead sys_readahead
+188 common setxattr sys_setxattr
+189 common lsetxattr sys_lsetxattr
+190 common fsetxattr sys_fsetxattr
+191 common getxattr sys_getxattr
+192 common lgetxattr sys_lgetxattr
+193 common fgetxattr sys_fgetxattr
+194 common listxattr sys_listxattr
+195 common llistxattr sys_llistxattr
+196 common flistxattr sys_flistxattr
+197 common removexattr sys_removexattr
+198 common lremovexattr sys_lremovexattr
+199 common fremovexattr sys_fremovexattr
+200 common tkill sys_tkill
+201 common time sys_time
+202 common futex sys_futex
+203 common sched_setaffinity sys_sched_setaffinity
+204 common sched_getaffinity sys_sched_getaffinity
205 64 set_thread_area
-206 64 io_setup __x64_sys_io_setup
-207 common io_destroy __x64_sys_io_destroy
-208 common io_getevents __x64_sys_io_getevents
-209 64 io_submit __x64_sys_io_submit
-210 common io_cancel __x64_sys_io_cancel
+206 64 io_setup sys_io_setup
+207 common io_destroy sys_io_destroy
+208 common io_getevents sys_io_getevents
+209 64 io_submit sys_io_submit
+210 common io_cancel sys_io_cancel
211 64 get_thread_area
-212 common lookup_dcookie __x64_sys_lookup_dcookie
-213 common epoll_create __x64_sys_epoll_create
+212 common lookup_dcookie sys_lookup_dcookie
+213 common epoll_create sys_epoll_create
214 64 epoll_ctl_old
215 64 epoll_wait_old
-216 common remap_file_pages __x64_sys_remap_file_pages
-217 common getdents64 __x64_sys_getdents64
-218 common set_tid_address __x64_sys_set_tid_address
-219 common restart_syscall __x64_sys_restart_syscall
-220 common semtimedop __x64_sys_semtimedop
-221 common fadvise64 __x64_sys_fadvise64
-222 64 timer_create __x64_sys_timer_create
-223 common timer_settime __x64_sys_timer_settime
-224 common timer_gettime __x64_sys_timer_gettime
-225 common timer_getoverrun __x64_sys_timer_getoverrun
-226 common timer_delete __x64_sys_timer_delete
-227 common clock_settime __x64_sys_clock_settime
-228 common clock_gettime __x64_sys_clock_gettime
-229 common clock_getres __x64_sys_clock_getres
-230 common clock_nanosleep __x64_sys_clock_nanosleep
-231 common exit_group __x64_sys_exit_group
-232 common epoll_wait __x64_sys_epoll_wait
-233 common epoll_ctl __x64_sys_epoll_ctl
-234 common tgkill __x64_sys_tgkill
-235 common utimes __x64_sys_utimes
+216 common remap_file_pages sys_remap_file_pages
+217 common getdents64 sys_getdents64
+218 common set_tid_address sys_set_tid_address
+219 common restart_syscall sys_restart_syscall
+220 common semtimedop sys_semtimedop
+221 common fadvise64 sys_fadvise64
+222 64 timer_create sys_timer_create
+223 common timer_settime sys_timer_settime
+224 common timer_gettime sys_timer_gettime
+225 common timer_getoverrun sys_timer_getoverrun
+226 common timer_delete sys_timer_delete
+227 common clock_settime sys_clock_settime
+228 common clock_gettime sys_clock_gettime
+229 common clock_getres sys_clock_getres
+230 common clock_nanosleep sys_clock_nanosleep
+231 common exit_group sys_exit_group
+232 common epoll_wait sys_epoll_wait
+233 common epoll_ctl sys_epoll_ctl
+234 common tgkill sys_tgkill
+235 common utimes sys_utimes
236 64 vserver
-237 common mbind __x64_sys_mbind
-238 common set_mempolicy __x64_sys_set_mempolicy
-239 common get_mempolicy __x64_sys_get_mempolicy
-240 common mq_open __x64_sys_mq_open
-241 common mq_unlink __x64_sys_mq_unlink
-242 common mq_timedsend __x64_sys_mq_timedsend
-243 common mq_timedreceive __x64_sys_mq_timedreceive
-244 64 mq_notify __x64_sys_mq_notify
-245 common mq_getsetattr __x64_sys_mq_getsetattr
-246 64 kexec_load __x64_sys_kexec_load
-247 64 waitid __x64_sys_waitid
-248 common add_key __x64_sys_add_key
-249 common request_key __x64_sys_request_key
-250 common keyctl __x64_sys_keyctl
-251 common ioprio_set __x64_sys_ioprio_set
-252 common ioprio_get __x64_sys_ioprio_get
-253 common inotify_init __x64_sys_inotify_init
-254 common inotify_add_watch __x64_sys_inotify_add_watch
-255 common inotify_rm_watch __x64_sys_inotify_rm_watch
-256 common migrate_pages __x64_sys_migrate_pages
-257 common openat __x64_sys_openat
-258 common mkdirat __x64_sys_mkdirat
-259 common mknodat __x64_sys_mknodat
-260 common fchownat __x64_sys_fchownat
-261 common futimesat __x64_sys_futimesat
-262 common newfstatat __x64_sys_newfstatat
-263 common unlinkat __x64_sys_unlinkat
-264 common renameat __x64_sys_renameat
-265 common linkat __x64_sys_linkat
-266 common symlinkat __x64_sys_symlinkat
-267 common readlinkat __x64_sys_readlinkat
-268 common fchmodat __x64_sys_fchmodat
-269 common faccessat __x64_sys_faccessat
-270 common pselect6 __x64_sys_pselect6
-271 common ppoll __x64_sys_ppoll
-272 common unshare __x64_sys_unshare
-273 64 set_robust_list __x64_sys_set_robust_list
-274 64 get_robust_list __x64_sys_get_robust_list
-275 common splice __x64_sys_splice
-276 common tee __x64_sys_tee
-277 common sync_file_range __x64_sys_sync_file_range
-278 64 vmsplice __x64_sys_vmsplice
-279 64 move_pages __x64_sys_move_pages
-280 common utimensat __x64_sys_utimensat
-281 common epoll_pwait __x64_sys_epoll_pwait
-282 common signalfd __x64_sys_signalfd
-283 common timerfd_create __x64_sys_timerfd_create
-284 common eventfd __x64_sys_eventfd
-285 common fallocate __x64_sys_fallocate
-286 common timerfd_settime __x64_sys_timerfd_settime
-287 common timerfd_gettime __x64_sys_timerfd_gettime
-288 common accept4 __x64_sys_accept4
-289 common signalfd4 __x64_sys_signalfd4
-290 common eventfd2 __x64_sys_eventfd2
-291 common epoll_create1 __x64_sys_epoll_create1
-292 common dup3 __x64_sys_dup3
-293 common pipe2 __x64_sys_pipe2
-294 common inotify_init1 __x64_sys_inotify_init1
-295 64 preadv __x64_sys_preadv
-296 64 pwritev __x64_sys_pwritev
-297 64 rt_tgsigqueueinfo __x64_sys_rt_tgsigqueueinfo
-298 common perf_event_open __x64_sys_perf_event_open
-299 64 recvmmsg __x64_sys_recvmmsg
-300 common fanotify_init __x64_sys_fanotify_init
-301 common fanotify_mark __x64_sys_fanotify_mark
-302 common prlimit64 __x64_sys_prlimit64
-303 common name_to_handle_at __x64_sys_name_to_handle_at
-304 common open_by_handle_at __x64_sys_open_by_handle_at
-305 common clock_adjtime __x64_sys_clock_adjtime
-306 common syncfs __x64_sys_syncfs
-307 64 sendmmsg __x64_sys_sendmmsg
-308 common setns __x64_sys_setns
-309 common getcpu __x64_sys_getcpu
-310 64 process_vm_readv __x64_sys_process_vm_readv
-311 64 process_vm_writev __x64_sys_process_vm_writev
-312 common kcmp __x64_sys_kcmp
-313 common finit_module __x64_sys_finit_module
-314 common sched_setattr __x64_sys_sched_setattr
-315 common sched_getattr __x64_sys_sched_getattr
-316 common renameat2 __x64_sys_renameat2
-317 common seccomp __x64_sys_seccomp
-318 common getrandom __x64_sys_getrandom
-319 common memfd_create __x64_sys_memfd_create
-320 common kexec_file_load __x64_sys_kexec_file_load
-321 common bpf __x64_sys_bpf
-322 64 execveat __x64_sys_execveat/ptregs
-323 common userfaultfd __x64_sys_userfaultfd
-324 common membarrier __x64_sys_membarrier
-325 common mlock2 __x64_sys_mlock2
-326 common copy_file_range __x64_sys_copy_file_range
-327 64 preadv2 __x64_sys_preadv2
-328 64 pwritev2 __x64_sys_pwritev2
-329 common pkey_mprotect __x64_sys_pkey_mprotect
-330 common pkey_alloc __x64_sys_pkey_alloc
-331 common pkey_free __x64_sys_pkey_free
-332 common statx __x64_sys_statx
-333 common io_pgetevents __x64_sys_io_pgetevents
-334 common rseq __x64_sys_rseq
+237 common mbind sys_mbind
+238 common set_mempolicy sys_set_mempolicy
+239 common get_mempolicy sys_get_mempolicy
+240 common mq_open sys_mq_open
+241 common mq_unlink sys_mq_unlink
+242 common mq_timedsend sys_mq_timedsend
+243 common mq_timedreceive sys_mq_timedreceive
+244 64 mq_notify sys_mq_notify
+245 common mq_getsetattr sys_mq_getsetattr
+246 64 kexec_load sys_kexec_load
+247 64 waitid sys_waitid
+248 common add_key sys_add_key
+249 common request_key sys_request_key
+250 common keyctl sys_keyctl
+251 common ioprio_set sys_ioprio_set
+252 common ioprio_get sys_ioprio_get
+253 common inotify_init sys_inotify_init
+254 common inotify_add_watch sys_inotify_add_watch
+255 common inotify_rm_watch sys_inotify_rm_watch
+256 common migrate_pages sys_migrate_pages
+257 common openat sys_openat
+258 common mkdirat sys_mkdirat
+259 common mknodat sys_mknodat
+260 common fchownat sys_fchownat
+261 common futimesat sys_futimesat
+262 common newfstatat sys_newfstatat
+263 common unlinkat sys_unlinkat
+264 common renameat sys_renameat
+265 common linkat sys_linkat
+266 common symlinkat sys_symlinkat
+267 common readlinkat sys_readlinkat
+268 common fchmodat sys_fchmodat
+269 common faccessat sys_faccessat
+270 common pselect6 sys_pselect6
+271 common ppoll sys_ppoll
+272 common unshare sys_unshare
+273 64 set_robust_list sys_set_robust_list
+274 64 get_robust_list sys_get_robust_list
+275 common splice sys_splice
+276 common tee sys_tee
+277 common sync_file_range sys_sync_file_range
+278 64 vmsplice sys_vmsplice
+279 64 move_pages sys_move_pages
+280 common utimensat sys_utimensat
+281 common epoll_pwait sys_epoll_pwait
+282 common signalfd sys_signalfd
+283 common timerfd_create sys_timerfd_create
+284 common eventfd sys_eventfd
+285 common fallocate sys_fallocate
+286 common timerfd_settime sys_timerfd_settime
+287 common timerfd_gettime sys_timerfd_gettime
+288 common accept4 sys_accept4
+289 common signalfd4 sys_signalfd4
+290 common eventfd2 sys_eventfd2
+291 common epoll_create1 sys_epoll_create1
+292 common dup3 sys_dup3
+293 common pipe2 sys_pipe2
+294 common inotify_init1 sys_inotify_init1
+295 64 preadv sys_preadv
+296 64 pwritev sys_pwritev
+297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+298 common perf_event_open sys_perf_event_open
+299 64 recvmmsg sys_recvmmsg
+300 common fanotify_init sys_fanotify_init
+301 common fanotify_mark sys_fanotify_mark
+302 common prlimit64 sys_prlimit64
+303 common name_to_handle_at sys_name_to_handle_at
+304 common open_by_handle_at sys_open_by_handle_at
+305 common clock_adjtime sys_clock_adjtime
+306 common syncfs sys_syncfs
+307 64 sendmmsg sys_sendmmsg
+308 common setns sys_setns
+309 common getcpu sys_getcpu
+310 64 process_vm_readv sys_process_vm_readv
+311 64 process_vm_writev sys_process_vm_writev
+312 common kcmp sys_kcmp
+313 common finit_module sys_finit_module
+314 common sched_setattr sys_sched_setattr
+315 common sched_getattr sys_sched_getattr
+316 common renameat2 sys_renameat2
+317 common seccomp sys_seccomp
+318 common getrandom sys_getrandom
+319 common memfd_create sys_memfd_create
+320 common kexec_file_load sys_kexec_file_load
+321 common bpf sys_bpf
+322 64 execveat sys_execveat
+323 common userfaultfd sys_userfaultfd
+324 common membarrier sys_membarrier
+325 common mlock2 sys_mlock2
+326 common copy_file_range sys_copy_file_range
+327 64 preadv2 sys_preadv2
+328 64 pwritev2 sys_pwritev2
+329 common pkey_mprotect sys_pkey_mprotect
+330 common pkey_alloc sys_pkey_alloc
+331 common pkey_free sys_pkey_free
+332 common statx sys_statx
+333 common io_pgetevents sys_io_pgetevents
+334 common rseq sys_rseq
# don't use numbers 387 through 423, add new calls after the last
# 'common' entry
-424 common pidfd_send_signal __x64_sys_pidfd_send_signal
-425 common io_uring_setup __x64_sys_io_uring_setup
-426 common io_uring_enter __x64_sys_io_uring_enter
-427 common io_uring_register __x64_sys_io_uring_register
-428 common open_tree __x64_sys_open_tree
-429 common move_mount __x64_sys_move_mount
-430 common fsopen __x64_sys_fsopen
-431 common fsconfig __x64_sys_fsconfig
-432 common fsmount __x64_sys_fsmount
-433 common fspick __x64_sys_fspick
-434 common pidfd_open __x64_sys_pidfd_open
-435 common clone3 __x64_sys_clone3/ptregs
-437 common openat2 __x64_sys_openat2
-438 common pidfd_getfd __x64_sys_pidfd_getfd
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 common memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
#
-# x32-specific system call numbers start at 512 to avoid cache impact
-# for native 64-bit operation. The __x32_compat_sys stubs are created
-# on-the-fly for compat_sys_*() compatibility system calls if X86_X32
-# is defined.
+# Due to a historical design error, certain syscalls are numbered differently
+# in x32 as compared to native x86_64. These syscalls have numbers 512-547.
+# Do not add new syscalls to this range. Numbers 548 and above are available
+# for non-x32 use.
#
-512 x32 rt_sigaction __x32_compat_sys_rt_sigaction
-513 x32 rt_sigreturn sys32_x32_rt_sigreturn
-514 x32 ioctl __x32_compat_sys_ioctl
-515 x32 readv __x32_compat_sys_readv
-516 x32 writev __x32_compat_sys_writev
-517 x32 recvfrom __x32_compat_sys_recvfrom
-518 x32 sendmsg __x32_compat_sys_sendmsg
-519 x32 recvmsg __x32_compat_sys_recvmsg
-520 x32 execve __x32_compat_sys_execve/ptregs
-521 x32 ptrace __x32_compat_sys_ptrace
-522 x32 rt_sigpending __x32_compat_sys_rt_sigpending
-523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait_time64
-524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo
-525 x32 sigaltstack __x32_compat_sys_sigaltstack
-526 x32 timer_create __x32_compat_sys_timer_create
-527 x32 mq_notify __x32_compat_sys_mq_notify
-528 x32 kexec_load __x32_compat_sys_kexec_load
-529 x32 waitid __x32_compat_sys_waitid
-530 x32 set_robust_list __x32_compat_sys_set_robust_list
-531 x32 get_robust_list __x32_compat_sys_get_robust_list
-532 x32 vmsplice __x32_compat_sys_vmsplice
-533 x32 move_pages __x32_compat_sys_move_pages
-534 x32 preadv __x32_compat_sys_preadv64
-535 x32 pwritev __x32_compat_sys_pwritev64
-536 x32 rt_tgsigqueueinfo __x32_compat_sys_rt_tgsigqueueinfo
-537 x32 recvmmsg __x32_compat_sys_recvmmsg_time64
-538 x32 sendmmsg __x32_compat_sys_sendmmsg
-539 x32 process_vm_readv __x32_compat_sys_process_vm_readv
-540 x32 process_vm_writev __x32_compat_sys_process_vm_writev
-541 x32 setsockopt __x32_compat_sys_setsockopt
-542 x32 getsockopt __x32_compat_sys_getsockopt
-543 x32 io_setup __x32_compat_sys_io_setup
-544 x32 io_submit __x32_compat_sys_io_submit
-545 x32 execveat __x32_compat_sys_execveat/ptregs
-546 x32 preadv2 __x32_compat_sys_preadv64v2
-547 x32 pwritev2 __x32_compat_sys_pwritev64v2
+512 x32 rt_sigaction compat_sys_rt_sigaction
+513 x32 rt_sigreturn compat_sys_x32_rt_sigreturn
+514 x32 ioctl compat_sys_ioctl
+515 x32 readv sys_readv
+516 x32 writev sys_writev
+517 x32 recvfrom compat_sys_recvfrom
+518 x32 sendmsg compat_sys_sendmsg
+519 x32 recvmsg compat_sys_recvmsg
+520 x32 execve compat_sys_execve
+521 x32 ptrace compat_sys_ptrace
+522 x32 rt_sigpending compat_sys_rt_sigpending
+523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+525 x32 sigaltstack compat_sys_sigaltstack
+526 x32 timer_create compat_sys_timer_create
+527 x32 mq_notify compat_sys_mq_notify
+528 x32 kexec_load compat_sys_kexec_load
+529 x32 waitid compat_sys_waitid
+530 x32 set_robust_list compat_sys_set_robust_list
+531 x32 get_robust_list compat_sys_get_robust_list
+532 x32 vmsplice sys_vmsplice
+533 x32 move_pages sys_move_pages
+534 x32 preadv compat_sys_preadv64
+535 x32 pwritev compat_sys_pwritev64
+536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+537 x32 recvmmsg compat_sys_recvmmsg_time64
+538 x32 sendmmsg compat_sys_sendmmsg
+539 x32 process_vm_readv sys_process_vm_readv
+540 x32 process_vm_writev sys_process_vm_writev
+541 x32 setsockopt sys_setsockopt
+542 x32 getsockopt sys_getsockopt
+543 x32 io_setup compat_sys_io_setup
+544 x32 io_submit compat_sys_io_submit
+545 x32 execveat compat_sys_execveat
+546 x32 preadv2 compat_sys_preadv64v2
+547 x32 pwritev2 compat_sys_pwritev64v2
+# This is the end of the legacy x32 range. Numbers 548 and above are
+# not special and are not to be used for x32-specific syscalls.
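The comment block above says the x32 entries occupy table slots 512-547; at run time an x32 task reaches them with __X32_SYSCALL_BIT OR'ed into the slot number. A minimal sketch of that arithmetic follows, assuming __X32_SYSCALL_BIT is 0x40000000 as in the x86 uapi unistd.h (the value itself is not shown in this hunk):

	/* Sketch only: how an x32 table slot maps to the raw syscall number. */
	#include <stdio.h>

	#define X32_SYSCALL_BIT 0x40000000UL	/* assumed to match __X32_SYSCALL_BIT */

	int main(void)
	{
		unsigned long slot = 515;	/* "readv" in the x32 range above */

		/* An x32 task invokes slot 515 as 0x40000000 + 515 = 0x40000203. */
		printf("raw nr for x32 readv: %#lx\n", X32_SYSCALL_BIT + slot);
		return 0;
	}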
diff --git a/tools/perf/arch/x86/include/arch-tests.h b/tools/perf/arch/x86/include/arch-tests.h
index c41c5affe4be..902e9ea9b99e 100644
--- a/tools/perf/arch/x86/include/arch-tests.h
+++ b/tools/perf/arch/x86/include/arch-tests.h
@@ -2,23 +2,16 @@
#ifndef ARCH_TESTS_H
#define ARCH_TESTS_H
-#include <linux/compiler.h>
-struct test;
+struct test_suite;
/* Tests */
-int test__rdpmc(struct test *test __maybe_unused, int subtest);
-int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest);
-int test__insn_x86(struct test *test __maybe_unused, int subtest);
-int test__intel_pt_pkt_decoder(struct test *test, int subtest);
-int test__bp_modify(struct test *test, int subtest);
+int test__rdpmc(struct test_suite *test, int subtest);
+int test__insn_x86(struct test_suite *test, int subtest);
+int test__intel_pt_pkt_decoder(struct test_suite *test, int subtest);
+int test__intel_pt_hybrid_compat(struct test_suite *test, int subtest);
+int test__bp_modify(struct test_suite *test, int subtest);
+int test__x86_sample_parsing(struct test_suite *test, int subtest);
-#ifdef HAVE_DWARF_UNWIND_SUPPORT
-struct thread;
-struct perf_sample;
-int test__arch_unwind_sample(struct perf_sample *sample,
- struct thread *thread);
-#endif
-
-extern struct test arch_tests[];
+extern struct test_suite *arch_tests[];
#endif
diff --git a/tools/perf/arch/x86/include/perf_regs.h b/tools/perf/arch/x86/include/perf_regs.h
index b7321337d100..16e23b722042 100644
--- a/tools/perf/arch/x86/include/perf_regs.h
+++ b/tools/perf/arch/x86/include/perf_regs.h
@@ -23,86 +23,4 @@ void perf_regs_load(u64 *regs);
#define PERF_REG_IP PERF_REG_X86_IP
#define PERF_REG_SP PERF_REG_X86_SP
-static inline const char *perf_reg_name(int id)
-{
- switch (id) {
- case PERF_REG_X86_AX:
- return "AX";
- case PERF_REG_X86_BX:
- return "BX";
- case PERF_REG_X86_CX:
- return "CX";
- case PERF_REG_X86_DX:
- return "DX";
- case PERF_REG_X86_SI:
- return "SI";
- case PERF_REG_X86_DI:
- return "DI";
- case PERF_REG_X86_BP:
- return "BP";
- case PERF_REG_X86_SP:
- return "SP";
- case PERF_REG_X86_IP:
- return "IP";
- case PERF_REG_X86_FLAGS:
- return "FLAGS";
- case PERF_REG_X86_CS:
- return "CS";
- case PERF_REG_X86_SS:
- return "SS";
- case PERF_REG_X86_DS:
- return "DS";
- case PERF_REG_X86_ES:
- return "ES";
- case PERF_REG_X86_FS:
- return "FS";
- case PERF_REG_X86_GS:
- return "GS";
-#ifdef HAVE_ARCH_X86_64_SUPPORT
- case PERF_REG_X86_R8:
- return "R8";
- case PERF_REG_X86_R9:
- return "R9";
- case PERF_REG_X86_R10:
- return "R10";
- case PERF_REG_X86_R11:
- return "R11";
- case PERF_REG_X86_R12:
- return "R12";
- case PERF_REG_X86_R13:
- return "R13";
- case PERF_REG_X86_R14:
- return "R14";
- case PERF_REG_X86_R15:
- return "R15";
-#endif /* HAVE_ARCH_X86_64_SUPPORT */
-
-#define XMM(x) \
- case PERF_REG_X86_XMM ## x: \
- case PERF_REG_X86_XMM ## x + 1: \
- return "XMM" #x;
- XMM(0)
- XMM(1)
- XMM(2)
- XMM(3)
- XMM(4)
- XMM(5)
- XMM(6)
- XMM(7)
- XMM(8)
- XMM(9)
- XMM(10)
- XMM(11)
- XMM(12)
- XMM(13)
- XMM(14)
- XMM(15)
-#undef XMM
- default:
- return NULL;
- }
-
- return NULL;
-}
-
#endif /* ARCH_PERF_REGS_H */
diff --git a/tools/perf/arch/x86/tests/Build b/tools/perf/arch/x86/tests/Build
index 2997c506550c..6f4e8636c3bf 100644
--- a/tools/perf/arch/x86/tests/Build
+++ b/tools/perf/arch/x86/tests/Build
@@ -2,7 +2,6 @@ perf-$(CONFIG_DWARF_UNWIND) += regs_load.o
perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
perf-y += arch-tests.o
-perf-y += rdpmc.o
-perf-y += perf-time-to-tsc.o
-perf-$(CONFIG_AUXTRACE) += insn-x86.o intel-pt-pkt-decoder-test.o
+perf-y += sample-parsing.o
+perf-$(CONFIG_AUXTRACE) += insn-x86.o intel-pt-test.o
perf-$(CONFIG_X86_64) += bp-modify.o
diff --git a/tools/perf/arch/x86/tests/arch-tests.c b/tools/perf/arch/x86/tests/arch-tests.c
index 6763135aec17..aae6ea0fe52b 100644
--- a/tools/perf/arch/x86/tests/arch-tests.c
+++ b/tools/perf/arch/x86/tests/arch-tests.c
@@ -3,39 +3,37 @@
#include "tests/tests.h"
#include "arch-tests.h"
-struct test arch_tests[] = {
- {
- .desc = "x86 rdpmc",
- .func = test__rdpmc,
- },
- {
- .desc = "Convert perf time to TSC",
- .func = test__perf_time_to_tsc,
- },
+#ifdef HAVE_AUXTRACE_SUPPORT
+DEFINE_SUITE("x86 instruction decoder - new instructions", insn_x86);
+
+static struct test_case intel_pt_tests[] = {
+ TEST_CASE("Intel PT packet decoder", intel_pt_pkt_decoder),
+ TEST_CASE("Intel PT hybrid CPU compatibility", intel_pt_hybrid_compat),
+ { .name = NULL, }
+};
+
+struct test_suite suite__intel_pt = {
+ .desc = "Intel PT",
+ .test_cases = intel_pt_tests,
+};
+
+#endif
+#if defined(__x86_64__)
+DEFINE_SUITE("x86 bp modify", bp_modify);
+#endif
+DEFINE_SUITE("x86 Sample parsing", x86_sample_parsing);
+
+struct test_suite *arch_tests[] = {
#ifdef HAVE_DWARF_UNWIND_SUPPORT
- {
- .desc = "DWARF unwind",
- .func = test__dwarf_unwind,
- },
+ &suite__dwarf_unwind,
#endif
#ifdef HAVE_AUXTRACE_SUPPORT
- {
- .desc = "x86 instruction decoder - new instructions",
- .func = test__insn_x86,
- },
- {
- .desc = "Intel PT packet decoder",
- .func = test__intel_pt_pkt_decoder,
- },
+ &suite__insn_x86,
+ &suite__intel_pt,
#endif
#if defined(__x86_64__)
- {
- .desc = "x86 bp modify",
- .func = test__bp_modify,
- },
+ &suite__bp_modify,
#endif
- {
- .func = NULL,
- },
-
+ &suite__x86_sample_parsing,
+ NULL,
};
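The arch-tests.c hunk above replaces the flat struct test array with struct test_suite pointers built via the DEFINE_SUITE() and TEST_CASE() helpers. Below is a hedged sketch of what registering one more architecture test would look like under that scheme; the test name and body are hypothetical, only the macros, types, and naming pattern follow what is visible in the diff:

	/* Hypothetical example following the pattern above; not part of the patch. */
	#include "tests/tests.h"

	static int test__dummy_check(struct test_suite *test __maybe_unused,
				     int subtest __maybe_unused)
	{
		/* A real test would do work here and return TEST_OK or TEST_FAIL. */
		return TEST_OK;
	}

	DEFINE_SUITE("x86 dummy check", dummy_check);
	/* ...and &suite__dummy_check would then be appended to arch_tests[] above. */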
diff --git a/tools/perf/arch/x86/tests/bp-modify.c b/tools/perf/arch/x86/tests/bp-modify.c
index adcacf1b6609..0924ccd9e36d 100644
--- a/tools/perf/arch/x86/tests/bp-modify.c
+++ b/tools/perf/arch/x86/tests/bp-modify.c
@@ -73,7 +73,7 @@ static int bp_modify1(void)
/*
* The parent does following steps:
* - creates a new breakpoint (id 0) for bp_2 function
- * - changes that breakponit to bp_1 function
+ * - changes that breakpoint to bp_1 function
* - waits for the breakpoint to hit and checks
* it has proper rip of bp_1 function
* - detaches the child
@@ -204,7 +204,7 @@ out:
return rip == (unsigned long) bp_1 ? TEST_OK : TEST_FAIL;
}
-int test__bp_modify(struct test *test __maybe_unused,
+int test__bp_modify(struct test_suite *test __maybe_unused,
int subtest __maybe_unused)
{
TEST_ASSERT_VAL("modify test 1 failed\n", !bp_modify1());
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index ef43be9b6ec2..497593be80f2 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -7,7 +7,6 @@
#include "event.h"
#include "debug.h"
#include "tests/tests.h"
-#include "arch-tests.h"
#define STACK_SIZE 8192
@@ -34,10 +33,17 @@ static int sample_ustack(struct perf_sample *sample,
return -1;
}
- stack_size = map->end - sp;
+ stack_size = map__end(map) - sp;
stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
memcpy(buf, (void *) sp, stack_size);
+#ifdef MEMORY_SANITIZER
+ /*
+ * Copying the stack may copy msan poison, avoid false positives in the
+ * unwinder by removing the poison here.
+ */
+ __msan_unpoison(buf, stack_size);
+#endif
stack->data = (char *) buf;
stack->size = stack_size;
return 0;
@@ -55,6 +61,14 @@ int test__arch_unwind_sample(struct perf_sample *sample,
return -1;
}
+#ifdef MEMORY_SANITIZER
+ /*
+ * Assignments to buf in the assembly function perf_regs_load aren't
+ * seen by memory sanitizer. Zero the memory to convince memory
+ * sanitizer the memory is initialized.
+ */
+ memset(buf, 0, sizeof(u64) * PERF_REGS_MAX);
+#endif
perf_regs_load(buf);
regs->abi = PERF_SAMPLE_REGS_ABI;
regs->regs = buf;
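The two MEMORY_SANITIZER hunks above follow one pattern: when a buffer is filled by something MSan cannot instrument (a raw memcpy() of the stack, or the perf_regs_load assembly), the bytes have to be explicitly marked as initialized to avoid false positives in the unwinder. A minimal sketch of that pattern using the public MSan interface; the helper name here is illustrative, not taken from the patch:

	/* Sketch of the unpoison pattern used above; helper name is made up. */
	#include <stddef.h>
	#ifdef MEMORY_SANITIZER
	#include <sanitizer/msan_interface.h>
	#endif

	static inline void mark_buf_initialized(void *buf, size_t len)
	{
	#ifdef MEMORY_SANITIZER
		/* Only tells MSan the bytes are defined; the data is untouched. */
		__msan_unpoison(buf, len);
	#else
		(void)buf;
		(void)len;
	#endif
	}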
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-32.c b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
index 9708ae892061..ba429cadb18f 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-32.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-32.c
@@ -2197,6 +2197,924 @@
"3e f2 ff 25 78 56 34 12 \tnotrack bnd jmp *0x12345678",},
{{0x3e, 0xf2, 0xff, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 9, 0, "jmp", "indirect",
"3e f2 ff a4 c8 78 56 34 12 \tnotrack bnd jmp *0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x58, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 58 cb \tvaddph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x58, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 58 cb \tvaddph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x58, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 58 cb \tvaddph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x58, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 58 cb \tvaddsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 58 8c c8 78 56 34 12 \tvaddsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x6c, 0x48, 0xc2, 0xeb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 48 c2 eb 12 \tvcmple_oqph %zmm3,%zmm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x48, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 48 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%eax,%ecx,8),%zmm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x08, 0xc2, 0xeb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 08 c2 eb 12 \tvcmple_oqph %xmm3,%xmm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x08, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 08 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%eax,%ecx,8),%xmm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x28, 0xc2, 0xeb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 28 c2 eb 12 \tvcmple_oqph %ymm3,%ymm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x28, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 28 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%eax,%ecx,8),%ymm2,%k5",},
+{{0x62, 0xf3, 0x6e, 0x08, 0xc2, 0xeb, 0x12, }, 7, 0, "", "",
+"62 f3 6e 08 c2 eb 12 \tvcmple_oqsh %xmm3,%xmm2,%k5",},
+{{0x62, 0xf3, 0x6e, 0x08, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6e 08 c2 ac c8 78 56 34 12 12 \tvcmple_oqsh 0x12345678(%eax,%ecx,8),%xmm2,%k5",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x2f, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 2f ca \tvcomish %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x2f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 2f 8c c8 78 56 34 12 \tvcomish 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 5b ca \tvcvtdq2ph %zmm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 5b 8c c8 78 56 34 12 \tvcvtdq2ph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 5b ca \tvcvtdq2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 5b ca \tvcvtdq2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0xfd, 0x48, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 fd 48 5a ca \tvcvtpd2ph %zmm2,%xmm1",},
+{{0x62, 0xf5, 0xfd, 0x08, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 fd 08 5a ca \tvcvtpd2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0xfd, 0x28, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 fd 28 5a ca \tvcvtpd2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 5b ca \tvcvtph2dq %ymm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 5b ca \tvcvtph2dq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 5b ca \tvcvtph2dq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 5a ca \tvcvtph2pd %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 5a ca \tvcvtph2pd %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 5a ca \tvcvtph2pd %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x13, 0xca, }, 6, 0, "", "",
+"62 f2 7d 48 13 ca \tvcvtph2ps %ymm2,%zmm1",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0xc4, 0xe2, 0x79, 0x13, 0xca, }, 5, 0, "", "",
+"c4 e2 79 13 ca \tvcvtph2ps %xmm2,%xmm1",},
+{{0xc4, 0xe2, 0x79, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 79 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0xc4, 0xe2, 0x7d, 0x13, 0xca, }, 5, 0, "", "",
+"c4 e2 7d 13 ca \tvcvtph2ps %xmm2,%ymm1",},
+{{0xc4, 0xe2, 0x7d, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 7d 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0xc4, 0xe2, 0x79, 0x13, 0xca, }, 5, 0, "", "",
+"c4 e2 79 13 ca \tvcvtph2ps %xmm2,%xmm1",},
+{{0xc4, 0xe2, 0x79, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 79 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0xc4, 0xe2, 0x7d, 0x13, 0xca, }, 5, 0, "", "",
+"c4 e2 7d 13 ca \tvcvtph2ps %xmm2,%ymm1",},
+{{0xc4, 0xe2, 0x7d, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 7d 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x13, 0xca, }, 6, 0, "", "",
+"62 f6 7d 48 13 ca \tvcvtph2psx %ymm2,%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 48 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x13, 0xca, }, 6, 0, "", "",
+"62 f6 7d 08 13 ca \tvcvtph2psx %xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 08 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x13, 0xca, }, 6, 0, "", "",
+"62 f6 7d 28 13 ca \tvcvtph2psx %xmm2,%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 28 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 7b ca \tvcvtph2qq %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 7b ca \tvcvtph2qq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 7b ca \tvcvtph2qq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 79 ca \tvcvtph2udq %ymm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 79 ca \tvcvtph2udq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 79 ca \tvcvtph2udq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 79 ca \tvcvtph2uqq %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 79 ca \tvcvtph2uqq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 79 ca \tvcvtph2uqq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 7d ca \tvcvtph2uw %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 7d ca \tvcvtph2uw %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 7d ca \tvcvtph2uw %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 7d ca \tvcvtph2w %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 7d ca \tvcvtph2w %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 7d ca \tvcvtph2w %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf3, 0x7d, 0x48, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7d 48 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%zmm1,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf3, 0x7d, 0x48, 0x1d, 0xd1, 0x12, }, 7, 0, "", "",
+"62 f3 7d 48 1d d1 12 \tvcvtps2ph $0x12,%zmm2,%ymm1",},
+{{0xc4, 0xe3, 0x7d, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 11, 0, "", "",
+"c4 e3 7d 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%ymm1,0x12345678(%eax,%ecx,8)",},
+{{0xc4, 0xe3, 0x79, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 11, 0, "", "",
+"c4 e3 79 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%xmm1,0x12345678(%eax,%ecx,8)",},
+{{0xc4, 0xe3, 0x79, 0x1d, 0xd1, 0x12, }, 6, 0, "", "",
+"c4 e3 79 1d d1 12 \tvcvtps2ph $0x12,%xmm2,%xmm1",},
+{{0xc4, 0xe3, 0x7d, 0x1d, 0xd1, 0x12, }, 6, 0, "", "",
+"c4 e3 7d 1d d1 12 \tvcvtps2ph $0x12,%ymm2,%xmm1",},
+{{0xc4, 0xe3, 0x7d, 0x1d, 0xd1, 0x12, }, 6, 0, "", "",
+"c4 e3 7d 1d d1 12 \tvcvtps2ph $0x12,%ymm2,%xmm1",},
+{{0xc4, 0xe3, 0x7d, 0x1d, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 11, 0, "", "",
+"c4 e3 7d 1d 94 c8 78 56 34 12 12 \tvcvtps2ph $0x12,%ymm2,0x12345678(%eax,%ecx,8)",},
+{{0xc4, 0xe3, 0x79, 0x1d, 0xd1, 0x12, }, 6, 0, "", "",
+"c4 e3 79 1d d1 12 \tvcvtps2ph $0x12,%xmm2,%xmm1",},
+{{0xc4, 0xe3, 0x79, 0x1d, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 11, 0, "", "",
+"c4 e3 79 1d 94 c8 78 56 34 12 12 \tvcvtps2ph $0x12,%xmm2,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x1d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 1d ca \tvcvtps2phx %zmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 1d 8c c8 78 56 34 12 \tvcvtps2phx 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x1d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 1d ca \tvcvtps2phx %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x1d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 1d ca \tvcvtps2phx %ymm2,%xmm1",},
+{{0x62, 0xf5, 0xfc, 0x48, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 fc 48 5b ca \tvcvtqq2ph %zmm2,%xmm1",},
+{{0x62, 0xf5, 0xfc, 0x08, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 fc 08 5b ca \tvcvtqq2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0xfc, 0x28, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 fc 28 5b ca \tvcvtqq2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0xef, 0x08, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 ef 08 5a 8c c8 78 56 34 12 \tvcvtsd2sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5a 8c c8 78 56 34 12 \tvcvtsh2sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x2d, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 2d 84 c8 78 56 34 12 \tvcvtsh2si 0x12345678(%eax,%ecx,8),%eax",},
+{{0x62, 0xf6, 0x6c, 0x08, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6c 08 13 8c c8 78 56 34 12 \tvcvtsh2ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x79, 0xc1, }, 6, 0, "", "",
+"62 f5 7e 08 79 c1 \tvcvtsh2usi %xmm1,%eax",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x79, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 79 84 c8 78 56 34 12 \tvcvtsh2usi 0x12345678(%eax,%ecx,8),%eax",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x2a, 0xc8, }, 6, 0, "", "",
+"62 f5 6e 08 2a c8 \tvcvtsi2sh %eax,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x2a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 2a 8c c8 78 56 34 12 \tvcvtsi2sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x2a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 2a 8c c8 78 56 34 12 \tvcvtsi2sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x1d, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 1d cb \tvcvtss2sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 1d 8c c8 78 56 34 12 \tvcvtss2sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x48, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7e 48 5b ca \tvcvttph2dq %ymm2,%zmm1",},
+{{0x62, 0xf5, 0x7e, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 48 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7e 08 5b ca \tvcvttph2dq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x28, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7e 28 5b ca \tvcvttph2dq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7e, 0x28, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 28 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 7a ca \tvcvttph2qq %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 7a ca \tvcvttph2qq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 7a ca \tvcvttph2qq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 78 ca \tvcvttph2udq %ymm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 78 ca \tvcvttph2udq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 78 ca \tvcvttph2udq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 78 ca \tvcvttph2uqq %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 78 ca \tvcvttph2uqq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 78 ca \tvcvttph2uqq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 7c ca \tvcvttph2uw %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 7c ca \tvcvttph2uw %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 7c ca \tvcvttph2uw %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 7c ca \tvcvttph2w %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 7c ca \tvcvttph2w %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 7c ca \tvcvttph2w %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x2c, 0xc1, }, 6, 0, "", "",
+"62 f5 7e 08 2c c1 \tvcvttsh2si %xmm1,%eax",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x2c, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 2c 84 c8 78 56 34 12 \tvcvttsh2si 0x12345678(%eax,%ecx,8),%eax",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x78, 0xc1, }, 6, 0, "", "",
+"62 f5 7e 08 78 c1 \tvcvttsh2usi %xmm1,%eax",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x78, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 78 84 c8 78 56 34 12 \tvcvttsh2usi 0x12345678(%eax,%ecx,8),%eax",},
+{{0x62, 0xf5, 0x7f, 0x48, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7f 48 7a ca \tvcvtudq2ph %zmm2,%ymm1",},
+{{0x62, 0xf5, 0x7f, 0x48, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7f 48 7a 8c c8 78 56 34 12 \tvcvtudq2ph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7f, 0x08, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7f 08 7a ca \tvcvtudq2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7f, 0x28, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7f 28 7a ca \tvcvtudq2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0xff, 0x48, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 ff 48 7a ca \tvcvtuqq2ph %zmm2,%xmm1",},
+{{0x62, 0xf5, 0xff, 0x08, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 ff 08 7a ca \tvcvtuqq2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0xff, 0x28, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 ff 28 7a ca \tvcvtuqq2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x7b, 0xc8, }, 6, 0, "", "",
+"62 f5 6e 08 7b c8 \tvcvtusi2sh %eax,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 7b 8c c8 78 56 34 12 \tvcvtusi2sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 7b 8c c8 78 56 34 12 \tvcvtusi2sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7f, 0x48, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7f 48 7d ca \tvcvtuw2ph %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7f, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7f 48 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7f, 0x08, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7f 08 7d ca \tvcvtuw2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7f, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7f 08 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7f, 0x28, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7f 28 7d ca \tvcvtuw2ph %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7f, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7f 28 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7e, 0x48, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7e 48 7d ca \tvcvtw2ph %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7e, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 48 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7e 08 7d ca \tvcvtw2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x28, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7e 28 7d ca \tvcvtw2ph %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7e, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 28 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5e, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 5e cb \tvdivph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5e, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 5e cb \tvdivph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5e, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 5e cb \tvdivph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5e, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 5e cb \tvdivsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5e 8c c8 78 56 34 12 \tvdivsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x48, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 48 56 cb \tvfcmaddcph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6f, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 48 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 08 56 cb \tvfcmaddcph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 08 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x28, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 28 56 cb \tvfcmaddcph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6f, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 28 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0x57, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 08 57 cb \tvfcmaddcsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 08 57 8c c8 78 56 34 12 \tvfcmaddcsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x48, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 48 d6 cb \tvfcmulcph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6f, 0x48, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 48 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 08 d6 cb \tvfcmulcph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 08 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x28, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 28 d6 cb \tvfcmulcph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6f, 0x28, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 28 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0xd7, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 08 d7 cb \tvfcmulcsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0xd7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 08 d7 8c c8 78 56 34 12 \tvfcmulcsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x98, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 98 cb \tvfmadd132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x98, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 98 cb \tvfmadd132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x98, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 98 cb \tvfmadd132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x99, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 99 cb \tvfmadd132sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x99, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 99 8c c8 78 56 34 12 \tvfmadd132sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 a8 cb \tvfmadd213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 a8 cb \tvfmadd213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 a8 cb \tvfmadd213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa9, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 a9 cb \tvfmadd213sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa9, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 a9 8c c8 78 56 34 12 \tvfmadd213sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 b8 cb \tvfmadd231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 b8 cb \tvfmadd231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 b8 cb \tvfmadd231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb9, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 b9 cb \tvfmadd231sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb9, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 b9 8c c8 78 56 34 12 \tvfmadd231sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x48, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 48 56 cb \tvfmaddcph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6e, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 48 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 08 56 cb \tvfmaddcph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 08 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x28, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 28 56 cb \tvfmaddcph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 28 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0x57, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 08 57 cb \tvfmaddcsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 08 57 8c c8 78 56 34 12 \tvfmaddcsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x96, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 96 cb \tvfmaddsub132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x96, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 96 cb \tvfmaddsub132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x96, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 96 cb \tvfmaddsub132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 a6 cb \tvfmaddsub213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 a6 cb \tvfmaddsub213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 a6 cb \tvfmaddsub213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 b6 cb \tvfmaddsub231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 b6 cb \tvfmaddsub231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 b6 cb \tvfmaddsub231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9a, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 9a cb \tvfmsub132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9a, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9a cb \tvfmsub132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9a, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 9a cb \tvfmsub132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9b, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9b cb \tvfmsub132sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9b 8c c8 78 56 34 12 \tvfmsub132sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xaa, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 aa cb \tvfmsub213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xaa, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 aa cb \tvfmsub213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xaa, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 aa cb \tvfmsub213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xab, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ab cb \tvfmsub213sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xab, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ab 8c c8 78 56 34 12 \tvfmsub213sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xba, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 ba cb \tvfmsub231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xba, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ba cb \tvfmsub231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xba, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 ba cb \tvfmsub231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbb, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 bb cb \tvfmsub231sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 bb 8c c8 78 56 34 12 \tvfmsub231sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x97, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 97 cb \tvfmsubadd132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x97, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 97 cb \tvfmsubadd132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x97, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 97 cb \tvfmsubadd132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 a7 cb \tvfmsubadd213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 a7 cb \tvfmsubadd213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 a7 cb \tvfmsubadd213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 b7 cb \tvfmsubadd231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 b7 cb \tvfmsubadd231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 b7 cb \tvfmsubadd231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x48, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 48 d6 cb \tvfmulcph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6e, 0x48, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 48 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 08 d6 cb \tvfmulcph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 08 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x28, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 28 d6 cb \tvfmulcph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x28, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 28 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0xd7, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 08 d7 cb \tvfmulcsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0xd7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 08 d7 8c c8 78 56 34 12 \tvfmulcsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 9c cb \tvfnmadd132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9c cb \tvfnmadd132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 9c cb \tvfnmadd132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9d, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9d cb \tvfnmadd132sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9d 8c c8 78 56 34 12 \tvfnmadd132sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xac, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 ac cb \tvfnmadd213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xac, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ac cb \tvfnmadd213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xac, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 ac cb \tvfnmadd213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xad, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ad cb \tvfnmadd213sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xad, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ad 8c c8 78 56 34 12 \tvfnmadd213sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xbc, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 bc cb \tvfnmadd231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbc, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 bc cb \tvfnmadd231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xbc, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 bc cb \tvfnmadd231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbd, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 bd cb \tvfnmadd231sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbd, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 bd 8c c8 78 56 34 12 \tvfnmadd231sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9e, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 9e cb \tvfnmsub132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9e, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9e cb \tvfnmsub132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9e, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 9e cb \tvfnmsub132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9f, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9f cb \tvfnmsub132sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9f 8c c8 78 56 34 12 \tvfnmsub132sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xae, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 ae cb \tvfnmsub213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xae, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ae cb \tvfnmsub213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xae, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 ae cb \tvfnmsub213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xaf, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 af cb \tvfnmsub213sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xaf, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 af 8c c8 78 56 34 12 \tvfnmsub213sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xbe, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 be cb \tvfnmsub231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbe, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 be cb \tvfnmsub231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xbe, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 be cb \tvfnmsub231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbf, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 bf cb \tvfnmsub231sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbf, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 bf 8c c8 78 56 34 12 \tvfnmsub231sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x66, 0xe9, 0x12, }, 7, 0, "", "",
+"62 f3 7c 48 66 e9 12 \tvfpclassph $0x12,%zmm1,%k5",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x66, 0xe9, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 66 e9 12 \tvfpclassph $0x12,%xmm1,%k5",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x66, 0xe9, 0x12, }, 7, 0, "", "",
+"62 f3 7c 28 66 e9 12 \tvfpclassph $0x12,%ymm1,%k5",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x67, 0xe9, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 67 e9 12 \tvfpclasssh $0x12,%xmm1,%k5",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x67, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 08 67 ac c8 78 56 34 12 12 \tvfpclasssh $0x12,0x12345678(%eax,%ecx,8),%k5",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x42, 0xca, }, 6, 0, "", "",
+"62 f6 7d 48 42 ca \tvgetexpph %zmm2,%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 48 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x42, 0xca, }, 6, 0, "", "",
+"62 f6 7d 08 42 ca \tvgetexpph %xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 08 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x42, 0xca, }, 6, 0, "", "",
+"62 f6 7d 28 42 ca \tvgetexpph %ymm2,%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 28 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x43, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 43 cb \tvgetexpsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x43, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 43 8c c8 78 56 34 12 \tvgetexpsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x26, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 48 26 ca 12 \tvgetmantph $0x12,%zmm2,%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 48 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x26, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 26 ca 12 \tvgetmantph $0x12,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 08 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x26, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 28 26 ca 12 \tvgetmantph $0x12,%ymm2,%ymm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 28 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x27, 0xcb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 08 27 cb 12 \tvgetmantsh $0x12,%xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x27, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 08 27 8c c8 78 56 34 12 12 \tvgetmantsh $0x12,0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5f, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 5f cb \tvmaxph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5f, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 5f cb \tvmaxph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5f, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 5f cb \tvmaxph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5f, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 5f cb \tvmaxsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5f 8c c8 78 56 34 12 \tvmaxsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5d, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 5d cb \tvminph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5d, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 5d cb \tvminph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5d, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 5d cb \tvminph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5d, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 5d cb \tvminsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5d 8c c8 78 56 34 12 \tvminsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x11, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 11 8c c8 78 56 34 12 \tvmovsh %xmm1,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x10, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 10 8c c8 78 56 34 12 \tvmovsh 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x10, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 10 cb \tvmovsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7e, 0xc8, }, 6, 0, "", "",
+"62 f5 7d 08 7e c8 \tvmovw %xmm1,%eax",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7e 8c c8 78 56 34 12 \tvmovw %xmm1,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x6e, 0xc8, }, 6, 0, "", "",
+"62 f5 7d 08 6e c8 \tvmovw %eax,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x6e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 6e 8c c8 78 56 34 12 \tvmovw 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x59, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 59 cb \tvmulph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x59, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 59 cb \tvmulph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x59, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 59 cb \tvmulph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x59, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 59 cb \tvmulsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 59 8c c8 78 56 34 12 \tvmulsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x4c, 0xca, }, 6, 0, "", "",
+"62 f6 7d 48 4c ca \tvrcpph %zmm2,%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 48 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x4c, 0xca, }, 6, 0, "", "",
+"62 f6 7d 08 4c ca \tvrcpph %xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 08 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x4c, 0xca, }, 6, 0, "", "",
+"62 f6 7d 28 4c ca \tvrcpph %ymm2,%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 28 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x4d, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 4d cb \tvrcpsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x4d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 4d 8c c8 78 56 34 12 \tvrcpsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x56, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 48 56 ca 12 \tvreduceph $0x12,%zmm2,%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 48 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x56, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 56 ca 12 \tvreduceph $0x12,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 08 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x56, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 28 56 ca 12 \tvreduceph $0x12,%ymm2,%ymm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 28 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x57, 0xcb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 08 57 cb 12 \tvreducesh $0x12,%xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 08 57 8c c8 78 56 34 12 12 \tvreducesh $0x12,0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x08, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 48 08 ca 12 \tvrndscaleph $0x12,%zmm2,%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 48 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x08, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 08 ca 12 \tvrndscaleph $0x12,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 08 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x08, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 28 08 ca 12 \tvrndscaleph $0x12,%ymm2,%ymm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 28 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x0a, 0xcb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 08 0a cb 12 \tvrndscalesh $0x12,%xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x0a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 08 0a 8c c8 78 56 34 12 12 \tvrndscalesh $0x12,0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x4e, 0xca, }, 6, 0, "", "",
+"62 f6 7d 48 4e ca \tvrsqrtph %zmm2,%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 48 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x4e, 0xca, }, 6, 0, "", "",
+"62 f6 7d 08 4e ca \tvrsqrtph %xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 08 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x4e, 0xca, }, 6, 0, "", "",
+"62 f6 7d 28 4e ca \tvrsqrtph %ymm2,%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 28 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x4f, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 4f cb \tvrsqrtsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x4f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 4f 8c c8 78 56 34 12 \tvrsqrtsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x2c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 2c cb \tvscalefph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x2c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 2c cb \tvscalefph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x2c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 2c cb \tvscalefph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x2d, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 2d cb \tvscalefsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x2d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 2d 8c c8 78 56 34 12 \tvscalefsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x51, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 51 ca \tvsqrtph %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x51, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 51 ca \tvsqrtph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x51, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 51 ca \tvsqrtph %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x51, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 51 cb \tvsqrtsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 51 8c c8 78 56 34 12 \tvsqrtsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5c, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 5c cb \tvsubph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5c, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 5c cb \tvsubph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5c, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 5c cb \tvsubph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5c, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 5c cb \tvsubsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5c 8c c8 78 56 34 12 \tvsubsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x2e, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 2e ca \tvucomish %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x2e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 2e 8c c8 78 56 34 12 \tvucomish 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0xf3, 0x0f, 0x3a, 0xf0, 0xc0, 0x00, }, 6, 0, "", "",
+"f3 0f 3a f0 c0 00 \threset $0x0",},
+{{0x0f, 0x01, 0xe8, }, 3, 0, "", "",
+"0f 01 e8 \tserialize ",},
+{{0xf2, 0x0f, 0x01, 0xe9, }, 4, 0, "", "",
+"f2 0f 01 e9 \txresldtrk ",},
+{{0xf2, 0x0f, 0x01, 0xe8, }, 4, 0, "", "",
+"f2 0f 01 e8 \txsusldtrk ",},
{{0x0f, 0x01, 0xcf, }, 3, 0, "", "",
"0f 01 cf \tencls ",},
{{0x0f, 0x01, 0xd7, }, 3, 0, "", "",
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-64.c b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
index 5da17d41d302..3a47e98fec33 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-64.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-64.c
@@ -2459,6 +2459,1432 @@
"3e f2 ff a4 c8 78 56 34 12 \tnotrack bnd jmpq *0x12345678(%rax,%rcx,8)",},
{{0x3e, 0xf2, 0x41, 0xff, 0xa4, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "jmp", "indirect",
"3e f2 41 ff a4 c8 78 56 34 12 \tnotrack bnd jmpq *0x12345678(%r8,%rcx,8)",},
+{{0xc4, 0xe2, 0x78, 0x49, 0x04, 0xc8, }, 6, 0, "", "",
+"c4 e2 78 49 04 c8 \tldtilecfg (%rax,%rcx,8)",},
+{{0xc4, 0xc2, 0x78, 0x49, 0x04, 0xc8, }, 6, 0, "", "",
+"c4 c2 78 49 04 c8 \tldtilecfg (%r8,%rcx,8)",},
+{{0xc4, 0xe2, 0x79, 0x49, 0x04, 0xc8, }, 6, 0, "", "",
+"c4 e2 79 49 04 c8 \tsttilecfg (%rax,%rcx,8)",},
+{{0xc4, 0xc2, 0x79, 0x49, 0x04, 0xc8, }, 6, 0, "", "",
+"c4 c2 79 49 04 c8 \tsttilecfg (%r8,%rcx,8)",},
+{{0xc4, 0xe2, 0x7a, 0x5c, 0xd1, }, 5, 0, "", "",
+"c4 e2 7a 5c d1 \ttdpbf16ps %tmm0,%tmm1,%tmm2",},
+{{0xc4, 0xe2, 0x7b, 0x5e, 0xd1, }, 5, 0, "", "",
+"c4 e2 7b 5e d1 \ttdpbssd %tmm0,%tmm1,%tmm2",},
+{{0xc4, 0xe2, 0x7a, 0x5e, 0xd1, }, 5, 0, "", "",
+"c4 e2 7a 5e d1 \ttdpbsud %tmm0,%tmm1,%tmm2",},
+{{0xc4, 0xe2, 0x79, 0x5e, 0xd1, }, 5, 0, "", "",
+"c4 e2 79 5e d1 \ttdpbusd %tmm0,%tmm1,%tmm2",},
+{{0xc4, 0xe2, 0x78, 0x5e, 0xd1, }, 5, 0, "", "",
+"c4 e2 78 5e d1 \ttdpbuud %tmm0,%tmm1,%tmm2",},
+{{0xc4, 0xe2, 0x7b, 0x4b, 0x0c, 0xc8, }, 6, 0, "", "",
+"c4 e2 7b 4b 0c c8 \ttileloadd (%rax,%rcx,8),%tmm1",},
+{{0xc4, 0xc2, 0x7b, 0x4b, 0x14, 0xc8, }, 6, 0, "", "",
+"c4 c2 7b 4b 14 c8 \ttileloadd (%r8,%rcx,8),%tmm2",},
+{{0xc4, 0xe2, 0x79, 0x4b, 0x0c, 0xc8, }, 6, 0, "", "",
+"c4 e2 79 4b 0c c8 \ttileloaddt1 (%rax,%rcx,8),%tmm1",},
+{{0xc4, 0xc2, 0x79, 0x4b, 0x14, 0xc8, }, 6, 0, "", "",
+"c4 c2 79 4b 14 c8 \ttileloaddt1 (%r8,%rcx,8),%tmm2",},
+{{0xc4, 0xe2, 0x78, 0x49, 0xc0, }, 5, 0, "", "",
+"c4 e2 78 49 c0 \ttilerelease ",},
+{{0xc4, 0xe2, 0x7a, 0x4b, 0x0c, 0xc8, }, 6, 0, "", "",
+"c4 e2 7a 4b 0c c8 \ttilestored %tmm1,(%rax,%rcx,8)",},
+{{0xc4, 0xc2, 0x7a, 0x4b, 0x14, 0xc8, }, 6, 0, "", "",
+"c4 c2 7a 4b 14 c8 \ttilestored %tmm2,(%r8,%rcx,8)",},
+{{0xc4, 0xe2, 0x7b, 0x49, 0xc0, }, 5, 0, "", "",
+"c4 e2 7b 49 c0 \ttilezero %tmm0",},
+{{0xc4, 0xe2, 0x7b, 0x49, 0xf8, }, 5, 0, "", "",
+"c4 e2 7b 49 f8 \ttilezero %tmm7",},
+{{0xf3, 0x0f, 0x01, 0xee, }, 4, 0, "", "",
+"f3 0f 01 ee \tclui ",},
+{{0xf3, 0x0f, 0xc7, 0xf0, }, 4, 0, "", "",
+"f3 0f c7 f0 \tsenduipi %rax",},
+{{0xf3, 0x41, 0x0f, 0xc7, 0xf0, }, 5, 0, "", "",
+"f3 41 0f c7 f0 \tsenduipi %r8",},
+{{0xf3, 0x0f, 0x01, 0xef, }, 4, 0, "", "",
+"f3 0f 01 ef \tstui ",},
+{{0xf3, 0x0f, 0x01, 0xed, }, 4, 0, "", "",
+"f3 0f 01 ed \ttestui ",},
+{{0xf3, 0x0f, 0x01, 0xec, }, 4, 0, "", "",
+"f3 0f 01 ec \tuiret ",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x58, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 58 cb \tvaddph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x48, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 48 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x58, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 58 cb \tvaddph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x08, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 08 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x58, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 58 cb \tvaddph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x28, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 28 58 8c c8 78 56 34 12 \tvaddph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x58, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 58 cb \tvaddsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 58 8c c8 78 56 34 12 \tvaddsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x58, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 58 8c c8 78 56 34 12 \tvaddsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x6c, 0x48, 0xc2, 0xeb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 48 c2 eb 12 \tvcmple_oqph %zmm3,%zmm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x48, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 48 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%rax,%rcx,8),%zmm2,%k5",},
+{{0x67, 0x62, 0xf3, 0x6c, 0x48, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 6c 48 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%eax,%ecx,8),%zmm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x08, 0xc2, 0xeb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 08 c2 eb 12 \tvcmple_oqph %xmm3,%xmm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x08, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 08 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%rax,%rcx,8),%xmm2,%k5",},
+{{0x67, 0x62, 0xf3, 0x6c, 0x08, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 6c 08 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%eax,%ecx,8),%xmm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x28, 0xc2, 0xeb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 28 c2 eb 12 \tvcmple_oqph %ymm3,%ymm2,%k5",},
+{{0x62, 0xf3, 0x6c, 0x28, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 28 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%rax,%rcx,8),%ymm2,%k5",},
+{{0x67, 0x62, 0xf3, 0x6c, 0x28, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 6c 28 c2 ac c8 78 56 34 12 12 \tvcmple_oqph 0x12345678(%eax,%ecx,8),%ymm2,%k5",},
+{{0x62, 0xf3, 0x6e, 0x08, 0xc2, 0xeb, 0x12, }, 7, 0, "", "",
+"62 f3 6e 08 c2 eb 12 \tvcmple_oqsh %xmm3,%xmm2,%k5",},
+{{0x62, 0xf3, 0x6e, 0x08, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6e 08 c2 ac c8 78 56 34 12 12 \tvcmple_oqsh 0x12345678(%rax,%rcx,8),%xmm2,%k5",},
+{{0x67, 0x62, 0xf3, 0x6e, 0x08, 0xc2, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 6e 08 c2 ac c8 78 56 34 12 12 \tvcmple_oqsh 0x12345678(%eax,%ecx,8),%xmm2,%k5",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x2f, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 2f ca \tvcomish %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x2f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 2f 8c c8 78 56 34 12 \tvcomish 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x2f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 08 2f 8c c8 78 56 34 12 \tvcomish 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 5b ca \tvcvtdq2ph %zmm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 5b 8c c8 78 56 34 12 \tvcvtdq2ph 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 48 5b 8c c8 78 56 34 12 \tvcvtdq2ph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 5b ca \tvcvtdq2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 5b ca \tvcvtdq2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0xfd, 0x48, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 fd 48 5a ca \tvcvtpd2ph %zmm2,%xmm1",},
+{{0x62, 0xf5, 0xfd, 0x08, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 fd 08 5a ca \tvcvtpd2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0xfd, 0x28, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 fd 28 5a ca \tvcvtpd2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 5b ca \tvcvtph2dq %ymm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 48 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 5b ca \tvcvtph2dq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 5b ca \tvcvtph2dq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x28, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 28 5b 8c c8 78 56 34 12 \tvcvtph2dq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 5a ca \tvcvtph2pd %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x48, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 48 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 5a ca \tvcvtph2pd %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 08 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x5a, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 5a ca \tvcvtph2pd %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x28, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 28 5a 8c c8 78 56 34 12 \tvcvtph2pd 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x13, 0xca, }, 6, 0, "", "",
+"62 f2 7d 48 13 ca \tvcvtph2ps %ymm2,%zmm1",},
+{{0x62, 0xf2, 0x7d, 0x48, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f2 7d 48 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf2, 0x7d, 0x48, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f2 7d 48 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0xc4, 0xe2, 0x79, 0x13, 0xca, }, 5, 0, "", "",
+"c4 e2 79 13 ca \tvcvtph2ps %xmm2,%xmm1",},
+{{0xc4, 0xe2, 0x79, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 79 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0xc4, 0xe2, 0x79, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 c4 e2 79 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0xc4, 0xe2, 0x7d, 0x13, 0xca, }, 5, 0, "", "",
+"c4 e2 7d 13 ca \tvcvtph2ps %xmm2,%ymm1",},
+{{0xc4, 0xe2, 0x7d, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 7d 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0xc4, 0xe2, 0x7d, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 c4 e2 7d 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0xc4, 0xe2, 0x79, 0x13, 0xca, }, 5, 0, "", "",
+"c4 e2 79 13 ca \tvcvtph2ps %xmm2,%xmm1",},
+{{0xc4, 0xe2, 0x79, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 79 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0xc4, 0xe2, 0x79, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 c4 e2 79 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0xc4, 0xe2, 0x7d, 0x13, 0xca, }, 5, 0, "", "",
+"c4 e2 7d 13 ca \tvcvtph2ps %xmm2,%ymm1",},
+{{0xc4, 0xe2, 0x7d, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 10, 0, "", "",
+"c4 e2 7d 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0xc4, 0xe2, 0x7d, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"67 c4 e2 7d 13 8c c8 78 56 34 12 \tvcvtph2ps 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x13, 0xca, }, 6, 0, "", "",
+"62 f6 7d 48 13 ca \tvcvtph2psx %ymm2,%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 48 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x48, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 48 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x13, 0xca, }, 6, 0, "", "",
+"62 f6 7d 08 13 ca \tvcvtph2psx %xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 08 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x08, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 08 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x13, 0xca, }, 6, 0, "", "",
+"62 f6 7d 28 13 ca \tvcvtph2psx %xmm2,%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 28 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x28, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 28 13 8c c8 78 56 34 12 \tvcvtph2psx 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 7b ca \tvcvtph2qq %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x48, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 48 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 7b ca \tvcvtph2qq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7b, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 7b ca \tvcvtph2qq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x28, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 28 7b 8c c8 78 56 34 12 \tvcvtph2qq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 79 ca \tvcvtph2udq %ymm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x48, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 48 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 79 ca \tvcvtph2udq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 08 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 79 ca \tvcvtph2udq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x28, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 28 79 8c c8 78 56 34 12 \tvcvtph2udq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 79 ca \tvcvtph2uqq %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x48, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 48 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 79 ca \tvcvtph2uqq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x79, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 79 ca \tvcvtph2uqq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x28, 0x79, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 28 79 8c c8 78 56 34 12 \tvcvtph2uqq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 7d ca \tvcvtph2uw %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 48 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 7d ca \tvcvtph2uw %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 08 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 7d ca \tvcvtph2uw %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 28 7d 8c c8 78 56 34 12 \tvcvtph2uw 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 7d ca \tvcvtph2w %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 48 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 7d ca \tvcvtph2w %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 7d ca \tvcvtph2w %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 28 7d 8c c8 78 56 34 12 \tvcvtph2w 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf3, 0x7d, 0x48, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7d 48 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%zmm1,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0x62, 0xf3, 0x7d, 0x48, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7d 48 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%zmm1,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf3, 0x7d, 0x48, 0x1d, 0xd1, 0x12, }, 7, 0, "", "",
+"62 f3 7d 48 1d d1 12 \tvcvtps2ph $0x12,%zmm2,%ymm1",},
+{{0xc4, 0xe3, 0x7d, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 11, 0, "", "",
+"c4 e3 7d 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%ymm1,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0xc4, 0xe3, 0x7d, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"67 c4 e3 7d 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%ymm1,0x12345678(%eax,%ecx,8)",},
+{{0xc4, 0xe3, 0x79, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 11, 0, "", "",
+"c4 e3 79 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%xmm1,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0xc4, 0xe3, 0x79, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"67 c4 e3 79 1d 8c c8 78 56 34 12 12 \tvcvtps2ph $0x12,%xmm1,0x12345678(%eax,%ecx,8)",},
+{{0xc4, 0xe3, 0x79, 0x1d, 0xd1, 0x12, }, 6, 0, "", "",
+"c4 e3 79 1d d1 12 \tvcvtps2ph $0x12,%xmm2,%xmm1",},
+{{0xc4, 0xe3, 0x7d, 0x1d, 0xd1, 0x12, }, 6, 0, "", "",
+"c4 e3 7d 1d d1 12 \tvcvtps2ph $0x12,%ymm2,%xmm1",},
+{{0xc4, 0xe3, 0x7d, 0x1d, 0xd1, 0x12, }, 6, 0, "", "",
+"c4 e3 7d 1d d1 12 \tvcvtps2ph $0x12,%ymm2,%xmm1",},
+{{0xc4, 0xe3, 0x7d, 0x1d, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 11, 0, "", "",
+"c4 e3 7d 1d 94 c8 78 56 34 12 12 \tvcvtps2ph $0x12,%ymm2,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0xc4, 0xe3, 0x7d, 0x1d, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"67 c4 e3 7d 1d 94 c8 78 56 34 12 12 \tvcvtps2ph $0x12,%ymm2,0x12345678(%eax,%ecx,8)",},
+{{0xc4, 0xe3, 0x79, 0x1d, 0xd1, 0x12, }, 6, 0, "", "",
+"c4 e3 79 1d d1 12 \tvcvtps2ph $0x12,%xmm2,%xmm1",},
+{{0xc4, 0xe3, 0x79, 0x1d, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 11, 0, "", "",
+"c4 e3 79 1d 94 c8 78 56 34 12 12 \tvcvtps2ph $0x12,%xmm2,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0xc4, 0xe3, 0x79, 0x1d, 0x94, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"67 c4 e3 79 1d 94 c8 78 56 34 12 12 \tvcvtps2ph $0x12,%xmm2,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x1d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 1d ca \tvcvtps2phx %zmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 1d 8c c8 78 56 34 12 \tvcvtps2phx 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x48, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 48 1d 8c c8 78 56 34 12 \tvcvtps2phx 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x1d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 1d ca \tvcvtps2phx %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x1d, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 1d ca \tvcvtps2phx %ymm2,%xmm1",},
+{{0x62, 0xf5, 0xfc, 0x48, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 fc 48 5b ca \tvcvtqq2ph %zmm2,%xmm1",},
+{{0x62, 0xf5, 0xfc, 0x08, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 fc 08 5b ca \tvcvtqq2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0xfc, 0x28, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 fc 28 5b ca \tvcvtqq2ph %ymm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0xef, 0x08, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 ef 08 5a 8c c8 78 56 34 12 \tvcvtsd2sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x5a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 5a 8c c8 78 56 34 12 \tvcvtsh2sd 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x08, 0x2d, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 08 2d 84 c8 78 56 34 12 \tvcvtsh2si 0x12345678(%eax,%ecx,8),%eax",},
+{{0x67, 0x62, 0xf5, 0xfe, 0x08, 0x2d, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 fe 08 2d 84 c8 78 56 34 12 \tvcvtsh2si 0x12345678(%eax,%ecx,8),%rax",},
+{{0x67, 0x62, 0xf6, 0x6c, 0x08, 0x13, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6c 08 13 8c c8 78 56 34 12 \tvcvtsh2ss 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x79, 0xc1, }, 6, 0, "", "",
+"62 f5 7e 08 79 c1 \tvcvtsh2usi %xmm1,%eax",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x79, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 79 84 c8 78 56 34 12 \tvcvtsh2usi 0x12345678(%rax,%rcx,8),%eax",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x08, 0x79, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 08 79 84 c8 78 56 34 12 \tvcvtsh2usi 0x12345678(%eax,%ecx,8),%eax",},
+{{0x62, 0xf5, 0xfe, 0x08, 0x79, 0xc1, }, 6, 0, "", "",
+"62 f5 fe 08 79 c1 \tvcvtsh2usi %xmm1,%rax",},
+{{0x62, 0xf5, 0xfe, 0x08, 0x79, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 fe 08 79 84 c8 78 56 34 12 \tvcvtsh2usi 0x12345678(%rax,%rcx,8),%rax",},
+{{0x67, 0x62, 0xf5, 0xfe, 0x08, 0x79, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 fe 08 79 84 c8 78 56 34 12 \tvcvtsh2usi 0x12345678(%eax,%ecx,8),%rax",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x2a, 0xc8, }, 6, 0, "", "",
+"62 f5 6e 08 2a c8 \tvcvtsi2sh %eax,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x2a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 2a 8c c8 78 56 34 12 \tvcvtsi2shl 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x2a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 2a 8c c8 78 56 34 12 \tvcvtsi2shl 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0xee, 0x08, 0x2a, 0xc8, }, 6, 0, "", "",
+"62 f5 ee 08 2a c8 \tvcvtsi2sh %rax,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x2a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 2a 8c c8 78 56 34 12 \tvcvtsi2shl 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x2a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 2a 8c c8 78 56 34 12 \tvcvtsi2shl 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x1d, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 1d cb \tvcvtss2sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 1d 8c c8 78 56 34 12 \tvcvtss2sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x08, 0x1d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 08 1d 8c c8 78 56 34 12 \tvcvtss2sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x48, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7e 48 5b ca \tvcvttph2dq %ymm2,%zmm1",},
+{{0x62, 0xf5, 0x7e, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 48 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x48, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 48 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7e 08 5b ca \tvcvttph2dq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x08, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 08 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x28, 0x5b, 0xca, }, 6, 0, "", "",
+"62 f5 7e 28 5b ca \tvcvttph2dq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7e, 0x28, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 28 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x28, 0x5b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 28 5b 8c c8 78 56 34 12 \tvcvttph2dq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 7a ca \tvcvttph2qq %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x48, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 48 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 7a ca \tvcvttph2qq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 7a ca \tvcvttph2qq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x28, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 28 7a 8c c8 78 56 34 12 \tvcvttph2qq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 78 ca \tvcvttph2udq %ymm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x48, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 48 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 78 ca \tvcvttph2udq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 08 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 78 ca \tvcvttph2udq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x28, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 28 78 8c c8 78 56 34 12 \tvcvttph2udq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 78 ca \tvcvttph2uqq %xmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x48, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 48 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 78 ca \tvcvttph2uqq %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x78, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 78 ca \tvcvttph2uqq %xmm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x28, 0x78, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 28 78 8c c8 78 56 34 12 \tvcvttph2uqq 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 7c ca \tvcvttph2uw %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x48, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 48 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 7c ca \tvcvttph2uw %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 08 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 7c ca \tvcvttph2uw %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x28, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 28 7c 8c c8 78 56 34 12 \tvcvttph2uw 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7d 48 7c ca \tvcvttph2w %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x48, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 48 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x48, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 48 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7d 08 7c ca \tvcvttph2w %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7c, 0xca, }, 6, 0, "", "",
+"62 f5 7d 28 7c ca \tvcvttph2w %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7d, 0x28, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 28 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x28, 0x7c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 28 7c 8c c8 78 56 34 12 \tvcvttph2w 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x2c, 0xc1, }, 6, 0, "", "",
+"62 f5 7e 08 2c c1 \tvcvttsh2si %xmm1,%eax",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x2c, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 2c 84 c8 78 56 34 12 \tvcvttsh2si 0x12345678(%rax,%rcx,8),%eax",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x08, 0x2c, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 08 2c 84 c8 78 56 34 12 \tvcvttsh2si 0x12345678(%eax,%ecx,8),%eax",},
+{{0x62, 0xf5, 0xfe, 0x08, 0x2c, 0xc1, }, 6, 0, "", "",
+"62 f5 fe 08 2c c1 \tvcvttsh2si %xmm1,%rax",},
+{{0x62, 0xf5, 0xfe, 0x08, 0x2c, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 fe 08 2c 84 c8 78 56 34 12 \tvcvttsh2si 0x12345678(%rax,%rcx,8),%rax",},
+{{0x67, 0x62, 0xf5, 0xfe, 0x08, 0x2c, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 fe 08 2c 84 c8 78 56 34 12 \tvcvttsh2si 0x12345678(%eax,%ecx,8),%rax",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x78, 0xc1, }, 6, 0, "", "",
+"62 f5 7e 08 78 c1 \tvcvttsh2usi %xmm1,%eax",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x78, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 78 84 c8 78 56 34 12 \tvcvttsh2usi 0x12345678(%rax,%rcx,8),%eax",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x08, 0x78, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 08 78 84 c8 78 56 34 12 \tvcvttsh2usi 0x12345678(%eax,%ecx,8),%eax",},
+{{0x62, 0xf5, 0xfe, 0x08, 0x78, 0xc1, }, 6, 0, "", "",
+"62 f5 fe 08 78 c1 \tvcvttsh2usi %xmm1,%rax",},
+{{0x62, 0xf5, 0xfe, 0x08, 0x78, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 fe 08 78 84 c8 78 56 34 12 \tvcvttsh2usi 0x12345678(%rax,%rcx,8),%rax",},
+{{0x67, 0x62, 0xf5, 0xfe, 0x08, 0x78, 0x84, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 fe 08 78 84 c8 78 56 34 12 \tvcvttsh2usi 0x12345678(%eax,%ecx,8),%rax",},
+{{0x62, 0xf5, 0x7f, 0x48, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7f 48 7a ca \tvcvtudq2ph %zmm2,%ymm1",},
+{{0x62, 0xf5, 0x7f, 0x48, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7f 48 7a 8c c8 78 56 34 12 \tvcvtudq2ph 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7f, 0x48, 0x7a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7f 48 7a 8c c8 78 56 34 12 \tvcvtudq2ph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7f, 0x08, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7f 08 7a ca \tvcvtudq2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7f, 0x28, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 7f 28 7a ca \tvcvtudq2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0xff, 0x48, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 ff 48 7a ca \tvcvtuqq2ph %zmm2,%xmm1",},
+{{0x62, 0xf5, 0xff, 0x08, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 ff 08 7a ca \tvcvtuqq2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0xff, 0x28, 0x7a, 0xca, }, 6, 0, "", "",
+"62 f5 ff 28 7a ca \tvcvtuqq2ph %ymm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x7b, 0xc8, }, 6, 0, "", "",
+"62 f5 6e 08 7b c8 \tvcvtusi2sh %eax,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 7b 8c c8 78 56 34 12 \tvcvtusi2shl 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 7b 8c c8 78 56 34 12 \tvcvtusi2shl 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0xee, 0x08, 0x7b, 0xc8, }, 6, 0, "", "",
+"62 f5 ee 08 7b c8 \tvcvtusi2sh %rax,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 7b 8c c8 78 56 34 12 \tvcvtusi2shl 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x7b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 7b 8c c8 78 56 34 12 \tvcvtusi2shl 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7f, 0x48, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7f 48 7d ca \tvcvtuw2ph %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7f, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7f 48 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7f, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7f 48 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7f, 0x08, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7f 08 7d ca \tvcvtuw2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7f, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7f 08 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7f, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7f 08 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7f, 0x28, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7f 28 7d ca \tvcvtuw2ph %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7f, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7f 28 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7f, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7f 28 7d 8c c8 78 56 34 12 \tvcvtuw2ph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x7e, 0x48, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7e 48 7d ca \tvcvtw2ph %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7e, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 48 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x48, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 48 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7e 08 7d ca \tvcvtw2ph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x08, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 08 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x28, 0x7d, 0xca, }, 6, 0, "", "",
+"62 f5 7e 28 7d ca \tvcvtw2ph %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7e, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 28 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x28, 0x7d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 28 7d 8c c8 78 56 34 12 \tvcvtw2ph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5e, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 5e cb \tvdivph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x48, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 48 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5e, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 5e cb \tvdivph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x08, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 08 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5e, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 5e cb \tvdivph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x28, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 28 5e 8c c8 78 56 34 12 \tvdivph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5e, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 5e cb \tvdivsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5e 8c c8 78 56 34 12 \tvdivsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x5e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 5e 8c c8 78 56 34 12 \tvdivsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x48, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 48 56 cb \tvfcmaddcph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6f, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 48 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6f, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6f 48 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 08 56 cb \tvfcmaddcph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 08 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6f, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6f 08 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x28, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 28 56 cb \tvfcmaddcph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6f, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 28 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6f, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6f 28 56 8c c8 78 56 34 12 \tvfcmaddcph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0x57, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 08 57 cb \tvfcmaddcsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 08 57 8c c8 78 56 34 12 \tvfcmaddcsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6f, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6f 08 57 8c c8 78 56 34 12 \tvfcmaddcsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x48, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 48 d6 cb \tvfcmulcph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6f, 0x48, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 48 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6f, 0x48, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6f 48 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 08 d6 cb \tvfcmulcph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 08 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6f, 0x08, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6f 08 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x28, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 28 d6 cb \tvfcmulcph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6f, 0x28, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 28 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6f, 0x28, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6f 28 d6 8c c8 78 56 34 12 \tvfcmulcph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0xd7, 0xcb, }, 6, 0, "", "",
+"62 f6 6f 08 d7 cb \tvfcmulcsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6f, 0x08, 0xd7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6f 08 d7 8c c8 78 56 34 12 \tvfcmulcsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6f, 0x08, 0xd7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6f 08 d7 8c c8 78 56 34 12 \tvfcmulcsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x98, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 98 cb \tvfmadd132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x98, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 98 cb \tvfmadd132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x98, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 98 cb \tvfmadd132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0x98, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 98 8c c8 78 56 34 12 \tvfmadd132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x99, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 99 cb \tvfmadd132sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x99, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 99 8c c8 78 56 34 12 \tvfmadd132sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x99, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 99 8c c8 78 56 34 12 \tvfmadd132sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 a8 cb \tvfmadd213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 a8 cb \tvfmadd213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 a8 cb \tvfmadd213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xa8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 a8 8c c8 78 56 34 12 \tvfmadd213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa9, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 a9 cb \tvfmadd213sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa9, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 a9 8c c8 78 56 34 12 \tvfmadd213sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xa9, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 a9 8c c8 78 56 34 12 \tvfmadd213sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 b8 cb \tvfmadd231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 b8 cb \tvfmadd231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb8, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 b8 cb \tvfmadd231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xb8, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 b8 8c c8 78 56 34 12 \tvfmadd231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb9, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 b9 cb \tvfmadd231sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb9, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 b9 8c c8 78 56 34 12 \tvfmadd231sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xb9, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 b9 8c c8 78 56 34 12 \tvfmadd231sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x48, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 48 56 cb \tvfmaddcph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6e, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 48 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6e, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6e 48 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 08 56 cb \tvfmaddcph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 08 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6e, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6e 08 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x28, 0x56, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 28 56 cb \tvfmaddcph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 28 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6e, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6e 28 56 8c c8 78 56 34 12 \tvfmaddcph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0x57, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 08 57 cb \tvfmaddcsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 08 57 8c c8 78 56 34 12 \tvfmaddcsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6e, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6e 08 57 8c c8 78 56 34 12 \tvfmaddcsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x96, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 96 cb \tvfmaddsub132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x96, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 96 cb \tvfmaddsub132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x96, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 96 cb \tvfmaddsub132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0x96, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 96 8c c8 78 56 34 12 \tvfmaddsub132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 a6 cb \tvfmaddsub213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 a6 cb \tvfmaddsub213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 a6 cb \tvfmaddsub213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xa6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 a6 8c c8 78 56 34 12 \tvfmaddsub213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 b6 cb \tvfmaddsub231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 b6 cb \tvfmaddsub231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb6, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 b6 cb \tvfmaddsub231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xb6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 b6 8c c8 78 56 34 12 \tvfmaddsub231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9a, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 9a cb \tvfmsub132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9a, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9a cb \tvfmsub132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9a, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 9a cb \tvfmsub132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0x9a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 9a 8c c8 78 56 34 12 \tvfmsub132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9b, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9b cb \tvfmsub132sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9b 8c c8 78 56 34 12 \tvfmsub132sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x9b, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 9b 8c c8 78 56 34 12 \tvfmsub132sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xaa, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 aa cb \tvfmsub213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xaa, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 aa cb \tvfmsub213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xaa, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 aa cb \tvfmsub213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xaa, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 aa 8c c8 78 56 34 12 \tvfmsub213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xab, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ab cb \tvfmsub213sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xab, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ab 8c c8 78 56 34 12 \tvfmsub213sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xab, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 ab 8c c8 78 56 34 12 \tvfmsub213sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xba, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 ba cb \tvfmsub231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xba, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ba cb \tvfmsub231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xba, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 ba cb \tvfmsub231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xba, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 ba 8c c8 78 56 34 12 \tvfmsub231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbb, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 bb cb \tvfmsub231sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 bb 8c c8 78 56 34 12 \tvfmsub231sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xbb, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 bb 8c c8 78 56 34 12 \tvfmsub231sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x97, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 97 cb \tvfmsubadd132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x97, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 97 cb \tvfmsubadd132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x97, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 97 cb \tvfmsubadd132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0x97, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 97 8c c8 78 56 34 12 \tvfmsubadd132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 a7 cb \tvfmsubadd213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 a7 cb \tvfmsubadd213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 a7 cb \tvfmsubadd213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xa7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 a7 8c c8 78 56 34 12 \tvfmsubadd213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 b7 cb \tvfmsubadd231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 b7 cb \tvfmsubadd231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb7, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 b7 cb \tvfmsubadd231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xb7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 b7 8c c8 78 56 34 12 \tvfmsubadd231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x48, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 48 d6 cb \tvfmulcph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6e, 0x48, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 48 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6e, 0x48, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6e 48 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 08 d6 cb \tvfmulcph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 08 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6e, 0x08, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6e 08 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x28, 0xd6, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 28 d6 cb \tvfmulcph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x28, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 28 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6e, 0x28, 0xd6, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6e 28 d6 8c c8 78 56 34 12 \tvfmulcph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0xd7, 0xcb, }, 6, 0, "", "",
+"62 f6 6e 08 d7 cb \tvfmulcsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6e, 0x08, 0xd7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6e 08 d7 8c c8 78 56 34 12 \tvfmulcsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6e, 0x08, 0xd7, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6e 08 d7 8c c8 78 56 34 12 \tvfmulcsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 9c cb \tvfnmadd132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9c cb \tvfnmadd132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 9c cb \tvfnmadd132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0x9c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 9c 8c c8 78 56 34 12 \tvfnmadd132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9d, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9d cb \tvfnmadd132sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9d 8c c8 78 56 34 12 \tvfnmadd132sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x9d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 9d 8c c8 78 56 34 12 \tvfnmadd132sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xac, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 ac cb \tvfnmadd213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xac, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ac cb \tvfnmadd213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xac, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 ac cb \tvfnmadd213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xac, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 ac 8c c8 78 56 34 12 \tvfnmadd213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xad, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ad cb \tvfnmadd213sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xad, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ad 8c c8 78 56 34 12 \tvfnmadd213sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xad, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 ad 8c c8 78 56 34 12 \tvfnmadd213sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xbc, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 bc cb \tvfnmadd231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbc, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 bc cb \tvfnmadd231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xbc, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 bc cb \tvfnmadd231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xbc, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 bc 8c c8 78 56 34 12 \tvfnmadd231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbd, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 bd cb \tvfnmadd231sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbd, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 bd 8c c8 78 56 34 12 \tvfnmadd231sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xbd, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 bd 8c c8 78 56 34 12 \tvfnmadd231sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9e, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 9e cb \tvfnmsub132ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9e, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9e cb \tvfnmsub132ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9e, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 9e cb \tvfnmsub132ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0x9e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 9e 8c c8 78 56 34 12 \tvfnmsub132ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9f, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 9f cb \tvfnmsub132sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x9f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 9f 8c c8 78 56 34 12 \tvfnmsub132sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x9f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 9f 8c c8 78 56 34 12 \tvfnmsub132sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xae, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 ae cb \tvfnmsub213ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xae, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 ae cb \tvfnmsub213ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xae, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 ae cb \tvfnmsub213ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xae, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 ae 8c c8 78 56 34 12 \tvfnmsub213ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xaf, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 af cb \tvfnmsub213sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xaf, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 af 8c c8 78 56 34 12 \tvfnmsub213sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xaf, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 af 8c c8 78 56 34 12 \tvfnmsub213sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xbe, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 be cb \tvfnmsub231ph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbe, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 be cb \tvfnmsub231ph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xbe, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 be cb \tvfnmsub231ph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0xbe, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 be 8c c8 78 56 34 12 \tvfnmsub231ph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbf, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 bf cb \tvfnmsub231sh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0xbf, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 bf 8c c8 78 56 34 12 \tvfnmsub231sh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0xbf, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 bf 8c c8 78 56 34 12 \tvfnmsub231sh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x66, 0xe9, 0x12, }, 7, 0, "", "",
+"62 f3 7c 48 66 e9 12 \tvfpclassph $0x12,%zmm1,%k5",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x66, 0xe9, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 66 e9 12 \tvfpclassph $0x12,%xmm1,%k5",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x66, 0xe9, 0x12, }, 7, 0, "", "",
+"62 f3 7c 28 66 e9 12 \tvfpclassph $0x12,%ymm1,%k5",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x67, 0xe9, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 67 e9 12 \tvfpclasssh $0x12,%xmm1,%k5",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x67, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 08 67 ac c8 78 56 34 12 12 \tvfpclasssh $0x12,0x12345678(%rax,%rcx,8),%k5",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x08, 0x67, 0xac, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 08 67 ac c8 78 56 34 12 12 \tvfpclasssh $0x12,0x12345678(%eax,%ecx,8),%k5",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x42, 0xca, }, 6, 0, "", "",
+"62 f6 7d 48 42 ca \tvgetexpph %zmm2,%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 48 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x48, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 48 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x42, 0xca, }, 6, 0, "", "",
+"62 f6 7d 08 42 ca \tvgetexpph %xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 08 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x08, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 08 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x42, 0xca, }, 6, 0, "", "",
+"62 f6 7d 28 42 ca \tvgetexpph %ymm2,%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 28 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x28, 0x42, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 28 42 8c c8 78 56 34 12 \tvgetexpph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x43, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 43 cb \tvgetexpsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x43, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 43 8c c8 78 56 34 12 \tvgetexpsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x43, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 43 8c c8 78 56 34 12 \tvgetexpsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x26, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 48 26 ca 12 \tvgetmantph $0x12,%zmm2,%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 48 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x48, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 48 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x26, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 26 ca 12 \tvgetmantph $0x12,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 08 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x08, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 08 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x26, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 28 26 ca 12 \tvgetmantph $0x12,%ymm2,%ymm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 28 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x28, 0x26, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 28 26 8c c8 78 56 34 12 12 \tvgetmantph $0x12,0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x27, 0xcb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 08 27 cb 12 \tvgetmantsh $0x12,%xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x27, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 08 27 8c c8 78 56 34 12 12 \tvgetmantsh $0x12,0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf3, 0x6c, 0x08, 0x27, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 6c 08 27 8c c8 78 56 34 12 12 \tvgetmantsh $0x12,0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5f, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 5f cb \tvmaxph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x48, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 48 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5f, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 5f cb \tvmaxph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x08, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 08 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5f, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 5f cb \tvmaxph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x28, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 28 5f 8c c8 78 56 34 12 \tvmaxph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5f, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 5f cb \tvmaxsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5f 8c c8 78 56 34 12 \tvmaxsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x5f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 5f 8c c8 78 56 34 12 \tvmaxsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5d, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 5d cb \tvminph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x48, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 48 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5d, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 5d cb \tvminph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x08, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 08 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5d, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 5d cb \tvminph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x28, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 28 5d 8c c8 78 56 34 12 \tvminph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5d, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 5d cb \tvminsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5d 8c c8 78 56 34 12 \tvminsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x5d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 5d 8c c8 78 56 34 12 \tvminsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x11, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 11 8c c8 78 56 34 12 \tvmovsh %xmm1,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x08, 0x11, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 08 11 8c c8 78 56 34 12 \tvmovsh %xmm1,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf5, 0x7e, 0x08, 0x10, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7e 08 10 8c c8 78 56 34 12 \tvmovsh 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7e, 0x08, 0x10, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7e 08 10 8c c8 78 56 34 12 \tvmovsh 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x10, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 10 cb \tvmovsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7e, 0xc8, }, 6, 0, "", "",
+"62 f5 7d 08 7e c8 \tvmovw %xmm1,%eax",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x7e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 7e 8c c8 78 56 34 12 \tvmovw %xmm1,0x12345678(%rax,%rcx,8)",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x7e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 7e 8c c8 78 56 34 12 \tvmovw %xmm1,0x12345678(%eax,%ecx,8)",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x6e, 0xc8, }, 6, 0, "", "",
+"62 f5 7d 08 6e c8 \tvmovw %eax,%xmm1",},
+{{0x62, 0xf5, 0x7d, 0x08, 0x6e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7d 08 6e 8c c8 78 56 34 12 \tvmovw 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7d, 0x08, 0x6e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7d 08 6e 8c c8 78 56 34 12 \tvmovw 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x59, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 59 cb \tvmulph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x48, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 48 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x59, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 59 cb \tvmulph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x08, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 08 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x59, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 59 cb \tvmulph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x28, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 28 59 8c c8 78 56 34 12 \tvmulph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x59, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 59 cb \tvmulsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 59 8c c8 78 56 34 12 \tvmulsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x59, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 59 8c c8 78 56 34 12 \tvmulsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x4c, 0xca, }, 6, 0, "", "",
+"62 f6 7d 48 4c ca \tvrcpph %zmm2,%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 48 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x48, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 48 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x4c, 0xca, }, 6, 0, "", "",
+"62 f6 7d 08 4c ca \tvrcpph %xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 08 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x08, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 08 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x4c, 0xca, }, 6, 0, "", "",
+"62 f6 7d 28 4c ca \tvrcpph %ymm2,%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 28 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x28, 0x4c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 28 4c 8c c8 78 56 34 12 \tvrcpph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x4d, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 4d cb \tvrcpsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x4d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 4d 8c c8 78 56 34 12 \tvrcpsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x4d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 4d 8c c8 78 56 34 12 \tvrcpsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x56, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 48 56 ca 12 \tvreduceph $0x12,%zmm2,%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 48 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x48, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 48 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x56, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 56 ca 12 \tvreduceph $0x12,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 08 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x08, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 08 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x56, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 28 56 ca 12 \tvreduceph $0x12,%ymm2,%ymm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 28 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x28, 0x56, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 28 56 8c c8 78 56 34 12 12 \tvreduceph $0x12,0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x57, 0xcb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 08 57 cb 12 \tvreducesh $0x12,%xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 08 57 8c c8 78 56 34 12 12 \tvreducesh $0x12,0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf3, 0x6c, 0x08, 0x57, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 6c 08 57 8c c8 78 56 34 12 12 \tvreducesh $0x12,0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x08, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 48 08 ca 12 \tvrndscaleph $0x12,%zmm2,%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x48, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 48 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x48, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 48 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x08, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 08 08 ca 12 \tvrndscaleph $0x12,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x08, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 08 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x08, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 08 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x08, 0xca, 0x12, }, 7, 0, "", "",
+"62 f3 7c 28 08 ca 12 \tvrndscaleph $0x12,%ymm2,%ymm1",},
+{{0x62, 0xf3, 0x7c, 0x28, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 7c 28 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf3, 0x7c, 0x28, 0x08, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 7c 28 08 8c c8 78 56 34 12 12 \tvrndscaleph $0x12,0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x0a, 0xcb, 0x12, }, 7, 0, "", "",
+"62 f3 6c 08 0a cb 12 \tvrndscalesh $0x12,%xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf3, 0x6c, 0x08, 0x0a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 12, 0, "", "",
+"62 f3 6c 08 0a 8c c8 78 56 34 12 12 \tvrndscalesh $0x12,0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf3, 0x6c, 0x08, 0x0a, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, 0x12, }, 13, 0, "", "",
+"67 62 f3 6c 08 0a 8c c8 78 56 34 12 12 \tvrndscalesh $0x12,0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x4e, 0xca, }, 6, 0, "", "",
+"62 f6 7d 48 4e ca \tvrsqrtph %zmm2,%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x48, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 48 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x48, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 48 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x4e, 0xca, }, 6, 0, "", "",
+"62 f6 7d 08 4e ca \tvrsqrtph %xmm2,%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x08, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 08 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x08, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 08 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x4e, 0xca, }, 6, 0, "", "",
+"62 f6 7d 28 4e ca \tvrsqrtph %ymm2,%ymm1",},
+{{0x62, 0xf6, 0x7d, 0x28, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 7d 28 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf6, 0x7d, 0x28, 0x4e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 7d 28 4e 8c c8 78 56 34 12 \tvrsqrtph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x4f, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 4f cb \tvrsqrtsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x4f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 4f 8c c8 78 56 34 12 \tvrsqrtsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x4f, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 4f 8c c8 78 56 34 12 \tvrsqrtsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x2c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 48 2c cb \tvscalefph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x48, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 48 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x48, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 48 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x2c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 2c cb \tvscalefph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x2c, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 28 2c cb \tvscalefph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x28, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 28 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x28, 0x2c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 28 2c 8c c8 78 56 34 12 \tvscalefph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x2d, 0xcb, }, 6, 0, "", "",
+"62 f6 6d 08 2d cb \tvscalefsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf6, 0x6d, 0x08, 0x2d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f6 6d 08 2d 8c c8 78 56 34 12 \tvscalefsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf6, 0x6d, 0x08, 0x2d, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f6 6d 08 2d 8c c8 78 56 34 12 \tvscalefsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x51, 0xca, }, 6, 0, "", "",
+"62 f5 7c 48 51 ca \tvsqrtph %zmm2,%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x48, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 48 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%rax,%rcx,8),%zmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x48, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 48 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%eax,%ecx,8),%zmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x51, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 51 ca \tvsqrtph %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 08 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x51, 0xca, }, 6, 0, "", "",
+"62 f5 7c 28 51 ca \tvsqrtph %ymm2,%ymm1",},
+{{0x62, 0xf5, 0x7c, 0x28, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 28 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%rax,%rcx,8),%ymm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x28, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 28 51 8c c8 78 56 34 12 \tvsqrtph 0x12345678(%eax,%ecx,8),%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x51, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 51 cb \tvsqrtsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 51 8c c8 78 56 34 12 \tvsqrtsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x51, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 51 8c c8 78 56 34 12 \tvsqrtsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5c, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 48 5c cb \tvsubph %zmm3,%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x48, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 48 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%rax,%rcx,8),%zmm2,%zmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x48, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 48 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%eax,%ecx,8),%zmm2,%zmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5c, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 08 5c cb \tvsubph %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x08, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 08 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x08, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 08 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5c, 0xcb, }, 6, 0, "", "",
+"62 f5 6c 28 5c cb \tvsubph %ymm3,%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6c, 0x28, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6c 28 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%rax,%rcx,8),%ymm2,%ymm1",},
+{{0x67, 0x62, 0xf5, 0x6c, 0x28, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6c 28 5c 8c c8 78 56 34 12 \tvsubph 0x12345678(%eax,%ecx,8),%ymm2,%ymm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5c, 0xcb, }, 6, 0, "", "",
+"62 f5 6e 08 5c cb \tvsubsh %xmm3,%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x6e, 0x08, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 6e 08 5c 8c c8 78 56 34 12 \tvsubsh 0x12345678(%rax,%rcx,8),%xmm2,%xmm1",},
+{{0x67, 0x62, 0xf5, 0x6e, 0x08, 0x5c, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 6e 08 5c 8c c8 78 56 34 12 \tvsubsh 0x12345678(%eax,%ecx,8),%xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x2e, 0xca, }, 6, 0, "", "",
+"62 f5 7c 08 2e ca \tvucomish %xmm2,%xmm1",},
+{{0x62, 0xf5, 0x7c, 0x08, 0x2e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 11, 0, "", "",
+"62 f5 7c 08 2e 8c c8 78 56 34 12 \tvucomish 0x12345678(%rax,%rcx,8),%xmm1",},
+{{0x67, 0x62, 0xf5, 0x7c, 0x08, 0x2e, 0x8c, 0xc8, 0x78, 0x56, 0x34, 0x12, }, 12, 0, "", "",
+"67 62 f5 7c 08 2e 8c c8 78 56 34 12 \tvucomish 0x12345678(%eax,%ecx,8),%xmm1",},
+{{0xf3, 0x0f, 0x3a, 0xf0, 0xc0, 0x00, }, 6, 0, "", "",
+"f3 0f 3a f0 c0 00 \threset $0x0",},
+{{0x0f, 0x01, 0xe8, }, 3, 0, "", "",
+"0f 01 e8 \tserialize ",},
+{{0xf2, 0x0f, 0x01, 0xe9, }, 4, 0, "", "",
+"f2 0f 01 e9 \txresldtrk ",},
+{{0xf2, 0x0f, 0x01, 0xe8, }, 4, 0, "", "",
+"f2 0f 01 e8 \txsusldtrk ",},
{{0x0f, 0x01, 0xcf, }, 3, 0, "", "",
"0f 01 cf \tencls ",},
{{0x0f, 0x01, 0xd7, }, 3, 0, "", "",
diff --git a/tools/perf/arch/x86/tests/insn-x86-dat-src.c b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
index c3808e94c46e..a391464c8dee 100644
--- a/tools/perf/arch/x86/tests/insn-x86-dat-src.c
+++ b/tools/perf/arch/x86/tests/insn-x86-dat-src.c
@@ -1910,6 +1910,724 @@ int main(void)
asm volatile("notrack bnd jmpq *0x12345678(%rax,%rcx,8)"); /* Expecting: jmp indirect 0 */
asm volatile("notrack bnd jmpq *0x12345678(%r8,%rcx,8)"); /* Expecting: jmp indirect 0 */
+ /* AMX */
+
+ asm volatile("ldtilecfg (%rax,%rcx,8)");
+ asm volatile("ldtilecfg (%r8,%rcx,8)");
+ asm volatile("sttilecfg (%rax,%rcx,8)");
+ asm volatile("sttilecfg (%r8,%rcx,8)");
+ asm volatile("tdpbf16ps %tmm0, %tmm1, %tmm2");
+ asm volatile("tdpbssd %tmm0, %tmm1, %tmm2");
+ asm volatile("tdpbsud %tmm0, %tmm1, %tmm2");
+ asm volatile("tdpbusd %tmm0, %tmm1, %tmm2");
+ asm volatile("tdpbuud %tmm0, %tmm1, %tmm2");
+ asm volatile("tileloadd (%rax,%rcx,8), %tmm1");
+ asm volatile("tileloadd (%r8,%rcx,8), %tmm2");
+ asm volatile("tileloaddt1 (%rax,%rcx,8), %tmm1");
+ asm volatile("tileloaddt1 (%r8,%rcx,8), %tmm2");
+ asm volatile("tilerelease");
+ asm volatile("tilestored %tmm1, (%rax,%rcx,8)");
+ asm volatile("tilestored %tmm2, (%r8,%rcx,8)");
+ asm volatile("tilezero %tmm0");
+ asm volatile("tilezero %tmm7");
+
+ /* User Interrupt */
+
+ asm volatile("clui");
+ asm volatile("senduipi %rax");
+ asm volatile("senduipi %r8");
+ asm volatile("stui");
+ asm volatile("testui");
+ asm volatile("uiret");
+
+ /* AVX512-FP16 */
+
+ asm volatile("vaddph %zmm3, %zmm2, %zmm1");
+ asm volatile("vaddph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vaddph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vaddph %xmm3, %xmm2, %xmm1");
+ asm volatile("vaddph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vaddph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vaddph %ymm3, %ymm2, %ymm1");
+ asm volatile("vaddph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vaddph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vaddsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vaddsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vaddsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcmpph $0x12, %zmm3, %zmm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%rax,%rcx,8), %zmm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%eax,%ecx,8), %zmm2, %k5");
+ asm volatile("vcmpph $0x12, %xmm3, %xmm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%rax,%rcx,8), %xmm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %k5");
+ asm volatile("vcmpph $0x12, %ymm3, %ymm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%rax,%rcx,8), %ymm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%eax,%ecx,8), %ymm2, %k5");
+ asm volatile("vcmpsh $0x12, %xmm3, %xmm2, %k5");
+ asm volatile("vcmpsh $0x12, 0x12345678(%rax,%rcx,8), %xmm2, %k5");
+ asm volatile("vcmpsh $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %k5");
+ asm volatile("vcomish %xmm2, %xmm1");
+ asm volatile("vcomish 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcomish 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtdq2ph %zmm2, %ymm1");
+ asm volatile("vcvtdq2ph 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtdq2ph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtdq2ph %xmm2, %xmm1");
+ asm volatile("vcvtdq2ph %ymm2, %xmm1");
+ asm volatile("vcvtpd2ph %zmm2, %xmm1");
+ asm volatile("vcvtpd2ph %xmm2, %xmm1");
+ asm volatile("vcvtpd2ph %ymm2, %xmm1");
+ asm volatile("vcvtph2dq %ymm2, %zmm1");
+ asm volatile("vcvtph2dq 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2dq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2dq %xmm2, %xmm1");
+ asm volatile("vcvtph2dq 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2dq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2dq %xmm2, %ymm1");
+ asm volatile("vcvtph2dq 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2dq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2pd %xmm2, %zmm1");
+ asm volatile("vcvtph2pd 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2pd 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2pd %xmm2, %xmm1");
+ asm volatile("vcvtph2pd 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2pd 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2pd %xmm2, %ymm1");
+ asm volatile("vcvtph2pd 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2pd 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2ps %ymm2, %zmm1");
+ asm volatile("vcvtph2ps 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2ps %xmm2, %xmm1");
+ asm volatile("vcvtph2ps 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2ps %xmm2, %ymm1");
+ asm volatile("vcvtph2ps 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2ps %xmm2, %xmm1");
+ asm volatile("vcvtph2ps 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2ps %xmm2, %ymm1");
+ asm volatile("vcvtph2ps 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2psx %ymm2, %zmm1");
+ asm volatile("vcvtph2psx 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2psx 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2psx %xmm2, %xmm1");
+ asm volatile("vcvtph2psx 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2psx 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2psx %xmm2, %ymm1");
+ asm volatile("vcvtph2psx 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2psx 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2qq %xmm2, %zmm1");
+ asm volatile("vcvtph2qq 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2qq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2qq %xmm2, %xmm1");
+ asm volatile("vcvtph2qq 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2qq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2qq %xmm2, %ymm1");
+ asm volatile("vcvtph2qq 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2qq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2udq %ymm2, %zmm1");
+ asm volatile("vcvtph2udq 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2udq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2udq %xmm2, %xmm1");
+ asm volatile("vcvtph2udq 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2udq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2udq %xmm2, %ymm1");
+ asm volatile("vcvtph2udq 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2udq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2uqq %xmm2, %zmm1");
+ asm volatile("vcvtph2uqq 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2uqq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2uqq %xmm2, %xmm1");
+ asm volatile("vcvtph2uqq 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2uqq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2uqq %xmm2, %ymm1");
+ asm volatile("vcvtph2uqq 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2uqq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2uw %zmm2, %zmm1");
+ asm volatile("vcvtph2uw 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2uw 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2uw %xmm2, %xmm1");
+ asm volatile("vcvtph2uw 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2uw 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2uw %ymm2, %ymm1");
+ asm volatile("vcvtph2uw 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2uw 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2w %zmm2, %zmm1");
+ asm volatile("vcvtph2w 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtph2w 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2w %xmm2, %xmm1");
+ asm volatile("vcvtph2w 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtph2w 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2w %ymm2, %ymm1");
+ asm volatile("vcvtph2w 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtph2w 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtps2ph $0x12, %zmm1, 0x12345678(%rax,%rcx,8)");
+ asm volatile("vcvtps2ph $0x12, %zmm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2ph $0x12, %zmm2, %ymm1");
+ asm volatile("vcvtps2ph $0x12, %ymm1, 0x12345678(%rax,%rcx,8)");
+ asm volatile("vcvtps2ph $0x12, %ymm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2ph $0x12, %xmm1, 0x12345678(%rax,%rcx,8)");
+ asm volatile("vcvtps2ph $0x12, %xmm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2ph $0x12, %xmm2, %xmm1");
+ asm volatile("vcvtps2ph $0x12, %ymm2, %xmm1");
+ asm volatile("vcvtps2ph $0x12, %ymm2, %xmm1");
+ asm volatile("vcvtps2ph $0x12, %ymm2, 0x12345678(%rax,%rcx,8)");
+ asm volatile("vcvtps2ph $0x12, %ymm2, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2ph $0x12, %xmm2, %xmm1");
+ asm volatile("vcvtps2ph $0x12, %xmm2, 0x12345678(%rax,%rcx,8)");
+ asm volatile("vcvtps2ph $0x12, %xmm2, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2phx %zmm2, %ymm1");
+ asm volatile("vcvtps2phx 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtps2phx 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtps2phx %xmm2, %xmm1");
+ asm volatile("vcvtps2phx %ymm2, %xmm1");
+ asm volatile("vcvtqq2ph %zmm2, %xmm1");
+ asm volatile("vcvtqq2ph %xmm2, %xmm1");
+ asm volatile("vcvtqq2ph %ymm2, %xmm1");
+ asm volatile("vcvtsd2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsh2sd 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsh2si 0x12345678(%eax,%ecx,8), %eax");
+ asm volatile("vcvtsh2si 0x12345678(%eax,%ecx,8), %rax");
+ asm volatile("vcvtsh2ss 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsh2usi %xmm1, %eax");
+ asm volatile("vcvtsh2usi 0x12345678(%rax,%rcx,8), %eax");
+ asm volatile("vcvtsh2usi 0x12345678(%eax,%ecx,8), %eax");
+ asm volatile("vcvtsh2usi %xmm1, %rax");
+ asm volatile("vcvtsh2usi 0x12345678(%rax,%rcx,8), %rax");
+ asm volatile("vcvtsh2usi 0x12345678(%eax,%ecx,8), %rax");
+ asm volatile("vcvtsi2sh %eax, %xmm2, %xmm1");
+ asm volatile("vcvtsi2sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsi2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsi2sh %rax, %xmm2, %xmm1");
+ asm volatile("vcvtsi2sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsi2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtss2sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vcvtss2sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vcvtss2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvttph2dq %ymm2, %zmm1");
+ asm volatile("vcvttph2dq 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvttph2dq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2dq %xmm2, %xmm1");
+ asm volatile("vcvttph2dq 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvttph2dq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2dq %xmm2, %ymm1");
+ asm volatile("vcvttph2dq 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvttph2dq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2qq %xmm2, %zmm1");
+ asm volatile("vcvttph2qq 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvttph2qq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2qq %xmm2, %xmm1");
+ asm volatile("vcvttph2qq 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvttph2qq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2qq %xmm2, %ymm1");
+ asm volatile("vcvttph2qq 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvttph2qq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2udq %ymm2, %zmm1");
+ asm volatile("vcvttph2udq 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvttph2udq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2udq %xmm2, %xmm1");
+ asm volatile("vcvttph2udq 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvttph2udq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2udq %xmm2, %ymm1");
+ asm volatile("vcvttph2udq 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvttph2udq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2uqq %xmm2, %zmm1");
+ asm volatile("vcvttph2uqq 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvttph2uqq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2uqq %xmm2, %xmm1");
+ asm volatile("vcvttph2uqq 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvttph2uqq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2uqq %xmm2, %ymm1");
+ asm volatile("vcvttph2uqq 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvttph2uqq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2uw %zmm2, %zmm1");
+ asm volatile("vcvttph2uw 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvttph2uw 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2uw %xmm2, %xmm1");
+ asm volatile("vcvttph2uw 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvttph2uw 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2uw %ymm2, %ymm1");
+ asm volatile("vcvttph2uw 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvttph2uw 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2w %zmm2, %zmm1");
+ asm volatile("vcvttph2w 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvttph2w 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2w %xmm2, %xmm1");
+ asm volatile("vcvttph2w 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvttph2w 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2w %ymm2, %ymm1");
+ asm volatile("vcvttph2w 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvttph2w 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttsh2si %xmm1, %eax");
+ asm volatile("vcvttsh2si 0x12345678(%rax,%rcx,8), %eax");
+ asm volatile("vcvttsh2si 0x12345678(%eax,%ecx,8), %eax");
+ asm volatile("vcvttsh2si %xmm1, %rax");
+ asm volatile("vcvttsh2si 0x12345678(%rax,%rcx,8), %rax");
+ asm volatile("vcvttsh2si 0x12345678(%eax,%ecx,8), %rax");
+ asm volatile("vcvttsh2usi %xmm1, %eax");
+ asm volatile("vcvttsh2usi 0x12345678(%rax,%rcx,8), %eax");
+ asm volatile("vcvttsh2usi 0x12345678(%eax,%ecx,8), %eax");
+ asm volatile("vcvttsh2usi %xmm1, %rax");
+ asm volatile("vcvttsh2usi 0x12345678(%rax,%rcx,8), %rax");
+ asm volatile("vcvttsh2usi 0x12345678(%eax,%ecx,8), %rax");
+ asm volatile("vcvtudq2ph %zmm2, %ymm1");
+ asm volatile("vcvtudq2ph 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtudq2ph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtudq2ph %xmm2, %xmm1");
+ asm volatile("vcvtudq2ph %ymm2, %xmm1");
+ asm volatile("vcvtuqq2ph %zmm2, %xmm1");
+ asm volatile("vcvtuqq2ph %xmm2, %xmm1");
+ asm volatile("vcvtuqq2ph %ymm2, %xmm1");
+ asm volatile("vcvtusi2sh %eax, %xmm2, %xmm1");
+ asm volatile("vcvtusi2sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vcvtusi2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtusi2sh %rax, %xmm2, %xmm1");
+ asm volatile("vcvtusi2sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vcvtusi2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtuw2ph %zmm2, %zmm1");
+ asm volatile("vcvtuw2ph 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtuw2ph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtuw2ph %xmm2, %xmm1");
+ asm volatile("vcvtuw2ph 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtuw2ph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtuw2ph %ymm2, %ymm1");
+ asm volatile("vcvtuw2ph 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtuw2ph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtw2ph %zmm2, %zmm1");
+ asm volatile("vcvtw2ph 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vcvtw2ph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtw2ph %xmm2, %xmm1");
+ asm volatile("vcvtw2ph 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vcvtw2ph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtw2ph %ymm2, %ymm1");
+ asm volatile("vcvtw2ph 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vcvtw2ph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vdivph %zmm3, %zmm2, %zmm1");
+ asm volatile("vdivph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vdivph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vdivph %xmm3, %xmm2, %xmm1");
+ asm volatile("vdivph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vdivph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vdivph %ymm3, %ymm2, %ymm1");
+ asm volatile("vdivph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vdivph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vdivsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vdivsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vdivsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfcmaddcph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfcmaddcph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfcmaddcph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfcmaddcph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfcmaddcph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfcmaddcph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfcmaddcph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfcmaddcph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfcmaddcph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfcmaddcsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfcmaddcsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfcmaddcsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfcmulcph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfcmulcph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfcmulcph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfcmulcph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfcmulcph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfcmulcph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfcmulcph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfcmulcph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfcmulcph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfcmulcsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfcmulcsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfcmulcsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmadd132ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd132ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmadd132ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd132sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd132sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd132sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmadd213ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd213ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmadd213ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd213sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd213sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd213sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmadd231ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd231ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmadd231ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd231sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd231sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd231sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddcph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmaddcph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddcph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddcph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddcph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddcph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddcph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmaddcph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddcph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddcsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddcsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddcsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddsub213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddsub231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsub132ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub132ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsub132ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub132sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub132sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub132sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsub213ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub213ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsub213ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub213sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub213sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub213sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsub231ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub231ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsub231ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub231sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub231sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub231sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsubadd213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsubadd231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmulcph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmulcph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfmulcph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmulcph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmulcph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmulcph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmulcph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmulcph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfmulcph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmulcsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmulcsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfmulcsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmadd132ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd132ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmadd132ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd132sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd132sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd132sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmadd213ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd213ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmadd213ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd213sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd213sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd213sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmadd231ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd231ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmadd231ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd231sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd231sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd231sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmsub132ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub132ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmsub132ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub132sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub132sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub132sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmsub213ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub213ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmsub213ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub213sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub213sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub213sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmsub231ph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub231ph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmsub231ph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub231sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub231sh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub231sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfpclassph $0x12, %zmm1, %k5");
+ asm volatile("vfpclassph $0x12, %xmm1, %k5");
+ asm volatile("vfpclassph $0x12, %ymm1, %k5");
+ asm volatile("vfpclasssh $0x12, %xmm1, %k5");
+ asm volatile("vfpclasssh $0x12, 0x12345678(%rax,%rcx,8), %k5");
+ asm volatile("vfpclasssh $0x12, 0x12345678(%eax,%ecx,8), %k5");
+ asm volatile("vgetexpph %zmm2, %zmm1");
+ asm volatile("vgetexpph 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vgetexpph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vgetexpph %xmm2, %xmm1");
+ asm volatile("vgetexpph 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vgetexpph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vgetexpph %ymm2, %ymm1");
+ asm volatile("vgetexpph 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vgetexpph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vgetexpsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vgetexpsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vgetexpsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vgetmantph $0x12, %zmm2, %zmm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vgetmantph $0x12, %xmm2, %xmm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vgetmantph $0x12, %ymm2, %ymm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vgetmantsh $0x12, %xmm3, %xmm2, %xmm1");
+ asm volatile("vgetmantsh $0x12, 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vgetmantsh $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vmaxph %zmm3, %zmm2, %zmm1");
+ asm volatile("vmaxph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vmaxph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vmaxph %xmm3, %xmm2, %xmm1");
+ asm volatile("vmaxph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vmaxph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vmaxph %ymm3, %ymm2, %ymm1");
+ asm volatile("vmaxph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vmaxph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vmaxsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vmaxsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vmaxsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vminph %zmm3, %zmm2, %zmm1");
+ asm volatile("vminph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vminph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vminph %xmm3, %xmm2, %xmm1");
+ asm volatile("vminph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vminph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vminph %ymm3, %ymm2, %ymm1");
+ asm volatile("vminph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vminph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vminsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vminsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vminsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vmovsh %xmm1, 0x12345678(%rax,%rcx,8)");
+ asm volatile("vmovsh %xmm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vmovsh 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vmovsh 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vmovsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vmovw %xmm1, %eax");
+ asm volatile("vmovw %xmm1, 0x12345678(%rax,%rcx,8)");
+ asm volatile("vmovw %xmm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vmovw %eax, %xmm1");
+ asm volatile("vmovw 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vmovw 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vmulph %zmm3, %zmm2, %zmm1");
+ asm volatile("vmulph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vmulph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vmulph %xmm3, %xmm2, %xmm1");
+ asm volatile("vmulph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vmulph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vmulph %ymm3, %ymm2, %ymm1");
+ asm volatile("vmulph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vmulph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vmulsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vmulsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vmulsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vrcpph %zmm2, %zmm1");
+ asm volatile("vrcpph 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vrcpph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vrcpph %xmm2, %xmm1");
+ asm volatile("vrcpph 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vrcpph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vrcpph %ymm2, %ymm1");
+ asm volatile("vrcpph 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vrcpph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vrcpsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vrcpsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vrcpsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vreduceph $0x12, %zmm2, %zmm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vreduceph $0x12, %xmm2, %xmm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vreduceph $0x12, %ymm2, %ymm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vreducesh $0x12, %xmm3, %xmm2, %xmm1");
+ asm volatile("vreducesh $0x12, 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vreducesh $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vrndscaleph $0x12, %zmm2, %zmm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vrndscaleph $0x12, %xmm2, %xmm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vrndscaleph $0x12, %ymm2, %ymm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vrndscalesh $0x12, %xmm3, %xmm2, %xmm1");
+ asm volatile("vrndscalesh $0x12, 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vrndscalesh $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vrsqrtph %zmm2, %zmm1");
+ asm volatile("vrsqrtph 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vrsqrtph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vrsqrtph %xmm2, %xmm1");
+ asm volatile("vrsqrtph 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vrsqrtph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vrsqrtph %ymm2, %ymm1");
+ asm volatile("vrsqrtph 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vrsqrtph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vrsqrtsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vrsqrtsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vrsqrtsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vscalefph %zmm3, %zmm2, %zmm1");
+ asm volatile("vscalefph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vscalefph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vscalefph %xmm3, %xmm2, %xmm1");
+ asm volatile("vscalefph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vscalefph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vscalefph %ymm3, %ymm2, %ymm1");
+ asm volatile("vscalefph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vscalefph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vscalefsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vscalefsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vscalefsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vsqrtph %zmm2, %zmm1");
+ asm volatile("vsqrtph 0x12345678(%rax,%rcx,8), %zmm1");
+ asm volatile("vsqrtph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vsqrtph %xmm2, %xmm1");
+ asm volatile("vsqrtph 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vsqrtph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vsqrtph %ymm2, %ymm1");
+ asm volatile("vsqrtph 0x12345678(%rax,%rcx,8), %ymm1");
+ asm volatile("vsqrtph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vsqrtsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vsqrtsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vsqrtsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vsubph %zmm3, %zmm2, %zmm1");
+ asm volatile("vsubph 0x12345678(%rax,%rcx,8), %zmm2, %zmm1");
+ asm volatile("vsubph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vsubph %xmm3, %xmm2, %xmm1");
+ asm volatile("vsubph 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vsubph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vsubph %ymm3, %ymm2, %ymm1");
+ asm volatile("vsubph 0x12345678(%rax,%rcx,8), %ymm2, %ymm1");
+ asm volatile("vsubph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vsubsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vsubsh 0x12345678(%rax,%rcx,8), %xmm2, %xmm1");
+ asm volatile("vsubsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vucomish %xmm2, %xmm1");
+ asm volatile("vucomish 0x12345678(%rax,%rcx,8), %xmm1");
+ asm volatile("vucomish 0x12345678(%eax,%ecx,8), %xmm1");
+
#else /* #ifdef __x86_64__ */
/* bound r32, mem (same op code as EVEX prefix) */
@@ -3670,8 +4388,479 @@ int main(void)
asm volatile("notrack bnd jmp *(0x12345678)"); /* Expecting: jmp indirect 0 */
asm volatile("notrack bnd jmp *0x12345678(%eax,%ecx,8)"); /* Expecting: jmp indirect 0 */
+ /* AVX512-FP16 */
+
+ asm volatile("vaddph %zmm3, %zmm2, %zmm1");
+ asm volatile("vaddph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vaddph %xmm3, %xmm2, %xmm1");
+ asm volatile("vaddph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vaddph %ymm3, %ymm2, %ymm1");
+ asm volatile("vaddph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vaddsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vaddsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcmpph $0x12, %zmm3, %zmm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%eax,%ecx,8), %zmm2, %k5");
+ asm volatile("vcmpph $0x12, %xmm3, %xmm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %k5");
+ asm volatile("vcmpph $0x12, %ymm3, %ymm2, %k5");
+ asm volatile("vcmpph $0x12, 0x12345678(%eax,%ecx,8), %ymm2, %k5");
+ asm volatile("vcmpsh $0x12, %xmm3, %xmm2, %k5");
+ asm volatile("vcmpsh $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %k5");
+ asm volatile("vcomish %xmm2, %xmm1");
+ asm volatile("vcomish 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtdq2ph %zmm2, %ymm1");
+ asm volatile("vcvtdq2ph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtdq2ph %xmm2, %xmm1");
+ asm volatile("vcvtdq2ph %ymm2, %xmm1");
+ asm volatile("vcvtpd2ph %zmm2, %xmm1");
+ asm volatile("vcvtpd2ph %xmm2, %xmm1");
+ asm volatile("vcvtpd2ph %ymm2, %xmm1");
+ asm volatile("vcvtph2dq %ymm2, %zmm1");
+ asm volatile("vcvtph2dq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2dq %xmm2, %xmm1");
+ asm volatile("vcvtph2dq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2dq %xmm2, %ymm1");
+ asm volatile("vcvtph2dq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2pd %xmm2, %zmm1");
+ asm volatile("vcvtph2pd 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2pd %xmm2, %xmm1");
+ asm volatile("vcvtph2pd 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2pd %xmm2, %ymm1");
+ asm volatile("vcvtph2pd 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2ps %ymm2, %zmm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2ps %xmm2, %xmm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2ps %xmm2, %ymm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2ps %xmm2, %xmm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2ps %xmm2, %ymm1");
+ asm volatile("vcvtph2ps 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2psx %ymm2, %zmm1");
+ asm volatile("vcvtph2psx 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2psx %xmm2, %xmm1");
+ asm volatile("vcvtph2psx 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2psx %xmm2, %ymm1");
+ asm volatile("vcvtph2psx 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2qq %xmm2, %zmm1");
+ asm volatile("vcvtph2qq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2qq %xmm2, %xmm1");
+ asm volatile("vcvtph2qq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2qq %xmm2, %ymm1");
+ asm volatile("vcvtph2qq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2udq %ymm2, %zmm1");
+ asm volatile("vcvtph2udq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2udq %xmm2, %xmm1");
+ asm volatile("vcvtph2udq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2udq %xmm2, %ymm1");
+ asm volatile("vcvtph2udq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2uqq %xmm2, %zmm1");
+ asm volatile("vcvtph2uqq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2uqq %xmm2, %xmm1");
+ asm volatile("vcvtph2uqq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2uqq %xmm2, %ymm1");
+ asm volatile("vcvtph2uqq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2uw %zmm2, %zmm1");
+ asm volatile("vcvtph2uw 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2uw %xmm2, %xmm1");
+ asm volatile("vcvtph2uw 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2uw %ymm2, %ymm1");
+ asm volatile("vcvtph2uw 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtph2w %zmm2, %zmm1");
+ asm volatile("vcvtph2w 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtph2w %xmm2, %xmm1");
+ asm volatile("vcvtph2w 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtph2w %ymm2, %ymm1");
+ asm volatile("vcvtph2w 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtps2ph $0x12, %zmm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2ph $0x12, %zmm2, %ymm1");
+ asm volatile("vcvtps2ph $0x12, %ymm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2ph $0x12, %xmm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2ph $0x12, %xmm2, %xmm1");
+ asm volatile("vcvtps2ph $0x12, %ymm2, %xmm1");
+ asm volatile("vcvtps2ph $0x12, %ymm2, %xmm1");
+ asm volatile("vcvtps2ph $0x12, %ymm2, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2ph $0x12, %xmm2, %xmm1");
+ asm volatile("vcvtps2ph $0x12, %xmm2, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vcvtps2phx %zmm2, %ymm1");
+ asm volatile("vcvtps2phx 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtps2phx %xmm2, %xmm1");
+ asm volatile("vcvtps2phx %ymm2, %xmm1");
+ asm volatile("vcvtqq2ph %zmm2, %xmm1");
+ asm volatile("vcvtqq2ph %xmm2, %xmm1");
+ asm volatile("vcvtqq2ph %ymm2, %xmm1");
+ asm volatile("vcvtsd2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsh2sd 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsh2si 0x12345678(%eax,%ecx,8), %eax");
+ asm volatile("vcvtsh2ss 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsh2usi %xmm1, %eax");
+ asm volatile("vcvtsh2usi 0x12345678(%eax,%ecx,8), %eax");
+ asm volatile("vcvtsi2sh %eax, %xmm2, %xmm1");
+ asm volatile("vcvtsi2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtsi2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtss2sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vcvtss2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvttph2dq %ymm2, %zmm1");
+ asm volatile("vcvttph2dq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2dq %xmm2, %xmm1");
+ asm volatile("vcvttph2dq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2dq %xmm2, %ymm1");
+ asm volatile("vcvttph2dq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2qq %xmm2, %zmm1");
+ asm volatile("vcvttph2qq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2qq %xmm2, %xmm1");
+ asm volatile("vcvttph2qq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2qq %xmm2, %ymm1");
+ asm volatile("vcvttph2qq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2udq %ymm2, %zmm1");
+ asm volatile("vcvttph2udq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2udq %xmm2, %xmm1");
+ asm volatile("vcvttph2udq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2udq %xmm2, %ymm1");
+ asm volatile("vcvttph2udq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2uqq %xmm2, %zmm1");
+ asm volatile("vcvttph2uqq 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2uqq %xmm2, %xmm1");
+ asm volatile("vcvttph2uqq 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2uqq %xmm2, %ymm1");
+ asm volatile("vcvttph2uqq 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2uw %zmm2, %zmm1");
+ asm volatile("vcvttph2uw 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2uw %xmm2, %xmm1");
+ asm volatile("vcvttph2uw 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2uw %ymm2, %ymm1");
+ asm volatile("vcvttph2uw 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttph2w %zmm2, %zmm1");
+ asm volatile("vcvttph2w 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvttph2w %xmm2, %xmm1");
+ asm volatile("vcvttph2w 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvttph2w %ymm2, %ymm1");
+ asm volatile("vcvttph2w 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvttsh2si %xmm1, %eax");
+ asm volatile("vcvttsh2si 0x12345678(%eax,%ecx,8), %eax");
+ asm volatile("vcvttsh2usi %xmm1, %eax");
+ asm volatile("vcvttsh2usi 0x12345678(%eax,%ecx,8), %eax");
+ asm volatile("vcvtudq2ph %zmm2, %ymm1");
+ asm volatile("vcvtudq2ph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtudq2ph %xmm2, %xmm1");
+ asm volatile("vcvtudq2ph %ymm2, %xmm1");
+ asm volatile("vcvtuqq2ph %zmm2, %xmm1");
+ asm volatile("vcvtuqq2ph %xmm2, %xmm1");
+ asm volatile("vcvtuqq2ph %ymm2, %xmm1");
+ asm volatile("vcvtusi2sh %eax, %xmm2, %xmm1");
+ asm volatile("vcvtusi2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtusi2sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vcvtuw2ph %zmm2, %zmm1");
+ asm volatile("vcvtuw2ph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtuw2ph %xmm2, %xmm1");
+ asm volatile("vcvtuw2ph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtuw2ph %ymm2, %ymm1");
+ asm volatile("vcvtuw2ph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vcvtw2ph %zmm2, %zmm1");
+ asm volatile("vcvtw2ph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vcvtw2ph %xmm2, %xmm1");
+ asm volatile("vcvtw2ph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vcvtw2ph %ymm2, %ymm1");
+ asm volatile("vcvtw2ph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vdivph %zmm3, %zmm2, %zmm1");
+ asm volatile("vdivph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vdivph %xmm3, %xmm2, %xmm1");
+ asm volatile("vdivph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vdivph %ymm3, %ymm2, %ymm1");
+ asm volatile("vdivph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vdivsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vdivsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfcmaddcph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfcmaddcph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfcmaddcph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfcmaddcph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfcmaddcph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfcmaddcph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfcmaddcsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfcmaddcsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfcmulcph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfcmulcph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfcmulcph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfcmulcph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfcmulcph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfcmulcph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfcmulcsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfcmulcsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmadd132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmadd132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd132sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd132sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmadd213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmadd213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd213sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd213sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmadd231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmadd231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmadd231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmadd231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmadd231sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmadd231sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddcph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmaddcph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddcph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddcph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddcph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmaddcph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddcsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddcsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmaddsub132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddsub213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmaddsub213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmaddsub231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmaddsub231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmaddsub231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmaddsub231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsub132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsub132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub132sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub132sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsub213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsub213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub213sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub213sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsub231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsub231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsub231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsub231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsub231sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsub231sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsubadd132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsubadd213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsubadd213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmsubadd231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmsubadd231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmsubadd231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmsubadd231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmulcph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfmulcph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfmulcph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmulcph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfmulcph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfmulcph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfmulcsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfmulcsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmadd132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmadd132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd132sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd132sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmadd213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmadd213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd213sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd213sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmadd231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmadd231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmadd231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmadd231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmadd231sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmadd231sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub132ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmsub132ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub132ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub132ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub132ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmsub132ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub132sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub132sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub213ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmsub213ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub213ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub213ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub213ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmsub213ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub213sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub213sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub231ph %zmm3, %zmm2, %zmm1");
+ asm volatile("vfnmsub231ph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vfnmsub231ph %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub231ph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfnmsub231ph %ymm3, %ymm2, %ymm1");
+ asm volatile("vfnmsub231ph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vfnmsub231sh %xmm3, %xmm2, %xmm1");
+ asm volatile("vfnmsub231sh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vfpclassph $0x12, %zmm1, %k5");
+ asm volatile("vfpclassph $0x12, %xmm1, %k5");
+ asm volatile("vfpclassph $0x12, %ymm1, %k5");
+ asm volatile("vfpclasssh $0x12, %xmm1, %k5");
+ asm volatile("vfpclasssh $0x12, 0x12345678(%eax,%ecx,8), %k5");
+ asm volatile("vgetexpph %zmm2, %zmm1");
+ asm volatile("vgetexpph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vgetexpph %xmm2, %xmm1");
+ asm volatile("vgetexpph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vgetexpph %ymm2, %ymm1");
+ asm volatile("vgetexpph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vgetexpsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vgetexpsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vgetmantph $0x12, %zmm2, %zmm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vgetmantph $0x12, %xmm2, %xmm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vgetmantph $0x12, %ymm2, %ymm1");
+ asm volatile("vgetmantph $0x12, 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vgetmantsh $0x12, %xmm3, %xmm2, %xmm1");
+ asm volatile("vgetmantsh $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vmaxph %zmm3, %zmm2, %zmm1");
+ asm volatile("vmaxph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vmaxph %xmm3, %xmm2, %xmm1");
+ asm volatile("vmaxph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vmaxph %ymm3, %ymm2, %ymm1");
+ asm volatile("vmaxph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vmaxsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vmaxsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vminph %zmm3, %zmm2, %zmm1");
+ asm volatile("vminph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vminph %xmm3, %xmm2, %xmm1");
+ asm volatile("vminph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vminph %ymm3, %ymm2, %ymm1");
+ asm volatile("vminph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vminsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vminsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vmovsh %xmm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vmovsh 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vmovsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vmovw %xmm1, %eax");
+ asm volatile("vmovw %xmm1, 0x12345678(%eax,%ecx,8)");
+ asm volatile("vmovw %eax, %xmm1");
+ asm volatile("vmovw 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vmulph %zmm3, %zmm2, %zmm1");
+ asm volatile("vmulph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vmulph %xmm3, %xmm2, %xmm1");
+ asm volatile("vmulph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vmulph %ymm3, %ymm2, %ymm1");
+ asm volatile("vmulph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vmulsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vmulsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vrcpph %zmm2, %zmm1");
+ asm volatile("vrcpph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vrcpph %xmm2, %xmm1");
+ asm volatile("vrcpph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vrcpph %ymm2, %ymm1");
+ asm volatile("vrcpph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vrcpsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vrcpsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vreduceph $0x12, %zmm2, %zmm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vreduceph $0x12, %xmm2, %xmm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vreduceph $0x12, %ymm2, %ymm1");
+ asm volatile("vreduceph $0x12, 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vreducesh $0x12, %xmm3, %xmm2, %xmm1");
+ asm volatile("vreducesh $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vrndscaleph $0x12, %zmm2, %zmm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vrndscaleph $0x12, %xmm2, %xmm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vrndscaleph $0x12, %ymm2, %ymm1");
+ asm volatile("vrndscaleph $0x12, 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vrndscalesh $0x12, %xmm3, %xmm2, %xmm1");
+ asm volatile("vrndscalesh $0x12, 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vrsqrtph %zmm2, %zmm1");
+ asm volatile("vrsqrtph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vrsqrtph %xmm2, %xmm1");
+ asm volatile("vrsqrtph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vrsqrtph %ymm2, %ymm1");
+ asm volatile("vrsqrtph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vrsqrtsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vrsqrtsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vscalefph %zmm3, %zmm2, %zmm1");
+ asm volatile("vscalefph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vscalefph %xmm3, %xmm2, %xmm1");
+ asm volatile("vscalefph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vscalefph %ymm3, %ymm2, %ymm1");
+ asm volatile("vscalefph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vscalefsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vscalefsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vsqrtph %zmm2, %zmm1");
+ asm volatile("vsqrtph 0x12345678(%eax,%ecx,8), %zmm1");
+ asm volatile("vsqrtph %xmm2, %xmm1");
+ asm volatile("vsqrtph 0x12345678(%eax,%ecx,8), %xmm1");
+ asm volatile("vsqrtph %ymm2, %ymm1");
+ asm volatile("vsqrtph 0x12345678(%eax,%ecx,8), %ymm1");
+ asm volatile("vsqrtsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vsqrtsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vsubph %zmm3, %zmm2, %zmm1");
+ asm volatile("vsubph 0x12345678(%eax,%ecx,8), %zmm2, %zmm1");
+ asm volatile("vsubph %xmm3, %xmm2, %xmm1");
+ asm volatile("vsubph 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vsubph %ymm3, %ymm2, %ymm1");
+ asm volatile("vsubph 0x12345678(%eax,%ecx,8), %ymm2, %ymm1");
+ asm volatile("vsubsh %xmm3, %xmm2, %xmm1");
+ asm volatile("vsubsh 0x12345678(%eax,%ecx,8), %xmm2, %xmm1");
+ asm volatile("vucomish %xmm2, %xmm1");
+ asm volatile("vucomish 0x12345678(%eax,%ecx,8), %xmm1");
+
#endif /* #ifndef __x86_64__ */
+ /* Prediction history reset */
+
+ asm volatile("hreset $0");
+
+ /* Serialize instruction execution */
+
+ asm volatile("serialize");
+
+ /* TSX suspend load address tracking */
+
+ asm volatile("xresldtrk");
+ asm volatile("xsusldtrk");
+
/* SGX */
asm volatile("encls");
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
index 745f29adb14b..735257d205b5 100644
--- a/tools/perf/arch/x86/tests/insn-x86.c
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
-#include "../../../../arch/x86/include/asm/insn.h"
#include <string.h>
#include "debug.h"
#include "tests/tests.h"
#include "arch-tests.h"
+#include "../../../../arch/x86/include/asm/insn.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
@@ -29,6 +29,8 @@ struct test_data test_data_64[] = {
#include "insn-x86-dat-64.c"
{{0x0f, 0x01, 0xee}, 3, 0, NULL, NULL, "0f 01 ee \trdpkru"},
{{0x0f, 0x01, 0xef}, 3, 0, NULL, NULL, "0f 01 ef \twrpkru"},
+ {{0xf2, 0x0f, 0x01, 0xca}, 4, 0, "erets", "indirect", "f2 0f 01 ca \terets"},
+ {{0xf3, 0x0f, 0x01, 0xca}, 4, 0, "eretu", "indirect", "f3 0f 01 ca \teretu"},
{{0}, 0, 0, NULL, NULL, NULL},
};
@@ -48,6 +50,9 @@ static int get_op(const char *op_str)
{"int", INTEL_PT_OP_INT},
{"syscall", INTEL_PT_OP_SYSCALL},
{"sysret", INTEL_PT_OP_SYSRET},
+ {"vmentry", INTEL_PT_OP_VMENTRY},
+ {"erets", INTEL_PT_OP_ERETS},
+ {"eretu", INTEL_PT_OP_ERETU},
{NULL, 0},
};
struct val_data *val;
@@ -95,13 +100,12 @@ static int get_branch(const char *branch_str)
static int test_data_item(struct test_data *dat, int x86_64)
{
struct intel_pt_insn intel_pt_insn;
+ int op, branch, ret;
struct insn insn;
- int op, branch;
-
- insn_init(&insn, dat->data, MAX_INSN_SIZE, x86_64);
- insn_get_length(&insn);
- if (!insn_complete(&insn)) {
+ ret = insn_decode(&insn, dat->data, MAX_INSN_SIZE,
+ x86_64 ? INSN_MODE_64 : INSN_MODE_32);
+ if (ret < 0) {
pr_debug("Failed to decode: %s\n", dat->asm_rep);
return -1;
}
@@ -173,7 +177,7 @@ static int test_data_set(struct test_data *dat_set, int x86_64)
* verbose (-v) option to see all the instructions and whether or not they
* decoded successfully.
*/
-int test__insn_x86(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__insn_x86(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
int ret = 0;
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
index 3ec562a2aaba..360a082fc928 100644
--- a/tools/perf/arch/x86/tests/intel-cqm.c
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -37,7 +37,7 @@ static pid_t spawn(void)
* the last read counter value to avoid triggering a WARN_ON_ONCE() in
* smp_call_function_many() caused by sending IPIs from NMI context.
*/
-int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__intel_cqm_count_nmi_context(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct evlist *evlist = NULL;
struct evsel *evsel = NULL;
@@ -52,11 +52,11 @@ int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subt
evlist = evlist__new();
if (!evlist) {
- pr_debug("perf_evlist__new failed\n");
+ pr_debug("evlist__new failed\n");
return TEST_FAIL;
}
- ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
+ ret = parse_event(evlist, "intel_cqm/llc_occupancy/");
if (ret) {
pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
err = TEST_SKIP;
diff --git a/tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c b/tools/perf/arch/x86/tests/intel-pt-test.c
index 901bf1f449c4..70b7f79396b1 100644
--- a/tools/perf/arch/x86/tests/intel-pt-pkt-decoder-test.c
+++ b/tools/perf/arch/x86/tests/intel-pt-test.c
@@ -1,12 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/compiler.h>
+#include <linux/bits.h>
#include <string.h>
+#include <cpuid.h>
+#include <sched.h>
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
#include "debug.h"
#include "tests/tests.h"
#include "arch-tests.h"
+#include "cpumap.h"
/**
* struct test_data - Test data.
@@ -17,7 +22,7 @@
* @new_ctx: expected new packet context
* @ctx_unchanged: the packet context must not change
*/
-struct test_data {
+static struct test_data {
int len;
u8 bytes[INTEL_PT_PKT_MAX_SZ];
enum intel_pt_pkt_ctx ctx;
@@ -66,12 +71,15 @@ struct test_data {
{7, {0x9d, 1, 2, 3, 4, 5, 6}, 0, {INTEL_PT_FUP, 4, 0x60504030201}, 0, 0 },
{9, {0xdd, 1, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_FUP, 6, 0x807060504030201}, 0, 0 },
/* Paging Information Packet */
- {8, {0x02, 0x43, 2, 4, 6, 8, 10, 12}, 0, {INTEL_PT_PIP, 0, 0x60504030201}, 0, 0 },
- {8, {0x02, 0x43, 3, 4, 6, 8, 10, 12}, 0, {INTEL_PT_PIP, 0, 0x60504030201 | (1ULL << 63)}, 0, 0 },
+ {8, {0x02, 0x43, 2, 4, 6, 8, 10, 12}, 0, {INTEL_PT_PIP, 0, 0xC0A08060402}, 0, 0 },
+ {8, {0x02, 0x43, 3, 4, 6, 8, 10, 12}, 0, {INTEL_PT_PIP, 0, 0xC0A08060403}, 0, 0 },
/* Mode Exec Packet */
{2, {0x99, 0x00}, 0, {INTEL_PT_MODE_EXEC, 0, 16}, 0, 0 },
- {2, {0x99, 0x01}, 0, {INTEL_PT_MODE_EXEC, 0, 64}, 0, 0 },
- {2, {0x99, 0x02}, 0, {INTEL_PT_MODE_EXEC, 0, 32}, 0, 0 },
+ {2, {0x99, 0x01}, 0, {INTEL_PT_MODE_EXEC, 1, 64}, 0, 0 },
+ {2, {0x99, 0x02}, 0, {INTEL_PT_MODE_EXEC, 2, 32}, 0, 0 },
+ {2, {0x99, 0x04}, 0, {INTEL_PT_MODE_EXEC, 4, 16}, 0, 0 },
+ {2, {0x99, 0x05}, 0, {INTEL_PT_MODE_EXEC, 5, 64}, 0, 0 },
+ {2, {0x99, 0x06}, 0, {INTEL_PT_MODE_EXEC, 6, 32}, 0, 0 },
/* Mode TSX Packet */
{2, {0x99, 0x20}, 0, {INTEL_PT_MODE_TSX, 0, 0}, 0, 0 },
{2, {0x99, 0x21}, 0, {INTEL_PT_MODE_TSX, 0, 1}, 0, 0 },
@@ -166,6 +174,14 @@ struct test_data {
{2, {0x02, 0xb3}, INTEL_PT_BLK_4_CTX, {INTEL_PT_BEP_IP, 0, 0}, 0, 0 },
{2, {0x02, 0x33}, INTEL_PT_BLK_8_CTX, {INTEL_PT_BEP, 0, 0}, 0, 0 },
{2, {0x02, 0xb3}, INTEL_PT_BLK_8_CTX, {INTEL_PT_BEP_IP, 0, 0}, 0, 0 },
+ /* Control Flow Event Packet */
+ {4, {0x02, 0x13, 0x01, 0x03}, 0, {INTEL_PT_CFE, 1, 3}, 0, 0 },
+ {4, {0x02, 0x13, 0x81, 0x03}, 0, {INTEL_PT_CFE_IP, 1, 3}, 0, 0 },
+ {4, {0x02, 0x13, 0x1f, 0x00}, 0, {INTEL_PT_CFE, 0x1f, 0}, 0, 0 },
+ {4, {0x02, 0x13, 0x9f, 0xff}, 0, {INTEL_PT_CFE_IP, 0x1f, 0xff}, 0, 0 },
+ /* Event Data Packet */
+ {11, {0x02, 0x53, 0x09, 1, 2, 3, 4, 5, 6, 7}, 0, {INTEL_PT_EVD, 0x09, 0x7060504030201}, 0, 0 },
+ {11, {0x02, 0x53, 0x3f, 2, 3, 4, 5, 6, 7, 8}, 0, {INTEL_PT_EVD, 0x3f, 0x8070605040302}, 0, 0 },
/* Terminator */
{0, {0}, 0, {0, 0, 0}, 0, 0 },
};
@@ -289,7 +305,7 @@ static int test_one(struct test_data *d)
* This test feeds byte sequences to the Intel PT packet decoder and checks the
* results. Changes to the packet context are also checked.
*/
-int test__intel_pt_pkt_decoder(struct test *test __maybe_unused, int subtest __maybe_unused)
+int test__intel_pt_pkt_decoder(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
struct test_data *d = data;
int ret;
@@ -302,3 +318,152 @@ int test__intel_pt_pkt_decoder(struct test *test __maybe_unused, int subtest __m
return TEST_OK;
}
+
+static int setaffinity(int cpu)
+{
+ cpu_set_t cpu_set;
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(cpu, &cpu_set);
+ if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
+ pr_debug("sched_setaffinity() failed for CPU %d\n", cpu);
+ return -1;
+ }
+ return 0;
+}
+
+#define INTEL_PT_ADDR_FILT_CNT_MASK GENMASK(2, 0)
+#define INTEL_PT_SUBLEAF_CNT 2
+#define CPUID_REG_CNT 4
+
+struct cpuid_result {
+ union {
+ struct {
+ unsigned int eax;
+ unsigned int ebx;
+ unsigned int ecx;
+ unsigned int edx;
+ };
+ unsigned int reg[CPUID_REG_CNT];
+ };
+};
+
+struct pt_caps {
+ struct cpuid_result subleaf[INTEL_PT_SUBLEAF_CNT];
+};
+
+static int get_pt_caps(int cpu, struct pt_caps *caps)
+{
+ struct cpuid_result r;
+ int i;
+
+ if (setaffinity(cpu))
+ return -1;
+
+ memset(caps, 0, sizeof(*caps));
+
+ for (i = 0; i < INTEL_PT_SUBLEAF_CNT; i++) {
+ __get_cpuid_count(20, i, &r.eax, &r.ebx, &r.ecx, &r.edx);
+ pr_debug("CPU %d CPUID leaf 20 subleaf %d\n", cpu, i);
+ pr_debug("eax = 0x%08x\n", r.eax);
+ pr_debug("ebx = 0x%08x\n", r.ebx);
+ pr_debug("ecx = 0x%08x\n", r.ecx);
+ pr_debug("edx = 0x%08x\n", r.edx);
+ caps->subleaf[i] = r;
+ }
+
+ return 0;
+}
+
+static bool is_hybrid(void)
+{
+ unsigned int eax, ebx, ecx, edx = 0;
+ bool result;
+
+ __get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx);
+ result = edx & BIT(15);
+ pr_debug("Is %shybrid : CPUID leaf 7 subleaf 0 edx %#x (bit-15 indicates hybrid)\n",
+ result ? "" : "not ", edx);
+ return result;
+}
+
+static int compare_caps(int cpu, struct pt_caps *caps, struct pt_caps *caps0)
+{
+ struct pt_caps mask = { /* Mask of bits to check */
+ .subleaf = {
+ [0] = {
+ .ebx = GENMASK(8, 0),
+ .ecx = GENMASK(3, 0),
+ },
+ [1] = {
+ .eax = GENMASK(31, 16),
+ .ebx = GENMASK(31, 0),
+ }
+ }
+ };
+ unsigned int m, reg, reg0;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < INTEL_PT_SUBLEAF_CNT; i++) {
+ for (j = 0; j < CPUID_REG_CNT; j++) {
+ m = mask.subleaf[i].reg[j];
+ reg = m & caps->subleaf[i].reg[j];
+ reg0 = m & caps0->subleaf[i].reg[j];
+ if ((reg & reg0) != reg0) {
+ pr_debug("CPU %d subleaf %d reg %d FAIL %#x vs %#x\n",
+ cpu, i, j, reg, reg0);
+ ret = -1;
+ }
+ }
+ }
+
+ m = INTEL_PT_ADDR_FILT_CNT_MASK;
+ reg = m & caps->subleaf[1].eax;
+ reg0 = m & caps0->subleaf[1].eax;
+ if (reg < reg0) {
+ pr_debug("CPU %d subleaf 1 reg 0 FAIL address filter count %#x vs %#x\n",
+ cpu, reg, reg0);
+ ret = -1;
+ }
+
+ if (!ret)
+ pr_debug("CPU %d OK\n", cpu);
+
+ return ret;
+}
+
+int test__intel_pt_hybrid_compat(struct test_suite *test, int subtest)
+{
+ int max_cpu = cpu__max_cpu().cpu;
+ struct pt_caps last_caps;
+ struct pt_caps caps0;
+ int ret = TEST_OK;
+ int cpu;
+
+ if (!is_hybrid()) {
+ test->test_cases[subtest].skip_reason = "not hybrid";
+ return TEST_SKIP;
+ }
+
+ if (get_pt_caps(0, &caps0))
+ return TEST_FAIL;
+
+ for (cpu = 1, last_caps = caps0; cpu < max_cpu; cpu++) {
+ struct pt_caps caps;
+
+ if (get_pt_caps(cpu, &caps)) {
+ pr_debug("CPU %d not found\n", cpu);
+ continue;
+ }
+ if (!memcmp(&caps, &last_caps, sizeof(caps))) {
+ pr_debug("CPU %d same caps as previous CPU\n", cpu);
+ continue;
+ }
+ if (compare_caps(cpu, &caps, &caps0))
+ ret = TEST_FAIL;
+ last_caps = caps;
+ }
+
+ return ret;
+}
diff --git a/tools/perf/arch/x86/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
deleted file mode 100644
index 1ea916656a2d..000000000000
--- a/tools/perf/arch/x86/tests/rdpmc.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <errno.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include "perf-sys.h"
-#include "debug.h"
-#include "tests/tests.h"
-#include "cloexec.h"
-#include "event.h"
-#include <internal/lib.h> // page_size
-#include "arch-tests.h"
-
-static u64 rdpmc(unsigned int counter)
-{
- unsigned int low, high;
-
- asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
-
- return low | ((u64)high) << 32;
-}
-
-static u64 rdtsc(void)
-{
- unsigned int low, high;
-
- asm volatile("rdtsc" : "=a" (low), "=d" (high));
-
- return low | ((u64)high) << 32;
-}
-
-static u64 mmap_read_self(void *addr)
-{
- struct perf_event_mmap_page *pc = addr;
- u32 seq, idx, time_mult = 0, time_shift = 0;
- u64 count, cyc = 0, time_offset = 0, enabled, running, delta;
-
- do {
- seq = pc->lock;
- barrier();
-
- enabled = pc->time_enabled;
- running = pc->time_running;
-
- if (enabled != running) {
- cyc = rdtsc();
- time_mult = pc->time_mult;
- time_shift = pc->time_shift;
- time_offset = pc->time_offset;
- }
-
- idx = pc->index;
- count = pc->offset;
- if (idx)
- count += rdpmc(idx - 1);
-
- barrier();
- } while (pc->lock != seq);
-
- if (enabled != running) {
- u64 quot, rem;
-
- quot = (cyc >> time_shift);
- rem = cyc & (((u64)1 << time_shift) - 1);
- delta = time_offset + quot * time_mult +
- ((rem * time_mult) >> time_shift);
-
- enabled += delta;
- if (idx)
- running += delta;
-
- quot = count / running;
- rem = count % running;
- count = quot * enabled + (rem * enabled) / running;
- }
-
- return count;
-}
-
-/*
- * If the RDPMC instruction faults then signal this back to the test parent task:
- */
-static void segfault_handler(int sig __maybe_unused,
- siginfo_t *info __maybe_unused,
- void *uc __maybe_unused)
-{
- exit(-1);
-}
-
-static int __test__rdpmc(void)
-{
- volatile int tmp = 0;
- u64 i, loops = 1000;
- int n;
- int fd;
- void *addr;
- struct perf_event_attr attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_INSTRUCTIONS,
- .exclude_kernel = 1,
- };
- u64 delta_sum = 0;
- struct sigaction sa;
- char sbuf[STRERR_BUFSIZE];
-
- sigfillset(&sa.sa_mask);
- sa.sa_sigaction = segfault_handler;
- sa.sa_flags = 0;
- sigaction(SIGSEGV, &sa, NULL);
-
- fd = sys_perf_event_open(&attr, 0, -1, -1,
- perf_event_open_cloexec_flag());
- if (fd < 0) {
- pr_err("Error: sys_perf_event_open() syscall returned "
- "with %d (%s)\n", fd,
- str_error_r(errno, sbuf, sizeof(sbuf)));
- return -1;
- }
-
- addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
- if (addr == (void *)(-1)) {
- pr_err("Error: mmap() syscall returned with (%s)\n",
- str_error_r(errno, sbuf, sizeof(sbuf)));
- goto out_close;
- }
-
- for (n = 0; n < 6; n++) {
- u64 stamp, now, delta;
-
- stamp = mmap_read_self(addr);
-
- for (i = 0; i < loops; i++)
- tmp++;
-
- now = mmap_read_self(addr);
- loops *= 10;
-
- delta = now - stamp;
- pr_debug("%14d: %14Lu\n", n, (long long)delta);
-
- delta_sum += delta;
- }
-
- munmap(addr, page_size);
- pr_debug(" ");
-out_close:
- close(fd);
-
- if (!delta_sum)
- return -1;
-
- return 0;
-}
-
-int test__rdpmc(struct test *test __maybe_unused, int subtest __maybe_unused)
-{
- int status = 0;
- int wret = 0;
- int ret;
- int pid;
-
- pid = fork();
- if (pid < 0)
- return -1;
-
- if (!pid) {
- ret = __test__rdpmc();
-
- exit(ret);
- }
-
- wret = waitpid(pid, &status, 0);
- if (wret < 0 || status)
- return -1;
-
- return 0;
-}
diff --git a/tools/perf/arch/x86/tests/sample-parsing.c b/tools/perf/arch/x86/tests/sample-parsing.c
new file mode 100644
index 000000000000..a061e8619267
--- /dev/null
+++ b/tools/perf/arch/x86/tests/sample-parsing.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stdbool.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "event.h"
+#include "evsel.h"
+#include "debug.h"
+#include "util/sample.h"
+#include "util/synthetic-events.h"
+
+#include "tests/tests.h"
+#include "arch-tests.h"
+
+#define COMP(m) do { \
+ if (s1->m != s2->m) { \
+ pr_debug("Samples differ at '"#m"'\n"); \
+ return false; \
+ } \
+} while (0)
+
+static bool samples_same(const struct perf_sample *s1,
+ const struct perf_sample *s2,
+ u64 type)
+{
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ COMP(ins_lat);
+ COMP(retire_lat);
+ }
+
+ return true;
+}
+
+static int do_test(u64 sample_type)
+{
+ struct evsel evsel = {
+ .needs_swap = false,
+ .core = {
+ .attr = {
+ .sample_type = sample_type,
+ .read_format = 0,
+ },
+ },
+ };
+ union perf_event *event;
+ struct perf_sample sample = {
+ .weight = 101,
+ .ins_lat = 102,
+ .retire_lat = 103,
+ };
+ struct perf_sample sample_out;
+ size_t i, sz, bufsz;
+ int err, ret = -1;
+
+ sz = perf_event__sample_event_size(&sample, sample_type, 0);
+ bufsz = sz + 4096; /* Add a bit for overrun checking */
+ event = malloc(bufsz);
+ if (!event) {
+ pr_debug("malloc failed\n");
+ return -1;
+ }
+
+ memset(event, 0xff, bufsz);
+ event->header.type = PERF_RECORD_SAMPLE;
+ event->header.misc = 0;
+ event->header.size = sz;
+
+ err = perf_event__synthesize_sample(event, sample_type, 0, &sample);
+ if (err) {
+ pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
+ "perf_event__synthesize_sample", sample_type, err);
+ goto out_free;
+ }
+
+ /* The data does not contain 0xff so we use that to check the size */
+ for (i = bufsz; i > 0; i--) {
+ if (*(i - 1 + (u8 *)event) != 0xff)
+ break;
+ }
+ if (i != sz) {
+ pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
+ i, sz);
+ goto out_free;
+ }
+
+ evsel.sample_size = __evsel__sample_size(sample_type);
+
+ err = evsel__parse_sample(&evsel, event, &sample_out);
+ if (err) {
+ pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
+ "evsel__parse_sample", sample_type, err);
+ goto out_free;
+ }
+
+ if (!samples_same(&sample, &sample_out, sample_type)) {
+ pr_debug("parsing failed for sample_type %#"PRIx64"\n",
+ sample_type);
+ goto out_free;
+ }
+
+ ret = 0;
+out_free:
+ free(event);
+
+ return ret;
+}
+
+/**
+ * test__x86_sample_parsing - test X86 specific sample parsing
+ *
+ * This function implements a test that synthesizes a sample event, parses it
+ * and then checks that the parsed sample matches the original sample. If the
+ * test passes %0 is returned, otherwise %-1 is returned.
+ *
+ * For now, the PERF_SAMPLE_WEIGHT_STRUCT is the only X86 specific sample type.
+ * The test only checks the PERF_SAMPLE_WEIGHT_STRUCT type.
+ */
+int test__x86_sample_parsing(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
+{
+ return do_test(PERF_SAMPLE_WEIGHT_STRUCT);
+}
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build
index 47f9c56e744f..195ccfdef7aa 100644
--- a/tools/perf/arch/x86/util/Build
+++ b/tools/perf/arch/x86/util/Build
@@ -1,11 +1,15 @@
perf-y += header.o
perf-y += tsc.o
perf-y += pmu.o
-perf-y += kvm-stat.o
+perf-$(CONFIG_LIBTRACEEVENT) += kvm-stat.o
perf-y += perf_regs.o
-perf-y += group.o
+perf-y += topdown.o
perf-y += machine.o
perf-y += event.o
+perf-y += evlist.o
+perf-y += mem-events.o
+perf-y += evsel.o
+perf-y += iostat.o
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o
diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c
index 3e6791531ca5..546feda08428 100644
--- a/tools/perf/arch/x86/util/archinsn.c
+++ b/tools/perf/arch/x86/util/archinsn.c
@@ -1,17 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
-#include "../../../../arch/x86/include/asm/insn.h"
#include "archinsn.h"
#include "event.h"
#include "machine.h"
#include "thread.h"
#include "symbol.h"
+#include "../../../../arch/x86/include/asm/insn.h"
void arch_fetch_insn(struct perf_sample *sample,
struct thread *thread,
struct machine *machine)
{
struct insn insn;
- int len;
+ int len, ret;
bool is64bit = false;
if (!sample->ip)
@@ -19,8 +19,9 @@ void arch_fetch_insn(struct perf_sample *sample,
len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit);
if (len <= 0)
return;
- insn_init(&insn, sample->insn, len, is64bit);
- insn_get_length(&insn);
- if (insn_complete(&insn) && insn.length <= len)
+
+ ret = insn_decode(&insn, sample->insn, len,
+ is64bit ? INSN_MODE_64 : INSN_MODE_32);
+ if (ret >= 0 && insn.length <= len)
sample->insn_len = insn.length;
}
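
The archinsn.c change above, like the insn-x86.c test change earlier in this patch, replaces the insn_init()/insn_get_length()/insn_complete() sequence with a single insn_decode() call whose negative return signals failure. A minimal sketch of that call pattern (hypothetical helper, not part of the patch; it assumes the tools/ copy of insn.h is reachable the same way the sources above include it):

#include <stdbool.h>
#include "../../../../arch/x86/include/asm/insn.h"

/* Return the decoded length of one instruction, or -1 on decode failure. */
static int decode_one_insn(const unsigned char *buf, int buf_len, bool is64bit)
{
        struct insn insn;
        int ret;

        ret = insn_decode(&insn, buf, buf_len,
                          is64bit ? INSN_MODE_64 : INSN_MODE_32);
        if (ret < 0)
                return -1;

        return insn.length;
}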
diff --git a/tools/perf/arch/x86/util/auxtrace.c b/tools/perf/arch/x86/util/auxtrace.c
index 3da506e13f49..330d03216b0e 100644
--- a/tools/perf/arch/x86/util/auxtrace.c
+++ b/tools/perf/arch/x86/util/auxtrace.c
@@ -26,11 +26,7 @@ struct auxtrace_record *auxtrace_record__init_intel(struct evlist *evlist,
bool found_bts = false;
intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
- if (intel_pt_pmu)
- intel_pt_pmu->auxtrace = true;
intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
- if (intel_bts_pmu)
- intel_bts_pmu->auxtrace = true;
evlist__for_each_entry(evlist, evsel) {
if (intel_pt_pmu && evsel->core.attr.type == intel_pt_pmu->type)
diff --git a/tools/perf/arch/x86/util/cpuid.h b/tools/perf/arch/x86/util/cpuid.h
new file mode 100644
index 000000000000..0a3ae0ace7e9
--- /dev/null
+++ b/tools/perf/arch/x86/util/cpuid.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef PERF_CPUID_H
+#define PERF_CPUID_H 1
+
+
+static inline void
+cpuid(unsigned int op, unsigned int op2, unsigned int *a, unsigned int *b,
+ unsigned int *c, unsigned int *d)
+{
+ /*
+ * Preserve %ebx/%rbx register by either placing it in %rdi or saving it
+ * on the stack - x86-64 needs to avoid the stack red zone. In PIC
+ * compilations %ebx contains the address of the global offset
+ * table. %rbx is occasionally used to address stack variables in
+ * presence of dynamic allocas.
+ */
+ asm(
+#if defined(__x86_64__)
+ "mov %%rbx, %%rdi\n"
+ "cpuid\n"
+ "xchg %%rdi, %%rbx\n"
+#else
+ "pushl %%ebx\n"
+ "cpuid\n"
+ "movl %%ebx, %%edi\n"
+ "popl %%ebx\n"
+#endif
+ : "=a"(*a), "=D"(*b), "=c"(*c), "=d"(*d)
+ : "a"(op), "2"(op2));
+}
+
+void get_cpuid_0(char *vendor, unsigned int *lvl);
+
+#endif
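
As its comment explains, the inline cpuid() wrapper added here preserves %ebx/%rbx so it stays safe in PIC builds. A tiny caller sketch (hypothetical standalone program, not part of the patch; leaf 0 reports the maximum basic leaf in EAX and the vendor string across EBX, EDX, ECX, which is what get_cpuid_0() reassembles in header.c below):

#include <stdio.h>
#include "cpuid.h"      /* the header added above */

int main(void)
{
        unsigned int a, b, c, d;

        cpuid(0, 0, &a, &b, &c, &d);
        /* Vendor bytes come in EBX, EDX, ECX order, e.g. "GenuineIntel". */
        printf("max basic leaf %u, vendor %.4s%.4s%.4s\n",
               a, (char *)&b, (char *)&d, (char *)&c);
        return 0;
}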
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index 047dc00eafa6..5741ffe47312 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -2,6 +2,7 @@
#include <linux/types.h>
#include <linux/string.h>
#include <linux/zalloc.h>
+#include <stdlib.h>
#include "../../../util/event.h"
#include "../../../util/synthetic-events.h"
@@ -9,6 +10,7 @@
#include "../../../util/tool.h"
#include "../../../util/map.h"
#include "../../../util/debug.h"
+#include "util/sample.h"
#if defined(__x86_64__)
@@ -17,8 +19,8 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
struct machine *machine)
{
int rc = 0;
- struct map *pos;
- struct maps *kmaps = &machine->kmaps;
+ struct map_rb_node *pos;
+ struct maps *kmaps = machine__kernel_maps(machine);
union perf_event *event = zalloc(sizeof(event->mmap) +
machine->id_hdr_size);
@@ -31,11 +33,12 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
maps__for_each_entry(kmaps, pos) {
struct kmap *kmap;
size_t size;
+ struct map *map = pos->map;
- if (!__map__is_extra_kernel_map(pos))
+ if (!__map__is_extra_kernel_map(map))
continue;
- kmap = map__kmap(pos);
+ kmap = map__kmap(map);
size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
@@ -56,9 +59,9 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
event->mmap.header.size = size;
- event->mmap.start = pos->start;
- event->mmap.len = pos->end - pos->start;
- event->mmap.pgoff = pos->pgoff;
+ event->mmap.start = map__start(map);
+ event->mmap.len = map__size(map);
+ event->mmap.pgoff = map__pgoff(map);
event->mmap.pid = machine->pid;
strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
@@ -75,3 +78,49 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
}
#endif
+
+void arch_perf_parse_sample_weight(struct perf_sample *data,
+ const __u64 *array, u64 type)
+{
+ union perf_sample_weight weight;
+
+ weight.full = *array;
+ if (type & PERF_SAMPLE_WEIGHT)
+ data->weight = weight.full;
+ else {
+ data->weight = weight.var1_dw;
+ data->ins_lat = weight.var2_w;
+ data->retire_lat = weight.var3_w;
+ }
+}
+
+void arch_perf_synthesize_sample_weight(const struct perf_sample *data,
+ __u64 *array, u64 type)
+{
+ *array = data->weight;
+
+ if (type & PERF_SAMPLE_WEIGHT_STRUCT) {
+ *array &= 0xffffffff;
+ *array |= ((u64)data->ins_lat << 32);
+ *array |= ((u64)data->retire_lat << 48);
+ }
+}
+
+const char *arch_perf_header_entry(const char *se_header)
+{
+ if (!strcmp(se_header, "Local Pipeline Stage Cycle"))
+ return "Local Retire Latency";
+ else if (!strcmp(se_header, "Pipeline Stage Cycle"))
+ return "Retire Latency";
+
+ return se_header;
+}
+
+int arch_support_sort_key(const char *sort_key)
+{
+ if (!strcmp(sort_key, "p_stage_cyc"))
+ return 1;
+ if (!strcmp(sort_key, "local_p_stage_cyc"))
+ return 1;
+ return 0;
+}
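
The two helpers above agree on one 64-bit layout for PERF_SAMPLE_WEIGHT_STRUCT: bits 0-31 hold the weight (var1_dw), bits 32-47 the instruction latency (var2_w), and bits 48-63 the retire latency (var3_w). A standalone sketch of that packing using plain shifts instead of the perf_sample_weight union (illustration only, not perf code):

#include <stdint.h>
#include <stdio.h>

/* Pack the three fields the way the synthesize/parse helpers above expect. */
static uint64_t pack_weight_struct(uint32_t weight, uint16_t ins_lat, uint16_t retire_lat)
{
        return (uint64_t)weight |
               ((uint64_t)ins_lat << 32) |
               ((uint64_t)retire_lat << 48);
}

int main(void)
{
        uint64_t v = pack_weight_struct(101, 102, 103);

        printf("weight=%u ins_lat=%u retire_lat=%u\n",
               (unsigned int)(uint32_t)v,
               (unsigned int)(uint16_t)(v >> 32),
               (unsigned int)(uint16_t)(v >> 48));
        return 0;
}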
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
new file mode 100644
index 000000000000..d4193479a364
--- /dev/null
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include "util/pmu.h"
+#include "util/evlist.h"
+#include "util/parse-events.h"
+#include "util/event.h"
+#include "util/pmu-hybrid.h"
+#include "topdown.h"
+
+static int ___evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs)
+{
+ struct perf_cpu_map *cpus;
+ struct evsel *evsel, *n;
+ struct perf_pmu *pmu;
+ LIST_HEAD(head);
+ size_t i = 0;
+
+ for (i = 0; i < nr_attrs; i++)
+ event_attr_init(attrs + i);
+
+ if (!perf_pmu__has_hybrid())
+ return evlist__add_attrs(evlist, attrs, nr_attrs);
+
+ for (i = 0; i < nr_attrs; i++) {
+ if (attrs[i].type == PERF_TYPE_SOFTWARE) {
+ evsel = evsel__new(attrs + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ list_add_tail(&evsel->core.node, &head);
+ continue;
+ }
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ evsel = evsel__new(attrs + i);
+ if (evsel == NULL)
+ goto out_delete_partial_list;
+ evsel->core.attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+ cpus = perf_cpu_map__get(pmu->cpus);
+ evsel->core.cpus = cpus;
+ evsel->core.own_cpus = perf_cpu_map__get(cpus);
+ evsel->pmu_name = strdup(pmu->name);
+ list_add_tail(&evsel->core.node, &head);
+ }
+ }
+
+ evlist__splice_list_tail(evlist, &head);
+
+ return 0;
+
+out_delete_partial_list:
+ __evlist__for_each_entry_safe(&head, n, evsel)
+ evsel__delete(evsel);
+ return -1;
+}
+
+int arch_evlist__add_default_attrs(struct evlist *evlist,
+ struct perf_event_attr *attrs,
+ size_t nr_attrs)
+{
+ if (!nr_attrs)
+ return 0;
+
+ return ___evlist__add_default_attrs(evlist, attrs, nr_attrs);
+}
+
+int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
+{
+ if (topdown_sys_has_perf_metrics() &&
+ (!lhs->pmu_name || !strncmp(lhs->pmu_name, "cpu", 3))) {
+ /* Ensure the topdown slots comes first. */
+ if (strcasestr(lhs->name, "slots"))
+ return -1;
+ if (strcasestr(rhs->name, "slots"))
+ return 1;
+ /* Followed by topdown events. */
+ if (strcasestr(lhs->name, "topdown") && !strcasestr(rhs->name, "topdown"))
+ return -1;
+ if (!strcasestr(lhs->name, "topdown") && strcasestr(rhs->name, "topdown"))
+ return 1;
+ }
+
+ /* Default ordering by insertion index. */
+ return lhs->core.idx - rhs->core.idx;
+}
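
arch_evlist__cmp() above sorts core-PMU events so the topdown 'slots' event leads, other topdown events follow, and everything else keeps its insertion index. A simplified illustration of the same precedence over plain event-name strings (hypothetical struct and names; the real comparator also checks the PMU name and uses strcasestr()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ev { const char *name; int idx; };

/* Same precedence: "slots" first, then "topdown*", then insertion index. */
static int cmp_ev(const void *a, const void *b)
{
        const struct ev *l = a, *r = b;
        int ls = !!strstr(l->name, "slots"), rs = !!strstr(r->name, "slots");
        int lt = !!strstr(l->name, "topdown"), rt = !!strstr(r->name, "topdown");

        if (ls != rs)
                return rs - ls;
        if (lt != rt)
                return rt - lt;
        return l->idx - r->idx;
}

int main(void)
{
        struct ev evs[] = {
                { "instructions", 0 }, { "topdown-retiring", 1 },
                { "slots", 2 }, { "cycles", 3 },
        };

        qsort(evs, 4, sizeof(evs[0]), cmp_ev);
        /* Prints: slots, topdown-retiring, instructions, cycles */
        for (int i = 0; i < 4; i++)
                printf("%s\n", evs[i].name);
        return 0;
}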
diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c
new file mode 100644
index 000000000000..ea3972d785d1
--- /dev/null
+++ b/tools/perf/arch/x86/util/evsel.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdio.h>
+#include <stdlib.h>
+#include "util/evsel.h"
+#include "util/env.h"
+#include "util/pmu.h"
+#include "linux/string.h"
+#include "evsel.h"
+#include "util/debug.h"
+
+#define IBS_FETCH_L3MISSONLY (1ULL << 59)
+#define IBS_OP_L3MISSONLY (1ULL << 16)
+
+void arch_evsel__set_sample_weight(struct evsel *evsel)
+{
+ evsel__set_sample_bit(evsel, WEIGHT_STRUCT);
+}
+
+void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr)
+{
+ struct perf_env env = { .total_mem = 0, };
+
+ if (!perf_env__cpuid(&env))
+ return;
+
+ /*
+ * On AMD, precise cycles event sampling internally uses IBS pmu.
+ * But IBS does not have filtering capabilities and perf by default
+ * sets exclude_guest = 1. This makes IBS pmu event init fail and
+ * thus perf ends up doing non-precise sampling. Avoid it by clearing
+ * exclude_guest.
+ */
+ if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD"))
+ attr->exclude_guest = 0;
+
+ free(env.cpuid);
+}
+
+/* Check whether the evsel's PMU supports the perf metrics */
+bool evsel__sys_has_perf_metrics(const struct evsel *evsel)
+{
+ const char *pmu_name = evsel->pmu_name ? evsel->pmu_name : "cpu";
+
+ /*
+ * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU
+ * on a non-hybrid machine, "cpu_core" PMU on a hybrid machine.
+ * The slots event is only available for the core PMU, which
+ * supports the perf metrics feature.
+ * Checking both the PERF_TYPE_RAW type and the slots event
+ * should be good enough to detect the perf metrics feature.
+ */
+ if ((evsel->core.attr.type == PERF_TYPE_RAW) &&
+ pmu_have_event(pmu_name, "slots"))
+ return true;
+
+ return false;
+}
+
+bool arch_evsel__must_be_in_group(const struct evsel *evsel)
+{
+ if (!evsel__sys_has_perf_metrics(evsel))
+ return false;
+
+ return evsel->name &&
+ (strcasestr(evsel->name, "slots") ||
+ strcasestr(evsel->name, "topdown"));
+}
+
+int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
+{
+ u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK;
+ u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT;
+ const char *event_name;
+
+ if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event])
+ event_name = evsel__hw_names[event];
+ else
+ event_name = "unknown-hardware";
+
+ /* The PMU type is not required for the non-hybrid platform. */
+ if (!pmu)
+ return scnprintf(bf, size, "%s", event_name);
+
+ return scnprintf(bf, size, "%s/%s/",
+ evsel->pmu_name ? evsel->pmu_name : "cpu",
+ event_name);
+}
+
+static void ibs_l3miss_warn(void)
+{
+ pr_warning(
+"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n"
+"and tagged operation does not cause L3 Miss. This causes sampling period skew.\n");
+}
+
+void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr)
+{
+ struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu;
+ static int warned_once;
+ /* 0: Uninitialized, 1: Yes, -1: No */
+ static int is_amd;
+
+ if (warned_once || is_amd == -1)
+ return;
+
+ if (!is_amd) {
+ struct perf_env *env = evsel__env(evsel);
+
+ if (!perf_env__cpuid(env) || !env->cpuid ||
+ !strstarts(env->cpuid, "AuthenticAMD")) {
+ is_amd = -1;
+ return;
+ }
+ is_amd = 1;
+ }
+
+ evsel_pmu = evsel__find_pmu(evsel);
+ if (!evsel_pmu)
+ return;
+
+ ibs_fetch_pmu = perf_pmu__find("ibs_fetch");
+ ibs_op_pmu = perf_pmu__find("ibs_op");
+
+ if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) {
+ if (attr->config & IBS_FETCH_L3MISSONLY) {
+ ibs_l3miss_warn();
+ warned_once = 1;
+ }
+ } else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) {
+ if (attr->config & IBS_OP_L3MISSONLY) {
+ ibs_l3miss_warn();
+ warned_once = 1;
+ }
+ }
+}
diff --git a/tools/perf/arch/x86/util/evsel.h b/tools/perf/arch/x86/util/evsel.h
new file mode 100644
index 000000000000..19ad1691374d
--- /dev/null
+++ b/tools/perf/arch/x86/util/evsel.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _EVSEL_H
+#define _EVSEL_H 1
+
+bool evsel__sys_has_perf_metrics(const struct evsel *evsel);
+
+#endif
diff --git a/tools/perf/arch/x86/util/group.c b/tools/perf/arch/x86/util/group.c
deleted file mode 100644
index e2f8034b8973..000000000000
--- a/tools/perf/arch/x86/util/group.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
-#include "api/fs/fs.h"
-#include "util/group.h"
-
-/*
- * Check whether we can use a group for top down.
- * Without a group may get bad results due to multiplexing.
- */
-bool arch_topdown_check_group(bool *warn)
-{
- int n;
-
- if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0)
- return false;
- if (n > 0) {
- *warn = true;
- return false;
- }
- return true;
-}
-
-void arch_topdown_group_warn(void)
-{
- fprintf(stderr,
- "nmi_watchdog enabled with topdown. May give wrong results.\n"
- "Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n");
-}
diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
index 578c8c568ffd..a51444a77a5f 100644
--- a/tools/perf/arch/x86/util/header.c
+++ b/tools/perf/arch/x86/util/header.c
@@ -9,18 +9,17 @@
#include "../../../util/debug.h"
#include "../../../util/header.h"
+#include "cpuid.h"
-static inline void
-cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c,
- unsigned int *d)
+void get_cpuid_0(char *vendor, unsigned int *lvl)
{
- __asm__ __volatile__ (".byte 0x53\n\tcpuid\n\t"
- "movl %%ebx, %%esi\n\t.byte 0x5b"
- : "=a" (*a),
- "=S" (*b),
- "=c" (*c),
- "=d" (*d)
- : "a" (op));
+ unsigned int b, c, d;
+
+ cpuid(0, 0, lvl, &b, &c, &d);
+ strncpy(&vendor[0], (char *)(&b), 4);
+ strncpy(&vendor[4], (char *)(&d), 4);
+ strncpy(&vendor[8], (char *)(&c), 4);
+ vendor[12] = '\0';
}
static int
@@ -31,14 +30,10 @@ __get_cpuid(char *buffer, size_t sz, const char *fmt)
int nb;
char vendor[16];
- cpuid(0, &lvl, &b, &c, &d);
- strncpy(&vendor[0], (char *)(&b), 4);
- strncpy(&vendor[4], (char *)(&d), 4);
- strncpy(&vendor[8], (char *)(&c), 4);
- vendor[12] = '\0';
+ get_cpuid_0(vendor, &lvl);
if (lvl >= 1) {
- cpuid(1, &a, &b, &c, &d);
+ cpuid(1, 0, &a, &b, &c, &d);
family = (a >> 8) & 0xf; /* bits 11 - 8 */
model = (a >> 4) & 0xf; /* Bits 7 - 4 */
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index 09f93800bffd..439c2956f3e7 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -110,7 +110,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
container_of(itr, struct intel_bts_recording, itr);
struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
struct evsel *evsel, *intel_bts_evsel = NULL;
- const struct perf_cpu_map *cpus = evlist->core.cpus;
+ const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
if (opts->auxtrace_sample_mode) {
@@ -129,6 +129,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
+ evsel->needs_auxtrace_mmap = true;
intel_bts_evsel = evsel;
opts->full_auxtrace = true;
}
@@ -218,13 +219,13 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
* To obtain the auxtrace buffer file descriptor, the auxtrace event
* must come first.
*/
- perf_evlist__to_front(evlist, intel_bts_evsel);
+ evlist__to_front(evlist, intel_bts_evsel);
/*
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
if (!perf_cpu_map__empty(cpus))
- perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
+ evsel__set_sample_bit(intel_bts_evsel, CPU);
}
/* Add dummy event to keep tracking */
@@ -232,13 +233,13 @@ static int intel_bts_recording_options(struct auxtrace_record *itr,
struct evsel *tracking_evsel;
int err;
- err = parse_events(evlist, "dummy:u", NULL);
+ err = parse_event(evlist, "dummy:u");
if (err)
return err;
tracking_evsel = evlist__last(evlist);
- perf_evlist__set_tracking_event(evlist, tracking_evsel);
+ evlist__set_tracking_event(evlist, tracking_evsel);
tracking_evsel->core.attr.freq = 0;
tracking_evsel->core.attr.sample_period = 1;
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 1643aed8c4c8..17336da08b58 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
+#include <linux/err.h>
#include <cpuid.h>
#include "../../../util/session.h"
@@ -25,6 +26,7 @@
#include "../../../util/pmu.h"
#include "../../../util/debug.h"
#include "../../../util/auxtrace.h"
+#include "../../../util/perf_api_probe.h"
#include "../../../util/record.h"
#include "../../../util/target.h"
#include "../../../util/tsc.h"
@@ -58,7 +60,8 @@ struct intel_pt_recording {
size_t priv_size;
};
-static int intel_pt_parse_terms_with_default(struct list_head *formats,
+static int intel_pt_parse_terms_with_default(const char *pmu_name,
+ struct list_head *formats,
const char *str,
u64 *config)
{
@@ -77,7 +80,8 @@ static int intel_pt_parse_terms_with_default(struct list_head *formats,
goto out_free;
attr.config = *config;
- err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
+ err = perf_pmu__config_terms(pmu_name, formats, &attr, terms, true,
+ NULL);
if (err)
goto out_free;
@@ -87,11 +91,12 @@ out_free:
return err;
}
-static int intel_pt_parse_terms(struct list_head *formats, const char *str,
- u64 *config)
+static int intel_pt_parse_terms(const char *pmu_name, struct list_head *formats,
+ const char *str, u64 *config)
{
*config = 0;
- return intel_pt_parse_terms_with_default(formats, str, config);
+ return intel_pt_parse_terms_with_default(pmu_name, formats, str,
+ config);
}
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
@@ -189,16 +194,19 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
int pos = 0;
u64 config;
char c;
+ int dirfd;
+
+ dirfd = perf_pmu__event_source_devices_fd();
pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");
- if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
- &mtc) != 1)
+ if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc", "%d",
+ &mtc) != 1)
mtc = 1;
if (mtc) {
- if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
- &mtc_periods) != 1)
+ if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc_periods", "%x",
+ &mtc_periods) != 1)
mtc_periods = 0;
if (mtc_periods) {
mtc_period = intel_pt_pick_bit(mtc_periods, 3);
@@ -207,13 +215,13 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
}
}
- if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
- &psb_cyc) != 1)
+ if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_cyc", "%d",
+ &psb_cyc) != 1)
psb_cyc = 1;
if (psb_cyc && mtc_periods) {
- if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
- &psb_periods) != 1)
+ if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_periods", "%x",
+ &psb_periods) != 1)
psb_periods = 0;
if (psb_periods) {
psb_period = intel_pt_pick_bit(psb_periods, 3);
@@ -222,14 +230,16 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
}
}
- if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
- perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
+ if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
+ perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/branch", "%c", &c) == 1)
pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");
pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
- intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, buf,
+ &config);
+ close(dirfd);
return config;
}
@@ -301,6 +311,7 @@ intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist)
ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
intel_pt_filter_bytes(filter);
+ ptr->priv_size += sizeof(u64); /* Cap Event Trace */
return ptr->priv_size;
}
@@ -330,25 +341,32 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
unsigned long max_non_turbo_ratio;
size_t filter_str_len;
const char *filter;
+ int event_trace;
__u64 *info;
int err;
if (priv_size != ptr->priv_size)
return -EINVAL;
- intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
- intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
- &noretcomp_bit);
- intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "tsc", &tsc_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "noretcomp", &noretcomp_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "mtc", &mtc_bit);
mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
"mtc_period");
- intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "cyc", &cyc_bit);
intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);
if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
"%lu", &max_non_turbo_ratio) != 1)
max_non_turbo_ratio = 0;
+ if (perf_pmu__scan_file(intel_pt_pmu, "caps/event_trace",
+ "%d", &event_trace) != 1)
+ event_trace = 0;
filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
filter_str_len = filter ? strlen(filter) : 0;
@@ -369,7 +387,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
ui__warning("Intel Processor Trace: TSC not available\n");
}
- per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
+ per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
@@ -399,36 +417,34 @@ static int intel_pt_info_fill(struct auxtrace_record *itr,
info += len >> 3;
}
+ *info++ = event_trace;
+
return 0;
}
+#ifdef HAVE_LIBTRACEEVENT
static int intel_pt_track_switches(struct evlist *evlist)
{
const char *sched_switch = "sched:sched_switch";
struct evsel *evsel;
int err;
- if (!perf_evlist__can_select_event(evlist, sched_switch))
+ if (!evlist__can_select_event(evlist, sched_switch))
return -EPERM;
- err = parse_events(evlist, sched_switch, NULL);
- if (err) {
- pr_debug2("%s: failed to parse %s, error %d\n",
+ evsel = evlist__add_sched_switch(evlist, true);
+ if (IS_ERR(evsel)) {
+ err = PTR_ERR(evsel);
+ pr_debug2("%s: failed to create %s, error = %d\n",
__func__, sched_switch, err);
return err;
}
- evsel = evlist__last(evlist);
-
- perf_evsel__set_sample_bit(evsel, CPU);
- perf_evsel__set_sample_bit(evsel, TIME);
-
- evsel->core.system_wide = true;
- evsel->no_aux_samples = true;
evsel->immediate = true;
return 0;
}
+#endif
static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
@@ -476,7 +492,7 @@ static void intel_pt_valid_str(char *str, size_t len, u64 valid)
}
}
-static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
+static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu, int dirfd,
const char *caps, const char *name,
const char *supported, u64 config)
{
@@ -486,11 +502,11 @@ static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
u64 bits;
int ok;
- if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
+ if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, caps, "%llx", &valid) != 1)
valid = 0;
if (supported &&
- perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
+ perf_pmu__scan_file_at(intel_pt_pmu, dirfd, supported, "%d", &ok) == 1 && !ok)
valid = 0;
valid |= 1;
@@ -519,57 +535,45 @@ out_err:
static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
struct evsel *evsel)
{
- int err;
+ int err, dirfd;
char c;
if (!evsel)
return 0;
+ dirfd = perf_pmu__event_source_devices_fd();
+ if (dirfd < 0)
+ return dirfd;
+
/*
* If supported, force pass-through config term (pt=1) even if user
* sets pt=0, which avoids senseless kernel errors.
*/
- if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
+ if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
!(evsel->core.attr.config & 1)) {
pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
evsel->core.attr.config |= 1;
}
- err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
+ err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/cycle_thresholds",
"cyc_thresh", "caps/psb_cyc",
evsel->core.attr.config);
if (err)
- return err;
+ goto out;
- err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
+ err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/mtc_periods",
"mtc_period", "caps/mtc",
evsel->core.attr.config);
if (err)
- return err;
+ goto out;
- return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
+ err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/psb_periods",
"psb_period", "caps/psb_cyc",
evsel->core.attr.config);
-}
-
-static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu,
- struct evsel *evsel)
-{
- struct perf_evsel_config_term *term;
- u64 user_bits = 0, bits;
-
- term = perf_evsel__get_config_term(evsel, CFG_CHG);
- if (term)
- user_bits = term->val.cfg_chg;
- bits = perf_pmu__format_bits(&intel_pt_pmu->format, "psb_period");
-
- /* Did user change psb_period */
- if (bits & user_bits)
- return;
-
- /* Set psb_period to 0 */
- evsel->core.attr.config &= ~bits;
+out:
+ close(dirfd);
+ return err;
}
static void intel_pt_min_max_sample_sz(struct evlist *evlist,
@@ -618,7 +622,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
bool have_timing_info, need_immediate = false;
struct evsel *evsel, *intel_pt_evsel = NULL;
- const struct perf_cpu_map *cpus = evlist->core.cpus;
+ const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
u64 tsc_bit;
int err;
@@ -634,6 +638,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
}
evsel->core.attr.freq = 0;
evsel->core.attr.sample_period = 1;
+ evsel->no_aux_samples = true;
+ evsel->needs_auxtrace_mmap = true;
intel_pt_evsel = evsel;
opts->full_auxtrace = true;
}
@@ -661,7 +667,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
return 0;
if (opts->auxtrace_sample_mode)
- intel_pt_config_sample_mode(intel_pt_pmu, intel_pt_evsel);
+ evsel__set_config_if_unset(intel_pt_pmu, intel_pt_evsel,
+ "psb_period", 0);
err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
if (err)
@@ -768,7 +775,14 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
}
}
- intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
+ if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) {
+ u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4;
+
+ intel_pt_evsel->core.attr.aux_watermark = aux_watermark;
+ }
+
+ intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format,
+ "tsc", &tsc_bit);
if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit))
have_timing_info = true;
@@ -779,7 +793,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* Per-cpu recording needs sched_switch events to distinguish different
* threads.
*/
- if (have_timing_info && !perf_cpu_map__empty(cpus)) {
+ if (have_timing_info && !perf_cpu_map__empty(cpus) &&
+ !record_opts__no_switch_events(opts)) {
if (perf_can_record_switch_events()) {
bool cpu_wide = !target__none(&opts->target) &&
!target__has_task(&opts->target);
@@ -787,24 +802,17 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
if (!cpu_wide && perf_can_record_cpu_wide()) {
struct evsel *switch_evsel;
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- return err;
+ switch_evsel = evlist__add_dummy_on_all_cpus(evlist);
+ if (!switch_evsel)
+ return -ENOMEM;
- switch_evsel = evlist__last(evlist);
-
- switch_evsel->core.attr.freq = 0;
- switch_evsel->core.attr.sample_period = 1;
switch_evsel->core.attr.context_switch = 1;
-
- switch_evsel->core.system_wide = true;
- switch_evsel->no_aux_samples = true;
switch_evsel->immediate = true;
- perf_evsel__set_sample_bit(switch_evsel, TID);
- perf_evsel__set_sample_bit(switch_evsel, TIME);
- perf_evsel__set_sample_bit(switch_evsel, CPU);
- perf_evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);
+ evsel__set_sample_bit(switch_evsel, TID);
+ evsel__set_sample_bit(switch_evsel, TIME);
+ evsel__set_sample_bit(switch_evsel, CPU);
+ evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);
opts->record_switch_events = false;
ptr->have_sched_switch = 3;
@@ -817,6 +825,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
ptr->have_sched_switch = 2;
}
} else {
+#ifdef HAVE_LIBTRACEEVENT
err = intel_pt_track_switches(evlist);
if (err == -EPERM)
pr_debug2("Unable to select sched:sched_switch\n");
@@ -824,49 +833,56 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
return err;
else
ptr->have_sched_switch = 1;
+#endif
}
}
+ if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel &&
+ perf_can_record_text_poke_events() && perf_can_record_cpu_wide())
+ opts->text_poke = true;
+
if (intel_pt_evsel) {
/*
* To obtain the auxtrace buffer file descriptor, the auxtrace
* event must come first.
*/
- perf_evlist__to_front(evlist, intel_pt_evsel);
+ evlist__to_front(evlist, intel_pt_evsel);
/*
* In the case of per-cpu mmaps, we need the CPU on the
* AUX event.
*/
if (!perf_cpu_map__empty(cpus))
- perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
+ evsel__set_sample_bit(intel_pt_evsel, CPU);
}
/* Add dummy event to keep tracking */
if (opts->full_auxtrace) {
+ bool need_system_wide_tracking;
struct evsel *tracking_evsel;
- err = parse_events(evlist, "dummy:u", NULL);
- if (err)
- return err;
-
- tracking_evsel = evlist__last(evlist);
+ /*
+ * User space tasks can migrate between CPUs, so when tracing
+ * selected CPUs, sideband for all CPUs is still needed.
+ */
+ need_system_wide_tracking = opts->target.cpu_list &&
+ !intel_pt_evsel->core.attr.exclude_user;
- perf_evlist__set_tracking_event(evlist, tracking_evsel);
+ tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking);
+ if (!tracking_evsel)
+ return -ENOMEM;
- tracking_evsel->core.attr.freq = 0;
- tracking_evsel->core.attr.sample_period = 1;
+ evlist__set_tracking_event(evlist, tracking_evsel);
- tracking_evsel->no_aux_samples = true;
if (need_immediate)
tracking_evsel->immediate = true;
/* In per-cpu case, always need the time of mmap events etc */
if (!perf_cpu_map__empty(cpus)) {
- perf_evsel__set_sample_bit(tracking_evsel, TIME);
+ evsel__set_sample_bit(tracking_evsel, TIME);
/* And the CPU for switch events */
- perf_evsel__set_sample_bit(tracking_evsel, CPU);
+ evsel__set_sample_bit(tracking_evsel, CPU);
}
- perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
+ evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
}
/*
@@ -874,7 +890,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
* per-cpu with no sched_switch (except workload-only).
*/
if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) &&
- !target__none(&opts->target))
+ !target__none(&opts->target) &&
+ !intel_pt_evsel->core.attr.exclude_user)
ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");
return 0;
diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
new file mode 100644
index 000000000000..df7b5dfcc26a
--- /dev/null
+++ b/tools/perf/arch/x86/util/iostat.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * perf iostat
+ *
+ * Copyright (C) 2020, Intel Corporation
+ *
+ * Authors: Alexander Antonov <alexander.antonov@linux.intel.com>
+ */
+
+#include <api/fs/fs.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/zalloc.h>
+#include <limits.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <regex.h>
+#include "util/cpumap.h"
+#include "util/debug.h"
+#include "util/iostat.h"
+#include "util/counts.h"
+#include "path.h"
+
+#ifndef MAX_PATH
+#define MAX_PATH 1024
+#endif
+
+#define UNCORE_IIO_PMU_PATH "devices/uncore_iio_%d"
+#define SYSFS_UNCORE_PMU_PATH "%s/"UNCORE_IIO_PMU_PATH
+#define PLATFORM_MAPPING_PATH UNCORE_IIO_PMU_PATH"/die%d"
+
+/*
+ * Each metric requires one IIO event which increments on every 4-byte transfer
+ * in the corresponding direction. The formula to compute each metric is generic:
+ * #EventCount * 4B / (1024 * 1024)
+ */
+static const char * const iostat_metrics[] = {
+ "Inbound Read(MB)",
+ "Inbound Write(MB)",
+ "Outbound Read(MB)",
+ "Outbound Write(MB)",
+};
+
+static inline int iostat_metrics_count(void)
+{
+ return sizeof(iostat_metrics) / sizeof(char *);
+}
+
+static const char *iostat_metric_by_idx(int idx)
+{
+ return *(iostat_metrics + idx % iostat_metrics_count());
+}
+
+struct iio_root_port {
+ u32 domain;
+ u8 bus;
+ u8 die;
+ u8 pmu_idx;
+ int idx;
+};
+
+struct iio_root_ports_list {
+ struct iio_root_port **rps;
+ int nr_entries;
+};
+
+static struct iio_root_ports_list *root_ports;
+
+static void iio_root_port_show(FILE *output,
+ const struct iio_root_port * const rp)
+{
+ if (output && rp)
+ fprintf(output, "S%d-uncore_iio_%d<%04x:%02x>\n",
+ rp->die, rp->pmu_idx, rp->domain, rp->bus);
+}
+
+static struct iio_root_port *iio_root_port_new(u32 domain, u8 bus,
+ u8 die, u8 pmu_idx)
+{
+ struct iio_root_port *p = calloc(1, sizeof(*p));
+
+ if (p) {
+ p->domain = domain;
+ p->bus = bus;
+ p->die = die;
+ p->pmu_idx = pmu_idx;
+ }
+ return p;
+}
+
+static void iio_root_ports_list_free(struct iio_root_ports_list *list)
+{
+ int idx;
+
+ if (list) {
+ for (idx = 0; idx < list->nr_entries; idx++)
+ zfree(&list->rps[idx]);
+ zfree(&list->rps);
+ free(list);
+ }
+}
+
+static struct iio_root_port *iio_root_port_find_by_notation(
+ const struct iio_root_ports_list * const list, u32 domain, u8 bus)
+{
+ int idx;
+ struct iio_root_port *rp;
+
+ if (list) {
+ for (idx = 0; idx < list->nr_entries; idx++) {
+ rp = list->rps[idx];
+ if (rp && rp->domain == domain && rp->bus == bus)
+ return rp;
+ }
+ }
+ return NULL;
+}
+
+static int iio_root_ports_list_insert(struct iio_root_ports_list *list,
+ struct iio_root_port * const rp)
+{
+ struct iio_root_port **tmp_buf;
+
+ if (list && rp) {
+ rp->idx = list->nr_entries++;
+ tmp_buf = realloc(list->rps,
+ list->nr_entries * sizeof(*list->rps));
+ if (!tmp_buf) {
+ pr_err("Failed to realloc memory\n");
+ return -ENOMEM;
+ }
+ tmp_buf[rp->idx] = rp;
+ list->rps = tmp_buf;
+ }
+ return 0;
+}
+
+static int iio_mapping(u8 pmu_idx, struct iio_root_ports_list * const list)
+{
+ char *buf;
+ char path[MAX_PATH];
+ u32 domain;
+ u8 bus;
+ struct iio_root_port *rp;
+ size_t size;
+ int ret;
+
+ for (int die = 0; die < cpu__max_node(); die++) {
+ scnprintf(path, MAX_PATH, PLATFORM_MAPPING_PATH, pmu_idx, die);
+ if (sysfs__read_str(path, &buf, &size) < 0) {
+ if (pmu_idx)
+ goto out;
+ pr_err("Mode iostat is not supported\n");
+ return -1;
+ }
+ ret = sscanf(buf, "%04x:%02hhx", &domain, &bus);
+ free(buf);
+ if (ret != 2) {
+ pr_err("Invalid mapping data: iio_%d; die%d\n",
+ pmu_idx, die);
+ return -1;
+ }
+ rp = iio_root_port_new(domain, bus, die, pmu_idx);
+ if (!rp || iio_root_ports_list_insert(list, rp)) {
+ free(rp);
+ return -ENOMEM;
+ }
+ }
+out:
+ return 0;
+}
+
+static u8 iio_pmu_count(void)
+{
+ u8 pmu_idx = 0;
+ char path[MAX_PATH];
+ const char *sysfs = sysfs__mountpoint();
+
+ if (sysfs) {
+ for (;; pmu_idx++) {
+ snprintf(path, sizeof(path), SYSFS_UNCORE_PMU_PATH,
+ sysfs, pmu_idx);
+ if (access(path, F_OK) != 0)
+ break;
+ }
+ }
+ return pmu_idx;
+}
+
+static int iio_root_ports_scan(struct iio_root_ports_list **list)
+{
+ int ret = -ENOMEM;
+ struct iio_root_ports_list *tmp_list;
+ u8 pmu_count = iio_pmu_count();
+
+ if (!pmu_count) {
+ pr_err("Unsupported uncore pmu configuration\n");
+ return -1;
+ }
+
+ tmp_list = calloc(1, sizeof(*tmp_list));
+ if (!tmp_list)
+ goto err;
+
+ for (u8 pmu_idx = 0; pmu_idx < pmu_count; pmu_idx++) {
+ ret = iio_mapping(pmu_idx, tmp_list);
+ if (ret)
+ break;
+ }
+err:
+ if (!ret)
+ *list = tmp_list;
+ else
+ iio_root_ports_list_free(tmp_list);
+
+ return ret;
+}
+
+static int iio_root_port_parse_str(u32 *domain, u8 *bus, char *str)
+{
+ int ret;
+ regex_t regex;
+ /*
+ * Expected format domain:bus:
+ * Valid domain range [0:ffff]
+ * Valid bus range [0:ff]
+ * Example: 0000:af, 0:3d, 01:7
+ */
+ regcomp(&regex, "^([a-f0-9A-F]{1,}):([a-f0-9A-F]{1,2})", REG_EXTENDED);
+ ret = regexec(&regex, str, 0, NULL, 0);
+ if (ret || sscanf(str, "%08x:%02hhx", domain, bus) != 2)
+ pr_warning("Unrecognized root port format: %s\n"
+ "Please use the following format:\n"
+ "\t [domain]:[bus]\n"
+ "\t for example: 0000:3d\n", str);
+
+ regfree(&regex);
+ return ret;
+}
+
+static int iio_root_ports_list_filter(struct iio_root_ports_list **list,
+ const char *filter)
+{
+ char *tok, *tmp, *filter_copy = NULL;
+ struct iio_root_port *rp;
+ u32 domain;
+ u8 bus;
+ int ret = -ENOMEM;
+ struct iio_root_ports_list *tmp_list = calloc(1, sizeof(*tmp_list));
+
+ if (!tmp_list)
+ goto err;
+
+ filter_copy = strdup(filter);
+ if (!filter_copy)
+ goto err;
+
+ for (tok = strtok_r(filter_copy, ",", &tmp); tok;
+ tok = strtok_r(NULL, ",", &tmp)) {
+ if (!iio_root_port_parse_str(&domain, &bus, tok)) {
+ rp = iio_root_port_find_by_notation(*list, domain, bus);
+ if (rp) {
+ (*list)->rps[rp->idx] = NULL;
+ ret = iio_root_ports_list_insert(tmp_list, rp);
+ if (ret) {
+ free(rp);
+ goto err;
+ }
+ } else if (!iio_root_port_find_by_notation(tmp_list,
+ domain, bus))
+ pr_warning("Root port %04x:%02x were not found\n",
+ domain, bus);
+ }
+ }
+
+ if (tmp_list->nr_entries == 0) {
+ pr_err("Requested root ports were not found\n");
+ ret = -EINVAL;
+ }
+err:
+ iio_root_ports_list_free(*list);
+ if (ret)
+ iio_root_ports_list_free(tmp_list);
+ else
+ *list = tmp_list;
+
+ free(filter_copy);
+ return ret;
+}
+
+static int iostat_event_group(struct evlist *evl,
+ struct iio_root_ports_list *list)
+{
+ int ret;
+ int idx;
+ const char *iostat_cmd_template =
+ "{uncore_iio_%x/event=0x83,umask=0x04,ch_mask=0xF,fc_mask=0x07/,\
+ uncore_iio_%x/event=0x83,umask=0x01,ch_mask=0xF,fc_mask=0x07/,\
+ uncore_iio_%x/event=0xc0,umask=0x04,ch_mask=0xF,fc_mask=0x07/,\
+ uncore_iio_%x/event=0xc0,umask=0x01,ch_mask=0xF,fc_mask=0x07/}";
+ const int len_template = strlen(iostat_cmd_template) + 1;
+ struct evsel *evsel = NULL;
+ int metrics_count = iostat_metrics_count();
+ char *iostat_cmd = calloc(len_template, 1);
+
+ if (!iostat_cmd)
+ return -ENOMEM;
+
+ for (idx = 0; idx < list->nr_entries; idx++) {
+ sprintf(iostat_cmd, iostat_cmd_template,
+ list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx,
+ list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx);
+ ret = parse_event(evl, iostat_cmd);
+ if (ret)
+ goto err;
+ }
+
+ evlist__for_each_entry(evl, evsel) {
+ evsel->priv = list->rps[evsel->core.idx / metrics_count];
+ }
+ list->nr_entries = 0;
+err:
+ iio_root_ports_list_free(list);
+ free(iostat_cmd);
+ return ret;
+}
+
+int iostat_prepare(struct evlist *evlist, struct perf_stat_config *config)
+{
+ if (evlist->core.nr_entries > 0) {
+ pr_warning("The -e and -M options are not supported."
+ "All chosen events/metrics will be dropped\n");
+ evlist__delete(evlist);
+ evlist = evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+ }
+
+ config->metric_only = true;
+ config->aggr_mode = AGGR_GLOBAL;
+
+ return iostat_event_group(evlist, root_ports);
+}
+
+int iostat_parse(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ int ret;
+ struct perf_stat_config *config = (struct perf_stat_config *)opt->data;
+
+ ret = iio_root_ports_scan(&root_ports);
+ if (!ret) {
+ config->iostat_run = true;
+ if (!str)
+ iostat_mode = IOSTAT_RUN;
+ else if (!strcmp(str, "list"))
+ iostat_mode = IOSTAT_LIST;
+ else {
+ iostat_mode = IOSTAT_RUN;
+ ret = iio_root_ports_list_filter(&root_ports, str);
+ }
+ }
+ return ret;
+}
+
+void iostat_list(struct evlist *evlist, struct perf_stat_config *config)
+{
+ struct evsel *evsel;
+ struct iio_root_port *rp = NULL;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (rp != evsel->priv) {
+ rp = evsel->priv;
+ iio_root_port_show(config->output, rp);
+ }
+ }
+}
+
+void iostat_release(struct evlist *evlist)
+{
+ struct evsel *evsel;
+ struct iio_root_port *rp = NULL;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (rp != evsel->priv) {
+ rp = evsel->priv;
+ zfree(&evsel->priv);
+ }
+ }
+}
+
+void iostat_prefix(struct evlist *evlist,
+ struct perf_stat_config *config,
+ char *prefix, struct timespec *ts)
+{
+ struct iio_root_port *rp = evlist->selected->priv;
+
+ if (rp) {
+ if (ts)
+ sprintf(prefix, "%6lu.%09lu%s%04x:%02x%s",
+ ts->tv_sec, ts->tv_nsec,
+ config->csv_sep, rp->domain, rp->bus,
+ config->csv_sep);
+ else
+ sprintf(prefix, "%04x:%02x%s", rp->domain, rp->bus,
+ config->csv_sep);
+ }
+}
+
+void iostat_print_header_prefix(struct perf_stat_config *config)
+{
+ if (config->csv_output)
+ fputs("port,", config->output);
+ else if (config->interval)
+ fprintf(config->output, "# time port ");
+ else
+ fprintf(config->output, " port ");
+}
+
+void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
+ struct perf_stat_output_ctx *out)
+{
+ double iostat_value = 0;
+ u64 prev_count_val = 0;
+ const char *iostat_metric = iostat_metric_by_idx(evsel->core.idx);
+ u8 die = ((struct iio_root_port *)evsel->priv)->die;
+ struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
+
+ if (count && count->run && count->ena) {
+ if (evsel->prev_raw_counts && !out->force_header) {
+ struct perf_counts_values *prev_count =
+ perf_counts(evsel->prev_raw_counts, die, 0);
+
+ prev_count_val = prev_count->val;
+ prev_count->val = count->val;
+ }
+ iostat_value = (count->val - prev_count_val) /
+ ((double) count->run / count->ena);
+ }
+ out->print_metric(config, out->ctx, NULL, "%8.0f", iostat_metric,
+ iostat_value / (256 * 1024));
+}
+
+void iostat_print_counters(struct evlist *evlist,
+ struct perf_stat_config *config, struct timespec *ts,
+ char *prefix, iostat_print_counter_t print_cnt_cb, void *arg)
+{
+ void *perf_device = NULL;
+ struct evsel *counter = evlist__first(evlist);
+
+ evlist__set_selected(evlist, counter);
+ iostat_prefix(evlist, config, prefix, ts);
+ fprintf(config->output, "%s", prefix);
+ evlist__for_each_entry(evlist, counter) {
+ perf_device = evlist->selected->priv;
+ if (perf_device && perf_device != counter->priv) {
+ evlist__set_selected(evlist, counter);
+ iostat_prefix(evlist, config, prefix, ts);
+ fprintf(config->output, "\n%s", prefix);
+ }
+ print_cnt_cb(config, counter, arg);
+ }
+ fputc('\n', config->output);
+}
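
As a quick sanity check of the divisor used in iostat_print_metric() above: each IIO event counts one 4-byte transfer, so megabytes are count * 4 / (1024 * 1024), i.e. count / (256 * 1024). A minimal sketch of that conversion:

    #include <stdio.h>

    /* Each IIO event counts one 4-byte transfer. */
    static double iio_events_to_mb(unsigned long long count)
    {
            return (double)count / (256 * 1024);    /* == count * 4 / (1024 * 1024) */
    }

    int main(void)
    {
            /* 2,621,440 events * 4 bytes = 10 MB */
            printf("%.1f MB\n", iio_events_to_mb(2621440ULL));
            return 0;
    }
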
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index c0775c39227f..424716518b75 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -18,7 +18,6 @@ static struct kvm_events_ops exit_events = {
};
const char *vcpu_id_str = "vcpu_id";
-const int decode_str_len = 20;
const char *kvm_exit_reason = "exit_reason";
const char *kvm_entry_trace = "kvm:kvm_entry";
const char *kvm_exit_trace = "kvm:kvm_exit";
@@ -31,8 +30,8 @@ const char *kvm_exit_trace = "kvm:kvm_exit";
static void mmio_event_get_key(struct evsel *evsel, struct perf_sample *sample,
struct event_key *key)
{
- key->key = perf_evsel__intval(evsel, sample, "gpa");
- key->info = perf_evsel__intval(evsel, sample, "type");
+ key->key = evsel__intval(evsel, sample, "gpa");
+ key->info = evsel__intval(evsel, sample, "type");
}
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
@@ -47,8 +46,8 @@ static bool mmio_event_begin(struct evsel *evsel,
return true;
/* MMIO write begin event in kernel. */
- if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
- perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
+ if (evsel__name_is(evsel, "kvm:kvm_mmio") &&
+ evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
mmio_event_get_key(evsel, sample, key);
return true;
}
@@ -64,8 +63,8 @@ static bool mmio_event_end(struct evsel *evsel, struct perf_sample *sample,
return true;
/* MMIO read end event in kernel.*/
- if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
- perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
+ if (evsel__name_is(evsel, "kvm:kvm_mmio") &&
+ evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
mmio_event_get_key(evsel, sample, key);
return true;
}
@@ -77,7 +76,7 @@ static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
struct event_key *key,
char *decode)
{
- scnprintf(decode, decode_str_len, "%#lx:%s",
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%#lx:%s",
(unsigned long)key->key,
key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
}
@@ -94,15 +93,15 @@ static void ioport_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
- key->key = perf_evsel__intval(evsel, sample, "port");
- key->info = perf_evsel__intval(evsel, sample, "rw");
+ key->key = evsel__intval(evsel, sample, "port");
+ key->info = evsel__intval(evsel, sample, "rw");
}
static bool ioport_event_begin(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
- if (!strcmp(evsel->name, "kvm:kvm_pio")) {
+ if (evsel__name_is(evsel, "kvm:kvm_pio")) {
ioport_event_get_key(evsel, sample, key);
return true;
}
@@ -121,7 +120,7 @@ static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
struct event_key *key,
char *decode)
{
- scnprintf(decode, decode_str_len, "%#llx:%s",
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
(unsigned long long)key->key,
key->info ? "POUT" : "PIN");
}
@@ -133,11 +132,56 @@ static struct kvm_events_ops ioport_events = {
.name = "IO Port Access"
};
+ /* The time of emulation msr is from kvm_msr to kvm_entry. */
+static void msr_event_get_key(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ key->key = evsel__intval(evsel, sample, "ecx");
+ key->info = evsel__intval(evsel, sample, "write");
+}
+
+static bool msr_event_begin(struct evsel *evsel,
+ struct perf_sample *sample,
+ struct event_key *key)
+{
+ if (evsel__name_is(evsel, "kvm:kvm_msr")) {
+ msr_event_get_key(evsel, sample, key);
+ return true;
+ }
+
+ return false;
+}
+
+static bool msr_event_end(struct evsel *evsel,
+ struct perf_sample *sample __maybe_unused,
+ struct event_key *key __maybe_unused)
+{
+ return kvm_entry_event(evsel);
+}
+
+static void msr_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+ struct event_key *key,
+ char *decode)
+{
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
+ (unsigned long long)key->key,
+ key->info ? "W" : "R");
+}
+
+static struct kvm_events_ops msr_events = {
+ .is_begin_event = msr_event_begin,
+ .is_end_event = msr_event_end,
+ .decode_key = msr_event_decode_key,
+ .name = "MSR Access"
+};
+
const char *kvm_events_tp[] = {
"kvm:kvm_entry",
"kvm:kvm_exit",
"kvm:kvm_mmio",
"kvm:kvm_pio",
+ "kvm:kvm_msr",
NULL,
};
@@ -145,6 +189,7 @@ struct kvm_reg_events_ops kvm_reg_events_ops[] = {
{ .name = "vmexit", .ops = &exit_events },
{ .name = "mmio", .ops = &mmio_events },
{ .name = "ioport", .ops = &ioport_events },
+ { .name = "msr", .ops = &msr_events },
{ NULL, NULL },
};
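
To illustrate the decode format added above: the key comes from the tracepoint's "ecx" (MSR index) and "write" fields and renders as "<index>:W" or "<index>:R". A small sketch with a hypothetical MSR index (snprintf stands in for perf's scnprintf):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long msr_index = 0x6e0;   /* hypothetical "ecx" value */
            int write = 1;                          /* "write" field */
            char decode[32];

            snprintf(decode, sizeof(decode), "%#llx:%s",
                     msr_index, write ? "W" : "R");
            printf("%s\n", decode);                 /* prints "0x6e0:W" */
            return 0;
    }
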
diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
new file mode 100644
index 000000000000..f683ac702247
--- /dev/null
+++ b/tools/perf/arch/x86/util/mem-events.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/pmu.h"
+#include "util/env.h"
+#include "map_symbol.h"
+#include "mem-events.h"
+#include "linux/string.h"
+
+static char mem_loads_name[100];
+static bool mem_loads_name__init;
+static char mem_stores_name[100];
+
+#define MEM_LOADS_AUX 0x8203
+#define MEM_LOADS_AUX_NAME "{%s/mem-loads-aux/,%s/mem-loads,ldlat=%u/}:P"
+
+#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }
+
+static struct perf_mem_event perf_mem_events_intel[PERF_MEM_EVENTS__MAX] = {
+ E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "%s/events/mem-loads"),
+ E("ldlat-stores", "%s/mem-stores/P", "%s/events/mem-stores"),
+ E(NULL, NULL, NULL),
+};
+
+static struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = {
+ E(NULL, NULL, NULL),
+ E(NULL, NULL, NULL),
+ E("mem-ldst", "ibs_op//", "ibs_op"),
+};
+
+static int perf_mem_is_amd_cpu(void)
+{
+ struct perf_env env = { .total_mem = 0, };
+
+ perf_env__cpuid(&env);
+ if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD"))
+ return 1;
+ return -1;
+}
+
+struct perf_mem_event *perf_mem_events__ptr(int i)
+{
+ /* 0: Uninitialized, 1: Yes, -1: No */
+ static int is_amd;
+
+ if (i >= PERF_MEM_EVENTS__MAX)
+ return NULL;
+
+ if (!is_amd)
+ is_amd = perf_mem_is_amd_cpu();
+
+ if (is_amd == 1)
+ return &perf_mem_events_amd[i];
+
+ return &perf_mem_events_intel[i];
+}
+
+bool is_mem_loads_aux_event(struct evsel *leader)
+{
+ if (perf_pmu__find("cpu")) {
+ if (!pmu_have_event("cpu", "mem-loads-aux"))
+ return false;
+ } else if (perf_pmu__find("cpu_core")) {
+ if (!pmu_have_event("cpu_core", "mem-loads-aux"))
+ return false;
+ }
+
+ return leader->core.attr.config == MEM_LOADS_AUX;
+}
+
+char *perf_mem_events__name(int i, char *pmu_name)
+{
+ struct perf_mem_event *e = perf_mem_events__ptr(i);
+
+ if (!e)
+ return NULL;
+
+ if (i == PERF_MEM_EVENTS__LOAD) {
+ if (mem_loads_name__init && !pmu_name)
+ return mem_loads_name;
+
+ if (!pmu_name) {
+ mem_loads_name__init = true;
+ pmu_name = (char *)"cpu";
+ }
+
+ if (pmu_have_event(pmu_name, "mem-loads-aux")) {
+ scnprintf(mem_loads_name, sizeof(mem_loads_name),
+ MEM_LOADS_AUX_NAME, pmu_name, pmu_name,
+ perf_mem_events__loads_ldlat);
+ } else {
+ scnprintf(mem_loads_name, sizeof(mem_loads_name),
+ e->name, pmu_name,
+ perf_mem_events__loads_ldlat);
+ }
+ return mem_loads_name;
+ }
+
+ if (i == PERF_MEM_EVENTS__STORE) {
+ if (!pmu_name)
+ pmu_name = (char *)"cpu";
+
+ scnprintf(mem_stores_name, sizeof(mem_stores_name),
+ e->name, pmu_name);
+ return mem_stores_name;
+ }
+
+ return (char *)e->name;
+}
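
The MEM_LOADS_AUX_NAME template above expands into a two-event group string. A minimal sketch, assuming a PMU called "cpu" that has a mem-loads-aux event and an ldlat of 30 (both values are assumptions for illustration):

    #include <stdio.h>

    int main(void)
    {
            char name[100];

            snprintf(name, sizeof(name),
                     "{%s/mem-loads-aux/,%s/mem-loads,ldlat=%u/}:P",
                     "cpu", "cpu", 30U);
            /* name == "{cpu/mem-loads-aux/,cpu/mem-loads,ldlat=30/}:P" */
            printf("%s\n", name);
            return 0;
    }
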
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index fca81b39b09f..0ed177991ad0 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -9,6 +9,8 @@
#include "../../../util/perf_regs.h"
#include "../../../util/debug.h"
#include "../../../util/event.h"
+#include "../../../util/pmu.h"
+#include "../../../util/pmu-hybrid.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG(AX, PERF_REG_X86_AX),
@@ -165,7 +167,7 @@ static int sdt_init_op_regex(void)
/*
* Max x86 register name length is 5(ex: %r15d). So, 6th char
* should always contain NULL. This helps to find register name
- * length using strlen, insted of maintaing one more variable.
+ * length using strlen, instead of maintaining one more variable.
*/
#define SDT_REG_NAME_SIZE 6
@@ -207,7 +209,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
* and displacement 0 (Both sign and displacement 0 are
* optional so it may be empty). Use one more character
* to hold last NULL so that strlen can be used to find
- * prefix length, instead of maintaing one more variable.
+ * prefix length, instead of maintaining one more variable.
*/
char prefix[3] = {0};
@@ -284,12 +286,22 @@ uint64_t arch__intr_reg_mask(void)
.disabled = 1,
.exclude_kernel = 1,
};
+ struct perf_pmu *pmu;
int fd;
/*
* In an unnamed union, init it here to build on older gcc versions
*/
attr.sample_period = 1;
+ if (perf_pmu__has_hybrid()) {
+ /*
+ * The same register set is supported among different hybrid PMUs.
+ * Only check the first available one.
+ */
+ pmu = list_first_entry(&perf_pmu__hybrid_pmus, typeof(*pmu), hybrid_list);
+ attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT;
+ }
+
event_attr_init(&attr);
fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index d48d608517fd..3c0de3370d7e 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -1,20 +1,170 @@
// SPDX-License-Identifier: GPL-2.0
#include <string.h>
-
+#include <stdio.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <fcntl.h>
#include <linux/stddef.h>
#include <linux/perf_event.h>
+#include <linux/zalloc.h>
+#include <api/fs/fs.h>
+#include <errno.h>
#include "../../../util/intel-pt.h"
#include "../../../util/intel-bts.h"
#include "../../../util/pmu.h"
+#include "../../../util/fncache.h"
+
+struct pmu_alias {
+ char *name;
+ char *alias;
+ struct list_head list;
+};
+
+static LIST_HEAD(pmu_alias_name_list);
+static bool cached_list;
struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
{
#ifdef HAVE_AUXTRACE_SUPPORT
- if (!strcmp(pmu->name, INTEL_PT_PMU_NAME))
+ if (!strcmp(pmu->name, INTEL_PT_PMU_NAME)) {
+ pmu->auxtrace = true;
return intel_pt_pmu_default_config(pmu);
- if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME))
+ }
+ if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME)) {
+ pmu->auxtrace = true;
pmu->selectable = true;
+ }
#endif
return NULL;
}
+
+static void pmu_alias__delete(struct pmu_alias *pmu_alias)
+{
+ if (!pmu_alias)
+ return;
+
+ zfree(&pmu_alias->name);
+ zfree(&pmu_alias->alias);
+ free(pmu_alias);
+}
+
+static struct pmu_alias *pmu_alias__new(char *name, char *alias)
+{
+ struct pmu_alias *pmu_alias = zalloc(sizeof(*pmu_alias));
+
+ if (pmu_alias) {
+ pmu_alias->name = strdup(name);
+ if (!pmu_alias->name)
+ goto out_delete;
+
+ pmu_alias->alias = strdup(alias);
+ if (!pmu_alias->alias)
+ goto out_delete;
+ }
+ return pmu_alias;
+
+out_delete:
+ pmu_alias__delete(pmu_alias);
+ return NULL;
+}
+
+static int setup_pmu_alias_list(void)
+{
+ int fd, dirfd;
+ DIR *dir;
+ struct dirent *dent;
+ struct pmu_alias *pmu_alias;
+ char buf[MAX_PMU_NAME_LEN];
+ FILE *file;
+ int ret = -ENOMEM;
+
+ dirfd = perf_pmu__event_source_devices_fd();
+ if (dirfd < 0)
+ return -1;
+
+ dir = fdopendir(dirfd);
+ if (!dir)
+ return -errno;
+
+ while ((dent = readdir(dir))) {
+ if (!strcmp(dent->d_name, ".") ||
+ !strcmp(dent->d_name, ".."))
+ continue;
+
+ fd = perf_pmu__pathname_fd(dirfd, dent->d_name, "alias", O_RDONLY);
+ if (fd < 0)
+ continue;
+
+ file = fdopen(fd, "r");
+ if (!file)
+ continue;
+
+ if (!fgets(buf, sizeof(buf), file)) {
+ fclose(file);
+ continue;
+ }
+
+ fclose(file);
+
+ /* Remove the last '\n' */
+ buf[strlen(buf) - 1] = 0;
+
+ pmu_alias = pmu_alias__new(dent->d_name, buf);
+ if (!pmu_alias)
+ goto close_dir;
+
+ list_add_tail(&pmu_alias->list, &pmu_alias_name_list);
+ }
+
+ ret = 0;
+
+close_dir:
+ closedir(dir);
+ return ret;
+}
+
+static char *__pmu_find_real_name(const char *name)
+{
+ struct pmu_alias *pmu_alias;
+
+ list_for_each_entry(pmu_alias, &pmu_alias_name_list, list) {
+ if (!strcmp(name, pmu_alias->alias))
+ return pmu_alias->name;
+ }
+
+ return (char *)name;
+}
+
+char *pmu_find_real_name(const char *name)
+{
+ if (cached_list)
+ return __pmu_find_real_name(name);
+
+ setup_pmu_alias_list();
+ cached_list = true;
+
+ return __pmu_find_real_name(name);
+}
+
+static char *__pmu_find_alias_name(const char *name)
+{
+ struct pmu_alias *pmu_alias;
+
+ list_for_each_entry(pmu_alias, &pmu_alias_name_list, list) {
+ if (!strcmp(name, pmu_alias->name))
+ return pmu_alias->alias;
+ }
+ return NULL;
+}
+
+char *pmu_find_alias_name(const char *name)
+{
+ if (cached_list)
+ return __pmu_find_alias_name(name);
+
+ setup_pmu_alias_list();
+ cached_list = true;
+
+ return __pmu_find_alias_name(name);
+}
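
The alias list above is built by reading each PMU's "alias" file under the event_source sysfs directory. A minimal sketch of the same lookup, assuming the usual /sys mount point and a hypothetical uncore PMU directory name:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *pmu = "uncore_cha_0";       /* hypothetical PMU directory name */
            char path[256], alias[256];
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/%s/alias", pmu);
            f = fopen(path, "r");
            if (f && fgets(alias, sizeof(alias), f)) {
                    alias[strcspn(alias, "\n")] = '\0';     /* strip trailing newline */
                    printf("%s is an alias for %s\n", alias, pmu);
            }
            if (f)
                    fclose(f);
            return 0;
    }
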
diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c
new file mode 100644
index 000000000000..9ad5e5c7bd27
--- /dev/null
+++ b/tools/perf/arch/x86/util/topdown.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "api/fs/fs.h"
+#include "util/evsel.h"
+#include "util/pmu.h"
+#include "util/topdown.h"
+#include "topdown.h"
+#include "evsel.h"
+
+/* Check whether there is a PMU which supports the perf metrics. */
+bool topdown_sys_has_perf_metrics(void)
+{
+ static bool has_perf_metrics;
+ static bool cached;
+ struct perf_pmu *pmu;
+
+ if (cached)
+ return has_perf_metrics;
+
+ /*
+ * The perf metrics feature is a core PMU feature.
+ * The PERF_TYPE_RAW type is the type of a core PMU.
+ * The slots event is only available when the core PMU
+ * supports the perf metrics feature.
+ */
+ pmu = perf_pmu__find_by_type(PERF_TYPE_RAW);
+ if (pmu && pmu_have_event(pmu->name, "slots"))
+ has_perf_metrics = true;
+
+ cached = true;
+ return has_perf_metrics;
+}
+
+#define TOPDOWN_SLOTS 0x0400
+
+/*
+ * Check whether a topdown group supports sample-read.
+ *
+ * Only the Topdown metric events support sample-read. The slots
+ * event must be the leader of the topdown group.
+ */
+bool arch_topdown_sample_read(struct evsel *leader)
+{
+ if (!evsel__sys_has_perf_metrics(leader))
+ return false;
+
+ if (leader->core.attr.config == TOPDOWN_SLOTS)
+ return true;
+
+ return false;
+}
diff --git a/tools/perf/arch/x86/util/topdown.h b/tools/perf/arch/x86/util/topdown.h
new file mode 100644
index 000000000000..46bf9273e572
--- /dev/null
+++ b/tools/perf/arch/x86/util/topdown.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOPDOWN_H
+#define _TOPDOWN_H 1
+
+bool topdown_sys_has_perf_metrics(void);
+
+#endif
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
index 2f55afb14e1f..9b99f48b923c 100644
--- a/tools/perf/arch/x86/util/tsc.c
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -1,84 +1,91 @@
// SPDX-License-Identifier: GPL-2.0
-#include <stdbool.h>
-#include <errno.h>
-
-#include <linux/stddef.h>
-#include <linux/perf_event.h>
-
#include <linux/types.h>
-#include <asm/barrier.h>
+#include <math.h>
+#include <string.h>
+#include <stdlib.h>
+
#include "../../../util/debug.h"
-#include "../../../util/event.h"
-#include "../../../util/synthetic-events.h"
#include "../../../util/tsc.h"
+#include "cpuid.h"
-int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
- struct perf_tsc_conversion *tc)
+u64 rdtsc(void)
{
- bool cap_user_time_zero;
- u32 seq;
- int i = 0;
-
- while (1) {
- seq = pc->lock;
- rmb();
- tc->time_mult = pc->time_mult;
- tc->time_shift = pc->time_shift;
- tc->time_zero = pc->time_zero;
- cap_user_time_zero = pc->cap_user_time_zero;
- rmb();
- if (pc->lock == seq && !(seq & 1))
- break;
- if (++i > 10000) {
- pr_debug("failed to get perf_event_mmap_page lock\n");
- return -EINVAL;
- }
- }
+ unsigned int low, high;
- if (!cap_user_time_zero)
- return -EOPNOTSUPP;
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
- return 0;
+ return low | ((u64)high) << 32;
}
-u64 rdtsc(void)
+/*
+ * Derive the TSC frequency in Hz from /proc/cpuinfo, for example:
+ * ...
+ * model name : Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz
+ * ...
+ * will return 3000000000.
+ */
+static double cpuinfo_tsc_freq(void)
{
- unsigned int low, high;
+ double result = 0;
+ FILE *cpuinfo;
+ char *line = NULL;
+ size_t len = 0;
- asm volatile("rdtsc" : "=a" (low), "=d" (high));
+ cpuinfo = fopen("/proc/cpuinfo", "r");
+ if (!cpuinfo) {
+ pr_err("Failed to read /proc/cpuinfo for TSC frequency");
+ return NAN;
+ }
+ while (getline(&line, &len, cpuinfo) > 0) {
+ if (!strncmp(line, "model name", 10)) {
+ char *pos = strstr(line + 11, " @ ");
- return low | ((u64)high) << 32;
+ if (pos && sscanf(pos, " @ %lfGHz", &result) == 1) {
+ result *= 1000000000;
+ goto out;
+ }
+ }
+ }
+out:
+ if (fpclassify(result) == FP_ZERO)
+ pr_err("Failed to find TSC frequency in /proc/cpuinfo");
+
+ free(line);
+ fclose(cpuinfo);
+ return result;
}
-int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc,
- struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
+double arch_get_tsc_freq(void)
{
- union perf_event event = {
- .time_conv = {
- .header = {
- .type = PERF_RECORD_TIME_CONV,
- .size = sizeof(struct perf_record_time_conv),
- },
- },
- };
- struct perf_tsc_conversion tc;
- int err;
+ unsigned int a, b, c, d, lvl;
+ static bool cached;
+ static double tsc;
+ char vendor[16];
- if (!pc)
- return 0;
- err = perf_read_tsc_conversion(pc, &tc);
- if (err == -EOPNOTSUPP)
+ if (cached)
+ return tsc;
+
+ cached = true;
+ get_cpuid_0(vendor, &lvl);
+ if (!strstr(vendor, "Intel"))
return 0;
- if (err)
- return err;
- pr_debug2("Synthesizing TSC conversion information\n");
+ /*
+ * The Time Stamp Counter and Nominal Core Crystal Clock
+ * Information Leaf (0x15) is not supported; fall back to /proc/cpuinfo.
+ */
+ if (lvl < 0x15) {
+ tsc = cpuinfo_tsc_freq();
+ return tsc;
+ }
- event.time_conv.time_mult = tc.time_mult;
- event.time_conv.time_shift = tc.time_shift;
- event.time_conv.time_zero = tc.time_zero;
+ cpuid(0x15, 0, &a, &b, &c, &d);
+ /* TSC frequency is not enumerated */
+ if (!a || !b || !c) {
+ tsc = cpuinfo_tsc_freq();
+ return tsc;
+ }
- return process(tool, &event, NULL, machine);
+ tsc = (double)c * (double)b / (double)a;
+ return tsc;
}
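
For the CPUID 0x15 path above, EAX and EBX give the ratio of the TSC to the core crystal clock and ECX the crystal frequency in Hz, so the TSC frequency is ecx * ebx / eax. A minimal sketch with made-up leaf values:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical CPUID 0x15 output */
            unsigned int eax = 2;                   /* denominator of TSC/crystal ratio */
            unsigned int ebx = 250;                 /* numerator of TSC/crystal ratio */
            unsigned int ecx = 24000000;            /* crystal clock in Hz */
            double tsc = (double)ecx * ebx / eax;   /* 3,000,000,000 Hz */

            printf("TSC frequency: %.0f Hz\n", tsc);
            return 0;
    }
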
diff --git a/tools/perf/arch/x86/util/unwind-libdw.c b/tools/perf/arch/x86/util/unwind-libdw.c
index fda8f4206ee4..ef71e8bf80bf 100644
--- a/tools/perf/arch/x86/util/unwind-libdw.c
+++ b/tools/perf/arch/x86/util/unwind-libdw.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "util/sample.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
diff --git a/tools/perf/bench/Build b/tools/perf/bench/Build
index e4e321b6f883..0f158dc8139b 100644
--- a/tools/perf/bench/Build
+++ b/tools/perf/bench/Build
@@ -1,16 +1,22 @@
perf-y += sched-messaging.o
perf-y += sched-pipe.o
+perf-y += syscall.o
perf-y += mem-functions.o
perf-y += futex-hash.o
perf-y += futex-wake.o
perf-y += futex-wake-parallel.o
perf-y += futex-requeue.o
perf-y += futex-lock-pi.o
-
perf-y += epoll-wait.o
perf-y += epoll-ctl.o
+perf-y += synthesize.o
+perf-y += kallsyms-parse.o
+perf-y += find-bit-bench.o
+perf-y += inject-buildid.o
+perf-y += evlist-open-close.o
+perf-y += breakpoint.o
+perf-y += pmu-scan.o
-perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-lib.o
perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
perf-$(CONFIG_X86_64) += mem-memset-x86-64-asm.o
diff --git a/tools/perf/bench/bench.h b/tools/perf/bench/bench.h
index 4aa6de1aa67d..0d2b65976212 100644
--- a/tools/perf/bench/bench.h
+++ b/tools/perf/bench/bench.h
@@ -10,40 +10,39 @@ extern struct timeval bench__start, bench__end, bench__runtime;
* The madvise transparent hugepage constants were added in glibc
* 2.13. For compatibility with older versions of glibc, define these
* tokens if they are not already defined.
- *
- * PA-RISC uses different madvise values from other architectures and
- * needs to be special-cased.
*/
-#ifdef __hppa__
-# ifndef MADV_HUGEPAGE
-# define MADV_HUGEPAGE 67
-# endif
-# ifndef MADV_NOHUGEPAGE
-# define MADV_NOHUGEPAGE 68
-# endif
-#else
# ifndef MADV_HUGEPAGE
# define MADV_HUGEPAGE 14
# endif
# ifndef MADV_NOHUGEPAGE
# define MADV_NOHUGEPAGE 15
# endif
-#endif
int bench_numa(int argc, const char **argv);
int bench_sched_messaging(int argc, const char **argv);
int bench_sched_pipe(int argc, const char **argv);
+int bench_syscall_basic(int argc, const char **argv);
+int bench_syscall_getpgid(int argc, const char **argv);
+int bench_syscall_fork(int argc, const char **argv);
+int bench_syscall_execve(int argc, const char **argv);
int bench_mem_memcpy(int argc, const char **argv);
int bench_mem_memset(int argc, const char **argv);
+int bench_mem_find_bit(int argc, const char **argv);
int bench_futex_hash(int argc, const char **argv);
int bench_futex_wake(int argc, const char **argv);
int bench_futex_wake_parallel(int argc, const char **argv);
int bench_futex_requeue(int argc, const char **argv);
/* pi futexes */
int bench_futex_lock_pi(int argc, const char **argv);
-
int bench_epoll_wait(int argc, const char **argv);
int bench_epoll_ctl(int argc, const char **argv);
+int bench_synthesize(int argc, const char **argv);
+int bench_kallsyms_parse(int argc, const char **argv);
+int bench_inject_build_id(int argc, const char **argv);
+int bench_evlist_open_close(int argc, const char **argv);
+int bench_breakpoint_thread(int argc, const char **argv);
+int bench_breakpoint_enable(int argc, const char **argv);
+int bench_pmu_scan(int argc, const char **argv);
#define BENCH_FORMAT_DEFAULT_STR "default"
#define BENCH_FORMAT_DEFAULT 0
diff --git a/tools/perf/bench/breakpoint.c b/tools/perf/bench/breakpoint.c
new file mode 100644
index 000000000000..41385f89ffc7
--- /dev/null
+++ b/tools/perf/bench/breakpoint.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <subcmd/parse-options.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <linux/time64.h>
+#include <sys/syscall.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include "bench.h"
+#include "futex.h"
+
+struct {
+ unsigned int nbreakpoints;
+ unsigned int nparallel;
+ unsigned int nthreads;
+} thread_params = {
+ .nbreakpoints = 1,
+ .nparallel = 1,
+ .nthreads = 1,
+};
+
+static const struct option thread_options[] = {
+ OPT_UINTEGER('b', "breakpoints", &thread_params.nbreakpoints,
+ "Specify amount of breakpoints"),
+ OPT_UINTEGER('p', "parallelism", &thread_params.nparallel, "Specify amount of parallelism"),
+ OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
+ OPT_END()
+};
+
+static const char * const thread_usage[] = {
+ "perf bench breakpoint thread <options>",
+ NULL
+};
+
+struct breakpoint {
+ int fd;
+ char watched;
+};
+
+static int breakpoint_setup(void *addr)
+{
+ struct perf_event_attr attr = { .size = 0, };
+
+ attr.type = PERF_TYPE_BREAKPOINT;
+ attr.size = sizeof(attr);
+ attr.inherit = 1;
+ attr.exclude_kernel = 1;
+ attr.exclude_hv = 1;
+ attr.bp_addr = (unsigned long)addr;
+ attr.bp_type = HW_BREAKPOINT_RW;
+ attr.bp_len = HW_BREAKPOINT_LEN_1;
+ return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
+}
+
+static void *passive_thread(void *arg)
+{
+ unsigned int *done = (unsigned int *)arg;
+
+ while (!__atomic_load_n(done, __ATOMIC_RELAXED))
+ futex_wait(done, 0, NULL, 0);
+ return NULL;
+}
+
+static void *active_thread(void *arg)
+{
+ unsigned int *done = (unsigned int *)arg;
+
+ while (!__atomic_load_n(done, __ATOMIC_RELAXED));
+ return NULL;
+}
+
+static void *breakpoint_thread(void *arg)
+{
+ unsigned int i, done;
+ int *repeat = (int *)arg;
+ pthread_t *threads;
+
+ threads = calloc(thread_params.nthreads, sizeof(threads[0]));
+ if (!threads)
+ exit((perror("calloc"), EXIT_FAILURE));
+
+ while (__atomic_fetch_sub(repeat, 1, __ATOMIC_RELAXED) > 0) {
+ done = 0;
+ for (i = 0; i < thread_params.nthreads; i++) {
+ if (pthread_create(&threads[i], NULL, passive_thread, &done))
+ exit((perror("pthread_create"), EXIT_FAILURE));
+ }
+ __atomic_store_n(&done, 1, __ATOMIC_RELAXED);
+ futex_wake(&done, thread_params.nthreads, 0);
+ for (i = 0; i < thread_params.nthreads; i++)
+ pthread_join(threads[i], NULL);
+ }
+ free(threads);
+ return NULL;
+}
+
+// The benchmark creates nbreakpoints inheritable breakpoints,
+// then starts nparallel threads which create and join bench_repeat batches of nthreads threads.
+int bench_breakpoint_thread(int argc, const char **argv)
+{
+ unsigned int i, result_usec;
+ int repeat = bench_repeat;
+ struct breakpoint *breakpoints;
+ pthread_t *parallel;
+ struct timeval start, stop, diff;
+
+ if (parse_options(argc, argv, thread_options, thread_usage, 0)) {
+ usage_with_options(thread_usage, thread_options);
+ exit(EXIT_FAILURE);
+ }
+ breakpoints = calloc(thread_params.nbreakpoints, sizeof(breakpoints[0]));
+ parallel = calloc(thread_params.nparallel, sizeof(parallel[0]));
+ if (!breakpoints || !parallel)
+ exit((perror("calloc"), EXIT_FAILURE));
+
+ for (i = 0; i < thread_params.nbreakpoints; i++) {
+ breakpoints[i].fd = breakpoint_setup(&breakpoints[i].watched);
+ if (breakpoints[i].fd == -1)
+ exit((perror("perf_event_open"), EXIT_FAILURE));
+ }
+ gettimeofday(&start, NULL);
+ for (i = 0; i < thread_params.nparallel; i++) {
+ if (pthread_create(&parallel[i], NULL, breakpoint_thread, &repeat))
+ exit((perror("pthread_create"), EXIT_FAILURE));
+ }
+ for (i = 0; i < thread_params.nparallel; i++)
+ pthread_join(parallel[i], NULL);
+ gettimeofday(&stop, NULL);
+ timersub(&stop, &start, &diff);
+ for (i = 0; i < thread_params.nbreakpoints; i++)
+ close(breakpoints[i].fd);
+ free(parallel);
+ free(breakpoints);
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ printf("# Created/joined %d threads with %d breakpoints and %d parallelism\n",
+ bench_repeat, thread_params.nbreakpoints, thread_params.nparallel);
+ printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
+ (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
+ result_usec = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ printf(" %14lf usecs/op\n",
+ (double)result_usec / bench_repeat / thread_params.nthreads);
+ printf(" %14lf usecs/op/cpu\n",
+ (double)result_usec / bench_repeat /
+ thread_params.nthreads * thread_params.nparallel);
+ break;
+ case BENCH_FORMAT_SIMPLE:
+ printf("%lu.%03lu\n", (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
+ break;
+ default:
+ fprintf(stderr, "Unknown format: %d\n", bench_format);
+ exit(EXIT_FAILURE);
+ }
+ return 0;
+}
+
+struct {
+ unsigned int npassive;
+ unsigned int nactive;
+} enable_params = {
+ .nactive = 0,
+ .npassive = 0,
+};
+
+static const struct option enable_options[] = {
+ OPT_UINTEGER('p', "passive", &enable_params.npassive, "Specify amount of passive threads"),
+ OPT_UINTEGER('a', "active", &enable_params.nactive, "Specify amount of active threads"),
+ OPT_END()
+};
+
+static const char * const enable_usage[] = {
+ "perf bench breakpoint enable <options>",
+ NULL
+};
+
+// The benchmark creates an inheritable breakpoint,
+// then starts npassive threads that block and nactive threads that actively spin
+// and then disables and enables the breakpoint bench_repeat times.
+int bench_breakpoint_enable(int argc, const char **argv)
+{
+ unsigned int i, nthreads, result_usec, done = 0;
+ char watched;
+ int fd;
+ pthread_t *threads;
+ struct timeval start, stop, diff;
+
+ if (parse_options(argc, argv, enable_options, enable_usage, 0)) {
+ usage_with_options(enable_usage, enable_options);
+ exit(EXIT_FAILURE);
+ }
+ fd = breakpoint_setup(&watched);
+ if (fd == -1)
+ exit((perror("perf_event_open"), EXIT_FAILURE));
+ nthreads = enable_params.npassive + enable_params.nactive;
+ threads = calloc(nthreads, sizeof(threads[0]));
+ if (!threads)
+ exit((perror("calloc"), EXIT_FAILURE));
+
+ for (i = 0; i < nthreads; i++) {
+ if (pthread_create(&threads[i], NULL,
+ i < enable_params.npassive ? passive_thread : active_thread, &done))
+ exit((perror("pthread_create"), EXIT_FAILURE));
+ }
+ usleep(10000); // let the threads block
+ gettimeofday(&start, NULL);
+ for (i = 0; i < bench_repeat; i++) {
+ if (ioctl(fd, PERF_EVENT_IOC_DISABLE, 0))
+ exit((perror("ioctl(PERF_EVENT_IOC_DISABLE)"), EXIT_FAILURE));
+ if (ioctl(fd, PERF_EVENT_IOC_ENABLE, 0))
+ exit((perror("ioctl(PERF_EVENT_IOC_ENABLE)"), EXIT_FAILURE));
+ }
+ gettimeofday(&stop, NULL);
+ timersub(&stop, &start, &diff);
+ __atomic_store_n(&done, 1, __ATOMIC_RELAXED);
+ futex_wake(&done, enable_params.npassive, 0);
+ for (i = 0; i < nthreads; i++)
+ pthread_join(threads[i], NULL);
+ free(threads);
+ close(fd);
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ printf("# Enabled/disabled breakpoint %d time with %d passive and %d active threads\n",
+ bench_repeat, enable_params.npassive, enable_params.nactive);
+ printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
+ (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
+ result_usec = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ printf(" %14lf usecs/op\n", (double)result_usec / bench_repeat);
+ break;
+ case BENCH_FORMAT_SIMPLE:
+ printf("%lu.%03lu\n", (long)diff.tv_sec, (long)(diff.tv_usec / USEC_PER_MSEC));
+ break;
+ default:
+ fprintf(stderr, "Unknown format: %d\n", bench_format);
+ exit(EXIT_FAILURE);
+ }
+ return 0;
+}
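
With the options defined above, the new benchmark can be invoked along these lines (the numeric arguments are arbitrary examples):

    perf bench breakpoint thread -b 2 -p 4 -t 8
    perf bench breakpoint enable -p 4 -a 2
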
diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c
index cadc18d42aa4..521d1ff97b06 100644
--- a/tools/perf/bench/epoll-ctl.c
+++ b/tools/perf/bench/epoll-ctl.c
@@ -5,7 +5,7 @@
* Benchmark the various operations allowed for epoll_ctl(2).
* The idea is to concurrently stress a single epoll instance
*/
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
/* For the CLR_() macros */
#include <string.h>
#include <pthread.h>
@@ -21,9 +21,9 @@
#include <sys/resource.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
-#include <internal/cpumap.h>
#include <perf/cpumap.h>
+#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
@@ -59,10 +59,10 @@ static unsigned int nested = 0;
/* amount of fds to monitor, per thread */
static unsigned int nfds = 64;
-static pthread_mutex_t thread_lock;
+static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats all_stats[EPOLL_NR_OPS];
-static pthread_cond_t thread_parent, thread_worker;
+static struct cond thread_parent, thread_worker;
struct worker {
int tid;
@@ -107,7 +107,7 @@ static void nest_epollfd(void)
printinfo("Nesting level(s): %d\n", nested);
epollfdp = calloc(nested, sizeof(int));
- if (!epollfd)
+ if (!epollfdp)
err(EXIT_FAILURE, "calloc");
for (i = 0; i < nested; i++) {
@@ -175,12 +175,12 @@ static void *workerfn(void *arg)
struct timespec ts = { .tv_sec = 0,
.tv_nsec = 250 };
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
- pthread_cond_signal(&thread_parent);
- pthread_cond_wait(&thread_worker, &thread_lock);
- pthread_mutex_unlock(&thread_lock);
+ cond_signal(&thread_parent);
+ cond_wait(&thread_worker, &thread_lock);
+ mutex_unlock(&thread_lock);
/* Let 'em loose */
do {
@@ -223,13 +223,20 @@ static void init_fdmaps(struct worker *w, int pct)
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
pthread_attr_t thread_attr, *attrp = NULL;
- cpu_set_t cpuset;
+ cpu_set_t *cpuset;
unsigned int i, j;
int ret = 0;
+ int nrcpus;
+ size_t size;
if (!noaffinity)
pthread_attr_init(&thread_attr);
+ nrcpus = perf_cpu_map__nr(cpu);
+ cpuset = CPU_ALLOC(nrcpus);
+ BUG_ON(!cpuset);
+ size = CPU_ALLOC_SIZE(nrcpus);
+
for (i = 0; i < nthreads; i++) {
struct worker *w = &worker[i];
@@ -253,22 +260,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
init_fdmaps(w, 50);
if (!noaffinity) {
- CPU_ZERO(&cpuset);
- CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+ CPU_ZERO_S(size, cpuset);
+ CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+ size, cpuset);
- ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
- if (ret)
+ ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+ if (ret) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+ }
attrp = &thread_attr;
}
ret = pthread_create(&w->thread, attrp, workerfn,
(void *)(struct worker *) w);
- if (ret)
+ if (ret) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
+ }
}
+ CPU_FREE(cpuset);
if (!noaffinity)
pthread_attr_destroy(&thread_attr);
@@ -334,7 +347,7 @@ int bench_epoll_ctl(int argc, const char **argv)
/* default to the number of CPUs */
if (!nthreads)
- nthreads = cpu->nr;
+ nthreads = perf_cpu_map__nr(cpu);
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
@@ -355,9 +368,9 @@ int bench_epoll_ctl(int argc, const char **argv)
for (i = 0; i < EPOLL_NR_OPS; i++)
init_stats(&all_stats[i]);
- pthread_mutex_init(&thread_lock, NULL);
- pthread_cond_init(&thread_parent, NULL);
- pthread_cond_init(&thread_worker, NULL);
+ mutex_init(&thread_lock);
+ cond_init(&thread_parent);
+ cond_init(&thread_worker);
threads_starting = nthreads;
@@ -365,11 +378,11 @@ int bench_epoll_ctl(int argc, const char **argv)
do_threads(worker, cpu);
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
while (threads_starting)
- pthread_cond_wait(&thread_parent, &thread_lock);
- pthread_cond_broadcast(&thread_worker);
- pthread_mutex_unlock(&thread_lock);
+ cond_wait(&thread_parent, &thread_lock);
+ cond_broadcast(&thread_worker);
+ mutex_unlock(&thread_lock);
sleep(nsecs);
toggle_done(0, NULL, NULL);
@@ -382,9 +395,9 @@ int bench_epoll_ctl(int argc, const char **argv)
}
/* cleanup & report results */
- pthread_cond_destroy(&thread_parent);
- pthread_cond_destroy(&thread_worker);
- pthread_mutex_destroy(&thread_lock);
+ cond_destroy(&thread_parent);
+ cond_destroy(&thread_worker);
+ mutex_destroy(&thread_lock);
for (i = 0; i < nthreads; i++) {
unsigned long t[EPOLL_NR_OPS];
@@ -412,4 +425,4 @@ int bench_epoll_ctl(int argc, const char **argv)
errmem:
err(EXIT_FAILURE, "calloc");
}
-#endif // HAVE_EVENTFD
+#endif // HAVE_EVENTFD_SUPPORT
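
This diff (and the futex ones further down) repeats the same conversion from a fixed-size on-stack cpu_set_t to a dynamically sized set, so affinity setup keeps working on machines with more CPUs than the static set can describe. A compilable sketch of just that pattern, with made-up nrcpus/cpu values and assuming glibc's CPU_ALLOC family:

#define _GNU_SOURCE             /* CPU_ALLOC() and pthread_attr_setaffinity_np() */
#include <err.h>
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

int main(void)
{
	int nrcpus = 256, cpu = 3;             /* illustrative values only */
	pthread_attr_t attr;
	cpu_set_t *cpuset = CPU_ALLOC(nrcpus); /* sized for nrcpus CPUs */
	size_t size = CPU_ALLOC_SIZE(nrcpus);  /* bytes the set occupies */

	if (!cpuset)
		err(EXIT_FAILURE, "CPU_ALLOC");

	pthread_attr_init(&attr);
	CPU_ZERO_S(size, cpuset);
	CPU_SET_S(cpu, size, cpuset);

	/* the _S variants take the real set size, so >1024 CPUs also work */
	if (pthread_attr_setaffinity_np(&attr, size, cpuset)) {
		CPU_FREE(cpuset);
		err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
	}

	CPU_FREE(cpuset);                      /* the attr keeps its own copy */
	pthread_attr_destroy(&attr);
	return 0;
}
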
diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c
index f938c585d512..c1cdf03c075d 100644
--- a/tools/perf/bench/epoll-wait.c
+++ b/tools/perf/bench/epoll-wait.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
/*
* Copyright (C) 2018 Davidlohr Bueso.
*
@@ -17,7 +17,7 @@
* While the second model, enabled via --multiq option, uses multiple
* queueing (which refers to one epoll instance per worker). For example,
* short lived tcp connections in a high throughput httpd server will
- * ditribute the accept()'ing connections across CPUs. In this case each
+ * distribute the accept()'ing connections across CPUs. In this case each
* worker does a limited amount of processing.
*
* [queue A] ---> [worker]
@@ -76,10 +76,10 @@
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/types.h>
-#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "../util/stat.h"
+#include "../util/mutex.h"
#include <subcmd/parse-options.h>
#include "bench.h"
@@ -110,10 +110,10 @@ static bool multiq; /* use an epoll instance per thread */
/* amount of fds to monitor, per thread */
static unsigned int nfds = 64;
-static pthread_mutex_t thread_lock;
+static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
-static pthread_cond_t thread_parent, thread_worker;
+static struct cond thread_parent, thread_worker;
struct worker {
int tid;
@@ -190,16 +190,16 @@ static void *workerfn(void *arg)
int to = nonblocking? 0 : -1;
int efd = multiq ? w->epollfd : epollfd;
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
- pthread_cond_signal(&thread_parent);
- pthread_cond_wait(&thread_worker, &thread_lock);
- pthread_mutex_unlock(&thread_lock);
+ cond_signal(&thread_parent);
+ cond_wait(&thread_worker, &thread_lock);
+ mutex_unlock(&thread_lock);
do {
/*
- * Block undefinitely waiting for the IN event.
+ * Block indefinitely waiting for the IN event.
* In order to stress the epoll_wait(2) syscall,
* call it event per event, instead of a larger
* batch (max)limit.
@@ -292,9 +292,11 @@ static void print_summary(void)
static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
{
pthread_attr_t thread_attr, *attrp = NULL;
- cpu_set_t cpuset;
+ cpu_set_t *cpuset;
unsigned int i, j;
int ret = 0, events = EPOLLIN;
+ int nrcpus;
+ size_t size;
if (oneshot)
events |= EPOLLONESHOT;
@@ -307,6 +309,11 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
if (!noaffinity)
pthread_attr_init(&thread_attr);
+ nrcpus = perf_cpu_map__nr(cpu);
+ cpuset = CPU_ALLOC(nrcpus);
+ BUG_ON(!cpuset);
+ size = CPU_ALLOC_SIZE(nrcpus);
+
for (i = 0; i < nthreads; i++) {
struct worker *w = &worker[i];
@@ -342,22 +349,28 @@ static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
}
if (!noaffinity) {
- CPU_ZERO(&cpuset);
- CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+ CPU_ZERO_S(size, cpuset);
+ CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu,
+ size, cpuset);
- ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
- if (ret)
+ ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+ if (ret) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+ }
attrp = &thread_attr;
}
ret = pthread_create(&w->thread, attrp, workerfn,
(void *)(struct worker *) w);
- if (ret)
+ if (ret) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
+ }
}
+ CPU_FREE(cpuset);
if (!noaffinity)
pthread_attr_destroy(&thread_attr);
@@ -453,7 +466,7 @@ int bench_epoll_wait(int argc, const char **argv)
/* default to the number of CPUs and leave one for the writer pthread */
if (!nthreads)
- nthreads = cpu->nr - 1;
+ nthreads = perf_cpu_map__nr(cpu) - 1;
worker = calloc(nthreads, sizeof(*worker));
if (!worker) {
@@ -473,9 +486,9 @@ int bench_epoll_wait(int argc, const char **argv)
getpid(), nthreads, oneshot ? " (EPOLLONESHOT semantics)": "", nfds, nsecs);
init_stats(&throughput_stats);
- pthread_mutex_init(&thread_lock, NULL);
- pthread_cond_init(&thread_parent, NULL);
- pthread_cond_init(&thread_worker, NULL);
+ mutex_init(&thread_lock);
+ cond_init(&thread_parent);
+ cond_init(&thread_worker);
threads_starting = nthreads;
@@ -483,11 +496,11 @@ int bench_epoll_wait(int argc, const char **argv)
do_threads(worker, cpu);
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
while (threads_starting)
- pthread_cond_wait(&thread_parent, &thread_lock);
- pthread_cond_broadcast(&thread_worker);
- pthread_mutex_unlock(&thread_lock);
+ cond_wait(&thread_parent, &thread_lock);
+ cond_broadcast(&thread_worker);
+ mutex_unlock(&thread_lock);
/*
* At this point the workers should be blocked waiting for read events
@@ -510,16 +523,17 @@ int bench_epoll_wait(int argc, const char **argv)
err(EXIT_FAILURE, "pthread_join");
/* cleanup & report results */
- pthread_cond_destroy(&thread_parent);
- pthread_cond_destroy(&thread_worker);
- pthread_mutex_destroy(&thread_lock);
+ cond_destroy(&thread_parent);
+ cond_destroy(&thread_worker);
+ mutex_destroy(&thread_lock);
/* sort the array back before reporting */
if (randomize)
qsort(worker, nthreads, sizeof(struct worker), cmpworker);
for (i = 0; i < nthreads; i++) {
- unsigned long t = worker[i].ops / bench__runtime.tv_sec;
+ unsigned long t = bench__runtime.tv_sec > 0 ?
+ worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);
@@ -539,4 +553,4 @@ int bench_epoll_wait(int argc, const char **argv)
errmem:
err(EXIT_FAILURE, "calloc");
}
-#endif // HAVE_EVENTFD
+#endif // HAVE_EVENTFD_SUPPORT
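
The pthread_mutex_t/pthread_cond_t to struct mutex/struct cond changes in these hunks only swap in perf's wrappers from ../util/mutex.h; the startup handshake itself is unchanged. For readers unfamiliar with it, here is a standalone plain-pthreads sketch of that handshake (each worker counts itself in, the last one signals the parent, and everyone waits for a single broadcast), mirroring the benchmark structure rather than any perf API:

#include <err.h>
#include <pthread.h>
#include <stdlib.h>

#define NTHREADS 4

static pthread_mutex_t lock   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  parent = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  worker = PTHREAD_COND_INITIALIZER;
static unsigned int threads_starting = NTHREADS;

static void *workerfn(void *arg)
{
	pthread_mutex_lock(&lock);
	if (--threads_starting == 0)
		pthread_cond_signal(&parent);  /* last worker in wakes the parent */
	pthread_cond_wait(&worker, &lock);     /* park until the broadcast */
	pthread_mutex_unlock(&lock);
	/* ... the real benchmark loop would start here ... */
	return arg;
}

int main(void)
{
	pthread_t t[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		if (pthread_create(&t[i], NULL, workerfn, NULL))
			err(EXIT_FAILURE, "pthread_create");

	pthread_mutex_lock(&lock);
	while (threads_starting)               /* wait until all workers are parked */
		pthread_cond_wait(&parent, &lock);
	pthread_cond_broadcast(&worker);       /* let 'em loose */
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}
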
diff --git a/tools/perf/bench/evlist-open-close.c b/tools/perf/bench/evlist-open-close.c
new file mode 100644
index 000000000000..5a27691469ed
--- /dev/null
+++ b/tools/perf/bench/evlist-open-close.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include "bench.h"
+#include "../util/debug.h"
+#include "../util/stat.h"
+#include "../util/evlist.h"
+#include "../util/evsel.h"
+#include "../util/strbuf.h"
+#include "../util/record.h"
+#include "../util/parse-events.h"
+#include "internal/threadmap.h"
+#include "internal/cpumap.h"
+#include <linux/perf_event.h>
+#include <linux/kernel.h>
+#include <linux/time64.h>
+#include <linux/string.h>
+#include <subcmd/parse-options.h>
+
+#define MMAP_FLUSH_DEFAULT 1
+
+static int iterations = 100;
+static int nr_events = 1;
+static const char *event_string = "dummy";
+
+static inline u64 timeval2usec(struct timeval *tv)
+{
+ return tv->tv_sec * USEC_PER_SEC + tv->tv_usec;
+}
+
+static struct record_opts opts = {
+ .sample_time = true,
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 4000,
+ .target = {
+ .uses_mmap = true,
+ .default_per_cpu = true,
+ },
+ .mmap_flush = MMAP_FLUSH_DEFAULT,
+ .nr_threads_synthesize = 1,
+ .ctl_fd = -1,
+ .ctl_fd_ack = -1,
+};
+
+static const struct option options[] = {
+ OPT_STRING('e', "event", &event_string, "event", "event selector. use 'perf list' to list available events"),
+ OPT_INTEGER('n', "nr-events", &nr_events,
+ "number of dummy events to create (default 1). If used with -e, it clones those events n times (1 = no change)"),
+ OPT_INTEGER('i', "iterations", &iterations, "Number of iterations used to compute average (default=100)"),
+ OPT_BOOLEAN('a', "all-cpus", &opts.target.system_wide, "system-wide collection from all CPUs"),
+ OPT_STRING('C', "cpu", &opts.target.cpu_list, "cpu", "list of cpus where to open events"),
+ OPT_STRING('p', "pid", &opts.target.pid, "pid", "record events on existing process id"),
+ OPT_STRING('t', "tid", &opts.target.tid, "tid", "record events on existing thread id"),
+ OPT_STRING('u', "uid", &opts.target.uid_str, "user", "user to profile"),
+ OPT_BOOLEAN(0, "per-thread", &opts.target.per_thread, "use per-thread mmaps"),
+ OPT_END()
+};
+
+static const char *const bench_usage[] = {
+ "perf bench internals evlist-open-close <options>",
+ NULL
+};
+
+static int evlist__count_evsel_fds(struct evlist *evlist)
+{
+ struct evsel *evsel;
+ int cnt = 0;
+
+ evlist__for_each_entry(evlist, evsel)
+ cnt += evsel->core.threads->nr * perf_cpu_map__nr(evsel->core.cpus);
+
+ return cnt;
+}
+
+static struct evlist *bench__create_evlist(char *evstr)
+{
+ struct parse_events_error err;
+ struct evlist *evlist = evlist__new();
+ int ret;
+
+ if (!evlist) {
+ pr_err("Not enough memory to create evlist\n");
+ return NULL;
+ }
+
+ parse_events_error__init(&err);
+ ret = parse_events(evlist, evstr, &err);
+ if (ret) {
+ parse_events_error__print(&err, evstr);
+ parse_events_error__exit(&err);
+ pr_err("Run 'perf list' for a list of valid events\n");
+ ret = 1;
+ goto out_delete_evlist;
+ }
+ parse_events_error__exit(&err);
+ ret = evlist__create_maps(evlist, &opts.target);
+ if (ret < 0) {
+ pr_err("Not enough memory to create thread/cpu maps\n");
+ goto out_delete_evlist;
+ }
+
+ evlist__config(evlist, &opts, NULL);
+
+ return evlist;
+
+out_delete_evlist:
+ evlist__delete(evlist);
+ return NULL;
+}
+
+static int bench__do_evlist_open_close(struct evlist *evlist)
+{
+ char sbuf[STRERR_BUFSIZE];
+ int err = evlist__open(evlist);
+
+ if (err < 0) {
+ pr_err("evlist__open: %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
+ return err;
+ }
+
+ err = evlist__mmap(evlist, opts.mmap_pages);
+ if (err < 0) {
+ pr_err("evlist__mmap: %s\n", str_error_r(errno, sbuf, sizeof(sbuf)));
+ return err;
+ }
+
+ evlist__enable(evlist);
+ evlist__disable(evlist);
+ evlist__munmap(evlist);
+ evlist__close(evlist);
+
+ return 0;
+}
+
+static int bench_evlist_open_close__run(char *evstr)
+{
+ // used to print statistics only
+ struct evlist *evlist = bench__create_evlist(evstr);
+ double time_average, time_stddev;
+ struct timeval start, end, diff;
+ struct stats time_stats;
+ u64 runtime_us;
+ int i, err;
+
+ if (!evlist)
+ return -ENOMEM;
+
+ init_stats(&time_stats);
+
+ printf(" Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.user_requested_cpus));
+ printf(" Number of threads:\t%d\n", evlist->core.threads->nr);
+ printf(" Number of events:\t%d (%d fds)\n",
+ evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
+ printf(" Number of iterations:\t%d\n", iterations);
+
+ evlist__delete(evlist);
+
+ for (i = 0; i < iterations; i++) {
+ pr_debug("Started iteration %d\n", i);
+ evlist = bench__create_evlist(evstr);
+ if (!evlist)
+ return -ENOMEM;
+
+ gettimeofday(&start, NULL);
+ err = bench__do_evlist_open_close(evlist);
+ if (err) {
+ evlist__delete(evlist);
+ return err;
+ }
+
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &diff);
+ runtime_us = timeval2usec(&diff);
+ update_stats(&time_stats, runtime_us);
+
+ evlist__delete(evlist);
+ pr_debug("Iteration %d took:\t%" PRIu64 "us\n", i, runtime_us);
+ }
+
+ time_average = avg_stats(&time_stats);
+ time_stddev = stddev_stats(&time_stats);
+ printf(" Average open-close took: %.3f usec (+- %.3f usec)\n", time_average, time_stddev);
+
+ return 0;
+}
+
+static char *bench__repeat_event_string(const char *evstr, int n)
+{
+ char sbuf[STRERR_BUFSIZE];
+ struct strbuf buf;
+ int i, str_size = strlen(evstr),
+ final_size = str_size * n + n,
+ err = strbuf_init(&buf, final_size);
+
+ if (err) {
+ pr_err("strbuf_init: %s\n", str_error_r(err, sbuf, sizeof(sbuf)));
+ goto out_error;
+ }
+
+ for (i = 0; i < n; i++) {
+ err = strbuf_add(&buf, evstr, str_size);
+ if (err) {
+ pr_err("strbuf_add: %s\n", str_error_r(err, sbuf, sizeof(sbuf)));
+ goto out_error;
+ }
+
+ err = strbuf_addch(&buf, i == n-1 ? '\0' : ',');
+ if (err) {
+ pr_err("strbuf_addch: %s\n", str_error_r(err, sbuf, sizeof(sbuf)));
+ goto out_error;
+ }
+ }
+
+ return strbuf_detach(&buf, NULL);
+
+out_error:
+ strbuf_release(&buf);
+ return NULL;
+}
+
+
+int bench_evlist_open_close(int argc, const char **argv)
+{
+ char *evstr, errbuf[BUFSIZ];
+ int err;
+
+ argc = parse_options(argc, argv, options, bench_usage, 0);
+ if (argc) {
+ usage_with_options(bench_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ err = target__validate(&opts.target);
+ if (err) {
+ target__strerror(&opts.target, err, errbuf, sizeof(errbuf));
+ pr_err("%s\n", errbuf);
+ goto out;
+ }
+
+ err = target__parse_uid(&opts.target);
+ if (err) {
+ target__strerror(&opts.target, err, errbuf, sizeof(errbuf));
+ pr_err("%s", errbuf);
+ goto out;
+ }
+
+ /* Enable ignoring missing threads when -u/-p option is defined. */
+ opts.ignore_missing_thread = opts.target.uid != UINT_MAX || opts.target.pid;
+
+ evstr = bench__repeat_event_string(event_string, nr_events);
+ if (!evstr) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = bench_evlist_open_close__run(evstr);
+
+ free(evstr);
+out:
+ return err;
+}
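
As its usage string states, the new benchmark is driven through perf bench. Illustrative invocations only (flags taken from the options table above, output omitted):

    perf bench internals evlist-open-close                      # one dummy event, 100 iterations
    perf bench internals evlist-open-close -e cycles -n 10 -i 1000   # clone the event 10 times, 1000 iterations
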
diff --git a/tools/perf/bench/find-bit-bench.c b/tools/perf/bench/find-bit-bench.c
new file mode 100644
index 000000000000..7e25b0e413f6
--- /dev/null
+++ b/tools/perf/bench/find-bit-bench.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Benchmark find_next_bit and related bit operations.
+ *
+ * Copyright 2020 Google LLC.
+ */
+#include <stdlib.h>
+#include "bench.h"
+#include "../util/stat.h"
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/time64.h>
+#include <subcmd/parse-options.h>
+
+static unsigned int outer_iterations = 5;
+static unsigned int inner_iterations = 100000;
+
+static const struct option options[] = {
+ OPT_UINTEGER('i', "outer-iterations", &outer_iterations,
+ "Number of outer iterations used"),
+ OPT_UINTEGER('j', "inner-iterations", &inner_iterations,
+ "Number of inner iterations used"),
+ OPT_END()
+};
+
+static const char *const bench_usage[] = {
+ "perf bench mem find_bit <options>",
+ NULL
+};
+
+static unsigned int accumulator;
+static unsigned int use_of_val;
+
+static noinline void workload(int val)
+{
+ use_of_val += val;
+ accumulator++;
+}
+
+#if (defined(__i386__) || defined(__x86_64__)) && defined(__GCC_ASM_FLAG_OUTPUTS__)
+static bool asm_test_bit(long nr, const unsigned long *addr)
+{
+ bool oldbit;
+
+ asm volatile("bt %2,%1"
+ : "=@ccc" (oldbit)
+ : "m" (*(unsigned long *)addr), "Ir" (nr) : "memory");
+
+ return oldbit;
+}
+#else
+#define asm_test_bit test_bit
+#endif
+
+static int do_for_each_set_bit(unsigned int num_bits)
+{
+ unsigned long *to_test = bitmap_zalloc(num_bits);
+ struct timeval start, end, diff;
+ u64 runtime_us;
+ struct stats fb_time_stats, tb_time_stats;
+ double time_average, time_stddev;
+ unsigned int bit, i, j;
+ unsigned int set_bits, skip;
+
+ init_stats(&fb_time_stats);
+ init_stats(&tb_time_stats);
+
+ for (set_bits = 1; set_bits <= num_bits; set_bits <<= 1) {
+ bitmap_zero(to_test, num_bits);
+ skip = num_bits / set_bits;
+ for (i = 0; i < num_bits; i += skip)
+ __set_bit(i, to_test);
+
+ for (i = 0; i < outer_iterations; i++) {
+#ifndef NDEBUG
+ unsigned int old = accumulator;
+#endif
+
+ gettimeofday(&start, NULL);
+ for (j = 0; j < inner_iterations; j++) {
+ for_each_set_bit(bit, to_test, num_bits)
+ workload(bit);
+ }
+ gettimeofday(&end, NULL);
+ assert(old + (inner_iterations * set_bits) == accumulator);
+ timersub(&end, &start, &diff);
+ runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ update_stats(&fb_time_stats, runtime_us);
+
+#ifndef NDEBUG
+ old = accumulator;
+#endif
+ gettimeofday(&start, NULL);
+ for (j = 0; j < inner_iterations; j++) {
+ for (bit = 0; bit < num_bits; bit++) {
+ if (asm_test_bit(bit, to_test))
+ workload(bit);
+ }
+ }
+ gettimeofday(&end, NULL);
+ assert(old + (inner_iterations * set_bits) == accumulator);
+ timersub(&end, &start, &diff);
+ runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ update_stats(&tb_time_stats, runtime_us);
+ }
+
+ printf("%d operations %d bits set of %d bits\n",
+ inner_iterations, set_bits, num_bits);
+ time_average = avg_stats(&fb_time_stats);
+ time_stddev = stddev_stats(&fb_time_stats);
+ printf(" Average for_each_set_bit took: %.3f usec (+- %.3f usec)\n",
+ time_average, time_stddev);
+ time_average = avg_stats(&tb_time_stats);
+ time_stddev = stddev_stats(&tb_time_stats);
+ printf(" Average test_bit loop took: %.3f usec (+- %.3f usec)\n",
+ time_average, time_stddev);
+
+ if (use_of_val == accumulator) /* Try to avoid compiler tricks. */
+ printf("\n");
+ }
+ bitmap_free(to_test);
+ return 0;
+}
+
+int bench_mem_find_bit(int argc, const char **argv)
+{
+ int err = 0, i;
+
+ argc = parse_options(argc, argv, options, bench_usage, 0);
+ if (argc) {
+ usage_with_options(bench_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ for (i = 1; i <= 2048; i <<= 1)
+ do_for_each_set_bit(i);
+
+ return err;
+}
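
The benchmark above compares the kernel's for_each_set_bit() iterator against probing every bit index with test_bit() (or the bt-based asm_test_bit() on x86). As a rough, library-free illustration of the two access patterns being compared, not the kernel implementation, over a single word:

#include <stdio.h>

static void visit_set_bits(unsigned long word)
{
	/* for_each_set_bit-style: jump straight from one set bit to the next */
	while (word) {
		int bit = __builtin_ctzl(word);   /* index of lowest set bit */
		printf("set bit at %d\n", bit);
		word &= word - 1;                 /* clear that bit */
	}
}

static void visit_all_bits(unsigned long word, unsigned int nbits)
{
	/* test_bit-style: probe every index, set or not */
	for (unsigned int bit = 0; bit < nbits; bit++)
		if (word & (1UL << bit))
			printf("set bit at %u\n", bit);
}

int main(void)
{
	visit_set_bits(0x90UL);                   /* bits 4 and 7 */
	visit_all_bits(0x90UL, 8);
	return 0;
}
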
diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c
index 65eebe06c04d..2005a3fa3026 100644
--- a/tools/perf/bench/futex-hash.c
+++ b/tools/perf/bench/futex-hash.c
@@ -20,9 +20,10 @@
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <sys/time.h>
-#include <internal/cpumap.h>
+#include <sys/mman.h>
#include <perf/cpumap.h>
+#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include "bench.h"
@@ -30,18 +31,14 @@
#include <err.h>
-static unsigned int nthreads = 0;
-static unsigned int nsecs = 10;
-/* amount of futexes per thread */
-static unsigned int nfutexes = 1024;
-static bool fshared = false, done = false, silent = false;
+static bool done = false;
static int futex_flag = 0;
struct timeval bench__start, bench__end, bench__runtime;
-static pthread_mutex_t thread_lock;
+static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
-static pthread_cond_t thread_parent, thread_worker;
+static struct cond thread_parent, thread_worker;
struct worker {
int tid;
@@ -50,12 +47,18 @@ struct worker {
unsigned long ops;
};
+static struct bench_futex_parameters params = {
+ .nfutexes = 1024,
+ .runtime = 10,
+};
+
static const struct option options[] = {
- OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
- OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
- OPT_UINTEGER('f', "futexes", &nfutexes, "Specify amount of futexes per threads"),
- OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
- OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
+ OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
+ OPT_UINTEGER('f', "futexes", &params.nfutexes, "Specify amount of futexes per threads"),
+ OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
+ OPT_BOOLEAN( 'S', "shared", &params.fshared, "Use shared futexes instead of private ones"),
+ OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
OPT_END()
};
@@ -71,15 +74,15 @@ static void *workerfn(void *arg)
unsigned int i;
unsigned long ops = w->ops; /* avoid cacheline bouncing */
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
- pthread_cond_signal(&thread_parent);
- pthread_cond_wait(&thread_worker, &thread_lock);
- pthread_mutex_unlock(&thread_lock);
+ cond_signal(&thread_parent);
+ cond_wait(&thread_worker, &thread_lock);
+ mutex_unlock(&thread_lock);
do {
- for (i = 0; i < nfutexes; i++, ops++) {
+ for (i = 0; i < params.nfutexes; i++, ops++) {
/*
* We want the futex calls to fail in order to stress
* the hashing of uaddr and not measure other steps,
@@ -87,7 +90,7 @@ static void *workerfn(void *arg)
* the critical region protected by hb->lock.
*/
ret = futex_wait(&w->futex[i], 1234, NULL, futex_flag);
- if (!silent &&
+ if (!params.silent &&
(!ret || errno != EAGAIN || errno != EWOULDBLOCK))
warn("Non-expected futex return call");
}
@@ -113,19 +116,21 @@ static void print_summary(void)
double stddev = stddev_stats(&throughput_stats);
printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
- !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+ !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
(int)bench__runtime.tv_sec);
}
int bench_futex_hash(int argc, const char **argv)
{
int ret = 0;
- cpu_set_t cpuset;
+ cpu_set_t *cpuset;
struct sigaction act;
unsigned int i;
pthread_attr_t thread_attr;
struct worker *worker = NULL;
struct perf_cpu_map *cpu;
+ int nrcpus;
+ size_t size;
argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
if (argc) {
@@ -142,79 +147,95 @@ int bench_futex_hash(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- if (!nthreads) /* default to the number of CPUs */
- nthreads = cpu->nr;
+ if (params.mlockall) {
+ if (mlockall(MCL_CURRENT | MCL_FUTURE))
+ err(EXIT_FAILURE, "mlockall");
+ }
+
+ if (!params.nthreads) /* default to the number of CPUs */
+ params.nthreads = perf_cpu_map__nr(cpu);
- worker = calloc(nthreads, sizeof(*worker));
+ worker = calloc(params.nthreads, sizeof(*worker));
if (!worker)
goto errmem;
- if (!fshared)
+ if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
- getpid(), nthreads, nfutexes, fshared ? "shared":"private", nsecs);
+ getpid(), params.nthreads, params.nfutexes, params.fshared ? "shared":"private", params.runtime);
init_stats(&throughput_stats);
- pthread_mutex_init(&thread_lock, NULL);
- pthread_cond_init(&thread_parent, NULL);
- pthread_cond_init(&thread_worker, NULL);
+ mutex_init(&thread_lock);
+ cond_init(&thread_parent);
+ cond_init(&thread_worker);
- threads_starting = nthreads;
+ threads_starting = params.nthreads;
pthread_attr_init(&thread_attr);
gettimeofday(&bench__start, NULL);
- for (i = 0; i < nthreads; i++) {
+
+ nrcpus = perf_cpu_map__nr(cpu);
+ cpuset = CPU_ALLOC(nrcpus);
+ BUG_ON(!cpuset);
+ size = CPU_ALLOC_SIZE(nrcpus);
+
+ for (i = 0; i < params.nthreads; i++) {
worker[i].tid = i;
- worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
+ worker[i].futex = calloc(params.nfutexes, sizeof(*worker[i].futex));
if (!worker[i].futex)
goto errmem;
- CPU_ZERO(&cpuset);
- CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+ CPU_ZERO_S(size, cpuset);
- ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset);
- if (ret)
+ CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
+ ret = pthread_attr_setaffinity_np(&thread_attr, size, cpuset);
+ if (ret) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
-
+ }
ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
(void *)(struct worker *) &worker[i]);
- if (ret)
+ if (ret) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
+ }
}
+ CPU_FREE(cpuset);
pthread_attr_destroy(&thread_attr);
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
while (threads_starting)
- pthread_cond_wait(&thread_parent, &thread_lock);
- pthread_cond_broadcast(&thread_worker);
- pthread_mutex_unlock(&thread_lock);
+ cond_wait(&thread_parent, &thread_lock);
+ cond_broadcast(&thread_worker);
+ mutex_unlock(&thread_lock);
- sleep(nsecs);
+ sleep(params.runtime);
toggle_done(0, NULL, NULL);
- for (i = 0; i < nthreads; i++) {
+ for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(worker[i].thread, NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
/* cleanup & report results */
- pthread_cond_destroy(&thread_parent);
- pthread_cond_destroy(&thread_worker);
- pthread_mutex_destroy(&thread_lock);
+ cond_destroy(&thread_parent);
+ cond_destroy(&thread_worker);
+ mutex_destroy(&thread_lock);
- for (i = 0; i < nthreads; i++) {
- unsigned long t = worker[i].ops / bench__runtime.tv_sec;
+ for (i = 0; i < params.nthreads; i++) {
+ unsigned long t = bench__runtime.tv_sec > 0 ?
+ worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);
- if (!silent) {
- if (nfutexes == 1)
+ if (!params.silent) {
+ if (params.nfutexes == 1)
printf("[thread %2d] futex: %p [ %ld ops/sec ]\n",
worker[i].tid, &worker[i].futex[0], t);
else
printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n",
worker[i].tid, &worker[i].futex[0],
- &worker[i].futex[nfutexes-1], t);
+ &worker[i].futex[params.nfutexes-1], t);
}
zfree(&worker[i].futex);
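
All of the futex benchmarks in this series move their knobs into a shared struct bench_futex_parameters. Its definition lives in tools/perf/bench/futex.h, which is not part of this excerpt; judging only by the fields these hunks reference, its shape is roughly the following (field order and any members not referenced here are guesses):

#include <stdbool.h>

struct bench_futex_parameters {
	bool silent;                /* all benchmarks */
	bool fshared;               /* all benchmarks */
	bool mlockall;              /* all benchmarks */
	bool multi;                 /* lock-pi */
	bool pi;                    /* requeue */
	bool broadcast;             /* requeue */
	unsigned int runtime;       /* hash, lock-pi */
	unsigned int nthreads;      /* all benchmarks */
	unsigned int nfutexes;      /* hash */
	unsigned int nwakes;        /* wake, wake-parallel */
	unsigned int nrequeue;      /* requeue */
};
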
diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c
index 89fd8f325f38..2d0417949727 100644
--- a/tools/perf/bench/futex-lock-pi.c
+++ b/tools/perf/bench/futex-lock-pi.c
@@ -8,13 +8,13 @@
#include <pthread.h>
#include <signal.h>
+#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <errno.h>
-#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
@@ -22,6 +22,7 @@
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
+#include <sys/mman.h>
struct worker {
int tid;
@@ -32,22 +33,24 @@ struct worker {
static u_int32_t global_futex = 0;
static struct worker *worker;
-static unsigned int nsecs = 10;
-static bool silent = false, multi = false;
-static bool done = false, fshared = false;
-static unsigned int nthreads = 0;
+static bool done = false;
static int futex_flag = 0;
-static pthread_mutex_t thread_lock;
+static struct mutex thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
-static pthread_cond_t thread_parent, thread_worker;
+static struct cond thread_parent, thread_worker;
+
+static struct bench_futex_parameters params = {
+ .runtime = 10,
+};
static const struct option options[] = {
- OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
- OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
- OPT_BOOLEAN( 'M', "multi", &multi, "Use multiple futexes"),
- OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
- OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
+ OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('r', "runtime", &params.runtime, "Specify runtime (in seconds)"),
+ OPT_BOOLEAN( 'M', "multi", &params.multi, "Use multiple futexes"),
+ OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
+ OPT_BOOLEAN( 'S', "shared", &params.fshared, "Use shared futexes instead of private ones"),
+ OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
OPT_END()
};
@@ -62,7 +65,7 @@ static void print_summary(void)
double stddev = stddev_stats(&throughput_stats);
printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
- !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
+ !params.silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
(int)bench__runtime.tv_sec);
}
@@ -81,12 +84,12 @@ static void *workerfn(void *arg)
struct worker *w = (struct worker *) arg;
unsigned long ops = w->ops;
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
- pthread_cond_signal(&thread_parent);
- pthread_cond_wait(&thread_worker, &thread_lock);
- pthread_mutex_unlock(&thread_lock);
+ cond_signal(&thread_parent);
+ cond_wait(&thread_worker, &thread_lock);
+ mutex_unlock(&thread_lock);
do {
int ret;
@@ -94,7 +97,7 @@ static void *workerfn(void *arg)
ret = futex_lock_pi(w->futex, NULL, futex_flag);
if (ret) { /* handle lock acquisition */
- if (!silent)
+ if (!params.silent)
warn("thread %d: Could not lock pi-lock for %p (%d)",
w->tid, w->futex, ret);
if (done)
@@ -105,7 +108,7 @@ static void *workerfn(void *arg)
usleep(1);
ret = futex_unlock_pi(w->futex, futex_flag);
- if (ret && !silent)
+ if (ret && !params.silent)
warn("thread %d: Could not unlock pi-lock for %p (%d)",
w->tid, w->futex, ret);
ops++; /* account for thread's share of work */
@@ -118,30 +121,41 @@ static void *workerfn(void *arg)
static void create_threads(struct worker *w, pthread_attr_t thread_attr,
struct perf_cpu_map *cpu)
{
- cpu_set_t cpuset;
+ cpu_set_t *cpuset;
unsigned int i;
+ int nrcpus = perf_cpu_map__nr(cpu);
+ size_t size;
- threads_starting = nthreads;
+ threads_starting = params.nthreads;
- for (i = 0; i < nthreads; i++) {
+ cpuset = CPU_ALLOC(nrcpus);
+ BUG_ON(!cpuset);
+ size = CPU_ALLOC_SIZE(nrcpus);
+
+ for (i = 0; i < params.nthreads; i++) {
worker[i].tid = i;
- if (multi) {
+ if (params.multi) {
worker[i].futex = calloc(1, sizeof(u_int32_t));
if (!worker[i].futex)
err(EXIT_FAILURE, "calloc");
} else
worker[i].futex = &global_futex;
- CPU_ZERO(&cpuset);
- CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+ CPU_ZERO_S(size, cpuset);
+ CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+ if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+ }
- if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i]))
+ if (pthread_create(&w[i].thread, &thread_attr, workerfn, &worker[i])) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
+ }
}
+ CPU_FREE(cpuset);
}
int bench_futex_lock_pi(int argc, const char **argv)
@@ -165,66 +179,73 @@ int bench_futex_lock_pi(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- if (!nthreads)
- nthreads = cpu->nr;
+ if (params.mlockall) {
+ if (mlockall(MCL_CURRENT | MCL_FUTURE))
+ err(EXIT_FAILURE, "mlockall");
+ }
+
+ if (!params.nthreads)
+ params.nthreads = perf_cpu_map__nr(cpu);
- worker = calloc(nthreads, sizeof(*worker));
+ worker = calloc(params.nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
- if (!fshared)
+ if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: %d threads doing pi lock/unlock pairing for %d secs.\n\n",
- getpid(), nthreads, nsecs);
+ getpid(), params.nthreads, params.runtime);
init_stats(&throughput_stats);
- pthread_mutex_init(&thread_lock, NULL);
- pthread_cond_init(&thread_parent, NULL);
- pthread_cond_init(&thread_worker, NULL);
+ mutex_init(&thread_lock);
+ cond_init(&thread_parent);
+ cond_init(&thread_worker);
- threads_starting = nthreads;
+ threads_starting = params.nthreads;
pthread_attr_init(&thread_attr);
gettimeofday(&bench__start, NULL);
create_threads(worker, thread_attr, cpu);
pthread_attr_destroy(&thread_attr);
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
while (threads_starting)
- pthread_cond_wait(&thread_parent, &thread_lock);
- pthread_cond_broadcast(&thread_worker);
- pthread_mutex_unlock(&thread_lock);
+ cond_wait(&thread_parent, &thread_lock);
+ cond_broadcast(&thread_worker);
+ mutex_unlock(&thread_lock);
- sleep(nsecs);
+ sleep(params.runtime);
toggle_done(0, NULL, NULL);
- for (i = 0; i < nthreads; i++) {
+ for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(worker[i].thread, NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
/* cleanup & report results */
- pthread_cond_destroy(&thread_parent);
- pthread_cond_destroy(&thread_worker);
- pthread_mutex_destroy(&thread_lock);
+ cond_destroy(&thread_parent);
+ cond_destroy(&thread_worker);
+ mutex_destroy(&thread_lock);
- for (i = 0; i < nthreads; i++) {
- unsigned long t = worker[i].ops / bench__runtime.tv_sec;
+ for (i = 0; i < params.nthreads; i++) {
+ unsigned long t = bench__runtime.tv_sec > 0 ?
+ worker[i].ops / bench__runtime.tv_sec : 0;
update_stats(&throughput_stats, t);
- if (!silent)
+ if (!params.silent)
printf("[thread %3d] futex: %p [ %ld ops/sec ]\n",
worker[i].tid, worker[i].futex, t);
- if (multi)
+ if (params.multi)
zfree(&worker[i].futex);
}
print_summary();
free(worker);
+ perf_cpu_map__put(cpu);
return ret;
err:
usage_with_options(bench_futex_lock_pi_usage, options);
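
The worker above exercises the kernel's PI futex paths through the futex_lock_pi()/futex_unlock_pi() helpers from futex.h (included above but not shown in this diff). A minimal single-threaded illustration of the underlying syscalls, assuming glibc exposes no futex() wrapper so SYS_futex is called directly; in the uncontended case the kernel stores the caller's TID in the futex word, which is what lets the benchmark start from a zeroed global futex:

#define _GNU_SOURCE
#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t pi_futex;      /* 0 = unlocked; holds the owner's TID when locked */

static long futex_op(uint32_t *uaddr, int op)
{
	return syscall(SYS_futex, uaddr, op, 0, NULL, NULL, 0);
}

int main(void)
{
	if (futex_op(&pi_futex, FUTEX_LOCK_PI))        /* uncontended acquire */
		perror("FUTEX_LOCK_PI");
	printf("futex word: %u (tid %ld)\n", pi_futex, (long)syscall(SYS_gettid));
	if (futex_op(&pi_futex, FUTEX_UNLOCK_PI))      /* release, word back to 0 */
		perror("FUTEX_UNLOCK_PI");
	return 0;
}
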
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index 7a15c2e61022..69ad896f556c 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -6,7 +6,8 @@
* on futex2, N at a time.
*
* This program is particularly useful to measure the latency of nthread
- * requeues without waking up any tasks -- thus mimicking a regular futex_wait.
+ * requeues without waking up any tasks (in the non-pi case) -- thus
+ * mimicking a regular futex_wait.
*/
/* For the CLR_() macros */
@@ -14,13 +15,13 @@
#include <pthread.h>
#include <signal.h>
+#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
-#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
@@ -28,28 +29,35 @@
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
+#include <sys/mman.h>
static u_int32_t futex1 = 0, futex2 = 0;
-/*
- * How many tasks to requeue at a time.
- * Default to 1 in order to make the kernel work more.
- */
-static unsigned int nrequeue = 1;
-
static pthread_t *worker;
-static bool done = false, silent = false, fshared = false;
-static pthread_mutex_t thread_lock;
-static pthread_cond_t thread_parent, thread_worker;
+static bool done = false;
+static struct mutex thread_lock;
+static struct cond thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
-static unsigned int threads_starting, nthreads = 0;
+static unsigned int threads_starting;
static int futex_flag = 0;
+static struct bench_futex_parameters params = {
+ /*
+ * How many tasks to requeue at a time.
+ * Default to 1 in order to make the kernel work more.
+ */
+ .nrequeue = 1,
+};
+
static const struct option options[] = {
- OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
- OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"),
- OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
- OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
+ OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('q', "nrequeue", &params.nrequeue, "Specify amount of threads to requeue at once"),
+ OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
+ OPT_BOOLEAN( 'S', "shared", &params.fshared, "Use shared futexes instead of private ones"),
+ OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
+ OPT_BOOLEAN( 'B', "broadcast", &params.broadcast, "Requeue all threads at once"),
+ OPT_BOOLEAN( 'p', "pi", &params.pi, "Use PI-aware variants of FUTEX_CMP_REQUEUE"),
+
OPT_END()
};
@@ -66,43 +74,83 @@ static void print_summary(void)
printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n",
requeued_avg,
- nthreads,
+ params.nthreads,
requeuetime_avg / USEC_PER_MSEC,
rel_stddev_stats(requeuetime_stddev, requeuetime_avg));
}
static void *workerfn(void *arg __maybe_unused)
{
- pthread_mutex_lock(&thread_lock);
+ int ret;
+
+ mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
- pthread_cond_signal(&thread_parent);
- pthread_cond_wait(&thread_worker, &thread_lock);
- pthread_mutex_unlock(&thread_lock);
+ cond_signal(&thread_parent);
+ cond_wait(&thread_worker, &thread_lock);
+ mutex_unlock(&thread_lock);
+
+ while (1) {
+ if (!params.pi) {
+ ret = futex_wait(&futex1, 0, NULL, futex_flag);
+ if (!ret)
+ break;
+
+ if (ret && errno != EAGAIN) {
+ if (!params.silent)
+ warnx("futex_wait");
+ break;
+ }
+ } else {
+ ret = futex_wait_requeue_pi(&futex1, 0, &futex2,
+ NULL, futex_flag);
+ if (!ret) {
+ /* got the lock at futex2 */
+ futex_unlock_pi(&futex2, futex_flag);
+ break;
+ }
+
+ if (ret && errno != EAGAIN) {
+ if (!params.silent)
+ warnx("futex_wait_requeue_pi");
+ break;
+ }
+ }
+ }
- futex_wait(&futex1, 0, NULL, futex_flag);
return NULL;
}
static void block_threads(pthread_t *w,
pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
{
- cpu_set_t cpuset;
+ cpu_set_t *cpuset;
unsigned int i;
+ int nrcpus = perf_cpu_map__nr(cpu);
+ size_t size;
- threads_starting = nthreads;
+ threads_starting = params.nthreads;
+
+ cpuset = CPU_ALLOC(nrcpus);
+ BUG_ON(!cpuset);
+ size = CPU_ALLOC_SIZE(nrcpus);
/* create and block all threads */
- for (i = 0; i < nthreads; i++) {
- CPU_ZERO(&cpuset);
- CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+ for (i = 0; i < params.nthreads; i++) {
+ CPU_ZERO_S(size, cpuset);
+ CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+ if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+ }
- if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+ if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
+ }
}
+ CPU_FREE(cpuset);
}
static void toggle_done(int sig __maybe_unused,
@@ -133,55 +181,82 @@ int bench_futex_requeue(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- if (!nthreads)
- nthreads = cpu->nr;
+ if (params.mlockall) {
+ if (mlockall(MCL_CURRENT | MCL_FUTURE))
+ err(EXIT_FAILURE, "mlockall");
+ }
+
+ if (!params.nthreads)
+ params.nthreads = perf_cpu_map__nr(cpu);
- worker = calloc(nthreads, sizeof(*worker));
+ worker = calloc(params.nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
- if (!fshared)
+ if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
- if (nrequeue > nthreads)
- nrequeue = nthreads;
+ if (params.nrequeue > params.nthreads)
+ params.nrequeue = params.nthreads;
- printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
- "%d at a time.\n\n", getpid(), nthreads,
- fshared ? "shared":"private", &futex1, &futex2, nrequeue);
+ if (params.broadcast)
+ params.nrequeue = params.nthreads;
+
+ printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %s%p), "
+ "%d at a time.\n\n", getpid(), params.nthreads,
+ params.fshared ? "shared":"private", &futex1,
+ params.pi ? "PI ": "", &futex2, params.nrequeue);
init_stats(&requeued_stats);
init_stats(&requeuetime_stats);
pthread_attr_init(&thread_attr);
- pthread_mutex_init(&thread_lock, NULL);
- pthread_cond_init(&thread_parent, NULL);
- pthread_cond_init(&thread_worker, NULL);
+ mutex_init(&thread_lock);
+ cond_init(&thread_parent);
+ cond_init(&thread_worker);
for (j = 0; j < bench_repeat && !done; j++) {
- unsigned int nrequeued = 0;
+ unsigned int nrequeued = 0, wakeups = 0;
struct timeval start, end, runtime;
/* create, launch & block all threads */
block_threads(worker, thread_attr, cpu);
/* make sure all threads are already blocked */
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
while (threads_starting)
- pthread_cond_wait(&thread_parent, &thread_lock);
- pthread_cond_broadcast(&thread_worker);
- pthread_mutex_unlock(&thread_lock);
+ cond_wait(&thread_parent, &thread_lock);
+ cond_broadcast(&thread_worker);
+ mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start requeueing */
gettimeofday(&start, NULL);
- while (nrequeued < nthreads) {
+ while (nrequeued < params.nthreads) {
+ int r;
+
/*
- * Do not wakeup any tasks blocked on futex1, allowing
- * us to really measure futex_wait functionality.
+ * For the regular non-pi case, do not wakeup any tasks
+ * blocked on futex1, allowing us to really measure
+ * futex_wait functionality. For the PI case the first
+ * waiter is always awoken.
*/
- nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
- nrequeue, futex_flag);
+ if (!params.pi) {
+ r = futex_cmp_requeue(&futex1, 0, &futex2, 0,
+ params.nrequeue,
+ futex_flag);
+ } else {
+ r = futex_cmp_requeue_pi(&futex1, 0, &futex2,
+ params.nrequeue,
+ futex_flag);
+ wakeups++; /* assume no error */
+ }
+
+ if (r < 0)
+ err(EXIT_FAILURE, "couldn't requeue from %p to %p",
+ &futex1, &futex2);
+
+ nrequeued += r;
}
gettimeofday(&end, NULL);
@@ -190,17 +265,32 @@ int bench_futex_requeue(int argc, const char **argv)
update_stats(&requeued_stats, nrequeued);
update_stats(&requeuetime_stats, runtime.tv_usec);
- if (!silent) {
- printf("[Run %d]: Requeued %d of %d threads in %.4f ms\n",
- j + 1, nrequeued, nthreads, runtime.tv_usec / (double)USEC_PER_MSEC);
+ if (!params.silent) {
+ if (!params.pi)
+ printf("[Run %d]: Requeued %d of %d threads in "
+ "%.4f ms\n", j + 1, nrequeued,
+ params.nthreads,
+ runtime.tv_usec / (double)USEC_PER_MSEC);
+ else {
+ nrequeued -= wakeups;
+ printf("[Run %d]: Awoke and Requeued (%d+%d) of "
+ "%d threads in %.4f ms\n",
+ j + 1, wakeups, nrequeued,
+ params.nthreads,
+ runtime.tv_usec / (double)USEC_PER_MSEC);
+ }
+
}
- /* everybody should be blocked on futex2, wake'em up */
- nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
- if (nthreads != nrequeued)
- warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
+ if (!params.pi) {
+ /* everybody should be blocked on futex2, wake'em up */
+ nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
+ if (params.nthreads != nrequeued)
+ warnx("couldn't wakeup all tasks (%d/%d)",
+ nrequeued, params.nthreads);
+ }
- for (i = 0; i < nthreads; i++) {
+ for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
@@ -208,14 +298,15 @@ int bench_futex_requeue(int argc, const char **argv)
}
/* cleanup & report results */
- pthread_cond_destroy(&thread_parent);
- pthread_cond_destroy(&thread_worker);
- pthread_mutex_destroy(&thread_lock);
+ cond_destroy(&thread_parent);
+ cond_destroy(&thread_worker);
+ mutex_destroy(&thread_lock);
pthread_attr_destroy(&thread_attr);
print_summary();
free(worker);
+ perf_cpu_map__put(cpu);
return ret;
err:
usage_with_options(bench_futex_requeue_usage, options);
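
When --pi is not given, both sides of the requeue benchmark rest on the plain wait/wake primitive. A minimal raw-syscall wait/wake pair, again assuming direct SYS_futex use; the 100 ms sleep is the same crude "make sure the waiter is blocked" trick the benchmark itself uses:

#define _GNU_SOURCE
#include <err.h>
#include <linux/futex.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t futex_word;     /* waiters block while this is still 0 */

static void *waiter(void *arg)
{
	/* sleeps in the kernel only if futex_word is still 0 at syscall time */
	syscall(SYS_futex, &futex_word, FUTEX_WAIT, 0, NULL, NULL, 0);
	puts("waiter woke up");
	return arg;
}

int main(void)
{
	pthread_t t;
	long woken;

	if (pthread_create(&t, NULL, waiter, NULL))
		err(EXIT_FAILURE, "pthread_create");

	usleep(100000);             /* let the waiter block, as in the benchmark */
	woken = syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1, NULL, NULL, 0);
	printf("woke %ld waiter(s)\n", woken);

	pthread_join(t, NULL);
	return 0;
}
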
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
index cd2b81a845ac..6682e49d0ee0 100644
--- a/tools/perf/bench/futex-wake-parallel.c
+++ b/tools/perf/bench/futex-wake-parallel.c
@@ -10,6 +10,7 @@
#include "bench.h"
#include <linux/compiler.h>
#include "../util/debug.h"
+#include "../util/mutex.h"
#ifndef HAVE_PTHREAD_BARRIER
int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe_unused)
@@ -29,12 +30,12 @@ int bench_futex_wake_parallel(int argc __maybe_unused, const char **argv __maybe
#include <linux/time64.h>
#include <errno.h>
#include "futex.h"
-#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
+#include <sys/mman.h>
struct thread_data {
pthread_t worker;
@@ -48,20 +49,23 @@ static unsigned int nwakes = 1;
static u_int32_t futex = 0;
static pthread_t *blocked_worker;
-static bool done = false, silent = false, fshared = false;
-static unsigned int nblocked_threads = 0, nwaking_threads = 0;
-static pthread_mutex_t thread_lock;
-static pthread_cond_t thread_parent, thread_worker;
+static bool done = false;
+static struct mutex thread_lock;
+static struct cond thread_parent, thread_worker;
static pthread_barrier_t barrier;
static struct stats waketime_stats, wakeup_stats;
static unsigned int threads_starting;
static int futex_flag = 0;
+static struct bench_futex_parameters params;
+
static const struct option options[] = {
- OPT_UINTEGER('t', "threads", &nblocked_threads, "Specify amount of threads"),
- OPT_UINTEGER('w', "nwakers", &nwaking_threads, "Specify amount of waking threads"),
- OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
- OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
+ OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('w', "nwakers", &params.nwakes, "Specify amount of waking threads"),
+ OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
+ OPT_BOOLEAN( 'S', "shared", &params.fshared, "Use shared futexes instead of private ones"),
+ OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
+
OPT_END()
};
@@ -97,10 +101,10 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
- pthread_barrier_init(&barrier, NULL, nwaking_threads + 1);
+ pthread_barrier_init(&barrier, NULL, params.nwakes + 1);
/* create and block all threads */
- for (i = 0; i < nwaking_threads; i++) {
+ for (i = 0; i < params.nwakes; i++) {
/*
* Thread creation order will impact per-thread latency
* as it will affect the order to acquire the hb spinlock.
@@ -113,7 +117,7 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
pthread_barrier_wait(&barrier);
- for (i = 0; i < nwaking_threads; i++)
+ for (i = 0; i < params.nwakes; i++)
if (pthread_join(td[i].worker, NULL))
err(EXIT_FAILURE, "pthread_join");
@@ -122,12 +126,12 @@ static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
static void *blocked_workerfn(void *arg __maybe_unused)
{
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
- pthread_cond_signal(&thread_parent);
- pthread_cond_wait(&thread_worker, &thread_lock);
- pthread_mutex_unlock(&thread_lock);
+ cond_signal(&thread_parent);
+ cond_wait(&thread_worker, &thread_lock);
+ mutex_unlock(&thread_lock);
while (1) { /* handle spurious wakeups */
if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
@@ -141,22 +145,33 @@ static void *blocked_workerfn(void *arg __maybe_unused)
static void block_threads(pthread_t *w, pthread_attr_t thread_attr,
struct perf_cpu_map *cpu)
{
- cpu_set_t cpuset;
+ cpu_set_t *cpuset;
unsigned int i;
+ int nrcpus = perf_cpu_map__nr(cpu);
+ size_t size;
- threads_starting = nblocked_threads;
+ threads_starting = params.nthreads;
+
+ cpuset = CPU_ALLOC(nrcpus);
+ BUG_ON(!cpuset);
+ size = CPU_ALLOC_SIZE(nrcpus);
/* create and block all threads */
- for (i = 0; i < nblocked_threads; i++) {
- CPU_ZERO(&cpuset);
- CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+ for (i = 0; i < params.nthreads; i++) {
+ CPU_ZERO_S(size, cpuset);
+ CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+ if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+ }
- if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
+ if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL)) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
+ }
}
+ CPU_FREE(cpuset);
}
static void print_run(struct thread_data *waking_worker, unsigned int run_num)
@@ -168,7 +183,7 @@ static void print_run(struct thread_data *waking_worker, unsigned int run_num)
init_stats(&__wakeup_stats);
init_stats(&__waketime_stats);
- for (i = 0; i < nwaking_threads; i++) {
+ for (i = 0; i < params.nwakes; i++) {
update_stats(&__waketime_stats, waking_worker[i].runtime.tv_usec);
update_stats(&__wakeup_stats, waking_worker[i].nwoken);
}
@@ -179,7 +194,7 @@ static void print_run(struct thread_data *waking_worker, unsigned int run_num)
printf("[Run %d]: Avg per-thread latency (waking %d/%d threads) "
"in %.4f ms (+-%.2f%%)\n", run_num + 1, wakeup_avg,
- nblocked_threads, waketime_avg / USEC_PER_MSEC,
+ params.nthreads, waketime_avg / USEC_PER_MSEC,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
@@ -194,7 +209,7 @@ static void print_summary(void)
printf("Avg per-thread latency (waking %d/%d threads) in %.4f ms (+-%.2f%%)\n",
wakeup_avg,
- nblocked_threads,
+ params.nthreads,
waketime_avg / USEC_PER_MSEC,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
@@ -204,7 +219,7 @@ static void do_run_stats(struct thread_data *waking_worker)
{
unsigned int i;
- for (i = 0; i < nwaking_threads; i++) {
+ for (i = 0; i < params.nwakes; i++) {
update_stats(&waketime_stats, waking_worker[i].runtime.tv_usec);
update_stats(&wakeup_stats, waking_worker[i].nwoken);
}
@@ -239,47 +254,53 @@ int bench_futex_wake_parallel(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
+ if (params.mlockall) {
+ if (mlockall(MCL_CURRENT | MCL_FUTURE))
+ err(EXIT_FAILURE, "mlockall");
+ }
+
cpu = perf_cpu_map__new(NULL);
if (!cpu)
err(EXIT_FAILURE, "calloc");
- if (!nblocked_threads)
- nblocked_threads = cpu->nr;
+ if (!params.nthreads)
+ params.nthreads = perf_cpu_map__nr(cpu);
/* some sanity checks */
- if (nwaking_threads > nblocked_threads || !nwaking_threads)
- nwaking_threads = nblocked_threads;
+ if (params.nwakes > params.nthreads ||
+ !params.nwakes)
+ params.nwakes = params.nthreads;
- if (nblocked_threads % nwaking_threads)
+ if (params.nthreads % params.nwakes)
errx(EXIT_FAILURE, "Must be perfectly divisible");
/*
* Each thread will wakeup nwakes tasks in
* a single futex_wait call.
*/
- nwakes = nblocked_threads/nwaking_threads;
+ nwakes = params.nthreads/params.nwakes;
- blocked_worker = calloc(nblocked_threads, sizeof(*blocked_worker));
+ blocked_worker = calloc(params.nthreads, sizeof(*blocked_worker));
if (!blocked_worker)
err(EXIT_FAILURE, "calloc");
- if (!fshared)
+ if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
"futex %p), %d threads waking up %d at a time.\n\n",
- getpid(), nblocked_threads, fshared ? "shared":"private",
- &futex, nwaking_threads, nwakes);
+ getpid(), params.nthreads, params.fshared ? "shared":"private",
+ &futex, params.nwakes, nwakes);
init_stats(&wakeup_stats);
init_stats(&waketime_stats);
pthread_attr_init(&thread_attr);
- pthread_mutex_init(&thread_lock, NULL);
- pthread_cond_init(&thread_parent, NULL);
- pthread_cond_init(&thread_worker, NULL);
+ mutex_init(&thread_lock);
+ cond_init(&thread_parent);
+ cond_init(&thread_worker);
for (j = 0; j < bench_repeat && !done; j++) {
- waking_worker = calloc(nwaking_threads, sizeof(*waking_worker));
+ waking_worker = calloc(params.nwakes, sizeof(*waking_worker));
if (!waking_worker)
err(EXIT_FAILURE, "calloc");
@@ -287,39 +308,40 @@ int bench_futex_wake_parallel(int argc, const char **argv)
block_threads(blocked_worker, thread_attr, cpu);
/* make sure all threads are already blocked */
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
while (threads_starting)
- pthread_cond_wait(&thread_parent, &thread_lock);
- pthread_cond_broadcast(&thread_worker);
- pthread_mutex_unlock(&thread_lock);
+ cond_wait(&thread_parent, &thread_lock);
+ cond_broadcast(&thread_worker);
+ mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start waking folks up */
wakeup_threads(waking_worker, thread_attr);
- for (i = 0; i < nblocked_threads; i++) {
+ for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(blocked_worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
do_run_stats(waking_worker);
- if (!silent)
+ if (!params.silent)
print_run(waking_worker, j);
free(waking_worker);
}
/* cleanup & report results */
- pthread_cond_destroy(&thread_parent);
- pthread_cond_destroy(&thread_worker);
- pthread_mutex_destroy(&thread_lock);
+ cond_destroy(&thread_parent);
+ cond_destroy(&thread_worker);
+ mutex_destroy(&thread_lock);
pthread_attr_destroy(&thread_attr);
print_summary();
free(blocked_worker);
+ perf_cpu_map__put(cpu);
return ret;
}
#endif /* HAVE_PTHREAD_BARRIER */
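
Unlike the other futex benchmarks, wake-parallel relies on a pthread barrier so that all waker threads begin their timed wakeups at once (hence the HAVE_PTHREAD_BARRIER guard). A standalone sketch of that rendezvous, with the thread bodies reduced to a print and the thread count chosen arbitrarily:

#include <err.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NWAKERS 4

static pthread_barrier_t barrier;

static void *waker(void *arg)
{
	pthread_barrier_wait(&barrier);   /* all wakers released together */
	printf("waker %ld released\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[NWAKERS];

	pthread_barrier_init(&barrier, NULL, NWAKERS + 1); /* +1 for the parent */
	for (long i = 0; i < NWAKERS; i++)
		if (pthread_create(&t[i], NULL, waker, (void *)i))
			err(EXIT_FAILURE, "pthread_create");

	pthread_barrier_wait(&barrier);   /* parent joins the rendezvous */
	for (int i = 0; i < NWAKERS; i++)
		pthread_join(t[i], NULL);
	pthread_barrier_destroy(&barrier);
	return 0;
}
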
diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
index 2dfcef3e371e..9ecab6620a87 100644
--- a/tools/perf/bench/futex-wake.c
+++ b/tools/perf/bench/futex-wake.c
@@ -14,13 +14,13 @@
#include <pthread.h>
#include <signal.h>
+#include "../util/mutex.h"
#include "../util/stat.h"
#include <subcmd/parse-options.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/time64.h>
#include <errno.h>
-#include <internal/cpumap.h>
#include <perf/cpumap.h>
#include "bench.h"
#include "futex.h"
@@ -28,29 +28,34 @@
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
+#include <sys/mman.h>
/* all threads will block on the same futex */
static u_int32_t futex1 = 0;
-/*
- * How many wakeups to do at a time.
- * Default to 1 in order to make the kernel work more.
- */
-static unsigned int nwakes = 1;
-
-pthread_t *worker;
-static bool done = false, silent = false, fshared = false;
-static pthread_mutex_t thread_lock;
-static pthread_cond_t thread_parent, thread_worker;
+static pthread_t *worker;
+static bool done = false;
+static struct mutex thread_lock;
+static struct cond thread_parent, thread_worker;
static struct stats waketime_stats, wakeup_stats;
-static unsigned int threads_starting, nthreads = 0;
+static unsigned int threads_starting;
static int futex_flag = 0;
+static struct bench_futex_parameters params = {
+ /*
+ * How many wakeups to do at a time.
+ * Default to 1 in order to make the kernel work more.
+ */
+ .nwakes = 1,
+};
+
static const struct option options[] = {
- OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
- OPT_UINTEGER('w', "nwakes", &nwakes, "Specify amount of threads to wake at once"),
- OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
- OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
+ OPT_UINTEGER('t', "threads", &params.nthreads, "Specify amount of threads"),
+ OPT_UINTEGER('w', "nwakes", &params.nwakes, "Specify amount of threads to wake at once"),
+ OPT_BOOLEAN( 's', "silent", &params.silent, "Silent mode: do not display data/details"),
+ OPT_BOOLEAN( 'S', "shared", &params.fshared, "Use shared futexes instead of private ones"),
+ OPT_BOOLEAN( 'm', "mlockall", &params.mlockall, "Lock all current and future memory"),
+
OPT_END()
};
@@ -61,12 +66,12 @@ static const char * const bench_futex_wake_usage[] = {
static void *workerfn(void *arg __maybe_unused)
{
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
- pthread_cond_signal(&thread_parent);
- pthread_cond_wait(&thread_worker, &thread_lock);
- pthread_mutex_unlock(&thread_lock);
+ cond_signal(&thread_parent);
+ cond_wait(&thread_worker, &thread_lock);
+ mutex_unlock(&thread_lock);
while (1) {
if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
@@ -85,7 +90,7 @@ static void print_summary(void)
printf("Wokeup %d of %d threads in %.4f ms (+-%.2f%%)\n",
wakeup_avg,
- nthreads,
+ params.nthreads,
waketime_avg / USEC_PER_MSEC,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
@@ -93,22 +98,32 @@ static void print_summary(void)
static void block_threads(pthread_t *w,
pthread_attr_t thread_attr, struct perf_cpu_map *cpu)
{
- cpu_set_t cpuset;
+ cpu_set_t *cpuset;
unsigned int i;
+ size_t size;
+ int nrcpus = perf_cpu_map__nr(cpu);
+ threads_starting = params.nthreads;
- threads_starting = nthreads;
+ cpuset = CPU_ALLOC(nrcpus);
+ BUG_ON(!cpuset);
+ size = CPU_ALLOC_SIZE(nrcpus);
/* create and block all threads */
- for (i = 0; i < nthreads; i++) {
- CPU_ZERO(&cpuset);
- CPU_SET(cpu->map[i % cpu->nr], &cpuset);
+ for (i = 0; i < params.nthreads; i++) {
+ CPU_ZERO_S(size, cpuset);
+ CPU_SET_S(perf_cpu_map__cpu(cpu, i % perf_cpu_map__nr(cpu)).cpu, size, cpuset);
- if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpuset))
+ if (pthread_attr_setaffinity_np(&thread_attr, size, cpuset)) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+ }
- if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
+ if (pthread_create(&w[i], &thread_attr, workerfn, NULL)) {
+ CPU_FREE(cpuset);
err(EXIT_FAILURE, "pthread_create");
+ }
}
+ CPU_FREE(cpuset);
}
static void toggle_done(int sig __maybe_unused,
@@ -141,26 +156,32 @@ int bench_futex_wake(int argc, const char **argv)
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
- if (!nthreads)
- nthreads = cpu->nr;
+ if (params.mlockall) {
+ if (mlockall(MCL_CURRENT | MCL_FUTURE))
+ err(EXIT_FAILURE, "mlockall");
+ }
+
+ if (!params.nthreads)
+ params.nthreads = perf_cpu_map__nr(cpu);
- worker = calloc(nthreads, sizeof(*worker));
+ worker = calloc(params.nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
- if (!fshared)
+ if (!params.fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
printf("Run summary [PID %d]: blocking on %d threads (at [%s] futex %p), "
"waking up %d at a time.\n\n",
- getpid(), nthreads, fshared ? "shared":"private", &futex1, nwakes);
+ getpid(), params.nthreads, params.fshared ? "shared":"private",
+ &futex1, params.nwakes);
init_stats(&wakeup_stats);
init_stats(&waketime_stats);
pthread_attr_init(&thread_attr);
- pthread_mutex_init(&thread_lock, NULL);
- pthread_cond_init(&thread_parent, NULL);
- pthread_cond_init(&thread_worker, NULL);
+ mutex_init(&thread_lock);
+ cond_init(&thread_parent);
+ cond_init(&thread_worker);
for (j = 0; j < bench_repeat && !done; j++) {
unsigned int nwoken = 0;
@@ -170,30 +191,32 @@ int bench_futex_wake(int argc, const char **argv)
block_threads(worker, thread_attr, cpu);
/* make sure all threads are already blocked */
- pthread_mutex_lock(&thread_lock);
+ mutex_lock(&thread_lock);
while (threads_starting)
- pthread_cond_wait(&thread_parent, &thread_lock);
- pthread_cond_broadcast(&thread_worker);
- pthread_mutex_unlock(&thread_lock);
+ cond_wait(&thread_parent, &thread_lock);
+ cond_broadcast(&thread_worker);
+ mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start waking folks up */
gettimeofday(&start, NULL);
- while (nwoken != nthreads)
- nwoken += futex_wake(&futex1, nwakes, futex_flag);
+ while (nwoken != params.nthreads)
+ nwoken += futex_wake(&futex1,
+ params.nwakes, futex_flag);
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
update_stats(&wakeup_stats, nwoken);
update_stats(&waketime_stats, runtime.tv_usec);
- if (!silent) {
+ if (!params.silent) {
printf("[Run %d]: Wokeup %d of %d threads in %.4f ms\n",
- j + 1, nwoken, nthreads, runtime.tv_usec / (double)USEC_PER_MSEC);
+ j + 1, nwoken, params.nthreads,
+ runtime.tv_usec / (double)USEC_PER_MSEC);
}
- for (i = 0; i < nthreads; i++) {
+ for (i = 0; i < params.nthreads; i++) {
ret = pthread_join(worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
@@ -202,13 +225,14 @@ int bench_futex_wake(int argc, const char **argv)
}
/* cleanup & report results */
- pthread_cond_destroy(&thread_parent);
- pthread_cond_destroy(&thread_worker);
- pthread_mutex_destroy(&thread_lock);
+ cond_destroy(&thread_parent);
+ cond_destroy(&thread_worker);
+ mutex_destroy(&thread_lock);
pthread_attr_destroy(&thread_attr);
print_summary();
free(worker);
+ perf_cpu_map__put(cpu);
return ret;
}
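block_threads() above switches from a fixed cpu_set_t on the stack to a heap-allocated set sized with CPU_ALLOC(), so setting affinity keeps working on machines with more CPUs than CPU_SETSIZE. A minimal standalone sketch of that glibc pattern (pin_to_cpu is a hypothetical helper name):

#define _GNU_SOURCE
#include <sched.h>
#include <err.h>

static void pin_to_cpu(int cpu, int nrcpus)
{
	cpu_set_t *set = CPU_ALLOC(nrcpus);
	size_t size = CPU_ALLOC_SIZE(nrcpus);

	if (!set)
		err(1, "CPU_ALLOC");

	CPU_ZERO_S(size, set);
	CPU_SET_S(cpu, size, set);	/* allow only the requested CPU */

	if (sched_setaffinity(0, size, set)) {
		CPU_FREE(set);
		err(1, "sched_setaffinity");
	}
	CPU_FREE(set);
}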
diff --git a/tools/perf/bench/futex.h b/tools/perf/bench/futex.h
index 31b53cc7d5bc..ebdc2b032afc 100644
--- a/tools/perf/bench/futex.h
+++ b/tools/perf/bench/futex.h
@@ -13,28 +13,51 @@
#include <sys/types.h>
#include <linux/futex.h>
+struct bench_futex_parameters {
+ bool silent;
+ bool fshared;
+ bool mlockall;
+ bool multi; /* lock-pi */
+ bool pi; /* requeue-pi */
+ bool broadcast; /* requeue */
+ unsigned int runtime; /* seconds */
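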
+ unsigned int nthreads;
+ unsigned int nfutexes;
+ unsigned int nwakes;
+ unsigned int nrequeue;
+};
+
/**
- * futex() - SYS_futex syscall wrapper
+ * futex_syscall() - SYS_futex syscall wrapper
* @uaddr: address of first futex
* @op: futex op code
* @val: typically expected value of uaddr, but varies by op
* @timeout: typically an absolute struct timespec (except where noted
* otherwise). Overloaded by some ops
- * @uaddr2: address of second futex for some ops\
+ * @uaddr2: address of second futex for some ops
* @val3: varies by op
* @opflags: flags to be bitwise OR'd with op, such as FUTEX_PRIVATE_FLAG
*
- * futex() is used by all the following futex op wrappers. It can also be
+ * futex_syscall() is used by all the following futex op wrappers. It can also be
* used for misuse and abuse testing. Generally, the specific op wrappers
- * should be used instead. It is a macro instead of an static inline function as
- * some of the types over overloaded (timeout is used for nr_requeue for
- * example).
+ * should be used instead.
*
* These argument descriptions are the defaults for all
* like-named arguments in the following wrappers except where noted below.
*/
-#define futex(uaddr, op, val, timeout, uaddr2, val3, opflags) \
- syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3)
+static inline int
+futex_syscall(volatile u_int32_t *uaddr, int op, u_int32_t val, struct timespec *timeout,
+ volatile u_int32_t *uaddr2, int val3, int opflags)
+{
+ return syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3);
+}
+
+static inline int
+futex_syscall_nr_requeue(volatile u_int32_t *uaddr, int op, u_int32_t val, int nr_requeue,
+ volatile u_int32_t *uaddr2, int val3, int opflags)
+{
+ return syscall(SYS_futex, uaddr, op | opflags, val, nr_requeue, uaddr2, val3);
+}
/**
* futex_wait() - block on uaddr with optional timeout
@@ -43,7 +66,7 @@
static inline int
futex_wait(u_int32_t *uaddr, u_int32_t val, struct timespec *timeout, int opflags)
{
- return futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags);
+ return futex_syscall(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags);
}
/**
@@ -53,7 +76,7 @@ futex_wait(u_int32_t *uaddr, u_int32_t val, struct timespec *timeout, int opflag
static inline int
futex_wake(u_int32_t *uaddr, int nr_wake, int opflags)
{
- return futex(uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0, opflags);
+ return futex_syscall(uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0, opflags);
}
/**
@@ -62,7 +85,7 @@ futex_wake(u_int32_t *uaddr, int nr_wake, int opflags)
static inline int
futex_lock_pi(u_int32_t *uaddr, struct timespec *timeout, int opflags)
{
- return futex(uaddr, FUTEX_LOCK_PI, 0, timeout, NULL, 0, opflags);
+ return futex_syscall(uaddr, FUTEX_LOCK_PI, 0, timeout, NULL, 0, opflags);
}
/**
@@ -71,19 +94,53 @@ futex_lock_pi(u_int32_t *uaddr, struct timespec *timeout, int opflags)
static inline int
futex_unlock_pi(u_int32_t *uaddr, int opflags)
{
- return futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags);
+ return futex_syscall(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags);
}
/**
* futex_cmp_requeue() - requeue tasks from uaddr to uaddr2
* @nr_wake: wake up to this many tasks
-* @nr_requeue: requeue up to this many tasks
+* @nr_requeue: requeue up to this many tasks
*/
static inline int
futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wake,
int nr_requeue, int opflags)
{
- return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
- val, opflags);
+ return futex_syscall_nr_requeue(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
+ val, opflags);
}
+
+/**
+ * futex_wait_requeue_pi() - block on uaddr and prepare to requeue to uaddr2
+ * @uaddr: non-PI futex source
+ * @uaddr2: PI futex target
+ *
+ * This is the first half of the requeue_pi mechanism. It shall always be
+ * paired with futex_cmp_requeue_pi().
+ */
+static inline int
+futex_wait_requeue_pi(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2,
+ struct timespec *timeout, int opflags)
+{
+ return futex_syscall(uaddr, FUTEX_WAIT_REQUEUE_PI, val, timeout, uaddr2, 0,
+ opflags);
+}
+
+/**
+ * futex_cmp_requeue_pi() - requeue tasks from uaddr to uaddr2
+ * @uaddr: non-PI futex source
+ * @uaddr2: PI futex target
+ * @nr_requeue: requeue up to this many tasks
+ *
+ * This is the second half of the requeue_pi mechanism. It shall always be
+ * paired with futex_wait_requeue_pi(). The first waker is always awoken.
+ */
+static inline int
+futex_cmp_requeue_pi(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2,
+ int nr_requeue, int opflags)
+{
+ return futex_syscall_nr_requeue(uaddr, FUTEX_CMP_REQUEUE_PI, 1, nr_requeue, uaddr2,
+ val, opflags);
+}
+
#endif /* _FUTEX_H */
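The wrappers above are typically used in wait/wake pairs. A minimal sketch, assuming this header is included and ignoring the atomic update and retry loop a real implementation needs:

static u_int32_t gate;	/* 0 means "not released yet" */

static void wait_side(void)
{
	/* Sleeps in the kernel only while gate still holds the expected value 0. */
	futex_wait(&gate, 0, NULL, FUTEX_PRIVATE_FLAG);
}

static void wake_side(int nr)
{
	gate = 1;					/* publish the new state first */
	futex_wake(&gate, nr, FUTEX_PRIVATE_FLAG);	/* then wake up to nr waiters */
}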
diff --git a/tools/perf/bench/inject-buildid.c b/tools/perf/bench/inject-buildid.c
new file mode 100644
index 000000000000..49331743c743
--- /dev/null
+++ b/tools/perf/bench/inject-buildid.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdlib.h>
+#include <stddef.h>
+#include <ftw.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <sys/wait.h>
+#include <linux/kernel.h>
+#include <linux/time64.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/zalloc.h>
+#include <internal/lib.h>
+#include <subcmd/parse-options.h>
+
+#include "bench.h"
+#include "util/data.h"
+#include "util/stat.h"
+#include "util/debug.h"
+#include "util/symbol.h"
+#include "util/session.h"
+#include "util/build-id.h"
+#include "util/sample.h"
+#include "util/synthetic-events.h"
+
+#define MMAP_DEV_MAJOR 8
+#define DSO_MMAP_RATIO 4
+
+static unsigned int iterations = 100;
+static unsigned int nr_mmaps = 100;
+static unsigned int nr_samples = 100; /* samples per mmap */
+
+static u64 bench_sample_type;
+static u16 bench_id_hdr_size;
+
+struct bench_data {
+ int pid;
+ int input_pipe[2];
+ int output_pipe[2];
+ pthread_t th;
+};
+
+struct bench_dso {
+ struct list_head list;
+ char *name;
+ int ino;
+};
+
+static int nr_dsos;
+static struct bench_dso *dsos;
+
+extern int cmd_inject(int argc, const char *argv[]);
+
+static const struct option options[] = {
+ OPT_UINTEGER('i', "iterations", &iterations,
+ "Number of iterations used to compute average (default: 100)"),
+ OPT_UINTEGER('m', "nr-mmaps", &nr_mmaps,
+ "Number of mmap events for each iteration (default: 100)"),
+ OPT_UINTEGER('n', "nr-samples", &nr_samples,
+ "Number of sample events per mmap event (default: 100)"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show iteration count, DSO name, etc)"),
+ OPT_END()
+};
+
+static const char *const bench_usage[] = {
+ "perf bench internals inject-build-id <options>",
+ NULL
+};
+
+/*
+ * Helper for collect_dso that adds the given file as a dso to dso_list
+ * if it contains a build-id. Stops after collecting 4 times more than
+ * we need (for MMAP2 events).
+ */
+static int add_dso(const char *fpath, const struct stat *sb __maybe_unused,
+ int typeflag, struct FTW *ftwbuf __maybe_unused)
+{
+ struct bench_dso *dso = &dsos[nr_dsos];
+ struct build_id bid;
+
+ if (typeflag == FTW_D || typeflag == FTW_SL)
+ return 0;
+
+ if (filename__read_build_id(fpath, &bid) < 0)
+ return 0;
+
+ dso->name = realpath(fpath, NULL);
+ if (dso->name == NULL)
+ return -1;
+
+ dso->ino = nr_dsos++;
+ pr_debug2(" Adding DSO: %s\n", fpath);
+
+ /* stop if we collected enough DSOs */
+ if ((unsigned int)nr_dsos == DSO_MMAP_RATIO * nr_mmaps)
+ return 1;
+
+ return 0;
+}
+
+static void collect_dso(void)
+{
+ dsos = calloc(nr_mmaps * DSO_MMAP_RATIO, sizeof(*dsos));
+ if (dsos == NULL) {
+ printf(" Memory allocation failed\n");
+ exit(1);
+ }
+
+ if (nftw("/usr/lib/", add_dso, 10, FTW_PHYS) < 0)
+ return;
+
+ pr_debug(" Collected %d DSOs\n", nr_dsos);
+}
+
+static void release_dso(void)
+{
+ int i;
+
+ for (i = 0; i < nr_dsos; i++) {
+ struct bench_dso *dso = &dsos[i];
+
+ zfree(&dso->name);
+ }
+ free(dsos);
+}
+
+/* Fake address used by mmap and sample events */
+static u64 dso_map_addr(struct bench_dso *dso)
+{
+ return 0x400000ULL + dso->ino * 8192ULL;
+}
+
+static ssize_t synthesize_attr(struct bench_data *data)
+{
+ union perf_event event;
+
+ memset(&event, 0, sizeof(event.attr) + sizeof(u64));
+
+ event.header.type = PERF_RECORD_HEADER_ATTR;
+ event.header.size = sizeof(event.attr) + sizeof(u64);
+
+ event.attr.attr.type = PERF_TYPE_SOFTWARE;
+ event.attr.attr.config = PERF_COUNT_SW_TASK_CLOCK;
+ event.attr.attr.exclude_kernel = 1;
+ event.attr.attr.sample_id_all = 1;
+ event.attr.attr.sample_type = bench_sample_type;
+
+ return writen(data->input_pipe[1], &event, event.header.size);
+}
+
+static ssize_t synthesize_fork(struct bench_data *data)
+{
+ union perf_event event;
+
+ memset(&event, 0, sizeof(event.fork) + bench_id_hdr_size);
+
+ event.header.type = PERF_RECORD_FORK;
+ event.header.misc = PERF_RECORD_MISC_FORK_EXEC;
+ event.header.size = sizeof(event.fork) + bench_id_hdr_size;
+
+ event.fork.ppid = 1;
+ event.fork.ptid = 1;
+ event.fork.pid = data->pid;
+ event.fork.tid = data->pid;
+
+ return writen(data->input_pipe[1], &event, event.header.size);
+}
+
+static ssize_t synthesize_mmap(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
+{
+ union perf_event event;
+ size_t len = offsetof(struct perf_record_mmap2, filename);
+ u64 *id_hdr_ptr = (void *)&event;
+ int ts_idx;
+
+ len += roundup(strlen(dso->name) + 1, 8) + bench_id_hdr_size;
+
+ memset(&event, 0, min(len, sizeof(event.mmap2)));
+
+ event.header.type = PERF_RECORD_MMAP2;
+ event.header.misc = PERF_RECORD_MISC_USER;
+ event.header.size = len;
+
+ event.mmap2.pid = data->pid;
+ event.mmap2.tid = data->pid;
+ event.mmap2.maj = MMAP_DEV_MAJOR;
+ event.mmap2.ino = dso->ino;
+
+ strcpy(event.mmap2.filename, dso->name);
+
+ event.mmap2.start = dso_map_addr(dso);
+ event.mmap2.len = 4096;
+ event.mmap2.prot = PROT_EXEC;
+
+ if (len > sizeof(event.mmap2)) {
+ /* write mmap2 event first */
+ if (writen(data->input_pipe[1], &event, len - bench_id_hdr_size) < 0)
+ return -1;
+ /* zero-fill sample id header */
+ memset(id_hdr_ptr, 0, bench_id_hdr_size);
+ /* put timestamp in the right position */
+ ts_idx = (bench_id_hdr_size / sizeof(u64)) - 2;
+ id_hdr_ptr[ts_idx] = timestamp;
+ if (writen(data->input_pipe[1], id_hdr_ptr, bench_id_hdr_size) < 0)
+ return -1;
+
+ return len;
+ }
+
+ ts_idx = (len / sizeof(u64)) - 2;
+ id_hdr_ptr[ts_idx] = timestamp;
+ return writen(data->input_pipe[1], &event, len);
+}
+
+static ssize_t synthesize_sample(struct bench_data *data, struct bench_dso *dso, u64 timestamp)
+{
+ union perf_event event;
+ struct perf_sample sample = {
+ .tid = data->pid,
+ .pid = data->pid,
+ .ip = dso_map_addr(dso),
+ .time = timestamp,
+ };
+
+ event.header.type = PERF_RECORD_SAMPLE;
+ event.header.misc = PERF_RECORD_MISC_USER;
+ event.header.size = perf_event__sample_event_size(&sample, bench_sample_type, 0);
+
+ perf_event__synthesize_sample(&event, bench_sample_type, 0, &sample);
+
+ return writen(data->input_pipe[1], &event, event.header.size);
+}
+
+static ssize_t synthesize_flush(struct bench_data *data)
+{
+ struct perf_event_header header = {
+ .size = sizeof(header),
+ .type = PERF_RECORD_FINISHED_ROUND,
+ };
+
+ return writen(data->input_pipe[1], &header, header.size);
+}
+
+static void *data_reader(void *arg)
+{
+ struct bench_data *data = arg;
+ char buf[8192];
+ int flag;
+ int n;
+
+ flag = fcntl(data->output_pipe[0], F_GETFL);
+ fcntl(data->output_pipe[0], F_SETFL, flag | O_NONBLOCK);
+
+ /* read out data from child */
+ while (true) {
+ n = read(data->output_pipe[0], buf, sizeof(buf));
+ if (n > 0)
+ continue;
+ if (n == 0)
+ break;
+
+ if (errno != EINTR && errno != EAGAIN)
+ break;
+
+ usleep(100);
+ }
+
+ close(data->output_pipe[0]);
+ return NULL;
+}
+
+static int setup_injection(struct bench_data *data, bool build_id_all)
+{
+ int ready_pipe[2];
+ int dev_null_fd;
+ char buf;
+
+ if (pipe(ready_pipe) < 0)
+ return -1;
+
+ if (pipe(data->input_pipe) < 0)
+ return -1;
+
+ if (pipe(data->output_pipe) < 0)
+ return -1;
+
+ data->pid = fork();
+ if (data->pid < 0)
+ return -1;
+
+ if (data->pid == 0) {
+ const char **inject_argv;
+ int inject_argc = 2;
+
+ close(data->input_pipe[1]);
+ close(data->output_pipe[0]);
+ close(ready_pipe[0]);
+
+ dup2(data->input_pipe[0], STDIN_FILENO);
+ close(data->input_pipe[0]);
+ dup2(data->output_pipe[1], STDOUT_FILENO);
+ close(data->output_pipe[1]);
+
+ dev_null_fd = open("/dev/null", O_WRONLY);
+ if (dev_null_fd < 0)
+ exit(1);
+
+ dup2(dev_null_fd, STDERR_FILENO);
+
+ if (build_id_all)
+ inject_argc++;
+
+ inject_argv = calloc(inject_argc + 1, sizeof(*inject_argv));
+ if (inject_argv == NULL)
+ exit(1);
+
+ inject_argv[0] = strdup("inject");
+ inject_argv[1] = strdup("-b");
+ if (build_id_all)
+ inject_argv[2] = strdup("--buildid-all");
+
+ /* signal that we're ready to go */
+ close(ready_pipe[1]);
+
+ cmd_inject(inject_argc, inject_argv);
+
+ exit(0);
+ }
+
+ pthread_create(&data->th, NULL, data_reader, data);
+
+ close(ready_pipe[1]);
+ close(data->input_pipe[0]);
+ close(data->output_pipe[1]);
+
+ /* wait for child ready */
+ if (read(ready_pipe[0], &buf, 1) < 0)
+ return -1;
+ close(ready_pipe[0]);
+
+ return 0;
+}
+
+static int inject_build_id(struct bench_data *data, u64 *max_rss)
+{
+ int status;
+ unsigned int i, k;
+ struct rusage rusage;
+
+ /* this makes the child run */
+ if (perf_header__write_pipe(data->input_pipe[1]) < 0)
+ return -1;
+
+ if (synthesize_attr(data) < 0)
+ return -1;
+
+ if (synthesize_fork(data) < 0)
+ return -1;
+
+ for (i = 0; i < nr_mmaps; i++) {
+ int idx = rand() % (nr_dsos - 1);
+ struct bench_dso *dso = &dsos[idx];
+ u64 timestamp = rand() % 1000000;
+
+ pr_debug2(" [%d] injecting: %s\n", i+1, dso->name);
+ if (synthesize_mmap(data, dso, timestamp) < 0)
+ return -1;
+
+ for (k = 0; k < nr_samples; k++) {
+ if (synthesize_sample(data, dso, timestamp + k * 1000) < 0)
+ return -1;
+ }
+
+ if ((i + 1) % 10 == 0) {
+ if (synthesize_flush(data) < 0)
+ return -1;
+ }
+ }
+
+ /* this makes the child finish */
+ close(data->input_pipe[1]);
+
+ wait4(data->pid, &status, 0, &rusage);
+ *max_rss = rusage.ru_maxrss;
+
+ pr_debug(" Child %d exited with %d\n", data->pid, status);
+
+ return 0;
+}
+
+static void do_inject_loop(struct bench_data *data, bool build_id_all)
+{
+ unsigned int i;
+ struct stats time_stats, mem_stats;
+ double time_average, time_stddev;
+ double mem_average, mem_stddev;
+
+ init_stats(&time_stats);
+ init_stats(&mem_stats);
+
+ pr_debug(" Build-id%s injection benchmark\n", build_id_all ? "-all" : "");
+
+ for (i = 0; i < iterations; i++) {
+ struct timeval start, end, diff;
+ u64 runtime_us, max_rss;
+
+ pr_debug(" Iteration #%d\n", i+1);
+
+ if (setup_injection(data, build_id_all) < 0) {
+ printf(" Build-id injection setup failed\n");
+ break;
+ }
+
+ gettimeofday(&start, NULL);
+ if (inject_build_id(data, &max_rss) < 0) {
+ printf(" Build-id injection failed\n");
+ break;
+ }
+
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &diff);
+ runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ update_stats(&time_stats, runtime_us);
+ update_stats(&mem_stats, max_rss);
+
+ pthread_join(data->th, NULL);
+ }
+
+ time_average = avg_stats(&time_stats) / USEC_PER_MSEC;
+ time_stddev = stddev_stats(&time_stats) / USEC_PER_MSEC;
+ printf(" Average build-id%s injection took: %.3f msec (+- %.3f msec)\n",
+ build_id_all ? "-all" : "", time_average, time_stddev);
+
+ /* each iteration processes MMAP2 + BUILD_ID + nr_samples * SAMPLE */
+ time_average = avg_stats(&time_stats) / (nr_mmaps * (nr_samples + 2));
+ time_stddev = stddev_stats(&time_stats) / (nr_mmaps * (nr_samples + 2));
+ printf(" Average time per event: %.3f usec (+- %.3f usec)\n",
+ time_average, time_stddev);
+
+ mem_average = avg_stats(&mem_stats);
+ mem_stddev = stddev_stats(&mem_stats);
+ printf(" Average memory usage: %.0f KB (+- %.0f KB)\n",
+ mem_average, mem_stddev);
+}
+
+static int do_inject_loops(struct bench_data *data)
+{
+
+ srand(time(NULL));
+ symbol__init(NULL);
+
+ bench_sample_type = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP;
+ bench_sample_type |= PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
+ bench_id_hdr_size = 32;
+
+ collect_dso();
+ if (nr_dsos == 0) {
+ printf(" Cannot collect DSOs for injection\n");
+ return -1;
+ }
+
+ do_inject_loop(data, false);
+ do_inject_loop(data, true);
+
+ release_dso();
+ return 0;
+}
+
+int bench_inject_build_id(int argc, const char **argv)
+{
+ struct bench_data data;
+
+ argc = parse_options(argc, argv, options, bench_usage, 0);
+ if (argc) {
+ usage_with_options(bench_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ return do_inject_loops(&data);
+}
+
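setup_injection() runs cmd_inject() directly inside the forked child; a standalone equivalent of the pipe plumbing, using execlp purely for illustration (spawn_inject is a hypothetical name), looks roughly like this:

#include <unistd.h>
#include <err.h>

static pid_t spawn_inject(int *in_fd, int *out_fd)
{
	int input_pipe[2], output_pipe[2];
	pid_t pid;

	if (pipe(input_pipe) || pipe(output_pipe))
		err(1, "pipe");

	pid = fork();
	if (pid < 0)
		err(1, "fork");

	if (pid == 0) {
		dup2(input_pipe[0], STDIN_FILENO);	/* synthetic events arrive from the parent */
		dup2(output_pipe[1], STDOUT_FILENO);	/* injected stream goes back to the parent */
		close(input_pipe[1]);
		close(output_pipe[0]);
		execlp("perf", "perf", "inject", "-b", (char *)NULL);
		_exit(1);
	}

	close(input_pipe[0]);
	close(output_pipe[1]);
	*in_fd = input_pipe[1];		/* parent writes the event stream here */
	*out_fd = output_pipe[0];	/* and drains the child's output here */
	return pid;
}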
diff --git a/tools/perf/bench/kallsyms-parse.c b/tools/perf/bench/kallsyms-parse.c
new file mode 100644
index 000000000000..2b0d0f980ae9
--- /dev/null
+++ b/tools/perf/bench/kallsyms-parse.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Benchmark of /proc/kallsyms parsing.
+ *
+ * Copyright 2020 Google LLC.
+ */
+#include <stdlib.h>
+#include "bench.h"
+#include "../util/stat.h"
+#include <linux/time64.h>
+#include <subcmd/parse-options.h>
+#include <symbol/kallsyms.h>
+
+static unsigned int iterations = 100;
+
+static const struct option options[] = {
+ OPT_UINTEGER('i', "iterations", &iterations,
+ "Number of iterations used to compute average"),
+ OPT_END()
+};
+
+static const char *const bench_usage[] = {
+ "perf bench internals kallsyms-parse <options>",
+ NULL
+};
+
+static int bench_process_symbol(void *arg __maybe_unused,
+ const char *name __maybe_unused,
+ char type __maybe_unused,
+ u64 start __maybe_unused)
+{
+ return 0;
+}
+
+static int do_kallsyms_parse(void)
+{
+ struct timeval start, end, diff;
+ u64 runtime_us;
+ unsigned int i;
+ double time_average, time_stddev;
+ int err;
+ struct stats time_stats;
+
+ init_stats(&time_stats);
+
+ for (i = 0; i < iterations; i++) {
+ gettimeofday(&start, NULL);
+ err = kallsyms__parse("/proc/kallsyms", NULL,
+ bench_process_symbol);
+ if (err)
+ return err;
+
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &diff);
+ runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ update_stats(&time_stats, runtime_us);
+ }
+
+ time_average = avg_stats(&time_stats) / USEC_PER_MSEC;
+ time_stddev = stddev_stats(&time_stats) / USEC_PER_MSEC;
+ printf(" Average kallsyms__parse took: %.3f ms (+- %.3f ms)\n",
+ time_average, time_stddev);
+ return 0;
+}
+
+int bench_kallsyms_parse(int argc, const char **argv)
+{
+ argc = parse_options(argc, argv, options, bench_usage, 0);
+ if (argc) {
+ usage_with_options(bench_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ return do_kallsyms_parse();
+}
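bench_process_symbol() deliberately ignores every symbol so that only the parsing cost is measured. A hypothetical callback with the same kallsyms__parse() signature that does real work, e.g. counting text symbols, might look like:

static unsigned int nr_text_syms;

static int count_text_symbol(void *arg __maybe_unused, const char *name __maybe_unused,
			     char type, u64 start __maybe_unused)
{
	if (type == 't' || type == 'T')	/* local/global text (code) symbols */
		nr_text_syms++;
	return 0;
}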
diff --git a/tools/perf/bench/mem-functions.c b/tools/perf/bench/mem-functions.c
index 9235b76501be..19d45c377ac1 100644
--- a/tools/perf/bench/mem-functions.c
+++ b/tools/perf/bench/mem-functions.c
@@ -223,12 +223,8 @@ static int bench_mem_common(int argc, const char **argv, struct bench_mem_info *
return 0;
}
-static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
+static void memcpy_prefault(memcpy_t fn, size_t size, void *src, void *dst)
{
- u64 cycle_start = 0ULL, cycle_end = 0ULL;
- memcpy_t fn = r->fn.memcpy;
- int i;
-
/* Make sure to always prefault zero pages even if MMAP_THRESH is crossed: */
memset(src, 0, size);
@@ -237,6 +233,15 @@ static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, vo
* to not measure page fault overhead:
*/
fn(dst, src, size);
+}
+
+static u64 do_memcpy_cycles(const struct function *r, size_t size, void *src, void *dst)
+{
+ u64 cycle_start = 0ULL, cycle_end = 0ULL;
+ memcpy_t fn = r->fn.memcpy;
+ int i;
+
+ memcpy_prefault(fn, size, src, dst);
cycle_start = get_cycles();
for (i = 0; i < nr_loops; ++i)
@@ -252,11 +257,7 @@ static double do_memcpy_gettimeofday(const struct function *r, size_t size, void
memcpy_t fn = r->fn.memcpy;
int i;
- /*
- * We prefault the freshly allocated memory range here,
- * to not measure page fault overhead:
- */
- fn(dst, src, size);
+ memcpy_prefault(fn, size, src, dst);
BUG_ON(gettimeofday(&tv_start, NULL));
for (i = 0; i < nr_loops; ++i)
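The refactor above pulls the prefault step into memcpy_prefault() so both the cycle-based and the gettimeofday-based measurements share it, and neither includes first-touch page-fault cost. A minimal standalone sketch of the same prefault-then-measure idea (typedef and helper names are illustrative):

#include <string.h>
#include <sys/time.h>

typedef void *(*memcpy_t)(void *, const void *, size_t);

static double time_memcpy(memcpy_t fn, void *dst, void *src, size_t size, int loops)
{
	struct timeval start, end, diff;

	/* Touch both buffers once so the timed loop measures the copy itself,
	 * not the page faults of freshly allocated memory. */
	memset(src, 0, size);
	fn(dst, src, size);

	gettimeofday(&start, NULL);
	for (int i = 0; i < loops; i++)
		fn(dst, src, size);
	gettimeofday(&end, NULL);
	timersub(&end, &start, &diff);

	return diff.tv_sec + diff.tv_usec / 1e6;	/* seconds spent copying */
}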
diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S
index 9ad015a1e202..6eb45a2aa8db 100644
--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S
+++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S
@@ -2,6 +2,9 @@
/* Various wrappers to make the kernel .S file build in user-space: */
+// memcpy_orig and memcpy_erms are defined as SYM_L_LOCAL, but we need them global here
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#define memcpy MEMCPY /* don't hide glibc's memcpy() */
#define altinstr_replacement text
#define globl p2align 4; .globl
diff --git a/tools/perf/bench/mem-memcpy-x86-64-lib.c b/tools/perf/bench/mem-memcpy-x86-64-lib.c
deleted file mode 100644
index 4130734dde84..000000000000
--- a/tools/perf/bench/mem-memcpy-x86-64-lib.c
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * From code in arch/x86/lib/usercopy_64.c, copied to keep tools/ copy
- * of the kernel's arch/x86/lib/memcpy_64.s used in 'perf bench mem memcpy'
- * happy.
- */
-#include <linux/types.h>
-
-unsigned long __memcpy_mcsafe(void *dst, const void *src, size_t cnt);
-unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len);
-
-unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len)
-{
- for (; len; --len, to++, from++) {
- /*
- * Call the assembly routine back directly since
- * memcpy_mcsafe() may silently fallback to memcpy.
- */
- unsigned long rem = __memcpy_mcsafe(to, from, 1);
-
- if (rem)
- break;
- }
- return len;
-}
diff --git a/tools/perf/bench/mem-memset-x86-64-asm.S b/tools/perf/bench/mem-memset-x86-64-asm.S
index d550bd526162..6f093c483842 100644
--- a/tools/perf/bench/mem-memset-x86-64-asm.S
+++ b/tools/perf/bench/mem-memset-x86-64-asm.S
@@ -1,4 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
+// memset_orig and memset_erms are defined as SYM_L_LOCAL, but we need them global here
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
#define memset MEMSET /* don't hide glibc's memset() */
#define altinstr_replacement text
#define globl p2align 4; .globl
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 5797253b9700..1fbd7c947abc 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -6,8 +6,6 @@
*/
#include <inttypes.h>
-/* For the CLR_() macros */
-#include <pthread.h>
#include <subcmd/parse-options.h>
#include "../util/cloexec.h"
@@ -18,6 +16,7 @@
#include <sched.h>
#include <stdio.h>
#include <assert.h>
+#include <debug.h>
#include <malloc.h>
#include <signal.h>
#include <stdlib.h>
@@ -34,6 +33,8 @@
#include <linux/numa.h>
#include <linux/zalloc.h>
+#include "../util/header.h"
+#include "../util/mutex.h"
#include <numa.h>
#include <numaif.h>
@@ -42,7 +43,7 @@
#endif
/*
- * Regular printout to the terminal, supressed if -q is specified:
+ * Regular printout to the terminal, suppressed if -q is specified:
*/
#define tprintf(x...) do { if (g && g->p.show_details >= 0) printf(x); } while (0)
@@ -54,7 +55,7 @@
struct thread_data {
int curr_cpu;
- cpu_set_t bind_cpumask;
+ cpu_set_t *bind_cpumask;
int bind_node;
u8 *process_data;
int process_nr;
@@ -66,7 +67,7 @@ struct thread_data {
u64 system_time_ns;
u64 user_time_ns;
double speed_gbs;
- pthread_mutex_t *process_lock;
+ struct mutex *process_lock;
};
/* Parameters set by options: */
@@ -116,7 +117,6 @@ struct params {
long bytes_thread;
int nr_tasks;
- bool show_quiet;
bool show_convergence;
bool measure_convergence;
@@ -136,15 +136,16 @@ struct params {
struct global_info {
u8 *data;
- pthread_mutex_t startup_mutex;
+ struct mutex startup_mutex;
+ struct cond startup_cond;
int nr_tasks_started;
- pthread_mutex_t startup_done_mutex;
-
- pthread_mutex_t start_work_mutex;
+ struct mutex start_work_mutex;
+ struct cond start_work_cond;
int nr_tasks_working;
+ bool start_work;
- pthread_mutex_t stop_work_mutex;
+ struct mutex stop_work_mutex;
u64 bytes_done;
struct thread_data *threads;
@@ -196,7 +197,8 @@ static const struct option options[] = {
OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details, "
"convergence is reached when each process (all its threads) is running on a single NUMA node."),
OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
- OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "quiet mode"),
+ OPT_BOOLEAN('q', "quiet" , &quiet,
+ "quiet mode (do not show any warnings or messages)"),
OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
/* Special option string parsing callbacks: */
@@ -247,85 +249,135 @@ static int is_node_present(int node)
*/
static bool node_has_cpus(int node)
{
- struct bitmask *cpu = numa_allocate_cpumask();
- unsigned int i;
+ struct bitmask *cpumask = numa_allocate_cpumask();
+ bool ret = false; /* fall back to nocpus */
+ int cpu;
- if (cpu && !numa_node_to_cpus(node, cpu)) {
- for (i = 0; i < cpu->size; i++) {
- if (numa_bitmask_isbitset(cpu, i))
- return true;
+ BUG_ON(!cpumask);
+ if (!numa_node_to_cpus(node, cpumask)) {
+ for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
+ if (numa_bitmask_isbitset(cpumask, cpu)) {
+ ret = true;
+ break;
+ }
}
}
+ numa_free_cpumask(cpumask);
- return false; /* lets fall back to nocpus safely */
+ return ret;
}
-static cpu_set_t bind_to_cpu(int target_cpu)
+static cpu_set_t *bind_to_cpu(int target_cpu)
{
- cpu_set_t orig_mask, mask;
- int ret;
+ int nrcpus = numa_num_possible_cpus();
+ cpu_set_t *orig_mask, *mask;
+ size_t size;
- ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
- BUG_ON(ret);
+ orig_mask = CPU_ALLOC(nrcpus);
+ BUG_ON(!orig_mask);
+ size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(size, orig_mask);
+
+ if (sched_getaffinity(0, size, orig_mask))
+ goto err_out;
+
+ mask = CPU_ALLOC(nrcpus);
+ if (!mask)
+ goto err_out;
- CPU_ZERO(&mask);
+ CPU_ZERO_S(size, mask);
if (target_cpu == -1) {
int cpu;
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
- CPU_SET(cpu, &mask);
+ CPU_SET_S(cpu, size, mask);
} else {
- BUG_ON(target_cpu < 0 || target_cpu >= g->p.nr_cpus);
- CPU_SET(target_cpu, &mask);
+ if (target_cpu < 0 || target_cpu >= g->p.nr_cpus)
+ goto err;
+
+ CPU_SET_S(target_cpu, size, mask);
}
- ret = sched_setaffinity(0, sizeof(mask), &mask);
- BUG_ON(ret);
+ if (sched_setaffinity(0, size, mask))
+ goto err;
return orig_mask;
+
+err:
+ CPU_FREE(mask);
+err_out:
+ CPU_FREE(orig_mask);
+
+ /* BUG_ON due to failure in allocation of orig_mask/mask */
+ BUG_ON(-1);
+ return NULL;
}
-static cpu_set_t bind_to_node(int target_node)
+static cpu_set_t *bind_to_node(int target_node)
{
- int cpus_per_node = g->p.nr_cpus / nr_numa_nodes();
- cpu_set_t orig_mask, mask;
+ int nrcpus = numa_num_possible_cpus();
+ size_t size;
+ cpu_set_t *orig_mask, *mask;
int cpu;
- int ret;
- BUG_ON(cpus_per_node * nr_numa_nodes() != g->p.nr_cpus);
- BUG_ON(!cpus_per_node);
+ orig_mask = CPU_ALLOC(nrcpus);
+ BUG_ON(!orig_mask);
+ size = CPU_ALLOC_SIZE(nrcpus);
+ CPU_ZERO_S(size, orig_mask);
- ret = sched_getaffinity(0, sizeof(orig_mask), &orig_mask);
- BUG_ON(ret);
+ if (sched_getaffinity(0, size, orig_mask))
+ goto err_out;
+
+ mask = CPU_ALLOC(nrcpus);
+ if (!mask)
+ goto err_out;
- CPU_ZERO(&mask);
+ CPU_ZERO_S(size, mask);
if (target_node == NUMA_NO_NODE) {
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
- CPU_SET(cpu, &mask);
+ CPU_SET_S(cpu, size, mask);
} else {
- int cpu_start = (target_node + 0) * cpus_per_node;
- int cpu_stop = (target_node + 1) * cpus_per_node;
+ struct bitmask *cpumask = numa_allocate_cpumask();
- BUG_ON(cpu_stop > g->p.nr_cpus);
+ if (!cpumask)
+ goto err;
- for (cpu = cpu_start; cpu < cpu_stop; cpu++)
- CPU_SET(cpu, &mask);
+ if (!numa_node_to_cpus(target_node, cpumask)) {
+ for (cpu = 0; cpu < (int)cpumask->size; cpu++) {
+ if (numa_bitmask_isbitset(cpumask, cpu))
+ CPU_SET_S(cpu, size, mask);
+ }
+ }
+ numa_free_cpumask(cpumask);
}
- ret = sched_setaffinity(0, sizeof(mask), &mask);
- BUG_ON(ret);
+ if (sched_setaffinity(0, size, mask))
+ goto err;
return orig_mask;
+
+err:
+ CPU_FREE(mask);
+err_out:
+ CPU_FREE(orig_mask);
+
+ /* BUG_ON due to failure in allocation of orig_mask/mask */
+ BUG_ON(-1);
+ return NULL;
}
-static void bind_to_cpumask(cpu_set_t mask)
+static void bind_to_cpumask(cpu_set_t *mask)
{
int ret;
+ size_t size = CPU_ALLOC_SIZE(numa_num_possible_cpus());
- ret = sched_setaffinity(0, sizeof(mask), &mask);
- BUG_ON(ret);
+ ret = sched_setaffinity(0, size, mask);
+ if (ret) {
+ CPU_FREE(mask);
+ BUG_ON(ret);
+ }
}
static void mempol_restore(void)
@@ -339,18 +391,22 @@ static void mempol_restore(void)
static void bind_to_memnode(int node)
{
- unsigned long nodemask;
+ struct bitmask *node_mask;
int ret;
if (node == NUMA_NO_NODE)
return;
- BUG_ON(g->p.nr_nodes > (int)sizeof(nodemask)*8);
- nodemask = 1L << node;
+ node_mask = numa_allocate_nodemask();
+ BUG_ON(!node_mask);
+
+ numa_bitmask_clearall(node_mask);
+ numa_bitmask_setbit(node_mask, node);
- ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
- dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);
+ ret = set_mempolicy(MPOL_BIND, node_mask->maskp, node_mask->size + 1);
+ dprintf("binding to node %d, mask: %016lx => %d\n", node, *node_mask->maskp, ret);
+ numa_bitmask_free(node_mask);
BUG_ON(ret);
}
@@ -367,7 +423,7 @@ do { \
static u8 *alloc_data(ssize_t bytes0, int map_flags,
int init_zero, int init_cpu0, int thp, int init_random)
{
- cpu_set_t orig_mask;
+ cpu_set_t *orig_mask = NULL;
ssize_t bytes;
u8 *buf;
int ret;
@@ -425,6 +481,7 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
/* Restore affinity: */
if (init_cpu0) {
bind_to_cpumask(orig_mask);
+ CPU_FREE(orig_mask);
mempol_restore();
}
@@ -467,18 +524,6 @@ static void * setup_private_data(ssize_t bytes)
return alloc_data(bytes, MAP_PRIVATE, 0, g->p.init_cpu0, g->p.thp, g->p.init_random);
}
-/*
- * Return a process-shared (global) mutex:
- */
-static void init_global_mutex(pthread_mutex_t *mutex)
-{
- pthread_mutexattr_t attr;
-
- pthread_mutexattr_init(&attr);
- pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
- pthread_mutex_init(mutex, &attr);
-}
-
static int parse_cpu_list(const char *arg)
{
p0.cpu_list_str = strdup(arg);
@@ -564,10 +609,16 @@ static int parse_setup_cpu_list(void)
return -1;
}
+ if (is_cpu_online(bind_cpu_0) != 1 || is_cpu_online(bind_cpu_1) != 1) {
+ printf("\nTest not applicable, bind_cpu_0 or bind_cpu_1 is offline\n");
+ return -1;
+ }
+
BUG_ON(bind_cpu_0 < 0 || bind_cpu_1 < 0);
BUG_ON(bind_cpu_0 > bind_cpu_1);
for (bind_cpu = bind_cpu_0; bind_cpu <= bind_cpu_1; bind_cpu += step) {
+ size_t size = CPU_ALLOC_SIZE(g->p.nr_cpus);
int i;
for (i = 0; i < mul; i++) {
@@ -587,10 +638,15 @@ static int parse_setup_cpu_list(void)
tprintf("%2d", bind_cpu);
}
- CPU_ZERO(&td->bind_cpumask);
+ td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+ BUG_ON(!td->bind_cpumask);
+ CPU_ZERO_S(size, td->bind_cpumask);
for (cpu = bind_cpu; cpu < bind_cpu+bind_len; cpu++) {
- BUG_ON(cpu < 0 || cpu >= g->p.nr_cpus);
- CPU_SET(cpu, &td->bind_cpumask);
+ if (cpu < 0 || cpu >= g->p.nr_cpus) {
+ CPU_FREE(td->bind_cpumask);
+ BUG_ON(-1);
+ }
+ CPU_SET_S(cpu, size, td->bind_cpumask);
}
t++;
}
@@ -729,12 +785,8 @@ static int parse_nodes_opt(const struct option *opt __maybe_unused,
return -1;
return parse_node_list(arg);
-
- return 0;
}
-#define BIT(x) (1ul << x)
-
static inline uint32_t lfsr_32(uint32_t lfsr)
{
const uint32_t taps = BIT(1) | BIT(5) | BIT(6) | BIT(31);
@@ -795,7 +847,7 @@ static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val
if (g->p.data_rand_walk) {
u32 lfsr = nr + loop + val;
- int j;
+ long j;
for (i = 0; i < words/1024; i++) {
long start, end;
@@ -813,12 +865,12 @@ static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val
}
}
} else if (!g->p.data_backwards || (nr + loop) & 1) {
+ /* Process data forwards: */
d0 = data + off;
d = data + off + 1;
d1 = data + words;
- /* Process data forwards: */
for (;;) {
if (unlikely(d >= d1))
d = data;
@@ -836,7 +888,6 @@ static u64 do_work(u8 *__data, long bytes, int nr, int nr_max, int loop, u64 val
d = data + off - 1;
d1 = data + words;
- /* Process data forwards: */
for (;;) {
if (unlikely(d < data))
d = data + words-1;
@@ -862,8 +913,6 @@ static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
prctl(0, bytes_worked);
}
-#define MAX_NR_NODES 64
-
/*
* Count the number of nodes a process's threads
* are spread out on.
@@ -874,10 +923,15 @@ static void update_curr_cpu(int task_nr, unsigned long bytes_worked)
*/
static int count_process_nodes(int process_nr)
{
- char node_present[MAX_NR_NODES] = { 0, };
+ char *node_present;
int nodes;
int n, t;
+ node_present = (char *)malloc(g->p.nr_nodes * sizeof(char));
+ BUG_ON(!node_present);
+ for (nodes = 0; nodes < g->p.nr_nodes; nodes++)
+ node_present[nodes] = 0;
+
for (t = 0; t < g->p.nr_threads; t++) {
struct thread_data *td;
int task_nr;
@@ -887,17 +941,20 @@ static int count_process_nodes(int process_nr)
td = g->threads + task_nr;
node = numa_node_of_cpu(td->curr_cpu);
- if (node < 0) /* curr_cpu was likely still -1 */
+ if (node < 0) /* curr_cpu was likely still -1 */ {
+ free(node_present);
return 0;
+ }
node_present[node] = 1;
}
nodes = 0;
- for (n = 0; n < MAX_NR_NODES; n++)
+ for (n = 0; n < g->p.nr_nodes; n++)
nodes += node_present[n];
+ free(node_present);
return nodes;
}
@@ -966,7 +1023,7 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
{
unsigned int loops_done_min, loops_done_max;
int process_groups;
- int nodes[MAX_NR_NODES];
+ int *nodes;
int distance;
int nr_min;
int nr_max;
@@ -980,6 +1037,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
if (!g->p.show_convergence && !g->p.measure_convergence)
return;
+ nodes = (int *)malloc(g->p.nr_nodes * sizeof(int));
+ BUG_ON(!nodes);
for (node = 0; node < g->p.nr_nodes; node++)
nodes[node] = 0;
@@ -1021,8 +1080,10 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
BUG_ON(sum > g->p.nr_tasks);
- if (0 && (sum < g->p.nr_tasks))
+ if (0 && (sum < g->p.nr_tasks)) {
+ free(nodes);
return;
+ }
/*
* Count the number of distinct process groups present
@@ -1074,6 +1135,8 @@ static void calc_convergence(double runtime_ns_max, double *convergence)
}
tprintf("\n");
}
+
+ free(nodes);
}
static void show_summary(double runtime_ns_max, int l, double *convergence)
@@ -1133,19 +1196,22 @@ static void *worker_thread(void *__tdata)
}
if (g->p.serialize_startup) {
- pthread_mutex_lock(&g->startup_mutex);
+ mutex_lock(&g->startup_mutex);
g->nr_tasks_started++;
- pthread_mutex_unlock(&g->startup_mutex);
+ /* The last thread wakes the main process. */
+ if (g->nr_tasks_started == g->p.nr_tasks)
+ cond_signal(&g->startup_cond);
+
+ mutex_unlock(&g->startup_mutex);
/* Here we will wait for the main process to start us all at once: */
- pthread_mutex_lock(&g->start_work_mutex);
+ mutex_lock(&g->start_work_mutex);
+ g->start_work = false;
g->nr_tasks_working++;
+ while (!g->start_work)
+ cond_wait(&g->start_work_cond, &g->start_work_mutex);
- /* Last one wake the main process: */
- if (g->nr_tasks_working == g->p.nr_tasks)
- pthread_mutex_unlock(&g->startup_done_mutex);
-
- pthread_mutex_unlock(&g->start_work_mutex);
+ mutex_unlock(&g->start_work_mutex);
}
gettimeofday(&start0, NULL);
@@ -1164,17 +1230,17 @@ static void *worker_thread(void *__tdata)
val += do_work(thread_data, g->p.bytes_thread, 0, 1, l, val);
if (g->p.sleep_usecs) {
- pthread_mutex_lock(td->process_lock);
+ mutex_lock(td->process_lock);
usleep(g->p.sleep_usecs);
- pthread_mutex_unlock(td->process_lock);
+ mutex_unlock(td->process_lock);
}
/*
* Amount of work to be done under a process-global lock:
*/
if (g->p.bytes_process_locked) {
- pthread_mutex_lock(td->process_lock);
+ mutex_lock(td->process_lock);
val += do_work(process_data, g->p.bytes_process_locked, thread_nr, g->p.nr_threads, l, val);
- pthread_mutex_unlock(td->process_lock);
+ mutex_unlock(td->process_lock);
}
work_done = g->p.bytes_global + g->p.bytes_process +
@@ -1208,7 +1274,7 @@ static void *worker_thread(void *__tdata)
* by migrating to CPU#0:
*/
if (first_task && g->p.perturb_secs && (int)(stop.tv_sec - last_perturbance) >= g->p.perturb_secs) {
- cpu_set_t orig_mask;
+ cpu_set_t *orig_mask;
int target_cpu;
int this_cpu;
@@ -1232,6 +1298,7 @@ static void *worker_thread(void *__tdata)
printf(" (injecting perturbalance, moved to CPU#%d)\n", target_cpu);
bind_to_cpumask(orig_mask);
+ CPU_FREE(orig_mask);
}
if (details >= 3) {
@@ -1270,9 +1337,9 @@ static void *worker_thread(void *__tdata)
free_data(thread_data, g->p.bytes_thread);
- pthread_mutex_lock(&g->stop_work_mutex);
+ mutex_lock(&g->stop_work_mutex);
g->bytes_done += bytes_done;
- pthread_mutex_unlock(&g->stop_work_mutex);
+ mutex_unlock(&g->stop_work_mutex);
return NULL;
}
@@ -1282,7 +1349,7 @@ static void *worker_thread(void *__tdata)
*/
static void worker_process(int process_nr)
{
- pthread_mutex_t process_lock;
+ struct mutex process_lock;
struct thread_data *td;
pthread_t *pthreads;
u8 *process_data;
@@ -1290,7 +1357,7 @@ static void worker_process(int process_nr)
int ret;
int t;
- pthread_mutex_init(&process_lock, NULL);
+ mutex_init(&process_lock);
set_taskname("process %d", process_nr);
/*
@@ -1365,21 +1432,31 @@ static void init_thread_data(void)
for (t = 0; t < g->p.nr_tasks; t++) {
struct thread_data *td = g->threads + t;
+ size_t cpuset_size = CPU_ALLOC_SIZE(g->p.nr_cpus);
int cpu;
/* Allow all nodes by default: */
td->bind_node = NUMA_NO_NODE;
/* Allow all CPUs by default: */
- CPU_ZERO(&td->bind_cpumask);
+ td->bind_cpumask = CPU_ALLOC(g->p.nr_cpus);
+ BUG_ON(!td->bind_cpumask);
+ CPU_ZERO_S(cpuset_size, td->bind_cpumask);
for (cpu = 0; cpu < g->p.nr_cpus; cpu++)
- CPU_SET(cpu, &td->bind_cpumask);
+ CPU_SET_S(cpu, cpuset_size, td->bind_cpumask);
}
}
static void deinit_thread_data(void)
{
ssize_t size = sizeof(*g->threads)*g->p.nr_tasks;
+ int t;
+
+ /* Free the bind_cpumask allocated for thread_data */
+ for (t = 0; t < g->p.nr_tasks; t++) {
+ struct thread_data *td = g->threads + t;
+ CPU_FREE(td->bind_cpumask);
+ }
free_data(g->threads, size);
}
@@ -1396,9 +1473,9 @@ static int init(void)
g->p.nr_nodes = numa_max_node() + 1;
/* char array in count_process_nodes(): */
- BUG_ON(g->p.nr_nodes > MAX_NR_NODES || g->p.nr_nodes < 0);
+ BUG_ON(g->p.nr_nodes < 0);
- if (g->p.show_quiet && !g->p.show_details)
+ if (quiet && !g->p.show_details)
g->p.show_details = -1;
/* Some memory should be specified: */
@@ -1439,10 +1516,11 @@ static int init(void)
g->data = setup_shared_data(g->p.bytes_global);
/* Startup serialization: */
- init_global_mutex(&g->start_work_mutex);
- init_global_mutex(&g->startup_mutex);
- init_global_mutex(&g->startup_done_mutex);
- init_global_mutex(&g->stop_work_mutex);
+ mutex_init_pshared(&g->start_work_mutex);
+ cond_init_pshared(&g->start_work_cond);
+ mutex_init_pshared(&g->startup_mutex);
+ cond_init_pshared(&g->startup_cond);
+ mutex_init_pshared(&g->stop_work_mutex);
init_thread_data();
@@ -1476,7 +1554,7 @@ static void print_res(const char *name, double val,
if (!name)
name = "main,";
- if (!g->p.show_quiet)
+ if (!quiet)
printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
else
printf(" %14.3f %s\n", val, txt_long);
@@ -1501,9 +1579,6 @@ static int __bench_numa(const char *name)
pids = zalloc(g->p.nr_proc * sizeof(*pids));
pid = -1;
- /* All threads try to acquire it, this way we can wait for them to start up: */
- pthread_mutex_lock(&g->start_work_mutex);
-
if (g->p.serialize_startup) {
tprintf(" #\n");
tprintf(" # Startup synchronization: ..."); fflush(stdout);
@@ -1525,22 +1600,29 @@ static int __bench_numa(const char *name)
pids[i] = pid;
}
- /* Wait for all the threads to start up: */
- while (g->nr_tasks_started != g->p.nr_tasks)
- usleep(USEC_PER_MSEC);
-
- BUG_ON(g->nr_tasks_started != g->p.nr_tasks);
if (g->p.serialize_startup) {
+ bool threads_ready = false;
double startup_sec;
- pthread_mutex_lock(&g->startup_done_mutex);
-
- /* This will start all threads: */
- pthread_mutex_unlock(&g->start_work_mutex);
-
- /* This mutex is locked - the last started thread will wake us: */
- pthread_mutex_lock(&g->startup_done_mutex);
+ /*
+ * Wait for all the threads to start up. The last thread will
+ * signal this process.
+ */
+ mutex_lock(&g->startup_mutex);
+ while (g->nr_tasks_started != g->p.nr_tasks)
+ cond_wait(&g->startup_cond, &g->startup_mutex);
+
+ mutex_unlock(&g->startup_mutex);
+
+ /* Wait for all threads to be at the start_work_cond. */
+ while (!threads_ready) {
+ mutex_lock(&g->start_work_mutex);
+ threads_ready = (g->nr_tasks_working == g->p.nr_tasks);
+ mutex_unlock(&g->start_work_mutex);
+ if (!threads_ready)
+ usleep(1);
+ }
gettimeofday(&stop, NULL);
@@ -1554,7 +1636,11 @@ static int __bench_numa(const char *name)
tprintf(" #\n");
start = stop;
- pthread_mutex_unlock(&g->startup_done_mutex);
+ /* Start all threads running. */
+ mutex_lock(&g->start_work_mutex);
+ g->start_work = true;
+ mutex_unlock(&g->start_work_mutex);
+ cond_broadcast(&g->start_work_cond);
} else {
gettimeofday(&start, NULL);
}
@@ -1630,7 +1716,7 @@ static int __bench_numa(const char *name)
"GB/sec,", "total-speed", "GB/sec total speed");
if (g->p.show_details >= 2) {
- char tname[14 + 2 * 10 + 1];
+ char tname[14 + 2 * 11 + 1];
struct thread_data *td;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {
@@ -1733,12 +1819,12 @@ err:
*/
static const char *tests[][MAX_ARGS] = {
/* Basic single-stream NUMA bandwidth measurements: */
- { "RAM-bw-local,", "mem", "-p", "1", "-t", "1", "-P", "1024",
+ { "RAM-bw-local,", "mem", "-p", "1", "-t", "1", "-P", "1024",
"-C" , "0", "-M", "0", OPT_BW_RAM },
{ "RAM-bw-local-NOTHP,",
"mem", "-p", "1", "-t", "1", "-P", "1024",
"-C" , "0", "-M", "0", OPT_BW_RAM_NOTHP },
- { "RAM-bw-remote,", "mem", "-p", "1", "-t", "1", "-P", "1024",
+ { "RAM-bw-remote,", "mem", "-p", "1", "-t", "1", "-P", "1024",
"-C" , "0", "-M", "1", OPT_BW_RAM },
/* 2-stream NUMA bandwidth measurements: */
@@ -1755,7 +1841,7 @@ static const char *tests[][MAX_ARGS] = {
{ " 1x3-convergence,", "mem", "-p", "1", "-t", "3", "-P", "512", OPT_CONV },
{ " 1x4-convergence,", "mem", "-p", "1", "-t", "4", "-P", "512", OPT_CONV },
{ " 1x6-convergence,", "mem", "-p", "1", "-t", "6", "-P", "1020", OPT_CONV },
- { " 2x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV },
+ { " 2x3-convergence,", "mem", "-p", "2", "-t", "3", "-P", "1020", OPT_CONV },
{ " 3x3-convergence,", "mem", "-p", "3", "-t", "3", "-P", "1020", OPT_CONV },
{ " 4x4-convergence,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_CONV },
{ " 4x4-convergence-NOTHP,",
@@ -1780,24 +1866,24 @@ static const char *tests[][MAX_ARGS] = {
"mem", "-p", "8", "-t", "1", "-P", " 512", OPT_BW_NOTHP },
{ "16x1-bw-process,", "mem", "-p", "16", "-t", "1", "-P", "256", OPT_BW },
- { " 4x1-bw-thread,", "mem", "-p", "1", "-t", "4", "-T", "256", OPT_BW },
- { " 8x1-bw-thread,", "mem", "-p", "1", "-t", "8", "-T", "256", OPT_BW },
- { "16x1-bw-thread,", "mem", "-p", "1", "-t", "16", "-T", "128", OPT_BW },
- { "32x1-bw-thread,", "mem", "-p", "1", "-t", "32", "-T", "64", OPT_BW },
+ { " 1x4-bw-thread,", "mem", "-p", "1", "-t", "4", "-T", "256", OPT_BW },
+ { " 1x8-bw-thread,", "mem", "-p", "1", "-t", "8", "-T", "256", OPT_BW },
+ { "1x16-bw-thread,", "mem", "-p", "1", "-t", "16", "-T", "128", OPT_BW },
+ { "1x32-bw-thread,", "mem", "-p", "1", "-t", "32", "-T", "64", OPT_BW },
- { " 2x3-bw-thread,", "mem", "-p", "2", "-t", "3", "-P", "512", OPT_BW },
- { " 4x4-bw-thread,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_BW },
- { " 4x6-bw-thread,", "mem", "-p", "4", "-t", "6", "-P", "512", OPT_BW },
- { " 4x8-bw-thread,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW },
- { " 4x8-bw-thread-NOTHP,",
+ { " 2x3-bw-process,", "mem", "-p", "2", "-t", "3", "-P", "512", OPT_BW },
+ { " 4x4-bw-process,", "mem", "-p", "4", "-t", "4", "-P", "512", OPT_BW },
+ { " 4x6-bw-process,", "mem", "-p", "4", "-t", "6", "-P", "512", OPT_BW },
+ { " 4x8-bw-process,", "mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW },
+ { " 4x8-bw-process-NOTHP,",
"mem", "-p", "4", "-t", "8", "-P", "512", OPT_BW_NOTHP },
- { " 3x3-bw-thread,", "mem", "-p", "3", "-t", "3", "-P", "512", OPT_BW },
- { " 5x5-bw-thread,", "mem", "-p", "5", "-t", "5", "-P", "512", OPT_BW },
+ { " 3x3-bw-process,", "mem", "-p", "3", "-t", "3", "-P", "512", OPT_BW },
+ { " 5x5-bw-process,", "mem", "-p", "5", "-t", "5", "-P", "512", OPT_BW },
- { "2x16-bw-thread,", "mem", "-p", "2", "-t", "16", "-P", "512", OPT_BW },
- { "1x32-bw-thread,", "mem", "-p", "1", "-t", "32", "-P", "2048", OPT_BW },
+ { "2x16-bw-process,", "mem", "-p", "2", "-t", "16", "-P", "512", OPT_BW },
+ { "1x32-bw-process,", "mem", "-p", "1", "-t", "32", "-P", "2048", OPT_BW },
- { "numa02-bw,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW },
+ { "numa02-bw,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW },
{ "numa02-bw-NOTHP,", "mem", "-p", "1", "-t", "32", "-T", "32", OPT_BW_NOTHP },
{ "numa01-bw-thread,", "mem", "-p", "2", "-t", "16", "-T", "192", OPT_BW },
{ "numa01-bw-thread-NOTHP,",
diff --git a/tools/perf/bench/pmu-scan.c b/tools/perf/bench/pmu-scan.c
new file mode 100644
index 000000000000..f0f007843bb8
--- /dev/null
+++ b/tools/perf/bench/pmu-scan.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Benchmark scanning sysfs files for PMU information.
+ *
+ * Copyright 2023 Google LLC.
+ */
+#include <stdio.h>
+#include "bench.h"
+#include "util/debug.h"
+#include "util/pmu.h"
+#include "util/pmus.h"
+#include "util/stat.h"
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/time64.h>
+#include <subcmd/parse-options.h>
+
+static unsigned int iterations = 100;
+
+struct pmu_scan_result {
+ char *name;
+ int nr_aliases;
+ int nr_formats;
+ int nr_caps;
+};
+
+static const struct option options[] = {
+ OPT_UINTEGER('i', "iterations", &iterations,
+ "Number of iterations used to compute average"),
+ OPT_END()
+};
+
+static const char *const bench_usage[] = {
+ "perf bench internals pmu-scan <options>",
+ NULL
+};
+
+static int nr_pmus;
+static struct pmu_scan_result *results;
+
+static int save_result(void)
+{
+ struct perf_pmu *pmu;
+ struct list_head *list;
+ struct pmu_scan_result *r;
+
+ perf_pmu__scan(NULL);
+
+ perf_pmus__for_each_pmu(pmu) {
+ r = realloc(results, (nr_pmus + 1) * sizeof(*r));
+ if (r == NULL)
+ return -ENOMEM;
+
+ results = r;
+ r = results + nr_pmus;
+
+ r->name = strdup(pmu->name);
+ r->nr_caps = pmu->nr_caps;
+
+ r->nr_aliases = 0;
+ list_for_each(list, &pmu->aliases)
+ r->nr_aliases++;
+
+ r->nr_formats = 0;
+ list_for_each(list, &pmu->format)
+ r->nr_formats++;
+
+ pr_debug("pmu[%d] name=%s, nr_caps=%d, nr_aliases=%d, nr_formats=%d\n",
+ nr_pmus, r->name, r->nr_caps, r->nr_aliases, r->nr_formats);
+ nr_pmus++;
+ }
+
+ perf_pmu__destroy();
+ return 0;
+}
+
+static int check_result(void)
+{
+ struct pmu_scan_result *r;
+ struct perf_pmu *pmu;
+ struct list_head *list;
+ int nr;
+
+ for (int i = 0; i < nr_pmus; i++) {
+ r = &results[i];
+ pmu = perf_pmu__find(r->name);
+ if (pmu == NULL) {
+ pr_err("Cannot find PMU %s\n", r->name);
+ return -1;
+ }
+
+ if (pmu->nr_caps != (u32)r->nr_caps) {
+ pr_err("Unmatched number of event caps in %s: expect %d vs got %d\n",
+ pmu->name, r->nr_caps, pmu->nr_caps);
+ return -1;
+ }
+
+ nr = 0;
+ list_for_each(list, &pmu->aliases)
+ nr++;
+ if (nr != r->nr_aliases) {
+ pr_err("Unmatched number of event aliases in %s: expect %d vs got %d\n",
+ pmu->name, r->nr_aliases, nr);
+ return -1;
+ }
+
+ nr = 0;
+ list_for_each(list, &pmu->format)
+ nr++;
+ if (nr != r->nr_formats) {
+ pr_err("Unmatched number of event formats in %s: expect %d vs got %d\n",
+ pmu->name, r->nr_formats, nr);
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void delete_result(void)
+{
+ for (int i = 0; i < nr_pmus; i++)
+ free(results[i].name);
+ free(results);
+
+ results = NULL;
+ nr_pmus = 0;
+}
+
+static int run_pmu_scan(void)
+{
+ struct stats stats;
+ struct timeval start, end, diff;
+ double time_average, time_stddev;
+ u64 runtime_us;
+ unsigned int i;
+ int ret;
+
+ init_stats(&stats);
+ pr_info("Computing performance of sysfs PMU event scan for %u times\n",
+ iterations);
+
+ if (save_result() < 0) {
+ pr_err("Failed to initialize PMU scan result\n");
+ return -1;
+ }
+
+ for (i = 0; i < iterations; i++) {
+ gettimeofday(&start, NULL);
+ perf_pmu__scan(NULL);
+ gettimeofday(&end, NULL);
+
+ timersub(&end, &start, &diff);
+ runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ update_stats(&stats, runtime_us);
+
+ ret = check_result();
+ perf_pmu__destroy();
+ if (ret < 0)
+ break;
+ }
+
+ time_average = avg_stats(&stats);
+ time_stddev = stddev_stats(&stats);
+ pr_info(" Average PMU scanning took: %.3f usec (+- %.3f usec)\n",
+ time_average, time_stddev);
+
+ delete_result();
+ return 0;
+}
+
+int bench_pmu_scan(int argc, const char **argv)
+{
+ int err = 0;
+
+ argc = parse_options(argc, argv, options, bench_usage, 0);
+ if (argc) {
+ usage_with_options(bench_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ err = run_pmu_scan();
+
+ return err;
+}
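check_result() validates the cached scan by walking the alias and format lists again and comparing counts. The counting idiom it repeats could be factored into a small helper, sketched here (list_count is a hypothetical name):

#include <linux/list.h>

static int list_count(struct list_head *head)
{
	struct list_head *pos;
	int n = 0;

	list_for_each(pos, head)	/* O(n) walk over a kernel-style list */
		n++;
	return n;
}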
diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c
index 97e4a4fb3362..488f6e6ba1a5 100644
--- a/tools/perf/bench/sched-messaging.c
+++ b/tools/perf/bench/sched-messaging.c
@@ -40,7 +40,7 @@ struct sender_context {
unsigned int num_fds;
int ready_out;
int wakefd;
- int out_fds[0];
+ int out_fds[];
};
struct receiver_context {
@@ -66,11 +66,10 @@ static void fdpair(int fds[2])
/* Block until we're ready to go */
static void ready(int ready_out, int wakefd)
{
- char dummy;
struct pollfd pollfd = { .fd = wakefd, .events = POLLIN };
/* Tell them we're ready. */
- if (write(ready_out, &dummy, 1) != 1)
+ if (write(ready_out, "R", 1) != 1)
err(EXIT_FAILURE, "CLIENT: ready write");
/* Wait for "GO" signal */
@@ -85,6 +84,7 @@ static void *sender(struct sender_context *ctx)
unsigned int i, j;
ready(ctx->ready_out, ctx->wakefd);
+ memset(data, 'S', sizeof(data));
/* Now pump to every receiver. */
for (i = 0; i < nr_loops; i++) {
@@ -309,11 +309,11 @@ int bench_sched_messaging(int argc, const char **argv)
num_groups, num_groups * 2 * num_fds,
thread_mode ? "threads" : "processes");
printf(" %14s: %lu.%03lu [sec]\n", "Total time",
- diff.tv_sec,
+ (unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
break;
case BENCH_FORMAT_SIMPLE:
- printf("%lu.%03lu\n", diff.tv_sec,
+ printf("%lu.%03lu\n", (unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
break;
default:
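The hunk above shows only the first half of ready(); the complete handshake writes one byte to tell the parent the task exists and then blocks in poll() until the parent writes the "go" byte on wakefd. A condensed sketch:

#include <poll.h>
#include <unistd.h>
#include <err.h>

static void wait_for_go(int ready_out, int wakefd)
{
	struct pollfd pfd = { .fd = wakefd, .events = POLLIN };

	if (write(ready_out, "R", 1) != 1)	/* announce readiness */
		err(1, "ready write");
	if (poll(&pfd, 1, -1) != 1)		/* block for the "go" signal */
		err(1, "poll");
}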
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 3c88d1f201f1..a960e7a93aec 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -156,7 +156,7 @@ int bench_sched_pipe(int argc, const char **argv)
result_usec += diff.tv_usec;
printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
- diff.tv_sec,
+ (unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
printf(" %14lf usecs/op\n",
@@ -168,7 +168,7 @@ int bench_sched_pipe(int argc, const char **argv)
case BENCH_FORMAT_SIMPLE:
printf("%lu.%03lu\n",
- diff.tv_sec,
+ (unsigned long) diff.tv_sec,
(unsigned long) (diff.tv_usec / USEC_PER_MSEC));
break;
diff --git a/tools/perf/bench/synthesize.c b/tools/perf/bench/synthesize.c
new file mode 100644
index 000000000000..7401ebbac100
--- /dev/null
+++ b/tools/perf/bench/synthesize.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Benchmark synthesis of perf events such as at the start of a 'perf
+ * record'. Synthesis is done on the current process and 'dummy' event
+ * handlers are invoked; these support dump_trace but otherwise do nothing.
+ *
+ * Copyright 2019 Google LLC.
+ */
+#include <stdio.h>
+#include "bench.h"
+#include "../util/debug.h"
+#include "../util/session.h"
+#include "../util/stat.h"
+#include "../util/synthetic-events.h"
+#include "../util/target.h"
+#include "../util/thread_map.h"
+#include "../util/tool.h"
+#include "../util/util.h"
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/time64.h>
+#include <subcmd/parse-options.h>
+
+static unsigned int min_threads = 1;
+static unsigned int max_threads = UINT_MAX;
+static unsigned int single_iterations = 10000;
+static unsigned int multi_iterations = 10;
+static bool run_st;
+static bool run_mt;
+
+static const struct option options[] = {
+ OPT_BOOLEAN('s', "st", &run_st, "Run single threaded benchmark"),
+ OPT_BOOLEAN('t', "mt", &run_mt, "Run multi-threaded benchmark"),
+ OPT_UINTEGER('m', "min-threads", &min_threads,
+ "Minimum number of threads in multithreaded bench"),
+ OPT_UINTEGER('M', "max-threads", &max_threads,
+ "Maximum number of threads in multithreaded bench"),
+ OPT_UINTEGER('i', "single-iterations", &single_iterations,
+ "Number of iterations used to compute single-threaded average"),
+ OPT_UINTEGER('I', "multi-iterations", &multi_iterations,
+ "Number of iterations used to compute multi-threaded average"),
+ OPT_END()
+};
+
+static const char *const bench_usage[] = {
+ "perf bench internals synthesize <options>",
+ NULL
+};
+
+static atomic_t event_count;
+
+static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ atomic_inc(&event_count);
+ return 0;
+}
+
+static int do_run_single_threaded(struct perf_session *session,
+ struct perf_thread_map *threads,
+ struct target *target, bool data_mmap)
+{
+ const unsigned int nr_threads_synthesize = 1;
+ struct timeval start, end, diff;
+ u64 runtime_us;
+ unsigned int i;
+ double time_average, time_stddev, event_average, event_stddev;
+ int err;
+ struct stats time_stats, event_stats;
+
+ init_stats(&time_stats);
+ init_stats(&event_stats);
+
+ for (i = 0; i < single_iterations; i++) {
+ atomic_set(&event_count, 0);
+ gettimeofday(&start, NULL);
+ err = __machine__synthesize_threads(&session->machines.host,
+ NULL,
+ target, threads,
+ process_synthesized_event,
+ true, data_mmap,
+ nr_threads_synthesize);
+ if (err)
+ return err;
+
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &diff);
+ runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ update_stats(&time_stats, runtime_us);
+ update_stats(&event_stats, atomic_read(&event_count));
+ }
+
+ time_average = avg_stats(&time_stats);
+ time_stddev = stddev_stats(&time_stats);
+ printf(" Average %ssynthesis took: %.3f usec (+- %.3f usec)\n",
+ data_mmap ? "data " : "", time_average, time_stddev);
+
+ event_average = avg_stats(&event_stats);
+ event_stddev = stddev_stats(&event_stats);
+ printf(" Average num. events: %.3f (+- %.3f)\n",
+ event_average, event_stddev);
+
+ printf(" Average time per event %.3f usec\n",
+ time_average / event_average);
+ return 0;
+}
+
+static int run_single_threaded(void)
+{
+ struct perf_session *session;
+ struct target target = {
+ .pid = "self",
+ };
+ struct perf_thread_map *threads;
+ int err;
+
+ perf_set_singlethreaded();
+ session = perf_session__new(NULL, NULL);
+ if (IS_ERR(session)) {
+ pr_err("Session creation failed.\n");
+ return PTR_ERR(session);
+ }
+ threads = thread_map__new_by_pid(getpid());
+ if (!threads) {
+ pr_err("Thread map creation failed.\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ puts(
+"Computing performance of single threaded perf event synthesis by\n"
+"synthesizing events on the perf process itself:");
+
+ err = do_run_single_threaded(session, threads, &target, false);
+ if (err)
+ goto err_out;
+
+ err = do_run_single_threaded(session, threads, &target, true);
+
+err_out:
+ if (threads)
+ perf_thread_map__put(threads);
+
+ perf_session__delete(session);
+ return err;
+}
+
+static int do_run_multi_threaded(struct target *target,
+ unsigned int nr_threads_synthesize)
+{
+ struct timeval start, end, diff;
+ u64 runtime_us;
+ unsigned int i;
+ double time_average, time_stddev, event_average, event_stddev;
+ int err;
+ struct stats time_stats, event_stats;
+ struct perf_session *session;
+
+ init_stats(&time_stats);
+ init_stats(&event_stats);
+ for (i = 0; i < multi_iterations; i++) {
+ session = perf_session__new(NULL, NULL);
+ if (IS_ERR(session))
+ return PTR_ERR(session);
+
+ atomic_set(&event_count, 0);
+ gettimeofday(&start, NULL);
+ err = __machine__synthesize_threads(&session->machines.host,
+ NULL,
+ target, NULL,
+ process_synthesized_event,
+ true, false,
+ nr_threads_synthesize);
+ if (err) {
+ perf_session__delete(session);
+ return err;
+ }
+
+ gettimeofday(&end, NULL);
+ timersub(&end, &start, &diff);
+ runtime_us = diff.tv_sec * USEC_PER_SEC + diff.tv_usec;
+ update_stats(&time_stats, runtime_us);
+ update_stats(&event_stats, atomic_read(&event_count));
+ perf_session__delete(session);
+ }
+
+ time_average = avg_stats(&time_stats);
+ time_stddev = stddev_stats(&time_stats);
+ printf(" Average synthesis took: %.3f usec (+- %.3f usec)\n",
+ time_average, time_stddev);
+
+ event_average = avg_stats(&event_stats);
+ event_stddev = stddev_stats(&event_stats);
+ printf(" Average num. events: %.3f (+- %.3f)\n",
+ event_average, event_stddev);
+
+ printf(" Average time per event %.3f usec\n",
+ time_average / event_average);
+ return 0;
+}
+
+static int run_multi_threaded(void)
+{
+ struct target target = {
+ .cpu_list = "0"
+ };
+ unsigned int nr_threads_synthesize;
+ int err;
+
+ if (max_threads == UINT_MAX)
+ max_threads = sysconf(_SC_NPROCESSORS_ONLN);
+
+ puts(
+"Computing performance of multi threaded perf event synthesis by\n"
+"synthesizing events on CPU 0:");
+
+ for (nr_threads_synthesize = min_threads;
+ nr_threads_synthesize <= max_threads;
+ nr_threads_synthesize++) {
+ if (nr_threads_synthesize == 1)
+ perf_set_singlethreaded();
+ else
+ perf_set_multithreaded();
+
+ printf(" Number of synthesis threads: %u\n",
+ nr_threads_synthesize);
+
+ err = do_run_multi_threaded(&target, nr_threads_synthesize);
+ if (err)
+ return err;
+ }
+ perf_set_singlethreaded();
+ return 0;
+}
+
+int bench_synthesize(int argc, const char **argv)
+{
+ int err = 0;
+
+ argc = parse_options(argc, argv, options, bench_usage, 0);
+ if (argc) {
+ usage_with_options(bench_usage, options);
+ exit(EXIT_FAILURE);
+ }
+
+ /*
+ * If neither single-threaded nor multi-threaded mode is specified,
+ * default to running just the single-threaded benchmark.
+ */
+ if (!run_st && !run_mt)
+ run_st = true;
+
+ if (run_st)
+ err = run_single_threaded();
+
+ if (!err && run_mt)
+ err = run_multi_threaded();
+
+ return err;
+}
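
run_multi_threaded() above sweeps the synthesis thread count from --min-threads up to --max-threads, defaulting the upper bound to the number of online CPUs. A condensed sketch of that sweep, with an invented bench_body() standing in for do_run_multi_threaded():

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

static int bench_body(unsigned int nr_threads)
{
	/* stand-in for do_run_multi_threaded(&target, nr_threads) */
	printf("  Number of synthesis threads: %u\n", nr_threads);
	return 0;
}

int main(void)
{
	unsigned int min_threads = 1, max_threads = UINT_MAX;
	unsigned int n;

	if (max_threads == UINT_MAX) {
		long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

		max_threads = ncpus > 0 ? (unsigned int)ncpus : 1;
	}

	for (n = min_threads; n <= max_threads; n++) {
		if (bench_body(n))
			return 1;
	}
	return 0;
}

The benchmark itself is invoked as, for example, perf bench internals synthesize -t -m 1 -M 8.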
diff --git a/tools/perf/bench/syscall.c b/tools/perf/bench/syscall.c
new file mode 100644
index 000000000000..ea4dfc07cbd6
--- /dev/null
+++ b/tools/perf/bench/syscall.c
@@ -0,0 +1,184 @@
+/*
+ *
+ * syscall.c
+ *
+ * syscall: Benchmark for system call performance
+ */
+#include "../perf.h"
+#include "../util/util.h"
+#include <subcmd/parse-options.h>
+#include "../builtin.h"
+#include "bench.h"
+
+#include <stdio.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+#ifndef __NR_fork
+#define __NR_fork -1
+#endif
+
+#define LOOPS_DEFAULT 10000000
+static int loops = LOOPS_DEFAULT;
+
+static const struct option options[] = {
+ OPT_INTEGER('l', "loop", &loops, "Specify number of loops"),
+ OPT_END()
+};
+
+static const char * const bench_syscall_usage[] = {
+ "perf bench syscall <options>",
+ NULL
+};
+
+static void test_fork(void)
+{
+ pid_t pid = fork();
+
+ if (pid < 0) {
+ fprintf(stderr, "fork failed\n");
+ exit(1);
+ } else if (pid == 0) {
+ exit(0);
+ } else {
+ if (waitpid(pid, NULL, 0) < 0) {
+ fprintf(stderr, "waitpid failed\n");
+ exit(1);
+ }
+ }
+}
+
+static void test_execve(void)
+{
+ const char *pathname = "/bin/true";
+ char *const argv[] = { (char *)pathname, NULL };
+ pid_t pid = fork();
+
+ if (pid < 0) {
+ fprintf(stderr, "fork failed\n");
+ exit(1);
+ } else if (pid == 0) {
+ execve(pathname, argv, NULL);
+ fprintf(stderr, "execve /bin/true failed\n");
+ exit(1);
+ } else {
+ if (waitpid(pid, NULL, 0) < 0) {
+ fprintf(stderr, "waitpid failed\n");
+ exit(1);
+ }
+ }
+}
+
+static int bench_syscall_common(int argc, const char **argv, int syscall)
+{
+ struct timeval start, stop, diff;
+ unsigned long long result_usec = 0;
+ const char *name = NULL;
+ int i;
+
+ argc = parse_options(argc, argv, options, bench_syscall_usage, 0);
+
+ gettimeofday(&start, NULL);
+
+ for (i = 0; i < loops; i++) {
+ switch (syscall) {
+ case __NR_getppid:
+ getppid();
+ break;
+ case __NR_getpgid:
+ getpgid(0);
+ break;
+ case __NR_fork:
+ test_fork();
+ /* Only loop 10000 times to save time */
+ if (i == 10000)
+ loops = 10000;
+ break;
+ case __NR_execve:
+ test_execve();
+ /* Only loop 10000 times to save time */
+ if (i == 10000)
+ loops = 10000;
+ break;
+ default:
+ break;
+ }
+ }
+
+ gettimeofday(&stop, NULL);
+ timersub(&stop, &start, &diff);
+
+ switch (syscall) {
+ case __NR_getppid:
+ name = "getppid()";
+ break;
+ case __NR_getpgid:
+ name = "getpgid()";
+ break;
+ case __NR_fork:
+ name = "fork()";
+ break;
+ case __NR_execve:
+ name = "execve()";
+ break;
+ default:
+ break;
+ }
+
+ switch (bench_format) {
+ case BENCH_FORMAT_DEFAULT:
+ printf("# Executed %'d %s calls\n", loops, name);
+
+ result_usec = diff.tv_sec * 1000000;
+ result_usec += diff.tv_usec;
+
+ printf(" %14s: %lu.%03lu [sec]\n\n", "Total time",
+ (unsigned long) diff.tv_sec,
+ (unsigned long) (diff.tv_usec/1000));
+
+ printf(" %14lf usecs/op\n",
+ (double)result_usec / (double)loops);
+ printf(" %'14d ops/sec\n",
+ (int)((double)loops /
+ ((double)result_usec / (double)1000000)));
+ break;
+
+ case BENCH_FORMAT_SIMPLE:
+ printf("%lu.%03lu\n",
+ (unsigned long) diff.tv_sec,
+ (unsigned long) (diff.tv_usec / 1000));
+ break;
+
+ default:
+ /* reaching here would be a disaster */
+ fprintf(stderr, "Unknown format:%d\n", bench_format);
+ exit(1);
+ break;
+ }
+
+ return 0;
+}
+
+int bench_syscall_basic(int argc, const char **argv)
+{
+ return bench_syscall_common(argc, argv, __NR_getppid);
+}
+
+int bench_syscall_getpgid(int argc, const char **argv)
+{
+ return bench_syscall_common(argc, argv, __NR_getpgid);
+}
+
+int bench_syscall_fork(int argc, const char **argv)
+{
+ return bench_syscall_common(argc, argv, __NR_fork);
+}
+
+int bench_syscall_execve(int argc, const char **argv)
+{
+ return bench_syscall_common(argc, argv, __NR_execve);
+}
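
bench_syscall_common() above times a tight loop of the selected system call and derives usecs/op and ops/sec from the elapsed time. A standalone sketch of the getppid() case; LOOPS mirrors LOOPS_DEFAULT and the output is simplified:

#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

#define LOOPS 10000000

int main(void)
{
	struct timeval start, stop, diff;
	unsigned long long result_usec;
	int i;

	gettimeofday(&start, NULL);
	for (i = 0; i < LOOPS; i++)
		getppid();		/* the measured system call */
	gettimeofday(&stop, NULL);
	timersub(&stop, &start, &diff);

	result_usec = diff.tv_sec * 1000000ULL + diff.tv_usec;

	printf(" %14f usecs/op\n", (double)result_usec / LOOPS);
	printf(" %14d ops/sec\n",
	       (int)(LOOPS / ((double)result_usec / 1000000.0)));
	return 0;
}

Within perf this is run as perf bench syscall basic (or perf bench syscall all).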
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 6c0a0412502e..63cdf6ea6f6d 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -15,7 +15,6 @@
#include <linux/zalloc.h>
#include "util/symbol.h"
-#include "perf.h"
#include "util/debug.h"
#include "util/evlist.h"
@@ -36,6 +35,7 @@
#include "util/block-range.h"
#include "util/map_symbol.h"
#include "util/branch.h"
+#include "util/util.h"
#include <dlfcn.h>
#include <errno.h>
@@ -46,10 +46,17 @@ struct perf_annotate {
struct perf_tool tool;
struct perf_session *session;
struct annotation_options opts;
- bool use_tui, use_stdio, use_stdio2, use_gtk;
+#ifdef HAVE_SLANG_SUPPORT
+ bool use_tui;
+#endif
+ bool use_stdio, use_stdio2;
+#ifdef HAVE_GTK2_SUPPORT
+ bool use_gtk;
+#endif
bool skip_missing;
bool has_br_stack;
bool group_set;
+ float min_percent;
const char *sym_hist_filter;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -190,7 +197,6 @@ static int process_branch_callback(struct evsel *evsel,
};
struct addr_location a;
- int ret;
if (machine__resolve(machine, &a, sample) < 0)
return -1;
@@ -199,12 +205,11 @@ static int process_branch_callback(struct evsel *evsel,
return 0;
if (a.map != NULL)
- a.map->dso->hit = 1;
+ map__dso(a.map)->hit = 1;
hist__account_cycles(sample->branch_stack, al, sample, false, NULL);
- ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
- return ret;
+ return hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
}
static bool has_annotation(struct perf_annotate *ann)
@@ -212,11 +217,9 @@ static bool has_annotation(struct perf_annotate *ann)
return ui__has_annotation() || ann->use_stdio2;
}
-static int perf_evsel__add_sample(struct evsel *evsel,
- struct perf_sample *sample,
- struct addr_location *al,
- struct perf_annotate *ann,
- struct machine *machine)
+static int evsel__add_sample(struct evsel *evsel, struct perf_sample *sample,
+ struct addr_location *al, struct perf_annotate *ann,
+ struct machine *machine)
{
struct hists *hists = evsel__hists(evsel);
struct hist_entry *he;
@@ -232,16 +235,17 @@ static int perf_evsel__add_sample(struct evsel *evsel,
* the DSO?
*/
if (al->sym != NULL) {
- rb_erase_cached(&al->sym->rb_node,
- &al->map->dso->symbols);
+ struct dso *dso = map__dso(al->map);
+
+ rb_erase_cached(&al->sym->rb_node, &dso->symbols);
symbol__delete(al->sym);
- dso__reset_find_symbol_cache(al->map->dso);
+ dso__reset_find_symbol_cache(dso);
}
return 0;
}
/*
- * XXX filtered samples can still have branch entires pointing into our
+ * XXX filtered samples can still have branch entries pointing into our
* symbol and are missed.
*/
process_branch_stack(sample->branch_stack, al, sample);
@@ -249,7 +253,7 @@ static int perf_evsel__add_sample(struct evsel *evsel,
if (ann->has_br_stack && has_annotation(ann))
return process_branch_callback(evsel, sample, al, ann, machine);
- he = hists__add_entry(hists, al, NULL, NULL, NULL, sample, true);
+ he = hists__add_entry(hists, al, NULL, NULL, NULL, NULL, sample, true);
if (he == NULL)
return -ENOMEM;
@@ -278,7 +282,7 @@ static int process_sample_event(struct perf_tool *tool,
goto out_put;
if (!al.filtered &&
- perf_evsel__add_sample(evsel, sample, &al, ann, machine)) {
+ evsel__add_sample(evsel, sample, &al, ann, machine)) {
pr_warning("problem incrementing symbol count, "
"skipping event\n");
ret = -1;
@@ -317,13 +321,24 @@ static void hists__find_annotations(struct hists *hists,
struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
struct annotation *notes;
- if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned)
+ if (he->ms.sym == NULL || map__dso(he->ms.map)->annotate_warned)
goto find_next;
if (ann->sym_hist_filter &&
(strcmp(he->ms.sym->name, ann->sym_hist_filter) != 0))
goto find_next;
+ if (ann->min_percent) {
+ float percent = 0;
+ u64 total = hists__total_period(hists);
+
+ if (total)
+ percent = 100.0 * he->stat.period / total;
+
+ if (percent < ann->min_percent)
+ goto find_next;
+ }
+
notes = symbol__annotation(he->ms.sym);
if (notes->src == NULL) {
find_next:
@@ -338,6 +353,7 @@ find_next:
int ret;
int (*annotate)(struct hist_entry *he,
struct evsel *evsel,
+ struct annotation_options *options,
struct hist_browser_timer *hbt);
annotate = dlsym(perf_gtk_handle,
@@ -347,7 +363,7 @@ find_next:
return;
}
- ret = annotate(he, evsel, NULL);
+ ret = annotate(he, evsel, &ann->opts, NULL);
if (!ret || !ann->skip_missing)
return;
@@ -376,13 +392,6 @@ find_next:
} else {
hist_entry__tty_annotate(he, evsel, ann);
nd = rb_next(nd);
- /*
- * Since we have a hist_entry per IP for the same
- * symbol, free he->ms.sym->src to signal we already
- * processed this symbol.
- */
- zfree(&notes->src->cycles_hist);
- zfree(&notes->src);
}
}
}
@@ -413,8 +422,8 @@ static int __cmd_annotate(struct perf_annotate *ann)
goto out;
if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout);
- perf_evlist__fprintf_nr_events(session->evlist, stdout);
+ perf_session__fprintf_nr_events(session, stdout, false);
+ evlist__fprintf_nr_events(session->evlist, stdout, false);
goto out;
}
@@ -427,17 +436,16 @@ static int __cmd_annotate(struct perf_annotate *ann)
total_nr_samples = 0;
evlist__for_each_entry(session->evlist, pos) {
struct hists *hists = evsel__hists(pos);
- u32 nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ u32 nr_samples = hists->stats.nr_samples;
if (nr_samples > 0) {
total_nr_samples += nr_samples;
hists__collapse_resort(hists, NULL);
/* Don't sort callchain */
- perf_evsel__reset_sample_bit(pos, CALLCHAIN);
- perf_evsel__output_resort(pos, NULL);
+ evsel__reset_sample_bit(pos, CALLCHAIN);
+ evsel__output_resort(pos, NULL);
- if (symbol_conf.event_group &&
- !perf_evsel__is_group_leader(pos))
+ if (symbol_conf.event_group && !evsel__is_group_leader(pos))
continue;
hists__find_annotations(hists, pos, ann);
@@ -465,6 +473,16 @@ out:
return ret;
}
+static int parse_percent_limit(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ struct perf_annotate *ann = opt->value;
+ double pcnt = strtof(str, NULL);
+
+ ann->min_percent = pcnt;
+ return 0;
+}
+
static const char * const annotate_usage[] = {
"perf annotate [<options>]",
NULL
@@ -483,16 +501,24 @@ int cmd_annotate(int argc, const char **argv)
.namespaces = perf_event__process_namespaces,
.attr = perf_event__process_attr,
.build_id = perf_event__process_build_id,
+#ifdef HAVE_LIBTRACEEVENT
.tracing_data = perf_event__process_tracing_data,
+#endif
+ .id_index = perf_event__process_id_index,
+ .auxtrace_info = perf_event__process_auxtrace_info,
+ .auxtrace = perf_event__process_auxtrace,
.feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
- .opts = annotation__default_options,
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
};
+ struct itrace_synth_opts itrace_synth_opts = {
+ .set = 0,
+ };
+ const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
@@ -503,11 +529,15 @@ int cmd_annotate(int argc, const char **argv)
OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('q', "quiet", &quiet, "do now show any message"),
+ OPT_BOOLEAN('q', "quiet", &quiet, "do now show any warnings or messages"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+#ifdef HAVE_GTK2_SUPPORT
OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"),
+#endif
+#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
+#endif
OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
OPT_BOOLEAN(0, "stdio2", &annotate.use_stdio2, "Use the stdio interface"),
OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
@@ -533,14 +563,20 @@ int cmd_annotate(int argc, const char **argv)
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &annotate.opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
- OPT_STRING('M', "disassembler-style", &annotate.opts.disassembler_style, "disassembler style",
+ OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING(0, "prefix", &annotate.opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
OPT_STRING(0, "prefix-strip", &annotate.opts.prefix_strip, "N",
"Strip first N entries of source file path name in programs (with --prefix)"),
- OPT_STRING(0, "objdump", &annotate.opts.objdump_path, "path",
+ OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
+ OPT_STRING(0, "addr2line", &addr2line_path, "path",
+ "addr2line binary to use for line numbers"),
+ OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
+ "Enable symbol demangling"),
+ OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
+ "Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
"Show event group information together"),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
@@ -553,6 +589,11 @@ int cmd_annotate(int argc, const char **argv)
OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period",
"Set percent type local/global-period/hits",
annotate_parse_percent_type),
+ OPT_CALLBACK(0, "percent-limit", &annotate, "percent",
+ "Don't show entries under that percent", parse_percent_limit),
+ OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
+ "Instruction Tracing options\n" ITRACE_HELP,
+ itrace_parse_synth_opts),
OPT_END()
};
@@ -561,6 +602,7 @@ int cmd_annotate(int argc, const char **argv)
set_option_flag(options, 0, "show-total-period", PARSE_OPT_EXCLUSIVE);
set_option_flag(options, 0, "show-nr-samples", PARSE_OPT_EXCLUSIVE);
+ annotation_options__init(&annotate.opts);
ret = hists__init();
if (ret < 0)
@@ -580,28 +622,52 @@ int cmd_annotate(int argc, const char **argv)
annotate.sym_hist_filter = argv[0];
}
+ if (disassembler_style) {
+ annotate.opts.disassembler_style = strdup(disassembler_style);
+ if (!annotate.opts.disassembler_style)
+ return -ENOMEM;
+ }
+ if (objdump_path) {
+ annotate.opts.objdump_path = strdup(objdump_path);
+ if (!annotate.opts.objdump_path)
+ return -ENOMEM;
+ }
+ if (addr2line_path) {
+ symbol_conf.addr2line_path = strdup(addr2line_path);
+ if (!symbol_conf.addr2line_path)
+ return -ENOMEM;
+ }
+
if (annotate_check_args(&annotate.opts) < 0)
return -EINVAL;
+#ifdef HAVE_GTK2_SUPPORT
if (symbol_conf.show_nr_samples && annotate.use_gtk) {
pr_err("--show-nr-samples is not available in --gtk mode at this time\n");
return ret;
}
+#endif
+
+ ret = symbol__validate_sym_arguments();
+ if (ret)
+ return ret;
if (quiet)
perf_quiet_option();
data.path = input_name;
- annotate.session = perf_session__new(&data, false, &annotate.tool);
+ annotate.session = perf_session__new(&data, &annotate.tool);
if (IS_ERR(annotate.session))
return PTR_ERR(annotate.session);
+ annotate.session->itrace_synth_opts = &itrace_synth_opts;
+
annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
HEADER_BRANCH_STACK);
if (annotate.group_set)
- perf_evlist__force_leader(annotate.session->evlist);
+ evlist__force_leader(annotate.session->evlist);
ret = symbol__annotation_init();
if (ret < 0)
@@ -615,36 +681,45 @@ int cmd_annotate(int argc, const char **argv)
if (annotate.use_stdio || annotate.use_stdio2)
use_browser = 0;
+#ifdef HAVE_SLANG_SUPPORT
else if (annotate.use_tui)
use_browser = 1;
+#endif
+#ifdef HAVE_GTK2_SUPPORT
else if (annotate.use_gtk)
use_browser = 2;
+#endif
setup_browser(true);
- if ((use_browser == 1 || annotate.use_stdio2) && annotate.has_br_stack) {
+ /*
+ * Events of different processes may correspond to the same
+ * symbol, we do not care about the processes in annotate,
+ * set sort order to avoid repeated output.
+ */
+ sort_order = "dso,symbol";
+
+ /*
+ * Set SORT_MODE__BRANCH so that annotate display IPC/Cycle
+ * if branch info is in perf data in TUI mode.
+ */
+ if ((use_browser == 1 || annotate.use_stdio2) && annotate.has_br_stack)
sort__mode = SORT_MODE__BRANCH;
- if (setup_sorting(annotate.session->evlist) < 0)
- usage_with_options(annotate_usage, options);
- } else {
- if (setup_sorting(NULL) < 0)
- usage_with_options(annotate_usage, options);
- }
+
+ if (setup_sorting(NULL) < 0)
+ usage_with_options(annotate_usage, options);
ret = __cmd_annotate(&annotate);
out_delete:
/*
- * Speed up the exit process, for large files this can
- * take quite a while.
- *
- * XXX Enable this when using valgrind or if we ever
- * librarize this command.
- *
- * Also experiment with obstacks to see how much speed
- * up we'll get here.
- *
- * perf_session__delete(session);
+ * Speed up the exit process by only deleting the session for debug
+ * builds. For large files this can save time.
*/
+#ifndef NDEBUG
+ perf_session__delete(annotate.session);
+#endif
+ annotation_options__exit(&annotate.opts);
+
return ret;
}
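
The new --percent-limit option above parses a percentage with strtof() and hists__find_annotations() then skips entries whose share of the total period falls below it. A self-contained sketch of that filter logic; the entry data below is invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	const char *sym;
	unsigned long long period;
};

int main(void)
{
	struct entry entries[] = {
		{ "hot_func",  900 },
		{ "warm_func",  90 },
		{ "cold_func",  10 },
	};
	size_t nr = sizeof(entries) / sizeof(entries[0]);
	/* as parse_percent_limit() does with the option string */
	double min_percent = strtof("5.0", NULL);
	unsigned long long total = 0;
	size_t i;

	for (i = 0; i < nr; i++)
		total += entries[i].period;

	for (i = 0; i < nr; i++) {
		double percent = total ? 100.0 * entries[i].period / total : 0;

		if (percent < min_percent)
			continue;	/* the "goto find_next" case above */
		printf("%-10s %6.2f%%\n", entries[i].sym, percent);
	}
	return 0;
}

Typical use is perf annotate --percent-limit 5.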
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index c06fe21c8613..58f1cfe1eb34 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -11,6 +11,7 @@
* Available benchmark collection list:
*
* sched ... scheduler and IPC performance
+ * syscall ... System call performance
* mem ... memory access performance
* numa ... NUMA scheduling and MM performance
* futex ... Futex performance
@@ -49,9 +50,19 @@ static struct bench sched_benchmarks[] = {
{ NULL, NULL, NULL }
};
+static struct bench syscall_benchmarks[] = {
+ { "basic", "Benchmark for basic getppid(2) calls", bench_syscall_basic },
+ { "getpgid", "Benchmark for getpgid(2) calls", bench_syscall_getpgid },
+ { "fork", "Benchmark for fork(2) calls", bench_syscall_fork },
+ { "execve", "Benchmark for execve(2) calls", bench_syscall_execve },
+ { "all", "Run all syscall benchmarks", NULL },
+ { NULL, NULL, NULL },
+};
+
static struct bench mem_benchmarks[] = {
{ "memcpy", "Benchmark for memcpy() functions", bench_mem_memcpy },
{ "memset", "Benchmark for memset() functions", bench_mem_memset },
+ { "find_bit", "Benchmark for find_bit() functions", bench_mem_find_bit },
{ "all", "Run all memory access benchmarks", NULL },
{ NULL, NULL, NULL }
};
@@ -67,14 +78,30 @@ static struct bench futex_benchmarks[] = {
{ NULL, NULL, NULL }
};
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
static struct bench epoll_benchmarks[] = {
{ "wait", "Benchmark epoll concurrent epoll_waits", bench_epoll_wait },
{ "ctl", "Benchmark epoll concurrent epoll_ctls", bench_epoll_ctl },
{ "all", "Run all futex benchmarks", NULL },
{ NULL, NULL, NULL }
};
-#endif // HAVE_EVENTFD
+#endif // HAVE_EVENTFD_SUPPORT
+
+static struct bench internals_benchmarks[] = {
+ { "synthesize", "Benchmark perf event synthesis", bench_synthesize },
+ { "kallsyms-parse", "Benchmark kallsyms parsing", bench_kallsyms_parse },
+ { "inject-build-id", "Benchmark build-id injection", bench_inject_build_id },
+ { "evlist-open-close", "Benchmark evlist open and close", bench_evlist_open_close },
+ { "pmu-scan", "Benchmark sysfs PMU info scanning", bench_pmu_scan },
+ { NULL, NULL, NULL }
+};
+
+static struct bench breakpoint_benchmarks[] = {
+ { "thread", "Benchmark thread start/finish with breakpoints", bench_breakpoint_thread},
+ { "enable", "Benchmark breakpoint enable/disable", bench_breakpoint_enable},
+ { "all", "Run all breakpoint benchmarks", NULL},
+ { NULL, NULL, NULL },
+};
struct collection {
const char *name;
@@ -84,14 +111,17 @@ struct collection {
static struct collection collections[] = {
{ "sched", "Scheduler and IPC benchmarks", sched_benchmarks },
+ { "syscall", "System call benchmarks", syscall_benchmarks },
{ "mem", "Memory access benchmarks", mem_benchmarks },
#ifdef HAVE_LIBNUMA_SUPPORT
{ "numa", "NUMA scheduling and MM benchmarks", numa_benchmarks },
#endif
{"futex", "Futex stressing benchmarks", futex_benchmarks },
-#ifdef HAVE_EVENTFD
+#ifdef HAVE_EVENTFD_SUPPORT
{"epoll", "Epoll stressing benchmarks", epoll_benchmarks },
#endif
+ { "internals", "Perf-internals benchmarks", internals_benchmarks },
+ { "breakpoint", "Breakpoint benchmarks", breakpoint_benchmarks },
{ "all", "All benchmarks", NULL },
{ NULL, NULL, NULL }
};
@@ -124,7 +154,7 @@ unsigned int bench_repeat = 10; /* default number of times to repeat the run */
static const struct option bench_options[] = {
OPT_STRING('f', "format", &bench_format_str, "default|simple", "Specify the output formatting style"),
- OPT_UINTEGER('r', "repeat", &bench_repeat, "Specify amount of times to repeat the run"),
+ OPT_UINTEGER('r', "repeat", &bench_repeat, "Specify number of times to repeat the run"),
OPT_END()
};
@@ -208,7 +238,6 @@ static void run_collection(struct collection *coll)
if (!bench->fn)
break;
printf("# Running %s/%s benchmark...\n", coll->name, bench->name);
- fflush(stdout);
argv[1] = bench->name;
run_bench(coll->name, bench->name, bench->fn, 1, argv);
@@ -229,6 +258,9 @@ int cmd_bench(int argc, const char **argv)
struct collection *coll;
int ret = 0;
+ /* Unbuffered output */
+ setvbuf(stdout, NULL, _IONBF, 0);
+
if (argc < 2) {
/* No collection specified. */
print_usage();
@@ -282,7 +314,6 @@ int cmd_bench(int argc, const char **argv)
if (bench_format == BENCH_FORMAT_DEFAULT)
printf("# Running '%s/%s' benchmark:\n", coll->name, bench->name);
- fflush(stdout);
ret = run_bench(coll->name, bench->name, bench->fn, argc-1, argv+1);
goto end;
}
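
cmd_bench() above switches stdout to unbuffered mode once at startup, which is why the per-benchmark fflush(stdout) calls are dropped. A minimal illustration of the setvbuf() call doing that work:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* must run before any output on the stream */
	setvbuf(stdout, NULL, _IONBF, 0);

	printf("# Running benchmark...\n");	/* no fflush() needed */
	sleep(1);				/* simulated work */
	printf("# Done\n");
	return 0;
}

With the stream unbuffered, progress lines appear immediately even when stdout is a pipe rather than a terminal.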
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index 39efa51d7fb3..cd381693658b 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -27,6 +27,7 @@
#include "util/time-utils.h"
#include "util/util.h"
#include "util/probe-file.h"
+#include "util/config.h"
#include <linux/string.h>
#include <linux/err.h>
@@ -174,19 +175,19 @@ static int build_id_cache__add_kcore(const char *filename, bool force)
static int build_id_cache__add_file(const char *filename, struct nsinfo *nsi)
{
char sbuild_id[SBUILD_ID_SIZE];
- u8 build_id[BUILD_ID_SIZE];
+ struct build_id bid;
int err;
struct nscookie nsc;
nsinfo__mountns_enter(nsi, &nsc);
- err = filename__read_build_id(filename, &build_id, sizeof(build_id));
+ err = filename__read_build_id(filename, &bid);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
return -1;
}
- build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+ build_id__sprintf(&bid, sbuild_id);
err = build_id_cache__add_s(sbuild_id, filename, nsi,
false, false);
pr_debug("Adding %s %s: %s\n", sbuild_id, filename,
@@ -196,21 +197,21 @@ static int build_id_cache__add_file(const char *filename, struct nsinfo *nsi)
static int build_id_cache__remove_file(const char *filename, struct nsinfo *nsi)
{
- u8 build_id[BUILD_ID_SIZE];
char sbuild_id[SBUILD_ID_SIZE];
+ struct build_id bid;
struct nscookie nsc;
int err;
nsinfo__mountns_enter(nsi, &nsc);
- err = filename__read_build_id(filename, &build_id, sizeof(build_id));
+ err = filename__read_build_id(filename, &bid);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
return -1;
}
- build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+ build_id__sprintf(&bid, sbuild_id);
err = build_id_cache__remove_s(sbuild_id);
pr_debug("Removing %s %s: %s\n", sbuild_id, filename,
err ? "FAIL" : "Ok");
@@ -274,17 +275,16 @@ static int build_id_cache__purge_all(void)
static bool dso__missing_buildid_cache(struct dso *dso, int parm __maybe_unused)
{
char filename[PATH_MAX];
- u8 build_id[BUILD_ID_SIZE];
+ struct build_id bid;
if (dso__build_id_filename(dso, filename, sizeof(filename), false) &&
- filename__read_build_id(filename, build_id,
- sizeof(build_id)) != sizeof(build_id)) {
+ filename__read_build_id(filename, &bid) == -1) {
if (errno == ENOENT)
return false;
pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
- } else if (memcmp(dso->build_id, build_id, sizeof(dso->build_id))) {
+ } else if (memcmp(dso->bid.data, bid.data, bid.size)) {
pr_warning("Problems with %s file, consider removing it from the cache\n",
filename);
}
@@ -300,14 +300,14 @@ static int build_id_cache__fprintf_missing(struct perf_session *session, FILE *f
static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi)
{
- u8 build_id[BUILD_ID_SIZE];
char sbuild_id[SBUILD_ID_SIZE];
+ struct build_id bid;
struct nscookie nsc;
int err;
nsinfo__mountns_enter(nsi, &nsc);
- err = filename__read_build_id(filename, &build_id, sizeof(build_id));
+ err = filename__read_build_id(filename, &bid);
nsinfo__mountns_exit(&nsc);
if (err < 0) {
pr_debug("Couldn't read a build-id in %s\n", filename);
@@ -315,7 +315,7 @@ static int build_id_cache__update_file(const char *filename, struct nsinfo *nsi)
}
err = 0;
- build_id__sprintf(build_id, sizeof(build_id), sbuild_id);
+ build_id__sprintf(&bid, sbuild_id);
if (build_id_cache__cached(sbuild_id))
err = build_id_cache__remove_s(sbuild_id);
@@ -349,12 +349,25 @@ static int build_id_cache__show_all(void)
return 0;
}
+static int perf_buildid_cache_config(const char *var, const char *value, void *cb)
+{
+ struct perf_debuginfod *di = cb;
+
+ if (!strcmp(var, "buildid-cache.debuginfod")) {
+ di->urls = strdup(value);
+ if (!di->urls)
+ return -ENOMEM;
+ di->set = true;
+ }
+
+ return 0;
+}
+
int cmd_buildid_cache(int argc, const char **argv)
{
struct strlist *list;
struct str_node *pos;
- int ret = 0;
- int ns_id = -1;
+ int ret, ns_id = -1;
bool force = false;
bool list_files = false;
bool opts_flag = false;
@@ -365,6 +378,7 @@ int cmd_buildid_cache(int argc, const char **argv)
*missing_filename = NULL,
*update_name_list_str = NULL,
*kcore_filename = NULL;
+ struct perf_debuginfod debuginfod = { };
char sbuf[STRERR_BUFSIZE];
struct perf_data data = {
@@ -389,6 +403,10 @@ int cmd_buildid_cache(int argc, const char **argv)
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_STRING('u', "update", &update_name_list_str, "file list",
"file(s) to update"),
+ OPT_STRING_OPTARG_SET(0, "debuginfod", &debuginfod.urls,
+ &debuginfod.set, "debuginfod urls",
+ "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
+ "system"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_INTEGER(0, "target-ns", &ns_id, "target pid for namespace context"),
OPT_END()
@@ -398,6 +416,10 @@ int cmd_buildid_cache(int argc, const char **argv)
NULL
};
+ ret = perf_config(perf_buildid_cache_config, &debuginfod);
+ if (ret)
+ return ret;
+
argc = parse_options(argc, argv, buildid_cache_options,
buildid_cache_usage, 0);
@@ -409,6 +431,8 @@ int cmd_buildid_cache(int argc, const char **argv)
if (argc || !(list_files || opts_flag))
usage_with_options(buildid_cache_usage, buildid_cache_options);
+ perf_debuginfod_setup(&debuginfod);
+
/* -l is exclusive. It can not be used with other options. */
if (list_files && opts_flag) {
usage_with_options_msg(buildid_cache_usage,
@@ -422,7 +446,7 @@ int cmd_buildid_cache(int argc, const char **argv)
data.path = missing_filename;
data.force = force;
- session = perf_session__new(&data, false, NULL);
+ session = perf_session__new(&data, NULL);
if (IS_ERR(session))
return PTR_ERR(session);
}
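
The buildid-cache changes above replace the bare u8 build_id[BUILD_ID_SIZE] array (with an implicit length) by a struct build_id that carries its own size; filename__read_build_id() fills it and build_id__sprintf() formats it. A simplified sketch of that shape: the struct layout follows the patch, but build_id__snprintf() below is an invented stand-alone approximation, not perf's helper:

#include <stdio.h>

#define BUILD_ID_SIZE 20

struct build_id {
	unsigned char data[BUILD_ID_SIZE];
	size_t size;
};

static int build_id__snprintf(const struct build_id *bid, char *buf, size_t len)
{
	size_t off = 0, i;

	/* format only the valid bytes, two hex digits each */
	for (i = 0; i < bid->size && off + 2 < len; i++)
		off += snprintf(buf + off, len - off, "%02x", bid->data[i]);
	return (int)off;
}

int main(void)
{
	struct build_id bid = { .data = { 0xde, 0xad, 0xbe, 0xef }, .size = 4 };
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__snprintf(&bid, sbuild_id, sizeof(sbuild_id));
	printf("%s\n", sbuild_id);
	return 0;
}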
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c
index e3ef75583514..c9037477865a 100644
--- a/tools/perf/builtin-buildid-list.c
+++ b/tools/perf/builtin-buildid-list.c
@@ -8,18 +8,48 @@
* Copyright (C) 2009, Arnaldo Carvalho de Melo <acme@redhat.com>
*/
#include "builtin.h"
-#include "perf.h"
#include "util/build-id.h"
#include "util/debug.h"
#include "util/dso.h"
+#include "util/map.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/session.h"
#include "util/symbol.h"
#include "util/data.h"
+#include "util/util.h"
#include <errno.h>
+#include <inttypes.h>
#include <linux/err.h>
+static int buildid__map_cb(struct map *map, void *arg __maybe_unused)
+{
+ const struct dso *dso = map__dso(map);
+ char bid_buf[SBUILD_ID_SIZE];
+
+ memset(bid_buf, 0, sizeof(bid_buf));
+ if (dso->has_build_id)
+ build_id__sprintf(&dso->bid, bid_buf);
+ printf("%s %16" PRIx64 " %16" PRIx64, bid_buf, map__start(map), map__end(map));
+ if (dso->long_name != NULL) {
+ printf(" %s", dso->long_name);
+ } else if (dso->short_name != NULL) {
+ printf(" %s", dso->short_name);
+ }
+ printf("\n");
+
+ return 0;
+}
+
+static void buildid__show_kernel_maps(void)
+{
+ struct machine *machine;
+
+ machine = machine__new_host();
+ machine__for_each_kernel_map(machine, buildid__map_cb, NULL);
+ machine__delete(machine);
+}
+
static int sysfs__fprintf_build_id(FILE *fp)
{
char sbuild_id[SBUILD_ID_SIZE];
@@ -65,7 +95,7 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
if (filename__fprintf_build_id(input_name, stdout) > 0)
goto out;
- session = perf_session__new(&data, false, &build_id__mark_dso_hit_ops);
+ session = perf_session__new(&data, &build_id__mark_dso_hit_ops);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -77,6 +107,12 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
perf_header__has_feat(&session->header, HEADER_AUXTRACE))
with_hits = false;
+ if (!perf_header__has_feat(&session->header, HEADER_BUILD_ID))
+ with_hits = true;
+
+ if (zstd_init(&(session->zstd_data), 0) < 0)
+ pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
+
/*
* in pipe-mode, the only way to get the buildids is to parse
* the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
@@ -93,6 +129,7 @@ out:
int cmd_buildid_list(int argc, const char **argv)
{
bool show_kernel = false;
+ bool show_kernel_maps = false;
bool with_hits = false;
bool force = false;
const struct option options[] = {
@@ -100,6 +137,8 @@ int cmd_buildid_list(int argc, const char **argv)
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('k', "kernel", &show_kernel, "Show current kernel build id"),
+ OPT_BOOLEAN('m', "kernel-maps", &show_kernel_maps,
+ "Show build id of current kernel + modules"),
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_END()
};
@@ -111,8 +150,12 @@ int cmd_buildid_list(int argc, const char **argv)
argc = parse_options(argc, argv, options, buildid_list_usage, 0);
setup_pager();
- if (show_kernel)
+ if (show_kernel) {
return !(sysfs__fprintf_build_id(stdout) > 0);
+ } else if (show_kernel_maps) {
+ buildid__show_kernel_maps();
+ return 0;
+ }
return perf_session__list_build_ids(force, with_hits);
}
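
perf buildid-list -m (--kernel-maps) now walks every kernel map via machine__for_each_kernel_map() and prints build id, start, end, and name for each one. A toy sketch of that callback-iteration shape; struct fake_map and the iterator below are invented stand-ins, not perf APIs:

#include <inttypes.h>
#include <stdio.h>

struct fake_map {
	const char *bid;
	const char *name;
	uint64_t start, end;
};

static int show_map_cb(const struct fake_map *map, void *arg)
{
	(void)arg;	/* unused in this sketch */
	printf("%s %16" PRIx64 " %16" PRIx64 " %s\n",
	       map->bid, map->start, map->end, map->name);
	return 0;
}

static void for_each_kernel_map(int (*cb)(const struct fake_map *, void *), void *arg)
{
	static const struct fake_map maps[] = {
		{ "0123abcd", "[kernel.kallsyms]",
		  0xffffffff81000000ULL, 0xffffffff82000000ULL },
	};
	size_t i;

	for (i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		if (cb(&maps[i], arg))
			break;
}

int main(void)
{
	for_each_kernel_map(show_map_cb, NULL);
	return 0;
}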
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 246ac0b4d54f..08455e26b606 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -41,7 +41,10 @@
#include "symbol.h"
#include "ui/ui.h"
#include "ui/progress.h"
-#include "../perf.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
+#include "string2.h"
+#include "util/util.h"
struct c2c_hists {
struct hists hists;
@@ -52,6 +55,8 @@ struct c2c_hists {
struct compute_stats {
struct stats lcl_hitm;
struct stats rmt_hitm;
+ struct stats lcl_peer;
+ struct stats rmt_peer;
struct stats load;
};
@@ -95,9 +100,10 @@ struct perf_c2c {
bool use_stdio;
bool stats_only;
bool symbol_full;
+ bool stitch_lbr;
- /* HITM shared clines stats */
- struct c2c_stats hitm_stats;
+ /* Shared cache line stats */
+ struct c2c_stats shared_clines_stats;
int shared_clines;
int display;
@@ -109,16 +115,18 @@ struct perf_c2c {
};
enum {
- DISPLAY_LCL,
- DISPLAY_RMT,
- DISPLAY_TOT,
+ DISPLAY_LCL_HITM,
+ DISPLAY_RMT_HITM,
+ DISPLAY_TOT_HITM,
+ DISPLAY_SNP_PEER,
DISPLAY_MAX,
};
static const char *display_str[DISPLAY_MAX] = {
- [DISPLAY_LCL] = "Local",
- [DISPLAY_RMT] = "Remote",
- [DISPLAY_TOT] = "Total",
+ [DISPLAY_LCL_HITM] = "Local HITMs",
+ [DISPLAY_RMT_HITM] = "Remote HITMs",
+ [DISPLAY_TOT_HITM] = "Total HITMs",
+ [DISPLAY_SNP_PEER] = "Peer Snoop",
};
static const struct option c2c_options[] = {
@@ -136,23 +144,31 @@ static void *c2c_he_zalloc(size_t size)
if (!c2c_he)
return NULL;
- c2c_he->cpuset = bitmap_alloc(c2c.cpus_cnt);
+ c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
if (!c2c_he->cpuset)
- return NULL;
+ goto out_free;
- c2c_he->nodeset = bitmap_alloc(c2c.nodes_cnt);
+ c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
if (!c2c_he->nodeset)
- return NULL;
+ goto out_free;
c2c_he->node_stats = zalloc(c2c.nodes_cnt * sizeof(*c2c_he->node_stats));
if (!c2c_he->node_stats)
- return NULL;
+ goto out_free;
init_stats(&c2c_he->cstats.lcl_hitm);
init_stats(&c2c_he->cstats.rmt_hitm);
+ init_stats(&c2c_he->cstats.lcl_peer);
+ init_stats(&c2c_he->cstats.rmt_peer);
init_stats(&c2c_he->cstats.load);
return &c2c_he->he;
+
+out_free:
+ zfree(&c2c_he->nodeset);
+ zfree(&c2c_he->cpuset);
+ free(c2c_he);
+ return NULL;
}
static void c2c_he_free(void *he)
@@ -162,13 +178,13 @@ static void c2c_he_free(void *he)
c2c_he = container_of(he, struct c2c_hist_entry, he);
if (c2c_he->hists) {
hists__delete_entries(&c2c_he->hists->hists);
- free(c2c_he->hists);
+ zfree(&c2c_he->hists);
}
- free(c2c_he->cpuset);
- free(c2c_he->nodeset);
- free(c2c_he->nodestr);
- free(c2c_he->node_stats);
+ zfree(&c2c_he->cpuset);
+ zfree(&c2c_he->nodeset);
+ zfree(&c2c_he->nodestr);
+ zfree(&c2c_he->node_stats);
free(c2c_he);
}
@@ -214,7 +230,7 @@ static void c2c_he__set_cpu(struct c2c_hist_entry *c2c_he,
"WARNING: no sample cpu value"))
return;
- set_bit(sample->cpu, c2c_he->cpuset);
+ __set_bit(sample->cpu, c2c_he->cpuset);
}
static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
@@ -231,7 +247,7 @@ static void c2c_he__set_node(struct c2c_hist_entry *c2c_he,
if (WARN_ONCE(node < 0, "WARNING: failed to find node\n"))
return;
- set_bit(node, c2c_he->nodeset);
+ __set_bit(node, c2c_he->nodeset);
if (c2c_he->paddr != sample->phys_addr) {
c2c_he->paddr_cnt++;
@@ -249,6 +265,10 @@ static void compute_stats(struct c2c_hist_entry *c2c_he,
update_stats(&cstats->rmt_hitm, weight);
else if (stats->lcl_hitm)
update_stats(&cstats->lcl_hitm, weight);
+ else if (stats->rmt_peer)
+ update_stats(&cstats->rmt_peer, weight);
+ else if (stats->lcl_peer)
+ update_stats(&cstats->lcl_peer, weight);
else if (stats->load)
update_stats(&cstats->load, weight);
}
@@ -273,6 +293,9 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
+ if (c2c.stitch_lbr)
+ al.thread->lbr_stitch_enable = true;
+
ret = sample__resolve_callchain(sample, &callchain_cursor, NULL,
evsel, &al, sysctl_perf_event_max_stack);
if (ret)
@@ -292,7 +315,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
c2c_decode_stats(&stats, mi);
he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
- &al, NULL, NULL, mi,
+ &al, NULL, NULL, mi, NULL,
sample, true);
if (he == NULL)
goto free_mi;
@@ -326,7 +349,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
goto free_mi;
he = hists__add_entry_ops(&c2c_hists->hists, &c2c_entry_ops,
- &al, NULL, NULL, mi,
+ &al, NULL, NULL, mi, NULL,
sample, true);
if (he == NULL)
goto free_mi;
@@ -365,6 +388,10 @@ static struct perf_c2c c2c = {
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
+ .attr = perf_event__process_attr,
+ .auxtrace_info = perf_event__process_auxtrace_info,
+ .auxtrace = perf_event__process_auxtrace,
+ .auxtrace_error = perf_event__process_auxtrace_error,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
@@ -497,7 +524,7 @@ static int dcacheline_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
char buf[20];
if (he->mem_info)
- addr = cl_address(he->mem_info->daddr.addr);
+ addr = cl_address(he->mem_info->daddr.addr, chk_double_cl);
return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
}
@@ -535,7 +562,7 @@ static int offset_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
char buf[20];
if (he->mem_info)
- addr = cl_offset(he->mem_info->daddr.al_addr);
+ addr = cl_offset(he->mem_info->daddr.al_addr, chk_double_cl);
return scnprintf(hpp->buf, hpp->size, "%*s", width, HEX_STR(buf, addr));
}
@@ -547,9 +574,10 @@ offset_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
uint64_t l = 0, r = 0;
if (left->mem_info)
- l = cl_offset(left->mem_info->daddr.addr);
+ l = cl_offset(left->mem_info->daddr.addr, chk_double_cl);
+
if (right->mem_info)
- r = cl_offset(right->mem_info->daddr.addr);
+ r = cl_offset(right->mem_info->daddr.addr, chk_double_cl);
return (int64_t)(r - l);
}
@@ -639,75 +667,48 @@ __f ## _cmp(struct perf_hpp_fmt *fmt __maybe_unused, \
STAT_FN(rmt_hitm)
STAT_FN(lcl_hitm)
+STAT_FN(rmt_peer)
+STAT_FN(lcl_peer)
+STAT_FN(tot_peer)
STAT_FN(store)
STAT_FN(st_l1hit)
STAT_FN(st_l1miss)
+STAT_FN(st_na)
STAT_FN(ld_fbhit)
STAT_FN(ld_l1hit)
STAT_FN(ld_l2hit)
STAT_FN(ld_llchit)
STAT_FN(rmt_hit)
-static uint64_t llc_miss(struct c2c_stats *stats)
+static uint64_t get_load_llc_misses(struct c2c_stats *stats)
{
- uint64_t llcmiss;
-
- llcmiss = stats->lcl_dram +
- stats->rmt_dram +
- stats->rmt_hitm +
- stats->rmt_hit;
-
- return llcmiss;
+ return stats->lcl_dram +
+ stats->rmt_dram +
+ stats->rmt_hitm +
+ stats->rmt_hit;
}
-static int
-ld_llcmiss_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
- struct hist_entry *he)
+static uint64_t get_load_cache_hits(struct c2c_stats *stats)
{
- struct c2c_hist_entry *c2c_he;
- int width = c2c_width(fmt, hpp, he->hists);
-
- c2c_he = container_of(he, struct c2c_hist_entry, he);
-
- return scnprintf(hpp->buf, hpp->size, "%*lu", width,
- llc_miss(&c2c_he->stats));
+ return stats->ld_fbhit +
+ stats->ld_l1hit +
+ stats->ld_l2hit +
+ stats->ld_llchit +
+ stats->lcl_hitm;
}
-static int64_t
-ld_llcmiss_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
- struct hist_entry *left, struct hist_entry *right)
+static uint64_t get_stores(struct c2c_stats *stats)
{
- struct c2c_hist_entry *c2c_left;
- struct c2c_hist_entry *c2c_right;
-
- c2c_left = container_of(left, struct c2c_hist_entry, he);
- c2c_right = container_of(right, struct c2c_hist_entry, he);
-
- return (uint64_t) llc_miss(&c2c_left->stats) -
- (uint64_t) llc_miss(&c2c_right->stats);
+ return stats->st_l1hit +
+ stats->st_l1miss +
+ stats->st_na;
}
static uint64_t total_records(struct c2c_stats *stats)
{
- uint64_t lclmiss, ldcnt, total;
-
- lclmiss = stats->lcl_dram +
- stats->rmt_dram +
- stats->rmt_hitm +
- stats->rmt_hit;
-
- ldcnt = lclmiss +
- stats->ld_fbhit +
- stats->ld_l1hit +
- stats->ld_l2hit +
- stats->ld_llchit +
- stats->lcl_hitm;
-
- total = ldcnt +
- stats->st_l1hit +
- stats->st_l1miss;
-
- return total;
+ return get_load_llc_misses(stats) +
+ get_load_cache_hits(stats) +
+ get_stores(stats);
}
static int
@@ -744,21 +745,8 @@ tot_recs_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
static uint64_t total_loads(struct c2c_stats *stats)
{
- uint64_t lclmiss, ldcnt;
-
- lclmiss = stats->lcl_dram +
- stats->rmt_dram +
- stats->rmt_hitm +
- stats->rmt_hit;
-
- ldcnt = lclmiss +
- stats->ld_fbhit +
- stats->ld_l1hit +
- stats->ld_l2hit +
- stats->ld_llchit +
- stats->lcl_hitm;
-
- return ldcnt;
+ return get_load_llc_misses(stats) +
+ get_load_cache_hits(stats);
}
static int
@@ -813,7 +801,7 @@ percent_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
return hpp_color_scnprintf(hpp, "%*.2f%%", width - 1, per);
}
-static double percent_hitm(struct c2c_hist_entry *c2c_he)
+static double percent_costly_snoop(struct c2c_hist_entry *c2c_he)
{
struct c2c_hists *hists;
struct c2c_stats *stats;
@@ -826,17 +814,22 @@ static double percent_hitm(struct c2c_hist_entry *c2c_he)
total = &hists->stats;
switch (c2c.display) {
- case DISPLAY_RMT:
+ case DISPLAY_RMT_HITM:
st = stats->rmt_hitm;
tot = total->rmt_hitm;
break;
- case DISPLAY_LCL:
+ case DISPLAY_LCL_HITM:
st = stats->lcl_hitm;
tot = total->lcl_hitm;
break;
- case DISPLAY_TOT:
+ case DISPLAY_TOT_HITM:
st = stats->tot_hitm;
tot = total->tot_hitm;
+ break;
+ case DISPLAY_SNP_PEER:
+ st = stats->tot_peer;
+ tot = total->tot_peer;
+ break;
default:
break;
}
@@ -853,8 +846,8 @@ static double percent_hitm(struct c2c_hist_entry *c2c_he)
})
static int
-percent_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
- struct hist_entry *he)
+percent_costly_snoop_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
int width = c2c_width(fmt, hpp, he->hists);
@@ -862,20 +855,20 @@ percent_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
double per;
c2c_he = container_of(he, struct c2c_hist_entry, he);
- per = percent_hitm(c2c_he);
+ per = percent_costly_snoop(c2c_he);
return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
}
static int
-percent_hitm_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
- struct hist_entry *he)
+percent_costly_snoop_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
{
- return percent_color(fmt, hpp, he, percent_hitm);
+ return percent_color(fmt, hpp, he, percent_costly_snoop);
}
static int64_t
-percent_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
- struct hist_entry *left, struct hist_entry *right)
+percent_costly_snoop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
{
struct c2c_hist_entry *c2c_left;
struct c2c_hist_entry *c2c_right;
@@ -885,8 +878,8 @@ percent_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
c2c_left = container_of(left, struct c2c_hist_entry, he);
c2c_right = container_of(right, struct c2c_hist_entry, he);
- per_left = percent_hitm(c2c_left);
- per_right = percent_hitm(c2c_right);
+ per_left = percent_costly_snoop(c2c_left);
+ per_right = percent_costly_snoop(c2c_right);
return per_left - per_right;
}
@@ -907,7 +900,7 @@ static struct c2c_stats *total_stats(struct hist_entry *he)
return &hists->stats;
}
-static double percent(int st, int tot)
+static double percent(u32 st, u32 tot)
{
return tot ? 100. * (double) st / (double) tot : 0;
}
@@ -925,8 +918,11 @@ static double percent_ ## __f(struct c2c_hist_entry *c2c_he) \
PERCENT_FN(rmt_hitm)
PERCENT_FN(lcl_hitm)
+PERCENT_FN(rmt_peer)
+PERCENT_FN(lcl_peer)
PERCENT_FN(st_l1hit)
PERCENT_FN(st_l1miss)
+PERCENT_FN(st_na)
static int
percent_rmt_hitm_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -953,8 +949,8 @@ percent_rmt_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
double per_left;
double per_right;
- per_left = PERCENT(left, lcl_hitm);
- per_right = PERCENT(right, lcl_hitm);
+ per_left = PERCENT(left, rmt_hitm);
+ per_right = PERCENT(right, rmt_hitm);
return per_left - per_right;
}
@@ -991,6 +987,68 @@ percent_lcl_hitm_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
}
static int
+percent_lcl_peer_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, lcl_peer);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_lcl_peer_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_lcl_peer);
+}
+
+static int64_t
+percent_lcl_peer_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, lcl_peer);
+ per_right = PERCENT(right, lcl_peer);
+
+ return per_left - per_right;
+}
+
+static int
+percent_rmt_peer_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, rmt_peer);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_rmt_peer_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_rmt_peer);
+}
+
+static int64_t
+percent_rmt_peer_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, rmt_peer);
+ per_right = PERCENT(right, rmt_peer);
+
+ return per_left - per_right;
+}
+
+static int
percent_stores_l1hit_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he)
{
@@ -1052,6 +1110,37 @@ percent_stores_l1miss_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
return per_left - per_right;
}
+static int
+percent_stores_na_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = c2c_width(fmt, hpp, he->hists);
+ double per = PERCENT(he, st_na);
+ char buf[10];
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int
+percent_stores_na_color(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ return percent_color(fmt, hpp, he, percent_st_na);
+}
+
+static int64_t
+percent_stores_na_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = PERCENT(left, st_na);
+ per_right = PERCENT(right, st_na);
+
+ return per_left - per_right;
+}
+
STAT_FN(lcl_dram)
STAT_FN(rmt_dram)
@@ -1079,6 +1168,19 @@ empty_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
return 0;
}
+static int display_metrics(struct perf_hpp *hpp, u32 val, u32 sum)
+{
+ int ret;
+
+ if (sum != 0)
+ ret = scnprintf(hpp->buf, hpp->size, "%5.1f%% ",
+ percent(val, sum));
+ else
+ ret = scnprintf(hpp->buf, hpp->size, "%6s ", "n/a");
+
+ return ret;
+}
+
static int
node_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
struct hist_entry *he)
@@ -1096,7 +1198,7 @@ node_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
bitmap_zero(set, c2c.cpus_cnt);
bitmap_and(set, c2c_he->cpuset, c2c.nodes[node], c2c.cpus_cnt);
- if (!bitmap_weight(set, c2c.cpus_cnt)) {
+ if (bitmap_empty(set, c2c.cpus_cnt)) {
if (c2c.node_info == 1) {
ret = scnprintf(hpp->buf, hpp->size, "%21s", " ");
advance_hpp(hpp, ret);
@@ -1122,29 +1224,27 @@ node_entry(struct perf_hpp_fmt *fmt __maybe_unused, struct perf_hpp *hpp,
ret = scnprintf(hpp->buf, hpp->size, "%2d{%2d ", node, num);
advance_hpp(hpp, ret);
- #define DISPLAY_HITM(__h) \
- if (c2c_he->stats.__h> 0) { \
- ret = scnprintf(hpp->buf, hpp->size, "%5.1f%% ", \
- percent(stats->__h, c2c_he->stats.__h));\
- } else { \
- ret = scnprintf(hpp->buf, hpp->size, "%6s ", "n/a"); \
- }
-
switch (c2c.display) {
- case DISPLAY_RMT:
- DISPLAY_HITM(rmt_hitm);
+ case DISPLAY_RMT_HITM:
+ ret = display_metrics(hpp, stats->rmt_hitm,
+ c2c_he->stats.rmt_hitm);
+ break;
+ case DISPLAY_LCL_HITM:
+ ret = display_metrics(hpp, stats->lcl_hitm,
+ c2c_he->stats.lcl_hitm);
break;
- case DISPLAY_LCL:
- DISPLAY_HITM(lcl_hitm);
+ case DISPLAY_TOT_HITM:
+ ret = display_metrics(hpp, stats->tot_hitm,
+ c2c_he->stats.tot_hitm);
+ break;
+ case DISPLAY_SNP_PEER:
+ ret = display_metrics(hpp, stats->tot_peer,
+ c2c_he->stats.tot_peer);
break;
- case DISPLAY_TOT:
- DISPLAY_HITM(tot_hitm);
default:
break;
}
- #undef DISPLAY_HITM
-
advance_hpp(hpp, ret);
if (c2c_he->stats.store > 0) {
@@ -1200,6 +1300,8 @@ __func(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct hist_entry *he) \
MEAN_ENTRY(mean_rmt_entry, rmt_hitm);
MEAN_ENTRY(mean_lcl_entry, lcl_hitm);
MEAN_ENTRY(mean_load_entry, load);
+MEAN_ENTRY(mean_rmt_peer_entry, rmt_peer);
+MEAN_ENTRY(mean_lcl_peer_entry, lcl_peer);
static int
cpucnt_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@@ -1324,7 +1426,7 @@ static struct c2c_dimension dim_iaddr = {
};
static struct c2c_dimension dim_tot_hitm = {
- .header = HEADER_SPAN("----- LLC Load Hitm -----", "Total", 2),
+ .header = HEADER_SPAN("------- Load Hitm -------", "Total", 2),
.name = "tot_hitm",
.cmp = tot_hitm_cmp,
.entry = tot_hitm_entry,
@@ -1332,7 +1434,7 @@ static struct c2c_dimension dim_tot_hitm = {
};
static struct c2c_dimension dim_lcl_hitm = {
- .header = HEADER_SPAN_LOW("Lcl"),
+ .header = HEADER_SPAN_LOW("LclHitm"),
.name = "lcl_hitm",
.cmp = lcl_hitm_cmp,
.entry = lcl_hitm_entry,
@@ -1340,13 +1442,37 @@ static struct c2c_dimension dim_lcl_hitm = {
};
static struct c2c_dimension dim_rmt_hitm = {
- .header = HEADER_SPAN_LOW("Rmt"),
+ .header = HEADER_SPAN_LOW("RmtHitm"),
.name = "rmt_hitm",
.cmp = rmt_hitm_cmp,
.entry = rmt_hitm_entry,
.width = 7,
};
+static struct c2c_dimension dim_tot_peer = {
+ .header = HEADER_SPAN("------- Load Peer -------", "Total", 2),
+ .name = "tot_peer",
+ .cmp = tot_peer_cmp,
+ .entry = tot_peer_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_lcl_peer = {
+ .header = HEADER_SPAN_LOW("Local"),
+ .name = "lcl_peer",
+ .cmp = lcl_peer_cmp,
+ .entry = lcl_peer_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_rmt_peer = {
+ .header = HEADER_SPAN_LOW("Remote"),
+ .name = "rmt_peer",
+ .cmp = rmt_peer_cmp,
+ .entry = rmt_peer_entry,
+ .width = 7,
+};
+
static struct c2c_dimension dim_cl_rmt_hitm = {
.header = HEADER_SPAN("----- HITM -----", "Rmt", 1),
.name = "cl_rmt_hitm",
@@ -1363,16 +1489,32 @@ static struct c2c_dimension dim_cl_lcl_hitm = {
.width = 7,
};
-static struct c2c_dimension dim_stores = {
- .header = HEADER_SPAN("---- Store Reference ----", "Total", 2),
- .name = "stores",
+static struct c2c_dimension dim_cl_rmt_peer = {
+ .header = HEADER_SPAN("----- Peer -----", "Rmt", 1),
+ .name = "cl_rmt_peer",
+ .cmp = rmt_peer_cmp,
+ .entry = rmt_peer_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_cl_lcl_peer = {
+ .header = HEADER_SPAN_LOW("Lcl"),
+ .name = "cl_lcl_peer",
+ .cmp = lcl_peer_cmp,
+ .entry = lcl_peer_entry,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_tot_stores = {
+ .header = HEADER_BOTH("Total", "Stores"),
+ .name = "tot_stores",
.cmp = store_cmp,
.entry = store_entry,
.width = 7,
};
static struct c2c_dimension dim_stores_l1hit = {
- .header = HEADER_SPAN_LOW("L1Hit"),
+ .header = HEADER_SPAN("--------- Stores --------", "L1Hit", 2),
.name = "stores_l1hit",
.cmp = st_l1hit_cmp,
.entry = st_l1hit_entry,
@@ -1387,8 +1529,16 @@ static struct c2c_dimension dim_stores_l1miss = {
.width = 7,
};
+static struct c2c_dimension dim_stores_na = {
+ .header = HEADER_SPAN_LOW("N/A"),
+ .name = "stores_na",
+ .cmp = st_na_cmp,
+ .entry = st_na_entry,
+ .width = 7,
+};
+
static struct c2c_dimension dim_cl_stores_l1hit = {
- .header = HEADER_SPAN("-- Store Refs --", "L1 Hit", 1),
+ .header = HEADER_SPAN("------- Store Refs ------", "L1 Hit", 2),
.name = "cl_stores_l1hit",
.cmp = st_l1hit_cmp,
.entry = st_l1hit_entry,
@@ -1403,6 +1553,14 @@ static struct c2c_dimension dim_cl_stores_l1miss = {
.width = 7,
};
+static struct c2c_dimension dim_cl_stores_na = {
+ .header = HEADER_SPAN_LOW("N/A"),
+ .name = "cl_stores_na",
+ .cmp = st_na_cmp,
+ .entry = st_na_entry,
+ .width = 7,
+};
+
static struct c2c_dimension dim_ld_fbhit = {
.header = HEADER_SPAN("----- Core Load Hit -----", "FB", 2),
.name = "ld_fbhit",
@@ -1428,7 +1586,7 @@ static struct c2c_dimension dim_ld_l2hit = {
};
static struct c2c_dimension dim_ld_llchit = {
- .header = HEADER_SPAN("-- LLC Load Hit --", "Llc", 1),
+ .header = HEADER_SPAN("- LLC Load Hit --", "LclHit", 1),
.name = "ld_lclhit",
.cmp = ld_llchit_cmp,
.entry = ld_llchit_entry,
@@ -1436,21 +1594,13 @@ static struct c2c_dimension dim_ld_llchit = {
};
static struct c2c_dimension dim_ld_rmthit = {
- .header = HEADER_SPAN_LOW("Rmt"),
+ .header = HEADER_SPAN("- RMT Load Hit --", "RmtHit", 1),
.name = "ld_rmthit",
.cmp = rmt_hit_cmp,
.entry = rmt_hit_entry,
.width = 8,
};
-static struct c2c_dimension dim_ld_llcmiss = {
- .header = HEADER_BOTH("LLC", "Ld Miss"),
- .name = "ld_llcmiss",
- .cmp = ld_llcmiss_cmp,
- .entry = ld_llcmiss_entry,
- .width = 7,
-};
-
static struct c2c_dimension dim_tot_recs = {
.header = HEADER_BOTH("Total", "records"),
.name = "tot_recs",
@@ -1467,22 +1617,23 @@ static struct c2c_dimension dim_tot_loads = {
.width = 7,
};
-static struct c2c_header percent_hitm_header[] = {
- [DISPLAY_LCL] = HEADER_BOTH("Lcl", "Hitm"),
- [DISPLAY_RMT] = HEADER_BOTH("Rmt", "Hitm"),
- [DISPLAY_TOT] = HEADER_BOTH("Tot", "Hitm"),
+static struct c2c_header percent_costly_snoop_header[] = {
+ [DISPLAY_LCL_HITM] = HEADER_BOTH("Lcl", "Hitm"),
+ [DISPLAY_RMT_HITM] = HEADER_BOTH("Rmt", "Hitm"),
+ [DISPLAY_TOT_HITM] = HEADER_BOTH("Tot", "Hitm"),
+ [DISPLAY_SNP_PEER] = HEADER_BOTH("Peer", "Snoop"),
};
-static struct c2c_dimension dim_percent_hitm = {
- .name = "percent_hitm",
- .cmp = percent_hitm_cmp,
- .entry = percent_hitm_entry,
- .color = percent_hitm_color,
+static struct c2c_dimension dim_percent_costly_snoop = {
+ .name = "percent_costly_snoop",
+ .cmp = percent_costly_snoop_cmp,
+ .entry = percent_costly_snoop_entry,
+ .color = percent_costly_snoop_color,
.width = 7,
};
static struct c2c_dimension dim_percent_rmt_hitm = {
- .header = HEADER_SPAN("----- HITM -----", "Rmt", 1),
+ .header = HEADER_SPAN("----- HITM -----", "RmtHitm", 1),
.name = "percent_rmt_hitm",
.cmp = percent_rmt_hitm_cmp,
.entry = percent_rmt_hitm_entry,
@@ -1491,7 +1642,7 @@ static struct c2c_dimension dim_percent_rmt_hitm = {
};
static struct c2c_dimension dim_percent_lcl_hitm = {
- .header = HEADER_SPAN_LOW("Lcl"),
+ .header = HEADER_SPAN_LOW("LclHitm"),
.name = "percent_lcl_hitm",
.cmp = percent_lcl_hitm_cmp,
.entry = percent_lcl_hitm_entry,
@@ -1499,8 +1650,26 @@ static struct c2c_dimension dim_percent_lcl_hitm = {
.width = 7,
};
+static struct c2c_dimension dim_percent_rmt_peer = {
+ .header = HEADER_SPAN("-- Peer Snoop --", "Rmt", 1),
+ .name = "percent_rmt_peer",
+ .cmp = percent_rmt_peer_cmp,
+ .entry = percent_rmt_peer_entry,
+ .color = percent_rmt_peer_color,
+ .width = 7,
+};
+
+static struct c2c_dimension dim_percent_lcl_peer = {
+ .header = HEADER_SPAN_LOW("Lcl"),
+ .name = "percent_lcl_peer",
+ .cmp = percent_lcl_peer_cmp,
+ .entry = percent_lcl_peer_entry,
+ .color = percent_lcl_peer_color,
+ .width = 7,
+};
+
static struct c2c_dimension dim_percent_stores_l1hit = {
- .header = HEADER_SPAN("-- Store Refs --", "L1 Hit", 1),
+ .header = HEADER_SPAN("------- Store Refs ------", "L1 Hit", 2),
.name = "percent_stores_l1hit",
.cmp = percent_stores_l1hit_cmp,
.entry = percent_stores_l1hit_entry,
@@ -1517,6 +1686,15 @@ static struct c2c_dimension dim_percent_stores_l1miss = {
.width = 7,
};
+static struct c2c_dimension dim_percent_stores_na = {
+ .header = HEADER_SPAN_LOW("N/A"),
+ .name = "percent_stores_na",
+ .cmp = percent_stores_na_cmp,
+ .entry = percent_stores_na_entry,
+ .color = percent_stores_na_color,
+ .width = 7,
+};
+
static struct c2c_dimension dim_dram_lcl = {
.header = HEADER_SPAN("--- Load Dram ----", "Lcl", 1),
.name = "dram_lcl",
@@ -1558,12 +1736,6 @@ static struct c2c_dimension dim_dso = {
.se = &sort_dso,
};
-static struct c2c_header header_node[3] = {
- HEADER_LOW("Node"),
- HEADER_LOW("Node{cpus %hitms %stores}"),
- HEADER_LOW("Node{cpu list}"),
-};
-
static struct c2c_dimension dim_node = {
.name = "node",
.cmp = empty_cmp,
@@ -1595,6 +1767,22 @@ static struct c2c_dimension dim_mean_load = {
.width = 8,
};
+static struct c2c_dimension dim_mean_rmt_peer = {
+ .header = HEADER_SPAN("---------- cycles ----------", "rmt peer", 2),
+ .name = "mean_rmt_peer",
+ .cmp = empty_cmp,
+ .entry = mean_rmt_peer_entry,
+ .width = 8,
+};
+
+static struct c2c_dimension dim_mean_lcl_peer = {
+ .header = HEADER_SPAN_LOW("lcl peer"),
+ .name = "mean_lcl_peer",
+ .cmp = empty_cmp,
+ .entry = mean_lcl_peer_entry,
+ .width = 8,
+};
+
static struct c2c_dimension dim_cpucnt = {
.header = HEADER_BOTH("cpu", "cnt"),
.name = "cpucnt",
@@ -1642,26 +1830,35 @@ static struct c2c_dimension *dimensions[] = {
&dim_tot_hitm,
&dim_lcl_hitm,
&dim_rmt_hitm,
+ &dim_tot_peer,
+ &dim_lcl_peer,
+ &dim_rmt_peer,
&dim_cl_lcl_hitm,
&dim_cl_rmt_hitm,
- &dim_stores,
+ &dim_cl_lcl_peer,
+ &dim_cl_rmt_peer,
+ &dim_tot_stores,
&dim_stores_l1hit,
&dim_stores_l1miss,
+ &dim_stores_na,
&dim_cl_stores_l1hit,
&dim_cl_stores_l1miss,
+ &dim_cl_stores_na,
&dim_ld_fbhit,
&dim_ld_l1hit,
&dim_ld_l2hit,
&dim_ld_llchit,
&dim_ld_rmthit,
- &dim_ld_llcmiss,
&dim_tot_recs,
&dim_tot_loads,
- &dim_percent_hitm,
+ &dim_percent_costly_snoop,
&dim_percent_rmt_hitm,
&dim_percent_lcl_hitm,
+ &dim_percent_rmt_peer,
+ &dim_percent_lcl_peer,
&dim_percent_stores_l1hit,
&dim_percent_stores_l1miss,
+ &dim_percent_stores_na,
&dim_dram_lcl,
&dim_dram_rmt,
&dim_pid,
@@ -1671,6 +1868,8 @@ static struct c2c_dimension *dimensions[] = {
&dim_node,
&dim_mean_rmt,
&dim_mean_lcl,
+ &dim_mean_rmt_peer,
+ &dim_mean_lcl_peer,
&dim_mean_load,
&dim_cpucnt,
&dim_srcline,
@@ -1705,7 +1904,7 @@ static struct c2c_dimension *get_dimension(const char *name)
if (!strcmp(dim->name, name))
return dim;
- };
+ }
return NULL;
}
@@ -1891,53 +2090,75 @@ static int c2c_hists__reinit(struct c2c_hists *c2c_hists,
#define DISPLAY_LINE_LIMIT 0.001
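+
+/*
+ * An entry is filtered out when its share of the selected metric
+ * (HITMs or peer accesses) falls below DISPLAY_LINE_LIMIT, unless
+ * c2c.show_all is set.
+ */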
+static u8 filter_display(u32 val, u32 sum)
+{
+ if (sum == 0 || ((double)val / sum) < DISPLAY_LINE_LIMIT)
+ return HIST_FILTER__C2C;
+
+ return 0;
+}
+
static bool he__display(struct hist_entry *he, struct c2c_stats *stats)
{
struct c2c_hist_entry *c2c_he;
- double ld_dist;
if (c2c.show_all)
return true;
c2c_he = container_of(he, struct c2c_hist_entry, he);
-#define FILTER_HITM(__h) \
- if (stats->__h) { \
- ld_dist = ((double)c2c_he->stats.__h / stats->__h); \
- if (ld_dist < DISPLAY_LINE_LIMIT) \
- he->filtered = HIST_FILTER__C2C; \
- } else { \
- he->filtered = HIST_FILTER__C2C; \
- }
-
switch (c2c.display) {
- case DISPLAY_LCL:
- FILTER_HITM(lcl_hitm);
+ case DISPLAY_LCL_HITM:
+ he->filtered = filter_display(c2c_he->stats.lcl_hitm,
+ stats->lcl_hitm);
+ break;
+ case DISPLAY_RMT_HITM:
+ he->filtered = filter_display(c2c_he->stats.rmt_hitm,
+ stats->rmt_hitm);
break;
- case DISPLAY_RMT:
- FILTER_HITM(rmt_hitm);
+ case DISPLAY_TOT_HITM:
+ he->filtered = filter_display(c2c_he->stats.tot_hitm,
+ stats->tot_hitm);
+ break;
+ case DISPLAY_SNP_PEER:
+ he->filtered = filter_display(c2c_he->stats.tot_peer,
+ stats->tot_peer);
break;
- case DISPLAY_TOT:
- FILTER_HITM(tot_hitm);
default:
break;
- };
-
-#undef FILTER_HITM
+ }
return he->filtered == 0;
}
-static inline int valid_hitm_or_store(struct hist_entry *he)
+static inline bool is_valid_hist_entry(struct hist_entry *he)
{
struct c2c_hist_entry *c2c_he;
- bool has_hitm;
+ bool has_record = false;
c2c_he = container_of(he, struct c2c_hist_entry, he);
- has_hitm = c2c.display == DISPLAY_TOT ? c2c_he->stats.tot_hitm :
- c2c.display == DISPLAY_LCL ? c2c_he->stats.lcl_hitm :
- c2c_he->stats.rmt_hitm;
- return has_hitm || c2c_he->stats.store;
+
+ /* It's a valid entry if it contains stores */
+ if (c2c_he->stats.store)
+ return true;
+
+ switch (c2c.display) {
+ case DISPLAY_LCL_HITM:
+ has_record = !!c2c_he->stats.lcl_hitm;
+ break;
+ case DISPLAY_RMT_HITM:
+ has_record = !!c2c_he->stats.rmt_hitm;
+ break;
+ case DISPLAY_TOT_HITM:
+ has_record = !!c2c_he->stats.tot_hitm;
+ break;
+ case DISPLAY_SNP_PEER:
+ has_record = !!c2c_he->stats.tot_peer;
+ break;
+ default:
+ break;
+ }
+
+ return has_record;
}
static void set_node_width(struct c2c_hist_entry *c2c_he, int len)
@@ -1959,7 +2180,7 @@ static int set_nodestr(struct c2c_hist_entry *c2c_he)
if (c2c_he->nodestr)
return 0;
- if (bitmap_weight(c2c_he->nodeset, c2c.nodes_cnt)) {
+ if (!bitmap_empty(c2c_he->nodeset, c2c.nodes_cnt)) {
len = bitmap_scnprintf(c2c_he->nodeset, c2c.nodes_cnt,
buf, sizeof(buf));
} else {
@@ -1991,7 +2212,7 @@ static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
calc_width(c2c_he);
- if (!valid_hitm_or_store(he))
+ if (!is_valid_hist_entry(he))
he->filtered = HIST_FILTER__C2C;
return 0;
@@ -2001,7 +2222,7 @@ static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
struct c2c_hists *c2c_hists;
- bool display = he__display(he, &c2c.hitm_stats);
+ bool display = he__display(he, &c2c.shared_clines_stats);
c2c_he = container_of(he, struct c2c_hist_entry, he);
c2c_hists = c2c_he->hists;
@@ -2021,16 +2242,41 @@ static int resort_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
return 0;
}
+static struct c2c_header header_node_0 = HEADER_LOW("Node");
+static struct c2c_header header_node_1_hitms_stores =
+ HEADER_LOW("Node{cpus %hitms %stores}");
+static struct c2c_header header_node_1_peers_stores =
+ HEADER_LOW("Node{cpus %peers %stores}");
+static struct c2c_header header_node_2 = HEADER_LOW("Node{cpu list}");
+
static void setup_nodes_header(void)
{
- dim_node.header = header_node[c2c.node_info];
+ switch (c2c.node_info) {
+ case 0:
+ dim_node.header = header_node_0;
+ break;
+ case 1:
+ if (c2c.display == DISPLAY_SNP_PEER)
+ dim_node.header = header_node_1_peers_stores;
+ else
+ dim_node.header = header_node_1_hitms_stores;
+ break;
+ case 2:
+ dim_node.header = header_node_2;
+ break;
+ default:
+ break;
+ }
+
+ return;
}
static int setup_nodes(struct perf_session *session)
{
struct numa_node *n;
unsigned long **nodes;
- int node, cpu;
+ int node, idx;
+ struct perf_cpu cpu;
int *cpu2node;
if (c2c.node_info > 2)
@@ -2053,8 +2299,8 @@ static int setup_nodes(struct perf_session *session)
if (!cpu2node)
return -ENOMEM;
- for (cpu = 0; cpu < c2c.cpus_cnt; cpu++)
- cpu2node[cpu] = -1;
+ for (idx = 0; idx < c2c.cpus_cnt; idx++)
+ cpu2node[idx] = -1;
c2c.cpu2node = cpu2node;
@@ -2062,7 +2308,7 @@ static int setup_nodes(struct perf_session *session)
struct perf_cpu_map *map = n[node].map;
unsigned long *set;
- set = bitmap_alloc(c2c.cpus_cnt);
+ set = bitmap_zalloc(c2c.cpus_cnt);
if (!set)
return -ENOMEM;
@@ -2072,13 +2318,13 @@ static int setup_nodes(struct perf_session *session)
if (perf_cpu_map__empty(map))
continue;
- for (cpu = 0; cpu < map->nr; cpu++) {
- set_bit(map->map[cpu], set);
+ perf_cpu_map__for_each_cpu(cpu, idx, map) {
+ __set_bit(cpu.cpu, set);
- if (WARN_ONCE(cpu2node[map->map[cpu]] != -1, "node/cpu topology bug"))
+ if (WARN_ONCE(cpu2node[cpu.cpu] != -1, "node/cpu topology bug"))
return -EINVAL;
- cpu2node[map->map[cpu]] = node;
+ cpu2node[cpu.cpu] = node;
}
}
@@ -2087,15 +2333,16 @@ static int setup_nodes(struct perf_session *session)
}
#define HAS_HITMS(__h) ((__h)->stats.lcl_hitm || (__h)->stats.rmt_hitm)
+#define HAS_PEER(__h) ((__h)->stats.lcl_peer || (__h)->stats.rmt_peer)
-static int resort_hitm_cb(struct hist_entry *he, void *arg __maybe_unused)
+static int resort_shared_cl_cb(struct hist_entry *he, void *arg __maybe_unused)
{
struct c2c_hist_entry *c2c_he;
c2c_he = container_of(he, struct c2c_hist_entry, he);
- if (HAS_HITMS(c2c_he)) {
+ if (HAS_HITMS(c2c_he) || HAS_PEER(c2c_he)) {
c2c.shared_clines++;
- c2c_add_stats(&c2c.hitm_stats, &c2c_he->stats);
+ c2c_add_stats(&c2c.shared_clines_stats, &c2c_he->stats);
}
return 0;
@@ -2124,10 +2371,7 @@ static void print_c2c__display_stats(FILE *out)
int llc_misses;
struct c2c_stats *stats = &c2c.hists.stats;
- llc_misses = stats->lcl_dram +
- stats->rmt_dram +
- stats->rmt_hit +
- stats->rmt_hitm;
+ llc_misses = get_load_llc_misses(stats);
fprintf(out, "=================================================\n");
fprintf(out, " Trace Event Information \n");
@@ -2151,6 +2395,10 @@ static void print_c2c__display_stats(FILE *out)
fprintf(out, " Load MESI State Exclusive : %10d\n", stats->ld_excl);
fprintf(out, " Load MESI State Shared : %10d\n", stats->ld_shared);
fprintf(out, " Load LLC Misses : %10d\n", llc_misses);
+ fprintf(out, " Load access blocked by data : %10d\n", stats->blk_data);
+ fprintf(out, " Load access blocked by address : %10d\n", stats->blk_addr);
+ fprintf(out, " Load HIT Local Peer : %10d\n", stats->lcl_peer);
+ fprintf(out, " Load HIT Remote Peer : %10d\n", stats->rmt_peer);
fprintf(out, " LLC Misses to Local DRAM : %10.1f%%\n", ((double)stats->lcl_dram/(double)llc_misses) * 100.);
fprintf(out, " LLC Misses to Remote DRAM : %10.1f%%\n", ((double)stats->rmt_dram/(double)llc_misses) * 100.);
fprintf(out, " LLC Misses to Remote cache (HIT) : %10.1f%%\n", ((double)stats->rmt_hit /(double)llc_misses) * 100.);
@@ -2160,13 +2408,14 @@ static void print_c2c__display_stats(FILE *out)
fprintf(out, " Store - no mapping : %10d\n", stats->st_noadrs);
fprintf(out, " Store L1D Hit : %10d\n", stats->st_l1hit);
fprintf(out, " Store L1D Miss : %10d\n", stats->st_l1miss);
+ fprintf(out, " Store No available memory level : %10d\n", stats->st_na);
fprintf(out, " No Page Map Rejects : %10d\n", stats->nomap);
fprintf(out, " Unable to parse data source : %10d\n", stats->noparse);
}
static void print_shared_cacheline_info(FILE *out)
{
- struct c2c_stats *stats = &c2c.hitm_stats;
+ struct c2c_stats *stats = &c2c.shared_clines_stats;
int hitm_cnt = stats->lcl_hitm + stats->rmt_hitm;
fprintf(out, "=================================================\n");
@@ -2178,9 +2427,12 @@ static void print_shared_cacheline_info(FILE *out)
fprintf(out, " L1D hits on shared lines : %10d\n", stats->ld_l1hit);
fprintf(out, " L2D hits on shared lines : %10d\n", stats->ld_l2hit);
fprintf(out, " LLC hits on shared lines : %10d\n", stats->ld_llchit + stats->lcl_hitm);
+ fprintf(out, " Load hits on peer cache or nodes : %10d\n", stats->lcl_peer + stats->rmt_peer);
fprintf(out, " Locked Access on shared lines : %10d\n", stats->locks);
+ fprintf(out, " Blocked Access on shared lines : %10d\n", stats->blk_data + stats->blk_addr);
fprintf(out, " Store HITs on shared lines : %10d\n", stats->store);
fprintf(out, " Store L1D hits on shared lines : %10d\n", stats->st_l1hit);
+ fprintf(out, " Store No available memory level : %10d\n", stats->st_na);
fprintf(out, " Total Merged records : %10d\n", hitm_cnt + stats->store);
}
@@ -2203,10 +2455,10 @@ static void print_cacheline(struct c2c_hists *c2c_hists,
fprintf(out, "\n");
}
- fprintf(out, " -------------------------------------------------------------\n");
+ fprintf(out, " ----------------------------------------------------------------------\n");
__hist_entry__snprintf(he_cl, &hpp, hpp_list);
fprintf(out, "%s\n", bf);
- fprintf(out, " -------------------------------------------------------------\n");
+ fprintf(out, " ----------------------------------------------------------------------\n");
hists__fprintf(&c2c_hists->hists, false, 0, 0, 0, out, false);
}
@@ -2216,16 +2468,27 @@ static void print_pareto(FILE *out)
struct perf_hpp_list hpp_list;
struct rb_node *nd;
int ret;
+ const char *cl_output;
+
+ if (c2c.display != DISPLAY_SNP_PEER)
+ cl_output = "cl_num,"
+ "cl_rmt_hitm,"
+ "cl_lcl_hitm,"
+ "cl_stores_l1hit,"
+ "cl_stores_l1miss,"
+ "cl_stores_na,"
+ "dcacheline";
+ else
+ cl_output = "cl_num,"
+ "cl_rmt_peer,"
+ "cl_lcl_peer,"
+ "cl_stores_l1hit,"
+ "cl_stores_l1miss,"
+ "cl_stores_na,"
+ "dcacheline";
perf_hpp_list__init(&hpp_list);
- ret = hpp_list__parse(&hpp_list,
- "cl_num,"
- "cl_rmt_hitm,"
- "cl_lcl_hitm,"
- "cl_stores_l1hit,"
- "cl_stores_l1miss,"
- "dcacheline",
- NULL);
+ ret = hpp_list__parse(&hpp_list, cl_output, NULL);
if (WARN_ONCE(ret, "failed to setup sort entries\n"))
return;
@@ -2255,11 +2518,10 @@ static void print_c2c_info(FILE *out, struct perf_session *session)
fprintf(out, "=================================================\n");
evlist__for_each_entry(evlist, evsel) {
- fprintf(out, "%-36s: %s\n", first ? " Events" : "",
- perf_evsel__name(evsel));
+ fprintf(out, "%-36s: %s\n", first ? " Events" : "", evsel__name(evsel));
first = false;
}
- fprintf(out, " Cachelines sort on : %s HITMs\n",
+ fprintf(out, " Cachelines sort on : %s\n",
display_str[c2c.display]);
fprintf(out, " Cacheline data grouping : %s\n", c2c.cl_sort);
}
@@ -2329,7 +2591,7 @@ perf_c2c_cacheline_browser__title(struct hist_browser *browser,
he = cl_browser->he;
if (he->mem_info)
- addr = cl_address(he->mem_info->daddr.addr);
+ addr = cl_address(he->mem_info->daddr.addr, chk_double_cl);
scnprintf(bf, size, "Cacheline 0x%lx", addr);
return 0;
@@ -2416,7 +2678,7 @@ static int perf_c2c_browser__title(struct hist_browser *browser,
{
scnprintf(bf, size,
"Shared Data Cache Line Table "
- "(%lu entries, sorted on %s HITMs)",
+ "(%lu entries, sorted on %s)",
browser->nr_non_filtered_entries,
display_str[c2c.display]);
return 0;
@@ -2527,15 +2789,16 @@ static int ui_quirks(void)
if (!c2c.use_stdio) {
dim_offset.width = 5;
dim_offset.header = header_offset_tui;
- nodestr = "CL";
+ nodestr = chk_double_cl ? "Double-CL" : "CL";
}
- dim_percent_hitm.header = percent_hitm_header[c2c.display];
+ dim_percent_costly_snoop.header = percent_costly_snoop_header[c2c.display];
/* Fix the zero line for dcacheline column. */
- buf = fill_line("Cacheline", dim_dcacheline.width +
- dim_dcacheline_node.width +
- dim_dcacheline_count.width + 4);
+ buf = fill_line(chk_double_cl ? "Double-Cacheline" : "Cacheline",
+ dim_dcacheline.width +
+ dim_dcacheline_node.width +
+ dim_dcacheline_count.width + 4);
if (!buf)
return -ENOMEM;
@@ -2579,7 +2842,7 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
static int setup_callchain(struct evlist *evlist)
{
- u64 sample_type = perf_evlist__combined_sample_type(evlist);
+ u64 sample_type = evlist__combined_sample_type(evlist);
enum perf_call_graph_mode mode = CALLCHAIN_NONE;
if ((sample_type & PERF_SAMPLE_REGS_USER) &&
@@ -2601,6 +2864,12 @@ static int setup_callchain(struct evlist *evlist)
}
}
+ if (c2c.stitch_lbr && (mode != CALLCHAIN_LBR)) {
+ ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
+ "Please apply --call-graph lbr when recording.\n");
+ c2c.stitch_lbr = false;
+ }
+
callchain_param.record_mode = mode;
callchain_param.min_percent = 0;
return 0;
@@ -2608,14 +2877,16 @@ static int setup_callchain(struct evlist *evlist)
static int setup_display(const char *str)
{
- const char *display = str ?: "tot";
+ const char *display = str;
if (!strcmp(display, "tot"))
- c2c.display = DISPLAY_TOT;
+ c2c.display = DISPLAY_TOT_HITM;
else if (!strcmp(display, "rmt"))
- c2c.display = DISPLAY_RMT;
+ c2c.display = DISPLAY_RMT_HITM;
else if (!strcmp(display, "lcl"))
- c2c.display = DISPLAY_LCL;
+ c2c.display = DISPLAY_LCL_HITM;
+ else if (!strcmp(display, "peer"))
+ c2c.display = DISPLAY_SNP_PEER;
else {
pr_err("failed: unknown display type: %s\n", str);
return -1;
@@ -2662,18 +2933,23 @@ static int build_cl_output(char *cl_sort, bool no_source)
}
if (asprintf(&c2c.cl_output,
- "%s%s%s%s%s%s%s%s%s%s",
+ "%s%s%s%s%s%s%s%s%s%s%s%s",
c2c.use_stdio ? "cl_num_empty," : "",
- "percent_rmt_hitm,"
- "percent_lcl_hitm,"
+ c2c.display == DISPLAY_SNP_PEER ? "percent_rmt_peer,"
+ "percent_lcl_peer," :
+ "percent_rmt_hitm,"
+ "percent_lcl_hitm,",
"percent_stores_l1hit,"
"percent_stores_l1miss,"
+ "percent_stores_na,"
"offset,offset_node,dcacheline_count,",
add_pid ? "pid," : "",
add_tid ? "tid," : "",
add_iaddr ? "iaddr," : "",
- "mean_rmt,"
- "mean_lcl,"
+ c2c.display == DISPLAY_SNP_PEER ? "mean_rmt_peer,"
+ "mean_lcl_peer," :
+ "mean_rmt,"
+ "mean_lcl,",
"mean_load,"
"tot_recs,"
"cpucnt,",
@@ -2694,6 +2970,7 @@ err:
static int setup_coalesce(const char *coalesce, bool no_source)
{
const char *c = coalesce ?: coalesce_default;
+ const char *sort_str = NULL;
if (asprintf(&c2c.cl_sort, "offset,%s", c) < 0)
return -ENOMEM;
@@ -2701,12 +2978,16 @@ static int setup_coalesce(const char *coalesce, bool no_source)
if (build_cl_output(c2c.cl_sort, no_source))
return -1;
- if (asprintf(&c2c.cl_resort, "offset,%s",
- c2c.display == DISPLAY_TOT ?
- "tot_hitm" :
- c2c.display == DISPLAY_RMT ?
- "rmt_hitm,lcl_hitm" :
- "lcl_hitm,rmt_hitm") < 0)
+ if (c2c.display == DISPLAY_TOT_HITM)
+ sort_str = "tot_hitm";
+ else if (c2c.display == DISPLAY_RMT_HITM)
+ sort_str = "rmt_hitm,lcl_hitm";
+ else if (c2c.display == DISPLAY_LCL_HITM)
+ sort_str = "lcl_hitm,rmt_hitm";
+ else if (c2c.display == DISPLAY_SNP_PEER)
+ sort_str = "tot_peer";
+
+ if (asprintf(&c2c.cl_resort, "offset,%s", sort_str) < 0)
return -ENOMEM;
pr_debug("coalesce sort fields: %s\n", c2c.cl_sort);
@@ -2717,6 +2998,12 @@ static int setup_coalesce(const char *coalesce, bool no_source)
static int perf_c2c__report(int argc, const char **argv)
{
+ struct itrace_synth_opts itrace_synth_opts = {
+ .set = true,
+ .mem = true, /* Only enable memory events */
+ .default_no_sample = true,
+ };
+
struct perf_session *session;
struct ui_progress prog;
struct perf_data data = {
@@ -2733,9 +3020,7 @@ static int perf_c2c__report(int argc, const char **argv)
"the input file to process"),
OPT_INCR('N', "node-info", &c2c.node_info,
"show extra node info in report (repeat for more info)"),
-#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "stdio", &c2c.use_stdio, "Use the stdio interface"),
-#endif
OPT_BOOLEAN(0, "stats", &c2c.stats_only,
"Display only statistic tables (implies --stdio)"),
OPT_BOOLEAN(0, "full-symbols", &c2c.symbol_full,
@@ -2748,56 +3033,82 @@ static int perf_c2c__report(int argc, const char **argv)
"print_type,threshold[,print_limit],order,sort_key[,branch],value",
callchain_help, &parse_callchain_opt,
callchain_default_opt),
- OPT_STRING('d', "display", &display, "Switch HITM output type", "lcl,rmt"),
+ OPT_STRING('d', "display", &display, "Switch HITM output type", "tot,lcl,rmt,peer"),
OPT_STRING('c', "coalesce", &coalesce, "coalesce fields",
"coalesce fields: pid,tid,iaddr,dso"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
+ OPT_BOOLEAN(0, "stitch-lbr", &c2c.stitch_lbr,
+ "Enable LBR callgraph stitching approach"),
+ OPT_BOOLEAN(0, "double-cl", &chk_double_cl, "Detect adjacent cacheline false sharing"),
OPT_PARENT(c2c_options),
OPT_END()
};
int err = 0;
+ const char *output_str, *sort_str = NULL;
argc = parse_options(argc, argv, options, report_c2c_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (argc)
usage_with_options(report_c2c_usage, options);
+#ifndef HAVE_SLANG_SUPPORT
+ c2c.use_stdio = true;
+#endif
+
if (c2c.stats_only)
c2c.use_stdio = true;
+ err = symbol__validate_sym_arguments();
+ if (err)
+ goto out;
+
if (!input_name || !strlen(input_name))
input_name = "perf.data";
data.path = input_name;
data.force = symbol_conf.force;
+ session = perf_session__new(&data, &c2c.tool);
+ if (IS_ERR(session)) {
+ err = PTR_ERR(session);
+ pr_debug("Error creating perf session\n");
+ goto out;
+ }
+
+ /*
+ * Use 'tot' as the default display type if the user doesn't specify one;
+ * since the Arm64 platform doesn't support the HITM flags, use 'peer' as
+ * the default display type there.
+ */
+ if (!display) {
+ if (!strcmp(perf_env__arch(&session->header.env), "arm64"))
+ display = "peer";
+ else
+ display = "tot";
+ }
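+
+ /*
+ * The default chosen above can still be overridden explicitly with
+ * the -d/--display option, e.g. "perf c2c report -d peer".
+ */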
+
err = setup_display(display);
if (err)
- goto out;
+ goto out_session;
err = setup_coalesce(coalesce, no_source);
if (err) {
pr_debug("Failed to initialize hists\n");
- goto out;
+ goto out_session;
}
err = c2c_hists__init(&c2c.hists, "dcacheline", 2);
if (err) {
pr_debug("Failed to initialize hists\n");
- goto out;
+ goto out_session;
}
- session = perf_session__new(&data, 0, &c2c.tool);
- if (IS_ERR(session)) {
- err = PTR_ERR(session);
- pr_debug("Error creating perf session\n");
- goto out;
- }
+ session->itrace_synth_opts = &itrace_synth_opts;
err = setup_nodes(session);
if (err) {
pr_err("Failed setup nodes\n");
- goto out;
+ goto out_session;
}
err = mem2node__init(&c2c.mem2node, &session->header.env);
@@ -2830,28 +3141,52 @@ static int perf_c2c__report(int argc, const char **argv)
goto out_mem2node;
}
- c2c_hists__reinit(&c2c.hists,
- "cl_idx,"
- "dcacheline,"
- "dcacheline_node,"
- "dcacheline_count,"
- "tot_recs,"
- "percent_hitm,"
- "tot_hitm,lcl_hitm,rmt_hitm,"
- "stores,stores_l1hit,stores_l1miss,"
- "dram_lcl,dram_rmt,"
- "ld_llcmiss,"
- "tot_loads,"
- "ld_fbhit,ld_l1hit,ld_l2hit,"
- "ld_lclhit,ld_rmthit",
- c2c.display == DISPLAY_TOT ? "tot_hitm" :
- c2c.display == DISPLAY_LCL ? "lcl_hitm" : "rmt_hitm"
- );
+ if (c2c.display != DISPLAY_SNP_PEER)
+ output_str = "cl_idx,"
+ "dcacheline,"
+ "dcacheline_node,"
+ "dcacheline_count,"
+ "percent_costly_snoop,"
+ "tot_hitm,lcl_hitm,rmt_hitm,"
+ "tot_recs,"
+ "tot_loads,"
+ "tot_stores,"
+ "stores_l1hit,stores_l1miss,stores_na,"
+ "ld_fbhit,ld_l1hit,ld_l2hit,"
+ "ld_lclhit,lcl_hitm,"
+ "ld_rmthit,rmt_hitm,"
+ "dram_lcl,dram_rmt";
+ else
+ output_str = "cl_idx,"
+ "dcacheline,"
+ "dcacheline_node,"
+ "dcacheline_count,"
+ "percent_costly_snoop,"
+ "tot_peer,lcl_peer,rmt_peer,"
+ "tot_recs,"
+ "tot_loads,"
+ "tot_stores,"
+ "stores_l1hit,stores_l1miss,stores_na,"
+ "ld_fbhit,ld_l1hit,ld_l2hit,"
+ "ld_lclhit,lcl_hitm,"
+ "ld_rmthit,rmt_hitm,"
+ "dram_lcl,dram_rmt";
+
+ if (c2c.display == DISPLAY_TOT_HITM)
+ sort_str = "tot_hitm";
+ else if (c2c.display == DISPLAY_RMT_HITM)
+ sort_str = "rmt_hitm";
+ else if (c2c.display == DISPLAY_LCL_HITM)
+ sort_str = "lcl_hitm";
+ else if (c2c.display == DISPLAY_SNP_PEER)
+ sort_str = "tot_peer";
+
+ c2c_hists__reinit(&c2c.hists, output_str, sort_str);
ui_progress__init(&prog, c2c.hists.hists.nr_entries, "Sorting...");
hists__collapse_resort(&c2c.hists.hists, NULL);
- hists__output_resort_cb(&c2c.hists.hists, &prog, resort_hitm_cb);
+ hists__output_resort_cb(&c2c.hists.hists, &prog, resort_shared_cl_cb);
hists__iterate_cb(&c2c.hists.hists, resort_cl_cb);
ui_progress__finish();
@@ -2876,8 +3211,15 @@ static int parse_record_events(const struct option *opt,
{
bool *event_set = (bool *) opt->value;
+ if (!strcmp(str, "list")) {
+ perf_mem_events__list();
+ exit(0);
+ }
+ if (perf_mem_events__parse(str))
+ exit(-1);
+
*event_set = true;
- return perf_mem_events__parse(str);
+ return 0;
}
@@ -2891,14 +3233,16 @@ static const char * const *record_mem_usage = __usage_record;
static int perf_c2c__record(int argc, const char **argv)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, rec_tmp_nr = 0;
const char **rec_argv;
+ char **rec_tmp;
int ret;
bool all_user = false, all_kernel = false;
bool event_set = false;
+ struct perf_mem_event *e;
struct option options[] = {
OPT_CALLBACK('e', "event", &event_set, "event",
- "event selector. Use 'perf mem record -e list' to list available events",
+ "event selector. Use 'perf c2c record -e list' to list available events",
parse_record_events),
OPT_BOOLEAN('u', "all-user", &all_user, "collect only user level data"),
OPT_BOOLEAN('k', "all-kernel", &all_kernel, "collect only kernel level data"),
@@ -2915,39 +3259,52 @@ static int perf_c2c__record(int argc, const char **argv)
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_KEEP_UNKNOWN);
- rec_argc = argc + 11; /* max number of arguments */
+ if (!perf_pmu__has_hybrid())
+ rec_argc = argc + 11; /* max number of arguments */
+ else
+ rec_argc = argc + 11 * perf_pmu__hybrid_pmu_num();
+
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
+ rec_tmp = calloc(rec_argc + 1, sizeof(char *));
+ if (!rec_tmp) {
+ free(rec_argv);
+ return -1;
+ }
+
rec_argv[i++] = "record";
if (!event_set) {
- perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true;
- perf_mem_events[PERF_MEM_EVENTS__STORE].record = true;
+ e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
+ /*
+ * Both load and store operations are required; use the combined
+ * event PERF_MEM_EVENTS__LOAD_STORE if it is supported.
+ */
+ if (e->tag) {
+ e->record = true;
+ rec_argv[i++] = "-W";
+ } else {
+ e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+ e->record = true;
+
+ e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
+ e->record = true;
+ }
}
- if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record)
+ e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+ if (e->record)
rec_argv[i++] = "-W";
rec_argv[i++] = "-d";
rec_argv[i++] = "--phys-data";
rec_argv[i++] = "--sample-cpu";
- for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- if (!perf_mem_events[j].record)
- continue;
-
- if (!perf_mem_events[j].supported) {
- pr_err("failed: event '%s' not supported\n",
- perf_mem_events[j].name);
- free(rec_argv);
- return -1;
- }
-
- rec_argv[i++] = "-e";
- rec_argv[i++] = perf_mem_events__name(j);
- };
+ ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &rec_tmp_nr);
+ if (ret)
+ goto out;
if (all_user)
rec_argv[i++] = "--all-user";
@@ -2971,6 +3328,11 @@ static int perf_c2c__record(int argc, const char **argv)
}
ret = cmd_record(i, rec_argv);
+out:
+ for (i = 0; i < rec_tmp_nr; i++)
+ free(rec_tmp[i]);
+
+ free(rec_tmp);
free(rec_argv);
return ret;
}
@@ -2983,9 +3345,9 @@ int cmd_c2c(int argc, const char **argv)
if (!argc)
usage_with_options(c2c_usage, c2c_options);
- if (!strncmp(argv[0], "rec", 3)) {
+ if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
return perf_c2c__record(argc, argv);
- } else if (!strncmp(argv[0], "rep", 3)) {
+ } else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
return perf_c2c__report(argc, argv);
} else {
usage_with_options(c2c_usage, c2c_options);
diff --git a/tools/perf/builtin-daemon.c b/tools/perf/builtin-daemon.c
new file mode 100644
index 000000000000..34cbe3e959aa
--- /dev/null
+++ b/tools/perf/builtin-daemon.c
@@ -0,0 +1,1532 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <internal/lib.h>
+#include <subcmd/parse-options.h>
+#include <api/fd/array.h>
+#include <api/fs/fs.h>
+#include <linux/zalloc.h>
+#include <linux/string.h>
+#include <linux/limits.h>
+#include <string.h>
+#include <sys/file.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <time.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sys/inotify.h>
+#include <libgen.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/stat.h>
+#include <sys/signalfd.h>
+#include <sys/wait.h>
+#include <poll.h>
+#include "builtin.h"
+#include "perf.h"
+#include "debug.h"
+#include "config.h"
+#include "util.h"
+
+#define SESSION_OUTPUT "output"
+#define SESSION_CONTROL "control"
+#define SESSION_ACK "ack"
+
+/*
+ * Session states:
+ *
+ * OK - session is up and running
+ * RECONFIG - session is pending reconfiguration,
+ * new values are already loaded in the session object
+ * KILL - session is pending to be killed
+ *
+ * A session object's life and state are maintained by
+ * the following functions:
+ *
+ * setup_server_config
+ * - reads the config file and sets up session objects
+ * with the following states:
+ *
+ * OK - no change needed
+ * RECONFIG - session needs to be changed
+ * (run variable changed)
+ * KILL - session needs to be killed
+ * (session is no longer in the config file)
+ *
+ * daemon__reconfig
+ * - scans session objects and performs the following
+ * actions for each state:
+ *
+ * OK - skip
+ * RECONFIG - session is killed and re-run with the new config
+ * KILL - session is killed
+ *
+ * - all sessions are in the OK state when the function exits
+ */
+enum daemon_session_state {
+ OK,
+ RECONFIG,
+ KILL,
+};
+
+struct daemon_session {
+ char *base;
+ char *name;
+ char *run;
+ char *control;
+ int pid;
+ struct list_head list;
+ enum daemon_session_state state;
+ time_t start;
+};
+
+struct daemon {
+ const char *config;
+ char *config_real;
+ char *config_base;
+ const char *csv_sep;
+ const char *base_user;
+ char *base;
+ struct list_head sessions;
+ FILE *out;
+ char perf[PATH_MAX];
+ int signal_fd;
+ time_t start;
+};
+
+static struct daemon __daemon = {
+ .sessions = LIST_HEAD_INIT(__daemon.sessions),
+};
+
+static const char * const daemon_usage[] = {
+ "perf daemon {start|signal|stop|ping} [<options>]",
+ "perf daemon [<options>]",
+ NULL
+};
+
+static volatile sig_atomic_t done;
+
+static void sig_handler(int sig __maybe_unused)
+{
+ done = true;
+}
+
+static struct daemon_session *daemon__add_session(struct daemon *config, char *name)
+{
+ struct daemon_session *session = zalloc(sizeof(*session));
+
+ if (!session)
+ return NULL;
+
+ session->name = strdup(name);
+ if (!session->name) {
+ free(session);
+ return NULL;
+ }
+
+ session->pid = -1;
+ list_add_tail(&session->list, &config->sessions);
+ return session;
+}
+
+static struct daemon_session *daemon__find_session(struct daemon *daemon, char *name)
+{
+ struct daemon_session *session;
+
+ list_for_each_entry(session, &daemon->sessions, list) {
+ if (!strcmp(session->name, name))
+ return session;
+ }
+
+ return NULL;
+}
+
+static int get_session_name(const char *var, char *session, int len)
+{
+ const char *p = var + sizeof("session-") - 1;
+
+ while (*p != '.' && *p != 0x0 && len--)
+ *session++ = *p++;
+
+ *session = 0;
+ return *p == '.' ? 0 : -EINVAL;
+}
+
+static int session_config(struct daemon *daemon, const char *var, const char *value)
+{
+ struct daemon_session *session;
+ char name[100];
+
+ if (get_session_name(var, name, sizeof(name) - 1))
+ return -EINVAL;
+
+ var = strchr(var, '.');
+ if (!var)
+ return -EINVAL;
+
+ var++;
+
+ session = daemon__find_session(daemon, name);
+
+ if (!session) {
+ /* New session is defined. */
+ session = daemon__add_session(daemon, name);
+ if (!session)
+ return -ENOMEM;
+
+ pr_debug("reconfig: found new session %s\n", name);
+
+ /* Trigger reconfig to start it. */
+ session->state = RECONFIG;
+ } else if (session->state == KILL) {
+ /* Current session is defined, no action needed. */
+ pr_debug("reconfig: found current session %s\n", name);
+ session->state = OK;
+ }
+
+ if (!strcmp(var, "run")) {
+ bool same = false;
+
+ if (session->run)
+ same = !strcmp(session->run, value);
+
+ if (!same) {
+ if (session->run) {
+ zfree(&session->run);
+ pr_debug("reconfig: session %s is changed\n", name);
+ }
+
+ session->run = strdup(value);
+ if (!session->run)
+ return -ENOMEM;
+
+ /*
+ * Either a new or a changed run value is defined,
+ * so trigger reconfig for the session.
+ */
+ session->state = RECONFIG;
+ }
+ }
+
+ return 0;
+}
+
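+/*
+ * Example of the config layout these callbacks parse; the base path and
+ * record options below are made-up values:
+ *
+ * [daemon]
+ * base=/var/run/perf-daemon
+ *
+ * [session-cycles]
+ * run = -e cycles --overwrite --switch-output -a
+ */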
+static int server_config(const char *var, const char *value, void *cb)
+{
+ struct daemon *daemon = cb;
+
+ if (strstarts(var, "session-")) {
+ return session_config(daemon, var, value);
+ } else if (!strcmp(var, "daemon.base") && !daemon->base_user) {
+ if (daemon->base && strcmp(daemon->base, value)) {
+ pr_err("failed: can't redefine base, bailing out\n");
+ return -EINVAL;
+ }
+ daemon->base = strdup(value);
+ if (!daemon->base)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int client_config(const char *var, const char *value, void *cb)
+{
+ struct daemon *daemon = cb;
+
+ if (!strcmp(var, "daemon.base") && !daemon->base_user) {
+ daemon->base = strdup(value);
+ if (!daemon->base)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int check_base(struct daemon *daemon)
+{
+ struct stat st;
+
+ if (!daemon->base) {
+ pr_err("failed: base not defined\n");
+ return -EINVAL;
+ }
+
+ if (stat(daemon->base, &st)) {
+ switch (errno) {
+ case EACCES:
+ pr_err("failed: permission denied for '%s' base\n",
+ daemon->base);
+ return -EACCES;
+ case ENOENT:
+ pr_err("failed: base '%s' does not exists\n",
+ daemon->base);
+ return -EACCES;
+ default:
+ pr_err("failed: can't access base '%s': %s\n",
+ daemon->base, strerror(errno));
+ return -errno;
+ }
+ }
+
+ if ((st.st_mode & S_IFMT) != S_IFDIR) {
+ pr_err("failed: base '%s' is not directory\n",
+ daemon->base);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int setup_client_config(struct daemon *daemon)
+{
+ struct perf_config_set *set = perf_config_set__load_file(daemon->config_real);
+ int err = -ENOMEM;
+
+ if (set) {
+ err = perf_config_set(set, client_config, daemon);
+ perf_config_set__delete(set);
+ }
+
+ return err ?: check_base(daemon);
+}
+
+static int setup_server_config(struct daemon *daemon)
+{
+ struct perf_config_set *set;
+ struct daemon_session *session;
+ int err = -ENOMEM;
+
+ pr_debug("reconfig: started\n");
+
+ /*
+ * Mark all sessions for kill; the server config
+ * will set the following states, see the explanation
+ * at the enum daemon_session_state declaration.
+ */
+ list_for_each_entry(session, &daemon->sessions, list)
+ session->state = KILL;
+
+ set = perf_config_set__load_file(daemon->config_real);
+ if (set) {
+ err = perf_config_set(set, server_config, daemon);
+ perf_config_set__delete(set);
+ }
+
+ return err ?: check_base(daemon);
+}
+
+static int daemon_session__run(struct daemon_session *session,
+ struct daemon *daemon)
+{
+ char buf[PATH_MAX];
+ char **argv;
+ int argc, fd;
+
+ if (asprintf(&session->base, "%s/session-%s",
+ daemon->base, session->name) < 0) {
+ perror("failed: asprintf");
+ return -1;
+ }
+
+ if (mkdir(session->base, 0755) && errno != EEXIST) {
+ perror("failed: mkdir");
+ return -1;
+ }
+
+ session->start = time(NULL);
+
+ session->pid = fork();
+ if (session->pid < 0)
+ return -1;
+ if (session->pid > 0) {
+ pr_info("reconfig: ruining session [%s:%d]: %s\n",
+ session->name, session->pid, session->run);
+ return 0;
+ }
+
+ if (chdir(session->base)) {
+ perror("failed: chdir");
+ return -1;
+ }
+
+ fd = open("/dev/null", O_RDONLY);
+ if (fd < 0) {
+ perror("failed: open /dev/null");
+ return -1;
+ }
+
+ dup2(fd, 0);
+ close(fd);
+
+ fd = open(SESSION_OUTPUT, O_RDWR|O_CREAT|O_TRUNC, 0644);
+ if (fd < 0) {
+ perror("failed: open session output");
+ return -1;
+ }
+
+ dup2(fd, 1);
+ dup2(fd, 2);
+ close(fd);
+
+ if (mkfifo(SESSION_CONTROL, 0600) && errno != EEXIST) {
+ perror("failed: create control fifo");
+ return -1;
+ }
+
+ if (mkfifo(SESSION_ACK, 0600) && errno != EEXIST) {
+ perror("failed: create ack fifo");
+ return -1;
+ }
+
+ scnprintf(buf, sizeof(buf), "%s record --control=fifo:%s,%s %s",
+ daemon->perf, SESSION_CONTROL, SESSION_ACK, session->run);
+
+ argv = argv_split(buf, &argc);
+ if (!argv)
+ exit(-1);
+
+ exit(execve(daemon->perf, argv, NULL));
+ return -1;
+}
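+
+/*
+ * For example, a session named "cycles" whose run value is "-e cycles -a"
+ * (made-up values) ends up executing, from within BASE/session-cycles:
+ *
+ * perf record --control=fifo:control,ack -e cycles -a
+ *
+ * with stdin from /dev/null and stdout/stderr redirected to the session's
+ * "output" file.
+ */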
+
+static pid_t handle_signalfd(struct daemon *daemon)
+{
+ struct daemon_session *session;
+ struct signalfd_siginfo si;
+ ssize_t err;
+ int status;
+ pid_t pid;
+
+ /*
+ * Treat the signal fd data as a pure notification and check the
+ * state of all sessions. The reason is that multiple signals can
+ * get coalesced in the kernel, so we may receive only a single
+ * signal even if multiple SIGCHLD were generated.
+ */
+ err = read(daemon->signal_fd, &si, sizeof(struct signalfd_siginfo));
+ if (err != sizeof(struct signalfd_siginfo)) {
+ pr_err("failed to read signal fd\n");
+ return -1;
+ }
+
+ list_for_each_entry(session, &daemon->sessions, list) {
+ if (session->pid == -1)
+ continue;
+
+ pid = waitpid(session->pid, &status, WNOHANG);
+ if (pid <= 0)
+ continue;
+
+ if (WIFEXITED(status)) {
+ pr_info("session '%s' exited, status=%d\n",
+ session->name, WEXITSTATUS(status));
+ } else if (WIFSIGNALED(status)) {
+ pr_info("session '%s' killed (signal %d)\n",
+ session->name, WTERMSIG(status));
+ } else if (WIFSTOPPED(status)) {
+ pr_info("session '%s' stopped (signal %d)\n",
+ session->name, WSTOPSIG(status));
+ } else {
+ pr_info("session '%s' Unexpected status (0x%x)\n",
+ session->name, status);
+ }
+
+ session->state = KILL;
+ session->pid = -1;
+ }
+
+ return 0;
+}
+
+static int daemon_session__wait(struct daemon_session *session, struct daemon *daemon,
+ int secs)
+{
+ struct pollfd pollfd = {
+ .fd = daemon->signal_fd,
+ .events = POLLIN,
+ };
+ time_t start;
+
+ start = time(NULL);
+
+ do {
+ int err = poll(&pollfd, 1, 1000);
+
+ if (err > 0) {
+ handle_signalfd(daemon);
+ } else if (err < 0) {
+ perror("failed: poll\n");
+ return -1;
+ }
+
+ if (start + secs < time(NULL))
+ return -1;
+ } while (session->pid != -1);
+
+ return 0;
+}
+
+static bool daemon__has_alive_session(struct daemon *daemon)
+{
+ struct daemon_session *session;
+
+ list_for_each_entry(session, &daemon->sessions, list) {
+ if (session->pid != -1)
+ return true;
+ }
+
+ return false;
+}
+
+static int daemon__wait(struct daemon *daemon, int secs)
+{
+ struct pollfd pollfd = {
+ .fd = daemon->signal_fd,
+ .events = POLLIN,
+ };
+ time_t start;
+
+ start = time(NULL);
+
+ do {
+ int err = poll(&pollfd, 1, 1000);
+
+ if (err > 0) {
+ handle_signalfd(daemon);
+ } else if (err < 0) {
+ perror("failed: poll\n");
+ return -1;
+ }
+
+ if (start + secs < time(NULL))
+ return -1;
+ } while (daemon__has_alive_session(daemon));
+
+ return 0;
+}
+
+static int daemon_session__control(struct daemon_session *session,
+ const char *msg, bool do_ack)
+{
+ struct pollfd pollfd = { .events = POLLIN, };
+ char control_path[PATH_MAX];
+ char ack_path[PATH_MAX];
+ int control, ack = -1, len;
+ char buf[20];
+ int ret = -1;
+ ssize_t err;
+
+ /* open the control file */
+ scnprintf(control_path, sizeof(control_path), "%s/%s",
+ session->base, SESSION_CONTROL);
+
+ control = open(control_path, O_WRONLY|O_NONBLOCK);
+ if (control < 0)
+ return -1;
+
+ if (do_ack) {
+ /* open the ack file */
+ scnprintf(ack_path, sizeof(ack_path), "%s/%s",
+ session->base, SESSION_ACK);
+
+ ack = open(ack_path, O_RDONLY|O_NONBLOCK);
+ if (ack < 0) {
+ close(control);
+ return -1;
+ }
+ }
+
+ /* write the command */
+ len = strlen(msg);
+
+ err = writen(control, msg, len);
+ if (err != len) {
+ pr_err("failed: write to control pipe: %d (%s)\n",
+ errno, control_path);
+ goto out;
+ }
+
+ if (!do_ack)
+ goto out;
+
+ /* wait for an ack */
+ pollfd.fd = ack;
+
+ if (!poll(&pollfd, 1, 2000)) {
+ pr_err("failed: control ack timeout\n");
+ goto out;
+ }
+
+ if (!(pollfd.revents & POLLIN)) {
+ pr_err("failed: did not received an ack\n");
+ goto out;
+ }
+
+ err = read(ack, buf, sizeof(buf));
+ if (err > 0)
+ ret = strcmp(buf, "ack\n");
+ else
+ perror("failed: read ack %d\n");
+
+out:
+ if (ack != -1)
+ close(ack);
+
+ close(control);
+ return ret;
+}
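+
+/*
+ * Callers use this in two ways: daemon_session__ping() sends "ping" with
+ * do_ack set and waits for the "ack\n" reply, while daemon__stop() and
+ * daemon_session__kill() send "stop" without waiting for an ack.
+ */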
+
+static int setup_server_socket(struct daemon *daemon)
+{
+ struct sockaddr_un addr;
+ char path[PATH_MAX];
+ int fd = socket(AF_UNIX, SOCK_STREAM, 0);
+
+ if (fd < 0) {
+ fprintf(stderr, "socket: %s\n", strerror(errno));
+ return -1;
+ }
+
+ if (fcntl(fd, F_SETFD, FD_CLOEXEC)) {
+ perror("failed: fcntl FD_CLOEXEC");
+ close(fd);
+ return -1;
+ }
+
+ scnprintf(path, sizeof(path), "%s/control", daemon->base);
+
+ if (strlen(path) + 1 >= sizeof(addr.sun_path)) {
+ pr_err("failed: control path too long '%s'\n", path);
+ close(fd);
+ return -1;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+
+ strlcpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
+ unlink(path);
+
+ if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
+ perror("failed: bind");
+ close(fd);
+ return -1;
+ }
+
+ if (listen(fd, 1) == -1) {
+ perror("failed: listen");
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+enum {
+ CMD_LIST = 0,
+ CMD_SIGNAL = 1,
+ CMD_STOP = 2,
+ CMD_PING = 3,
+ CMD_MAX,
+};
+
+#define SESSION_MAX 64
+
+union cmd {
+ int cmd;
+
+ /* CMD_LIST */
+ struct {
+ int cmd;
+ int verbose;
+ char csv_sep;
+ } list;
+
+ /* CMD_SIGNAL */
+ struct {
+ int cmd;
+ int sig;
+ char name[SESSION_MAX];
+ } signal;
+
+ /* CMD_PING */
+ struct {
+ int cmd;
+ char name[SESSION_MAX];
+ } ping;
+};
+
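+/*
+ * Client/server wire format: the client writes a single fixed-size
+ * union cmd to the UNIX socket at BASE/control (setup_client_socket()),
+ * and the server answers with plain text lines that the client copies
+ * to stdout (see handle_server_socket() and send_cmd()).
+ */
+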
+enum {
+ PING_OK = 0,
+ PING_FAIL = 1,
+ PING_MAX,
+};
+
+static int daemon_session__ping(struct daemon_session *session)
+{
+ return daemon_session__control(session, "ping", true) ? PING_FAIL : PING_OK;
+}
+
+static int cmd_session_list(struct daemon *daemon, union cmd *cmd, FILE *out)
+{
+ char csv_sep = cmd->list.csv_sep;
+ struct daemon_session *session;
+ time_t curr = time(NULL);
+
+ if (csv_sep) {
+ fprintf(out, "%d%c%s%c%s%c%s/%s",
+ /* pid daemon */
+ getpid(), csv_sep, "daemon",
+ /* base */
+ csv_sep, daemon->base,
+ /* output */
+ csv_sep, daemon->base, SESSION_OUTPUT);
+
+ fprintf(out, "%c%s/%s",
+ /* lock */
+ csv_sep, daemon->base, "lock");
+
+ fprintf(out, "%c%lu",
+ /* daemon up time */
+ csv_sep, (curr - daemon->start) / 60);
+
+ fprintf(out, "\n");
+ } else {
+ fprintf(out, "[%d:daemon] base: %s\n", getpid(), daemon->base);
+ if (cmd->list.verbose) {
+ fprintf(out, " output: %s/%s\n",
+ daemon->base, SESSION_OUTPUT);
+ fprintf(out, " lock: %s/lock\n",
+ daemon->base);
+ fprintf(out, " up: %lu minutes\n",
+ (curr - daemon->start) / 60);
+ }
+ }
+
+ list_for_each_entry(session, &daemon->sessions, list) {
+ if (csv_sep) {
+ fprintf(out, "%d%c%s%c%s",
+ /* pid */
+ session->pid,
+ /* name */
+ csv_sep, session->name,
+ /* base */
+ csv_sep, session->run);
+
+ fprintf(out, "%c%s%c%s/%s",
+ /* session dir */
+ csv_sep, session->base,
+ /* session output */
+ csv_sep, session->base, SESSION_OUTPUT);
+
+ fprintf(out, "%c%s/%s%c%s/%s",
+ /* session control */
+ csv_sep, session->base, SESSION_CONTROL,
+ /* session ack */
+ csv_sep, session->base, SESSION_ACK);
+
+ fprintf(out, "%c%lu",
+ /* session up time */
+ csv_sep, (curr - session->start) / 60);
+
+ fprintf(out, "\n");
+ } else {
+ fprintf(out, "[%d:%s] perf record %s\n",
+ session->pid, session->name, session->run);
+ if (!cmd->list.verbose)
+ continue;
+ fprintf(out, " base: %s\n",
+ session->base);
+ fprintf(out, " output: %s/%s\n",
+ session->base, SESSION_OUTPUT);
+ fprintf(out, " control: %s/%s\n",
+ session->base, SESSION_CONTROL);
+ fprintf(out, " ack: %s/%s\n",
+ session->base, SESSION_ACK);
+ fprintf(out, " up: %lu minutes\n",
+ (curr - session->start) / 60);
+ }
+ }
+
+ return 0;
+}
+
+static int daemon_session__signal(struct daemon_session *session, int sig)
+{
+ if (session->pid < 0)
+ return -1;
+ return kill(session->pid, sig);
+}
+
+static int cmd_session_kill(struct daemon *daemon, union cmd *cmd, FILE *out)
+{
+ struct daemon_session *session;
+ bool all = false;
+
+ all = !strcmp(cmd->signal.name, "all");
+
+ list_for_each_entry(session, &daemon->sessions, list) {
+ if (all || !strcmp(cmd->signal.name, session->name)) {
+ daemon_session__signal(session, cmd->signal.sig);
+ fprintf(out, "signal %d sent to session '%s [%d]'\n",
+ cmd->signal.sig, session->name, session->pid);
+ }
+ }
+
+ return 0;
+}
+
+static const char *ping_str[PING_MAX] = {
+ [PING_OK] = "OK",
+ [PING_FAIL] = "FAIL",
+};
+
+static int cmd_session_ping(struct daemon *daemon, union cmd *cmd, FILE *out)
+{
+ struct daemon_session *session;
+ bool all = false, found = false;
+
+ all = !strcmp(cmd->ping.name, "all");
+
+ list_for_each_entry(session, &daemon->sessions, list) {
+ if (all || !strcmp(cmd->ping.name, session->name)) {
+ int state = daemon_session__ping(session);
+
+ fprintf(out, "%-4s %s\n", ping_str[state], session->name);
+ found = true;
+ }
+ }
+
+ if (!found && !all) {
+ fprintf(out, "%-4s %s (not found)\n",
+ ping_str[PING_FAIL], cmd->ping.name);
+ }
+ return 0;
+}
+
+static int handle_server_socket(struct daemon *daemon, int sock_fd)
+{
+ int ret = -1, fd;
+ FILE *out = NULL;
+ union cmd cmd;
+
+ fd = accept(sock_fd, NULL, NULL);
+ if (fd < 0) {
+ perror("failed: accept");
+ return -1;
+ }
+
+ if (sizeof(cmd) != readn(fd, &cmd, sizeof(cmd))) {
+ perror("failed: read");
+ goto out;
+ }
+
+ out = fdopen(fd, "w");
+ if (!out) {
+ perror("failed: fdopen");
+ goto out;
+ }
+
+ switch (cmd.cmd) {
+ case CMD_LIST:
+ ret = cmd_session_list(daemon, &cmd, out);
+ break;
+ case CMD_SIGNAL:
+ ret = cmd_session_kill(daemon, &cmd, out);
+ break;
+ case CMD_STOP:
+ done = 1;
+ ret = 0;
+ pr_debug("perf daemon is exciting\n");
+ break;
+ case CMD_PING:
+ ret = cmd_session_ping(daemon, &cmd, out);
+ break;
+ default:
+ break;
+ }
+
+ fclose(out);
+out:
+ /* If out is defined, then fd is closed via fclose. */
+ if (!out)
+ close(fd);
+ return ret;
+}
+
+static int setup_client_socket(struct daemon *daemon)
+{
+ struct sockaddr_un addr;
+ char path[PATH_MAX];
+ int fd = socket(AF_UNIX, SOCK_STREAM, 0);
+
+ if (fd == -1) {
+ perror("failed: socket");
+ return -1;
+ }
+
+ scnprintf(path, sizeof(path), "%s/control", daemon->base);
+
+ if (strlen(path) + 1 >= sizeof(addr.sun_path)) {
+ pr_err("failed: control path too long '%s'\n", path);
+ close(fd);
+ return -1;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strlcpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
+
+ if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == -1) {
+ perror("failed: connect");
+ close(fd);
+ return -1;
+ }
+
+ return fd;
+}
+
+static void daemon_session__kill(struct daemon_session *session,
+ struct daemon *daemon)
+{
+ int how = 0;
+
+ do {
+ switch (how) {
+ case 0:
+ daemon_session__control(session, "stop", false);
+ break;
+ case 1:
+ daemon_session__signal(session, SIGTERM);
+ break;
+ case 2:
+ daemon_session__signal(session, SIGKILL);
+ break;
+ default:
+ pr_err("failed to wait for session %s\n",
+ session->name);
+ return;
+ }
+ how++;
+
+ } while (daemon_session__wait(session, daemon, 10));
+}
+
+static void daemon__signal(struct daemon *daemon, int sig)
+{
+ struct daemon_session *session;
+
+ list_for_each_entry(session, &daemon->sessions, list)
+ daemon_session__signal(session, sig);
+}
+
+static void daemon_session__delete(struct daemon_session *session)
+{
+ zfree(&session->base);
+ zfree(&session->name);
+ zfree(&session->run);
+ free(session);
+}
+
+static void daemon_session__remove(struct daemon_session *session)
+{
+ list_del(&session->list);
+ daemon_session__delete(session);
+}
+
+static void daemon__stop(struct daemon *daemon)
+{
+ struct daemon_session *session;
+
+ list_for_each_entry(session, &daemon->sessions, list)
+ daemon_session__control(session, "stop", false);
+}
+
+static void daemon__kill(struct daemon *daemon)
+{
+ int how = 0;
+
+ do {
+ switch (how) {
+ case 0:
+ daemon__stop(daemon);
+ break;
+ case 1:
+ daemon__signal(daemon, SIGTERM);
+ break;
+ case 2:
+ daemon__signal(daemon, SIGKILL);
+ break;
+ default:
+ pr_err("failed to wait for sessions\n");
+ return;
+ }
+ how++;
+
+ } while (daemon__wait(daemon, 10));
+}
+
+static void daemon__exit(struct daemon *daemon)
+{
+ struct daemon_session *session, *h;
+
+ list_for_each_entry_safe(session, h, &daemon->sessions, list)
+ daemon_session__remove(session);
+
+ zfree(&daemon->config_real);
+ zfree(&daemon->config_base);
+ zfree(&daemon->base);
+}
+
+static int daemon__reconfig(struct daemon *daemon)
+{
+ struct daemon_session *session, *n;
+
+ list_for_each_entry_safe(session, n, &daemon->sessions, list) {
+ /* No change. */
+ if (session->state == OK)
+ continue;
+
+ /* Remove session. */
+ if (session->state == KILL) {
+ if (session->pid > 0) {
+ daemon_session__kill(session, daemon);
+ pr_info("reconfig: session '%s' killed\n", session->name);
+ }
+ daemon_session__remove(session);
+ continue;
+ }
+
+ /* Reconfig session. */
+ if (session->pid > 0) {
+ daemon_session__kill(session, daemon);
+ pr_info("reconfig: session '%s' killed\n", session->name);
+ }
+ if (daemon_session__run(session, daemon))
+ return -1;
+
+ session->state = OK;
+ }
+
+ return 0;
+}
+
+static int setup_config_changes(struct daemon *daemon)
+{
+ char *basen = strdup(daemon->config_real);
+ char *dirn = strdup(daemon->config_real);
+ char *base, *dir;
+ int fd, wd = -1;
+
+ if (!dirn || !basen)
+ goto out;
+
+ fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
+ if (fd < 0) {
+ perror("failed: inotify_init");
+ goto out;
+ }
+
+ dir = dirname(dirn);
+ base = basename(basen);
+ pr_debug("config file: %s, dir: %s\n", base, dir);
+
+ wd = inotify_add_watch(fd, dir, IN_CLOSE_WRITE);
+ if (wd >= 0) {
+ daemon->config_base = strdup(base);
+ if (!daemon->config_base) {
+ close(fd);
+ wd = -1;
+ }
+ } else {
+ perror("failed: inotify_add_watch");
+ }
+
+out:
+ free(basen);
+ free(dirn);
+ return wd < 0 ? -1 : fd;
+}
+
+static bool process_inotify_event(struct daemon *daemon, char *buf, ssize_t len)
+{
+ char *p = buf;
+
+ while (p < (buf + len)) {
+ struct inotify_event *event = (struct inotify_event *) p;
+
+ /*
+ * We monitor the config directory; check if our
+ * config file was changed.
+ */
+ if ((event->mask & IN_CLOSE_WRITE) &&
+ !(event->mask & IN_ISDIR)) {
+ if (!strcmp(event->name, daemon->config_base))
+ return true;
+ }
+ p += sizeof(*event) + event->len;
+ }
+ return false;
+}
+
+static int handle_config_changes(struct daemon *daemon, int conf_fd,
+ bool *config_changed)
+{
+ char buf[4096];
+ ssize_t len;
+
+ while (!(*config_changed)) {
+ len = read(conf_fd, buf, sizeof(buf));
+ if (len == -1) {
+ if (errno != EAGAIN) {
+ perror("failed: read");
+ return -1;
+ }
+ return 0;
+ }
+ *config_changed = process_inotify_event(daemon, buf, len);
+ }
+ return 0;
+}
+
+static int setup_config(struct daemon *daemon)
+{
+ if (daemon->base_user) {
+ daemon->base = strdup(daemon->base_user);
+ if (!daemon->base)
+ return -ENOMEM;
+ }
+
+ if (daemon->config) {
+ char *real = realpath(daemon->config, NULL);
+
+ if (!real) {
+ perror("failed: realpath");
+ return -1;
+ }
+ daemon->config_real = real;
+ return 0;
+ }
+
+ if (perf_config_system() && !access(perf_etc_perfconfig(), R_OK))
+ daemon->config_real = strdup(perf_etc_perfconfig());
+ else if (perf_config_global() && perf_home_perfconfig())
+ daemon->config_real = strdup(perf_home_perfconfig());
+
+ return daemon->config_real ? 0 : -1;
+}
+
+#ifndef F_TLOCK
+#define F_TLOCK 2
+
+static int lockf(int fd, int cmd, off_t len)
+{
+ if (cmd != F_TLOCK || len != 0)
+ return -1;
+
+ return flock(fd, LOCK_EX | LOCK_NB);
+}
+#endif // F_TLOCK
+
+/*
+ * Each daemon tries to create and lock the BASE/lock file;
+ * if that succeeds, we are sure we're the only daemon
+ * running over the BASE.
+ *
+ * Once the daemon is finished, the file descriptor to the
+ * lock file is closed and the lock is released.
+ */
+static int check_lock(struct daemon *daemon)
+{
+ char path[PATH_MAX];
+ char buf[20];
+ int fd, pid;
+ ssize_t len;
+
+ scnprintf(path, sizeof(path), "%s/lock", daemon->base);
+
+ fd = open(path, O_RDWR|O_CREAT|O_CLOEXEC, 0640);
+ if (fd < 0)
+ return -1;
+
+ if (lockf(fd, F_TLOCK, 0) < 0) {
+ filename__read_int(path, &pid);
+ fprintf(stderr, "failed: another perf daemon (pid %d) owns %s\n",
+ pid, daemon->base);
+ close(fd);
+ return -1;
+ }
+
+ scnprintf(buf, sizeof(buf), "%d", getpid());
+ len = strlen(buf);
+
+ if (write(fd, buf, len) != len) {
+ perror("failed: write");
+ close(fd);
+ return -1;
+ }
+
+ if (ftruncate(fd, len)) {
+ perror("failed: ftruncate");
+ close(fd);
+ return -1;
+ }
+
+ return 0;
+}
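+
+/*
+ * The lock file also acts as a pid file: a daemon that fails to take the
+ * lock reads the owner's pid back out of it (filename__read_int() above)
+ * to report which daemon already owns the base.
+ */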
+
+static int go_background(struct daemon *daemon)
+{
+ int pid, fd;
+
+ pid = fork();
+ if (pid < 0)
+ return -1;
+
+ if (pid > 0)
+ return 1;
+
+ if (setsid() < 0)
+ return -1;
+
+ if (check_lock(daemon))
+ return -1;
+
+ umask(0);
+
+ if (chdir(daemon->base)) {
+ perror("failed: chdir");
+ return -1;
+ }
+
+ fd = open("output", O_RDWR|O_CREAT|O_TRUNC, 0644);
+ if (fd < 0) {
+ perror("failed: open");
+ return -1;
+ }
+
+ if (fcntl(fd, F_SETFD, FD_CLOEXEC)) {
+ perror("failed: fcntl FD_CLOEXEC");
+ close(fd);
+ return -1;
+ }
+
+ close(0);
+ dup2(fd, 1);
+ dup2(fd, 2);
+ close(fd);
+
+ daemon->out = fdopen(1, "w");
+ if (!daemon->out) {
+ close(1);
+ close(2);
+ return -1;
+ }
+
+ setbuf(daemon->out, NULL);
+ return 0;
+}
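+
+/*
+ * Note that go_background() returns 1 in the parent process; __cmd_start()
+ * treats that as a normal exit, so only the daemonized child keeps running
+ * the main loop.
+ */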
+
+static int setup_signalfd(struct daemon *daemon)
+{
+ sigset_t mask;
+
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGCHLD);
+
+ if (sigprocmask(SIG_BLOCK, &mask, NULL) == -1)
+ return -1;
+
+ daemon->signal_fd = signalfd(-1, &mask, SFD_NONBLOCK|SFD_CLOEXEC);
+ return daemon->signal_fd;
+}
+
+static int __cmd_start(struct daemon *daemon, struct option parent_options[],
+ int argc, const char **argv)
+{
+ bool foreground = false;
+ struct option start_options[] = {
+ OPT_BOOLEAN('f', "foreground", &foreground, "stay on console"),
+ OPT_PARENT(parent_options),
+ OPT_END()
+ };
+ int sock_fd = -1, conf_fd = -1, signal_fd = -1;
+ int sock_pos, file_pos, signal_pos;
+ struct fdarray fda;
+ int err = 0;
+
+ argc = parse_options(argc, argv, start_options, daemon_usage, 0);
+ if (argc)
+ usage_with_options(daemon_usage, start_options);
+
+ daemon->start = time(NULL);
+
+ if (setup_config(daemon)) {
+ pr_err("failed: config not found\n");
+ return -1;
+ }
+
+ if (setup_server_config(daemon))
+ return -1;
+
+ if (foreground && check_lock(daemon))
+ return -1;
+
+ if (!foreground) {
+ err = go_background(daemon);
+ if (err) {
+ /* original process, exit normally */
+ if (err == 1)
+ err = 0;
+ daemon__exit(daemon);
+ return err;
+ }
+ }
+
+ debug_set_file(daemon->out);
+ debug_set_display_time(true);
+
+ pr_info("daemon started (pid %d)\n", getpid());
+
+ fdarray__init(&fda, 3);
+
+ sock_fd = setup_server_socket(daemon);
+ if (sock_fd < 0)
+ goto out;
+
+ conf_fd = setup_config_changes(daemon);
+ if (conf_fd < 0)
+ goto out;
+
+ signal_fd = setup_signalfd(daemon);
+ if (signal_fd < 0)
+ goto out;
+
+ sock_pos = fdarray__add(&fda, sock_fd, POLLIN|POLLERR|POLLHUP, 0);
+ if (sock_pos < 0)
+ goto out;
+
+ file_pos = fdarray__add(&fda, conf_fd, POLLIN|POLLERR|POLLHUP, 0);
+ if (file_pos < 0)
+ goto out;
+
+ signal_pos = fdarray__add(&fda, signal_fd, POLLIN|POLLERR|POLLHUP, 0);
+ if (signal_pos < 0)
+ goto out;
+
+ signal(SIGINT, sig_handler);
+ signal(SIGTERM, sig_handler);
+ signal(SIGPIPE, SIG_IGN);
+
+ while (!done && !err) {
+ err = daemon__reconfig(daemon);
+
+ if (!err && fdarray__poll(&fda, -1)) {
+ bool reconfig = false;
+
+ if (fda.entries[sock_pos].revents & POLLIN)
+ err = handle_server_socket(daemon, sock_fd);
+ if (fda.entries[file_pos].revents & POLLIN)
+ err = handle_config_changes(daemon, conf_fd, &reconfig);
+ if (fda.entries[signal_pos].revents & POLLIN)
+ err = handle_signalfd(daemon) < 0;
+
+ if (reconfig)
+ err = setup_server_config(daemon);
+ }
+ }
+
+out:
+ fdarray__exit(&fda);
+
+ daemon__kill(daemon);
+ daemon__exit(daemon);
+
+ if (sock_fd != -1)
+ close(sock_fd);
+ if (conf_fd != -1)
+ close(conf_fd);
+ if (signal_fd != -1)
+ close(signal_fd);
+
+ pr_info("daemon exited\n");
+ fclose(daemon->out);
+ return err;
+}
+
+static int send_cmd(struct daemon *daemon, union cmd *cmd)
+{
+ int ret = -1, fd;
+ char *line = NULL;
+ size_t len = 0;
+ ssize_t nread;
+ FILE *in = NULL;
+
+ if (setup_client_config(daemon))
+ return -1;
+
+ fd = setup_client_socket(daemon);
+ if (fd < 0)
+ return -1;
+
+ if (sizeof(*cmd) != writen(fd, cmd, sizeof(*cmd))) {
+ perror("failed: write");
+ goto out;
+ }
+
+ in = fdopen(fd, "r");
+ if (!in) {
+ perror("failed: fdopen");
+ goto out;
+ }
+
+ while ((nread = getline(&line, &len, in)) != -1) {
+ if (fwrite(line, nread, 1, stdout) != 1)
+ goto out_fclose;
+ fflush(stdout);
+ }
+
+ ret = 0;
+out_fclose:
+ fclose(in);
+ free(line);
+out:
+ /* If in is defined, then fd is closed via fclose. */
+ if (!in)
+ close(fd);
+ return ret;
+}
+
+static int send_cmd_list(struct daemon *daemon)
+{
+ union cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.list.cmd = CMD_LIST;
+ cmd.list.verbose = verbose;
+ cmd.list.csv_sep = daemon->csv_sep ? *daemon->csv_sep : 0;
+
+ return send_cmd(daemon, &cmd);
+}
+
+static int __cmd_signal(struct daemon *daemon, struct option parent_options[],
+ int argc, const char **argv)
+{
+ const char *name = "all";
+ struct option start_options[] = {
+ OPT_STRING(0, "session", &name, "session",
+ "Sent signal to specific session"),
+ OPT_PARENT(parent_options),
+ OPT_END()
+ };
+ union cmd cmd;
+
+ argc = parse_options(argc, argv, start_options, daemon_usage, 0);
+ if (argc)
+ usage_with_options(daemon_usage, start_options);
+
+ if (setup_config(daemon)) {
+ pr_err("failed: config not found\n");
+ return -1;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.signal.cmd = CMD_SIGNAL;
+ cmd.signal.sig = SIGUSR2;
+ strncpy(cmd.signal.name, name, sizeof(cmd.signal.name) - 1);
+
+ return send_cmd(daemon, &cmd);
+}
+
+static int __cmd_stop(struct daemon *daemon, struct option parent_options[],
+ int argc, const char **argv)
+{
+ struct option start_options[] = {
+ OPT_PARENT(parent_options),
+ OPT_END()
+ };
+ union cmd cmd;
+
+ argc = parse_options(argc, argv, start_options, daemon_usage, 0);
+ if (argc)
+ usage_with_options(daemon_usage, start_options);
+
+ if (setup_config(daemon)) {
+ pr_err("failed: config not found\n");
+ return -1;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = CMD_STOP;
+ return send_cmd(daemon, &cmd);
+}
+
+static int __cmd_ping(struct daemon *daemon, struct option parent_options[],
+ int argc, const char **argv)
+{
+ const char *name = "all";
+ struct option ping_options[] = {
+ OPT_STRING(0, "session", &name, "session",
+ "Ping to specific session"),
+ OPT_PARENT(parent_options),
+ OPT_END()
+ };
+ union cmd cmd;
+
+ argc = parse_options(argc, argv, ping_options, daemon_usage, 0);
+ if (argc)
+ usage_with_options(daemon_usage, ping_options);
+
+ if (setup_config(daemon)) {
+ pr_err("failed: config not found\n");
+ return -1;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd = CMD_PING;
+ scnprintf(cmd.ping.name, sizeof(cmd.ping.name), "%s", name);
+ return send_cmd(daemon, &cmd);
+}
+
+int cmd_daemon(int argc, const char **argv)
+{
+ struct option daemon_options[] = {
+ OPT_INCR('v', "verbose", &verbose, "be more verbose"),
+ OPT_STRING(0, "config", &__daemon.config,
+ "config file", "config file path"),
+ OPT_STRING(0, "base", &__daemon.base_user,
+ "directory", "base directory"),
+ OPT_STRING_OPTARG('x', "field-separator", &__daemon.csv_sep,
+ "field separator", "print counts with custom separator", ","),
+ OPT_END()
+ };
+
+ perf_exe(__daemon.perf, sizeof(__daemon.perf));
+ __daemon.out = stdout;
+
+ argc = parse_options(argc, argv, daemon_options, daemon_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (argc) {
+ if (!strcmp(argv[0], "start"))
+ return __cmd_start(&__daemon, daemon_options, argc, argv);
+ if (!strcmp(argv[0], "signal"))
+ return __cmd_signal(&__daemon, daemon_options, argc, argv);
+ else if (!strcmp(argv[0], "stop"))
+ return __cmd_stop(&__daemon, daemon_options, argc, argv);
+ else if (!strcmp(argv[0], "ping"))
+ return __cmd_ping(&__daemon, daemon_options, argc, argv);
+
+ pr_err("failed: unknown command '%s'\n", argv[0]);
+ return -1;
+ }
+
+ if (setup_config(&__daemon)) {
+ pr_err("failed: config not found\n");
+ return -1;
+ }
+
+ return send_cmd_list(&__daemon);
+}
diff --git a/tools/perf/builtin-data.c b/tools/perf/builtin-data.c
index ca2fb44874e4..ce51cbf6dc97 100644
--- a/tools/perf/builtin-data.c
+++ b/tools/perf/builtin-data.c
@@ -3,11 +3,10 @@
#include <stdio.h>
#include <string.h>
#include "builtin.h"
-#include "perf.h"
#include "debug.h"
#include <subcmd/parse-options.h>
#include "data-convert.h"
-#include "data-convert-bt.h"
+#include "util/util.h"
typedef int (*data_cmd_fn_t)(int argc, const char **argv);
@@ -22,72 +21,70 @@ static struct data_cmd data_cmds[];
#define for_each_cmd(cmd) \
for (cmd = data_cmds; cmd && cmd->name; cmd++)
-static const struct option data_options[] = {
- OPT_END()
-};
-
static const char * const data_subcommands[] = { "convert", NULL };
static const char *data_usage[] = {
- "perf data [<common options>] <command> [<options>]",
+ "perf data convert [<options>]",
NULL
};
-static void print_usage(void)
-{
- struct data_cmd *cmd;
-
- printf("Usage:\n");
- printf("\t%s\n\n", data_usage[0]);
- printf("\tAvailable commands:\n");
-
- for_each_cmd(cmd) {
- printf("\t %s\t- %s\n", cmd->name, cmd->summary);
- }
-
- printf("\n");
-}
-
-static const char * const data_convert_usage[] = {
- "perf data convert [<options>]",
- NULL
+const char *to_json;
+const char *to_ctf;
+struct perf_data_convert_opts opts = {
+ .force = false,
+ .all = false,
};
-static int cmd_data_convert(int argc, const char **argv)
-{
- const char *to_ctf = NULL;
- struct perf_data_convert_opts opts = {
- .force = false,
- .all = false,
- };
- const struct option options[] = {
+const struct option data_options[] = {
OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
+ OPT_STRING(0, "to-json", &to_json, NULL, "Convert to JSON format"),
#ifdef HAVE_LIBBABELTRACE_SUPPORT
OPT_STRING(0, "to-ctf", &to_ctf, NULL, "Convert to CTF format"),
+ OPT_BOOLEAN(0, "tod", &opts.tod, "Convert time to wall clock time"),
#endif
OPT_BOOLEAN('f', "force", &opts.force, "don't complain, do it"),
OPT_BOOLEAN(0, "all", &opts.all, "Convert all events"),
OPT_END()
};
-#ifndef HAVE_LIBBABELTRACE_SUPPORT
- pr_err("No conversion support compiled in. perf should be compiled with environment variables LIBBABELTRACE=1 and LIBBABELTRACE_DIR=/path/to/libbabeltrace/\n");
- return -1;
-#endif
+static int cmd_data_convert(int argc, const char **argv)
+{
- argc = parse_options(argc, argv, options,
- data_convert_usage, 0);
+ argc = parse_options(argc, argv, data_options,
+ data_usage, 0);
if (argc) {
- usage_with_options(data_convert_usage, options);
+ usage_with_options(data_usage, data_options);
return -1;
}
- if (to_ctf) {
+ if (to_json && to_ctf) {
+ pr_err("You cannot specify both --to-ctf and --to-json.\n");
+ return -1;
+ }
#ifdef HAVE_LIBBABELTRACE_SUPPORT
+ if (!to_json && !to_ctf) {
+ pr_err("You must specify one of --to-ctf or --to-json.\n");
+ return -1;
+ }
+#else
+ if (!to_json) {
+ pr_err("You must specify --to-json.\n");
+ return -1;
+ }
+#endif
+
+ if (to_json)
+ return bt_convert__perf2json(input_name, to_json, &opts);
+
+ if (to_ctf) {
+#if defined(HAVE_LIBBABELTRACE_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
return bt_convert__perf2ctf(input_name, to_ctf, &opts);
#else
- pr_err("The libbabeltrace support is not compiled in.\n");
+ pr_err("The libbabeltrace support is not compiled in. perf should be "
+ "compiled with environment variables LIBBABELTRACE=1 and "
+ "LIBBABELTRACE_DIR=/path/to/libbabeltrace/.\n"
+ "Check also if libbtraceevent devel files are available.\n");
return -1;
#endif
}
@@ -105,14 +102,13 @@ int cmd_data(int argc, const char **argv)
struct data_cmd *cmd;
const char *cmdstr;
- /* No command specified. */
- if (argc < 2)
- goto usage;
-
argc = parse_options_subcommand(argc, argv, data_options, data_subcommands, data_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (argc < 1)
- goto usage;
+
+ if (!argc) {
+ usage_with_options(data_usage, data_options);
+ return -1;
+ }
cmdstr = argv[0];
@@ -124,7 +120,6 @@ int cmd_data(int argc, const char **argv)
}
pr_err("Unknown command: %s\n", cmdstr);
-usage:
- print_usage();
+ usage_with_options(data_usage, data_options);
return -1;
}
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index c94a002f295e..dbb0562d6a4f 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -6,7 +6,6 @@
* DSOs and symbol information, sort them and produce a diff.
*/
#include "builtin.h"
-#include "perf.h"
#include "util/debug.h"
#include "util/event.h"
@@ -25,6 +24,8 @@
#include "util/map.h"
#include "util/spark.h"
#include "util/block-info.h"
+#include "util/stream.h"
+#include "util/util.h"
#include <linux/err.h>
#include <linux/zalloc.h>
#include <subcmd/pager.h>
@@ -42,6 +43,7 @@ struct perf_diff {
int range_size;
int range_num;
bool has_br_stack;
+ bool stream;
};
/* Diff command specific HPP columns. */
@@ -72,6 +74,7 @@ struct data__file {
struct perf_data data;
int idx;
struct hists *hists;
+ struct evlist_streams *evlist_streams;
struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX];
};
@@ -106,6 +109,7 @@ enum {
COMPUTE_DELTA_ABS,
COMPUTE_CYCLES,
COMPUTE_MAX,
+ COMPUTE_STREAM, /* After COMPUTE_MAX to avoid using the current compute arrays */
};
const char *compute_names[COMPUTE_MAX] = {
@@ -393,6 +397,11 @@ static int diff__process_sample_event(struct perf_tool *tool,
struct perf_diff *pdiff = container_of(tool, struct perf_diff, tool);
struct addr_location al;
struct hists *hists = evsel__hists(evsel);
+ struct hist_entry_iter iter = {
+ .evsel = evsel,
+ .sample = sample,
+ .ops = &hist_iter_normal,
+ };
int ret = -1;
if (perf_time__ranges_skip_sample(pdiff->ptime_range, pdiff->range_num,
@@ -411,16 +420,10 @@ static int diff__process_sample_event(struct perf_tool *tool,
goto out_put;
}
- if (compute != COMPUTE_CYCLES) {
- if (!hists__add_entry(hists, &al, NULL, NULL, NULL, sample,
- true)) {
- pr_warning("problem incrementing symbol period, "
- "skipping event\n");
- goto out_put;
- }
- } else {
+ switch (compute) {
+ case COMPUTE_CYCLES:
if (!hists__add_entry_ops(hists, &block_hist_ops, &al, NULL,
- NULL, NULL, sample, true)) {
+ NULL, NULL, NULL, sample, true)) {
pr_warning("problem incrementing symbol period, "
"skipping event\n");
goto out_put;
@@ -428,6 +431,23 @@ static int diff__process_sample_event(struct perf_tool *tool,
hist__account_cycles(sample->branch_stack, &al, sample, false,
NULL);
+ break;
+
+ case COMPUTE_STREAM:
+ if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
+ NULL)) {
+ pr_debug("problem adding hist entry, skipping event\n");
+ goto out_put;
+ }
+ break;
+
+ default:
+ if (!hists__add_entry(hists, &al, NULL, NULL, NULL, NULL, sample,
+ true)) {
+ pr_warning("problem incrementing symbol period, "
+ "skipping event\n");
+ goto out_put;
+ }
}
/*
@@ -467,14 +487,14 @@ static struct evsel *evsel_match(struct evsel *evsel,
struct evsel *e;
evlist__for_each_entry(evlist, e) {
- if (perf_evsel__match2(evsel, e))
+ if (evsel__match2(evsel, e))
return e;
}
return NULL;
}
-static void perf_evlist__collapse_resort(struct evlist *evlist)
+static void evlist__collapse_resort(struct evlist *evlist)
{
struct evsel *evsel;
@@ -981,7 +1001,7 @@ static void data_process(void)
if (!quiet) {
fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n",
- perf_evsel__name(evsel_base));
+ evsel__name(evsel_base));
}
first = false;
@@ -990,16 +1010,61 @@ static void data_process(void)
data__fprintf();
/* Don't sort callchain for perf diff */
- perf_evsel__reset_sample_bit(evsel_base, CALLCHAIN);
+ evsel__reset_sample_bit(evsel_base, CALLCHAIN);
hists__process(hists_base);
}
}
+static int process_base_stream(struct data__file *data_base,
+ struct data__file *data_pair,
+ const char *title __maybe_unused)
+{
+ struct evlist *evlist_base = data_base->session->evlist;
+ struct evlist *evlist_pair = data_pair->session->evlist;
+ struct evsel *evsel_base, *evsel_pair;
+ struct evsel_streams *es_base, *es_pair;
+
+ evlist__for_each_entry(evlist_base, evsel_base) {
+ evsel_pair = evsel_match(evsel_base, evlist_pair);
+ if (!evsel_pair)
+ continue;
+
+ es_base = evsel_streams__entry(data_base->evlist_streams,
+ evsel_base->core.idx);
+ if (!es_base)
+ return -1;
+
+ es_pair = evsel_streams__entry(data_pair->evlist_streams,
+ evsel_pair->core.idx);
+ if (!es_pair)
+ return -1;
+
+ evsel_streams__match(es_base, es_pair);
+ evsel_streams__report(es_base, es_pair);
+ }
+
+ return 0;
+}
+
+static void stream_process(void)
+{
+ /*
+ * Stream comparison only supports two data files:
+ * perf.data.old and perf.data. data__files[0] is perf.data.old
+ * and data__files[1] is perf.data.
+ */
+ process_base_stream(&data__files[0], &data__files[1],
+ "# Output based on old perf data:\n#\n");
+}
+
static void data__free(struct data__file *d)
{
int col;
+ if (d->evlist_streams)
+ evlist_streams__delete(d->evlist_streams);
+
for (col = 0; col < PERF_HPP_DIFF__MAX_INDEX; col++) {
struct diff_hpp_fmt *fmt = &d->fmt[col];
@@ -1091,7 +1156,7 @@ static int check_file_brstack(void)
int i;
data__for_each_file(i, d) {
- d->session = perf_session__new(&d->data, false, &pdiff.tool);
+ d->session = perf_session__new(&d->data, &pdiff.tool);
if (IS_ERR(d->session)) {
pr_err("Failed to open %s\n", d->data.path);
return PTR_ERR(d->session);
@@ -1123,7 +1188,7 @@ static int __cmd_diff(void)
ret = -EINVAL;
data__for_each_file(i, d) {
- d->session = perf_session__new(&d->data, false, &pdiff.tool);
+ d->session = perf_session__new(&d->data, &pdiff.tool);
if (IS_ERR(d->session)) {
ret = PTR_ERR(d->session);
pr_err("Failed to open %s\n", d->data.path);
@@ -1149,17 +1214,30 @@ static int __cmd_diff(void)
goto out_delete;
}
- perf_evlist__collapse_resort(d->session->evlist);
+ evlist__collapse_resort(d->session->evlist);
if (pdiff.ptime_range)
zfree(&pdiff.ptime_range);
+
+ if (compute == COMPUTE_STREAM) {
+ d->evlist_streams = evlist__create_streams(
+ d->session->evlist, 5);
+ if (!d->evlist_streams) {
+ ret = -ENOMEM;
+ goto out_delete;
+ }
+ }
}
- data_process();
+ if (compute == COMPUTE_STREAM)
+ stream_process();
+ else
+ data_process();
out_delete:
data__for_each_file(i, d) {
- perf_session__delete(d->session);
+ if (!IS_ERR(d->session))
+ perf_session__delete(d->session);
data__free(d);
}
@@ -1182,7 +1260,7 @@ static const char * const diff_usage[] = {
static const struct option options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
+ OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
OPT_BOOLEAN('b', "baseline-only", &show_baseline_only,
"Show only items with match in baseline"),
OPT_CALLBACK('c', "compute", &compute,
@@ -1228,6 +1306,8 @@ static const struct option options[] = {
"only consider symbols in these pids"),
OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
"only consider symbols in these tids"),
+ OPT_BOOLEAN(0, "stream", &pdiff.stream,
+ "Enable hot streams comparison."),
OPT_END()
};
@@ -1562,7 +1642,7 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
default:
BUG_ON(1);
- };
+ }
}
static void
@@ -1716,7 +1796,7 @@ static int ui_init(void)
data__for_each_file(i, d) {
/*
- * Baseline or compute realted columns:
+ * Baseline or compute related columns:
*
* PERF_HPP_DIFF__BASELINE
* PERF_HPP_DIFF__DELTA
@@ -1887,6 +1967,9 @@ int cmd_diff(int argc, const char **argv)
if (cycles_hist && (compute != COMPUTE_CYCLES))
usage_with_options(diff_usage, options);
+ if (pdiff.stream)
+ compute = COMPUTE_STREAM;
+
symbol__annotation_init();
if (symbol__init(NULL) < 0)
@@ -1898,13 +1981,26 @@ int cmd_diff(int argc, const char **argv)
if (check_file_brstack() < 0)
return -1;
- if (compute == COMPUTE_CYCLES && !pdiff.has_br_stack)
+ if ((compute == COMPUTE_CYCLES || compute == COMPUTE_STREAM)
+ && !pdiff.has_br_stack) {
return -1;
+ }
- if (ui_init() < 0)
- return -1;
+ if (compute == COMPUTE_STREAM) {
+ symbol_conf.show_branchflag_count = true;
+ symbol_conf.disable_add2line_warn = true;
+ callchain_param.mode = CHAIN_FLAT;
+ callchain_param.key = CCKEY_SRCLINE;
+ callchain_param.branch_callstack = 1;
+ symbol_conf.use_callchain = true;
+ callchain_register_param(&callchain_param);
+ sort_order = "srcline,symbol,dso";
+ } else {
+ if (ui_init() < 0)
+ return -1;
- sort__mode = SORT_MODE__DIFF;
+ sort__mode = SORT_MODE__DIFF;
+ }
if (setup_sorting(NULL) < 0)
usage_with_options(diff_usage, options);
diff --git a/tools/perf/builtin-evlist.c b/tools/perf/builtin-evlist.c
index 440501994931..7117656939e7 100644
--- a/tools/perf/builtin-evlist.c
+++ b/tools/perf/builtin-evlist.c
@@ -7,7 +7,6 @@
#include <linux/list.h>
-#include "perf.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
@@ -17,6 +16,15 @@
#include "util/data.h"
#include "util/debug.h"
#include <linux/err.h>
+#include "util/tool.h"
+#include "util/util.h"
+
+static int process_header_feature(struct perf_session *session __maybe_unused,
+ union perf_event *event __maybe_unused)
+{
+ session_done = 1;
+ return 0;
+}
static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
{
@@ -27,14 +35,22 @@ static int __cmd_evlist(const char *file_name, struct perf_attr_details *details
.mode = PERF_DATA_MODE_READ,
.force = details->force,
};
+ struct perf_tool tool = {
+ /* only needed for pipe mode */
+ .attr = perf_event__process_attr,
+ .feature = process_header_feature,
+ };
bool has_tracepoint = false;
- session = perf_session__new(&data, 0, NULL);
+ session = perf_session__new(&data, &tool);
if (IS_ERR(session))
return PTR_ERR(session);
+ if (data.is_pipe)
+ perf_session__process_events(session);
+
evlist__for_each_entry(session->evlist, pos) {
- perf_evsel__fprintf(pos, details, stdout);
+ evsel__fprintf(pos, details, stdout);
if (pos->core.attr.type == PERF_TYPE_TRACEPOINT)
has_tracepoint = true;
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index d5adc417a4ca..810e3376c7d6 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -3,6 +3,7 @@
* builtin-ftrace.c
*
* Copyright (c) 2013 LG Electronics, Namhyung Kim <namhyung@kernel.org>
+ * Copyright (c) 2020 Changbin Du <changbin.du@gmail.com>, significant enhancement.
*/
#include "builtin.h"
@@ -12,7 +13,9 @@
#include <signal.h>
#include <stdlib.h>
#include <fcntl.h>
+#include <math.h>
#include <poll.h>
+#include <ctype.h>
#include <linux/capability.h>
#include <linux/string.h>
@@ -24,28 +27,17 @@
#include "target.h"
#include "cpumap.h"
#include "thread_map.h"
+#include "strfilter.h"
#include "util/cap.h"
#include "util/config.h"
+#include "util/ftrace.h"
+#include "util/units.h"
+#include "util/parse-sublevel-options.h"
#define DEFAULT_TRACER "function_graph"
-struct perf_ftrace {
- struct evlist *evlist;
- struct target target;
- const char *tracer;
- struct list_head filters;
- struct list_head notrace;
- struct list_head graph_funcs;
- struct list_head nograph_funcs;
- int graph_depth;
-};
-
-struct filter_entry {
- struct list_head list;
- char name[];
-};
-
-static bool done;
+static volatile sig_atomic_t workload_exec_errno;
+static volatile sig_atomic_t done;
static void sig_handler(int sig __maybe_unused)
{
@@ -53,7 +45,7 @@ static void sig_handler(int sig __maybe_unused)
}
/*
- * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
+ * evlist__prepare_workload will send a SIGUSR1 if the fork fails, since
* we asked by setting its exec_error to the function below,
* ftrace__workload_exec_failed_signal.
*
@@ -63,7 +55,7 @@ static void ftrace__workload_exec_failed_signal(int signo __maybe_unused,
siginfo_t *info __maybe_unused,
void *ucontext __maybe_unused)
{
- /* workload_exec_errno = info->si_value.sival_int; */
+ workload_exec_errno = info->si_value.sival_int;
done = true;
}
@@ -127,9 +119,119 @@ static int append_tracing_file(const char *name, const char *val)
return __write_tracing_file(name, val, true);
}
+static int read_tracing_file_to_stdout(const char *name)
+{
+ char buf[4096];
+ char *file;
+ int fd;
+ int ret = -1;
+
+ file = get_tracing_file(name);
+ if (!file) {
+ pr_debug("cannot get tracing file: %s\n", name);
+ return -1;
+ }
+
+ fd = open(file, O_RDONLY);
+ if (fd < 0) {
+ pr_debug("cannot open tracing file: %s: %s\n",
+ name, str_error_r(errno, buf, sizeof(buf)));
+ goto out;
+ }
+
+ /* read contents to stdout */
+ while (true) {
+ int n = read(fd, buf, sizeof(buf));
+ if (n == 0)
+ break;
+ else if (n < 0)
+ goto out_close;
+
+ if (fwrite(buf, n, 1, stdout) != 1)
+ goto out_close;
+ }
+ ret = 0;
+
+out_close:
+ close(fd);
+out:
+ put_tracing_file(file);
+ return ret;
+}
+
+static int read_tracing_file_by_line(const char *name,
+ void (*cb)(char *str, void *arg),
+ void *cb_arg)
+{
+ char *line = NULL;
+ size_t len = 0;
+ char *file;
+ FILE *fp;
+
+ file = get_tracing_file(name);
+ if (!file) {
+ pr_debug("cannot get tracing file: %s\n", name);
+ return -1;
+ }
+
+ fp = fopen(file, "r");
+ if (fp == NULL) {
+ pr_debug("cannot open tracing file: %s\n", name);
+ put_tracing_file(file);
+ return -1;
+ }
+
+ while (getline(&line, &len, fp) != -1) {
+ cb(line, cb_arg);
+ }
+
+ if (line)
+ free(line);
+
+ fclose(fp);
+ put_tracing_file(file);
+ return 0;
+}
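read_tracing_file_by_line() is the callback-per-line helper that the -F/--funcs listing further down builds on. A small usage sketch with a hypothetical counting callback; count_line_cb and count_available_functions are illustrative names, not part of the patch:

/*
 * Illustration only: count_line_cb/count_available_functions are
 * hypothetical names, not part of this patch.
 */
static void count_line_cb(char *str __maybe_unused, void *arg)
{
	(*(int *)arg)++;
}

static int count_available_functions(void)
{
	int nr = 0;

	if (read_tracing_file_by_line("available_filter_functions",
				      count_line_cb, &nr) < 0)
		return -1;
	return nr;
}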
+
+static int write_tracing_file_int(const char *name, int value)
+{
+ char buf[16];
+
+ snprintf(buf, sizeof(buf), "%d", value);
+ if (write_tracing_file(name, buf) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int write_tracing_option_file(const char *name, const char *val)
+{
+ char *file;
+ int ret;
+
+ if (asprintf(&file, "options/%s", name) < 0)
+ return -1;
+
+ ret = __write_tracing_file(file, val, false);
+ free(file);
+ return ret;
+}
+
static int reset_tracing_cpu(void);
static void reset_tracing_filters(void);
+static void reset_tracing_options(struct perf_ftrace *ftrace __maybe_unused)
+{
+ write_tracing_option_file("function-fork", "0");
+ write_tracing_option_file("func_stack_trace", "0");
+ write_tracing_option_file("sleep-time", "1");
+ write_tracing_option_file("funcgraph-irqs", "1");
+ write_tracing_option_file("funcgraph-proc", "0");
+ write_tracing_option_file("funcgraph-abstime", "0");
+ write_tracing_option_file("latency-format", "0");
+ write_tracing_option_file("irq-info", "0");
+}
+
static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
{
if (write_tracing_file("tracing_on", "0") < 0)
@@ -147,7 +249,11 @@ static int reset_tracing_files(struct perf_ftrace *ftrace __maybe_unused)
if (write_tracing_file("max_graph_depth", "0") < 0)
return -1;
+ if (write_tracing_file("tracing_thresh", "0") < 0)
+ return -1;
+
reset_tracing_filters();
+ reset_tracing_options(ftrace);
return 0;
}
@@ -161,7 +267,7 @@ static int set_tracing_pid(struct perf_ftrace *ftrace)
for (i = 0; i < perf_thread_map__nr(ftrace->evlist->core.threads); i++) {
scnprintf(buf, sizeof(buf), "%d",
- ftrace->evlist->core.threads->map[i]);
+ perf_thread_map__pid(ftrace->evlist->core.threads, i));
if (append_tracing_file("set_ftrace_pid", buf) < 0)
return -1;
}
@@ -175,7 +281,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
int ret;
int last_cpu;
- last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
+ last_cpu = perf_cpu_map__cpu(cpumap, perf_cpu_map__nr(cpumap) - 1).cpu;
mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
@@ -195,7 +301,7 @@ static int set_tracing_cpumask(struct perf_cpu_map *cpumap)
static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
- struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
+ struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;
if (!target__has_cpu(&ftrace->target))
return 0;
@@ -203,6 +309,28 @@ static int set_tracing_cpu(struct perf_ftrace *ftrace)
return set_tracing_cpumask(cpumap);
}
+static int set_tracing_func_stack_trace(struct perf_ftrace *ftrace)
+{
+ if (!ftrace->func_stack_trace)
+ return 0;
+
+ if (write_tracing_option_file("func_stack_trace", "1") < 0)
+ return -1;
+
+ return 0;
+}
+
+static int set_tracing_func_irqinfo(struct perf_ftrace *ftrace)
+{
+ if (!ftrace->func_irq_info)
+ return 0;
+
+ if (write_tracing_option_file("irq-info", "1") < 0)
+ return -1;
+
+ return 0;
+}
+
static int reset_tracing_cpu(void)
{
struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL);
@@ -257,8 +385,6 @@ static void reset_tracing_filters(void)
static int set_tracing_depth(struct perf_ftrace *ftrace)
{
- char buf[16];
-
if (ftrace->graph_depth == 0)
return 0;
@@ -267,15 +393,174 @@ static int set_tracing_depth(struct perf_ftrace *ftrace)
return -1;
}
- snprintf(buf, sizeof(buf), "%d", ftrace->graph_depth);
+ if (write_tracing_file_int("max_graph_depth", ftrace->graph_depth) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int set_tracing_percpu_buffer_size(struct perf_ftrace *ftrace)
+{
+ int ret;
+
+ if (ftrace->percpu_buffer_size == 0)
+ return 0;
+
+ ret = write_tracing_file_int("buffer_size_kb",
+ ftrace->percpu_buffer_size / 1024);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int set_tracing_trace_inherit(struct perf_ftrace *ftrace)
+{
+ if (!ftrace->inherit)
+ return 0;
- if (write_tracing_file("max_graph_depth", buf) < 0)
+ if (write_tracing_option_file("function-fork", "1") < 0)
return -1;
return 0;
}
-static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
+static int set_tracing_sleep_time(struct perf_ftrace *ftrace)
+{
+ if (!ftrace->graph_nosleep_time)
+ return 0;
+
+ if (write_tracing_option_file("sleep-time", "0") < 0)
+ return -1;
+
+ return 0;
+}
+
+static int set_tracing_funcgraph_irqs(struct perf_ftrace *ftrace)
+{
+ if (!ftrace->graph_noirqs)
+ return 0;
+
+ if (write_tracing_option_file("funcgraph-irqs", "0") < 0)
+ return -1;
+
+ return 0;
+}
+
+static int set_tracing_funcgraph_verbose(struct perf_ftrace *ftrace)
+{
+ if (!ftrace->graph_verbose)
+ return 0;
+
+ if (write_tracing_option_file("funcgraph-proc", "1") < 0)
+ return -1;
+
+ if (write_tracing_option_file("funcgraph-abstime", "1") < 0)
+ return -1;
+
+ if (write_tracing_option_file("latency-format", "1") < 0)
+ return -1;
+
+ return 0;
+}
+
+static int set_tracing_thresh(struct perf_ftrace *ftrace)
+{
+ int ret;
+
+ if (ftrace->graph_thresh == 0)
+ return 0;
+
+ ret = write_tracing_file_int("tracing_thresh", ftrace->graph_thresh);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int set_tracing_options(struct perf_ftrace *ftrace)
+{
+ if (set_tracing_pid(ftrace) < 0) {
+ pr_err("failed to set ftrace pid\n");
+ return -1;
+ }
+
+ if (set_tracing_cpu(ftrace) < 0) {
+ pr_err("failed to set tracing cpumask\n");
+ return -1;
+ }
+
+ if (set_tracing_func_stack_trace(ftrace) < 0) {
+ pr_err("failed to set tracing option func_stack_trace\n");
+ return -1;
+ }
+
+ if (set_tracing_func_irqinfo(ftrace) < 0) {
+ pr_err("failed to set tracing option irq-info\n");
+ return -1;
+ }
+
+ if (set_tracing_filters(ftrace) < 0) {
+ pr_err("failed to set tracing filters\n");
+ return -1;
+ }
+
+ if (set_tracing_depth(ftrace) < 0) {
+ pr_err("failed to set graph depth\n");
+ return -1;
+ }
+
+ if (set_tracing_percpu_buffer_size(ftrace) < 0) {
+ pr_err("failed to set tracing per-cpu buffer size\n");
+ return -1;
+ }
+
+ if (set_tracing_trace_inherit(ftrace) < 0) {
+ pr_err("failed to set tracing option function-fork\n");
+ return -1;
+ }
+
+ if (set_tracing_sleep_time(ftrace) < 0) {
+ pr_err("failed to set tracing option sleep-time\n");
+ return -1;
+ }
+
+ if (set_tracing_funcgraph_irqs(ftrace) < 0) {
+ pr_err("failed to set tracing option funcgraph-irqs\n");
+ return -1;
+ }
+
+ if (set_tracing_funcgraph_verbose(ftrace) < 0) {
+ pr_err("failed to set tracing option funcgraph-proc/funcgraph-abstime\n");
+ return -1;
+ }
+
+ if (set_tracing_thresh(ftrace) < 0) {
+ pr_err("failed to set tracing thresh\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void select_tracer(struct perf_ftrace *ftrace)
+{
+ bool graph = !list_empty(&ftrace->graph_funcs) ||
+ !list_empty(&ftrace->nograph_funcs);
+ bool func = !list_empty(&ftrace->filters) ||
+ !list_empty(&ftrace->notrace);
+
+ /* The function_graph tracer has priority over the function tracer. */
+ if (graph)
+ ftrace->tracer = "function_graph";
+ else if (func)
+ ftrace->tracer = "function";
+ /* Otherwise, the default tracer is used. */
+
+ pr_debug("%s tracer is used\n", ftrace->tracer);
+}
+
+static int __cmd_ftrace(struct perf_ftrace *ftrace)
{
char *trace_file;
int trace_fd;
@@ -284,10 +569,11 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
.events = POLLIN,
};
- if (!perf_cap__capable(CAP_SYS_ADMIN)) {
+ if (!(perf_cap__capable(CAP_PERFMON) ||
+ perf_cap__capable(CAP_SYS_ADMIN))) {
pr_err("ftrace only works for %s!\n",
#ifdef HAVE_LIBCAP_SUPPORT
- "users with the SYS_ADMIN capability"
+ "users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
#else
"root"
#endif
@@ -295,10 +581,7 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
return -1;
}
- signal(SIGINT, sig_handler);
- signal(SIGUSR1, sig_handler);
- signal(SIGCHLD, sig_handler);
- signal(SIGPIPE, sig_handler);
+ select_tracer(ftrace);
if (reset_tracing_files(ftrace) < 0) {
pr_err("failed to reset ftrace\n");
@@ -309,31 +592,8 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
if (write_tracing_file("trace", "0") < 0)
goto out;
- if (argc && perf_evlist__prepare_workload(ftrace->evlist,
- &ftrace->target, argv, false,
- ftrace__workload_exec_failed_signal) < 0) {
- goto out;
- }
-
- if (set_tracing_pid(ftrace) < 0) {
- pr_err("failed to set ftrace pid\n");
- goto out_reset;
- }
-
- if (set_tracing_cpu(ftrace) < 0) {
- pr_err("failed to set tracing cpumask\n");
- goto out_reset;
- }
-
- if (set_tracing_filters(ftrace) < 0) {
- pr_err("failed to set tracing filters\n");
- goto out_reset;
- }
-
- if (set_tracing_depth(ftrace) < 0) {
- pr_err("failed to set graph depth\n");
+ if (set_tracing_options(ftrace) < 0)
goto out_reset;
- }
if (write_tracing_file("current_tracer", ftrace->tracer) < 0) {
pr_err("failed to set current_tracer to %s\n", ftrace->tracer);
@@ -360,12 +620,25 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
fcntl(trace_fd, F_SETFL, O_NONBLOCK);
pollfd.fd = trace_fd;
- if (write_tracing_file("tracing_on", "1") < 0) {
- pr_err("can't enable tracing\n");
- goto out_close_fd;
+ /* display column headers */
+ read_tracing_file_to_stdout("trace");
+
+ if (!ftrace->target.initial_delay) {
+ if (write_tracing_file("tracing_on", "1") < 0) {
+ pr_err("can't enable tracing\n");
+ goto out_close_fd;
+ }
}
- perf_evlist__start_workload(ftrace->evlist);
+ evlist__start_workload(ftrace->evlist);
+
+ if (ftrace->target.initial_delay > 0) {
+ usleep(ftrace->target.initial_delay * 1000);
+ if (write_tracing_file("tracing_on", "1") < 0) {
+ pr_err("can't enable tracing\n");
+ goto out_close_fd;
+ }
+ }
while (!done) {
if (poll(&pollfd, 1, -1) < 0)
@@ -382,6 +655,14 @@ static int __cmd_ftrace(struct perf_ftrace *ftrace, int argc, const char **argv)
write_tracing_file("tracing_on", "0");
+ if (workload_exec_errno) {
+ const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
+ /* flush stdout first so below error msg appears at the end. */
+ fflush(stdout);
+ pr_err("workload failed: %s\n", emsg);
+ goto out_close_fd;
+ }
+
/* read remaining buffer contents */
while (true) {
int n = read(trace_fd, buf, sizeof(buf));
@@ -396,7 +677,275 @@ out_close_fd:
out_reset:
reset_tracing_files(ftrace);
out:
- return done ? 0 : -1;
+ return (done && !workload_exec_errno) ? 0 : -1;
+}
+
+static void make_histogram(int buckets[], char *buf, size_t len, char *linebuf,
+ bool use_nsec)
+{
+ char *p, *q;
+ char *unit;
+ double num;
+ int i;
+
+ /* ensure NUL termination */
+ buf[len] = '\0';
+
+ /* handle data line by line */
+ for (p = buf; (q = strchr(p, '\n')) != NULL; p = q + 1) {
+ *q = '\0';
+ /* move it to the line buffer */
+ strcat(linebuf, p);
+
+ /*
+ * parse trace output to get function duration like in
+ *
+ * # tracer: function_graph
+ * #
+ * # CPU DURATION FUNCTION CALLS
+ * # | | | | | | |
+ * 1) + 10.291 us | do_filp_open();
+ * 1) 4.889 us | do_filp_open();
+ * 1) 6.086 us | do_filp_open();
+ *
+ */
+ if (linebuf[0] == '#')
+ goto next;
+
+ /* ignore CPU */
+ p = strchr(linebuf, ')');
+ if (p == NULL)
+ p = linebuf;
+
+ while (*p && !isdigit(*p) && (*p != '|'))
+ p++;
+
+ /* no duration */
+ if (*p == '\0' || *p == '|')
+ goto next;
+
+ num = strtod(p, &unit);
+ if (!unit || strncmp(unit, " us", 3))
+ goto next;
+
+ if (use_nsec)
+ num *= 1000;
+
+ i = log2(num);
+ if (i < 0)
+ i = 0;
+ if (i >= NUM_BUCKET)
+ i = NUM_BUCKET - 1;
+
+ buckets[i]++;
+
+next:
+ /* empty the line buffer for the next output */
+ linebuf[0] = '\0';
+ }
+
+ /* preserve any remaining output (before newline) */
+ strcat(linebuf, p);
+}
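make_histogram() buckets each parsed duration by its base-2 logarithm, so 10.291 us lands in bucket 3 (log2 ≈ 3.36), 4.889 us in bucket 2, and anything under 1 us in bucket 0. A standalone sketch of just that mapping; NUM_BUCKET is defined elsewhere in the patch, so a stand-in value is assumed here:

#include <math.h>
#include <stdbool.h>

#define NUM_BUCKET_SKETCH 22	/* stand-in; the real NUM_BUCKET is defined elsewhere */

/* Map one parsed duration (us, scaled to ns with --use-nsec) to a bucket. */
static int duration_to_bucket(double num, bool use_nsec)
{
	int i;

	if (use_nsec)
		num *= 1000;
	if (num < 1)
		return 0;

	i = (int)log2(num);
	if (i >= NUM_BUCKET_SKETCH)
		i = NUM_BUCKET_SKETCH - 1;
	return i;	/* e.g. 10.291 -> 3, 4.889 -> 2 */
}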
+
+static void display_histogram(int buckets[], bool use_nsec)
+{
+ int i;
+ int total = 0;
+ int bar_total = 46; /* to fit in 80 column */
+ char bar[] = "###############################################";
+ int bar_len;
+
+ for (i = 0; i < NUM_BUCKET; i++)
+ total += buckets[i];
+
+ if (total == 0) {
+ printf("No data found\n");
+ return;
+ }
+
+ printf("# %14s | %10s | %-*s |\n",
+ " DURATION ", "COUNT", bar_total, "GRAPH");
+
+ bar_len = buckets[0] * bar_total / total;
+ printf(" %4d - %-4d %s | %10d | %.*s%*s |\n",
+ 0, 1, "us", buckets[0], bar_len, bar, bar_total - bar_len, "");
+
+ for (i = 1; i < NUM_BUCKET - 1; i++) {
+ int start = (1 << (i - 1));
+ int stop = 1 << i;
+ const char *unit = use_nsec ? "ns" : "us";
+
+ if (start >= 1024) {
+ start >>= 10;
+ stop >>= 10;
+ unit = use_nsec ? "us" : "ms";
+ }
+ bar_len = buckets[i] * bar_total / total;
+ printf(" %4d - %-4d %s | %10d | %.*s%*s |\n",
+ start, stop, unit, buckets[i], bar_len, bar,
+ bar_total - bar_len, "");
+ }
+
+ bar_len = buckets[NUM_BUCKET - 1] * bar_total / total;
+ printf(" %4d - %-4s %s | %10d | %.*s%*s |\n",
+ 1, "...", use_nsec ? "ms" : " s", buckets[NUM_BUCKET - 1],
+ bar_len, bar, bar_total - bar_len, "");
+
+}
+
+static int prepare_func_latency(struct perf_ftrace *ftrace)
+{
+ char *trace_file;
+ int fd;
+
+ if (ftrace->target.use_bpf)
+ return perf_ftrace__latency_prepare_bpf(ftrace);
+
+ if (reset_tracing_files(ftrace) < 0) {
+ pr_err("failed to reset ftrace\n");
+ return -1;
+ }
+
+ /* reset ftrace buffer */
+ if (write_tracing_file("trace", "0") < 0)
+ return -1;
+
+ if (set_tracing_options(ftrace) < 0)
+ return -1;
+
+ /* force to use the function_graph tracer to track duration */
+ if (write_tracing_file("current_tracer", "function_graph") < 0) {
+ pr_err("failed to set current_tracer to function_graph\n");
+ return -1;
+ }
+
+ trace_file = get_tracing_file("trace_pipe");
+ if (!trace_file) {
+ pr_err("failed to open trace_pipe\n");
+ return -1;
+ }
+
+ fd = open(trace_file, O_RDONLY);
+ if (fd < 0)
+ pr_err("failed to open trace_pipe\n");
+
+ put_tracing_file(trace_file);
+ return fd;
+}
+
+static int start_func_latency(struct perf_ftrace *ftrace)
+{
+ if (ftrace->target.use_bpf)
+ return perf_ftrace__latency_start_bpf(ftrace);
+
+ if (write_tracing_file("tracing_on", "1") < 0) {
+ pr_err("can't enable tracing\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int stop_func_latency(struct perf_ftrace *ftrace)
+{
+ if (ftrace->target.use_bpf)
+ return perf_ftrace__latency_stop_bpf(ftrace);
+
+ write_tracing_file("tracing_on", "0");
+ return 0;
+}
+
+static int read_func_latency(struct perf_ftrace *ftrace, int buckets[])
+{
+ if (ftrace->target.use_bpf)
+ return perf_ftrace__latency_read_bpf(ftrace, buckets);
+
+ return 0;
+}
+
+static int cleanup_func_latency(struct perf_ftrace *ftrace)
+{
+ if (ftrace->target.use_bpf)
+ return perf_ftrace__latency_cleanup_bpf(ftrace);
+
+ reset_tracing_files(ftrace);
+ return 0;
+}
+
+static int __cmd_latency(struct perf_ftrace *ftrace)
+{
+ int trace_fd;
+ char buf[4096];
+ char line[256];
+ struct pollfd pollfd = {
+ .events = POLLIN,
+ };
+ int buckets[NUM_BUCKET] = { };
+
+ if (!(perf_cap__capable(CAP_PERFMON) ||
+ perf_cap__capable(CAP_SYS_ADMIN))) {
+ pr_err("ftrace only works for %s!\n",
+#ifdef HAVE_LIBCAP_SUPPORT
+ "users with the CAP_PERFMON or CAP_SYS_ADMIN capability"
+#else
+ "root"
+#endif
+ );
+ return -1;
+ }
+
+ trace_fd = prepare_func_latency(ftrace);
+ if (trace_fd < 0)
+ goto out;
+
+ fcntl(trace_fd, F_SETFL, O_NONBLOCK);
+ pollfd.fd = trace_fd;
+
+ if (start_func_latency(ftrace) < 0)
+ goto out;
+
+ evlist__start_workload(ftrace->evlist);
+
+ line[0] = '\0';
+ while (!done) {
+ if (poll(&pollfd, 1, -1) < 0)
+ break;
+
+ if (pollfd.revents & POLLIN) {
+ int n = read(trace_fd, buf, sizeof(buf) - 1);
+ if (n < 0)
+ break;
+
+ make_histogram(buckets, buf, n, line, ftrace->use_nsec);
+ }
+ }
+
+ stop_func_latency(ftrace);
+
+ if (workload_exec_errno) {
+ const char *emsg = str_error_r(workload_exec_errno, buf, sizeof(buf));
+ pr_err("workload failed: %s\n", emsg);
+ goto out;
+ }
+
+ /* read remaining buffer contents */
+ while (!ftrace->target.use_bpf) {
+ int n = read(trace_fd, buf, sizeof(buf) - 1);
+ if (n <= 0)
+ break;
+ make_histogram(buckets, buf, n, line, ftrace->use_nsec);
+ }
+
+ read_func_latency(ftrace, buckets);
+
+ display_histogram(buckets, ftrace->use_nsec);
+
+out:
+ close(trace_fd);
+ cleanup_func_latency(ftrace);
+
+ return (done && !workload_exec_errno) ? 0 : -1;
}
static int perf_ftrace_config(const char *var, const char *value, void *cb)
@@ -419,6 +968,46 @@ static int perf_ftrace_config(const char *var, const char *value, void *cb)
return -1;
}
+static void list_function_cb(char *str, void *arg)
+{
+ struct strfilter *filter = (struct strfilter *)arg;
+
+ if (strfilter__compare(filter, str))
+ printf("%s", str);
+}
+
+static int opt_list_avail_functions(const struct option *opt __maybe_unused,
+ const char *str, int unset)
+{
+ struct strfilter *filter;
+ const char *err = NULL;
+ int ret;
+
+ if (unset || !str)
+ return -1;
+
+ filter = strfilter__new(str, &err);
+ if (!filter)
+ return err ? -EINVAL : -ENOMEM;
+
+ ret = strfilter__or(filter, str, &err);
+ if (ret == -EINVAL) {
+ pr_err("Filter parse error at %td.\n", err - str + 1);
+ pr_err("Source: \"%s\"\n", str);
+ pr_err(" %*c\n", (int)(err - str + 1), '^');
+ strfilter__delete(filter);
+ return ret;
+ }
+
+ ret = read_tracing_file_by_line("available_filter_functions",
+ list_function_cb, filter);
+ strfilter__delete(filter);
+ if (ret < 0)
+ return ret;
+
+ exit(0);
+}
+
static int parse_filter_func(const struct option *opt, const char *str,
int unset __maybe_unused)
{
@@ -445,55 +1034,223 @@ static void delete_filter_func(struct list_head *head)
}
}
+static int parse_buffer_size(const struct option *opt,
+ const char *str, int unset)
+{
+ unsigned long *s = (unsigned long *)opt->value;
+ static struct parse_tag tags_size[] = {
+ { .tag = 'B', .mult = 1 },
+ { .tag = 'K', .mult = 1 << 10 },
+ { .tag = 'M', .mult = 1 << 20 },
+ { .tag = 'G', .mult = 1 << 30 },
+ { .tag = 0 },
+ };
+ unsigned long val;
+
+ if (unset) {
+ *s = 0;
+ return 0;
+ }
+
+ val = parse_tag_value(str, tags_size);
+ if (val != (unsigned long) -1) {
+ if (val < 1024) {
+ pr_err("buffer size too small, must larger than 1KB.");
+ return -1;
+ }
+ *s = val;
+ return 0;
+ }
+
+ return -1;
+}
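parse_buffer_size() accepts a value with a B, K, M or G suffix via perf's parse_tag_value() and rejects anything below 1KB. A standalone equivalent of the size parsing, re-implemented here for illustration rather than calling the perf helper: "1M" yields 1048576, while a bare "4096" with no suffix is rejected, matching the option above.

#include <stdlib.h>

/* Standalone sketch of the B/K/M/G suffix handling; returns 0 on error. */
static unsigned long parse_size_sketch(const char *str)
{
	unsigned long mult, val;
	char *end;

	val = strtoul(str, &end, 10);
	switch (*end) {
	case 'B': mult = 1UL;       break;
	case 'K': mult = 1UL << 10; break;
	case 'M': mult = 1UL << 20; break;
	case 'G': mult = 1UL << 30; break;
	default:  return 0;	/* a suffix is required, as with -m above */
	}
	if (end[1] != '\0')
		return 0;
	val *= mult;
	return val >= 1024 ? val : 0;	/* enforce the 1KB minimum */
}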
+
+static int parse_func_tracer_opts(const struct option *opt,
+ const char *str, int unset)
+{
+ int ret;
+ struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
+ struct sublevel_option func_tracer_opts[] = {
+ { .name = "call-graph", .value_ptr = &ftrace->func_stack_trace },
+ { .name = "irq-info", .value_ptr = &ftrace->func_irq_info },
+ { .name = NULL, }
+ };
+
+ if (unset)
+ return 0;
+
+ ret = perf_parse_sublevel_options(str, func_tracer_opts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int parse_graph_tracer_opts(const struct option *opt,
+ const char *str, int unset)
+{
+ int ret;
+ struct perf_ftrace *ftrace = (struct perf_ftrace *) opt->value;
+ struct sublevel_option graph_tracer_opts[] = {
+ { .name = "nosleep-time", .value_ptr = &ftrace->graph_nosleep_time },
+ { .name = "noirqs", .value_ptr = &ftrace->graph_noirqs },
+ { .name = "verbose", .value_ptr = &ftrace->graph_verbose },
+ { .name = "thresh", .value_ptr = &ftrace->graph_thresh },
+ { .name = "depth", .value_ptr = &ftrace->graph_depth },
+ { .name = NULL, }
+ };
+
+ if (unset)
+ return 0;
+
+ ret = perf_parse_sublevel_options(str, graph_tracer_opts);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+enum perf_ftrace_subcommand {
+ PERF_FTRACE_NONE,
+ PERF_FTRACE_TRACE,
+ PERF_FTRACE_LATENCY,
+};
+
int cmd_ftrace(int argc, const char **argv)
{
int ret;
+ int (*cmd_func)(struct perf_ftrace *) = NULL;
struct perf_ftrace ftrace = {
.tracer = DEFAULT_TRACER,
.target = { .uid = UINT_MAX, },
};
- const char * const ftrace_usage[] = {
- "perf ftrace [<options>] [<command>]",
- "perf ftrace [<options>] -- <command> [<options>]",
- NULL
- };
- const struct option ftrace_options[] = {
- OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
- "tracer to use: function_graph(default) or function"),
+ const struct option common_options[] = {
OPT_STRING('p', "pid", &ftrace.target.pid, "pid",
- "trace on existing process id"),
+ "Trace on existing process id"),
+ /* TODO: Add short option -t after -t/--tracer can be removed. */
+ OPT_STRING(0, "tid", &ftrace.target.tid, "tid",
+ "Trace on existing thread id (exclusive to --pid)"),
OPT_INCR('v', "verbose", &verbose,
- "be more verbose"),
+ "Be more verbose"),
OPT_BOOLEAN('a', "all-cpus", &ftrace.target.system_wide,
- "system-wide collection from all CPUs"),
+ "System-wide collection from all CPUs"),
OPT_STRING('C', "cpu", &ftrace.target.cpu_list, "cpu",
- "list of cpus to monitor"),
+ "List of cpus to monitor"),
+ OPT_END()
+ };
+ const struct option ftrace_options[] = {
+ OPT_STRING('t', "tracer", &ftrace.tracer, "tracer",
+ "Tracer to use: function_graph(default) or function"),
+ OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
+ "Show available functions to filter",
+ opt_list_avail_functions, "*"),
OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
- "trace given functions only", parse_filter_func),
+ "Trace given functions using function tracer",
+ parse_filter_func),
OPT_CALLBACK('N', "notrace-funcs", &ftrace.notrace, "func",
- "do not trace given functions", parse_filter_func),
+ "Do not trace given functions", parse_filter_func),
+ OPT_CALLBACK(0, "func-opts", &ftrace, "options",
+ "Function tracer options, available options: call-graph,irq-info",
+ parse_func_tracer_opts),
OPT_CALLBACK('G', "graph-funcs", &ftrace.graph_funcs, "func",
- "Set graph filter on given functions", parse_filter_func),
+ "Trace given functions using function_graph tracer",
+ parse_filter_func),
OPT_CALLBACK('g', "nograph-funcs", &ftrace.nograph_funcs, "func",
"Set nograph filter on given functions", parse_filter_func),
- OPT_INTEGER('D', "graph-depth", &ftrace.graph_depth,
- "Max depth for function graph tracer"),
- OPT_END()
+ OPT_CALLBACK(0, "graph-opts", &ftrace, "options",
+ "Graph tracer options, available options: nosleep-time,noirqs,verbose,thresh=<n>,depth=<n>",
+ parse_graph_tracer_opts),
+ OPT_CALLBACK('m', "buffer-size", &ftrace.percpu_buffer_size, "size",
+ "Size of per cpu buffer, needs to use a B, K, M or G suffix.", parse_buffer_size),
+ OPT_BOOLEAN(0, "inherit", &ftrace.inherit,
+ "Trace children processes"),
+ OPT_INTEGER('D', "delay", &ftrace.target.initial_delay,
+ "Number of milliseconds to wait before starting tracing after program start"),
+ OPT_PARENT(common_options),
+ };
+ const struct option latency_options[] = {
+ OPT_CALLBACK('T', "trace-funcs", &ftrace.filters, "func",
+ "Show latency of given function", parse_filter_func),
+#ifdef HAVE_BPF_SKEL
+ OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf,
+ "Use BPF to measure function latency"),
+#endif
+ OPT_BOOLEAN('n', "--use-nsec", &ftrace.use_nsec,
+ "Use nano-second histogram"),
+ OPT_PARENT(common_options),
+ };
+ const struct option *options = ftrace_options;
+
+ const char * const ftrace_usage[] = {
+ "perf ftrace [<options>] [<command>]",
+ "perf ftrace [<options>] -- [<command>] [<options>]",
+ "perf ftrace {trace|latency} [<options>] [<command>]",
+ "perf ftrace {trace|latency} [<options>] -- [<command>] [<options>]",
+ NULL
};
+ enum perf_ftrace_subcommand subcmd = PERF_FTRACE_NONE;
INIT_LIST_HEAD(&ftrace.filters);
INIT_LIST_HEAD(&ftrace.notrace);
INIT_LIST_HEAD(&ftrace.graph_funcs);
INIT_LIST_HEAD(&ftrace.nograph_funcs);
+ signal(SIGINT, sig_handler);
+ signal(SIGUSR1, sig_handler);
+ signal(SIGCHLD, sig_handler);
+ signal(SIGPIPE, sig_handler);
+
ret = perf_config(perf_ftrace_config, &ftrace);
if (ret < 0)
return -1;
- argc = parse_options(argc, argv, ftrace_options, ftrace_usage,
+ if (argc > 1) {
+ if (!strcmp(argv[1], "trace")) {
+ subcmd = PERF_FTRACE_TRACE;
+ } else if (!strcmp(argv[1], "latency")) {
+ subcmd = PERF_FTRACE_LATENCY;
+ options = latency_options;
+ }
+
+ if (subcmd != PERF_FTRACE_NONE) {
+ argc--;
+ argv++;
+ }
+ }
+ /* for backward compatibility */
+ if (subcmd == PERF_FTRACE_NONE)
+ subcmd = PERF_FTRACE_TRACE;
+
+ argc = parse_options(argc, argv, options, ftrace_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+ if (argc < 0) {
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+
+ /* Make system wide (-a) the default target. */
if (!argc && target__none(&ftrace.target))
- usage_with_options(ftrace_usage, ftrace_options);
+ ftrace.target.system_wide = true;
+
+ switch (subcmd) {
+ case PERF_FTRACE_TRACE:
+ cmd_func = __cmd_ftrace;
+ break;
+ case PERF_FTRACE_LATENCY:
+ if (list_empty(&ftrace.filters)) {
+ pr_err("Should provide a function to measure\n");
+ parse_options_usage(ftrace_usage, options, "T", 1);
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
+ cmd_func = __cmd_latency;
+ break;
+ case PERF_FTRACE_NONE:
+ default:
+ pr_err("Invalid subcommand\n");
+ ret = -EINVAL;
+ goto out_delete_filters;
+ }
ret = target__validate(&ftrace.target);
if (ret) {
@@ -510,11 +1267,19 @@ int cmd_ftrace(int argc, const char **argv)
goto out_delete_filters;
}
- ret = perf_evlist__create_maps(ftrace.evlist, &ftrace.target);
+ ret = evlist__create_maps(ftrace.evlist, &ftrace.target);
if (ret < 0)
goto out_delete_evlist;
- ret = __cmd_ftrace(&ftrace, argc, argv);
+ if (argc) {
+ ret = evlist__prepare_workload(ftrace.evlist, &ftrace.target,
+ argv, false,
+ ftrace__workload_exec_failed_signal);
+ if (ret < 0)
+ goto out_delete_evlist;
+ }
+
+ ret = cmd_func(&ftrace);
out_delete_evlist:
evlist__delete(ftrace.evlist);
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 3976aebe3677..3e7f52054fac 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -14,6 +14,7 @@
#include <subcmd/run-command.h>
#include <subcmd/help.h>
#include "util/debug.h"
+#include "util/util.h"
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 7e124a7b8bfd..61766eead4f4 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -10,6 +10,7 @@
#include "util/color.h"
#include "util/dso.h"
+#include "util/vdso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/map.h"
@@ -20,25 +21,101 @@
#include "util/data.h"
#include "util/auxtrace.h"
#include "util/jit.h"
+#include "util/string2.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/thread.h"
-#include <linux/err.h>
+#include "util/namespaces.h"
+#include "util/util.h"
+#include "util/tsc.h"
+
+#include <internal/lib.h>
+#include <linux/err.h>
#include <subcmd/parse-options.h>
+#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <linux/hash.h>
+#include <ctype.h>
#include <errno.h>
#include <signal.h>
+#include <inttypes.h>
+
+struct guest_event {
+ struct perf_sample sample;
+ union perf_event *event;
+ char event_buf[PERF_SAMPLE_MAX_SIZE];
+};
+
+struct guest_id {
+ /* hlist_node must be first, see free_hlist() */
+ struct hlist_node node;
+ u64 id;
+ u64 host_id;
+ u32 vcpu;
+};
+
+struct guest_tid {
+ /* hlist_node must be first, see free_hlist() */
+ struct hlist_node node;
+ /* Thread ID of QEMU thread */
+ u32 tid;
+ u32 vcpu;
+};
+
+struct guest_vcpu {
+ /* Current host CPU */
+ u32 cpu;
+ /* Thread ID of QEMU thread */
+ u32 tid;
+};
+
+struct guest_session {
+ char *perf_data_file;
+ u32 machine_pid;
+ u64 time_offset;
+ double time_scale;
+ struct perf_tool tool;
+ struct perf_data data;
+ struct perf_session *session;
+ char *tmp_file_name;
+ int tmp_fd;
+ struct perf_tsc_conversion host_tc;
+ struct perf_tsc_conversion guest_tc;
+ bool copy_kcore_dir;
+ bool have_tc;
+ bool fetched;
+ bool ready;
+ u16 dflt_id_hdr_size;
+ u64 dflt_id;
+ u64 highest_id;
+ /* Array of guest_vcpu */
+ struct guest_vcpu *vcpu;
+ size_t vcpu_cnt;
+ /* Hash table for guest_id */
+ struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
+ /* Hash table for guest_tid */
+ struct hlist_head tids[PERF_EVLIST__HLIST_SIZE];
+ /* Place to stash next guest event */
+ struct guest_event ev;
+};
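The guest_id and guest_tid structs insist that their hlist_node comes first and point at free_hlist(), which is outside this hunk. A hedged guess at why: with the node first, the entry address equals the node address, so one generic walker can free both hash tables. Sketch only; the real free_hlist() may differ:

/*
 * Sketch only; the real free_hlist() may differ.  With the node first,
 * the entry address and the node address coincide, so one walker can
 * free either table.
 */
#include <linux/list.h>
#include <stdlib.h>

static void free_hlist_sketch(struct hlist_head *heads, size_t nr)
{
	struct hlist_node *pos, *n;
	size_t i;

	for (i = 0; i < nr; i++) {
		hlist_for_each_safe(pos, n, &heads[i])
			free(pos);	/* valid only because node is the first member */
	}
}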
struct perf_inject {
struct perf_tool tool;
struct perf_session *session;
bool build_ids;
+ bool build_id_all;
bool sched_stat;
bool have_auxtrace;
bool strip;
bool jit_mode;
+ bool in_place_update;
+ bool in_place_update_dry_run;
+ bool is_pipe;
+ bool copy_kcore_dir;
const char *input_name;
struct perf_data output;
u64 bytes_written;
@@ -46,14 +123,20 @@ struct perf_inject {
struct list_head samples;
struct itrace_synth_opts itrace_synth_opts;
char event_copy[PERF_SAMPLE_MAX_SIZE];
+ struct perf_file_section secs[HEADER_FEAT_BITS];
+ struct guest_session guest_session;
+ struct strlist *known_build_ids;
};
struct event_entry {
struct list_head node;
u32 tid;
- union perf_event event[0];
+ union perf_event event[];
};
+static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
+ struct machine *machine, u8 cpumode, u32 flags);
+
static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
{
ssize_t size;
@@ -97,6 +180,14 @@ static int perf_event__repipe_op2_synth(struct perf_session *session,
return perf_event__repipe_synth(session->tool, event);
}
+static int perf_event__repipe_op4_synth(struct perf_session *session,
+ union perf_event *event,
+ u64 data __maybe_unused,
+ const char *str __maybe_unused)
+{
+ return perf_event__repipe_synth(session->tool, event);
+}
+
static int perf_event__repipe_attr(struct perf_tool *tool,
union perf_event *event,
struct evlist **pevlist)
@@ -109,22 +200,29 @@ static int perf_event__repipe_attr(struct perf_tool *tool,
if (ret)
return ret;
- if (!inject->output.is_pipe)
+ if (!inject->is_pipe)
return 0;
return perf_event__repipe_synth(tool, event);
}
+static int perf_event__repipe_event_update(struct perf_tool *tool,
+ union perf_event *event,
+ struct evlist **pevlist __maybe_unused)
+{
+ return perf_event__repipe_synth(tool, event);
+}
+
#ifdef HAVE_AUXTRACE_SUPPORT
-static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
+static int copy_bytes(struct perf_inject *inject, struct perf_data *data, off_t size)
{
char buf[4096];
ssize_t ssz;
int ret;
while (size > 0) {
- ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
+ ssz = perf_data__read(data, buf, min(size, (off_t)sizeof(buf)));
if (ssz < 0)
return -errno;
ret = output_bytes(inject, buf, ssz);
@@ -162,7 +260,7 @@ static s64 perf_event__repipe_auxtrace(struct perf_session *session,
ret = output_bytes(inject, event, event->header.size);
if (ret < 0)
return ret;
- ret = copy_bytes(inject, perf_data__fd(session->data),
+ ret = copy_bytes(inject, session->data,
event->auxtrace.size);
} else {
ret = output_bytes(inject, event,
@@ -292,7 +390,7 @@ static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
* if jit marker, then inject jit mmaps and generate ELF images
*/
ret = jit_process(inject->session, &inject->output, machine,
- event->mmap.filename, sample->pid, &n);
+ event->mmap.filename, event->mmap.pid, event->mmap.tid, &n);
if (ret < 0)
return ret;
if (ret) {
@@ -303,6 +401,71 @@ static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
}
#endif
+static struct dso *findnew_dso(int pid, int tid, const char *filename,
+ struct dso_id *id, struct machine *machine)
+{
+ struct thread *thread;
+ struct nsinfo *nsi = NULL;
+ struct nsinfo *nnsi;
+ struct dso *dso;
+ bool vdso;
+
+ thread = machine__findnew_thread(machine, pid, tid);
+ if (thread == NULL) {
+ pr_err("cannot find or create a task %d/%d.\n", tid, pid);
+ return NULL;
+ }
+
+ vdso = is_vdso_map(filename);
+ nsi = nsinfo__get(thread->nsinfo);
+
+ if (vdso) {
+ /* The vdso maps are always on the host and not the
+ * container. Ensure that we don't use setns to look
+ * them up.
+ */
+ nnsi = nsinfo__copy(nsi);
+ if (nnsi) {
+ nsinfo__put(nsi);
+ nsinfo__clear_need_setns(nnsi);
+ nsi = nnsi;
+ }
+ dso = machine__findnew_vdso(machine, thread);
+ } else {
+ dso = machine__findnew_dso_id(machine, filename, id);
+ }
+
+ if (dso) {
+ mutex_lock(&dso->lock);
+ nsinfo__put(dso->nsinfo);
+ dso->nsinfo = nsi;
+ mutex_unlock(&dso->lock);
+ } else
+ nsinfo__put(nsi);
+
+ thread__put(thread);
+ return dso;
+}
+
+static int perf_event__repipe_buildid_mmap(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct dso *dso;
+
+ dso = findnew_dso(event->mmap.pid, event->mmap.tid,
+ event->mmap.filename, NULL, machine);
+
+ if (dso && !dso->hit) {
+ dso->hit = 1;
+ dso__inject_build_id(dso, tool, machine, sample->cpumode, 0);
+ }
+ dso__put(dso);
+
+ return perf_event__repipe(tool, event, sample, machine);
+}
+
static int perf_event__repipe_mmap2(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -313,6 +476,18 @@ static int perf_event__repipe_mmap2(struct perf_tool *tool,
err = perf_event__process_mmap2(tool, event, sample, machine);
perf_event__repipe(tool, event, sample, machine);
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ struct dso *dso;
+
+ dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
+ event->mmap2.filename, NULL, machine);
+ if (dso) {
+ /* mark it not to inject build-id */
+ dso->hit = 1;
+ }
+ dso__put(dso);
+ }
+
return err;
}
@@ -330,7 +505,7 @@ static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
* if jit marker, then inject jit mmaps and generate ELF images
*/
ret = jit_process(inject->session, &inject->output, machine,
- event->mmap2.filename, sample->pid, &n);
+ event->mmap2.filename, event->mmap2.pid, event->mmap2.tid, &n);
if (ret < 0)
return ret;
if (ret) {
@@ -341,6 +516,47 @@ static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
}
#endif
+static int perf_event__repipe_buildid_mmap2(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct dso_id dso_id = {
+ .maj = event->mmap2.maj,
+ .min = event->mmap2.min,
+ .ino = event->mmap2.ino,
+ .ino_generation = event->mmap2.ino_generation,
+ };
+ struct dso *dso;
+
+ if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
+ /* cannot use dso_id since it'd have invalid info */
+ dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
+ event->mmap2.filename, NULL, machine);
+ if (dso) {
+ /* mark it not to inject build-id */
+ dso->hit = 1;
+ }
+ dso__put(dso);
+ perf_event__repipe(tool, event, sample, machine);
+ return 0;
+ }
+
+ dso = findnew_dso(event->mmap2.pid, event->mmap2.tid,
+ event->mmap2.filename, &dso_id, machine);
+
+ if (dso && !dso->hit) {
+ dso->hit = 1;
+ dso__inject_build_id(dso, tool, machine, sample->cpumode,
+ event->mmap2.flags);
+ }
+ dso__put(dso);
+
+ perf_event__repipe(tool, event, sample, machine);
+
+ return 0;
+}
+
static int perf_event__repipe_fork(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -392,47 +608,125 @@ static int perf_event__repipe_exit(struct perf_tool *tool,
return err;
}
+#ifdef HAVE_LIBTRACEEVENT
static int perf_event__repipe_tracing_data(struct perf_session *session,
union perf_event *event)
{
- int err;
-
perf_event__repipe_synth(session->tool, event);
- err = perf_event__process_tracing_data(session, event);
- return err;
+ return perf_event__process_tracing_data(session, event);
}
+#endif
static int dso__read_build_id(struct dso *dso)
{
+ struct nscookie nsc;
+
if (dso->has_build_id)
return 0;
- if (filename__read_build_id(dso->long_name, dso->build_id,
- sizeof(dso->build_id)) > 0) {
+ mutex_lock(&dso->lock);
+ nsinfo__mountns_enter(dso->nsinfo, &nsc);
+ if (filename__read_build_id(dso->long_name, &dso->bid) > 0)
dso->has_build_id = true;
- return 0;
+ else if (dso->nsinfo) {
+ char *new_name = dso__filename_with_chroot(dso, dso->long_name);
+
+ if (new_name && filename__read_build_id(new_name, &dso->bid) > 0)
+ dso->has_build_id = true;
+ free(new_name);
}
+ nsinfo__mountns_exit(&nsc);
+ mutex_unlock(&dso->lock);
- return -1;
+ return dso->has_build_id ? 0 : -1;
+}
+
+static struct strlist *perf_inject__parse_known_build_ids(
+ const char *known_build_ids_string)
+{
+ struct str_node *pos, *tmp;
+ struct strlist *known_build_ids;
+ int bid_len;
+
+ known_build_ids = strlist__new(known_build_ids_string, NULL);
+ if (known_build_ids == NULL)
+ return NULL;
+ strlist__for_each_entry_safe(pos, tmp, known_build_ids) {
+ const char *build_id, *dso_name;
+
+ build_id = skip_spaces(pos->s);
+ dso_name = strchr(build_id, ' ');
+ if (dso_name == NULL) {
+ strlist__remove(known_build_ids, pos);
+ continue;
+ }
+ bid_len = dso_name - pos->s;
+ dso_name = skip_spaces(dso_name);
+ if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE) {
+ strlist__remove(known_build_ids, pos);
+ continue;
+ }
+ for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
+ if (!isxdigit(build_id[2 * ix]) ||
+ !isxdigit(build_id[2 * ix + 1])) {
+ strlist__remove(known_build_ids, pos);
+ break;
+ }
+ }
+ }
+ return known_build_ids;
+}
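/*
 * Illustrative sketch (not part of the patch): each --known-build-ids entry is
 * a "<hex build id> <dso path>" pair, and perf_inject__parse_known_build_ids()
 * drops entries whose build-id part is not an even-length hex string shorter
 * than SBUILD_ID_SIZE.  Essentially the same checks as a standalone helper,
 * using plain libc; SBUILD_ID_SIZE is assumed to be 41 (20-byte SHA-1 as hex
 * plus NUL), and everything else here is hypothetical test code.
 */
#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define SBUILD_ID_SIZE 41

static bool known_build_id_entry_is_valid(const char *entry)
{
	const char *build_id, *dso_name;
	int bid_len;

	while (isspace((unsigned char)*entry))	/* skip_spaces() equivalent */
		entry++;
	build_id = entry;
	dso_name = strchr(build_id, ' ');	/* build id and path are space separated */
	if (!dso_name)
		return false;
	bid_len = dso_name - build_id;
	if (bid_len % 2 != 0 || bid_len >= SBUILD_ID_SIZE)
		return false;			/* must be whole bytes and fit an sbuild_id */
	for (int i = 0; i < bid_len; i++) {
		if (!isxdigit((unsigned char)build_id[i]))
			return false;		/* only hex digits allowed */
	}
	return true;
}

int main(void)
{
	printf("%d\n", known_build_id_entry_is_valid(
		"1234abcd1234abcd1234abcd1234abcd1234abcd /usr/lib/libc.so.6"));
	printf("%d\n", known_build_id_entry_is_valid("123 /bin/ls"));	/* odd length */
	printf("%d\n", known_build_id_entry_is_valid("deadbeef"));	/* no path */
	return 0;
}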
+
+static bool perf_inject__lookup_known_build_id(struct perf_inject *inject,
+ struct dso *dso)
+{
+ struct str_node *pos;
+ int bid_len;
+
+ strlist__for_each_entry(pos, inject->known_build_ids) {
+ const char *build_id, *dso_name;
+
+ build_id = skip_spaces(pos->s);
+ dso_name = strchr(build_id, ' ');
+ bid_len = dso_name - pos->s;
+ dso_name = skip_spaces(dso_name);
+ if (strcmp(dso->long_name, dso_name))
+ continue;
+ for (int ix = 0; 2 * ix + 1 < bid_len; ++ix) {
+ dso->bid.data[ix] = (hex(build_id[2 * ix]) << 4 |
+ hex(build_id[2 * ix + 1]));
+ }
+ dso->bid.size = bid_len / 2;
+ dso->has_build_id = 1;
+ return true;
+ }
+ return false;
}
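/*
 * Illustrative sketch (not part of the patch): once a --known-build-ids entry
 * matches a DSO, the hex string is decoded two characters per byte into the
 * binary build id, which is what the hex() loop above does.  Standalone
 * decoder under the assumption that a build id is at most 20 bytes (SHA-1);
 * the helper names and the example string are made up.
 */
#include <stdio.h>
#include <string.h>

#define BUILD_ID_SIZE 20

static int hex_digit(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return -1;
}

/* Decode an even-length hex string; returns the number of bytes or -1. */
static int build_id_decode(const char *hexstr, size_t len,
			   unsigned char bid[BUILD_ID_SIZE])
{
	if (len % 2 || len / 2 > BUILD_ID_SIZE)
		return -1;
	for (size_t i = 0; i < len / 2; i++) {
		int hi = hex_digit(hexstr[2 * i]);
		int lo = hex_digit(hexstr[2 * i + 1]);

		if (hi < 0 || lo < 0)
			return -1;
		bid[i] = (unsigned char)(hi << 4 | lo);
	}
	return (int)(len / 2);
}

int main(void)
{
	unsigned char bid[BUILD_ID_SIZE];
	const char *s = "abad1dea";
	int n = build_id_decode(s, strlen(s), bid);

	for (int i = 0; i < n; i++)
		printf("%02x", bid[i]);
	printf("\n");
	return 0;
}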
static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
- struct machine *machine)
+ struct machine *machine, u8 cpumode, u32 flags)
{
- u16 misc = PERF_RECORD_MISC_USER;
+ struct perf_inject *inject = container_of(tool, struct perf_inject,
+ tool);
int err;
+ if (is_anon_memory(dso->long_name) || flags & MAP_HUGETLB)
+ return 0;
+ if (is_no_dso_memory(dso->long_name))
+ return 0;
+
+ if (inject->known_build_ids != NULL &&
+ perf_inject__lookup_known_build_id(inject, dso))
+ return 1;
+
if (dso__read_build_id(dso) < 0) {
pr_debug("no build_id found for %s\n", dso->long_name);
return -1;
}
- if (dso->kernel)
- misc = PERF_RECORD_MISC_KERNEL;
-
- err = perf_event__synthesize_build_id(tool, dso, misc, perf_event__repipe,
- machine);
+ err = perf_event__synthesize_build_id(tool, dso, cpumode,
+ perf_event__repipe, machine);
if (err) {
pr_err("Can't synthesize build_id event for %s\n", dso->long_name);
return -1;
@@ -441,11 +735,10 @@ static int dso__inject_build_id(struct dso *dso, struct perf_tool *tool,
return 0;
}
-static int perf_event__inject_buildid(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct evsel *evsel __maybe_unused,
- struct machine *machine)
+int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample,
+ struct evsel *evsel __maybe_unused,
+ struct machine *machine)
{
struct addr_location al;
struct thread *thread;
@@ -458,21 +751,12 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
}
if (thread__find_map(thread, sample->cpumode, sample->ip, &al)) {
- if (!al.map->dso->hit) {
- al.map->dso->hit = 1;
- if (map__load(al.map) >= 0) {
- dso__inject_build_id(al.map->dso, tool, machine);
- /*
- * If this fails, too bad, let the other side
- * account this as unresolved.
- */
- } else {
-#ifdef HAVE_LIBELF_SUPPORT
- pr_warning("no symbols found in %s, maybe "
- "install a debug package?\n",
- al.map->dso->long_name);
-#endif
- }
+ struct dso *dso = map__dso(al.map);
+
+ if (!dso->hit) {
+ dso->hit = 1;
+ dso__inject_build_id(dso, tool, machine,
+ sample->cpumode, map__flags(al.map));
}
}
@@ -526,6 +810,7 @@ static int perf_inject__sched_switch(struct perf_tool *tool,
return 0;
}
+#ifdef HAVE_LIBTRACEEVENT
static int perf_inject__sched_stat(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample,
@@ -536,7 +821,7 @@ static int perf_inject__sched_stat(struct perf_tool *tool,
union perf_event *event_sw;
struct perf_sample sample_sw;
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
- u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ u32 pid = evsel__intval(evsel, sample, "pid");
list_for_each_entry(ent, &inject->samples, node) {
if (pid == ent->tid)
@@ -546,7 +831,7 @@ static int perf_inject__sched_stat(struct perf_tool *tool,
return 0;
found:
event_sw = &ent->event[0];
- perf_evsel__parse_sample(evsel, event_sw, &sample_sw);
+ evsel__parse_sample(evsel, event_sw, &sample_sw);
sample_sw.period = sample->period;
sample_sw.time = sample->time;
@@ -555,17 +840,852 @@ found:
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
+#endif
+
+static struct guest_vcpu *guest_session__vcpu(struct guest_session *gs, u32 vcpu)
+{
+ if (realloc_array_as_needed(gs->vcpu, gs->vcpu_cnt, vcpu, NULL))
+ return NULL;
+ return &gs->vcpu[vcpu];
+}
+
+static int guest_session__output_bytes(struct guest_session *gs, void *buf, size_t sz)
+{
+ ssize_t ret = writen(gs->tmp_fd, buf, sz);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int guest_session__repipe(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct guest_session *gs = container_of(tool, struct guest_session, tool);
+
+ return guest_session__output_bytes(gs, event, event->header.size);
+}
+
+static int guest_session__map_tid(struct guest_session *gs, u32 tid, u32 vcpu)
+{
+ struct guest_tid *guest_tid = zalloc(sizeof(*guest_tid));
+ int hash;
+
+ if (!guest_tid)
+ return -ENOMEM;
+
+ guest_tid->tid = tid;
+ guest_tid->vcpu = vcpu;
+ hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&guest_tid->node, &gs->tids[hash]);
+
+ return 0;
+}
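/*
 * Illustrative sketch (not part of the patch): the guest tid -> VCPU mapping
 * above is a fixed array of hash buckets with chained entries, keyed by a
 * 32-bit hash of the tid.  This standalone version uses a plain next-pointer
 * chain and a multiplicative hash in place of the kernel's hlist/hash_32
 * helpers; the names and bucket count are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TID_HASH_BITS 8
#define TID_HASH_SIZE (1u << TID_HASH_BITS)

struct tid_entry {
	uint32_t tid;
	uint32_t vcpu;
	struct tid_entry *next;
};

static struct tid_entry *tid_buckets[TID_HASH_SIZE];

static unsigned int tid_hash(uint32_t tid)
{
	/* multiplicative hash, keep the top TID_HASH_BITS bits */
	return (tid * 0x61C88647u) >> (32 - TID_HASH_BITS);
}

static int map_tid(uint32_t tid, uint32_t vcpu)
{
	struct tid_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;
	e->tid = tid;
	e->vcpu = vcpu;
	e->next = tid_buckets[tid_hash(tid)];	/* push onto the bucket's chain */
	tid_buckets[tid_hash(tid)] = e;
	return 0;
}

static struct tid_entry *lookup_tid(uint32_t tid)
{
	for (struct tid_entry *e = tid_buckets[tid_hash(tid)]; e; e = e->next)
		if (e->tid == tid)
			return e;
	return NULL;
}

int main(void)
{
	map_tid(12345, 0);
	map_tid(12346, 1);
	printf("tid 12346 -> vcpu %u\n", lookup_tid(12346)->vcpu);
	return 0;
}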
+
+static int host_peek_vm_comms_cb(struct perf_session *session __maybe_unused,
+ union perf_event *event,
+ u64 offset __maybe_unused, void *data)
+{
+ struct guest_session *gs = data;
+ unsigned int vcpu;
+ struct guest_vcpu *guest_vcpu;
+ int ret;
+
+ if (event->header.type != PERF_RECORD_COMM ||
+ event->comm.pid != gs->machine_pid)
+ return 0;
+
+ /*
+ * The QEMU option -name debug-threads=on causes thread names to be
+ * formatted as below, although this is not an ABI. libvirt also seems
+ * to use it by default. Here we rely on it to tell us which thread is
+ * which VCPU.
+ */
+ ret = sscanf(event->comm.comm, "CPU %u/KVM", &vcpu);
+ if (ret <= 0)
+ return ret;
+ pr_debug("Found VCPU: tid %u comm %s vcpu %u\n",
+ event->comm.tid, event->comm.comm, vcpu);
+ if (vcpu > INT_MAX) {
+ pr_err("Invalid VCPU %u\n", vcpu);
+ return -EINVAL;
+ }
+ guest_vcpu = guest_session__vcpu(gs, vcpu);
+ if (!guest_vcpu)
+ return -ENOMEM;
+ if (guest_vcpu->tid && guest_vcpu->tid != event->comm.tid) {
+ pr_err("Fatal error: Two threads found with the same VCPU\n");
+ return -EINVAL;
+ }
+ guest_vcpu->tid = event->comm.tid;
+
+ return guest_session__map_tid(gs, event->comm.tid, vcpu);
+}
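/*
 * Illustrative sketch (not part of the patch): with "-name debug-threads=on",
 * QEMU names its VCPU threads "CPU <n>/KVM", and the callback above relies on
 * that (non-ABI) convention to map host tids to VCPU numbers.  A minimal
 * standalone parser for such comm strings; the sample thread names are made up.
 */
#include <limits.h>
#include <stdio.h>

/* Returns the VCPU number, or -1 if the comm is not a QEMU VCPU thread name. */
static int vcpu_from_comm(const char *comm)
{
	unsigned int vcpu;

	if (sscanf(comm, "CPU %u/KVM", &vcpu) != 1)
		return -1;
	if (vcpu > INT_MAX)	/* same sanity check as host_peek_vm_comms_cb() */
		return -1;
	return (int)vcpu;
}

int main(void)
{
	const char *comms[] = { "CPU 0/KVM", "CPU 3/KVM", "qemu-system-x86", "IO mon_iothread" };

	for (unsigned int i = 0; i < sizeof(comms) / sizeof(comms[0]); i++)
		printf("%-16s -> vcpu %d\n", comms[i], vcpu_from_comm(comms[i]));
	return 0;
}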
+
+static int host_peek_vm_comms(struct perf_session *session, struct guest_session *gs)
+{
+ return perf_session__peek_events(session, session->header.data_offset,
+ session->header.data_size,
+ host_peek_vm_comms_cb, gs);
+}
+
+static bool evlist__is_id_used(struct evlist *evlist, u64 id)
+{
+ return evlist__id2sid(evlist, id);
+}
+
+static u64 guest_session__allocate_new_id(struct guest_session *gs, struct evlist *host_evlist)
+{
+ do {
+ gs->highest_id += 1;
+ } while (!gs->highest_id || evlist__is_id_used(host_evlist, gs->highest_id));
+
+ return gs->highest_id;
+}
+
+static int guest_session__map_id(struct guest_session *gs, u64 id, u64 host_id, u32 vcpu)
+{
+ struct guest_id *guest_id = zalloc(sizeof(*guest_id));
+ int hash;
+
+ if (!guest_id)
+ return -ENOMEM;
+
+ guest_id->id = id;
+ guest_id->host_id = host_id;
+ guest_id->vcpu = vcpu;
+ hash = hash_64(guest_id->id, PERF_EVLIST__HLIST_BITS);
+ hlist_add_head(&guest_id->node, &gs->heads[hash]);
+
+ return 0;
+}
+
+static u64 evlist__find_highest_id(struct evlist *evlist)
+{
+ struct evsel *evsel;
+ u64 highest_id = 1;
+
+ evlist__for_each_entry(evlist, evsel) {
+ u32 j;
+
+ for (j = 0; j < evsel->core.ids; j++) {
+ u64 id = evsel->core.id[j];
+
+ if (id > highest_id)
+ highest_id = id;
+ }
+ }
+
+ return highest_id;
+}
+
+static int guest_session__map_ids(struct guest_session *gs, struct evlist *host_evlist)
+{
+ struct evlist *evlist = gs->session->evlist;
+ struct evsel *evsel;
+ int ret;
+
+ evlist__for_each_entry(evlist, evsel) {
+ u32 j;
+
+ for (j = 0; j < evsel->core.ids; j++) {
+ struct perf_sample_id *sid;
+ u64 host_id;
+ u64 id;
+
+ id = evsel->core.id[j];
+ sid = evlist__id2sid(evlist, id);
+ if (!sid || sid->cpu.cpu == -1)
+ continue;
+ host_id = guest_session__allocate_new_id(gs, host_evlist);
+ ret = guest_session__map_id(gs, id, host_id, sid->cpu.cpu);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static struct guest_id *guest_session__lookup_id(struct guest_session *gs, u64 id)
+{
+ struct hlist_head *head;
+ struct guest_id *guest_id;
+ int hash;
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &gs->heads[hash];
+
+ hlist_for_each_entry(guest_id, head, node)
+ if (guest_id->id == id)
+ return guest_id;
+
+ return NULL;
+}
+
+static int process_attr(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+
+ return perf_event__process_attr(tool, event, &inject->session->evlist);
+}
+
+static int guest_session__add_attr(struct guest_session *gs, struct evsel *evsel)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+ struct perf_event_attr attr = evsel->core.attr;
+ u64 *id_array;
+ u32 *vcpu_array;
+ int ret = -ENOMEM;
+ u32 i;
+
+ id_array = calloc(evsel->core.ids, sizeof(*id_array));
+ if (!id_array)
+ return -ENOMEM;
+
+ vcpu_array = calloc(evsel->core.ids, sizeof(*vcpu_array));
+ if (!vcpu_array)
+ goto out;
+
+ for (i = 0; i < evsel->core.ids; i++) {
+ u64 id = evsel->core.id[i];
+ struct guest_id *guest_id = guest_session__lookup_id(gs, id);
+
+ if (!guest_id) {
+ pr_err("Failed to find guest id %"PRIu64"\n", id);
+ ret = -EINVAL;
+ goto out;
+ }
+ id_array[i] = guest_id->host_id;
+ vcpu_array[i] = guest_id->vcpu;
+ }
+
+ attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
+ attr.exclude_host = 1;
+ attr.exclude_guest = 0;
+
+ ret = perf_event__synthesize_attr(&inject->tool, &attr, evsel->core.ids,
+ id_array, process_attr);
+ if (ret)
+ pr_err("Failed to add guest attr.\n");
+
+ for (i = 0; i < evsel->core.ids; i++) {
+ struct perf_sample_id *sid;
+ u32 vcpu = vcpu_array[i];
+
+ sid = evlist__id2sid(inject->session->evlist, id_array[i]);
+ /* Guest event is per-thread from the host point of view */
+ sid->cpu.cpu = -1;
+ sid->tid = gs->vcpu[vcpu].tid;
+ sid->machine_pid = gs->machine_pid;
+ sid->vcpu.cpu = vcpu;
+ }
+out:
+ free(vcpu_array);
+ free(id_array);
+ return ret;
+}
+
+static int guest_session__add_attrs(struct guest_session *gs)
+{
+ struct evlist *evlist = gs->session->evlist;
+ struct evsel *evsel;
+ int ret;
+
+ evlist__for_each_entry(evlist, evsel) {
+ ret = guest_session__add_attr(gs, evsel);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int synthesize_id_index(struct perf_inject *inject, size_t new_cnt)
+{
+ struct perf_session *session = inject->session;
+ struct evlist *evlist = session->evlist;
+ struct machine *machine = &session->machines.host;
+ size_t from = evlist->core.nr_entries - new_cnt;
+
+ return __perf_event__synthesize_id_index(&inject->tool, perf_event__repipe,
+ evlist, machine, from);
+}
+
+static struct guest_tid *guest_session__lookup_tid(struct guest_session *gs, u32 tid)
+{
+ struct hlist_head *head;
+ struct guest_tid *guest_tid;
+ int hash;
+
+ hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
+ head = &gs->tids[hash];
+
+ hlist_for_each_entry(guest_tid, head, node)
+ if (guest_tid->tid == tid)
+ return guest_tid;
+
+ return NULL;
+}
+
+static bool dso__is_in_kernel_space(struct dso *dso)
+{
+ if (dso__is_vdso(dso))
+ return false;
+
+ return dso__is_kcore(dso) ||
+ dso->kernel ||
+ is_kernel_module(dso->long_name, PERF_RECORD_MISC_CPUMODE_UNKNOWN);
+}
+
+static u64 evlist__first_id(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.ids)
+ return evsel->core.id[0];
+ }
+ return 0;
+}
+
+static int process_build_id(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+
+ return perf_event__process_build_id(inject->session, event);
+}
+
+static int synthesize_build_id(struct perf_inject *inject, struct dso *dso, pid_t machine_pid)
+{
+ struct machine *machine = perf_session__findnew_machine(inject->session, machine_pid);
+ u8 cpumode = dso__is_in_kernel_space(dso) ?
+ PERF_RECORD_MISC_GUEST_KERNEL :
+ PERF_RECORD_MISC_GUEST_USER;
+
+ if (!machine)
+ return -ENOMEM;
+
+ dso->hit = 1;
+
+ return perf_event__synthesize_build_id(&inject->tool, dso, cpumode,
+ process_build_id, machine);
+}
+
+static int guest_session__add_build_ids(struct guest_session *gs)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+ struct machine *machine = &gs->session->machines.host;
+ struct dso *dso;
+ int ret;
+
+ /* Build IDs will be put in the Build ID feature section */
+ perf_header__set_feat(&inject->session->header, HEADER_BUILD_ID);
+
+ dsos__for_each_with_build_id(dso, &machine->dsos.head) {
+ ret = synthesize_build_id(inject, dso, gs->machine_pid);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int guest_session__ksymbol_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct guest_session *gs = container_of(tool, struct guest_session, tool);
+
+ /* Only support out-of-line i.e. no BPF support */
+ if (event->ksymbol.ksym_type != PERF_RECORD_KSYMBOL_TYPE_OOL)
+ return 0;
+
+ return guest_session__output_bytes(gs, event, event->header.size);
+}
+
+static int guest_session__start(struct guest_session *gs, const char *name, bool force)
+{
+ char tmp_file_name[] = "/tmp/perf-inject-guest_session-XXXXXX";
+ struct perf_session *session;
+ int ret;
+
+ /* Only these events will be injected */
+ gs->tool.mmap = guest_session__repipe;
+ gs->tool.mmap2 = guest_session__repipe;
+ gs->tool.comm = guest_session__repipe;
+ gs->tool.fork = guest_session__repipe;
+ gs->tool.exit = guest_session__repipe;
+ gs->tool.lost = guest_session__repipe;
+ gs->tool.context_switch = guest_session__repipe;
+ gs->tool.ksymbol = guest_session__ksymbol_event;
+ gs->tool.text_poke = guest_session__repipe;
+ /*
+ * Processing a build ID creates a struct dso with that build ID. Later,
+ * all guest dsos are iterated and the build IDs processed into the host
+ * session where they will be output to the Build ID feature section
+ * when the perf.data file header is written.
+ */
+ gs->tool.build_id = perf_event__process_build_id;
+ /* Process the id index to know what VCPU an ID belongs to */
+ gs->tool.id_index = perf_event__process_id_index;
+
+ gs->tool.ordered_events = true;
+ gs->tool.ordering_requires_timestamps = true;
+
+ gs->data.path = name;
+ gs->data.force = force;
+ gs->data.mode = PERF_DATA_MODE_READ;
+
+ session = perf_session__new(&gs->data, &gs->tool);
+ if (IS_ERR(session))
+ return PTR_ERR(session);
+ gs->session = session;
+
+ /*
+ * Initial events have zero'd ID samples. Get default ID sample size
+ * used for removing them.
+ */
+ gs->dflt_id_hdr_size = session->machines.host.id_hdr_size;
+ /* And default ID for adding back a host-compatible ID sample */
+ gs->dflt_id = evlist__first_id(session->evlist);
+ if (!gs->dflt_id) {
+ pr_err("Guest data has no sample IDs");
+ return -EINVAL;
+ }
+
+ /* Temporary file for guest events */
+ gs->tmp_file_name = strdup(tmp_file_name);
+ if (!gs->tmp_file_name)
+ return -ENOMEM;
+ gs->tmp_fd = mkstemp(gs->tmp_file_name);
+ if (gs->tmp_fd < 0)
+ return -errno;
+
+ if (zstd_init(&gs->session->zstd_data, 0) < 0)
+ pr_warning("Guest session decompression initialization failed.\n");
+
+ /*
+ * perf does not support processing 2 sessions simultaneously, so output
+ * guest events to a temporary file.
+ */
+ ret = perf_session__process_events(gs->session);
+ if (ret)
+ return ret;
+
+ if (lseek(gs->tmp_fd, 0, SEEK_SET))
+ return -errno;
+
+ return 0;
+}
+
+/* Free hlist nodes assuming hlist_node is the first member of hlist entries */
+static void free_hlist(struct hlist_head *heads, size_t hlist_sz)
+{
+ struct hlist_node *pos, *n;
+ size_t i;
+
+ for (i = 0; i < hlist_sz; ++i) {
+ hlist_for_each_safe(pos, n, &heads[i]) {
+ hlist_del(pos);
+ free(pos);
+ }
+ }
+}
+
+static void guest_session__exit(struct guest_session *gs)
+{
+ if (gs->session) {
+ perf_session__delete(gs->session);
+ free_hlist(gs->heads, PERF_EVLIST__HLIST_SIZE);
+ free_hlist(gs->tids, PERF_EVLIST__HLIST_SIZE);
+ }
+ if (gs->tmp_file_name) {
+ if (gs->tmp_fd >= 0)
+ close(gs->tmp_fd);
+ unlink(gs->tmp_file_name);
+ zfree(&gs->tmp_file_name);
+ }
+ zfree(&gs->vcpu);
+ zfree(&gs->perf_data_file);
+}
+
+static void get_tsc_conv(struct perf_tsc_conversion *tc, struct perf_record_time_conv *time_conv)
+{
+ tc->time_shift = time_conv->time_shift;
+ tc->time_mult = time_conv->time_mult;
+ tc->time_zero = time_conv->time_zero;
+ tc->time_cycles = time_conv->time_cycles;
+ tc->time_mask = time_conv->time_mask;
+ tc->cap_user_time_zero = time_conv->cap_user_time_zero;
+ tc->cap_user_time_short = time_conv->cap_user_time_short;
+}
+
+static void guest_session__get_tc(struct guest_session *gs)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+
+ get_tsc_conv(&gs->host_tc, &inject->session->time_conv);
+ get_tsc_conv(&gs->guest_tc, &gs->session->time_conv);
+}
+
+static void guest_session__convert_time(struct guest_session *gs, u64 guest_time, u64 *host_time)
+{
+ u64 tsc;
+
+ if (!guest_time) {
+ *host_time = 0;
+ return;
+ }
+
+ if (gs->guest_tc.cap_user_time_zero)
+ tsc = perf_time_to_tsc(guest_time, &gs->guest_tc);
+ else
+ tsc = guest_time;
+
+ /*
+ * This is the correct order of operations for x86 if the TSC Offset and
+ * Multiplier values are used.
+ */
+ tsc -= gs->time_offset;
+ tsc /= gs->time_scale;
+
+ if (gs->host_tc.cap_user_time_zero)
+ *host_time = tsc_to_perf_time(tsc, &gs->host_tc);
+ else
+ *host_time = tsc;
+}
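/*
 * Illustrative sketch (not part of the patch): guest_session__convert_time()
 * maps a guest perf timestamp to a host one by converting guest time to TSC,
 * undoing the VM's TSC offset and scale, then converting TSC to host time.
 * The helpers below use the usual mult/shift conversion formulas (the
 * cap_user_time_short cycle/mask handling is omitted); all sample parameters
 * in main() are made up.
 */
#include <inttypes.h>
#include <stdio.h>

struct tsc_conv {
	uint16_t time_shift;
	uint32_t time_mult;
	uint64_t time_zero;
};

static uint64_t tsc_to_time(uint64_t cyc, const struct tsc_conv *tc)
{
	uint64_t quot = cyc >> tc->time_shift;
	uint64_t rem = cyc & (((uint64_t)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}

static uint64_t time_to_tsc(uint64_t ns, const struct tsc_conv *tc)
{
	uint64_t t = ns - tc->time_zero;
	uint64_t quot = t / tc->time_mult;
	uint64_t rem = t % tc->time_mult;

	return (quot << tc->time_shift) + (rem << tc->time_shift) / tc->time_mult;
}

int main(void)
{
	/* hypothetical guest/host conversions and VM TSC offset/scale */
	struct tsc_conv guest = { .time_shift = 10, .time_mult = 512, .time_zero = 1000000 };
	struct tsc_conv host  = { .time_shift = 10, .time_mult = 512, .time_zero = 5000000 };
	uint64_t tsc_offset = 2048, guest_time = 2000000, host_time;
	double tsc_scale = 1.0;

	uint64_t tsc = time_to_tsc(guest_time, &guest);
	tsc -= tsc_offset;			/* undo the VM's TSC offset ... */
	tsc = (uint64_t)(tsc / tsc_scale);	/* ... and multiplier */
	host_time = tsc_to_time(tsc, &host);

	printf("guest %" PRIu64 " ns -> host %" PRIu64 " ns\n", guest_time, host_time);
	return 0;
}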
+
+static int guest_session__fetch(struct guest_session *gs)
+{
+ void *buf = gs->ev.event_buf;
+ struct perf_event_header *hdr = buf;
+ size_t hdr_sz = sizeof(*hdr);
+ ssize_t ret;
+
+ ret = readn(gs->tmp_fd, buf, hdr_sz);
+ if (ret < 0)
+ return ret;
+
+ if (!ret) {
+ /* Zero size means EOF */
+ hdr->size = 0;
+ return 0;
+ }
+
+ buf += hdr_sz;
+
+ ret = readn(gs->tmp_fd, buf, hdr->size - hdr_sz);
+ if (ret < 0)
+ return ret;
+
+ gs->ev.event = (union perf_event *)gs->ev.event_buf;
+ gs->ev.sample.time = 0;
+
+ if (hdr->type >= PERF_RECORD_USER_TYPE_START) {
+ pr_err("Unexpected type fetching guest event");
+ return 0;
+ }
+
+ ret = evlist__parse_sample(gs->session->evlist, gs->ev.event, &gs->ev.sample);
+ if (ret) {
+ pr_err("Parse failed fetching guest event");
+ return ret;
+ }
+
+ if (!gs->have_tc) {
+ guest_session__get_tc(gs);
+ gs->have_tc = true;
+ }
+
+ guest_session__convert_time(gs, gs->ev.sample.time, &gs->ev.sample.time);
+
+ return 0;
+}
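/*
 * Illustrative sketch (not part of the patch): the temporary file written by
 * the guest session holds self-describing records whose header carries the
 * total record size, so guest_session__fetch() reads the fixed-size header
 * first and then the remaining (size - header) bytes.  Standalone reader for
 * the same kind of layout; struct rec_header is a stand-in, not the real
 * perf_event_header.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct rec_header {
	uint32_t type;
	uint16_t misc;
	uint16_t size;			/* total record size, header included */
};

/* Read exactly one record into buf (cap bytes): 1 = record, 0 = EOF, -1 = error. */
static int read_record(int fd, void *buf, size_t cap)
{
	struct rec_header *hdr = buf;
	size_t got = 0;
	ssize_t n;

	while (got < sizeof(*hdr)) {		/* readn() equivalent for the header */
		n = read(fd, (char *)buf + got, sizeof(*hdr) - got);
		if (n < 0)
			return -1;
		if (n == 0)
			return got ? -1 : 0;	/* clean EOF only between records */
		got += (size_t)n;
	}
	if (hdr->size < sizeof(*hdr) || hdr->size > cap)
		return -1;			/* malformed or oversized record */
	while (got < hdr->size) {		/* then the rest of the record */
		n = read(fd, (char *)buf + got, hdr->size - got);
		if (n <= 0)
			return -1;
		got += (size_t)n;
	}
	return 1;
}

int main(void)
{
	union {
		struct rec_header hdr;
		unsigned char bytes[64];
	} rec;
	struct rec_header hdr = { .type = 1, .misc = 0, .size = sizeof(hdr) + 4 };
	FILE *f = tmpfile();

	if (!f)
		return 1;
	/* stage one 12-byte record: header plus 4 payload bytes */
	fwrite(&hdr, sizeof(hdr), 1, f);
	fwrite("data", 1, 4, f);
	fflush(f);
	lseek(fileno(f), 0, SEEK_SET);

	while (read_record(fileno(f), &rec, sizeof(rec)) == 1)
		printf("record type %u, size %u\n",
		       (unsigned int)rec.hdr.type, (unsigned int)rec.hdr.size);
	return 0;
}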
+
+static int evlist__append_id_sample(struct evlist *evlist, union perf_event *ev,
+ const struct perf_sample *sample)
+{
+ struct evsel *evsel;
+ void *array;
+ int ret;
+
+ evsel = evlist__id2evsel(evlist, sample->id);
+ array = ev;
+
+ if (!evsel) {
+ pr_err("No evsel for id %"PRIu64"\n", sample->id);
+ return -EINVAL;
+ }
+
+ array += ev->header.size;
+ ret = perf_event__synthesize_id_sample(array, evsel->core.attr.sample_type, sample);
+ if (ret < 0)
+ return ret;
+
+ if (ret & 7) {
+ pr_err("Bad id sample size %d\n", ret);
+ return -EINVAL;
+ }
+
+ ev->header.size += ret;
+
+ return 0;
+}
+
+static int guest_session__inject_events(struct guest_session *gs, u64 timestamp)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+ int ret;
+
+ if (!gs->ready)
+ return 0;
+
+ while (1) {
+ struct perf_sample *sample;
+ struct guest_id *guest_id;
+ union perf_event *ev;
+ u16 id_hdr_size;
+ u8 cpumode;
+ u64 id;
+
+ if (!gs->fetched) {
+ ret = guest_session__fetch(gs);
+ if (ret)
+ return ret;
+ gs->fetched = true;
+ }
+
+ ev = gs->ev.event;
+ sample = &gs->ev.sample;
+
+ if (!ev->header.size)
+ return 0; /* EOF */
+
+ if (sample->time > timestamp)
+ return 0;
+
+ /* Change cpumode to guest */
+ cpumode = ev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ if (cpumode & PERF_RECORD_MISC_USER)
+ cpumode = PERF_RECORD_MISC_GUEST_USER;
+ else
+ cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
+ ev->header.misc &= ~PERF_RECORD_MISC_CPUMODE_MASK;
+ ev->header.misc |= cpumode;
+
+ id = sample->id;
+ if (!id) {
+ id = gs->dflt_id;
+ id_hdr_size = gs->dflt_id_hdr_size;
+ } else {
+ struct evsel *evsel = evlist__id2evsel(gs->session->evlist, id);
+
+ id_hdr_size = evsel__id_hdr_size(evsel);
+ }
+
+ if (id_hdr_size & 7) {
+ pr_err("Bad id_hdr_size %u\n", id_hdr_size);
+ return -EINVAL;
+ }
+
+ if (ev->header.size & 7) {
+ pr_err("Bad event size %u\n", ev->header.size);
+ return -EINVAL;
+ }
+
+ /* Remove guest id sample */
+ ev->header.size -= id_hdr_size;
+
+ if (ev->header.size & 7) {
+ pr_err("Bad raw event size %u\n", ev->header.size);
+ return -EINVAL;
+ }
+
+ guest_id = guest_session__lookup_id(gs, id);
+ if (!guest_id) {
+ pr_err("Guest event with unknown id %llu\n",
+ (unsigned long long)id);
+ return -EINVAL;
+ }
+
+ /* Change to host ID to avoid conflicting ID values */
+ sample->id = guest_id->host_id;
+ sample->stream_id = guest_id->host_id;
+
+ if (sample->cpu != (u32)-1) {
+ if (sample->cpu >= gs->vcpu_cnt) {
+ pr_err("Guest event with unknown VCPU %u\n",
+ sample->cpu);
+ return -EINVAL;
+ }
+ /* Change to host CPU instead of guest VCPU */
+ sample->cpu = gs->vcpu[sample->cpu].cpu;
+ }
+
+ /* New id sample with new ID and CPU */
+ ret = evlist__append_id_sample(inject->session->evlist, ev, sample);
+ if (ret)
+ return ret;
+
+ if (ev->header.size & 7) {
+ pr_err("Bad new event size %u\n", ev->header.size);
+ return -EINVAL;
+ }
+
+ gs->fetched = false;
+
+ ret = output_bytes(inject, ev, ev->header.size);
+ if (ret)
+ return ret;
+ }
+}
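/*
 * Illustrative sketch (not part of the patch): the cpumode rewrite above keeps
 * every bit of header.misc except the cpumode field and substitutes the guest
 * variant of the original mode.  Standalone version with the
 * PERF_RECORD_MISC_* values copied from the perf_event UAPI header.
 */
#include <stdint.h>
#include <stdio.h>

#define PERF_RECORD_MISC_CPUMODE_MASK	(7 << 0)
#define PERF_RECORD_MISC_KERNEL		(1 << 0)
#define PERF_RECORD_MISC_USER		(2 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL	(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER	(5 << 0)

static uint16_t misc_to_guest(uint16_t misc)
{
	uint16_t cpumode = misc & PERF_RECORD_MISC_CPUMODE_MASK;

	/* modes with the user bit set become guest user, everything else guest kernel */
	cpumode = (cpumode & PERF_RECORD_MISC_USER) ?
		  PERF_RECORD_MISC_GUEST_USER : PERF_RECORD_MISC_GUEST_KERNEL;
	return (misc & ~PERF_RECORD_MISC_CPUMODE_MASK) | cpumode;
}

int main(void)
{
	printf("0x%x\n", misc_to_guest(PERF_RECORD_MISC_USER));	/* -> 0x5 */
	printf("0x%x\n", misc_to_guest(PERF_RECORD_MISC_KERNEL));	/* -> 0x4 */
	return 0;
}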
+
+static int guest_session__flush_events(struct guest_session *gs)
+{
+ return guest_session__inject_events(gs, -1);
+}
+
+static int host__repipe(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ int ret;
+
+ ret = guest_session__inject_events(&inject->guest_session, sample->time);
+ if (ret)
+ return ret;
+
+ return perf_event__repipe(tool, event, sample, machine);
+}
+
+static int host__finished_init(struct perf_session *session, union perf_event *event)
+{
+ struct perf_inject *inject = container_of(session->tool, struct perf_inject, tool);
+ struct guest_session *gs = &inject->guest_session;
+ int ret;
+
+ /*
+ * Peek through host COMM events to find QEMU threads and the VCPU they
+ * are running.
+ */
+ ret = host_peek_vm_comms(session, gs);
+ if (ret)
+ return ret;
+
+ if (!gs->vcpu_cnt) {
+ pr_err("No VCPU threads found for pid %u\n", gs->machine_pid);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate new (unused) host sample IDs and map them to the guest IDs.
+ */
+ gs->highest_id = evlist__find_highest_id(session->evlist);
+ ret = guest_session__map_ids(gs, session->evlist);
+ if (ret)
+ return ret;
+
+ ret = guest_session__add_attrs(gs);
+ if (ret)
+ return ret;
+
+ ret = synthesize_id_index(inject, gs->session->evlist->core.nr_entries);
+ if (ret) {
+ pr_err("Failed to synthesize id_index\n");
+ return ret;
+ }
+
+ ret = guest_session__add_build_ids(gs);
+ if (ret) {
+ pr_err("Failed to add guest build IDs\n");
+ return ret;
+ }
+
+ gs->ready = true;
+
+ ret = guest_session__inject_events(gs, 0);
+ if (ret)
+ return ret;
+
+ return perf_event__repipe_op2_synth(session, event);
+}
+
+/*
+ * Obey finished-round ordering. The FINISHED_ROUND event is first processed
+ * which flushes host events to file up until the last flush time. Then inject
+ * guest events up to the same time. Finally write out the FINISHED_ROUND event
+ * itself.
+ */
+static int host__finished_round(struct perf_tool *tool,
+ union perf_event *event,
+ struct ordered_events *oe)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ int ret = perf_event__process_finished_round(tool, event, oe);
+ u64 timestamp = ordered_events__last_flush_time(oe);
+
+ if (ret)
+ return ret;
+
+ ret = guest_session__inject_events(&inject->guest_session, timestamp);
+ if (ret)
+ return ret;
+
+ return perf_event__repipe_oe_synth(tool, event, oe);
+}
+
+static int host__context_switch(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+ bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
+ struct guest_session *gs = &inject->guest_session;
+ u32 pid = event->context_switch.next_prev_pid;
+ u32 tid = event->context_switch.next_prev_tid;
+ struct guest_tid *guest_tid;
+ u32 vcpu;
+
+ if (out || pid != gs->machine_pid)
+ goto out;
+
+ guest_tid = guest_session__lookup_tid(gs, tid);
+ if (!guest_tid)
+ goto out;
+
+ if (sample->cpu == (u32)-1) {
+ pr_err("Switch event does not have CPU\n");
+ return -EINVAL;
+ }
+
+ vcpu = guest_tid->vcpu;
+ if (vcpu >= gs->vcpu_cnt)
+ return -EINVAL;
+
+ /* Guest is switching in, record which CPU the VCPU is now running on */
+ gs->vcpu[vcpu].cpu = sample->cpu;
+out:
+ return host__repipe(tool, event, sample, machine);
+}
static void sig_handler(int sig __maybe_unused)
{
session_done = 1;
}
-static int perf_evsel__check_stype(struct evsel *evsel,
- u64 sample_type, const char *sample_msg)
+static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg)
{
struct perf_event_attr *attr = &evsel->core.attr;
- const char *name = perf_evsel__name(evsel);
+ const char *name = evsel__name(evsel);
if (!(attr->sample_type & sample_type)) {
pr_err("Samples for %s event do not have %s attribute set.",
@@ -596,44 +1716,295 @@ static void strip_init(struct perf_inject *inject)
evsel->handler = drop_sample;
}
+static int parse_vm_time_correlation(const struct option *opt, const char *str, int unset)
+{
+ struct perf_inject *inject = opt->value;
+ const char *args;
+ char *dry_run;
+
+ if (unset)
+ return 0;
+
+ inject->itrace_synth_opts.set = true;
+ inject->itrace_synth_opts.vm_time_correlation = true;
+ inject->in_place_update = true;
+
+ if (!str)
+ return 0;
+
+ dry_run = skip_spaces(str);
+ if (!strncmp(dry_run, "dry-run", strlen("dry-run"))) {
+ inject->itrace_synth_opts.vm_tm_corr_dry_run = true;
+ inject->in_place_update_dry_run = true;
+ args = dry_run + strlen("dry-run");
+ } else {
+ args = str;
+ }
+
+ inject->itrace_synth_opts.vm_tm_corr_args = strdup(args);
+
+ return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM;
+}
+
+static int parse_guest_data(const struct option *opt, const char *str, int unset)
+{
+ struct perf_inject *inject = opt->value;
+ struct guest_session *gs = &inject->guest_session;
+ char *tok;
+ char *s;
+
+ if (unset)
+ return 0;
+
+ if (!str)
+ goto bad_args;
+
+ s = strdup(str);
+ if (!s)
+ return -ENOMEM;
+
+ gs->perf_data_file = strsep(&s, ",");
+ if (!gs->perf_data_file)
+ goto bad_args;
+
+ gs->copy_kcore_dir = has_kcore_dir(gs->perf_data_file);
+ if (gs->copy_kcore_dir)
+ inject->output.is_dir = true;
+
+ tok = strsep(&s, ",");
+ if (!tok)
+ goto bad_args;
+ gs->machine_pid = strtoul(tok, NULL, 0);
+ if (!inject->guest_session.machine_pid)
+ goto bad_args;
+
+ gs->time_scale = 1;
+
+ tok = strsep(&s, ",");
+ if (!tok)
+ goto out;
+ gs->time_offset = strtoull(tok, NULL, 0);
+
+ tok = strsep(&s, ",");
+ if (!tok)
+ goto out;
+ gs->time_scale = strtod(tok, NULL);
+ if (!gs->time_scale)
+ goto bad_args;
+out:
+ return 0;
+
+bad_args:
+ pr_err("--guest-data option requires guest perf.data file name, "
+ "guest machine PID, and optionally guest timestamp offset, "
+ "and guest timestamp scale factor, separated by commas.\n");
+ return -1;
+}
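/*
 * Illustrative sketch (not part of the patch): --guest-data takes
 * "<perf.data file>,<machine PID>[,<time offset>[,<time scale>]]" and
 * parse_guest_data() splits it with strsep().  The same parse as a standalone
 * helper; struct guest_opts and the example string are made up for the sketch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct guest_opts {
	char *perf_data_file;
	unsigned long machine_pid;
	unsigned long long time_offset;
	double time_scale;
};

static int parse_guest_opts(const char *str, struct guest_opts *opts)
{
	char *s = strdup(str), *p = s, *tok;

	if (!s)
		return -1;

	opts->time_offset = 0;
	opts->time_scale = 1;			/* defaults when trailing fields are omitted */

	opts->perf_data_file = strdup(strsep(&p, ","));
	tok = strsep(&p, ",");
	if (!opts->perf_data_file || !tok)
		goto bad;
	opts->machine_pid = strtoul(tok, NULL, 0);
	if (!opts->machine_pid)
		goto bad;

	tok = strsep(&p, ",");			/* optional timestamp offset */
	if (tok)
		opts->time_offset = strtoull(tok, NULL, 0);
	tok = strsep(&p, ",");			/* optional timestamp scale */
	if (tok) {
		opts->time_scale = strtod(tok, NULL);
		if (!opts->time_scale)
			goto bad;
	}
	free(s);
	return 0;
bad:
	free(opts->perf_data_file);
	free(s);
	return -1;
}

int main(void)
{
	struct guest_opts opts;

	if (!parse_guest_opts("guest-perf.data,12345,86400000000000,1.000012", &opts))
		printf("file=%s pid=%lu offset=%llu scale=%f\n",
		       opts.perf_data_file, opts.machine_pid,
		       opts.time_offset, opts.time_scale);
	return 0;
}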
+
+static int save_section_info_cb(struct perf_file_section *section,
+ struct perf_header *ph __maybe_unused,
+ int feat, int fd __maybe_unused, void *data)
+{
+ struct perf_inject *inject = data;
+
+ inject->secs[feat] = *section;
+ return 0;
+}
+
+static int save_section_info(struct perf_inject *inject)
+{
+ struct perf_header *header = &inject->session->header;
+ int fd = perf_data__fd(inject->session->data);
+
+ return perf_header__process_sections(header, fd, inject, save_section_info_cb);
+}
+
+static bool keep_feat(int feat)
+{
+ switch (feat) {
+ /* Keep original information that describes the machine or software */
+ case HEADER_TRACING_DATA:
+ case HEADER_HOSTNAME:
+ case HEADER_OSRELEASE:
+ case HEADER_VERSION:
+ case HEADER_ARCH:
+ case HEADER_NRCPUS:
+ case HEADER_CPUDESC:
+ case HEADER_CPUID:
+ case HEADER_TOTAL_MEM:
+ case HEADER_CPU_TOPOLOGY:
+ case HEADER_NUMA_TOPOLOGY:
+ case HEADER_PMU_MAPPINGS:
+ case HEADER_CACHE:
+ case HEADER_MEM_TOPOLOGY:
+ case HEADER_CLOCKID:
+ case HEADER_BPF_PROG_INFO:
+ case HEADER_BPF_BTF:
+ case HEADER_CPU_PMU_CAPS:
+ case HEADER_CLOCK_DATA:
+ case HEADER_HYBRID_TOPOLOGY:
+ case HEADER_PMU_CAPS:
+ return true;
+ /* Information that can be updated */
+ case HEADER_BUILD_ID:
+ case HEADER_CMDLINE:
+ case HEADER_EVENT_DESC:
+ case HEADER_BRANCH_STACK:
+ case HEADER_GROUP_DESC:
+ case HEADER_AUXTRACE:
+ case HEADER_STAT:
+ case HEADER_SAMPLE_TIME:
+ case HEADER_DIR_FORMAT:
+ case HEADER_COMPRESSED:
+ default:
+ return false;
+ };
+}
+
+static int read_file(int fd, u64 offs, void *buf, size_t sz)
+{
+ ssize_t ret = preadn(fd, buf, sz, offs);
+
+ if (ret < 0)
+ return -errno;
+ if ((size_t)ret != sz)
+ return -EINVAL;
+ return 0;
+}
+
+static int feat_copy(struct perf_inject *inject, int feat, struct feat_writer *fw)
+{
+ int fd = perf_data__fd(inject->session->data);
+ u64 offs = inject->secs[feat].offset;
+ size_t sz = inject->secs[feat].size;
+ void *buf = malloc(sz);
+ int ret;
+
+ if (!buf)
+ return -ENOMEM;
+
+ ret = read_file(fd, offs, buf, sz);
+ if (ret)
+ goto out_free;
+
+ ret = fw->write(fw, buf, sz);
+out_free:
+ free(buf);
+ return ret;
+}
+
+struct inject_fc {
+ struct feat_copier fc;
+ struct perf_inject *inject;
+};
+
+static int feat_copy_cb(struct feat_copier *fc, int feat, struct feat_writer *fw)
+{
+ struct inject_fc *inj_fc = container_of(fc, struct inject_fc, fc);
+ struct perf_inject *inject = inj_fc->inject;
+ int ret;
+
+ if (!inject->secs[feat].offset ||
+ !keep_feat(feat))
+ return 0;
+
+ ret = feat_copy(inject, feat, fw);
+ if (ret < 0)
+ return ret;
+
+ return 1; /* Feature section copied */
+}
+
+static int copy_kcore_dir(struct perf_inject *inject)
+{
+ char *cmd;
+ int ret;
+
+ ret = asprintf(&cmd, "cp -r -n %s/kcore_dir* %s >/dev/null 2>&1",
+ inject->input_name, inject->output.path);
+ if (ret < 0)
+ return ret;
+ pr_debug("%s\n", cmd);
+ ret = system(cmd);
+ free(cmd);
+ return ret;
+}
+
+static int guest_session__copy_kcore_dir(struct guest_session *gs)
+{
+ struct perf_inject *inject = container_of(gs, struct perf_inject, guest_session);
+ char *cmd;
+ int ret;
+
+ ret = asprintf(&cmd, "cp -r -n %s/kcore_dir %s/kcore_dir__%u >/dev/null 2>&1",
+ gs->perf_data_file, inject->output.path, gs->machine_pid);
+ if (ret < 0)
+ return ret;
+ pr_debug("%s\n", cmd);
+ ret = system(cmd);
+ free(cmd);
+ return ret;
+}
+
+static int output_fd(struct perf_inject *inject)
+{
+ return inject->in_place_update ? -1 : perf_data__fd(&inject->output);
+}
+
static int __cmd_inject(struct perf_inject *inject)
{
int ret = -EINVAL;
+ struct guest_session *gs = &inject->guest_session;
struct perf_session *session = inject->session;
- struct perf_data *data_out = &inject->output;
- int fd = perf_data__fd(data_out);
+ int fd = output_fd(inject);
u64 output_data_offset;
signal(SIGINT, sig_handler);
if (inject->build_ids || inject->sched_stat ||
- inject->itrace_synth_opts.set) {
+ inject->itrace_synth_opts.set || inject->build_id_all) {
inject->tool.mmap = perf_event__repipe_mmap;
inject->tool.mmap2 = perf_event__repipe_mmap2;
inject->tool.fork = perf_event__repipe_fork;
+#ifdef HAVE_LIBTRACEEVENT
inject->tool.tracing_data = perf_event__repipe_tracing_data;
+#endif
}
- output_data_offset = session->header.data_offset;
+ output_data_offset = perf_session__data_offset(session->evlist);
- if (inject->build_ids) {
+ if (inject->build_id_all) {
+ inject->tool.mmap = perf_event__repipe_buildid_mmap;
+ inject->tool.mmap2 = perf_event__repipe_buildid_mmap2;
+ } else if (inject->build_ids) {
inject->tool.sample = perf_event__inject_buildid;
} else if (inject->sched_stat) {
struct evsel *evsel;
evlist__for_each_entry(session->evlist, evsel) {
- const char *name = perf_evsel__name(evsel);
+ const char *name = evsel__name(evsel);
if (!strcmp(name, "sched:sched_switch")) {
- if (perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
+ if (evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID"))
return -EINVAL;
evsel->handler = perf_inject__sched_switch;
} else if (!strcmp(name, "sched:sched_process_exit"))
evsel->handler = perf_inject__sched_process_exit;
+#ifdef HAVE_LIBTRACEEVENT
else if (!strncmp(name, "sched:sched_stat_", 17))
evsel->handler = perf_inject__sched_stat;
+#endif
}
+ } else if (inject->itrace_synth_opts.vm_time_correlation) {
+ session->itrace_synth_opts = &inject->itrace_synth_opts;
+ memset(&inject->tool, 0, sizeof(inject->tool));
+ inject->tool.id_index = perf_event__process_id_index;
+ inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
+ inject->tool.auxtrace = perf_event__process_auxtrace;
+ inject->tool.auxtrace_error = perf_event__process_auxtrace_error;
+ inject->tool.ordered_events = true;
+ inject->tool.ordering_requires_timestamps = true;
} else if (inject->itrace_synth_opts.set) {
session->itrace_synth_opts = &inject->itrace_synth_opts;
inject->itrace_synth_opts.inject = true;
@@ -644,26 +2015,85 @@ static int __cmd_inject(struct perf_inject *inject)
inject->tool.auxtrace_info = perf_event__process_auxtrace_info;
inject->tool.auxtrace = perf_event__process_auxtrace;
inject->tool.aux = perf_event__drop_aux;
- inject->tool.itrace_start = perf_event__drop_aux,
+ inject->tool.itrace_start = perf_event__drop_aux;
+ inject->tool.aux_output_hw_id = perf_event__drop_aux;
inject->tool.ordered_events = true;
inject->tool.ordering_requires_timestamps = true;
/* Allow space in the header for new attributes */
- output_data_offset = 4096;
+ output_data_offset = roundup(8192 + session->header.data_offset, 4096);
if (inject->strip)
strip_init(inject);
+ } else if (gs->perf_data_file) {
+ char *name = gs->perf_data_file;
+
+ /*
+ * Not strictly necessary, but keep these events in order wrt
+ * guest events.
+ */
+ inject->tool.mmap = host__repipe;
+ inject->tool.mmap2 = host__repipe;
+ inject->tool.comm = host__repipe;
+ inject->tool.fork = host__repipe;
+ inject->tool.exit = host__repipe;
+ inject->tool.lost = host__repipe;
+ inject->tool.context_switch = host__repipe;
+ inject->tool.ksymbol = host__repipe;
+ inject->tool.text_poke = host__repipe;
+ /*
+ * Once the host session has initialized, set up sample ID
+ * mapping and feed in guest attrs, build IDs and initial
+ * events.
+ */
+ inject->tool.finished_init = host__finished_init;
+ /* Obey finished round ordering */
+ inject->tool.finished_round = host__finished_round,
+ /* Keep track of which CPU a VCPU is running on */
+ inject->tool.context_switch = host__context_switch;
+ /*
+ * Must order events to be able to obey finished round
+ * ordering.
+ */
+ inject->tool.ordered_events = true;
+ inject->tool.ordering_requires_timestamps = true;
+ /* Set up a separate session to process guest perf.data file */
+ ret = guest_session__start(gs, name, session->data->force);
+ if (ret) {
+ pr_err("Failed to process %s, error %d\n", name, ret);
+ return ret;
+ }
+ /* Allow space in the header for guest attributes */
+ output_data_offset += gs->session->header.data_offset;
+ output_data_offset = roundup(output_data_offset, 4096);
}
if (!inject->itrace_synth_opts.set)
auxtrace_index__free(&session->auxtrace_index);
- if (!data_out->is_pipe)
+ if (!inject->is_pipe && !inject->in_place_update)
lseek(fd, output_data_offset, SEEK_SET);
ret = perf_session__process_events(session);
if (ret)
return ret;
- if (!data_out->is_pipe) {
+ if (gs->session) {
+ /*
+ * Remaining guest events have later timestamps. Flush them
+ * out to file.
+ */
+ ret = guest_session__flush_events(gs);
+ if (ret) {
+ pr_err("Failed to flush guest events\n");
+ return ret;
+ }
+ }
+
+ if (!inject->is_pipe && !inject->in_place_update) {
+ struct inject_fc inj_fc = {
+ .fc.copy = feat_copy_cb,
+ .inject = inject,
+ };
+
if (inject->build_ids)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
@@ -676,29 +2106,34 @@ static int __cmd_inject(struct perf_inject *inject)
dsos__hit_all(session);
/*
* The AUX areas have been removed and replaced with
- * synthesized hardware events, so clear the feature flag and
- * remove the evsel.
+ * synthesized hardware events, so clear the feature flag.
*/
if (inject->itrace_synth_opts.set) {
- struct evsel *evsel;
-
perf_header__clear_feat(&session->header,
HEADER_AUXTRACE);
- if (inject->itrace_synth_opts.last_branch)
+ if (inject->itrace_synth_opts.last_branch ||
+ inject->itrace_synth_opts.add_last_branch)
perf_header__set_feat(&session->header,
HEADER_BRANCH_STACK);
- evsel = perf_evlist__id2evsel_strict(session->evlist,
- inject->aux_id);
- if (evsel) {
- pr_debug("Deleting %s\n",
- perf_evsel__name(evsel));
- evlist__remove(session->evlist, evsel);
- evsel__delete(evsel);
- }
}
session->header.data_offset = output_data_offset;
session->header.data_size = inject->bytes_written;
- perf_session__write_header(session, session->evlist, fd, true);
+ perf_session__inject_header(session, session->evlist, fd, &inj_fc.fc);
+
+ if (inject->copy_kcore_dir) {
+ ret = copy_kcore_dir(inject);
+ if (ret) {
+ pr_err("Failed to copy kcore\n");
+ return ret;
+ }
+ }
+ if (gs->copy_kcore_dir) {
+ ret = guest_session__copy_kcore_dir(gs);
+ if (ret) {
+ pr_err("Failed to copy guest kcore\n");
+ return ret;
+ }
+ }
}
return ret;
@@ -709,45 +2144,68 @@ int cmd_inject(int argc, const char **argv)
struct perf_inject inject = {
.tool = {
.sample = perf_event__repipe_sample,
+ .read = perf_event__repipe_sample,
.mmap = perf_event__repipe,
.mmap2 = perf_event__repipe,
.comm = perf_event__repipe,
+ .namespaces = perf_event__repipe,
+ .cgroup = perf_event__repipe,
.fork = perf_event__repipe,
.exit = perf_event__repipe,
.lost = perf_event__repipe,
.lost_samples = perf_event__repipe,
.aux = perf_event__repipe,
.itrace_start = perf_event__repipe,
+ .aux_output_hw_id = perf_event__repipe,
.context_switch = perf_event__repipe,
- .read = perf_event__repipe_sample,
.throttle = perf_event__repipe,
.unthrottle = perf_event__repipe,
+ .ksymbol = perf_event__repipe,
+ .bpf = perf_event__repipe,
+ .text_poke = perf_event__repipe,
.attr = perf_event__repipe_attr,
+ .event_update = perf_event__repipe_event_update,
.tracing_data = perf_event__repipe_op2_synth,
- .auxtrace_info = perf_event__repipe_op2_synth,
- .auxtrace = perf_event__repipe_auxtrace,
- .auxtrace_error = perf_event__repipe_op2_synth,
- .time_conv = perf_event__repipe_op2_synth,
.finished_round = perf_event__repipe_oe_synth,
.build_id = perf_event__repipe_op2_synth,
.id_index = perf_event__repipe_op2_synth,
+ .auxtrace_info = perf_event__repipe_op2_synth,
+ .auxtrace_error = perf_event__repipe_op2_synth,
+ .time_conv = perf_event__repipe_op2_synth,
+ .thread_map = perf_event__repipe_op2_synth,
+ .cpu_map = perf_event__repipe_op2_synth,
+ .stat_config = perf_event__repipe_op2_synth,
+ .stat = perf_event__repipe_op2_synth,
+ .stat_round = perf_event__repipe_op2_synth,
.feature = perf_event__repipe_op2_synth,
+ .finished_init = perf_event__repipe_op2_synth,
+ .compressed = perf_event__repipe_op4_synth,
+ .auxtrace = perf_event__repipe_auxtrace,
},
.input_name = "-",
.samples = LIST_HEAD_INIT(inject.samples),
.output = {
.path = "-",
.mode = PERF_DATA_MODE_WRITE,
+ .use_stdio = true,
},
};
struct perf_data data = {
.mode = PERF_DATA_MODE_READ,
+ .use_stdio = true,
};
int ret;
+ bool repipe = true;
+ const char *known_build_ids = NULL;
struct option options[] = {
OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
"Inject build-ids into the output stream"),
+ OPT_BOOLEAN(0, "buildid-all", &inject.build_id_all,
+ "Inject build-ids of all DSOs into the output stream"),
+ OPT_STRING(0, "known-build-ids", &known_build_ids,
+ "buildid path [,buildid path...]",
+ "build-ids to use for given paths"),
OPT_STRING('i', "input", &inject.input_name, "file",
"input file name"),
OPT_STRING('o', "output", &inject.output.path, "file",
@@ -760,6 +2218,10 @@ int cmd_inject(int argc, const char **argv)
#endif
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show build ids, etc)"),
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
+ "don't load vmlinux even if found"),
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
"kallsyms pathname"),
OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
@@ -769,6 +2231,15 @@ int cmd_inject(int argc, const char **argv)
itrace_parse_synth_opts),
OPT_BOOLEAN(0, "strip", &inject.strip,
"strip non-synthesized events (use with --itrace)"),
+ OPT_CALLBACK_OPTARG(0, "vm-time-correlation", &inject, NULL, "opts",
+ "correlate time between VM guests and the host",
+ parse_vm_time_correlation),
+ OPT_CALLBACK_OPTARG(0, "guest-data", &inject, NULL, "opts",
+ "inject events from a guest perf.data file",
+ parse_guest_data),
+ OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
+ "guest mount directory under which every guest os"
+ " instance has a subdir"),
OPT_END()
};
const char * const inject_usage[] = {
@@ -791,22 +2262,81 @@ int cmd_inject(int argc, const char **argv)
return -1;
}
- if (perf_data__open(&inject.output)) {
- perror("failed to create output file");
+ if (symbol__validate_sym_arguments())
return -1;
- }
- inject.tool.ordered_events = inject.sched_stat;
+ if (inject.in_place_update) {
+ if (!strcmp(inject.input_name, "-")) {
+ pr_err("Input file name required for in-place updating\n");
+ return -1;
+ }
+ if (strcmp(inject.output.path, "-")) {
+ pr_err("Output file name must not be specified for in-place updating\n");
+ return -1;
+ }
+ if (!data.force && !inject.in_place_update_dry_run) {
+ pr_err("The input file would be updated in place, "
+ "the --force option is required.\n");
+ return -1;
+ }
+ if (!inject.in_place_update_dry_run)
+ data.in_place_update = true;
+ } else {
+ if (strcmp(inject.output.path, "-") && !inject.strip &&
+ has_kcore_dir(inject.input_name)) {
+ inject.output.is_dir = true;
+ inject.copy_kcore_dir = true;
+ }
+ if (perf_data__open(&inject.output)) {
+ perror("failed to create output file");
+ return -1;
+ }
+ }
data.path = inject.input_name;
- inject.session = perf_session__new(&data, true, &inject.tool);
- if (IS_ERR(inject.session))
- return PTR_ERR(inject.session);
+ if (!strcmp(inject.input_name, "-") || inject.output.is_pipe) {
+ inject.is_pipe = true;
+ /*
+ * Do not repipe header when input is a regular file
+ * since either it can rewrite the header at the end
+ * or write a new pipe header.
+ */
+ if (strcmp(inject.input_name, "-"))
+ repipe = false;
+ }
+
+ inject.session = __perf_session__new(&data, repipe,
+ output_fd(&inject),
+ &inject.tool);
+ if (IS_ERR(inject.session)) {
+ ret = PTR_ERR(inject.session);
+ goto out_close_output;
+ }
if (zstd_init(&(inject.session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed.\n");
- if (inject.build_ids) {
+ /* Save original section info before feature bits change */
+ ret = save_section_info(&inject);
+ if (ret)
+ goto out_delete;
+
+ if (!data.is_pipe && inject.output.is_pipe) {
+ ret = perf_header__write_pipe(perf_data__fd(&inject.output));
+ if (ret < 0) {
+ pr_err("Couldn't write a new pipe header.\n");
+ goto out_delete;
+ }
+
+ ret = perf_event__synthesize_for_pipe(&inject.tool,
+ inject.session,
+ &inject.output,
+ perf_event__repipe);
+ if (ret < 0)
+ goto out_delete;
+ }
+
+ if (inject.build_ids && !inject.build_id_all) {
/*
* to make sure the mmap records are ordered correctly
* and so that the correct especially due to jitted code
@@ -815,7 +2345,21 @@ int cmd_inject(int argc, const char **argv)
*/
inject.tool.ordered_events = true;
inject.tool.ordering_requires_timestamps = true;
+ if (known_build_ids != NULL) {
+ inject.known_build_ids =
+ perf_inject__parse_known_build_ids(known_build_ids);
+
+ if (inject.known_build_ids == NULL) {
+ pr_err("Couldn't parse known build ids.\n");
+ goto out_delete;
+ }
+ }
}
+
+ if (inject.sched_stat) {
+ inject.tool.ordered_events = true;
+ }
+
#ifdef HAVE_JITDUMP
if (inject.jit_mode) {
inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
@@ -835,8 +2379,15 @@ int cmd_inject(int argc, const char **argv)
ret = __cmd_inject(&inject);
+ guest_session__exit(&inject.guest_session);
+
out_delete:
+ strlist__delete(inject.known_build_ids);
zstd_fini(&(inject.session->zstd_data));
perf_session__delete(inject.session);
+out_close_output:
+ if (!inject.in_place_update)
+ perf_data__close(&inject.output);
+ free(inject.itrace_synth_opts.vm_tm_corr_args);
return ret;
}
diff --git a/tools/perf/builtin-kallsyms.c b/tools/perf/builtin-kallsyms.c
index c08ee81529e8..3751df744577 100644
--- a/tools/perf/builtin-kallsyms.c
+++ b/tools/perf/builtin-kallsyms.c
@@ -28,6 +28,7 @@ static int __cmd_kallsyms(int argc, const char **argv)
for (i = 0; i < argc; ++i) {
struct map *map;
+ const struct dso *dso;
struct symbol *symbol = machine__find_kernel_symbol_by_name(machine, argv[i], &map);
if (symbol == NULL) {
@@ -35,9 +36,10 @@ static int __cmd_kallsyms(int argc, const char **argv)
continue;
}
+ dso = map__dso(map);
printf("%s: %s %s %#" PRIx64 "-%#" PRIx64 " (%#" PRIx64 "-%#" PRIx64")\n",
- symbol->name, map->dso->short_name, map->dso->long_name,
- map->unmap_ip(map, symbol->start), map->unmap_ip(map, symbol->end),
+ symbol->name, dso->short_name, dso->long_name,
+ map__unmap_ip(map, symbol->start), map__unmap_ip(map, symbol->end),
symbol->start, symbol->end);
}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 003c85f5f56c..2150eeced892 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
-#include "perf.h"
#include "util/dso.h"
#include "util/evlist.h"
@@ -24,8 +23,10 @@
#include "util/debug.h"
#include "util/string2.h"
+#include "util/util.h"
#include <linux/kernel.h>
+#include <linux/numa.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
@@ -35,6 +36,7 @@
#include <regex.h>
#include <linux/ctype.h>
+#include <traceevent/event-parse.h>
static int kmem_slab;
static int kmem_page;
@@ -169,13 +171,12 @@ static int insert_caller_stat(unsigned long call_site,
return 0;
}
-static int perf_evsel__process_alloc_event(struct evsel *evsel,
- struct perf_sample *sample)
+static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
- unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
- call_site = perf_evsel__intval(evsel, sample, "call_site");
- int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
- bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
+ unsigned long ptr = evsel__intval(evsel, sample, "ptr"),
+ call_site = evsel__intval(evsel, sample, "call_site");
+ int bytes_req = evsel__intval(evsel, sample, "bytes_req"),
+ bytes_alloc = evsel__intval(evsel, sample, "bytes_alloc");
if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
insert_caller_stat(call_site, bytes_req, bytes_alloc))
@@ -185,23 +186,33 @@ static int perf_evsel__process_alloc_event(struct evsel *evsel,
total_allocated += bytes_alloc;
nr_allocs++;
- return 0;
-}
-static int perf_evsel__process_alloc_node_event(struct evsel *evsel,
- struct perf_sample *sample)
-{
- int ret = perf_evsel__process_alloc_event(evsel, sample);
+ /*
+ * Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
+ * version of tracepoints") adds the field "node" into the
+ * tracepoints 'kmalloc' and 'kmem_cache_alloc'.
+ *
+ * The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
+ * also contain the field "node".
+ *
+ * If the tracepoint contains the field "node", the tool accounts for
+ * cross-node allocations.
+ */
+ if (evsel__field(evsel, "node")) {
+ int node1, node2;
- if (!ret) {
- int node1 = cpu__get_node(sample->cpu),
- node2 = perf_evsel__intval(evsel, sample, "node");
+ node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
+ node2 = evsel__intval(evsel, sample, "node");
- if (node1 != node2)
+ /*
+ * If the field "node" is NUMA_NO_NODE (-1), we don't take it
+ * as a cross allocation.
+ */
+ if ((node2 != NUMA_NO_NODE) && (node1 != node2))
nr_cross_allocs++;
}
- return ret;
+ return 0;
}
static int ptr_cmp(void *, void *);
@@ -232,10 +243,9 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr,
return NULL;
}
-static int perf_evsel__process_free_event(struct evsel *evsel,
- struct perf_sample *sample)
+static int evsel__process_free_event(struct evsel *evsel, struct perf_sample *sample)
{
- unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
+ unsigned long ptr = evsel__intval(evsel, sample, "ptr");
struct alloc_stat *s_alloc, *s_caller;
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
@@ -413,7 +423,7 @@ static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
if (!caller) {
/* found */
if (node->ms.map)
- addr = map__unmap_ip(node->ms.map, node->ip);
+ addr = map__dso_unmap_ip(node->ms.map, node->ip);
else
addr = node->ip;
@@ -643,7 +653,6 @@ static const struct {
{ "__GFP_HIGHMEM", "HM" },
{ "GFP_DMA32", "D32" },
{ "__GFP_HIGH", "H" },
- { "__GFP_ATOMIC", "_A" },
{ "__GFP_IO", "I" },
{ "__GFP_FS", "F" },
{ "__GFP_NOWARN", "NWR" },
@@ -784,13 +793,12 @@ static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
return 0;
}
-static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
- struct perf_sample *sample)
+static int evsel__process_page_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
u64 page;
- unsigned int order = perf_evsel__intval(evsel, sample, "order");
- unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
- unsigned int migrate_type = perf_evsel__intval(evsel, sample,
+ unsigned int order = evsel__intval(evsel, sample, "order");
+ unsigned int gfp_flags = evsel__intval(evsel, sample, "gfp_flags");
+ unsigned int migrate_type = evsel__intval(evsel, sample,
"migratetype");
u64 bytes = kmem_page_size << order;
u64 callsite;
@@ -802,9 +810,9 @@ static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
};
if (use_pfn)
- page = perf_evsel__intval(evsel, sample, "pfn");
+ page = evsel__intval(evsel, sample, "pfn");
else
- page = perf_evsel__intval(evsel, sample, "page");
+ page = evsel__intval(evsel, sample, "page");
nr_page_allocs++;
total_page_alloc_bytes += bytes;
@@ -857,11 +865,10 @@ static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
return 0;
}
-static int perf_evsel__process_page_free_event(struct evsel *evsel,
- struct perf_sample *sample)
+static int evsel__process_page_free_event(struct evsel *evsel, struct perf_sample *sample)
{
u64 page;
- unsigned int order = perf_evsel__intval(evsel, sample, "order");
+ unsigned int order = evsel__intval(evsel, sample, "order");
u64 bytes = kmem_page_size << order;
struct page_stat *pstat;
struct page_stat this = {
@@ -869,9 +876,9 @@ static int perf_evsel__process_page_free_event(struct evsel *evsel,
};
if (use_pfn)
- page = perf_evsel__intval(evsel, sample, "pfn");
+ page = evsel__intval(evsel, sample, "pfn");
else
- page = perf_evsel__intval(evsel, sample, "page");
+ page = evsel__intval(evsel, sample, "page");
nr_page_frees++;
total_page_free_bytes += bytes;
@@ -1017,7 +1024,7 @@ static void __print_slab_result(struct rb_root *root,
if (sym != NULL)
snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
- addr - map->unmap_ip(map, sym->start));
+ addr - map__unmap_ip(map, sym->start));
else
snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
printf(" %-34s |", buf);
@@ -1371,15 +1378,15 @@ static int __cmd_kmem(struct perf_session *session)
struct evsel *evsel;
const struct evsel_str_handler kmem_tracepoints[] = {
/* slab allocator */
- { "kmem:kmalloc", perf_evsel__process_alloc_event, },
- { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
- { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
- { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
- { "kmem:kfree", perf_evsel__process_free_event, },
- { "kmem:kmem_cache_free", perf_evsel__process_free_event, },
+ { "kmem:kmalloc", evsel__process_alloc_event, },
+ { "kmem:kmem_cache_alloc", evsel__process_alloc_event, },
+ { "kmem:kmalloc_node", evsel__process_alloc_event, },
+ { "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
+ { "kmem:kfree", evsel__process_free_event, },
+ { "kmem:kmem_cache_free", evsel__process_free_event, },
/* page allocator */
- { "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, },
- { "kmem:mm_page_free", perf_evsel__process_page_free_event, },
+ { "kmem:mm_page_alloc", evsel__process_page_alloc_event, },
+ { "kmem:mm_page_free", evsel__process_page_free_event, },
};
if (!perf_session__has_traces(session, "kmem record"))
@@ -1391,8 +1398,8 @@ static int __cmd_kmem(struct perf_session *session)
}
evlist__for_each_entry(session->evlist, evsel) {
- if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
- perf_evsel__field(evsel, "pfn")) {
+ if (!strcmp(evsel__name(evsel), "kmem:mm_page_alloc") &&
+ evsel__field(evsel, "pfn")) {
use_pfn = true;
break;
}
@@ -1828,6 +1835,19 @@ static int parse_line_opt(const struct option *opt __maybe_unused,
return 0;
}
+static bool slab_legacy_tp_is_exposed(void)
+{
+ /*
+ * The tracepoints "kmem:kmalloc_node" and
+ * "kmem:kmem_cache_alloc_node" have been removed in recent
+ * kernels. If the tracepoint "kmem:kmalloc_node" still exists,
+ * the tool is running on an older kernel and needs to fall back
+ * to these legacy tracepoints.
+ */
+ return IS_ERR(trace_event__tp_format("kmem", "kmalloc_node")) ?
+ false : true;
+}
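
Equivalently, since trace_event__tp_format() returns an ERR_PTR() when the format cannot be loaded, the check boils down to the following one-liner (a minimal sketch for the reader, not part of the patch itself):

	return !IS_ERR(trace_event__tp_format("kmem", "kmalloc_node"));
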
+
static int __cmd_record(int argc, const char **argv)
{
const char * const record_args[] = {
@@ -1835,22 +1855,28 @@ static int __cmd_record(int argc, const char **argv)
};
const char * const slab_events[] = {
"-e", "kmem:kmalloc",
- "-e", "kmem:kmalloc_node",
"-e", "kmem:kfree",
"-e", "kmem:kmem_cache_alloc",
- "-e", "kmem:kmem_cache_alloc_node",
"-e", "kmem:kmem_cache_free",
};
+ const char * const slab_legacy_events[] = {
+ "-e", "kmem:kmalloc_node",
+ "-e", "kmem:kmem_cache_alloc_node",
+ };
const char * const page_events[] = {
"-e", "kmem:mm_page_alloc",
"-e", "kmem:mm_page_free",
};
unsigned int rec_argc, i, j;
const char **rec_argv;
+ unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
- if (kmem_slab)
+ if (kmem_slab) {
rec_argc += ARRAY_SIZE(slab_events);
+ if (slab_legacy_tp_exposed)
+ rec_argc += ARRAY_SIZE(slab_legacy_events);
+ }
if (kmem_page)
rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
@@ -1865,6 +1891,10 @@ static int __cmd_record(int argc, const char **argv)
if (kmem_slab) {
for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
rec_argv[i] = strdup(slab_events[j]);
+ if (slab_legacy_tp_exposed) {
+ for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
+ rec_argv[i] = strdup(slab_legacy_events[j]);
+ }
}
if (kmem_page) {
rec_argv[i++] = strdup("-g");
@@ -1938,7 +1968,8 @@ int cmd_kmem(int argc, const char **argv)
return ret;
argc = parse_options_subcommand(argc, argv, kmem_options,
- kmem_subcommands, kmem_usage, 0);
+ kmem_subcommands, kmem_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(kmem_usage, kmem_options);
@@ -1950,32 +1981,29 @@ int cmd_kmem(int argc, const char **argv)
kmem_page = 1;
}
- if (!strncmp(argv[0], "rec", 3)) {
+ if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
symbol__init(NULL);
return __cmd_record(argc, argv);
}
data.path = input_name;
- kmem_session = session = perf_session__new(&data, false, &perf_kmem);
+ kmem_session = session = perf_session__new(&data, &perf_kmem);
if (IS_ERR(session))
return PTR_ERR(session);
ret = -1;
if (kmem_slab) {
- if (!perf_evlist__find_tracepoint_by_name(session->evlist,
- "kmem:kmalloc")) {
+ if (!evlist__find_tracepoint_by_name(session->evlist, "kmem:kmalloc")) {
pr_err(errmsg, "slab", "slab");
goto out_delete;
}
}
if (kmem_page) {
- struct evsel *evsel;
+ struct evsel *evsel = evlist__find_tracepoint_by_name(session->evlist, "kmem:mm_page_alloc");
- evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
- "kmem:mm_page_alloc");
if (evsel == NULL) {
pr_err(errmsg, "page", "page");
goto out_delete;
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 577af4f3297a..71165036e4ca 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -23,7 +23,11 @@
#include "util/data.h"
#include "util/ordered-events.h"
#include "util/kvm-stat.h"
+#include "util/util.h"
+#include "ui/browsers/hists.h"
+#include "ui/progress.h"
#include "ui/ui.h"
+#include "util/string2.h"
#include <sys/prctl.h>
#ifdef HAVE_TIMERFD_SUPPORT
@@ -48,6 +52,553 @@
#include <math.h>
#include <perf/mmap.h>
+#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+#define GET_EVENT_KEY(func, field) \
+static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
+{ \
+ if (vcpu == -1) \
+ return event->total.field; \
+ \
+ if (vcpu >= event->max_vcpu) \
+ return 0; \
+ \
+ return event->vcpu[vcpu].field; \
+}
+
+#define COMPARE_EVENT_KEY(func, field) \
+GET_EVENT_KEY(func, field) \
+static int64_t cmp_event_ ## func(struct kvm_event *one, \
+ struct kvm_event *two, int vcpu) \
+{ \
+ return get_event_ ##func(one, vcpu) - \
+ get_event_ ##func(two, vcpu); \
+}
+
+COMPARE_EVENT_KEY(time, time);
+COMPARE_EVENT_KEY(max, stats.max);
+COMPARE_EVENT_KEY(min, stats.min);
+COMPARE_EVENT_KEY(count, stats.n);
+COMPARE_EVENT_KEY(mean, stats.mean);
+
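
For reference while reviewing, here is a sketch of what one instantiation, COMPARE_EVENT_KEY(time, time), expands to after preprocessing (derived from the two macros above; not additional patch content):

static u64 get_event_time(struct kvm_event *event, int vcpu)
{
	if (vcpu == -1)
		return event->total.time;	/* aggregated over all vcpus */

	if (vcpu >= event->max_vcpu)
		return 0;			/* this vcpu never hit the event */

	return event->vcpu[vcpu].time;
}

static int64_t cmp_event_time(struct kvm_event *one, struct kvm_event *two, int vcpu)
{
	return get_event_time(one, vcpu) - get_event_time(two, vcpu);
}
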
+struct kvm_hists {
+ struct hists hists;
+ struct perf_hpp_list list;
+};
+
+struct kvm_dimension {
+ const char *name;
+ const char *header;
+ int width;
+ int64_t (*cmp)(struct perf_hpp_fmt *fmt, struct hist_entry *left,
+ struct hist_entry *right);
+ int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+};
+
+struct kvm_fmt {
+ struct perf_hpp_fmt fmt;
+ struct kvm_dimension *dim;
+};
+
+static struct kvm_hists kvm_hists;
+
+static int64_t ev_name_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left,
+ struct hist_entry *right)
+{
+ /* Negate the comparison so entries sort in alphabetical order */
+ return -strcmp(left->kvm_info->name, right->kvm_info->name);
+}
+
+static int fmt_width(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp __maybe_unused,
+ struct hists *hists __maybe_unused);
+
+static int ev_name_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = fmt_width(fmt, hpp, he->hists);
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, he->kvm_info->name);
+}
+
+static struct kvm_dimension dim_event = {
+ .header = "Event name",
+ .name = "ev_name",
+ .cmp = ev_name_cmp,
+ .entry = ev_name_entry,
+ .width = 40,
+};
+
+#define EV_METRIC_CMP(metric) \
+static int64_t ev_cmp_##metric(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct hist_entry *left, \
+ struct hist_entry *right) \
+{ \
+ struct kvm_event *event_left; \
+ struct kvm_event *event_right; \
+ struct perf_kvm_stat *perf_kvm; \
+ \
+ event_left = container_of(left, struct kvm_event, he); \
+ event_right = container_of(right, struct kvm_event, he); \
+ \
+ perf_kvm = event_left->perf_kvm; \
+ return cmp_event_##metric(event_left, event_right, \
+ perf_kvm->trace_vcpu); \
+}
+
+EV_METRIC_CMP(time)
+EV_METRIC_CMP(count)
+EV_METRIC_CMP(max)
+EV_METRIC_CMP(min)
+EV_METRIC_CMP(mean)
+
+#define EV_METRIC_ENTRY(metric) \
+static int ev_entry_##metric(struct perf_hpp_fmt *fmt, \
+ struct perf_hpp *hpp, \
+ struct hist_entry *he) \
+{ \
+ struct kvm_event *event; \
+ int width = fmt_width(fmt, hpp, he->hists); \
+ struct perf_kvm_stat *perf_kvm; \
+ \
+ event = container_of(he, struct kvm_event, he); \
+ perf_kvm = event->perf_kvm; \
+ return scnprintf(hpp->buf, hpp->size, "%*lu", width, \
+ get_event_##metric(event, perf_kvm->trace_vcpu)); \
+}
+
+EV_METRIC_ENTRY(time)
+EV_METRIC_ENTRY(count)
+EV_METRIC_ENTRY(max)
+EV_METRIC_ENTRY(min)
+
+static struct kvm_dimension dim_time = {
+ .header = "Time (ns)",
+ .name = "time",
+ .cmp = ev_cmp_time,
+ .entry = ev_entry_time,
+ .width = 12,
+};
+
+static struct kvm_dimension dim_count = {
+ .header = "Samples",
+ .name = "sample",
+ .cmp = ev_cmp_count,
+ .entry = ev_entry_count,
+ .width = 12,
+};
+
+static struct kvm_dimension dim_max_time = {
+ .header = "Max Time (ns)",
+ .name = "max_t",
+ .cmp = ev_cmp_max,
+ .entry = ev_entry_max,
+ .width = 14,
+};
+
+static struct kvm_dimension dim_min_time = {
+ .header = "Min Time (ns)",
+ .name = "min_t",
+ .cmp = ev_cmp_min,
+ .entry = ev_entry_min,
+ .width = 14,
+};
+
+static int ev_entry_mean(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct kvm_event *event;
+ int width = fmt_width(fmt, hpp, he->hists);
+ struct perf_kvm_stat *perf_kvm;
+
+ event = container_of(he, struct kvm_event, he);
+ perf_kvm = event->perf_kvm;
+ return scnprintf(hpp->buf, hpp->size, "%*lu", width,
+ get_event_mean(event, perf_kvm->trace_vcpu));
+}
+
+static struct kvm_dimension dim_mean_time = {
+ .header = "Mean Time (ns)",
+ .name = "mean_t",
+ .cmp = ev_cmp_mean,
+ .entry = ev_entry_mean,
+ .width = 14,
+};
+
+#define PERC_STR(__s, __v) \
+({ \
+ scnprintf(__s, sizeof(__s), "%.2F%%", __v); \
+ __s; \
+})
+
+static double percent(u64 st, u64 tot)
+{
+ return tot ? 100. * (double) st / (double) tot : 0;
+}
+
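
A small worked example of the two helpers above (a sketch only; buf is a hypothetical local buffer):

	char buf[10];

	/* percent() guards against a zero total, so 0/0 yields 0 rather than NaN. */
	double p = percent(25, 200);		/* 12.5 */
	printf("%s\n", PERC_STR(buf, p));	/* prints "12.50%" */
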
+#define EV_METRIC_PERCENT(metric) \
+static int ev_percent_##metric(struct hist_entry *he) \
+{ \
+ struct kvm_event *event; \
+ struct perf_kvm_stat *perf_kvm; \
+ \
+ event = container_of(he, struct kvm_event, he); \
+ perf_kvm = event->perf_kvm; \
+ \
+ return percent(get_event_##metric(event, perf_kvm->trace_vcpu), \
+ perf_kvm->total_##metric); \
+}
+
+EV_METRIC_PERCENT(time)
+EV_METRIC_PERCENT(count)
+
+static int ev_entry_time_precent(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = fmt_width(fmt, hpp, he->hists);
+ double per;
+ char buf[10];
+
+ per = ev_percent_time(he);
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int64_t
+ev_cmp_time_precent(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = ev_percent_time(left);
+ per_right = ev_percent_time(right);
+
+ return per_left - per_right;
+}
+
+static struct kvm_dimension dim_time_percent = {
+ .header = "Time%",
+ .name = "percent_time",
+ .cmp = ev_cmp_time_precent,
+ .entry = ev_entry_time_precent,
+ .width = 12,
+};
+
+static int ev_entry_count_precent(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ int width = fmt_width(fmt, hpp, he->hists);
+ double per;
+ char buf[10];
+
+ per = ev_percent_count(he);
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, PERC_STR(buf, per));
+}
+
+static int64_t
+ev_cmp_count_precent(struct perf_hpp_fmt *fmt __maybe_unused,
+ struct hist_entry *left, struct hist_entry *right)
+{
+ double per_left;
+ double per_right;
+
+ per_left = ev_percent_count(left);
+ per_right = ev_percent_count(right);
+
+ return per_left - per_right;
+}
+
+static struct kvm_dimension dim_count_percent = {
+ .header = "Sample%",
+ .name = "percent_sample",
+ .cmp = ev_cmp_count_precent,
+ .entry = ev_entry_count_precent,
+ .width = 12,
+};
+
+static struct kvm_dimension *dimensions[] = {
+ &dim_event,
+ &dim_time,
+ &dim_time_percent,
+ &dim_count,
+ &dim_count_percent,
+ &dim_max_time,
+ &dim_min_time,
+ &dim_mean_time,
+ NULL,
+};
+
+static int fmt_width(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp __maybe_unused,
+ struct hists *hists __maybe_unused)
+{
+ struct kvm_fmt *kvm_fmt;
+
+ kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
+ return kvm_fmt->dim->width;
+}
+
+static int fmt_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hists *hists, int line __maybe_unused,
+ int *span __maybe_unused)
+{
+ struct kvm_fmt *kvm_fmt;
+ struct kvm_dimension *dim;
+ int width = fmt_width(fmt, hpp, hists);
+
+ kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
+ dim = kvm_fmt->dim;
+
+ return scnprintf(hpp->buf, hpp->size, "%*s", width, dim->header);
+}
+
+static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+ struct kvm_fmt *kvm_fmt_a = container_of(a, struct kvm_fmt, fmt);
+ struct kvm_fmt *kvm_fmt_b = container_of(b, struct kvm_fmt, fmt);
+
+ return kvm_fmt_a->dim == kvm_fmt_b->dim;
+}
+
+static void fmt_free(struct perf_hpp_fmt *fmt)
+{
+ struct kvm_fmt *kvm_fmt;
+
+ kvm_fmt = container_of(fmt, struct kvm_fmt, fmt);
+ free(kvm_fmt);
+}
+
+static struct kvm_dimension *get_dimension(const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; dimensions[i] != NULL; i++) {
+ if (!strcmp(dimensions[i]->name, name))
+ return dimensions[i];
+ }
+
+ return NULL;
+}
+
+static struct kvm_fmt *get_format(const char *name)
+{
+ struct kvm_dimension *dim = get_dimension(name);
+ struct kvm_fmt *kvm_fmt;
+ struct perf_hpp_fmt *fmt;
+
+ if (!dim)
+ return NULL;
+
+ kvm_fmt = zalloc(sizeof(*kvm_fmt));
+ if (!kvm_fmt)
+ return NULL;
+
+ kvm_fmt->dim = dim;
+
+ fmt = &kvm_fmt->fmt;
+ INIT_LIST_HEAD(&fmt->list);
+ INIT_LIST_HEAD(&fmt->sort_list);
+ fmt->cmp = dim->cmp;
+ fmt->sort = dim->cmp;
+ fmt->color = NULL;
+ fmt->entry = dim->entry;
+ fmt->header = fmt_header;
+ fmt->width = fmt_width;
+ fmt->collapse = dim->cmp;
+ fmt->equal = fmt_equal;
+ fmt->free = fmt_free;
+
+ return kvm_fmt;
+}
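
For orientation, this is what get_format() hands back for one dimension name (a sketch, not part of the patch):

	struct kvm_fmt *kf = get_format("time");	/* resolves to dim_time above */

	/*
	 * kf->fmt.cmp, .sort and .collapse all point at ev_cmp_time and
	 * kf->fmt.entry at ev_entry_time, while header/width/equal/free use
	 * the shared fmt_* callbacks, so the same dimension can be registered
	 * either as an output column or as a sort key.
	 */
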
+
+static int kvm_hists__init_output(struct perf_hpp_list *hpp_list, char *name)
+{
+ struct kvm_fmt *kvm_fmt = get_format(name);
+
+ if (!kvm_fmt) {
+ pr_warning("Failed to find format for output field %s.\n", name);
+ return -EINVAL;
+ }
+
+ perf_hpp_list__column_register(hpp_list, &kvm_fmt->fmt);
+ return 0;
+}
+
+static int kvm_hists__init_sort(struct perf_hpp_list *hpp_list, char *name)
+{
+ struct kvm_fmt *kvm_fmt = get_format(name);
+
+ if (!kvm_fmt) {
+ pr_warning("Failed to find format for sort key %s.\n", name);
+ return -EINVAL;
+ }
+
+ perf_hpp_list__register_sort_field(hpp_list, &kvm_fmt->fmt);
+ return 0;
+}
+
+static int kvm_hpp_list__init(char *list,
+ struct perf_hpp_list *hpp_list,
+ int (*fn)(struct perf_hpp_list *hpp_list,
+ char *name))
+{
+ char *tmp, *tok;
+ int ret;
+
+ if (!list || !fn)
+ return 0;
+
+ for (tok = strtok_r(list, ", ", &tmp); tok;
+ tok = strtok_r(NULL, ", ", &tmp)) {
+ ret = fn(hpp_list, tok);
+ if (!ret)
+ continue;
+
+ /* Handle errors */
+ if (ret == -EINVAL)
+ pr_err("Invalid field key: '%s'", tok);
+ else if (ret == -ESRCH)
+ pr_err("Unknown field key: '%s'", tok);
+ else
+ pr_err("Failed to initialize field key: '%s'", tok);
+
+ break;
+ }
+
+ return ret;
+}
+
+static int kvm_hpp_list__parse(struct perf_hpp_list *hpp_list,
+ const char *output_, const char *sort_)
+{
+ char *output = output_ ? strdup(output_) : NULL;
+ char *sort = sort_ ? strdup(sort_) : NULL;
+ int ret;
+
+ ret = kvm_hpp_list__init(output, hpp_list, kvm_hists__init_output);
+ if (ret)
+ goto out;
+
+ ret = kvm_hpp_list__init(sort, hpp_list, kvm_hists__init_sort);
+ if (ret)
+ goto out;
+
+ /* Copy sort keys to output fields */
+ perf_hpp__setup_output_field(hpp_list);
+
+ /* and then copy output fields to sort keys */
+ perf_hpp__append_sort_keys(hpp_list);
+out:
+ free(output);
+ free(sort);
+ return ret;
+}
+
+static int kvm_hists__init(void)
+{
+ kvm_hists.list.nr_header_lines = 1;
+ __hists__init(&kvm_hists.hists, &kvm_hists.list);
+ perf_hpp_list__init(&kvm_hists.list);
+ return kvm_hpp_list__parse(&kvm_hists.list, NULL, "ev_name");
+}
+
+static int kvm_hists__reinit(const char *output, const char *sort)
+{
+ perf_hpp__reset_output_field(&kvm_hists.list);
+ return kvm_hpp_list__parse(&kvm_hists.list, output, sort);
+}
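
How the two entry points are combined later in this patch (condensed from sort_result() and the live-mode reset in perf_kvm__handle_timerfd(); shown here purely as a usage sketch):

	/* One-time setup: sort by event name, default output fields. */
	kvm_hists__init();

	/* Before printing a report: full column list plus the user's sort key. */
	kvm_hists__reinit("ev_name,sample,percent_sample,time,percent_time,"
			  "max_t,min_t,mean_t", kvm->sort_key);

	/* After a live-mode refresh: drop the extra columns, keep "ev_name". */
	kvm_hists__reinit(NULL, "ev_name");
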
+static void print_result(struct perf_kvm_stat *kvm);
+
+#ifdef HAVE_SLANG_SUPPORT
+static void kvm_browser__update_nr_entries(struct hist_browser *hb)
+{
+ struct rb_node *nd = rb_first_cached(&hb->hists->entries);
+ u64 nr_entries = 0;
+
+ for (; nd; nd = rb_next(nd)) {
+ struct hist_entry *he = rb_entry(nd, struct hist_entry,
+ rb_node);
+
+ if (!he->filtered)
+ nr_entries++;
+ }
+
+ hb->nr_non_filtered_entries = nr_entries;
+}
+
+static int kvm_browser__title(struct hist_browser *browser,
+ char *buf, size_t size)
+{
+ scnprintf(buf, size, "KVM event statistics (%lu entries)",
+ browser->nr_non_filtered_entries);
+ return 0;
+}
+
+static struct hist_browser*
+perf_kvm_browser__new(struct hists *hists)
+{
+ struct hist_browser *browser = hist_browser__new(hists);
+
+ if (browser)
+ browser->title = kvm_browser__title;
+
+ return browser;
+}
+
+static int kvm__hists_browse(struct hists *hists)
+{
+ struct hist_browser *browser;
+ int key = -1;
+
+ browser = perf_kvm_browser__new(hists);
+ if (browser == NULL)
+ return -1;
+
+ /* reset abort key so that it can get Ctrl-C as a key */
+ SLang_reset_tty();
+ SLang_init_tty(0, 0, 0);
+
+ kvm_browser__update_nr_entries(browser);
+
+ while (1) {
+ key = hist_browser__run(browser, "? - help", true, 0);
+
+ switch (key) {
+ case 'q':
+ goto out;
+ default:
+ break;
+ }
+ }
+
+out:
+ hist_browser__delete(browser);
+ return 0;
+}
+
+static void kvm_display(struct perf_kvm_stat *kvm)
+{
+ if (!use_browser)
+ print_result(kvm);
+ else
+ kvm__hists_browse(&kvm_hists.hists);
+}
+
+#else
+
+static void kvm_display(struct perf_kvm_stat *kvm)
+{
+ use_browser = 0;
+ print_result(kvm);
+}
+
+#endif /* HAVE_SLANG_SUPPORT */
+
+#endif // defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+
static const char *get_filename_for_perf_kvm(void)
{
const char *filename;
@@ -62,19 +613,19 @@ static const char *get_filename_for_perf_kvm(void)
return filename;
}
-#ifdef HAVE_KVM_STAT_SUPPORT
+#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
void exit_event_get_key(struct evsel *evsel,
struct perf_sample *sample,
struct event_key *key)
{
key->info = 0;
- key->key = perf_evsel__intval(evsel, sample, kvm_exit_reason);
+ key->key = evsel__intval(evsel, sample, kvm_exit_reason);
}
bool kvm_exit_event(struct evsel *evsel)
{
- return !strcmp(evsel->name, kvm_exit_trace);
+ return evsel__name_is(evsel, kvm_exit_trace);
}
bool exit_event_begin(struct evsel *evsel,
@@ -90,7 +641,7 @@ bool exit_event_begin(struct evsel *evsel,
bool kvm_entry_event(struct evsel *evsel)
{
- return !strcmp(evsel->name, kvm_entry_trace);
+ return evsel__name_is(evsel, kvm_entry_trace);
}
bool exit_event_end(struct evsel *evsel,
@@ -122,7 +673,7 @@ void exit_event_decode_key(struct perf_kvm_stat *kvm,
const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
key->key);
- scnprintf(decode, decode_str_len, "%s", exit_reason);
+ scnprintf(decode, KVM_EVENT_NAME_LEN, "%s", exit_reason);
}
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
@@ -145,44 +696,37 @@ struct vcpu_event_record {
struct kvm_event *last_event;
};
-
-static void init_kvm_event_record(struct perf_kvm_stat *kvm)
-{
- unsigned int i;
-
- for (i = 0; i < EVENTS_CACHE_SIZE; i++)
- INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
-}
-
#ifdef HAVE_TIMERFD_SUPPORT
-static void clear_events_cache_stats(struct list_head *kvm_events_cache)
+static void clear_events_cache_stats(void)
{
- struct list_head *head;
+ struct rb_root_cached *root;
+ struct rb_node *nd;
struct kvm_event *event;
- unsigned int i;
- int j;
-
- for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
- head = &kvm_events_cache[i];
- list_for_each_entry(event, head, hash_entry) {
- /* reset stats for event */
- event->total.time = 0;
- init_stats(&event->total.stats);
-
- for (j = 0; j < event->max_vcpu; ++j) {
- event->vcpu[j].time = 0;
- init_stats(&event->vcpu[j].stats);
- }
+ int i;
+
+ if (hists__has(&kvm_hists.hists, need_collapse))
+ root = &kvm_hists.hists.entries_collapsed;
+ else
+ root = kvm_hists.hists.entries_in;
+
+ for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
+ struct hist_entry *he;
+
+ he = rb_entry(nd, struct hist_entry, rb_node_in);
+ event = container_of(he, struct kvm_event, he);
+
+ /* reset stats for event */
+ event->total.time = 0;
+ init_stats(&event->total.stats);
+
+ for (i = 0; i < event->max_vcpu; ++i) {
+ event->vcpu[i].time = 0;
+ init_stats(&event->vcpu[i].stats);
}
}
}
#endif
-static int kvm_events_hash_fn(u64 key)
-{
- return key & (EVENTS_CACHE_SIZE - 1);
-}
-
static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
{
int old_max_vcpu = event->max_vcpu;
@@ -208,54 +752,78 @@ static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
return true;
}
-static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
+static void *kvm_he_zalloc(size_t size)
{
- struct kvm_event *event;
+ struct kvm_event *kvm_ev;
- event = zalloc(sizeof(*event));
- if (!event) {
- pr_err("Not enough memory\n");
+ kvm_ev = zalloc(size + sizeof(*kvm_ev));
+ if (!kvm_ev)
return NULL;
- }
- event->key = *key;
- init_stats(&event->total.stats);
- return event;
+ init_stats(&kvm_ev->total.stats);
+ hists__inc_nr_samples(&kvm_hists.hists, 0);
+ return &kvm_ev->he;
+}
+
+static void kvm_he_free(void *he)
+{
+ struct kvm_event *kvm_ev;
+
+ kvm_ev = container_of(he, struct kvm_event, he);
+ free(kvm_ev);
}
+static struct hist_entry_ops kvm_ev_entry_ops = {
+ .new = kvm_he_zalloc,
+ .free = kvm_he_free,
+};
+
static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
- struct event_key *key)
+ struct event_key *key,
+ struct perf_sample *sample)
{
struct kvm_event *event;
- struct list_head *head;
+ struct hist_entry *he;
+ struct kvm_info *ki;
BUG_ON(key->key == INVALID_KEY);
- head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
- list_for_each_entry(event, head, hash_entry) {
- if (event->key.key == key->key && event->key.info == key->info)
- return event;
+ ki = kvm_info__new();
+ if (!ki) {
+ pr_err("Failed to allocate kvm info\n");
+ return NULL;
}
- event = kvm_alloc_init_event(key);
- if (!event)
+ kvm->events_ops->decode_key(kvm, key, ki->name);
+ he = hists__add_entry_ops(&kvm_hists.hists, &kvm_ev_entry_ops,
+ &kvm->al, NULL, NULL, NULL, ki, sample, true);
+ if (he == NULL) {
+ pr_err("Failed to allocate hist entry\n");
+ free(ki);
return NULL;
+ }
+
+ event = container_of(he, struct kvm_event, he);
+ if (!event->perf_kvm) {
+ event->perf_kvm = kvm;
+ event->key = *key;
+ }
- list_add(&event->hash_entry, head);
return event;
}
static bool handle_begin_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
- struct event_key *key, u64 timestamp)
+ struct event_key *key,
+ struct perf_sample *sample)
{
struct kvm_event *event = NULL;
if (key->key != INVALID_KEY)
- event = find_create_kvm_event(kvm, key);
+ event = find_create_kvm_event(kvm, key, sample);
vcpu_record->last_event = event;
- vcpu_record->start_time = timestamp;
+ vcpu_record->start_time = sample->time;
return true;
}
@@ -277,9 +845,14 @@ static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
avg_stats(&kvm_stats->stats));
}
-static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
+static bool update_kvm_event(struct perf_kvm_stat *kvm,
+ struct kvm_event *event, int vcpu_id,
u64 time_diff)
{
+ /* Update overall statistics */
+ kvm->total_count++;
+ kvm->total_time += time_diff;
+
if (vcpu_id == -1) {
kvm_update_event_stats(&event->total, time_diff);
return true;
@@ -305,7 +878,7 @@ static bool is_child_event(struct perf_kvm_stat *kvm,
return false;
for (; child_ops->name; child_ops++) {
- if (!strcmp(evsel->name, child_ops->name)) {
+ if (evsel__name_is(evsel, child_ops->name)) {
child_ops->get_key(evsel, sample, key);
return true;
}
@@ -317,12 +890,12 @@ static bool is_child_event(struct perf_kvm_stat *kvm,
static bool handle_child_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key,
- struct perf_sample *sample __maybe_unused)
+ struct perf_sample *sample)
{
struct kvm_event *event = NULL;
if (key->key != INVALID_KEY)
- event = find_create_kvm_event(kvm, key);
+ event = find_create_kvm_event(kvm, key, sample);
vcpu_record->last_event = event;
@@ -371,7 +944,7 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
return true;
if (!event)
- event = find_create_kvm_event(kvm, key);
+ event = find_create_kvm_event(kvm, key, sample);
if (!event)
return false;
@@ -388,7 +961,7 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
time_diff = sample->time - time_begin;
if (kvm->duration && time_diff > kvm->duration) {
- char decode[decode_str_len];
+ char decode[KVM_EVENT_NAME_LEN];
kvm->events_ops->decode_key(kvm, &event->key, decode);
if (!skip_event(decode)) {
@@ -398,7 +971,7 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
}
}
- return update_kvm_event(event, vcpu, time_diff);
+ return update_kvm_event(kvm, event, vcpu, time_diff);
}
static
@@ -416,8 +989,7 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread,
return NULL;
}
- vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample,
- vcpu_id_str);
+ vcpu_record->vcpu_id = evsel__intval(evsel, sample, vcpu_id_str);
thread__set_priv(thread, vcpu_record);
}
@@ -443,7 +1015,7 @@ static bool handle_kvm_event(struct perf_kvm_stat *kvm,
return true;
if (kvm->events_ops->is_begin_event(evsel, sample, &key))
- return handle_begin_event(kvm, vcpu_record, &key, sample->time);
+ return handle_begin_event(kvm, vcpu_record, &key, sample);
if (is_child_event(kvm, evsel, sample, &key))
return handle_child_event(kvm, vcpu_record, &key, sample);
@@ -454,119 +1026,51 @@ static bool handle_kvm_event(struct perf_kvm_stat *kvm,
return true;
}
-#define GET_EVENT_KEY(func, field) \
-static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \
-{ \
- if (vcpu == -1) \
- return event->total.field; \
- \
- if (vcpu >= event->max_vcpu) \
- return 0; \
- \
- return event->vcpu[vcpu].field; \
-}
-
-#define COMPARE_EVENT_KEY(func, field) \
-GET_EVENT_KEY(func, field) \
-static int compare_kvm_event_ ## func(struct kvm_event *one, \
- struct kvm_event *two, int vcpu)\
-{ \
- return get_event_ ##func(one, vcpu) > \
- get_event_ ##func(two, vcpu); \
-}
-
-GET_EVENT_KEY(time, time);
-COMPARE_EVENT_KEY(count, stats.n);
-COMPARE_EVENT_KEY(mean, stats.mean);
-GET_EVENT_KEY(max, stats.max);
-GET_EVENT_KEY(min, stats.min);
-
-#define DEF_SORT_NAME_KEY(name, compare_key) \
- { #name, compare_kvm_event_ ## compare_key }
-
-static struct kvm_event_key keys[] = {
- DEF_SORT_NAME_KEY(sample, count),
- DEF_SORT_NAME_KEY(time, mean),
- { NULL, NULL }
-};
-
-static bool select_key(struct perf_kvm_stat *kvm)
+static bool is_valid_key(struct perf_kvm_stat *kvm)
{
- int i;
+ static const char *key_array[] = {
+ "ev_name", "sample", "time", "max_t", "min_t", "mean_t",
+ };
+ unsigned int i;
- for (i = 0; keys[i].name; i++) {
- if (!strcmp(keys[i].name, kvm->sort_key)) {
- kvm->compare = keys[i].key;
+ for (i = 0; i < ARRAY_SIZE(key_array); i++)
+ if (!strcmp(key_array[i], kvm->sort_key))
return true;
- }
- }
- pr_err("Unknown compare key:%s\n", kvm->sort_key);
+ pr_err("Unsupported sort key: %s\n", kvm->sort_key);
return false;
}
-static void insert_to_result(struct rb_root *result, struct kvm_event *event,
- key_cmp_fun bigger, int vcpu)
-{
- struct rb_node **rb = &result->rb_node;
- struct rb_node *parent = NULL;
- struct kvm_event *p;
-
- while (*rb) {
- p = container_of(*rb, struct kvm_event, rb);
- parent = *rb;
-
- if (bigger(event, p, vcpu))
- rb = &(*rb)->rb_left;
- else
- rb = &(*rb)->rb_right;
- }
-
- rb_link_node(&event->rb, parent, rb);
- rb_insert_color(&event->rb, result);
-}
-
-static void
-update_total_count(struct perf_kvm_stat *kvm, struct kvm_event *event)
-{
- int vcpu = kvm->trace_vcpu;
-
- kvm->total_count += get_event_count(event, vcpu);
- kvm->total_time += get_event_time(event, vcpu);
-}
-
static bool event_is_valid(struct kvm_event *event, int vcpu)
{
return !!get_event_count(event, vcpu);
}
-static void sort_result(struct perf_kvm_stat *kvm)
+static int filter_cb(struct hist_entry *he, void *arg __maybe_unused)
{
- unsigned int i;
- int vcpu = kvm->trace_vcpu;
struct kvm_event *event;
+ struct perf_kvm_stat *perf_kvm;
- for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
- list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
- if (event_is_valid(event, vcpu)) {
- update_total_count(kvm, event);
- insert_to_result(&kvm->result, event,
- kvm->compare, vcpu);
- }
- }
- }
+ event = container_of(he, struct kvm_event, he);
+ perf_kvm = event->perf_kvm;
+ if (!event_is_valid(event, perf_kvm->trace_vcpu))
+ he->filtered = 1;
+ else
+ he->filtered = 0;
+ return 0;
}
-/* returns left most element of result, and erase it */
-static struct kvm_event *pop_from_result(struct rb_root *result)
+static void sort_result(struct perf_kvm_stat *kvm)
{
- struct rb_node *node = rb_first(result);
-
- if (!node)
- return NULL;
+ struct ui_progress prog;
+ const char *output_columns = "ev_name,sample,percent_sample,"
+ "time,percent_time,max_t,min_t,mean_t";
- rb_erase(node, result);
- return container_of(node, struct kvm_event, rb);
+ kvm_hists__reinit(output_columns, kvm->sort_key);
+ ui_progress__init(&prog, kvm_hists.hists.nr_entries, "Sorting...");
+ hists__collapse_resort(&kvm_hists.hists, NULL);
+ hists__output_resort_cb(&kvm_hists.hists, NULL, filter_cb);
+ ui_progress__finish();
}
static void print_vcpu_info(struct perf_kvm_stat *kvm)
@@ -606,9 +1110,10 @@ static void show_timeofday(void)
static void print_result(struct perf_kvm_stat *kvm)
{
- char decode[decode_str_len];
+ char decode[KVM_EVENT_NAME_LEN];
struct kvm_event *event;
int vcpu = kvm->trace_vcpu;
+ struct rb_node *nd;
if (kvm->live) {
puts(CONSOLE_CLEAR);
@@ -617,7 +1122,7 @@ static void print_result(struct perf_kvm_stat *kvm)
pr_info("\n\n");
print_vcpu_info(kvm);
- pr_info("%*s ", decode_str_len, kvm->events_ops->name);
+ pr_info("%*s ", KVM_EVENT_NAME_LEN, kvm->events_ops->name);
pr_info("%10s ", "Samples");
pr_info("%9s ", "Samples%");
@@ -627,16 +1132,22 @@ static void print_result(struct perf_kvm_stat *kvm)
pr_info("%16s ", "Avg time");
pr_info("\n\n");
- while ((event = pop_from_result(&kvm->result))) {
+ for (nd = rb_first_cached(&kvm_hists.hists.entries); nd; nd = rb_next(nd)) {
+ struct hist_entry *he;
u64 ecount, etime, max, min;
+ he = rb_entry(nd, struct hist_entry, rb_node);
+ if (he->filtered)
+ continue;
+
+ event = container_of(he, struct kvm_event, he);
ecount = get_event_count(event, vcpu);
etime = get_event_time(event, vcpu);
max = get_event_max(event, vcpu);
min = get_event_min(event, vcpu);
kvm->events_ops->decode_key(kvm, &event->key, decode);
- pr_info("%*s ", decode_str_len, decode);
+ pr_info("%*s ", KVM_EVENT_NAME_LEN, decode);
pr_info("%10llu ", (unsigned long long)ecount);
pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
@@ -654,7 +1165,7 @@ static void print_result(struct perf_kvm_stat *kvm)
pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
}
-#ifdef HAVE_TIMERFD_SUPPORT
+#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
static int process_lost_event(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
@@ -690,6 +1201,11 @@ static int process_sample_event(struct perf_tool *tool,
if (skip_sample(kvm, sample))
return 0;
+ if (machine__resolve(machine, &kvm->al, sample) < 0) {
+ pr_warning("Failed to resolve address location, skipping sample.\n");
+ return 0;
+ }
+
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -742,7 +1258,7 @@ static bool verify_vcpu(int vcpu)
return true;
}
-#ifdef HAVE_TIMERFD_SUPPORT
+#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
/* keeping the max events to a modest level to keep
* the processing of samples per mmap smooth.
*/
@@ -765,14 +1281,14 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
return (err == -EAGAIN) ? 0 : -1;
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
- err = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
+ err = evlist__parse_sample_timestamp(evlist, event, &timestamp);
if (err) {
perf_mmap__consume(&md->core);
pr_err("Failed to parse sample\n");
return -1;
}
- err = perf_session__queue_event(kvm->session, event, timestamp, 0);
+ err = perf_session__queue_event(kvm->session, event, timestamp, 0, NULL);
/*
* FIXME: Here we can't consume the event, as perf_session__queue_event will
* point to it, and it'll get possibly overwritten by the kernel.
@@ -901,8 +1417,11 @@ static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
sort_result(kvm);
print_result(kvm);
+ /* Reset sort list to "ev_name" */
+ kvm_hists__reinit(NULL, "ev_name");
+
/* reset counts */
- clear_events_cache_stats(kvm->kvm_events_cache);
+ clear_events_cache_stats();
kvm->total_count = 0;
kvm->total_time = 0;
kvm->lost_events = 0;
@@ -952,13 +1471,14 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
return ret;
if (!verify_vcpu(kvm->trace_vcpu) ||
- !select_key(kvm) ||
+ !is_valid_key(kvm) ||
!register_kvm_events_ops(kvm)) {
goto out;
}
set_term_quiet_input(&save);
- init_kvm_event_record(kvm);
+
+ kvm_hists__init();
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
@@ -1009,6 +1529,8 @@ static int kvm_events_live_report(struct perf_kvm_stat *kvm)
}
out:
+ hists__delete_entries(&kvm_hists.hists);
+
if (kvm->timerfd >= 0)
close(kvm->timerfd);
@@ -1023,7 +1545,7 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
struct evlist *evlist = kvm->evlist;
char sbuf[STRERR_BUFSIZE];
- perf_evlist__config(evlist, &kvm->opts, NULL);
+ evlist__config(evlist, &kvm->opts, NULL);
/*
* Note: exclude_{guest,host} do not apply here.
@@ -1033,16 +1555,16 @@ static int kvm_live_open_events(struct perf_kvm_stat *kvm)
struct perf_event_attr *attr = &pos->core.attr;
/* make sure these *are* set */
- perf_evsel__set_sample_bit(pos, TID);
- perf_evsel__set_sample_bit(pos, TIME);
- perf_evsel__set_sample_bit(pos, CPU);
- perf_evsel__set_sample_bit(pos, RAW);
+ evsel__set_sample_bit(pos, TID);
+ evsel__set_sample_bit(pos, TIME);
+ evsel__set_sample_bit(pos, CPU);
+ evsel__set_sample_bit(pos, RAW);
/* make sure these are *not*; want as small a sample as possible */
- perf_evsel__reset_sample_bit(pos, PERIOD);
- perf_evsel__reset_sample_bit(pos, IP);
- perf_evsel__reset_sample_bit(pos, CALLCHAIN);
- perf_evsel__reset_sample_bit(pos, ADDR);
- perf_evsel__reset_sample_bit(pos, READ);
+ evsel__reset_sample_bit(pos, PERIOD);
+ evsel__reset_sample_bit(pos, IP);
+ evsel__reset_sample_bit(pos, CALLCHAIN);
+ evsel__reset_sample_bit(pos, ADDR);
+ evsel__reset_sample_bit(pos, READ);
attr->mmap = 0;
attr->comm = 0;
attr->task = 0;
@@ -1094,7 +1616,7 @@ static int read_events(struct perf_kvm_stat *kvm)
};
kvm->tool = eops;
- kvm->session = perf_session__new(&file, false, &kvm->tool);
+ kvm->session = perf_session__new(&file, &kvm->tool);
if (IS_ERR(kvm->session)) {
pr_err("Initializing perf session failed\n");
return PTR_ERR(kvm->session);
@@ -1146,23 +1668,32 @@ static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
if (!verify_vcpu(vcpu))
goto exit;
- if (!select_key(kvm))
+ if (!is_valid_key(kvm))
goto exit;
if (!register_kvm_events_ops(kvm))
goto exit;
- init_kvm_event_record(kvm);
- setup_pager();
+ if (kvm->use_stdio) {
+ use_browser = 0;
+ setup_pager();
+ } else {
+ use_browser = 1;
+ }
+
+ setup_browser(false);
+
+ kvm_hists__init();
ret = read_events(kvm);
if (ret)
goto exit;
sort_result(kvm);
- print_result(kvm);
+ kvm_display(kvm);
exit:
+ hists__delete_entries(&kvm_hists.hists);
return ret;
}
@@ -1267,6 +1798,7 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
"analyze events only for given process id(s)"),
OPT_BOOLEAN('f', "force", &kvm->force, "don't complain, do it"),
+ OPT_BOOLEAN(0, "stdio", &kvm->use_stdio, "use the stdio interface"),
OPT_END()
};
@@ -1284,13 +1816,17 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
kvm_events_report_options);
}
+#ifndef HAVE_SLANG_SUPPORT
+ kvm->use_stdio = true;
+#endif
+
if (!kvm->opts.target.pid)
kvm->opts.target.system_wide = true;
return kvm_events_report_vcpu(kvm);
}
-#ifdef HAVE_TIMERFD_SUPPORT
+#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
static struct evlist *kvm_live_event_list(void)
{
struct evlist *evlist;
@@ -1320,7 +1856,7 @@ static struct evlist *kvm_live_event_list(void)
*name = '\0';
name++;
- if (perf_evlist__add_newtp(evlist, sys, name, NULL)) {
+ if (evlist__add_newtp(evlist, sys, name, NULL)) {
pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
free(tp);
goto out;
@@ -1350,8 +1886,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
"record events on existing process id"),
OPT_CALLBACK('m', "mmap-pages", &kvm->opts.mmap_pages, "pages",
- "number of mmap data pages",
- perf_evlist__parse_mmap_pages),
+ "number of mmap data pages", evlist__parse_mmap_pages),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
@@ -1443,13 +1978,13 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
goto out;
}
- if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
+ if (evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
usage_with_options(live_usage, live_options);
/*
* perf session
*/
- kvm->session = perf_session__new(&data, false, &kvm->tool);
+ kvm->session = perf_session__new(&data, &kvm->tool);
if (IS_ERR(kvm->session)) {
err = PTR_ERR(kvm->session);
goto out;
@@ -1458,7 +1993,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
perf_session__set_id_hdr_size(kvm->session);
ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
- kvm->evlist->core.threads, false, 1);
+ kvm->evlist->core.threads, true, false, 1);
err = kvm_live_open_events(kvm);
if (err)
goto out;
@@ -1502,13 +2037,13 @@ static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
goto perf_stat;
}
- if (!strncmp(argv[1], "rec", 3))
+ if (strlen(argv[1]) > 2 && strstarts("record", argv[1]))
return kvm_events_record(&kvm, argc - 1, argv + 1);
- if (!strncmp(argv[1], "rep", 3))
+ if (strlen(argv[1]) > 2 && strstarts("report", argv[1]))
return kvm_events_report(&kvm, argc - 1 , argv + 1);
-#ifdef HAVE_TIMERFD_SUPPORT
+#if defined(HAVE_TIMERFD_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
if (!strncmp(argv[1], "live", 4))
return kvm_events_live(&kvm, argc - 1 , argv + 1);
#endif
@@ -1604,6 +2139,8 @@ int cmd_kvm(int argc, const char **argv)
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
+ OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
+ "Guest code can be found in hypervisor process"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_END()
@@ -1633,18 +2170,18 @@ int cmd_kvm(int argc, const char **argv)
}
}
- if (!strncmp(argv[0], "rec", 3))
+ if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
return __cmd_record(file_name, argc, argv);
- else if (!strncmp(argv[0], "rep", 3))
+ else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
return __cmd_report(file_name, argc, argv);
- else if (!strncmp(argv[0], "diff", 4))
+ else if (strlen(argv[0]) > 2 && strstarts("diff", argv[0]))
return cmd_diff(argc, argv);
- else if (!strncmp(argv[0], "top", 3))
+ else if (!strcmp(argv[0], "top"))
return cmd_top(argc, argv);
- else if (!strncmp(argv[0], "buildid-list", 12))
+ else if (strlen(argv[0]) > 2 && strstarts("buildid-list", argv[0]))
return __cmd_buildid_list(file_name, argc, argv);
-#ifdef HAVE_KVM_STAT_SUPPORT
- else if (!strncmp(argv[0], "stat", 4))
+#if defined(HAVE_KVM_STAT_SUPPORT) && defined(HAVE_LIBTRACEEVENT)
+ else if (strlen(argv[0]) > 2 && strstarts("stat", argv[0]))
return kvm_cmd_stat(file_name, argc, argv);
#endif
else
diff --git a/tools/perf/builtin-kwork.c b/tools/perf/builtin-kwork.c
new file mode 100644
index 000000000000..a9395c52b23b
--- /dev/null
+++ b/tools/perf/builtin-kwork.c
@@ -0,0 +1,1839 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * builtin-kwork.c
+ *
+ * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
+ */
+
+#include "builtin.h"
+
+#include "util/data.h"
+#include "util/evlist.h"
+#include "util/evsel.h"
+#include "util/header.h"
+#include "util/kwork.h"
+#include "util/debug.h"
+#include "util/session.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/string2.h"
+#include "util/callchain.h"
+#include "util/evsel_fprintf.h"
+#include "util/util.h"
+
+#include <subcmd/pager.h>
+#include <subcmd/parse-options.h>
+#include <traceevent/event-parse.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <linux/err.h>
+#include <linux/time64.h>
+#include <linux/zalloc.h>
+
+/*
+ * report header elements width
+ */
+#define PRINT_CPU_WIDTH 4
+#define PRINT_COUNT_WIDTH 9
+#define PRINT_RUNTIME_WIDTH 10
+#define PRINT_LATENCY_WIDTH 10
+#define PRINT_TIMESTAMP_WIDTH 17
+#define PRINT_KWORK_NAME_WIDTH 30
+#define RPINT_DECIMAL_WIDTH 3
+#define PRINT_BRACKETPAIR_WIDTH 2
+#define PRINT_TIME_UNIT_SEC_WIDTH 2
+#define PRINT_TIME_UNIT_MESC_WIDTH 3
+#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
+#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
+#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
+#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
+
+struct sort_dimension {
+ const char *name;
+ int (*cmp)(struct kwork_work *l, struct kwork_work *r);
+ struct list_head list;
+};
+
+static int id_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->cpu > r->cpu)
+ return 1;
+ if (l->cpu < r->cpu)
+ return -1;
+
+ if (l->id > r->id)
+ return 1;
+ if (l->id < r->id)
+ return -1;
+
+ return 0;
+}
+
+static int count_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->nr_atoms > r->nr_atoms)
+ return 1;
+ if (l->nr_atoms < r->nr_atoms)
+ return -1;
+
+ return 0;
+}
+
+static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->total_runtime > r->total_runtime)
+ return 1;
+ if (l->total_runtime < r->total_runtime)
+ return -1;
+
+ return 0;
+}
+
+static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->max_runtime > r->max_runtime)
+ return 1;
+ if (l->max_runtime < r->max_runtime)
+ return -1;
+
+ return 0;
+}
+
+static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ u64 avgl, avgr;
+
+ if (!r->nr_atoms)
+ return 1;
+ if (!l->nr_atoms)
+ return -1;
+
+ avgl = l->total_latency / l->nr_atoms;
+ avgr = r->total_latency / r->nr_atoms;
+
+ if (avgl > avgr)
+ return 1;
+ if (avgl < avgr)
+ return -1;
+
+ return 0;
+}
+
+static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
+{
+ if (l->max_latency > r->max_latency)
+ return 1;
+ if (l->max_latency < r->max_latency)
+ return -1;
+
+ return 0;
+}
+
+static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
+ const char *tok, struct list_head *list)
+{
+ size_t i;
+ static struct sort_dimension max_sort_dimension = {
+ .name = "max",
+ .cmp = max_runtime_cmp,
+ };
+ static struct sort_dimension id_sort_dimension = {
+ .name = "id",
+ .cmp = id_cmp,
+ };
+ static struct sort_dimension runtime_sort_dimension = {
+ .name = "runtime",
+ .cmp = runtime_cmp,
+ };
+ static struct sort_dimension count_sort_dimension = {
+ .name = "count",
+ .cmp = count_cmp,
+ };
+ static struct sort_dimension avg_sort_dimension = {
+ .name = "avg",
+ .cmp = avg_latency_cmp,
+ };
+ struct sort_dimension *available_sorts[] = {
+ &id_sort_dimension,
+ &max_sort_dimension,
+ &count_sort_dimension,
+ &runtime_sort_dimension,
+ &avg_sort_dimension,
+ };
+
+ if (kwork->report == KWORK_REPORT_LATENCY)
+ max_sort_dimension.cmp = max_latency_cmp;
+
+ for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
+ if (!strcmp(available_sorts[i]->name, tok)) {
+ list_add_tail(&available_sorts[i]->list, list);
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+static void setup_sorting(struct perf_kwork *kwork,
+ const struct option *options,
+ const char * const usage_msg[])
+{
+ char *tmp, *tok, *str = strdup(kwork->sort_order);
+
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
+ usage_with_options_msg(usage_msg, options,
+ "Unknown --sort key: `%s'", tok);
+ }
+
+ pr_debug("Sort order: %s\n", kwork->sort_order);
+ free(str);
+}
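
A usage sketch of the sort setup (the sort_order value, options and usage_msg are placeholders, not taken from the patch):

	struct perf_kwork kwork = {
		.sort_order = "runtime, max",	/* hypothetical user selection */
		.sort_list  = LIST_HEAD_INIT(kwork.sort_list),
	};

	setup_sorting(&kwork, options, usage_msg);
	/*
	 * kwork.sort_list now holds runtime_cmp followed by max_runtime_cmp
	 * (max_latency_cmp instead when kwork.report == KWORK_REPORT_LATENCY);
	 * work_cmp() walks the list until one comparator returns non-zero.
	 */
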
+
+static struct kwork_atom *atom_new(struct perf_kwork *kwork,
+ struct perf_sample *sample)
+{
+ unsigned long i;
+ struct kwork_atom_page *page;
+ struct kwork_atom *atom = NULL;
+
+ list_for_each_entry(page, &kwork->atom_page_list, list) {
+ if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
+ i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
+ BUG_ON(i >= NR_ATOM_PER_PAGE);
+ atom = &page->atoms[i];
+ goto found_atom;
+ }
+ }
+
+ /*
+ * new page
+ */
+ page = zalloc(sizeof(*page));
+ if (page == NULL) {
+ pr_err("Failed to zalloc kwork atom page\n");
+ return NULL;
+ }
+
+ i = 0;
+ atom = &page->atoms[0];
+ list_add_tail(&page->list, &kwork->atom_page_list);
+
+found_atom:
+ __set_bit(i, page->bitmap);
+ atom->time = sample->time;
+ atom->prev = NULL;
+ atom->page_addr = page;
+ atom->bit_inpage = i;
+ return atom;
+}
+
+static void atom_free(struct kwork_atom *atom)
+{
+ if (atom->prev != NULL)
+ atom_free(atom->prev);
+
+ __clear_bit(atom->bit_inpage,
+ ((struct kwork_atom_page *)atom->page_addr)->bitmap);
+}
+
+static void atom_del(struct kwork_atom *atom)
+{
+ list_del(&atom->list);
+ atom_free(atom);
+}
+
+static int work_cmp(struct list_head *list,
+ struct kwork_work *l, struct kwork_work *r)
+{
+ int ret = 0;
+ struct sort_dimension *sort;
+
+ BUG_ON(list_empty(list));
+
+ list_for_each_entry(sort, list, list) {
+ ret = sort->cmp(l, r);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct kwork_work *work_search(struct rb_root_cached *root,
+ struct kwork_work *key,
+ struct list_head *sort_list)
+{
+ int cmp;
+ struct kwork_work *work;
+ struct rb_node *node = root->rb_root.rb_node;
+
+ while (node) {
+ work = container_of(node, struct kwork_work, node);
+ cmp = work_cmp(sort_list, key, work);
+ if (cmp > 0)
+ node = node->rb_left;
+ else if (cmp < 0)
+ node = node->rb_right;
+ else {
+ if (work->name == NULL)
+ work->name = key->name;
+ return work;
+ }
+ }
+ return NULL;
+}
+
+static void work_insert(struct rb_root_cached *root,
+ struct kwork_work *key, struct list_head *sort_list)
+{
+ int cmp;
+ bool leftmost = true;
+ struct kwork_work *cur;
+ struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
+
+ while (*new) {
+ cur = container_of(*new, struct kwork_work, node);
+ parent = *new;
+ cmp = work_cmp(sort_list, key, cur);
+
+ if (cmp > 0)
+ new = &((*new)->rb_left);
+ else {
+ new = &((*new)->rb_right);
+ leftmost = false;
+ }
+ }
+
+ rb_link_node(&key->node, parent, new);
+ rb_insert_color_cached(&key->node, root, leftmost);
+}
+
+static struct kwork_work *work_new(struct kwork_work *key)
+{
+ int i;
+ struct kwork_work *work = zalloc(sizeof(*work));
+
+ if (work == NULL) {
+ pr_err("Failed to zalloc kwork work\n");
+ return NULL;
+ }
+
+ for (i = 0; i < KWORK_TRACE_MAX; i++)
+ INIT_LIST_HEAD(&work->atom_list[i]);
+
+ work->id = key->id;
+ work->cpu = key->cpu;
+ work->name = key->name;
+ work->class = key->class;
+ return work;
+}
+
+static struct kwork_work *work_findnew(struct rb_root_cached *root,
+ struct kwork_work *key,
+ struct list_head *sort_list)
+{
+ struct kwork_work *work = work_search(root, key, sort_list);
+
+ if (work != NULL)
+ return work;
+
+ work = work_new(key);
+ if (work)
+ work_insert(root, work, sort_list);
+
+ return work;
+}
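
A sketch of how a lookup is keyed (the key fields follow irq_work_init() further down; cmp_id is assumed to carry the cpu/id ordering, as its name and its use in work_push_atom() suggest):

	struct kwork_work key = {
		.cpu = sample->cpu,
		.id  = evsel__intval(evsel, sample, "irq"),	/* as in irq_work_init() */
	};

	struct kwork_work *work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	/* Returns the existing node with the same (cpu, id), or inserts a new one. */
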
+
+static void profile_update_timespan(struct perf_kwork *kwork,
+ struct perf_sample *sample)
+{
+ if (!kwork->summary)
+ return;
+
+ if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
+ kwork->timestart = sample->time;
+
+ if (kwork->timeend < sample->time)
+ kwork->timeend = sample->time;
+}
+
+static bool profile_event_match(struct perf_kwork *kwork,
+ struct kwork_work *work,
+ struct perf_sample *sample)
+{
+ int cpu = work->cpu;
+ u64 time = sample->time;
+ struct perf_time_interval *ptime = &kwork->ptime;
+
+ if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
+ return false;
+
+ if (((ptime->start != 0) && (ptime->start > time)) ||
+ ((ptime->end != 0) && (ptime->end < time)))
+ return false;
+
+ if ((kwork->profile_name != NULL) &&
+ (work->name != NULL) &&
+ (strcmp(work->name, kwork->profile_name) != 0))
+ return false;
+
+ profile_update_timespan(kwork, sample);
+ return true;
+}
+
+static int work_push_atom(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ enum kwork_trace_type src_type,
+ enum kwork_trace_type dst_type,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct kwork_work **ret_work)
+{
+ struct kwork_atom *atom, *dst_atom;
+ struct kwork_work *work, key;
+
+ BUG_ON(class->work_init == NULL);
+ class->work_init(class, &key, evsel, sample, machine);
+
+ atom = atom_new(kwork, sample);
+ if (atom == NULL)
+ return -1;
+
+ work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ if (work == NULL) {
+ free(atom);
+ return -1;
+ }
+
+ if (!profile_event_match(kwork, work, sample))
+ return 0;
+
+ if (dst_type < KWORK_TRACE_MAX) {
+ dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
+ struct kwork_atom, list);
+ if (dst_atom != NULL) {
+ atom->prev = dst_atom;
+ list_del(&dst_atom->list);
+ }
+ }
+
+ if (ret_work != NULL)
+ *ret_work = work;
+
+ list_add_tail(&atom->list, &work->atom_list[src_type]);
+
+ return 0;
+}
+
+static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ enum kwork_trace_type src_type,
+ enum kwork_trace_type dst_type,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine,
+ struct kwork_work **ret_work)
+{
+ struct kwork_atom *atom, *src_atom;
+ struct kwork_work *work, key;
+
+ BUG_ON(class->work_init == NULL);
+ class->work_init(class, &key, evsel, sample, machine);
+
+ work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
+ if (ret_work != NULL)
+ *ret_work = work;
+
+ if (work == NULL)
+ return NULL;
+
+ if (!profile_event_match(kwork, work, sample))
+ return NULL;
+
+ atom = list_last_entry_or_null(&work->atom_list[dst_type],
+ struct kwork_atom, list);
+ if (atom != NULL)
+ return atom;
+
+ src_atom = atom_new(kwork, sample);
+ if (src_atom != NULL)
+ list_add_tail(&src_atom->list, &work->atom_list[src_type]);
+ else {
+ if (ret_work != NULL)
+ *ret_work = NULL;
+ }
+
+ return NULL;
+}
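
To keep the handlers below readable, here is a summary of how push and pop pair up per report mode (a descriptive comment only, mirroring the calls in the report_*, latency_* and timehist_* handlers that follow):

/*
 * report:   entry -> work_push_atom(ENTRY, MAX)    records the entry time
 *           exit  -> work_pop_atom(EXIT, ENTRY)    pops it, accumulates runtime
 * latency:  raise -> work_push_atom(RAISE, MAX)
 *           entry -> work_pop_atom(ENTRY, RAISE)   accumulates raise->entry delay
 * timehist: the popped atom keeps ->prev, so both the runtime and the preceding
 *           latency can be printed for the same work item.
 */
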
+
+static void report_update_exit_event(struct kwork_work *work,
+ struct kwork_atom *atom,
+ struct perf_sample *sample)
+{
+ u64 delta;
+ u64 exit_time = sample->time;
+ u64 entry_time = atom->time;
+
+ if ((entry_time != 0) && (exit_time >= entry_time)) {
+ delta = exit_time - entry_time;
+ if ((delta > work->max_runtime) ||
+ (work->max_runtime == 0)) {
+ work->max_runtime = delta;
+ work->max_runtime_start = entry_time;
+ work->max_runtime_end = exit_time;
+ }
+ work->total_runtime += delta;
+ work->nr_atoms++;
+ }
+}
+
+static int report_entry_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+ KWORK_TRACE_MAX, evsel, sample,
+ machine, NULL);
+}
+
+static int report_exit_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct kwork_atom *atom = NULL;
+ struct kwork_work *work = NULL;
+
+ atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+ KWORK_TRACE_ENTRY, evsel, sample,
+ machine, &work);
+ if (work == NULL)
+ return -1;
+
+ if (atom != NULL) {
+ report_update_exit_event(work, atom, sample);
+ atom_del(atom);
+ }
+
+ return 0;
+}
+
+static void latency_update_entry_event(struct kwork_work *work,
+ struct kwork_atom *atom,
+ struct perf_sample *sample)
+{
+ u64 delta;
+ u64 entry_time = sample->time;
+ u64 raise_time = atom->time;
+
+ if ((raise_time != 0) && (entry_time >= raise_time)) {
+ delta = entry_time - raise_time;
+ if ((delta > work->max_latency) ||
+ (work->max_latency == 0)) {
+ work->max_latency = delta;
+ work->max_latency_start = raise_time;
+ work->max_latency_end = entry_time;
+ }
+ work->total_latency += delta;
+ work->nr_atoms++;
+ }
+}
+
+static int latency_raise_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
+ KWORK_TRACE_MAX, evsel, sample,
+ machine, NULL);
+}
+
+static int latency_entry_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct kwork_atom *atom = NULL;
+ struct kwork_work *work = NULL;
+
+ atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
+ KWORK_TRACE_RAISE, evsel, sample,
+ machine, &work);
+ if (work == NULL)
+ return -1;
+
+ if (atom != NULL) {
+ latency_update_entry_event(work, atom, sample);
+ atom_del(atom);
+ }
+
+ return 0;
+}
+
+static void timehist_save_callchain(struct perf_kwork *kwork,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine)
+{
+ struct symbol *sym;
+ struct thread *thread;
+ struct callchain_cursor_node *node;
+ struct callchain_cursor *cursor = &callchain_cursor;
+
+ if (!kwork->show_callchain || sample->callchain == NULL)
+ return;
+
+ /* want main thread for process - has maps */
+ thread = machine__findnew_thread(machine, sample->pid, sample->pid);
+ if (thread == NULL) {
+ pr_debug("Failed to get thread for pid %d\n", sample->pid);
+ return;
+ }
+
+ if (thread__resolve_callchain(thread, cursor, evsel, sample,
+ NULL, NULL, kwork->max_stack + 2) != 0) {
+ pr_debug("Failed to resolve callchain, skipping\n");
+ goto out_put;
+ }
+
+ callchain_cursor_commit(cursor);
+
+ while (true) {
+ node = callchain_cursor_current(cursor);
+ if (node == NULL)
+ break;
+
+ sym = node->ms.sym;
+ if (sym) {
+ if (!strcmp(sym->name, "__softirqentry_text_start") ||
+ !strcmp(sym->name, "__do_softirq"))
+ sym->ignore = 1;
+ }
+
+ callchain_cursor_advance(cursor);
+ }
+
+out_put:
+ thread__put(thread);
+}
+
+static void timehist_print_event(struct perf_kwork *kwork,
+ struct kwork_work *work,
+ struct kwork_atom *atom,
+ struct perf_sample *sample,
+ struct addr_location *al)
+{
+ char entrytime[32], exittime[32];
+ char kwork_name[PRINT_KWORK_NAME_WIDTH];
+
+ /*
+ * runtime start
+ */
+ timestamp__scnprintf_usec(atom->time,
+ entrytime, sizeof(entrytime));
+ printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
+
+ /*
+ * runtime end
+ */
+ timestamp__scnprintf_usec(sample->time,
+ exittime, sizeof(exittime));
+ printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
+
+ /*
+ * cpu
+ */
+ printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
+
+ /*
+ * kwork name
+ */
+ if (work->class && work->class->work_name) {
+ work->class->work_name(work, kwork_name,
+ PRINT_KWORK_NAME_WIDTH);
+ printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
+ } else
+ printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
+
+ /*
+ * runtime
+ */
+ printf(" %*.*f ",
+ PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)(sample->time - atom->time) / NSEC_PER_MSEC);
+
+ /*
+ * delaytime
+ */
+ if (atom->prev != NULL)
+ printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
+ else
+ printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
+
+ /*
+ * callchain
+ */
+ if (kwork->show_callchain) {
+ printf(" ");
+ sample__fprintf_sym(sample, al, 0,
+ EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
+ EVSEL__PRINT_CALLCHAIN_ARROW |
+ EVSEL__PRINT_SKIP_IGNORED,
+ &callchain_cursor, symbol_conf.bt_stop_list,
+ stdout);
+ }
+
+ printf("\n");
+}
+
+static int timehist_raise_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
+ KWORK_TRACE_MAX, evsel, sample,
+ machine, NULL);
+}
+
+static int timehist_entry_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ int ret;
+ struct kwork_work *work = NULL;
+
+ ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
+ KWORK_TRACE_RAISE, evsel, sample,
+ machine, &work);
+ if (ret)
+ return ret;
+
+ if (work != NULL)
+ timehist_save_callchain(kwork, sample, evsel, machine);
+
+ return 0;
+}
+
+static int timehist_exit_event(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct kwork_atom *atom = NULL;
+ struct kwork_work *work = NULL;
+ struct addr_location al;
+
+ if (machine__resolve(machine, &al, sample) < 0) {
+ pr_debug("Problem processing event, skipping it\n");
+ return -1;
+ }
+
+ atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
+ KWORK_TRACE_ENTRY, evsel, sample,
+ machine, &work);
+ if (work == NULL)
+ return -1;
+
+ if (atom != NULL) {
+ work->nr_atoms++;
+ timehist_print_event(kwork, work, atom, sample, &al);
+ atom_del(atom);
+ }
+
+ return 0;
+}
+
+static struct kwork_class kwork_irq;
+static int process_irq_handler_entry_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->entry_event)
+ return kwork->tp_handler->entry_event(kwork, &kwork_irq,
+ evsel, sample, machine);
+ return 0;
+}
+
+static int process_irq_handler_exit_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->exit_event)
+ return kwork->tp_handler->exit_event(kwork, &kwork_irq,
+ evsel, sample, machine);
+ return 0;
+}
+
+const struct evsel_str_handler irq_tp_handlers[] = {
+ { "irq:irq_handler_entry", process_irq_handler_entry_event, },
+ { "irq:irq_handler_exit", process_irq_handler_exit_event, },
+};
+
+static int irq_class_init(struct kwork_class *class,
+ struct perf_session *session)
+{
+ if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
+ pr_err("Failed to set irq tracepoints handlers\n");
+ return -1;
+ }
+
+ class->work_root = RB_ROOT_CACHED;
+ return 0;
+}
+
+static void irq_work_init(struct kwork_class *class,
+ struct kwork_work *work,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ work->class = class;
+ work->cpu = sample->cpu;
+ work->id = evsel__intval(evsel, sample, "irq");
+ work->name = evsel__strval(evsel, sample, "name");
+}
+
+static void irq_work_name(struct kwork_work *work, char *buf, int len)
+{
+ snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
+}
+
+static struct kwork_class kwork_irq = {
+ .name = "irq",
+ .type = KWORK_CLASS_IRQ,
+ .nr_tracepoints = 2,
+ .tp_handlers = irq_tp_handlers,
+ .class_init = irq_class_init,
+ .work_init = irq_work_init,
+ .work_name = irq_work_name,
+};
+
+static struct kwork_class kwork_softirq;
+static int process_softirq_raise_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->raise_event)
+ return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+static int process_softirq_entry_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->entry_event)
+ return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+static int process_softirq_exit_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->exit_event)
+ return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+const struct evsel_str_handler softirq_tp_handlers[] = {
+ { "irq:softirq_raise", process_softirq_raise_event, },
+ { "irq:softirq_entry", process_softirq_entry_event, },
+ { "irq:softirq_exit", process_softirq_exit_event, },
+};
+
+static int softirq_class_init(struct kwork_class *class,
+ struct perf_session *session)
+{
+ if (perf_session__set_tracepoints_handlers(session,
+ softirq_tp_handlers)) {
+ pr_err("Failed to set softirq tracepoints handlers\n");
+ return -1;
+ }
+
+ class->work_root = RB_ROOT_CACHED;
+ return 0;
+}
+
+static char *evsel__softirq_name(struct evsel *evsel, u64 num)
+{
+ char *name = NULL;
+ bool found = false;
+ struct tep_print_flag_sym *sym = NULL;
+ struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
+
+ if ((args == NULL) || (args->next == NULL))
+ return NULL;
+
+ /* skip softirq field: "REC->vec" */
+ for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
+ if ((eval_flag(sym->value) == (unsigned long long)num) &&
+ (strlen(sym->str) != 0)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return NULL;
+
+ name = strdup(sym->str);
+ if (name == NULL) {
+ pr_err("Failed to copy symbol name\n");
+ return NULL;
+ }
+ return name;
+}
+
+static void softirq_work_init(struct kwork_class *class,
+ struct kwork_work *work,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ u64 num = evsel__intval(evsel, sample, "vec");
+
+ work->id = num;
+ work->class = class;
+ work->cpu = sample->cpu;
+ work->name = evsel__softirq_name(evsel, num);
+}
+
+static void softirq_work_name(struct kwork_work *work, char *buf, int len)
+{
+ snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
+}
+
+static struct kwork_class kwork_softirq = {
+ .name = "softirq",
+ .type = KWORK_CLASS_SOFTIRQ,
+ .nr_tracepoints = 3,
+ .tp_handlers = softirq_tp_handlers,
+ .class_init = softirq_class_init,
+ .work_init = softirq_work_init,
+ .work_name = softirq_work_name,
+};
+
+static struct kwork_class kwork_workqueue;
+static int process_workqueue_activate_work_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->raise_event)
+ return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+static int process_workqueue_execute_start_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->entry_event)
+ return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+static int process_workqueue_execute_end_event(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
+
+ if (kwork->tp_handler->exit_event)
+ return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
+ evsel, sample, machine);
+
+ return 0;
+}
+
+const struct evsel_str_handler workqueue_tp_handlers[] = {
+ { "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
+ { "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
+ { "workqueue:workqueue_execute_end", process_workqueue_execute_end_event, },
+};
+
+static int workqueue_class_init(struct kwork_class *class,
+ struct perf_session *session)
+{
+ if (perf_session__set_tracepoints_handlers(session,
+ workqueue_tp_handlers)) {
+ pr_err("Failed to set workqueue tracepoints handlers\n");
+ return -1;
+ }
+
+ class->work_root = RB_ROOT_CACHED;
+ return 0;
+}
+
+static void workqueue_work_init(struct kwork_class *class,
+ struct kwork_work *work,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ char *modp = NULL;
+ unsigned long long function_addr = evsel__intval(evsel,
+ sample, "function");
+
+ work->class = class;
+ work->cpu = sample->cpu;
+ work->id = evsel__intval(evsel, sample, "work");
+ work->name = function_addr == 0 ? NULL :
+ machine__resolve_kernel_addr(machine, &function_addr, &modp);
+}
+
+static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
+{
+ if (work->name != NULL)
+ snprintf(buf, len, "(w)%s", work->name);
+ else
+ snprintf(buf, len, "(w)0x%" PRIx64, work->id);
+}
+
+static struct kwork_class kwork_workqueue = {
+ .name = "workqueue",
+ .type = KWORK_CLASS_WORKQUEUE,
+ .nr_tracepoints = 3,
+ .tp_handlers = workqueue_tp_handlers,
+ .class_init = workqueue_class_init,
+ .work_init = workqueue_work_init,
+ .work_name = workqueue_work_name,
+};
+
+static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
+ [KWORK_CLASS_IRQ] = &kwork_irq,
+ [KWORK_CLASS_SOFTIRQ] = &kwork_softirq,
+ [KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
+};
+
+static void print_separator(int len)
+{
+ printf(" %.*s\n", len, graph_dotted_line);
+}
+
+static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
+{
+ int ret = 0;
+ char kwork_name[PRINT_KWORK_NAME_WIDTH];
+ char max_runtime_start[32], max_runtime_end[32];
+ char max_latency_start[32], max_latency_end[32];
+
+ printf(" ");
+
+ /*
+ * kwork name
+ */
+ if (work->class && work->class->work_name) {
+ work->class->work_name(work, kwork_name,
+ PRINT_KWORK_NAME_WIDTH);
+ ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
+ } else {
+ ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
+ }
+
+ /*
+ * cpu
+ */
+ ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
+
+ /*
+ * total runtime
+ */
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ ret += printf(" %*.*f ms |",
+ PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->total_runtime / NSEC_PER_MSEC);
+ } else if (kwork->report == KWORK_REPORT_LATENCY) { /* avg delay */
+ ret += printf(" %*.*f ms |",
+ PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->total_latency /
+ work->nr_atoms / NSEC_PER_MSEC);
+ }
+
+ /*
+ * count
+ */
+ ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
+
+ /*
+ * max runtime, max runtime start, max runtime end
+ */
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ timestamp__scnprintf_usec(work->max_runtime_start,
+ max_runtime_start,
+ sizeof(max_runtime_start));
+ timestamp__scnprintf_usec(work->max_runtime_end,
+ max_runtime_end,
+ sizeof(max_runtime_end));
+ ret += printf(" %*.*f ms | %*s s | %*s s |",
+ PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->max_runtime / NSEC_PER_MSEC,
+ PRINT_TIMESTAMP_WIDTH, max_runtime_start,
+ PRINT_TIMESTAMP_WIDTH, max_runtime_end);
+ }
+ /*
+ * max delay, max delay start, max delay end
+ */
+ else if (kwork->report == KWORK_REPORT_LATENCY) {
+ timestamp__scnprintf_usec(work->max_latency_start,
+ max_latency_start,
+ sizeof(max_latency_start));
+ timestamp__scnprintf_usec(work->max_latency_end,
+ max_latency_end,
+ sizeof(max_latency_end));
+ ret += printf(" %*.*f ms | %*s s | %*s s |",
+ PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
+ (double)work->max_latency / NSEC_PER_MSEC,
+ PRINT_TIMESTAMP_WIDTH, max_latency_start,
+ PRINT_TIMESTAMP_WIDTH, max_latency_end);
+ }
+
+ printf("\n");
+ return ret;
+}
+
+static int report_print_header(struct perf_kwork *kwork)
+{
+ int ret;
+
+ printf("\n ");
+ ret = printf(" %-*s | %-*s |",
+ PRINT_KWORK_NAME_WIDTH, "Kwork Name",
+ PRINT_CPU_WIDTH, "Cpu");
+
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ ret += printf(" %-*s |",
+ PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
+ } else if (kwork->report == KWORK_REPORT_LATENCY) {
+ ret += printf(" %-*s |",
+ PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
+ }
+
+ ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
+
+ if (kwork->report == KWORK_REPORT_RUNTIME) {
+ ret += printf(" %-*s | %-*s | %-*s |",
+ PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
+ PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
+ PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
+ } else if (kwork->report == KWORK_REPORT_LATENCY) {
+ ret += printf(" %-*s | %-*s | %-*s |",
+ PRINT_LATENCY_HEADER_WIDTH, "Max delay",
+ PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
+ PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
+ }
+
+ printf("\n");
+ print_separator(ret);
+ return ret;
+}
+
+static void timehist_print_header(void)
+{
+ /*
+ * header row
+ */
+ printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
+ PRINT_TIMESTAMP_WIDTH, "Runtime start",
+ PRINT_TIMESTAMP_WIDTH, "Runtime end",
+ PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
+ PRINT_KWORK_NAME_WIDTH, "Kwork name",
+ PRINT_RUNTIME_WIDTH, "Runtime",
+ PRINT_RUNTIME_WIDTH, "Delaytime");
+
+ /*
+ * units row
+ */
+ printf(" %-*s %-*s %-*s %-*s %-*s %-*s\n",
+ PRINT_TIMESTAMP_WIDTH, "",
+ PRINT_TIMESTAMP_WIDTH, "",
+ PRINT_TIMEHIST_CPU_WIDTH, "",
+ PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
+ PRINT_RUNTIME_WIDTH, "(msec)",
+ PRINT_RUNTIME_WIDTH, "(msec)");
+
+ /*
+ * separator
+ */
+ printf(" %.*s %.*s %.*s %.*s %.*s %.*s\n",
+ PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
+ PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
+ PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
+ PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
+ PRINT_RUNTIME_WIDTH, graph_dotted_line,
+ PRINT_RUNTIME_WIDTH, graph_dotted_line);
+}
+
+static void print_summary(struct perf_kwork *kwork)
+{
+ u64 time = kwork->timeend - kwork->timestart;
+
+ printf(" Total count : %9" PRIu64 "\n", kwork->all_count);
+ printf(" Total runtime (msec) : %9.3f (%.3f%% load average)\n",
+ (double)kwork->all_runtime / NSEC_PER_MSEC,
+ time == 0 ? 0 : (double)kwork->all_runtime / time);
+ printf(" Total time span (msec) : %9.3f\n",
+ (double)time / NSEC_PER_MSEC);
+}
+
+static unsigned long long nr_list_entry(struct list_head *head)
+{
+ struct list_head *pos;
+ unsigned long long n = 0;
+
+ list_for_each(pos, head)
+ n++;
+
+ return n;
+}
+
+static void print_skipped_events(struct perf_kwork *kwork)
+{
+ int i;
+ const char *const kwork_event_str[] = {
+ [KWORK_TRACE_RAISE] = "raise",
+ [KWORK_TRACE_ENTRY] = "entry",
+ [KWORK_TRACE_EXIT] = "exit",
+ };
+
+ if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
+ (kwork->nr_events != 0)) {
+ printf(" INFO: %.3f%% skipped events (%" PRIu64 " including ",
+ (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
+ (double)kwork->nr_events * 100.0,
+ kwork->nr_skipped_events[KWORK_TRACE_MAX]);
+
+ for (i = 0; i < KWORK_TRACE_MAX; i++) {
+ printf("%" PRIu64 " %s%s",
+ kwork->nr_skipped_events[i],
+ kwork_event_str[i],
+ (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
+ }
+ }
+
+ if (verbose > 0)
+ printf(" INFO: use %lld atom pages\n",
+ nr_list_entry(&kwork->atom_page_list));
+}
+
+static void print_bad_events(struct perf_kwork *kwork)
+{
+ if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
+ printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
+ (double)kwork->nr_lost_events /
+ (double)kwork->nr_events * 100.0,
+ kwork->nr_lost_events, kwork->nr_events,
+ kwork->nr_lost_chunks);
+ }
+}
+
+static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
+{
+ struct rb_node *node;
+ struct kwork_work *data;
+ struct rb_root_cached *root = &class->work_root;
+
+ pr_debug("Sorting %s ...\n", class->name);
+ for (;;) {
+ node = rb_first_cached(root);
+ if (!node)
+ break;
+
+ rb_erase_cached(node, root);
+ data = rb_entry(node, struct kwork_work, node);
+ work_insert(&kwork->sorted_work_root,
+ data, &kwork->sort_list);
+ }
+}
+
+static void perf_kwork__sort(struct perf_kwork *kwork)
+{
+ struct kwork_class *class;
+
+ list_for_each_entry(class, &kwork->class_list, list)
+ work_sort(kwork, class);
+}
+
+static int perf_kwork__check_config(struct perf_kwork *kwork,
+ struct perf_session *session)
+{
+ int ret;
+ struct evsel *evsel;
+ struct kwork_class *class;
+
+ static struct trace_kwork_handler report_ops = {
+ .entry_event = report_entry_event,
+ .exit_event = report_exit_event,
+ };
+ static struct trace_kwork_handler latency_ops = {
+ .raise_event = latency_raise_event,
+ .entry_event = latency_entry_event,
+ };
+ static struct trace_kwork_handler timehist_ops = {
+ .raise_event = timehist_raise_event,
+ .entry_event = timehist_entry_event,
+ .exit_event = timehist_exit_event,
+ };
+
+ switch (kwork->report) {
+ case KWORK_REPORT_RUNTIME:
+ kwork->tp_handler = &report_ops;
+ break;
+ case KWORK_REPORT_LATENCY:
+ kwork->tp_handler = &latency_ops;
+ break;
+ case KWORK_REPORT_TIMEHIST:
+ kwork->tp_handler = &timehist_ops;
+ break;
+ default:
+ pr_debug("Invalid report type %d\n", kwork->report);
+ return -1;
+ }
+
+ list_for_each_entry(class, &kwork->class_list, list)
+ if ((class->class_init != NULL) &&
+ (class->class_init(class, session) != 0))
+ return -1;
+
+ if (kwork->cpu_list != NULL) {
+ ret = perf_session__cpu_bitmap(session,
+ kwork->cpu_list,
+ kwork->cpu_bitmap);
+ if (ret < 0) {
+ pr_err("Invalid cpu bitmap\n");
+ return -1;
+ }
+ }
+
+ if (kwork->time_str != NULL) {
+ ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
+ if (ret != 0) {
+ pr_err("Invalid time span\n");
+ return -1;
+ }
+ }
+
+ list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
+ if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
+ pr_debug("Samples do not have callchains\n");
+ kwork->show_callchain = 0;
+ symbol_conf.use_callchain = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int perf_kwork__read_events(struct perf_kwork *kwork)
+{
+ int ret = -1;
+ struct perf_session *session = NULL;
+
+ struct perf_data data = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = kwork->force,
+ };
+
+ session = perf_session__new(&data, &kwork->tool);
+ if (IS_ERR(session)) {
+ pr_debug("Error creating perf session\n");
+ return PTR_ERR(session);
+ }
+
+ symbol__init(&session->header.env);
+
+ if (perf_kwork__check_config(kwork, session) != 0)
+ goto out_delete;
+
+ if (session->tevent.pevent &&
+ tep_set_function_resolver(session->tevent.pevent,
+ machine__resolve_kernel_addr,
+ &session->machines.host) < 0) {
+ pr_err("Failed to set libtraceevent function resolver\n");
+ goto out_delete;
+ }
+
+ if (kwork->report == KWORK_REPORT_TIMEHIST)
+ timehist_print_header();
+
+ ret = perf_session__process_events(session);
+ if (ret) {
+ pr_debug("Failed to process events, error %d\n", ret);
+ goto out_delete;
+ }
+
+ kwork->nr_events = session->evlist->stats.nr_events[0];
+ kwork->nr_lost_events = session->evlist->stats.total_lost;
+ kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
+
+out_delete:
+ perf_session__delete(session);
+ return ret;
+}
+
+static void process_skipped_events(struct perf_kwork *kwork,
+ struct kwork_work *work)
+{
+ int i;
+ unsigned long long count;
+
+ for (i = 0; i < KWORK_TRACE_MAX; i++) {
+ count = nr_list_entry(&work->atom_list[i]);
+ kwork->nr_skipped_events[i] += count;
+ kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
+ }
+}
+
+struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
+ struct kwork_class *class,
+ struct kwork_work *key)
+{
+ struct kwork_work *work = NULL;
+
+ work = work_new(key);
+ if (work == NULL)
+ return NULL;
+
+ work_insert(&class->work_root, work, &kwork->cmp_id);
+ return work;
+}
+
+static void sig_handler(int sig)
+{
+ /*
+ * Simply capture the termination signal so that
+ * the program can continue after pause() returns
+ */
+ pr_debug("Captuer signal %d\n", sig);
+}
+
+static int perf_kwork__report_bpf(struct perf_kwork *kwork)
+{
+ int ret;
+
+ signal(SIGINT, sig_handler);
+ signal(SIGTERM, sig_handler);
+
+ ret = perf_kwork__trace_prepare_bpf(kwork);
+ if (ret)
+ return -1;
+
+ printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
+
+ perf_kwork__trace_start();
+
+ /*
+ * a simple pause, wait here for stop signal
+ */
+ pause();
+
+ perf_kwork__trace_finish();
+
+ perf_kwork__report_read_bpf(kwork);
+
+ perf_kwork__report_cleanup_bpf();
+
+ return 0;
+}
+
+static int perf_kwork__report(struct perf_kwork *kwork)
+{
+ int ret;
+ struct rb_node *next;
+ struct kwork_work *work;
+
+ if (kwork->use_bpf)
+ ret = perf_kwork__report_bpf(kwork);
+ else
+ ret = perf_kwork__read_events(kwork);
+
+ if (ret != 0)
+ return -1;
+
+ perf_kwork__sort(kwork);
+
+ setup_pager();
+
+ ret = report_print_header(kwork);
+ next = rb_first_cached(&kwork->sorted_work_root);
+ while (next) {
+ work = rb_entry(next, struct kwork_work, node);
+ process_skipped_events(kwork, work);
+
+ if (work->nr_atoms != 0) {
+ report_print_work(kwork, work);
+ if (kwork->summary) {
+ kwork->all_runtime += work->total_runtime;
+ kwork->all_count += work->nr_atoms;
+ }
+ }
+ next = rb_next(next);
+ }
+ print_separator(ret);
+
+ if (kwork->summary) {
+ print_summary(kwork);
+ print_separator(ret);
+ }
+
+ print_bad_events(kwork);
+ print_skipped_events(kwork);
+ printf("\n");
+
+ return 0;
+}
+
+typedef int (*tracepoint_handler)(struct perf_tool *tool,
+ struct evsel *evsel,
+ struct perf_sample *sample,
+ struct machine *machine);
+
+static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct evsel *evsel,
+ struct machine *machine)
+{
+ int err = 0;
+
+ if (evsel->handler != NULL) {
+ tracepoint_handler f = evsel->handler;
+
+ err = f(tool, evsel, sample, machine);
+ }
+
+ return err;
+}
+
+static int perf_kwork__timehist(struct perf_kwork *kwork)
+{
+ /*
+ * event handlers for timehist option
+ */
+ kwork->tool.comm = perf_event__process_comm;
+ kwork->tool.exit = perf_event__process_exit;
+ kwork->tool.fork = perf_event__process_fork;
+ kwork->tool.attr = perf_event__process_attr;
+ kwork->tool.tracing_data = perf_event__process_tracing_data;
+ kwork->tool.build_id = perf_event__process_build_id;
+ kwork->tool.ordered_events = true;
+ kwork->tool.ordering_requires_timestamps = true;
+ symbol_conf.use_callchain = kwork->show_callchain;
+
+ if (symbol__validate_sym_arguments()) {
+ pr_err("Failed to validate sym arguments\n");
+ return -1;
+ }
+
+ setup_pager();
+
+ return perf_kwork__read_events(kwork);
+}
+
+static void setup_event_list(struct perf_kwork *kwork,
+ const struct option *options,
+ const char * const usage_msg[])
+{
+ int i;
+ struct kwork_class *class;
+ char *tmp, *tok, *str;
+
+ if (kwork->event_list_str == NULL)
+ goto null_event_list_str;
+
+ str = strdup(kwork->event_list_str);
+ for (tok = strtok_r(str, ", ", &tmp);
+ tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ for (i = 0; i < KWORK_CLASS_MAX; i++) {
+ class = kwork_class_supported_list[i];
+ if (strcmp(tok, class->name) == 0) {
+ list_add_tail(&class->list, &kwork->class_list);
+ break;
+ }
+ }
+ if (i == KWORK_CLASS_MAX) {
+ usage_with_options_msg(usage_msg, options,
+ "Unknown --event key: `%s'", tok);
+ }
+ }
+ free(str);
+
+null_event_list_str:
+ /*
+ * config all kwork events if not specified
+ */
+ if (list_empty(&kwork->class_list)) {
+ for (i = 0; i < KWORK_CLASS_MAX; i++) {
+ list_add_tail(&kwork_class_supported_list[i]->list,
+ &kwork->class_list);
+ }
+ }
+
+ pr_debug("Config event list:");
+ list_for_each_entry(class, &kwork->class_list, list)
+ pr_debug(" %s", class->name);
+ pr_debug("\n");
+}
+
+static int perf_kwork__record(struct perf_kwork *kwork,
+ int argc, const char **argv)
+{
+ const char **rec_argv;
+ unsigned int rec_argc, i, j;
+ struct kwork_class *class;
+
+ const char *const record_args[] = {
+ "record",
+ "-a",
+ "-R",
+ "-m", "1024",
+ "-c", "1",
+ };
+
+ rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+
+ list_for_each_entry(class, &kwork->class_list, list)
+ rec_argc += 2 * class->nr_tracepoints;
+
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ if (rec_argv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(record_args); i++)
+ rec_argv[i] = strdup(record_args[i]);
+
+ list_for_each_entry(class, &kwork->class_list, list) {
+ for (j = 0; j < class->nr_tracepoints; j++) {
+ rec_argv[i++] = strdup("-e");
+ rec_argv[i++] = strdup(class->tp_handlers[j].name);
+ }
+ }
+
+ for (j = 1; j < (unsigned int)argc; j++, i++)
+ rec_argv[i] = argv[j];
+
+ BUG_ON(i != rec_argc);
+
+ pr_debug("record comm: ");
+ for (j = 0; j < rec_argc; j++)
+ pr_debug("%s ", rec_argv[j]);
+ pr_debug("\n");
+
+ return cmd_record(i, rec_argv);
+}
+
+int cmd_kwork(int argc, const char **argv)
+{
+ static struct perf_kwork kwork = {
+ .class_list = LIST_HEAD_INIT(kwork.class_list),
+ .tool = {
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .sample = perf_kwork__process_tracepoint_sample,
+ },
+ .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
+ .sort_list = LIST_HEAD_INIT(kwork.sort_list),
+ .cmp_id = LIST_HEAD_INIT(kwork.cmp_id),
+ .sorted_work_root = RB_ROOT_CACHED,
+ .tp_handler = NULL,
+ .profile_name = NULL,
+ .cpu_list = NULL,
+ .time_str = NULL,
+ .force = false,
+ .event_list_str = NULL,
+ .summary = false,
+ .sort_order = NULL,
+ .show_callchain = false,
+ .max_stack = 5,
+ .timestart = 0,
+ .timeend = 0,
+ .nr_events = 0,
+ .nr_lost_chunks = 0,
+ .nr_lost_events = 0,
+ .all_runtime = 0,
+ .all_count = 0,
+ .nr_skipped_events = { 0 },
+ };
+ static const char default_report_sort_order[] = "runtime, max, count";
+ static const char default_latency_sort_order[] = "avg, max, count";
+ const struct option kwork_options[] = {
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show symbol address, etc)"),
+ OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+ "dump raw trace in ASCII"),
+ OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
+ "list of kwork to profile (irq, softirq, workqueue, etc)"),
+ OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
+ OPT_END()
+ };
+ const struct option report_options[] = {
+ OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
+ "sort by key(s): runtime, max, count"),
+ OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_STRING('n', "name", &kwork.profile_name, "name",
+ "event name to profile"),
+ OPT_STRING(0, "time", &kwork.time_str, "str",
+ "Time span for analysis (start,stop)"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_BOOLEAN('S', "with-summary", &kwork.summary,
+ "Show summary with statistics"),
+#ifdef HAVE_BPF_SKEL
+ OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
+ "Use BPF to measure kwork runtime"),
+#endif
+ OPT_PARENT(kwork_options)
+ };
+ const struct option latency_options[] = {
+ OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
+ "sort by key(s): avg, max, count"),
+ OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_STRING('n', "name", &kwork.profile_name, "name",
+ "event name to profile"),
+ OPT_STRING(0, "time", &kwork.time_str, "str",
+ "Time span for analysis (start,stop)"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+#ifdef HAVE_BPF_SKEL
+ OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
+ "Use BPF to measure kwork latency"),
+#endif
+ OPT_PARENT(kwork_options)
+ };
+ const struct option timehist_options[] = {
+ OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ "file", "kallsyms pathname"),
+ OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
+ "Display call chains if present"),
+ OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
+ "Maximum number of functions to display backtrace."),
+ OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
+ "Look for files with symbols relative to this directory"),
+ OPT_STRING(0, "time", &kwork.time_str, "str",
+ "Time span for analysis (start,stop)"),
+ OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
+ "list of cpus to profile"),
+ OPT_STRING('n', "name", &kwork.profile_name, "name",
+ "event name to profile"),
+ OPT_STRING('i', "input", &input_name, "file",
+ "input file name"),
+ OPT_PARENT(kwork_options)
+ };
+ const char *kwork_usage[] = {
+ NULL,
+ NULL
+ };
+ const char * const report_usage[] = {
+ "perf kwork report [<options>]",
+ NULL
+ };
+ const char * const latency_usage[] = {
+ "perf kwork latency [<options>]",
+ NULL
+ };
+ const char * const timehist_usage[] = {
+ "perf kwork timehist [<options>]",
+ NULL
+ };
+ const char *const kwork_subcommands[] = {
+ "record", "report", "latency", "timehist", NULL
+ };
+
+ argc = parse_options_subcommand(argc, argv, kwork_options,
+ kwork_subcommands, kwork_usage,
+ PARSE_OPT_STOP_AT_NON_OPTION);
+ if (!argc)
+ usage_with_options(kwork_usage, kwork_options);
+
+ setup_event_list(&kwork, kwork_options, kwork_usage);
+ sort_dimension__add(&kwork, "id", &kwork.cmp_id);
+
+ if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
+ return perf_kwork__record(&kwork, argc, argv);
+ else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
+ kwork.sort_order = default_report_sort_order;
+ if (argc > 1) {
+ argc = parse_options(argc, argv, report_options, report_usage, 0);
+ if (argc)
+ usage_with_options(report_usage, report_options);
+ }
+ kwork.report = KWORK_REPORT_RUNTIME;
+ setup_sorting(&kwork, report_options, report_usage);
+ return perf_kwork__report(&kwork);
+ } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
+ kwork.sort_order = default_latency_sort_order;
+ if (argc > 1) {
+ argc = parse_options(argc, argv, latency_options, latency_usage, 0);
+ if (argc)
+ usage_with_options(latency_usage, latency_options);
+ }
+ kwork.report = KWORK_REPORT_LATENCY;
+ setup_sorting(&kwork, latency_options, latency_usage);
+ return perf_kwork__report(&kwork);
+ } else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
+ if (argc > 1) {
+ argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
+ if (argc)
+ usage_with_options(timehist_usage, timehist_options);
+ }
+ kwork.report = KWORK_REPORT_TIMEHIST;
+ return perf_kwork__timehist(&kwork);
+ } else
+ usage_with_options(kwork_usage, kwork_options);
+
+ return 0;
+}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 965ef017496f..1f5dbd5f0ba4 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -10,125 +10,568 @@
*/
#include "builtin.h"
-#include "util/parse-events.h"
+#include "util/print-events.h"
#include "util/pmu.h"
+#include "util/pmu-hybrid.h"
#include "util/debug.h"
#include "util/metricgroup.h"
+#include "util/string2.h"
+#include "util/strlist.h"
+#include "util/strbuf.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
+#include <linux/zalloc.h>
+#include <stdarg.h>
#include <stdio.h>
-static bool desc_flag = true;
-static bool details_flag;
+/**
+ * struct print_state - State and configuration passed to the default_print
+ * functions.
+ */
+struct print_state {
+ /**
+ * @pmu_glob: Optionally restrict PMU and metric matching to PMU or
+ * debugfs subsystem name.
+ */
+ char *pmu_glob;
+ /** @event_glob: Optional pattern matching glob. */
+ char *event_glob;
+ /** @name_only: Print event or metric names only. */
+ bool name_only;
+ /** @desc: Print the event or metric description. */
+ bool desc;
+ /** @long_desc: Print longer event or metric description. */
+ bool long_desc;
+ /** @deprecated: Print deprecated events or metrics. */
+ bool deprecated;
+ /**
+ * @detailed: Print extra information on the perf event such as names
+ * and expressions used internally by events.
+ */
+ bool detailed;
+ /** @metrics: Controls printing of metric and metric groups. */
+ bool metrics;
+ /** @metricgroups: Controls printing of metric and metric groups. */
+ bool metricgroups;
+ /** @last_topic: The last printed event topic. */
+ char *last_topic;
+ /** @last_metricgroups: The last printed metric group. */
+ char *last_metricgroups;
+ /** @visited_metrics: Metrics that are printed to avoid duplicates. */
+ struct strlist *visited_metrics;
+};
+
+static void default_print_start(void *ps)
+{
+ struct print_state *print_state = ps;
+
+ if (!print_state->name_only && pager_in_use())
+ printf("\nList of pre-defined events (to be used in -e or -M):\n\n");
+}
+
+static void default_print_end(void *print_state __maybe_unused) {}
+
+static void wordwrap(const char *s, int start, int max, int corr)
+{
+ int column = start;
+ int n;
+ bool saw_newline = false;
+
+ while (*s) {
+ int wlen = strcspn(s, " \t\n");
+
+ if ((column + wlen >= max && column > start) || saw_newline) {
+ printf("\n%*s", start, "");
+ column = start + corr;
+ }
+ n = printf("%s%.*s", column > start ? " " : "", wlen, s);
+ if (n <= 0)
+ break;
+ saw_newline = s[wlen] == '\n';
+ s += wlen;
+ column += n;
+ s = skip_spaces(s);
+ }
+}
+
+static void default_print_event(void *ps, const char *pmu_name, const char *topic,
+ const char *event_name, const char *event_alias,
+ const char *scale_unit __maybe_unused,
+ bool deprecated, const char *event_type_desc,
+ const char *desc, const char *long_desc,
+ const char *encoding_desc)
+{
+ struct print_state *print_state = ps;
+ int pos;
+
+ if (deprecated && !print_state->deprecated)
+ return;
+
+ if (print_state->pmu_glob && pmu_name && !strglobmatch(pmu_name, print_state->pmu_glob))
+ return;
+
+ if (print_state->event_glob &&
+ (!event_name || !strglobmatch(event_name, print_state->event_glob)) &&
+ (!event_alias || !strglobmatch(event_alias, print_state->event_glob)) &&
+ (!topic || !strglobmatch_nocase(topic, print_state->event_glob)))
+ return;
+
+ if (print_state->name_only) {
+ if (event_alias && strlen(event_alias))
+ printf("%s ", event_alias);
+ else
+ printf("%s ", event_name);
+ return;
+ }
+
+ if (strcmp(print_state->last_topic, topic ?: "")) {
+ if (topic)
+ printf("\n%s:\n", topic);
+ zfree(&print_state->last_topic);
+ print_state->last_topic = strdup(topic ?: "");
+ }
+
+ if (event_alias && strlen(event_alias))
+ pos = printf(" %s OR %s", event_name, event_alias);
+ else
+ pos = printf(" %s", event_name);
+
+ if (!topic && event_type_desc) {
+ for (; pos < 53; pos++)
+ putchar(' ');
+ printf("[%s]\n", event_type_desc);
+ } else
+ putchar('\n');
+
+ if (desc && print_state->desc) {
+ printf("%*s", 8, "[");
+ wordwrap(desc, 8, pager_get_columns(), 0);
+ printf("]\n");
+ }
+ long_desc = long_desc ?: desc;
+ if (long_desc && print_state->long_desc) {
+ printf("%*s", 8, "[");
+ wordwrap(long_desc, 8, pager_get_columns(), 0);
+ printf("]\n");
+ }
+
+ if (print_state->detailed && encoding_desc) {
+ printf("%*s", 8, "");
+ wordwrap(encoding_desc, 8, pager_get_columns(), 0);
+ putchar('\n');
+ }
+}
+
+static void default_print_metric(void *ps,
+ const char *group,
+ const char *name,
+ const char *desc,
+ const char *long_desc,
+ const char *expr,
+ const char *threshold,
+ const char *unit __maybe_unused)
+{
+ struct print_state *print_state = ps;
+
+ if (print_state->event_glob &&
+ (!print_state->metrics || !name || !strglobmatch(name, print_state->event_glob)) &&
+ (!print_state->metricgroups || !group || !strglobmatch(group, print_state->event_glob)))
+ return;
+
+ if (!print_state->name_only && !print_state->last_metricgroups) {
+ if (print_state->metricgroups) {
+ printf("\nMetric Groups:\n");
+ if (!print_state->metrics)
+ putchar('\n');
+ } else {
+ printf("\nMetrics:\n\n");
+ }
+ }
+ if (!print_state->last_metricgroups ||
+ strcmp(print_state->last_metricgroups, group ?: "")) {
+ if (group && print_state->metricgroups) {
+ if (print_state->name_only)
+ printf("%s ", group);
+ else if (print_state->metrics)
+ printf("\n%s:\n", group);
+ else
+ printf("%s\n", group);
+ }
+ zfree(&print_state->last_metricgroups);
+ print_state->last_metricgroups = strdup(group ?: "");
+ }
+ if (!print_state->metrics)
+ return;
+
+ if (print_state->name_only) {
+ if (print_state->metrics &&
+ !strlist__has_entry(print_state->visited_metrics, name)) {
+ printf("%s ", name);
+ strlist__add(print_state->visited_metrics, name);
+ }
+ return;
+ }
+ printf(" %s\n", name);
+
+ if (desc && print_state->desc) {
+ printf("%*s", 8, "[");
+ wordwrap(desc, 8, pager_get_columns(), 0);
+ printf("]\n");
+ }
+ if (long_desc && print_state->long_desc) {
+ printf("%*s", 8, "[");
+ wordwrap(long_desc, 8, pager_get_columns(), 0);
+ printf("]\n");
+ }
+ if (expr && print_state->detailed) {
+ printf("%*s", 8, "[");
+ wordwrap(expr, 8, pager_get_columns(), 0);
+ printf("]\n");
+ }
+ if (threshold && print_state->detailed) {
+ printf("%*s", 8, "[");
+ wordwrap(threshold, 8, pager_get_columns(), 0);
+ printf("]\n");
+ }
+}
+
+struct json_print_state {
+ /** Should a separator be printed prior to the next item? */
+ bool need_sep;
+};
+
+static void json_print_start(void *print_state __maybe_unused)
+{
+ printf("[\n");
+}
+
+static void json_print_end(void *ps)
+{
+ struct json_print_state *print_state = ps;
+
+ printf("%s]\n", print_state->need_sep ? "\n" : "");
+}
+
+static void fix_escape_printf(struct strbuf *buf, const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ strbuf_setlen(buf, 0);
+ for (size_t fmt_pos = 0; fmt_pos < strlen(fmt); fmt_pos++) {
+ switch (fmt[fmt_pos]) {
+ case '%':
+ fmt_pos++;
+ switch (fmt[fmt_pos]) {
+ case 's': {
+ const char *s = va_arg(args, const char*);
+
+ strbuf_addstr(buf, s);
+ break;
+ }
+ case 'S': {
+ const char *s = va_arg(args, const char*);
+
+ for (size_t s_pos = 0; s_pos < strlen(s); s_pos++) {
+ switch (s[s_pos]) {
+ case '\n':
+ strbuf_addstr(buf, "\\n");
+ break;
+ case '\\':
+ fallthrough;
+ case '\"':
+ strbuf_addch(buf, '\\');
+ fallthrough;
+ default:
+ strbuf_addch(buf, s[s_pos]);
+ break;
+ }
+ }
+ break;
+ }
+ default:
+ pr_err("Unexpected format character '%c'\n", fmt[fmt_pos]);
+ strbuf_addch(buf, '%');
+ strbuf_addch(buf, fmt[fmt_pos]);
+ }
+ break;
+ default:
+ strbuf_addch(buf, fmt[fmt_pos]);
+ break;
+ }
+ }
+ va_end(args);
+ fputs(buf->buf, stdout);
+}
+
+static void json_print_event(void *ps, const char *pmu_name, const char *topic,
+ const char *event_name, const char *event_alias,
+ const char *scale_unit,
+ bool deprecated, const char *event_type_desc,
+ const char *desc, const char *long_desc,
+ const char *encoding_desc)
+{
+ struct json_print_state *print_state = ps;
+ bool need_sep = false;
+ struct strbuf buf;
+
+ strbuf_init(&buf, 0);
+ printf("%s{\n", print_state->need_sep ? ",\n" : "");
+ print_state->need_sep = true;
+ if (pmu_name) {
+ fix_escape_printf(&buf, "\t\"Unit\": \"%S\"", pmu_name);
+ need_sep = true;
+ }
+ if (topic) {
+ fix_escape_printf(&buf, "%s\t\"Topic\": \"%S\"", need_sep ? ",\n" : "", topic);
+ need_sep = true;
+ }
+ if (event_name) {
+ fix_escape_printf(&buf, "%s\t\"EventName\": \"%S\"", need_sep ? ",\n" : "",
+ event_name);
+ need_sep = true;
+ }
+ if (event_alias && strlen(event_alias)) {
+ fix_escape_printf(&buf, "%s\t\"EventAlias\": \"%S\"", need_sep ? ",\n" : "",
+ event_alias);
+ need_sep = true;
+ }
+ if (scale_unit && strlen(scale_unit)) {
+ fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "",
+ scale_unit);
+ need_sep = true;
+ }
+ if (event_type_desc) {
+ fix_escape_printf(&buf, "%s\t\"EventType\": \"%S\"", need_sep ? ",\n" : "",
+ event_type_desc);
+ need_sep = true;
+ }
+ if (deprecated) {
+ fix_escape_printf(&buf, "%s\t\"Deprecated\": \"%S\"", need_sep ? ",\n" : "",
+ deprecated ? "1" : "0");
+ need_sep = true;
+ }
+ if (desc) {
+ fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "",
+ desc);
+ need_sep = true;
+ }
+ if (long_desc) {
+ fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "",
+ long_desc);
+ need_sep = true;
+ }
+ if (encoding_desc) {
+ fix_escape_printf(&buf, "%s\t\"Encoding\": \"%S\"", need_sep ? ",\n" : "",
+ encoding_desc);
+ need_sep = true;
+ }
+ printf("%s}", need_sep ? "\n" : "");
+ strbuf_release(&buf);
+}
+
+static void json_print_metric(void *ps __maybe_unused, const char *group,
+ const char *name, const char *desc,
+ const char *long_desc, const char *expr,
+ const char *threshold, const char *unit)
+{
+ struct json_print_state *print_state = ps;
+ bool need_sep = false;
+ struct strbuf buf;
+
+ strbuf_init(&buf, 0);
+ printf("%s{\n", print_state->need_sep ? ",\n" : "");
+ print_state->need_sep = true;
+ if (group) {
+ fix_escape_printf(&buf, "\t\"MetricGroup\": \"%S\"", group);
+ need_sep = true;
+ }
+ if (name) {
+ fix_escape_printf(&buf, "%s\t\"MetricName\": \"%S\"", need_sep ? ",\n" : "", name);
+ need_sep = true;
+ }
+ if (expr) {
+ fix_escape_printf(&buf, "%s\t\"MetricExpr\": \"%S\"", need_sep ? ",\n" : "", expr);
+ need_sep = true;
+ }
+ if (threshold) {
+ fix_escape_printf(&buf, "%s\t\"MetricThreshold\": \"%S\"", need_sep ? ",\n" : "",
+ threshold);
+ need_sep = true;
+ }
+ if (unit) {
+ fix_escape_printf(&buf, "%s\t\"ScaleUnit\": \"%S\"", need_sep ? ",\n" : "", unit);
+ need_sep = true;
+ }
+ if (desc) {
+ fix_escape_printf(&buf, "%s\t\"BriefDescription\": \"%S\"", need_sep ? ",\n" : "",
+ desc);
+ need_sep = true;
+ }
+ if (long_desc) {
+ fix_escape_printf(&buf, "%s\t\"PublicDescription\": \"%S\"", need_sep ? ",\n" : "",
+ long_desc);
+ need_sep = true;
+ }
+ printf("%s}", need_sep ? "\n" : "");
+ strbuf_release(&buf);
+}
int cmd_list(int argc, const char **argv)
{
- int i;
- bool raw_dump = false;
- bool long_desc_flag = false;
- bool deprecated = false;
+ int i, ret = 0;
+ struct print_state default_ps = {};
+ struct print_state json_ps = {};
+ void *ps = &default_ps;
+ struct print_callbacks print_cb = {
+ .print_start = default_print_start,
+ .print_end = default_print_end,
+ .print_event = default_print_event,
+ .print_metric = default_print_metric,
+ };
+ const char *hybrid_name = NULL;
+ const char *unit_name = NULL;
+ bool json = false;
struct option list_options[] = {
- OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
- OPT_BOOLEAN('d', "desc", &desc_flag,
+ OPT_BOOLEAN(0, "raw-dump", &default_ps.name_only, "Dump raw events"),
+ OPT_BOOLEAN('j', "json", &json, "JSON encode events and metrics"),
+ OPT_BOOLEAN('d', "desc", &default_ps.desc,
"Print extra event descriptions. --no-desc to not print."),
- OPT_BOOLEAN('v', "long-desc", &long_desc_flag,
+ OPT_BOOLEAN('v', "long-desc", &default_ps.long_desc,
"Print longer event descriptions."),
- OPT_BOOLEAN(0, "details", &details_flag,
+ OPT_BOOLEAN(0, "details", &default_ps.detailed,
"Print information on the perf event names and expressions used internally by events."),
- OPT_BOOLEAN(0, "deprecated", &deprecated,
+ OPT_BOOLEAN(0, "deprecated", &default_ps.deprecated,
"Print deprecated events."),
+ OPT_STRING(0, "cputype", &hybrid_name, "hybrid cpu type",
+ "Limit PMU or metric printing to the given hybrid PMU (e.g. core or atom)."),
+ OPT_STRING(0, "unit", &unit_name, "PMU name",
+ "Limit PMU or metric printing to the specified PMU."),
OPT_INCR(0, "debug", &verbose,
"Enable debugging output"),
OPT_END()
};
const char * const list_usage[] = {
- "perf list [<options>] [hw|sw|cache|tracepoint|pmu|sdt|event_glob]",
+ "perf list [<options>] [hw|sw|cache|tracepoint|pmu|sdt|metric|metricgroup|event_glob]",
NULL
};
set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
+ /* Hide hybrid flag for the more generic 'unit' flag. */
+ set_option_flag(list_options, 0, "cputype", PARSE_OPT_HIDDEN);
argc = parse_options(argc, argv, list_options, list_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
setup_pager();
- if (!raw_dump && pager_in_use())
- printf("\nList of pre-defined events (to be used in -e):\n\n");
+ if (!default_ps.name_only)
+ setup_pager();
+
+ if (json) {
+ print_cb = (struct print_callbacks){
+ .print_start = json_print_start,
+ .print_end = json_print_end,
+ .print_event = json_print_event,
+ .print_metric = json_print_metric,
+ };
+ ps = &json_ps;
+ } else {
+ default_ps.desc = !default_ps.long_desc;
+ default_ps.last_topic = strdup("");
+ assert(default_ps.last_topic);
+ default_ps.visited_metrics = strlist__new(NULL, NULL);
+ assert(default_ps.visited_metrics);
+ if (unit_name)
+ default_ps.pmu_glob = strdup(unit_name);
+ else if (hybrid_name) {
+ default_ps.pmu_glob = perf_pmu__hybrid_type_to_pmu(hybrid_name);
+ if (!default_ps.pmu_glob)
+ pr_warning("WARNING: hybrid cputype is not supported!\n");
+ }
+ }
+ print_cb.print_start(ps);
if (argc == 0) {
- print_events(NULL, raw_dump, !desc_flag, long_desc_flag,
- details_flag, deprecated);
- return 0;
+ default_ps.metrics = true;
+ default_ps.metricgroups = true;
+ print_events(&print_cb, ps);
+ goto out;
}
for (i = 0; i < argc; ++i) {
char *sep, *s;
if (strcmp(argv[i], "tracepoint") == 0)
- print_tracepoint_events(NULL, NULL, raw_dump);
+ print_tracepoint_events(&print_cb, ps);
else if (strcmp(argv[i], "hw") == 0 ||
strcmp(argv[i], "hardware") == 0)
- print_symbol_events(NULL, PERF_TYPE_HARDWARE,
- event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
+ print_symbol_events(&print_cb, ps, PERF_TYPE_HARDWARE,
+ event_symbols_hw, PERF_COUNT_HW_MAX);
else if (strcmp(argv[i], "sw") == 0 ||
strcmp(argv[i], "software") == 0) {
- print_symbol_events(NULL, PERF_TYPE_SOFTWARE,
- event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
- print_tool_events(NULL, raw_dump);
+ print_symbol_events(&print_cb, ps, PERF_TYPE_SOFTWARE,
+ event_symbols_sw, PERF_COUNT_SW_MAX);
+ print_tool_events(&print_cb, ps);
} else if (strcmp(argv[i], "cache") == 0 ||
strcmp(argv[i], "hwcache") == 0)
- print_hwcache_events(NULL, raw_dump);
+ print_hwcache_events(&print_cb, ps);
else if (strcmp(argv[i], "pmu") == 0)
- print_pmu_events(NULL, raw_dump, !desc_flag,
- long_desc_flag, details_flag,
- deprecated);
+ print_pmu_events(&print_cb, ps);
else if (strcmp(argv[i], "sdt") == 0)
- print_sdt_events(NULL, NULL, raw_dump);
- else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0)
- metricgroup__print(true, false, NULL, raw_dump, details_flag);
- else if (strcmp(argv[i], "metricgroup") == 0 || strcmp(argv[i], "metricgroups") == 0)
- metricgroup__print(false, true, NULL, raw_dump, details_flag);
- else if ((sep = strchr(argv[i], ':')) != NULL) {
- int sep_idx;
-
- if (sep == NULL) {
- print_events(argv[i], raw_dump, !desc_flag,
- long_desc_flag,
- details_flag,
- deprecated);
- continue;
+ print_sdt_events(&print_cb, ps);
+ else if (strcmp(argv[i], "metric") == 0 || strcmp(argv[i], "metrics") == 0) {
+ default_ps.metricgroups = false;
+ default_ps.metrics = true;
+ metricgroup__print(&print_cb, ps);
+ } else if (strcmp(argv[i], "metricgroup") == 0 ||
+ strcmp(argv[i], "metricgroups") == 0) {
+ default_ps.metricgroups = true;
+ default_ps.metrics = false;
+ metricgroup__print(&print_cb, ps);
+ } else if ((sep = strchr(argv[i], ':')) != NULL) {
+ char *old_pmu_glob = default_ps.pmu_glob;
+
+ default_ps.event_glob = strdup(argv[i]);
+ if (!default_ps.event_glob) {
+ ret = -1;
+ goto out;
}
- sep_idx = sep - argv[i];
- s = strdup(argv[i]);
- if (s == NULL)
- return -1;
-
- s[sep_idx] = '\0';
- print_tracepoint_events(s, s + sep_idx + 1, raw_dump);
- print_sdt_events(s, s + sep_idx + 1, raw_dump);
- metricgroup__print(true, true, s, raw_dump, details_flag);
- free(s);
+
+ print_tracepoint_events(&print_cb, ps);
+ print_sdt_events(&print_cb, ps);
+ default_ps.metrics = true;
+ default_ps.metricgroups = true;
+ metricgroup__print(&print_cb, ps);
+ zfree(&default_ps.event_glob);
+ default_ps.pmu_glob = old_pmu_glob;
} else {
if (asprintf(&s, "*%s*", argv[i]) < 0) {
printf("Critical: Not enough memory! Trying to continue...\n");
continue;
}
- print_symbol_events(s, PERF_TYPE_HARDWARE,
- event_symbols_hw, PERF_COUNT_HW_MAX, raw_dump);
- print_symbol_events(s, PERF_TYPE_SOFTWARE,
- event_symbols_sw, PERF_COUNT_SW_MAX, raw_dump);
- print_tool_events(s, raw_dump);
- print_hwcache_events(s, raw_dump);
- print_pmu_events(s, raw_dump, !desc_flag,
- long_desc_flag,
- details_flag,
- deprecated);
- print_tracepoint_events(NULL, s, raw_dump);
- print_sdt_events(NULL, s, raw_dump);
- metricgroup__print(true, true, s, raw_dump, details_flag);
+ default_ps.event_glob = s;
+ print_symbol_events(&print_cb, ps, PERF_TYPE_HARDWARE,
+ event_symbols_hw, PERF_COUNT_HW_MAX);
+ print_symbol_events(&print_cb, ps, PERF_TYPE_SOFTWARE,
+ event_symbols_sw, PERF_COUNT_SW_MAX);
+ print_tool_events(&print_cb, ps);
+ print_hwcache_events(&print_cb, ps);
+ print_pmu_events(&print_cb, ps);
+ print_tracepoint_events(&print_cb, ps);
+ print_sdt_events(&print_cb, ps);
+ default_ps.metrics = true;
+ default_ps.metricgroups = true;
+ metricgroup__print(&print_cb, ps);
free(s);
}
}
- return 0;
+
+out:
+ print_cb.print_end(ps);
+ free(default_ps.pmu_glob);
+ free(default_ps.last_topic);
+ free(default_ps.last_metricgroups);
+ strlist__delete(default_ps.visited_metrics);
+ return ret;
}
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 474dfd59d7eb..70b14ba5fdd5 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -9,116 +9,76 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
+#include "util/target.h"
+#include "util/callchain.h"
+#include "util/lock-contention.h"
+#include "util/bpf_skel/lock_data.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
+#include "util/tracepoint.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
+#include "util/string2.h"
+#include "util/map.h"
+#include "util/util.h"
#include <sys/types.h>
#include <sys/prctl.h>
#include <semaphore.h>
-#include <pthread.h>
#include <math.h>
#include <limits.h>
+#include <ctype.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <linux/err.h>
+#include <linux/stringify.h>
static struct perf_session *session;
+static struct target target;
/* based on kernel/lockdep.c */
#define LOCKHASH_BITS 12
#define LOCKHASH_SIZE (1UL << LOCKHASH_BITS)
-static struct list_head lockhash_table[LOCKHASH_SIZE];
+static struct hlist_head lockhash_table[LOCKHASH_SIZE];
#define __lockhashfn(key) hash_long((unsigned long)key, LOCKHASH_BITS)
#define lockhashentry(key) (lockhash_table + __lockhashfn((key)))
-struct lock_stat {
- struct list_head hash_entry;
- struct rb_node rb; /* used for sorting */
-
- /*
- * FIXME: perf_evsel__intval() returns u64,
- * so address of lockdep_map should be dealed as 64bit.
- * Is there more better solution?
- */
- void *addr; /* address of lockdep_map, used as ID */
- char *name; /* for strcpy(), we cannot use const */
-
- unsigned int nr_acquire;
- unsigned int nr_acquired;
- unsigned int nr_contended;
- unsigned int nr_release;
-
- unsigned int nr_readlock;
- unsigned int nr_trylock;
-
- /* these times are in nano sec. */
- u64 avg_wait_time;
- u64 wait_time_total;
- u64 wait_time_min;
- u64 wait_time_max;
-
- int discard; /* flag of blacklist */
-};
+static struct rb_root thread_stats;
-/*
- * States of lock_seq_stat
- *
- * UNINITIALIZED is required for detecting first event of acquire.
- * As the nature of lock events, there is no guarantee
- * that the first event for the locks are acquire,
- * it can be acquired, contended or release.
- */
-#define SEQ_STATE_UNINITIALIZED 0 /* initial state */
-#define SEQ_STATE_RELEASED 1
-#define SEQ_STATE_ACQUIRING 2
-#define SEQ_STATE_ACQUIRED 3
-#define SEQ_STATE_READ_ACQUIRED 4
-#define SEQ_STATE_CONTENDED 5
-
-/*
- * MAX_LOCK_DEPTH
- * Imported from include/linux/sched.h.
- * Should this be synchronized?
- */
-#define MAX_LOCK_DEPTH 48
-
-/*
- * struct lock_seq_stat:
- * Place to put on state of one lock sequence
- * 1) acquire -> acquired -> release
- * 2) acquire -> contended -> acquired -> release
- * 3) acquire (with read or try) -> release
- * 4) Are there other patterns?
- */
-struct lock_seq_stat {
- struct list_head list;
- int state;
- u64 prev_event_time;
- void *addr;
-
- int read_count;
+static bool combine_locks;
+static bool show_thread_stats;
+static bool show_lock_addrs;
+static bool show_lock_owner;
+static bool use_bpf;
+static unsigned long bpf_map_entries = MAX_ENTRIES;
+static int max_stack_depth = CONTENTION_STACK_DEPTH;
+static int stack_skip = CONTENTION_STACK_SKIP;
+static int print_nr_entries = INT_MAX / 2;
+static LIST_HEAD(callstack_filters);
+
+struct callstack_filter {
+ struct list_head list;
+ char name[];
};
-struct thread_stat {
- struct rb_node rb;
+static struct lock_filter filters;
- u32 tid;
- struct list_head seq_list;
-};
+static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
-static struct rb_root thread_stats;
+static bool needs_callstack(void)
+{
+ return !list_empty(&callstack_filters);
+}
static struct thread_stat *thread_stat_find(u32 tid)
{
@@ -239,46 +199,234 @@ struct lock_key {
* e.g. nr_acquired -> acquired, wait_time_total -> wait_total
*/
const char *name;
+ /* header: the string printed on the header line */
+ const char *header;
+ /* len: the printing width of the field */
+ int len;
+ /* key: a pointer to function to compare two lock stats for sorting */
int (*key)(struct lock_stat*, struct lock_stat*);
+ /* print: a pointer to function to print a given lock stats */
+ void (*print)(struct lock_key*, struct lock_stat*);
+ /* list: list entry to link this */
+ struct list_head list;
};
+static void lock_stat_key_print_time(unsigned long long nsec, int len)
+{
+ static const struct {
+ float base;
+ const char *unit;
+ } table[] = {
+ { 1e9 * 3600, "h " },
+ { 1e9 * 60, "m " },
+ { 1e9, "s " },
+ { 1e6, "ms" },
+ { 1e3, "us" },
+ { 0, NULL },
+ };
+
+ for (int i = 0; table[i].unit; i++) {
+ if (nsec < table[i].base)
+ continue;
+
+ pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
+ return;
+ }
+
+ pr_info("%*llu %s", len - 3, nsec, "ns");
+}
+
+#define PRINT_KEY(member) \
+static void lock_stat_key_print_ ## member(struct lock_key *key, \
+ struct lock_stat *ls) \
+{ \
+ pr_info("%*llu", key->len, (unsigned long long)ls->member); \
+}
+
+#define PRINT_TIME(member) \
+static void lock_stat_key_print_ ## member(struct lock_key *key, \
+ struct lock_stat *ls) \
+{ \
+ lock_stat_key_print_time((unsigned long long)ls->member, key->len); \
+}
+
+PRINT_KEY(nr_acquired)
+PRINT_KEY(nr_contended)
+PRINT_TIME(avg_wait_time)
+PRINT_TIME(wait_time_total)
+PRINT_TIME(wait_time_max)
+
+static void lock_stat_key_print_wait_time_min(struct lock_key *key,
+ struct lock_stat *ls)
+{
+ u64 wait_time = ls->wait_time_min;
+
+ if (wait_time == ULLONG_MAX)
+ wait_time = 0;
+
+ lock_stat_key_print_time(wait_time, key->len);
+}
+
+
static const char *sort_key = "acquired";
static int (*compare)(struct lock_stat *, struct lock_stat *);
+static struct rb_root sorted; /* place to store intermediate data */
static struct rb_root result; /* place to store sorted data */
-#define DEF_KEY_LOCK(name, fn_suffix) \
- { #name, lock_stat_key_ ## fn_suffix }
-struct lock_key keys[] = {
- DEF_KEY_LOCK(acquired, nr_acquired),
- DEF_KEY_LOCK(contended, nr_contended),
- DEF_KEY_LOCK(avg_wait, avg_wait_time),
- DEF_KEY_LOCK(wait_total, wait_time_total),
- DEF_KEY_LOCK(wait_min, wait_time_min),
- DEF_KEY_LOCK(wait_max, wait_time_max),
+static LIST_HEAD(lock_keys);
+static const char *output_fields;
+
+#define DEF_KEY_LOCK(name, header, fn_suffix, len) \
+ { #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
+static struct lock_key report_keys[] = {
+ DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
+ DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
+ DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
+ DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
+ DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
+ DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
/* extra comparisons much complicated should be here */
+ { }
+};
+
+static struct lock_key contention_keys[] = {
+ DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
+ DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
+ DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
+ DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
+ DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
- { NULL, NULL }
+ /* extra comparisons much complicated should be here */
+ { }
};
-static int select_key(void)
+static int select_key(bool contention)
{
int i;
+ struct lock_key *keys = report_keys;
+
+ if (contention)
+ keys = contention_keys;
for (i = 0; keys[i].name; i++) {
if (!strcmp(keys[i].name, sort_key)) {
compare = keys[i].key;
+
+ /* selected key should be in the output fields */
+ if (list_empty(&keys[i].list))
+ list_add_tail(&keys[i].list, &lock_keys);
+
return 0;
}
}
pr_err("Unknown compare key: %s\n", sort_key);
+ return -1;
+}
+
+static int add_output_field(bool contention, char *name)
+{
+ int i;
+ struct lock_key *keys = report_keys;
+
+ if (contention)
+ keys = contention_keys;
+ for (i = 0; keys[i].name; i++) {
+ if (strcmp(keys[i].name, name))
+ continue;
+
+ /* prevent double link */
+ if (list_empty(&keys[i].list))
+ list_add_tail(&keys[i].list, &lock_keys);
+
+ return 0;
+ }
+
+ pr_err("Unknown output field: %s\n", name);
return -1;
}
+static int setup_output_field(bool contention, const char *str)
+{
+ char *tok, *tmp, *orig;
+ int i, ret = 0;
+ struct lock_key *keys = report_keys;
+
+ if (contention)
+ keys = contention_keys;
+
+ /* no output field given: use all of them */
+ if (str == NULL) {
+ for (i = 0; keys[i].name; i++)
+ list_add_tail(&keys[i].list, &lock_keys);
+ return 0;
+ }
+
+ for (i = 0; keys[i].name; i++)
+ INIT_LIST_HEAD(&keys[i].list);
+
+ orig = tmp = strdup(str);
+ if (orig == NULL)
+ return -ENOMEM;
+
+ while ((tok = strsep(&tmp, ",")) != NULL){
+ ret = add_output_field(contention, tok);
+ if (ret < 0)
+ break;
+ }
+ free(orig);
+
+ return ret;
+}
+
+static void combine_lock_stats(struct lock_stat *st)
+{
+ struct rb_node **rb = &sorted.rb_node;
+ struct rb_node *parent = NULL;
+ struct lock_stat *p;
+ int ret;
+
+ while (*rb) {
+ p = container_of(*rb, struct lock_stat, rb);
+ parent = *rb;
+
+ if (st->name && p->name)
+ ret = strcmp(st->name, p->name);
+ else
+ ret = !!st->name - !!p->name;
+
+ if (ret == 0) {
+ p->nr_acquired += st->nr_acquired;
+ p->nr_contended += st->nr_contended;
+ p->wait_time_total += st->wait_time_total;
+
+ if (p->nr_contended)
+ p->avg_wait_time = p->wait_time_total / p->nr_contended;
+
+ if (p->wait_time_min > st->wait_time_min)
+ p->wait_time_min = st->wait_time_min;
+ if (p->wait_time_max < st->wait_time_max)
+ p->wait_time_max = st->wait_time_max;
+
+ p->broken |= st->broken;
+ st->combined = 1;
+ return;
+ }
+
+ if (ret < 0)
+ rb = &(*rb)->rb_left;
+ else
+ rb = &(*rb)->rb_right;
+ }
+
+ rb_link_node(&st->rb, parent, rb);
+ rb_insert_color(&st->rb, &sorted);
+}
+
static void insert_to_result(struct lock_stat *st,
int (*bigger)(struct lock_stat *, struct lock_stat *))
{
@@ -286,6 +434,9 @@ static void insert_to_result(struct lock_stat *st,
struct rb_node *parent = NULL;
struct lock_stat *p;
+ if (combine_locks && st->combined)
+ return;
+
while (*rb) {
p = container_of(*rb, struct lock_stat, rb);
parent = *rb;
@@ -315,12 +466,24 @@ static struct lock_stat *pop_from_result(void)
return container_of(node, struct lock_stat, rb);
}
-static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
+struct lock_stat *lock_stat_find(u64 addr)
{
- struct list_head *entry = lockhashentry(addr);
+ struct hlist_head *entry = lockhashentry(addr);
+ struct lock_stat *ret;
+
+ hlist_for_each_entry(ret, entry, hash_entry) {
+ if (ret->addr == addr)
+ return ret;
+ }
+ return NULL;
+}
+
+struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
+{
+ struct hlist_head *entry = lockhashentry(addr);
struct lock_stat *ret, *new;
- list_for_each_entry(ret, entry, hash_entry) {
+ hlist_for_each_entry(ret, entry, hash_entry) {
if (ret->addr == addr)
return ret;
}
@@ -330,16 +493,16 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
goto alloc_failed;
new->addr = addr;
- new->name = zalloc(sizeof(char) * strlen(name) + 1);
+ new->name = strdup(name);
if (!new->name) {
free(new);
goto alloc_failed;
}
- strcpy(new->name, name);
+ new->flags = flags;
new->wait_time_min = ULLONG_MAX;
- list_add(&new->hash_entry, entry);
+ hlist_add_head(&new->hash_entry, entry);
return new;
alloc_failed:
@@ -347,21 +510,61 @@ alloc_failed:
return NULL;
}
+bool match_callstack_filter(struct machine *machine, u64 *callstack)
+{
+ struct map *kmap;
+ struct symbol *sym;
+ u64 ip;
+
+ if (list_empty(&callstack_filters))
+ return true;
+
+ for (int i = 0; i < max_stack_depth; i++) {
+ struct callstack_filter *filter;
+
+ if (!callstack || !callstack[i])
+ break;
+
+ ip = callstack[i];
+ sym = machine__find_kernel_symbol(machine, ip, &kmap);
+ if (sym == NULL)
+ continue;
+
+ list_for_each_entry(filter, &callstack_filters, list) {
+ if (strstr(sym->name, filter->name))
+ return true;
+ }
+ }
+ return false;
+}
+
struct trace_lock_handler {
+ /* used when CONFIG_LOCKDEP is enabled */
int (*acquire_event)(struct evsel *evsel,
struct perf_sample *sample);
+ /* used when CONFIG_LOCKDEP && CONFIG_LOCK_STAT are enabled */
int (*acquired_event)(struct evsel *evsel,
struct perf_sample *sample);
+ /* used when CONFIG_LOCKDEP && CONFIG_LOCK_STAT are enabled */
int (*contended_event)(struct evsel *evsel,
struct perf_sample *sample);
+ /* used when CONFIG_LOCKDEP is enabled */
int (*release_event)(struct evsel *evsel,
struct perf_sample *sample);
+
+ /* used when CONFIG_LOCKDEP is off */
+ int (*contention_begin_event)(struct evsel *evsel,
+ struct perf_sample *sample);
+
+ /* used when CONFIG_LOCKDEP is off */
+ int (*contention_end_event)(struct evsel *evsel,
+ struct perf_sample *sample);
};
-static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
+static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
{
struct lock_seq_stat *seq;
@@ -397,24 +600,54 @@ enum acquire_flags {
READ_LOCK = 2,
};
+static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
+{
+ switch (aggr_mode) {
+ case LOCK_AGGR_ADDR:
+ *key = addr;
+ break;
+ case LOCK_AGGR_TASK:
+ *key = tid;
+ break;
+ case LOCK_AGGR_CALLER:
+ default:
+ pr_err("Invalid aggregation mode: %d\n", aggr_mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);
+
+static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
+ struct perf_sample *sample)
+{
+ if (aggr_mode == LOCK_AGGR_CALLER) {
+ *key = callchain_id(evsel, sample);
+ return 0;
+ }
+ return get_key_by_aggr_mode_simple(key, addr, sample->tid);
+}
+
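Illustrative summary, not part of the patch: which value becomes the aggregation key in each mode.

/*
 * LOCK_AGGR_ADDR    key = lock address (addr)
 * LOCK_AGGR_TASK    key = thread id (sample->tid)
 * LOCK_AGGR_CALLER  key = callchain_id(evsel, sample), a hash of the
 *                   callchain after the lock functions are skipped
 * The legacy lockdep tracepoints support only the first two, hence the
 * _simple variant that rejects LOCK_AGGR_CALLER with -EINVAL.
 */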
static int report_lock_acquire_event(struct evsel *evsel,
struct perf_sample *sample)
{
- void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
- const char *name = perf_evsel__strval(evsel, sample, "name");
- u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
- int flag = perf_evsel__intval(evsel, sample, "flag");
+ const char *name = evsel__strval(evsel, sample, "name");
+ u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ int flag = evsel__intval(evsel, sample, "flags");
+ u64 key;
+ int ret;
- memcpy(&addr, &tmp, sizeof(void *));
+ ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
+ if (ret < 0)
+ return ret;
- ls = lock_stat_findnew(addr, name);
+ ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
- if (ls->discard)
- return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
@@ -452,9 +685,11 @@ static int report_lock_acquire_event(struct evsel *evsel,
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_CONTENDED:
broken:
- /* broken lock sequence, discard it */
- ls->discard = 1;
- bad_hist[BROKEN_ACQUIRE]++;
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_ACQUIRE]++;
+ }
list_del_init(&seq->list);
free(seq);
goto end;
@@ -472,21 +707,22 @@ end:
static int report_lock_acquired_event(struct evsel *evsel,
struct perf_sample *sample)
{
- void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
u64 contended_term;
- const char *name = perf_evsel__strval(evsel, sample, "name");
- u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+ const char *name = evsel__strval(evsel, sample, "name");
+ u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ u64 key;
+ int ret;
- memcpy(&addr, &tmp, sizeof(void *));
+ ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
+ if (ret < 0)
+ return ret;
- ls = lock_stat_findnew(addr, name);
+ ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
- if (ls->discard)
- return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
@@ -513,9 +749,11 @@ static int report_lock_acquired_event(struct evsel *evsel,
case SEQ_STATE_RELEASED:
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
- /* broken lock sequence, discard it */
- ls->discard = 1;
- bad_hist[BROKEN_ACQUIRED]++;
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_ACQUIRED]++;
+ }
list_del_init(&seq->list);
free(seq);
goto end;
@@ -535,20 +773,21 @@ end:
static int report_lock_contended_event(struct evsel *evsel,
struct perf_sample *sample)
{
- void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
- const char *name = perf_evsel__strval(evsel, sample, "name");
- u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+ const char *name = evsel__strval(evsel, sample, "name");
+ u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ u64 key;
+ int ret;
- memcpy(&addr, &tmp, sizeof(void *));
+ ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
+ if (ret < 0)
+ return ret;
- ls = lock_stat_findnew(addr, name);
+ ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
- if (ls->discard)
- return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
@@ -568,9 +807,11 @@ static int report_lock_contended_event(struct evsel *evsel,
case SEQ_STATE_ACQUIRED:
case SEQ_STATE_READ_ACQUIRED:
case SEQ_STATE_CONTENDED:
- /* broken lock sequence, discard it */
- ls->discard = 1;
- bad_hist[BROKEN_CONTENDED]++;
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_CONTENDED]++;
+ }
list_del_init(&seq->list);
free(seq);
goto end;
@@ -590,20 +831,21 @@ end:
static int report_lock_release_event(struct evsel *evsel,
struct perf_sample *sample)
{
- void *addr;
struct lock_stat *ls;
struct thread_stat *ts;
struct lock_seq_stat *seq;
- const char *name = perf_evsel__strval(evsel, sample, "name");
- u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr");
+ const char *name = evsel__strval(evsel, sample, "name");
+ u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
+ u64 key;
+ int ret;
- memcpy(&addr, &tmp, sizeof(void *));
+ ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
+ if (ret < 0)
+ return ret;
- ls = lock_stat_findnew(addr, name);
+ ls = lock_stat_findnew(key, name, 0);
if (!ls)
return -ENOMEM;
- if (ls->discard)
- return 0;
ts = thread_stat_findnew(sample->tid);
if (!ts)
@@ -621,7 +863,7 @@ static int report_lock_release_event(struct evsel *evsel,
case SEQ_STATE_READ_ACQUIRED:
seq->read_count--;
BUG_ON(seq->read_count < 0);
- if (!seq->read_count) {
+ if (seq->read_count) {
ls->nr_release++;
goto end;
}
@@ -629,9 +871,11 @@ static int report_lock_release_event(struct evsel *evsel,
case SEQ_STATE_ACQUIRING:
case SEQ_STATE_CONTENDED:
case SEQ_STATE_RELEASED:
- /* broken lock sequence, discard it */
- ls->discard = 1;
- bad_hist[BROKEN_RELEASE]++;
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_RELEASE]++;
+ }
goto free_seq;
default:
BUG_ON("Unknown state of lock sequence found!\n");
@@ -646,6 +890,367 @@ end:
return 0;
}
+static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
+ char *buf, int size)
+{
+ u64 offset;
+
+ if (map == NULL || sym == NULL) {
+ buf[0] = '\0';
+ return 0;
+ }
+
+ offset = map__map_ip(map, ip) - sym->start;
+
+ if (offset)
+ return scnprintf(buf, size, "%s+%#lx", sym->name, offset);
+ else
+ return strlcpy(buf, sym->name, size);
+}
+static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
+ char *buf, int size)
+{
+ struct thread *thread;
+ struct callchain_cursor *cursor = &callchain_cursor;
+ struct machine *machine = &session->machines.host;
+ struct symbol *sym;
+ int skip = 0;
+ int ret;
+
+ /* lock names will be replaced with task names later */
+ if (show_thread_stats)
+ return -1;
+
+ thread = machine__findnew_thread(machine, -1, sample->pid);
+ if (thread == NULL)
+ return -1;
+
+ /* use caller function name from the callchain */
+ ret = thread__resolve_callchain(thread, cursor, evsel, sample,
+ NULL, NULL, max_stack_depth);
+ if (ret != 0) {
+ thread__put(thread);
+ return -1;
+ }
+
+ callchain_cursor_commit(cursor);
+ thread__put(thread);
+
+ while (true) {
+ struct callchain_cursor_node *node;
+
+ node = callchain_cursor_current(cursor);
+ if (node == NULL)
+ break;
+
+ /* skip first few entries - for lock functions */
+ if (++skip <= stack_skip)
+ goto next;
+
+ sym = node->ms.sym;
+ if (sym && !machine__is_lock_function(machine, node->ip)) {
+ get_symbol_name_offset(node->ms.map, sym, node->ip,
+ buf, size);
+ return 0;
+ }
+
+next:
+ callchain_cursor_advance(cursor);
+ }
+ return -1;
+}
+
+static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
+{
+ struct callchain_cursor *cursor = &callchain_cursor;
+ struct machine *machine = &session->machines.host;
+ struct thread *thread;
+ u64 hash = 0;
+ int skip = 0;
+ int ret;
+
+ thread = machine__findnew_thread(machine, -1, sample->pid);
+ if (thread == NULL)
+ return -1;
+
+ /* use caller function name from the callchain */
+ ret = thread__resolve_callchain(thread, cursor, evsel, sample,
+ NULL, NULL, max_stack_depth);
+ thread__put(thread);
+
+ if (ret != 0)
+ return -1;
+
+ callchain_cursor_commit(cursor);
+
+ while (true) {
+ struct callchain_cursor_node *node;
+
+ node = callchain_cursor_current(cursor);
+ if (node == NULL)
+ break;
+
+ /* skip first few entries - for lock functions */
+ if (++skip <= stack_skip)
+ goto next;
+
+ if (node->ms.sym && machine__is_lock_function(machine, node->ip))
+ goto next;
+
+ hash ^= hash_long((unsigned long)node->ip, 64);
+
+next:
+ callchain_cursor_advance(cursor);
+ }
+ return hash;
+}
+
+static u64 *get_callstack(struct perf_sample *sample, int max_stack)
+{
+ u64 *callstack;
+ u64 i;
+ int c;
+
+ callstack = calloc(max_stack, sizeof(*callstack));
+ if (callstack == NULL)
+ return NULL;
+
+ for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
+ u64 ip = sample->callchain->ips[i];
+
+ if (ip >= PERF_CONTEXT_MAX)
+ continue;
+
+ callstack[c++] = ip;
+ }
+ return callstack;
+}
+
+static int report_lock_contention_begin_event(struct evsel *evsel,
+ struct perf_sample *sample)
+{
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+ u64 addr = evsel__intval(evsel, sample, "lock_addr");
+ unsigned int flags = evsel__intval(evsel, sample, "flags");
+ u64 key;
+ int i, ret;
+ static bool kmap_loaded;
+ struct machine *machine = &session->machines.host;
+ struct map *kmap;
+ struct symbol *sym;
+
+ ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
+ if (ret < 0)
+ return ret;
+
+ if (!kmap_loaded) {
+ unsigned long *addrs;
+
+ /* make sure it loads the kernel map to find lock symbols */
+ map__load(machine__kernel_map(machine));
+ kmap_loaded = true;
+
+ /* convert (kernel) symbols to addresses */
+ for (i = 0; i < filters.nr_syms; i++) {
+ sym = machine__find_kernel_symbol_by_name(machine,
+ filters.syms[i],
+ &kmap);
+ if (sym == NULL) {
+ pr_warning("ignore unknown symbol: %s\n",
+ filters.syms[i]);
+ continue;
+ }
+
+ addrs = realloc(filters.addrs,
+ (filters.nr_addrs + 1) * sizeof(*addrs));
+ if (addrs == NULL) {
+ pr_warning("memory allocation failure\n");
+ return -ENOMEM;
+ }
+
+ addrs[filters.nr_addrs++] = map__unmap_ip(kmap, sym->start);
+ filters.addrs = addrs;
+ }
+ }
+
+ ls = lock_stat_find(key);
+ if (!ls) {
+ char buf[128];
+ const char *name = "";
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_ADDR:
+ sym = machine__find_kernel_symbol(machine, key, &kmap);
+ if (sym)
+ name = sym->name;
+ break;
+ case LOCK_AGGR_CALLER:
+ name = buf;
+ if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
+ name = "Unknown";
+ break;
+ case LOCK_AGGR_TASK:
+ default:
+ break;
+ }
+
+ ls = lock_stat_findnew(key, name, flags);
+ if (!ls)
+ return -ENOMEM;
+ }
+
+ if (filters.nr_types) {
+ bool found = false;
+
+ for (i = 0; i < filters.nr_types; i++) {
+ if (flags == filters.types[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+ }
+
+ if (filters.nr_addrs) {
+ bool found = false;
+
+ for (i = 0; i < filters.nr_addrs; i++) {
+ if (addr == filters.addrs[i]) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+ }
+
+ if (needs_callstack()) {
+ u64 *callstack = get_callstack(sample, max_stack_depth);
+ if (callstack == NULL)
+ return -ENOMEM;
+
+ if (!match_callstack_filter(machine, callstack)) {
+ free(callstack);
+ return 0;
+ }
+
+ if (ls->callstack == NULL)
+ ls->callstack = callstack;
+ else
+ free(callstack);
+ }
+
+ ts = thread_stat_findnew(sample->tid);
+ if (!ts)
+ return -ENOMEM;
+
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -ENOMEM;
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ case SEQ_STATE_ACQUIRED:
+ break;
+ case SEQ_STATE_CONTENDED:
+ /*
+ * A nested contention_begin can occur with mutex spinning;
+ * keep using the original contention_begin event and
+ * ignore the second one.
+ */
+ goto end;
+ case SEQ_STATE_ACQUIRING:
+ case SEQ_STATE_READ_ACQUIRED:
+ case SEQ_STATE_RELEASED:
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_CONTENDED]++;
+ }
+ list_del_init(&seq->list);
+ free(seq);
+ goto end;
+ default:
+ BUG_ON("Unknown state of lock sequence found!\n");
+ break;
+ }
+
+ if (seq->state != SEQ_STATE_CONTENDED) {
+ seq->state = SEQ_STATE_CONTENDED;
+ seq->prev_event_time = sample->time;
+ ls->nr_contended++;
+ }
+end:
+ return 0;
+}
+
+static int report_lock_contention_end_event(struct evsel *evsel,
+ struct perf_sample *sample)
+{
+ struct lock_stat *ls;
+ struct thread_stat *ts;
+ struct lock_seq_stat *seq;
+ u64 contended_term;
+ u64 addr = evsel__intval(evsel, sample, "lock_addr");
+ u64 key;
+ int ret;
+
+ ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
+ if (ret < 0)
+ return ret;
+
+ ls = lock_stat_find(key);
+ if (!ls)
+ return 0;
+
+ ts = thread_stat_find(sample->tid);
+ if (!ts)
+ return 0;
+
+ seq = get_seq(ts, addr);
+ if (!seq)
+ return -ENOMEM;
+
+ switch (seq->state) {
+ case SEQ_STATE_UNINITIALIZED:
+ goto end;
+ case SEQ_STATE_CONTENDED:
+ contended_term = sample->time - seq->prev_event_time;
+ ls->wait_time_total += contended_term;
+ if (contended_term < ls->wait_time_min)
+ ls->wait_time_min = contended_term;
+ if (ls->wait_time_max < contended_term)
+ ls->wait_time_max = contended_term;
+ break;
+ case SEQ_STATE_ACQUIRING:
+ case SEQ_STATE_ACQUIRED:
+ case SEQ_STATE_READ_ACQUIRED:
+ case SEQ_STATE_RELEASED:
+ /* broken lock sequence */
+ if (!ls->broken) {
+ ls->broken = 1;
+ bad_hist[BROKEN_ACQUIRED]++;
+ }
+ list_del_init(&seq->list);
+ free(seq);
+ goto end;
+ default:
+ BUG_ON("Unknown state of lock sequence found!\n");
+ break;
+ }
+
+ seq->state = SEQ_STATE_ACQUIRED;
+ ls->nr_acquired++;
+ ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
+end:
+ return 0;
+}
+
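Worked example, illustrative only: how one contention_begin/contention_end pair updates the per-lock stats in the two handlers above.

/*
 * contention_begin at t0:  seq->state = SEQ_STATE_CONTENDED,
 *                          seq->prev_event_time = t0, ls->nr_contended++
 * contention_end   at t1:  wait = t1 - t0
 *                          ls->wait_time_total += wait
 *                          ls->wait_time_min/max updated against wait
 *                          ls->nr_acquired++, seq->state = SEQ_STATE_ACQUIRED
 *                          ls->avg_wait_time = wait_time_total / nr_acquired
 */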
/* lock oriented handlers */
/* TODO: handlers for CPU oriented, thread oriented */
static struct trace_lock_handler report_lock_ops = {
@@ -653,50 +1258,75 @@ static struct trace_lock_handler report_lock_ops = {
.acquired_event = report_lock_acquired_event,
.contended_event = report_lock_contended_event,
.release_event = report_lock_release_event,
+ .contention_begin_event = report_lock_contention_begin_event,
+ .contention_end_event = report_lock_contention_end_event,
};
+static struct trace_lock_handler contention_lock_ops = {
+ .contention_begin_event = report_lock_contention_begin_event,
+ .contention_end_event = report_lock_contention_end_event,
+};
+
static struct trace_lock_handler *trace_handler;
-static int perf_evsel__process_lock_acquire(struct evsel *evsel,
- struct perf_sample *sample)
+static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->acquire_event)
return trace_handler->acquire_event(evsel, sample);
return 0;
}
-static int perf_evsel__process_lock_acquired(struct evsel *evsel,
- struct perf_sample *sample)
+static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->acquired_event)
return trace_handler->acquired_event(evsel, sample);
return 0;
}
-static int perf_evsel__process_lock_contended(struct evsel *evsel,
- struct perf_sample *sample)
+static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->contended_event)
return trace_handler->contended_event(evsel, sample);
return 0;
}
-static int perf_evsel__process_lock_release(struct evsel *evsel,
- struct perf_sample *sample)
+static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
{
if (trace_handler->release_event)
return trace_handler->release_event(evsel, sample);
return 0;
}
+static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
+{
+ if (trace_handler->contention_begin_event)
+ return trace_handler->contention_begin_event(evsel, sample);
+ return 0;
+}
+
+static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
+{
+ if (trace_handler->contention_end_event)
+ return trace_handler->contention_end_event(evsel, sample);
+ return 0;
+}
+
static void print_bad_events(int bad, int total)
{
/* Output for debug, this has to be removed */
int i;
+ int broken = 0;
const char *name[4] =
{ "acquire", "acquired", "contended", "release" };
- pr_info("\n=== output for debug===\n\n");
+ for (i = 0; i < BROKEN_MAX; i++)
+ broken += bad_hist[i];
+
+ if (quiet || total == 0 || (broken == 0 && verbose <= 0))
+ return;
+
+ pr_info("\n=== output for debug ===\n\n");
pr_info("bad: %d, total: %d\n", bad, total);
pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
pr_info("histogram of events caused bad sequence\n");
@@ -708,32 +1338,40 @@ static void print_bad_events(int bad, int total)
static void print_result(void)
{
struct lock_stat *st;
+ struct lock_key *key;
char cut_name[20];
- int bad, total;
-
- pr_info("%20s ", "Name");
- pr_info("%10s ", "acquired");
- pr_info("%10s ", "contended");
-
- pr_info("%15s ", "avg wait (ns)");
- pr_info("%15s ", "total wait (ns)");
- pr_info("%15s ", "max wait (ns)");
- pr_info("%15s ", "min wait (ns)");
+ int bad, total, printed;
- pr_info("\n\n");
+ if (!quiet) {
+ pr_info("%20s ", "Name");
+ list_for_each_entry(key, &lock_keys, list)
+ pr_info("%*s ", key->len, key->header);
+ pr_info("\n\n");
+ }
- bad = total = 0;
+ bad = total = printed = 0;
while ((st = pop_from_result())) {
total++;
- if (st->discard) {
+ if (st->broken)
bad++;
+ if (!st->nr_acquired)
continue;
- }
+
bzero(cut_name, 20);
- if (strlen(st->name) < 16) {
+ if (strlen(st->name) < 20) {
/* output raw name */
- pr_info("%20s ", st->name);
+ const char *name = st->name;
+
+ if (show_thread_stats) {
+ struct thread *t;
+
+ /* st->addr contains tid of thread */
+ t = perf_session__findnew(session, st->addr);
+ name = thread__comm_str(t);
+ }
+
+ pr_info("%20s ", name);
} else {
strncpy(cut_name, st->name, 16);
cut_name[16] = '.';
@@ -744,15 +1382,14 @@ static void print_result(void)
pr_info("%20s ", cut_name);
}
- pr_info("%10u ", st->nr_acquired);
- pr_info("%10u ", st->nr_contended);
-
- pr_info("%15" PRIu64 " ", st->avg_wait_time);
- pr_info("%15" PRIu64 " ", st->wait_time_total);
- pr_info("%15" PRIu64 " ", st->wait_time_max);
- pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
- 0 : st->wait_time_min);
+ list_for_each_entry(key, &lock_keys, list) {
+ key->print(key, st);
+ pr_info(" ");
+ }
pr_info("\n");
+
+ if (++printed >= print_nr_entries)
+ break;
}
print_bad_events(bad, total);
@@ -775,7 +1412,22 @@ static void dump_threads(void)
pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
node = rb_next(node);
thread__put(t);
- };
+ }
+}
+
+static int compare_maps(struct lock_stat *a, struct lock_stat *b)
+{
+ int ret;
+
+ if (a->name && b->name)
+ ret = strcmp(a->name, b->name);
+ else
+ ret = !!a->name - !!b->name;
+
+ if (!ret)
+ return a->addr < b->addr;
+ else
+ return ret < 0;
}
static void dump_map(void)
@@ -785,10 +1437,13 @@ static void dump_map(void)
pr_info("Address of instance: name of class\n");
for (i = 0; i < LOCKHASH_SIZE; i++) {
- list_for_each_entry(st, &lockhash_table[i], hash_entry) {
- pr_info(" %p: %s\n", st->addr, st->name);
+ hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ insert_to_result(st, compare_maps);
}
}
+
+ while ((st = pop_from_result()))
+ pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name);
}
static int dump_info(void)
@@ -807,6 +1462,34 @@ static int dump_info(void)
return rc;
}
+static const struct evsel_str_handler lock_tracepoints[] = {
+ { "lock:lock_acquire", evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
+ { "lock:lock_acquired", evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+ { "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+ { "lock:lock_release", evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
+};
+
+static const struct evsel_str_handler contention_tracepoints[] = {
+ { "lock:contention_begin", evsel__process_contention_begin, },
+ { "lock:contention_end", evsel__process_contention_end, },
+};
+
+static int process_event_update(struct perf_tool *tool,
+ union perf_event *event,
+ struct evlist **pevlist)
+{
+ int ret;
+
+ ret = perf_event__process_event_update(tool, event, pevlist);
+ if (ret < 0)
+ return ret;
+
+ /* this can return -EEXIST since we call it for each evsel */
+ perf_session__set_tracepoints_handlers(session, lock_tracepoints);
+ perf_session__set_tracepoints_handlers(session, contention_tracepoints);
+ return 0;
+}
+
typedef int (*tracepoint_handler)(struct evsel *evsel,
struct perf_sample *sample);
@@ -836,34 +1519,243 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return err;
}
+static void combine_result(void)
+{
+ unsigned int i;
+ struct lock_stat *st;
+
+ if (!combine_locks)
+ return;
+
+ for (i = 0; i < LOCKHASH_SIZE; i++) {
+ hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ combine_lock_stats(st);
+ }
+ }
+}
+
static void sort_result(void)
{
unsigned int i;
struct lock_stat *st;
for (i = 0; i < LOCKHASH_SIZE; i++) {
- list_for_each_entry(st, &lockhash_table[i], hash_entry) {
+ hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
insert_to_result(st, compare);
}
}
}
-static const struct evsel_str_handler lock_tracepoints[] = {
- { "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
- { "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
- { "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
- { "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
+static const struct {
+ unsigned int flags;
+ const char *str;
+ const char *name;
+} lock_type_table[] = {
+ { 0, "semaphore", "semaphore" },
+ { LCB_F_SPIN, "spinlock", "spinlock" },
+ { LCB_F_SPIN | LCB_F_READ, "rwlock:R", "rwlock" },
+ { LCB_F_SPIN | LCB_F_WRITE, "rwlock:W", "rwlock" },
+ { LCB_F_READ, "rwsem:R", "rwsem" },
+ { LCB_F_WRITE, "rwsem:W", "rwsem" },
+ { LCB_F_RT, "rt-mutex", "rt-mutex" },
+ { LCB_F_RT | LCB_F_READ, "rwlock-rt:R", "rwlock-rt" },
+ { LCB_F_RT | LCB_F_WRITE, "rwlock-rt:W", "rwlock-rt" },
+ { LCB_F_PERCPU | LCB_F_READ, "pcpu-sem:R", "percpu-rwsem" },
+ { LCB_F_PERCPU | LCB_F_WRITE, "pcpu-sem:W", "percpu-rwsem" },
+ { LCB_F_MUTEX, "mutex", "mutex" },
+ { LCB_F_MUTEX | LCB_F_SPIN, "mutex", "mutex" },
+ /* alias for get_type_flag() */
+ { LCB_F_MUTEX | LCB_F_SPIN, "mutex-spin", "mutex" },
};
+static const char *get_type_str(unsigned int flags)
+{
+ flags &= LCB_F_MAX_FLAGS - 1;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+ if (lock_type_table[i].flags == flags)
+ return lock_type_table[i].str;
+ }
+ return "unknown";
+}
+
+static const char *get_type_name(unsigned int flags)
+{
+ flags &= LCB_F_MAX_FLAGS - 1;
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+ if (lock_type_table[i].flags == flags)
+ return lock_type_table[i].name;
+ }
+ return "unknown";
+}
+
+static unsigned int get_type_flag(const char *str)
+{
+ for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+ if (!strcmp(lock_type_table[i].name, str))
+ return lock_type_table[i].flags;
+ }
+ for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
+ if (!strcmp(lock_type_table[i].str, str))
+ return lock_type_table[i].flags;
+ }
+ return UINT_MAX;
+}
+
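Illustrative examples, not part of the patch: what the lookups above return for a few inputs, derived from lock_type_table.

/*
 * get_type_flag("rwsem")     -> LCB_F_READ                (first ".name" match: "rwsem:R")
 * get_type_flag("rwlock:W")  -> LCB_F_SPIN | LCB_F_WRITE  (matched via ".str")
 * get_type_flag("foo")       -> UINT_MAX                  (unknown; rejected by -Y/--type-filter)
 * get_type_str() and get_type_name() perform the reverse mapping when printing.
 */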
+static void lock_filter_finish(void)
+{
+ zfree(&filters.types);
+ filters.nr_types = 0;
+
+ zfree(&filters.addrs);
+ filters.nr_addrs = 0;
+
+ for (int i = 0; i < filters.nr_syms; i++)
+ free(filters.syms[i]);
+
+ zfree(&filters.syms);
+ filters.nr_syms = 0;
+}
+
+static void sort_contention_result(void)
+{
+ sort_result();
+}
+
+static void print_bpf_events(int total, struct lock_contention_fails *fails)
+{
+ /* Output for debug, this has to be removed */
+ int broken = fails->task + fails->stack + fails->time + fails->data;
+
+ if (quiet || total == 0 || (broken == 0 && verbose <= 0))
+ return;
+
+ total += broken;
+ pr_info("\n=== output for debug ===\n\n");
+ pr_info("bad: %d, total: %d\n", broken, total);
+ pr_info("bad rate: %.2f %%\n", (double)broken / (double)total * 100);
+
+ pr_info("histogram of failure reasons\n");
+ pr_info(" %10s: %d\n", "task", fails->task);
+ pr_info(" %10s: %d\n", "stack", fails->stack);
+ pr_info(" %10s: %d\n", "time", fails->time);
+ pr_info(" %10s: %d\n", "data", fails->data);
+}
+
+static void print_contention_result(struct lock_contention *con)
+{
+ struct lock_stat *st;
+ struct lock_key *key;
+ int bad, total, printed;
+
+ if (!quiet) {
+ list_for_each_entry(key, &lock_keys, list)
+ pr_info("%*s ", key->len, key->header);
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_TASK:
+ pr_info(" %10s %s\n\n", "pid",
+ show_lock_owner ? "owner" : "comm");
+ break;
+ case LOCK_AGGR_CALLER:
+ pr_info(" %10s %s\n\n", "type", "caller");
+ break;
+ case LOCK_AGGR_ADDR:
+ pr_info(" %16s %s\n\n", "address", "symbol");
+ break;
+ default:
+ break;
+ }
+ }
+
+ bad = total = printed = 0;
+
+ while ((st = pop_from_result())) {
+ struct thread *t;
+ int pid;
+
+ total += use_bpf ? st->nr_contended : 1;
+ if (st->broken)
+ bad++;
+
+ if (!st->wait_time_total)
+ continue;
+
+ list_for_each_entry(key, &lock_keys, list) {
+ key->print(key, st);
+ pr_info(" ");
+ }
+
+ switch (aggr_mode) {
+ case LOCK_AGGR_CALLER:
+ pr_info(" %10s %s\n", get_type_str(st->flags), st->name);
+ break;
+ case LOCK_AGGR_TASK:
+ pid = st->addr;
+ t = perf_session__findnew(session, pid);
+ pr_info(" %10d %s\n",
+ pid, pid == -1 ? "Unknown" : thread__comm_str(t));
+ break;
+ case LOCK_AGGR_ADDR:
+ pr_info(" %016llx %s (%s)\n", (unsigned long long)st->addr,
+ st->name, get_type_name(st->flags));
+ break;
+ default:
+ break;
+ }
+
+ if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
+ struct map *kmap;
+ struct symbol *sym;
+ char buf[128];
+ u64 ip;
+
+ for (int i = 0; i < max_stack_depth; i++) {
+ if (!st->callstack || !st->callstack[i])
+ break;
+
+ ip = st->callstack[i];
+ sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
+ get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
+ pr_info("\t\t\t%#lx %s\n", (unsigned long)ip, buf);
+ }
+ }
+
+ if (++printed >= print_nr_entries)
+ break;
+ }
+
+ if (print_nr_entries) {
+ /* update the total/bad stats */
+ while ((st = pop_from_result())) {
+ total += use_bpf ? st->nr_contended : 1;
+ if (st->broken)
+ bad++;
+ }
+ }
+ /* some entries are collected but hidden by the callstack filter */
+ total += con->nr_filtered;
+
+ if (use_bpf)
+ print_bpf_events(total, &con->fails);
+ else
+ print_bad_events(bad, total);
+}
+
static bool force;
static int __cmd_report(bool display_info)
{
int err = -EINVAL;
struct perf_tool eops = {
+ .attr = perf_event__process_attr,
+ .event_update = process_event_update,
.sample = process_sample_event,
.comm = perf_event__process_comm,
+ .mmap = perf_event__process_mmap,
.namespaces = perf_event__process_namespaces,
+ .tracing_data = perf_event__process_tracing_data,
.ordered_events = true,
};
struct perf_data data = {
@@ -872,25 +1764,41 @@ static int __cmd_report(bool display_info)
.force = force,
};
- session = perf_session__new(&data, false, &eops);
+ session = perf_session__new(&data, &eops);
if (IS_ERR(session)) {
pr_err("Initializing perf session failed\n");
return PTR_ERR(session);
}
+ /* for lock function check */
+ symbol_conf.sort_by_name = true;
+ symbol_conf.allow_aliases = true;
symbol__init(&session->header.env);
- if (!perf_session__has_traces(session, "lock record"))
- goto out_delete;
+ if (!data.is_pipe) {
+ if (!perf_session__has_traces(session, "lock record"))
+ goto out_delete;
- if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
- pr_err("Initializing perf session tracepoint handlers failed\n");
- goto out_delete;
+ if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ goto out_delete;
+ }
+
+ if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ goto out_delete;
+ }
}
- if (select_key())
+ if (setup_output_field(false, output_fields))
goto out_delete;
+ if (select_key(false))
+ goto out_delete;
+
+ if (show_thread_stats)
+ aggr_mode = LOCK_AGGR_TASK;
+
err = perf_session__process_events(session);
if (err)
goto out_delete;
@@ -899,6 +1807,7 @@ static int __cmd_report(bool display_info)
if (display_info) /* used for info subcommand */
err = dump_info();
else {
+ combine_result();
sort_result();
print_result();
}
@@ -908,26 +1817,223 @@ out_delete:
return err;
}
+static void sighandler(int sig __maybe_unused)
+{
+}
+
+static int check_lock_contention_options(const struct option *options,
+ const char * const *usage)
+{
+ if (show_thread_stats && show_lock_addrs) {
+ pr_err("Cannot use thread and addr mode together\n");
+ parse_options_usage(usage, options, "threads", 0);
+ parse_options_usage(NULL, options, "lock-addr", 0);
+ return -1;
+ }
+
+ if (show_lock_owner && !use_bpf) {
+ pr_err("Lock owners are available only with BPF\n");
+ parse_options_usage(usage, options, "lock-owner", 0);
+ parse_options_usage(NULL, options, "use-bpf", 0);
+ return -1;
+ }
+
+ if (show_lock_owner && show_lock_addrs) {
+ pr_err("Cannot use owner and addr mode together\n");
+ parse_options_usage(usage, options, "lock-owner", 0);
+ parse_options_usage(NULL, options, "lock-addr", 0);
+ return -1;
+ }
+
+ if (show_lock_owner)
+ show_thread_stats = true;
+
+ return 0;
+}
+
+static int __cmd_contention(int argc, const char **argv)
+{
+ int err = -EINVAL;
+ struct perf_tool eops = {
+ .attr = perf_event__process_attr,
+ .event_update = process_event_update,
+ .sample = process_sample_event,
+ .comm = perf_event__process_comm,
+ .mmap = perf_event__process_mmap,
+ .tracing_data = perf_event__process_tracing_data,
+ .ordered_events = true,
+ };
+ struct perf_data data = {
+ .path = input_name,
+ .mode = PERF_DATA_MODE_READ,
+ .force = force,
+ };
+ struct lock_contention con = {
+ .target = &target,
+ .result = &lockhash_table[0],
+ .map_nr_entries = bpf_map_entries,
+ .max_stack = max_stack_depth,
+ .stack_skip = stack_skip,
+ .filters = &filters,
+ .save_callstack = needs_callstack(),
+ .owner = show_lock_owner,
+ };
+
+ session = perf_session__new(use_bpf ? NULL : &data, &eops);
+ if (IS_ERR(session)) {
+ pr_err("Initializing perf session failed\n");
+ return PTR_ERR(session);
+ }
+
+ con.machine = &session->machines.host;
+
+ con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
+ show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER;
+
+ if (con.aggr_mode == LOCK_AGGR_CALLER)
+ con.save_callstack = true;
+
+ /* for lock function check */
+ symbol_conf.sort_by_name = true;
+ symbol_conf.allow_aliases = true;
+ symbol__init(&session->header.env);
+
+ if (use_bpf) {
+ err = target__validate(&target);
+ if (err) {
+ char errbuf[512];
+
+ target__strerror(&target, err, errbuf, 512);
+ pr_err("%s\n", errbuf);
+ goto out_delete;
+ }
+
+ signal(SIGINT, sighandler);
+ signal(SIGCHLD, sighandler);
+ signal(SIGTERM, sighandler);
+
+ con.evlist = evlist__new();
+ if (con.evlist == NULL) {
+ err = -ENOMEM;
+ goto out_delete;
+ }
+
+ err = evlist__create_maps(con.evlist, &target);
+ if (err < 0)
+ goto out_delete;
+
+ if (argc) {
+ err = evlist__prepare_workload(con.evlist, &target,
+ argv, false, NULL);
+ if (err < 0)
+ goto out_delete;
+ }
+
+ if (lock_contention_prepare(&con) < 0) {
+ pr_err("lock contention BPF setup failed\n");
+ goto out_delete;
+ }
+ } else if (!data.is_pipe) {
+ if (!perf_session__has_traces(session, "lock record"))
+ goto out_delete;
+
+ if (!evlist__find_evsel_by_str(session->evlist,
+ "lock:contention_begin")) {
+ pr_err("lock contention evsel not found\n");
+ goto out_delete;
+ }
+
+ if (perf_session__set_tracepoints_handlers(session,
+ contention_tracepoints)) {
+ pr_err("Initializing perf session tracepoint handlers failed\n");
+ goto out_delete;
+ }
+ }
+
+ if (setup_output_field(true, output_fields))
+ goto out_delete;
+
+ if (select_key(true))
+ goto out_delete;
+
+ if (use_bpf) {
+ lock_contention_start();
+ if (argc)
+ evlist__start_workload(con.evlist);
+
+ /* wait for signal */
+ pause();
+
+ lock_contention_stop();
+ lock_contention_read(&con);
+ } else {
+ err = perf_session__process_events(session);
+ if (err)
+ goto out_delete;
+ }
+
+ setup_pager();
+
+ sort_contention_result();
+ print_contention_result(&con);
+
+out_delete:
+ lock_filter_finish();
+ evlist__delete(con.evlist);
+ lock_contention_finish();
+ perf_session__delete(session);
+ return err;
+}
+
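Illustrative usage, not part of the patch, assuming a kernel that provides the lock:contention_begin/_end tracepoints:

/*
 *   perf lock contention -a -b -- sleep 3      # BPF mode, system wide for 3 seconds
 *   perf lock record -- ./workload             # tracepoint mode: record first ...
 *   perf lock contention -E 10 -k wait_max     # ... then show the top 10 by max wait
 *   perf lock contention -b -Y rwsem -S folio  # filter by lock type and callstack symbol
 */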
+
static int __cmd_record(int argc, const char **argv)
{
const char *record_args[] = {
- "record", "-R", "-m", "1024", "-c", "1",
+ "record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
+ };
+ const char *callgraph_args[] = {
+ "--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
};
unsigned int rec_argc, i, j, ret;
+ unsigned int nr_tracepoints;
+ unsigned int nr_callgraph_args = 0;
const char **rec_argv;
+ bool has_lock_stat = true;
for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
- pr_err("tracepoint %s is not enabled. "
- "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
- lock_tracepoints[i].name);
- return 1;
+ pr_debug("tracepoint %s is not enabled. "
+ "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
+ lock_tracepoints[i].name);
+ has_lock_stat = false;
+ break;
+ }
+ }
+
+ if (has_lock_stat)
+ goto setup_args;
+
+ for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
+ if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
+ pr_err("tracepoint %s is not enabled.\n",
+ contention_tracepoints[i].name);
+ return 1;
}
}
- rec_argc = ARRAY_SIZE(record_args) + argc - 1;
+ nr_callgraph_args = ARRAY_SIZE(callgraph_args);
+
+setup_args:
+ rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;
+
+ if (has_lock_stat)
+ nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
+ else
+ nr_tracepoints = ARRAY_SIZE(contention_tracepoints);
+
/* factor of 2 is for -e in front of each tracepoint */
- rec_argc += 2 * ARRAY_SIZE(lock_tracepoints);
+ rec_argc += 2 * nr_tracepoints;
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
@@ -936,11 +2042,24 @@ static int __cmd_record(int argc, const char **argv)
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
- for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) {
+ for (j = 0; j < nr_tracepoints; j++) {
+ const char *ev_name;
+
+ if (has_lock_stat)
+ ev_name = strdup(lock_tracepoints[j].name);
+ else
+ ev_name = strdup(contention_tracepoints[j].name);
+
+ if (!ev_name)
+ return -ENOMEM;
+
rec_argv[i++] = "-e";
- rec_argv[i++] = strdup(lock_tracepoints[j].name);
+ rec_argv[i++] = ev_name;
}
+ for (j = 0; j < nr_callgraph_args; j++, i++)
+ rec_argv[i] = callgraph_args[j];
+
for (j = 1; j < (unsigned int)argc; j++, i++)
rec_argv[i] = argv[j];
@@ -951,6 +2070,190 @@ static int __cmd_record(int argc, const char **argv)
return ret;
}
+static int parse_map_entry(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ unsigned long *len = (unsigned long *)opt->value;
+ unsigned long val;
+ char *endptr;
+
+ errno = 0;
+ val = strtoul(str, &endptr, 0);
+ if (*endptr != '\0' || errno != 0) {
+ pr_err("invalid BPF map length: %s\n", str);
+ return -1;
+ }
+
+ *len = val;
+ return 0;
+}
+
+static int parse_max_stack(const struct option *opt, const char *str,
+ int unset __maybe_unused)
+{
+ unsigned long *len = (unsigned long *)opt->value;
+ long val;
+ char *endptr;
+
+ errno = 0;
+ val = strtol(str, &endptr, 0);
+ if (*endptr != '\0' || errno != 0) {
+ pr_err("invalid max stack depth: %s\n", str);
+ return -1;
+ }
+
+ if (val < 0 || val > sysctl__max_stack()) {
+ pr_err("invalid max stack depth: %ld\n", val);
+ return -1;
+ }
+
+ *len = val;
+ return 0;
+}
+
+static bool add_lock_type(unsigned int flags)
+{
+ unsigned int *tmp;
+
+ tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types));
+ if (tmp == NULL)
+ return false;
+
+ tmp[filters.nr_types++] = flags;
+ filters.types = tmp;
+ return true;
+}
+
+static int parse_lock_type(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ char *s, *tmp, *tok;
+ int ret = 0;
+
+ s = strdup(str);
+ if (s == NULL)
+ return -1;
+
+ for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ unsigned int flags = get_type_flag(tok);
+
+ if (flags == -1U) {
+ pr_err("Unknown lock flags: %s\n", tok);
+ ret = -1;
+ break;
+ }
+
+ if (!add_lock_type(flags)) {
+ ret = -1;
+ break;
+ }
+ }
+
+ free(s);
+ return ret;
+}
+
+static bool add_lock_addr(unsigned long addr)
+{
+ unsigned long *tmp;
+
+ tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs));
+ if (tmp == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+
+ tmp[filters.nr_addrs++] = addr;
+ filters.addrs = tmp;
+ return true;
+}
+
+static bool add_lock_sym(char *name)
+{
+ char **tmp;
+ char *sym = strdup(name);
+
+ if (sym == NULL) {
+ pr_err("Memory allocation failure\n");
+ return false;
+ }
+
+ tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms));
+ if (tmp == NULL) {
+ pr_err("Memory allocation failure\n");
+ free(sym);
+ return false;
+ }
+
+ tmp[filters.nr_syms++] = sym;
+ filters.syms = tmp;
+ return true;
+}
+
+static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ char *s, *tmp, *tok;
+ int ret = 0;
+ u64 addr;
+
+ s = strdup(str);
+ if (s == NULL)
+ return -1;
+
+ for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ char *end;
+
+ addr = strtoul(tok, &end, 16);
+ if (*end == '\0') {
+ if (!add_lock_addr(addr)) {
+ ret = -1;
+ break;
+ }
+ continue;
+ }
+
+ /*
+ * At this moment, we don't have kernel symbols. Save the symbols
+ * in a separate list and resolve them to addresses later.
+ */
+ if (!add_lock_sym(tok)) {
+ ret = -1;
+ break;
+ }
+ }
+
+ free(s);
+ return ret;
+}
+
+static int parse_call_stack(const struct option *opt __maybe_unused, const char *str,
+ int unset __maybe_unused)
+{
+ char *s, *tmp, *tok;
+ int ret = 0;
+
+ s = strdup(str);
+ if (s == NULL)
+ return -1;
+
+ for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
+ struct callstack_filter *entry;
+
+ entry = malloc(sizeof(*entry) + strlen(tok) + 1);
+ if (entry == NULL) {
+ pr_err("Memory allocation failure\n");
+ return -1;
+ }
+
+ strcpy(entry->name, tok);
+ list_add_tail(&entry->list, &callstack_filters);
+ }
+
+ free(s);
+ return ret;
+}
+
int cmd_lock(int argc, const char **argv)
{
const struct option lock_options[] = {
@@ -958,6 +2261,11 @@ int cmd_lock(int argc, const char **argv)
OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+ OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
+ "file", "vmlinux pathname"),
+ OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
+ "file", "kallsyms pathname"),
+ OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
OPT_END()
};
@@ -972,7 +2280,50 @@ int cmd_lock(int argc, const char **argv)
const struct option report_options[] = {
OPT_STRING('k', "key", &sort_key, "acquired",
"key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
+ OPT_STRING('F', "field", &output_fields, NULL,
+ "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
/* TODO: type */
+ OPT_BOOLEAN('c', "combine-locks", &combine_locks,
+ "combine locks in the same class"),
+ OPT_BOOLEAN('t', "threads", &show_thread_stats,
+ "show per-thread lock stats"),
+ OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
+ OPT_PARENT(lock_options)
+ };
+
+ struct option contention_options[] = {
+ OPT_STRING('k', "key", &sort_key, "wait_total",
+ "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
+ OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
+ "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
+ OPT_BOOLEAN('t', "threads", &show_thread_stats,
+ "show per-thread lock stats"),
+ OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
+ OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
+ "System-wide collection from all CPUs"),
+ OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
+ "List of cpus to monitor"),
+ OPT_STRING('p', "pid", &target.pid, "pid",
+ "Trace on existing process id"),
+ OPT_STRING(0, "tid", &target.tid, "tid",
+ "Trace on existing thread id (exclusive to --pid)"),
+ OPT_CALLBACK('M', "map-nr-entries", &bpf_map_entries, "num",
+ "Max number of BPF map entries", parse_map_entry),
+ OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
+ "Set the maximum stack depth when collecting lopck contention, "
+ "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
+ OPT_INTEGER(0, "stack-skip", &stack_skip,
+ "Set the number of stack depth to skip when finding a lock caller, "
+ "Default: " __stringify(CONTENTION_STACK_SKIP)),
+ OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
+ OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"),
+ OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS",
+ "Filter specific type of locks", parse_lock_type),
+ OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES",
+ "Filter specific address/symbol of locks", parse_lock_addr),
+ OPT_CALLBACK('S', "callstack-filter", NULL, "NAMES",
+ "Filter specific function in the callstack", parse_call_stack),
+ OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"),
OPT_PARENT(lock_options)
};
@@ -981,7 +2332,7 @@ int cmd_lock(int argc, const char **argv)
NULL
};
const char *const lock_subcommands[] = { "record", "report", "script",
- "info", NULL };
+ "info", "contention", NULL };
const char *lock_usage[] = {
NULL,
NULL
@@ -990,20 +2341,24 @@ int cmd_lock(int argc, const char **argv)
"perf lock report [<options>]",
NULL
};
+ const char * const contention_usage[] = {
+ "perf lock contention [<options>]",
+ NULL
+ };
unsigned int i;
int rc = 0;
for (i = 0; i < LOCKHASH_SIZE; i++)
- INIT_LIST_HEAD(lockhash_table + i);
+ INIT_HLIST_HEAD(lockhash_table + i);
argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(lock_usage, lock_options);
- if (!strncmp(argv[0], "rec", 3)) {
+ if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
return __cmd_record(argc, argv);
- } else if (!strncmp(argv[0], "report", 6)) {
+ } else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
trace_handler = &report_lock_ops;
if (argc) {
argc = parse_options(argc, argv,
@@ -1025,6 +2380,25 @@ int cmd_lock(int argc, const char **argv)
/* recycling report_lock_ops */
trace_handler = &report_lock_ops;
rc = __cmd_report(true);
+ } else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
+ trace_handler = &contention_lock_ops;
+ sort_key = "wait_total";
+ output_fields = "contended,wait_total,wait_max,avg_wait";
+
+#ifndef HAVE_BPF_SKEL
+ set_option_nobuild(contention_options, 'b', "use-bpf",
+ "no BUILD_BPF_SKEL=1", false);
+#endif
+ if (argc) {
+ argc = parse_options(argc, argv, contention_options,
+ contention_usage, 0);
+ }
+
+ if (check_lock_contention_options(contention_options,
+ contention_usage) < 0)
+ return -1;
+
+ rc = __cmd_contention(argc, argv);
} else {
usage_with_options(lock_usage, lock_options);
}
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index a13f5817d6fc..65465930ef8e 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -4,9 +4,9 @@
#include <sys/stat.h>
#include <unistd.h>
#include "builtin.h"
-#include "perf.h"
#include <subcmd/parse-options.h>
+#include "util/auxtrace.h"
#include "util/trace-event.h"
#include "util/tool.h"
#include "util/session.h"
@@ -17,6 +17,11 @@
#include "util/dso.h"
#include "util/map.h"
#include "util/symbol.h"
+#include "util/pmu.h"
+#include "util/pmu-hybrid.h"
+#include "util/sample.h"
+#include "util/string2.h"
+#include "util/util.h"
#include <linux/err.h>
#define MEM_OPERATION_LOAD 0x1
@@ -29,6 +34,7 @@ struct perf_mem {
bool dump_raw;
bool force;
bool phys_addr;
+ bool data_page_size;
int operation;
const char *cpu_list;
DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -38,26 +44,16 @@ static int parse_record_events(const struct option *opt,
const char *str, int unset __maybe_unused)
{
struct perf_mem *mem = *(struct perf_mem **)opt->value;
- int j;
- if (strcmp(str, "list")) {
- if (!perf_mem_events__parse(str)) {
- mem->operation = 0;
- return 0;
- }
- exit(-1);
+ if (!strcmp(str, "list")) {
+ perf_mem_events__list();
+ exit(0);
}
+ if (perf_mem_events__parse(str))
+ exit(-1);
- for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- struct perf_mem_event *e = &perf_mem_events[j];
-
- fprintf(stderr, "%-13s%-*s%s\n",
- e->tag,
- verbose > 0 ? 25 : 0,
- verbose > 0 ? perf_mem_events__name(j) : "",
- e->supported ? ": available" : "");
- }
- exit(0);
+ mem->operation = 0;
+ return 0;
}
static const char * const __usage[] = {
@@ -70,10 +66,13 @@ static const char * const *record_mem_usage = __usage;
static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
{
- int rec_argc, i = 0, j;
+ int rec_argc, i = 0, j, tmp_nr = 0;
+ int start, end;
const char **rec_argv;
+ char **rec_tmp;
int ret;
bool all_user = false, all_kernel = false;
+ struct perf_mem_event *e;
struct option options[] = {
OPT_CALLBACK('e', "event", &mem, "event",
"event selector. use 'perf mem record -e list' to list available events",
@@ -86,23 +85,62 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
OPT_END()
};
+ if (perf_mem_events__init()) {
+ pr_err("failed: memory events not supported\n");
+ return -1;
+ }
+
argc = parse_options(argc, argv, options, record_mem_usage,
PARSE_OPT_KEEP_UNKNOWN);
- rec_argc = argc + 9; /* max number of arguments */
+ if (!perf_pmu__has_hybrid())
+ rec_argc = argc + 9; /* max number of arguments */
+ else
+ rec_argc = argc + 9 * perf_pmu__hybrid_pmu_num();
+
+ if (mem->cpu_list)
+ rec_argc += 2;
+
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (!rec_argv)
return -1;
+ /*
+ * Save the allocated event name strings.
+ */
+ rec_tmp = calloc(rec_argc + 1, sizeof(char *));
+ if (!rec_tmp) {
+ free(rec_argv);
+ return -1;
+ }
+
rec_argv[i++] = "record";
- if (mem->operation & MEM_OPERATION_LOAD)
- perf_mem_events[PERF_MEM_EVENTS__LOAD].record = true;
+ e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD_STORE);
- if (mem->operation & MEM_OPERATION_STORE)
- perf_mem_events[PERF_MEM_EVENTS__STORE].record = true;
+ /*
+ * The load and store operations are required, use the event
+ * PERF_MEM_EVENTS__LOAD_STORE if it is supported.
+ */
+ if (e->tag &&
+ (mem->operation & MEM_OPERATION_LOAD) &&
+ (mem->operation & MEM_OPERATION_STORE)) {
+ e->record = true;
+ rec_argv[i++] = "-W";
+ } else {
+ if (mem->operation & MEM_OPERATION_LOAD) {
+ e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+ e->record = true;
+ }
+
+ if (mem->operation & MEM_OPERATION_STORE) {
+ e = perf_mem_events__ptr(PERF_MEM_EVENTS__STORE);
+ e->record = true;
+ }
+ }
- if (perf_mem_events[PERF_MEM_EVENTS__LOAD].record)
+ e = perf_mem_events__ptr(PERF_MEM_EVENTS__LOAD);
+ if (e->record)
rec_argv[i++] = "-W";
rec_argv[i++] = "-d";
@@ -110,20 +148,14 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (mem->phys_addr)
rec_argv[i++] = "--phys-data";
- for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- if (!perf_mem_events[j].record)
- continue;
+ if (mem->data_page_size)
+ rec_argv[i++] = "--data-page-size";
- if (!perf_mem_events[j].supported) {
- pr_err("failed: event '%s' not supported\n",
- perf_mem_events__name(j));
- free(rec_argv);
- return -1;
- }
-
- rec_argv[i++] = "-e";
- rec_argv[i++] = perf_mem_events__name(j);
- };
+ start = i;
+ ret = perf_mem_events__record_args(rec_argv, &i, rec_tmp, &tmp_nr);
+ if (ret)
+ goto out;
+ end = i;
if (all_user)
rec_argv[i++] = "--all-user";
@@ -131,20 +163,29 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
if (all_kernel)
rec_argv[i++] = "--all-kernel";
+ if (mem->cpu_list) {
+ rec_argv[i++] = "-C";
+ rec_argv[i++] = mem->cpu_list;
+ }
+
for (j = 0; j < argc; j++, i++)
rec_argv[i] = argv[j];
if (verbose > 0) {
pr_debug("calling: record ");
- while (rec_argv[j]) {
+ for (j = start; j < end; j++)
pr_debug("%s ", rec_argv[j]);
- j++;
- }
+
pr_debug("\n");
}
ret = cmd_record(i, rec_argv);
+out:
+ for (i = 0; i < tmp_nr; i++)
+ free(rec_tmp[i]);
+
+ free(rec_tmp);
free(rec_argv);
return ret;
}
@@ -157,7 +198,9 @@ dump_raw_samples(struct perf_tool *tool,
{
struct perf_mem *mem = container_of(tool, struct perf_mem, tool);
struct addr_location al;
- const char *fmt;
+ const char *fmt, *field_sep;
+ char str[PAGE_SIZE_NAME_LEN];
+ struct dso *dso = NULL;
if (machine__resolve(machine, &al, sample) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
@@ -168,63 +211,53 @@ dump_raw_samples(struct perf_tool *tool,
if (al.filtered || (mem->hide_unresolved && al.sym == NULL))
goto out_put;
- if (al.map != NULL)
- al.map->dso->hit = 1;
+ if (al.map != NULL) {
+ dso = map__dso(al.map);
+ if (dso)
+ dso->hit = 1;
+ }
- if (mem->phys_addr) {
- if (symbol_conf.field_sep) {
- fmt = "%d%s%d%s0x%"PRIx64"%s0x%"PRIx64"%s0x%016"PRIx64
- "%s%"PRIu64"%s0x%"PRIx64"%s%s:%s\n";
- } else {
- fmt = "%5d%s%5d%s0x%016"PRIx64"%s0x016%"PRIx64
- "%s0x%016"PRIx64"%s%5"PRIu64"%s0x%06"PRIx64
- "%s%s:%s\n";
- symbol_conf.field_sep = " ";
- }
+ field_sep = symbol_conf.field_sep;
+ if (field_sep) {
+ fmt = "%d%s%d%s0x%"PRIx64"%s0x%"PRIx64"%s";
+ } else {
+ fmt = "%5d%s%5d%s0x%016"PRIx64"%s0x016%"PRIx64"%s";
+ symbol_conf.field_sep = " ";
+ }
+ printf(fmt,
+ sample->pid,
+ symbol_conf.field_sep,
+ sample->tid,
+ symbol_conf.field_sep,
+ sample->ip,
+ symbol_conf.field_sep,
+ sample->addr,
+ symbol_conf.field_sep);
- printf(fmt,
- sample->pid,
- symbol_conf.field_sep,
- sample->tid,
- symbol_conf.field_sep,
- sample->ip,
- symbol_conf.field_sep,
- sample->addr,
- symbol_conf.field_sep,
+ if (mem->phys_addr) {
+ printf("0x%016"PRIx64"%s",
sample->phys_addr,
- symbol_conf.field_sep,
- sample->weight,
- symbol_conf.field_sep,
- sample->data_src,
- symbol_conf.field_sep,
- al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???",
- al.sym ? al.sym->name : "???");
- } else {
- if (symbol_conf.field_sep) {
- fmt = "%d%s%d%s0x%"PRIx64"%s0x%"PRIx64"%s%"PRIu64
- "%s0x%"PRIx64"%s%s:%s\n";
- } else {
- fmt = "%5d%s%5d%s0x%016"PRIx64"%s0x016%"PRIx64
- "%s%5"PRIu64"%s0x%06"PRIx64"%s%s:%s\n";
- symbol_conf.field_sep = " ";
- }
+ symbol_conf.field_sep);
+ }
- printf(fmt,
- sample->pid,
- symbol_conf.field_sep,
- sample->tid,
- symbol_conf.field_sep,
- sample->ip,
- symbol_conf.field_sep,
- sample->addr,
- symbol_conf.field_sep,
- sample->weight,
- symbol_conf.field_sep,
- sample->data_src,
- symbol_conf.field_sep,
- al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???",
- al.sym ? al.sym->name : "???");
+ if (mem->data_page_size) {
+ printf("%s%s",
+ get_page_size_name(sample->data_page_size, str),
+ symbol_conf.field_sep);
}
+
+ if (field_sep)
+ fmt = "%"PRIu64"%s0x%"PRIx64"%s%s:%s\n";
+ else
+ fmt = "%5"PRIu64"%s0x%06"PRIx64"%s%s:%s\n";
+
+ printf(fmt,
+ sample->weight,
+ symbol_conf.field_sep,
+ sample->data_src,
+ symbol_conf.field_sep,
+ dso ? dso->long_name : "???",
+ al.sym ? al.sym->name : "???");
out_put:
addr_location__put(&al);
return 0;
@@ -241,18 +274,25 @@ static int process_sample_event(struct perf_tool *tool,
static int report_raw_events(struct perf_mem *mem)
{
+ struct itrace_synth_opts itrace_synth_opts = {
+ .set = true,
+ .mem = true, /* Only enable memory event */
+ .default_no_sample = true,
+ };
+
struct perf_data data = {
.path = input_name,
.mode = PERF_DATA_MODE_READ,
.force = mem->force,
};
int ret;
- struct perf_session *session = perf_session__new(&data, false,
- &mem->tool);
+ struct perf_session *session = perf_session__new(&data, &mem->tool);
if (IS_ERR(session))
return PTR_ERR(session);
+ session->itrace_synth_opts = &itrace_synth_opts;
+
if (mem->cpu_list) {
ret = perf_session__cpu_bitmap(session, mem->cpu_list,
mem->cpu_bitmap);
@@ -264,10 +304,15 @@ static int report_raw_events(struct perf_mem *mem)
if (ret < 0)
goto out_delete;
+ printf("# PID, TID, IP, ADDR, ");
+
if (mem->phys_addr)
- printf("# PID, TID, IP, ADDR, PHYS ADDR, LOCAL WEIGHT, DSRC, SYMBOL\n");
- else
- printf("# PID, TID, IP, ADDR, LOCAL WEIGHT, DSRC, SYMBOL\n");
+ printf("PHYS ADDR, ");
+
+ if (mem->data_page_size)
+ printf("DATA PAGE SIZE, ");
+
+ printf("LOCAL WEIGHT, DSRC, SYMBOL\n");
ret = perf_session__process_events(session);
@@ -275,11 +320,38 @@ out_delete:
perf_session__delete(session);
return ret;
}
+static char *get_sort_order(struct perf_mem *mem)
+{
+ bool has_extra_options = (mem->phys_addr | mem->data_page_size) ? true : false;
+ char sort[128];
+
+ /*
+ * there is no weight (cost) associated with stores, so don't print
+ * the column
+ */
+ if (!(mem->operation & MEM_OPERATION_LOAD)) {
+ strcpy(sort, "--sort=mem,sym,dso,symbol_daddr,"
+ "dso_daddr,tlb,locked");
+ } else if (has_extra_options) {
+ strcpy(sort, "--sort=local_weight,mem,sym,dso,symbol_daddr,"
+ "dso_daddr,snoop,tlb,locked,blocked");
+ } else
+ return NULL;
+
+ if (mem->phys_addr)
+ strcat(sort, ",phys_daddr");
+
+ if (mem->data_page_size)
+ strcat(sort, ",data_page_size");
+
+ return strdup(sort);
+}
static int report_events(int argc, const char **argv, struct perf_mem *mem)
{
const char **rep_argv;
int ret, i = 0, j, rep_argc;
+ char *new_sort_order;
if (mem->dump_raw)
return report_raw_events(mem);
@@ -293,20 +365,9 @@ static int report_events(int argc, const char **argv, struct perf_mem *mem)
rep_argv[i++] = "--mem-mode";
rep_argv[i++] = "-n"; /* display number of samples */
- /*
- * there is no weight (cost) associated with stores, so don't print
- * the column
- */
- if (!(mem->operation & MEM_OPERATION_LOAD)) {
- if (mem->phys_addr)
- rep_argv[i++] = "--sort=mem,sym,dso,symbol_daddr,"
- "dso_daddr,tlb,locked,phys_daddr";
- else
- rep_argv[i++] = "--sort=mem,sym,dso,symbol_daddr,"
- "dso_daddr,tlb,locked";
- } else if (mem->phys_addr)
- rep_argv[i++] = "--sort=local_weight,mem,sym,dso,symbol_daddr,"
- "dso_daddr,snoop,tlb,locked,phys_daddr";
+ new_sort_order = get_sort_order(mem);
+ if (new_sort_order)
+ rep_argv[i++] = new_sort_order;
for (j = 1; j < argc; j++, i++)
rep_argv[i] = argv[j];
@@ -396,8 +457,12 @@ int cmd_mem(int argc, const char **argv)
.comm = perf_event__process_comm,
.lost = perf_event__process_lost,
.fork = perf_event__process_fork,
+ .attr = perf_event__process_attr,
.build_id = perf_event__process_build_id,
.namespaces = perf_event__process_namespaces,
+ .auxtrace_info = perf_event__process_auxtrace_info,
+ .auxtrace = perf_event__process_auxtrace,
+ .auxtrace_error = perf_event__process_auxtrace_error,
.ordered_events = true,
},
.input_name = "perf.data",
@@ -424,6 +489,7 @@ int cmd_mem(int argc, const char **argv)
" between columns '.' is reserved."),
OPT_BOOLEAN('f', "force", &mem.force, "don't complain, do it"),
OPT_BOOLEAN('p', "phys-data", &mem.phys_addr, "Record/Report sample physical addresses"),
+ OPT_BOOLEAN(0, "data-page-size", &mem.data_page_size, "Record/Report sample data address page size"),
OPT_END()
};
const char *const mem_subcommands[] = { "record", "report", NULL };
@@ -432,11 +498,6 @@ int cmd_mem(int argc, const char **argv)
NULL
};
- if (perf_mem_events__init()) {
- pr_err("failed: memory events not supported\n");
- return -1;
- }
-
argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands,
mem_usage, PARSE_OPT_KEEP_UNKNOWN);
@@ -450,9 +511,9 @@ int cmd_mem(int argc, const char **argv)
mem.input_name = "perf.data";
}
- if (!strncmp(argv[0], "rec", 3))
+ if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
return __cmd_record(argc, argv, &mem);
- else if (!strncmp(argv[0], "rep", 3))
+ else if (strlen(argv[0]) > 2 && strstarts("report", argv[0]))
return report_events(argc, argv, &mem);
else
usage_with_options(mem_usage, mem_options);
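The strstarts() change above makes the subcommand matching accept any prefix of "record" or "report" that is at least three characters long, instead of only the literal "rec"/"rep" prefixes. A small self-contained sketch of that matching; strstarts_demo() is a local stand-in for the strstarts() helper and is not part of the patch:

#include <stdio.h>
#include <string.h>

/* Does str start with prefix? Mirrors strstarts(). */
static int strstarts_demo(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

static const char *pick(const char *arg)
{
	/* Note the argument order: arg must be a prefix of the full name. */
	if (strlen(arg) > 2 && strstarts_demo("record", arg))
		return "record";
	if (strlen(arg) > 2 && strstarts_demo("report", arg))
		return "report";
	return "usage";
}

int main(void)
{
	/* Prints: record report usage */
	printf("%s %s %s\n", pick("rec"), pick("repo"), pick("re"));
	return 0;
}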
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 70548df2abb9..4df05b992093 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -21,6 +21,7 @@
#include "util/build-id.h"
#include "util/strlist.h"
#include "util/strfilter.h"
+#include "util/symbol.h"
#include "util/symbol_conf.h"
#include "util/debug.h"
#include <subcmd/parse-options.h>
@@ -31,7 +32,7 @@
#include <linux/zalloc.h>
#define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
-#define DEFAULT_FUNC_FILTER "!_*"
+#define DEFAULT_FUNC_FILTER "!_* & !*@plt"
#define DEFAULT_LIST_FILTER "*"
/* Session management structure */
@@ -39,7 +40,6 @@ static struct {
int command; /* Command short_name */
bool list_events;
bool uprobes;
- bool quiet;
bool target_used;
int nevents;
struct perf_probe_event events[MAX_PROBES];
@@ -216,7 +216,7 @@ static int opt_set_target_ns(const struct option *opt __maybe_unused,
return ret;
}
nsip = nsinfo__new(ns_pid);
- if (nsip && nsip->need_setns)
+ if (nsip && nsinfo__need_setns(nsip))
params.nsi = nsinfo__get(nsip);
nsinfo__put(nsip);
@@ -347,7 +347,10 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
goto out_cleanup;
if (params.command == 'D') { /* it shows definition */
- ret = show_probe_trace_events(pevs, npevs);
+ if (probe_conf.bootconfig)
+ ret = show_bootconfig_events(pevs, npevs);
+ else
+ ret = show_probe_trace_events(pevs, npevs);
goto out_cleanup;
}
@@ -364,6 +367,9 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
for (k = 0; k < pev->ntevs; k++) {
struct probe_trace_event *tev = &pev->tevs[k];
+ /* Skipped events have no event name */
+ if (!tev->event)
+ continue;
/* We use tev's name for showing new events */
show_perf_probe_event(tev->group, tev->event, pev,
@@ -377,9 +383,18 @@ static int perf_add_probe_events(struct perf_probe_event *pevs, int npevs)
/* Note that it is possible to skip all events because of blacklist */
if (event) {
+#ifndef HAVE_LIBTRACEEVENT
+ pr_info("\nperf is not linked with libtraceevent, to use the new probe you can use tracefs:\n\n");
+ pr_info("\tcd /sys/kernel/tracing/\n");
+ pr_info("\techo 1 > events/%s/%s/enable\n", group, event);
+ pr_info("\techo 1 > tracing_on\n");
+ pr_info("\tcat trace_pipe\n");
+ pr_info("\tBefore removing the probe, echo 0 > events/%s/%s/enable\n", group, event);
+#else
/* Show how to use the event. */
pr_info("\nYou can now use it in all perf tools, such as:\n\n");
pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", group, event);
+#endif
}
out_cleanup:
@@ -507,8 +522,8 @@ __cmd_probe(int argc, const char **argv)
struct option options[] = {
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show parsed arguments, etc)"),
- OPT_BOOLEAN('q', "quiet", &params.quiet,
- "be quiet (do not show any messages)"),
+ OPT_BOOLEAN('q', "quiet", &quiet,
+ "be quiet (do not show any warnings or messages)"),
OPT_CALLBACK_DEFAULT('l', "list", NULL, "[GROUP:]EVENT",
"list up probe events",
opt_set_filter_with_command, DEFAULT_LIST_FILTER),
@@ -578,6 +593,8 @@ __cmd_probe(int argc, const char **argv)
"Look for files with symbols relative to this directory"),
OPT_CALLBACK(0, "target-ns", NULL, "pid",
"target pid for namespace contexts", opt_set_target_ns),
+ OPT_BOOLEAN(0, "bootconfig", &probe_conf.bootconfig,
+ "Output probe definition with bootconfig format"),
OPT_END()
};
int ret;
@@ -604,6 +621,15 @@ __cmd_probe(int argc, const char **argv)
argc = parse_options(argc, argv, options, probe_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
+
+ if (quiet) {
+ if (verbose != 0) {
+ pr_err(" Error: -v and -q are exclusive.\n");
+ return -EINVAL;
+ }
+ verbose = -1;
+ }
+
if (argc > 0) {
if (strcmp(argv[0], "-") == 0) {
usage_with_options_msg(probe_usage, options,
@@ -621,13 +647,9 @@ __cmd_probe(int argc, const char **argv)
params.command = 'a';
}
- if (params.quiet) {
- if (verbose != 0) {
- pr_err(" Error: -v and -q are exclusive.\n");
- return -EINVAL;
- }
- verbose = -1;
- }
+ ret = symbol__validate_sym_arguments();
+ if (ret)
+ return ret;
if (probe_conf.max_probes == 0)
probe_conf.max_probes = MAX_PROBES;
@@ -689,6 +711,11 @@ __cmd_probe(int argc, const char **argv)
}
break;
case 'D':
+ if (probe_conf.bootconfig && params.uprobes) {
+ pr_err(" Error: --bootconfig doesn't support uprobes.\n");
+ return -EINVAL;
+ }
+ fallthrough;
case 'a':
/* Ensure the last given target is used */
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 1ab349abe904..efa03e4ac2c9 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -10,6 +10,7 @@
#include "util/build-id.h"
#include <subcmd/parse-options.h>
+#include <internal/xyarray.h>
#include "util/parse-events.h"
#include "util/config.h"
@@ -21,6 +22,7 @@
#include "util/evsel.h"
#include "util/debug.h"
#include "util/mmap.h"
+#include "util/mutex.h"
#include "util/target.h"
#include "util/session.h"
#include "util/tool.h"
@@ -34,6 +36,7 @@
#include "util/tsc.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
+#include "util/perf_api_probe.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"
#include "util/trigger.h"
@@ -43,16 +46,31 @@
#include "util/time-utils.h"
#include "util/units.h"
#include "util/bpf-event.h"
+#include "util/util.h"
+#include "util/pfm.h"
+#include "util/clockid.h"
+#include "util/pmu-hybrid.h"
+#include "util/evlist-hybrid.h"
+#include "util/off_cpu.h"
+#include "util/bpf-filter.h"
#include "asm/bug.h"
#include "perf.h"
+#include "cputopo.h"
#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <poll.h>
+#include <pthread.h>
#include <unistd.h>
+#ifndef HAVE_GETTID
+#include <syscall.h>
+#endif
#include <sched.h>
#include <signal.h>
+#ifdef HAVE_EVENTFD_SUPPORT
+#include <sys/eventfd.h>
+#endif
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/types.h>
@@ -63,6 +81,7 @@
#include <linux/time64.h>
#include <linux/zalloc.h>
#include <linux/bitmap.h>
+#include <sys/time.h>
struct switch_output {
bool enabled;
@@ -76,26 +95,94 @@ struct switch_output {
int cur_file;
};
+struct thread_mask {
+ struct mmap_cpu_mask maps;
+ struct mmap_cpu_mask affinity;
+};
+
+struct record_thread {
+ pid_t tid;
+ struct thread_mask *mask;
+ struct {
+ int msg[2];
+ int ack[2];
+ } pipes;
+ struct fdarray pollfd;
+ int ctlfd_pos;
+ int nr_mmaps;
+ struct mmap **maps;
+ struct mmap **overwrite_maps;
+ struct record *rec;
+ unsigned long long samples;
+ unsigned long waking;
+ u64 bytes_written;
+ u64 bytes_transferred;
+ u64 bytes_compressed;
+};
+
+static __thread struct record_thread *thread;
+
+enum thread_msg {
+ THREAD_MSG__UNDEFINED = 0,
+ THREAD_MSG__READY,
+ THREAD_MSG__MAX,
+};
+
+static const char *thread_msg_tags[THREAD_MSG__MAX] = {
+ "UNDEFINED", "READY"
+};
+
+enum thread_spec {
+ THREAD_SPEC__UNDEFINED = 0,
+ THREAD_SPEC__CPU,
+ THREAD_SPEC__CORE,
+ THREAD_SPEC__PACKAGE,
+ THREAD_SPEC__NUMA,
+ THREAD_SPEC__USER,
+ THREAD_SPEC__MAX,
+};
+
+static const char *thread_spec_tags[THREAD_SPEC__MAX] = {
+ "undefined", "cpu", "core", "package", "numa", "user"
+};
+
+struct pollfd_index_map {
+ int evlist_pollfd_index;
+ int thread_pollfd_index;
+};
+
struct record {
struct perf_tool tool;
struct record_opts opts;
u64 bytes_written;
+ u64 thread_bytes_written;
struct perf_data data;
struct auxtrace_record *itr;
struct evlist *evlist;
struct perf_session *session;
+ struct evlist *sb_evlist;
+ pthread_t thread_id;
int realtime_prio;
+ bool switch_output_event_set;
bool no_buildid;
bool no_buildid_set;
bool no_buildid_cache;
bool no_buildid_cache_set;
bool buildid_all;
+ bool buildid_mmap;
bool timestamp_filename;
bool timestamp_boundary;
+ bool off_cpu;
struct switch_output switch_output;
unsigned long long samples;
- struct mmap_cpu_mask affinity_mask;
unsigned long output_max_size; /* = 0: unlimited */
+ struct perf_debuginfod debuginfod;
+ int nr_threads;
+ struct thread_mask *thread_masks;
+ struct record_thread *thread_data;
+ struct pollfd_index_map *index_map;
+ size_t index_map_sz;
+ size_t index_map_cnt;
};
static volatile int done;
@@ -108,6 +195,18 @@ static const char *affinity_tags[PERF_AFFINITY_MAX] = {
"SYS", "NODE", "CPU"
};
+#ifndef HAVE_GETTID
+static inline pid_t gettid(void)
+{
+ return (pid_t)syscall(__NR_gettid);
+}
+#endif
+
+static int record__threads_enabled(struct record *rec)
+{
+ return rec->opts.threads_spec;
+}
+
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
@@ -127,10 +226,15 @@ static bool switch_output_time(struct record *rec)
trigger_is_ready(&switch_output_trigger);
}
+static u64 record__bytes_written(struct record *rec)
+{
+ return rec->bytes_written + rec->thread_bytes_written;
+}
+
static bool record__output_max_size_exceeded(struct record *rec)
{
return rec->output_max_size &&
- (rec->bytes_written >= rec->output_max_size);
+ (record__bytes_written(rec) >= rec->output_max_size);
}
static int record__write(struct record *rec, struct mmap *map __maybe_unused,
@@ -138,17 +242,25 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
{
struct perf_data_file *file = &rec->session->data->file;
+ if (map && map->file)
+ file = map->file;
+
if (perf_data_file__write(file, bf, size) < 0) {
pr_err("failed to write perf data, error: %m\n");
return -1;
}
- rec->bytes_written += size;
+ if (map && map->file) {
+ thread->bytes_written += size;
+ rec->thread_bytes_written += size;
+ } else {
+ rec->bytes_written += size;
+ }
if (record__output_max_size_exceeded(rec) && !done) {
fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
" stopping session ]\n",
- rec->bytes_written >> 10);
+ record__bytes_written(rec) >> 10);
done = 1;
}
@@ -160,8 +272,8 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused,
static int record__aio_enabled(struct record *rec);
static int record__comp_enabled(struct record *rec);
-static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
- void *src, size_t src_size);
+static size_t zstd_compress(struct perf_session *session, struct mmap *map,
+ void *dst, size_t dst_size, void *src, size_t src_size);
#ifdef HAVE_AIO_SUPPORT
static int record__aio_write(struct aiocb *cblock, int trace_fd,
@@ -295,7 +407,7 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size
*/
if (record__comp_enabled(aio->rec)) {
- size = zstd_compress(aio->rec->session, aio->data + aio->size,
+ size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
mmap__mmap_len(map) - aio->size,
buf, size);
} else {
@@ -503,21 +615,39 @@ static int process_synthesized_event(struct perf_tool *tool,
return record__write(rec, NULL, event, event->header.size);
}
+static struct mutex synth_lock;
+
+static int process_locked_synthesized_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ int ret;
+
+ mutex_lock(&synth_lock);
+ ret = process_synthesized_event(tool, event, sample, machine);
+ mutex_unlock(&synth_lock);
+ return ret;
+}
+
static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
{
struct record *rec = to;
if (record__comp_enabled(rec)) {
- size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
+ size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
bf = map->data;
}
- rec->samples++;
+ thread->samples++;
return record__write(rec, map, bf, size);
}
-static volatile int signr = -1;
-static volatile int child_finished;
+static volatile sig_atomic_t signr = -1;
+static volatile sig_atomic_t child_finished;
+#ifdef HAVE_EVENTFD_SUPPORT
+static volatile sig_atomic_t done_fd = -1;
+#endif
static void sig_handler(int sig)
{
@@ -527,6 +657,26 @@ static void sig_handler(int sig)
signr = sig;
done = 1;
+#ifdef HAVE_EVENTFD_SUPPORT
+ if (done_fd >= 0) {
+ u64 tmp = 1;
+ int orig_errno = errno;
+
+ /*
+ * It is possible for this signal handler to run after done is
+ * checked in the main loop, but before the perf counter fds are
+ * polled. If this happens, the poll() will continue to wait
+ * even though done is set, and will only break out if either
+ * another signal is received, or the counters are ready for
+ * read. To ensure the poll() doesn't sleep when done is set,
+ * use an eventfd (done_fd) to wake up the poll().
+ */
+ if (write(done_fd, &tmp, sizeof(tmp)) < 0)
+ pr_err("failed to signal wakeup fd, error: %m\n");
+
+ errno = orig_errno;
+ }
+#endif // HAVE_EVENTFD_SUPPORT
}
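A minimal sketch of the wakeup pattern described in the comment above, assuming Linux eventfd support; it is not the perf code itself, only the race and its fix in isolation: a signal handler writes to an eventfd that sits in the poll set, so poll() returns even when the signal lands after the done flag was checked but before poll() went to sleep.

#include <poll.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

static volatile sig_atomic_t done;
static int done_fd = -1;

static void on_sigint(int sig)
{
	uint64_t one = 1;
	ssize_t unused;

	(void)sig;
	done = 1;
	if (done_fd >= 0) {
		/* write() is async-signal-safe; failure is deliberately ignored. */
		unused = write(done_fd, &one, sizeof(one));
		(void)unused;
	}
}

int main(void)
{
	struct pollfd pfd = { .events = POLLIN };

	done_fd = eventfd(0, EFD_NONBLOCK);
	pfd.fd = done_fd;
	signal(SIGINT, on_sigint);

	while (!done) {
		/*
		 * Without the eventfd in the poll set, a SIGINT landing right
		 * here would leave poll() sleeping although done is already set.
		 */
		poll(&pfd, 1, -1);
	}

	printf("woken up, exiting\n");
	close(done_fd);
	return 0;
}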
static void sigsegv_handler(int sig)
@@ -670,6 +820,12 @@ static int record__auxtrace_init(struct record *rec)
{
int err;
+ if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts)
+ && record__threads_enabled(rec)) {
+ pr_err("AUX area tracing options are not available in parallel streaming mode.\n");
+ return -EINVAL;
+ }
+
if (!rec->itr) {
rec->itr = auxtrace_record__init(rec->evlist, &err);
if (err)
@@ -686,6 +842,8 @@ static int record__auxtrace_init(struct record *rec)
if (err)
return err;
+ auxtrace_regroup_aux_output(rec->evlist);
+
return auxtrace_parse_filters(rec->evlist);
}
@@ -723,6 +881,33 @@ static int record__auxtrace_init(struct record *rec __maybe_unused)
#endif
+static int record__config_text_poke(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ /* Nothing to do if text poke is already configured */
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->core.attr.text_poke)
+ return 0;
+ }
+
+ evsel = evlist__add_dummy_on_all_cpus(evlist);
+ if (!evsel)
+ return -ENOMEM;
+
+ evsel->core.attr.text_poke = 1;
+ evsel->core.attr.ksymbol = 1;
+ evsel->immediate = true;
+ evsel__set_sample_bit(evsel, TIME);
+
+ return 0;
+}
+
+static int record__config_off_cpu(struct record *rec)
+{
+ return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
+}
+
static bool record__kcore_readable(struct machine *machine)
{
char kcore[PATH_MAX];
@@ -754,9 +939,286 @@ static int record__kcore_copy(struct machine *machine, struct perf_data *data)
return kcore_copy(from_dir, kcore_dir);
}
+static void record__thread_data_init_pipes(struct record_thread *thread_data)
+{
+ thread_data->pipes.msg[0] = -1;
+ thread_data->pipes.msg[1] = -1;
+ thread_data->pipes.ack[0] = -1;
+ thread_data->pipes.ack[1] = -1;
+}
+
+static int record__thread_data_open_pipes(struct record_thread *thread_data)
+{
+ if (pipe(thread_data->pipes.msg))
+ return -EINVAL;
+
+ if (pipe(thread_data->pipes.ack)) {
+ close(thread_data->pipes.msg[0]);
+ thread_data->pipes.msg[0] = -1;
+ close(thread_data->pipes.msg[1]);
+ thread_data->pipes.msg[1] = -1;
+ return -EINVAL;
+ }
+
+ pr_debug2("thread_data[%p]: msg=[%d,%d], ack=[%d,%d]\n", thread_data,
+ thread_data->pipes.msg[0], thread_data->pipes.msg[1],
+ thread_data->pipes.ack[0], thread_data->pipes.ack[1]);
+
+ return 0;
+}
+
+static void record__thread_data_close_pipes(struct record_thread *thread_data)
+{
+ if (thread_data->pipes.msg[0] != -1) {
+ close(thread_data->pipes.msg[0]);
+ thread_data->pipes.msg[0] = -1;
+ }
+ if (thread_data->pipes.msg[1] != -1) {
+ close(thread_data->pipes.msg[1]);
+ thread_data->pipes.msg[1] = -1;
+ }
+ if (thread_data->pipes.ack[0] != -1) {
+ close(thread_data->pipes.ack[0]);
+ thread_data->pipes.ack[0] = -1;
+ }
+ if (thread_data->pipes.ack[1] != -1) {
+ close(thread_data->pipes.ack[1]);
+ thread_data->pipes.ack[1] = -1;
+ }
+}
+
+static bool evlist__per_thread(struct evlist *evlist)
+{
+ return cpu_map__is_dummy(evlist->core.user_requested_cpus);
+}
+
+static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
+{
+ int m, tm, nr_mmaps = evlist->core.nr_mmaps;
+ struct mmap *mmap = evlist->mmap;
+ struct mmap *overwrite_mmap = evlist->overwrite_mmap;
+ struct perf_cpu_map *cpus = evlist->core.all_cpus;
+ bool per_thread = evlist__per_thread(evlist);
+
+ if (per_thread)
+ thread_data->nr_mmaps = nr_mmaps;
+ else
+ thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
+ thread_data->mask->maps.nbits);
+ if (mmap) {
+ thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
+ if (!thread_data->maps)
+ return -ENOMEM;
+ }
+ if (overwrite_mmap) {
+ thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
+ if (!thread_data->overwrite_maps) {
+ zfree(&thread_data->maps);
+ return -ENOMEM;
+ }
+ }
+ pr_debug2("thread_data[%p]: nr_mmaps=%d, maps=%p, ow_maps=%p\n", thread_data,
+ thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
+
+ for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
+ if (per_thread ||
+ test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
+ if (thread_data->maps) {
+ thread_data->maps[tm] = &mmap[m];
+ pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
+ thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
+ }
+ if (thread_data->overwrite_maps) {
+ thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
+ pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
+ thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
+ }
+ tm++;
+ }
+ }
+
+ return 0;
+}
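The mapping loop above assigns each worker the subset of per-CPU ring buffers whose CPU bit is set in the worker's maps mask. A simplified standalone sketch of the same idea, using a plain unsigned long mask instead of the kernel bitmap helpers and assuming one mmap per CPU:

#include <stdio.h>

int main(void)
{
	int cpus[] = { 0, 1, 2, 3, 4, 5, 6, 7 };	/* one mmap per CPU */
	unsigned long thread_mask = 0x0f;		/* this worker owns CPUs 0-3 */
	int nr = sizeof(cpus) / sizeof(cpus[0]);
	int m, tm = 0;

	for (m = 0; m < nr; m++) {
		if (thread_mask & (1UL << cpus[m]))
			printf("maps[%d] -> mmap[%d] (cpu%d)\n", tm++, m, cpus[m]);
	}
	return 0;
}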
+
+static int record__thread_data_init_pollfd(struct record_thread *thread_data, struct evlist *evlist)
+{
+ int f, tm, pos;
+ struct mmap *map, *overwrite_map;
+
+ fdarray__init(&thread_data->pollfd, 64);
+
+ for (tm = 0; tm < thread_data->nr_mmaps; tm++) {
+ map = thread_data->maps ? thread_data->maps[tm] : NULL;
+ overwrite_map = thread_data->overwrite_maps ?
+ thread_data->overwrite_maps[tm] : NULL;
+
+ for (f = 0; f < evlist->core.pollfd.nr; f++) {
+ void *ptr = evlist->core.pollfd.priv[f].ptr;
+
+ if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) {
+ pos = fdarray__dup_entry_from(&thread_data->pollfd, f,
+ &evlist->core.pollfd);
+ if (pos < 0)
+ return pos;
+ pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n",
+ thread_data, pos, evlist->core.pollfd.entries[f].fd);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void record__free_thread_data(struct record *rec)
+{
+ int t;
+ struct record_thread *thread_data = rec->thread_data;
+
+ if (thread_data == NULL)
+ return;
+
+ for (t = 0; t < rec->nr_threads; t++) {
+ record__thread_data_close_pipes(&thread_data[t]);
+ zfree(&thread_data[t].maps);
+ zfree(&thread_data[t].overwrite_maps);
+ fdarray__exit(&thread_data[t].pollfd);
+ }
+
+ zfree(&rec->thread_data);
+}
+
+static int record__map_thread_evlist_pollfd_indexes(struct record *rec,
+ int evlist_pollfd_index,
+ int thread_pollfd_index)
+{
+ size_t x = rec->index_map_cnt;
+
+ if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL))
+ return -ENOMEM;
+ rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index;
+ rec->index_map[x].thread_pollfd_index = thread_pollfd_index;
+ rec->index_map_cnt += 1;
+ return 0;
+}
+
+static int record__update_evlist_pollfd_from_thread(struct record *rec,
+ struct evlist *evlist,
+ struct record_thread *thread_data)
+{
+ struct pollfd *e_entries = evlist->core.pollfd.entries;
+ struct pollfd *t_entries = thread_data->pollfd.entries;
+ int err = 0;
+ size_t i;
+
+ for (i = 0; i < rec->index_map_cnt; i++) {
+ int e_pos = rec->index_map[i].evlist_pollfd_index;
+ int t_pos = rec->index_map[i].thread_pollfd_index;
+
+ if (e_entries[e_pos].fd != t_entries[t_pos].fd ||
+ e_entries[e_pos].events != t_entries[t_pos].events) {
+ pr_err("Thread and evlist pollfd index mismatch\n");
+ err = -EINVAL;
+ continue;
+ }
+ e_entries[e_pos].revents = t_entries[t_pos].revents;
+ }
+ return err;
+}
+
+static int record__dup_non_perf_events(struct record *rec,
+ struct evlist *evlist,
+ struct record_thread *thread_data)
+{
+ struct fdarray *fda = &evlist->core.pollfd;
+ int i, ret;
+
+ for (i = 0; i < fda->nr; i++) {
+ if (!(fda->priv[i].flags & fdarray_flag__non_perf_event))
+ continue;
+ ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda);
+ if (ret < 0) {
+ pr_err("Failed to duplicate descriptor in main thread pollfd\n");
+ return ret;
+ }
+ pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n",
+ thread_data, ret, fda->entries[i].fd);
+ ret = record__map_thread_evlist_pollfd_indexes(rec, i, ret);
+ if (ret < 0) {
+ pr_err("Failed to map thread and evlist pollfd indexes\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
+{
+ int t, ret;
+ struct record_thread *thread_data;
+
+ rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
+ if (!rec->thread_data) {
+ pr_err("Failed to allocate thread data\n");
+ return -ENOMEM;
+ }
+ thread_data = rec->thread_data;
+
+ for (t = 0; t < rec->nr_threads; t++)
+ record__thread_data_init_pipes(&thread_data[t]);
+
+ for (t = 0; t < rec->nr_threads; t++) {
+ thread_data[t].rec = rec;
+ thread_data[t].mask = &rec->thread_masks[t];
+ ret = record__thread_data_init_maps(&thread_data[t], evlist);
+ if (ret) {
+ pr_err("Failed to initialize thread[%d] maps\n", t);
+ goto out_free;
+ }
+ ret = record__thread_data_init_pollfd(&thread_data[t], evlist);
+ if (ret) {
+ pr_err("Failed to initialize thread[%d] pollfd\n", t);
+ goto out_free;
+ }
+ if (t) {
+ thread_data[t].tid = -1;
+ ret = record__thread_data_open_pipes(&thread_data[t]);
+ if (ret) {
+ pr_err("Failed to open thread[%d] communication pipes\n", t);
+ goto out_free;
+ }
+ ret = fdarray__add(&thread_data[t].pollfd, thread_data[t].pipes.msg[0],
+ POLLIN | POLLERR | POLLHUP, fdarray_flag__nonfilterable);
+ if (ret < 0) {
+ pr_err("Failed to add descriptor to thread[%d] pollfd\n", t);
+ goto out_free;
+ }
+ thread_data[t].ctlfd_pos = ret;
+ pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n",
+ thread_data, thread_data[t].ctlfd_pos,
+ thread_data[t].pipes.msg[0]);
+ } else {
+ thread_data[t].tid = gettid();
+
+ ret = record__dup_non_perf_events(rec, evlist, &thread_data[t]);
+ if (ret < 0)
+ goto out_free;
+
+ thread_data[t].ctlfd_pos = -1; /* Not used */
+ }
+ }
+
+ return 0;
+
+out_free:
+ record__free_thread_data(rec);
+
+ return ret;
+}
+
static int record__mmap_evlist(struct record *rec,
struct evlist *evlist)
{
+ int i, ret;
struct record_opts *opts = &rec->opts;
bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
opts->auxtrace_sample_mode;
@@ -787,6 +1249,28 @@ static int record__mmap_evlist(struct record *rec,
return -EINVAL;
}
}
+
+ if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack))
+ return -1;
+
+ ret = record__alloc_thread_data(rec, evlist);
+ if (ret)
+ return ret;
+
+ if (record__threads_enabled(rec)) {
+ ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
+ if (ret) {
+ pr_err("Failed to create data directory: %s\n", strerror(-ret));
+ return ret;
+ }
+ for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ if (evlist->mmap)
+ evlist->mmap[i].file = &rec->data.dir.files[i];
+ if (evlist->overwrite_mmap)
+ evlist->overwrite_mmap[i].file = &rec->data.dir.files[i];
+ }
+ }
+
return 0;
}
@@ -805,40 +1289,50 @@ static int record__open(struct record *rec)
int rc = 0;
/*
- * For initial_delay we need to add a dummy event so that we can track
- * PERF_RECORD_MMAP while we wait for the initial delay to enable the
- * real events, the ones asked by the user.
+ * For initial_delay, system wide or a hybrid system, we need to add a
+ * dummy event so that we can track PERF_RECORD_MMAP to cover the delay
+ * of waiting or event synthesis.
*/
- if (opts->initial_delay) {
- if (perf_evlist__add_dummy(evlist))
- return -ENOMEM;
+ if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
+ perf_pmu__has_hybrid()) {
+ pos = evlist__get_tracking_event(evlist);
+ if (!evsel__is_dummy_event(pos)) {
+ /* Set up dummy event. */
+ if (evlist__add_dummy(evlist))
+ return -ENOMEM;
+ pos = evlist__last(evlist);
+ evlist__set_tracking_event(evlist, pos);
+ }
- pos = evlist__first(evlist);
- pos->tracking = 0;
- pos = evlist__last(evlist);
- pos->tracking = 1;
- pos->core.attr.enable_on_exec = 1;
+ /*
+ * Enable the dummy event when the process is forked for
+ * initial_delay, immediately for system wide.
+ */
+ if (opts->target.initial_delay && !pos->immediate &&
+ !target__has_cpu(&opts->target))
+ pos->core.attr.enable_on_exec = 1;
+ else
+ pos->immediate = 1;
}
- perf_evlist__config(evlist, opts, &callchain_param);
+ evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, pos) {
try_again:
if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
- if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
+ if (evsel__fallback(pos, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
}
if ((errno == EINVAL || errno == EBADF) &&
- pos->leader != pos &&
+ pos->core.leader != &pos->core &&
pos->weak_group) {
- pos = perf_evlist__reset_weak_group(evlist, pos, true);
+ pos = evlist__reset_weak_group(evlist, pos, true);
goto try_again;
}
rc = -errno;
- perf_evsel__open_strerror(pos, &opts->target,
- errno, msg, sizeof(msg));
+ evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
goto out;
}
@@ -846,7 +1340,7 @@ try_again:
pos->supported = true;
}
- if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(evlist)) {
+ if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
@@ -857,9 +1351,9 @@ try_again:
"even with a suitable vmlinux or kallsyms file.\n\n");
}
- if (perf_evlist__apply_filters(evlist, &pos)) {
+ if (evlist__apply_filters(evlist, &pos)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
- pos->filter, perf_evsel__name(pos), errno,
+ pos->filter ?: "BPF", evsel__name(pos), errno,
str_error_r(errno, msg, sizeof(msg)));
rc = -1;
goto out;
@@ -875,6 +1369,15 @@ out:
return rc;
}
+static void set_timestamp_boundary(struct record *rec, u64 sample_time)
+{
+ if (rec->evlist->first_sample_time == 0)
+ rec->evlist->first_sample_time = sample_time;
+
+ if (sample_time)
+ rec->evlist->last_sample_time = sample_time;
+}
+
static int process_sample_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
@@ -883,10 +1386,7 @@ static int process_sample_event(struct perf_tool *tool,
{
struct record *rec = container_of(tool, struct record, tool);
- if (rec->evlist->first_sample_time == 0)
- rec->evlist->first_sample_time = sample->time;
-
- rec->evlist->last_sample_time = sample->time;
+ set_timestamp_boundary(rec, sample->time);
if (rec->buildid_all)
return 0;
@@ -959,18 +1459,25 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
+static struct perf_event_header finished_init_event = {
+ .size = sizeof(struct perf_event_header),
+ .type = PERF_RECORD_FINISHED_INIT,
+};
+
static void record__adjust_affinity(struct record *rec, struct mmap *map)
{
if (rec->opts.affinity != PERF_AFFINITY_SYS &&
- !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
- rec->affinity_mask.nbits)) {
- bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
- bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
- map->affinity_mask.bits, rec->affinity_mask.nbits);
- sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
- (cpu_set_t *)rec->affinity_mask.bits);
- if (verbose == 2)
- mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
+ !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
+ thread->mask->affinity.nbits)) {
+ bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);
+ bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,
+ map->affinity_mask.bits, thread->mask->affinity.nbits);
+ sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
+ (cpu_set_t *)thread->mask->affinity.bits);
+ if (verbose == 2) {
+ pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu());
+ mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity");
+ }
}
}
@@ -990,17 +1497,26 @@ static size_t process_comp_header(void *record, size_t increment)
return size;
}
-static size_t zstd_compress(struct perf_session *session, void *dst, size_t dst_size,
- void *src, size_t src_size)
+static size_t zstd_compress(struct perf_session *session, struct mmap *map,
+ void *dst, size_t dst_size, void *src, size_t src_size)
{
size_t compressed;
size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
+ struct zstd_data *zstd_data = &session->zstd_data;
+
+ if (map && map->file)
+ zstd_data = &map->zstd_data;
- compressed = zstd_compress_stream_to_records(&session->zstd_data, dst, dst_size, src, src_size,
+ compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
max_record_size, process_comp_header);
- session->bytes_transferred += src_size;
- session->bytes_compressed += compressed;
+ if (map && map->file) {
+ thread->bytes_transferred += src_size;
+ thread->bytes_compressed += compressed;
+ } else {
+ session->bytes_transferred += src_size;
+ session->bytes_compressed += compressed;
+ }
return compressed;
}
@@ -1011,14 +1527,17 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
u64 bytes_written = rec->bytes_written;
int i;
int rc = 0;
- struct mmap *maps;
+ int nr_mmaps;
+ struct mmap **maps;
int trace_fd = rec->data.file.fd;
off_t off = 0;
if (!evlist)
return 0;
- maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
+ nr_mmaps = thread->nr_mmaps;
+ maps = overwrite ? thread->overwrite_maps : thread->maps;
+
if (!maps)
return 0;
@@ -1028,9 +1547,9 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
if (record__aio_enabled(rec))
off = record__aio_get_pos(trace_fd);
- for (i = 0; i < evlist->core.nr_mmaps; i++) {
+ for (i = 0; i < nr_mmaps; i++) {
u64 flush = 0;
- struct mmap *map = &maps[i];
+ struct mmap *map = maps[i];
if (map->core.base) {
record__adjust_affinity(rec, map);
@@ -1072,12 +1591,16 @@ static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
/*
* Mark the round finished in case we wrote
* at least one event.
+ *
+ * No need for round events in directory mode,
+ * because per-cpu maps and files have data
+ * sorted by kernel.
*/
- if (bytes_written != rec->bytes_written)
+ if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written)
rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
if (overwrite)
- perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
+ evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
out:
return rc;
}
@@ -1093,6 +1616,77 @@ static int record__mmap_read_all(struct record *rec, bool synch)
return record__mmap_read_evlist(rec, rec->evlist, true, synch);
}
+static void record__thread_munmap_filtered(struct fdarray *fda, int fd,
+ void *arg __maybe_unused)
+{
+ struct perf_mmap *map = fda->priv[fd].ptr;
+
+ if (map)
+ perf_mmap__put(map);
+}
+
+static void *record__thread(void *arg)
+{
+ enum thread_msg msg = THREAD_MSG__READY;
+ bool terminate = false;
+ struct fdarray *pollfd;
+ int err, ctlfd_pos;
+
+ thread = arg;
+ thread->tid = gettid();
+
+ err = write(thread->pipes.ack[1], &msg, sizeof(msg));
+ if (err == -1)
+ pr_warning("threads[%d]: failed to notify on start: %s\n",
+ thread->tid, strerror(errno));
+
+ pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
+
+ pollfd = &thread->pollfd;
+ ctlfd_pos = thread->ctlfd_pos;
+
+ for (;;) {
+ unsigned long long hits = thread->samples;
+
+ if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
+ break;
+
+ if (hits == thread->samples) {
+
+ err = fdarray__poll(pollfd, -1);
+ /*
+ * Propagate error, only if there's any. Ignore positive
+ * number of returned events and interrupt error.
+ */
+ if (err > 0 || (err < 0 && errno == EINTR))
+ err = 0;
+ thread->waking++;
+
+ if (fdarray__filter(pollfd, POLLERR | POLLHUP,
+ record__thread_munmap_filtered, NULL) == 0)
+ break;
+ }
+
+ if (pollfd->entries[ctlfd_pos].revents & POLLHUP) {
+ terminate = true;
+ close(thread->pipes.msg[0]);
+ thread->pipes.msg[0] = -1;
+ pollfd->entries[ctlfd_pos].fd = -1;
+ pollfd->entries[ctlfd_pos].events = 0;
+ }
+
+ pollfd->entries[ctlfd_pos].revents = 0;
+ }
+ record__mmap_read_all(thread->rec, true);
+
+ err = write(thread->pipes.ack[1], &msg, sizeof(msg));
+ if (err == -1)
+ pr_warning("threads[%d]: failed to notify on termination: %s\n",
+ thread->tid, strerror(errno));
+
+ return NULL;
+}
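The msg/ack pipe handshake that record__thread() participates in is spread across several functions; below is a minimal sketch of just that protocol, using plain POSIX pipes and pthreads rather than the perf fdarray code. The worker announces READY on the ack pipe when it starts, and the parent requests termination by closing the write end of the msg pipe, which the worker observes as POLLHUP.

#include <poll.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum { MSG_READY = 1 };

struct worker {
	int msg[2];	/* parent -> worker control pipe */
	int ack[2];	/* worker -> parent notifications */
};

static void *worker_fn(void *arg)
{
	struct worker *w = arg;
	struct pollfd pfd = { .fd = w->msg[0], .events = POLLIN };
	int msg = MSG_READY;

	write(w->ack[1], &msg, sizeof(msg));		/* "started" */

	for (;;) {
		poll(&pfd, 1, -1);
		if (pfd.revents & POLLHUP)		/* parent closed msg[1] */
			break;
	}

	write(w->ack[1], &msg, sizeof(msg));		/* "terminating" */
	return NULL;
}

int main(void)
{
	struct worker w;
	pthread_t handle;
	int msg;

	if (pipe(w.msg) || pipe(w.ack))
		return 1;
	pthread_create(&handle, NULL, worker_fn, &w);

	read(w.ack[0], &msg, sizeof(msg));		/* wait for READY */
	printf("worker started\n");

	close(w.msg[1]);				/* ask it to stop */
	read(w.ack[0], &msg, sizeof(msg));		/* wait for the final ack */
	pthread_join(handle, NULL);
	printf("worker terminated\n");
	return 0;
}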
+
static void record__init_features(struct record *rec)
{
struct perf_session *session = rec->session;
@@ -1104,8 +1698,10 @@ static void record__init_features(struct record *rec)
if (rec->no_buildid)
perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
+#ifdef HAVE_LIBTRACEEVENT
if (!have_tracepoints(&rec->evlist->core.entries))
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
+#endif
if (!rec->opts.branch_stack)
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
@@ -1116,7 +1712,12 @@ static void record__init_features(struct record *rec)
if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
perf_header__clear_feat(&session->header, HEADER_CLOCKID);
- perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+ if (!rec->opts.use_clockid)
+ perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
+
+ if (!record__threads_enabled(rec))
+ perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
+
if (!record__comp_enabled(rec))
perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
@@ -1126,6 +1727,7 @@ static void record__init_features(struct record *rec)
static void
record__finish_output(struct record *rec)
{
+ int i;
struct perf_data *data = &rec->data;
int fd = perf_data__fd(data);
@@ -1134,6 +1736,10 @@ record__finish_output(struct record *rec)
rec->session->header.data_size += rec->bytes_written;
data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
+ if (record__threads_enabled(rec)) {
+ for (i = 0; i < data->dir.nr; i++)
+ data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
+ }
if (!rec->no_buildid) {
process_buildids(rec);
@@ -1150,6 +1756,7 @@ static int record__synthesize_workload(struct record *rec, bool tail)
{
int err;
struct perf_thread_map *thread_map;
+ bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
if (rec->opts.tail_synthesize != tail)
return 0;
@@ -1161,11 +1768,20 @@ static int record__synthesize_workload(struct record *rec, bool tail)
err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
process_synthesized_event,
&rec->session->machines.host,
+ needs_mmap,
rec->opts.sample_address);
perf_thread_map__put(thread_map);
return err;
}
+static int write_finished_init(struct record *rec, bool tail)
+{
+ if (rec->opts.tail_synthesize != tail)
+ return 0;
+
+ return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event));
+}
+
static int record__synthesize(struct record *rec, bool tail);
static int
@@ -1180,6 +1796,8 @@ record__switch_output(struct record *rec, bool at_exit)
record__aio_mmap_read_sync(rec);
+ write_finished_init(rec, true);
+
record__synthesize(rec, true);
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
@@ -1234,14 +1852,92 @@ record__switch_output(struct record *rec, bool at_exit)
*/
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, false);
+ write_finished_init(rec, false);
}
return fd;
}
-static volatile int workload_exec_errno;
+static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
+ struct perf_record_lost_samples *lost,
+ int cpu_idx, int thread_idx, u64 lost_count,
+ u16 misc_flag)
+{
+ struct perf_sample_id *sid;
+ struct perf_sample sample = {};
+ int id_hdr_size;
+
+ lost->lost = lost_count;
+ if (evsel->core.ids) {
+ sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx);
+ sample.id = sid->id;
+ }
+
+ id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1),
+ evsel->core.attr.sample_type, &sample);
+ lost->header.size = sizeof(*lost) + id_hdr_size;
+ lost->header.misc = misc_flag;
+ record__write(rec, NULL, lost, lost->header.size);
+}
+
+static void record__read_lost_samples(struct record *rec)
+{
+ struct perf_session *session = rec->session;
+ struct perf_record_lost_samples *lost;
+ struct evsel *evsel;
+
+ /* there was an error during record__open */
+ if (session->evlist == NULL)
+ return;
+
+ lost = zalloc(PERF_SAMPLE_MAX_SIZE);
+ if (lost == NULL) {
+ pr_debug("Memory allocation failed\n");
+ return;
+ }
+
+ lost->header.type = PERF_RECORD_LOST_SAMPLES;
+
+ evlist__for_each_entry(session->evlist, evsel) {
+ struct xyarray *xy = evsel->core.sample_id;
+ u64 lost_count;
+
+ if (xy == NULL || evsel->core.fd == NULL)
+ continue;
+ if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) ||
+ xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) {
+ pr_debug("Unmatched FD vs. sample ID: skip reading LOST count\n");
+ continue;
+ }
+
+ for (int x = 0; x < xyarray__max_x(xy); x++) {
+ for (int y = 0; y < xyarray__max_y(xy); y++) {
+ struct perf_counts_values count;
+
+ if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
+ pr_debug("read LOST count failed\n");
+ goto out;
+ }
+
+ if (count.lost) {
+ __record__save_lost_samples(rec, evsel, lost,
+ x, y, count.lost, 0);
+ }
+ }
+ }
+
+ lost_count = perf_bpf_filter__lost_count(evsel);
+ if (lost_count)
+ __record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
+ PERF_RECORD_MISC_LOST_SAMPLES_BPF);
+ }
+out:
+ free(lost);
+}
+
+static volatile sig_atomic_t workload_exec_errno;
/*
- * perf_evlist__prepare_workload will send a SIGUSR1
+ * evlist__prepare_workload will send a SIGUSR1
* if the fork fails, since we asked by setting its
* want_signal to true.
*/
@@ -1257,8 +1953,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
static void snapshot_sig_handler(int sig);
static void alarm_sig_handler(int sig);
-static const struct perf_event_mmap_page *
-perf_evlist__pick_pc(struct evlist *evlist)
+static const struct perf_event_mmap_page *evlist__pick_pc(struct evlist *evlist)
{
if (evlist) {
if (evlist->mmap && evlist->mmap[0].core.base)
@@ -1271,9 +1966,7 @@ perf_evlist__pick_pc(struct evlist *evlist)
static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
{
- const struct perf_event_mmap_page *pc;
-
- pc = perf_evlist__pick_pc(rec->evlist);
+ const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
if (pc)
return pc;
return NULL;
@@ -1286,48 +1979,19 @@ static int record__synthesize(struct record *rec, bool tail)
struct perf_data *data = &rec->data;
struct record_opts *opts = &rec->opts;
struct perf_tool *tool = &rec->tool;
- int fd = perf_data__fd(data);
int err = 0;
+ event_op f = process_synthesized_event;
if (rec->opts.tail_synthesize != tail)
return 0;
if (data->is_pipe) {
- /*
- * We need to synthesize events first, because some
- * features works on top of them (on report side).
- */
- err = perf_event__synthesize_attrs(tool, rec->evlist,
- process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize attrs.\n");
- goto out;
- }
-
- err = perf_event__synthesize_features(tool, session, rec->evlist,
+ err = perf_event__synthesize_for_pipe(tool, session, data,
process_synthesized_event);
- if (err < 0) {
- pr_err("Couldn't synthesize features.\n");
- return err;
- }
+ if (err < 0)
+ goto out;
- if (have_tracepoints(&rec->evlist->core.entries)) {
- /*
- * FIXME err <= 0 here actually means that
- * there were no tracepoints so its not really
- * an error, just that we don't need to
- * synthesize anything. We really have to
- * return this more properly and also
- * propagate errors that now are calling die()
- */
- err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
- process_synthesized_event);
- if (err <= 0) {
- pr_err("Couldn't record tracing data.\n");
- goto out;
- }
- rec->bytes_written += err;
- }
+ rec->bytes_written += err;
}
err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
@@ -1336,13 +2000,11 @@ static int record__synthesize(struct record *rec, bool tail)
goto out;
/* Synthesize id_index before auxtrace_info */
- if (rec->opts.auxtrace_sample_mode) {
- err = perf_event__synthesize_id_index(tool,
- process_synthesized_event,
- session->evlist, machine);
- if (err)
- goto out;
- }
+ err = perf_event__synthesize_id_index(tool,
+ process_synthesized_event,
+ session->evlist, machine);
+ if (err)
+ goto out;
if (rec->opts.full_auxtrace) {
err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
@@ -1351,7 +2013,7 @@ static int record__synthesize(struct record *rec, bool tail)
goto out;
}
- if (!perf_evlist__exclude_kernel(rec->evlist)) {
+ if (!evlist__exclude_kernel(rec->evlist)) {
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
machine);
WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
@@ -1385,7 +2047,7 @@ static int record__synthesize(struct record *rec, bool tail)
return err;
}
- err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize cpu map.\n");
@@ -1394,35 +2056,294 @@ static int record__synthesize(struct record *rec, bool tail)
err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
machine, opts);
- if (err < 0)
+ if (err < 0) {
pr_warning("Couldn't synthesize bpf events.\n");
+ err = 0;
+ }
- err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
- machine);
- if (err < 0)
- pr_warning("Couldn't synthesize cgroup events.\n");
+ if (rec->opts.synth & PERF_SYNTH_CGROUP) {
+ err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
+ machine);
+ if (err < 0) {
+ pr_warning("Couldn't synthesize cgroup events.\n");
+ err = 0;
+ }
+ }
+
+ if (rec->opts.nr_threads_synthesize > 1) {
+ mutex_init(&synth_lock);
+ perf_set_multithreaded();
+ f = process_locked_synthesized_event;
+ }
+
+ if (rec->opts.synth & PERF_SYNTH_TASK) {
+ bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
+
+ err = __machine__synthesize_threads(machine, tool, &opts->target,
+ rec->evlist->core.threads,
+ f, needs_mmap, opts->sample_address,
+ rec->opts.nr_threads_synthesize);
+ }
+
+ if (rec->opts.nr_threads_synthesize > 1) {
+ perf_set_singlethreaded();
+ mutex_destroy(&synth_lock);
+ }
- err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
- process_synthesized_event, opts->sample_address,
- 1);
out:
return err;
}
+static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
+{
+ struct record *rec = data;
+ pthread_kill(rec->thread_id, SIGUSR2);
+ return 0;
+}
+
+static int record__setup_sb_evlist(struct record *rec)
+{
+ struct record_opts *opts = &rec->opts;
+
+ if (rec->sb_evlist != NULL) {
+ /*
+ * We get here if --switch-output-event populated the
+ * sb_evlist, so associate a callback that will send a SIGUSR2
+ * to the main thread.
+ */
+ evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
+ rec->thread_id = pthread_self();
+ }
+#ifdef HAVE_LIBBPF_SUPPORT
+ if (!opts->no_bpf_event) {
+ if (rec->sb_evlist == NULL) {
+ rec->sb_evlist = evlist__new();
+
+ if (rec->sb_evlist == NULL) {
+ pr_err("Couldn't create side band evlist.\n.");
+ return -1;
+ }
+ }
+
+ if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
+ pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
+ return -1;
+ }
+ }
+#endif
+ if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
+ pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
+ opts->no_bpf_event = true;
+ }
+
+ return 0;
+}
+
+static int record__init_clock(struct record *rec)
+{
+ struct perf_session *session = rec->session;
+ struct timespec ref_clockid;
+ struct timeval ref_tod;
+ u64 ref;
+
+ if (!rec->opts.use_clockid)
+ return 0;
+
+ if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
+ session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
+
+ session->header.env.clock.clockid = rec->opts.clockid;
+
+ if (gettimeofday(&ref_tod, NULL) != 0) {
+ pr_err("gettimeofday failed, cannot set reference time.\n");
+ return -1;
+ }
+
+ if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
+ pr_err("clock_gettime failed, cannot set reference time.\n");
+ return -1;
+ }
+
+ ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
+ (u64) ref_tod.tv_usec * NSEC_PER_USEC;
+
+ session->header.env.clock.tod_ns = ref;
+
+ ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
+ (u64) ref_clockid.tv_nsec;
+
+ session->header.env.clock.clockid_ns = ref;
+ return 0;
+}
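record__init_clock() stores two timestamps taken back to back, one from gettimeofday() and one from the session clockid, so that a consumer can later map session-clock timestamps onto wall-clock time. A minimal sketch of that conversion; the field names mirror the env.clock fields set above, but this is illustrative only and not the perf report implementation:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct clock_ref {
	uint64_t tod_ns;	/* gettimeofday() at the reference point */
	uint64_t clockid_ns;	/* clock_gettime(clockid) at the same point */
};

/* Shift a session-clock timestamp onto the gettimeofday() timeline. */
static uint64_t sample_to_wallclock(const struct clock_ref *ref, uint64_t sample_ns)
{
	return ref->tod_ns + (sample_ns - ref->clockid_ns);
}

int main(void)
{
	struct clock_ref ref = {
		.tod_ns = 1700000000ULL * NSEC_PER_SEC,
		.clockid_ns = 5000ULL * NSEC_PER_SEC,
	};
	uint64_t sample_ns = 5002ULL * NSEC_PER_SEC;	/* 2s after the reference */

	printf("wall clock: %" PRIu64 " ns\n", sample_to_wallclock(&ref, sample_ns));
	return 0;
}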
+
+static void hit_auxtrace_snapshot_trigger(struct record *rec)
+{
+ if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
+ trigger_hit(&auxtrace_snapshot_trigger);
+ auxtrace_record__snapshot_started = 1;
+ if (auxtrace_record__snapshot_start(rec->itr))
+ trigger_error(&auxtrace_snapshot_trigger);
+ }
+}
+
+static void record__uniquify_name(struct record *rec)
+{
+ struct evsel *pos;
+ struct evlist *evlist = rec->evlist;
+ char *new_name;
+ int ret;
+
+ if (!perf_pmu__has_hybrid())
+ return;
+
+ evlist__for_each_entry(evlist, pos) {
+ if (!evsel__is_hybrid(pos))
+ continue;
+
+ if (strchr(pos->name, '/'))
+ continue;
+
+ ret = asprintf(&new_name, "%s/%s/",
+ pos->pmu_name, pos->name);
+ if (ret) {
+ free(pos->name);
+ pos->name = new_name;
+ }
+ }
+}
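record__uniquify_name() rewrites hybrid event names so the owning PMU stays visible in the output, e.g. "cycles" on the core PMU becomes "cpu_core/cycles/", while names that already carry a PMU are left alone. A small standalone sketch of that renaming; uniquify() is a hypothetical helper with a stricter asprintf() error check than the code above:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *uniquify(const char *pmu_name, const char *ev_name)
{
	char *new_name = NULL;

	/* Names that already spell out a PMU ("pmu/event/") are kept as is. */
	if (strchr(ev_name, '/'))
		return strdup(ev_name);

	if (asprintf(&new_name, "%s/%s/", pmu_name, ev_name) < 0)
		return NULL;
	return new_name;
}

int main(void)
{
	char *n1 = uniquify("cpu_core", "cycles");
	char *n2 = uniquify("cpu_atom", "cpu_atom/instructions/");

	/* Prints: cpu_core/cycles/ and cpu_atom/instructions/ */
	printf("%s\n%s\n", n1, n2);
	free(n1);
	free(n2);
	return 0;
}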
+
+static int record__terminate_thread(struct record_thread *thread_data)
+{
+ int err;
+ enum thread_msg ack = THREAD_MSG__UNDEFINED;
+ pid_t tid = thread_data->tid;
+
+ close(thread_data->pipes.msg[1]);
+ thread_data->pipes.msg[1] = -1;
+ err = read(thread_data->pipes.ack[0], &ack, sizeof(ack));
+ if (err > 0)
+ pr_debug2("threads[%d]: sent %s\n", tid, thread_msg_tags[ack]);
+ else
+ pr_warning("threads[%d]: failed to receive termination notification from %d\n",
+ thread->tid, tid);
+
+ return 0;
+}
+
+static int record__start_threads(struct record *rec)
+{
+ int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
+ struct record_thread *thread_data = rec->thread_data;
+ sigset_t full, mask;
+ pthread_t handle;
+ pthread_attr_t attrs;
+
+ thread = &thread_data[0];
+
+ if (!record__threads_enabled(rec))
+ return 0;
+
+ sigfillset(&full);
+ if (sigprocmask(SIG_SETMASK, &full, &mask)) {
+ pr_err("Failed to block signals on threads start: %s\n", strerror(errno));
+ return -1;
+ }
+
+ pthread_attr_init(&attrs);
+ pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
+
+ for (t = 1; t < nr_threads; t++) {
+ enum thread_msg msg = THREAD_MSG__UNDEFINED;
+
+#ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
+ pthread_attr_setaffinity_np(&attrs,
+ MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)),
+ (cpu_set_t *)(thread_data[t].mask->affinity.bits));
+#endif
+ if (pthread_create(&handle, &attrs, record__thread, &thread_data[t])) {
+ for (tt = 1; tt < t; tt++)
+ record__terminate_thread(&thread_data[t]);
+ pr_err("Failed to start threads: %s\n", strerror(errno));
+ ret = -1;
+ goto out_err;
+ }
+
+ err = read(thread_data[t].pipes.ack[0], &msg, sizeof(msg));
+ if (err > 0)
+ pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
+ thread_msg_tags[msg]);
+ else
+ pr_warning("threads[%d]: failed to receive start notification from %d\n",
+ thread->tid, rec->thread_data[t].tid);
+ }
+
+ sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
+ (cpu_set_t *)thread->mask->affinity.bits);
+
+ pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
+
+out_err:
+ pthread_attr_destroy(&attrs);
+
+ if (sigprocmask(SIG_SETMASK, &mask, NULL)) {
+ pr_err("Failed to unblock signals on threads start: %s\n", strerror(errno));
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int record__stop_threads(struct record *rec)
+{
+ int t;
+ struct record_thread *thread_data = rec->thread_data;
+
+ for (t = 1; t < rec->nr_threads; t++)
+ record__terminate_thread(&thread_data[t]);
+
+ for (t = 0; t < rec->nr_threads; t++) {
+ rec->samples += thread_data[t].samples;
+ if (!record__threads_enabled(rec))
+ continue;
+ rec->session->bytes_transferred += thread_data[t].bytes_transferred;
+ rec->session->bytes_compressed += thread_data[t].bytes_compressed;
+ pr_debug("threads[%d]: samples=%lld, wakes=%ld, ", thread_data[t].tid,
+ thread_data[t].samples, thread_data[t].waking);
+ if (thread_data[t].bytes_transferred && thread_data[t].bytes_compressed)
+ pr_debug("transferred=%" PRIu64 ", compressed=%" PRIu64 "\n",
+ thread_data[t].bytes_transferred, thread_data[t].bytes_compressed);
+ else
+ pr_debug("written=%" PRIu64 "\n", thread_data[t].bytes_written);
+ }
+
+ return 0;
+}
+
+static unsigned long record__waking(struct record *rec)
+{
+ int t;
+ unsigned long waking = 0;
+ struct record_thread *thread_data = rec->thread_data;
+
+ for (t = 0; t < rec->nr_threads; t++)
+ waking += thread_data[t].waking;
+
+ return waking;
+}
+
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
int err;
int status = 0;
- unsigned long waking = 0;
const bool forks = argc > 0;
struct perf_tool *tool = &rec->tool;
struct record_opts *opts = &rec->opts;
struct perf_data *data = &rec->data;
struct perf_session *session;
bool disabled = false, draining = false;
- struct evlist *sb_evlist = NULL;
int fd;
float ratio = 0;
+ enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
atexit(record__sig_exit);
signal(SIGCHLD, sig_handler);
@@ -1452,12 +2373,23 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
signal(SIGUSR2, SIG_IGN);
}
- session = perf_session__new(data, false, tool);
+ session = perf_session__new(data, tool);
if (IS_ERR(session)) {
pr_err("Perf session creation failed.\n");
return PTR_ERR(session);
}
+ if (record__threads_enabled(rec)) {
+ if (perf_data__is_pipe(&rec->data)) {
+ pr_err("Parallel trace streaming is not available in pipe mode.\n");
+ return -1;
+ }
+ if (rec->opts.full_auxtrace) {
+ pr_err("Parallel trace streaming is not available in AUX area tracing mode.\n");
+ return -1;
+ }
+ }
+
fd = perf_data__fd(data);
rec->session = session;
@@ -1465,6 +2397,20 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
pr_err("Compression initialization failed.\n");
return -1;
}
+#ifdef HAVE_EVENTFD_SUPPORT
+ done_fd = eventfd(0, EFD_NONBLOCK);
+ if (done_fd < 0) {
+ pr_err("Failed to create wakeup eventfd, error: %m\n");
+ status = -1;
+ goto out_delete_session;
+ }
+ err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
+ if (err < 0) {
+ pr_err("Failed to add wakeup eventfd to poll list\n");
+ status = err;
+ goto out_delete_session;
+ }
+#endif // HAVE_EVENTFD_SUPPORT
session->header.env.comp_type = PERF_COMP_ZSTD;
session->header.env.comp_level = rec->opts.comp_level;
@@ -1475,15 +2421,14 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
return -1;
}
- record__init_features(rec);
+ if (record__init_clock(rec))
+ return -1;
- if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
- session->header.env.clockid_res_ns = rec->opts.clockid_res_ns;
+ record__init_features(rec);
if (forks) {
- err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
- argv, data->is_pipe,
- workload_exec_failed_signal);
+ err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
+ workload_exec_failed_signal);
if (err < 0) {
pr_err("Couldn't run the workload!\n");
status = err;
@@ -1500,17 +2445,23 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (data->is_pipe && rec->evlist->core.nr_entries == 1)
rec->opts.sample_id = true;
+ record__uniquify_name(rec);
+
+ /* Debug message used by test scripts */
+ pr_debug3("perf record opening and mmapping events\n");
if (record__open(rec) != 0) {
err = -1;
- goto out_child;
+ goto out_free_threads;
}
+ /* Debug message used by test scripts */
+ pr_debug3("perf record done opening and mmapping events\n");
session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
if (rec->opts.kcore) {
err = record__kcore_copy(&session->machines.host, data);
if (err) {
pr_err("ERROR: Failed to copy kcore\n");
- goto out_child;
+ goto out_free_threads;
}
}
@@ -1521,50 +2472,46 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
pr_err("ERROR: Apply config to BPF failed: %s\n",
errbuf);
- goto out_child;
+ goto out_free_threads;
}
/*
* Normally perf_session__new would do this, but it doesn't have the
* evlist.
*/
- if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
+ if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
rec->tool.ordered_events = false;
}
- if (!rec->evlist->nr_groups)
+ if (evlist__nr_groups(rec->evlist) == 0)
perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
if (data->is_pipe) {
err = perf_header__write_pipe(fd);
if (err < 0)
- goto out_child;
+ goto out_free_threads;
} else {
err = perf_session__write_header(session, rec->evlist, fd, false);
if (err < 0)
- goto out_child;
+ goto out_free_threads;
}
+ err = -1;
if (!rec->no_buildid
&& !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
pr_err("Couldn't generate buildids. "
"Use --no-buildid to profile anyway.\n");
- err = -1;
- goto out_child;
+ goto out_free_threads;
}
- if (!opts->no_bpf_event)
- bpf_event__add_sb_event(&sb_evlist, &session->header.env);
-
- if (perf_evlist__start_sb_thread(sb_evlist, &rec->opts.target)) {
- pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
- opts->no_bpf_event = true;
- }
+ err = record__setup_sb_evlist(rec);
+ if (err)
+ goto out_free_threads;
err = record__synthesize(rec, false);
if (err < 0)
- goto out_child;
+ goto out_free_threads;
if (rec->realtime_prio) {
struct sched_param param;
@@ -1573,16 +2520,19 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
if (sched_setscheduler(0, SCHED_FIFO, &param)) {
pr_err("Could not set realtime priority.\n");
err = -1;
- goto out_child;
+ goto out_free_threads;
}
}
+ if (record__start_threads(rec))
+ goto out_free_threads;
+
/*
* When perf is starting the traced process, all the events
* (apart from group members) have enable_on_exec=1 set,
* so don't spoil it by prematurely enabling them.
*/
- if (!target__none(&opts->target) && !opts->initial_delay)
+ if (!target__none(&opts->target) && !opts->target.initial_delay)
evlist__enable(rec->evlist);
/*
@@ -1631,30 +2581,51 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
machine);
free(event);
- perf_evlist__start_workload(rec->evlist);
+ evlist__start_workload(rec->evlist);
}
- if (opts->initial_delay) {
- usleep(opts->initial_delay * USEC_PER_MSEC);
- evlist__enable(rec->evlist);
+ if (opts->target.initial_delay) {
+ pr_info(EVLIST_DISABLED_MSG);
+ if (opts->target.initial_delay > 0) {
+ usleep(opts->target.initial_delay * USEC_PER_MSEC);
+ evlist__enable(rec->evlist);
+ pr_info(EVLIST_ENABLED_MSG);
+ }
}
+ err = event_enable_timer__start(rec->evlist->eet);
+ if (err)
+ goto out_child;
+
+ /* Debug message used by test scripts */
+ pr_debug3("perf record has started\n");
+ fflush(stderr);
+
trigger_ready(&auxtrace_snapshot_trigger);
trigger_ready(&switch_output_trigger);
perf_hooks__invoke_record_start();
+
+ /*
+ * Must write FINISHED_INIT so it will be seen after all other
+ * synthesized user events, but before any regular events.
+ */
+ err = write_finished_init(rec, false);
+ if (err < 0)
+ goto out_child;
+
for (;;) {
- unsigned long long hits = rec->samples;
+ unsigned long long hits = thread->samples;
/*
* rec->evlist->bkw_mmap_state is possible to be
* BKW_MMAP_EMPTY here: when done == true and
* hits != rec->samples in previous round.
*
- * perf_evlist__toggle_bkw_mmap ensure we never
+ * evlist__toggle_bkw_mmap ensures we never
* convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
*/
if (trigger_is_hit(&switch_output_trigger) || done || draining)
- perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
+ evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
if (record__mmap_read_all(rec, false) < 0) {
trigger_error(&auxtrace_snapshot_trigger);
@@ -1693,12 +2664,12 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
* record__mmap_read_all(): we should have collected
* data from it.
*/
- perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
+ evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
if (!quiet)
fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
- waking);
- waking = 0;
+ record__waking(rec));
+ thread->waking = 0;
fd = record__switch_output(rec, false);
if (fd < 0) {
pr_err("Failed to switch to new file\n");
@@ -1712,20 +2683,53 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
alarm(rec->switch_output.time);
}
- if (hits == rec->samples) {
+ if (hits == thread->samples) {
if (done || draining)
break;
- err = evlist__poll(rec->evlist, -1);
+ err = fdarray__poll(&thread->pollfd, -1);
/*
* Propagate error, only if there's any. Ignore positive
* number of returned events and interrupt error.
*/
if (err > 0 || (err < 0 && errno == EINTR))
err = 0;
- waking++;
+ thread->waking++;
- if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
+ if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP,
+ record__thread_munmap_filtered, NULL) == 0)
draining = true;
+
+ err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread);
+ if (err)
+ goto out_child;
+ }
+
+ if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
+ switch (cmd) {
+ case EVLIST_CTL_CMD_SNAPSHOT:
+ hit_auxtrace_snapshot_trigger(rec);
+ evlist__ctlfd_ack(rec->evlist);
+ break;
+ case EVLIST_CTL_CMD_STOP:
+ done = 1;
+ break;
+ case EVLIST_CTL_CMD_ACK:
+ case EVLIST_CTL_CMD_UNSUPPORTED:
+ case EVLIST_CTL_CMD_ENABLE:
+ case EVLIST_CTL_CMD_DISABLE:
+ case EVLIST_CTL_CMD_EVLIST:
+ case EVLIST_CTL_CMD_PING:
+ default:
+ break;
+ }
+ }
+
+ err = event_enable_timer__process(rec->evlist->eet);
+ if (err < 0)
+ goto out_child;
+ if (err) {
+ err = 0;
+ done = 1;
}
/*
@@ -1747,21 +2751,32 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
record__auxtrace_snapshot_exit(rec);
if (forks && workload_exec_errno) {
- char msg[STRERR_BUFSIZE];
+ char msg[STRERR_BUFSIZE], strevsels[2048];
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
- pr_err("Workload failed: %s\n", emsg);
+
+ evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
+
+ pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
+ strevsels, argv[0], emsg);
err = -1;
goto out_child;
}
if (!quiet)
- fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
+ fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n",
+ record__waking(rec));
+
+ write_finished_init(rec, true);
if (target__none(&rec->opts.target))
record__synthesize_workload(rec, true);
out_child:
+ record__stop_threads(rec);
record__mmap_read_all(rec, true);
+out_free_threads:
+ record__free_thread_data(rec);
+ evlist__finalize_ctlfd(rec->evlist);
record__aio_mmap_read_sync(rec);
if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
@@ -1786,6 +2801,10 @@ out_child:
} else
status = err;
+ if (rec->off_cpu)
+ rec->bytes_written += off_cpu_write(rec->session);
+
+ record__read_lost_samples(rec);
record__synthesize(rec, true);
/* this will be recalculated during process_buildids() */
rec->samples = 0;
@@ -1827,11 +2846,19 @@ out_child:
}
out_delete_session:
+#ifdef HAVE_EVENTFD_SUPPORT
+ if (done_fd >= 0) {
+ fd = done_fd;
+ done_fd = -1;
+
+ close(fd);
+ }
+#endif
zstd_fini(&session->zstd_data);
perf_session__delete(session);
if (!opts->no_bpf_event)
- perf_evlist__stop_sb_thread(sb_evlist);
+ evlist__stop_sb_thread(rec->sb_evlist);
return status;
}
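The record loop above follows a drain-then-sleep pattern: every pass reads whatever the mmapped buffers hold, and only when a pass collects no new samples does it poll the per-thread pollfd array; descriptors reporting POLLERR|POLLHUP are filtered out, and once none remain a final drain pass ends the run. A minimal standalone sketch of that pattern, using a plain pipe and poll(2) in place of the perf mmap descriptors (all names here are illustrative, not the perf API):

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

/* Illustrative stand-in for record__mmap_read_all(): returns bytes drained. */
static int drain(int fd)
{
	char buf[4096];
	ssize_t n = read(fd, buf, sizeof(buf));

	return n > 0 ? (int)n : 0;
}

int main(void)
{
	int pipefd[2];
	struct pollfd pfd;
	int draining = 0;
	unsigned long waking = 0;

	if (pipe(pipefd))
		return 1;
	if (write(pipefd[1], "sample", 6) != 6)
		return 1;
	close(pipefd[1]);			/* producer gone -> POLLHUP, like an exiting workload */

	pfd.fd = pipefd[0];
	pfd.events = POLLIN;

	for (;;) {
		if (drain(pfd.fd))
			continue;		/* got data, drain again before sleeping */
		if (draining)
			break;			/* final pass collected nothing, done */
		if (poll(&pfd, 1, 100) > 0 && (pfd.revents & (POLLERR | POLLHUP)))
			draining = 1;		/* no producers left: do one last drain */
		waking++;
	}
	printf("woken up %lu times to read data\n", waking);
	close(pipefd[0]);
	return 0;
}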
@@ -1904,6 +2931,8 @@ static int perf_record_config(const char *var, const char *value, void *cb)
rec->no_buildid_cache = true;
else if (!strcmp(value, "skip"))
rec->no_buildid = true;
+ else if (!strcmp(value, "mmap"))
+ rec->buildid_mmap = true;
else
return -1;
return 0;
@@ -1919,119 +2948,106 @@ static int perf_record_config(const char *var, const char *value, void *cb)
rec->opts.nr_cblocks = nr_cblocks_default;
}
#endif
+ if (!strcmp(var, "record.debuginfod")) {
+ rec->debuginfod.urls = strdup(value);
+ if (!rec->debuginfod.urls)
+ return -ENOMEM;
+ rec->debuginfod.set = true;
+ }
return 0;
}
-struct clockid_map {
- const char *name;
- int clockid;
-};
-
-#define CLOCKID_MAP(n, c) \
- { .name = n, .clockid = (c), }
-
-#define CLOCKID_END { .name = NULL, }
-
+static int record__parse_event_enable_time(const struct option *opt, const char *str, int unset)
+{
+ struct record *rec = (struct record *)opt->value;
-/*
- * Add the missing ones, we need to build on many distros...
- */
-#ifndef CLOCK_MONOTONIC_RAW
-#define CLOCK_MONOTONIC_RAW 4
-#endif
-#ifndef CLOCK_BOOTTIME
-#define CLOCK_BOOTTIME 7
-#endif
-#ifndef CLOCK_TAI
-#define CLOCK_TAI 11
-#endif
+ return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset);
+}
-static const struct clockid_map clockids[] = {
- /* available for all events, NMI safe */
- CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
- CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
+static int record__parse_affinity(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
- /* available for some events */
- CLOCKID_MAP("realtime", CLOCK_REALTIME),
- CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
- CLOCKID_MAP("tai", CLOCK_TAI),
+ if (unset || !str)
+ return 0;
- /* available for the lazy */
- CLOCKID_MAP("mono", CLOCK_MONOTONIC),
- CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
- CLOCKID_MAP("real", CLOCK_REALTIME),
- CLOCKID_MAP("boot", CLOCK_BOOTTIME),
+ if (!strcasecmp(str, "node"))
+ opts->affinity = PERF_AFFINITY_NODE;
+ else if (!strcasecmp(str, "cpu"))
+ opts->affinity = PERF_AFFINITY_CPU;
- CLOCKID_END,
-};
+ return 0;
+}
-static int get_clockid_res(clockid_t clk_id, u64 *res_ns)
+static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
{
- struct timespec res;
-
- *res_ns = 0;
- if (!clock_getres(clk_id, &res))
- *res_ns = res.tv_nsec + res.tv_sec * NSEC_PER_SEC;
- else
- pr_warning("WARNING: Failed to determine specified clock resolution.\n");
+ mask->nbits = nr_bits;
+ mask->bits = bitmap_zalloc(mask->nbits);
+ if (!mask->bits)
+ return -ENOMEM;
return 0;
}
-static int parse_clockid(const struct option *opt, const char *str, int unset)
+static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
{
- struct record_opts *opts = (struct record_opts *)opt->value;
- const struct clockid_map *cm;
- const char *ostr = str;
-
- if (unset) {
- opts->use_clockid = 0;
- return 0;
- }
-
- /* no arg passed */
- if (!str)
- return 0;
-
- /* no setting it twice */
- if (opts->use_clockid)
- return -1;
-
- opts->use_clockid = true;
+ bitmap_free(mask->bits);
+ mask->nbits = 0;
+}
- /* if its a number, we're done */
- if (sscanf(str, "%d", &opts->clockid) == 1)
- return get_clockid_res(opts->clockid, &opts->clockid_res_ns);
+static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
+{
+ int ret;
- /* allow a "CLOCK_" prefix to the name */
- if (!strncasecmp(str, "CLOCK_", 6))
- str += 6;
+ ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
+ if (ret) {
+ mask->affinity.bits = NULL;
+ return ret;
+ }
- for (cm = clockids; cm->name; cm++) {
- if (!strcasecmp(str, cm->name)) {
- opts->clockid = cm->clockid;
- return get_clockid_res(opts->clockid,
- &opts->clockid_res_ns);
- }
+ ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
+ if (ret) {
+ record__mmap_cpu_mask_free(&mask->maps);
+ mask->maps.bits = NULL;
}
- opts->use_clockid = false;
- ui__warning("unknown clockid %s, check man page\n", ostr);
- return -1;
+ return ret;
}
-static int record__parse_affinity(const struct option *opt, const char *str, int unset)
+static void record__thread_mask_free(struct thread_mask *mask)
{
- struct record_opts *opts = (struct record_opts *)opt->value;
+ record__mmap_cpu_mask_free(&mask->maps);
+ record__mmap_cpu_mask_free(&mask->affinity);
+}
- if (unset || !str)
- return 0;
+static int record__parse_threads(const struct option *opt, const char *str, int unset)
+{
+ int s;
+ struct record_opts *opts = opt->value;
- if (!strcasecmp(str, "node"))
- opts->affinity = PERF_AFFINITY_NODE;
- else if (!strcasecmp(str, "cpu"))
- opts->affinity = PERF_AFFINITY_CPU;
+ if (unset || !str || !strlen(str)) {
+ opts->threads_spec = THREAD_SPEC__CPU;
+ } else {
+ for (s = 1; s < THREAD_SPEC__MAX; s++) {
+ if (s == THREAD_SPEC__USER) {
+ opts->threads_user_spec = strdup(str);
+ if (!opts->threads_user_spec)
+ return -ENOMEM;
+ opts->threads_spec = THREAD_SPEC__USER;
+ break;
+ }
+ if (!strncasecmp(str, thread_spec_tags[s], strlen(thread_spec_tags[s]))) {
+ opts->threads_spec = s;
+ break;
+ }
+ }
+ }
+
+ if (opts->threads_spec == THREAD_SPEC__USER)
+ pr_debug("threads_spec: %s\n", opts->threads_user_spec);
+ else
+ pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]);
return 0;
}
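record__parse_threads() above resolves the --threads argument by first trying the known spec tags and otherwise keeping the string as a user-supplied mask spec. A compact sketch of that resolution order; the enum layout and tag strings are assumptions mirroring this patch, not the exact perf definitions:

#include <stdio.h>
#include <string.h>
#include <strings.h>

enum thread_spec {
	THREAD_SPEC__UNDEFINED = 0,
	THREAD_SPEC__CPU,
	THREAD_SPEC__CORE,
	THREAD_SPEC__PACKAGE,
	THREAD_SPEC__NUMA,
	THREAD_SPEC__USER,
	THREAD_SPEC__MAX,
};

/* Assumed tag table; indexes match the enum above. */
static const char *thread_spec_tags[THREAD_SPEC__MAX] = {
	"undefined", "cpu", "core", "package", "numa", "user"
};

static enum thread_spec parse_threads_spec(const char *str)
{
	int s;

	if (!str || !*str)
		return THREAD_SPEC__CPU;	/* bare --threads defaults to per-CPU */

	for (s = 1; s < THREAD_SPEC__MAX; s++) {
		if (s == THREAD_SPEC__USER)
			return THREAD_SPEC__USER;	/* anything unmatched is a user mask spec */
		if (!strncasecmp(str, thread_spec_tags[s], strlen(thread_spec_tags[s])))
			return (enum thread_spec)s;
	}
	return THREAD_SPEC__UNDEFINED;
}

int main(void)
{
	printf("%d %d %d\n", parse_threads_spec("core"),
	       parse_threads_spec(""), parse_threads_spec("0-3/0:4-7/4"));
	return 0;
}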
@@ -2084,7 +3100,7 @@ static int record__parse_mmap_pages(const struct option *opt,
*p = '\0';
if (*s) {
- ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
+ ret = __evlist__parse_mmap_pages(&mmap_pages, s);
if (ret)
goto out_free;
opts->mmap_pages = mmap_pages;
@@ -2095,7 +3111,7 @@ static int record__parse_mmap_pages(const struct option *opt,
goto out_free;
}
- ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
+ ret = __evlist__parse_mmap_pages(&mmap_pages, p + 1);
if (ret)
goto out_free;
@@ -2106,6 +3122,19 @@ out_free:
return ret;
}
+void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
+{
+}
+
+static int parse_control_option(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct record_opts *opts = opt->value;
+
+ return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
+}
+
static void switch_output_size_warn(struct record *rec)
{
u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
@@ -2142,10 +3171,29 @@ static int switch_output_setup(struct record *rec)
};
unsigned long val;
+ /*
+ * If we're using --switch-output-event, then we imply
+ * --switch-output=signal, as we'll send a SIGUSR2 from the side band
+ * thread to its parent.
+ */
+ if (rec->switch_output_event_set) {
+ if (record__threads_enabled(rec)) {
+ pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n");
+ return 0;
+ }
+ goto do_signal;
+ }
+
if (!s->set)
return 0;
+ if (record__threads_enabled(rec)) {
+ pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n");
+ return 0;
+ }
+
if (!strcmp(s->str, "signal")) {
+do_signal:
s->signal = true;
pr_debug("switch-output with SIGUSR2 signal\n");
goto enabled;
@@ -2210,6 +3258,37 @@ static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *eve
return perf_event__process_mmap2(tool, event, sample, machine);
}
+static int process_timestamp_boundary(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ struct record *rec = container_of(tool, struct record, tool);
+
+ set_timestamp_boundary(rec, sample->time);
+ return 0;
+}
+
+static int parse_record_synth_option(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct record_opts *opts = opt->value;
+ char *p = strdup(str);
+
+ if (p == NULL)
+ return -1;
+
+ opts->synth = parse_synth_opt(p);
+ free(p);
+
+ if (opts->synth < 0) {
+ pr_err("Invalid synth option: %s\n", str);
+ return -1;
+ }
+ return 0;
+}
+
/*
* XXX Ideally would be local to cmd_record() and passed to a record__new
* because we need to have access to it in record__exit, that is called
@@ -2232,6 +3311,10 @@ static struct record record = {
.default_per_cpu = true,
},
.mmap_flush = MMAP_FLUSH_DEFAULT,
+ .nr_threads_synthesize = 1,
+ .ctl_fd = -1,
+ .ctl_fd_ack = -1,
+ .synth = PERF_SYNTH_ALL,
},
.tool = {
.sample = process_sample_event,
@@ -2241,6 +3324,8 @@ static struct record record = {
.namespaces = perf_event__process_namespaces,
.mmap = build_id__process_mmap,
.mmap2 = build_id__process_mmap2,
+ .itrace_start = process_timestamp_boundary,
+ .aux = process_timestamp_boundary,
.ordered_events = true,
},
};
@@ -2254,7 +3339,7 @@ static bool dry_run;
* XXX Will stay a global variable till we fix builtin-script.c to stop messing
* with it and switch to use the library functions in perf_evlist that came
* from builtin-record.c, i.e. use record_opts,
- * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
+ * evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
* using pipes, etc.
*/
static struct option __record_options[] = {
@@ -2289,7 +3374,7 @@ static struct option __record_options[] = {
OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
"synthesize non-sample events at the end of output"),
OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
- OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "record bpf events"),
+ OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
"Fail if the specified frequency can't be used"),
OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
@@ -2301,8 +3386,6 @@ static struct option __record_options[] = {
OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
"Minimal number of bytes that is extracted from mmap data pages (default: 1)",
record__mmap_flush_parse),
- OPT_BOOLEAN(0, "group", &record.opts.group,
- "put the counters into a counter group"),
OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
NULL, "enables call-graph recording" ,
&record_callchain_opt),
@@ -2311,13 +3394,19 @@ static struct option __record_options[] = {
&record_parse_callchain_opt),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
- OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
+ OPT_BOOLEAN('q', "quiet", &quiet, "don't print any warnings or messages"),
OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
"per thread counts"),
OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
"Record the sample physical addresses"),
+ OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
+ "Record the sampled data address data page size"),
+ OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
+ "Record the sampled code address (ip) page size"),
OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
+ OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
+ "Record the sample identifier"),
OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
&record.opts.sample_time_set,
"Record the sample timestamps"),
@@ -2334,8 +3423,10 @@ static struct option __record_options[] = {
OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
"monitor event in cgroup name only",
parse_cgroups),
- OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
- "ms to wait before starting measurement after program start"),
+ OPT_CALLBACK('D', "delay", &record, "ms",
+ "ms to wait before starting measurement after program start (-1: start with events disabled), "
+ "or ranges of time to enable events e.g. '-D 10-20,30-40'",
+ record__parse_event_enable_time),
OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
"user to profile"),
@@ -2374,8 +3465,9 @@ static struct option __record_options[] = {
"Record namespaces events"),
OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
"Record cgroup events"),
- OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
- "Record context switch events"),
+ OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
+ &record.opts.record_switch_events_set,
+ "Record context switch events"),
OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
"Configure all used events to run in kernel space.",
PARSE_OPT_EXCLUSIVE),
@@ -2394,6 +3486,8 @@ static struct option __record_options[] = {
"file", "vmlinux pathname"),
OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
"Record build-id of all DSOs regardless of hits"),
+ OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
+ "Record build-id in map events"),
OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
"append timestamp to output filename"),
OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
@@ -2402,6 +3496,9 @@ static struct option __record_options[] = {
&record.switch_output.set, "signal or size[BKMG] or time[smhd]",
"Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
"signal"),
+ OPT_CALLBACK_SET(0, "switch-output-event", &record.sb_evlist, &record.switch_output_event_set, "switch output event",
+ "switch output event selector. use 'perf list' to list available events",
+ parse_events_option_new_evlist),
OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
"Limit number of switch output generated files"),
OPT_BOOLEAN(0, "dry-run", &dry_run,
@@ -2415,17 +3512,444 @@ static struct option __record_options[] = {
"Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
record__parse_affinity),
#ifdef HAVE_ZSTD_SUPPORT
- OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default,
- "n", "Compressed records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
+ OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
+ "Compress records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
record__parse_comp_level),
#endif
OPT_CALLBACK(0, "max-size", &record.output_max_size,
"size", "Limit the maximum size of the output file", parse_output_max_size),
+ OPT_UINTEGER(0, "num-thread-synthesize",
+ &record.opts.nr_threads_synthesize,
+ "number of threads to run for event synthesis"),
+#ifdef HAVE_LIBPFM
+ OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
+ "libpfm4 event selector. use 'perf list' to list available events",
+ parse_libpfm_events_option),
+#endif
+ OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
+ "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
+ "\t\t\t 'snapshot': AUX area tracing snapshot).\n"
+ "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
+ "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
+ parse_control_option),
+ OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
+ "Fine-tune event synthesis: default=all", parse_record_synth_option),
+ OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls,
+ &record.debuginfod.set, "debuginfod urls",
+ "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
+ "system"),
+ OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
+ "write collected trace data into several data files using parallel threads",
+ record__parse_threads),
+ OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
OPT_END()
};
struct option *record_options = __record_options;
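The --control option registered above accepts 'enable', 'disable' and 'snapshot' commands on a descriptor or FIFO pair and optionally completes each one with 'ack\n', as its help text states. A hedged sketch of a controller process, assuming perf record was started with --control fifo:ctl.fifo,ack.fifo (the FIFO names are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char ack[8] = { 0 };
	/* Assumes perf record --control fifo:ctl.fifo,ack.fifo ... is already running. */
	int ctl = open("ctl.fifo", O_WRONLY);
	int ackfd = open("ack.fifo", O_RDONLY);

	if (ctl < 0 || ackfd < 0) {
		perror("open");
		return 1;
	}

	write(ctl, "enable\n", 7);		/* start the events */
	read(ackfd, ack, sizeof(ack) - 1);	/* perf completes the command with "ack\n" */
	printf("got: %s", ack);

	sleep(1);				/* measure for a while */
	write(ctl, "disable\n", 8);		/* stop the events again */
	read(ackfd, ack, sizeof(ack) - 1);

	close(ctl);
	close(ackfd);
	return 0;
}

The same commands can of course be sent from a shell with echo; the sketch only shows the request/ack ordering.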
+static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
+{
+ struct perf_cpu cpu;
+ int idx;
+
+ if (cpu_map__is_dummy(cpus))
+ return 0;
+
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+ if (cpu.cpu == -1)
+ continue;
+ /* Return ENODEV if input cpu is greater than max cpu */
+ if ((unsigned long)cpu.cpu > mask->nbits)
+ return -ENODEV;
+ __set_bit(cpu.cpu, mask->bits);
+ }
+
+ return 0;
+}
+
+static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
+{
+ struct perf_cpu_map *cpus;
+
+ cpus = perf_cpu_map__new(mask_spec);
+ if (!cpus)
+ return -ENOMEM;
+
+ bitmap_zero(mask->bits, mask->nbits);
+ if (record__mmap_cpu_mask_init(mask, cpus))
+ return -ENODEV;
+
+ perf_cpu_map__put(cpus);
+
+ return 0;
+}
+
+static void record__free_thread_masks(struct record *rec, int nr_threads)
+{
+ int t;
+
+ if (rec->thread_masks)
+ for (t = 0; t < nr_threads; t++)
+ record__thread_mask_free(&rec->thread_masks[t]);
+
+ zfree(&rec->thread_masks);
+}
+
+static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
+{
+ int t, ret;
+
+ rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
+ if (!rec->thread_masks) {
+ pr_err("Failed to allocate thread masks\n");
+ return -ENOMEM;
+ }
+
+ for (t = 0; t < nr_threads; t++) {
+ ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
+ if (ret) {
+ pr_err("Failed to allocate thread masks[%d]\n", t);
+ goto out_free;
+ }
+ }
+
+ return 0;
+
+out_free:
+ record__free_thread_masks(rec, nr_threads);
+
+ return ret;
+}
+
+static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+ int t, ret, nr_cpus = perf_cpu_map__nr(cpus);
+
+ ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
+ if (ret)
+ return ret;
+
+ rec->nr_threads = nr_cpus;
+ pr_debug("nr_threads: %d\n", rec->nr_threads);
+
+ for (t = 0; t < rec->nr_threads; t++) {
+ __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
+ __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
+ if (verbose > 0) {
+ pr_debug("thread_masks[%d]: ", t);
+ mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
+ pr_debug("thread_masks[%d]: ", t);
+ mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
+ }
+ }
+
+ return 0;
+}
+
+static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
+ const char **maps_spec, const char **affinity_spec,
+ u32 nr_spec)
+{
+ u32 s;
+ int ret = 0, t = 0;
+ struct mmap_cpu_mask cpus_mask;
+ struct thread_mask thread_mask, full_mask, *thread_masks;
+
+ ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);
+ if (ret) {
+ pr_err("Failed to allocate CPUs mask\n");
+ return ret;
+ }
+
+ ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
+ if (ret) {
+ pr_err("Failed to init cpu mask\n");
+ goto out_free_cpu_mask;
+ }
+
+ ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
+ if (ret) {
+ pr_err("Failed to allocate full mask\n");
+ goto out_free_cpu_mask;
+ }
+
+ ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
+ if (ret) {
+ pr_err("Failed to allocate thread mask\n");
+ goto out_free_full_and_cpu_masks;
+ }
+
+ for (s = 0; s < nr_spec; s++) {
+ ret = record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
+ if (ret) {
+ pr_err("Failed to initialize maps thread mask\n");
+ goto out_free;
+ }
+ ret = record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
+ if (ret) {
+ pr_err("Failed to initialize affinity thread mask\n");
+ goto out_free;
+ }
+
+ /* ignore invalid CPUs but do not allow empty masks */
+ if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
+ cpus_mask.bits, thread_mask.maps.nbits)) {
+ pr_err("Empty maps mask: %s\n", maps_spec[s]);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
+ cpus_mask.bits, thread_mask.affinity.nbits)) {
+ pr_err("Empty affinity mask: %s\n", affinity_spec[s]);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ /* do not allow intersection with other masks (full_mask) */
+ if (bitmap_intersects(thread_mask.maps.bits, full_mask.maps.bits,
+ thread_mask.maps.nbits)) {
+ pr_err("Intersecting maps mask: %s\n", maps_spec[s]);
+ ret = -EINVAL;
+ goto out_free;
+ }
+ if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,
+ thread_mask.affinity.nbits)) {
+ pr_err("Intersecting affinity mask: %s\n", affinity_spec[s]);
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ bitmap_or(full_mask.maps.bits, full_mask.maps.bits,
+ thread_mask.maps.bits, full_mask.maps.nbits);
+ bitmap_or(full_mask.affinity.bits, full_mask.affinity.bits,
+ thread_mask.affinity.bits, full_mask.maps.nbits);
+
+ thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
+ if (!thread_masks) {
+ pr_err("Failed to reallocate thread masks\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ rec->thread_masks = thread_masks;
+ rec->thread_masks[t] = thread_mask;
+ if (verbose > 0) {
+ pr_debug("thread_masks[%d]: ", t);
+ mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
+ pr_debug("thread_masks[%d]: ", t);
+ mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
+ }
+ t++;
+ ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
+ if (ret) {
+ pr_err("Failed to allocate thread mask\n");
+ goto out_free_full_and_cpu_masks;
+ }
+ }
+ rec->nr_threads = t;
+ pr_debug("nr_threads: %d\n", rec->nr_threads);
+ if (!rec->nr_threads)
+ ret = -EINVAL;
+
+out_free:
+ record__thread_mask_free(&thread_mask);
+out_free_full_and_cpu_masks:
+ record__thread_mask_free(&full_mask);
+out_free_cpu_mask:
+ record__mmap_cpu_mask_free(&cpus_mask);
+
+ return ret;
+}
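The per-spec checks above reduce to two bitmap rules: a spec must still be non-empty after masking it with the available CPUs, and it must not claim CPUs already taken by an earlier spec (accumulated in full_mask). A standalone sketch of those rules with plain 64-bit masks, purely illustrative rather than the libperf bitmap API:

#include <stdint.h>
#include <stdio.h>

/* Accept specs one by one; *claimed accumulates CPUs already taken. */
static int accept_spec(uint64_t spec, uint64_t online, uint64_t *claimed)
{
	uint64_t effective = spec & online;	/* ignore invalid CPUs ... */

	if (!effective)
		return -1;			/* ... but do not allow empty masks */
	if (effective & *claimed)
		return -2;			/* no intersection with earlier specs */
	*claimed |= effective;
	return 0;
}

int main(void)
{
	uint64_t online = 0xff;			/* CPUs 0-7 */
	uint64_t claimed = 0;

	printf("%d\n", accept_spec(0x0f, online, &claimed));	/*  0: CPUs 0-3 */
	printf("%d\n", accept_spec(0xf0, online, &claimed));	/*  0: CPUs 4-7 */
	printf("%d\n", accept_spec(0x30, online, &claimed));	/* -2: overlaps CPUs 4-5 */
	printf("%d\n", accept_spec(0x100, online, &claimed));	/* -1: CPU 8 not online */
	return 0;
}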
+
+static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+ int ret;
+ struct cpu_topology *topo;
+
+ topo = cpu_topology__new();
+ if (!topo) {
+ pr_err("Failed to allocate CPU topology\n");
+ return -ENOMEM;
+ }
+
+ ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
+ topo->core_cpus_list, topo->core_cpus_lists);
+ cpu_topology__delete(topo);
+
+ return ret;
+}
+
+static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+ int ret;
+ struct cpu_topology *topo;
+
+ topo = cpu_topology__new();
+ if (!topo) {
+ pr_err("Failed to allocate CPU topology\n");
+ return -ENOMEM;
+ }
+
+ ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
+ topo->package_cpus_list, topo->package_cpus_lists);
+ cpu_topology__delete(topo);
+
+ return ret;
+}
+
+static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+ u32 s;
+ int ret;
+ const char **spec;
+ struct numa_topology *topo;
+
+ topo = numa_topology__new();
+ if (!topo) {
+ pr_err("Failed to allocate NUMA topology\n");
+ return -ENOMEM;
+ }
+
+ spec = zalloc(topo->nr * sizeof(char *));
+ if (!spec) {
+ pr_err("Failed to allocate NUMA spec\n");
+ ret = -ENOMEM;
+ goto out_delete_topo;
+ }
+ for (s = 0; s < topo->nr; s++)
+ spec[s] = topo->nodes[s].cpus;
+
+ ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
+
+ zfree(&spec);
+
+out_delete_topo:
+ numa_topology__delete(topo);
+
+ return ret;
+}
+
+static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+ int t, ret;
+ u32 s, nr_spec = 0;
+ char **maps_spec = NULL, **affinity_spec = NULL, **tmp_spec;
+ char *user_spec, *spec, *spec_ptr, *mask, *mask_ptr, *dup_mask = NULL;
+
+ for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
+ spec = strtok_r(user_spec, ":", &spec_ptr);
+ if (spec == NULL)
+ break;
+ pr_debug2("threads_spec[%d]: %s\n", t, spec);
+ mask = strtok_r(spec, "/", &mask_ptr);
+ if (mask == NULL)
+ break;
+ pr_debug2(" maps mask: %s\n", mask);
+ tmp_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *));
+ if (!tmp_spec) {
+ pr_err("Failed to reallocate maps spec\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ maps_spec = tmp_spec;
+ maps_spec[nr_spec] = dup_mask = strdup(mask);
+ if (!maps_spec[nr_spec]) {
+ pr_err("Failed to allocate maps spec[%d]\n", nr_spec);
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ mask = strtok_r(NULL, "/", &mask_ptr);
+ if (mask == NULL) {
+ pr_err("Invalid thread maps or affinity specs\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+ pr_debug2(" affinity mask: %s\n", mask);
+ tmp_spec = realloc(affinity_spec, (nr_spec + 1) * sizeof(char *));
+ if (!tmp_spec) {
+ pr_err("Failed to reallocate affinity spec\n");
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ affinity_spec = tmp_spec;
+ affinity_spec[nr_spec] = strdup(mask);
+ if (!affinity_spec[nr_spec]) {
+ pr_err("Failed to allocate affinity spec[%d]\n", nr_spec);
+ ret = -ENOMEM;
+ goto out_free;
+ }
+ dup_mask = NULL;
+ nr_spec++;
+ }
+
+ ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec,
+ (const char **)affinity_spec, nr_spec);
+
+out_free:
+ free(dup_mask);
+ for (s = 0; s < nr_spec; s++) {
+ if (maps_spec)
+ free(maps_spec[s]);
+ if (affinity_spec)
+ free(affinity_spec[s]);
+ }
+ free(affinity_spec);
+ free(maps_spec);
+
+ return ret;
+}
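record__init_thread_user_masks() above splits the user spec on ':' into per-thread entries and each entry on '/' into a maps mask and an affinity mask, e.g. '0-3/0:4-7/4'. A minimal parser sketch using the same strtok_r structure; CPU-list expansion and the error paths are omitted:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char spec[] = "0-3/0:4-7/4";		/* <maps>/<affinity>[:<maps>/<affinity>...] */
	char *entry, *entry_ptr, *mask_ptr;

	for (entry = strtok_r(spec, ":", &entry_ptr); entry;
	     entry = strtok_r(NULL, ":", &entry_ptr)) {
		char *maps = strtok_r(entry, "/", &mask_ptr);
		char *affinity = strtok_r(NULL, "/", &mask_ptr);

		if (!maps || !affinity) {
			fprintf(stderr, "invalid thread maps or affinity spec\n");
			return EXIT_FAILURE;
		}
		printf("maps mask: %-8s affinity mask: %s\n", maps, affinity);
	}
	return 0;
}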
+
+static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+ int ret;
+
+ ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
+ if (ret)
+ return ret;
+
+ if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
+ return -ENODEV;
+
+ rec->nr_threads = 1;
+
+ return 0;
+}
+
+static int record__init_thread_masks(struct record *rec)
+{
+ int ret = 0;
+ struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
+
+ if (!record__threads_enabled(rec))
+ return record__init_thread_default_masks(rec, cpus);
+
+ if (evlist__per_thread(rec->evlist)) {
+ pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
+ return -EINVAL;
+ }
+
+ switch (rec->opts.threads_spec) {
+ case THREAD_SPEC__CPU:
+ ret = record__init_thread_cpu_masks(rec, cpus);
+ break;
+ case THREAD_SPEC__CORE:
+ ret = record__init_thread_core_masks(rec, cpus);
+ break;
+ case THREAD_SPEC__PACKAGE:
+ ret = record__init_thread_package_masks(rec, cpus);
+ break;
+ case THREAD_SPEC__NUMA:
+ ret = record__init_thread_numa_masks(rec, cpus);
+ break;
+ case THREAD_SPEC__USER:
+ ret = record__init_thread_user_masks(rec, cpus);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
int cmd_record(int argc, const char **argv)
{
int err;
@@ -2455,6 +3979,12 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+#ifndef HAVE_BPF_SKEL
+# define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
+ set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
+# undef set_nobuild
+#endif
+
rec->opts.affinity = PERF_AFFINITY_SYS;
rec->evlist = evlist__new();
@@ -2470,6 +4000,12 @@ int cmd_record(int argc, const char **argv)
if (quiet)
perf_quiet_option();
+ err = symbol__validate_sym_arguments();
+ if (err)
+ return err;
+
+ perf_debuginfod_setup(&record.debuginfod);
+
/* Make system wide (-a) the default target. */
if (!argc && target__none(&rec->opts.target))
rec->opts.target.system_wide = true;
@@ -2480,9 +4016,44 @@ int cmd_record(int argc, const char **argv)
}
+ if (rec->buildid_mmap) {
+ if (!perf_can_record_build_id()) {
+ pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
+ err = -EINVAL;
+ goto out_opts;
+ }
+ pr_debug("Enabling build id in mmap2 events.\n");
+ /* Enable mmap build id synthesizing. */
+ symbol_conf.buildid_mmap2 = true;
+ /* Enable perf_event_attr::build_id bit. */
+ rec->opts.build_id = true;
+ /* Disable build id cache. */
+ rec->no_buildid = true;
+ }
+
+ if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
+ pr_err("Kernel has no cgroup sampling support.\n");
+ err = -EINVAL;
+ goto out_opts;
+ }
+
if (rec->opts.kcore)
+ rec->opts.text_poke = true;
+
+ if (rec->opts.kcore || record__threads_enabled(rec))
rec->data.is_dir = true;
+ if (record__threads_enabled(rec)) {
+ if (rec->opts.affinity != PERF_AFFINITY_SYS) {
+ pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n");
+ goto out_opts;
+ }
+ if (record__aio_enabled(rec)) {
+ pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n");
+ goto out_opts;
+ }
+ }
+
if (rec->opts.comp_level != 0) {
pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
rec->no_buildid = true;
@@ -2492,12 +4063,14 @@ int cmd_record(int argc, const char **argv)
!perf_can_record_switch_events()) {
ui__error("kernel does not support recording context switch events\n");
parse_options_usage(record_usage, record_options, "switch-events", 0);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_opts;
}
if (switch_output_setup(rec)) {
parse_options_usage(record_usage, record_options, "switch-output", 0);
- return -EINVAL;
+ err = -EINVAL;
+ goto out_opts;
}
if (rec->switch_output.time) {
@@ -2508,8 +4081,15 @@ int cmd_record(int argc, const char **argv)
if (rec->switch_output.num_files) {
rec->switch_output.filenames = calloc(sizeof(char *),
rec->switch_output.num_files);
- if (!rec->switch_output.filenames)
- return -EINVAL;
+ if (!rec->switch_output.filenames) {
+ err = -EINVAL;
+ goto out_opts;
+ }
+ }
+
+ if (rec->timestamp_filename && record__threads_enabled(rec)) {
+ rec->timestamp_filename = false;
+ pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
}
/*
@@ -2520,16 +4100,6 @@ int cmd_record(int argc, const char **argv)
symbol__init(NULL);
- if (rec->opts.affinity != PERF_AFFINITY_SYS) {
- rec->affinity_mask.nbits = cpu__max_cpu();
- rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
- if (!rec->affinity_mask.bits) {
- pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
- return -ENOMEM;
- }
- pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
- }
-
err = record__auxtrace_init(rec);
if (err)
goto out;
@@ -2581,10 +4151,19 @@ int cmd_record(int argc, const char **argv)
if (record.opts.overwrite)
record.opts.tail_synthesize = true;
- if (rec->evlist->core.nr_entries == 0 &&
- __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
- pr_err("Not enough memory for event selector list\n");
- goto out;
+ if (rec->evlist->core.nr_entries == 0) {
+ if (perf_pmu__has_hybrid()) {
+ err = evlist__add_default_hybrid(rec->evlist,
+ !record.opts.no_samples);
+ } else {
+ err = __evlist__add_default(rec->evlist,
+ !record.opts.no_samples);
+ }
+
+ if (err < 0) {
+ pr_err("Not enough memory for event selector list\n");
+ goto out;
+ }
}
if (rec->opts.target.tid && !rec->opts.no_inherit_set)
@@ -2610,9 +4189,27 @@ int cmd_record(int argc, const char **argv)
/* Enable ignoring missing threads when -u/-p option is defined. */
rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
+ if (evlist__fix_hybrid_cpus(rec->evlist, rec->opts.target.cpu_list)) {
+ pr_err("failed to use cpu list %s\n",
+ rec->opts.target.cpu_list);
+ goto out;
+ }
+
+ rec->opts.target.hybrid = perf_pmu__has_hybrid();
+
+ if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
+ arch__add_leaf_frame_record_opts(&rec->opts);
+
err = -ENOMEM;
- if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
- usage_with_options(record_usage, record_options);
+ if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
+ if (rec->opts.target.pid != NULL) {
+ pr_err("Couldn't create thread/CPU maps: %s\n",
+ errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
+ goto out;
+ }
+ else
+ usage_with_options(record_usage, record_options);
+ }
err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
if (err)
@@ -2626,11 +4223,33 @@ int cmd_record(int argc, const char **argv)
if (rec->opts.full_auxtrace)
rec->buildid_all = true;
+ if (rec->opts.text_poke) {
+ err = record__config_text_poke(rec->evlist);
+ if (err) {
+ pr_err("record__config_text_poke failed, error %d\n", err);
+ goto out;
+ }
+ }
+
+ if (rec->off_cpu) {
+ err = record__config_off_cpu(rec);
+ if (err) {
+ pr_err("record__config_off_cpu failed, error %d\n", err);
+ goto out;
+ }
+ }
+
if (record_opts__config(&rec->opts)) {
err = -EINVAL;
goto out;
}
+ err = record__init_thread_masks(rec);
+ if (err) {
+ pr_err("Failed to initialize parallel data streaming masks\n");
+ goto out;
+ }
+
if (rec->opts.nr_cblocks > nr_cblocks_max)
rec->opts.nr_cblocks = nr_cblocks_max;
pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
@@ -2644,10 +4263,13 @@ int cmd_record(int argc, const char **argv)
err = __cmd_record(&record, argc, argv);
out:
- bitmap_free(rec->affinity_mask.bits);
evlist__delete(rec->evlist);
symbol__exit();
auxtrace_record__free(rec->itr);
+out_opts:
+ record__free_thread_masks(rec, rec->nr_threads);
+ rec->nr_threads = 0;
+ evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
return err;
}
@@ -2655,12 +4277,7 @@ static void snapshot_sig_handler(int sig __maybe_unused)
{
struct record *rec = &record;
- if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
- trigger_hit(&auxtrace_snapshot_trigger);
- auxtrace_record__snapshot_started = 1;
- if (auxtrace_record__snapshot_start(record.itr))
- trigger_error(&auxtrace_snapshot_trigger);
- }
+ hit_auxtrace_snapshot_trigger(rec);
if (switch_output_signal(rec))
trigger_hit(&switch_output_trigger);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 26d8fc27e427..92c6797e7cba 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -47,7 +47,6 @@
#include "util/time-utils.h"
#include "util/auxtrace.h"
#include "util/units.h"
-#include "util/branch.h"
#include "util/util.h" // perf_tip()
#include "ui/ui.h"
#include "ui/progress.h"
@@ -68,11 +67,21 @@
#include <unistd.h>
#include <linux/mman.h>
+#ifdef HAVE_LIBTRACEEVENT
+#include <traceevent/event-parse.h>
+#endif
+
struct report {
struct perf_tool tool;
struct perf_session *session;
struct evswitch evswitch;
- bool use_tui, use_gtk, use_stdio;
+#ifdef HAVE_SLANG_SUPPORT
+ bool use_tui;
+#endif
+#ifdef HAVE_GTK2_SUPPORT
+ bool use_gtk;
+#endif
+ bool use_stdio;
bool show_full_info;
bool show_threads;
bool inverted_callchain;
@@ -84,6 +93,9 @@ struct report {
bool header_only;
bool nonany_branch_mode;
bool group_set;
+ bool stitch_lbr;
+ bool disable_order;
+ bool skip_empty;
int max_stack;
struct perf_read_values show_threads_values;
struct annotation_options annotation_opts;
@@ -131,9 +143,19 @@ static int report__config(const char *var, const char *value, void *cb)
if (!strcmp(var, "report.sort_order")) {
default_sort_order = strdup(value);
+ if (!default_sort_order) {
+ pr_err("Not enough memory for report.sort_order\n");
+ return -1;
+ }
+ return 0;
+ }
+
+ if (!strcmp(var, "report.skip-empty")) {
+ rep->skip_empty = perf_config_bool(var, value);
return 0;
}
+ pr_debug("%s variable unknown, ignoring...", var);
return 0;
}
@@ -211,7 +233,7 @@ static void setup_forced_leader(struct report *report,
struct evlist *evlist)
{
if (report->group_set)
- perf_evlist__force_leader(evlist);
+ evlist__force_leader(evlist);
}
static int process_feature_event(struct perf_session *session,
@@ -226,6 +248,8 @@ static int process_feature_event(struct perf_session *session,
pr_err("failed: wrong feature ID: %" PRI_lu64 "\n",
event->feat.feat_id);
return -1;
+ } else if (rep->header_only) {
+ session_done = 1;
}
/*
@@ -267,6 +291,9 @@ static int process_sample_event(struct perf_tool *tool,
return -1;
}
+ if (rep->stitch_lbr)
+ al.thread->lbr_stitch_enable = true;
+
if (symbol_conf.hide_unresolved && al.sym == NULL)
goto out_put;
@@ -292,7 +319,7 @@ static int process_sample_event(struct perf_tool *tool,
}
if (al.map != NULL)
- al.map->dso->hit = 1;
+ map__dso(al.map)->hit = 1;
if (ui__has_annotation() || rep->symbol_ipc || rep->total_cycles_mode) {
hist__account_cycles(sample->branch_stack, &al, sample,
@@ -317,10 +344,10 @@ static int process_read_event(struct perf_tool *tool,
struct report *rep = container_of(tool, struct report, tool);
if (rep->show_threads) {
- const char *name = perf_evsel__name(evsel);
+ const char *name = evsel__name(evsel);
int err = perf_read_values_add_value(&rep->show_threads_values,
event->read.pid, event->read.tid,
- evsel->idx,
+ evsel->core.idx,
name,
event->read.value);
@@ -335,16 +362,19 @@ static int process_read_event(struct perf_tool *tool,
static int report__setup_sample_type(struct report *rep)
{
struct perf_session *session = rep->session;
- u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
+ u64 sample_type = evlist__combined_sample_type(session->evlist);
bool is_pipe = perf_data__is_pipe(session->data);
+ struct evsel *evsel;
if (session->itrace_synth_opts->callchain ||
+ session->itrace_synth_opts->add_callchain ||
(!is_pipe &&
perf_header__has_feat(&session->header, HEADER_AUXTRACE) &&
!session->itrace_synth_opts->set))
sample_type |= PERF_SAMPLE_CALLCHAIN;
- if (session->itrace_synth_opts->last_branch)
+ if (session->itrace_synth_opts->last_branch ||
+ session->itrace_synth_opts->add_last_branch)
sample_type |= PERF_SAMPLE_BRANCH_STACK;
if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
@@ -389,6 +419,19 @@ static int report__setup_sample_type(struct report *rep)
}
if (sort__mode == SORT_MODE__MEMORY) {
+ /*
+ * FIXUP: prior to kernel 5.18, Arm SPE failed to set the
+ * PERF_SAMPLE_DATA_SRC bit in the sample type. For backward
+ * compatibility, set the bit if it's an old perf data file.
+ */
+ evlist__for_each_entry(session->evlist, evsel) {
+ if (strstr(evsel->name, "arm_spe") &&
+ !(sample_type & PERF_SAMPLE_DATA_SRC)) {
+ evsel->core.attr.sample_type |= PERF_SAMPLE_DATA_SRC;
+ sample_type |= PERF_SAMPLE_DATA_SRC;
+ }
+ }
+
if (!is_pipe && !(sample_type & PERF_SAMPLE_DATA_SRC)) {
ui__error("Selected --mem-mode but no mem data. "
"Did you call perf record without -d?\n");
@@ -396,20 +439,16 @@ static int report__setup_sample_type(struct report *rep)
}
}
- if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
- if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER)) {
- callchain_param.record_mode = CALLCHAIN_DWARF;
- dwarf_callchain_users = true;
- } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
- callchain_param.record_mode = CALLCHAIN_LBR;
- else
- callchain_param.record_mode = CALLCHAIN_FP;
+ callchain_param_setup(sample_type, perf_env__arch(&rep->session->header.env));
+
+ if (rep->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
+ ui__warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
+ "Please apply --call-graph lbr when recording.\n");
+ rep->stitch_lbr = false;
}
/* ??? handle more cases than just ANY? */
- if (!(perf_evlist__combined_branch_type(session->evlist) &
- PERF_SAMPLE_BRANCH_ANY))
+ if (!(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY))
rep->nonany_branch_mode = true;
#if !defined(HAVE_LIBUNWIND_SUPPORT) && !defined(HAVE_DWARF_SUPPORT)
@@ -432,7 +471,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
{
size_t ret;
char unit;
- unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ unsigned long nr_samples = hists->stats.nr_samples;
u64 nr_events = hists->stats.total_period;
struct evsel *evsel = hists_to_evsel(hists);
char buf[512];
@@ -447,10 +486,10 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
nr_events = hists->stats.total_non_filtered_period;
}
- if (perf_evsel__is_group_event(evsel)) {
+ if (evsel__is_group_event(evsel)) {
struct evsel *pos;
- perf_evsel__group_desc(evsel, buf, size);
+ evsel__group_desc(evsel, buf, size);
evname = buf;
for_each_group_member(pos, evsel) {
@@ -460,7 +499,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
nr_samples += pos_hists->stats.nr_non_filtered_samples;
nr_events += pos_hists->stats.total_non_filtered_period;
} else {
- nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ nr_samples += pos_hists->stats.nr_samples;
nr_events += pos_hists->stats.total_period;
}
}
@@ -476,8 +515,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
if (rep->time_str)
ret += fprintf(fp, " (time slices: %s)", rep->time_str);
- if (symbol_conf.show_ref_callgraph &&
- strstr(evname, "call-graph=no")) {
+ if (symbol_conf.show_ref_callgraph && evname && strstr(evname, "call-graph=no")) {
ret += fprintf(fp, ", show reference callgraph");
}
@@ -493,8 +531,7 @@ static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report
return ret + fprintf(fp, "\n#\n");
}
-static int perf_evlist__tui_block_hists_browse(struct evlist *evlist,
- struct report *rep)
+static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report *rep)
{
struct evsel *pos;
int i = 0, ret;
@@ -511,9 +548,7 @@ static int perf_evlist__tui_block_hists_browse(struct evlist *evlist,
return 0;
}
-static int perf_evlist__tty_browse_hists(struct evlist *evlist,
- struct report *rep,
- const char *help)
+static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, const char *help)
{
struct evsel *pos;
int i = 0;
@@ -525,10 +560,12 @@ static int perf_evlist__tty_browse_hists(struct evlist *evlist,
evlist__for_each_entry(evlist, pos) {
struct hists *hists = evsel__hists(pos);
- const char *evname = perf_evsel__name(pos);
+ const char *evname = evsel__name(pos);
- if (symbol_conf.event_group &&
- !perf_evsel__is_group_leader(pos))
+ if (symbol_conf.event_group && !evsel__is_group_leader(pos))
+ continue;
+
+ if (rep->skip_empty && !hists->stats.nr_samples)
continue;
hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
@@ -567,11 +604,11 @@ static void report__warn_kptr_restrict(const struct report *rep)
struct map *kernel_map = machine__kernel_map(&rep->session->machines.host);
struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;
- if (perf_evlist__exclude_kernel(rep->session->evlist))
+ if (evlist__exclude_kernel(rep->session->evlist))
return;
if (kernel_map == NULL ||
- (kernel_map->dso->hit &&
+ (map__dso(kernel_map)->hit &&
(kernel_kmap->ref_reloc_sym == NULL ||
kernel_kmap->ref_reloc_sym->addr == 0))) {
const char *desc =
@@ -596,7 +633,7 @@ static int report__gtk_browse_hists(struct report *rep, const char *help)
int (*hist_browser)(struct evlist *evlist, const char *help,
struct hist_browser_timer *timer, float min_pcnt);
- hist_browser = dlsym(perf_gtk_handle, "perf_evlist__gtk_browse_hists");
+ hist_browser = dlsym(perf_gtk_handle, "evlist__gtk_browse_hists");
if (hist_browser == NULL) {
ui__error("GTK browser not found!\n");
@@ -611,26 +648,27 @@ static int report__browse_hists(struct report *rep)
int ret;
struct perf_session *session = rep->session;
struct evlist *evlist = session->evlist;
- const char *help = perf_tip(system_path(TIPDIR));
+ char *help = NULL, *path = NULL;
- if (help == NULL) {
+ path = system_path(TIPDIR);
+ if (perf_tip(&help, path) || help == NULL) {
/* fallback for people who don't install perf ;-) */
- help = perf_tip(DOCDIR);
- if (help == NULL)
- help = "Cannot load tips.txt file, please install perf!";
+ free(path);
+ path = system_path(DOCDIR);
+ if (perf_tip(&help, path) || help == NULL)
+ help = strdup("Cannot load tips.txt file, please install perf!");
}
+ free(path);
switch (use_browser) {
case 1:
if (rep->total_cycles_mode) {
- ret = perf_evlist__tui_block_hists_browse(evlist, rep);
+ ret = evlist__tui_block_hists_browse(evlist, rep);
break;
}
- ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
- rep->min_percent,
- &session->header.env,
- true, &rep->annotation_opts);
+ ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent,
+ &session->header.env, true, &rep->annotation_opts);
/*
* Usually "ret" is the last pressed key, and we only
* care if the key notifies us to switch data file.
@@ -642,10 +680,10 @@ static int report__browse_hists(struct report *rep)
ret = report__gtk_browse_hists(rep, help);
break;
default:
- ret = perf_evlist__tty_browse_hists(evlist, rep, help);
+ ret = evlist__tty_browse_hists(evlist, rep, help);
break;
}
-
+ free(help);
return ret;
}
@@ -660,7 +698,7 @@ static int report__collapse_hists(struct report *rep)
evlist__for_each_entry(rep->session->evlist, pos) {
struct hists *hists = evsel__hists(pos);
- if (pos->idx == 0)
+ if (pos->core.idx == 0)
hists->symbol_filter_str = rep->symbol_filter_str;
hists->socket_filter = rep->socket_filter;
@@ -670,9 +708,8 @@ static int report__collapse_hists(struct report *rep)
break;
/* Non-group events are considered as leader */
- if (symbol_conf.event_group &&
- !perf_evsel__is_group_leader(pos)) {
- struct hists *leader_hists = evsel__hists(pos->leader);
+ if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
+ struct hists *leader_hists = evsel__hists(evsel__leader(pos));
hists__match(leader_hists, hists);
hists__link(leader_hists, hists);
@@ -691,8 +728,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg)
if (rep->symbol_ipc && sym && !sym->annotate2) {
struct evsel *evsel = hists_to_evsel(he->hists);
- symbol__annotate2(&he->ms, evsel,
- &annotation__default_options, NULL);
+ symbol__annotate2(&he->ms, evsel, &rep->annotation_opts, NULL);
}
return 0;
@@ -706,16 +742,50 @@ static void report__output_resort(struct report *rep)
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
evlist__for_each_entry(rep->session->evlist, pos) {
- perf_evsel__output_resort_cb(pos, &prog,
- hists__resort_cb, rep);
+ evsel__output_resort_cb(pos, &prog, hists__resort_cb, rep);
}
ui_progress__finish();
}
+static int count_sample_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct evsel *evsel,
+ struct machine *machine __maybe_unused)
+{
+ struct hists *hists = evsel__hists(evsel);
+
+ hists__inc_nr_events(hists);
+ return 0;
+}
+
+static int count_lost_samples_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine __maybe_unused)
+{
+ struct report *rep = container_of(tool, struct report, tool);
+ struct evsel *evsel;
+
+ evsel = evlist__id2evsel(rep->session->evlist, sample->id);
+ if (evsel) {
+ hists__inc_nr_lost_samples(evsel__hists(evsel),
+ event->lost_samples.lost);
+ }
+ return 0;
+}
+
+static int process_attr(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct evlist **pevlist);
+
static void stats_setup(struct report *rep)
{
memset(&rep->tool, 0, sizeof(rep->tool));
+ rep->tool.attr = process_attr;
+ rep->tool.sample = count_sample_event;
+ rep->tool.lost_samples = count_lost_samples_event;
rep->tool.no_warn = true;
}
@@ -723,7 +793,8 @@ static int stats_print(struct report *rep)
{
struct perf_session *session = rep->session;
- perf_session__fprintf_nr_events(session, stdout);
+ perf_session__fprintf_nr_events(session, stdout, rep->skip_empty);
+ evlist__fprintf_nr_events(session->evlist, stdout, rep->skip_empty);
return 0;
}
@@ -735,6 +806,7 @@ static void tasks_setup(struct report *rep)
rep->tool.mmap = perf_event__process_mmap;
rep->tool.mmap2 = perf_event__process_mmap2;
}
+ rep->tool.attr = process_attr;
rep->tool.comm = perf_event__process_comm;
rep->tool.exit = perf_event__process_exit;
rep->tool.fork = perf_event__process_fork;
@@ -772,17 +844,21 @@ static struct task *tasks_list(struct task *task, struct machine *machine)
static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp)
{
size_t printed = 0;
- struct map *map;
+ struct map_rb_node *rb_node;
+
+ maps__for_each_entry(maps, rb_node) {
+ struct map *map = rb_node->map;
+ const struct dso *dso = map__dso(map);
+ u32 prot = map__prot(map);
- maps__for_each_entry(maps, map) {
printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n",
- indent, "", map->start, map->end,
- map->prot & PROT_READ ? 'r' : '-',
- map->prot & PROT_WRITE ? 'w' : '-',
- map->prot & PROT_EXEC ? 'x' : '-',
- map->flags & MAP_SHARED ? 's' : 'p',
- map->pgoff,
- map->dso->id.ino, map->dso->name);
+ indent, "", map__start(map), map__end(map),
+ prot & PROT_READ ? 'r' : '-',
+ prot & PROT_WRITE ? 'w' : '-',
+ prot & PROT_EXEC ? 'x' : '-',
+ map__flags(map) ? 's' : 'p',
+ map__pgoff(map),
+ dso->id.ino, dso->name);
}
return printed;
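The rewritten task map dump above reads everything through the accessors (map__start(), map__prot(), map__flags(), map__pgoff(), map__dso()) instead of poking struct map directly, while keeping the /proc-style permission column. A tiny sketch of just that column formatting, with plain mmap protection bits and an assumed shared flag (illustrative helper, not the perf accessors):

#include <stdio.h>
#include <sys/mman.h>

/* Format PROT_* bits plus a shared flag into the 4-character "rwxp" column. */
static void prot_str(unsigned int prot, int shared, char out[5])
{
	out[0] = (prot & PROT_READ)  ? 'r' : '-';
	out[1] = (prot & PROT_WRITE) ? 'w' : '-';
	out[2] = (prot & PROT_EXEC)  ? 'x' : '-';
	out[3] = shared ? 's' : 'p';
	out[4] = '\0';
}

int main(void)
{
	char buf[5];

	prot_str(PROT_READ | PROT_EXEC, 0, buf);
	printf("%s\n", buf);		/* r-xp */
	prot_str(PROT_READ | PROT_WRITE, 1, buf);
	printf("%s\n", buf);		/* rw-s */
	return 0;
}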
@@ -916,6 +992,8 @@ static int __cmd_report(struct report *rep)
return ret;
}
+ evlist__check_mem_load_aux(session->evlist);
+
if (rep->stats_mode)
return stats_print(rep);
@@ -935,8 +1013,10 @@ static int __cmd_report(struct report *rep)
perf_session__fprintf_dsos(session, stdout);
if (dump_trace) {
- perf_session__fprintf_nr_events(session, stdout);
- perf_evlist__fprintf_nr_events(session->evlist, stdout);
+ perf_session__fprintf_nr_events(session, stdout,
+ rep->skip_empty);
+ evlist__fprintf_nr_events(session->evlist, stdout,
+ rep->skip_empty);
return 0;
}
}
@@ -1080,6 +1160,26 @@ parse_percent_limit(const struct option *opt, const char *str,
return 0;
}
+static int process_attr(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct evlist **pevlist)
+{
+ u64 sample_type;
+ int err;
+
+ err = perf_event__process_attr(tool, event, pevlist);
+ if (err)
+ return err;
+
+ /*
+ * Check if we need to enable callchains based
+ * on events sample_type.
+ */
+ sample_type = evlist__combined_sample_type(*pevlist);
+ callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
+ return 0;
+}
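process_attr() above re-runs callchain_param_setup() whenever new attributes arrive, e.g. from a pipe stream, so the callchain mode tracks the combined sample_type. The core decision it centralizes is the one removed from report__setup_sample_type() earlier in this diff: user registers plus a user stack dump select DWARF unwinding, a branch stack selects LBR, anything else falls back to frame pointers. A simplified sketch of that selection, with stand-in flag and enum names rather than the perf definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for PERF_SAMPLE_* bits and CALLCHAIN_* modes. */
#define SAMPLE_REGS_USER	(1u << 0)
#define SAMPLE_STACK_USER	(1u << 1)
#define SAMPLE_BRANCH_STACK	(1u << 2)

enum record_mode { MODE_FP, MODE_DWARF, MODE_LBR };

static enum record_mode pick_callchain_mode(uint32_t sample_type)
{
	if ((sample_type & SAMPLE_REGS_USER) && (sample_type & SAMPLE_STACK_USER))
		return MODE_DWARF;	/* user registers + user stack dump recorded */
	if (sample_type & SAMPLE_BRANCH_STACK)
		return MODE_LBR;	/* last-branch records available */
	return MODE_FP;			/* fall back to frame-pointer unwinding */
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_callchain_mode(SAMPLE_REGS_USER | SAMPLE_STACK_USER),
	       pick_callchain_mode(SAMPLE_BRANCH_STACK),
	       pick_callchain_mode(0));
	return 0;
}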
+
int cmd_report(int argc, const char **argv)
{
struct perf_session *session;
@@ -1110,8 +1210,10 @@ int cmd_report(int argc, const char **argv)
.fork = perf_event__process_fork,
.lost = perf_event__process_lost,
.read = process_read_event,
- .attr = perf_event__process_attr,
+ .attr = process_attr,
+#ifdef HAVE_LIBTRACEEVENT
.tracing_data = perf_event__process_tracing_data,
+#endif
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_event__process_auxtrace_info,
@@ -1124,14 +1226,17 @@ int cmd_report(int argc, const char **argv)
.max_stack = PERF_MAX_STACK_DEPTH,
.pretty_printing_style = "normal",
.socket_filter = -1,
- .annotation_opts = annotation__default_options,
+ .skip_empty = true,
};
+ char *sort_order_help = sort_help("sort by key(s):");
+ char *field_order_help = sort_help("output field(s): overhead period sample ");
+ const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
const struct option options[] = {
OPT_STRING('i', "input", &input_name, "file",
"input file name"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any message"),
+ OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
OPT_BOOLEAN(0, "stats", &report.stats_mode, "Display event stats"),
@@ -1152,17 +1257,21 @@ int cmd_report(int argc, const char **argv)
"Show per-thread event counters"),
OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
"pretty printing style key: normal raw"),
+#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
+#endif
+#ifdef HAVE_GTK2_SUPPORT
OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
+#endif
OPT_BOOLEAN(0, "stdio", &report.use_stdio,
"Use the stdio interface"),
OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
OPT_BOOLEAN(0, "header-only", &report.header_only,
"Show only data header."),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
- sort_help("sort by key(s):")),
+ sort_order_help),
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
- sort_help("output field(s): overhead period sample ")),
+ field_order_help),
OPT_BOOLEAN(0, "show-cpu-utilization", &symbol_conf.show_cpu_utilization,
"Show sample percentage for different cpu modes"),
OPT_BOOLEAN_FLAG(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
@@ -1218,7 +1327,7 @@ int cmd_report(int argc, const char **argv)
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw,
"Display raw encoding of assembly instructions (default)"),
- OPT_STRING('M', "disassembler-style", &report.annotation_opts.disassembler_style, "disassembler style",
+ OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING(0, "prefix", &report.annotation_opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
@@ -1237,8 +1346,10 @@ int cmd_report(int argc, const char **argv)
parse_branch_mode),
OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
"add last branch records to call history"),
- OPT_STRING(0, "objdump", &report.annotation_opts.objdump_path, "path",
+ OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
+ OPT_STRING(0, "addr2line", &addr2line_path, "path",
+ "addr2line binary to use for line numbers"),
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
"Disable symbol demangling"),
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
@@ -1257,6 +1368,8 @@ int cmd_report(int argc, const char **argv)
"Show full source file name path for source lines"),
OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
"Show callgraph from reference event"),
+ OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
+ "Enable LBR callgraph stitching approach"),
OPT_INTEGER(0, "socket-filter", &report.socket_filter,
"only show processor socket that match with this filter"),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
@@ -1280,6 +1393,10 @@ int cmd_report(int argc, const char **argv)
OPTS_EVSWITCH(&report.evswitch),
OPT_BOOLEAN(0, "total-cycles", &report.total_cycles_mode,
"Sort all blocks by 'Sampled Cycles%'"),
+ OPT_BOOLEAN(0, "disable-order", &report.disable_order,
+ "Disable raw trace ordering"),
+ OPT_BOOLEAN(0, "skip-empty", &report.skip_empty,
+ "Do not display empty (or dummy) events in the output"),
OPT_END()
};
struct perf_data data = {
@@ -1289,11 +1406,13 @@ int cmd_report(int argc, const char **argv)
char sort_tmp[128];
if (ret < 0)
- return ret;
+ goto exit;
+
+ annotation_options__init(&report.annotation_opts);
ret = perf_config(report__config, &report);
if (ret)
- return ret;
+ goto exit;
argc = parse_options(argc, argv, options, report_usage, 0);
if (argc) {
@@ -1307,32 +1426,46 @@ int cmd_report(int argc, const char **argv)
report.symbol_filter_str = argv[0];
}
- if (annotate_check_args(&report.annotation_opts) < 0)
- return -EINVAL;
+ if (disassembler_style) {
+ report.annotation_opts.disassembler_style = strdup(disassembler_style);
+ if (!report.annotation_opts.disassembler_style)
+ return -ENOMEM;
+ }
+ if (objdump_path) {
+ report.annotation_opts.objdump_path = strdup(objdump_path);
+ if (!report.annotation_opts.objdump_path)
+ return -ENOMEM;
+ }
+ if (addr2line_path) {
+ symbol_conf.addr2line_path = strdup(addr2line_path);
+ if (!symbol_conf.addr2line_path)
+ return -ENOMEM;
+ }
+
+ if (annotate_check_args(&report.annotation_opts) < 0) {
+ ret = -EINVAL;
+ goto exit;
+ }
if (report.mmaps_mode)
report.tasks_mode = true;
+ if (dump_trace && report.disable_order)
+ report.tool.ordered_events = false;
+
if (quiet)
perf_quiet_option();
- if (symbol_conf.vmlinux_name &&
- access(symbol_conf.vmlinux_name, R_OK)) {
- pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
- return -EINVAL;
- }
- if (symbol_conf.kallsyms_name &&
- access(symbol_conf.kallsyms_name, R_OK)) {
- pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);
- return -EINVAL;
- }
+ ret = symbol__validate_sym_arguments();
+ if (ret)
+ goto exit;
if (report.inverted_callchain)
callchain_param.order = ORDER_CALLER;
if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
callchain_param.order = ORDER_CALLER;
- if (itrace_synth_opts.callchain &&
+ if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
(int)itrace_synth_opts.callchain_sz > report.max_stack)
report.max_stack = itrace_synth_opts.callchain_sz;
@@ -1347,13 +1480,15 @@ int cmd_report(int argc, const char **argv)
data.force = symbol_conf.force;
repeat:
- session = perf_session__new(&data, false, &report.tool);
- if (IS_ERR(session))
- return PTR_ERR(session);
+ session = perf_session__new(&data, &report.tool);
+ if (IS_ERR(session)) {
+ ret = PTR_ERR(session);
+ goto exit;
+ }
ret = evswitch__init(&report.evswitch, session->evlist, stderr);
if (ret)
- return ret;
+ goto exit;
if (zstd_init(&(session->zstd_data), 0) < 0)
pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
@@ -1369,18 +1504,18 @@ repeat:
has_br_stack = perf_header__has_feat(&session->header,
HEADER_BRANCH_STACK);
- if (perf_evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
+ if (evlist__combined_sample_type(session->evlist) & PERF_SAMPLE_STACK_USER)
has_br_stack = false;
setup_forced_leader(&report, session->evlist);
- if (symbol_conf.group_sort_idx && !session->evlist->nr_groups) {
+ if (symbol_conf.group_sort_idx && evlist__nr_groups(session->evlist) == 0) {
parse_options_usage(NULL, options, "group-sort-idx", 0);
ret = -EINVAL;
goto error;
}
- if (itrace_synth_opts.last_branch)
+ if (itrace_synth_opts.last_branch || itrace_synth_opts.add_last_branch)
has_br_stack = true;
if (has_br_stack && branch_call_mode)
@@ -1400,7 +1535,7 @@ repeat:
}
if (branch_call_mode) {
callchain_param.key = CCKEY_ADDRESS;
- callchain_param.branch_callstack = 1;
+ callchain_param.branch_callstack = true;
symbol_conf.use_callchain = true;
callchain_register_param(&callchain_param);
if (sort_order == NULL)
@@ -1432,10 +1567,14 @@ repeat:
if (report.use_stdio)
use_browser = 0;
+#ifdef HAVE_SLANG_SUPPORT
else if (report.use_tui)
use_browser = 1;
+#endif
+#ifdef HAVE_GTK2_SUPPORT
else if (report.use_gtk)
use_browser = 2;
+#endif
/* Force tty output for header output and per-thread stat. */
if (report.header || report.header_only || report.show_threads)
@@ -1495,6 +1634,13 @@ repeat:
perf_session__fprintf_info(session, stdout,
report.show_full_info);
if (report.header_only) {
+ if (data.is_pipe) {
+ /*
+ * We need to process the first few records,
+ * which contain PERF_RECORD_HEADER_FEATURE.
+ */
+ perf_session__process_events(session);
+ }
ret = 0;
goto error;
}
@@ -1548,6 +1694,7 @@ repeat:
report.range_num);
}
+#ifdef HAVE_LIBTRACEEVENT
if (session->tevent.pevent &&
tep_set_function_resolver(session->tevent.pevent,
machine__resolve_kernel_addr,
@@ -1556,7 +1703,7 @@ repeat:
__func__);
return -1;
}
-
+#endif
sort__setup_elide(stdout);
ret = __cmd_report(&report);
@@ -1581,5 +1728,9 @@ error:
zstd_fini(&(session->zstd_data));
perf_session__delete(session);
+exit:
+ annotation_options__exit(&report.annotation_opts);
+ free(sort_order_help);
+ free(field_order_help);
return ret;
}
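
The builtin-report.c hunks above move cmd_report() to duplicated option strings (strdup() with an -ENOMEM check) and a single exit label that releases the annotation options and the heap-allocated help strings. A minimal standalone sketch of that ownership-and-cleanup pattern follows; the struct and function names are invented for illustration and this is not perf code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opts {
	char *disassembler_style;	/* owned copies, freed at the exit label */
	char *objdump_path;
};

static int run(const char *style_arg, const char *objdump_arg)
{
	struct opts opts = { 0 };
	char *help = strdup("sort by key(s)");	/* allocated up front ... */
	int ret = -1;

	if (!help)
		return -ENOMEM;

	if (style_arg) {
		opts.disassembler_style = strdup(style_arg);
		if (!opts.disassembler_style) {
			ret = -ENOMEM;
			goto exit;	/* ... so every later error path funnels here */
		}
	}
	if (objdump_arg) {
		opts.objdump_path = strdup(objdump_arg);
		if (!opts.objdump_path) {
			ret = -ENOMEM;
			goto exit;
		}
	}

	printf("would run report with style=%s objdump=%s\n",
	       opts.disassembler_style ? opts.disassembler_style : "(default)",
	       opts.objdump_path ? opts.objdump_path : "(default)");
	ret = 0;
exit:
	free(opts.disassembler_style);
	free(opts.objdump_path);
	free(help);
	return ret;
}

int main(void)
{
	return run("intel", NULL) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The point of the shared label is that every error path frees the same set of allocations, so adding a new early return cannot leak the help strings.
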
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 82fcc2c15fe4..cc4ba506e119 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
-#include "perf.h"
#include "perf-sys.h"
#include "util/cpumap.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
+#include "util/mutex.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
@@ -26,6 +26,7 @@
#include "util/debug.h"
#include "util/event.h"
+#include "util/util.h"
#include <linux/kernel.h>
#include <linux/log2.h>
@@ -130,7 +131,8 @@ struct work_atoms {
struct thread *thread;
struct rb_node node;
u64 max_lat;
- u64 max_lat_at;
+ u64 max_lat_start;
+ u64 max_lat_end;
u64 total_lat;
u64 nb_atoms;
u64 total_runtime;
@@ -166,7 +168,7 @@ struct trace_sched_handler {
struct perf_sched_map {
DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
- int *comp_cpus;
+ struct perf_cpu *comp_cpus;
bool comp;
struct perf_thread_map *color_pids;
const char *color_pids_str;
@@ -183,14 +185,14 @@ struct perf_sched {
struct task_desc **pid_to_task;
struct task_desc **tasks;
const struct trace_sched_handler *tp_handler;
- pthread_mutex_t start_work_mutex;
- pthread_mutex_t work_done_wait_mutex;
+ struct mutex start_work_mutex;
+ struct mutex work_done_wait_mutex;
int profile_cpu;
/*
* Track the current task - that way we can know whether there's any
* weird events, such as a task being switched away that is not current.
*/
- int max_cpu;
+ struct perf_cpu max_cpu;
u32 curr_pid[MAX_CPUS];
struct thread *curr_thread[MAX_CPUS];
char next_shortname1;
@@ -244,6 +246,7 @@ struct perf_sched {
const char *time_str;
struct perf_time_interval ptime;
struct perf_time_interval hist_time;
+ volatile bool thread_funcs_exit;
};
/* per thread run time data */
@@ -631,35 +634,34 @@ static void *thread_func(void *ctx)
prctl(PR_SET_NAME, comm2);
if (fd < 0)
return NULL;
-again:
- ret = sem_post(&this_task->ready_for_work);
- BUG_ON(ret);
- ret = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(ret);
- ret = pthread_mutex_unlock(&sched->start_work_mutex);
- BUG_ON(ret);
- cpu_usage_0 = get_cpu_usage_nsec_self(fd);
+ while (!sched->thread_funcs_exit) {
+ ret = sem_post(&this_task->ready_for_work);
+ BUG_ON(ret);
+ mutex_lock(&sched->start_work_mutex);
+ mutex_unlock(&sched->start_work_mutex);
- for (i = 0; i < this_task->nr_events; i++) {
- this_task->curr_event = i;
- perf_sched__process_event(sched, this_task->atoms[i]);
- }
+ cpu_usage_0 = get_cpu_usage_nsec_self(fd);
- cpu_usage_1 = get_cpu_usage_nsec_self(fd);
- this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
- ret = sem_post(&this_task->work_done_sem);
- BUG_ON(ret);
+ for (i = 0; i < this_task->nr_events; i++) {
+ this_task->curr_event = i;
+ perf_sched__process_event(sched, this_task->atoms[i]);
+ }
- ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
- ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
+ cpu_usage_1 = get_cpu_usage_nsec_self(fd);
+ this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
+ ret = sem_post(&this_task->work_done_sem);
+ BUG_ON(ret);
- goto again;
+ mutex_lock(&sched->work_done_wait_mutex);
+ mutex_unlock(&sched->work_done_wait_mutex);
+ }
+ return NULL;
}
static void create_tasks(struct perf_sched *sched)
+ EXCLUSIVE_LOCK_FUNCTION(sched->start_work_mutex)
+ EXCLUSIVE_LOCK_FUNCTION(sched->work_done_wait_mutex)
{
struct task_desc *task;
pthread_attr_t attr;
@@ -669,12 +671,10 @@ static void create_tasks(struct perf_sched *sched)
err = pthread_attr_init(&attr);
BUG_ON(err);
err = pthread_attr_setstacksize(&attr,
- (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
- BUG_ON(err);
- err = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(err);
- err = pthread_mutex_lock(&sched->work_done_wait_mutex);
+ (size_t) max(16 * 1024, (int)PTHREAD_STACK_MIN));
BUG_ON(err);
+ mutex_lock(&sched->start_work_mutex);
+ mutex_lock(&sched->work_done_wait_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
struct sched_thread_parms *parms = malloc(sizeof(*parms));
BUG_ON(parms == NULL);
@@ -690,7 +690,30 @@ static void create_tasks(struct perf_sched *sched)
}
}
+static void destroy_tasks(struct perf_sched *sched)
+ UNLOCK_FUNCTION(sched->start_work_mutex)
+ UNLOCK_FUNCTION(sched->work_done_wait_mutex)
+{
+ struct task_desc *task;
+ unsigned long i;
+ int err;
+
+ mutex_unlock(&sched->start_work_mutex);
+ mutex_unlock(&sched->work_done_wait_mutex);
+ /* Get rid of threads so they won't be upset by mutex destruction */
+ for (i = 0; i < sched->nr_tasks; i++) {
+ task = sched->tasks[i];
+ err = pthread_join(task->thread, NULL);
+ BUG_ON(err);
+ sem_destroy(&task->sleep_sem);
+ sem_destroy(&task->ready_for_work);
+ sem_destroy(&task->work_done_sem);
+ }
+}
+
static void wait_for_tasks(struct perf_sched *sched)
+ EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
+ EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
u64 cpu_usage_0, cpu_usage_1;
struct task_desc *task;
@@ -698,7 +721,7 @@ static void wait_for_tasks(struct perf_sched *sched)
sched->start_time = get_nsecs();
sched->cpu_usage = 0;
- pthread_mutex_unlock(&sched->work_done_wait_mutex);
+ mutex_unlock(&sched->work_done_wait_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
@@ -706,12 +729,11 @@ static void wait_for_tasks(struct perf_sched *sched)
BUG_ON(ret);
sem_init(&task->ready_for_work, 0, 0);
}
- ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
+ mutex_lock(&sched->work_done_wait_mutex);
cpu_usage_0 = get_cpu_usage_nsec_parent();
- pthread_mutex_unlock(&sched->start_work_mutex);
+ mutex_unlock(&sched->start_work_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
@@ -733,8 +755,7 @@ static void wait_for_tasks(struct perf_sched *sched)
sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
sched->parent_cpu_usage)/sched->replay_repeat;
- ret = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(ret);
+ mutex_lock(&sched->start_work_mutex);
for (i = 0; i < sched->nr_tasks; i++) {
task = sched->tasks[i];
@@ -744,6 +765,8 @@ static void wait_for_tasks(struct perf_sched *sched)
}
static void run_one_test(struct perf_sched *sched)
+ EXCLUSIVE_LOCKS_REQUIRED(sched->work_done_wait_mutex)
+ EXCLUSIVE_LOCKS_REQUIRED(sched->start_work_mutex)
{
u64 T0, T1, delta, avg_delta, fluct;
@@ -811,8 +834,8 @@ replay_wakeup_event(struct perf_sched *sched,
struct evsel *evsel, struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
- const char *comm = perf_evsel__strval(evsel, sample, "comm");
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ const char *comm = evsel__strval(evsel, sample, "comm");
+ const u32 pid = evsel__intval(evsel, sample, "pid");
struct task_desc *waker, *wakee;
if (verbose > 0) {
@@ -833,11 +856,11 @@ static int replay_switch_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine __maybe_unused)
{
- const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
- *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
- const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
- next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
+ const char *prev_comm = evsel__strval(evsel, sample, "prev_comm"),
+ *next_comm = evsel__strval(evsel, sample, "next_comm");
+ const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = evsel__intval(evsel, sample, "next_pid");
+ const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
struct task_desc *prev, __maybe_unused *next;
u64 timestamp0, timestamp = sample->time;
int cpu = sample->cpu;
@@ -1096,7 +1119,8 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
atoms->total_lat += delta;
if (delta > atoms->max_lat) {
atoms->max_lat = delta;
- atoms->max_lat_at = timestamp;
+ atoms->max_lat_start = atom->wake_up_time;
+ atoms->max_lat_end = timestamp;
}
atoms->nb_atoms++;
}
@@ -1106,9 +1130,9 @@ static int latency_switch_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine)
{
- const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
- next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
+ const u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = evsel__intval(evsel, sample, "next_pid");
+ const u64 prev_state = evsel__intval(evsel, sample, "prev_state");
struct work_atoms *out_events, *in_events;
struct thread *sched_out, *sched_in;
u64 timestamp0, timestamp = sample->time;
@@ -1176,8 +1200,8 @@ static int latency_runtime_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine)
{
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
- const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
+ const u32 pid = evsel__intval(evsel, sample, "pid");
+ const u64 runtime = evsel__intval(evsel, sample, "runtime");
struct thread *thread = machine__findnew_thread(machine, -1, pid);
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
u64 timestamp = sample->time;
@@ -1211,7 +1235,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine)
{
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ const u32 pid = evsel__intval(evsel, sample, "pid");
struct work_atoms *atoms;
struct work_atom *atom;
struct thread *wakee;
@@ -1272,7 +1296,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
struct perf_sample *sample,
struct machine *machine)
{
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ const u32 pid = evsel__intval(evsel, sample, "pid");
u64 timestamp = sample->time;
struct work_atoms *atoms;
struct work_atom *atom;
@@ -1322,7 +1346,7 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
int i;
int ret;
u64 avg;
- char max_lat_at[32];
+ char max_lat_start[32], max_lat_end[32];
if (!work_list->nb_atoms)
return;
@@ -1344,13 +1368,14 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
printf(" ");
avg = work_list->total_lat / work_list->nb_atoms;
- timestamp__scnprintf_usec(work_list->max_lat_at, max_lat_at, sizeof(max_lat_at));
+ timestamp__scnprintf_usec(work_list->max_lat_start, max_lat_start, sizeof(max_lat_start));
+ timestamp__scnprintf_usec(work_list->max_lat_end, max_lat_end, sizeof(max_lat_end));
- printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13s s\n",
+ printf("|%11.3f ms |%9" PRIu64 " | avg:%8.3f ms | max:%8.3f ms | max start: %12s s | max end: %12s s\n",
(double)work_list->total_runtime / NSEC_PER_MSEC,
work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
(double)work_list->max_lat / NSEC_PER_MSEC,
- max_lat_at);
+ max_lat_start, max_lat_end);
}
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
@@ -1491,6 +1516,14 @@ static int process_sched_wakeup_event(struct perf_tool *tool,
return 0;
}
+static int process_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return 0;
+}
+
union map_priv {
void *ptr;
bool color;
@@ -1526,34 +1559,37 @@ map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid
static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
struct perf_sample *sample, struct machine *machine)
{
- const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
struct thread *sched_in;
struct thread_runtime *tr;
int new_shortname;
u64 timestamp0, timestamp = sample->time;
s64 delta;
- int i, this_cpu = sample->cpu;
+ int i;
+ struct perf_cpu this_cpu = {
+ .cpu = sample->cpu,
+ };
int cpus_nr;
bool new_cpu = false;
const char *color = PERF_COLOR_NORMAL;
char stimestamp[32];
- BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
+ BUG_ON(this_cpu.cpu >= MAX_CPUS || this_cpu.cpu < 0);
- if (this_cpu > sched->max_cpu)
+ if (this_cpu.cpu > sched->max_cpu.cpu)
sched->max_cpu = this_cpu;
if (sched->map.comp) {
cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
- if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
+ if (!__test_and_set_bit(this_cpu.cpu, sched->map.comp_cpus_mask)) {
sched->map.comp_cpus[cpus_nr++] = this_cpu;
new_cpu = true;
}
} else
- cpus_nr = sched->max_cpu;
+ cpus_nr = sched->max_cpu.cpu;
- timestamp0 = sched->cpu_last_switched[this_cpu];
- sched->cpu_last_switched[this_cpu] = timestamp;
+ timestamp0 = sched->cpu_last_switched[this_cpu.cpu];
+ sched->cpu_last_switched[this_cpu.cpu] = timestamp;
if (timestamp0)
delta = timestamp - timestamp0;
else
@@ -1574,7 +1610,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
return -1;
}
- sched->curr_thread[this_cpu] = thread__get(sched_in);
+ sched->curr_thread[this_cpu.cpu] = thread__get(sched_in);
printf(" ");
@@ -1605,8 +1641,10 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
}
for (i = 0; i < cpus_nr; i++) {
- int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
- struct thread *curr_thread = sched->curr_thread[cpu];
+ struct perf_cpu cpu = {
+ .cpu = sched->map.comp ? sched->map.comp_cpus[i].cpu : i,
+ };
+ struct thread *curr_thread = sched->curr_thread[cpu.cpu];
struct thread_runtime *curr_tr;
const char *pid_color = color;
const char *cpu_color = color;
@@ -1614,19 +1652,19 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
if (curr_thread && thread__has_color(curr_thread))
pid_color = COLOR_PIDS;
- if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
+ if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, cpu))
continue;
- if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
+ if (sched->map.color_cpus && perf_cpu_map__has(sched->map.color_cpus, cpu))
cpu_color = COLOR_CPUS;
- if (cpu != this_cpu)
+ if (cpu.cpu != this_cpu.cpu)
color_fprintf(stdout, color, " ");
else
color_fprintf(stdout, cpu_color, "*");
- if (sched->curr_thread[cpu]) {
- curr_tr = thread__get_runtime(sched->curr_thread[cpu]);
+ if (sched->curr_thread[cpu.cpu]) {
+ curr_tr = thread__get_runtime(sched->curr_thread[cpu.cpu]);
if (curr_tr == NULL) {
thread__put(sched_in);
return -1;
@@ -1636,7 +1674,7 @@ static int map_switch_event(struct perf_sched *sched, struct evsel *evsel,
color_fprintf(stdout, color, " ");
}
- if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
+ if (sched->map.cpus && !perf_cpu_map__has(sched->map.cpus, this_cpu))
goto out;
timestamp__scnprintf_usec(timestamp, stimestamp, sizeof(stimestamp));
@@ -1670,8 +1708,8 @@ static int process_sched_switch_event(struct perf_tool *tool,
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
int this_cpu = sample->cpu, err = 0;
- u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
- next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+ u32 prev_pid = evsel__intval(evsel, sample, "prev_pid"),
+ next_pid = evsel__intval(evsel, sample, "next_pid");
if (sched->curr_pid[this_cpu] != (u32)-1) {
/*
@@ -1709,7 +1747,7 @@ static int perf_sched__process_fork_event(struct perf_tool *tool,
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- /* run the fork event through the perf machineruy */
+ /* run the fork event through the perf machinery */
perf_event__process_fork(tool, event, sample, machine);
/* and then run additional processing needed for this command */
@@ -1786,10 +1824,11 @@ static int perf_sched__process_comm(struct perf_tool *tool __maybe_unused,
static int perf_sched__read_events(struct perf_sched *sched)
{
- const struct evsel_str_handler handlers[] = {
+ struct evsel_str_handler handlers[] = {
{ "sched:sched_switch", process_sched_switch_event, },
{ "sched:sched_stat_runtime", process_sched_runtime_event, },
{ "sched:sched_wakeup", process_sched_wakeup_event, },
+ { "sched:sched_waking", process_sched_wakeup_event, },
{ "sched:sched_wakeup_new", process_sched_wakeup_event, },
{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
};
@@ -1801,7 +1840,7 @@ static int perf_sched__read_events(struct perf_sched *sched)
};
int rc = -1;
- session = perf_session__new(&data, false, &sched->tool);
+ session = perf_session__new(&data, &sched->tool);
if (IS_ERR(session)) {
pr_debug("Error creating perf session");
return PTR_ERR(session);
@@ -1809,6 +1848,10 @@ static int perf_sched__read_events(struct perf_sched *sched)
symbol__init(&session->header.env);
+ /* prefer sched_waking if it is captured */
+ if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
+ handlers[2].handler = process_sched_wakeup_ignore;
+
if (perf_session__set_tracepoints_handlers(session, handlers))
goto out_delete;
@@ -1848,7 +1891,7 @@ static inline void print_sched_time(unsigned long long nsecs, int width)
* returns runtime data for event, allocating memory for it the
* first time it is used.
*/
-static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)
+static struct evsel_runtime *evsel__get_runtime(struct evsel *evsel)
{
struct evsel_runtime *r = evsel->priv;
@@ -1863,10 +1906,9 @@ static struct evsel_runtime *perf_evsel__get_runtime(struct evsel *evsel)
/*
* save last time event was seen per cpu
*/
-static void perf_evsel__save_time(struct evsel *evsel,
- u64 timestamp, u32 cpu)
+static void evsel__save_time(struct evsel *evsel, u64 timestamp, u32 cpu)
{
- struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
+ struct evsel_runtime *r = evsel__get_runtime(evsel);
if (r == NULL)
return;
@@ -1890,9 +1932,9 @@ static void perf_evsel__save_time(struct evsel *evsel,
}
/* returns last time this event was seen on the given cpu */
-static u64 perf_evsel__get_time(struct evsel *evsel, u32 cpu)
+static u64 evsel__get_time(struct evsel *evsel, u32 cpu)
{
- struct evsel_runtime *r = perf_evsel__get_runtime(evsel);
+ struct evsel_runtime *r = evsel__get_runtime(evsel);
if ((r == NULL) || (r->last_time == NULL) || (cpu >= r->ncpu))
return 0;
@@ -1927,7 +1969,7 @@ static char *timehist_get_commstr(struct thread *thread)
static void timehist_header(struct perf_sched *sched)
{
- u32 ncpus = sched->max_cpu + 1;
+ u32 ncpus = sched->max_cpu.cpu + 1;
u32 i, j;
printf("%15s %6s ", "time", "cpu");
@@ -2004,9 +2046,9 @@ static void timehist_print_sample(struct perf_sched *sched,
u64 t, int state)
{
struct thread_runtime *tr = thread__priv(thread);
- const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
- const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- u32 max_cpus = sched->max_cpu + 1;
+ const char *next_comm = evsel__strval(evsel, sample, "next_comm");
+ const u32 next_pid = evsel__intval(evsel, sample, "next_pid");
+ u32 max_cpus = sched->max_cpu.cpu + 1;
char tstr[64];
char nstr[30];
u64 wait_time;
@@ -2136,8 +2178,8 @@ static bool is_idle_sample(struct perf_sample *sample,
struct evsel *evsel)
{
/* pid 0 == swapper == idle task */
- if (strcmp(perf_evsel__name(evsel), "sched:sched_switch") == 0)
- return perf_evsel__intval(evsel, sample, "prev_pid") == 0;
+ if (strcmp(evsel__name(evsel), "sched:sched_switch") == 0)
+ return evsel__intval(evsel, sample, "prev_pid") == 0;
return sample->pid == 0;
}
@@ -2334,7 +2376,7 @@ static struct thread *timehist_get_thread(struct perf_sched *sched,
itr->last_thread = thread;
/* copy task callchain when entering to idle */
- if (perf_evsel__intval(evsel, sample, "next_pid") == 0)
+ if (evsel__intval(evsel, sample, "next_pid") == 0)
save_idle_callchain(sched, itr, sample);
}
}
@@ -2355,10 +2397,10 @@ static bool timehist_skip_sample(struct perf_sched *sched,
}
if (sched->idle_hist) {
- if (strcmp(perf_evsel__name(evsel), "sched:sched_switch"))
+ if (strcmp(evsel__name(evsel), "sched:sched_switch"))
rc = true;
- else if (perf_evsel__intval(evsel, sample, "prev_pid") != 0 &&
- perf_evsel__intval(evsel, sample, "next_pid") != 0)
+ else if (evsel__intval(evsel, sample, "prev_pid") != 0 &&
+ evsel__intval(evsel, sample, "next_pid") != 0)
rc = true;
}
@@ -2387,7 +2429,7 @@ static void timehist_print_wakeup_event(struct perf_sched *sched,
timestamp__scnprintf_usec(sample->time, tstr, sizeof(tstr));
printf("%15s [%04d] ", tstr, sample->cpu);
if (sched->show_cpu_visual)
- printf(" %*s ", sched->max_cpu + 1, "");
+ printf(" %*s ", sched->max_cpu.cpu + 1, "");
printf(" %-*s ", comm_width, timehist_get_commstr(thread));
@@ -2399,6 +2441,15 @@ static void timehist_print_wakeup_event(struct perf_sched *sched,
printf("\n");
}
+static int timehist_sched_wakeup_ignore(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ return 0;
+}
+
static int timehist_sched_wakeup_event(struct perf_tool *tool,
union perf_event *event __maybe_unused,
struct evsel *evsel,
@@ -2409,7 +2460,7 @@ static int timehist_sched_wakeup_event(struct perf_tool *tool,
struct thread *thread;
struct thread_runtime *tr = NULL;
/* want pid of awakened task not pid in sample */
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ const u32 pid = evsel__intval(evsel, sample, "pid");
thread = machine__findnew_thread(machine, 0, pid);
if (thread == NULL)
@@ -2438,15 +2489,15 @@ static void timehist_print_migration_event(struct perf_sched *sched,
{
struct thread *thread;
char tstr[64];
- u32 max_cpus = sched->max_cpu + 1;
+ u32 max_cpus;
u32 ocpu, dcpu;
if (sched->summary_only)
return;
- max_cpus = sched->max_cpu + 1;
- ocpu = perf_evsel__intval(evsel, sample, "orig_cpu");
- dcpu = perf_evsel__intval(evsel, sample, "dest_cpu");
+ max_cpus = sched->max_cpu.cpu + 1;
+ ocpu = evsel__intval(evsel, sample, "orig_cpu");
+ dcpu = evsel__intval(evsel, sample, "dest_cpu");
thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL)
@@ -2493,7 +2544,7 @@ static int timehist_migrate_task_event(struct perf_tool *tool,
struct thread *thread;
struct thread_runtime *tr = NULL;
/* want pid of migrated task not pid in sample */
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
+ const u32 pid = evsel__intval(evsel, sample, "pid");
thread = machine__findnew_thread(machine, 0, pid);
if (thread == NULL)
@@ -2524,8 +2575,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
struct thread_runtime *tr = NULL;
u64 tprev, t = sample->time;
int rc = 0;
- int state = perf_evsel__intval(evsel, sample, "prev_state");
-
+ int state = evsel__intval(evsel, sample, "prev_state");
if (machine__resolve(machine, &al, sample) < 0) {
pr_err("problem processing %d event. skipping it\n",
@@ -2549,7 +2599,7 @@ static int timehist_sched_change_event(struct perf_tool *tool,
goto out;
}
- tprev = perf_evsel__get_time(evsel, sample->cpu);
+ tprev = evsel__get_time(evsel, sample->cpu);
/*
* If start time given:
@@ -2577,7 +2627,8 @@ static int timehist_sched_change_event(struct perf_tool *tool,
}
if (!sched->idle_hist || thread->tid == 0) {
- timehist_update_runtime_stats(tr, t, tprev);
+ if (!cpu_list || test_bit(sample->cpu, cpu_bitmap))
+ timehist_update_runtime_stats(tr, t, tprev);
if (sched->idle_hist) {
struct idle_thread_runtime *itr = (void *)tr;
@@ -2632,7 +2683,7 @@ out:
tr->ready_to_run = 0;
}
- perf_evsel__save_time(evsel, sample->time, sample->cpu);
+ evsel__save_time(evsel, sample->time, sample->cpu);
return rc;
}
@@ -2850,6 +2901,9 @@ static void timehist_print_summary(struct perf_sched *sched,
printf("\nIdle stats:\n");
for (i = 0; i < idle_max_cpu; ++i) {
+ if (cpu_list && !test_bit(i, cpu_bitmap))
+ continue;
+
t = idle_threads[i];
if (!t)
continue;
@@ -2904,7 +2958,7 @@ static void timehist_print_summary(struct perf_sched *sched,
printf(" Total scheduling time (msec): ");
print_sched_time(hist_time, 2);
- printf(" (x %d)\n", sched->max_cpu);
+ printf(" (x %d)\n", sched->max_cpu.cpu);
}
typedef int (*sched_handler)(struct perf_tool *tool,
@@ -2921,9 +2975,11 @@ static int perf_timehist__process_sample(struct perf_tool *tool,
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
int err = 0;
- int this_cpu = sample->cpu;
+ struct perf_cpu this_cpu = {
+ .cpu = sample->cpu,
+ };
- if (this_cpu > sched->max_cpu)
+ if (this_cpu.cpu > sched->max_cpu.cpu)
sched->max_cpu = this_cpu;
if (evsel->handler != NULL) {
@@ -2942,7 +2998,7 @@ static int timehist_check_attr(struct perf_sched *sched,
struct evsel_runtime *er;
list_for_each_entry(evsel, &evlist->core.entries, core.node) {
- er = perf_evsel__get_runtime(evsel);
+ er = evsel__get_runtime(evsel);
if (er == NULL) {
pr_err("Failed to allocate memory for evsel runtime data\n");
return -1;
@@ -2960,9 +3016,10 @@ static int timehist_check_attr(struct perf_sched *sched,
static int perf_sched__timehist(struct perf_sched *sched)
{
- const struct evsel_str_handler handlers[] = {
+ struct evsel_str_handler handlers[] = {
{ "sched:sched_switch", timehist_sched_switch_event, },
{ "sched:sched_wakeup", timehist_sched_wakeup_event, },
+ { "sched:sched_waking", timehist_sched_wakeup_event, },
{ "sched:sched_wakeup_new", timehist_sched_wakeup_event, },
};
const struct evsel_str_handler migrate_handlers[] = {
@@ -2996,7 +3053,7 @@ static int perf_sched__timehist(struct perf_sched *sched)
symbol_conf.use_callchain = sched->show_callchain;
- session = perf_session__new(&data, false, &sched->tool);
+ session = perf_session__new(&data, &sched->tool);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -3020,13 +3077,16 @@ static int perf_sched__timehist(struct perf_sched *sched)
setup_pager();
+ /* prefer sched_waking if it is captured */
+ if (evlist__find_tracepoint_by_name(session->evlist, "sched:sched_waking"))
+ handlers[1].handler = timehist_sched_wakeup_ignore;
+
/* setup per-evsel handlers */
if (perf_session__set_tracepoints_handlers(session, handlers))
goto out;
/* sched_switch event at a minimum needs to exist */
- if (!perf_evlist__find_tracepoint_by_name(session->evlist,
- "sched:sched_switch")) {
+ if (!evlist__find_tracepoint_by_name(session->evlist, "sched:sched_switch")) {
pr_err("No sched_switch events found. Have you run 'perf sched record'?\n");
goto out;
}
@@ -3036,10 +3096,10 @@ static int perf_sched__timehist(struct perf_sched *sched)
goto out;
/* pre-allocate struct for per-CPU idle stats */
- sched->max_cpu = session->header.env.nr_cpus_online;
- if (sched->max_cpu == 0)
- sched->max_cpu = 4;
- if (init_idle_threads(sched->max_cpu))
+ sched->max_cpu.cpu = session->header.env.nr_cpus_online;
+ if (sched->max_cpu.cpu == 0)
+ sched->max_cpu.cpu = 4;
+ if (init_idle_threads(sched->max_cpu.cpu))
goto out;
/* summary_only implies summary option, but don't overwrite summary if set */
@@ -3120,7 +3180,8 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
list_splice(&data->work_list, &this->work_list);
if (this->max_lat < data->max_lat) {
this->max_lat = data->max_lat;
- this->max_lat_at = data->max_lat_at;
+ this->max_lat_start = data->max_lat_start;
+ this->max_lat_end = data->max_lat_end;
}
zfree(&data);
return;
@@ -3159,9 +3220,9 @@ static int perf_sched__lat(struct perf_sched *sched)
perf_sched__merge_lat(sched);
perf_sched__sort_lat(sched);
- printf("\n -----------------------------------------------------------------------------------------------------------------\n");
- printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
- printf(" -----------------------------------------------------------------------------------------------------------------\n");
+ printf("\n -------------------------------------------------------------------------------------------------------------------------------------------\n");
+ printf(" Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Max delay start | Max delay end |\n");
+ printf(" -------------------------------------------------------------------------------------------------------------------------------------------\n");
next = rb_first_cached(&sched->sorted_atom_root);
@@ -3190,10 +3251,10 @@ static int setup_map_cpus(struct perf_sched *sched)
{
struct perf_cpu_map *map;
- sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
+ sched->max_cpu.cpu = sysconf(_SC_NPROCESSORS_CONF);
if (sched->map.comp) {
- sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
+ sched->map.comp_cpus = zalloc(sched->max_cpu.cpu * sizeof(int));
if (!sched->map.comp_cpus)
return -1;
}
@@ -3290,11 +3351,14 @@ static int perf_sched__replay(struct perf_sched *sched)
print_task_traces(sched);
add_cross_task_wakeups(sched);
+ sched->thread_funcs_exit = false;
create_tasks(sched);
printf("------------------------------------------------------------\n");
for (i = 0; i < sched->replay_repeat; i++)
run_one_test(sched);
+ sched->thread_funcs_exit = true;
+ destroy_tasks(sched);
return 0;
}
@@ -3316,10 +3380,21 @@ static void setup_sorting(struct perf_sched *sched, const struct option *options
sort_dimension__add("pid", &sched->cmp_pid);
}
+static bool schedstat_events_exposed(void)
+{
+ /*
+ * Select "sched:sched_stat_wait" event to check
+ * whether schedstat tracepoints are exposed.
+ */
+ return IS_ERR(trace_event__tp_format("sched", "sched_stat_wait")) ?
+ false : true;
+}
+
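
schedstat_events_exposed() above probes one representative tracepoint via trace_event__tp_format() before deciding whether to pass the sched_stat_* events to record. A rough standalone approximation of the same availability check is sketched below; note this is a simplification and an assumption, not what perf does: perf parses the event through its trace-event helpers, while this sketch only tests for the format file under a tracefs mount (commonly /sys/kernel/tracing, but debugfs-based setups differ):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Illustration only: checks whether a tracepoint's format file is readable. */
static bool tracepoint_exposed(const char *sys, const char *name)
{
	char path[256];

	snprintf(path, sizeof(path),
		 "/sys/kernel/tracing/events/%s/%s/format", sys, name);
	return access(path, R_OK) == 0;
}

int main(void)
{
	printf("sched_stat_wait %s\n",
	       tracepoint_exposed("sched", "sched_stat_wait") ?
	       "is exposed (CONFIG_SCHEDSTATS likely set)" : "is not exposed");
	return 0;
}
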
static int __cmd_record(int argc, const char **argv)
{
unsigned int rec_argc, i, j;
- const char **rec_argv;
+ char **rec_argv;
+ const char **rec_argv_copy;
const char * const record_args[] = {
"record",
"-a",
@@ -3327,31 +3402,70 @@ static int __cmd_record(int argc, const char **argv)
"-m", "1024",
"-c", "1",
"-e", "sched:sched_switch",
- "-e", "sched:sched_stat_wait",
- "-e", "sched:sched_stat_sleep",
- "-e", "sched:sched_stat_iowait",
"-e", "sched:sched_stat_runtime",
"-e", "sched:sched_process_fork",
- "-e", "sched:sched_wakeup",
"-e", "sched:sched_wakeup_new",
"-e", "sched:sched_migrate_task",
};
- rec_argc = ARRAY_SIZE(record_args) + argc - 1;
- rec_argv = calloc(rec_argc + 1, sizeof(char *));
+ /*
+ * The trace_sched_stat_{wait,sleep,iowait} tracepoints are not
+ * exposed to userspace when CONFIG_SCHEDSTATS is not set. To keep
+ * "perf sched record" from failing, only record the schedstat
+ * events when they are actually available.
+ */
+ const char * const schedstat_args[] = {
+ "-e", "sched:sched_stat_wait",
+ "-e", "sched:sched_stat_sleep",
+ "-e", "sched:sched_stat_iowait",
+ };
+ unsigned int schedstat_argc = schedstat_events_exposed() ?
+ ARRAY_SIZE(schedstat_args) : 0;
+ struct tep_event *waking_event;
+ int ret;
+
+ /*
+ * +2 for either "-e", "sched:sched_wakeup" or
+ * "-e", "sched:sched_waking"
+ */
+ rec_argc = ARRAY_SIZE(record_args) + 2 + schedstat_argc + argc - 1;
+ rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
return -ENOMEM;
+ rec_argv_copy = calloc(rec_argc + 1, sizeof(char *));
+ if (rec_argv_copy == NULL) {
+ free(rec_argv);
+ return -ENOMEM;
+ }
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = strdup(record_args[i]);
+ rec_argv[i++] = strdup("-e");
+ waking_event = trace_event__tp_format("sched", "sched_waking");
+ if (!IS_ERR(waking_event))
+ rec_argv[i++] = strdup("sched:sched_waking");
+ else
+ rec_argv[i++] = strdup("sched:sched_wakeup");
+
+ for (j = 0; j < schedstat_argc; j++)
+ rec_argv[i++] = strdup(schedstat_args[j]);
+
for (j = 1; j < (unsigned int)argc; j++, i++)
- rec_argv[i] = argv[j];
+ rec_argv[i] = strdup(argv[j]);
BUG_ON(i != rec_argc);
- return cmd_record(i, rec_argv);
+ memcpy(rec_argv_copy, rec_argv, sizeof(char *) * rec_argc);
+ ret = cmd_record(rec_argc, rec_argv_copy);
+
+ for (i = 0; i < rec_argc; i++)
+ free(rec_argv[i]);
+ free(rec_argv);
+ free(rec_argv_copy);
+
+ return ret;
}
int cmd_sched(int argc, const char **argv)
@@ -3368,8 +3482,6 @@ int cmd_sched(int argc, const char **argv)
},
.cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
.sort_list = LIST_HEAD_INIT(sched.sort_list),
- .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
- .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
.sort_order = default_sort_order,
.replay_repeat = 10,
.profile_cpu = -1,
@@ -3483,7 +3595,10 @@ int cmd_sched(int argc, const char **argv)
.fork_event = replay_fork_event,
};
unsigned int i;
+ int ret = 0;
+ mutex_init(&sched.start_work_mutex);
+ mutex_init(&sched.work_done_wait_mutex);
for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
sched.curr_pid[i] = -1;
@@ -3495,12 +3610,11 @@ int cmd_sched(int argc, const char **argv)
/*
* Aliased to 'perf script' for now:
*/
- if (!strcmp(argv[0], "script"))
- return cmd_script(argc, argv);
-
- if (!strncmp(argv[0], "rec", 3)) {
- return __cmd_record(argc, argv);
- } else if (!strncmp(argv[0], "lat", 3)) {
+ if (!strcmp(argv[0], "script")) {
+ ret = cmd_script(argc, argv);
+ } else if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
+ ret = __cmd_record(argc, argv);
+ } else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
sched.tp_handler = &lat_ops;
if (argc > 1) {
argc = parse_options(argc, argv, latency_options, latency_usage, 0);
@@ -3508,7 +3622,7 @@ int cmd_sched(int argc, const char **argv)
usage_with_options(latency_usage, latency_options);
}
setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__lat(&sched);
+ ret = perf_sched__lat(&sched);
} else if (!strcmp(argv[0], "map")) {
if (argc) {
argc = parse_options(argc, argv, map_options, map_usage, 0);
@@ -3517,15 +3631,15 @@ int cmd_sched(int argc, const char **argv)
}
sched.tp_handler = &map_ops;
setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__map(&sched);
- } else if (!strncmp(argv[0], "rep", 3)) {
+ ret = perf_sched__map(&sched);
+ } else if (strlen(argv[0]) > 2 && strstarts("replay", argv[0])) {
sched.tp_handler = &replay_ops;
if (argc) {
argc = parse_options(argc, argv, replay_options, replay_usage, 0);
if (argc)
usage_with_options(replay_usage, replay_options);
}
- return perf_sched__replay(&sched);
+ ret = perf_sched__replay(&sched);
} else if (!strcmp(argv[0], "timehist")) {
if (argc) {
argc = parse_options(argc, argv, timehist_options,
@@ -3541,13 +3655,21 @@ int cmd_sched(int argc, const char **argv)
parse_options_usage(NULL, timehist_options, "w", true);
if (sched.show_next)
parse_options_usage(NULL, timehist_options, "n", true);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
+ ret = symbol__validate_sym_arguments();
+ if (ret)
+ goto out;
- return perf_sched__timehist(&sched);
+ ret = perf_sched__timehist(&sched);
} else {
usage_with_options(sched_usage, sched_options);
}
- return 0;
+out:
+ mutex_destroy(&sched.start_work_mutex);
+ mutex_destroy(&sched.work_done_wait_mutex);
+
+ return ret;
}
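
The replay changes above convert thread_func() from a goto-based infinite loop into a loop guarded by a volatile thread_funcs_exit flag, and add destroy_tasks() so every worker is joined before the mutexes are torn down. A condensed, self-contained sketch of that shutdown handshake follows, using plain pthread calls; the real code goes through perf's struct mutex wrappers and per-task semaphores:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_WORKERS 4

static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int thread_funcs_exit;	/* set once by the parent, read by workers */

static void *worker(void *arg)
{
	long id = (long)arg;

	while (!thread_funcs_exit) {
		/* wait for the parent to open the start gate */
		pthread_mutex_lock(&start_work_mutex);
		pthread_mutex_unlock(&start_work_mutex);

		printf("worker %ld: did one replay pass\n", id);

		/* park on the done gate until the parent collects results */
		pthread_mutex_lock(&work_done_wait_mutex);
		pthread_mutex_unlock(&work_done_wait_mutex);
	}
	return NULL;	/* returning, instead of looping forever, lets pthread_join() reap us */
}

int main(void)
{
	pthread_t tid[NR_WORKERS];
	long i;

	/* hold both gates while the workers are being created */
	pthread_mutex_lock(&start_work_mutex);
	pthread_mutex_lock(&work_done_wait_mutex);
	for (i = 0; i < NR_WORKERS; i++)
		if (pthread_create(&tid[i], NULL, worker, (void *)i))
			exit(EXIT_FAILURE);

	pthread_mutex_unlock(&start_work_mutex);	/* run one pass */

	thread_funcs_exit = 1;			/* ask the workers to leave their loop ... */
	pthread_mutex_unlock(&work_done_wait_mutex);

	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], NULL);	/* ... and join them before any teardown */

	pthread_mutex_destroy(&start_work_mutex);	/* safe only once every worker is gone */
	pthread_mutex_destroy(&work_done_wait_mutex);
	return 0;
}

Joining before the destroy calls is exactly what the comment in destroy_tasks() is after: destroying a mutex that a worker may still try to lock is undefined.
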
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 1f57a7ecdf3d..006f522d0e7f 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -15,6 +15,7 @@
#include "util/symbol.h"
#include "util/thread.h"
#include "util/trace-event.h"
+#include "util/env.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_fprintf.h"
@@ -30,6 +31,7 @@
#include "util/thread-stack.h"
#include "util/time-utils.h"
#include "util/path.h"
+#include "util/event.h"
#include "ui/ui.h"
#include "print_binary.h"
#include "archinsn.h"
@@ -54,11 +56,16 @@
#include <subcmd/pager.h>
#include <perf/evlist.h>
#include <linux/err.h>
+#include "util/dlfilter.h"
#include "util/record.h"
#include "util/util.h"
+#include "util/cgroup.h"
#include "perf.h"
#include <linux/ctype.h>
+#ifdef HAVE_LIBTRACEEVENT
+#include <traceevent/event-parse.h>
+#endif
static char const *script_name;
static char const *generate_script_lang;
@@ -78,42 +85,79 @@ static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
static struct perf_stat_config stat_config;
static int max_blocks;
static bool native_arch;
+static struct dlfilter *dlfilter;
+static int dlargc;
+static char **dlargv;
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
enum perf_output_field {
- PERF_OUTPUT_COMM = 1U << 0,
- PERF_OUTPUT_TID = 1U << 1,
- PERF_OUTPUT_PID = 1U << 2,
- PERF_OUTPUT_TIME = 1U << 3,
- PERF_OUTPUT_CPU = 1U << 4,
- PERF_OUTPUT_EVNAME = 1U << 5,
- PERF_OUTPUT_TRACE = 1U << 6,
- PERF_OUTPUT_IP = 1U << 7,
- PERF_OUTPUT_SYM = 1U << 8,
- PERF_OUTPUT_DSO = 1U << 9,
- PERF_OUTPUT_ADDR = 1U << 10,
- PERF_OUTPUT_SYMOFFSET = 1U << 11,
- PERF_OUTPUT_SRCLINE = 1U << 12,
- PERF_OUTPUT_PERIOD = 1U << 13,
- PERF_OUTPUT_IREGS = 1U << 14,
- PERF_OUTPUT_BRSTACK = 1U << 15,
- PERF_OUTPUT_BRSTACKSYM = 1U << 16,
- PERF_OUTPUT_DATA_SRC = 1U << 17,
- PERF_OUTPUT_WEIGHT = 1U << 18,
- PERF_OUTPUT_BPF_OUTPUT = 1U << 19,
- PERF_OUTPUT_CALLINDENT = 1U << 20,
- PERF_OUTPUT_INSN = 1U << 21,
- PERF_OUTPUT_INSNLEN = 1U << 22,
- PERF_OUTPUT_BRSTACKINSN = 1U << 23,
- PERF_OUTPUT_BRSTACKOFF = 1U << 24,
- PERF_OUTPUT_SYNTH = 1U << 25,
- PERF_OUTPUT_PHYS_ADDR = 1U << 26,
- PERF_OUTPUT_UREGS = 1U << 27,
- PERF_OUTPUT_METRIC = 1U << 28,
- PERF_OUTPUT_MISC = 1U << 29,
- PERF_OUTPUT_SRCCODE = 1U << 30,
- PERF_OUTPUT_IPC = 1U << 31,
+ PERF_OUTPUT_COMM = 1ULL << 0,
+ PERF_OUTPUT_TID = 1ULL << 1,
+ PERF_OUTPUT_PID = 1ULL << 2,
+ PERF_OUTPUT_TIME = 1ULL << 3,
+ PERF_OUTPUT_CPU = 1ULL << 4,
+ PERF_OUTPUT_EVNAME = 1ULL << 5,
+ PERF_OUTPUT_TRACE = 1ULL << 6,
+ PERF_OUTPUT_IP = 1ULL << 7,
+ PERF_OUTPUT_SYM = 1ULL << 8,
+ PERF_OUTPUT_DSO = 1ULL << 9,
+ PERF_OUTPUT_ADDR = 1ULL << 10,
+ PERF_OUTPUT_SYMOFFSET = 1ULL << 11,
+ PERF_OUTPUT_SRCLINE = 1ULL << 12,
+ PERF_OUTPUT_PERIOD = 1ULL << 13,
+ PERF_OUTPUT_IREGS = 1ULL << 14,
+ PERF_OUTPUT_BRSTACK = 1ULL << 15,
+ PERF_OUTPUT_BRSTACKSYM = 1ULL << 16,
+ PERF_OUTPUT_DATA_SRC = 1ULL << 17,
+ PERF_OUTPUT_WEIGHT = 1ULL << 18,
+ PERF_OUTPUT_BPF_OUTPUT = 1ULL << 19,
+ PERF_OUTPUT_CALLINDENT = 1ULL << 20,
+ PERF_OUTPUT_INSN = 1ULL << 21,
+ PERF_OUTPUT_INSNLEN = 1ULL << 22,
+ PERF_OUTPUT_BRSTACKINSN = 1ULL << 23,
+ PERF_OUTPUT_BRSTACKOFF = 1ULL << 24,
+ PERF_OUTPUT_SYNTH = 1ULL << 25,
+ PERF_OUTPUT_PHYS_ADDR = 1ULL << 26,
+ PERF_OUTPUT_UREGS = 1ULL << 27,
+ PERF_OUTPUT_METRIC = 1ULL << 28,
+ PERF_OUTPUT_MISC = 1ULL << 29,
+ PERF_OUTPUT_SRCCODE = 1ULL << 30,
+ PERF_OUTPUT_IPC = 1ULL << 31,
+ PERF_OUTPUT_TOD = 1ULL << 32,
+ PERF_OUTPUT_DATA_PAGE_SIZE = 1ULL << 33,
+ PERF_OUTPUT_CODE_PAGE_SIZE = 1ULL << 34,
+ PERF_OUTPUT_INS_LAT = 1ULL << 35,
+ PERF_OUTPUT_BRSTACKINSNLEN = 1ULL << 36,
+ PERF_OUTPUT_MACHINE_PID = 1ULL << 37,
+ PERF_OUTPUT_VCPU = 1ULL << 38,
+ PERF_OUTPUT_CGROUP = 1ULL << 39,
+ PERF_OUTPUT_RETIRE_LAT = 1ULL << 40,
+};
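
The widened constants matter because the new fields (tod and later) sit at bit positions 32 and above; with the old 1U constants those shifts would be undefined, since the shift count reaches the width of the 32-bit type. A two-line standalone reminder, independent of perf:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1U is only 32 bits wide, so "1U << 32" is undefined; the ULL
	 * constant makes bit positions 32..63 usable for new flags. */
	uint64_t tod_bit = 1ULL << 32;

	printf("bit 32 as a 64-bit flag: 0x%llx\n", (unsigned long long)tod_bit);
	return 0;
}
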
+
+struct perf_script {
+ struct perf_tool tool;
+ struct perf_session *session;
+ bool show_task_events;
+ bool show_mmap_events;
+ bool show_switch_events;
+ bool show_namespace_events;
+ bool show_lost_events;
+ bool show_round_events;
+ bool show_bpf_events;
+ bool show_cgroup_events;
+ bool show_text_poke_events;
+ bool allocated;
+ bool per_event_dump;
+ bool stitch_lbr;
+ struct evswitch evswitch;
+ struct perf_cpu_map *cpus;
+ struct perf_thread_map *threads;
+ int name_width;
+ const char *time_str;
+ struct perf_time_interval *ptime_range;
+ int range_size;
+ int range_num;
};
struct output_option {
@@ -152,10 +196,20 @@ struct output_option {
{.str = "misc", .field = PERF_OUTPUT_MISC},
{.str = "srccode", .field = PERF_OUTPUT_SRCCODE},
{.str = "ipc", .field = PERF_OUTPUT_IPC},
+ {.str = "tod", .field = PERF_OUTPUT_TOD},
+ {.str = "data_page_size", .field = PERF_OUTPUT_DATA_PAGE_SIZE},
+ {.str = "code_page_size", .field = PERF_OUTPUT_CODE_PAGE_SIZE},
+ {.str = "ins_lat", .field = PERF_OUTPUT_INS_LAT},
+ {.str = "brstackinsnlen", .field = PERF_OUTPUT_BRSTACKINSNLEN},
+ {.str = "machine_pid", .field = PERF_OUTPUT_MACHINE_PID},
+ {.str = "vcpu", .field = PERF_OUTPUT_VCPU},
+ {.str = "cgroup", .field = PERF_OUTPUT_CGROUP},
+ {.str = "retire_lat", .field = PERF_OUTPUT_RETIRE_LAT},
};
enum {
OUTPUT_TYPE_SYNTH = PERF_TYPE_MAX,
+ OUTPUT_TYPE_OTHER,
OUTPUT_TYPE_MAX
};
@@ -167,6 +221,7 @@ static struct {
u64 fields;
u64 invalid_fields;
u64 user_set_fields;
+ u64 user_unset_fields;
} output[OUTPUT_TYPE_MAX] = {
[PERF_TYPE_HARDWARE] = {
@@ -223,7 +278,9 @@ static struct {
PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD |
PERF_OUTPUT_ADDR | PERF_OUTPUT_DATA_SRC |
- PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR,
+ PERF_OUTPUT_WEIGHT | PERF_OUTPUT_PHYS_ADDR |
+ PERF_OUTPUT_DATA_PAGE_SIZE | PERF_OUTPUT_CODE_PAGE_SIZE |
+ PERF_OUTPUT_INS_LAT | PERF_OUTPUT_RETIRE_LAT,
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
@@ -251,6 +308,18 @@ static struct {
.invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
},
+
+ [OUTPUT_TYPE_OTHER] = {
+ .user_set = false,
+
+ .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+ PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+ PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+ PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+ PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+ .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+ },
};
struct evsel_script {
@@ -267,13 +336,12 @@ static inline struct evsel_script *evsel_script(struct evsel *evsel)
return (struct evsel_script *)evsel->priv;
}
-static struct evsel_script *perf_evsel_script__new(struct evsel *evsel,
- struct perf_data *data)
+static struct evsel_script *evsel_script__new(struct evsel *evsel, struct perf_data *data)
{
struct evsel_script *es = zalloc(sizeof(*es));
if (es != NULL) {
- if (asprintf(&es->filename, "%s.%s.dump", data->file.path, perf_evsel__name(evsel)) < 0)
+ if (asprintf(&es->filename, "%s.%s.dump", data->file.path, evsel__name(evsel)) < 0)
goto out_free;
es->fp = fopen(es->filename, "w");
if (es->fp == NULL)
@@ -288,7 +356,7 @@ out_free:
return NULL;
}
-static void perf_evsel_script__delete(struct evsel_script *es)
+static void evsel_script__delete(struct evsel_script *es)
{
zfree(&es->filename);
fclose(es->fp);
@@ -296,7 +364,7 @@ static void perf_evsel_script__delete(struct evsel_script *es)
free(es);
}
-static int perf_evsel_script__fprintf(struct evsel_script *es, FILE *fp)
+static int evsel_script__fprintf(struct evsel_script *es, FILE *fp)
{
struct stat st;
@@ -311,18 +379,11 @@ static inline int output_type(unsigned int type)
case PERF_TYPE_SYNTH:
return OUTPUT_TYPE_SYNTH;
default:
- return type;
+ if (type < PERF_TYPE_MAX)
+ return type;
}
-}
-static inline unsigned int attr_type(unsigned int type)
-{
- switch (type) {
- case OUTPUT_TYPE_SYNTH:
- return PERF_TYPE_SYNTH;
- default:
- return type;
- }
+ return OUTPUT_TYPE_OTHER;
}
static bool output_set_by_user(void)
@@ -351,10 +412,8 @@ static const char *output_field2str(enum perf_output_field field)
#define PRINT_FIELD(x) (output[output_type(attr->type)].fields & PERF_OUTPUT_##x)
-static int perf_evsel__do_check_stype(struct evsel *evsel,
- u64 sample_type, const char *sample_msg,
- enum perf_output_field field,
- bool allow_user_set)
+static int evsel__do_check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
+ enum perf_output_field field, bool allow_user_set)
{
struct perf_event_attr *attr = &evsel->core.attr;
int type = output_type(attr->type);
@@ -366,7 +425,7 @@ static int perf_evsel__do_check_stype(struct evsel *evsel,
if (output[type].user_set_fields & field) {
if (allow_user_set)
return 0;
- evname = perf_evsel__name(evsel);
+ evname = evsel__name(evsel);
pr_err("Samples for '%s' event do not have %s attribute set. "
"Cannot print '%s' field.\n",
evname, sample_msg, output_field2str(field));
@@ -375,7 +434,7 @@ static int perf_evsel__do_check_stype(struct evsel *evsel,
/* user did not ask for it explicitly so remove from the default list */
output[type].fields &= ~field;
- evname = perf_evsel__name(evsel);
+ evname = evsel__name(evsel);
pr_debug("Samples for '%s' event do not have %s attribute set. "
"Skipping '%s' field.\n",
evname, sample_msg, output_field2str(field));
@@ -383,20 +442,20 @@ static int perf_evsel__do_check_stype(struct evsel *evsel,
return 0;
}
-static int perf_evsel__check_stype(struct evsel *evsel,
- u64 sample_type, const char *sample_msg,
- enum perf_output_field field)
+static int evsel__check_stype(struct evsel *evsel, u64 sample_type, const char *sample_msg,
+ enum perf_output_field field)
{
- return perf_evsel__do_check_stype(evsel, sample_type, sample_msg, field,
- false);
+ return evsel__do_check_stype(evsel, sample_type, sample_msg, field, false);
}
-static int perf_evsel__check_attr(struct evsel *evsel,
- struct perf_session *session)
+static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
{
struct perf_event_attr *attr = &evsel->core.attr;
bool allow_user_set;
+ if (evsel__is_dummy_event(evsel))
+ return 0;
+
if (perf_header__has_feat(&session->header, HEADER_STAT))
return 0;
@@ -404,32 +463,28 @@ static int perf_evsel__check_attr(struct evsel *evsel,
HEADER_AUXTRACE);
if (PRINT_FIELD(TRACE) &&
- !perf_session__has_traces(session, "record -R"))
+ !perf_session__has_traces(session, "record -R"))
return -EINVAL;
if (PRINT_FIELD(IP)) {
- if (perf_evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP",
- PERF_OUTPUT_IP))
+ if (evsel__check_stype(evsel, PERF_SAMPLE_IP, "IP", PERF_OUTPUT_IP))
return -EINVAL;
}
if (PRINT_FIELD(ADDR) &&
- perf_evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
- PERF_OUTPUT_ADDR, allow_user_set))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR", PERF_OUTPUT_ADDR, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(DATA_SRC) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC",
- PERF_OUTPUT_DATA_SRC))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_DATA_SRC, "DATA_SRC", PERF_OUTPUT_DATA_SRC, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(WEIGHT) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT, "WEIGHT",
- PERF_OUTPUT_WEIGHT))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_WEIGHT_TYPE, "WEIGHT", PERF_OUTPUT_WEIGHT, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(SYM) &&
- !(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
+ !(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
pr_err("Display of symbols requested but neither sample IP nor "
"sample address\navailable. Hence, no addresses to convert "
"to symbols.\n");
@@ -441,7 +496,7 @@ static int perf_evsel__check_attr(struct evsel *evsel,
return -EINVAL;
}
if (PRINT_FIELD(DSO) &&
- !(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
+ !(evsel->core.attr.sample_type & (PERF_SAMPLE_IP|PERF_SAMPLE_ADDR))) {
pr_err("Display of DSO requested but no address to convert.\n");
return -EINVAL;
}
@@ -450,41 +505,56 @@ static int perf_evsel__check_attr(struct evsel *evsel,
"selected. Hence, no address to lookup the source line number.\n");
return -EINVAL;
}
- if (PRINT_FIELD(BRSTACKINSN) && !allow_user_set &&
- !(perf_evlist__combined_branch_type(session->evlist) &
- PERF_SAMPLE_BRANCH_ANY)) {
+ if ((PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN)) && !allow_user_set &&
+ !(evlist__combined_branch_type(session->evlist) & PERF_SAMPLE_BRANCH_ANY)) {
pr_err("Display of branch stack assembler requested, but non all-branch filter set\n"
"Hint: run 'perf record -b ...'\n");
return -EINVAL;
}
if ((PRINT_FIELD(PID) || PRINT_FIELD(TID)) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID",
- PERF_OUTPUT_TID|PERF_OUTPUT_PID))
+ evsel__check_stype(evsel, PERF_SAMPLE_TID, "TID", PERF_OUTPUT_TID|PERF_OUTPUT_PID))
return -EINVAL;
if (PRINT_FIELD(TIME) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME",
- PERF_OUTPUT_TIME))
+ evsel__check_stype(evsel, PERF_SAMPLE_TIME, "TIME", PERF_OUTPUT_TIME))
return -EINVAL;
if (PRINT_FIELD(CPU) &&
- perf_evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
- PERF_OUTPUT_CPU, allow_user_set))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU", PERF_OUTPUT_CPU, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(IREGS) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS",
- PERF_OUTPUT_IREGS))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_REGS_INTR, "IREGS", PERF_OUTPUT_IREGS, allow_user_set))
return -EINVAL;
if (PRINT_FIELD(UREGS) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS",
- PERF_OUTPUT_UREGS))
+ evsel__check_stype(evsel, PERF_SAMPLE_REGS_USER, "UREGS", PERF_OUTPUT_UREGS))
return -EINVAL;
if (PRINT_FIELD(PHYS_ADDR) &&
- perf_evsel__check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR",
- PERF_OUTPUT_PHYS_ADDR))
+ evsel__do_check_stype(evsel, PERF_SAMPLE_PHYS_ADDR, "PHYS_ADDR", PERF_OUTPUT_PHYS_ADDR, allow_user_set))
+ return -EINVAL;
+
+ if (PRINT_FIELD(DATA_PAGE_SIZE) &&
+ evsel__check_stype(evsel, PERF_SAMPLE_DATA_PAGE_SIZE, "DATA_PAGE_SIZE", PERF_OUTPUT_DATA_PAGE_SIZE))
+ return -EINVAL;
+
+ if (PRINT_FIELD(CODE_PAGE_SIZE) &&
+ evsel__check_stype(evsel, PERF_SAMPLE_CODE_PAGE_SIZE, "CODE_PAGE_SIZE", PERF_OUTPUT_CODE_PAGE_SIZE))
+ return -EINVAL;
+
+ if (PRINT_FIELD(INS_LAT) &&
+ evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_STRUCT, "WEIGHT_STRUCT", PERF_OUTPUT_INS_LAT))
+ return -EINVAL;
+
+ if (PRINT_FIELD(CGROUP) &&
+ evsel__check_stype(evsel, PERF_SAMPLE_CGROUP, "CGROUP", PERF_OUTPUT_CGROUP)) {
+ pr_err("Hint: run 'perf record --all-cgroups ...'\n");
+ return -EINVAL;
+ }
+
+ if (PRINT_FIELD(RETIRE_LAT) &&
+ evsel__check_stype(evsel, PERF_SAMPLE_WEIGHT_STRUCT, "WEIGHT_STRUCT", PERF_OUTPUT_RETIRE_LAT))
return -EINVAL;
return 0;
@@ -511,17 +581,32 @@ static void set_print_ip_opts(struct perf_event_attr *attr)
output[type].print_ip_opts |= EVSEL__PRINT_SRCLINE;
}
+static struct evsel *find_first_output_type(struct evlist *evlist,
+ unsigned int type)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_dummy_event(evsel))
+ continue;
+ if (output_type(evsel->core.attr.type) == (int)type)
+ return evsel;
+ }
+ return NULL;
+}
+
/*
* verify all user requested events exist and the samples
* have the expected data
*/
static int perf_session__check_output_opt(struct perf_session *session)
{
+ bool tod = false;
unsigned int j;
struct evsel *evsel;
for (j = 0; j < OUTPUT_TYPE_MAX; ++j) {
- evsel = perf_session__find_first_evtype(session, attr_type(j));
+ evsel = find_first_output_type(session->evlist, j);
/*
* even if fields is set to 0 (ie., show nothing) event must
@@ -536,13 +621,14 @@ static int perf_session__check_output_opt(struct perf_session *session)
}
if (evsel && output[j].fields &&
- perf_evsel__check_attr(evsel, session))
+ evsel__check_attr(evsel, session))
return -1;
if (evsel == NULL)
continue;
set_print_ip_opts(&evsel->core.attr);
+ tod |= output[j].fields & PERF_OUTPUT_TOD;
}
if (!no_callchain) {
@@ -583,13 +669,17 @@ static int perf_session__check_output_opt(struct perf_session *session)
}
}
+ if (tod && !session->header.env.clock.enabled) {
+ pr_err("Can't provide 'tod' time, missing clock data. "
+ "Please record with -k/--clockid option.\n");
+ return -1;
+ }
out:
return 0;
}
-static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask,
- FILE *fp
-)
+static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask, const char *arch,
+ FILE *fp)
{
unsigned i = 0, r;
int printed = 0;
@@ -601,29 +691,78 @@ static int perf_sample__fprintf_regs(struct regs_dump *regs, uint64_t mask,
for_each_set_bit(r, (unsigned long *) &mask, sizeof(mask) * 8) {
u64 val = regs->regs[i++];
- printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r), val);
+ printed += fprintf(fp, "%5s:0x%"PRIx64" ", perf_reg_name(r, arch), val);
}
- fprintf(fp, "\n");
-
return printed;
}
+#define DEFAULT_TOD_FMT "%F %H:%M:%S"
+
+static char*
+tod_scnprintf(struct perf_script *script, char *buf, int buflen,
+ u64 timestamp)
+{
+ u64 tod_ns, clockid_ns;
+ struct perf_env *env;
+ unsigned long nsec;
+ struct tm ltime;
+ char date[64];
+ time_t sec;
+
+ buf[0] = '\0';
+ if (buflen < 64 || !script)
+ return buf;
+
+ env = &script->session->header.env;
+ if (!env->clock.enabled) {
+ scnprintf(buf, buflen, "disabled");
+ return buf;
+ }
+
+ clockid_ns = env->clock.clockid_ns;
+ tod_ns = env->clock.tod_ns;
+
+ if (timestamp > clockid_ns)
+ tod_ns += timestamp - clockid_ns;
+ else
+ tod_ns -= clockid_ns - timestamp;
+
+ sec = (time_t) (tod_ns / NSEC_PER_SEC);
+ nsec = tod_ns - sec * NSEC_PER_SEC;
+
+ if (localtime_r(&sec, &ltime) == NULL) {
+ scnprintf(buf, buflen, "failed");
+ } else {
+ strftime(date, sizeof(date), DEFAULT_TOD_FMT, &ltime);
+
+ if (symbol_conf.nanosecs) {
+ snprintf(buf, buflen, "%s.%09lu", date, nsec);
+ } else {
+ snprintf(buf, buflen, "%s.%06lu",
+ date, nsec / NSEC_PER_USEC);
+ }
+ }
+
+ return buf;
+}
+
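An illustrative aside, not part of the patch: the tod_scnprintf() hunk above converts a perf clock timestamp to wall-clock time using the (tod_ns, clockid_ns) reference pair that a -k/--clockid record stores in the session header, shifting the reference by the signed distance to the sample. A minimal standalone sketch of the same arithmetic, with made-up reference values:

#include <inttypes.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Hypothetical reference pair captured at record time */
	uint64_t tod_ns     = 1700000000ULL * NSEC_PER_SEC;	/* wall clock, ns since the epoch */
	uint64_t clockid_ns = 123456789ULL;			/* perf clock at the same instant */
	uint64_t sample_ts  = clockid_ns + 42ULL * NSEC_PER_SEC; /* a sample timestamp */
	uint64_t wall_ns;
	struct tm ltime;
	char date[64];
	time_t sec;

	/* Shift the reference wall clock by the signed delta to the sample */
	if (sample_ts > clockid_ns)
		wall_ns = tod_ns + (sample_ts - clockid_ns);
	else
		wall_ns = tod_ns - (clockid_ns - sample_ts);

	sec = (time_t)(wall_ns / NSEC_PER_SEC);
	if (localtime_r(&sec, &ltime)) {
		strftime(date, sizeof(date), "%F %H:%M:%S", &ltime);
		printf("%s.%06" PRIu64 "\n", date, (wall_ns % NSEC_PER_SEC) / 1000);
	}
	return 0;
}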
static int perf_sample__fprintf_iregs(struct perf_sample *sample,
- struct perf_event_attr *attr, FILE *fp)
+ struct perf_event_attr *attr, const char *arch, FILE *fp)
{
return perf_sample__fprintf_regs(&sample->intr_regs,
- attr->sample_regs_intr, fp);
+ attr->sample_regs_intr, arch, fp);
}
static int perf_sample__fprintf_uregs(struct perf_sample *sample,
- struct perf_event_attr *attr, FILE *fp)
+ struct perf_event_attr *attr, const char *arch, FILE *fp)
{
return perf_sample__fprintf_regs(&sample->user_regs,
- attr->sample_regs_user, fp);
+ attr->sample_regs_user, arch, fp);
}
-static int perf_sample__fprintf_start(struct perf_sample *sample,
+static int perf_sample__fprintf_start(struct perf_script *script,
+ struct perf_sample *sample,
struct thread *thread,
struct evsel *evsel,
u32 type, FILE *fp)
@@ -632,14 +771,24 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
unsigned long secs;
unsigned long long nsecs;
int printed = 0;
+ char tstr[128];
+
+ if (PRINT_FIELD(MACHINE_PID) && sample->machine_pid)
+ printed += fprintf(fp, "VM:%5d ", sample->machine_pid);
+
+ /* Print VCPU only for guest events i.e. with machine_pid */
+ if (PRINT_FIELD(VCPU) && sample->machine_pid)
+ printed += fprintf(fp, "VCPU:%03d ", sample->vcpu);
if (PRINT_FIELD(COMM)) {
+ const char *comm = thread ? thread__comm_str(thread) : ":-1";
+
if (latency_format)
- printed += fprintf(fp, "%8.8s ", thread__comm_str(thread));
+ printed += fprintf(fp, "%8.8s ", comm);
else if (PRINT_FIELD(IP) && evsel__has_callchain(evsel) && symbol_conf.use_callchain)
- printed += fprintf(fp, "%s ", thread__comm_str(thread));
+ printed += fprintf(fp, "%s ", comm);
else
- printed += fprintf(fp, "%16s ", thread__comm_str(thread));
+ printed += fprintf(fp, "%16s ", comm);
}
if (PRINT_FIELD(PID) && PRINT_FIELD(TID))
@@ -700,6 +849,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample,
printed += ret;
}
+ if (PRINT_FIELD(TOD)) {
+ tod_scnprintf(script, tstr, sizeof(tstr), sample->time);
+ printed += fprintf(fp, "%s ", tstr);
+ }
+
if (PRINT_FIELD(TIME)) {
u64 t = sample->time;
if (reltime) {
@@ -739,6 +893,17 @@ mispred_str(struct branch_entry *br)
return br->flags.predicted ? 'P' : 'M';
}
+static int print_bstack_flags(FILE *fp, struct branch_entry *br)
+{
+ return fprintf(fp, "/%c/%c/%c/%d/%s/%s ",
+ mispred_str(br),
+ br->flags.in_tx ? 'X' : '-',
+ br->flags.abort ? 'A' : '-',
+ br->flags.cycles,
+ get_branch_type(br),
+ br->flags.spec ? branch_spec_desc(br->flags.spec) : "-");
+}
+
static int perf_sample__fprintf_brstack(struct perf_sample *sample,
struct thread *thread,
struct perf_event_attr *attr, FILE *fp)
@@ -777,11 +942,7 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
printed += fprintf(fp, ")");
}
- printed += fprintf(fp, "/%c/%c/%c/%d ",
- mispred_str(entries + i),
- entries[i].flags.in_tx ? 'X' : '-',
- entries[i].flags.abort ? 'A' : '-',
- entries[i].flags.cycles);
+ printed += print_bstack_flags(fp, entries + i);
}
return printed;
@@ -823,11 +984,7 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
printed += map__fprintf_dsoname(alt.map, fp);
printed += fprintf(fp, ")");
}
- printed += fprintf(fp, "/%c/%c/%c/%d ",
- mispred_str(entries + i),
- entries[i].flags.in_tx ? 'X' : '-',
- entries[i].flags.abort ? 'A' : '-',
- entries[i].flags.cycles);
+ printed += print_bstack_flags(fp, entries + i);
}
return printed;
@@ -854,12 +1011,12 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
to = entries[i].to;
if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
- !alf.map->dso->adjust_symbols)
- from = map__map_ip(alf.map, from);
+ !map__dso(alf.map)->adjust_symbols)
+ from = map__dso_map_ip(alf.map, from);
if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
- !alt.map->dso->adjust_symbols)
- to = map__map_ip(alt.map, to);
+ !map__dso(alt.map)->adjust_symbols)
+ to = map__dso_map_ip(alt.map, to);
printed += fprintf(fp, " 0x%"PRIx64, from);
if (PRINT_FIELD(DSO)) {
@@ -873,11 +1030,7 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
printed += map__fprintf_dsoname(alt.map, fp);
printed += fprintf(fp, ")");
}
- printed += fprintf(fp, "/%c/%c/%c/%d ",
- mispred_str(entries + i),
- entries[i].flags.in_tx ? 'X' : '-',
- entries[i].flags.abort ? 'A' : '-',
- entries[i].flags.cycles);
+ printed += print_bstack_flags(fp, entries + i);
}
return printed;
@@ -891,6 +1044,7 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
long offset, len;
struct addr_location al;
bool kernel;
+ struct dso *dso;
if (!start || !end)
return 0;
@@ -921,11 +1075,11 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
return 0;
}
- if (!thread__find_map(thread, *cpumode, start, &al) || !al.map->dso) {
+ if (!thread__find_map(thread, *cpumode, start, &al) || (dso = map__dso(al.map)) == NULL) {
pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
return 0;
}
- if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR) {
+ if (dso->data.status == DSO_DATA_STATUS_ERROR) {
pr_debug("\tcannot resolve %" PRIx64 "-%" PRIx64 "\n", start, end);
return 0;
}
@@ -933,11 +1087,11 @@ static int grab_bb(u8 *buffer, u64 start, u64 end,
/* Load maps to ensure dso->is_64_bit has been updated */
map__load(al.map);
- offset = al.map->map_ip(al.map, start);
- len = dso__data_read_offset(al.map->dso, machine, offset, (u8 *)buffer,
+ offset = map__map_ip(al.map, start);
+ len = dso__data_read_offset(dso, machine, offset, (u8 *)buffer,
end - start + MAXINSN);
- *is64bit = al.map->dso->is_64_bit;
+ *is64bit = dso->is_64_bit;
if (len <= 0)
pr_debug("\tcannot fetch code for block at %" PRIx64 "-%" PRIx64 "\n",
start, end);
@@ -951,10 +1105,11 @@ static int map__fprintf_srccode(struct map *map, u64 addr, FILE *fp, struct srcc
unsigned line;
int len;
char *srccode;
+ struct dso *dso;
- if (!map || !map->dso)
+ if (!map || (dso = map__dso(map)) == NULL)
return 0;
- srcfile = get_srcline_split(map->dso,
+ srcfile = get_srcline_split(dso,
map__rip_2objdump(map, addr),
&line);
if (!srcfile)
@@ -1004,10 +1159,17 @@ static int print_srccode(struct thread *thread, u8 cpumode, uint64_t addr)
static int ip__fprintf_jump(uint64_t ip, struct branch_entry *en,
struct perf_insn *x, u8 *inbuf, int len,
- int insn, FILE *fp, int *total_cycles)
+ int insn, FILE *fp, int *total_cycles,
+ struct perf_event_attr *attr)
{
- int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t#%s%s%s%s", ip,
- dump_insn(x, ip, inbuf, len, NULL),
+ int ilen = 0;
+ int printed = fprintf(fp, "\t%016" PRIx64 "\t%-30s\t", ip,
+ dump_insn(x, ip, inbuf, len, &ilen));
+
+ if (PRINT_FIELD(BRSTACKINSNLEN))
+ printed += fprintf(fp, "ilen: %d\t", ilen);
+
+ printed += fprintf(fp, "#%s%s%s%s",
en->flags.predicted ? " PRED" : "",
en->flags.mispred ? " MISPRED" : "",
en->flags.in_tx ? " INTX" : "",
@@ -1046,7 +1208,7 @@ static int ip__fprintf_sym(uint64_t addr, struct thread *thread,
if (al.addr < al.sym->end)
off = al.addr - al.sym->start;
else
- off = al.addr - al.map->start - al.sym->start;
+ off = al.addr - map__start(al.map) - al.sym->start;
printed += fprintf(fp, "\t%s", al.sym->name);
if (off)
printed += fprintf(fp, "%+d", off);
@@ -1093,7 +1255,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
printed += ip__fprintf_sym(entries[nr - 1].from, thread,
x.cpumode, x.cpu, &lastsym, attr, fp);
printed += ip__fprintf_jump(entries[nr - 1].from, &entries[nr - 1],
- &x, buffer, len, 0, fp, &total_cycles);
+ &x, buffer, len, 0, fp, &total_cycles,
+ attr);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, entries[nr - 1].from);
}
@@ -1124,14 +1287,17 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (ip == end) {
printed += ip__fprintf_jump(ip, &entries[i], &x, buffer + off, len - off, ++insn, fp,
- &total_cycles);
+ &total_cycles, attr);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, ip);
break;
} else {
ilen = 0;
- printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
+ printed += fprintf(fp, "\t%016" PRIx64 "\t%s", ip,
dump_insn(&x, ip, buffer + off, len - off, &ilen));
+ if (PRINT_FIELD(BRSTACKINSNLEN))
+ printed += fprintf(fp, "\tilen: %d", ilen);
+ printed += fprintf(fp, "\n");
if (ilen == 0)
break;
if (PRINT_FIELD(SRCCODE))
@@ -1153,7 +1319,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
goto out;
/*
- * Print final block upto sample
+ * Print final block up to sample
*
* Due to pipeline delays the LBRs might be missing a branch
* or two, which can result in very large or negative blocks
@@ -1174,16 +1340,23 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
machine, thread, &x.is64bit, &x.cpumode, false);
if (len <= 0)
goto out;
- printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", sample->ip,
- dump_insn(&x, sample->ip, buffer, len, NULL));
+ ilen = 0;
+ printed += fprintf(fp, "\t%016" PRIx64 "\t%s", sample->ip,
+ dump_insn(&x, sample->ip, buffer, len, &ilen));
+ if (PRINT_FIELD(BRSTACKINSNLEN))
+ printed += fprintf(fp, "\tilen: %d", ilen);
+ printed += fprintf(fp, "\n");
if (PRINT_FIELD(SRCCODE))
print_srccode(thread, x.cpumode, sample->ip);
goto out;
}
for (off = 0; off <= end - start; off += ilen) {
ilen = 0;
- printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off,
+ printed += fprintf(fp, "\t%016" PRIx64 "\t%s", start + off,
dump_insn(&x, start + off, buffer + off, len - off, &ilen));
+ if (PRINT_FIELD(BRSTACKINSNLEN))
+ printed += fprintf(fp, "\tilen: %d", ilen);
+ printed += fprintf(fp, "\n");
if (ilen == 0)
break;
if (arch_is_branch(buffer + off, len - off, x.is64bit) && start + off != sample->ip) {
@@ -1233,17 +1406,18 @@ static const char *resolve_branch_sym(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
+ struct addr_location *addr_al,
u64 *ip)
{
- struct addr_location addr_al;
struct perf_event_attr *attr = &evsel->core.attr;
const char *name = NULL;
if (sample->flags & (PERF_IP_FLAG_CALL | PERF_IP_FLAG_TRACE_BEGIN)) {
if (sample_addr_correlates_sym(attr)) {
- thread__resolve(thread, &addr_al, sample);
- if (addr_al.sym)
- name = addr_al.sym->name;
+ if (!addr_al->thread)
+ thread__resolve(thread, addr_al, sample);
+ if (addr_al->sym)
+ name = addr_al->sym->name;
else
*ip = sample->addr;
} else {
@@ -1261,7 +1435,9 @@ static const char *resolve_branch_sym(struct perf_sample *sample,
static int perf_sample__fprintf_callindent(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
- struct addr_location *al, FILE *fp)
+ struct addr_location *al,
+ struct addr_location *addr_al,
+ FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
size_t depth = thread_stack__depth(thread, sample->cpu);
@@ -1278,7 +1454,7 @@ static int perf_sample__fprintf_callindent(struct perf_sample *sample,
if (thread->ts && sample->flags & PERF_IP_FLAG_RETURN)
depth += 1;
- name = resolve_branch_sym(sample, evsel, thread, al, &ip);
+ name = resolve_branch_sym(sample, evsel, thread, al, addr_al, &ip);
if (PRINT_FIELD(DSO) && !(PRINT_FIELD(IP) || PRINT_FIELD(ADDR))) {
dlen += fprintf(fp, "(");
@@ -1313,6 +1489,13 @@ __weak void arch_fetch_insn(struct perf_sample *sample __maybe_unused,
{
}
+void script_fetch_insn(struct perf_sample *sample, struct thread *thread,
+ struct machine *machine)
+{
+ if (sample->insn_len == 0 && native_arch)
+ arch_fetch_insn(sample, thread, machine);
+}
+
static int perf_sample__fprintf_insn(struct perf_sample *sample,
struct perf_event_attr *attr,
struct thread *thread,
@@ -1320,8 +1503,7 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
{
int printed = 0;
- if (sample->insn_len == 0 && native_arch)
- arch_fetch_insn(sample, thread, machine);
+ script_fetch_insn(sample, thread, machine);
if (PRINT_FIELD(INSNLEN))
printed += fprintf(fp, " ilen: %d", sample->insn_len);
@@ -1332,7 +1514,7 @@ static int perf_sample__fprintf_insn(struct perf_sample *sample,
for (i = 0; i < sample->insn_len; i++)
printed += fprintf(fp, " %02x", (unsigned char)sample->insn[i]);
}
- if (PRINT_FIELD(BRSTACKINSN))
+ if (PRINT_FIELD(BRSTACKINSN) || PRINT_FIELD(BRSTACKINSNLEN))
printed += perf_sample__fprintf_brstackinsn(sample, thread, attr, machine, fp);
return printed;
@@ -1356,6 +1538,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
struct addr_location *al,
+ struct addr_location *addr_al,
struct machine *machine, FILE *fp)
{
struct perf_event_attr *attr = &evsel->core.attr;
@@ -1364,7 +1547,7 @@ static int perf_sample__fprintf_bts(struct perf_sample *sample,
int printed = 0;
if (PRINT_FIELD(CALLINDENT))
- printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, fp);
+ printed += perf_sample__fprintf_callindent(sample, evsel, thread, al, addr_al, fp);
/* print branch_from information */
if (PRINT_FIELD(IP)) {
@@ -1432,6 +1615,8 @@ static struct {
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT, "tx abrt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_BEGIN, "tr strt"},
{PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TRACE_END, "tr end"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMENTRY, "vmentry"},
+ {PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL | PERF_IP_FLAG_VMEXIT, "vmexit"},
{0, NULL}
};
@@ -1447,42 +1632,58 @@ static const char *sample_flags_to_name(u32 flags)
return NULL;
}
-static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
+int perf_sample__sprintf_flags(u32 flags, char *str, size_t sz)
{
+ u32 xf = PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_INTR_DISABLE |
+ PERF_IP_FLAG_INTR_TOGGLE;
const char *chars = PERF_IP_FLAG_CHARS;
- const int n = strlen(PERF_IP_FLAG_CHARS);
- bool in_tx = flags & PERF_IP_FLAG_IN_TX;
+ const size_t n = strlen(PERF_IP_FLAG_CHARS);
const char *name = NULL;
- char str[33];
- int i, pos = 0;
+ size_t i, pos = 0;
+ char xs[16] = {0};
+
+ if (flags & xf)
+ snprintf(xs, sizeof(xs), "(%s%s%s)",
+ flags & PERF_IP_FLAG_IN_TX ? "x" : "",
+ flags & PERF_IP_FLAG_INTR_DISABLE ? "D" : "",
+ flags & PERF_IP_FLAG_INTR_TOGGLE ? "t" : "");
- name = sample_flags_to_name(flags & ~PERF_IP_FLAG_IN_TX);
+ name = sample_flags_to_name(flags & ~xf);
if (name)
- return fprintf(fp, " %-15s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "%-15s%6s", name, xs);
if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
- name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_BEGIN));
+ name = sample_flags_to_name(flags & ~(xf | PERF_IP_FLAG_TRACE_BEGIN));
if (name)
- return fprintf(fp, " tr strt %-7s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "tr strt %-7s%6s", name, xs);
}
if (flags & PERF_IP_FLAG_TRACE_END) {
- name = sample_flags_to_name(flags & ~(PERF_IP_FLAG_IN_TX | PERF_IP_FLAG_TRACE_END));
+ name = sample_flags_to_name(flags & ~(xf | PERF_IP_FLAG_TRACE_END));
if (name)
- return fprintf(fp, " tr end %-7s%4s ", name, in_tx ? "(x)" : "");
+ return snprintf(str, sz, "tr end %-7s%6s", name, xs);
}
for (i = 0; i < n; i++, flags >>= 1) {
- if (flags & 1)
+ if ((flags & 1) && pos < sz)
str[pos++] = chars[i];
}
for (; i < 32; i++, flags >>= 1) {
- if (flags & 1)
+ if ((flags & 1) && pos < sz)
str[pos++] = '?';
}
- str[pos] = 0;
+ if (pos < sz)
+ str[pos] = 0;
+
+ return pos;
+}
+
+static int perf_sample__fprintf_flags(u32 flags, FILE *fp)
+{
+ char str[SAMPLE_FLAGS_BUF_SIZE];
- return fprintf(fp, " %-19s ", str);
+ perf_sample__sprintf_flags(flags, str, sizeof(str));
+ return fprintf(fp, " %-21s ", str);
}
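An illustrative aside, not part of the patch: the rework above splits flag formatting into a sprintf-style helper, presumably so callers outside this printer can render the flags into their own buffers; known flag combinations are printed by name, otherwise each set bit becomes one character, with bits beyond the known set rendered as '?'. A generic standalone sketch of that per-bit expansion (the mnemonic string here is a made-up stand-in for PERF_IP_FLAG_CHARS):

#include <stdio.h>
#include <string.h>

/* Expand a 32-bit flags word into one character per known bit, '?' otherwise */
static size_t flags_to_str(unsigned int flags, char *str, size_t sz)
{
	const char *chars = "bcrosyiABExghDt";	/* hypothetical per-bit mnemonics */
	const size_t n = strlen(chars);
	size_t i, pos = 0;

	for (i = 0; i < n; i++, flags >>= 1) {
		if ((flags & 1) && pos < sz)
			str[pos++] = chars[i];
	}
	for (; i < 32; i++, flags >>= 1) {
		if ((flags & 1) && pos < sz)
			str[pos++] = '?';
	}
	if (pos < sz)
		str[pos] = 0;
	return pos;
}

int main(void)
{
	char buf[33];

	flags_to_str(0x5, buf, sizeof(buf));
	printf("%s\n", buf);	/* bits 0 and 2 -> "br" with the mnemonics above */
	return 0;
}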
struct printer_data {
@@ -1578,16 +1779,44 @@ static int perf_sample__fprintf_pt_spacing(int len, FILE *fp)
return perf_sample__fprintf_spacing(len, 34, fp);
}
+/* Return true if the value contains only printable ASCII characters padded with NULLs */
+static bool ptw_is_prt(u64 val)
+{
+ char c;
+ u32 i;
+
+ for (i = 0; i < sizeof(val); i++) {
+ c = ((char *)&val)[i];
+ if (!c)
+ break;
+ if (!isprint(c) || !isascii(c))
+ return false;
+ }
+ for (; i < sizeof(val); i++) {
+ c = ((char *)&val)[i];
+ if (c)
+ return false;
+ }
+ return true;
+}
+
static int perf_sample__fprintf_synth_ptwrite(struct perf_sample *sample, FILE *fp)
{
struct perf_synth_intel_ptwrite *data = perf_sample__synth_ptr(sample);
+ char str[sizeof(u64) + 1] = "";
int len;
+ u64 val;
if (perf_sample__bad_synth_size(sample, *data))
return 0;
- len = fprintf(fp, " IP: %u payload: %#" PRIx64 " ",
- data->ip, le64_to_cpu(data->payload));
+ val = le64_to_cpu(data->payload);
+ if (ptw_is_prt(val)) {
+ memcpy(str, &val, sizeof(val));
+ str[sizeof(val)] = 0;
+ }
+ len = fprintf(fp, " IP: %u payload: %#" PRIx64 " %s ",
+ data->ip, val, str);
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
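An illustrative aside, not part of the patch: ptw_is_prt() above treats a ptwrite payload as text only when it holds printable ASCII followed exclusively by NUL padding, in which case the printer copies it into a sizeof(u64)+1 buffer and shows it alongside the hex value. A standalone version of that check (the payload value is a stand-in):

#include <ctype.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A u64 payload counts as text only if it is printable ASCII plus trailing NULs */
static bool payload_is_text(uint64_t val)
{
	const unsigned char *p = (const unsigned char *)&val;
	size_t i;

	for (i = 0; i < sizeof(val) && p[i]; i++) {
		if (!isascii(p[i]) || !isprint(p[i]))
			return false;
	}
	for (; i < sizeof(val); i++) {	/* the remainder must be NUL padding */
		if (p[i])
			return false;
	}
	return true;
}

int main(void)
{
	uint64_t val = 0;
	char str[sizeof(val) + 1] = "";

	memcpy(&val, "hello", 5);	/* stand-in for a decoded ptwrite payload */
	if (payload_is_text(val))
		memcpy(str, &val, sizeof(val));
	printf("payload: %#llx %s\n", (unsigned long long)val, str);
	return 0;
}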
@@ -1661,6 +1890,68 @@ static int perf_sample__fprintf_synth_cbr(struct perf_sample *sample, FILE *fp)
return len + perf_sample__fprintf_pt_spacing(len, fp);
}
+static int perf_sample__fprintf_synth_psb(struct perf_sample *sample, FILE *fp)
+{
+ struct perf_synth_intel_psb *data = perf_sample__synth_ptr(sample);
+ int len;
+
+ if (perf_sample__bad_synth_size(sample, *data))
+ return 0;
+
+ len = fprintf(fp, " psb offs: %#" PRIx64, data->offset);
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
+}
+
+/* Intel PT Event Trace */
+static int perf_sample__fprintf_synth_evt(struct perf_sample *sample, FILE *fp)
+{
+ struct perf_synth_intel_evt *data = perf_sample__synth_ptr(sample);
+ const char *cfe[32] = {NULL, "INTR", "IRET", "SMI", "RSM", "SIPI",
+ "INIT", "VMENTRY", "VMEXIT", "VMEXIT_INTR",
+ "SHUTDOWN", NULL, "UINTR", "UIRET"};
+ const char *evd[64] = {"PFA", "VMXQ", "VMXR"};
+ const char *s;
+ int len, i;
+
+ if (perf_sample__bad_synth_size(sample, *data))
+ return 0;
+
+ s = cfe[data->type];
+ if (s) {
+ len = fprintf(fp, " cfe: %s IP: %d vector: %u",
+ s, data->ip, data->vector);
+ } else {
+ len = fprintf(fp, " cfe: %u IP: %d vector: %u",
+ data->type, data->ip, data->vector);
+ }
+ for (i = 0; i < data->evd_cnt; i++) {
+ unsigned int et = data->evd[i].evd_type & 0x3f;
+
+ s = evd[et];
+ if (s) {
+ len += fprintf(fp, " %s: %#" PRIx64,
+ s, data->evd[i].payload);
+ } else {
+ len += fprintf(fp, " EVD_%u: %#" PRIx64,
+ et, data->evd[i].payload);
+ }
+ }
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
+}
+
+static int perf_sample__fprintf_synth_iflag_chg(struct perf_sample *sample, FILE *fp)
+{
+ struct perf_synth_intel_iflag_chg *data = perf_sample__synth_ptr(sample);
+ int len;
+
+ if (perf_sample__bad_synth_size(sample, *data))
+ return 0;
+
+ len = fprintf(fp, " IFLAG: %d->%d %s branch", !data->iflag, data->iflag,
+ data->via_branch ? "via" : "non");
+ return len + perf_sample__fprintf_pt_spacing(len, fp);
+}
+
static int perf_sample__fprintf_synth(struct perf_sample *sample,
struct evsel *evsel, FILE *fp)
{
@@ -1677,6 +1968,12 @@ static int perf_sample__fprintf_synth(struct perf_sample *sample,
return perf_sample__fprintf_synth_pwrx(sample, fp);
case PERF_SYNTH_INTEL_CBR:
return perf_sample__fprintf_synth_cbr(sample, fp);
+ case PERF_SYNTH_INTEL_PSB:
+ return perf_sample__fprintf_synth_psb(sample, fp);
+ case PERF_SYNTH_INTEL_EVT:
+ return perf_sample__fprintf_synth_evt(sample, fp);
+ case PERF_SYNTH_INTEL_IFLAG_CHG:
+ return perf_sample__fprintf_synth_iflag_chg(sample, fp);
default:
break;
}
@@ -1684,36 +1981,13 @@ static int perf_sample__fprintf_synth(struct perf_sample *sample,
return 0;
}
-struct perf_script {
- struct perf_tool tool;
- struct perf_session *session;
- bool show_task_events;
- bool show_mmap_events;
- bool show_switch_events;
- bool show_namespace_events;
- bool show_lost_events;
- bool show_round_events;
- bool show_bpf_events;
- bool show_cgroup_events;
- bool allocated;
- bool per_event_dump;
- struct evswitch evswitch;
- struct perf_cpu_map *cpus;
- struct perf_thread_map *threads;
- int name_width;
- const char *time_str;
- struct perf_time_interval *ptime_range;
- int range_size;
- int range_num;
-};
-
-static int perf_evlist__max_name_len(struct evlist *evlist)
+static int evlist__max_name_len(struct evlist *evlist)
{
struct evsel *evsel;
int max = 0;
evlist__for_each_entry(evlist, evsel) {
- int len = strlen(perf_evsel__name(evsel));
+ int len = strlen(evsel__name(evsel));
max = MAX(len, max);
}
@@ -1754,7 +2028,7 @@ static void script_print_metric(struct perf_stat_config *config __maybe_unused,
if (!fmt)
return;
- perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+ perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
PERF_RECORD_SAMPLE, mctx->fp);
fputs("\tmetric: ", mctx->fp);
if (color)
@@ -1769,7 +2043,7 @@ static void script_new_line(struct perf_stat_config *config __maybe_unused,
{
struct metric_ctx *mctx = ctx;
- perf_sample__fprintf_start(mctx->sample, mctx->thread, mctx->evsel,
+ perf_sample__fprintf_start(NULL, mctx->sample, mctx->thread, mctx->evsel,
PERF_RECORD_SAMPLE, mctx->fp);
fputs("\tmetric: ", mctx->fp);
}
@@ -1780,6 +2054,7 @@ static void perf_sample__fprint_metric(struct perf_script *script,
struct perf_sample *sample,
FILE *fp)
{
+ struct evsel *leader = evsel__leader(evsel);
struct perf_stat_output_ctx ctx = {
.print_metric = script_print_metric,
.new_line = script_new_line,
@@ -1795,32 +2070,28 @@ static void perf_sample__fprint_metric(struct perf_script *script,
u64 val;
if (!evsel->stats)
- perf_evlist__alloc_stats(script->session->evlist, false);
- if (evsel_script(evsel->leader)->gnum++ == 0)
+ evlist__alloc_stats(&stat_config, script->session->evlist, /*alloc_raw=*/false);
+ if (evsel_script(leader)->gnum++ == 0)
perf_stat__reset_shadow_stats();
val = sample->period * evsel->scale;
- perf_stat__update_shadow_stats(evsel,
- val,
- sample->cpu,
- &rt_stat);
evsel_script(evsel)->val = val;
- if (evsel_script(evsel->leader)->gnum == evsel->leader->core.nr_members) {
- for_each_group_member (ev2, evsel->leader) {
+ if (evsel_script(leader)->gnum == leader->core.nr_members) {
+ for_each_group_member (ev2, leader) {
perf_stat__print_shadow_stats(&stat_config, ev2,
evsel_script(ev2)->val,
sample->cpu,
&ctx,
- NULL,
- &rt_stat);
+ NULL);
}
- evsel_script(evsel->leader)->gnum = 0;
+ evsel_script(leader)->gnum = 0;
}
}
static bool show_event(struct perf_sample *sample,
struct evsel *evsel,
struct thread *thread,
- struct addr_location *al)
+ struct addr_location *al,
+ struct addr_location *addr_al)
{
int depth = thread_stack__depth(thread, sample->cpu);
@@ -1836,7 +2107,7 @@ static bool show_event(struct perf_sample *sample,
} else {
const char *s = symbol_conf.graph_function;
u64 ip;
- const char *name = resolve_branch_sym(sample, evsel, thread, al,
+ const char *name = resolve_branch_sym(sample, evsel, thread, al, addr_al,
&ip);
unsigned nlen;
@@ -1861,6 +2132,7 @@ static bool show_event(struct perf_sample *sample,
static void process_event(struct perf_script *script,
struct perf_sample *sample, struct evsel *evsel,
struct addr_location *al,
+ struct addr_location *addr_al,
struct machine *machine)
{
struct thread *thread = al->thread;
@@ -1868,29 +2140,25 @@ static void process_event(struct perf_script *script,
unsigned int type = output_type(attr->type);
struct evsel_script *es = evsel->priv;
FILE *fp = es->fp;
+ char str[PAGE_SIZE_NAME_LEN];
+ const char *arch = perf_env__arch(machine->env);
if (output[type].fields == 0)
return;
- if (!show_event(sample, evsel, thread, al))
- return;
-
- if (evswitch__discard(&script->evswitch, evsel))
- return;
-
++es->samples;
- perf_sample__fprintf_start(sample, thread, evsel,
+ perf_sample__fprintf_start(script, sample, thread, evsel,
PERF_RECORD_SAMPLE, fp);
if (PRINT_FIELD(PERIOD))
fprintf(fp, "%10" PRIu64 " ", sample->period);
if (PRINT_FIELD(EVNAME)) {
- const char *evname = perf_evsel__name(evsel);
+ const char *evname = evsel__name(evsel);
if (!script->name_width)
- script->name_width = perf_evlist__max_name_len(script->session->evlist);
+ script->name_width = evlist__max_name_len(script->session->evlist);
fprintf(fp, "%*s: ", script->name_width, evname ?: "[unknown]");
}
@@ -1899,15 +2167,15 @@ static void process_event(struct perf_script *script,
perf_sample__fprintf_flags(sample->flags, fp);
if (is_bts_event(attr)) {
- perf_sample__fprintf_bts(sample, evsel, thread, al, machine, fp);
+ perf_sample__fprintf_bts(sample, evsel, thread, al, addr_al, machine, fp);
return;
}
-
+#ifdef HAVE_LIBTRACEEVENT
if (PRINT_FIELD(TRACE) && sample->raw_data) {
event_format__fprintf(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size, fp);
}
-
+#endif
if (attr->type == PERF_TYPE_SYNTH && PRINT_FIELD(SYNTH))
perf_sample__fprintf_synth(sample, evsel, fp);
@@ -1920,9 +2188,18 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(WEIGHT))
fprintf(fp, "%16" PRIu64, sample->weight);
+ if (PRINT_FIELD(INS_LAT))
+ fprintf(fp, "%16" PRIu16, sample->ins_lat);
+
+ if (PRINT_FIELD(RETIRE_LAT))
+ fprintf(fp, "%16" PRIu16, sample->retire_lat);
+
if (PRINT_FIELD(IP)) {
struct callchain_cursor *cursor = NULL;
+ if (script->stitch_lbr)
+ al->thread->lbr_stitch_enable = true;
+
if (symbol_conf.use_callchain && sample->callchain &&
thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
sample, NULL, NULL, scripting_max_stack) == 0)
@@ -1934,10 +2211,10 @@ static void process_event(struct perf_script *script,
}
if (PRINT_FIELD(IREGS))
- perf_sample__fprintf_iregs(sample, attr, fp);
+ perf_sample__fprintf_iregs(sample, attr, arch, fp);
if (PRINT_FIELD(UREGS))
- perf_sample__fprintf_uregs(sample, attr, fp);
+ perf_sample__fprintf_uregs(sample, attr, arch, fp);
if (PRINT_FIELD(BRSTACK))
perf_sample__fprintf_brstack(sample, thread, attr, fp);
@@ -1946,13 +2223,30 @@ static void process_event(struct perf_script *script,
else if (PRINT_FIELD(BRSTACKOFF))
perf_sample__fprintf_brstackoff(sample, thread, attr, fp);
- if (perf_evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
+ if (evsel__is_bpf_output(evsel) && PRINT_FIELD(BPF_OUTPUT))
perf_sample__fprintf_bpf_output(sample, fp);
perf_sample__fprintf_insn(sample, attr, thread, machine, fp);
if (PRINT_FIELD(PHYS_ADDR))
fprintf(fp, "%16" PRIx64, sample->phys_addr);
+ if (PRINT_FIELD(DATA_PAGE_SIZE))
+ fprintf(fp, " %s", get_page_size_name(sample->data_page_size, str));
+
+ if (PRINT_FIELD(CODE_PAGE_SIZE))
+ fprintf(fp, " %s", get_page_size_name(sample->code_page_size, str));
+
+ if (PRINT_FIELD(CGROUP)) {
+ const char *cgrp_name;
+ struct cgroup *cgrp = cgroup__find(machine->env,
+ sample->cgroup);
+ if (cgrp != NULL)
+ cgrp_name = cgrp->name;
+ else
+ cgrp_name = "unknown";
+ fprintf(fp, " %s", cgrp_name);
+ }
+
perf_sample__fprintf_ipc(sample, attr, fp);
fprintf(fp, "\n");
@@ -1966,7 +2260,7 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(METRIC))
perf_sample__fprint_metric(script, thread, evsel, sample, fp);
- if (verbose)
+ if (verbose > 0)
fflush(fp);
}
@@ -1975,13 +2269,10 @@ static struct scripting_ops *scripting_ops;
static void __process_stat(struct evsel *counter, u64 tstamp)
{
int nthreads = perf_thread_map__nr(counter->core.threads);
- int ncpus = perf_evsel__nr_cpus(counter);
- int cpu, thread;
+ int idx, thread;
+ struct perf_cpu cpu;
static int header_printed;
- if (counter->core.system_wide)
- nthreads = 1;
-
if (!header_printed) {
printf("%3s %8s %15s %15s %15s %15s %s\n",
"CPU", "THREAD", "VAL", "ENA", "RUN", "TIME", "EVENT");
@@ -1989,19 +2280,19 @@ static void __process_stat(struct evsel *counter, u64 tstamp)
}
for (thread = 0; thread < nthreads; thread++) {
- for (cpu = 0; cpu < ncpus; cpu++) {
+ perf_cpu_map__for_each_cpu(cpu, idx, evsel__cpus(counter)) {
struct perf_counts_values *counts;
- counts = perf_counts(counter->counts, cpu, thread);
+ counts = perf_counts(counter->counts, idx, thread);
printf("%3d %8d %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %15" PRIu64 " %s\n",
- counter->core.cpus->map[cpu],
+ cpu.cpu,
perf_thread_map__pid(counter->core.threads, thread),
counts->val,
counts->ena,
counts->run,
tstamp,
- perf_evsel__name(counter));
+ evsel__name(counter));
}
}
}
@@ -2022,7 +2313,9 @@ static void process_stat_interval(u64 tstamp)
static void setup_scripting(void)
{
+#ifdef HAVE_LIBTRACEEVENT
setup_perl_scripting();
+#endif
setup_python_scripting();
}
@@ -2040,7 +2333,7 @@ static int cleanup_scripting(void)
static bool filter_cpu(struct perf_sample *sample)
{
- if (cpu_list)
+ if (cpu_list && sample->cpu != (u32)-1)
return !test_bit(sample->cpu, cpu_bitmap);
return false;
}
@@ -2053,10 +2346,23 @@ static int process_sample_event(struct perf_tool *tool,
{
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct addr_location al;
+ struct addr_location addr_al;
+ int ret = 0;
+
+ /* Set thread to NULL to indicate addr_al and al are not initialized */
+ addr_al.thread = NULL;
+ al.thread = NULL;
+
+ ret = dlfilter__filter_event_early(dlfilter, event, sample, evsel, machine, &al, &addr_al);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto out_put;
+ }
if (perf_time__ranges_skip_sample(scr->ptime_range, scr->range_num,
sample->time)) {
- return 0;
+ goto out_put;
}
if (debug_mode) {
@@ -2067,29 +2373,53 @@ static int process_sample_event(struct perf_tool *tool,
nr_unordered++;
}
last_timestamp = sample->time;
- return 0;
+ goto out_put;
}
- if (machine__resolve(machine, &al, sample) < 0) {
+ if (filter_cpu(sample))
+ goto out_put;
+
+ if (!al.thread && machine__resolve(machine, &al, sample) < 0) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
- return -1;
+ ret = -1;
+ goto out_put;
}
if (al.filtered)
goto out_put;
- if (filter_cpu(sample))
+ if (!show_event(sample, evsel, al.thread, &al, &addr_al))
goto out_put;
- if (scripting_ops)
- scripting_ops->process_event(event, sample, evsel, &al);
- else
- process_event(scr, sample, evsel, &al, machine);
+ if (evswitch__discard(&scr->evswitch, evsel))
+ goto out_put;
+
+ ret = dlfilter__filter_event(dlfilter, event, sample, evsel, machine, &al, &addr_al);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto out_put;
+ }
+
+ if (scripting_ops) {
+ struct addr_location *addr_al_ptr = NULL;
+
+ if ((evsel->core.attr.sample_type & PERF_SAMPLE_ADDR) &&
+ sample_addr_correlates_sym(&evsel->core.attr)) {
+ if (!addr_al.thread)
+ thread__resolve(al.thread, &addr_al, sample);
+ addr_al_ptr = &addr_al;
+ }
+ scripting_ops->process_event(event, sample, evsel, &al, addr_al_ptr);
+ } else {
+ process_event(scr, sample, evsel, &al, &addr_al, machine);
+ }
out_put:
- addr_location__put(&al);
- return 0;
+ if (al.thread)
+ addr_location__put(&al);
+ return ret;
}
static int process_attr(struct perf_tool *tool, union perf_event *event,
@@ -2098,6 +2428,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
struct perf_script *scr = container_of(tool, struct perf_script, tool);
struct evlist *evlist;
struct evsel *evsel, *pos;
+ u64 sample_type;
int err;
static struct evsel_script *es;
@@ -2110,8 +2441,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
if (!evsel->priv) {
if (scr->per_event_dump) {
- evsel->priv = perf_evsel_script__new(evsel,
- scr->session->data);
+ evsel->priv = evsel_script__new(evsel, scr->session->data);
} else {
es = zalloc(sizeof(*es));
if (!es)
@@ -2130,49 +2460,89 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
return 0;
}
- set_print_ip_opts(&evsel->core.attr);
+ if (evsel->core.attr.sample_type) {
+ err = evsel__check_attr(evsel, scr->session);
+ if (err)
+ return err;
+ }
- if (evsel->core.attr.sample_type)
- err = perf_evsel__check_attr(evsel, scr->session);
+ /*
+ * Check if we need to enable callchains based
+ * on events sample_type.
+ */
+ sample_type = evlist__combined_sample_type(evlist);
+ callchain_param_setup(sample_type, perf_env__arch((*pevlist)->env));
- return err;
+ /* Enable fields for callchain entries */
+ if (symbol_conf.use_callchain &&
+ (sample_type & PERF_SAMPLE_CALLCHAIN ||
+ sample_type & PERF_SAMPLE_BRANCH_STACK ||
+ (sample_type & PERF_SAMPLE_REGS_USER &&
+ sample_type & PERF_SAMPLE_STACK_USER))) {
+ int type = output_type(evsel->core.attr.type);
+
+ if (!(output[type].user_unset_fields & PERF_OUTPUT_IP))
+ output[type].fields |= PERF_OUTPUT_IP;
+ if (!(output[type].user_unset_fields & PERF_OUTPUT_SYM))
+ output[type].fields |= PERF_OUTPUT_SYM;
+ }
+ set_print_ip_opts(&evsel->core.attr);
+ return 0;
}
-static int process_comm_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
+static int print_event_with_time(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine,
+ pid_t pid, pid_t tid, u64 timestamp)
{
- struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
- int ret = -1;
+ struct evsel *evsel = evlist__id2evsel(session->evlist, sample->id);
+ struct thread *thread = NULL;
- thread = machine__findnew_thread(machine, event->comm.pid, event->comm.tid);
- if (thread == NULL) {
- pr_debug("problem processing COMM event, skipping it.\n");
- return -1;
+ if (evsel && !evsel->core.attr.sample_id_all) {
+ sample->cpu = 0;
+ sample->time = timestamp;
+ sample->pid = pid;
+ sample->tid = tid;
}
- if (perf_event__process_comm(tool, event, sample, machine) < 0)
- goto out;
+ if (filter_cpu(sample))
+ return 0;
- if (!evsel->core.attr.sample_id_all) {
- sample->cpu = 0;
- sample->time = 0;
- sample->tid = event->comm.tid;
- sample->pid = event->comm.pid;
- }
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_COMM, stdout);
- perf_event__fprintf(event, stdout);
+ if (tid != -1)
+ thread = machine__findnew_thread(machine, pid, tid);
+
+ if (evsel) {
+ perf_sample__fprintf_start(script, sample, thread, evsel,
+ event->header.type, stdout);
}
- ret = 0;
-out:
+
+ perf_event__fprintf(event, machine, stdout);
+
thread__put(thread);
- return ret;
+
+ return 0;
+}
+
+static int print_event(struct perf_tool *tool, union perf_event *event,
+ struct perf_sample *sample, struct machine *machine,
+ pid_t pid, pid_t tid)
+{
+ return print_event_with_time(tool, event, sample, machine, pid, tid, 0);
+}
+
+static int process_comm_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ if (perf_event__process_comm(tool, event, sample, machine) < 0)
+ return -1;
+
+ return print_event(tool, event, sample, machine, event->comm.pid,
+ event->comm.tid);
}
static int process_namespaces_event(struct perf_tool *tool,
@@ -2180,37 +2550,11 @@ static int process_namespaces_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
- struct thread *thread;
- struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
- int ret = -1;
-
- thread = machine__findnew_thread(machine, event->namespaces.pid,
- event->namespaces.tid);
- if (thread == NULL) {
- pr_debug("problem processing NAMESPACES event, skipping it.\n");
- return -1;
- }
-
if (perf_event__process_namespaces(tool, event, sample, machine) < 0)
- goto out;
+ return -1;
- if (!evsel->core.attr.sample_id_all) {
- sample->cpu = 0;
- sample->time = 0;
- sample->tid = event->namespaces.tid;
- sample->pid = event->namespaces.pid;
- }
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_NAMESPACES, stdout);
- perf_event__fprintf(event, stdout);
- }
- ret = 0;
-out:
- thread__put(thread);
- return ret;
+ return print_event(tool, event, sample, machine, event->namespaces.pid,
+ event->namespaces.tid);
}
static int process_cgroup_event(struct perf_tool *tool,
@@ -2218,34 +2562,11 @@ static int process_cgroup_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
- struct thread *thread;
- struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
- int ret = -1;
-
- thread = machine__findnew_thread(machine, sample->pid, sample->tid);
- if (thread == NULL) {
- pr_debug("problem processing CGROUP event, skipping it.\n");
- return -1;
- }
-
if (perf_event__process_cgroup(tool, event, sample, machine) < 0)
- goto out;
+ return -1;
- if (!evsel->core.attr.sample_id_all) {
- sample->cpu = 0;
- sample->time = 0;
- }
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_CGROUP, stdout);
- perf_event__fprintf(event, stdout);
- }
- ret = 0;
-out:
- thread__put(thread);
- return ret;
+ return print_event(tool, event, sample, machine, sample->pid,
+ sample->tid);
}
static int process_fork_event(struct perf_tool *tool,
@@ -2253,69 +2574,24 @@ static int process_fork_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
- struct thread *thread;
- struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
-
if (perf_event__process_fork(tool, event, sample, machine) < 0)
return -1;
- thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid);
- if (thread == NULL) {
- pr_debug("problem processing FORK event, skipping it.\n");
- return -1;
- }
-
- if (!evsel->core.attr.sample_id_all) {
- sample->cpu = 0;
- sample->time = event->fork.time;
- sample->tid = event->fork.tid;
- sample->pid = event->fork.pid;
- }
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_FORK, stdout);
- perf_event__fprintf(event, stdout);
- }
- thread__put(thread);
-
- return 0;
+ return print_event_with_time(tool, event, sample, machine,
+ event->fork.pid, event->fork.tid,
+ event->fork.time);
}
static int process_exit_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample,
struct machine *machine)
{
- int err = 0;
- struct thread *thread;
- struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
-
- thread = machine__findnew_thread(machine, event->fork.pid, event->fork.tid);
- if (thread == NULL) {
- pr_debug("problem processing EXIT event, skipping it.\n");
+ /* Print before 'exit' deletes anything */
+ if (print_event_with_time(tool, event, sample, machine, event->fork.pid,
+ event->fork.tid, event->fork.time))
return -1;
- }
- if (!evsel->core.attr.sample_id_all) {
- sample->cpu = 0;
- sample->time = 0;
- sample->tid = event->fork.tid;
- sample->pid = event->fork.pid;
- }
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_EXIT, stdout);
- perf_event__fprintf(event, stdout);
- }
-
- if (perf_event__process_exit(tool, event, sample, machine) < 0)
- err = -1;
-
- thread__put(thread);
- return err;
+ return perf_event__process_exit(tool, event, sample, machine);
}
static int process_mmap_event(struct perf_tool *tool,
@@ -2323,33 +2599,11 @@ static int process_mmap_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
- struct thread *thread;
- struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
-
if (perf_event__process_mmap(tool, event, sample, machine) < 0)
return -1;
- thread = machine__findnew_thread(machine, event->mmap.pid, event->mmap.tid);
- if (thread == NULL) {
- pr_debug("problem processing MMAP event, skipping it.\n");
- return -1;
- }
-
- if (!evsel->core.attr.sample_id_all) {
- sample->cpu = 0;
- sample->time = 0;
- sample->tid = event->mmap.tid;
- sample->pid = event->mmap.pid;
- }
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_MMAP, stdout);
- perf_event__fprintf(event, stdout);
- }
- thread__put(thread);
- return 0;
+ return print_event(tool, event, sample, machine, event->mmap.pid,
+ event->mmap.tid);
}
static int process_mmap2_event(struct perf_tool *tool,
@@ -2357,33 +2611,11 @@ static int process_mmap2_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
- struct thread *thread;
- struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
-
if (perf_event__process_mmap2(tool, event, sample, machine) < 0)
return -1;
- thread = machine__findnew_thread(machine, event->mmap2.pid, event->mmap2.tid);
- if (thread == NULL) {
- pr_debug("problem processing MMAP2 event, skipping it.\n");
- return -1;
- }
-
- if (!evsel->core.attr.sample_id_all) {
- sample->cpu = 0;
- sample->time = 0;
- sample->tid = event->mmap2.tid;
- sample->pid = event->mmap2.pid;
- }
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_MMAP2, stdout);
- perf_event__fprintf(event, stdout);
- }
- thread__put(thread);
- return 0;
+ return print_event(tool, event, sample, machine, event->mmap2.pid,
+ event->mmap2.tid);
}
static int process_switch_event(struct perf_tool *tool,
@@ -2391,34 +2623,30 @@ static int process_switch_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
- struct thread *thread;
struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
if (perf_event__process_switch(tool, event, sample, machine) < 0)
return -1;
- if (scripting_ops && scripting_ops->process_switch)
+ if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample))
scripting_ops->process_switch(event, sample, machine);
if (!script->show_switch_events)
return 0;
- thread = machine__findnew_thread(machine, sample->pid,
- sample->tid);
- if (thread == NULL) {
- pr_debug("problem processing SWITCH event, skipping it.\n");
- return -1;
- }
+ return print_event(tool, event, sample, machine, sample->pid,
+ sample->tid);
+}
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_SWITCH, stdout);
- perf_event__fprintf(event, stdout);
+static int process_auxtrace_error(struct perf_session *session,
+ union perf_event *event)
+{
+ if (scripting_ops && scripting_ops->process_auxtrace_error) {
+ scripting_ops->process_auxtrace_error(session, event);
+ return 0;
}
- thread__put(thread);
- return 0;
+
+ return perf_event__process_auxtrace_error(session, event);
}
static int
@@ -2427,22 +2655,18 @@ process_lost_event(struct perf_tool *tool,
struct perf_sample *sample,
struct machine *machine)
{
- struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
- struct thread *thread;
-
- thread = machine__findnew_thread(machine, sample->pid,
- sample->tid);
- if (thread == NULL)
- return -1;
+ return print_event(tool, event, sample, machine, sample->pid,
+ sample->tid);
+}
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- PERF_RECORD_LOST, stdout);
- perf_event__fprintf(event, stdout);
- }
- thread__put(thread);
+static int
+process_throttle_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ if (scripting_ops && scripting_ops->process_throttle)
+ scripting_ops->process_throttle(event, sample, machine);
return 0;
}
@@ -2452,7 +2676,7 @@ process_finished_round_event(struct perf_tool *tool __maybe_unused,
struct ordered_events *oe __maybe_unused)
{
- perf_event__fprintf(event, stdout);
+ perf_event__fprintf(event, NULL, stdout);
return 0;
}
@@ -2462,33 +2686,23 @@ process_bpf_events(struct perf_tool *tool __maybe_unused,
struct perf_sample *sample,
struct machine *machine)
{
- struct thread *thread;
- struct perf_script *script = container_of(tool, struct perf_script, tool);
- struct perf_session *session = script->session;
- struct evsel *evsel = perf_evlist__id2evsel(session->evlist, sample->id);
-
if (machine__process_ksymbol(machine, event, sample) < 0)
return -1;
- if (!evsel->core.attr.sample_id_all) {
- perf_event__fprintf(event, stdout);
- return 0;
- }
+ return print_event(tool, event, sample, machine, sample->pid,
+ sample->tid);
+}
- thread = machine__findnew_thread(machine, sample->pid, sample->tid);
- if (thread == NULL) {
- pr_debug("problem processing MMAP event, skipping it.\n");
+static int process_text_poke_events(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ if (perf_event__process_text_poke(tool, event, sample, machine) < 0)
return -1;
- }
- if (!filter_cpu(sample)) {
- perf_sample__fprintf_start(sample, thread, evsel,
- event->header.type, stdout);
- perf_event__fprintf(event, stdout);
- }
-
- thread__put(thread);
- return 0;
+ return print_event(tool, event, sample, machine, sample->pid,
+ sample->tid);
}
static void sig_handler(int sig __maybe_unused)
@@ -2504,7 +2718,7 @@ static void perf_script__fclose_per_event_dump(struct perf_script *script)
evlist__for_each_entry(evlist, evsel) {
if (!evsel->priv)
break;
- perf_evsel_script__delete(evsel->priv);
+ evsel_script__delete(evsel->priv);
evsel->priv = NULL;
}
}
@@ -2517,14 +2731,14 @@ static int perf_script__fopen_per_event_dump(struct perf_script *script)
/*
* Already setup? I.e. we may be called twice in cases like
* Intel PT, one for the intel_pt// and dummy events, then
- * for the evsels syntheized from the auxtrace info.
+ * for the evsels synthesized from the auxtrace info.
*
 * See perf_script__process_auxtrace_info.
*/
if (evsel->priv != NULL)
continue;
- evsel->priv = perf_evsel_script__new(evsel, script->session->data);
+ evsel->priv = evsel_script__new(evsel, script->session->data);
if (evsel->priv == NULL)
goto out_err_fclose;
}
@@ -2559,20 +2773,24 @@ static void perf_script__exit_per_event_dump_stats(struct perf_script *script)
evlist__for_each_entry(script->session->evlist, evsel) {
struct evsel_script *es = evsel->priv;
- perf_evsel_script__fprintf(es, stdout);
- perf_evsel_script__delete(es);
+ evsel_script__fprintf(es, stdout);
+ evsel_script__delete(es);
evsel->priv = NULL;
}
}
+static void perf_script__exit(struct perf_script *script)
+{
+ perf_thread_map__put(script->threads);
+ perf_cpu_map__put(script->cpus);
+}
+
static int __cmd_script(struct perf_script *script)
{
int ret;
signal(SIGINT, sig_handler);
- perf_stat__init_shadow_stats();
-
/* override event processing functions */
if (script->show_task_events) {
script->tool.comm = process_comm_event;
@@ -2585,6 +2803,8 @@ static int __cmd_script(struct perf_script *script)
}
if (script->show_switch_events || (scripting_ops && scripting_ops->process_switch))
script->tool.context_switch = process_switch_event;
+ if (scripting_ops && scripting_ops->process_auxtrace_error)
+ script->tool.auxtrace_error = process_auxtrace_error;
if (script->show_namespace_events)
script->tool.namespaces = process_namespaces_event;
if (script->show_cgroup_events)
@@ -2599,6 +2819,10 @@ static int __cmd_script(struct perf_script *script)
script->tool.ksymbol = process_bpf_events;
script->tool.bpf = process_bpf_events;
}
+ if (script->show_text_poke_events) {
+ script->tool.ksymbol = process_bpf_events;
+ script->tool.text_poke = process_text_poke_events;
+ }
if (perf_script__setup_per_event_dump(script)) {
pr_err("Couldn't create the per event dump files\n");
@@ -2619,7 +2843,7 @@ static int __cmd_script(struct perf_script *script)
struct script_spec {
struct list_head node;
struct scripting_ops *ops;
- char spec[0];
+ char spec[];
};
static LIST_HEAD(script_specs);
@@ -2692,6 +2916,37 @@ static void list_available_languages(void)
fprintf(stderr, "\n");
}
+/* Find script file relative to current directory or exec path */
+static char *find_script(const char *script)
+{
+ char path[PATH_MAX];
+
+ if (!scripting_ops) {
+ const char *ext = strrchr(script, '.');
+
+ if (!ext)
+ return NULL;
+
+ scripting_ops = script_spec__lookup(++ext);
+ if (!scripting_ops)
+ return NULL;
+ }
+
+ if (access(script, R_OK)) {
+ char *exec_path = get_argv_exec_path();
+
+ if (!exec_path)
+ return NULL;
+ snprintf(path, sizeof(path), "%s/scripts/%s/%s",
+ exec_path, scripting_ops->dirname, script);
+ free(exec_path);
+ script = path;
+ if (access(script, R_OK))
+ return NULL;
+ }
+ return strdup(script);
+}
+
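An illustrative aside, not part of the patch: find_script() above resolves a script name by trying it as given first, and only then looking in a per-language directory under the perf exec path, with the language deduced from the file extension. A simplified standalone version of that fallback (all paths and names here are hypothetical):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/*
 * Resolve a script name: use it as-is if readable, otherwise look in a
 * per-language directory under a given exec path.
 */
static char *resolve_script(const char *name, const char *exec_path,
			    const char *lang_dir)
{
	char path[PATH_MAX];

	if (!access(name, R_OK))
		return strdup(name);

	snprintf(path, sizeof(path), "%s/scripts/%s/%s", exec_path, lang_dir, name);
	return access(path, R_OK) ? NULL : strdup(path);
}

int main(void)
{
	char *p = resolve_script("flamegraph.py", "/usr/libexec/perf-core", "python");

	printf("%s\n", p ? p : "(not found)");
	free(p);
	return 0;
}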
static int parse_scriptname(const struct option *opt __maybe_unused,
const char *str, int unset __maybe_unused)
{
@@ -2733,7 +2988,9 @@ static int parse_scriptname(const struct option *opt __maybe_unused,
}
}
- script_name = strdup(script);
+ script_name = find_script(script);
+ if (!script_name)
+ script_name = strdup(script);
return 0;
}
@@ -2836,7 +3093,7 @@ parse:
break;
}
if (i == imax && strcmp(tok, "flags") == 0) {
- print_flags = change == REMOVE ? false : true;
+ print_flags = change != REMOVE;
continue;
}
if (i == imax) {
@@ -2857,9 +3114,11 @@ parse:
if (change == REMOVE) {
output[j].fields &= ~all_output_options[i].field;
output[j].user_set_fields &= ~all_output_options[i].field;
+ output[j].user_unset_fields |= all_output_options[i].field;
} else {
output[j].fields |= all_output_options[i].field;
output[j].user_set_fields |= all_output_options[i].field;
+ output[j].user_unset_fields &= ~all_output_options[i].field;
}
output[j].user_set = true;
output[j].wildcard_set = true;
@@ -3101,6 +3360,34 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
exit(0);
}
+static int add_dlarg(const struct option *opt __maybe_unused,
+ const char *s, int unset __maybe_unused)
+{
+ char *arg = strdup(s);
+ void *a;
+
+ if (!arg)
+ return -1;
+
+ a = realloc(dlargv, sizeof(dlargv[0]) * (dlargc + 1));
+ if (!a) {
+ free(arg);
+ return -1;
+ }
+
+ dlargv = a;
+ dlargv[dlargc++] = arg;
+
+ return 0;
+}
+
+static void free_dlarg(void)
+{
+ while (dlargc--)
+ free(dlargv[dlargc]);
+ free(dlargv);
+}
+
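An illustrative aside, not part of the patch: add_dlarg() above grows an argv-style array by one slot per --dlarg occurrence with realloc, and free_dlarg() releases it in reverse. The same accumulation pattern in isolation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Append a copy of s to a heap-grown argv-style array */
static int argv_push(char ***argv, int *argc, const char *s)
{
	char *arg = strdup(s);
	void *a;

	if (!arg)
		return -1;

	a = realloc(*argv, sizeof(**argv) * (*argc + 1));
	if (!a) {
		free(arg);
		return -1;
	}

	*argv = a;
	(*argv)[(*argc)++] = arg;
	return 0;
}

int main(void)
{
	char **args = NULL;
	int i, n = 0;

	argv_push(&args, &n, "foo");
	argv_push(&args, &n, "bar=1");
	for (i = 0; i < n; i++)
		printf("dlarg[%d] = %s\n", i, args[i]);
	while (n--)
		free(args[n]);
	free(args);
	return 0;
}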
/*
* Some scripts specify the required events in their "xxx-record" file,
* this function will check if the events in perf.data match those
@@ -3108,7 +3395,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
*
* Fixme: All existing "xxx-record" are all in good formats "-e event ",
* which is covered well now. And new parsing code should be added to
- * cover the future complexing formats like event groups etc.
+ * cover the future complex formats like event groups etc.
*/
static int check_ev_match(char *dir_name, char *scriptname,
struct perf_session *session)
@@ -3145,7 +3432,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
match = 0;
evlist__for_each_entry(session->evlist, pos) {
- if (!strcmp(perf_evsel__name(pos), evname)) {
+ if (!strcmp(evsel__name(pos), evname)) {
match = 1;
break;
}
@@ -3183,7 +3470,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array, int num,
char *temp;
int i = 0;
- session = perf_session__new(&data, false, NULL);
+ session = perf_session__new(&data, NULL);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -3282,7 +3569,7 @@ static char *get_script_path(const char *script_root, const char *suffix)
static bool is_top_script(const char *script_path)
{
- return ends_with(script_path, "top") == NULL ? false : true;
+ return ends_with(script_path, "top") != NULL;
}
static int has_required_arg(char *script_path)
@@ -3330,17 +3617,14 @@ static int have_cmd(int argc, const char **argv)
static void script__setup_sample_type(struct perf_script *script)
{
struct perf_session *session = script->session;
- u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
-
- if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
- if ((sample_type & PERF_SAMPLE_REGS_USER) &&
- (sample_type & PERF_SAMPLE_STACK_USER)) {
- callchain_param.record_mode = CALLCHAIN_DWARF;
- dwarf_callchain_users = true;
- } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
- callchain_param.record_mode = CALLCHAIN_LBR;
- else
- callchain_param.record_mode = CALLCHAIN_FP;
+ u64 sample_type = evlist__combined_sample_type(session->evlist);
+
+ callchain_param_setup(sample_type, perf_env__arch(session->machines.host.env));
+
+ if (script->stitch_lbr && (callchain_param.record_mode != CALLCHAIN_LBR)) {
+ pr_warning("Can't find LBR callchain. Switch off --stitch-lbr.\n"
+ "Please apply --call-graph lbr when recording.\n");
+ script->stitch_lbr = false;
}
}
@@ -3378,7 +3662,7 @@ static int set_maps(struct perf_script *script)
perf_evlist__set_maps(&evlist->core, script->cpus, script->threads);
- if (perf_evlist__alloc_stats(evlist, true))
+ if (evlist__alloc_stats(&stat_config, evlist, /*alloc_raw=*/true))
return -ENOMEM;
script->allocated = true;
@@ -3392,6 +3676,9 @@ int process_thread_map_event(struct perf_session *session,
struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
+ if (dump_trace)
+ perf_event__fprintf_thread_map(event, stdout);
+
if (script->threads) {
pr_warning("Extra thread map event, ignoring.\n");
return 0;
@@ -3411,6 +3698,9 @@ int process_cpu_map_event(struct perf_session *session,
struct perf_tool *tool = session->tool;
struct perf_script *script = container_of(tool, struct perf_script, tool);
+ if (dump_trace)
+ perf_event__fprintf_cpu_map(event, stdout);
+
if (script->cpus) {
pr_warning("Extra cpu map event, ignoring.\n");
return 0;
@@ -3465,7 +3755,10 @@ static int parse_xed(const struct option *opt __maybe_unused,
const char *str __maybe_unused,
int unset __maybe_unused)
{
- force_pager("xed -F insn: -A -64 | less");
+ if (isatty(1))
+ force_pager("xed -F insn: -A -64 | less");
+ else
+ force_pager("xed -F insn: -A -64");
return 0;
}
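
The change above stops forcing a pager when perf script --xed output is redirected: the "| less" suffix is only appended when fd 1 is a terminal. The test itself is plain POSIX:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Page only when a human is watching stdout. */
	const char *cmd = isatty(STDOUT_FILENO)
			? "xed -F insn: -A -64 | less"
			: "xed -F insn: -A -64";

	printf("xed pipeline: %s\n", cmd);
	return 0;
}
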
@@ -3496,6 +3789,7 @@ int cmd_script(int argc, const char **argv)
bool header = false;
bool header_only = false;
bool script_started = false;
+ bool unsorted_dump = false;
char *rec_script_path = NULL;
char *rep_script_path = NULL;
struct perf_session *session;
@@ -3505,6 +3799,7 @@ int cmd_script(int argc, const char **argv)
};
struct utsname uts;
char *script_path = NULL;
+ const char *dlfilter_file = NULL;
const char **__argv;
int i, j, err = 0;
struct perf_script script = {
@@ -3519,7 +3814,9 @@ int cmd_script(int argc, const char **argv)
.fork = perf_event__process_fork,
.attr = process_attr,
.event_update = perf_event__process_event_update,
+#ifdef HAVE_LIBTRACEEVENT
.tracing_data = perf_event__process_tracing_data,
+#endif
.feature = process_feature_event,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
@@ -3531,6 +3828,8 @@ int cmd_script(int argc, const char **argv)
.stat_config = process_stat_config_event,
.thread_map = process_thread_map_event,
.cpu_map = process_cpu_map_event,
+ .throttle = process_throttle_event,
+ .unthrottle = process_throttle_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
@@ -3541,17 +3840,24 @@ int cmd_script(int argc, const char **argv)
const struct option options[] = {
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
"dump raw trace in ASCII"),
+ OPT_BOOLEAN(0, "dump-unsorted-raw-trace", &unsorted_dump,
+ "dump unsorted raw trace in ASCII"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('L', "Latency", &latency_format,
"show latency attributes (irqs/preemption disabled, etc)"),
OPT_CALLBACK_NOOPT('l', "list", NULL, NULL, "list available scripts",
list_available_scripts),
+ OPT_CALLBACK_NOOPT(0, "list-dlfilters", NULL, NULL, "list available dlfilters",
+ list_available_dlfilters),
OPT_CALLBACK('s', "script", NULL, "name",
"script file name (lang:script name, script name, or *)",
parse_scriptname),
OPT_STRING('g', "gen-script", &generate_script_lang, "lang",
"generate perf-script.xx script in specified language"),
+ OPT_STRING(0, "dlfilter", &dlfilter_file, "file", "filter .so file name"),
+ OPT_CALLBACK(0, "dlarg", NULL, "argument", "filter argument",
+ add_dlarg),
OPT_STRING('i', "input", &input_name, "file", "input file name"),
OPT_BOOLEAN('d', "debug-mode", &debug_mode,
"do various checks like samples ordering and lost events"),
@@ -3572,19 +3878,25 @@ int cmd_script(int argc, const char **argv)
"Valid types: hw,sw,trace,raw,synth. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
"addr,symoff,srcline,period,iregs,uregs,brstack,"
- "brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
- "callindent,insn,insnlen,synth,phys_addr,metric,misc,ipc",
+ "brstacksym,flags,data_src,weight,bpf-output,brstackinsn,"
+ "brstackinsnlen,brstackoff,callindent,insn,insnlen,synth,"
+ "phys_addr,metric,misc,srccode,ipc,tod,data_page_size,"
+ "code_page_size,ins_lat,machine_pid,vcpu,cgroup,retire_lat",
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
+ OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
+ "only consider symbols in these DSOs"),
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
"only consider these symbols"),
+ OPT_INTEGER(0, "addr-range", &symbol_conf.addr_range,
+ "Use with -S to list traced records within address range"),
OPT_CALLBACK_OPTARG(0, "insn-trace", &itrace_synth_opts, NULL, NULL,
"Decode instructions from itrace", parse_insn_trace),
OPT_CALLBACK_OPTARG(0, "xed", NULL, NULL, NULL,
"Run xed disassembler on output", parse_xed),
OPT_CALLBACK_OPTARG(0, "call-trace", &itrace_synth_opts, NULL, NULL,
- "Decode calls from from itrace", parse_call_trace),
+ "Decode calls from itrace", parse_call_trace),
OPT_CALLBACK_OPTARG(0, "call-ret-trace", &itrace_synth_opts, NULL, NULL,
"Decode calls and returns from itrace", parse_callret_trace),
OPT_STRING(0, "graph-function", &symbol_conf.graph_function, "symbol[,symbol...]",
@@ -3624,6 +3936,8 @@ int cmd_script(int argc, const char **argv)
"Show round events (if recorded)"),
OPT_BOOLEAN('\0', "show-bpf-events", &script.show_bpf_events,
"Show bpf related events (if recorded)"),
+ OPT_BOOLEAN('\0', "show-text-poke-events", &script.show_text_poke_events,
+ "Show text poke related events (if recorded)"),
OPT_BOOLEAN('\0', "per-event-dump", &script.per_event_dump,
"Dump trace output to files named by the monitored events"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
@@ -3653,6 +3967,10 @@ int cmd_script(int argc, const char **argv)
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
+ OPT_BOOLEAN(0, "guest-code", &symbol_conf.guest_code,
+ "Guest code can be found in hypervisor process"),
+ OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr,
+ "Enable LBR callgraph stitching approach"),
OPTS_EVSWITCH(&script.evswitch),
OPT_END()
};
@@ -3676,7 +3994,8 @@ int cmd_script(int argc, const char **argv)
if (symbol_conf.guestmount ||
symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_kallsyms ||
- symbol_conf.default_guest_modules) {
+ symbol_conf.default_guest_modules ||
+ symbol_conf.guest_code) {
/*
* Enable guest sample processing.
*/
@@ -3686,13 +4005,21 @@ int cmd_script(int argc, const char **argv)
data.path = input_name;
data.force = symbol_conf.force;
- if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
+ if (unsorted_dump) {
+ dump_trace = true;
+ script.tool.ordered_events = false;
+ }
+
+ if (symbol__validate_sym_arguments())
+ return -1;
+
+ if (argc > 1 && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
if (!rec_script_path)
return cmd_record(argc, argv);
}
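
The old strncmp() accepted anything that merely began with "rec", so an unrelated script name such as "recv-count" would have been routed to perf record. The new test inverts the comparison: argv[0] must itself be a prefix of "record" and at least three characters long, so only the abbreviations "rec" through "record" match; the report alias just below gets the same treatment. A standalone illustration, where starts_with()/is_record_alias() are local helpers mirroring the tools' strstarts():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Does 'str' begin with 'prefix'?  (Same idea as the tools' strstarts().) */
static bool starts_with(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}

/* Accept "rec", "reco", ..., "record" -- but not "re" or "recv-count". */
static bool is_record_alias(const char *arg)
{
	return strlen(arg) > 2 && starts_with("record", arg);
}

int main(void)
{
	const char *tests[] = { "rec", "record", "recv-count", "re", "rep" };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("%-10s -> %s\n", tests[i],
		       is_record_alias(tests[i]) ? "record alias" : "no match");
	return 0;
}
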
- if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) {
+ if (argc > 1 && strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
if (!rep_script_path) {
fprintf(stderr,
@@ -3709,7 +4036,7 @@ int cmd_script(int argc, const char **argv)
return -1;
}
- if (itrace_synth_opts.callchain &&
+ if ((itrace_synth_opts.callchain || itrace_synth_opts.add_callchain) &&
itrace_synth_opts.callchain_sz > scripting_max_stack)
scripting_max_stack = itrace_synth_opts.callchain_sz;
@@ -3725,6 +4052,12 @@ int cmd_script(int argc, const char **argv)
rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
if (!rec_script_path && !rep_script_path) {
+ script_name = find_script(argv[0]);
+ if (script_name) {
+ argc -= 1;
+ argv += 1;
+ goto script_found;
+ }
usage_with_options_msg(script_usage, options,
"Couldn't find script `%s'\n\n See perf"
" script -l for available scripts.\n", argv[0]);
@@ -3817,7 +4150,7 @@ int cmd_script(int argc, const char **argv)
free(__argv);
exit(-1);
}
-
+script_found:
if (rec_script_path)
script_path = rec_script_path;
if (rep_script_path)
@@ -3855,12 +4188,18 @@ int cmd_script(int argc, const char **argv)
exit(-1);
}
+ if (dlfilter_file) {
+ dlfilter = dlfilter__new(dlfilter_file, dlargc, dlargv);
+ if (!dlfilter)
+ return -1;
+ }
+
if (!script_name) {
setup_pager();
use_browser = 0;
}
- session = perf_session__new(&data, false, &script.tool);
+ session = perf_session__new(&data, &script.tool);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -3877,11 +4216,15 @@ int cmd_script(int argc, const char **argv)
goto out_delete;
uname(&uts);
- if (data.is_pipe || /* assume pipe_mode indicates native_arch */
- !strcmp(uts.machine, session->header.env.arch) ||
- (!strcmp(uts.machine, "x86_64") &&
- !strcmp(session->header.env.arch, "i386")))
+ if (data.is_pipe) { /* Assume pipe_mode indicates native_arch */
native_arch = true;
+ } else if (session->header.env.arch) {
+ if (!strcmp(uts.machine, session->header.env.arch))
+ native_arch = true;
+ else if (!strcmp(uts.machine, "x86_64") &&
+ !strcmp(session->header.env.arch, "i386"))
+ native_arch = true;
+ }
script.session = session;
script__setup_sample_type(&script);
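
The rewritten check above also guards against a perf.data file whose header carries no architecture string: pipe mode simply assumes a native architecture, and otherwise native_arch is only set when env.arch is present and matches uname(), with the one deliberate exception of i386 data read on an x86_64 host. A condensed standalone version of the same decision:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <sys/utsname.h>

/* Was 'data_arch' (from the perf.data header) recorded on an architecture
 * native to the machine we are running on?  NULL means it was not recorded. */
static bool arch_is_native(const char *data_arch)
{
	struct utsname uts;

	if (!data_arch || uname(&uts) < 0)
		return false;
	if (!strcmp(uts.machine, data_arch))
		return true;
	/* 32-bit x86 data is still native on a 64-bit x86 host. */
	return !strcmp(uts.machine, "x86_64") && !strcmp(data_arch, "i386");
}

int main(void)
{
	printf("i386 data native here: %s\n", arch_is_native("i386") ? "yes" : "no");
	return 0;
}
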
@@ -3904,6 +4247,7 @@ int cmd_script(int argc, const char **argv)
else
symbol_conf.use_callchain = false;
+#ifdef HAVE_LIBTRACEEVENT
if (session->tevent.pevent &&
tep_set_function_resolver(session->tevent.pevent,
machine__resolve_kernel_addr,
@@ -3912,7 +4256,7 @@ int cmd_script(int argc, const char **argv)
err = -1;
goto out_delete;
}
-
+#endif
if (generate_script_lang) {
struct stat perf_stat;
int input;
@@ -3948,14 +4292,21 @@ int cmd_script(int argc, const char **argv)
err = -ENOENT;
goto out_delete;
}
-
+#ifdef HAVE_LIBTRACEEVENT
err = scripting_ops->generate_script(session->tevent.pevent,
"perf-script");
+#else
+ err = scripting_ops->generate_script(NULL, "perf-script");
+#endif
goto out_delete;
}
+ err = dlfilter__start(dlfilter, session);
+ if (err)
+ goto out_delete;
+
if (script_name) {
- err = scripting_ops->start_script(script_name, argc, argv);
+ err = scripting_ops->start_script(script_name, argc, argv, session);
if (err)
goto out_delete;
pr_debug("perf script started with script %s\n\n", script_name);
@@ -3985,6 +4336,9 @@ int cmd_script(int argc, const char **argv)
if (err)
goto out_delete;
+ if (zstd_init(&(session->zstd_data), 0) < 0)
+ pr_warning("Decompression initialization failed. Reported data may be incomplete.\n");
+
err = __cmd_script(&script);
flush_scripting();
@@ -3995,11 +4349,15 @@ out_delete:
zfree(&script.ptime_range);
}
- perf_evlist__free_stats(session->evlist);
+ zstd_fini(&(session->zstd_data));
+ evlist__free_stats(session->evlist);
perf_session__delete(session);
+ perf_script__exit(&script);
if (script_started)
cleanup_scripting();
+ dlfilter__cleanup(dlfilter);
+ free_dlarg();
out:
return err;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index ec053dc1e35c..cc9fa48d636f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -41,13 +41,13 @@
*/
#include "builtin.h"
-#include "perf.h"
#include "util/cgroup.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
+#include "util/evlist-hybrid.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
@@ -56,7 +56,7 @@
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/counts.h"
-#include "util/group.h"
+#include "util/topdown.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/string2.h"
@@ -66,6 +66,11 @@
#include "util/time-utils.h"
#include "util/top.h"
#include "util/affinity.h"
+#include "util/pfm.h"
+#include "util/bpf_counter.h"
+#include "util/iostat.h"
+#include "util/pmu-hybrid.h"
+#include "util/util.h"
#include "asm/bug.h"
#include <linux/time64.h>
@@ -88,54 +93,15 @@
#include <linux/ctype.h>
#include <perf/evlist.h>
+#include <internal/threadmap.h>
#define DEFAULT_SEPARATOR " "
#define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi"
static void print_counters(struct timespec *ts, int argc, const char **argv);
-/* Default events used for perf stat -T */
-static const char *transaction_attrs = {
- "task-clock,"
- "{"
- "instructions,"
- "cycles,"
- "cpu/cycles-t/,"
- "cpu/tx-start/,"
- "cpu/el-start/,"
- "cpu/cycles-ct/"
- "}"
-};
-
-/* More limited version when the CPU does not have all events. */
-static const char * transaction_limited_attrs = {
- "task-clock,"
- "{"
- "instructions,"
- "cycles,"
- "cpu/cycles-t/,"
- "cpu/tx-start/"
- "}"
-};
-
-static const char * topdown_attrs[] = {
- "topdown-total-slots",
- "topdown-slots-retired",
- "topdown-recovery-bubbles",
- "topdown-fetch-bubbles",
- "topdown-slots-issued",
- NULL,
-};
-
-static const char *smi_cost_attrs = {
- "{"
- "msr/aperf/,"
- "msr/smi/,"
- "cycles"
- "}"
-};
-
static struct evlist *evsel_list;
+static bool all_counters_use_bpf = true;
static struct target target = {
.uid = UINT_MAX,
@@ -143,14 +109,13 @@ static struct target target = {
#define METRIC_ONLY_LEN 20
-static volatile pid_t child_pid = -1;
+static volatile sig_atomic_t child_pid = -1;
static int detailed_run = 0;
static bool transaction_run;
static bool topdown_run = false;
static bool smi_cost = false;
static bool smi_reset = false;
static int big_num_opt = -1;
-static bool group = false;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
@@ -161,6 +126,7 @@ static bool append_file;
static bool interval_count;
static const char *output_name;
static int output_fd;
+static char *metrics;
struct perf_stat {
bool record;
@@ -177,7 +143,7 @@ struct perf_stat {
static struct perf_stat perf_stat;
#define STAT_RECORD perf_stat.record
-static volatile int done = 0;
+static volatile sig_atomic_t done = 0;
static struct perf_stat_config stat_config = {
.aggr_mode = AGGR_GLOBAL,
@@ -186,9 +152,73 @@ static struct perf_stat_config stat_config = {
.run_count = 1,
.metric_only_len = METRIC_ONLY_LEN,
.walltime_nsecs_stats = &walltime_nsecs_stats,
+ .ru_stats = &ru_stats,
.big_num = true,
+ .ctl_fd = -1,
+ .ctl_fd_ack = -1,
+ .iostat_run = false,
};
+static bool cpus_map_matched(struct evsel *a, struct evsel *b)
+{
+ if (!a->core.cpus && !b->core.cpus)
+ return true;
+
+ if (!a->core.cpus || !b->core.cpus)
+ return false;
+
+ if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
+ return false;
+
+ for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
+ if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
+ perf_cpu_map__cpu(b->core.cpus, i).cpu)
+ return false;
+ }
+
+ return true;
+}
+
+static void evlist__check_cpu_maps(struct evlist *evlist)
+{
+ struct evsel *evsel, *warned_leader = NULL;
+
+ if (evlist__has_hybrid(evlist))
+ evlist__warn_hybrid_group(evlist);
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct evsel *leader = evsel__leader(evsel);
+
+ /* Check that leader matches cpus with each member. */
+ if (leader == evsel)
+ continue;
+ if (cpus_map_matched(leader, evsel))
+ continue;
+
+ /* If there's mismatch disable the group and warn user. */
+ if (warned_leader != leader) {
+ char buf[200];
+
+ pr_warning("WARNING: grouped events cpus do not match.\n"
+ "Events with CPUs not matching the leader will "
+ "be removed from the group.\n");
+ evsel__group_desc(leader, buf, sizeof(buf));
+ pr_warning(" %s\n", buf);
+ warned_leader = leader;
+ }
+ if (verbose > 0) {
+ char buf[200];
+
+ cpu_map__snprint(leader->core.cpus, buf, sizeof(buf));
+ pr_warning(" %s: %s\n", leader->name, buf);
+ cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf));
+ pr_warning(" %s: %s\n", evsel->name, buf);
+ }
+
+ evsel__remove_from_group(evsel, leader);
+ }
+}
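
evlist__check_cpu_maps() enforces a grouping invariant: every member of an event group must be programmed on exactly the CPUs of its leader, otherwise the member is warned about and removed from the group (for example on hybrid systems, where core and atom events naturally carry different CPU maps). The comparison itself is an ordered element-by-element match, sketched here with plain int arrays instead of perf_cpu_map:

#include <stdbool.h>
#include <stdio.h>

/* Same CPUs, same order, same count -- anything else breaks the group. */
static bool cpu_lists_match(const int *a, int na, const int *b, int nb)
{
	if (na != nb)
		return false;
	for (int i = 0; i < na; i++)
		if (a[i] != b[i])
			return false;
	return true;
}

int main(void)
{
	int leader[] = { 0, 1, 2, 3 };	/* e.g. a core-PMU event */
	int member[] = { 4, 5, 6, 7 };	/* e.g. an atom-PMU event */

	if (!cpu_lists_match(leader, 4, member, 4))
		printf("WARNING: grouped events cpus do not match.\n");
	return 0;
}
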
+
static inline void diff_timespec(struct timespec *r, struct timespec *a,
struct timespec *b)
{
@@ -203,13 +233,8 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
static void perf_stat__reset_stats(void)
{
- int i;
-
- perf_evlist__reset_stats(evsel_list);
+ evlist__reset_stats(evsel_list);
perf_stat__reset_shadow_stats();
-
- for (i = 0; i < stat_config.stats_num; i++)
- perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
}
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
@@ -238,35 +263,55 @@ static int write_stat_round_event(u64 tm, u64 type)
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
-static int
-perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
- struct perf_counts_values *count)
+static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
+ struct perf_counts_values *count)
{
- struct perf_sample_id *sid = SID(counter, cpu, thread);
+ struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
+ struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);
return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
process_synthesized_event, NULL);
}
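
A theme that runs through the rest of this file: callers now pass cpu_map_idx, the position inside an event's CPU map, and convert it to a real CPU number only where one is needed (here, when synthesizing the stat event). The two are not interchangeable once an event is restricted to a subset of CPUs, as this toy example shows:

#include <stdio.h>

int main(void)
{
	/* CPUs this event is actually opened on (think of an uncore PMU). */
	int cpu_map[] = { 2, 3, 6, 7 };
	int cpu_map_idx = 1;

	/* Index 1 into the map is CPU 3, not CPU 1. */
	printf("cpu_map_idx %d -> cpu %d\n", cpu_map_idx, cpu_map[cpu_map_idx]);
	return 0;
}
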
-static int read_single_counter(struct evsel *counter, int cpu,
+static int read_single_counter(struct evsel *counter, int cpu_map_idx,
int thread, struct timespec *rs)
{
- if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
- u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
- struct perf_counts_values *count =
- perf_counts(counter->counts, cpu, thread);
- count->ena = count->run = val;
- count->val = val;
- return 0;
+ switch(counter->tool_event) {
+ case PERF_TOOL_DURATION_TIME: {
+ u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
+ struct perf_counts_values *count =
+ perf_counts(counter->counts, cpu_map_idx, thread);
+ count->ena = count->run = val;
+ count->val = val;
+ return 0;
+ }
+ case PERF_TOOL_USER_TIME:
+ case PERF_TOOL_SYSTEM_TIME: {
+ u64 val;
+ struct perf_counts_values *count =
+ perf_counts(counter->counts, cpu_map_idx, thread);
+ if (counter->tool_event == PERF_TOOL_USER_TIME)
+ val = ru_stats.ru_utime_usec_stat.mean;
+ else
+ val = ru_stats.ru_stime_usec_stat.mean;
+ count->ena = count->run = val;
+ count->val = val;
+ return 0;
+ }
+ default:
+ case PERF_TOOL_NONE:
+ return evsel__read_counter(counter, cpu_map_idx, thread);
+ case PERF_TOOL_MAX:
+ /* This should never be reached */
+ return 0;
}
- return perf_evsel__read_counter(counter, cpu, thread);
}
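
The if/else that special-cased duration_time becomes a switch that also covers the new user_time and system_time tool events: none of these are kernel counters, so their values are synthesized (wall clock from the interval timespec, CPU times from the accumulated rusage means) and stored with enabled == running == value, so later scaling sees a fully-counted value. A minimal sketch of the wall-clock case:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* duration_time: the "count" is just the elapsed interval in nanoseconds. */
static uint64_t timespec_to_ns(const struct timespec *ts)
{
	return (uint64_t)ts->tv_sec * 1000000000ULL + (uint64_t)ts->tv_nsec;
}

int main(void)
{
	struct timespec elapsed = { .tv_sec = 1, .tv_nsec = 250000000 };
	uint64_t val = timespec_to_ns(&elapsed);

	/* Synthesized counters report ena == run == val. */
	printf("val=%llu ena=%llu run=%llu\n",
	       (unsigned long long)val,
	       (unsigned long long)val,
	       (unsigned long long)val);
	return 0;
}
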
/*
* Read out the results of a single counter:
* do not aggregate counts across CPUs in system-wide mode
*/
-static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
+static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
{
int nthreads = perf_thread_map__nr(evsel_list->core.threads);
int thread;
@@ -274,30 +319,27 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
if (!counter->supported)
return -ENOENT;
- if (counter->core.system_wide)
- nthreads = 1;
-
for (thread = 0; thread < nthreads; thread++) {
struct perf_counts_values *count;
- count = perf_counts(counter->counts, cpu, thread);
+ count = perf_counts(counter->counts, cpu_map_idx, thread);
/*
* The leader's group read loads data into its group members
- * (via perf_evsel__read_counter()) and sets their count->loaded.
+ * (via evsel__read_counter()) and sets their count->loaded.
*/
- if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
- read_single_counter(counter, cpu, thread, rs)) {
+ if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
+ read_single_counter(counter, cpu_map_idx, thread, rs)) {
counter->counts->scaled = -1;
- perf_counts(counter->counts, cpu, thread)->ena = 0;
- perf_counts(counter->counts, cpu, thread)->run = 0;
+ perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
+ perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
return -1;
}
- perf_counts__set_loaded(counter->counts, cpu, thread, false);
+ perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);
if (STAT_RECORD) {
- if (perf_evsel__write_stat_event(counter, cpu, thread, count)) {
+ if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
pr_err("failed to write stat event\n");
return -1;
}
@@ -306,8 +348,9 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
if (verbose > 1) {
fprintf(stat_config.output,
"%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
- perf_evsel__name(counter),
- cpu,
+ evsel__name(counter),
+ perf_cpu_map__cpu(evsel__cpus(counter),
+ cpu_map_idx).cpu,
count->val, count->ena, count->run);
}
}
@@ -315,33 +358,67 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
return 0;
}
-static void read_counters(struct timespec *rs)
+static int read_affinity_counters(struct timespec *rs)
{
- struct evsel *counter;
- struct affinity affinity;
- int i, ncpus, cpu;
+ struct evlist_cpu_iterator evlist_cpu_itr;
+ struct affinity saved_affinity, *affinity;
- if (affinity__setup(&affinity) < 0)
- return;
+ if (all_counters_use_bpf)
+ return 0;
- ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
if (!target__has_cpu(&target) || target__has_per_thread(&target))
- ncpus = 1;
- evlist__for_each_cpu(evsel_list, i, cpu) {
- if (i >= ncpus)
- break;
- affinity__set(&affinity, cpu);
+ affinity = NULL;
+ else if (affinity__setup(&saved_affinity) < 0)
+ return -1;
+ else
+ affinity = &saved_affinity;
- evlist__for_each_entry(evsel_list, counter) {
- if (evsel__cpu_iter_skip(counter, cpu))
- continue;
- if (!counter->err) {
- counter->err = read_counter_cpu(counter, rs,
- counter->cpu_iter - 1);
- }
+ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+ struct evsel *counter = evlist_cpu_itr.evsel;
+
+ if (evsel__is_bpf(counter))
+ continue;
+
+ if (!counter->err) {
+ counter->err = read_counter_cpu(counter, rs,
+ evlist_cpu_itr.cpu_map_idx);
}
}
- affinity__cleanup(&affinity);
+ if (affinity)
+ affinity__cleanup(&saved_affinity);
+
+ return 0;
+}
+
+static int read_bpf_map_counters(void)
+{
+ struct evsel *counter;
+ int err;
+
+ evlist__for_each_entry(evsel_list, counter) {
+ if (!evsel__is_bpf(counter))
+ continue;
+
+ err = bpf_counter__read(counter);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int read_counters(struct timespec *rs)
+{
+ if (!stat_config.stop_read_counter) {
+ if (read_bpf_map_counters() ||
+ read_affinity_counters(rs))
+ return -1;
+ }
+ return 0;
+}
+
+static void process_counters(void)
+{
+ struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
if (counter->err)
@@ -350,6 +427,9 @@ static void read_counters(struct timespec *rs)
pr_warning("failed to process counter %s\n", counter->name);
counter->err = 0;
}
+
+ perf_stat_merge_counters(&stat_config, evsel_list);
+ perf_stat_process_percore(&stat_config, evsel_list);
}
static void process_interval(void)
@@ -359,7 +439,10 @@ static void process_interval(void)
clock_gettime(CLOCK_MONOTONIC, &ts);
diff_timespec(&rs, &ts, &ref_time);
- read_counters(&rs);
+ evlist__reset_aggr_stats(evsel_list);
+
+ if (read_counters(&rs) == 0)
+ process_counters();
if (STAT_RECORD) {
if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
@@ -367,39 +450,62 @@ static void process_interval(void)
}
init_stats(&walltime_nsecs_stats);
- update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
+ update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
print_counters(&rs, 0, NULL);
}
-static void enable_counters(void)
+static bool handle_interval(unsigned int interval, int *times)
{
- if (stat_config.initial_delay)
- usleep(stat_config.initial_delay * USEC_PER_MSEC);
+ if (interval) {
+ process_interval();
+ if (interval_count && !(--(*times)))
+ return true;
+ }
+ return false;
+}
- /*
- * We need to enable counters only if:
- * - we don't have tracee (attaching to task or cpu)
- * - we have initial delay configured
- */
- if (!target__none(&target) || stat_config.initial_delay)
- evlist__enable(evsel_list);
+static int enable_counters(void)
+{
+ struct evsel *evsel;
+ int err;
+
+ evlist__for_each_entry(evsel_list, evsel) {
+ if (!evsel__is_bpf(evsel))
+ continue;
+
+ err = bpf_counter__enable(evsel);
+ if (err)
+ return err;
+ }
+
+ if (!target__enable_on_exec(&target)) {
+ if (!all_counters_use_bpf)
+ evlist__enable(evsel_list);
+ }
+ return 0;
}
static void disable_counters(void)
{
+ struct evsel *counter;
+
/*
* If we don't have tracee (attaching to task or cpu), counters may
* still be running. To get accurate group ratios, we must stop groups
* from counting before reading their constituent counters.
*/
- if (!target__none(&target))
- evlist__disable(evsel_list);
+ if (!target__none(&target)) {
+ evlist__for_each_entry(evsel_list, counter)
+ bpf_counter__disable(counter);
+ if (!all_counters_use_bpf)
+ evlist__disable(evsel_list);
+ }
}
-static volatile int workload_exec_errno;
+static volatile sig_atomic_t workload_exec_errno;
/*
- * perf_evlist__prepare_workload will send a SIGUSR1
+ * evlist__prepare_workload will send a SIGUSR1
* if the fork fails, since we asked by setting its
* want_signal to true.
*/
@@ -409,7 +515,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf
workload_exec_errno = info->si_value.sival_int;
}
-static bool perf_evsel__should_store_id(struct evsel *counter)
+static bool evsel__should_store_id(struct evsel *counter)
{
return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID;
}
@@ -436,6 +542,86 @@ static bool is_target_alive(struct target *_target,
return false;
}
+static void process_evlist(struct evlist *evlist, unsigned int interval)
+{
+ enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
+
+ if (evlist__ctlfd_process(evlist, &cmd) > 0) {
+ switch (cmd) {
+ case EVLIST_CTL_CMD_ENABLE:
+ fallthrough;
+ case EVLIST_CTL_CMD_DISABLE:
+ if (interval)
+ process_interval();
+ break;
+ case EVLIST_CTL_CMD_SNAPSHOT:
+ case EVLIST_CTL_CMD_ACK:
+ case EVLIST_CTL_CMD_UNSUPPORTED:
+ case EVLIST_CTL_CMD_EVLIST:
+ case EVLIST_CTL_CMD_STOP:
+ case EVLIST_CTL_CMD_PING:
+ default:
+ break;
+ }
+ }
+}
+
+static void compute_tts(struct timespec *time_start, struct timespec *time_stop,
+ int *time_to_sleep)
+{
+ int tts = *time_to_sleep;
+ struct timespec time_diff;
+
+ diff_timespec(&time_diff, time_stop, time_start);
+
+ tts -= time_diff.tv_sec * MSEC_PER_SEC +
+ time_diff.tv_nsec / NSEC_PER_MSEC;
+
+ if (tts < 0)
+ tts = 0;
+
+ *time_to_sleep = tts;
+}
+
+static int dispatch_events(bool forks, int timeout, int interval, int *times)
+{
+ int child_exited = 0, status = 0;
+ int time_to_sleep, sleep_time;
+ struct timespec time_start, time_stop;
+
+ if (interval)
+ sleep_time = interval;
+ else if (timeout)
+ sleep_time = timeout;
+ else
+ sleep_time = 1000;
+
+ time_to_sleep = sleep_time;
+
+ while (!done) {
+ if (forks)
+ child_exited = waitpid(child_pid, &status, WNOHANG);
+ else
+ child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;
+
+ if (child_exited)
+ break;
+
+ clock_gettime(CLOCK_MONOTONIC, &time_start);
+ if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
+ if (timeout || handle_interval(interval, times))
+ break;
+ time_to_sleep = sleep_time;
+ } else { /* fd revent */
+ process_evlist(evsel_list, interval);
+ clock_gettime(CLOCK_MONOTONIC, &time_stop);
+ compute_tts(&time_start, &time_stop, &time_to_sleep);
+ }
+ }
+
+ return status;
+}
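
dispatch_events() replaces the old nanosleep loops in __run_perf_stat(): it waits in evlist__poll() for at most the interval (or the timeout, or a one second default), prints an interval report when the wait expires, and when it is woken early by control-fd traffic it recomputes how much of the current budget is left so interval boundaries do not drift. The remaining-time arithmetic in compute_tts() boils down to:

#include <stdio.h>
#include <time.h>

#define MSEC_PER_SEC	1000L
#define NSEC_PER_MSEC	1000000L

/* How much of a 'budget_ms' sleep is left after waking early at 'stop'? */
static int remaining_sleep_ms(const struct timespec *start,
			      const struct timespec *stop, int budget_ms)
{
	long spent_ms = (stop->tv_sec - start->tv_sec) * MSEC_PER_SEC +
			(stop->tv_nsec - start->tv_nsec) / NSEC_PER_MSEC;
	long left = budget_ms - spent_ms;

	return left > 0 ? (int)left : 0;
}

int main(void)
{
	struct timespec start = { .tv_sec = 10, .tv_nsec = 0 };
	struct timespec stop  = { .tv_sec = 10, .tv_nsec = 300 * NSEC_PER_MSEC };

	/* 300 ms already spent, so 700 ms of a 1000 ms interval remain. */
	printf("%d ms left\n", remaining_sleep_ms(&start, &stop, 1000));
	return 0;
}
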
+
enum counter_recovery {
COUNTER_SKIP,
COUNTER_RETRY,
@@ -454,7 +640,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
errno == ENXIO) {
if (verbose > 0)
ui__warning("%s event is not supported by the kernel.\n",
- perf_evsel__name(counter));
+ evsel__name(counter));
counter->supported = false;
/*
* errored is a sticky flag that means one of the counter's
@@ -462,10 +648,10 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
*/
counter->errored = true;
- if ((counter->leader != counter) ||
- !(counter->leader->core.nr_members > 1))
+ if ((evsel__leader(counter) != counter) ||
+ !(counter->core.leader->nr_members > 1))
return COUNTER_SKIP;
- } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
+ } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
return COUNTER_RETRY;
@@ -483,8 +669,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
}
}
- perf_evsel__open_strerror(counter, &target,
- errno, msg, sizeof(msg));
+ evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
if (child_pid != -1)
@@ -500,83 +685,84 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
char msg[BUFSIZ];
unsigned long long t0, t1;
struct evsel *counter;
- struct timespec ts;
size_t l;
int status = 0;
const bool forks = (argc > 0);
bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
- struct affinity affinity;
- int i, cpu;
+ struct evlist_cpu_iterator evlist_cpu_itr;
+ struct affinity saved_affinity, *affinity = NULL;
+ int err;
bool second_pass = false;
- if (interval) {
- ts.tv_sec = interval / USEC_PER_MSEC;
- ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC;
- } else if (timeout) {
- ts.tv_sec = timeout / USEC_PER_MSEC;
- ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC;
- } else {
- ts.tv_sec = 1;
- ts.tv_nsec = 0;
- }
-
if (forks) {
- if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
- workload_exec_failed_signal) < 0) {
+ if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
perror("failed to prepare workload");
return -1;
}
child_pid = evsel_list->workload.pid;
}
- if (group)
- perf_evlist__set_leader(evsel_list);
+ if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
+ if (affinity__setup(&saved_affinity) < 0)
+ return -1;
+ affinity = &saved_affinity;
+ }
- if (affinity__setup(&affinity) < 0)
- return -1;
+ evlist__for_each_entry(evsel_list, counter) {
+ counter->reset_group = false;
+ if (bpf_counter__load(counter, &target))
+ return -1;
+ if (!(evsel__is_bperf(counter)))
+ all_counters_use_bpf = false;
+ }
- evlist__for_each_cpu (evsel_list, i, cpu) {
- affinity__set(&affinity, cpu);
+ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+ counter = evlist_cpu_itr.evsel;
- evlist__for_each_entry(evsel_list, counter) {
- if (evsel__cpu_iter_skip(counter, cpu))
- continue;
- if (counter->reset_group || counter->errored)
- continue;
-try_again:
- if (create_perf_stat_counter(counter, &stat_config, &target,
- counter->cpu_iter - 1) < 0) {
-
- /*
- * Weak group failed. We cannot just undo this here
- * because earlier CPUs might be in group mode, and the kernel
- * doesn't support mixing group and non group reads. Defer
- * it to later.
- * Don't close here because we're in the wrong affinity.
- */
- if ((errno == EINVAL || errno == EBADF) &&
- counter->leader != counter &&
- counter->weak_group) {
- perf_evlist__reset_weak_group(evsel_list, counter, false);
- assert(counter->reset_group);
- second_pass = true;
- continue;
- }
+ /*
+ * bperf calls evsel__open_per_cpu() in bperf__load(), so
+ * no need to call it again here.
+ */
+ if (target.use_bpf)
+ break;
- switch (stat_handle_error(counter)) {
- case COUNTER_FATAL:
- return -1;
- case COUNTER_RETRY:
- goto try_again;
- case COUNTER_SKIP:
- continue;
- default:
- break;
- }
+ if (counter->reset_group || counter->errored)
+ continue;
+ if (evsel__is_bperf(counter))
+ continue;
+try_again:
+ if (create_perf_stat_counter(counter, &stat_config, &target,
+ evlist_cpu_itr.cpu_map_idx) < 0) {
+
+ /*
+ * Weak group failed. We cannot just undo this here
+ * because earlier CPUs might be in group mode, and the kernel
+ * doesn't support mixing group and non group reads. Defer
+ * it to later.
+ * Don't close here because we're in the wrong affinity.
+ */
+ if ((errno == EINVAL || errno == EBADF) &&
+ evsel__leader(counter) != counter &&
+ counter->weak_group) {
+ evlist__reset_weak_group(evsel_list, counter, false);
+ assert(counter->reset_group);
+ second_pass = true;
+ continue;
+ }
+ switch (stat_handle_error(counter)) {
+ case COUNTER_FATAL:
+ return -1;
+ case COUNTER_RETRY:
+ goto try_again;
+ case COUNTER_SKIP:
+ continue;
+ default:
+ break;
}
- counter->supported = true;
+
}
+ counter->supported = true;
}
if (second_pass) {
@@ -585,45 +771,41 @@ try_again:
* and also close errored counters.
*/
- evlist__for_each_cpu(evsel_list, i, cpu) {
- affinity__set(&affinity, cpu);
- /* First close errored or weak retry */
- evlist__for_each_entry(evsel_list, counter) {
- if (!counter->reset_group && !counter->errored)
- continue;
- if (evsel__cpu_iter_skip_no_inc(counter, cpu))
- continue;
- perf_evsel__close_cpu(&counter->core, counter->cpu_iter);
- }
- /* Now reopen weak */
- evlist__for_each_entry(evsel_list, counter) {
- if (!counter->reset_group && !counter->errored)
- continue;
- if (evsel__cpu_iter_skip(counter, cpu))
- continue;
- if (!counter->reset_group)
- continue;
+ /* First close errored or weak retry */
+ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+ counter = evlist_cpu_itr.evsel;
+
+ if (!counter->reset_group && !counter->errored)
+ continue;
+
+ perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
+ }
+ /* Now reopen weak */
+ evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+ counter = evlist_cpu_itr.evsel;
+
+ if (!counter->reset_group)
+ continue;
try_again_reset:
- pr_debug2("reopening weak %s\n", perf_evsel__name(counter));
- if (create_perf_stat_counter(counter, &stat_config, &target,
- counter->cpu_iter - 1) < 0) {
-
- switch (stat_handle_error(counter)) {
- case COUNTER_FATAL:
- return -1;
- case COUNTER_RETRY:
- goto try_again_reset;
- case COUNTER_SKIP:
- continue;
- default:
- break;
- }
+ pr_debug2("reopening weak %s\n", evsel__name(counter));
+ if (create_perf_stat_counter(counter, &stat_config, &target,
+ evlist_cpu_itr.cpu_map_idx) < 0) {
+
+ switch (stat_handle_error(counter)) {
+ case COUNTER_FATAL:
+ return -1;
+ case COUNTER_RETRY:
+ goto try_again_reset;
+ case COUNTER_SKIP:
+ continue;
+ default:
+ break;
}
- counter->supported = true;
}
+ counter->supported = true;
}
}
- affinity__cleanup(&affinity);
+ affinity__cleanup(affinity);
evlist__for_each_entry(evsel_list, counter) {
if (!counter->supported) {
@@ -635,20 +817,20 @@ try_again_reset:
if (l > stat_config.unit_width)
stat_config.unit_width = l;
- if (perf_evsel__should_store_id(counter) &&
- perf_evsel__store_ids(counter, evsel_list))
+ if (evsel__should_store_id(counter) &&
+ evsel__store_ids(counter, evsel_list))
return -1;
}
- if (perf_evlist__apply_filters(evsel_list, &counter)) {
+ if (evlist__apply_filters(evsel_list, &counter)) {
pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
- counter->filter, perf_evsel__name(counter), errno,
+ counter->filter, evsel__name(counter), errno,
str_error_r(errno, msg, sizeof(msg)));
return -1;
}
if (STAT_RECORD) {
- int err, fd = perf_data__fd(&perf_stat.data);
+ int fd = perf_data__fd(&perf_stat.data);
if (is_pipe) {
err = perf_header__write_pipe(perf_data__fd(&perf_stat.data));
@@ -666,28 +848,38 @@ try_again_reset:
return err;
}
- /*
- * Enable counters and exec the command:
- */
+ if (target.initial_delay) {
+ pr_info(EVLIST_DISABLED_MSG);
+ } else {
+ err = enable_counters();
+ if (err)
+ return -1;
+ }
+
+ /* Exec the command, if any */
+ if (forks)
+ evlist__start_workload(evsel_list);
+
+ if (target.initial_delay > 0) {
+ usleep(target.initial_delay * USEC_PER_MSEC);
+ err = enable_counters();
+ if (err)
+ return -1;
+
+ pr_info(EVLIST_ENABLED_MSG);
+ }
+
t0 = rdclock();
clock_gettime(CLOCK_MONOTONIC, &ref_time);
if (forks) {
- perf_evlist__start_workload(evsel_list);
- enable_counters();
-
- if (interval || timeout) {
- while (!waitpid(child_pid, &status, WNOHANG)) {
- nanosleep(&ts, NULL);
- if (timeout)
- break;
- process_interval();
- if (interval_count && !(--times))
- break;
- }
- }
- if (child_pid != -1)
+ if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
+ status = dispatch_events(forks, timeout, interval, &times);
+ if (child_pid != -1) {
+ if (timeout)
+ kill(child_pid, SIGTERM);
wait4(child_pid, &status, 0, &stat_config.ru_data);
+ }
if (workload_exec_errno) {
const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
@@ -698,19 +890,7 @@ try_again_reset:
if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]);
} else {
- enable_counters();
- while (!done) {
- nanosleep(&ts, NULL);
- if (!is_target_alive(&target, evsel_list->core.threads))
- break;
- if (timeout)
- break;
- if (interval) {
- process_interval();
- if (interval_count && !(--times))
- break;
- }
- }
+ status = dispatch_events(forks, timeout, interval, &times);
}
disable_counters();
@@ -720,7 +900,19 @@ try_again_reset:
if (stat_config.walltime_run_table)
stat_config.walltime_run[run_idx] = t1 - t0;
- update_stats(&walltime_nsecs_stats, t1 - t0);
+ if (interval && stat_config.summary) {
+ stat_config.interval = 0;
+ stat_config.stop_read_counter = true;
+ init_stats(&walltime_nsecs_stats);
+ update_stats(&walltime_nsecs_stats, t1 - t0);
+
+ evlist__copy_prev_raw_counts(evsel_list);
+ evlist__reset_prev_raw_counts(evsel_list);
+ evlist__reset_aggr_stats(evsel_list);
+ } else {
+ update_stats(&walltime_nsecs_stats, t1 - t0);
+ update_rusage_stats(&ru_stats, &stat_config.ru_data);
+ }
/*
* Closing a group leader splits the group, and as we only disable
@@ -728,7 +920,8 @@ try_again_reset:
* avoid arbitrary skew, we must read all counters before closing any
* group leaders.
*/
- read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
+ if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
+ process_counters();
/*
* We need to keep evsel_list alive, because it's processed
@@ -771,12 +964,13 @@ static void print_counters(struct timespec *ts, int argc, const char **argv)
/* Do not print anything if we record to the pipe. */
if (STAT_RECORD && perf_stat.data.is_pipe)
return;
+ if (quiet)
+ return;
- perf_evlist__print_counters(evsel_list, &stat_config, &target,
- ts, argc, argv);
+ evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
}
-static volatile int signr = -1;
+static volatile sig_atomic_t signr = -1;
static void skip_signal(int signo)
{
@@ -819,10 +1013,21 @@ static void sig_atexit(void)
kill(getpid(), signr);
}
+void perf_stat__set_big_num(int set)
+{
+ stat_config.big_num = (set != 0);
+}
+
+void perf_stat__set_no_csv_summary(int set)
+{
+ stat_config.no_csv_summary = (set != 0);
+}
+
static int stat__set_big_num(const struct option *opt __maybe_unused,
const char *s __maybe_unused, int unset)
{
big_num_opt = unset ? 0 : 1;
+ perf_stat__set_big_num(!unset);
return 0;
}
@@ -834,11 +1039,63 @@ static int enable_metric_only(const struct option *opt __maybe_unused,
return 0;
}
-static int parse_metric_groups(const struct option *opt,
+static int append_metric_groups(const struct option *opt __maybe_unused,
const char *str,
int unset __maybe_unused)
{
- return metricgroup__parse_groups(opt, str, &stat_config.metric_events);
+ if (metrics) {
+ char *tmp;
+
+ if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
+ return -ENOMEM;
+ free(metrics);
+ metrics = tmp;
+ } else {
+ metrics = strdup(str);
+ if (!metrics)
+ return -ENOMEM;
+ }
+ return 0;
+}
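
-M used to hand each argument straight to metricgroup__parse_groups(); it now accumulates the arguments into one comma-separated string, presumably so the combined list can be parsed once the rest of the setup is known and so that repeated -M options add up rather than the last one winning. The string-building idiom on its own (asprintf() is a GNU extension, hence _GNU_SOURCE; the metric names are just examples):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *metric_list;

/* Append 'str' to a growing comma-separated list, first call included. */
static int append_metric(const char *str)
{
	char *tmp;

	if (!metric_list) {
		metric_list = strdup(str);
		return metric_list ? 0 : -1;
	}
	if (asprintf(&tmp, "%s,%s", metric_list, str) < 0)
		return -1;
	free(metric_list);
	metric_list = tmp;
	return 0;
}

int main(void)
{
	append_metric("TopdownL1");
	append_metric("Memory_BW");
	printf("%s\n", metric_list);	/* TopdownL1,Memory_BW */
	free(metric_list);
	return 0;
}
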
+
+static int parse_control_option(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct perf_stat_config *config = opt->value;
+
+ return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close);
+}
+
+static int parse_stat_cgroups(const struct option *opt,
+ const char *str, int unset)
+{
+ if (stat_config.cgroup_list) {
+ pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
+ return -1;
+ }
+
+ return parse_cgroups(opt, str, unset);
+}
+
+static int parse_hybrid_type(const struct option *opt,
+ const char *str,
+ int unset __maybe_unused)
+{
+ struct evlist *evlist = *(struct evlist **)opt->value;
+
+ if (!list_empty(&evlist->core.entries)) {
+ fprintf(stderr, "Must define cputype before events/metrics\n");
+ return -1;
+ }
+
+ evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str);
+ if (!evlist->hybrid_pmu_name) {
+ fprintf(stderr, "--cputype %s is not supported!\n", str);
+ return -1;
+ }
+
+ return 0;
}
static struct option stat_options[] = {
@@ -855,10 +1112,16 @@ static struct option stat_options[] = {
"stat events on existing process id"),
OPT_STRING('t', "tid", &target.tid, "tid",
"stat events on existing thread id"),
+#ifdef HAVE_BPF_SKEL
+ OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id",
+ "stat events on existing bpf program id"),
+ OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf,
+ "use bpf program to count events"),
+ OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
+ "path to perf_event_attr map"),
+#endif
OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
"system-wide collection from all CPUs"),
- OPT_BOOLEAN('g', "group", &group,
- "put the counters into a counter group"),
OPT_BOOLEAN(0, "scale", &stat_config.scale,
"Use --no-scale to disable counter scaling for multiplexing"),
OPT_INCR('v', "verbose", &verbose,
@@ -881,10 +1144,16 @@ static struct option stat_options[] = {
OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
"disable CPU count aggregation", AGGR_NONE),
OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
+ OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
+ "Merge identical named hybrid events"),
OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
"print counts with custom separator"),
+ OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
+ "print counts in JSON format"),
OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
- "monitor event in cgroup name only", parse_cgroups),
+ "monitor event in cgroup name only", parse_stat_cgroups),
+ OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
+ "expand events for each cgroup"),
OPT_STRING('o', "output", &output_name, "file", "output file name"),
OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
OPT_INTEGER(0, "log-fd", &output_fd,
@@ -912,17 +1181,25 @@ static struct option stat_options[] = {
"aggregate counts per thread", AGGR_THREAD),
OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode,
"aggregate counts per numa node", AGGR_NODE),
- OPT_UINTEGER('D', "delay", &stat_config.initial_delay,
- "ms to wait before starting measurement after program start"),
+ OPT_INTEGER('D', "delay", &target.initial_delay,
+ "ms to wait before starting measurement after program start (-1: start with events disabled)"),
OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL,
"Only print computed metrics. No raw values", enable_metric_only),
+ OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group,
+ "don't group metric events, impacts multiplexing"),
+ OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge,
+ "don't try to share events between metrics in a group"),
+ OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold,
+ "don't try to share events between metrics in a group "),
OPT_BOOLEAN(0, "topdown", &topdown_run,
- "measure topdown level 1 statistics"),
+ "measure top-down statistics"),
+ OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
+ "Set the metrics level for the top-down statistics (0: max level)"),
OPT_BOOLEAN(0, "smi-cost", &smi_cost,
"measure SMI cost"),
OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
"monitor specified metrics or metric groups (separated by ,)",
- parse_metric_groups),
+ append_metric_groups),
OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
"Configure all used events to run in kernel space.",
PARSE_OPT_EXCLUSIVE),
@@ -933,133 +1210,206 @@ static struct option stat_options[] = {
"Use with 'percore' event qualifier to show the event "
"counts of one hardware thread by sum up total hardware "
"threads of same physical core"),
+ OPT_BOOLEAN(0, "summary", &stat_config.summary,
+ "print summary for interval mode"),
+ OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary,
+ "don't print 'summary' for CSV summary output"),
+ OPT_BOOLEAN(0, "quiet", &quiet,
+ "don't print any output, messages or warnings (useful with record)"),
+ OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
+ "Only enable events on applying cpu with this type "
+ "for hybrid platform (e.g. core or atom)",
+ parse_hybrid_type),
+#ifdef HAVE_LIBPFM
+ OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
+ "libpfm4 event selector. use 'perf list' to list available events",
+ parse_libpfm_events_option),
+#endif
+ OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
+ "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n"
+ "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
+ "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
+ parse_control_option),
+ OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
+ "measure I/O performance metrics provided by arch/platform",
+ iostat_parse),
OPT_END()
};
-static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
- struct perf_cpu_map *map, int cpu)
+static const char *const aggr_mode__string[] = {
+ [AGGR_CORE] = "core",
+ [AGGR_DIE] = "die",
+ [AGGR_GLOBAL] = "global",
+ [AGGR_NODE] = "node",
+ [AGGR_NONE] = "none",
+ [AGGR_SOCKET] = "socket",
+ [AGGR_THREAD] = "thread",
+ [AGGR_UNSET] = "unset",
+};
+
+static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return cpu_map__get_socket(map, cpu, NULL);
+ return aggr_cpu_id__socket(cpu, /*data=*/NULL);
}
-static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
- struct perf_cpu_map *map, int cpu)
+static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return cpu_map__get_die(map, cpu, NULL);
+ return aggr_cpu_id__die(cpu, /*data=*/NULL);
}
-static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
- struct perf_cpu_map *map, int cpu)
+static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return cpu_map__get_core(map, cpu, NULL);
+ return aggr_cpu_id__core(cpu, /*data=*/NULL);
}
-static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
- struct perf_cpu_map *map, int cpu)
+static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return cpu_map__get_node(map, cpu, NULL);
+ return aggr_cpu_id__node(cpu, /*data=*/NULL);
}
-static int perf_stat__get_aggr(struct perf_stat_config *config,
- aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
+static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- int cpu;
+ return aggr_cpu_id__global(cpu, /*data=*/NULL);
+}
- if (idx >= map->nr)
- return -1;
+static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
+{
+ return aggr_cpu_id__cpu(cpu, /*data=*/NULL);
+}
- cpu = map->map[idx];
+static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
+ aggr_get_id_t get_id, struct perf_cpu cpu)
+{
+ struct aggr_cpu_id id;
+
+ /* per-process mode - should use global aggr mode */
+ if (cpu.cpu == -1)
+ return get_id(config, cpu);
- if (config->cpus_aggr_map->map[cpu] == -1)
- config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);
+ if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
+ config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);
- return config->cpus_aggr_map->map[cpu];
+ id = config->cpus_aggr_map->map[cpu.cpu];
+ return id;
}
-static int perf_stat__get_socket_cached(struct perf_stat_config *config,
- struct perf_cpu_map *map, int idx)
+static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
+ struct perf_cpu cpu)
{
- return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
+ return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
}
-static int perf_stat__get_die_cached(struct perf_stat_config *config,
- struct perf_cpu_map *map, int idx)
+static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
+ struct perf_cpu cpu)
{
- return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
+ return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
}
-static int perf_stat__get_core_cached(struct perf_stat_config *config,
- struct perf_cpu_map *map, int idx)
+static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
+ struct perf_cpu cpu)
{
- return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
+ return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
}
-static int perf_stat__get_node_cached(struct perf_stat_config *config,
- struct perf_cpu_map *map, int idx)
+static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
+ struct perf_cpu cpu)
{
- return perf_stat__get_aggr(config, perf_stat__get_node, map, idx);
+ return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
}
-static bool term_percore_set(void)
+static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config,
+ struct perf_cpu cpu)
{
- struct evsel *counter;
-
- evlist__for_each_entry(evsel_list, counter) {
- if (counter->percore)
- return true;
- }
+ return perf_stat__get_aggr(config, perf_stat__get_global, cpu);
+}
- return false;
+static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config,
+ struct perf_cpu cpu)
+{
+ return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu);
}
-static int perf_stat_init_aggr_mode(void)
+static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
{
- int nr;
+ switch (aggr_mode) {
+ case AGGR_SOCKET:
+ return aggr_cpu_id__socket;
+ case AGGR_DIE:
+ return aggr_cpu_id__die;
+ case AGGR_CORE:
+ return aggr_cpu_id__core;
+ case AGGR_NODE:
+ return aggr_cpu_id__node;
+ case AGGR_NONE:
+ return aggr_cpu_id__cpu;
+ case AGGR_GLOBAL:
+ return aggr_cpu_id__global;
+ case AGGR_THREAD:
+ case AGGR_UNSET:
+ case AGGR_MAX:
+ default:
+ return NULL;
+ }
+}
- switch (stat_config.aggr_mode) {
+static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
+{
+ switch (aggr_mode) {
case AGGR_SOCKET:
- if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
- perror("cannot build socket map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_socket_cached;
- break;
+ return perf_stat__get_socket_cached;
case AGGR_DIE:
- if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
- perror("cannot build die map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_die_cached;
- break;
+ return perf_stat__get_die_cached;
case AGGR_CORE:
- if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
- perror("cannot build core map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_core_cached;
- break;
+ return perf_stat__get_core_cached;
case AGGR_NODE:
- if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
- perror("cannot build core map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_node_cached;
- break;
+ return perf_stat__get_node_cached;
case AGGR_NONE:
- if (term_percore_set()) {
- if (cpu_map__build_core_map(evsel_list->core.cpus,
- &stat_config.aggr_map)) {
- perror("cannot build core map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_core_cached;
- }
- break;
+ return perf_stat__get_cpu_cached;
case AGGR_GLOBAL:
+ return perf_stat__get_global_cached;
case AGGR_THREAD:
case AGGR_UNSET:
+ case AGGR_MAX:
default:
- break;
+ return NULL;
+ }
+}
+
+static int perf_stat_init_aggr_mode(void)
+{
+ int nr;
+ aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
+
+ if (get_id) {
+ bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
+ stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
+ get_id, /*data=*/NULL, needs_sort);
+ if (!stat_config.aggr_map) {
+ pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
+ return -1;
+ }
+ stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
+ }
+
+ if (stat_config.aggr_mode == AGGR_THREAD) {
+ nr = perf_thread_map__nr(evsel_list->core.threads);
+ stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
+ if (stat_config.aggr_map == NULL)
+ return -ENOMEM;
+
+ for (int s = 0; s < nr; s++) {
+ struct aggr_cpu_id id = aggr_cpu_id__empty();
+
+ id.thread_idx = s;
+ stat_config.aggr_map->map[s] = id;
+ }
+ return 0;
}
/*
@@ -1067,241 +1417,237 @@ static int perf_stat_init_aggr_mode(void)
* taking the highest cpu number to be the size of
* the aggregation translate cpumap.
*/
- nr = perf_cpu_map__max(evsel_list->core.cpus);
- stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1);
+ if (evsel_list->core.user_requested_cpus)
+ nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+ else
+ nr = 0;
+ stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
}
+static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
+{
+ if (map) {
+ WARN_ONCE(refcount_read(&map->refcnt) != 0,
+ "cpu_aggr_map refcnt unbalanced\n");
+ free(map);
+ }
+}
+
+static void cpu_aggr_map__put(struct cpu_aggr_map *map)
+{
+ if (map && refcount_dec_and_test(&map->refcnt))
+ cpu_aggr_map__delete(map);
+}
+
static void perf_stat__exit_aggr_mode(void)
{
- perf_cpu_map__put(stat_config.aggr_map);
- perf_cpu_map__put(stat_config.cpus_aggr_map);
+ cpu_aggr_map__put(stat_config.aggr_map);
+ cpu_aggr_map__put(stat_config.cpus_aggr_map);
stat_config.aggr_map = NULL;
stat_config.cpus_aggr_map = NULL;
}
-static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx)
+static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
- int cpu;
-
- if (idx > map->nr)
- return -1;
-
- cpu = map->map[idx];
+ struct perf_env *env = data;
+ struct aggr_cpu_id id = aggr_cpu_id__empty();
- if (cpu >= env->nr_cpus_avail)
- return -1;
+ if (cpu.cpu != -1)
+ id.socket = env->cpu[cpu.cpu].socket_id;
- return cpu;
+ return id;
}
-static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data)
+static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
struct perf_env *env = data;
- int cpu = perf_env__get_cpu(env, map, idx);
+ struct aggr_cpu_id id = aggr_cpu_id__empty();
+
+ if (cpu.cpu != -1) {
+ /*
+ * die_id is relative to socket, so start
+ * with the socket ID and then add die to
+ * make a unique ID.
+ */
+ id.socket = env->cpu[cpu.cpu].socket_id;
+ id.die = env->cpu[cpu.cpu].die_id;
+ }
- return cpu == -1 ? -1 : env->cpu[cpu].socket_id;
+ return id;
}
-static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data)
+static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
struct perf_env *env = data;
- int die_id = -1, cpu = perf_env__get_cpu(env, map, idx);
+ struct aggr_cpu_id id = aggr_cpu_id__empty();
- if (cpu != -1) {
+ if (cpu.cpu != -1) {
/*
- * Encode socket in bit range 15:8
- * die_id is relative to socket,
- * we need a global id. So we combine
- * socket + die id
+ * core_id is relative to socket and die,
+ * we need a global id. So we set
+ * socket, die id and core id
*/
- if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
- return -1;
-
- if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
- return -1;
-
- die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff);
+ id.socket = env->cpu[cpu.cpu].socket_id;
+ id.die = env->cpu[cpu.cpu].die_id;
+ id.core = env->cpu[cpu.cpu].core_id;
}
- return die_id;
+ return id;
}
-static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
+static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
struct perf_env *env = data;
- int core = -1, cpu = perf_env__get_cpu(env, map, idx);
+ struct aggr_cpu_id id = aggr_cpu_id__empty();
- if (cpu != -1) {
+ if (cpu.cpu != -1) {
/*
- * Encode socket in bit range 31:24
- * encode die id in bit range 23:16
* core_id is relative to socket and die,
- * we need a global id. So we combine
- * socket + die id + core id
+ * we need a global id. So we set
+ * socket, die id and core id
*/
- if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n"))
- return -1;
-
- if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n"))
- return -1;
-
- if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n"))
- return -1;
-
- core = (env->cpu[cpu].socket_id << 24) |
- (env->cpu[cpu].die_id << 16) |
- (env->cpu[cpu].core_id & 0xffff);
+ id.socket = env->cpu[cpu.cpu].socket_id;
+ id.die = env->cpu[cpu.cpu].die_id;
+ id.core = env->cpu[cpu.cpu].core_id;
+ id.cpu = cpu;
}
- return core;
+ return id;
}
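
The two helpers above illustrate the wider rework of aggregation IDs in this hunk: instead of bit-packing socket, die and core into a single int (which needed WARN_ONCE range checks such as "The die id number is too big"), each component now sits in its own field of struct aggr_cpu_id. A simplified model of the new representation and of why the old limits disappear:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for perf's struct aggr_cpu_id. */
struct aggr_id {
	int socket;
	int die;
	int core;
};

static bool aggr_id_equal(struct aggr_id a, struct aggr_id b)
{
	return a.socket == b.socket && a.die == b.die && a.core == b.core;
}

int main(void)
{
	/* A die id of 300 no longer has to fit an 8-bit packed field. */
	struct aggr_id a = { .socket = 1, .die = 300, .core = 12 };
	struct aggr_id b = { .socket = 1, .die = 300, .core = 12 };

	printf("same aggregation bucket: %s\n",
	       aggr_id_equal(a, b) ? "yes" : "no");
	return 0;
}
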
-static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data)
+static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
{
- int cpu = perf_env__get_cpu(data, map, idx);
+ struct aggr_cpu_id id = aggr_cpu_id__empty();
- return perf_env__numa_node(data, cpu);
+ id.node = perf_env__numa_node(data, cpu);
+ return id;
}
-static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
- struct perf_cpu_map **sockp)
+static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused,
+ void *data __maybe_unused)
{
- return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
+ struct aggr_cpu_id id = aggr_cpu_id__empty();
+
+ /* it always aggregates to cpu 0 */
+ id.cpu = (struct perf_cpu){ .cpu = 0 };
+ return id;
}
-static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus,
- struct perf_cpu_map **diep)
+static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
+ return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
-
-static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus,
- struct perf_cpu_map **corep)
+static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
+ return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
-static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus,
- struct perf_cpu_map **nodep)
+static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return cpu_map__build_map(cpus, nodep, perf_env__get_node, env);
+ return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
-static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
- struct perf_cpu_map *map, int idx)
+static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
+ return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
-static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
- struct perf_cpu_map *map, int idx)
+
+static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return perf_env__get_die(map, idx, &perf_stat.session->header.env);
+ return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
-static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
- struct perf_cpu_map *map, int idx)
+static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
+ struct perf_cpu cpu)
{
- return perf_env__get_core(map, idx, &perf_stat.session->header.env);
+ return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
}
-static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
- struct perf_cpu_map *map, int idx)
+static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
{
- return perf_env__get_node(map, idx, &perf_stat.session->header.env);
+ switch (aggr_mode) {
+ case AGGR_SOCKET:
+ return perf_env__get_socket_aggr_by_cpu;
+ case AGGR_DIE:
+ return perf_env__get_die_aggr_by_cpu;
+ case AGGR_CORE:
+ return perf_env__get_core_aggr_by_cpu;
+ case AGGR_NODE:
+ return perf_env__get_node_aggr_by_cpu;
+ case AGGR_GLOBAL:
+ return perf_env__get_global_aggr_by_cpu;
+ case AGGR_NONE:
+ return perf_env__get_cpu_aggr_by_cpu;
+ case AGGR_THREAD:
+ case AGGR_UNSET:
+ case AGGR_MAX:
+ default:
+ return NULL;
+ }
}
-static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
+static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
{
- struct perf_env *env = &st->session->header.env;
-
- switch (stat_config.aggr_mode) {
+ switch (aggr_mode) {
case AGGR_SOCKET:
- if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
- perror("cannot build socket map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_socket_file;
- break;
+ return perf_stat__get_socket_file;
case AGGR_DIE:
- if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
- perror("cannot build die map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_die_file;
- break;
+ return perf_stat__get_die_file;
case AGGR_CORE:
- if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
- perror("cannot build core map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_core_file;
- break;
+ return perf_stat__get_core_file;
case AGGR_NODE:
- if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
- perror("cannot build core map");
- return -1;
- }
- stat_config.aggr_get_id = perf_stat__get_node_file;
- break;
- case AGGR_NONE:
+ return perf_stat__get_node_file;
case AGGR_GLOBAL:
+ return perf_stat__get_global_file;
+ case AGGR_NONE:
+ return perf_stat__get_cpu_file;
case AGGR_THREAD:
case AGGR_UNSET:
+ case AGGR_MAX:
default:
- break;
+ return NULL;
}
-
- return 0;
}
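aggr_mode__get_aggr_file() and aggr_mode__get_id_file() turn the per-mode setup code into small lookup helpers that return a callback, with NULL meaning the mode needs no per-CPU aggregation map. A toy sketch of that dispatch pattern (the names and topology assumptions below are invented for illustration):

#include <stdio.h>

enum mode { MODE_SOCKET, MODE_CORE, MODE_THREAD };
typedef int (*get_id_fn)(int cpu);

static int socket_id(int cpu) { return cpu / 8; }	/* pretend 8 CPUs per socket */
static int core_id(int cpu)   { return cpu / 2; }	/* pretend 2 SMT threads per core */

/* NULL means "no per-CPU aggregation for this mode", as in the switches above. */
static get_id_fn mode_to_fn(enum mode m)
{
	switch (m) {
	case MODE_SOCKET: return socket_id;
	case MODE_CORE:   return core_id;
	case MODE_THREAD:
	default:          return NULL;
	}
}

int main(void)
{
	get_id_fn fn = mode_to_fn(MODE_CORE);

	if (fn)
		printf("cpu 5 aggregates to id %d\n", fn(5));
	return 0;
}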
-static int topdown_filter_events(const char **attr, char **str, bool use_group)
+static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
{
- int off = 0;
- int i;
- int len = 0;
- char *s;
+ struct perf_env *env = &st->session->header.env;
+ aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
+ bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
- for (i = 0; attr[i]; i++) {
- if (pmu_have_event("cpu", attr[i])) {
- len += strlen(attr[i]) + 1;
- attr[i - off] = attr[i];
- } else
- off++;
- }
- attr[i - off] = NULL;
+ if (stat_config.aggr_mode == AGGR_THREAD) {
+ int nr = perf_thread_map__nr(evsel_list->core.threads);
- *str = malloc(len + 1 + 2);
- if (!*str)
- return -1;
- s = *str;
- if (i - off == 0) {
- *s = 0;
+ stat_config.aggr_map = cpu_aggr_map__empty_new(nr);
+ if (stat_config.aggr_map == NULL)
+ return -ENOMEM;
+
+ for (int s = 0; s < nr; s++) {
+ struct aggr_cpu_id id = aggr_cpu_id__empty();
+
+ id.thread_idx = s;
+ stat_config.aggr_map->map[s] = id;
+ }
return 0;
}
- if (use_group)
- *s++ = '{';
- for (i = 0; attr[i]; i++) {
- strcpy(s, attr[i]);
- s += strlen(s);
- *s++ = ',';
- }
- if (use_group) {
- s[-1] = '}';
- *s = 0;
- } else
- s[-1] = 0;
- return 0;
-}
-__weak bool arch_topdown_check_group(bool *warn)
-{
- *warn = false;
- return false;
-}
+ if (!get_id)
+ return 0;
-__weak void arch_topdown_group_warn(void)
-{
+ stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
+ get_id, env, needs_sort);
+ if (!stat_config.aggr_map) {
+ pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
+ return -1;
+ }
+ stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
+ return 0;
}
/*
@@ -1310,7 +1656,6 @@ __weak void arch_topdown_group_warn(void)
*/
static int add_default_attributes(void)
{
- int err;
struct perf_event_attr default_attrs0[] = {
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
@@ -1423,47 +1768,37 @@ static int add_default_attributes(void)
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
- struct parse_events_error errinfo;
+
+ struct perf_event_attr default_null_attrs[] = {};
/* Set attrs if no event is selected and !null_run: */
if (stat_config.null_run)
return 0;
- bzero(&errinfo, sizeof(errinfo));
if (transaction_run) {
/* Handle -T as -M transaction. Once platform specific metrics
- * support has been added to the json files, all archictures
+ * support has been added to the json files, all architectures
* will use this approach. To determine transaction support
* on an architecture test for such a metric name.
*/
- if (metricgroup__has_metric("transaction")) {
- struct option opt = { .value = &evsel_list };
-
- return metricgroup__parse_groups(&opt, "transaction",
- &stat_config.metric_events);
- }
-
- if (pmu_have_event("cpu", "cycles-ct") &&
- pmu_have_event("cpu", "el-start"))
- err = parse_events(evsel_list, transaction_attrs,
- &errinfo);
- else
- err = parse_events(evsel_list,
- transaction_limited_attrs,
- &errinfo);
- if (err) {
- fprintf(stderr, "Cannot set up transaction events\n");
- parse_events_print_error(&errinfo, transaction_attrs);
+ if (!metricgroup__has_metric("transaction")) {
+ pr_err("Missing transaction metrics");
return -1;
}
- return 0;
+ return metricgroup__parse_groups(evsel_list, "transaction",
+ stat_config.metric_no_group,
+ stat_config.metric_no_merge,
+ stat_config.metric_no_threshold,
+ stat_config.user_requested_cpu_list,
+ stat_config.system_wide,
+ &stat_config.metric_events);
}
if (smi_cost) {
int smi;
if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
- fprintf(stderr, "freeze_on_smi is not supported.\n");
+ pr_err("freeze_on_smi is not supported.");
return -1;
}
@@ -1475,83 +1810,98 @@ static int add_default_attributes(void)
smi_reset = true;
}
- if (pmu_have_event("msr", "aperf") &&
- pmu_have_event("msr", "smi")) {
- if (!force_metric_only)
- stat_config.metric_only = true;
- err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
- } else {
- fprintf(stderr, "To measure SMI cost, it needs "
- "msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
- parse_events_print_error(&errinfo, smi_cost_attrs);
+ if (!metricgroup__has_metric("smi")) {
+ pr_err("Missing smi metrics");
return -1;
}
- if (err) {
- parse_events_print_error(&errinfo, smi_cost_attrs);
- fprintf(stderr, "Cannot set up SMI cost events\n");
- return -1;
- }
- return 0;
+
+ if (!force_metric_only)
+ stat_config.metric_only = true;
+
+ return metricgroup__parse_groups(evsel_list, "smi",
+ stat_config.metric_no_group,
+ stat_config.metric_no_merge,
+ stat_config.metric_no_threshold,
+ stat_config.user_requested_cpu_list,
+ stat_config.system_wide,
+ &stat_config.metric_events);
}
if (topdown_run) {
- char *str = NULL;
- bool warn = false;
-
- if (stat_config.aggr_mode != AGGR_GLOBAL &&
- stat_config.aggr_mode != AGGR_CORE) {
- pr_err("top down event configuration requires --per-core mode\n");
- return -1;
- }
- stat_config.aggr_mode = AGGR_CORE;
- if (nr_cgroups || !target__has_cpu(&target)) {
- pr_err("top down event configuration requires system-wide mode (-a)\n");
- return -1;
- }
+ unsigned int max_level = metricgroups__topdown_max_level();
+ char str[] = "TopdownL1";
if (!force_metric_only)
stat_config.metric_only = true;
- if (topdown_filter_events(topdown_attrs, &str,
- arch_topdown_check_group(&warn)) < 0) {
- pr_err("Out of memory\n");
+
+ if (!max_level) {
+ pr_err("Topdown requested but the topdown metric groups aren't present.\n"
+ "(See perf list the metric groups have names like TopdownL1)");
return -1;
}
- if (topdown_attrs[0] && str) {
- if (warn)
- arch_topdown_group_warn();
- err = parse_events(evsel_list, str, &errinfo);
- if (err) {
- fprintf(stderr,
- "Cannot set up top down events %s: %d\n",
- str, err);
- parse_events_print_error(&errinfo, str);
- free(str);
- return -1;
- }
- } else {
- fprintf(stderr, "System does not support topdown\n");
+ if (stat_config.topdown_level > max_level) {
+ pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level);
return -1;
+ } else if (!stat_config.topdown_level)
+ stat_config.topdown_level = 1;
+
+ if (!stat_config.interval && !stat_config.metric_only) {
+ fprintf(stat_config.output,
+ "Topdown accuracy may decrease when measuring long periods.\n"
+ "Please print the result regularly, e.g. -I1000\n");
}
- free(str);
+ str[8] = stat_config.topdown_level + '0';
+ if (metricgroup__parse_groups(evsel_list, str,
+ /*metric_no_group=*/false,
+ /*metric_no_merge=*/false,
+ /*metric_no_threshold=*/true,
+ stat_config.user_requested_cpu_list,
+ stat_config.system_wide,
+ &stat_config.metric_events) < 0)
+ return -1;
}
+ if (!stat_config.topdown_level)
+ stat_config.topdown_level = 1;
+
if (!evsel_list->core.nr_entries) {
+ /* No events so add defaults. */
if (target__has_cpu(&target))
default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
- if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
+ if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
return -1;
if (pmu_have_event("cpu", "stalled-cycles-frontend")) {
- if (perf_evlist__add_default_attrs(evsel_list,
- frontend_attrs) < 0)
+ if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
return -1;
}
if (pmu_have_event("cpu", "stalled-cycles-backend")) {
- if (perf_evlist__add_default_attrs(evsel_list,
- backend_attrs) < 0)
+ if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
return -1;
}
- if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
+ if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
+ return -1;
+ /*
+ * Add TopdownL1 metrics if they exist. To minimize
+ * multiplexing, don't request threshold computation.
+ */
+ /*
+ * TODO: TopdownL1 is disabled on hybrid CPUs to avoid crashes
+ * caused by exposing latent bugs. This is fixed properly in:
+ * https://lore.kernel.org/lkml/bff481ba-e60a-763f-0aa0-3ee53302c480@linux.intel.com/
+ */
+ if (metricgroup__has_metric("TopdownL1") && !perf_pmu__has_hybrid() &&
+ metricgroup__parse_groups(evsel_list, "TopdownL1",
+ /*metric_no_group=*/false,
+ /*metric_no_merge=*/false,
+ /*metric_no_threshold=*/true,
+ stat_config.user_requested_cpu_list,
+ stat_config.system_wide,
+ &stat_config.metric_events) < 0)
+ return -1;
+
+ /* Platform specific attrs */
+ if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
return -1;
}
@@ -1561,21 +1911,21 @@ static int add_default_attributes(void)
return 0;
/* Append detailed run extra attributes: */
- if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
+ if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
return -1;
if (detailed_run < 2)
return 0;
/* Append very detailed run extra attributes: */
- if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
+ if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
return -1;
if (detailed_run < 3)
return 0;
/* Append very, very detailed run extra attributes: */
- return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
+ return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
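In the topdown branch above, the metric group name is built by overwriting the last character of "TopdownL1" with the requested level. A self-contained illustration of that index arithmetic (it relies on the level being a single digit):

#include <stdio.h>

int main(void)
{
	/* "TopdownL1" is nine characters; index 8 holds the level digit. */
	char str[] = "TopdownL1";
	unsigned int level = 2;		/* stands in for stat_config.topdown_level */

	str[8] = level + '0';		/* '0' + 2 == '2', so str becomes "TopdownL2" */
	printf("%s\n", str);
	return 0;
}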
static const char * const stat_record_usage[] = {
@@ -1613,7 +1963,7 @@ static int __cmd_record(int argc, const char **argv)
return -1;
}
- session = perf_session__new(data, false, NULL);
+ session = perf_session__new(data, NULL);
if (IS_ERR(session)) {
pr_err("Perf session creation failed\n");
return PTR_ERR(session);
@@ -1631,13 +1981,11 @@ static int process_stat_round_event(struct perf_session *session,
union perf_event *event)
{
struct perf_record_stat_round *stat_round = &event->stat_round;
- struct evsel *counter;
struct timespec tsh, *ts = NULL;
const char **argv = session->header.env.cmdline_argv;
int argc = session->header.env.nr_cmdline;
- evlist__for_each_entry(evsel_list, counter)
- perf_stat_process_counter(&stat_config, counter);
+ process_counters();
if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
update_stats(&walltime_nsecs_stats, stat_round->time);
@@ -1664,17 +2012,23 @@ int process_stat_config_event(struct perf_session *session,
if (perf_cpu_map__empty(st->cpus)) {
if (st->aggr_mode != AGGR_UNSET)
pr_warning("warning: processing task data, aggregation mode not set\n");
- return 0;
- }
-
- if (st->aggr_mode != AGGR_UNSET)
+ } else if (st->aggr_mode != AGGR_UNSET) {
stat_config.aggr_mode = st->aggr_mode;
+ }
if (perf_stat.data.is_pipe)
perf_stat_init_aggr_mode();
else
perf_stat_init_aggr_mode_file(st);
+ if (stat_config.aggr_map) {
+ int nr_aggr = stat_config.aggr_map->nr;
+
+ if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) {
+ pr_err("cannot allocate aggr counts\n");
+ return -1;
+ }
+ }
return 0;
}
@@ -1688,7 +2042,7 @@ static int set_maps(struct perf_stat *st)
perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
- if (perf_evlist__alloc_stats(evsel_list, true))
+ if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
return -ENOMEM;
st->maps_allocated = true;
@@ -1735,35 +2089,6 @@ int process_cpu_map_event(struct perf_session *session,
return set_maps(st);
}
-static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
-{
- int i;
-
- config->stats = calloc(nthreads, sizeof(struct runtime_stat));
- if (!config->stats)
- return -1;
-
- config->stats_num = nthreads;
-
- for (i = 0; i < nthreads; i++)
- runtime_stat__init(&config->stats[i]);
-
- return 0;
-}
-
-static void runtime_stat_delete(struct perf_stat_config *config)
-{
- int i;
-
- if (!config->stats)
- return;
-
- for (i = 0; i < config->stats_num; i++)
- runtime_stat__exit(&config->stats[i]);
-
- zfree(&config->stats);
-}
-
static const char * const stat_report_usage[] = {
"perf stat report [<options>]",
NULL,
@@ -1814,7 +2139,7 @@ static int __cmd_report(int argc, const char **argv)
perf_stat.data.path = input_name;
perf_stat.data.mode = PERF_DATA_MODE_READ;
- session = perf_session__new(&perf_stat.data, false, &perf_stat.tool);
+ session = perf_session__new(&perf_stat.data, &perf_stat.tool);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -1850,8 +2175,10 @@ static void setup_system_wide(int forks)
struct evsel *counter;
evlist__for_each_entry(evsel_list, counter) {
- if (!counter->core.system_wide)
+ if (!counter->core.requires_cpu &&
+ !evsel__name_is(counter, "duration_time")) {
return;
+ }
}
if (evsel_list->core.nr_entries)
@@ -1865,11 +2192,12 @@ int cmd_stat(int argc, const char **argv)
"perf stat [<options>] [<command>]",
NULL
};
- int status = -EINVAL, run_idx;
+ int status = -EINVAL, run_idx, err;
const char *mode;
FILE *output = stderr;
unsigned int interval, timeout;
const char * const stat_subcommands[] = { "record", "report" };
+ char errbuf[BUFSIZ];
setlocale(LC_ALL, "");
@@ -1887,8 +2215,6 @@ int cmd_stat(int argc, const char **argv)
argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
(const char **) stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- perf_stat__collect_metric_expr(evsel_list);
- perf_stat__init_shadow_stats();
if (stat_config.csv_sep) {
stat_config.csv_output = true;
@@ -1897,11 +2223,11 @@ int cmd_stat(int argc, const char **argv)
} else
stat_config.csv_sep = DEFAULT_SEPARATOR;
- if (argc && !strncmp(argv[0], "rec", 3)) {
+ if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
argc = __cmd_record(argc, argv);
if (argc < 0)
return -1;
- } else if (argc && !strncmp(argv[0], "rep", 3))
+ } else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
return __cmd_report(argc, argv);
interval = stat_config.interval;
@@ -1943,7 +2269,7 @@ int cmd_stat(int argc, const char **argv)
goto out;
}
- if (!output) {
+ if (!output && !quiet) {
struct timespec tm;
mode = append_file ? "a" : "w";
@@ -1952,8 +2278,10 @@ int cmd_stat(int argc, const char **argv)
perror("failed to create output file");
return -1;
}
- clock_gettime(CLOCK_REALTIME, &tm);
- fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
+ if (!stat_config.json_output) {
+ clock_gettime(CLOCK_REALTIME, &tm);
+ fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
+ }
} else if (output_fd > 0) {
mode = append_file ? "a" : "w";
output = fdopen(output_fd, mode);
@@ -1963,6 +2291,14 @@ int cmd_stat(int argc, const char **argv)
}
}
+ if (stat_config.interval_clear && !isatty(fileno(output))) {
+ fprintf(stderr, "--interval-clear does not work with output\n");
+ parse_options_usage(stat_usage, stat_options, "o", 1);
+ parse_options_usage(NULL, stat_options, "log-fd", 0);
+ parse_options_usage(NULL, stat_options, "interval-clear", 0);
+ return -1;
+ }
+
stat_config.output = output;
/*
@@ -1980,6 +2316,12 @@ int cmd_stat(int argc, const char **argv)
} else if (big_num_opt == 0) /* User passed --no-big-num */
stat_config.big_num = false;
+ err = target__validate(&target);
+ if (err) {
+ target__strerror(&target, err, errbuf, BUFSIZ);
+ pr_warning("%s\n", errbuf);
+ }
+
setup_system_wide(argc);
/*
@@ -2023,7 +2365,8 @@ int cmd_stat(int argc, const char **argv)
* --per-thread is aggregated per thread, we dont mix it with cpu mode
*/
if (((stat_config.aggr_mode != AGGR_GLOBAL &&
- stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
+ stat_config.aggr_mode != AGGR_THREAD) ||
+ (nr_cgroups || stat_config.cgroup_list)) &&
!target__has_cpu(&target)) {
fprintf(stderr, "both cgroup and no-aggregation "
"modes only available in system-wide mode\n");
@@ -2031,18 +2374,76 @@ int cmd_stat(int argc, const char **argv)
parse_options_usage(stat_usage, stat_options, "G", 1);
parse_options_usage(NULL, stat_options, "A", 1);
parse_options_usage(NULL, stat_options, "a", 1);
+ parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
goto out;
}
+ if (stat_config.iostat_run) {
+ status = iostat_prepare(evsel_list, &stat_config);
+ if (status)
+ goto out;
+ if (iostat_mode == IOSTAT_LIST) {
+ iostat_list(evsel_list, &stat_config);
+ goto out;
+ } else if (verbose > 0)
+ iostat_list(evsel_list, &stat_config);
+ if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
+ target.system_wide = true;
+ }
+
+ if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
+ target.per_thread = true;
+
+ stat_config.system_wide = target.system_wide;
+ if (target.cpu_list) {
+ stat_config.user_requested_cpu_list = strdup(target.cpu_list);
+ if (!stat_config.user_requested_cpu_list) {
+ status = -ENOMEM;
+ goto out;
+ }
+ }
+
+ /*
+ * Metric parsing needs to be delayed as metrics may optimize events
+ * knowing the target is system-wide.
+ */
+ if (metrics) {
+ metricgroup__parse_groups(evsel_list, metrics,
+ stat_config.metric_no_group,
+ stat_config.metric_no_merge,
+ stat_config.metric_no_threshold,
+ stat_config.user_requested_cpu_list,
+ stat_config.system_wide,
+ &stat_config.metric_events);
+ zfree(&metrics);
+ }
+
if (add_default_attributes())
goto out;
- target__validate(&target);
+ if (stat_config.cgroup_list) {
+ if (nr_cgroups > 0) {
+ pr_err("--cgroup and --for-each-cgroup cannot be used together\n");
+ parse_options_usage(stat_usage, stat_options, "G", 1);
+ parse_options_usage(NULL, stat_options, "for-each-cgroup", 0);
+ goto out;
+ }
- if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
- target.per_thread = true;
+ if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
+ &stat_config.metric_events, true) < 0) {
+ parse_options_usage(stat_usage, stat_options,
+ "for-each-cgroup", 0);
+ goto out;
+ }
+ }
+
+ if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
+ pr_err("failed to use cpu list %s\n", target.cpu_list);
+ goto out;
+ }
- if (perf_evlist__create_maps(evsel_list, &target) < 0) {
+ target.hybrid = perf_pmu__has_hybrid();
+ if (evlist__create_maps(evsel_list, &target) < 0) {
if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n");
parse_options_usage(stat_usage, stat_options, "p", 1);
@@ -2055,18 +2456,14 @@ int cmd_stat(int argc, const char **argv)
goto out;
}
+ evlist__check_cpu_maps(evsel_list);
+
/*
* Initialize thread_map with comm names,
* so we could print it out on output.
*/
if (stat_config.aggr_mode == AGGR_THREAD) {
thread_map__read_comms(evsel_list->core.threads);
- if (target.system_wide) {
- if (runtime_stat_new(&stat_config,
- perf_thread_map__nr(evsel_list->core.threads))) {
- goto out;
- }
- }
}
if (stat_config.aggr_mode == AGGR_NODE)
@@ -2099,10 +2496,10 @@ int cmd_stat(int argc, const char **argv)
goto out;
}
- if (perf_evlist__alloc_stats(evsel_list, interval))
+ if (perf_stat_init_aggr_mode())
goto out;
- if (perf_stat_init_aggr_mode())
+ if (evlist__alloc_stats(&stat_config, evsel_list, interval))
goto out;
/*
@@ -2129,6 +2526,11 @@ int cmd_stat(int argc, const char **argv)
signal(SIGALRM, skip_signal);
signal(SIGABRT, skip_signal);
+ if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
+ goto out;
+
+ /* Enable ignoring missing threads when -p option is defined. */
+ evlist__first(evsel_list)->ignore_missing_thread = target.pid;
status = 0;
for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
if (stat_config.run_count != 1 && verbose > 0)
@@ -2136,7 +2538,7 @@ int cmd_stat(int argc, const char **argv)
run_idx + 1);
if (run_idx != 0)
- perf_evlist__reset_prev_raw_counts(evsel_list);
+ evlist__reset_prev_raw_counts(evsel_list);
status = run_perf_stat(argc, argv, run_idx);
if (forever && status != -1 && !interval) {
@@ -2145,14 +2547,16 @@ int cmd_stat(int argc, const char **argv)
}
}
- if (!forever && status != -1 && !interval)
+ if (!forever && status != -1 && (!interval || stat_config.summary))
print_counters(NULL, argc, argv);
+ evlist__finalize_ctlfd(evsel_list);
+
if (STAT_RECORD) {
/*
* We synthesize the kernel mmap record just so that older tools
* don't emit warnings about not being able to resolve symbols
- * due to /proc/sys/kernel/kptr_restrict settings and instear provide
+ * due to /proc/sys/kernel/kptr_restrict settings and instead provide
* a saner message about no samples being in the perf.data file.
*
* This also serves to suppress a warning about f_header.data.size == 0
@@ -2162,9 +2566,10 @@ int cmd_stat(int argc, const char **argv)
* tools remain -acme
*/
int fd = perf_data__fd(&perf_stat.data);
- int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
- process_synthesized_event,
- &perf_stat.session->machines.host);
+
+ err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
+ process_synthesized_event,
+ &perf_stat.session->machines.host);
if (err) {
pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
"older tools may produce warnings about this file\n.");
@@ -2185,16 +2590,21 @@ int cmd_stat(int argc, const char **argv)
}
perf_stat__exit_aggr_mode();
- perf_evlist__free_stats(evsel_list);
+ evlist__free_stats(evsel_list);
out:
+ if (stat_config.iostat_run)
+ iostat_release(evsel_list);
+
zfree(&stat_config.walltime_run);
+ zfree(&stat_config.user_requested_cpu_list);
if (smi_cost && smi_reset)
sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
evlist__delete(evsel_list);
- runtime_stat_delete(&stat_config);
+ metricgroup__rblist_exit(&stat_config.metric_events);
+ evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
return status;
}
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 9e84fae9b096..bce1cf896f9c 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -24,7 +24,6 @@
#include "util/thread.h"
#include "util/callchain.h"
-#include "perf.h"
#include "util/header.h"
#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
@@ -35,7 +34,11 @@
#include "util/tool.h"
#include "util/data.h"
#include "util/debug.h"
+#include "util/string2.h"
+#include "util/tracepoint.h"
+#include "util/util.h"
#include <linux/err.h>
+#include <traceevent/event-parse.h>
#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
FILE *open_memstream(char **ptr, size_t *sizeloc);
@@ -128,7 +131,7 @@ struct sample_wrapper {
struct sample_wrapper *next;
u64 timestamp;
- unsigned char data[0];
+ unsigned char data[];
};
#define TYPE_NONE 0
@@ -213,6 +216,19 @@ static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
return cursor;
}
+static struct per_pidcomm *create_pidcomm(struct per_pid *p)
+{
+ struct per_pidcomm *c;
+
+ c = zalloc(sizeof(*c));
+ if (!c)
+ return NULL;
+ p->current = c;
+ c->next = p->all;
+ p->all = c;
+ return c;
+}
+
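create_pidcomm() factors out the allocate-and-push-to-head sequence that pid_set_comm(), pid_put_sample() and pid_begin_io_sample() previously duplicated. A minimal sketch of that head-insertion pattern, with simplified stand-in types rather than timechart's real structs:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for timechart's per_pid/per_pidcomm lists. */
struct comm {
	const char *name;
	struct comm *next;
};

struct pid_entry {
	struct comm *current;	/* most recently created entry */
	struct comm *all;	/* head of the singly linked list */
};

/* Same shape as create_pidcomm(): allocate zeroed, link at the head,
 * remember it as current, and let the caller fill in the payload. */
static struct comm *push_comm(struct pid_entry *p)
{
	struct comm *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	p->current = c;
	c->next = p->all;
	p->all = c;
	return c;
}

int main(void)
{
	struct pid_entry p = { 0 };
	struct comm *c = push_comm(&p);

	if (c)
		c->name = "bash";
	printf("current comm: %s\n", p.current ? p.current->name : "(none)");
	return 0;
}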
static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
{
struct per_pid *p;
@@ -231,12 +247,9 @@ static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
}
c = c->next;
}
- c = zalloc(sizeof(*c));
+ c = create_pidcomm(p);
assert(c != NULL);
c->comm = strdup(comm);
- p->current = c;
- c->next = p->all;
- p->all = c;
}
static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
@@ -275,11 +288,8 @@ static void pid_put_sample(struct timechart *tchart, int pid, int type,
p = find_create_pid(tchart, pid);
c = p->current;
if (!c) {
- c = zalloc(sizeof(*c));
+ c = create_pidcomm(p);
assert(c != NULL);
- p->current = c;
- c->next = p->all;
- p->all = c;
}
sample = zalloc(sizeof(*sample));
@@ -367,16 +377,13 @@ static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
tchart->power_events = pwr;
}
-static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
+static struct power_event *p_state_end(struct timechart *tchart, int cpu,
+ u64 timestamp)
{
- struct power_event *pwr;
-
- if (new_freq > 8000000) /* detect invalid data */
- return;
+ struct power_event *pwr = zalloc(sizeof(*pwr));
- pwr = zalloc(sizeof(*pwr));
if (!pwr)
- return;
+ return NULL;
pwr->state = cpus_pstate_state[cpu];
pwr->start_time = cpus_pstate_start_times[cpu];
@@ -384,11 +391,23 @@ static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64
pwr->cpu = cpu;
pwr->type = PSTATE;
pwr->next = tchart->power_events;
-
if (!pwr->start_time)
pwr->start_time = tchart->first_time;
tchart->power_events = pwr;
+ return pwr;
+}
+
+static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
+{
+ struct power_event *pwr;
+
+ if (new_freq > 8000000) /* detect invalid data */
+ return;
+
+ pwr = p_state_end(tchart, cpu, timestamp);
+ if (!pwr)
+ return;
cpus_pstate_state[cpu] = new_freq;
cpus_pstate_start_times[cpu] = timestamp;
@@ -579,8 +598,8 @@ process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
- u32 state = perf_evsel__intval(evsel, sample, "state");
- u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
+ u32 state = evsel__intval(evsel, sample, "state");
+ u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
if (state == (u32)PWR_EVENT_EXIT)
c_state_end(tchart, cpu_id, sample->time);
@@ -595,8 +614,8 @@ process_sample_cpu_frequency(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
- u32 state = perf_evsel__intval(evsel, sample, "state");
- u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
+ u32 state = evsel__intval(evsel, sample, "state");
+ u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
p_state_change(tchart, cpu_id, sample->time, state);
return 0;
@@ -608,9 +627,9 @@ process_sample_sched_wakeup(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace)
{
- u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
- int waker = perf_evsel__intval(evsel, sample, "common_pid");
- int wakee = perf_evsel__intval(evsel, sample, "pid");
+ u8 flags = evsel__intval(evsel, sample, "common_flags");
+ int waker = evsel__intval(evsel, sample, "common_pid");
+ int wakee = evsel__intval(evsel, sample, "pid");
sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
return 0;
@@ -622,9 +641,9 @@ process_sample_sched_switch(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace)
{
- int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
- int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
+ int prev_pid = evsel__intval(evsel, sample, "prev_pid");
+ int next_pid = evsel__intval(evsel, sample, "next_pid");
+ u64 prev_state = evsel__intval(evsel, sample, "prev_state");
sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
prev_state, backtrace);
@@ -638,8 +657,8 @@ process_sample_power_start(struct timechart *tchart __maybe_unused,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
- u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
- u64 value = perf_evsel__intval(evsel, sample, "value");
+ u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
+ u64 value = evsel__intval(evsel, sample, "value");
c_state_start(cpu_id, sample->time, value);
return 0;
@@ -661,8 +680,8 @@ process_sample_power_frequency(struct timechart *tchart,
struct perf_sample *sample,
const char *backtrace __maybe_unused)
{
- u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
- u64 value = perf_evsel__intval(evsel, sample, "value");
+ u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
+ u64 value = evsel__intval(evsel, sample, "value");
p_state_change(tchart, cpu_id, sample->time, value);
return 0;
@@ -696,22 +715,12 @@ static void end_sample_processing(struct timechart *tchart)
#endif
/* P state */
- pwr = zalloc(sizeof(*pwr));
+ pwr = p_state_end(tchart, cpu, tchart->last_time);
if (!pwr)
return;
- pwr->state = cpus_pstate_state[cpu];
- pwr->start_time = cpus_pstate_start_times[cpu];
- pwr->end_time = tchart->last_time;
- pwr->cpu = cpu;
- pwr->type = PSTATE;
- pwr->next = tchart->power_events;
-
- if (!pwr->start_time)
- pwr->start_time = tchart->first_time;
if (!pwr->state)
pwr->state = tchart->min_freq;
- tchart->power_events = pwr;
}
}
@@ -724,12 +733,9 @@ static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
struct io_sample *prev;
if (!c) {
- c = zalloc(sizeof(*c));
+ c = create_pidcomm(p);
if (!c)
return -ENOMEM;
- p->current = c;
- c->next = p->all;
- p->all = c;
}
prev = c->io_samples;
@@ -843,7 +849,7 @@ process_enter_read(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long fd = perf_evsel__intval(evsel, sample, "fd");
+ long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
sample->time, fd);
}
@@ -853,7 +859,7 @@ process_exit_read(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long ret = perf_evsel__intval(evsel, sample, "ret");
+ long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
sample->time, ret);
}
@@ -863,7 +869,7 @@ process_enter_write(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long fd = perf_evsel__intval(evsel, sample, "fd");
+ long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
sample->time, fd);
}
@@ -873,7 +879,7 @@ process_exit_write(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long ret = perf_evsel__intval(evsel, sample, "ret");
+ long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
sample->time, ret);
}
@@ -883,7 +889,7 @@ process_enter_sync(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long fd = perf_evsel__intval(evsel, sample, "fd");
+ long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
sample->time, fd);
}
@@ -893,7 +899,7 @@ process_exit_sync(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long ret = perf_evsel__intval(evsel, sample, "ret");
+ long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
sample->time, ret);
}
@@ -903,7 +909,7 @@ process_enter_tx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long fd = perf_evsel__intval(evsel, sample, "fd");
+ long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
sample->time, fd);
}
@@ -913,7 +919,7 @@ process_exit_tx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long ret = perf_evsel__intval(evsel, sample, "ret");
+ long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
sample->time, ret);
}
@@ -923,7 +929,7 @@ process_enter_rx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long fd = perf_evsel__intval(evsel, sample, "fd");
+ long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
sample->time, fd);
}
@@ -933,7 +939,7 @@ process_exit_rx(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long ret = perf_evsel__intval(evsel, sample, "ret");
+ long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
sample->time, ret);
}
@@ -943,7 +949,7 @@ process_enter_poll(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long fd = perf_evsel__intval(evsel, sample, "fd");
+ long fd = evsel__intval(evsel, sample, "fd");
return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
sample->time, fd);
}
@@ -953,7 +959,7 @@ process_exit_poll(struct timechart *tchart,
struct evsel *evsel,
struct perf_sample *sample)
{
- long ret = perf_evsel__intval(evsel, sample, "ret");
+ long ret = evsel__intval(evsel, sample, "ret");
return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
sample->time, ret);
}
@@ -1598,8 +1604,7 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
.force = tchart->force,
};
- struct perf_session *session = perf_session__new(&data, false,
- &tchart->tool);
+ struct perf_session *session = perf_session__new(&data, &tchart->tool);
int ret = -EINVAL;
if (IS_ERR(session))
@@ -1984,7 +1989,7 @@ int cmd_timechart(int argc, const char **argv)
return -1;
}
- if (argc && !strncmp(argv[0], "rec", 3)) {
+ if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
argc = parse_options(argc, argv, timechart_record_options,
timechart_record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 289cf83e658a..eb5740154bc0 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -22,6 +22,7 @@
#include "util/annotate.h"
#include "util/bpf-event.h"
+#include "util/cgroup.h"
#include "util/config.h"
#include "util/color.h"
#include "util/dso.h"
@@ -33,6 +34,7 @@
#include "util/map.h"
#include "util/mmap.h"
#include "util/session.h"
+#include "util/thread.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/top.h"
@@ -52,6 +54,7 @@
#include "util/debug.h"
#include "util/ordered-events.h"
+#include "util/pfm.h"
#include <assert.h>
#include <elf.h>
@@ -84,8 +87,8 @@
#include <linux/ctype.h>
#include <perf/mmap.h>
-static volatile int done;
-static volatile int resize;
+static volatile sig_atomic_t done;
+static volatile sig_atomic_t resize;
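The done/resize flags switch from volatile int to volatile sig_atomic_t, the type the C standard singles out as safe to assign from a signal handler. A small stand-alone example of that idiom:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Assigning to a volatile sig_atomic_t object is one of the few operations on
 * shared state that a signal handler may perform in portable C. */
static volatile sig_atomic_t done;

static void handle_sigint(int sig)
{
	(void)sig;
	done = 1;	/* only set the flag; the main loop does the real work */
}

int main(void)
{
	signal(SIGINT, handle_sigint);
	while (!done)
		pause();	/* sleep until a signal arrives */
	printf("exiting cleanly\n");
	return 0;
}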
#define HEADER_LINE_NR 5
@@ -111,6 +114,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
struct symbol *sym;
struct annotation *notes;
struct map *map;
+ struct dso *dso;
int err = -1;
if (!he || !he->ms.sym)
@@ -120,12 +124,12 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
sym = he->ms.sym;
map = he->ms.map;
+ dso = map__dso(map);
/*
* We can't annotate with just /proc/kallsyms
*/
- if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
- !dso__is_kcore(map->dso)) {
+ if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
pr_err("Can't annotate %s: No vmlinux file was found in the "
"path\n", sym->name);
sleep(1);
@@ -133,10 +137,10 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
}
notes = symbol__annotation(sym);
- pthread_mutex_lock(&notes->lock);
+ mutex_lock(&notes->lock);
if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
- pthread_mutex_unlock(&notes->lock);
+ mutex_unlock(&notes->lock);
pr_err("Not enough memory for annotating '%s' symbol!\n",
sym->name);
sleep(1);
@@ -152,7 +156,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
}
- pthread_mutex_unlock(&notes->lock);
+ mutex_unlock(&notes->lock);
return err;
}
@@ -166,6 +170,7 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
struct utsname uts;
int err = uname(&uts);
+ struct dso *dso = map__dso(map);
ui__warning("Out of bounds address found:\n\n"
"Addr: %" PRIx64 "\n"
@@ -177,8 +182,8 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
"Tools: %s\n\n"
"Not all samples will be on the annotation output.\n\n"
"Please report to linux-kernel@vger.kernel.org\n",
- ip, map->dso->long_name, dso__symtab_origin(map->dso),
- map->start, map->end, sym->start, sym->end,
+ ip, dso->long_name, dso__symtab_origin(dso),
+ map__start(map), map__end(map), sym->start, sym->end,
sym->binding == STB_GLOBAL ? 'g' :
sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
err ? "[unknown]" : uts.machine,
@@ -186,13 +191,14 @@ static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
if (use_browser <= 0)
sleep(5);
- map->erange_warned = true;
+ map__set_erange_warned(map, true);
}
static void perf_top__record_precise_ip(struct perf_top *top,
struct hist_entry *he,
struct perf_sample *sample,
struct evsel *evsel, u64 ip)
+ EXCLUSIVE_LOCKS_REQUIRED(he->hists->lock)
{
struct annotation *notes;
struct symbol *sym = he->ms.sym;
@@ -205,21 +211,21 @@ static void perf_top__record_precise_ip(struct perf_top *top,
notes = symbol__annotation(sym);
- if (pthread_mutex_trylock(&notes->lock))
+ if (!mutex_trylock(&notes->lock))
return;
err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
- pthread_mutex_unlock(&notes->lock);
+ mutex_unlock(&notes->lock);
if (unlikely(err)) {
/*
* This function is now called with he->hists->lock held.
* Release it before going to sleep.
*/
- pthread_mutex_unlock(&he->hists->lock);
+ mutex_unlock(&he->hists->lock);
- if (err == -ERANGE && !he->ms.map->erange_warned)
+ if (err == -ERANGE && !map__erange_warned(he->ms.map))
ui__warn_map_erange(he->ms.map, sym, ip);
else if (err == -ENOMEM) {
pr_err("Not enough memory for annotating '%s' symbol!\n",
@@ -227,7 +233,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
sleep(1);
}
- pthread_mutex_lock(&he->hists->lock);
+ mutex_lock(&he->hists->lock);
}
}
@@ -247,28 +253,28 @@ static void perf_top__show_details(struct perf_top *top)
symbol = he->ms.sym;
notes = symbol__annotation(symbol);
- pthread_mutex_lock(&notes->lock);
+ mutex_lock(&notes->lock);
symbol__calc_percent(symbol, evsel);
if (notes->src == NULL)
goto out_unlock;
- printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
+ printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
if (top->evlist->enabled) {
if (top->zero)
- symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
+ symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx);
else
- symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
+ symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx);
}
if (more != 0)
printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
- pthread_mutex_unlock(&notes->lock);
+ mutex_unlock(&notes->lock);
}
static void perf_top__resort_hists(struct perf_top *t)
@@ -297,9 +303,8 @@ static void perf_top__resort_hists(struct perf_top *t)
hists__collapse_resort(hists, NULL);
/* Non-group events are considered as leader */
- if (symbol_conf.event_group &&
- !perf_evsel__is_group_leader(pos)) {
- struct hists *leader_hists = evsel__hists(pos->leader);
+ if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
+ struct hists *leader_hists = evsel__hists(evsel__leader(pos));
hists__match(leader_hists, hists);
hists__link(leader_hists, hists);
@@ -307,7 +312,7 @@ static void perf_top__resort_hists(struct perf_top *t)
}
evlist__for_each_entry(evlist, pos) {
- perf_evsel__output_resort(pos, NULL);
+ evsel__output_resort(pos, NULL);
}
}
@@ -327,13 +332,13 @@ static void perf_top__print_sym_table(struct perf_top *top)
printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
if (!top->record_opts.overwrite &&
- (hists->stats.nr_lost_warned !=
- hists->stats.nr_events[PERF_RECORD_LOST])) {
- hists->stats.nr_lost_warned =
- hists->stats.nr_events[PERF_RECORD_LOST];
+ (top->evlist->stats.nr_lost_warned !=
+ top->evlist->stats.nr_events[PERF_RECORD_LOST])) {
+ top->evlist->stats.nr_lost_warned =
+ top->evlist->stats.nr_events[PERF_RECORD_LOST];
color_fprintf(stdout, PERF_COLOR_RED,
"WARNING: LOST %d chunks, Check IO/CPU overload",
- hists->stats.nr_lost_warned);
+ top->evlist->stats.nr_lost_warned);
++printed;
}
@@ -441,7 +446,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top)
fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
if (top->evlist->core.nr_entries > 1)
- fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));
+ fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel));
fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
@@ -528,18 +533,18 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
fprintf(stderr, "\nAvailable events:");
evlist__for_each_entry(top->evlist, top->sym_evsel)
- fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
+ fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel));
prompt_integer(&counter, "Enter details event counter");
if (counter >= top->evlist->core.nr_entries) {
top->sym_evsel = evlist__first(top->evlist);
- fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
+ fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
sleep(1);
break;
}
evlist__for_each_entry(top->evlist, top->sym_evsel)
- if (top->sym_evsel->idx == counter)
+ if (top->sym_evsel->core.idx == counter)
break;
} else
top->sym_evsel = evlist__first(top->evlist);
@@ -640,12 +645,9 @@ repeat:
hists->uid_filter_str = top->record_opts.target.uid_str;
}
- ret = perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
- top->min_percent,
- &top->session->header.env,
- !top->record_opts.overwrite,
- &top->annotation_opts);
-
+ ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
+ &top->session->header.env, !top->record_opts.overwrite,
+ &top->annotation_opts);
if (ret == K_RELOAD) {
top->zero = true;
goto repeat;
@@ -707,7 +709,7 @@ repeat:
case -1:
if (errno == EINTR)
continue;
- __fallthrough;
+ fallthrough;
default:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
@@ -725,13 +727,13 @@ repeat:
static int hist_iter__top_callback(struct hist_entry_iter *iter,
struct addr_location *al, bool single,
void *arg)
+ EXCLUSIVE_LOCKS_REQUIRED(iter->he->hists->lock)
{
struct perf_top *top = arg;
- struct hist_entry *he = iter->he;
struct evsel *evsel = iter->evsel;
if (perf_hpp_list.sym && single)
- perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);
+ perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);
hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
!(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
@@ -747,7 +749,6 @@ static void perf_event__process_sample(struct perf_tool *tool,
{
struct perf_top *top = container_of(tool, struct perf_top, tool);
struct addr_location al;
- int err;
if (!machine && perf_guest) {
static struct intlist *seen;
@@ -775,10 +776,13 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (machine__resolve(machine, &al, sample) < 0)
return;
+ if (top->stitch_lbr)
+ al.thread->lbr_stitch_enable = true;
+
if (!machine->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
- if (!perf_evlist__exclude_kernel(top->session->evlist)) {
+ if (!evlist__exclude_kernel(top->session->evlist)) {
ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
@@ -808,7 +812,8 @@ static void perf_event__process_sample(struct perf_tool *tool,
__map__is_kernel(al.map) && map__has_symbols(al.map)) {
if (symbol_conf.vmlinux_name) {
char serr[256];
- dso__strerror_load(al.map->dso, serr, sizeof(serr));
+
+ dso__strerror_load(map__dso(al.map), serr, sizeof(serr));
ui__warning("The %s file can't be used: %s\n%s",
symbol_conf.vmlinux_name, serr, msg);
} else {
@@ -835,13 +840,12 @@ static void perf_event__process_sample(struct perf_tool *tool,
else
iter.ops = &hist_iter_normal;
- pthread_mutex_lock(&hists->lock);
+ mutex_lock(&hists->lock);
- err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
- if (err < 0)
+ if (hist_entry_iter__add(&iter, &al, top->max_stack, top) < 0)
pr_err("Problem incrementing symbol period, skipping event\n");
- pthread_mutex_unlock(&hists->lock);
+ mutex_unlock(&hists->lock);
}
addr_location__put(&al);
@@ -851,11 +855,9 @@ static void
perf_top__process_lost(struct perf_top *top, union perf_event *event,
struct evsel *evsel)
{
- struct hists *hists = evsel__hists(evsel);
-
top->lost += event->lost.lost;
top->lost_total += event->lost.lost;
- hists->stats.total_lost += event->lost.lost;
+ evsel->evlist->stats.total_lost += event->lost.lost;
}
static void
@@ -863,11 +865,9 @@ perf_top__process_lost_samples(struct perf_top *top,
union perf_event *event,
struct evsel *evsel)
{
- struct hists *hists = evsel__hists(evsel);
-
top->lost += event->lost_samples.lost;
top->lost_total += event->lost_samples.lost;
- hists->stats.total_lost_samples += event->lost_samples.lost;
+ evsel->evlist->stats.total_lost_samples += event->lost_samples.lost;
}
static u64 last_timestamp;
@@ -886,21 +886,21 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
while ((event = perf_mmap__read_event(&md->core)) != NULL) {
int ret;
- ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
+ ret = evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
if (ret && ret != -1)
break;
- ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
+ ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0, NULL);
if (ret)
break;
perf_mmap__consume(&md->core);
if (top->qe.rotate) {
- pthread_mutex_lock(&top->qe.mutex);
+ mutex_lock(&top->qe.mutex);
top->qe.rotate = false;
- pthread_cond_signal(&top->qe.cond);
- pthread_mutex_unlock(&top->qe.mutex);
+ cond_signal(&top->qe.cond);
+ mutex_unlock(&top->qe.mutex);
}
}
@@ -914,14 +914,14 @@ static void perf_top__mmap_read(struct perf_top *top)
int i;
if (overwrite)
- perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
+ evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
for (i = 0; i < top->evlist->core.nr_mmaps; i++)
perf_top__mmap_read_idx(top, i);
if (overwrite) {
- perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
- perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
+ evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
+ evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
}
}
@@ -946,7 +946,7 @@ static int perf_top__overwrite_check(struct perf_top *top)
{
struct record_opts *opts = &top->record_opts;
struct evlist *evlist = top->evlist;
- struct perf_evsel_config_term *term;
+ struct evsel_config_term *term;
struct list_head *config_terms;
struct evsel *evsel;
int set, overwrite = -1;
@@ -955,7 +955,7 @@ static int perf_top__overwrite_check(struct perf_top *top)
set = -1;
config_terms = &evsel->config_terms;
list_for_each_entry(term, config_terms, list) {
- if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
+ if (term->type == EVSEL__CONFIG_TERM_OVERWRITE)
set = term->val.overwrite ? 1 : 0;
}
@@ -1021,11 +1021,11 @@ static int perf_top__start_counters(struct perf_top *top)
goto out_err;
}
- perf_evlist__config(evlist, opts, &callchain_param);
+ evlist__config(evlist, opts, &callchain_param);
evlist__for_each_entry(evlist, counter) {
try_again:
- if (evsel__open(counter, top->evlist->core.cpus,
+ if (evsel__open(counter, top->evlist->core.user_requested_cpus,
top->evlist->core.threads) < 0) {
/*
@@ -1042,14 +1042,13 @@ try_again:
perf_top_overwrite_fallback(top, counter))
goto try_again;
- if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
+ if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
if (verbose > 0)
ui__warning("%s\n", msg);
goto try_again;
}
- perf_evsel__open_strerror(counter, &opts->target,
- errno, msg, sizeof(msg));
+ evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg));
ui__error("%s\n", msg);
goto out_err;
}
@@ -1105,10 +1104,10 @@ static void *process_thread(void *arg)
out = rotate_queues(top);
- pthread_mutex_lock(&top->qe.mutex);
+ mutex_lock(&top->qe.mutex);
top->qe.rotate = true;
- pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
- pthread_mutex_unlock(&top->qe.mutex);
+ cond_wait(&top->qe.cond, &top->qe.mutex);
+ mutex_unlock(&top->qe.mutex);
if (ordered_events__flush(out, OE_FLUSH__TOP))
pr_err("failed to process events\n");
@@ -1150,13 +1149,13 @@ static int deliver_event(struct ordered_events *qe,
return 0;
}
- ret = perf_evlist__parse_sample(evlist, event, &sample);
+ ret = evlist__parse_sample(evlist, event, &sample);
if (ret) {
pr_err("Can't parse sample, err = %d\n", ret);
goto next_event;
}
- evsel = perf_evlist__id2evsel(session->evlist, sample.id);
+ evsel = evlist__id2evsel(session->evlist, sample.id);
assert(evsel != NULL);
if (event->header.type == PERF_RECORD_SAMPLE) {
@@ -1205,7 +1204,7 @@ static int deliver_event(struct ordered_events *qe,
} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
perf_top__process_lost_samples(top, event, evsel);
} else if (event->header.type < PERF_RECORD_MAX) {
- hists__inc_nr_events(evsel__hists(evsel), event->header.type);
+ events_stats__inc(&session->evlist->stats, event->header.type);
machine__process_event(machine, event, &sample);
} else
++session->evlist->stats.nr_unknown_events;
@@ -1222,8 +1221,8 @@ static void init_process_thread(struct perf_top *top)
ordered_events__set_copy_on_queue(&top->qe.data[0], true);
ordered_events__set_copy_on_queue(&top->qe.data[1], true);
top->qe.in = &top->qe.data[0];
- pthread_mutex_init(&top->qe.mutex, NULL);
- pthread_cond_init(&top->qe.cond, NULL);
+ mutex_init(&top->qe.mutex);
+ cond_init(&top->qe.cond);
}
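init_process_thread() switches the rotate handshake from raw pthread primitives to perf's mutex/cond wrappers; the protocol itself is unchanged: the processing thread sets qe.rotate and waits, while the mmap reader clears it and signals. A compact pthread sketch of that flag-plus-condvar handshake (simplified to a single exchange; not perf's actual code):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int rotate;

/* Plays the role of process_thread(): request a rotation, then wait for it. */
static void *processor(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	rotate = 1;
	while (rotate)			/* loop guards against spurious wakeups */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("rotation acknowledged, safe to flush\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, processor, NULL);

	/* Plays the role of the mmap reader: spot the request, clear it, signal. */
	for (;;) {
		pthread_mutex_lock(&lock);
		if (rotate) {
			rotate = 0;
			pthread_cond_signal(&cond);
			pthread_mutex_unlock(&lock);
			break;
		}
		pthread_mutex_unlock(&lock);
		sched_yield();
	}
	pthread_join(t, NULL);
	return 0;
}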
static int __cmd_top(struct perf_top *top)
@@ -1274,11 +1273,10 @@ static int __cmd_top(struct perf_top *top)
pr_debug("Couldn't synthesize cgroup events.\n");
machine__synthesize_threads(&top->session->machines.host, &opts->target,
- top->evlist->core.threads, false,
+ top->evlist->core.threads, true, false,
top->nr_threads_synthesize);
- if (top->nr_threads_synthesize > 1)
- perf_set_singlethreaded();
+ perf_set_multithreaded();
if (perf_hpp_list.socket) {
ret = perf_env__read_cpu_topology_map(&perf_env);
@@ -1354,8 +1352,9 @@ static int __cmd_top(struct perf_top *top)
out_join:
pthread_join(thread, NULL);
out_join_thread:
- pthread_cond_signal(&top->qe.cond);
+ cond_signal(&top->qe.cond);
pthread_join(thread_process, NULL);
+ perf_set_singlethreaded();
return ret;
}
@@ -1439,11 +1438,12 @@ int cmd_top(int argc, const char **argv)
.sample_time_set = true,
},
.max_stack = sysctl__max_stack(),
- .annotation_opts = annotation__default_options,
.nr_threads_synthesize = UINT_MAX,
};
+ bool branch_call_mode = false;
struct record_opts *opts = &top.record_opts;
struct target *target = &opts->target;
+ const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
const struct option options[] = {
OPT_CALLBACK('e', "event", &top.evlist, "event",
"event selector. use 'perf list' to list available events",
@@ -1466,8 +1466,7 @@ int cmd_top(int argc, const char **argv)
OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
"hide kernel symbols"),
OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
- "number of mmap data pages",
- perf_evlist__parse_mmap_pages),
+ "number of mmap data pages", evlist__parse_mmap_pages),
OPT_INTEGER('r', "realtime", &top.realtime_prio,
"collect data with this RT SCHED_FIFO priority"),
OPT_INTEGER('d', "delay", &top.delay_secs,
@@ -1476,8 +1475,6 @@ int cmd_top(int argc, const char **argv)
"dump the symbol table used for profiling"),
OPT_INTEGER('f', "count-filter", &top.count_filter,
"only display functions with more events than this"),
- OPT_BOOLEAN(0, "group", &opts->group,
- "put the counters into a counter group"),
OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
"child tasks do not inherit counters"),
OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
@@ -1490,7 +1487,9 @@ int cmd_top(int argc, const char **argv)
"display this many functions"),
OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
"hide user symbols"),
+#ifdef HAVE_SLANG_SUPPORT
OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
+#endif
OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
@@ -1530,9 +1529,11 @@ int cmd_top(int argc, const char **argv)
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
"Enable kernel symbol demangling"),
OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
- OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
+ OPT_STRING(0, "objdump", &objdump_path, "path",
"objdump binary to use for disassembly and annotations"),
- OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
+ OPT_STRING(0, "addr2line", &addr2line_path, "path",
+ "addr2line binary to use for line numbers"),
+ OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
"Add prefix to source file path names in programs (with --prefix-strip)"),
@@ -1554,6 +1555,8 @@ int cmd_top(int argc, const char **argv)
OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
"branch filter mask", "branch stack filter modes",
parse_branch_stack),
+ OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
+ "add last branch records to call history"),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
"Show raw trace event output (do not use print fmt or plugins)"),
OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
@@ -1563,6 +1566,8 @@ int cmd_top(int argc, const char **argv)
OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
"number of thread to run event synthesize"),
+ OPT_CALLBACK('G', "cgroup", &top.evlist, "name",
+ "monitor event in cgroup name only", parse_cgroups),
OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
"Record namespaces events"),
OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
@@ -1571,10 +1576,16 @@ int cmd_top(int argc, const char **argv)
"Sort the output by the event at the index n in group. "
"If n is invalid, sort by the first event. "
"WARNING: should be used on grouped events."),
+ OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
+ "Enable LBR callgraph stitching approach"),
+#ifdef HAVE_LIBPFM
+ OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
+ "libpfm4 event selector. use 'perf list' to list available events",
+ parse_libpfm_events_option),
+#endif
OPTS_EVSWITCH(&top.evswitch),
OPT_END()
};
- struct evlist *sb_evlist = NULL;
const char * const top_usage[] = {
"perf top [<options>]",
NULL
@@ -1584,6 +1595,8 @@ int cmd_top(int argc, const char **argv)
if (status < 0)
return status;
+ annotation_options__init(&top.annotation_opts);
+
top.annotation_opts.min_pcnt = 5;
top.annotation_opts.context = 4;
@@ -1602,7 +1615,7 @@ int cmd_top(int argc, const char **argv)
if (status) {
/*
* Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
- * warn the user explicitely.
+ * warn the user explicitly.
*/
eprintf(status == ENOSYS ? 1 : 0, verbose,
"Couldn't read the cpuid for this machine: %s\n",
@@ -1614,11 +1627,31 @@ int cmd_top(int argc, const char **argv)
if (argc)
usage_with_options(top_usage, options);
+ if (disassembler_style) {
+ top.annotation_opts.disassembler_style = strdup(disassembler_style);
+ if (!top.annotation_opts.disassembler_style)
+ return -ENOMEM;
+ }
+ if (objdump_path) {
+ top.annotation_opts.objdump_path = strdup(objdump_path);
+ if (!top.annotation_opts.objdump_path)
+ return -ENOMEM;
+ }
+ if (addr2line_path) {
+ symbol_conf.addr2line_path = strdup(addr2line_path);
+ if (!symbol_conf.addr2line_path)
+ return -ENOMEM;
+ }
+
+ status = symbol__validate_sym_arguments();
+ if (status)
+ goto out_delete_evlist;
+
if (annotate_check_args(&top.annotation_opts) < 0)
goto out_delete_evlist;
if (!top.evlist->core.nr_entries &&
- perf_evlist__add_default(top.evlist) < 0) {
+ evlist__add_default(top.evlist) < 0) {
pr_err("Not enough memory for event selector list\n");
goto out_delete_evlist;
}
@@ -1640,6 +1673,30 @@ int cmd_top(int argc, const char **argv)
}
}
+ if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
+ pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
+ goto out_delete_evlist;
+ }
+
+ if (nr_cgroups > 0 && opts->record_cgroup) {
+ pr_err("--cgroup and --all-cgroups cannot be used together\n");
+ goto out_delete_evlist;
+ }
+
+ if (branch_call_mode) {
+ if (!opts->branch_stack)
+ opts->branch_stack = PERF_SAMPLE_BRANCH_ANY;
+ symbol_conf.use_callchain = true;
+ callchain_param.key = CCKEY_ADDRESS;
+ callchain_param.branch_callstack = true;
+ callchain_param.enabled = true;
+ if (callchain_param.record_mode == CALLCHAIN_NONE)
+ callchain_param.record_mode = CALLCHAIN_FP;
+ callchain_register_param(&callchain_param);
+ if (!sort_order)
+ sort_order = "srcline,symbol,dso";
+ }
+
if (opts->branch_stack && callchain_param.enabled)
symbol_conf.show_branchflag_count = true;
@@ -1649,8 +1706,10 @@ int cmd_top(int argc, const char **argv)
if (top.use_stdio)
use_browser = 0;
+#ifdef HAVE_SLANG_SUPPORT
else if (top.use_tui)
use_browser = 1;
+#endif
setup_browser(false);
@@ -1683,9 +1742,10 @@ int cmd_top(int argc, const char **argv)
if (target__none(target))
target->system_wide = true;
- if (perf_evlist__create_maps(top.evlist, target) < 0) {
+ if (evlist__create_maps(top.evlist, target) < 0) {
ui__error("Couldn't create thread/CPU maps: %s\n",
errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
+ status = -errno;
goto out_delete_evlist;
}
@@ -1726,16 +1786,31 @@ int cmd_top(int argc, const char **argv)
signal(SIGWINCH, winch_sig);
}
- top.session = perf_session__new(NULL, false, NULL);
+ top.session = perf_session__new(NULL, NULL);
if (IS_ERR(top.session)) {
status = PTR_ERR(top.session);
goto out_delete_evlist;
}
- if (!top.record_opts.no_bpf_event)
- bpf_event__add_sb_event(&sb_evlist, &perf_env);
+#ifdef HAVE_LIBBPF_SUPPORT
+ if (!top.record_opts.no_bpf_event) {
+ top.sb_evlist = evlist__new();
+
+ if (top.sb_evlist == NULL) {
+ pr_err("Couldn't create side band evlist.\n.");
+ status = -EINVAL;
+ goto out_delete_evlist;
+ }
+
+ if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
+ pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
+ status = -EINVAL;
+ goto out_delete_evlist;
+ }
+ }
+#endif
- if (perf_evlist__start_sb_thread(sb_evlist, target)) {
+ if (evlist__start_sb_thread(top.sb_evlist, target)) {
pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
opts->no_bpf_event = true;
}
@@ -1743,11 +1818,12 @@ int cmd_top(int argc, const char **argv)
status = __cmd_top(&top);
if (!opts->no_bpf_event)
- perf_evlist__stop_sb_thread(sb_evlist);
+ evlist__stop_sb_thread(top.sb_evlist);
out_delete_evlist:
evlist__delete(top.evlist);
perf_session__delete(top.session);
+ annotation_options__exit(&top.annotation_opts);
return status;
}
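
The builtin-top.c changes above stop pointing annotation options directly at optarg strings: --objdump, --addr2line and -M are parsed into local const char * variables and duplicated into owned fields only when actually given, returning -ENOMEM if strdup() fails, with annotation_options__init()/annotation_options__exit() now bracketing the option lifetime. Below is a minimal sketch of that duplicate-on-set pattern using only libc; the struct and helper names are illustrative, not the perf API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct annot_opts {                       /* stand-in for perf's annotation_options */
	char *objdump_path;
	char *disassembler_style;
};

/* Duplicate an optional CLI string into an owned field; NULL input is a no-op. */
static int opt_strdup(char **dst, const char *src)
{
	if (!src)
		return 0;
	*dst = strdup(src);
	return *dst ? 0 : -ENOMEM;
}

static void annot_opts__exit(struct annot_opts *opts)
{
	free(opts->objdump_path);
	free(opts->disassembler_style);
}

int main(int argc, char **argv)
{
	struct annot_opts opts = { 0 };
	const char *objdump = argc > 1 ? argv[1] : NULL;   /* would come from OPT_STRING() */
	int err;

	err = opt_strdup(&opts.objdump_path, objdump);
	if (!err)
		err = opt_strdup(&opts.disassembler_style, "intel");
	if (err) {
		annot_opts__exit(&opts);
		return 1;                                  /* cmd_top() returns -ENOMEM here */
	}
	printf("objdump: %s\n", opts.objdump_path ? opts.objdump_path : "(default)");
	annot_opts__exit(&opts);
	return 0;
}
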
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 01d542007c8b..8ee3a45c3c54 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -15,9 +15,10 @@
*/
#include "util/record.h"
-#include <traceevent/event-parse.h>
#include <api/fs/tracing_path.h>
+#ifdef HAVE_LIBBPF_SUPPORT
#include <bpf/bpf.h>
+#endif
#include "util/bpf_map.h"
#include "util/rlimit.h"
#include "builtin.h"
@@ -53,6 +54,7 @@
#include "trace-event.h"
#include "util/parse-events.h"
#include "util/bpf-loader.h"
+#include "util/tracepoint.h"
#include "callchain.h"
#include "print_binary.h"
#include "string2.h"
@@ -79,6 +81,10 @@
#include <linux/ctype.h>
#include <perf/mmap.h>
+#ifdef HAVE_LIBTRACEEVENT
+#include <traceevent/event-parse.h>
+#endif
+
#ifndef O_CLOEXEC
# define O_CLOEXEC 02000000
#endif
@@ -87,6 +93,8 @@
# define F_LINUX_SPECIFIC_BASE 1024
#endif
+#define RAW_SYSCALL_ARGS_NUM 6
+
/*
* strtoul: Go from a string to a value, i.e. for msr: MSR_FS_BASE to 0xc0000100
*/
@@ -107,7 +115,7 @@ struct syscall_fmt {
const char *sys_enter,
*sys_exit;
} bpf_prog_name;
- struct syscall_arg_fmt arg[6];
+ struct syscall_arg_fmt arg[RAW_SYSCALL_ARGS_NUM];
u8 nr_args;
bool errpid;
bool timeout;
@@ -119,7 +127,6 @@ struct trace {
struct syscalltbl *sctbl;
struct {
struct syscall *table;
- struct bpf_map *map;
struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
struct bpf_map *sys_enter,
*sys_exit;
@@ -366,11 +373,9 @@ out_delete:
return NULL;
}
-static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
- struct tp_field *field,
- const char *name)
+static int evsel__init_tp_uint_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
- struct tep_format_field *format_field = perf_evsel__field(evsel, name);
+ struct tep_format_field *format_field = evsel__field(evsel, name);
if (format_field == NULL)
return -1;
@@ -380,13 +385,11 @@ static int perf_evsel__init_tp_uint_field(struct evsel *evsel,
#define perf_evsel__init_sc_tp_uint_field(evsel, name) \
({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
- perf_evsel__init_tp_uint_field(evsel, &sc->name, #name); })
+ evsel__init_tp_uint_field(evsel, &sc->name, #name); })
-static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
- struct tp_field *field,
- const char *name)
+static int evsel__init_tp_ptr_field(struct evsel *evsel, struct tp_field *field, const char *name)
{
- struct tep_format_field *format_field = perf_evsel__field(evsel, name);
+ struct tep_format_field *format_field = evsel__field(evsel, name);
if (format_field == NULL)
return -1;
@@ -396,7 +399,7 @@ static int perf_evsel__init_tp_ptr_field(struct evsel *evsel,
#define perf_evsel__init_sc_tp_ptr_field(evsel, name) \
({ struct syscall_tp *sc = __evsel__syscall_tp(evsel);\
- perf_evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
+ evsel__init_tp_ptr_field(evsel, &sc->name, #name); })
static void evsel__delete_priv(struct evsel *evsel)
{
@@ -404,13 +407,13 @@ static void evsel__delete_priv(struct evsel *evsel)
evsel__delete(evsel);
}
-static int perf_evsel__init_syscall_tp(struct evsel *evsel)
+static int evsel__init_syscall_tp(struct evsel *evsel)
{
struct syscall_tp *sc = evsel__syscall_tp(evsel);
if (sc != NULL) {
- if (perf_evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
- perf_evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
+ if (evsel__init_tp_uint_field(evsel, &sc->id, "__syscall_nr") &&
+ evsel__init_tp_uint_field(evsel, &sc->id, "nr"))
return -ENOENT;
return 0;
}
@@ -418,14 +421,14 @@ static int perf_evsel__init_syscall_tp(struct evsel *evsel)
return -ENOMEM;
}
-static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
+static int evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evsel *tp)
{
struct syscall_tp *sc = evsel__syscall_tp(evsel);
if (sc != NULL) {
- struct tep_format_field *syscall_id = perf_evsel__field(tp, "id");
+ struct tep_format_field *syscall_id = evsel__field(tp, "id");
if (syscall_id == NULL)
- syscall_id = perf_evsel__field(tp, "__syscall_nr");
+ syscall_id = evsel__field(tp, "__syscall_nr");
if (syscall_id == NULL ||
__tp_field__init_uint(&sc->id, syscall_id->size, syscall_id->offset, evsel->needs_swap))
return -EINVAL;
@@ -436,21 +439,21 @@ static int perf_evsel__init_augmented_syscall_tp(struct evsel *evsel, struct evs
return -ENOMEM;
}
-static int perf_evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
+static int evsel__init_augmented_syscall_tp_args(struct evsel *evsel)
{
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
return __tp_field__init_ptr(&sc->args, sc->id.offset + sizeof(u64));
}
-static int perf_evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
+static int evsel__init_augmented_syscall_tp_ret(struct evsel *evsel)
{
struct syscall_tp *sc = __evsel__syscall_tp(evsel);
return __tp_field__init_uint(&sc->ret, sizeof(u64), sc->id.offset + sizeof(u64), evsel->needs_swap);
}
-static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
+static int evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
{
if (evsel__syscall_tp(evsel) != NULL) {
if (perf_evsel__init_sc_tp_uint_field(evsel, id))
@@ -465,16 +468,16 @@ static int perf_evsel__init_raw_syscall_tp(struct evsel *evsel, void *handler)
static struct evsel *perf_evsel__raw_syscall_newtp(const char *direction, void *handler)
{
- struct evsel *evsel = perf_evsel__newtp("raw_syscalls", direction);
+ struct evsel *evsel = evsel__newtp("raw_syscalls", direction);
/* older kernel (e.g., RHEL6) use syscalls:{enter,exit} */
if (IS_ERR(evsel))
- evsel = perf_evsel__newtp("syscalls", direction);
+ evsel = evsel__newtp("syscalls", direction);
if (IS_ERR(evsel))
return NULL;
- if (perf_evsel__init_raw_syscall_tp(evsel, handler))
+ if (evsel__init_raw_syscall_tp(evsel, handler))
goto out_delete;
return evsel;
@@ -618,11 +621,8 @@ bool strarray__strtoul_flags(struct strarray *sa, char *bf, size_t size, u64 *re
if (isalpha(*tok) || *tok == '_') {
if (!strarray__strtoul(sa, tok, toklen, &val))
return false;
- } else {
- bool is_hexa = tok[0] == 0 && (tok[1] = 'x' || tok[1] == 'X');
-
- val = strtoul(tok, NULL, is_hexa ? 16 : 0);
- }
+ } else
+ val = strtoul(tok, NULL, 0);
*ret |= (1 << (val - 1));
@@ -711,7 +711,15 @@ static size_t syscall_arg__scnprintf_char_array(char *bf, size_t size, struct sy
static const char *bpf_cmd[] = {
"MAP_CREATE", "MAP_LOOKUP_ELEM", "MAP_UPDATE_ELEM", "MAP_DELETE_ELEM",
- "MAP_GET_NEXT_KEY", "PROG_LOAD",
+ "MAP_GET_NEXT_KEY", "PROG_LOAD", "OBJ_PIN", "OBJ_GET", "PROG_ATTACH",
+ "PROG_DETACH", "PROG_TEST_RUN", "PROG_GET_NEXT_ID", "MAP_GET_NEXT_ID",
+ "PROG_GET_FD_BY_ID", "MAP_GET_FD_BY_ID", "OBJ_GET_INFO_BY_FD",
+ "PROG_QUERY", "RAW_TRACEPOINT_OPEN", "BTF_LOAD", "BTF_GET_FD_BY_ID",
+ "TASK_FD_QUERY", "MAP_LOOKUP_AND_DELETE_ELEM", "MAP_FREEZE",
+ "BTF_GET_NEXT_ID", "MAP_LOOKUP_BATCH", "MAP_LOOKUP_AND_DELETE_BATCH",
+ "MAP_UPDATE_BATCH", "MAP_DELETE_BATCH", "LINK_CREATE", "LINK_UPDATE",
+ "LINK_GET_FD_BY_ID", "LINK_GET_NEXT_ID", "ENABLE_STATS", "ITER_CREATE",
+ "LINK_DETACH", "PROG_BIND_MAP",
};
static DEFINE_STRARRAY(bpf_cmd, "BPF_");
@@ -922,6 +930,8 @@ static struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_PTR, /* brk */ }, }, },
{ .name = "clock_gettime",
.arg = { [0] = STRARRAY(clk_id, clockid), }, },
+ { .name = "clock_nanosleep",
+ .arg = { [2] = { .scnprintf = SCA_TIMESPEC, /* rqtp */ }, }, },
{ .name = "clone", .errpid = true, .nr_args = 5,
.arg = { [0] = { .name = "flags", .scnprintf = SCA_CLONE_FLAGS, },
[1] = { .name = "child_stack", .scnprintf = SCA_HEX, },
@@ -975,6 +985,8 @@ static struct syscall_fmt syscall_fmts[] = {
.arg = { [2] = { .scnprintf = SCA_GETRANDOM_FLAGS, /* flags */ }, }, },
{ .name = "getrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
+ { .name = "getsockopt",
+ .arg = { [1] = STRARRAY(level, socket_level), }, },
{ .name = "gettid", .errpid = true, },
{ .name = "ioctl",
.arg = {
@@ -1049,7 +1061,8 @@ static struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = { .scnprintf = SCA_FDAT, /* dfd */ },
[2] = { .scnprintf = SCA_OPEN_FLAGS, /* flags */ }, }, },
{ .name = "perf_event_open",
- .arg = { [2] = { .scnprintf = SCA_INT, /* cpu */ },
+ .arg = { [0] = { .scnprintf = SCA_PERF_ATTR, /* attr */ },
+ [2] = { .scnprintf = SCA_INT, /* cpu */ },
[3] = { .scnprintf = SCA_FD, /* group_fd */ },
[4] = { .scnprintf = SCA_PERF_FLAGS, /* flags */ }, }, },
{ .name = "pipe2",
@@ -1117,6 +1130,8 @@ static struct syscall_fmt syscall_fmts[] = {
.arg = { [0] = STRARRAY(which, itimers), }, },
{ .name = "setrlimit",
.arg = { [0] = STRARRAY(resource, rlimit_resources), }, },
+ { .name = "setsockopt",
+ .arg = { [1] = STRARRAY(level, socket_level), }, },
{ .name = "socket",
.arg = { [0] = STRARRAY(family, socket_families),
[1] = { .scnprintf = SCA_SK_TYPE, /* type */ },
@@ -1214,16 +1229,6 @@ struct syscall {
};
/*
- * Must match what is in the BPF program:
- *
- * tools/perf/examples/bpf/augmented_raw_syscalls.c
- */
-struct bpf_map_syscall_entry {
- bool enabled;
- u16 string_args_len[6];
-};
-
-/*
* We need to have this 'calculated' boolean because in some cases we really
* don't know what is the duration of a syscall, for instance, when we start
* a session and some threads are waiting for a syscall to finish, say 'poll',
@@ -1528,13 +1533,20 @@ static size_t trace__fprintf_tstamp(struct trace *trace, u64 tstamp, FILE *fp)
return fprintf(fp, " ? ");
}
-static bool done = false;
-static bool interrupted = false;
+static pid_t workload_pid = -1;
+static volatile sig_atomic_t done = false;
+static volatile sig_atomic_t interrupted = false;
-static void sig_handler(int sig)
+static void sighandler_interrupt(int sig __maybe_unused)
{
- done = true;
- interrupted = sig == SIGINT;
+ done = interrupted = true;
+}
+
+static void sighandler_chld(int sig __maybe_unused, siginfo_t *info,
+ void *context __maybe_unused)
+{
+ if (info->si_pid == workload_pid)
+ done = true;
}
static size_t trace__fprintf_comm_tid(struct trace *trace, struct thread *thread, FILE *fp)
@@ -1624,8 +1636,8 @@ static int trace__symbols_init(struct trace *trace, struct evlist *evlist)
goto out;
err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
- evlist->core.threads, trace__tool_process, false,
- 1);
+ evlist->core.threads, trace__tool_process,
+ true, false, 1);
out:
if (err)
symbol__exit();
@@ -1645,7 +1657,7 @@ static int syscall__alloc_arg_fmts(struct syscall *sc, int nr_args)
{
int idx;
- if (nr_args == 6 && sc->fmt && sc->fmt->nr_args != 0)
+ if (nr_args == RAW_SYSCALL_ARGS_NUM && sc->fmt && sc->fmt->nr_args != 0)
nr_args = sc->fmt->nr_args;
sc->arg_fmt = calloc(nr_args, sizeof(*sc->arg_fmt));
@@ -1717,7 +1729,7 @@ syscall_arg_fmt__init_array(struct syscall_arg_fmt *arg, struct tep_format_field
len >= 2 && strcmp(field->name + len - 2, "fd") == 0) {
/*
* /sys/kernel/tracing/events/syscalls/sys_enter*
- * egrep 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
+ * grep -E 'field:.*fd;' .../format|sed -r 's/.*field:([a-z ]+) [a-z_]*fd.+/\1/g'|sort|uniq -c
* 65 int
* 23 unsigned int
* 7 unsigned long
@@ -1752,19 +1764,37 @@ static int trace__read_syscall_info(struct trace *trace, int id)
struct syscall *sc;
const char *name = syscalltbl__name(trace->sctbl, id);
+#ifdef HAVE_SYSCALL_TABLE_SUPPORT
if (trace->syscalls.table == NULL) {
trace->syscalls.table = calloc(trace->sctbl->syscalls.max_id + 1, sizeof(*sc));
if (trace->syscalls.table == NULL)
return -ENOMEM;
}
+#else
+ if (id > trace->sctbl->syscalls.max_id || (id == 0 && trace->syscalls.table == NULL)) {
+ // When using libaudit we don't know beforehand what is the max syscall id
+ struct syscall *table = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
+
+ if (table == NULL)
+ return -ENOMEM;
+
+ // Need to memset from offset 0 and +1 members if brand new
+ if (trace->syscalls.table == NULL)
+ memset(table, 0, (id + 1) * sizeof(*sc));
+ else
+ memset(table + trace->sctbl->syscalls.max_id + 1, 0, (id - trace->sctbl->syscalls.max_id) * sizeof(*sc));
+ trace->syscalls.table = table;
+ trace->sctbl->syscalls.max_id = id;
+ }
+#endif
sc = trace->syscalls.table + id;
if (sc->nonexistent)
- return 0;
+ return -EEXIST;
if (name == NULL) {
sc->nonexistent = true;
- return 0;
+ return -EEXIST;
}
sc->name = name;
@@ -1778,11 +1808,18 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc->tp_format = trace_event__tp_format("syscalls", tp_name);
}
- if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ? 6 : sc->tp_format->format.nr_fields))
- return -ENOMEM;
-
- if (IS_ERR(sc->tp_format))
+ /*
+ * Fails to read trace point format via sysfs node, so the trace point
+ * doesn't exist. Set the 'nonexistent' flag as true.
+ */
+ if (IS_ERR(sc->tp_format)) {
+ sc->nonexistent = true;
return PTR_ERR(sc->tp_format);
+ }
+
+ if (syscall__alloc_arg_fmts(sc, IS_ERR(sc->tp_format) ?
+ RAW_SYSCALL_ARGS_NUM : sc->tp_format->format.nr_fields))
+ return -ENOMEM;
sc->args = sc->tp_format->format.fields;
/*
@@ -1801,7 +1838,7 @@ static int trace__read_syscall_info(struct trace *trace, int id)
return syscall__set_arg_fmts(sc);
}
-static int perf_evsel__init_tp_arg_scnprintf(struct evsel *evsel)
+static int evsel__init_tp_arg_scnprintf(struct evsel *evsel)
{
struct syscall_arg_fmt *fmt = evsel__syscall_arg_fmt(evsel);
@@ -2074,25 +2111,34 @@ static struct syscall *trace__syscall_info(struct trace *trace,
if (verbose > 1) {
static u64 n;
fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
- id, perf_evsel__name(evsel), ++n);
+ id, evsel__name(evsel), ++n);
}
return NULL;
}
err = -EINVAL;
- if (id > trace->sctbl->syscalls.max_id)
+#ifdef HAVE_SYSCALL_TABLE_SUPPORT
+ if (id > trace->sctbl->syscalls.max_id) {
+#else
+ if (id >= trace->sctbl->syscalls.max_id) {
+ /*
+ * With libaudit we don't know beforehand what is the max_id,
+ * so we let trace__read_syscall_info() figure that out as we
+ * go on reading syscalls.
+ */
+ err = trace__read_syscall_info(trace, id);
+ if (err)
+#endif
goto out_cant_read;
+ }
if ((trace->syscalls.table == NULL || trace->syscalls.table[id].name == NULL) &&
(err = trace__read_syscall_info(trace, id)) != 0)
goto out_cant_read;
- if (trace->syscalls.table[id].name == NULL) {
- if (trace->syscalls.table[id].nonexistent)
- return NULL;
+ if (trace->syscalls.table && trace->syscalls.table[id].nonexistent)
goto out_cant_read;
- }
return &trace->syscalls.table[id];
@@ -2127,13 +2173,10 @@ static void thread__update_stats(struct thread *thread, struct thread_trace *ttr
stats = inode->priv;
if (stats == NULL) {
- stats = malloc(sizeof(*stats));
+ stats = zalloc(sizeof(*stats));
if (stats == NULL)
return;
- stats->nr_failures = 0;
- stats->max_errno = 0;
- stats->errnos = NULL;
init_stats(&stats->stats);
inode->priv = stats;
}
@@ -2206,7 +2249,7 @@ static int trace__fprintf_sample(struct trace *trace, struct evsel *evsel,
double ts = (double)sample->time / NSEC_PER_MSEC;
printed += fprintf(trace->output, "%22s %10.3f %s %d/%d [%d]\n",
- perf_evsel__name(evsel), ts,
+ evsel__name(evsel), ts,
thread__comm_str(thread),
sample->pid, sample->tid, sample->cpu);
}
@@ -2240,6 +2283,14 @@ static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sam
return augmented_args;
}
+static void syscall__exit(struct syscall *sc)
+{
+ if (!sc)
+ return;
+
+ zfree(&sc->arg_fmt);
+}
+
static int trace__sys_enter(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
@@ -2382,7 +2433,7 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam
static const char *errno_to_name(struct evsel *evsel, int err)
{
- struct perf_env *env = perf_evsel__env(evsel);
+ struct perf_env *env = evsel__env(evsel);
const char *arch_name = perf_env__arch(env);
return arch_syscalls__strerrno(arch_name, err);
@@ -2513,7 +2564,7 @@ errno_print: {
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
- pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+ pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
out:
ttrace->entry_pending = false;
err = 0;
@@ -2531,7 +2582,7 @@ static int trace__vfs_getname(struct trace *trace, struct evsel *evsel,
size_t filename_len, entry_str_len, to_move;
ssize_t remaining_space;
char *pos;
- const char *filename = perf_evsel__rawptr(evsel, sample, "pathname");
+ const char *filename = evsel__rawptr(evsel, sample, "pathname");
if (!thread)
goto out;
@@ -2587,7 +2638,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct evsel *evsel,
union perf_event *event __maybe_unused,
struct perf_sample *sample)
{
- u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
+ u64 runtime = evsel__intval(evsel, sample, "runtime");
double runtime_ms = (double)runtime / NSEC_PER_MSEC;
struct thread *thread = machine__findnew_thread(trace->host,
sample->pid,
@@ -2606,10 +2657,10 @@ out_put:
out_dump:
fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
evsel->name,
- perf_evsel__strval(evsel, sample, "comm"),
- (pid_t)perf_evsel__intval(evsel, sample, "pid"),
+ evsel__strval(evsel, sample, "comm"),
+ (pid_t)evsel__intval(evsel, sample, "pid"),
runtime,
- perf_evsel__intval(evsel, sample, "vruntime"));
+ evsel__intval(evsel, sample, "vruntime"));
goto out_put;
}
@@ -2680,6 +2731,8 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
offset = format_field__intval(field, sample, evsel->needs_swap);
syscall_arg.len = offset >> 16;
offset &= 0xffff;
+ if (tep_field_is_relative(field->flags))
+ offset += field->offset + field->size;
}
val = (uintptr_t)(sample->raw_data + offset);
@@ -2693,7 +2746,7 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
/*
* Suppress this argument if its value is zero and
- * and we don't have a string associated in an
+ * we don't have a string associated in an
* strarray for it.
*/
if (val == 0 &&
@@ -2706,11 +2759,7 @@ static size_t trace__fprintf_tp_fields(struct trace *trace, struct evsel *evsel,
printed += scnprintf(bf + printed, size - printed, "%s", printed ? ", " : "");
- /*
- * XXX Perhaps we should have a show_tp_arg_names,
- * leaving show_arg_names just for syscalls?
- */
- if (1 || trace->show_arg_names)
+ if (trace->show_arg_names)
printed += scnprintf(bf + printed, size - printed, "%s: ", field->name);
printed += syscall_arg_fmt__scnprintf_val(arg, bf + printed, size - printed, &syscall_arg, val);
@@ -2774,7 +2823,7 @@ static int trace__event_handler(struct trace *trace, struct evsel *evsel,
fprintf(trace->output, "%s(", evsel->name);
- if (perf_evsel__is_bpf_output(evsel)) {
+ if (evsel__is_bpf_output(evsel)) {
bpf_output__fprintf(trace, sample);
} else if (evsel->tp_format) {
if (strncmp(evsel->tp_format->name, "sys_enter_", 10) ||
@@ -2795,7 +2844,7 @@ newline:
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
- pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+ pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
++trace->nr_events_printed;
@@ -2814,7 +2863,7 @@ static void print_location(FILE *f, struct perf_sample *sample,
{
if ((verbose > 0 || print_dso) && al->map)
- fprintf(f, "%s@", al->map->dso->long_name);
+ fprintf(f, "%s@", map__dso(al->map)->long_name);
if ((verbose > 0 || print_sym) && al->sym)
fprintf(f, "%s+0x%" PRIx64, al->sym->name,
@@ -2890,7 +2939,7 @@ static int trace__pgfault(struct trace *trace,
if (callchain_ret > 0)
trace__fprintf_callchain(trace, sample);
else if (callchain_ret < 0)
- pr_err("Problem processing %s callchain, skipping...\n", perf_evsel__name(evsel));
+ pr_err("Problem processing %s callchain, skipping...\n", evsel__name(evsel));
++trace->nr_events_printed;
out:
@@ -3021,21 +3070,17 @@ static bool evlist__add_vfs_getname(struct evlist *evlist)
struct parse_events_error err;
int ret;
- bzero(&err, sizeof(err));
+ parse_events_error__init(&err);
ret = parse_events(evlist, "probe:vfs_getname*", &err);
- if (ret) {
- free(err.str);
- free(err.help);
- free(err.first_str);
- free(err.first_help);
+ parse_events_error__exit(&err);
+ if (ret)
return false;
- }
evlist__for_each_entry_safe(evlist, evsel, tmp) {
- if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
+ if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
continue;
- if (perf_evsel__field(evsel, "pathname")) {
+ if (evsel__field(evsel, "pathname")) {
evsel->handler = trace__vfs_getname;
found = true;
continue;
@@ -3049,7 +3094,7 @@ static bool evlist__add_vfs_getname(struct evlist *evlist)
return found;
}
-static struct evsel *perf_evsel__new_pgfault(u64 config)
+static struct evsel *evsel__new_pgfault(u64 config)
{
struct evsel *evsel;
struct perf_event_attr attr = {
@@ -3069,6 +3114,21 @@ static struct evsel *perf_evsel__new_pgfault(u64 config)
return evsel;
}
+static void evlist__free_syscall_tp_fields(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ struct evsel_trace *et = evsel->priv;
+
+ if (!et || !evsel->tp_format || strcmp(evsel->tp_format->system, "syscalls"))
+ continue;
+
+ zfree(&et->fmt);
+ free(et);
+ }
+}
+
static void trace__handle_event(struct trace *trace, union perf_event *event, struct perf_sample *sample)
{
const u32 type = event->header.type;
@@ -3079,7 +3139,7 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
return;
}
- evsel = perf_evlist__id2evsel(trace->evlist, sample->id);
+ evsel = evlist__id2evsel(trace->evlist, sample->id);
if (evsel == NULL) {
fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample->id);
return;
@@ -3093,7 +3153,7 @@ static void trace__handle_event(struct trace *trace, union perf_event *event, st
if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
sample->raw_data == NULL) {
fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
- perf_evsel__name(evsel), sample->tid,
+ evsel__name(evsel), sample->tid,
sample->cpu, sample->raw_size);
} else {
tracepoint_handler handler = evsel->handler;
@@ -3124,8 +3184,8 @@ static int trace__add_syscall_newtp(struct trace *trace)
if (perf_evsel__init_sc_tp_uint_field(sys_exit, ret))
goto out_delete_sys_exit;
- perf_evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
- perf_evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
+ evsel__config_callchain(sys_enter, &trace->opts, &callchain_param);
+ evsel__config_callchain(sys_exit, &trace->opts, &callchain_param);
evlist__add(evlist, sys_enter);
evlist__add(evlist, sys_exit);
@@ -3164,10 +3224,9 @@ static int trace__set_ev_qualifier_tp_filter(struct trace *trace)
if (filter == NULL)
goto out_enomem;
- if (!perf_evsel__append_tp_filter(trace->syscalls.events.sys_enter,
- filter)) {
+ if (!evsel__append_tp_filter(trace->syscalls.events.sys_enter, filter)) {
sys_exit = trace->syscalls.events.sys_exit;
- err = perf_evsel__append_tp_filter(sys_exit, filter);
+ err = evsel__append_tp_filter(sys_exit, filter);
}
free(filter);
@@ -3179,12 +3238,42 @@ out_enomem:
}
#ifdef HAVE_LIBBPF_SUPPORT
+static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
+{
+ if (trace->bpf_obj == NULL)
+ return NULL;
+
+ return bpf_object__find_map_by_name(trace->bpf_obj, name);
+}
+
+static void trace__set_bpf_map_filtered_pids(struct trace *trace)
+{
+ trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
+}
+
+static void trace__set_bpf_map_syscalls(struct trace *trace)
+{
+ trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
+ trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
+}
+
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace, const char *name)
{
+ struct bpf_program *pos, *prog = NULL;
+ const char *sec_name;
+
if (trace->bpf_obj == NULL)
return NULL;
- return bpf_object__find_program_by_title(trace->bpf_obj, name);
+ bpf_object__for_each_program(pos, trace->bpf_obj) {
+ sec_name = bpf_program__section_name(pos);
+ if (sec_name && !strcmp(sec_name, name)) {
+ prog = pos;
+ break;
+ }
+ }
+
+ return prog;
}
static struct bpf_program *trace__find_syscall_bpf_prog(struct trace *trace, struct syscall *sc,
@@ -3243,80 +3332,6 @@ static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
}
-static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
-{
- struct syscall *sc = trace__syscall_info(trace, NULL, id);
- int arg = 0;
-
- if (sc == NULL)
- goto out;
-
- for (; arg < sc->nr_args; ++arg) {
- entry->string_args_len[arg] = 0;
- if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
- /* Should be set like strace -s strsize */
- entry->string_args_len[arg] = PATH_MAX;
- }
- }
-out:
- for (; arg < 6; ++arg)
- entry->string_args_len[arg] = 0;
-}
-static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
-{
- int fd = bpf_map__fd(trace->syscalls.map);
- struct bpf_map_syscall_entry value = {
- .enabled = !trace->not_ev_qualifier,
- };
- int err = 0;
- size_t i;
-
- for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
- int key = trace->ev_qualifier_ids.entries[i];
-
- if (value.enabled) {
- trace__init_bpf_map_syscall_args(trace, key, &value);
- trace__init_syscall_bpf_progs(trace, key);
- }
-
- err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
-{
- int fd = bpf_map__fd(trace->syscalls.map);
- struct bpf_map_syscall_entry value = {
- .enabled = enabled,
- };
- int err = 0, key;
-
- for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
- if (enabled)
- trace__init_bpf_map_syscall_args(trace, key, &value);
-
- err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
- if (err)
- break;
- }
-
- return err;
-}
-
-static int trace__init_syscalls_bpf_map(struct trace *trace)
-{
- bool enabled = true;
-
- if (trace->ev_qualifier_ids.nr)
- enabled = trace->not_ev_qualifier;
-
- return __trace__init_syscalls_bpf_map(trace, enabled);
-}
-
static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
{
struct tep_format_field *field, *candidate_field;
@@ -3517,14 +3532,18 @@ static void trace__delete_augmented_syscalls(struct trace *trace)
trace->bpf_obj = NULL;
}
#else // HAVE_LIBBPF_SUPPORT
-static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
+static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace __maybe_unused,
+ const char *name __maybe_unused)
{
- return 0;
+ return NULL;
}
-static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
+static void trace__set_bpf_map_filtered_pids(struct trace *trace __maybe_unused)
+{
+}
+
+static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
{
- return 0;
}
static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
@@ -3560,8 +3579,6 @@ static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
static int trace__set_ev_qualifier_filter(struct trace *trace)
{
- if (trace->syscalls.map)
- return trace__set_ev_qualifier_bpf_filter(trace);
if (trace->syscalls.events.sys_enter)
return trace__set_ev_qualifier_tp_filter(trace);
return 0;
@@ -3607,7 +3624,7 @@ static int trace__set_filter_loop_pids(struct trace *trace)
thread = parent;
}
- err = perf_evlist__append_tp_filter_pids(trace->evlist, nr, pids);
+ err = evlist__append_tp_filter_pids(trace->evlist, nr, pids);
if (!err && trace->filter_pids.map)
err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids);
@@ -3621,11 +3638,11 @@ static int trace__set_filter_pids(struct trace *trace)
* Better not use !target__has_task() here because we need to cover the
* case where no threads were specified in the command line, but a
* workload was, and in that case we will fill in the thread_map when
- * we fork the workload in perf_evlist__prepare_workload.
+ * we fork the workload in evlist__prepare_workload.
*/
if (trace->filter_pids.nr > 0) {
- err = perf_evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
- trace->filter_pids.entries);
+ err = evlist__append_tp_filter_pids(trace->evlist, trace->filter_pids.nr,
+ trace->filter_pids.entries);
if (!err && trace->filter_pids.map) {
err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr,
trace->filter_pids.entries);
@@ -3641,9 +3658,8 @@ static int __trace__deliver_event(struct trace *trace, union perf_event *event)
{
struct evlist *evlist = trace->evlist;
struct perf_sample sample;
- int err;
+ int err = evlist__parse_sample(evlist, event, &sample);
- err = perf_evlist__parse_sample(evlist, event, &sample);
if (err)
fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
else
@@ -3676,11 +3692,11 @@ static int trace__deliver_event(struct trace *trace, union perf_event *event)
if (!trace->sort_events)
return __trace__deliver_event(trace, event);
- err = perf_evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
+ err = evlist__parse_sample_timestamp(trace->evlist, event, &trace->oe.last);
if (err && err != -1)
return err;
- err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0);
+ err = ordered_events__queue(&trace->oe.data, event, trace->oe.last, 0, NULL);
if (err)
return err;
@@ -3695,7 +3711,7 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
return __trace__deliver_event(trace, event->event);
}
-static struct syscall_arg_fmt *perf_evsel__syscall_arg_fmt(struct evsel *evsel, char *arg)
+static struct syscall_arg_fmt *evsel__find_syscall_arg_fmt_by_name(struct evsel *evsel, char *arg)
{
struct tep_format_field *field;
struct syscall_arg_fmt *fmt = __evsel__syscall_arg_fmt(evsel);
@@ -3750,7 +3766,7 @@ static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel
scnprintf(arg, sizeof(arg), "%.*s", left_size, left);
- fmt = perf_evsel__syscall_arg_fmt(evsel, arg);
+ fmt = evsel__find_syscall_arg_fmt_by_name(evsel, arg);
if (fmt == NULL) {
pr_err("\"%s\" not found in \"%s\", can't set filter \"%s\"\n",
arg, evsel->name, evsel->filter);
@@ -3801,7 +3817,7 @@ static int trace__expand_filter(struct trace *trace __maybe_unused, struct evsel
if (new_filter != evsel->filter) {
pr_debug("New filter for %s: %s\n", evsel->name, new_filter);
- perf_evsel__set_filter(evsel, new_filter);
+ evsel__set_filter(evsel, new_filter);
free(new_filter);
}
@@ -3846,24 +3862,26 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
}
if ((trace->trace_pgfaults & TRACE_PFMAJ)) {
- pgfault_maj = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
+ pgfault_maj = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MAJ);
if (pgfault_maj == NULL)
goto out_error_mem;
- perf_evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
+ evsel__config_callchain(pgfault_maj, &trace->opts, &callchain_param);
evlist__add(evlist, pgfault_maj);
}
if ((trace->trace_pgfaults & TRACE_PFMIN)) {
- pgfault_min = perf_evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
+ pgfault_min = evsel__new_pgfault(PERF_COUNT_SW_PAGE_FAULTS_MIN);
if (pgfault_min == NULL)
goto out_error_mem;
- perf_evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
+ evsel__config_callchain(pgfault_min, &trace->opts, &callchain_param);
evlist__add(evlist, pgfault_min);
}
+ /* Enable ignoring missing threads when -u/-p option is defined. */
+ trace->opts.ignore_missing_thread = trace->opts.target.uid != UINT_MAX || trace->opts.target.pid;
+
if (trace->sched &&
- perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
- trace__sched_stat_runtime))
+ evlist__add_newtp(evlist, "sched", "sched_stat_runtime", trace__sched_stat_runtime))
goto out_error_sched_stat_runtime;
/*
* If a global cgroup was set, apply it to all the events without an
@@ -3893,7 +3911,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (trace->cgroup)
evlist__set_default_cgroup(trace->evlist, trace->cgroup);
- err = perf_evlist__create_maps(evlist, &trace->opts.target);
+ err = evlist__create_maps(evlist, &trace->opts.target);
if (err < 0) {
fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
goto out_delete_evlist;
@@ -3905,18 +3923,15 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
goto out_delete_evlist;
}
- perf_evlist__config(evlist, &trace->opts, &callchain_param);
-
- signal(SIGCHLD, sig_handler);
- signal(SIGINT, sig_handler);
+ evlist__config(evlist, &trace->opts, &callchain_param);
if (forks) {
- err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
- argv, false, NULL);
+ err = evlist__prepare_workload(evlist, &trace->opts.target, argv, false, NULL);
if (err < 0) {
fprintf(trace->output, "Couldn't run the workload!\n");
goto out_delete_evlist;
}
+ workload_pid = evlist->workload.pid;
}
err = evlist__open(evlist);
@@ -3937,9 +3952,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_mem;
- if (trace->syscalls.map)
- trace__init_syscalls_bpf_map(trace);
-
if (trace->syscalls.prog_array.sys_enter)
trace__init_syscalls_bpf_prog_array_maps(trace);
@@ -3970,7 +3982,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = trace__expand_filters(trace, &evsel);
if (err)
goto out_delete_evlist;
- err = perf_evlist__apply_filters(evlist, &evsel);
+ err = evlist__apply_filters(evlist, &evsel);
if (err < 0)
goto out_error_apply_filters;
@@ -3981,20 +3993,20 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_mmap;
- if (!target__none(&trace->opts.target) && !trace->opts.initial_delay)
+ if (!target__none(&trace->opts.target) && !trace->opts.target.initial_delay)
evlist__enable(evlist);
if (forks)
- perf_evlist__start_workload(evlist);
+ evlist__start_workload(evlist);
- if (trace->opts.initial_delay) {
- usleep(trace->opts.initial_delay * 1000);
+ if (trace->opts.target.initial_delay) {
+ usleep(trace->opts.target.initial_delay * 1000);
evlist__enable(evlist);
}
trace->multiple_threads = perf_thread_map__pid(evlist->core.threads, 0) == -1 ||
- evlist->core.threads->nr > 1 ||
- evlist__first(evlist)->core.attr.inherit;
+ perf_thread_map__nr(evlist->core.threads) > 1 ||
+ evlist__first(evlist)->core.attr.inherit;
/*
* Now that we already used evsel->core.attr to ask the kernel to setup the
@@ -4077,7 +4089,7 @@ out_disable:
out_delete_evlist:
trace__symbols__exit(trace);
-
+ evlist__free_syscall_tp_fields(evlist);
evlist__delete(evlist);
cgroup__put(trace->cgroup);
trace->evlist = NULL;
@@ -4095,11 +4107,11 @@ out_error_raw_syscalls:
goto out_error;
out_error_mmap:
- perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
+ evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
goto out_error;
out_error_open:
- perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
+ evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
out_error:
fprintf(trace->output, "%s\n", errbuf);
@@ -4108,7 +4120,7 @@ out_error:
out_error_apply_filters:
fprintf(trace->output,
"Failed to set filter \"%s\" on event %s with %d (%s)\n",
- evsel->filter, perf_evsel__name(evsel), errno,
+ evsel->filter, evsel__name(evsel), errno,
str_error_r(errno, errbuf, sizeof(errbuf)));
goto out_delete_evlist;
}
@@ -4152,7 +4164,7 @@ static int trace__replay(struct trace *trace)
/* add tid to output */
trace->multiple_threads = true;
- session = perf_session__new(&data, false, &trace->tool);
+ session = perf_session__new(&data, &trace->tool);
if (IS_ERR(session))
return PTR_ERR(session);
@@ -4171,27 +4183,25 @@ static int trace__replay(struct trace *trace)
if (err)
goto out;
- evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
- "raw_syscalls:sys_enter");
+ evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_enter");
+ trace->syscalls.events.sys_enter = evsel;
/* older kernels have syscalls tp versus raw_syscalls */
if (evsel == NULL)
- evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
- "syscalls:sys_enter");
+ evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_enter");
if (evsel &&
- (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
+ (evsel__init_raw_syscall_tp(evsel, trace__sys_enter) < 0 ||
perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
pr_err("Error during initialize raw_syscalls:sys_enter event\n");
goto out;
}
- evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
- "raw_syscalls:sys_exit");
+ evsel = evlist__find_tracepoint_by_name(session->evlist, "raw_syscalls:sys_exit");
+ trace->syscalls.events.sys_exit = evsel;
if (evsel == NULL)
- evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
- "syscalls:sys_exit");
+ evsel = evlist__find_tracepoint_by_name(session->evlist, "syscalls:sys_exit");
if (evsel &&
- (perf_evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
+ (evsel__init_raw_syscall_tp(evsel, trace__sys_exit) < 0 ||
perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
pr_err("Error during initialize raw_syscalls:sys_exit event\n");
goto out;
@@ -4471,11 +4481,11 @@ static int evlist__set_syscall_tp_fields(struct evlist *evlist)
continue;
if (strcmp(evsel->tp_format->system, "syscalls")) {
- perf_evsel__init_tp_arg_scnprintf(evsel);
+ evsel__init_tp_arg_scnprintf(evsel);
continue;
}
- if (perf_evsel__init_syscall_tp(evsel))
+ if (evsel__init_syscall_tp(evsel))
return -1;
if (!strncmp(evsel->tp_format->name, "sys_enter_", 10)) {
@@ -4581,12 +4591,15 @@ do_concat:
err = 0;
if (lists[0]) {
- struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event",
- "event selector. use 'perf list' to list available events",
- parse_events_option);
+ struct option o = {
+ .value = &trace->evlist,
+ };
err = parse_events_option(&o, lists[0], 0);
}
out:
+ free(strace_groups_dir);
+ free(lists[0]);
+ free(lists[1]);
if (sep)
*sep = ',';
@@ -4597,34 +4610,17 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u
{
struct trace *trace = opt->value;
- if (!list_empty(&trace->evlist->core.entries))
- return parse_cgroups(opt, str, unset);
-
+ if (!list_empty(&trace->evlist->core.entries)) {
+ struct option o = {
+ .value = &trace->evlist,
+ };
+ return parse_cgroups(&o, str, unset);
+ }
trace->cgroup = evlist__findnew_cgroup(trace->evlist, str);
return 0;
}
-static struct bpf_map *trace__find_bpf_map_by_name(struct trace *trace, const char *name)
-{
- if (trace->bpf_obj == NULL)
- return NULL;
-
- return bpf_object__find_map_by_name(trace->bpf_obj, name);
-}
-
-static void trace__set_bpf_map_filtered_pids(struct trace *trace)
-{
- trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered");
-}
-
-static void trace__set_bpf_map_syscalls(struct trace *trace)
-{
- trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
- trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
- trace->syscalls.prog_array.sys_exit = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
-}
-
static int trace__config(const char *var, const char *value, void *arg)
{
struct trace *trace = arg;
@@ -4669,6 +4665,21 @@ out:
return err;
}
+static void trace__exit(struct trace *trace)
+{
+ int i;
+
+ strlist__delete(trace->ev_qualifier);
+ zfree(&trace->ev_qualifier_ids.entries);
+ if (trace->syscalls.table) {
+ for (i = 0; i <= trace->sctbl->syscalls.max_id; i++)
+ syscall__exit(&trace->syscalls.table[i]);
+ zfree(&trace->syscalls.table);
+ }
+ syscalltbl__delete(trace->sctbl);
+ zfree(&trace->perfconfig_events);
+}
+
int cmd_trace(int argc, const char **argv)
{
const char *trace_usage[] = {
@@ -4728,8 +4739,7 @@ int cmd_trace(int argc, const char **argv)
OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
"child tasks do not inherit counters"),
OPT_CALLBACK('m', "mmap-pages", &trace.opts.mmap_pages, "pages",
- "number of mmap data pages",
- perf_evlist__parse_mmap_pages),
+ "number of mmap data pages", evlist__parse_mmap_pages),
OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
"user to profile"),
OPT_CALLBACK(0, "duration", &trace, "float",
@@ -4778,7 +4788,7 @@ int cmd_trace(int argc, const char **argv)
"per thread proc mmap processing timeout in ms"),
OPT_CALLBACK('G', "cgroup", &trace, "name", "monitor event in cgroup name only",
trace__parse_cgroups),
- OPT_UINTEGER('D', "delay", &trace.opts.initial_delay,
+ OPT_INTEGER('D', "delay", &trace.opts.target.initial_delay,
"ms to wait before starting measurement after program "
"start"),
OPTS_EVSWITCH(&trace.evswitch),
@@ -4790,9 +4800,16 @@ int cmd_trace(int argc, const char **argv)
const char * const trace_subcommands[] = { "record", NULL };
int err = -1;
char bf[BUFSIZ];
+ struct sigaction sigchld_act;
signal(SIGSEGV, sighandler_dump_stack);
signal(SIGFPE, sighandler_dump_stack);
+ signal(SIGINT, sighandler_interrupt);
+
+ memset(&sigchld_act, 0, sizeof(sigchld_act));
+ sigchld_act.sa_flags = SA_SIGINFO;
+ sigchld_act.sa_sigaction = sighandler_chld;
+ sigaction(SIGCHLD, &sigchld_act, NULL);
trace.evlist = evlist__new();
trace.sctbl = syscalltbl__new();
@@ -4843,12 +4860,13 @@ int cmd_trace(int argc, const char **argv)
if (trace.perfconfig_events != NULL) {
struct parse_events_error parse_err;
- bzero(&parse_err, sizeof(parse_err));
+ parse_events_error__init(&parse_err);
err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
- if (err) {
- parse_events_print_error(&parse_err, trace.perfconfig_events);
+ if (err)
+ parse_events_error__print(&parse_err, trace.perfconfig_events);
+ parse_events_error__exit(&parse_err);
+ if (err)
goto out;
- }
}
if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
@@ -4866,7 +4884,7 @@ int cmd_trace(int argc, const char **argv)
if (evsel) {
trace.syscalls.events.augmented = evsel;
- evsel = perf_evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
+ evsel = evlist__find_tracepoint_by_name(trace.evlist, "raw_syscalls:sys_enter");
if (evsel == NULL) {
pr_err("ERROR: raw_syscalls:sys_enter not found in the augmented BPF object\n");
goto out;
@@ -4989,7 +5007,7 @@ int cmd_trace(int argc, const char **argv)
*/
if (trace.syscalls.events.augmented) {
evlist__for_each_entry(trace.evlist, evsel) {
- bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
+ bool raw_syscalls_sys_exit = strcmp(evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
if (raw_syscalls_sys_exit) {
trace.raw_augmented_syscalls = true;
@@ -4997,10 +5015,10 @@ int cmd_trace(int argc, const char **argv)
}
if (trace.syscalls.events.augmented->priv == NULL &&
- strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
+ strstr(evsel__name(evsel), "syscalls:sys_enter")) {
struct evsel *augmented = trace.syscalls.events.augmented;
- if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
- perf_evsel__init_augmented_syscall_tp_args(augmented))
+ if (evsel__init_augmented_syscall_tp(augmented, evsel) ||
+ evsel__init_augmented_syscall_tp_args(augmented))
goto out;
/*
* Augmented is __augmented_syscalls__ BPF_OUTPUT event
@@ -5014,16 +5032,16 @@ int cmd_trace(int argc, const char **argv)
* as not to filter it, then we'll handle it just like we would
* for the BPF_OUTPUT one:
*/
- if (perf_evsel__init_augmented_syscall_tp(evsel, evsel) ||
- perf_evsel__init_augmented_syscall_tp_args(evsel))
+ if (evsel__init_augmented_syscall_tp(evsel, evsel) ||
+ evsel__init_augmented_syscall_tp_args(evsel))
goto out;
evsel->handler = trace__sys_enter;
}
- if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+ if (strstarts(evsel__name(evsel), "syscalls:sys_exit_")) {
struct syscall_tp *sc;
init_augmented_syscall_tp:
- if (perf_evsel__init_augmented_syscall_tp(evsel, evsel))
+ if (evsel__init_augmented_syscall_tp(evsel, evsel))
goto out;
sc = __evsel__syscall_tp(evsel);
/*
@@ -5047,7 +5065,7 @@ init_augmented_syscall_tp:
*/
if (trace.raw_augmented_syscalls)
trace.raw_augmented_syscalls_args_size = (6 + 1) * sizeof(long) + sc->id.offset;
- perf_evsel__init_augmented_syscall_tp_ret(evsel);
+ evsel__init_augmented_syscall_tp_ret(evsel);
evsel->handler = trace__sys_exit;
}
}
@@ -5102,6 +5120,6 @@ out_close:
if (output_name != NULL)
fclose(trace.output);
out:
- zfree(&trace.perfconfig_events);
+ trace__exit(&trace);
return err;
}
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index 05cf2af9e2c2..e5859c70e195 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -1,15 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
-#include "perf.h"
#include "color.h"
+#include "util/debug.h"
+#include "util/header.h"
#include <tools/config.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <subcmd/parse-options.h>
-int version_verbose;
-
struct version {
bool build_options;
};
@@ -59,13 +58,12 @@ static void library_status(void)
{
STATUS(HAVE_DWARF_SUPPORT, dwarf);
STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations);
- STATUS(HAVE_GLIBC_SUPPORT, glibc);
- STATUS(HAVE_GTK2_SUPPORT, gtk2);
#ifndef HAVE_SYSCALL_TABLE_SUPPORT
STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit);
#endif
STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table);
STATUS(HAVE_LIBBFD_SUPPORT, libbfd);
+ STATUS(HAVE_DEBUGINFOD_SUPPORT, debuginfod);
STATUS(HAVE_LIBELF_SUPPORT, libelf);
STATUS(HAVE_LIBNUMA_SUPPORT, libnuma);
STATUS(HAVE_LIBNUMA_SUPPORT, numa_num_possible_cpus);
@@ -81,6 +79,8 @@ static void library_status(void)
STATUS(HAVE_LIBBPF_SUPPORT, bpf);
STATUS(HAVE_AIO_SUPPORT, aio);
STATUS(HAVE_ZSTD_SUPPORT, zstd);
+ STATUS(HAVE_LIBPFM, libpfm4);
+ STATUS(HAVE_LIBTRACEEVENT, libtraceevent);
}
int cmd_version(int argc, const char **argv)
@@ -90,7 +90,7 @@ int cmd_version(int argc, const char **argv)
printf("perf version %s\n", perf_version_string);
- if (version.build_options || version_verbose == 1)
+ if (version.build_options || verbose > 0)
library_status();
return 0;
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 14a2db622a7b..f2ab5bae2150 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -2,9 +2,6 @@
#ifndef BUILTIN_H
#define BUILTIN_H
-extern const char perf_usage_string[];
-extern const char perf_more_info_string[];
-
void list_common_cmds_help(void);
const char *help_unknown_cmd(const char *cmd);
@@ -37,6 +34,8 @@ int cmd_inject(int argc, const char **argv);
int cmd_mem(int argc, const char **argv);
int cmd_data(int argc, const char **argv);
int cmd_ftrace(int argc, const char **argv);
+int cmd_daemon(int argc, const char **argv);
+int cmd_kwork(int argc, const char **argv);
int find_scripts(char **scripts_array, char **scripts_path_array, int num,
int pathlen);
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index bfb21d049e6c..6f831ee2f60f 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -22,8 +22,11 @@ include/uapi/linux/usbdevice_fs.h
include/uapi/linux/vhost.h
include/uapi/sound/asound.h
include/linux/bits.h
+include/vdso/bits.h
include/linux/const.h
+include/vdso/const.h
include/linux/hash.h
+include/linux/list-sort.h
include/uapi/linux/hw_breakpoint.h
arch/x86/include/asm/disabled-features.h
arch/x86/include/asm/required-features.h
@@ -37,6 +40,8 @@ arch/x86/lib/x86-opcode-map.txt
arch/x86/tools/gen-insn-attr-x86.awk
arch/arm/include/uapi/asm/perf_regs.h
arch/arm64/include/uapi/asm/perf_regs.h
+arch/loongarch/include/uapi/asm/perf_regs.h
+arch/mips/include/uapi/asm/perf_regs.h
arch/powerpc/include/uapi/asm/perf_regs.h
arch/s390/include/uapi/asm/perf_regs.h
arch/x86/include/uapi/asm/perf_regs.h
@@ -48,7 +53,6 @@ arch/x86/include/uapi/asm/vmx.h
arch/powerpc/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/kvm_perf.h
-arch/s390/include/uapi/asm/ptrace.h
arch/s390/include/uapi/asm/sie.h
arch/arm/include/uapi/asm/kvm.h
arch/arm64/include/uapi/asm/kvm.h
@@ -73,6 +77,22 @@ include/uapi/asm-generic/mman-common.h
include/uapi/asm-generic/unistd.h
'
+SYNC_CHECK_FILES='
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/insn.h
+arch/x86/lib/inat.c
+arch/x86/lib/insn.c
+'
+
+# These copies are under tools/perf/trace/beauty/ as they are not used to in
+# building object files only by scripts in tools/perf/trace/beauty/ to generate
+# tables that then gets included in .c files for things like id->string syscall
+# tables (and the reverse lookup as well: string -> id)
+
+BEAUTY_FILES='
+include/linux/socket.h
+'
+
check_2 () {
file1=$1
file2=$2
@@ -98,6 +118,14 @@ check () {
check_2 tools/$file $file $*
}
+beauty_check () {
+ file=$1
+
+ shift
+
+ check_2 tools/perf/trace/beauty/$file $file $*
+}
+
# Check if we have the kernel headers (tools/perf/../../include), else
# we're probably on a detached tarball, so no point in trying to check
# differences.
@@ -110,19 +138,34 @@ for i in $FILES; do
check $i -B
done
+for i in $SYNC_CHECK_FILES; do
+ check $i '-I "^.*\/\*.*__ignore_sync_check__.*\*\/.*$"'
+done
+
# diff with extra ignore lines
-check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))"'
+check arch/x86/lib/memcpy_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memcpy_\(erms\|orig\))" -I"^#include <linux/cfi_types.h>"'
check arch/x86/lib/memset_64.S '-I "^EXPORT_SYMBOL" -I "^#include <asm/export.h>" -I"^SYM_FUNC_START\(_LOCAL\)*(memset_\(erms\|orig\))"'
+check arch/x86/include/asm/amd-ibs.h '-I "^#include [<\"]\(asm/\)*msr-index.h"'
+check arch/arm64/include/asm/cputype.h '-I "^#include [<\"]\(asm/\)*sysreg.h"'
check include/uapi/asm-generic/mman.h '-I "^#include <\(uapi/\)*asm-generic/mman-common\(-tools\)*.h>"'
check include/uapi/linux/mman.h '-I "^#include <\(uapi/\)*asm/mman.h>"'
+check include/linux/build_bug.h '-I "^#\(ifndef\|endif\)\( \/\/\)* static_assert$"'
check include/linux/ctype.h '-I "isdigit("'
check lib/ctype.c '-I "^EXPORT_SYMBOL" -I "^#include <linux/export.h>" -B'
-check arch/x86/include/asm/inat.h '-I "^#include [\"<]\(asm/\)*inat_types.h[\">]"'
-check arch/x86/include/asm/insn.h '-I "^#include [\"<]\(asm/\)*inat.h[\">]"'
-check arch/x86/lib/inat.c '-I "^#include [\"<]\(../include/\)*asm/insn.h[\">]"'
-check arch/x86/lib/insn.c '-I "^#include [\"<]\(../include/\)*asm/in\(at\|sn\).h[\">]" -I "^#include [\"<]\(../include/\)*asm/emulate_prefix.h[\">]"'
+check lib/list_sort.c '-I "^#include <linux/bug.h>"'
# diff non-symmetric files
check_2 tools/perf/arch/x86/entry/syscalls/syscall_64.tbl arch/x86/entry/syscalls/syscall_64.tbl
+check_2 tools/perf/arch/powerpc/entry/syscalls/syscall.tbl arch/powerpc/kernel/syscalls/syscall.tbl
+check_2 tools/perf/arch/s390/entry/syscalls/syscall.tbl arch/s390/kernel/syscalls/syscall.tbl
+check_2 tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl arch/mips/kernel/syscalls/syscall_n64.tbl
+
+for i in $BEAUTY_FILES; do
+ beauty_check $i -B
+done
+
+# check duplicated library files
+check_2 tools/perf/util/hashmap.h tools/lib/bpf/hashmap.h
+check_2 tools/perf/util/hashmap.c tools/lib/bpf/hashmap.c
cd tools/perf
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index bc6c585f74fc..e8d2762adade 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -14,20 +14,23 @@ perf-config mainporcelain common
perf-evlist mainporcelain common
perf-ftrace mainporcelain common
perf-inject mainporcelain common
+perf-iostat mainporcelain common
perf-kallsyms mainporcelain common
-perf-kmem mainporcelain common
+perf-kmem mainporcelain traceevent
perf-kvm mainporcelain common
+perf-kwork mainporcelain traceevent
perf-list mainporcelain common
-perf-lock mainporcelain common
+perf-lock mainporcelain traceevent
perf-mem mainporcelain common
perf-probe mainporcelain full
perf-record mainporcelain common
perf-report mainporcelain common
-perf-sched mainporcelain common
+perf-sched mainporcelain traceevent
perf-script mainporcelain common
perf-stat mainporcelain common
perf-test mainporcelain common
-perf-timechart mainporcelain common
+perf-timechart mainporcelain traceevent
perf-top mainporcelain common
perf-trace mainporcelain audit
perf-version mainporcelain common
+perf-daemon mainporcelain common
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 0453ba26cdbd..aa8cfeabb743 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -106,6 +106,9 @@ enum perf_hw_id {
PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
PERF_COUNT_HW_BRANCH_MISSES = 5,
PERF_COUNT_HW_BUS_CYCLES = 6,
+ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
+ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
};
These are standardized types of events that work relatively uniformly
@@ -258,7 +261,8 @@ gets schedule to. Per task counters can be created by any user, for
their own tasks.
A 'pid == -1' and 'cpu == x' counter is a per CPU counter that counts
-all events on CPU-x. Per CPU counters need CAP_SYS_ADMIN privilege.
+all events on CPU-x. Per CPU counters need CAP_PERFMON or CAP_SYS_ADMIN
+privilege.
The 'flags' parameter is currently unused and must be zero.
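For reference, a minimal sketch (not part of this patch) of opening such a per-CPU counter through the raw syscall described in this file; the helper name open_cpu_cycles_counter is illustrative:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Count all events on CPU 'cpu' (pid == -1): needs CAP_PERFMON or CAP_SYS_ADMIN */
static int open_cpu_cycles_counter(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* group_fd == -1 (no group leader), flags must be zero */
	return syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
}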
diff --git a/tools/perf/dlfilters/dlfilter-show-cycles.c b/tools/perf/dlfilters/dlfilter-show-cycles.c
new file mode 100644
index 000000000000..6d47298ebe9f
--- /dev/null
+++ b/tools/perf/dlfilters/dlfilter-show-cycles.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dlfilter-show-cycles.c: Print the number of cycles at the start of each line
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#include <perf/perf_dlfilter.h>
+#include <string.h>
+#include <stdio.h>
+
+#define MAX_CPU 4096
+
+enum {
+ INSTR_CYC,
+ BRNCH_CYC,
+ OTHER_CYC,
+ MAX_ENTRY
+};
+
+static __u64 cycles[MAX_CPU][MAX_ENTRY];
+static __u64 cycles_rpt[MAX_CPU][MAX_ENTRY];
+
+#define BITS 16
+#define TABLESZ (1 << BITS)
+#define TABLEMAX (TABLESZ / 2)
+#define MASK (TABLESZ - 1)
+
+static struct entry {
+ __u32 used;
+ __s32 tid;
+ __u64 cycles[MAX_ENTRY];
+ __u64 cycles_rpt[MAX_ENTRY];
+} table[TABLESZ];
+
+static int tid_cnt;
+
+static int event_entry(const char *event)
+{
+ if (!event)
+ return OTHER_CYC;
+ if (!strncmp(event, "instructions", 12))
+ return INSTR_CYC;
+ if (!strncmp(event, "branches", 8))
+ return BRNCH_CYC;
+ return OTHER_CYC;
+}
+
+static struct entry *find_entry(__s32 tid)
+{
+ __u32 pos = tid & MASK;
+ struct entry *e;
+
+ e = &table[pos];
+ while (e->used) {
+ if (e->tid == tid)
+ return e;
+ if (++pos == TABLESZ)
+ pos = 0;
+ e = &table[pos];
+ }
+
+ if (tid_cnt >= TABLEMAX) {
+ fprintf(stderr, "Too many threads\n");
+ return NULL;
+ }
+
+ tid_cnt += 1;
+ e->used = 1;
+ e->tid = tid;
+ return e;
+}
+
+static void add_entry(__s32 tid, int pos, __u64 cnt)
+{
+ struct entry *e = find_entry(tid);
+
+ if (e)
+ e->cycles[pos] += cnt;
+}
+
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ __s32 cpu = sample->cpu;
+ __s32 tid = sample->tid;
+ int pos;
+
+ if (!sample->cyc_cnt)
+ return 0;
+
+ pos = event_entry(sample->event);
+
+ if (cpu >= 0 && cpu < MAX_CPU)
+ cycles[cpu][pos] += sample->cyc_cnt;
+ else if (tid != -1)
+ add_entry(tid, pos, sample->cyc_cnt);
+ return 0;
+}
+
+static void print_vals(__u64 cycles, __u64 delta)
+{
+ if (delta)
+ printf("%10llu %10llu ", (unsigned long long)cycles, (unsigned long long)delta);
+ else
+ printf("%10llu %10s ", (unsigned long long)cycles, "");
+}
+
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ __s32 cpu = sample->cpu;
+ __s32 tid = sample->tid;
+ int pos;
+
+ pos = event_entry(sample->event);
+
+ if (cpu >= 0 && cpu < MAX_CPU) {
+ print_vals(cycles[cpu][pos], cycles[cpu][pos] - cycles_rpt[cpu][pos]);
+ cycles_rpt[cpu][pos] = cycles[cpu][pos];
+ return 0;
+ }
+
+ if (tid != -1) {
+ struct entry *e = find_entry(tid);
+
+ if (e) {
+ print_vals(e->cycles[pos], e->cycles[pos] - e->cycles_rpt[pos]);
+ e->cycles_rpt[pos] = e->cycles[pos];
+ return 0;
+ }
+ }
+
+ printf("%22s", "");
+ return 0;
+}
+
+const char *filter_description(const char **long_description)
+{
+ static char *long_desc = "Cycle counts are accumulated per CPU (or "
+ "per thread if CPU is not recorded) from IPC information, and "
+ "printed together with the change since the last print, at the "
+ "start of each line. Separate counts are kept for branches, "
+ "instructions or other events.";
+
+ *long_description = long_desc;
+ return "Print the number of cycles at the start of each line";
+}
diff --git a/tools/perf/dlfilters/dlfilter-test-api-v0.c b/tools/perf/dlfilters/dlfilter-test-api-v0.c
new file mode 100644
index 000000000000..b1f51efd67d6
--- /dev/null
+++ b/tools/perf/dlfilters/dlfilter-test-api-v0.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dlfilter-test-api-v0.c: test original (v0) API for perf --dlfilter shared object
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+
+/*
+ * Copy original (v0) API instead of including current API
+ */
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+/* Definitions for perf_dlfilter_sample flags */
+enum {
+ PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
+ PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
+ PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
+ PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
+ PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
+ PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
+ PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
+ PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
+ PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
+ PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
+ PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
+ PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
+ PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
+};
+
+/*
+ * perf sample event information (as per perf script and <linux/perf_event.h>)
+ */
+struct perf_dlfilter_sample {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 ip;
+ __s32 pid;
+ __s32 tid;
+ __u64 time;
+ __u64 addr;
+ __u64 id;
+ __u64 stream_id;
+ __u64 period;
+ __u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
+ __u64 insn_cnt; /* For instructions-per-cycle (IPC) */
+ __u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
+ __s32 cpu;
+ __u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
+ __u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
+ __u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
+ __u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
+ __u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
+ __u8 addr_correlates_sym; /* True => resolve_addr() can be called */
+ __u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
+ __u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ __u64 brstack_nr; /* Number of brstack entries */
+ const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
+ __u64 raw_callchain_nr; /* Number of raw_callchain entries */
+ const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
+ const char *event;
+};
+
+/*
+ * Address location (as per perf script)
+ */
+struct perf_dlfilter_al {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u32 symoff;
+ const char *sym;
+ __u64 addr; /* Mapped address (from dso) */
+ __u64 sym_start;
+ __u64 sym_end;
+ const char *dso;
+ __u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
+ __u8 is_64_bit; /* Only valid if dso is not NULL */
+ __u8 is_kernel_ip; /* True if in kernel space */
+ __u32 buildid_size;
+ __u8 *buildid;
+ /* Below members are only populated by resolve_ip() */
+ __u8 filtered; /* True if this sample event will be filtered out */
+ const char *comm;
+};
+
+struct perf_dlfilter_fns {
+ /* Return information about ip */
+ const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
+ /* Return information about addr (if addr_correlates_sym) */
+ const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
+ /* Return arguments from --dlarg option */
+ char **(*args)(void *ctx, int *dlargc);
+ /*
+ * Return information about address (al->size must be set before
+ * calling). Returns 0 on success, -1 otherwise.
+ */
+ __s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ /* Return instruction bytes and length */
+ const __u8 *(*insn)(void *ctx, __u32 *length);
+ /* Return source file name and line number */
+ const char *(*srcline)(void *ctx, __u32 *line_number);
+ /* Return perf_event_attr, refer <linux/perf_event.h> */
+ struct perf_event_attr *(*attr)(void *ctx);
+	/* Read object code, return number of bytes read */
+ __s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ /* Reserved */
+ void *(*reserved[120])(void *);
+};
+
+struct perf_dlfilter_fns perf_dlfilter_fns;
+
+static int verbose;
+
+#define pr_debug(fmt, ...) do { \
+ if (verbose > 0) \
+ fprintf(stderr, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+static int test_fail(const char *msg)
+{
+ pr_debug("%s\n", msg);
+ return -1;
+}
+
+#define CHECK(x) do { \
+ if (!(x)) \
+ return test_fail("Check '" #x "' failed\n"); \
+ } while (0)
+
+struct filter_data {
+ __u64 ip;
+ __u64 addr;
+ int do_early;
+ int early_filter_cnt;
+ int filter_cnt;
+};
+
+static struct filter_data *filt_dat;
+
+int start(void **data, void *ctx)
+{
+ int dlargc;
+ char **dlargv;
+ struct filter_data *d;
+ static bool called;
+
+ verbose = 1;
+
+ CHECK(!filt_dat && !called);
+ called = true;
+
+ d = calloc(1, sizeof(*d));
+ if (!d)
+ test_fail("Failed to allocate memory");
+ filt_dat = d;
+ *data = d;
+
+ dlargv = perf_dlfilter_fns.args(ctx, &dlargc);
+
+ CHECK(dlargc == 6);
+ CHECK(!strcmp(dlargv[0], "first"));
+ verbose = strtol(dlargv[1], NULL, 0);
+ d->ip = strtoull(dlargv[2], NULL, 0);
+ d->addr = strtoull(dlargv[3], NULL, 0);
+ d->do_early = strtol(dlargv[4], NULL, 0);
+ CHECK(!strcmp(dlargv[5], "last"));
+
+ pr_debug("%s API\n", __func__);
+
+ return 0;
+}
+
+#define CHECK_SAMPLE(x) do { \
+ if (sample->x != expected.x) \
+ return test_fail("'" #x "' not expected value\n"); \
+ } while (0)
+
+static int check_sample(struct filter_data *d, const struct perf_dlfilter_sample *sample)
+{
+ struct perf_dlfilter_sample expected = {
+ .ip = d->ip,
+ .pid = 12345,
+ .tid = 12346,
+ .time = 1234567890,
+ .addr = d->addr,
+ .id = 99,
+ .stream_id = 101,
+ .period = 543212345,
+ .cpu = 31,
+ .cpumode = PERF_RECORD_MISC_USER,
+ .addr_correlates_sym = 1,
+ .misc = PERF_RECORD_MISC_USER,
+ };
+
+ CHECK(sample->size >= sizeof(struct perf_dlfilter_sample));
+
+ CHECK_SAMPLE(ip);
+ CHECK_SAMPLE(pid);
+ CHECK_SAMPLE(tid);
+ CHECK_SAMPLE(time);
+ CHECK_SAMPLE(addr);
+ CHECK_SAMPLE(id);
+ CHECK_SAMPLE(stream_id);
+ CHECK_SAMPLE(period);
+ CHECK_SAMPLE(cpu);
+ CHECK_SAMPLE(cpumode);
+ CHECK_SAMPLE(addr_correlates_sym);
+ CHECK_SAMPLE(misc);
+
+ CHECK(!sample->raw_data);
+ CHECK_SAMPLE(brstack_nr);
+ CHECK(!sample->brstack);
+ CHECK_SAMPLE(raw_callchain_nr);
+ CHECK(!sample->raw_callchain);
+
+#define EVENT_NAME "branches:"
+ CHECK(!strncmp(sample->event, EVENT_NAME, strlen(EVENT_NAME)));
+
+ return 0;
+}
+
+static int check_al(void *ctx)
+{
+ const struct perf_dlfilter_al *al;
+
+ al = perf_dlfilter_fns.resolve_ip(ctx);
+ if (!al)
+ return test_fail("resolve_ip() failed");
+
+ CHECK(al->sym && !strcmp("foo", al->sym));
+ CHECK(!al->symoff);
+
+ return 0;
+}
+
+static int check_addr_al(void *ctx)
+{
+ const struct perf_dlfilter_al *addr_al;
+
+ addr_al = perf_dlfilter_fns.resolve_addr(ctx);
+ if (!addr_al)
+ return test_fail("resolve_addr() failed");
+
+ CHECK(addr_al->sym && !strcmp("bar", addr_al->sym));
+ CHECK(!addr_al->symoff);
+
+ return 0;
+}
+
+static int check_attr(void *ctx)
+{
+ struct perf_event_attr *attr = perf_dlfilter_fns.attr(ctx);
+
+ CHECK(attr);
+ CHECK(attr->type == PERF_TYPE_HARDWARE);
+ CHECK(attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+
+ return 0;
+}
+
+static int do_checks(void *data, const struct perf_dlfilter_sample *sample, void *ctx, bool early)
+{
+ struct filter_data *d = data;
+
+ CHECK(data && filt_dat == data);
+
+ if (early) {
+ CHECK(!d->early_filter_cnt);
+ d->early_filter_cnt += 1;
+ } else {
+ CHECK(!d->filter_cnt);
+ CHECK(d->early_filter_cnt);
+ CHECK(d->do_early != 2);
+ d->filter_cnt += 1;
+ }
+
+ if (check_sample(data, sample))
+ return -1;
+
+ if (check_attr(ctx))
+ return -1;
+
+ if (early && !d->do_early)
+ return 0;
+
+ if (check_al(ctx) || check_addr_al(ctx))
+ return -1;
+
+ if (early)
+ return d->do_early == 2;
+
+ return 1;
+}
+
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ pr_debug("%s API\n", __func__);
+
+ return do_checks(data, sample, ctx, true);
+}
+
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
+{
+ pr_debug("%s API\n", __func__);
+
+ return do_checks(data, sample, ctx, false);
+}
+
+int stop(void *data, void *ctx)
+{
+ static bool called;
+
+ pr_debug("%s API\n", __func__);
+
+ CHECK(data && filt_dat == data && !called);
+ called = true;
+
+ free(data);
+ filt_dat = NULL;
+ return 0;
+}
+
+const char *filter_description(const char **long_description)
+{
+ *long_description = "Filter used by the 'dlfilter C API' perf test";
+ return "dlfilter to test v0 C API";
+}
diff --git a/tools/perf/examples/bpf/5sec.c b/tools/perf/examples/bpf/5sec.c
index 65c4ff6892d9..3bd7fc17631f 100644
--- a/tools/perf/examples/bpf/5sec.c
+++ b/tools/perf/examples/bpf/5sec.c
@@ -39,13 +39,15 @@
Copyright (C) 2018 Red Hat, Inc., Arnaldo Carvalho de Melo <acme@redhat.com>
*/
-#include <bpf/bpf.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
#define NSEC_PER_SEC 1000000000L
-int probe(hrtimer_nanosleep, rqtp)(void *ctx, int err, long long sec)
+SEC("hrtimer_nanosleep=hrtimer_nanosleep rqtp")
+int hrtimer_nanosleep(void *ctx, int err, long long sec)
{
return sec / NSEC_PER_SEC == 5ULL;
}
-license(GPL);
+char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
index b80437971d80..9a03189d33d3 100644
--- a/tools/perf/examples/bpf/augmented_raw_syscalls.c
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -14,39 +14,52 @@
* code that will combine entry/exit in a strace like way.
*/
-#include <unistd.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
#include <linux/limits.h>
-#include <linux/socket.h>
-#include <pid_filter.h>
-/* bpf-output associated map */
-bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
+// FIXME: These should come from system headers
+typedef char bool;
+typedef int pid_t;
+typedef long long int __s64;
+typedef __s64 time64_t;
-/*
- * string_args_len: one per syscall arg, 0 means not a string or don't copy it,
- * PATH_MAX for copying everything, any other value to limit
- * it a la 'strace -s strsize'.
- */
-struct syscall {
- bool enabled;
- u16 string_args_len[6];
+struct timespec64 {
+ time64_t tv_sec;
+ long int tv_nsec;
};
-bpf_map(syscalls, ARRAY, int, struct syscall, 512);
+/* bpf-output associated map */
+struct __augmented_syscalls__ {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, int);
+ __type(value, __u32);
+ __uint(max_entries, __NR_CPUS__);
+} __augmented_syscalls__ SEC(".maps");
/*
* What to augment at entry?
*
* Pointer arg payloads (filenames, etc) passed from userspace to the kernel
*/
-bpf_map(syscalls_sys_enter, PROG_ARRAY, u32, u32, 512);
+struct syscalls_sys_enter {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 512);
+} syscalls_sys_enter SEC(".maps");
/*
* What to augment at exit?
*
* Pointer arg payloads returned from the kernel (struct stat, etc) to userspace.
*/
-bpf_map(syscalls_sys_exit, PROG_ARRAY, u32, u32, 512);
+struct syscalls_sys_exit {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __type(key, __u32);
+ __type(value, __u32);
+ __uint(max_entries, 512);
+} syscalls_sys_exit SEC(".maps");
struct syscall_enter_args {
unsigned long long common_tp_fields;
@@ -66,7 +79,38 @@ struct augmented_arg {
char value[PATH_MAX];
};
-pid_filter(pids_filtered);
+struct pids_filtered {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, pid_t);
+ __type(value, bool);
+ __uint(max_entries, 64);
+} pids_filtered SEC(".maps");
+
+/*
+ * Desired design of maximum size and alignment (see RFC2553)
+ */
+#define SS_MAXSIZE 128 /* Implementation specific max size */
+
+typedef unsigned short sa_family_t;
+
+/*
+ * FIXME: Should come from system headers
+ *
+ * The definition uses anonymous union and struct in order to control the
+ * default alignment.
+ */
+struct sockaddr_storage {
+ union {
+ struct {
+ sa_family_t ss_family; /* address family */
+ /* Following field(s) are implementation specific */
+ char __data[SS_MAXSIZE - sizeof(unsigned short)];
+ /* space to achieve desired size, */
+ /* _SS_MAXSIZE value minus size of ss_family */
+ };
+ void *__align; /* implementation specific desired alignment */
+ };
+};
struct augmented_args_payload {
struct syscall_enter_args args;
@@ -75,11 +119,17 @@ struct augmented_args_payload {
struct augmented_arg arg, arg2;
};
struct sockaddr_storage saddr;
+ char __data[sizeof(struct augmented_arg)];
};
};
// We need more tmp space than the BPF stack can give us
-bpf_map(augmented_args_tmp, PERCPU_ARRAY, int, struct augmented_args_payload, 1);
+struct augmented_args_tmp {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, int);
+ __type(value, struct augmented_args_payload);
+ __uint(max_entries, 1);
+} augmented_args_tmp SEC(".maps");
static inline struct augmented_args_payload *augmented_args_payload(void)
{
@@ -90,14 +140,14 @@ static inline struct augmented_args_payload *augmented_args_payload(void)
static inline int augmented__output(void *ctx, struct augmented_args_payload *args, int len)
{
/* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */
- return perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, args, len);
+ return bpf_perf_event_output(ctx, &__augmented_syscalls__, BPF_F_CURRENT_CPU, args, len);
}
static inline
unsigned int augmented_arg__read_str(struct augmented_arg *augmented_arg, const void *arg, unsigned int arg_len)
{
unsigned int augmented_len = sizeof(*augmented_arg);
- int string_len = probe_read_str(&augmented_arg->value, arg_len, arg);
+ int string_len = bpf_probe_read_str(&augmented_arg->value, arg_len, arg);
augmented_arg->size = augmented_arg->err = 0;
/*
@@ -146,7 +196,7 @@ int sys_enter_connect(struct syscall_enter_args *args)
if (socklen > sizeof(augmented_args->saddr))
socklen = sizeof(augmented_args->saddr);
- probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
+ bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
@@ -165,7 +215,7 @@ int sys_enter_sendto(struct syscall_enter_args *args)
if (socklen > sizeof(augmented_args->saddr))
socklen = sizeof(augmented_args->saddr);
- probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
+ bpf_probe_read(&augmented_args->saddr, socklen, sockaddr_arg);
return augmented__output(args, augmented_args, len + socklen);
}
@@ -234,6 +284,80 @@ int sys_enter_renameat(struct syscall_enter_args *args)
return augmented__output(args, augmented_args, len);
}
+#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+
+// we need just the start of the struct: read attr->size, then copy that much
+struct perf_event_attr_size {
+ __u32 type;
+ /*
+ * Size of the attr structure, for fwd/bwd compat.
+ */
+ __u32 size;
+};
+
+SEC("!syscalls:sys_enter_perf_event_open")
+int sys_enter_perf_event_open(struct syscall_enter_args *args)
+{
+ struct augmented_args_payload *augmented_args = augmented_args_payload();
+ const struct perf_event_attr_size *attr = (const struct perf_event_attr_size *)args->args[0], *attr_read;
+ unsigned int len = sizeof(augmented_args->args);
+
+ if (augmented_args == NULL)
+ goto failure;
+
+ if (bpf_probe_read(&augmented_args->__data, sizeof(*attr), attr) < 0)
+ goto failure;
+
+ attr_read = (const struct perf_event_attr_size *)augmented_args->__data;
+
+ __u32 size = attr_read->size;
+
+ if (!size)
+ size = PERF_ATTR_SIZE_VER0;
+
+ if (size > sizeof(augmented_args->__data))
+ goto failure;
+
+ // Now that we read attr->size and tested it against the size limits, read it completely
+ if (bpf_probe_read(&augmented_args->__data, size, attr) < 0)
+ goto failure;
+
+ return augmented__output(args, augmented_args, len + size);
+failure:
+ return 1; /* Failure: don't filter */
+}
+
+SEC("!syscalls:sys_enter_clock_nanosleep")
+int sys_enter_clock_nanosleep(struct syscall_enter_args *args)
+{
+ struct augmented_args_payload *augmented_args = augmented_args_payload();
+ const void *rqtp_arg = (const void *)args->args[2];
+ unsigned int len = sizeof(augmented_args->args);
+ __u32 size = sizeof(struct timespec64);
+
+ if (augmented_args == NULL)
+ goto failure;
+
+ if (size > sizeof(augmented_args->__data))
+ goto failure;
+
+ bpf_probe_read(&augmented_args->__data, size, rqtp_arg);
+
+ return augmented__output(args, augmented_args, len + size);
+failure:
+ return 1; /* Failure: don't filter */
+}
+
+static pid_t getpid(void)
+{
+ return bpf_get_current_pid_tgid();
+}
+
+static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
+{
+ return bpf_map_lookup_elem(pids, &pid) != NULL;
+}
+
SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
@@ -248,7 +372,6 @@ int sys_enter(struct syscall_enter_args *args)
* initial, non-augmented raw_syscalls:sys_enter payload.
*/
unsigned int len = sizeof(augmented_args->args);
- struct syscall *syscall;
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
@@ -257,12 +380,12 @@ int sys_enter(struct syscall_enter_args *args)
if (augmented_args == NULL)
return 1;
- probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
+ bpf_probe_read(&augmented_args->args, sizeof(augmented_args->args), args);
/*
* Jump to syscall specific augmenter, even if the default one,
* "!raw_syscalls:unaugmented" that will just return 1 to return the
- * unagmented tracepoint payload.
+ * unaugmented tracepoint payload.
*/
bpf_tail_call(args, &syscalls_sys_enter, augmented_args->args.syscall_nr);
@@ -278,11 +401,11 @@ int sys_exit(struct syscall_exit_args *args)
if (pid_filter__has(&pids_filtered, getpid()))
return 0;
- probe_read(&exit_args, sizeof(exit_args), args);
+ bpf_probe_read(&exit_args, sizeof(exit_args), args);
/*
* Jump to syscall specific return augmenter, even if the default one,
* "!raw_syscalls:unaugmented" that will just return 1 to return the
- * unagmented tracepoint payload.
+ * unaugmented tracepoint payload.
*/
bpf_tail_call(args, &syscalls_sys_exit, exit_args.syscall_nr);
/*
@@ -291,4 +414,4 @@ int sys_exit(struct syscall_exit_args *args)
return 0;
}
-license(GPL);
+char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/examples/bpf/augmented_syscalls.c b/tools/perf/examples/bpf/augmented_syscalls.c
deleted file mode 100644
index 524fdb8534b3..000000000000
--- a/tools/perf/examples/bpf/augmented_syscalls.c
+++ /dev/null
@@ -1,169 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Augment syscalls with the contents of the pointer arguments.
- *
- * Test it with:
- *
- * perf trace -e tools/perf/examples/bpf/augmented_syscalls.c cat /etc/passwd > /dev/null
- *
- * It'll catch some openat syscalls related to the dynamic linked and
- * the last one should be the one for '/etc/passwd'.
- *
- * This matches what is marshalled into the raw_syscall:sys_enter payload
- * expected by the 'perf trace' beautifiers, and can be used by them, that will
- * check if perf_sample->raw_data is more than what is expected for each
- * syscalls:sys_{enter,exit}_SYSCALL tracepoint, uing the extra data as the
- * contents of pointer arguments.
- */
-
-#include <stdio.h>
-#include <linux/socket.h>
-
-/* bpf-output associated map */
-bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
-
-struct syscall_exit_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- long ret;
-};
-
-struct augmented_filename {
- unsigned int size;
- int reserved;
- char value[256];
-};
-
-#define augmented_filename_syscall(syscall) \
-struct augmented_enter_##syscall##_args { \
- struct syscall_enter_##syscall##_args args; \
- struct augmented_filename filename; \
-}; \
-int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
-{ \
- struct augmented_enter_##syscall##_args augmented_args = { .filename.reserved = 0, }; \
- unsigned int len = sizeof(augmented_args); \
- probe_read(&augmented_args.args, sizeof(augmented_args.args), args); \
- augmented_args.filename.size = probe_read_str(&augmented_args.filename.value, \
- sizeof(augmented_args.filename.value), \
- args->filename_ptr); \
- if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) { \
- len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size; \
- len &= sizeof(augmented_args.filename.value) - 1; \
- } \
- /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
- return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, len); \
-} \
-int syscall_exit(syscall)(struct syscall_exit_args *args) \
-{ \
- return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */ \
-}
-
-struct syscall_enter_openat_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- long dfd;
- char *filename_ptr;
- long flags;
- long mode;
-};
-
-augmented_filename_syscall(openat);
-
-struct syscall_enter_open_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- char *filename_ptr;
- long flags;
- long mode;
-};
-
-augmented_filename_syscall(open);
-
-struct syscall_enter_inotify_add_watch_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- long fd;
- char *filename_ptr;
- long mask;
-};
-
-augmented_filename_syscall(inotify_add_watch);
-
-struct statbuf;
-
-struct syscall_enter_newstat_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- char *filename_ptr;
- struct stat *statbuf;
-};
-
-augmented_filename_syscall(newstat);
-
-#ifndef _K_SS_MAXSIZE
-#define _K_SS_MAXSIZE 128
-#endif
-
-#define augmented_sockaddr_syscall(syscall) \
-struct augmented_enter_##syscall##_args { \
- struct syscall_enter_##syscall##_args args; \
- struct sockaddr_storage addr; \
-}; \
-int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
-{ \
- struct augmented_enter_##syscall##_args augmented_args; \
- unsigned long addrlen = sizeof(augmented_args.addr); \
- probe_read(&augmented_args.args, sizeof(augmented_args.args), args); \
-/* FIXME_CLANG_OPTIMIZATION_THAT_ACCESSES_USER_CONTROLLED_ADDRLEN_DESPITE_THIS_CHECK */ \
-/* if (addrlen > augmented_args.args.addrlen) */ \
-/* addrlen = augmented_args.args.addrlen; */ \
-/* */ \
- probe_read(&augmented_args.addr, addrlen, args->addr_ptr); \
- /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
- return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- sizeof(augmented_args) - sizeof(augmented_args.addr) + addrlen);\
-} \
-int syscall_exit(syscall)(struct syscall_exit_args *args) \
-{ \
- return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */ \
-}
-
-struct sockaddr;
-
-struct syscall_enter_bind_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- long fd;
- struct sockaddr *addr_ptr;
- unsigned long addrlen;
-};
-
-augmented_sockaddr_syscall(bind);
-
-struct syscall_enter_connect_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- long fd;
- struct sockaddr *addr_ptr;
- unsigned long addrlen;
-};
-
-augmented_sockaddr_syscall(connect);
-
-struct syscall_enter_sendto_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- long fd;
- void *buff;
- long len;
- unsigned long flags;
- struct sockaddr *addr_ptr;
- long addr_len;
-};
-
-augmented_sockaddr_syscall(sendto);
-
-license(GPL);
diff --git a/tools/perf/examples/bpf/empty.c b/tools/perf/examples/bpf/empty.c
index 7d7fb0c9fe76..3e296c0c53d7 100644
--- a/tools/perf/examples/bpf/empty.c
+++ b/tools/perf/examples/bpf/empty.c
@@ -1,3 +1,12 @@
-#include <bpf/bpf.h>
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
-license(GPL);
+struct syscall_enter_args;
+
+SEC("raw_syscalls:sys_enter")
+int sys_enter(struct syscall_enter_args *args)
+{
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/examples/bpf/etcsnoop.c b/tools/perf/examples/bpf/etcsnoop.c
deleted file mode 100644
index e81b535346c0..000000000000
--- a/tools/perf/examples/bpf/etcsnoop.c
+++ /dev/null
@@ -1,76 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Augment the filename syscalls with the contents of the filename pointer argument
- * filtering only those that do not start with /etc/.
- *
- * Test it with:
- *
- * perf trace -e tools/perf/examples/bpf/augmented_syscalls.c cat /etc/passwd > /dev/null
- *
- * It'll catch some openat syscalls related to the dynamic linked and
- * the last one should be the one for '/etc/passwd'.
- *
- * This matches what is marshalled into the raw_syscall:sys_enter payload
- * expected by the 'perf trace' beautifiers, and can be used by them unmodified,
- * which will be done as that feature is implemented in the next csets, for now
- * it will appear in a dump done by the default tracepoint handler in 'perf trace',
- * that uses bpf_output__fprintf() to just dump those contents, as done with
- * the bpf-output event associated with the __bpf_output__ map declared in
- * tools/perf/include/bpf/stdio.h.
- */
-
-#include <stdio.h>
-
-/* bpf-output associated map */
-bpf_map(__augmented_syscalls__, PERF_EVENT_ARRAY, int, u32, __NR_CPUS__);
-
-struct augmented_filename {
- int size;
- int reserved;
- char value[64];
-};
-
-#define augmented_filename_syscall_enter(syscall) \
-struct augmented_enter_##syscall##_args { \
- struct syscall_enter_##syscall##_args args; \
- struct augmented_filename filename; \
-}; \
-int syscall_enter(syscall)(struct syscall_enter_##syscall##_args *args) \
-{ \
- char etc[6] = "/etc/"; \
- struct augmented_enter_##syscall##_args augmented_args = { .filename.reserved = 0, }; \
- probe_read(&augmented_args.args, sizeof(augmented_args.args), args); \
- augmented_args.filename.size = probe_read_str(&augmented_args.filename.value, \
- sizeof(augmented_args.filename.value), \
- args->filename_ptr); \
- if (__builtin_memcmp(augmented_args.filename.value, etc, 4) != 0) \
- return 0; \
- /* If perf_event_output fails, return non-zero so that it gets recorded unaugmented */ \
- return perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, \
- &augmented_args, \
- (sizeof(augmented_args) - sizeof(augmented_args.filename.value) + \
- augmented_args.filename.size)); \
-}
-
-struct syscall_enter_openat_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- long dfd;
- char *filename_ptr;
- long flags;
- long mode;
-};
-
-augmented_filename_syscall_enter(openat);
-
-struct syscall_enter_open_args {
- unsigned long long common_tp_fields;
- long syscall_nr;
- char *filename_ptr;
- long flags;
- long mode;
-};
-
-augmented_filename_syscall_enter(open);
-
-license(GPL);
diff --git a/tools/perf/examples/bpf/hello.c b/tools/perf/examples/bpf/hello.c
index cf3c2fdc7f79..e9080b0df158 100644
--- a/tools/perf/examples/bpf/hello.c
+++ b/tools/perf/examples/bpf/hello.c
@@ -1,9 +1,27 @@
-#include <stdio.h>
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
-int syscall_enter(openat)(void *args)
+struct __bpf_stdout__ {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __type(key, int);
+ __type(value, __u32);
+ __uint(max_entries, __NR_CPUS__);
+} __bpf_stdout__ SEC(".maps");
+
+#define puts(from) \
+ ({ const int __len = sizeof(from); \
+ char __from[sizeof(from)] = from; \
+ bpf_perf_event_output(args, &__bpf_stdout__, BPF_F_CURRENT_CPU, \
+ &__from, __len & (sizeof(from) - 1)); })
+
+struct syscall_enter_args;
+
+SEC("raw_syscalls:sys_enter")
+int sys_enter(struct syscall_enter_args *args)
{
puts("Hello, world\n");
return 0;
}
-license(GPL);
+char _license[] SEC("license") = "GPL";
diff --git a/tools/perf/include/bpf/bpf.h b/tools/perf/include/bpf/bpf.h
deleted file mode 100644
index b422aeef5339..000000000000
--- a/tools/perf/include/bpf/bpf.h
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#ifndef _PERF_BPF_H
-#define _PERF_BPF_H
-
-#include <uapi/linux/bpf.h>
-
-/*
- * A helper structure used by eBPF C program to describe map attributes to
- * elf_bpf loader, taken from tools/testing/selftests/bpf/bpf_helpers.h:
- */
-struct bpf_map {
- unsigned int type;
- unsigned int key_size;
- unsigned int value_size;
- unsigned int max_entries;
- unsigned int map_flags;
- unsigned int inner_map_idx;
- unsigned int numa_node;
-};
-
-#define bpf_map(name, _type, type_key, type_val, _max_entries) \
-struct bpf_map SEC("maps") name = { \
- .type = BPF_MAP_TYPE_##_type, \
- .key_size = sizeof(type_key), \
- .value_size = sizeof(type_val), \
- .max_entries = _max_entries, \
-}; \
-struct ____btf_map_##name { \
- type_key key; \
- type_val value; \
-}; \
-struct ____btf_map_##name __attribute__((section(".maps." #name), used)) \
- ____btf_map_##name = { }
-
-/*
- * FIXME: this should receive .max_entries as a parameter, as careful
- * tuning of these limits is needed to avoid hitting limits that
- * prevents other BPF constructs, such as tracepoint handlers,
- * to get installed, with cryptic messages from libbpf, etc.
- * For the current need, 'perf trace --filter-pids', 64 should
- * be good enough, but this surely needs to be revisited.
- */
-#define pid_map(name, value_type) bpf_map(name, HASH, pid_t, value_type, 64)
-
-static int (*bpf_map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem;
-static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem;
-
-static void (*bpf_tail_call)(void *ctx, void *map, int index) = (void *)BPF_FUNC_tail_call;
-
-#define SEC(NAME) __attribute__((section(NAME), used))
-
-#define probe(function, vars) \
- SEC(#function "=" #function " " #vars) function
-
-#define syscall_enter(name) \
- SEC("syscalls:sys_enter_" #name) syscall_enter_ ## name
-
-#define syscall_exit(name) \
- SEC("syscalls:sys_exit_" #name) syscall_exit_ ## name
-
-#define license(name) \
-char _license[] SEC("license") = #name; \
-int _version SEC("version") = LINUX_VERSION_CODE;
-
-static int (*probe_read)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read;
-static int (*probe_read_str)(void *dst, int size, const void *unsafe_addr) = (void *)BPF_FUNC_probe_read_str;
-
-static int (*perf_event_output)(void *, struct bpf_map *, int, void *, unsigned long) = (void *)BPF_FUNC_perf_event_output;
-
-#endif /* _PERF_BPF_H */
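For reference, a minimal sketch (not part of this patch) of how the pid-filter map and the deleted pid_filter__add() wrapper look in the libbpf style now used by the converted examples above; __uint(), __type(), SEC() and bpf_map_update_elem() come from <bpf/bpf_helpers.h>, BPF_NOEXIST and BPF_MAP_TYPE_HASH from <linux/bpf.h>:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

typedef int pid_t;	/* the converted examples also define this locally */

struct pids_filtered {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, pid_t);
	__type(value, char);	/* augmented_raw_syscalls.c typedefs 'bool' to char for this */
	__uint(max_entries, 64);
} pids_filtered SEC(".maps");

/* Add a pid to the filter map; fails with BPF_NOEXIST if it is already there */
static int pid_filter__add(void *pids, pid_t pid)
{
	char value = 1;

	return bpf_map_update_elem(pids, &pid, &value, BPF_NOEXIST);
}

char _license[] SEC("license") = "GPL";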
diff --git a/tools/perf/include/bpf/linux/socket.h b/tools/perf/include/bpf/linux/socket.h
deleted file mode 100644
index 7f844568dab8..000000000000
--- a/tools/perf/include/bpf/linux/socket.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_SOCKET_H
-#define _UAPI_LINUX_SOCKET_H
-
-/*
- * Desired design of maximum size and alignment (see RFC2553)
- */
-#define _K_SS_MAXSIZE 128 /* Implementation specific max size */
-#define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *))
- /* Implementation specific desired alignment */
-
-typedef unsigned short __kernel_sa_family_t;
-
-struct __kernel_sockaddr_storage {
- __kernel_sa_family_t ss_family; /* address family */
- /* Following field(s) are implementation specific */
- char __data[_K_SS_MAXSIZE - sizeof(unsigned short)];
- /* space to achieve desired size, */
- /* _SS_MAXSIZE value minus size of ss_family */
-} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */
-
-#define sockaddr_storage __kernel_sockaddr_storage
-
-#endif /* _UAPI_LINUX_SOCKET_H */
diff --git a/tools/perf/include/bpf/pid_filter.h b/tools/perf/include/bpf/pid_filter.h
deleted file mode 100644
index 6e61c4bdf548..000000000000
--- a/tools/perf/include/bpf/pid_filter.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-
-#ifndef _PERF_BPF_PID_FILTER_
-#define _PERF_BPF_PID_FILTER_
-
-#include <bpf.h>
-
-#define pid_filter(name) pid_map(name, bool)
-
-static int pid_filter__add(struct bpf_map *pids, pid_t pid)
-{
- bool value = true;
- return bpf_map_update_elem(pids, &pid, &value, BPF_NOEXIST);
-}
-
-static bool pid_filter__has(struct bpf_map *pids, pid_t pid)
-{
- return bpf_map_lookup_elem(pids, &pid) != NULL;
-}
-
-#endif // _PERF_BPF_PID_FILTER_
diff --git a/tools/perf/include/bpf/stdio.h b/tools/perf/include/bpf/stdio.h
deleted file mode 100644
index 316af5b2ff35..000000000000
--- a/tools/perf/include/bpf/stdio.h
+++ /dev/null
@@ -1,16 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#include <bpf.h>
-
-struct bpf_map SEC("maps") __bpf_stdout__ = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = __NR_CPUS__,
-};
-
-#define puts(from) \
- ({ const int __len = sizeof(from); \
- char __from[__len] = from; \
- perf_event_output(args, &__bpf_stdout__, BPF_F_CURRENT_CPU, \
- &__from, __len & (sizeof(from) - 1)); })
diff --git a/tools/perf/include/bpf/unistd.h b/tools/perf/include/bpf/unistd.h
deleted file mode 100644
index ca7877f9a976..000000000000
--- a/tools/perf/include/bpf/unistd.h
+++ /dev/null
@@ -1,10 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-
-#include <bpf.h>
-
-static int (*bpf_get_current_pid_tgid)(void) = (void *)BPF_FUNC_get_current_pid_tgid;
-
-static pid_t getpid(void)
-{
- return bpf_get_current_pid_tgid();
-}
diff --git a/tools/perf/include/perf/perf_dlfilter.h b/tools/perf/include/perf/perf_dlfilter.h
new file mode 100644
index 000000000000..a26e2f129f83
--- /dev/null
+++ b/tools/perf/include/perf/perf_dlfilter.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * perf_dlfilter.h: API for perf --dlfilter shared object
+ * Copyright (c) 2021, Intel Corporation.
+ */
+#ifndef _LINUX_PERF_DLFILTER_H
+#define _LINUX_PERF_DLFILTER_H
+
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+/*
+ * The following macro can be used to determine if this header defines
+ * perf_dlfilter_sample machine_pid and vcpu.
+ */
+#define PERF_DLFILTER_HAS_MACHINE_PID
+
+/* Definitions for perf_dlfilter_sample flags */
+enum {
+ PERF_DLFILTER_FLAG_BRANCH = 1ULL << 0,
+ PERF_DLFILTER_FLAG_CALL = 1ULL << 1,
+ PERF_DLFILTER_FLAG_RETURN = 1ULL << 2,
+ PERF_DLFILTER_FLAG_CONDITIONAL = 1ULL << 3,
+ PERF_DLFILTER_FLAG_SYSCALLRET = 1ULL << 4,
+ PERF_DLFILTER_FLAG_ASYNC = 1ULL << 5,
+ PERF_DLFILTER_FLAG_INTERRUPT = 1ULL << 6,
+ PERF_DLFILTER_FLAG_TX_ABORT = 1ULL << 7,
+ PERF_DLFILTER_FLAG_TRACE_BEGIN = 1ULL << 8,
+ PERF_DLFILTER_FLAG_TRACE_END = 1ULL << 9,
+ PERF_DLFILTER_FLAG_IN_TX = 1ULL << 10,
+ PERF_DLFILTER_FLAG_VMENTRY = 1ULL << 11,
+ PERF_DLFILTER_FLAG_VMEXIT = 1ULL << 12,
+};
+
+/*
+ * perf sample event information (as per perf script and <linux/perf_event.h>)
+ */
+struct perf_dlfilter_sample {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u16 ins_lat; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u16 p_stage_cyc; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 ip;
+ __s32 pid;
+ __s32 tid;
+ __u64 time;
+ __u64 addr;
+ __u64 id;
+ __u64 stream_id;
+ __u64 period;
+ __u64 weight; /* Refer PERF_SAMPLE_WEIGHT_TYPE in <linux/perf_event.h> */
+ __u64 transaction; /* Refer PERF_SAMPLE_TRANSACTION in <linux/perf_event.h> */
+ __u64 insn_cnt; /* For instructions-per-cycle (IPC) */
+ __u64 cyc_cnt; /* For instructions-per-cycle (IPC) */
+ __s32 cpu;
+ __u32 flags; /* Refer PERF_DLFILTER_FLAG_* above */
+ __u64 data_src; /* Refer PERF_SAMPLE_DATA_SRC in <linux/perf_event.h> */
+ __u64 phys_addr; /* Refer PERF_SAMPLE_PHYS_ADDR in <linux/perf_event.h> */
+ __u64 data_page_size; /* Refer PERF_SAMPLE_DATA_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 code_page_size; /* Refer PERF_SAMPLE_CODE_PAGE_SIZE in <linux/perf_event.h> */
+ __u64 cgroup; /* Refer PERF_SAMPLE_CGROUP in <linux/perf_event.h> */
+ __u8 cpumode; /* Refer CPUMODE_MASK etc in <linux/perf_event.h> */
+ __u8 addr_correlates_sym; /* True => resolve_addr() can be called */
+ __u16 misc; /* Refer perf_event_header in <linux/perf_event.h> */
+ __u32 raw_size; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ const void *raw_data; /* Refer PERF_SAMPLE_RAW in <linux/perf_event.h> */
+ __u64 brstack_nr; /* Number of brstack entries */
+ const struct perf_branch_entry *brstack; /* Refer <linux/perf_event.h> */
+ __u64 raw_callchain_nr; /* Number of raw_callchain entries */
+ const __u64 *raw_callchain; /* Refer <linux/perf_event.h> */
+ const char *event;
+ __s32 machine_pid;
+ __s32 vcpu;
+};
+
+/*
+ * Address location (as per perf script)
+ */
+struct perf_dlfilter_al {
+ __u32 size; /* Size of this structure (for compatibility checking) */
+ __u32 symoff;
+ const char *sym;
+ __u64 addr; /* Mapped address (from dso) */
+ __u64 sym_start;
+ __u64 sym_end;
+ const char *dso;
+ __u8 sym_binding; /* STB_LOCAL, STB_GLOBAL or STB_WEAK, refer <elf.h> */
+ __u8 is_64_bit; /* Only valid if dso is not NULL */
+ __u8 is_kernel_ip; /* True if in kernel space */
+ __u32 buildid_size;
+ __u8 *buildid;
+ /* Below members are only populated by resolve_ip() */
+ __u8 filtered; /* True if this sample event will be filtered out */
+ const char *comm;
+};
+
+struct perf_dlfilter_fns {
+ /* Return information about ip */
+ const struct perf_dlfilter_al *(*resolve_ip)(void *ctx);
+ /* Return information about addr (if addr_correlates_sym) */
+ const struct perf_dlfilter_al *(*resolve_addr)(void *ctx);
+ /* Return arguments from --dlarg option */
+ char **(*args)(void *ctx, int *dlargc);
+ /*
+ * Return information about address (al->size must be set before
+ * calling). Returns 0 on success, -1 otherwise.
+ */
+ __s32 (*resolve_address)(void *ctx, __u64 address, struct perf_dlfilter_al *al);
+ /* Return instruction bytes and length */
+ const __u8 *(*insn)(void *ctx, __u32 *length);
+ /* Return source file name and line number */
+ const char *(*srcline)(void *ctx, __u32 *line_number);
+ /* Return perf_event_attr, refer <linux/perf_event.h> */
+ struct perf_event_attr *(*attr)(void *ctx);
+	/* Read object code, return number of bytes read */
+ __s32 (*object_code)(void *ctx, __u64 ip, void *buf, __u32 len);
+ /* Reserved */
+ void *(*reserved[120])(void *);
+};
+
+/*
+ * If implemented, 'start' will be called at the beginning,
+ * before any calls to 'filter_event'. Return 0 to indicate success,
+ * or return a negative error code. '*data' can be assigned for use
+ * by other functions. 'ctx' is needed for calls to perf_dlfilter_fns,
+ * but most perf_dlfilter_fns are not valid when called from 'start'.
+ */
+int start(void **data, void *ctx);
+
+/*
+ * If implemented, 'stop' will be called at the end,
+ * after any calls to 'filter_event'. Return 0 to indicate success, or
+ * return a negative error code. 'data' is set by start(). 'ctx' is
+ * needed for calls to perf_dlfilter_fns, but most perf_dlfilter_fns
+ * are not valid when called from 'stop'.
+ */
+int stop(void *data, void *ctx);
+
+/*
+ * If implemented, 'filter_event' will be called for each sample
+ * event. Return 0 to keep the sample event, 1 to filter it out, or
+ * return a negative error code. 'data' is set by start(). 'ctx' is
+ * needed for calls to perf_dlfilter_fns.
+ */
+int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+
+/*
+ * The same as 'filter_event' except it is called before internal
+ * filtering.
+ */
+int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, void *ctx);
+
+/*
+ * If implemented, return a one-line description of the filter, and optionally
+ * a longer description.
+ */
+const char *filter_description(const char **long_description);
+
+#endif
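For reference, a minimal sketch (not part of this patch) of a filter written against this header. It defines perf_dlfilter_fns, as the v0 test filter above does, and keeps only samples whose ip resolves to kernel space; it would be built as a shared object and loaded through perf script's --dlfilter option:

#include <perf/perf_dlfilter.h>

struct perf_dlfilter_fns perf_dlfilter_fns;

int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
{
	const struct perf_dlfilter_al *al = perf_dlfilter_fns.resolve_ip(ctx);

	if (!al)
		return 0;			/* keep the sample if ip cannot be resolved */

	return al->is_kernel_ip ? 0 : 1;	/* 1 filters the sample out */
}

const char *filter_description(const char **long_description)
{
	*long_description = "Keep only samples whose ip resolves to kernel space";
	return "kernel-only sample filter (illustrative)";
}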
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index 88108598d6e9..526dcaf9f079 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -390,7 +390,7 @@ jvmti_write_code(void *agent, char const *sym,
rec.p.total_size += size;
/*
- * If JVM is multi-threaded, nultiple concurrent calls to agent
+ * If JVM is multi-threaded, multiple concurrent calls to agent
* may be possible, so protect file writes
*/
flockfile(fp);
@@ -457,7 +457,7 @@ jvmti_write_debug_info(void *agent, uint64_t code,
rec.p.total_size = size;
/*
- * If JVM is multi-threaded, nultiple concurrent calls to agent
+ * If JVM is multi-threaded, multiple concurrent calls to agent
* may be possible, so protect file writes
*/
flockfile(fp);
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
index c441a34cb1c0..fcca275e5bf9 100644
--- a/tools/perf/jvmti/libjvmti.c
+++ b/tools/perf/jvmti/libjvmti.c
@@ -32,34 +32,41 @@ static void print_error(jvmtiEnv *jvmti, const char *msg, jvmtiError ret)
#ifdef HAVE_JVMTI_CMLR
static jvmtiError
-do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
- jvmti_line_info_t *tab, jint *nr)
+do_get_line_number(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
+ jvmti_line_info_t *tab)
{
- jint i, lines = 0;
- jint nr_lines = 0;
+ jint i, nr_lines = 0;
jvmtiLineNumberEntry *loc_tab = NULL;
jvmtiError ret;
+ jint src_line = -1;
ret = (*jvmti)->GetLineNumberTable(jvmti, m, &nr_lines, &loc_tab);
- if (ret != JVMTI_ERROR_NONE) {
+ if (ret == JVMTI_ERROR_ABSENT_INFORMATION || ret == JVMTI_ERROR_NATIVE_METHOD) {
+ /* No debug information for this method */
+ return ret;
+ } else if (ret != JVMTI_ERROR_NONE) {
print_error(jvmti, "GetLineNumberTable", ret);
return ret;
}
- for (i = 0; i < nr_lines; i++) {
- if (loc_tab[i].start_location < bci) {
- tab[lines].pc = (unsigned long)pc;
- tab[lines].line_number = loc_tab[i].line_number;
- tab[lines].discrim = 0; /* not yet used */
- tab[lines].methodID = m;
- lines++;
- } else {
- break;
- }
+ for (i = 0; i < nr_lines && loc_tab[i].start_location <= bci; i++) {
+ src_line = i;
+ }
+
+ if (src_line != -1) {
+ tab->pc = (unsigned long)pc;
+ tab->line_number = loc_tab[src_line].line_number;
+ tab->discrim = 0; /* not yet used */
+ tab->methodID = m;
+
+ ret = JVMTI_ERROR_NONE;
+ } else {
+ ret = JVMTI_ERROR_ABSENT_INFORMATION;
}
+
(*jvmti)->Deallocate(jvmti, (unsigned char *)loc_tab);
- *nr = lines;
- return JVMTI_ERROR_NONE;
+
+ return ret;
}
static jvmtiError
@@ -67,9 +74,8 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
{
const jvmtiCompiledMethodLoadRecordHeader *hdr;
jvmtiCompiledMethodLoadInlineRecord *rec;
- jvmtiLineNumberEntry *lne = NULL;
PCStackInfo *c;
- jint nr, ret;
+ jint ret;
int nr_total = 0;
int i, lines_total = 0;
@@ -82,21 +88,7 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
for (hdr = compile_info; hdr != NULL; hdr = hdr->next) {
if (hdr->kind == JVMTI_CMLR_INLINE_INFO) {
rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
- for (i = 0; i < rec->numpcs; i++) {
- c = rec->pcinfo + i;
- nr = 0;
- /*
- * unfortunately, need a tab to get the number of lines!
- */
- ret = (*jvmti)->GetLineNumberTable(jvmti, c->methods[0], &nr, &lne);
- if (ret == JVMTI_ERROR_NONE) {
- /* free what was allocated for nothing */
- (*jvmti)->Deallocate(jvmti, (unsigned char *)lne);
- nr_total += (int)nr;
- } else {
- print_error(jvmti, "GetLineNumberTable", ret);
- }
- }
+ nr_total += rec->numpcs;
}
}
@@ -115,14 +107,17 @@ get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **
rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
for (i = 0; i < rec->numpcs; i++) {
c = rec->pcinfo + i;
- nr = 0;
- ret = do_get_line_numbers(jvmti, c->pc,
- c->methods[0],
- c->bcis[0],
- *tab + lines_total,
- &nr);
+ /*
+ * c->methods is the stack of inlined method calls
+ * at c->pc. [0] is the leaf method. Caller frames
+ * are ignored at the moment.
+ */
+ ret = do_get_line_number(jvmti, c->pc,
+ c->methods[0],
+ c->bcis[0],
+ *tab + lines_total);
if (ret == JVMTI_ERROR_NONE)
- lines_total += nr;
+ lines_total++;
}
}
}
@@ -246,8 +241,6 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
char *class_sign = NULL;
char *func_name = NULL;
char *func_sign = NULL;
- char *file_name = NULL;
- char fn[PATH_MAX];
uint64_t addr = (uint64_t)(uintptr_t)code_addr;
jvmtiError ret;
int nr_lines = 0; /* in line_tab[] */
@@ -264,7 +257,9 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
if (has_line_numbers && map && map_length) {
ret = get_line_numbers(jvmti, compile_info, &line_tab, &nr_lines);
if (ret != JVMTI_ERROR_NONE) {
- warnx("jvmti: cannot get line table for method");
+ if (ret != JVMTI_ERROR_NOT_FOUND) {
+ warnx("jvmti: cannot get line table for method");
+ }
nr_lines = 0;
} else if (nr_lines > 0) {
line_file_names = malloc(sizeof(char*) * nr_lines);
@@ -282,12 +277,6 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
}
}
- ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
- if (ret != JVMTI_ERROR_NONE) {
- print_error(jvmti, "GetSourceFileName", ret);
- goto error;
- }
-
ret = (*jvmti)->GetClassSignature(jvmti, decl_class,
&class_sign, NULL);
if (ret != JVMTI_ERROR_NONE) {
@@ -302,8 +291,6 @@ compiled_method_load_cb(jvmtiEnv *jvmti,
goto error;
}
- copy_class_filename(class_sign, file_name, fn, PATH_MAX);
-
/*
* write source line info record if we have it
*/
@@ -323,7 +310,6 @@ error:
(*jvmti)->Deallocate(jvmti, (unsigned char *)func_name);
(*jvmti)->Deallocate(jvmti, (unsigned char *)func_sign);
(*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
- (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
free(line_tab);
while (line_file_names && (nr_lines > 0)) {
if (line_file_names[nr_lines - 1]) {
diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh
index 0cfb3e2cefef..133f0eddbcc4 100644
--- a/tools/perf/perf-archive.sh
+++ b/tools/perf/perf-archive.sh
@@ -20,9 +20,8 @@ else
fi
BUILDIDS=$(mktemp /tmp/perf-archive-buildids.XXXXXX)
-NOBUILDID=0000000000000000000000000000000000000000
-perf buildid-list -i $PERF_DATA --with-hits | grep -v "^$NOBUILDID " > $BUILDIDS
+perf buildid-list -i $PERF_DATA --with-hits | grep -v "^ " > $BUILDIDS
if [ ! -s $BUILDIDS ] ; then
echo "perf archive: no build-ids found"
rm $BUILDIDS || true
diff --git a/tools/perf/perf-completion.sh b/tools/perf/perf-completion.sh
index fdf75d45efff..978249d7868c 100644
--- a/tools/perf/perf-completion.sh
+++ b/tools/perf/perf-completion.sh
@@ -165,7 +165,12 @@ __perf_main ()
local cur1=${COMP_WORDS[COMP_CWORD]}
local raw_evts=$($cmd list --raw-dump)
- local arr s tmp result
+ local arr s tmp result cpu_evts
+
+ # aarch64 doesn't have /sys/bus/event_source/devices/cpu/events
+ if [[ `uname -m` != aarch64 ]]; then
+ cpu_evts=$(ls /sys/bus/event_source/devices/cpu/events)
+ fi
if [[ "$cur1" == */* && ${cur1#*/} =~ ^[A-Z] ]]; then
OLD_IFS="$IFS"
@@ -183,9 +188,9 @@ __perf_main ()
fi
done
- evts=${result}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ evts=${result}" "${cpu_evts}
else
- evts=${raw_evts}" "$(ls /sys/bus/event_source/devices/cpu/events)
+ evts=${raw_evts}" "${cpu_evts}
fi
if [[ "$cur1" == , ]]; then
diff --git a/tools/perf/perf-iostat.sh b/tools/perf/perf-iostat.sh
new file mode 100644
index 000000000000..e562f252d56f
--- /dev/null
+++ b/tools/perf/perf-iostat.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# perf iostat
+# Alexander Antonov <alexander.antonov@linux.intel.com>
+
+if [[ "$1" == "list" ]] || [[ "$1" =~ ([a-f0-9A-F]{1,}):([a-f0-9A-F]{1,2})(,)? ]]; then
+ DELIMITER="="
+else
+ DELIMITER=" "
+fi
+
+perf stat --iostat$DELIMITER$*
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 15e458e150bd..7a2264e1e4e1 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -9,31 +9,13 @@
struct perf_event_attr;
-extern bool test_attr__enabled;
-void test_attr__ready(void);
-void test_attr__init(void);
-void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
- int fd, int group_fd, unsigned long flags);
-
-#ifndef HAVE_ATTR_TEST
-#define HAVE_ATTR_TEST 1
-#endif
-
static inline int
sys_perf_event_open(struct perf_event_attr *attr,
pid_t pid, int cpu, int group_fd,
unsigned long flags)
{
- int fd;
-
- fd = syscall(__NR_perf_event_open, attr, pid, cpu,
- group_fd, flags);
-
-#if HAVE_ATTR_TEST
- if (unlikely(test_attr__enabled))
- test_attr__open(attr, pid, cpu, fd, group_fd, flags);
-#endif
- return fd;
+ return syscall(__NR_perf_event_open, attr, pid, cpu,
+ group_fd, flags);
}
#endif /* _PERF_SYS_H */
diff --git a/tools/perf/perf-with-kcore.sh b/tools/perf/perf-with-kcore.sh
deleted file mode 100644
index 0b96545c8184..000000000000
--- a/tools/perf/perf-with-kcore.sh
+++ /dev/null
@@ -1,247 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0-only
-# perf-with-kcore: use perf with a copy of kcore
-# Copyright (c) 2014, Intel Corporation.
-#
-
-set -e
-
-usage()
-{
- echo "Usage: perf-with-kcore <perf sub-command> <perf.data directory> [<sub-command options> [ -- <workload>]]" >&2
- echo " <perf sub-command> can be record, script, report or inject" >&2
- echo " or: perf-with-kcore fix_buildid_cache_permissions" >&2
- exit 1
-}
-
-find_perf()
-{
- if [ -n "$PERF" ] ; then
- return
- fi
- PERF=`which perf || true`
- if [ -z "$PERF" ] ; then
- echo "Failed to find perf" >&2
- exit 1
- fi
- if [ ! -x "$PERF" ] ; then
- echo "Failed to find perf" >&2
- exit 1
- fi
- echo "Using $PERF"
- "$PERF" version
-}
-
-copy_kcore()
-{
- echo "Copying kcore"
-
- if [ $EUID -eq 0 ] ; then
- SUDO=""
- else
- SUDO="sudo"
- fi
-
- rm -f perf.data.junk
- ("$PERF" record -o perf.data.junk "${PERF_OPTIONS[@]}" -- sleep 60) >/dev/null 2>/dev/null &
- PERF_PID=$!
-
- # Need to make sure that perf has started
- sleep 1
-
- KCORE=$(($SUDO "$PERF" buildid-cache -v -f -k /proc/kcore >/dev/null) 2>&1)
- case "$KCORE" in
- "kcore added to build-id cache directory "*)
- KCORE_DIR=${KCORE#"kcore added to build-id cache directory "}
- ;;
- *)
- kill $PERF_PID
- wait >/dev/null 2>/dev/null || true
- rm perf.data.junk
- echo "$KCORE"
- echo "Failed to find kcore" >&2
- exit 1
- ;;
- esac
-
- kill $PERF_PID
- wait >/dev/null 2>/dev/null || true
- rm perf.data.junk
-
- $SUDO cp -a "$KCORE_DIR" "$(pwd)/$PERF_DATA_DIR"
- $SUDO rm -f "$KCORE_DIR/kcore"
- $SUDO rm -f "$KCORE_DIR/kallsyms"
- $SUDO rm -f "$KCORE_DIR/modules"
- $SUDO rmdir "$KCORE_DIR"
-
- KCORE_DIR_BASENAME=$(basename "$KCORE_DIR")
- KCORE_DIR="$(pwd)/$PERF_DATA_DIR/$KCORE_DIR_BASENAME"
-
- $SUDO chown $UID "$KCORE_DIR"
- $SUDO chown $UID "$KCORE_DIR/kcore"
- $SUDO chown $UID "$KCORE_DIR/kallsyms"
- $SUDO chown $UID "$KCORE_DIR/modules"
-
- $SUDO chgrp $GROUPS "$KCORE_DIR"
- $SUDO chgrp $GROUPS "$KCORE_DIR/kcore"
- $SUDO chgrp $GROUPS "$KCORE_DIR/kallsyms"
- $SUDO chgrp $GROUPS "$KCORE_DIR/modules"
-
- ln -s "$KCORE_DIR_BASENAME" "$PERF_DATA_DIR/kcore_dir"
-}
-
-fix_buildid_cache_permissions()
-{
- if [ $EUID -ne 0 ] ; then
- echo "This script must be run as root via sudo " >&2
- exit 1
- fi
-
- if [ -z "$SUDO_USER" ] ; then
- echo "This script must be run via sudo" >&2
- exit 1
- fi
-
- USER_HOME=$(bash <<< "echo ~$SUDO_USER")
-
- echo "Fixing buildid cache permissions"
-
- find "$USER_HOME/.debug" -xdev -type d ! -user "$SUDO_USER" -ls -exec chown "$SUDO_USER" \{\} \;
- find "$USER_HOME/.debug" -xdev -type f -links 1 ! -user "$SUDO_USER" -ls -exec chown "$SUDO_USER" \{\} \;
- find "$USER_HOME/.debug" -xdev -type l ! -user "$SUDO_USER" -ls -exec chown -h "$SUDO_USER" \{\} \;
-
- if [ -n "$SUDO_GID" ] ; then
- find "$USER_HOME/.debug" -xdev -type d ! -group "$SUDO_GID" -ls -exec chgrp "$SUDO_GID" \{\} \;
- find "$USER_HOME/.debug" -xdev -type f -links 1 ! -group "$SUDO_GID" -ls -exec chgrp "$SUDO_GID" \{\} \;
- find "$USER_HOME/.debug" -xdev -type l ! -group "$SUDO_GID" -ls -exec chgrp -h "$SUDO_GID" \{\} \;
- fi
-
- echo "Done"
-}
-
-check_buildid_cache_permissions()
-{
- if [ $EUID -eq 0 ] ; then
- return
- fi
-
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type d ! -user "$USER" -print -quit)
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type f -links 1 ! -user "$USER" -print -quit)
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type l ! -user "$USER" -print -quit)
-
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type d ! -group "$GROUPS" -print -quit)
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type f -links 1 ! -group "$GROUPS" -print -quit)
- PERMISSIONS_OK+=$(find "$HOME/.debug" -xdev -type l ! -group "$GROUPS" -print -quit)
-
- if [ -n "$PERMISSIONS_OK" ] ; then
- echo "*** WARNING *** buildid cache permissions may need fixing" >&2
- fi
-}
-
-record()
-{
- echo "Recording"
-
- if [ $EUID -ne 0 ] ; then
-
- if [ "$(cat /proc/sys/kernel/kptr_restrict)" -ne 0 ] ; then
- echo "*** WARNING *** /proc/sys/kernel/kptr_restrict prevents access to kernel addresses" >&2
- fi
-
- if echo "${PERF_OPTIONS[@]}" | grep -q ' -a \|^-a \| -a$\|^-a$\| --all-cpus \|^--all-cpus \| --all-cpus$\|^--all-cpus$' ; then
- echo "*** WARNING *** system-wide tracing without root access will not be able to read all necessary information from /proc" >&2
- fi
-
- if echo "${PERF_OPTIONS[@]}" | grep -q 'intel_pt\|intel_bts\| -I\|^-I' ; then
- if [ "$(cat /proc/sys/kernel/perf_event_paranoid)" -gt -1 ] ; then
- echo "*** WARNING *** /proc/sys/kernel/perf_event_paranoid restricts buffer size and tracepoint (sched_switch) use" >&2
- fi
-
- if echo "${PERF_OPTIONS[@]}" | grep -q ' --per-thread \|^--per-thread \| --per-thread$\|^--per-thread$' ; then
- true
- elif echo "${PERF_OPTIONS[@]}" | grep -q ' -t \|^-t \| -t$\|^-t$' ; then
- true
- elif [ ! -r /sys/kernel/debug -o ! -x /sys/kernel/debug ] ; then
- echo "*** WARNING *** /sys/kernel/debug permissions prevent tracepoint (sched_switch) use" >&2
- fi
- fi
- fi
-
- if [ -z "$1" ] ; then
- echo "Workload is required for recording" >&2
- usage
- fi
-
- if [ -e "$PERF_DATA_DIR" ] ; then
- echo "'$PERF_DATA_DIR' exists" >&2
- exit 1
- fi
-
- find_perf
-
- mkdir "$PERF_DATA_DIR"
-
- echo "$PERF record -o $PERF_DATA_DIR/perf.data ${PERF_OPTIONS[@]} -- $@"
- "$PERF" record -o "$PERF_DATA_DIR/perf.data" "${PERF_OPTIONS[@]}" -- "$@" || true
-
- if rmdir "$PERF_DATA_DIR" > /dev/null 2>/dev/null ; then
- exit 1
- fi
-
- copy_kcore
-
- echo "Done"
-}
-
-subcommand()
-{
- find_perf
- check_buildid_cache_permissions
- echo "$PERF $PERF_SUB_COMMAND -i $PERF_DATA_DIR/perf.data --kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms $@"
- "$PERF" $PERF_SUB_COMMAND -i "$PERF_DATA_DIR/perf.data" "--kallsyms=$PERF_DATA_DIR/kcore_dir/kallsyms" "$@"
-}
-
-if [ "$1" = "fix_buildid_cache_permissions" ] ; then
- fix_buildid_cache_permissions
- exit 0
-fi
-
-PERF_SUB_COMMAND=$1
-PERF_DATA_DIR=$2
-shift || true
-shift || true
-
-if [ -z "$PERF_SUB_COMMAND" ] ; then
- usage
-fi
-
-if [ -z "$PERF_DATA_DIR" ] ; then
- usage
-fi
-
-case "$PERF_SUB_COMMAND" in
-"record")
- while [ "$1" != "--" ] ; do
- PERF_OPTIONS+=("$1")
- shift || break
- done
- if [ "$1" != "--" ] ; then
- echo "Options and workload are required for recording" >&2
- usage
- fi
- shift
- record "$@"
-;;
-"script")
- subcommand "$@"
-;;
-"report")
- subcommand "$@"
-;;
-"inject")
- subcommand "$@"
-;;
-*)
- usage
-;;
-esac
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 27f94b0bb874..38cae4721583 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -39,14 +39,7 @@
#include <linux/string.h>
#include <linux/zalloc.h>
-const char perf_usage_string[] =
- "perf [--version] [--help] [OPTIONS] COMMAND [ARGS]";
-
-const char perf_more_info_string[] =
- "See 'perf help COMMAND' for more information on a specific command.";
-
static int use_pager = -1;
-const char *input_name;
struct cmd_struct {
const char *cmd;
@@ -55,6 +48,7 @@ struct cmd_struct {
};
static struct cmd_struct commands[] = {
+ { "archive", NULL, 0 },
{ "buildid-cache", cmd_buildid_cache, 0 },
{ "buildid-list", cmd_buildid_list, 0 },
{ "config", cmd_config, 0 },
@@ -62,32 +56,43 @@ static struct cmd_struct commands[] = {
{ "diff", cmd_diff, 0 },
{ "evlist", cmd_evlist, 0 },
{ "help", cmd_help, 0 },
+ { "iostat", NULL, 0 },
{ "kallsyms", cmd_kallsyms, 0 },
{ "list", cmd_list, 0 },
{ "record", cmd_record, 0 },
{ "report", cmd_report, 0 },
{ "bench", cmd_bench, 0 },
{ "stat", cmd_stat, 0 },
+#ifdef HAVE_LIBTRACEEVENT
{ "timechart", cmd_timechart, 0 },
+#endif
{ "top", cmd_top, 0 },
{ "annotate", cmd_annotate, 0 },
{ "version", cmd_version, 0 },
{ "script", cmd_script, 0 },
+#ifdef HAVE_LIBTRACEEVENT
{ "sched", cmd_sched, 0 },
+#endif
#ifdef HAVE_LIBELF_SUPPORT
{ "probe", cmd_probe, 0 },
#endif
+#ifdef HAVE_LIBTRACEEVENT
{ "kmem", cmd_kmem, 0 },
{ "lock", cmd_lock, 0 },
+#endif
{ "kvm", cmd_kvm, 0 },
{ "test", cmd_test, 0 },
-#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
+#if defined(HAVE_LIBTRACEEVENT) && (defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT))
{ "trace", cmd_trace, 0 },
#endif
{ "inject", cmd_inject, 0 },
{ "mem", cmd_mem, 0 },
{ "data", cmd_data, 0 },
{ "ftrace", cmd_ftrace, 0 },
+ { "daemon", cmd_daemon, 0 },
+#ifdef HAVE_LIBTRACEEVENT
+ { "kwork", cmd_kwork, 0 },
+#endif
};
struct pager_config {
@@ -95,10 +100,16 @@ struct pager_config {
int val;
};
+static bool same_cmd_with_prefix(const char *var, struct pager_config *c,
+ const char *header)
+{
+ return (strstarts(var, header) && !strcmp(var + strlen(header), c->cmd));
+}
+
static int pager_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
- if (strstarts(var, "pager.") && !strcmp(var + 6, c->cmd))
+ if (same_cmd_with_prefix(var, c, "pager."))
c->val = perf_config_bool(var, value);
return 0;
}
@@ -117,9 +128,9 @@ static int check_pager_config(const char *cmd)
static int browser_command_config(const char *var, const char *value, void *data)
{
struct pager_config *c = data;
- if (strstarts(var, "tui.") && !strcmp(var + 4, c->cmd))
+ if (same_cmd_with_prefix(var, c, "tui."))
c->val = perf_config_bool(var, value);
- if (strstarts(var, "gtk.") && !strcmp(var + 4, c->cmd))
+ if (same_cmd_with_prefix(var, c, "gtk."))
c->val = perf_config_bool(var, value) ? 2 : 0;
return 0;
}
@@ -200,7 +211,7 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
if (!strcmp(cmd, "-vv")) {
(*argv)[0] = "version";
- version_verbose = 1;
+ verbose = 1;
break;
}
@@ -359,6 +370,8 @@ static void handle_internal_command(int argc, const char **argv)
for (i = 0; i < ARRAY_SIZE(commands); i++) {
struct cmd_struct *p = commands+i;
+ if (p->fn == NULL)
+ continue;
if (strcmp(p->cmd, cmd))
continue;
exit(run_builtin(p, argc, argv));
@@ -412,28 +425,10 @@ static int run_argv(int *argcp, const char ***argv)
return 0;
}
-static void pthread__block_sigwinch(void)
-{
- sigset_t set;
-
- sigemptyset(&set);
- sigaddset(&set, SIGWINCH);
- pthread_sigmask(SIG_BLOCK, &set, NULL);
-}
-
-void pthread__unblock_sigwinch(void)
-{
- sigset_t set;
-
- sigemptyset(&set);
- sigaddset(&set, SIGWINCH);
- pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-}
-
static int libperf_print(enum libperf_print_level level,
const char *fmt, va_list ap)
{
- return eprintf(level, verbose, fmt, ap);
+ return veprintf(level, verbose, fmt, ap);
}
int main(int argc, const char **argv)
@@ -442,6 +437,8 @@ int main(int argc, const char **argv)
const char *cmd;
char sbuf[STRERR_BUFSIZE];
+ perf_debug_setup();
+
/* libsubcmd init */
exec_cmd_init("perf", PREFIX, PERF_EXEC_PATH, EXEC_PATH_ENVIRONMENT);
pager_init(PERF_PAGER_ENVIRONMENT);
@@ -486,14 +483,18 @@ int main(int argc, const char **argv)
argv[0] = cmd;
}
if (strstarts(cmd, "trace")) {
-#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)
- setup_path();
- argv[0] = "trace";
- return cmd_trace(argc, argv);
-#else
+#ifndef HAVE_LIBTRACEEVENT
+ fprintf(stderr,
+ "trace command not available: missing libtraceevent devel package at build time.\n");
+ goto out;
+#elif !defined(HAVE_LIBAUDIT_SUPPORT) && !defined(HAVE_SYSCALL_TABLE_SUPPORT)
fprintf(stderr,
"trace command not available: missing audit-libs devel package at build time.\n");
goto out;
+#else
+ setup_path();
+ argv[0] = "trace";
+ return cmd_trace(argc, argv);
#endif
}
/* Look for flags.. */
@@ -530,8 +531,6 @@ int main(int argc, const char **argv)
*/
pthread__block_sigwinch();
- perf_debug_setup();
-
while (1) {
static int done_help;
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 74014033df60..c004dd4e65a3 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -2,18 +2,10 @@
#ifndef _PERF_PERF_H
#define _PERF_PERF_H
-#include <stdbool.h>
-
#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 2048
#endif
-extern const char *input_name;
-extern bool perf_host, perf_guest;
-extern const char perf_version_string[];
-
-void pthread__unblock_sigwinch(void);
-
enum perf_affinity {
PERF_AFFINITY_SYS = 0,
PERF_AFFINITY_NODE,
@@ -21,5 +13,4 @@ enum perf_affinity {
PERF_AFFINITY_MAX
};
-extern int version_verbose;
#endif
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 215ba30b8534..150765f2baee 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -1,15 +1,37 @@
-hostprogs := jevents
-
-jevents-y += json.o jsmn.o jevents.o
-HOSTCFLAGS_jevents.o = -I$(srctree)/tools/include
pmu-events-y += pmu-events.o
JDIR = pmu-events/arch/$(SRCARCH)
JSON = $(shell [ -d $(JDIR) ] && \
find $(JDIR) -name '*.json' -o -name 'mapfile.csv')
+JDIR_TEST = pmu-events/arch/test
+JSON_TEST = $(shell [ -d $(JDIR_TEST) ] && \
+ find $(JDIR_TEST) -name '*.json')
+JEVENTS_PY = pmu-events/jevents.py
+METRIC_PY = pmu-events/metric.py
+METRIC_TEST_PY = pmu-events/metric_test.py
+EMPTY_PMU_EVENTS_C = pmu-events/empty-pmu-events.c
+PMU_EVENTS_C = $(OUTPUT)pmu-events/pmu-events.c
+METRIC_TEST_LOG = $(OUTPUT)pmu-events/metric_test.log
+
+ifeq ($(JEVENTS_ARCH),)
+JEVENTS_ARCH=$(SRCARCH)
+endif
+JEVENTS_MODEL ?= all
#
# Locate/process JSON files in pmu-events/arch/
# directory and create tables in pmu-events.c.
#
-$(OUTPUT)pmu-events/pmu-events.c: $(JSON) $(JEVENTS)
- $(Q)$(call echo-cmd,gen)$(JEVENTS) $(SRCARCH) pmu-events/arch $(OUTPUT)pmu-events/pmu-events.c $(V)
+
+ifeq ($(NO_JEVENTS),1)
+$(PMU_EVENTS_C): $(EMPTY_PMU_EVENTS_C)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)cp $< $@
+else
+$(METRIC_TEST_LOG): $(METRIC_TEST_PY) $(METRIC_PY)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,test)$(PYTHON) $< 2> $@ || (cat $@ && false)
+
+$(PMU_EVENTS_C): $(JSON) $(JSON_TEST) $(JEVENTS_PY) $(METRIC_PY) $(METRIC_TEST_LOG)
+ $(call rule_mkdir)
+ $(Q)$(call echo-cmd,gen)$(PYTHON) $(JEVENTS_PY) $(JEVENTS_ARCH) $(JEVENTS_MODEL) pmu-events/arch $@
+endif
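With the Build changes above, pmu-events.c is generated by the Python jevents.py script, gated by the metric self-test, instead of the removed jevents host program (unless NO_JEVENTS=1, in which case the empty table is copied in). A rough sketch of the two commands the rules run, assuming python3 and a tools/perf working directory; the arm64/all arguments and the output path are illustrative:

    # Metric self-test (the rule redirects stderr into metric_test.log):
    python3 pmu-events/metric_test.py

    # Table generation, as in the $(PMU_EVENTS_C) rule:
    python3 pmu-events/jevents.py arm64 all pmu-events/arch pmu-events/pmu-events.c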
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json
index 2d15b11e5383..5c69c1e82ef8 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/branch.json
@@ -9,15 +9,11 @@
"ArchStdEvent": "BR_INDIRECT_SPEC"
},
{
- "PublicDescription": "Mispredicted or not predicted branch speculatively executed",
- "EventCode": "0x10",
- "EventName": "BR_MIS_PRED",
+ "ArchStdEvent": "BR_MIS_PRED",
"BriefDescription": "Branch mispredicted"
},
{
- "PublicDescription": "Predictable branch speculatively executed",
- "EventCode": "0x12",
- "EventName": "BR_PRED",
+ "ArchStdEvent": "BR_PRED",
"BriefDescription": "Predictable branch"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
index 5c1a9a922ca4..cf48d0dfc759 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/bus.json
@@ -18,9 +18,6 @@
"ArchStdEvent": "BUS_ACCESS_PERIPH"
},
{
- "PublicDescription": "Bus access",
- "EventCode": "0x19",
- "EventName": "BUS_ACCESS",
- "BriefDescription": "Bus access"
+ "ArchStdEvent": "BUS_ACCESS"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
index 40010a8724b3..4cc50b7da526 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
@@ -39,70 +39,40 @@
"ArchStdEvent": "L2D_CACHE_INVAL"
},
{
- "PublicDescription": "Level 1 instruction cache refill",
- "EventCode": "0x01",
- "EventName": "L1I_CACHE_REFILL",
- "BriefDescription": "L1I cache refill"
+ "ArchStdEvent": "L1I_CACHE_REFILL"
},
{
- "PublicDescription": "Level 1 instruction TLB refill",
- "EventCode": "0x02",
- "EventName": "L1I_TLB_REFILL",
- "BriefDescription": "L1I TLB refill"
+ "ArchStdEvent": "L1I_TLB_REFILL"
},
{
- "PublicDescription": "Level 1 data cache refill",
- "EventCode": "0x03",
- "EventName": "L1D_CACHE_REFILL",
- "BriefDescription": "L1D cache refill"
+ "ArchStdEvent": "L1D_CACHE_REFILL"
},
{
- "PublicDescription": "Level 1 data cache access",
- "EventCode": "0x04",
- "EventName": "L1D_CACHE_ACCESS",
- "BriefDescription": "L1D cache access"
+ "ArchStdEvent": "L1D_CACHE"
},
{
- "PublicDescription": "Level 1 data TLB refill",
- "EventCode": "0x05",
- "EventName": "L1D_TLB_REFILL",
- "BriefDescription": "L1D TLB refill"
+ "ArchStdEvent": "L1D_TLB_REFILL"
},
{
- "PublicDescription": "Level 1 instruction cache access",
- "EventCode": "0x14",
- "EventName": "L1I_CACHE_ACCESS",
- "BriefDescription": "L1I cache access"
+ "ArchStdEvent": "L1I_CACHE"
},
{
- "PublicDescription": "Level 2 data cache access",
- "EventCode": "0x16",
- "EventName": "L2D_CACHE_ACCESS",
- "BriefDescription": "L2D cache access"
+ "ArchStdEvent": "L2D_CACHE"
},
{
- "PublicDescription": "Level 2 data refill",
- "EventCode": "0x17",
- "EventName": "L2D_CACHE_REFILL",
- "BriefDescription": "L2D cache refill"
+ "ArchStdEvent": "L2D_CACHE_REFILL"
},
{
- "PublicDescription": "Level 2 data cache, Write-Back",
- "EventCode": "0x18",
- "EventName": "L2D_CACHE_WB",
- "BriefDescription": "L2D cache Write-Back"
+ "ArchStdEvent": "L2D_CACHE_WB"
},
{
- "PublicDescription": "Level 1 data TLB access. This event counts any load or store operation which accesses the data L1 TLB",
- "EventCode": "0x25",
- "EventName": "L1D_TLB_ACCESS",
+ "PublicDescription": "This event counts any load or store operation which accesses the data L1 TLB",
+ "ArchStdEvent": "L1D_TLB",
"BriefDescription": "L1D TLB access"
},
{
- "PublicDescription": "Level 1 instruction TLB access. This event counts any instruction fetch which accesses the instruction L1 TLB",
- "EventCode": "0x26",
- "EventName": "L1I_TLB_ACCESS",
- "BriefDescription": "L1I TLB access"
+ "PublicDescription": "This event counts any instruction fetch which accesses the instruction L1 TLB",
+ "ArchStdEvent": "L1I_TLB"
},
{
"PublicDescription": "Level 2 access to data TLB that caused a page table walk. This event counts on any data access which causes L2D_TLB_REFILL to count",
@@ -114,7 +84,7 @@
"PublicDescription": "Level 2 access to instruciton TLB that caused a page table walk. This event counts on any instruciton access which causes L2I_TLB_REFILL to count",
"EventCode": "0x35",
"EventName": "L2I_TLB_ACCESS",
- "BriefDescription": "L2D TLB access"
+ "BriefDescription": "L2I TLB access"
},
{
"PublicDescription": "Branch target buffer misprediction",
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
index 51d1dc1519b2..927a6f629a03 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/clock.json
@@ -1,9 +1,7 @@
[
{
"PublicDescription": "The number of core clock cycles",
- "EventCode": "0x11",
- "EventName": "CPU_CYCLES",
- "BriefDescription": "Clock cycles"
+ "ArchStdEvent": "CPU_CYCLES"
},
{
"PublicDescription": "FSU clocking gated off cycle",
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
index 66e51bc64b22..ada052e19632 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/exception.json
@@ -36,15 +36,9 @@
"ArchStdEvent": "EXC_TRAP_FIQ"
},
{
- "PublicDescription": "Exception taken",
- "EventCode": "0x09",
- "EventName": "EXC_TAKEN",
- "BriefDescription": "Exception taken"
+ "ArchStdEvent": "EXC_TAKEN"
},
{
- "PublicDescription": "Instruction architecturally executed, condition check pass, exception return",
- "EventCode": "0x0a",
- "EventName": "EXC_RETURN",
- "BriefDescription": "Exception return"
+ "ArchStdEvent": "EXC_RETURN"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
index 0d3e46776642..62f6276e3016 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/instruction.json
@@ -40,45 +40,29 @@
},
{
"PublicDescription": "Instruction architecturally executed, software increment",
- "EventCode": "0x00",
- "EventName": "SW_INCR",
+ "ArchStdEvent": "SW_INCR",
"BriefDescription": "Software increment"
},
{
- "PublicDescription": "Instruction architecturally executed",
- "EventCode": "0x08",
- "EventName": "INST_RETIRED",
- "BriefDescription": "Instruction retired"
+ "ArchStdEvent": "INST_RETIRED"
},
{
- "PublicDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR",
- "EventCode": "0x0b",
- "EventName": "CID_WRITE_RETIRED",
+ "ArchStdEvent": "CID_WRITE_RETIRED",
"BriefDescription": "Write to CONTEXTIDR"
},
{
- "PublicDescription": "Operation speculatively executed",
- "EventCode": "0x1b",
- "EventName": "INST_SPEC",
- "BriefDescription": "Speculatively executed"
+ "ArchStdEvent": "INST_SPEC"
},
{
- "PublicDescription": "Instruction architecturally executed (condition check pass), write to TTBR",
- "EventCode": "0x1c",
- "EventName": "TTBR_WRITE_RETIRED",
- "BriefDescription": "Instruction executed, TTBR write"
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
},
{
- "PublicDescription": "Instruction architecturally executed, branch. This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches",
- "EventCode": "0x21",
- "EventName": "BR_RETIRED",
- "BriefDescription": "Branch retired"
+ "PublicDescription": "This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches",
+ "ArchStdEvent": "BR_RETIRED"
},
{
- "PublicDescription": "Instruction architecturally executed, mispredicted branch. This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush",
- "EventCode": "0x22",
- "EventName": "BR_MISPRED_RETIRED",
- "BriefDescription": "Mispredicted branch retired"
+ "PublicDescription": "This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush",
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
},
{
"PublicDescription": "Operation speculatively executed, NOP",
diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
index c2fe674df960..50157e8c2005 100644
--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
+++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/memory.json
@@ -15,15 +15,10 @@
"ArchStdEvent": "UNALIGNED_LDST_SPEC"
},
{
- "PublicDescription": "Data memory access",
- "EventCode": "0x13",
- "EventName": "MEM_ACCESS",
- "BriefDescription": "Memory access"
+ "ArchStdEvent": "MEM_ACCESS"
},
{
- "PublicDescription": "Local memory error. This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs",
- "EventCode": "0x1a",
- "EventName": "MEM_ERROR",
- "BriefDescription": "Memory error"
+ "PublicDescription": "This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs",
+ "ArchStdEvent": "MEMORY_ERROR"
}
]
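The emag JSON files above now refer to architectural standard events by name (ArchStdEvent) rather than repeating event codes, but the names still end up in the generated pmu-events tables, so they remain selectable from the command line. A hedged example, assuming an arm64 machine whose MIDR maps to this directory via mapfile.csv:

    # Count two of the events listed above, system-wide, for one second:
    perf stat -e MEM_ACCESS,MEMORY_ERROR -a -- sleep 1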
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/branch.json
new file mode 100644
index 000000000000..ece201718284
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/branch.json
@@ -0,0 +1,11 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/cache.json
new file mode 100644
index 000000000000..8a9a95e05c32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/cache.json
@@ -0,0 +1,32 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/instruction.json
new file mode 100644
index 000000000000..7c018f439206
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/instruction.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/memory.json
new file mode 100644
index 000000000000..2c319f936957
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a34/memory.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/branch.json
new file mode 100644
index 000000000000..ece201718284
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/branch.json
@@ -0,0 +1,11 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/cache.json
new file mode 100644
index 000000000000..8a9a95e05c32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/cache.json
@@ -0,0 +1,32 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/instruction.json
new file mode 100644
index 000000000000..df9f94cfc8d5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/instruction.json
@@ -0,0 +1,44 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/memory.json
new file mode 100644
index 000000000000..2c319f936957
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a35/memory.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/branch.json
new file mode 100644
index 000000000000..411fcbdbd7e6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/branch.json
@@ -0,0 +1,59 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "PublicDescription": "Predicted conditional branch executed. This event counts when any branch that the conditional predictor can predict is retired. This event still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off",
+ "EventCode": "0xC9",
+ "EventName": "BR_COND_PRED",
+ "BriefDescription": "Predicted conditional branch executed. This event counts when any branch that the conditional predictor can predict is retired. This event still counts when branch prediction is disabled due to the Memory Management Unit (MMU) being off"
+ },
+ {
+ "PublicDescription": "Indirect branch mispredicted. This event counts when any indirect branch that the Branch Target Address Cache (BTAC) can predict is retired and has mispredicted either the condition or the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCA",
+ "EventName": "BR_INDIRECT_MIS_PRED",
+ "BriefDescription": "Indirect branch mispredicted. This event counts when any indirect branch that the Branch Target Address Cache (BTAC) can predict is retired and has mispredicted either the condition or the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Indirect branch mispredicted due to address miscompare. This event counts when any indirect branch that the BTAC can predict is retired, was taken, correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCB",
+ "EventName": "BR_INDIRECT_ADDR_MIS_PRED",
+ "BriefDescription": "Indirect branch mispredicted due to address miscompare. This event counts when any indirect branch that the BTAC can predict is retired, was taken, correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Conditional branch mispredicted. This event counts when any branch that the conditional predictor can predict is retired and has mispredicted the condition. This event still counts when branch prediction is disabled due to the MMU being off. Conditional indirect branches that correctly predict the condition but mispredict the address do not count",
+ "EventCode": "0xCC",
+ "EventName": "BR_COND_MIS_PRED",
+ "BriefDescription": "Conditional branch mispredicted. This event counts when any branch that the conditional predictor can predict is retired and has mispredicted the condition. This event still counts when branch prediction is disabled due to the MMU being off. Conditional indirect branches that correctly predict the condition but mispredict the address do not count"
+ },
+ {
+ "PublicDescription": "Indirect branch with predicted address executed. This event counts when any indirect branch that the BTAC can predict is retired, was taken, and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCD",
+ "EventName": "BR_INDIRECT_ADDR_PRED",
+ "BriefDescription": "Indirect branch with predicted address executed. This event counts when any indirect branch that the BTAC can predict is retired, was taken, and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Procedure return with predicted address executed. This event counts when any procedure return that the call-return stack can predict is retired, was taken, and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCE",
+ "EventName": "BR_RETURN_ADDR_PRED",
+ "BriefDescription": "Procedure return with predicted address executed. This event counts when any procedure return that the call-return stack can predict is retired, was taken, and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Procedure return mispredicted due to address miscompare. This event counts when any procedure return that the call-return stack can predict is retired, was taken, correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCF",
+ "EventName": "BR_RETURN_ADDR_MIS_PRED",
+ "BriefDescription": "Procedure return mispredicted due to address miscompare. This event counts when any procedure return that the call-return stack can predict is retired, was taken, correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/cache.json
new file mode 100644
index 000000000000..27cd913e186b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/cache.json
@@ -0,0 +1,182 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ },
+ {
+ "PublicDescription": "L2 cache refill due to prefetch. If the complex is configured with a per-complex L2 cache, this event does not count. If the complex is configured without a per-complex L2 cache, this event counts the cluster cache event, as defined by L3D_CACHE_REFILL_PREFETCH. If neither a per-complex cache or a cluster cache is configured, this event is not implemented",
+ "EventCode": "0xC1",
+ "EventName": "L2D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "L2 cache refill due to prefetch. If the complex is configured with a per-complex L2 cache, this event does not count. If the complex is configured without a per-complex L2 cache, this event counts the cluster cache event, as defined by L3D_CACHE_REFILL_PREFETCH. If neither a per-complex cache or a cluster cache is configured, this event is not implemented"
+ },
+ {
+ "PublicDescription": "L1 data cache refill due to prefetch. This event counts any linefills from the prefetcher that cause an allocation into the L1 data cache",
+ "EventCode": "0xC2",
+ "EventName": "L1D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "L1 data cache refill due to prefetch. This event counts any linefills from the prefetcher that cause an allocation into the L1 data cache"
+ },
+ {
+ "PublicDescription": "L2 cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L2 cache",
+ "EventCode": "0xC3",
+ "EventName": "L2D_WS_MODE",
+ "BriefDescription": "L2 cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L2 cache"
+ },
+ {
+ "PublicDescription": "L1 data cache entering write streaming mode. This event counts for each entry into write streaming mode",
+ "EventCode": "0xC4",
+ "EventName": "L1D_WS_MODE_ENTRY",
+ "BriefDescription": "L1 data cache entering write streaming mode. This event counts for each entry into write streaming mode"
+ },
+ {
+ "PublicDescription": "L1 data cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L1 data cache",
+ "EventCode": "0xC5",
+ "EventName": "L1D_WS_MODE",
+ "BriefDescription": "L1 data cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L1 data cache"
+ },
+ {
+ "PublicDescription": "L3 cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L3 cache",
+ "EventCode": "0xC7",
+ "EventName": "L3D_WS_MODE",
+ "BriefDescription": "L3 cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the L3 cache"
+ },
+ {
+ "PublicDescription": "Last level cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the system cache",
+ "EventCode": "0xC8",
+ "EventName": "LL_WS_MODE",
+ "BriefDescription": "Last level cache write streaming mode. This event counts for each cycle where the core is in write streaming mode and is not allocating writes into the system cache"
+ },
+ {
+ "PublicDescription": "L2 TLB walk cache access. This event does not count if the MMU is disabled",
+ "EventCode": "0xD0",
+ "EventName": "L2D_WALK_TLB",
+ "BriefDescription": "L2 TLB walk cache access. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "L2 TLB walk cache refill. This event does not count if the MMU is disabled",
+ "EventCode": "0xD1",
+ "EventName": "L2D_WALK_TLB_REFILL",
+ "BriefDescription": "L2 TLB walk cache refill. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "L2 TLB IPA cache access. This event counts on each access to the IPA cache. If a single translation table walk needs to make multiple accesses to the IPA cache, each access is counted. If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xD4",
+ "EventName": "L2D_S2_TLB",
+ "BriefDescription": "L2 TLB IPA cache access. This event counts on each access to the IPA cache. If a single translation table walk needs to make multiple accesses to the IPA cache, each access is counted. If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "L2 TLB IPA cache refill. This event counts on each refill of the IPA cache. If a single translation table walk needs to make multiple accesses to the IPA cache, each access that causes a refill is counted. If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xD5",
+ "EventName": "L2D_S2_TLB_REFILL",
+ "BriefDescription": "L2 TLB IPA cache refill. This event counts on each refill of the IPA cache. If a single translation table walk needs to make multiple accesses to the IPA cache, each access that causes a refill is counted. If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "L2 cache stash dropped. This event counts on each stash request that is received from the interconnect or the Accelerator Coherency Port (ACP), that targets L2 cache and is dropped due to lack of buffer space to hold the request",
+ "EventCode": "0xD6",
+ "EventName": "L2D_CACHE_STASH_DROPPED",
+ "BriefDescription": "L2 cache stash dropped. This event counts on each stash request that is received from the interconnect or the Accelerator Coherency Port (ACP), that targets L2 cache and is dropped due to lack of buffer space to hold the request"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/instruction.json
new file mode 100644
index 000000000000..3039d03412df
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/instruction.json
@@ -0,0 +1,95 @@
+[
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/memory.json
new file mode 100644
index 000000000000..38f459502514
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/memory.json
@@ -0,0 +1,32 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pipeline.json
new file mode 100644
index 000000000000..325daaa7b809
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pipeline.json
@@ -0,0 +1,107 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, cache miss. This event counts every cycle that the Data Processing Unit (DPU) instruction queue is empty and there is an instruction cache miss being processed",
+ "EventCode": "0xE1",
+ "EventName": "STALL_FRONTEND_CACHE",
+ "BriefDescription": "No operation issued due to the frontend, cache miss. This event counts every cycle that the Data Processing Unit (DPU) instruction queue is empty and there is an instruction cache miss being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, TLB miss. This event counts every cycle that the DPU instruction queue is empty and there is an instruction L1 TLB miss being processed",
+ "EventCode": "0xE2",
+ "EventName": "STALL_FRONTEND_TLB",
+ "BriefDescription": "No operation issued due to the frontend, TLB miss. This event counts every cycle that the DPU instruction queue is empty and there is an instruction L1 TLB miss being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, pre-decode error",
+ "EventCode": "0xE3",
+ "EventName": "STALL_FRONTEND_PDERR",
+ "BriefDescription": "No operation issued due to the frontend, pre-decode error"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend interlock. This event counts every cycle where the issue of an operation is stalled and there is an interlock. Stall cycles due to a stall in the Wr stage are excluded",
+ "EventCode": "0xE4",
+ "EventName": "STALL_BACKEND_ILOCK",
+ "BriefDescription": "No operation issued due to the backend interlock. This event counts every cycle where the issue of an operation is stalled and there is an interlock. Stall cycles due to a stall in the Wr stage are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, address interlock. This event counts every cycle where the issue of an operation is stalled and there is an interlock on an address operand. This type of interlock is caused by a load/store instruction waiting for data to calculate the address. Stall cycles due to a stall in the Wr stage are excluded",
+ "EventCode": "0xE5",
+ "EventName": "STALL_BACKEND_ILOCK_ADDR",
+ "BriefDescription": "No operation issued due to the backend, address interlock. This event counts every cycle where the issue of an operation is stalled and there is an interlock on an address operand. This type of interlock is caused by a load/store instruction waiting for data to calculate the address. Stall cycles due to a stall in the Wr stage are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, interlock, or the Vector Processing Unit (VPU). This event counts every cycle where there is a stall or an interlock that is caused by a VPU instruction. Stall cycles due to a stall in the Wr stage are excluded",
+ "EventCode": "0xE6",
+ "EventName": "STALL_BACKEND_ILOCK_VPU",
+ "BriefDescription": "No operation issued due to the backend, interlock, or the Vector Processing Unit (VPU). This event counts every cycle where there is a stall or an interlock that is caused by a VPU instruction. Stall cycles due to a stall in the Wr stage are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load. This event counts every cycle where there is a stall in the Wr stage due to a load",
+ "EventCode": "0xE7",
+ "EventName": "STALL_BACKEND_LD",
+ "BriefDescription": "No operation issued due to the backend, load. This event counts every cycle where there is a stall in the Wr stage due to a load"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store. This event counts every cycle where there is a stall in the Wr stage due to a store",
+ "EventCode": "0xE8",
+ "EventName": "STALL_BACKEND_ST",
+ "BriefDescription": "No operation issued due to the backend, store. This event counts every cycle where there is a stall in the Wr stage due to a store"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load, cache miss. This event counts every cycle where there is a stall in the Wr stage due to a load that is waiting on data. The event counts for stalls that are caused by missing the cache or where the data is Non-cacheable",
+ "EventCode": "0xE9",
+ "EventName": "STALL_BACKEND_LD_CACHE",
+ "BriefDescription": "No operation issued due to the backend, load, cache miss. This event counts every cycle where there is a stall in the Wr stage due to a load that is waiting on data. The event counts for stalls that are caused by missing the cache or where the data is Non-cacheable"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load, TLB miss. This event counts every cycle where there is a stall in the Wr stage due to a load that misses in the L1 TLB",
+ "EventCode": "0xEA",
+ "EventName": "STALL_BACKEND_LD_TLB",
+ "BriefDescription": "No operation issued due to the backend, load, TLB miss. This event counts every cycle where there is a stall in the Wr stage due to a load that misses in the L1 TLB"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store, Store Buffer (STB) full. This event counts every cycle where there is a stall in the Wr stage because of a store operation that is waiting due to the STB being full",
+ "EventCode": "0xEB",
+ "EventName": "STALL_BACKEND_ST_STB",
+ "BriefDescription": "No operation issued due to the backend, store, Store Buffer (STB) full. This event counts every cycle where there is a stall in the Wr stage because of a store operation that is waiting due to the STB being full"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store, TLB miss. This event counts every cycle where there is a stall in the Wr stage because of a store operation that has missed in the L1 TLB",
+ "EventCode": "0xEC",
+ "EventName": "STALL_BACKEND_ST_TLB",
+ "BriefDescription": "No operation issued due to the backend, store, TLB miss. This event counts every cycle where there is a stall in the Wr stage because of a store operation that has missed in the L1 TLB"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, VPU hazard. This event counts every cycle where the core stalls due to contention for the VPU with the other core",
+ "EventCode": "0xED",
+ "EventName": "STALL_BACKEND_VPU_HAZARD",
+ "BriefDescription": "No operation issued due to the backend, VPU hazard. This event counts every cycle where the core stalls due to contention for the VPU with the other core"
+ },
+ {
+ "PublicDescription": "Issue slot not issued due to interlock. For each cycle, this event counts each dispatch slot that does not issue due to an interlock",
+ "EventCode": "0xEE",
+ "EventName": "STALL_SLOT_BACKEND_ILOCK",
+ "BriefDescription": "Issue slot not issued due to interlock. For each cycle, this event counts each dispatch slot that does not issue due to an interlock"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pmu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pmu.json
new file mode 100644
index 000000000000..d8b7b9f9e5fa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/pmu.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "PMU_OVFS"
+ },
+ {
+ "ArchStdEvent": "PMU_HOVFS"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/trace.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/trace.json
new file mode 100644
index 000000000000..33672a8711d4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a510/trace.json
@@ -0,0 +1,32 @@
+[
+ {
+ "ArchStdEvent": "TRB_WRAP"
+ },
+ {
+ "ArchStdEvent": "TRB_TRIG"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT1"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT2"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT3"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT5"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT6"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT7"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/branch.json
new file mode 100644
index 000000000000..8633d5db42a0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/branch.json
@@ -0,0 +1,59 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "PublicDescription": "Predicted conditional branch executed.This event counts when any branch which can be predicted by the conditional predictor is retired. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xC9",
+ "EventName": "BR_COND_PRED",
+ "BriefDescription": "Predicted conditional branch executed.This event counts when any branch which can be predicted by the conditional predictor is retired. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Indirect branch mis-predicted.This event counts when any indirect branch which can be predicted by the BTAC is retired, and has mispredicted for either the condition or the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCA",
+ "EventName": "BR_INDIRECT_MIS_PRED",
+ "BriefDescription": "Indirect branch mis-predicted.This event counts when any indirect branch which can be predicted by the BTAC is retired, and has mispredicted for either the condition or the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Indirect branch mis-predicted due to address mis-compare.This event counts when any indirect branch which can be predicted by the BTAC is retired, was taken and correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCB",
+ "EventName": "BR_INDIRECT_ADDR_MIS_PRED",
+ "BriefDescription": "Indirect branch mis-predicted due to address mis-compare.This event counts when any indirect branch which can be predicted by the BTAC is retired, was taken and correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Conditional branch mis-predicted.This event counts when any branch which can be predicted by the conditional predictor is retired, and has mis-predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off. Conditional indirect branches which correctly predicted the condition but mis-predicted on the address do not count this event",
+ "EventCode": "0xCC",
+ "EventName": "BR_COND_MIS_PRED",
+ "BriefDescription": "Conditional branch mis-predicted.This event counts when any branch which can be predicted by the conditional predictor is retired, and has mis-predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off. Conditional indirect branches which correctly predicted the condition but mis-predicted on the address do not count this event"
+ },
+ {
+ "PublicDescription": "Indirect branch with predicted address executed.This event counts when any indirect branch which can be predicted by the BTAC is retired, was taken and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCD",
+ "EventName": "BR_INDIRECT_ADDR_PRED",
+ "BriefDescription": "Indirect branch with predicted address executed.This event counts when any indirect branch which can be predicted by the BTAC is retired, was taken and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Procedure return with predicted address executed.This event counts when any procedure return which can be predicted by the CRS is retired, was taken and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCE",
+ "EventName": "BR_RETURN_ADDR_PRED",
+ "BriefDescription": "Procedure return with predicted address executed.This event counts when any procedure return which can be predicted by the CRS is retired, was taken and correctly predicted the condition. This event still counts when branch prediction is disabled due to the MMU being off"
+ },
+ {
+ "PublicDescription": "Procedure return mis-predicted due to address mis-compare.This event counts when any procedure return which can be predicted by the CRS is retired, was taken and correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off",
+ "EventCode": "0xCF",
+ "EventName": "BR_RETURN_ADDR_MIS_PRED",
+ "BriefDescription": "Procedure return mis-predicted due to address mis-compare.This event counts when any procedure return which can be predicted by the CRS is retired, was taken and correctly predicted the condition, and has mispredicted the address. This event still counts when branch prediction is disabled due to the MMU being off"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/cache.json
new file mode 100644
index 000000000000..cd684c7ae026
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/cache.json
@@ -0,0 +1,188 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ },
+ {
+ "PublicDescription": "Level 3 cache refill due to prefetch. This event counts any linefills from the hardware prefetcher which cause an allocation into the L3 cache. Note It might not be possible to both distinguish hardware vs software prefetches and also which prefetches cause an allocation. If so, only hardware prefetches should be counted, regardless of whether they allocate. If either the core is configured without a per-core L2 or the cluster is configured without an L3 cache, this event is not implemented",
+ "EventCode": "0xC0",
+ "EventName": "L3D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "Level 3 cache refill due to prefetch. This event counts any linefills from the hardware prefetcher which cause an allocation into the L3 cache. Note It might not be possible to both distinguish hardware vs software prefetches and also which prefetches cause an allocation. If so, only hardware prefetches should be counted, regardless of whether they allocate. If either the core is configured without a per-core L2 or the cluster is configured without an L3 cache, this event is not implemented"
+ },
+ {
+ "PublicDescription": "Level 2 cache refill due to prefetch. +//0 If the core is configured with a per-core L2 cache: This event does not count. +//0 If the core is configured without a per-core L2 cache: This event counts the cluster cache event, as defined by L3D_CACHE_REFILL_PREFETCH. +//0 If there is neither a per-core cache nor a cluster cache configured, this event is not implemented",
+ "EventCode": "0xC1",
+ "EventName": "L2D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "Level 2 cache refill due to prefetch. +//0 If the core is configured with a per-core L2 cache: This event does not count. +//0 If the core is configured without a per-core L2 cache: This event counts the cluster cache event, as defined by L3D_CACHE_REFILL_PREFETCH. +//0 If there is neither a per-core cache nor a cluster cache configured, this event is not implemented"
+ },
+ {
+ "PublicDescription": "Level 1 data cache refill due to prefetch. This event counts any linefills from the prefetcher which cause an allocation into the L1 D-cache",
+ "EventCode": "0xC2",
+ "EventName": "L1D_CACHE_REFILL_PREFETCH",
+ "BriefDescription": "Level 1 data cache refill due to prefetch. This event counts any linefills from the prefetcher which cause an allocation into the L1 D-cache"
+ },
+ {
+ "PublicDescription": "Level 2 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L2 cache",
+ "EventCode": "0xC3",
+ "EventName": "L2D_WS_MODE",
+ "BriefDescription": "Level 2 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L2 cache"
+ },
+ {
+ "PublicDescription": "Level 1 data cache entering write streaming mode.This event counts for each entry into write-streaming mode",
+ "EventCode": "0xC4",
+ "EventName": "L1D_WS_MODE_ENTRY",
+ "BriefDescription": "Level 1 data cache entering write streaming mode.This event counts for each entry into write-streaming mode"
+ },
+ {
+ "PublicDescription": "Level 1 data cache write streaming mode.This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L1 D-cache",
+ "EventCode": "0xC5",
+ "EventName": "L1D_WS_MODE",
+ "BriefDescription": "Level 1 data cache write streaming mode.This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L1 D-cache"
+ },
+ {
+ "PublicDescription": "Level 3 cache write streaming mode.This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L3 cache",
+ "EventCode": "0xC7",
+ "EventName": "L3D_WS_MODE",
+ "BriefDescription": "Level 3 cache write streaming mode.This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L3 cache"
+ },
+ {
+ "PublicDescription": "Level 2 TLB last-level walk cache access.This event does not count if the MMU is disabled",
+ "EventCode": "0xD0",
+ "EventName": "L2D_LLWALK_TLB",
+ "BriefDescription": "Level 2 TLB last-level walk cache access.This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB last-level walk cache refill.This event does not count if the MMU is disabled",
+ "EventCode": "0xD1",
+ "EventName": "L2D_LLWALK_TLB_REFILL",
+ "BriefDescription": "Level 2 TLB last-level walk cache refill.This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB level-2 walk cache access.This event counts accesses to the level-2 walk cache where the last-level walk cache has missed. The event only counts when the translation regime of the pagewalk uses level 2 descriptors. This event does not count if the MMU is disabled",
+ "EventCode": "0xD2",
+ "EventName": "L2D_L2WALK_TLB",
+ "BriefDescription": "Level 2 TLB level-2 walk cache access.This event counts accesses to the level-2 walk cache where the last-level walk cache has missed. The event only counts when the translation regime of the pagewalk uses level 2 descriptors. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB level-2 walk cache refill.This event does not count if the MMU is disabled",
+ "EventCode": "0xD3",
+ "EventName": "L2D_L2WALK_TLB_REFILL",
+ "BriefDescription": "Level 2 TLB level-2 walk cache refill.This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB IPA cache access. This event counts on each access to the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access is counted. +//0 If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xD4",
+ "EventName": "L2D_S2_TLB",
+ "BriefDescription": "Level 2 TLB IPA cache access. This event counts on each access to the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access is counted. +//0 If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "Level 2 TLB IPA cache refill. This event counts on each refill of the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access which causes a refill is counted. +//0 If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xD5",
+ "EventName": "L2D_S2_TLB_REFILL",
+ "BriefDescription": "Level 2 TLB IPA cache refill. This event counts on each refill of the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access which causes a refill is counted. +//0 If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "Level 2 cache stash dropped.This event counts on each stash request received from the interconnect or ACP, that is targeting L2 and gets dropped due to lack of buffer space to hold the request",
+ "EventCode": "0xD6",
+ "EventName": "L2D_CACHE_STASH_DROPPED",
+ "BriefDescription": "Level 2 cache stash dropped.This event counts on each stash request received from the interconnect or ACP, that is targeting L2 and gets dropped due to lack of buffer space to hold the request"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/exception.json
new file mode 100644
index 000000000000..99f1ab987709
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/exception.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "PublicDescription": "Predecode error",
+ "EventCode": "0xC6",
+ "EventName": "PREDECODE_ERROR",
+ "BriefDescription": "Predecode error"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/instruction.json
new file mode 100644
index 000000000000..e762fab9e2d8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/instruction.json
@@ -0,0 +1,65 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/memory.json
new file mode 100644
index 000000000000..d9229173d189
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/memory.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/pipeline.json
new file mode 100644
index 000000000000..6c6b5869cf70
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a55/pipeline.json
@@ -0,0 +1,80 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, cache miss.This event counts every cycle the DPU IQ is empty and there is an instruction cache miss being processed",
+ "EventCode": "0xE1",
+ "EventName": "STALL_FRONTEND_CACHE",
+ "BriefDescription": "No operation issued due to the frontend, cache miss.This event counts every cycle the DPU IQ is empty and there is an instruction cache miss being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, TLB miss.This event counts every cycle the DPU IQ is empty and there is an instruction L1 TLB miss being processed",
+ "EventCode": "0xE2",
+ "EventName": "STALL_FRONTEND_TLB",
+ "BriefDescription": "No operation issued due to the frontend, TLB miss.This event counts every cycle the DPU IQ is empty and there is an instruction L1 TLB miss being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the frontend, pre-decode error.This event counts every cycle the DPU IQ is empty and there is a pre-decode error being processed",
+ "EventCode": "0xE3",
+ "EventName": "STALL_FRONTEND_PDERR",
+ "BriefDescription": "No operation issued due to the frontend, pre-decode error.This event counts every cycle the DPU IQ is empty and there is a pre-decode error being processed"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend interlock.This event counts every cycle that issue is stalled and there is an interlock. Stall cycles due to a stall in Wr (typically awaiting load data) are excluded",
+ "EventCode": "0xE4",
+ "EventName": "STALL_BACKEND_ILOCK",
+ "BriefDescription": "No operation issued due to the backend interlock.This event counts every cycle that issue is stalled and there is an interlock. Stall cycles due to a stall in Wr (typically awaiting load data) are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, interlock, AGU.This event counts every cycle that issue is stalled and there is an interlock that is due to a load/store instruction waiting for data to calculate the address in the AGU. Stall cycles due to a stall in Wr (typically awaiting load data) are excluded",
+ "EventCode": "0xE5",
+ "EventName": "STALL_BACKEND_ILOCK_AGU",
+ "BriefDescription": "No operation issued due to the backend, interlock, AGU.This event counts every cycle that issue is stalled and there is an interlock that is due to a load/store instruction waiting for data to calculate the address in the AGU. Stall cycles due to a stall in Wr (typically awaiting load data) are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, interlock, FPU.This event counts every cycle that issue is stalled and there is an interlock that is due to an FPU/NEON instruction. Stall cycles due to a stall in the Wr stage (typically awaiting load data) are excluded",
+ "EventCode": "0xE6",
+ "EventName": "STALL_BACKEND_ILOCK_FPU",
+ "BriefDescription": "No operation issued due to the backend, interlock, FPU.This event counts every cycle that issue is stalled and there is an interlock that is due to an FPU/NEON instruction. Stall cycles due to a stall in the Wr stage (typically awaiting load data) are excluded"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load.This event counts every cycle there is a stall in the Wr stage due to a load",
+ "EventCode": "0xE7",
+ "EventName": "STALL_BACKEND_LD",
+ "BriefDescription": "No operation issued due to the backend, load.This event counts every cycle there is a stall in the Wr stage due to a load"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store.This event counts every cycle there is a stall in the Wr stage due to a store",
+ "EventCode": "0xE8",
+ "EventName": "STALL_BACKEND_ST",
+ "BriefDescription": "No operation issued due to the backend, store.This event counts every cycle there is a stall in the Wr stage due to a store"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load, cache miss.This event counts every cycle there is a stall in the Wr stage due to a load which is waiting on data (due to missing the cache or being non-cacheable)",
+ "EventCode": "0xE9",
+ "EventName": "STALL_BACKEND_LD_CACHE",
+ "BriefDescription": "No operation issued due to the backend, load, cache miss.This event counts every cycle there is a stall in the Wr stage due to a load which is waiting on data (due to missing the cache or being non-cacheable)"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, load, TLB miss.This event counts every cycle there is a stall in the Wr stage due to a load which has missed in the L1 TLB",
+ "EventCode": "0xEA",
+ "EventName": "STALL_BACKEND_LD_TLB",
+ "BriefDescription": "No operation issued due to the backend, load, TLB miss.This event counts every cycle there is a stall in the Wr stage due to a load which has missed in the L1 TLB"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store, STB full.This event counts every cycle there is a stall in the Wr stage due to a store which is waiting due to the STB being full",
+ "EventCode": "0xEB",
+ "EventName": "STALL_BACKEND_ST_STB",
+ "BriefDescription": "No operation issued due to the backend, store, STB full.This event counts every cycle there is a stall in the Wr stage due to a store which is waiting due to the STB being full"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend, store, TLB miss.This event counts every cycle there is a stall in the Wr stage due to a store which has missed in the L1 TLB",
+ "EventCode": "0xEC",
+ "EventName": "STALL_BACKEND_ST_TLB",
+ "BriefDescription": "No operation issued due to the backend, store, TLB miss.This event counts every cycle there is a stall in the Wr stage due to a store which has missed in the L1 TLB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/bus.json
new file mode 100644
index 000000000000..31505994c06c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/bus.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_SHARED"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NOT_SHARED"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NORMAL"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_PERIPH"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/cache.json
new file mode 100644
index 000000000000..1bd59e7d982b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/cache.json
@@ -0,0 +1,80 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
deleted file mode 100644
index 543c7692677a..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/core-imp-def.json
+++ /dev/null
@@ -1,179 +0,0 @@
-[
- {
- "ArchStdEvent": "L1D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L1D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L2D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_RD"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_WR"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_SHARED"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_NOT_SHARED"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_NORMAL"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_PERIPH"
- },
- {
- "ArchStdEvent": "MEM_ACCESS_RD"
- },
- {
- "ArchStdEvent": "MEM_ACCESS_WR"
- },
- {
- "ArchStdEvent": "UNALIGNED_LD_SPEC"
- },
- {
- "ArchStdEvent": "UNALIGNED_ST_SPEC"
- },
- {
- "ArchStdEvent": "UNALIGNED_LDST_SPEC"
- },
- {
- "ArchStdEvent": "LDREX_SPEC"
- },
- {
- "ArchStdEvent": "STREX_PASS_SPEC"
- },
- {
- "ArchStdEvent": "STREX_FAIL_SPEC"
- },
- {
- "ArchStdEvent": "LD_SPEC"
- },
- {
- "ArchStdEvent": "ST_SPEC"
- },
- {
- "ArchStdEvent": "LDST_SPEC"
- },
- {
- "ArchStdEvent": "DP_SPEC"
- },
- {
- "ArchStdEvent": "ASE_SPEC"
- },
- {
- "ArchStdEvent": "VFP_SPEC"
- },
- {
- "ArchStdEvent": "PC_WRITE_SPEC"
- },
- {
- "ArchStdEvent": "CRYPTO_SPEC"
- },
- {
- "ArchStdEvent": "BR_IMMED_SPEC"
- },
- {
- "ArchStdEvent": "BR_RETURN_SPEC"
- },
- {
- "ArchStdEvent": "BR_INDIRECT_SPEC"
- },
- {
- "ArchStdEvent": "ISB_SPEC"
- },
- {
- "ArchStdEvent": "DSB_SPEC"
- },
- {
- "ArchStdEvent": "DMB_SPEC"
- },
- {
- "ArchStdEvent": "EXC_UNDEF"
- },
- {
- "ArchStdEvent": "EXC_SVC"
- },
- {
- "ArchStdEvent": "EXC_PABORT"
- },
- {
- "ArchStdEvent": "EXC_DABORT"
- },
- {
- "ArchStdEvent": "EXC_IRQ"
- },
- {
- "ArchStdEvent": "EXC_FIQ"
- },
- {
- "ArchStdEvent": "EXC_SMC"
- },
- {
- "ArchStdEvent": "EXC_HVC"
- },
- {
- "ArchStdEvent": "EXC_TRAP_PABORT"
- },
- {
- "ArchStdEvent": "EXC_TRAP_DABORT"
- },
- {
- "ArchStdEvent": "EXC_TRAP_OTHER"
- },
- {
- "ArchStdEvent": "EXC_TRAP_IRQ"
- },
- {
- "ArchStdEvent": "EXC_TRAP_FIQ"
- },
- {
- "ArchStdEvent": "RC_LD_SPEC"
- },
- {
- "ArchStdEvent": "RC_ST_SPEC"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/exception.json
index 98d29c862320..344a2d552ad5 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/exception.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/exception.json
@@ -1,52 +1,47 @@
[
{
- "EventCode": "0x09",
- "EventName": "EXC_TAKEN",
- "BriefDescription": "Exception taken."
+ "ArchStdEvent": "EXC_TAKEN"
},
{
- "PublicDescription": "Local memory error. This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs",
- "EventCode": "0x1A",
- "EventName": "MEMORY_ERROR",
- "BriefDescription": "Local memory error."
+ "ArchStdEvent": "MEMORY_ERROR"
},
{
- "ArchStdEvent": "EXC_DABORT"
+ "ArchStdEvent": "EXC_UNDEF"
},
{
- "ArchStdEvent": "EXC_FIQ"
+ "ArchStdEvent": "EXC_SVC"
},
{
- "ArchStdEvent": "EXC_HVC"
+ "ArchStdEvent": "EXC_PABORT"
},
{
- "ArchStdEvent": "EXC_IRQ"
+ "ArchStdEvent": "EXC_DABORT"
},
{
- "ArchStdEvent": "EXC_PABORT"
+ "ArchStdEvent": "EXC_IRQ"
},
{
- "ArchStdEvent": "EXC_SMC"
+ "ArchStdEvent": "EXC_FIQ"
},
{
- "ArchStdEvent": "EXC_SVC"
+ "ArchStdEvent": "EXC_SMC"
},
{
- "ArchStdEvent": "EXC_TRAP_DABORT"
+ "ArchStdEvent": "EXC_HVC"
},
{
- "ArchStdEvent": "EXC_TRAP_FIQ"
+ "ArchStdEvent": "EXC_TRAP_PABORT"
},
{
- "ArchStdEvent": "EXC_TRAP_IRQ"
+ "ArchStdEvent": "EXC_TRAP_DABORT"
},
{
"ArchStdEvent": "EXC_TRAP_OTHER"
},
{
- "ArchStdEvent": "EXC_TRAP_PABORT"
+ "ArchStdEvent": "EXC_TRAP_IRQ"
},
{
- "ArchStdEvent": "EXC_UNDEF"
+ "ArchStdEvent": "EXC_TRAP_FIQ"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/instruction.json
new file mode 100644
index 000000000000..e42486d406b3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/instruction.json
@@ -0,0 +1,68 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/memory.json
new file mode 100644
index 000000000000..e3d08f1f7c92
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a57-a72/memory.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/cache.json
new file mode 100644
index 000000000000..118c5cb0674b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/cache.json
@@ -0,0 +1,236 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ },
+ {
+ "PublicDescription": "Merge in the store buffer",
+ "EventCode": "0xC0",
+ "EventName": "STB_STALL",
+ "BriefDescription": "Merge in the store buffer"
+ },
+ {
+ "PublicDescription": "Level 1 data cache refill started due to prefetch. Counts any linefills from the prefetcher which cause an allocation into the L1 D-cache",
+ "EventCode": "0xC3",
+ "EventName": "L1D_PREF_LINE_FILL",
+ "BriefDescription": "Level 1 data cache refill started due to prefetch. Counts any linefills from the prefetcher which cause an allocation into the L1 D-cache"
+ },
+ {
+ "PublicDescription": "Level 2 cache refill due to prefetch. +//0 If the core is configured with a per-core L2 cache: This event does not count. +//0 If the core is configured without a per-core L2 cache: This event counts the cluster cache event, as defined by L3_PREF_LINE_FILL. +//0 If there is neither a per-core cache nor a cluster cache configured, this event is not implemented",
+ "EventCode": "0xC4",
+ "EventName": "L2D_PREF_LINE_FILL",
+ "BriefDescription": "Level 2 cache refill due to prefetch. +//0 If the core is configured with a per-core L2 cache: This event does not count. +//0 If the core is configured without a per-core L2 cache: This event counts the cluster cache event, as defined by L3_PREF_LINE_FILL. +//0 If there is neither a per-core cache nor a cluster cache configured, this event is not implemented"
+ },
+ {
+ "PublicDescription": "Level 3 cache refill due to prefetch. This event counts any linefills from the hardware prefetcher which cause an allocation into the L3 cache. Note It might not be possible to distinguish between both hardware and software prefetches and also which prefetches cause an allocation. If so, only hardware prefetches should be counted, regardless of whether they allocate. If either the core is configured without a per-core L2 or the cluster is configured without an L3 cache, this event is not implemented",
+ "EventCode": "0xC5",
+ "EventName": "L3_PREF_LINE_FILL",
+ "BriefDescription": "Level 3 cache refill due to prefetch. This event counts any linefills from the hardware prefetcher which cause an allocation into the L3 cache. Note It might not be possible to distinguish between both hardware and software prefetches and also which prefetches cause an allocation. If so, only hardware prefetches should be counted, regardless of whether they allocate. If either the core is configured without a per-core L2 or the cluster is configured without an L3 cache, this event is not implemented"
+ },
+ {
+ "PublicDescription": "L1D entering write stream mode",
+ "EventCode": "0xC6",
+ "EventName": "L1D_WS_MODE_ENTER",
+ "BriefDescription": "L1D entering write stream mode"
+ },
+ {
+ "PublicDescription": "L1D is in write stream mode",
+ "EventCode": "0xC7",
+ "EventName": "L1D_WS_MODE",
+ "BriefDescription": "L1D is in write stream mode"
+ },
+ {
+ "PublicDescription": "Level 2 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L2 cache",
+ "EventCode": "0xC8",
+ "EventName": "L2D_WS_MODE",
+ "BriefDescription": "Level 2 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L2 cache"
+ },
+ {
+ "PublicDescription": "Level 3 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L3 cache",
+ "EventCode": "0xC9",
+ "EventName": "L3D_WS_MODE",
+ "BriefDescription": "Level 3 cache write streaming mode. This event counts for each cycle where the core is in write-streaming mode and not allocating writes into the L3 cache"
+ },
+ {
+ "PublicDescription": "Level 2 TLB last-level walk cache access. This event does not count if the MMU is disabled",
+ "EventCode": "0xCA",
+ "EventName": "TLB_L2TLB_LLWALK_ACCESS",
+ "BriefDescription": "Level 2 TLB last-level walk cache access. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB last-level walk cache refill. This event does not count if the MMU is disabled",
+ "EventCode": "0xCB",
+ "EventName": "TLB_L2TLB_LLWALK_REFILL",
+ "BriefDescription": "Level 2 TLB last-level walk cache refill. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB level-2 walk cache access. This event counts accesses to the level-2 walk cache where the last-level walk cache has missed. The event only counts when the translation regime of the pagewalk uses level 2 descriptors. This event does not count if the MMU is disabled",
+ "EventCode": "0xCC",
+ "EventName": "TLB_L2TLB_L2WALK_ACCESS",
+ "BriefDescription": "Level 2 TLB level-2 walk cache access. This event counts accesses to the level-2 walk cache where the last-level walk cache has missed. The event only counts when the translation regime of the pagewalk uses level 2 descriptors. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB level-2 walk cache refill. This event does not count if the MMU is disabled",
+ "EventCode": "0xCD",
+ "EventName": "TLB_L2TLB_L2WALK_REFILL",
+ "BriefDescription": "Level 2 TLB level-2 walk cache refill. This event does not count if the MMU is disabled"
+ },
+ {
+ "PublicDescription": "Level 2 TLB IPA cache access. This event counts on each access to the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access is counted. +//0 If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xCE",
+ "EventName": "TLB_L2TLB_S2_ACCESS",
+ "BriefDescription": "Level 2 TLB IPA cache access. This event counts on each access to the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access is counted. +//0 If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "Level 2 TLB IPA cache refill. This event counts on each refill of the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access which causes a refill is counted. +//0 If stage 2 translation is disabled, this event does not count",
+ "EventCode": "0xCF",
+ "EventName": "TLB_L2TLB_S2_REFILL",
+ "BriefDescription": "Level 2 TLB IPA cache refill. This event counts on each refill of the IPA cache. +//0 If a single pagewalk needs to make multiple accesses to the IPA cache, each access which causes a refill is counted. +//0 If stage 2 translation is disabled, this event does not count"
+ },
+ {
+ "PublicDescription": "Unattributable Level 1 data cache write-back. This event occurs when a requestor outside the PE makes a coherency request that results in writeback",
+ "EventCode": "0xF0",
+ "EventName": "L2_L1D_CACHE_WB_UNATT",
+ "BriefDescription": "Unattributable Level 1 data cache write-back. This event occurs when a requestor outside the PE makes a coherency request that results in writeback"
+ },
+ {
+ "PublicDescription": "Unattributable Level 2 data cache access. This event occurs when a requestor outside the PE makes a coherency request that results in level 2 data cache access",
+ "EventCode": "0xF1",
+ "EventName": "L2_L2D_CACHE_UNATT",
+ "BriefDescription": "Unattributable Level 2 data cache access. This event occurs when a requestor outside the PE makes a coherency request that results in level 2 data cache access"
+ },
+ {
+ "PublicDescription": "Unattributable Level 2 data cache access, read. This event occurs when a requestor outside the PE makes a coherency request that results in level 2 data cache read access",
+ "EventCode": "0xF2",
+ "EventName": "L2_L2D_CACHE_RD_UNATT",
+ "BriefDescription": "Unattributable Level 2 data cache access, read. This event occurs when a requestor outside the PE makes a coherency request that results in level 2 data cache read access"
+ },
+ {
+ "PublicDescription": "Unattributable Level 3 data cache access. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 data cache read access",
+ "EventCode": "0xF3",
+ "EventName": "L2_L3D_CACHE_UNATT",
+ "BriefDescription": "Unattributable Level 3 data cache access. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 data cache read access"
+ },
+ {
+ "PublicDescription": "Unattributable Level 3 data cache access, read. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 data cache read access",
+ "EventCode": "0xF4",
+ "EventName": "L2_L3D_CACHE_RD_UNATT",
+ "BriefDescription": "Unattributable Level 3 data cache access, read. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 data cache read access"
+ },
+ {
+ "PublicDescription": "Unattributable Level 3 data or unified cache allocation without refill. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 cache allocate without refill",
+ "EventCode": "0xF5",
+ "EventName": "L2_L3D_CACHE_ALLOC_UNATT",
+ "BriefDescription": "Unattributable Level 3 data or unified cache allocation without refill. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 cache allocate without refill"
+ },
+ {
+ "PublicDescription": "Unattributable Level 3 data or unified cache refill. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 cache refill",
+ "EventCode": "0xF6",
+ "EventName": "L2_L3D_CACHE_REFILL_UNATT",
+ "BriefDescription": "Unattributable Level 3 data or unified cache refill. This event occurs when a requestor outside the PE makes a coherency request that results in level 3 cache refill"
+ },
+ {
+ "PublicDescription": "Level 2 cache stash dropped. This event counts on each stash request received from the interconnect or ACP, that is targeting L2 and gets dropped due to lack of buffer space to hold the request. L2 and L3 cache events (L2D_CACHE*, L3D_CACHE*) The behavior of these events depends on the configuration of the core. If the private L2 cache is present, the L2D_CACHE* events count the activity in the private L2 cache, and the L3D_CACHE* events count the activity in the DSU L3 cache (if present). If the private L2 cache is not present but the DSU L3 cache is present, the L2D_CACHE* events count activity in the DSU L3 cache and the L3D_CACHE* events do not count. The L2D_CACHE_WB, L2D_CACHE_WR and L2D_CACHE_REFILL_WR events do not count in this configuration. If neither the private L2 cache nor the DSU L3 cache are present, neither the L2D_CACHE* or L3D_CACHE* events will count",
+ "EventCode": "0xF7",
+ "EventName": "L2D_CACHE_STASH_DROPPED",
+ "BriefDescription": "Level 2 cache stash dropped. This event counts on each stash request received from the interconnect or ACP, that is targeting L2 and gets dropped due to lack of buffer space to hold the request. L2 and L3 cache events (L2D_CACHE*, L3D_CACHE*) The behavior of these events depends on the configuration of the core. If the private L2 cache is present, the L2D_CACHE* events count the activity in the private L2 cache, and the L3D_CACHE* events count the activity in the DSU L3 cache (if present). If the private L2 cache is not present but the DSU L3 cache is present, the L2D_CACHE* events count activity in the DSU L3 cache and the L3D_CACHE* events do not count. The L2D_CACHE_WB, L2D_CACHE_WR and L2D_CACHE_REFILL_WR events do not count in this configuration. If neither the private L2 cache nor the DSU L3 cache are present, neither the L2D_CACHE* or L3D_CACHE* events will count"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/dpu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/dpu.json
new file mode 100644
index 000000000000..b8e402a91bdd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/dpu.json
@@ -0,0 +1,32 @@
+[
+ {
+ "PublicDescription": "Instruction retired, indirect branch, mispredicted",
+ "EventCode": "0xE9",
+ "EventName": "DPU_BR_IND_MIS",
+ "BriefDescription": "Instruction retired, indirect branch, mispredicted"
+ },
+ {
+ "PublicDescription": "Instruction retired, conditional branch, mispredicted",
+ "EventCode": "0xEA",
+ "EventName": "DPU_BR_COND_MIS",
+ "BriefDescription": "Instruction retired, conditional branch, mispredicted"
+ },
+ {
+ "PublicDescription": "Memory error (any type) from IFU",
+ "EventCode": "0xEB",
+ "EventName": "DPU_MEM_ERR_IFU",
+ "BriefDescription": "Memory error (any type) from IFU"
+ },
+ {
+ "PublicDescription": "Memory error (any type) from DCU",
+ "EventCode": "0xEC",
+ "EventName": "DPU_MEM_ERR_DCU",
+ "BriefDescription": "Memory error (any type) from DCU"
+ },
+ {
+ "PublicDescription": "Memory error (any type) from TLB",
+ "EventCode": "0xED",
+ "EventName": "DPU_MEM_ERR_TLB",
+ "BriefDescription": "Memory error (any type) from TLB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/exception.json
new file mode 100644
index 000000000000..27c3fe9c831a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/ifu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/ifu.json
new file mode 100644
index 000000000000..13178c5dca14
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/ifu.json
@@ -0,0 +1,122 @@
+[
+ {
+ "PublicDescription": "I-Cache miss on an access from the prefetch block",
+ "EventCode": "0xD0",
+ "EventName": "IFU_IC_MISS_WAIT",
+ "BriefDescription": "I-Cache miss on an access from the prefetch block"
+ },
+ {
+ "PublicDescription": "Counts the cycles spent on a request for Level 2 TLB lookup after a Level 1l ITLB miss",
+ "EventCode": "0xD1",
+ "EventName": "IFU_IUTLB_MISS_WAIT",
+ "BriefDescription": "Counts the cycles spent on a request for Level 2 TLB lookup after a Level 1l ITLB miss"
+ },
+ {
+ "PublicDescription": "Micro-predictor conditional/direction mispredict, with respect to. if3/if4 predictor",
+ "EventCode": "0xD2",
+ "EventName": "IFU_MICRO_COND_MISPRED",
+ "BriefDescription": "Micro-predictor conditional/direction mispredict, with respect to. if3/if4 predictor"
+ },
+ {
+ "PublicDescription": "Micro-predictor address mispredict, with respect to if3/if4 predictor",
+ "EventCode": "0xD3",
+ "EventName": "IFU_MICRO_CADDR_MISPRED",
+ "BriefDescription": "Micro-predictor address mispredict, with respect to if3/if4 predictor"
+ },
+ {
+ "PublicDescription": "Micro-predictor hit with immediate redirect",
+ "EventCode": "0xD4",
+ "EventName": "IFU_MICRO_HIT",
+ "BriefDescription": "Micro-predictor hit with immediate redirect"
+ },
+ {
+ "PublicDescription": "Micro-predictor negative cache hit",
+ "EventCode": "0xD6",
+ "EventName": "IFU_MICRO_NEG_HIT",
+ "BriefDescription": "Micro-predictor negative cache hit"
+ },
+ {
+ "PublicDescription": "Micro-predictor correction",
+ "EventCode": "0xD7",
+ "EventName": "IFU_MICRO_CORRECTION",
+ "BriefDescription": "Micro-predictor correction"
+ },
+ {
+ "PublicDescription": "A 2nd instruction could have been pushed but was not because it was nonsequential",
+ "EventCode": "0xD8",
+ "EventName": "IFU_MICRO_NO_INSTR1",
+ "BriefDescription": "A 2nd instruction could have been pushed but was not because it was nonsequential"
+ },
+ {
+ "PublicDescription": "Micro-predictor miss",
+ "EventCode": "0xD9",
+ "EventName": "IFU_MICRO_NO_PRED",
+ "BriefDescription": "Micro-predictor miss"
+ },
+ {
+ "PublicDescription": "Thread flushed due to TLB miss",
+ "EventCode": "0xDA",
+ "EventName": "IFU_FLUSHED_TLB_MISS",
+ "BriefDescription": "Thread flushed due to TLB miss"
+ },
+ {
+ "PublicDescription": "Thread flushed due to reasons other than TLB miss",
+ "EventCode": "0xDB",
+ "EventName": "IFU_FLUSHED_EXCL_TLB_MISS",
+ "BriefDescription": "Thread flushed due to reasons other than TLB miss"
+ },
+ {
+ "PublicDescription": "This thread and the other thread both ready for scheduling in if0",
+ "EventCode": "0xDC",
+ "EventName": "IFU_ALL_THRDS_RDY",
+ "BriefDescription": "This thread and the other thread both ready for scheduling in if0"
+ },
+ {
+ "PublicDescription": "This thread was arbitrated when the other thread was also ready for scheduling",
+ "EventCode": "0xDD",
+ "EventName": "IFU_WIN_ARB_OTHER_RDY",
+ "BriefDescription": "This thread was arbitrated when the other thread was also ready for scheduling"
+ },
+ {
+ "PublicDescription": "This thread was arbitrated when the other thread was also active, but not necessarily ready. For example, waiting for I-Cache or TLB",
+ "EventCode": "0xDE",
+ "EventName": "IFU_WIN_ARB_OTHER_ACT",
+ "BriefDescription": "This thread was arbitrated when the other thread was also active, but not necessarily ready. For example, waiting for I-Cache or TLB"
+ },
+ {
+ "PublicDescription": "This thread was not arbitrated because it was not ready for scheduling. For example, due to a cache miss or TLB miss",
+ "EventCode": "0xDF",
+ "EventName": "IFU_NOT_RDY_FOR_ARB",
+ "BriefDescription": "This thread was not arbitrated because it was not ready for scheduling. For example, due to a cache miss or TLB miss"
+ },
+ {
+ "PublicDescription": "The thread moved from an active state to an inactive state (long-term sleep state, causing deallocation of some resources)",
+ "EventCode": "0xE0",
+ "EventName": "IFU_GOTO_IDLE",
+ "BriefDescription": "The thread moved from an active state to an inactive state (long-term sleep state, causing deallocation of some resources)"
+ },
+ {
+ "PublicDescription": "I-Cache lookup under miss from other thread",
+ "EventCode": "0xE1",
+ "EventName": "IFU_IC_LOOKUP_UNDER_MISS",
+ "BriefDescription": "I-Cache lookup under miss from other thread"
+ },
+ {
+ "PublicDescription": "I-Cache miss under miss from other thread",
+ "EventCode": "0xE2",
+ "EventName": "IFU_IC_MISS_UNDER_MISS",
+ "BriefDescription": "I-Cache miss under miss from other thread"
+ },
+ {
+ "PublicDescription": "This thread pushed an instruction into the IQ",
+ "EventCode": "0xE3",
+ "EventName": "IFU_INSTR_PUSHED",
+ "BriefDescription": "This thread pushed an instruction into the IQ"
+ },
+ {
+ "PublicDescription": "I-Cache Speculative line fill",
+ "EventCode": "0xE4",
+ "EventName": "IFU_IC_LF_SP",
+ "BriefDescription": "I-Cache Speculative line fill"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/instruction.json
new file mode 100644
index 000000000000..2e0d60779dce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/instruction.json
@@ -0,0 +1,71 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "LD_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "PublicDescription": "Instruction retired, conditional branch",
+ "EventCode": "0xE8",
+ "EventName": "DPU_BR_COND_RETIRED",
+ "BriefDescription": "Instruction retired, conditional branch"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/memory.json
new file mode 100644
index 000000000000..18d527f7fad4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/memory.json
@@ -0,0 +1,35 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ },
+ {
+ "PublicDescription": "External memory request",
+ "EventCode": "0xC1",
+ "EventName": "BIU_EXT_MEM_REQ",
+ "BriefDescription": "External memory request"
+ },
+ {
+ "PublicDescription": "External memory request to non-cacheable memory",
+ "EventCode": "0xC2",
+ "EventName": "BIU_EXT_MEM_REQ_NC",
+ "BriefDescription": "External memory request to non-cacheable memory"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/pipeline.json
new file mode 100644
index 000000000000..eeac798d403a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a65-e1/pipeline.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/instruction.json
new file mode 100644
index 000000000000..964f47c6b099
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/instruction.json
@@ -0,0 +1,134 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/memory.json
new file mode 100644
index 000000000000..7b2b21ac150f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/memory.json
@@ -0,0 +1,41 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/trace.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/trace.json
new file mode 100644
index 000000000000..3116135c59e2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a710/trace.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "TRB_WRAP"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT1"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT2"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT3"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT5"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT6"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT7"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/branch.json
new file mode 100644
index 000000000000..ece201718284
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/branch.json
@@ -0,0 +1,11 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/bus.json
new file mode 100644
index 000000000000..103bb2535775
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/bus.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_SHARED"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NOT_SHARED"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_NORMAL"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_PERIPH"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/cache.json
new file mode 100644
index 000000000000..b9b3d3fb07b2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/cache.json
@@ -0,0 +1,107 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction cache - Tag RAM",
+ "EventCode": "0xC2",
+ "EventName": "I_TAG_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction cache - Tag RAM"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction cache - Data RAM",
+ "EventCode": "0xC3",
+ "EventName": "I_DATA_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction cache - Data RAM"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction BTAC RAM",
+ "EventCode": "0xC4",
+ "EventName": "I_BTAC_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction BTAC RAM"
+ },
+ {
+ "PublicDescription": "Level 1 PLD TLB refill",
+ "EventCode": "0xE7",
+ "EventName": "PLD_UTLB_REFILL",
+ "BriefDescription": "Level 1 PLD TLB refill"
+ },
+ {
+ "PublicDescription": "Level 1 CP15 TLB refill",
+ "EventCode": "0xE8",
+ "EventName": "CP15_UTLB_REFILL",
+ "BriefDescription": "Level 1 CP15 TLB refill"
+ },
+ {
+ "PublicDescription": "Level 1 TLB flush",
+ "EventCode": "0xE9",
+ "EventName": "UTLB_FLUSH",
+ "BriefDescription": "Level 1 TLB flush"
+ },
+ {
+ "PublicDescription": "Level 2 TLB access",
+ "EventCode": "0xEA",
+ "EventName": "TLB_ACCESS",
+ "BriefDescription": "Level 2 TLB access"
+ },
+ {
+ "PublicDescription": "Level 2 TLB miss",
+ "EventCode": "0xEB",
+ "EventName": "TLB_MISS",
+ "BriefDescription": "Level 2 TLB miss"
+ },
+ {
+ "PublicDescription": "Data cache hit in itself due to VIPT aliasing",
+ "EventCode": "0xEC",
+ "EventName": "DCACHE_SELF_HIT_VIPT",
+ "BriefDescription": "Data cache hit in itself due to VIPT aliasing"
+ }
+]
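The Cortex-A73 cache file mixes the architectural aliases with IMPLEMENTATION DEFINED events (0xC2-0xEC here); those have no shared arm64 definition, so each one spells out EventCode, EventName and both description strings locally. Any further core-specific counter would follow the same shape; the entry below is a purely hypothetical illustration (the event code and name are invented here, not taken from the A73 documentation):

    {
        "PublicDescription": "Hypothetical implementation defined event, shown only to illustrate the fields a local definition needs",
        "EventCode": "0xC5",
        "EventName": "EXAMPLE_IMP_DEF_EVENT",
        "BriefDescription": "Hypothetical implementation defined event"
    }

Such counters can also be programmed without any JSON at all via a raw event code (roughly "perf stat -e rc2" for I_TAG_RAM_RD), but the named form is what these files exist to provide.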
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/etm.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/etm.json
new file mode 100644
index 000000000000..fce852e82369
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/etm.json
@@ -0,0 +1,14 @@
+[
+ {
+ "PublicDescription": "ETM trace unit output 0",
+ "EventCode": "0xDE",
+ "EventName": "ETM_EXT_OUT0",
+ "BriefDescription": "ETM trace unit output 0"
+ },
+ {
+ "PublicDescription": "ETM trace unit output 1",
+ "EventCode": "0xDF",
+ "EventName": "ETM_EXT_OUT1",
+ "BriefDescription": "ETM trace unit output 1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/exception.json
new file mode 100644
index 000000000000..b77f1228873d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/exception.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "PublicDescription": "Number of Traps to hypervisor",
+ "EventCode": "0xDC",
+ "EventName": "EXC_TRAP_HYP",
+ "BriefDescription": "Number of Traps to hypervisor"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/instruction.json
new file mode 100644
index 000000000000..91a7863ddc9a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/instruction.json
@@ -0,0 +1,65 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/memory.json
new file mode 100644
index 000000000000..34e9cab7f0b9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/memory.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/mmu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/mmu.json
new file mode 100644
index 000000000000..b85c9cc81f23
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/mmu.json
@@ -0,0 +1,44 @@
+[
+ {
+ "PublicDescription": "Duration of a translation table walk handled by the MMU",
+ "EventCode": "0xE0",
+ "EventName": "MMU_PTW",
+ "BriefDescription": "Duration of a translation table walk handled by the MMU"
+ },
+ {
+ "PublicDescription": "Duration of a Stage 1 translation table walk handled by the MMU",
+ "EventCode": "0xE1",
+ "EventName": "MMU_PTW_ST1",
+ "BriefDescription": "Duration of a Stage 1 translation table walk handled by the MMU"
+ },
+ {
+ "PublicDescription": "Duration of a Stage 2 translation table walk handled by the MMU",
+ "EventCode": "0xE2",
+ "EventName": "MMU_PTW_ST2",
+ "BriefDescription": "Duration of a Stage 2 translation table walk handled by the MMU"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by the LSU",
+ "EventCode": "0xE3",
+ "EventName": "MMU_PTW_LSU",
+ "BriefDescription": "Duration of a translation table walk requested by the LSU"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by the Instruction Side",
+ "EventCode": "0xE4",
+ "EventName": "MMU_PTW_ISIDE",
+ "BriefDescription": "Duration of a translation table walk requested by the Instruction Side"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by a Preload instruction or Prefetch request",
+ "EventCode": "0xE5",
+ "EventName": "MMU_PTW_PLD",
+ "BriefDescription": "Duration of a translation table walk requested by a Preload instruction or Prefetch request"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by a CP15 operation (maintenance by MVA and VA to PA operations)",
+ "EventCode": "0xE6",
+ "EventName": "MMU_PTW_CP15",
+ "BriefDescription": "Duration of a translation table walk requested by a CP15 operation (maintenance by MVA and VA to PA operations)"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/pipeline.json
new file mode 100644
index 000000000000..1730969e49f7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a73/pipeline.json
@@ -0,0 +1,38 @@
+[
+ {
+ "PublicDescription": "A linefill caused an instruction side stall",
+ "EventCode": "0xC0",
+ "EventName": "LF_STALL",
+ "BriefDescription": "A linefill caused an instruction side stall"
+ },
+ {
+ "PublicDescription": "A translation table walk caused an instruction side stall",
+ "EventCode": "0xC1",
+ "EventName": "PTW_STALL",
+ "BriefDescription": "A translation table walk caused an instruction side stall"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the Load-Store Unit are busy",
+ "EventCode": "0xD3",
+ "EventName": "D_LSU_SLOT_FULL",
+ "BriefDescription": "Duration for which all slots in the Load-Store Unit are busy"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the load-store issue queue are busy",
+ "EventCode": "0xD8",
+ "EventName": "LS_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the load-store issue queue are busy"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the data processing issue queue are busy",
+ "EventCode": "0xD9",
+ "EventName": "DP_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the data processing issue queue are busy"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the Data Engine issue queue are busy",
+ "EventCode": "0xDA",
+ "EventName": "DE_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the Data Engine issue queue are busy"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/branch.json
new file mode 100644
index 000000000000..ece201718284
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/branch.json
@@ -0,0 +1,11 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/cache.json
new file mode 100644
index 000000000000..7efa09800a51
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/cache.json
@@ -0,0 +1,164 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "L2I_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL_RD"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction cache - Tag RAM",
+ "EventCode": "0xC2",
+ "EventName": "I_TAG_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction cache - Tag RAM"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction cache - Data RAM",
+ "EventCode": "0xC3",
+ "EventName": "I_DATA_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction cache - Data RAM"
+ },
+ {
+ "PublicDescription": "Number of ways read in the instruction BTAC RAM",
+ "EventCode": "0xC4",
+ "EventName": "I_BTAC_RAM_RD",
+ "BriefDescription": "Number of ways read in the instruction BTAC RAM"
+ },
+ {
+ "PublicDescription": "Level 1 PLD TLB refill",
+ "EventCode": "0xE7",
+ "EventName": "L1PLD_TLB_REFILL",
+ "BriefDescription": "Level 1 PLD TLB refill"
+ },
+ {
+ "PublicDescription": "Level 2 preload and MMU prefetcher TLB access. This event only counts software and hardware prefetches at Level 2",
+ "EventCode": "0xE8",
+ "EventName": "L2PLD_TLB",
+ "BriefDescription": "Level 2 preload and MMU prefetcher TLB access. This event only counts software and hardware prefetches at Level 2"
+ },
+ {
+ "PublicDescription": "Level 1 TLB flush",
+ "EventCode": "0xE9",
+ "EventName": "UTLB_FLUSH",
+ "BriefDescription": "Level 1 TLB flush"
+ },
+ {
+ "PublicDescription": "Level 2 TLB access",
+ "EventCode": "0xEA",
+ "EventName": "TLB_ACCESS",
+ "BriefDescription": "Level 2 TLB access"
+ },
+ {
+ "PublicDescription": "Level 1 preload TLB access. This event only counts software and hardware prefetches at Level 1. This event counts all accesses to the preload data micro TLB, that is L1 prefetcher and preload instructions. This event does not take into account whether the MMU is enabled or not",
+ "EventCode": "0xEB",
+ "EventName": "L1PLD_TLB",
+ "BriefDescription": "Level 1 preload TLB access. This event only counts software and hardware prefetches at Level 1. This event counts all accesses to the preload data micro TLB, that is L1 prefetcher and preload instructions. This event does not take into account whether the MMU is enabled or not"
+ },
+ {
+ "PublicDescription": "Prefetch access to unified TLB that caused a page table walk. This event counts software and hardware prefetches",
+ "EventCode": "0xEC",
+ "EventName": "PLDTLB_WALK",
+ "BriefDescription": "Prefetch access to unified TLB that caused a page table walk. This event counts software and hardware prefetches"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/etm.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/etm.json
new file mode 100644
index 000000000000..fce852e82369
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/etm.json
@@ -0,0 +1,14 @@
+[
+ {
+ "PublicDescription": "ETM trace unit output 0",
+ "EventCode": "0xDE",
+ "EventName": "ETM_EXT_OUT0",
+ "BriefDescription": "ETM trace unit output 0"
+ },
+ {
+ "PublicDescription": "ETM trace unit output 1",
+ "EventCode": "0xDF",
+ "EventName": "ETM_EXT_OUT1",
+ "BriefDescription": "ETM trace unit output 1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/exception.json
new file mode 100644
index 000000000000..5b04d01de703
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/exception.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "PublicDescription": "Number of traps to hypervisor. This event counts the number of exception traps taken to EL2, excluding HVC instructions. This event is set every time that an exception is executed because of a decoded trap to the hypervisor. CCFAIL exceptions and traps caused by HVC instructions are excluded. This event is not counted when it is accessible from Non-secure EL0 or EL1",
+ "EventCode": "0xDC",
+ "EventName": "EXC_TRAP_HYP",
+ "BriefDescription": "Number of traps to hypervisor. This event counts the number of exception traps taken to EL2, excluding HVC instructions. This event is set every time that an exception is executed because of a decoded trap to the hypervisor. CCFAIL exceptions and traps caused by HVC instructions are excluded. This event is not counted when it is accessible from Non-secure EL0 or EL1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/instruction.json
new file mode 100644
index 000000000000..930ce8a259f3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/instruction.json
@@ -0,0 +1,74 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/memory.json
new file mode 100644
index 000000000000..929fc545470f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/memory.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/mmu.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/mmu.json
new file mode 100644
index 000000000000..0e63e68bc8cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/mmu.json
@@ -0,0 +1,44 @@
+[
+ {
+ "PublicDescription": "Duration of a translation table walk handled by the MMU",
+ "EventCode": "0xE0",
+ "EventName": "MMU_PTW",
+ "BriefDescription": "Duration of a translation table walk handled by the MMU"
+ },
+ {
+ "PublicDescription": "Duration of a Stage 1 translation table walk handled by the MMU. This event is not counted when it is accessible from Non-secure EL0 or EL1",
+ "EventCode": "0xE1",
+ "EventName": "MMU_PTW_ST1",
+ "BriefDescription": "Duration of a Stage 1 translation table walk handled by the MMU. This event is not counted when it is accessible from Non-secure EL0 or EL1"
+ },
+ {
+ "PublicDescription": "Duration of a Stage 2 translation table walk handled by the MMU. This event is not counted when it is accessible from Non-secure EL0 or EL1",
+ "EventCode": "0xE2",
+ "EventName": "MMU_PTW_ST2",
+ "BriefDescription": "Duration of a Stage 2 translation table walk handled by the MMU. This event is not counted when it is accessible from Non-secure EL0 or EL1"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by the LSU",
+ "EventCode": "0xE3",
+ "EventName": "MMU_PTW_LSU",
+ "BriefDescription": "Duration of a translation table walk requested by the LSU"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by the instruction side",
+ "EventCode": "0xE4",
+ "EventName": "MMU_PTW_ISIDE",
+ "BriefDescription": "Duration of a translation table walk requested by the instruction side"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by a Preload instruction or Prefetch request",
+ "EventCode": "0xE5",
+ "EventName": "MMU_PTW_PLD",
+ "BriefDescription": "Duration of a translation table walk requested by a Preload instruction or Prefetch request"
+ },
+ {
+ "PublicDescription": "Duration of a translation table walk requested by an address translation operation",
+ "EventCode": "0xE6",
+ "EventName": "MMU_PTW_CP15",
+ "BriefDescription": "Duration of a translation table walk requested by an address translation operation"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/pipeline.json
new file mode 100644
index 000000000000..0f8f50823cf1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a75/pipeline.json
@@ -0,0 +1,44 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "PublicDescription": "A linefill caused an instruction side stall",
+ "EventCode": "0xC0",
+ "EventName": "LF_STALL",
+ "BriefDescription": "A linefill caused an instruction side stall"
+ },
+ {
+ "PublicDescription": "A translation table walk caused an instruction side stall",
+ "EventCode": "0xC1",
+ "EventName": "PTW_STALL",
+ "BriefDescription": "A translation table walk caused an instruction side stall"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the Load-Store Unit (LSU) are busy",
+ "EventCode": "0xD3",
+ "EventName": "D_LSU_SLOT_FULL",
+ "BriefDescription": "Duration for which all slots in the Load-Store Unit (LSU) are busy"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the load-store issue queue are busy. This event counts the cycles where all slots in the LS IQs are full with micro-operations waiting for issuing, and the dispatch stage is not empty",
+ "EventCode": "0xD8",
+ "EventName": "LS_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the load-store issue queue are busy. This event counts the cycles where all slots in the LS IQs are full with micro-operations waiting for issuing, and the dispatch stage is not empty"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the data processing issue queue are busy. This event counts the cycles where all slots in the DP0 and DP1 IQs are full with micro-operations waiting for issuing, and the despatch stage is not empty",
+ "EventCode": "0xD9",
+ "EventName": "DP_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the data processing issue queue are busy. This event counts the cycles where all slots in the DP0 and DP1 IQs are full with micro-operations waiting for issuing, and the despatch stage is not empty"
+ },
+ {
+ "PublicDescription": "Duration for which all slots in the data engine issue queue are busy. This event is set every time that the data engine rename has at least one valid instruction, excluding No Operations (NOPs), that cannot move to the issue stage because accpt_instr is LOW",
+ "EventCode": "0xDA",
+ "EventName": "DE_IQ_FULL",
+ "BriefDescription": "Duration for which all slots in the data engine issue queue are busy. This event is set every time that the data engine rename has at least one valid instruction, excluding No Operations (NOPs), that cannot move to the issue stage because accpt_instr is LOW"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json
deleted file mode 100644
index b5e5d055c70d..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/branch.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
- {
- "PublicDescription": "Mispredicted or not predicted branch speculatively executed. This event counts any predictable branch instruction which is mispredicted either due to dynamic misprediction or because the MMU is off and the branches are statically predicted not taken.",
- "EventCode": "0x10",
- "EventName": "BR_MIS_PRED",
- "BriefDescription": "Mispredicted or not predicted branch speculatively executed."
- },
- {
- "PublicDescription": "Predictable branch speculatively executed. This event counts all predictable branches.",
- "EventCode": "0x12",
- "EventName": "BR_PRED",
- "BriefDescription": "Predictable branch speculatively executed."
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json
deleted file mode 100644
index fce7309ae624..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/bus.json
+++ /dev/null
@@ -1,24 +0,0 @@
-[
- {
- "EventCode": "0x11",
- "EventName": "CPU_CYCLES",
- "BriefDescription": "The number of core clock cycles."
- },
- {
- "PublicDescription": "Bus access. This event counts for every beat of data transferred over the data channels between the core and the SCU. If both read and write data beats are transferred on a given cycle, this event is counted twice on that cycle. This event counts the sum of BUS_ACCESS_RD and BUS_ACCESS_WR.",
- "EventCode": "0x19",
- "EventName": "BUS_ACCESS",
- "BriefDescription": "Bus access."
- },
- {
- "EventCode": "0x1D",
- "EventName": "BUS_CYCLES",
- "BriefDescription": "Bus cycles. This event duplicates CPU_CYCLES."
- },
- {
- "ArchStdEvent": "BUS_ACCESS_RD"
- },
- {
- "ArchStdEvent": "BUS_ACCESS_WR"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json
deleted file mode 100644
index 24594081c199..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/cache.json
+++ /dev/null
@@ -1,207 +0,0 @@
-[
- {
- "PublicDescription": "L1 instruction cache refill. This event counts any instruction fetch which misses in the cache.",
- "EventCode": "0x01",
- "EventName": "L1I_CACHE_REFILL",
- "BriefDescription": "L1 instruction cache refill"
- },
- {
- "PublicDescription": "L1 instruction TLB refill. This event counts any refill of the instruction L1 TLB from the L2 TLB. This includes refills that result in a translation fault.",
- "EventCode": "0x02",
- "EventName": "L1I_TLB_REFILL",
- "BriefDescription": "L1 instruction TLB refill"
- },
- {
- "PublicDescription": "L1 data cache refill. This event counts any load or store operation or page table walk access which causes data to be read from outside the L1, including accesses which do not allocate into L1.",
- "EventCode": "0x03",
- "EventName": "L1D_CACHE_REFILL",
- "BriefDescription": "L1 data cache refill"
- },
- {
- "PublicDescription": "L1 data cache access. This event counts any load or store operation or page table walk access which looks up in the L1 data cache. In particular, any access which could count the L1D_CACHE_REFILL event causes this event to count.",
- "EventCode": "0x04",
- "EventName": "L1D_CACHE",
- "BriefDescription": "L1 data cache access"
- },
- {
- "PublicDescription": "L1 data TLB refill. This event counts any refill of the data L1 TLB from the L2 TLB. This includes refills that result in a translation fault.",
- "EventCode": "0x05",
- "EventName": "L1D_TLB_REFILL",
- "BriefDescription": "L1 data TLB refill"
- },
- {
- "PublicDescription": "Level 1 instruction cache access or Level 0 Macro-op cache access. This event counts any instruction fetch which accesses the L1 instruction cache or L0 Macro-op cache.",
- "EventCode": "0x14",
- "EventName": "L1I_CACHE",
- "BriefDescription": "L1 instruction cache access"
- },
- {
- "PublicDescription": "L1 data cache Write-Back. This event counts any write-back of data from the L1 data cache to L2 or L3. This counts both victim line evictions and snoops, including cache maintenance operations.",
- "EventCode": "0x15",
- "EventName": "L1D_CACHE_WB",
- "BriefDescription": "L1 data cache Write-Back"
- },
- {
- "PublicDescription": "L2 data cache access. This event counts any transaction from L1 which looks up in the L2 cache, and any write-back from the L1 to the L2. Snoops from outside the core and cache maintenance operations are not counted.",
- "EventCode": "0x16",
- "EventName": "L2D_CACHE",
- "BriefDescription": "L2 data cache access"
- },
- {
- "PublicDescription": "L2 data cache refill. This event counts any cacheable transaction from L1 which causes data to be read from outside the core. L2 refills caused by stashes into L2 should not be counted",
- "EventCode": "0x17",
- "EventName": "L2D_CACHE_REFILL",
- "BriefDescription": "L2 data cache refill"
- },
- {
- "PublicDescription": "L2 data cache write-back. This event counts any write-back of data from the L2 cache to outside the core. This includes snoops to the L2 which return data, regardless of whether they cause an invalidation. Invalidations from the L2 which do not write data outside of the core and snoops which return data from the L1 are not counted",
- "EventCode": "0x18",
- "EventName": "L2D_CACHE_WB",
- "BriefDescription": "L2 data cache write-back"
- },
- {
- "PublicDescription": "L2 data cache allocation without refill. This event counts any full cache line write into the L2 cache which does not cause a linefill, including write-backs from L1 to L2 and full-line writes which do not allocate into L1.",
- "EventCode": "0x20",
- "EventName": "L2D_CACHE_ALLOCATE",
- "BriefDescription": "L2 data cache allocation without refill"
- },
- {
- "PublicDescription": "Level 1 data TLB access. This event counts any load or store operation which accesses the data L1 TLB. If both a load and a store are executed on a cycle, this event counts twice. This event counts regardless of whether the MMU is enabled.",
- "EventCode": "0x25",
- "EventName": "L1D_TLB",
- "BriefDescription": "Level 1 data TLB access."
- },
- {
- "PublicDescription": "Level 1 instruction TLB access. This event counts any instruction fetch which accesses the instruction L1 TLB.This event counts regardless of whether the MMU is enabled.",
- "EventCode": "0x26",
- "EventName": "L1I_TLB",
- "BriefDescription": "Level 1 instruction TLB access"
- },
- {
- "PublicDescription": "This event counts any full cache line write into the L3 cache which does not cause a linefill, including write-backs from L2 to L3 and full-line writes which do not allocate into L2",
- "EventCode": "0x29",
- "EventName": "L3D_CACHE_ALLOCATE",
- "BriefDescription": "Allocation without refill"
- },
- {
- "PublicDescription": "Attributable Level 3 unified cache refill. This event counts for any cacheable read transaction returning datafrom the SCU for which the data source was outside the cluster. Transactions such as ReadUnique are counted here as 'read' transactions, even though they can be generated by store instructions.",
- "EventCode": "0x2A",
- "EventName": "L3D_CACHE_REFILL",
- "BriefDescription": "Attributable Level 3 unified cache refill."
- },
- {
- "PublicDescription": "Attributable Level 3 unified cache access. This event counts for any cacheable read transaction returning datafrom the SCU, or for any cacheable write to the SCU.",
- "EventCode": "0x2B",
- "EventName": "L3D_CACHE",
- "BriefDescription": "Attributable Level 3 unified cache access."
- },
- {
- "PublicDescription": "Attributable L2 data or unified TLB refill. This event counts on anyrefill of the L2 TLB, caused by either an instruction or data access.This event does not count if the MMU is disabled.",
- "EventCode": "0x2D",
- "EventName": "L2D_TLB_REFILL",
- "BriefDescription": "Attributable L2 data or unified TLB refill"
- },
- {
- "PublicDescription": "Attributable L2 data or unified TLB access. This event counts on any access to the L2 TLB (caused by a refill of any of the L1 TLBs). This event does not count if the MMU is disabled.",
- "EventCode": "0x2F",
- "EventName": "L2D_TLB",
- "BriefDescription": "Attributable L2 data or unified TLB access"
- },
- {
- "PublicDescription": "Access to data TLB that caused a page table walk. This event counts on any data access which causes L2D_TLB_REFILL to count.",
- "EventCode": "0x34",
- "EventName": "DTLB_WALK",
- "BriefDescription": "Access to data TLB that caused a page table walk."
- },
- {
- "PublicDescription": "Access to instruction TLB that caused a page table walk. This event counts on any instruction access which causes L2D_TLB_REFILL to count.",
- "EventCode": "0x35",
- "EventName": "ITLB_WALK",
- "BriefDescription": "Access to instruction TLB that caused a page table walk."
- },
- {
- "EventCode": "0x36",
- "EventName": "LL_CACHE_RD",
- "BriefDescription": "Last level cache access, read"
- },
- {
- "EventCode": "0x37",
- "EventName": "LL_CACHE_MISS_RD",
- "BriefDescription": "Last level cache miss, read"
- },
- {
- "ArchStdEvent": "L1D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "L1D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L1D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L1D_TLB_RD"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_RD"
- },
- {
- "ArchStdEvent": "L1D_TLB_REFILL_WR"
- },
- {
- "ArchStdEvent": "L1D_TLB_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_INVAL"
- },
- {
- "ArchStdEvent": "L2D_CACHE_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_RD"
- },
- {
- "ArchStdEvent": "L2D_CACHE_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
- },
- {
- "ArchStdEvent": "L2D_CACHE_WR"
- },
- {
- "ArchStdEvent": "L2D_TLB_RD"
- },
- {
- "ArchStdEvent": "L2D_TLB_REFILL_RD"
- },
- {
- "ArchStdEvent": "L2D_TLB_REFILL_WR"
- },
- {
- "ArchStdEvent": "L2D_TLB_WR"
- },
- {
- "ArchStdEvent": "L3D_CACHE_RD"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json
deleted file mode 100644
index c153ac706d8d..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/instruction.json
+++ /dev/null
@@ -1,108 +0,0 @@
-[
- {
- "PublicDescription": "Software increment. Instruction architecturally executed (condition code check pass).",
- "EventCode": "0x00",
- "EventName": "SW_INCR",
- "BriefDescription": "Software increment."
- },
- {
- "PublicDescription": "Instruction architecturally executed. This event counts all retired instructions, including those that fail their condition check.",
- "EventCode": "0x08",
- "EventName": "INST_RETIRED",
- "BriefDescription": "Instruction architecturally executed."
- },
- {
- "EventCode": "0x0A",
- "EventName": "EXC_RETURN",
- "BriefDescription": "Instruction architecturally executed, condition code check pass, exception return."
- },
- {
- "PublicDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR. This event only counts writes to CONTEXTIDR in AArch32 state, and via the CONTEXTIDR_EL1 mnemonic in AArch64 state.",
- "EventCode": "0x0B",
- "EventName": "CID_WRITE_RETIRED",
- "BriefDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR."
- },
- {
- "EventCode": "0x1B",
- "EventName": "INST_SPEC",
- "BriefDescription": "Operation speculatively executed"
- },
- {
- "PublicDescription": "Instruction architecturally executed, condition code check pass, write to TTBR. This event only counts writes to TTBR0/TTBR1 in AArch32 state and TTBR0_EL1/TTBR1_EL1 in AArch64 state.",
- "EventCode": "0x1C",
- "EventName": "TTBR_WRITE_RETIRED",
- "BriefDescription": "Instruction architecturally executed, condition code check pass, write to TTBR"
- },
- {
- "PublicDescription": "Instruction architecturally executed, branch. This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches.",
- "EventCode": "0x21",
- "EventName": "BR_RETIRED",
- "BriefDescription": "Instruction architecturally executed, branch."
- },
- {
- "PublicDescription": "Instruction architecturally executed, mispredicted branch. This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush.",
- "EventCode": "0x22",
- "EventName": "BR_MIS_PRED_RETIRED",
- "BriefDescription": "Instruction architecturally executed, mispredicted branch."
- },
- {
- "ArchStdEvent": "ASE_SPEC"
- },
- {
- "ArchStdEvent": "BR_IMMED_SPEC"
- },
- {
- "ArchStdEvent": "BR_INDIRECT_SPEC"
- },
- {
- "ArchStdEvent": "BR_RETURN_SPEC"
- },
- {
- "ArchStdEvent": "CRYPTO_SPEC"
- },
- {
- "ArchStdEvent": "DMB_SPEC"
- },
- {
- "ArchStdEvent": "DP_SPEC"
- },
- {
- "ArchStdEvent": "DSB_SPEC"
- },
- {
- "ArchStdEvent": "ISB_SPEC"
- },
- {
- "ArchStdEvent": "LDREX_SPEC"
- },
- {
- "ArchStdEvent": "LDST_SPEC"
- },
- {
- "ArchStdEvent": "LD_SPEC"
- },
- {
- "ArchStdEvent": "PC_WRITE_SPEC"
- },
- {
- "ArchStdEvent": "RC_LD_SPEC"
- },
- {
- "ArchStdEvent": "RC_ST_SPEC"
- },
- {
- "ArchStdEvent": "STREX_FAIL_SPEC"
- },
- {
- "ArchStdEvent": "STREX_PASS_SPEC"
- },
- {
- "ArchStdEvent": "STREX_SPEC"
- },
- {
- "ArchStdEvent": "ST_SPEC"
- },
- {
- "ArchStdEvent": "VFP_SPEC"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json
deleted file mode 100644
index 8bde029a62d5..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/other.json
+++ /dev/null
@@ -1,7 +0,0 @@
-[
- {
- "EventCode": "0x31",
- "EventName": "REMOTE_ACCESS",
- "BriefDescription": "Access to another socket in a multi-socket system"
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json
deleted file mode 100644
index 010a647f9d02..000000000000
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/pipeline.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
- {
- "PublicDescription": "No operation issued because of the frontend. The counter counts on any cycle when there are no fetched instructions available to dispatch.",
- "EventCode": "0x23",
- "EventName": "STALL_FRONTEND",
- "BriefDescription": "No operation issued because of the frontend."
- },
- {
- "PublicDescription": "No operation issued because of the backend. The counter counts on any cycle fetched instructions are not dispatched due to resource constraints.",
- "EventCode": "0x24",
- "EventName": "STALL_BACKEND",
- "BriefDescription": "No operation issued because of the backend."
- }
-]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/branch.json
new file mode 100644
index 000000000000..db68de188390
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/branch.json
@@ -0,0 +1,10 @@
+[
+ {
+ "PublicDescription": "This event counts any predictable branch instruction which is mispredicted either due to dynamic misprediction or because the MMU is off and the branches are statically predicted not taken",
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "PublicDescription": "This event counts all predictable branches.",
+ "ArchStdEvent": "BR_PRED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/bus.json
new file mode 100644
index 000000000000..e0875d3a685d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/bus.json
@@ -0,0 +1,21 @@
+[
+ {
+ "PublicDescription": "The number of core clock cycles",
+ "ArchStdEvent": "CPU_CYCLES",
+ "BriefDescription": "The number of core clock cycles."
+ },
+ {
+ "PublicDescription": "This event counts for every beat of data transferred over the data channels between the core and the SCU. If both read and write data beats are transferred on a given cycle, this event is counted twice on that cycle. This event counts the sum of BUS_ACCESS_RD and BUS_ACCESS_WR.",
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "PublicDescription": "This event duplicates CPU_CYCLES.",
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/cache.json
new file mode 100644
index 000000000000..fc448c2d5ea4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/cache.json
@@ -0,0 +1,169 @@
+[
+ {
+ "PublicDescription": "This event counts any instruction fetch which misses in the cache.",
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "PublicDescription": "This event counts any refill of the instruction L1 TLB from the L2 TLB. This includes refills that result in a translation fault.",
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "PublicDescription": "This event counts any load or store operation or page table walk access which causes data to be read from outside the L1, including accesses which do not allocate into L1.",
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "PublicDescription": "This event counts any load or store operation or page table walk access which looks up in the L1 data cache. In particular, any access which could count the L1D_CACHE_REFILL event causes this event to count.",
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "PublicDescription": "This event counts any refill of the data L1 TLB from the L2 TLB. This includes refills that result in a translation fault.",
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "PublicDescription": "Level 1 instruction cache access or Level 0 Macro-op cache access. This event counts any instruction fetch which accesses the L1 instruction cache or L0 Macro-op cache.",
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "PublicDescription": "This event counts any write-back of data from the L1 data cache to L2 or L3. This counts both victim line evictions and snoops, including cache maintenance operations.",
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "PublicDescription": "This event counts any transaction from L1 which looks up in the L2 cache, and any write-back from the L1 to the L2. Snoops from outside the core and cache maintenance operations are not counted.",
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "PublicDescription": "L2 data cache refill. This event counts any cacheable transaction from L1 which causes data to be read from outside the core. L2 refills caused by stashes into L2 should not be counted",
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "PublicDescription": "This event counts any write-back of data from the L2 cache to outside the core. This includes snoops to the L2 which return data, regardless of whether they cause an invalidation. Invalidations from the L2 which do not write data outside of the core and snoops which return data from the L1 are not counted",
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "PublicDescription": "This event counts any full cache line write into the L2 cache which does not cause a linefill, including write-backs from L1 to L2 and full-line writes which do not allocate into L1.",
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "PublicDescription": "This event counts any load or store operation which accesses the data L1 TLB. If both a load and a store are executed on a cycle, this event counts twice. This event counts regardless of whether the MMU is enabled.",
+ "ArchStdEvent": "L1D_TLB",
+ "BriefDescription": "Level 1 data TLB access."
+ },
+ {
+ "PublicDescription": "This event counts any instruction fetch which accesses the instruction L1 TLB.This event counts regardless of whether the MMU is enabled.",
+ "ArchStdEvent": "L1I_TLB",
+ "BriefDescription": "Level 1 instruction TLB access"
+ },
+ {
+ "PublicDescription": "This event counts any full cache line write into the L3 cache which does not cause a linefill, including write-backs from L2 to L3 and full-line writes which do not allocate into L2",
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE",
+ "BriefDescription": "Allocation without refill"
+ },
+ {
+ "PublicDescription": "This event counts for any cacheable read transaction returning datafrom the SCU for which the data source was outside the cluster. Transactions such as ReadUnique are counted here as 'read' transactions, even though they can be generated by store instructions.",
+ "ArchStdEvent": "L3D_CACHE_REFILL",
+ "BriefDescription": "Attributable Level 3 unified cache refill."
+ },
+ {
+ "PublicDescription": "This event counts for any cacheable read transaction returning datafrom the SCU, or for any cacheable write to the SCU.",
+ "ArchStdEvent": "L3D_CACHE",
+ "BriefDescription": "Attributable Level 3 unified cache access."
+ },
+ {
+ "PublicDescription": "This event counts on anyrefill of the L2 TLB, caused by either an instruction or data access.This event does not count if the MMU is disabled.",
+ "ArchStdEvent": "L2D_TLB_REFILL",
+ "BriefDescription": "Attributable L2 data or unified TLB refill"
+ },
+ {
+ "PublicDescription": "This event counts on any access to the L2 TLB (caused by a refill of any of the L1 TLBs). This event does not count if the MMU is disabled.",
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "PublicDescription": "This event counts on any data access which causes L2D_TLB_REFILL to count.",
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "PublicDescription": "This event counts on any instruction access which causes L2D_TLB_REFILL to count.",
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/exception.json
new file mode 100644
index 000000000000..ce942324ee60
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/exception.json
@@ -0,0 +1,48 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "PublicDescription": "This event counts any correctable or uncorrectable memory error (ECC or parity) in the protected core RAMs",
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/instruction.json
new file mode 100644
index 000000000000..b0b439a36ae9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/instruction.json
@@ -0,0 +1,91 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "PublicDescription": "This event counts all retired instructions, including those that fail their condition check.",
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "PublicDescription": "This event only counts writes to CONTEXTIDR in AArch32 state, and via the CONTEXTIDR_EL1 mnemonic in AArch64 state.",
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "PublicDescription": "This event only counts writes to TTBR0/TTBR1 in AArch32 state and TTBR0_EL1/TTBR1_EL1 in AArch64 state.",
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "PublicDescription": "This event counts all branches, taken or not. This excludes exception entries, debug entries and CCFAIL branches.",
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "PublicDescription": "This event counts any branch counted by BR_RETIRED which is not correctly predicted and causes a pipeline flush.",
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/memory.json
index b86643253f19..5bed2514b245 100644
--- a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76-n1/memory.json
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/memory.json
@@ -1,9 +1,10 @@
[
{
- "PublicDescription": "Data memory access. This event counts memory accesses due to load or store instructions. This event counts the sum of MEM_ACCESS_RD and MEM_ACCESS_WR.",
- "EventCode": "0x13",
- "EventName": "MEM_ACCESS",
- "BriefDescription": "Data memory access"
+ "PublicDescription": "This event counts memory accesses due to load or store instructions. This event counts the sum of MEM_ACCESS_RD and MEM_ACCESS_WR.",
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
},
{
"ArchStdEvent": "MEM_ACCESS_RD"
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/pipeline.json
new file mode 100644
index 000000000000..b4e96551d51a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a76/pipeline.json
@@ -0,0 +1,10 @@
+[
+ {
+ "PublicDescription": "The counter counts on any cycle when there are no fetched instructions available to dispatch.",
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "PublicDescription": "The counter counts on any cycle fetched instructions are not dispatched due to resource constraints.",
+ "ArchStdEvent": "STALL_BACKEND"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/bus.json
new file mode 100644
index 000000000000..75d850b781ac
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/bus.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/cache.json
new file mode 100644
index 000000000000..cbb365f5091f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/cache.json
@@ -0,0 +1,143 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/instruction.json
new file mode 100644
index 000000000000..1a74786271d4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/instruction.json
@@ -0,0 +1,77 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/memory.json
new file mode 100644
index 000000000000..5aff6e93c1ad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/pipeline.json
new file mode 100644
index 000000000000..eeac798d403a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a77/pipeline.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/instruction.json
new file mode 100644
index 000000000000..a9edd52843a1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/instruction.json
@@ -0,0 +1,80 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/memory.json
new file mode 100644
index 000000000000..5aff6e93c1ad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-a78/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/instruction.json
new file mode 100644
index 000000000000..a9edd52843a1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/instruction.json
@@ -0,0 +1,80 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/memory.json
new file mode 100644
index 000000000000..5aff6e93c1ad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x1/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/branch.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/branch.json
new file mode 100644
index 000000000000..2f2d137f5f55
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/branch.json
@@ -0,0 +1,17 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/bus.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/cache.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/exception.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/instruction.json
new file mode 100644
index 000000000000..964f47c6b099
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/instruction.json
@@ -0,0 +1,134 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/memory.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/memory.json
new file mode 100644
index 000000000000..7b2b21ac150f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/memory.json
@@ -0,0 +1,41 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/trace.json b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/trace.json
new file mode 100644
index 000000000000..3116135c59e2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cortex-x2/trace.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "TRB_WRAP"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT1"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT2"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT3"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT5"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT6"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT7"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/bus.json
new file mode 100644
index 000000000000..2e11a8c4a484
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/bus.json
@@ -0,0 +1,18 @@
+[
+ {
+ "ArchStdEvent": "BUS_ACCESS",
+ "PublicDescription": "Counts memory transactions issued by the CPU to the external bus, including snoop requests and snoop responses. Each beat of data is counted individually."
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES",
+ "PublicDescription": "Counts bus cycles in the CPU. Bus cycles represent a clock cycle in which a transaction could be sent or received on the interface from the CPU to the external bus. Since that interface is driven at the same clock speed as the CPU, this event is a duplicate of CPU_CYCLES."
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD",
+ "PublicDescription": "Counts memory read transactions seen on the external bus. Each beat of data is counted individually."
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR",
+ "PublicDescription": "Counts memory write transactions seen on the external bus. Each beat of data is counted individually."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/exception.json
new file mode 100644
index 000000000000..4404b8e91690
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/exception.json
@@ -0,0 +1,62 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN",
+ "PublicDescription": "Counts any taken architecturally visible exceptions such as IRQ, FIQ, SError, and other synchronous exceptions. Exceptions are counted whether or not they are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN",
+ "PublicDescription": "Counts any architecturally executed exception return instructions. Eg: AArch64: ERET"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF",
+ "PublicDescription": "Counts the number of synchronous exceptions which are taken locally that are due to attempting to execute an instruction that is UNDEFINED. Attempting to execute instruction bit patterns that have not been allocated. Attempting to execute instructions when they are disabled. Attempting to execute instructions at an inappropriate Exception level. Attempting to execute an instruction when the value of PSTATE.IL is 1."
+ },
+ {
+ "ArchStdEvent": "EXC_SVC",
+ "PublicDescription": "Counts SVC exceptions taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT",
+ "PublicDescription": "Counts synchronous exceptions that are taken locally and caused by Instruction Aborts."
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT",
+ "PublicDescription": "Counts exceptions that are taken locally and are caused by data aborts or SErrors. Conditions that could cause those exceptions are attempting to read or write memory where the MMU generates a fault, attempting to read or write memory with a misaligned address, interrupts from the nSEI inputs and internally generated SErrors."
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ",
+ "PublicDescription": "Counts IRQ exceptions including the virtual IRQs that are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ",
+ "PublicDescription": "Counts FIQ exceptions including the virtual FIQs that are taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_SMC",
+ "PublicDescription": "Counts SMC exceptions take to EL3."
+ },
+ {
+ "ArchStdEvent": "EXC_HVC",
+ "PublicDescription": "Counts HVC exceptions taken to EL2."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT",
+ "PublicDescription": "Counts exceptions which are traps not taken locally and are caused by Instruction Aborts. For example, attempting to execute an instruction with a misaligned PC."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT",
+ "PublicDescription": "Counts exceptions which are traps not taken locally and are caused by Data Aborts or SError interrupts. Conditions that could cause those exceptions are:\n\n1. Attempting to read or write memory where the MMU generates a fault,\n2. Attempting to read or write memory with a misaligned address,\n3. Interrupts from the SEI input.\n4. internally generated SErrors."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER",
+ "PublicDescription": "Counts the number of synchronous trap exceptions which are not taken locally and are not SVC, SMC, HVC, data aborts, Instruction Aborts, or interrupts."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ",
+ "PublicDescription": "Counts IRQ exceptions including the virtual IRQs that are not taken locally."
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ",
+ "PublicDescription": "Counts FIQs which are not taken locally but taken from EL0, EL1,\n or EL2 to EL3 (which would be the normal behavior for FIQs when not executing\n in EL3)."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/general.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/general.json
new file mode 100644
index 000000000000..20fada95ef97
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/general.json
@@ -0,0 +1,6 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES",
+ "PublicDescription": "Counts CPU clock cycles (not timer cycles). The clock measured by this event is defined as the physical clock driving the CPU logic."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l1d_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l1d_cache.json
new file mode 100644
index 000000000000..6cd0b3ba5010
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l1d_cache.json
@@ -0,0 +1,50 @@
+[
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed load or store operations that missed in the level 1 data cache. This event only counts one event per cache line. This event does not count cache line allocations from preload instructions or from hardware cache prefetching."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE",
+ "PublicDescription": "Counts level 1 data cache accesses from any load/store operations. Atomic operations that resolve in the CPUs caches (near atomic operations) counts as both a write access and read access. Each access to a cache line is counted including the multiple accesses caused by single instructions such as LDM or STM. Each access to other level 1 data or unified memory structures, for example refill buffers, write buffers, and write-back buffers, are also counted."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB",
+ "PublicDescription": "Counts write-backs of dirty data from the L1 data cache to the L2 cache. This occurs when either a dirty cache line is evicted from L1 data cache and allocated in the L2 cache or dirty data is written to the L2 and possibly to the next level of cache. This event counts both victim cache line evictions and cache write-backs from snoops or cache maintenance operations. The following cache operations are not counted:\n\n1. Invalidations which do not result in data being transferred out of the L1 (such as evictions of clean data),\n2. Full line writes which write to L2 without writing L1, such as write streaming mode."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD",
+ "PublicDescription": "Counts level 1 data cache accesses from any load operation. Atomic load operations that resolve in the CPUs caches counts as both a write access and read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR",
+ "PublicDescription": "Counts level 1 data cache accesses generated by store operations. This event also counts accesses caused by a DC ZVA (data cache zero, specified by virtual address) instruction. Near atomic operations that resolve in the CPUs caches count as a write access and read access."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed load instructions where the memory read operation misses in the level 1 data cache. This event only counts one event per cache line."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR",
+ "PublicDescription": "Counts level 1 data cache refills caused by speculatively executed store instructions where the memory write operation misses in the level 1 data cache. This event only counts one event per cache line."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER",
+ "PublicDescription": "Counts level 1 data cache refills where the cache line data came from caches inside the immediate cluster of the core."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER",
+ "PublicDescription": "Counts level 1 data cache refills for which the cache line data came from outside the immediate cluster of the core, like an SLC in the system interconnect or DRAM."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM",
+ "PublicDescription": "Counts dirty cache line evictions from the level 1 data cache caused by a new cache line allocation. This event does not count evictions caused by cache maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN",
+ "PublicDescription": "Counts write-backs from the level 1 data cache that are a result of a coherency operation made by another CPU. Event count includes cache maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL",
+ "PublicDescription": "Counts each explicit invalidation of a cache line in the level 1 data cache caused by:\n\n- Cache Maintenance Operations (CMO) that operate by a virtual address.\n- Broadcast cache coherency operations from another CPU in the system.\n\nThis event does not count for the following conditions:\n\n1. A cache refill invalidates a cache line.\n2. A CMO which is executed on that CPU and invalidates a cache line specified by set/way.\n\nNote that CMOs that operate by set/way cannot be broadcast from one CPU to another."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l1i_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l1i_cache.json
new file mode 100644
index 000000000000..e719b6e7fa77
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l1i_cache.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL",
+ "PublicDescription": "Counts cache line refills in the level 1 instruction cache caused by a missed instruction fetch. Instruction fetches may include accessing multiple instructions, but the single cache line allocation is counted once."
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE",
+ "PublicDescription": "Counts instruction fetches which access the level 1 instruction cache. Instruction cache accesses caused by cache maintenance operations are not counted."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l2_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l2_cache.json
new file mode 100644
index 000000000000..2f6099889de1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l2_cache.json
@@ -0,0 +1,46 @@
+[
+ {
+ "ArchStdEvent": "L2D_CACHE",
+ "PublicDescription": "Counts level 2 cache accesses. level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the first level caches or translation resolutions due to accesses. This event also counts write back of dirty data from level 1 data cache to the L2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL",
+ "PublicDescription": "Counts cache line refills into the level 2 cache. level 2 cache is a unified cache for data and instruction accesses. Accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB",
+ "PublicDescription": "Counts write-backs of data from the L2 cache to outside the CPU. This includes snoops to the L2 (from other CPUs) which return data even if the snoops cause an invalidation. L2 cache line invalidations which do not write data outside the CPU and snoops which return data from an L1 cache are not counted. Data would not be written outside the cache when invalidating a clean cache line."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE",
+ "PublicDescription": "TBD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD",
+ "PublicDescription": "Counts level 2 cache accesses due to memory read operations. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR",
+ "PublicDescription": "Counts level 2 cache accesses due to memory write operations. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD",
+ "PublicDescription": "Counts refills for memory accesses due to memory read operation counted by L2D_CACHE_RD. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR",
+ "PublicDescription": "Counts refills for memory accesses due to memory write operation counted by L2D_CACHE_WR. level 2 cache is a unified cache for data and instruction accesses, accesses are for misses in the level 1 caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM",
+ "PublicDescription": "Counts evictions from the level 2 cache because of a line being allocated into the L2 cache."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN",
+ "PublicDescription": "Counts write-backs from the level 2 cache that are a result of either:\n\n1. Cache maintenance operations,\n\n2. Snoop responses or,\n\n3. Direct cache transfers to another CPU due to a forwarding snoop request."
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL",
+ "PublicDescription": "Counts each explicit invalidation of a cache line in the level 2 cache by cache maintenance operations that operate by a virtual address, or by external coherency operations. This event does not count if either:\n\n1. A cache refill invalidates a cache line or,\n2. A Cache Maintenance Operation (CMO), which invalidates a cache line specified by set/way, is executed on that CPU.\n\nCMOs that operate by set/way cannot be broadcast from one CPU to another."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l3_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l3_cache.json
new file mode 100644
index 000000000000..f93e0c9f309a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/l3_cache.json
@@ -0,0 +1,18 @@
+[
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE",
+ "PublicDescription": "Counts level 3 cache line allocates that do not fetch data from outside the level 3 data or unified cache. For example, allocates due to streaming stores."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL",
+ "PublicDescription": "Counts level 3 accesses that receive data from outside the L3 cache."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE",
+ "PublicDescription": "Counts level 3 cache accesses. level 3 cache is a unified cache for data and instruction accesses. Accesses are for misses in the lower level caches or translation resolutions due to accesses."
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD",
+ "PublicDescription": "TBD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/ll_cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/ll_cache.json
new file mode 100644
index 000000000000..bb712d57d58a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/ll_cache.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "LL_CACHE_RD",
+ "PublicDescription": "Counts read transactions that were returned from outside the core cluster. This event counts when the system register CPUECTLR.EXTLLC bit is set. This event counts read transactions returned from outside the core if those transactions are either hit in the system level cache or missed in the SLC and are returned from any other external sources."
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD",
+ "PublicDescription": "Counts read transactions that were returned from outside the core cluster but missed in the system level cache. This event counts when the system register CPUECTLR.EXTLLC bit is set. This event counts read transactions returned from outside the core if those transactions are missed in the System level Cache. The data source of the transaction is indicated by a field in the CHI transaction returning to the CPU. This event does not count reads caused by cache maintenance operations."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/memory.json
new file mode 100644
index 000000000000..9041f6e0befb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/memory.json
@@ -0,0 +1,22 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS",
+ "PublicDescription": "Counts memory accesses issued by the CPU load store unit, where those accesses are issued due to load or store operations. This event counts memory accesses no matter whether the data is received from any level of cache hierarchy or external memory. If memory accesses are broken up into smaller transactions than what were specified in the load or store instructions, then the event counts those smaller memory transactions."
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR",
+ "PublicDescription": "Counts any detected correctable or uncorrectable physical memory errors (ECC or parity) in protected CPUs RAMs. On the core, this event counts errors in the caches (including data and tag rams). Any detected memory error (from either a speculative and abandoned access, or an architecturally executed access) is counted. Note that errors are only detected when the actual protected memory is accessed by an operation."
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS",
+ "PublicDescription": "Counts accesses to another chip, which is implemented as a different CMN mesh in the system. If the CHI bus response back to the core indicates that the data source is from another chip (mesh), then the counter is updated. If no data is returned, even if the system snoops another chip/mesh, then the counter is not updated."
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD",
+ "PublicDescription": "Counts memory accesses issued by the CPU due to load operations. The event counts any memory load access, no matter whether the data is received from any level of cache hierarchy or external memory. The event also counts atomic load operations. If memory accesses are broken up by the load/store unit into smaller transactions that are issued by the bus interface, then the event counts those smaller transactions."
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR",
+ "PublicDescription": "Counts memory accesses issued by the CPU due to store operations. The event counts any memory store access, no matter whether the data is located in any level of cache or external memory. The event also counts atomic load and store operations. If memory accesses are broken up by the load/store unit into smaller transactions that are issued by the bus interface, then the event counts those smaller transactions."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/metrics.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/metrics.json
new file mode 100644
index 000000000000..dc0f8638f8f5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/metrics.json
@@ -0,0 +1,219 @@
+[
+ {
+ "MetricName": "backend_stalled_cycles",
+ "MetricExpr": "((STALL_BACKEND / CPU_CYCLES) * 100)",
+ "BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the backend unit of the processor.",
+ "MetricGroup": "Cycle_Accounting",
+ "ScaleUnit": "1percent of cycles"
+ },
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "MetricExpr": "(BR_MIS_PRED_RETIRED / BR_RETIRED)",
+ "BriefDescription": "This metric measures the ratio of branches mispredicted to the total number of branches architecturally executed. This gives an indication of the effectiveness of the branch prediction unit.",
+ "MetricGroup": "Miss_Ratio;Branch_Effectiveness",
+ "ScaleUnit": "1per branch"
+ },
+ {
+ "MetricName": "branch_mpki",
+ "MetricExpr": "((BR_MIS_PRED_RETIRED / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of branch mispredictions per thousand instructions executed.",
+ "MetricGroup": "MPKI;Branch_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "branch_percentage",
+ "MetricExpr": "(((BR_IMMED_SPEC + BR_INDIRECT_SPEC) / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures branch operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "crypto_percentage",
+ "MetricExpr": "((CRYPTO_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "dtlb_mpki",
+ "MetricExpr": "((DTLB_WALK / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of data TLB Walks per thousand instructions executed.",
+ "MetricGroup": "MPKI;DTLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "dtlb_walk_ratio",
+ "MetricExpr": "(DTLB_WALK / L1D_TLB)",
+ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "frontend_stalled_cycles",
+ "MetricExpr": "((STALL_FRONTEND / CPU_CYCLES) * 100)",
+ "BriefDescription": "This metric is the percentage of cycles that were stalled due to resource constraints in the frontend unit of the processor.",
+ "MetricGroup": "Cycle_Accounting",
+ "ScaleUnit": "1percent of cycles"
+ },
+ {
+ "MetricName": "integer_dp_percentage",
+ "MetricExpr": "((DP_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "ipc",
+ "MetricExpr": "(INST_RETIRED / CPU_CYCLES)",
+ "BriefDescription": "This metric measures the number of instructions retired per cycle.",
+ "MetricGroup": "General",
+ "ScaleUnit": "1per cycle"
+ },
+ {
+ "MetricName": "itlb_mpki",
+ "MetricExpr": "((ITLB_WALK / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of instruction TLB Walks per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "itlb_walk_ratio",
+ "MetricExpr": "(ITLB_WALK / L1I_TLB)",
+ "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l1d_cache_miss_ratio",
+ "MetricExpr": "(L1D_CACHE_REFILL / L1D_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.",
+ "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l1d_cache_mpki",
+ "MetricExpr": "((L1D_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 data cache accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;L1D_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l1d_tlb_miss_ratio",
+ "MetricExpr": "(L1D_TLB_REFILL / L1D_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 1 data TLB accesses missed to the total number of level 1 data TLB accesses. This gives an indication of the effectiveness of the level 1 data TLB.",
+ "MetricGroup": "Miss_Ratio;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l1d_tlb_mpki",
+ "MetricExpr": "((L1D_TLB_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 instruction TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;DTLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l1i_cache_miss_ratio",
+ "MetricExpr": "(L1I_CACHE_REFILL / L1I_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. This gives an indication of the effectiveness of the level 1 instruction cache.",
+ "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l1i_cache_mpki",
+ "MetricExpr": "((L1I_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 instruction cache accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;L1I_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l1i_tlb_miss_ratio",
+ "MetricExpr": "(L1I_TLB_REFILL / L1I_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 1 instruction TLB accesses missed to the total number of level 1 instruction TLB accesses. This gives an indication of the effectiveness of the level 1 instruction TLB.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l1i_tlb_mpki",
+ "MetricExpr": "((L1I_TLB_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 1 instruction TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l2_cache_miss_ratio",
+ "MetricExpr": "(L2D_CACHE_REFILL / L2D_CACHE)",
+ "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "l2_cache_mpki",
+ "MetricExpr": "((L2D_CACHE_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 2 unified cache accesses missed per thousand instructions executed. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.",
+ "MetricGroup": "MPKI;L2_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "l2_tlb_miss_ratio",
+ "MetricExpr": "(L2D_TLB_REFILL / L2D_TLB)",
+ "BriefDescription": "This metric measures the ratio of level 2 unified TLB accesses missed to the total number of level 2 unified TLB accesses. This gives an indication of the effectiveness of the level 2 TLB.",
+ "MetricGroup": "Miss_Ratio;ITLB_Effectiveness;DTLB_Effectiveness",
+ "ScaleUnit": "1per TLB access"
+ },
+ {
+ "MetricName": "l2_tlb_mpki",
+ "MetricExpr": "((L2D_TLB_REFILL / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of level 2 unified TLB accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;ITLB_Effectiveness;DTLB_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "ll_cache_read_hit_ratio",
+ "MetricExpr": "((LL_CACHE_RD - LL_CACHE_MISS_RD) / LL_CACHE_RD)",
+ "BriefDescription": "This metric measures the ratio of last level cache read accesses hit in the cache to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
+ "MetricGroup": "LL_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "ll_cache_read_miss_ratio",
+ "MetricExpr": "(LL_CACHE_MISS_RD / LL_CACHE_RD)",
+ "BriefDescription": "This metric measures the ratio of last level cache read accesses missed to the total number of last level cache accesses. This gives an indication of the effectiveness of the last level cache for read traffic. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a system level cache.",
+ "MetricGroup": "Miss_Ratio;LL_Cache_Effectiveness",
+ "ScaleUnit": "1per cache access"
+ },
+ {
+ "MetricName": "ll_cache_read_mpki",
+ "MetricExpr": "((LL_CACHE_MISS_RD / INST_RETIRED) * 1000)",
+ "BriefDescription": "This metric measures the number of last level cache read accesses missed per thousand instructions executed.",
+ "MetricGroup": "MPKI;LL_Cache_Effectiveness",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricName": "load_percentage",
+ "MetricExpr": "((LD_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "scalar_fp_percentage",
+ "MetricExpr": "((VFP_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "simd_percentage",
+ "MetricExpr": "((ASE_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ },
+ {
+ "MetricName": "store_percentage",
+ "MetricExpr": "((ST_SPEC / INST_SPEC) * 100)",
+ "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.",
+ "MetricGroup": "Operation_Mix",
+ "ScaleUnit": "1percent of operations"
+ }
+]
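The metric expressions in the file above are plain ratios over raw PMU counts. As an illustrative sketch only, not part of the patch, the same arithmetic can be written out in Python; the counter values below are hypothetical and would in practice come from something like perf stat on the named events:

# Minimal sketch mirroring the MetricExpr formulas above; all counts are made up.

def mpki(refills: int, inst_retired: int) -> float:
    # Misses per kilo instructions, e.g. l1i_cache_mpki = (L1I_CACHE_REFILL / INST_RETIRED) * 1000.
    return refills / inst_retired * 1000

def miss_ratio(refills: int, accesses: int) -> float:
    # Miss ratio, e.g. l2_cache_miss_ratio = L2D_CACHE_REFILL / L2D_CACHE.
    return refills / accesses

counts = {
    "L1I_CACHE_REFILL": 12_000,   # hypothetical readings
    "INST_RETIRED": 9_000_000,
    "L2D_CACHE_REFILL": 45_000,
    "L2D_CACHE": 600_000,
}
print("l1i_cache_mpki:", mpki(counts["L1I_CACHE_REFILL"], counts["INST_RETIRED"]))
print("l2_cache_miss_ratio:", miss_ratio(counts["L2D_CACHE_REFILL"], counts["L2D_CACHE"]))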
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/retired.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/retired.json
new file mode 100644
index 000000000000..0c7692ad5108
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/retired.json
@@ -0,0 +1,26 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR",
+ "PublicDescription": "Counts software writes to the PMSWINC_EL0 (software PMU increment) register. The PMSWINC_EL0 register is a manually updated counter for use by application software.\n\nThis event could be used to measure any user program event, such as accesses to a particular data structure (by writing to the PMSWINC_EL0 register each time the data structure is accessed).\n\nTo use the PMSWINC_EL0 register and event, developers must insert instructions that write to the PMSWINC_EL0 register into the source code.\n\nSince the SW_INCR event records writes to the PMSWINC_EL0 register, there is no need to do a read/increment/write sequence to the PMSWINC_EL0 register."
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED",
+ "PublicDescription": "Counts instructions that have been architecturally executed."
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED",
+ "PublicDescription": "Counts architecturally executed writes to the CONTEXTIDR register, which usually contain the kernel PID and can be output with hardware trace."
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED",
+ "PublicDescription": "Counts architectural writes to TTBR0/1_EL1. If virtualization host extensions are enabled (by setting the HCR_EL2.E2H bit to 1), then accesses to TTBR0/1_EL1 that are redirected to TTBR0/1_EL2, or accesses to TTBR0/1_EL12, are counted. TTBRn registers are typically updated when the kernel is swapping user-space threads or applications."
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED",
+ "PublicDescription": "Counts architecturally executed branches, whether the branch is taken or not. Instructions that explicitly write to the PC are also counted."
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED",
+ "PublicDescription": "Counts branches counted by BR_RETIRED which were mispredicted and caused a pipeline flush."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/spe.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/spe.json
new file mode 100644
index 000000000000..5de8b0f3a440
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/spe.json
@@ -0,0 +1,18 @@
+[
+ {
+ "ArchStdEvent": "SAMPLE_POP",
+ "PublicDescription": "Counts statistical profiling sample population, the count of all operations that could be sampled but may or may not be chosen for sampling."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED",
+ "PublicDescription": "Counts statistical profiling samples taken for sampling."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FILTRATE",
+ "PublicDescription": "Counts statistical profiling samples taken which are not removed by filtering."
+ },
+ {
+ "ArchStdEvent": "SAMPLE_COLLISION",
+ "PublicDescription": "Counts statistical profiling samples that have collided with a previous sample and so therefore not taken."
+ }
+]
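The four SPE events above are related: SAMPLE_FEED out of SAMPLE_POP gives the effective sampling rate, and SAMPLE_FILTRATE out of SAMPLE_FEED gives the fraction of taken samples kept after filtering. A small Python sketch of that bookkeeping, with hypothetical counts and not part of the patch:

# Sketch only; real values would come from counting the SAMPLE_* events while SPE is active.
def spe_summary(sample_pop: int, sample_feed: int, sample_filtrate: int, sample_collision: int) -> dict:
    return {
        "sampling_rate": sample_feed / sample_pop if sample_pop else 0.0,           # SAMPLE_FEED / SAMPLE_POP
        "filter_keep_rate": sample_filtrate / sample_feed if sample_feed else 0.0,  # SAMPLE_FILTRATE / SAMPLE_FEED
        "collisions": sample_collision,                                             # samples dropped due to collisions
    }

print(spe_summary(sample_pop=1_000_000, sample_feed=980, sample_filtrate=700, sample_collision=12))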
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/spec_operation.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/spec_operation.json
new file mode 100644
index 000000000000..be8c0667f1d9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/spec_operation.json
@@ -0,0 +1,102 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED",
+ "PublicDescription": "Counts branches which are speculatively executed and mispredicted."
+ },
+ {
+ "ArchStdEvent": "BR_PRED",
+ "PublicDescription": "Counts branches speculatively executed and were predicted right."
+ },
+ {
+ "ArchStdEvent": "INST_SPEC",
+ "PublicDescription": "Counts operations that have been speculatively executed."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC",
+ "PublicDescription": "Counts unaligned memory read operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses. The event does not count preload operations (PLD, PLI)."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC",
+ "PublicDescription": "Counts unaligned memory write operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses."
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC",
+ "PublicDescription": "Counts unaligned memory operations issued by the CPU. This event counts unaligned accesses (as defined by the actual instruction), even if they are subsequently issued as multiple aligned accesses."
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC",
+ "PublicDescription": "Counts Load-Exclusive operations that have been speculatively executed. Eg: LDREX, LDX"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed and have successfully completed the store operation."
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed and have not successfully completed the store operation."
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC",
+ "PublicDescription": "Counts store-exclusive operations that have been speculatively executed."
+ },
+ {
+ "ArchStdEvent": "LD_SPEC",
+ "PublicDescription": "Counts speculatively executed load operations including Single Instruction Multiple Data (SIMD) load operations."
+ },
+ {
+ "ArchStdEvent": "ST_SPEC",
+ "PublicDescription": "Counts speculatively executed store operations including Single Instruction Multiple Data (SIMD) store operations."
+ },
+ {
+ "ArchStdEvent": "DP_SPEC",
+ "PublicDescription": "Counts speculatively executed logical or arithmetic instructions such as MOV/MVN operations."
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC",
+ "PublicDescription": "Counts speculatively executed Advanced SIMD operations excluding load, store and move micro-operations that move data to or from SIMD (vector) registers."
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC",
+ "PublicDescription": "Counts speculatively executed floating point operations. This event does not count operations that move data to or from floating point (vector) registers."
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC",
+ "PublicDescription": "Counts speculatively executed operations which cause software changes of the PC. Those operations include all taken branch operations."
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC",
+ "PublicDescription": "Counts speculatively executed cryptographic operations except for PMULL and VMULL operations."
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC",
+ "PublicDescription": "Counts immediate branch operations which are speculatively executed."
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC",
+ "PublicDescription": "Counts procedure return operations (RET) which are speculatively executed."
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC",
+ "PublicDescription": "Counts indirect branch operations including procedure returns, which are speculatively executed. This includes operations that force a software change of the PC, other than exception-generating operations. Eg: BR Xn, RET"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC",
+ "PublicDescription": "Counts ISB operations that are executed."
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC",
+ "PublicDescription": "Counts DSB operations that are speculatively issued to Load/Store unit in the CPU."
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC",
+ "PublicDescription": "Counts DMB operations that are speculatively issued to the Load/Store unit in the CPU. This event does not count implied barriers from load acquire/store release operations."
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC",
+ "PublicDescription": "Counts any load acquire operations that are speculatively executed. Eg: LDAR, LDARH, LDARB"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC",
+ "PublicDescription": "Counts any store release operations that are speculatively executed. Eg: STLR, STLRH, STLRB'"
+ }
+]
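Several of the events above pair naturally, for example STREX_PASS_SPEC and STREX_FAIL_SPEC against STREX_SPEC. As a hedged illustration, not taken from the patch, a store-exclusive failure rate can be derived as below, with made-up counts; how the number should be interpreted is workload dependent:

# Sketch only: STREX_FAIL_SPEC / STREX_SPEC from hypothetical readings.
def strex_fail_rate(strex_fail_spec: int, strex_spec: int) -> float:
    # Fraction of speculatively executed store-exclusives that did not complete the store.
    return strex_fail_spec / strex_spec if strex_spec else 0.0

print(f"store-exclusive failure rate: {strex_fail_rate(3_200, 250_000):.2%}")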
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/stall.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/stall.json
new file mode 100644
index 000000000000..688afd8a4061
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/stall.json
@@ -0,0 +1,10 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND",
+ "PublicDescription": "Counts cycles when frontend could not send any micro-operations to the rename stage because of frontend resource stalls caused by fetch memory latency or branch prediction flow stalls. All the frontend slots were empty during the cycle when this event counts."
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND",
+ "PublicDescription": "Counts cycles whenever the rename unit is unable to send any micro-operations to the backend of the pipeline because of backend resource constraints. Backend resource constraints can include issue stage fullness, execution stage fullness, or other internal pipeline resource fullness. All the backend slots were empty during the cycle when this event counts."
+ }
+]
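The two stall events above are cycle counts, so they are normally read against CPU_CYCLES (defined elsewhere in this event set). A minimal Python sketch of that normalisation, with hypothetical values:

# Sketch only: STALL_FRONTEND / CPU_CYCLES and STALL_BACKEND / CPU_CYCLES.
def stall_breakdown(stall_frontend: int, stall_backend: int, cpu_cycles: int) -> dict:
    return {
        "frontend_stall_fraction": stall_frontend / cpu_cycles,  # cycles with no uops delivered to rename
        "backend_stall_fraction": stall_backend / cpu_cycles,    # cycles with no uops accepted by the backend
    }

print(stall_breakdown(stall_frontend=1_500_000, stall_backend=4_200_000, cpu_cycles=20_000_000))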
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/tlb.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/tlb.json
new file mode 100644
index 000000000000..b550af1831f5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n1/tlb.json
@@ -0,0 +1,66 @@
+[
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL",
+ "PublicDescription": "Counts level 1 instruction TLB refills from any Instruction fetch. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL",
+ "PublicDescription": "Counts level 1 data TLB accesses that resulted in TLB refills. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count on an access from an AT(address translation) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB",
+ "PublicDescription": "Counts level 1 data TLB accesses caused by any memory load or store operation. Note that load or store instructions can be broken up into multiple memory operations. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1I_TLB",
+ "PublicDescription": "Counts level 1 instruction TLB accesses, whether the access hits or misses in the TLB. This event counts both demand accesses and prefetch or preload generated accesses."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory operations from both data and instruction fetch, except for those caused by TLB maintenance operations and hardware prefetches."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB",
+ "PublicDescription": "Counts level 2 TLB accesses except those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK",
+ "PublicDescription": "Counts data memory translation table walks caused by a miss in the L2 TLB driven by a memory access. Note that partial translations that also cause a table walk are counted. This event does not count table walks caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK",
+ "PublicDescription": "Counts instruction memory translation table walks caused by a miss in the L2 TLB driven by a memory access. Partial translations that also cause a table walk are counted. This event does not count table walks caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD",
+ "PublicDescription": "Counts level 1 data TLB refills caused by memory read operations. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the translation table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count on an access from an Address Translation (AT) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR",
+ "PublicDescription": "Counts level 1 data TLB refills caused by data side memory write operations. If there are multiple misses in the TLB that are resolved by the refill, then this event only counts once. This event counts for refills caused by preload instructions or hardware prefetch accesses. This event counts regardless of whether the miss hits in L2 or results in a translation table walk. This event will not count if the table walk results in a fault (such as a translation or access fault), since there is no new translation created for the TLB. This event will not count with an access from an Address Translation (AT) instruction."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD",
+ "PublicDescription": "Counts level 1 data TLB accesses caused by memory read operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR",
+ "PublicDescription": "Counts any L1 data side TLB accesses caused by memory write operations. This event counts whether the access hits or misses in the TLB. This event does not count TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory read operations from both data and instruction fetch except for those caused by TLB maintenance operations or hardware prefetches."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR",
+ "PublicDescription": "Counts level 2 TLB refills caused by memory write operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD",
+ "PublicDescription": "Counts level 2 TLB accesses caused by memory read operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR",
+ "PublicDescription": "Counts level 2 TLB accesses caused by memory write operations from both data and instruction fetch except for those caused by TLB maintenance operations."
+ }
+]
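DTLB_WALK and ITLB_WALK above count accesses that miss in the L2 TLB and therefore pay for a translation table walk, so dividing them by the corresponding L1 TLB access events gives a walk rate. A small sketch with hypothetical counts, not part of the patch:

# Sketch only: walk rates built from the TLB events above.
def walk_rate(walks: int, l1_tlb_accesses: int) -> float:
    return walks / l1_tlb_accesses if l1_tlb_accesses else 0.0

dtlb = walk_rate(walks=8_000, l1_tlb_accesses=5_000_000)   # DTLB_WALK / L1D_TLB
itlb = walk_rate(walks=1_200, l1_tlb_accesses=7_500_000)   # ITLB_WALK / L1I_TLB
print(f"data-side walk rate: {dtlb:.4%}, instruction-side walk rate: {itlb:.4%}")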
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/branch.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/branch.json
new file mode 100644
index 000000000000..79f2016c53b0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/branch.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/instruction.json
new file mode 100644
index 000000000000..e57cd55937c6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/instruction.json
@@ -0,0 +1,143 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_NOT_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT8_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT16_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT32_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT64_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json
new file mode 100644
index 000000000000..7b2b21ac150f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/memory.json
@@ -0,0 +1,41 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "LD_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "ST_ALIGN_LAT"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_CHECKED_WR"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json
new file mode 100644
index 000000000000..8ad15b726dca
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/metrics.json
@@ -0,0 +1,273 @@
+[
+ {
+ "ArchStdEvent": "FRONTEND_BOUND",
+ "MetricExpr": "((stall_slot_frontend) if (#slots - 5) else (stall_slot_frontend - cpu_cycles)) / (#slots * cpu_cycles)"
+ },
+ {
+ "ArchStdEvent": "BAD_SPECULATION",
+ "MetricExpr": "(1 - op_retired / op_spec) * (1 - (stall_slot if (#slots - 5) else (stall_slot - cpu_cycles)) / (#slots * cpu_cycles))"
+ },
+ {
+ "ArchStdEvent": "RETIRING",
+ "MetricExpr": "(op_retired / op_spec) * (1 - (stall_slot if (#slots - 5) else (stall_slot - cpu_cycles)) / (#slots * cpu_cycles))"
+ },
+ {
+ "ArchStdEvent": "BACKEND_BOUND"
+ },
+ {
+ "MetricExpr": "L1D_TLB_REFILL / L1D_TLB",
+ "BriefDescription": "The rate of L1D TLB refill to the overall L1D TLB lookups",
+ "MetricGroup": "TLB",
+ "MetricName": "l1d_tlb_miss_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "L1I_TLB_REFILL / L1I_TLB",
+ "BriefDescription": "The rate of L1I TLB refill to the overall L1I TLB lookups",
+ "MetricGroup": "TLB",
+ "MetricName": "l1i_tlb_miss_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "L2D_TLB_REFILL / L2D_TLB",
+ "BriefDescription": "The rate of L2D TLB refill to the overall L2D TLB lookups",
+ "MetricGroup": "TLB",
+ "MetricName": "l2_tlb_miss_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "DTLB_WALK / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of TLB Walks per kilo instructions for data accesses",
+ "MetricGroup": "TLB",
+ "MetricName": "dtlb_mpki",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricExpr": "DTLB_WALK / L1D_TLB",
+ "BriefDescription": "The rate of DTLB Walks to the overall L1D TLB lookups",
+ "MetricGroup": "TLB",
+ "MetricName": "dtlb_walk_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "ITLB_WALK / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of TLB Walks per kilo instructions for instruction accesses",
+ "MetricGroup": "TLB",
+ "MetricName": "itlb_mpki",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricExpr": "ITLB_WALK / L1I_TLB",
+ "BriefDescription": "The rate of ITLB Walks to the overall L1I TLB lookups",
+ "MetricGroup": "TLB",
+ "MetricName": "itlb_walk_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "L1I_CACHE_REFILL / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of L1 I-Cache misses per kilo instructions",
+ "MetricGroup": "Cache",
+ "MetricName": "l1i_cache_mpki",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE",
+ "BriefDescription": "The rate of L1 I-Cache misses to the overall L1 I-Cache",
+ "MetricGroup": "Cache",
+ "MetricName": "l1i_cache_miss_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "L1D_CACHE_REFILL / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of L1 D-Cache misses per kilo instructions",
+ "MetricGroup": "Cache",
+ "MetricName": "l1d_cache_mpki",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE",
+ "BriefDescription": "The rate of L1 D-Cache misses to the overall L1 D-Cache",
+ "MetricGroup": "Cache",
+ "MetricName": "l1d_cache_miss_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "L2D_CACHE_REFILL / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of L2 D-Cache misses per kilo instructions",
+ "MetricGroup": "Cache",
+ "MetricName": "l2d_cache_mpki",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE",
+ "BriefDescription": "The rate of L2 D-Cache misses to the overall L2 D-Cache",
+ "MetricGroup": "Cache",
+ "MetricName": "l2d_cache_miss_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "L3D_CACHE_REFILL / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of L3 D-Cache misses per kilo instructions",
+ "MetricGroup": "Cache",
+ "MetricName": "l3d_cache_mpki",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricExpr": "L3D_CACHE_REFILL / L3D_CACHE",
+ "BriefDescription": "The rate of L3 D-Cache misses to the overall L3 D-Cache",
+ "MetricGroup": "Cache",
+ "MetricName": "l3d_cache_miss_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "LL_CACHE_MISS_RD / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of LL Cache read misses per kilo instructions",
+ "MetricGroup": "Cache",
+ "MetricName": "ll_cache_read_mpki",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricExpr": "LL_CACHE_MISS_RD / LL_CACHE_RD",
+ "BriefDescription": "The rate of LL Cache read misses to the overall LL Cache read",
+ "MetricGroup": "Cache",
+ "MetricName": "ll_cache_read_miss_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "(LL_CACHE_RD - LL_CACHE_MISS_RD) / LL_CACHE_RD",
+ "BriefDescription": "The rate of LL Cache read hit to the overall LL Cache read",
+ "MetricGroup": "Cache",
+ "MetricName": "ll_cache_read_hit_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "BR_MIS_PRED_RETIRED / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of branches mis-predicted per kilo instructions",
+ "MetricGroup": "Branch",
+ "MetricName": "branch_mpki",
+ "ScaleUnit": "1MPKI"
+ },
+ {
+ "MetricExpr": "BR_RETIRED / INST_RETIRED * 1000",
+ "BriefDescription": "The rate of branches retired per kilo instructions",
+ "MetricGroup": "Branch",
+ "MetricName": "branch_pki",
+ "ScaleUnit": "1PKI"
+ },
+ {
+ "MetricExpr": "BR_MIS_PRED_RETIRED / BR_RETIRED",
+ "BriefDescription": "The rate of branches mis-predited to the overall branches",
+ "MetricGroup": "Branch",
+ "MetricName": "branch_miss_pred_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "instructions / CPU_CYCLES",
+ "BriefDescription": "The average number of instructions executed for each cycle.",
+ "MetricGroup": "PEutilization",
+ "MetricName": "ipc"
+ },
+ {
+ "MetricExpr": "ipc / 5",
+ "BriefDescription": "IPC percentage of peak. The peak of IPC is 5.",
+ "MetricGroup": "PEutilization",
+ "MetricName": "ipc_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "INST_RETIRED / CPU_CYCLES",
+ "BriefDescription": "Architecturally executed Instructions Per Cycle (IPC)",
+ "MetricGroup": "PEutilization",
+ "MetricName": "retired_ipc"
+ },
+ {
+ "MetricExpr": "INST_SPEC / CPU_CYCLES",
+ "BriefDescription": "Speculatively executed Instructions Per Cycle (IPC)",
+ "MetricGroup": "PEutilization",
+ "MetricName": "spec_ipc"
+ },
+ {
+ "MetricExpr": "OP_RETIRED / OP_SPEC",
+ "BriefDescription": "Of all the micro-operations issued, what percentage are retired(committed)",
+ "MetricGroup": "PEutilization",
+ "MetricName": "retired_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "1 - OP_RETIRED / OP_SPEC",
+ "BriefDescription": "Of all the micro-operations issued, what percentage are not retired(committed)",
+ "MetricGroup": "PEutilization",
+ "MetricName": "wasted_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "OP_RETIRED / OP_SPEC * (1 - (STALL_SLOT if (#slots - 5) else (STALL_SLOT - CPU_CYCLES)) / (#slots * CPU_CYCLES))",
+ "BriefDescription": "The truly effective ratio of micro-operations executed by the CPU, which means that misprediction and stall are not included",
+ "MetricGroup": "PEutilization",
+ "MetricName": "cpu_utilization",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "LD_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of load instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "load_spec_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "ST_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of store instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "store_spec_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "DP_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of integer data-processing instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "data_process_spec_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "ASE_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of advanced SIMD instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "advanced_simd_spec_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "VFP_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of floating point instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "float_point_spec_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "CRYPTO_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of crypto instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "crypto_spec_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "BR_IMMED_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of branch immediate instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "branch_immed_spec_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "BR_RETURN_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of procedure return instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "branch_return_spec_rate",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "BR_INDIRECT_SPEC / INST_SPEC",
+ "BriefDescription": "The rate of indirect branch instructions speculatively executed to overall instructions speclatively executed",
+ "MetricGroup": "InstructionMix",
+ "MetricName": "branch_indirect_spec_rate",
+ "ScaleUnit": "100%"
+ }
+]
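The top-down expressions at the start of this file encode a level-1 breakdown; the "X if (#slots - 5) else (X - cpu_cycles)" form selects a corrected STALL_SLOT* count when the core reports five slots. Written out as a Python sketch with hypothetical counter values (BACKEND_BOUND is taken as the remainder here purely for illustration; the file itself leaves it to the architecture-standard definition):

# Sketch only: level-1 top-down split following the MetricExpr strings above.
def topdown_level1(slots, cpu_cycles, stall_slot, stall_slot_frontend, op_retired, op_spec):
    def corrected(stall):
        # Use the raw count unless #slots == 5, in which case one slot per cycle is subtracted.
        return stall if slots != 5 else stall - cpu_cycles

    total_slots = slots * cpu_cycles
    frontend_bound = corrected(stall_slot_frontend) / total_slots
    not_stalled = 1 - corrected(stall_slot) / total_slots
    retiring = (op_retired / op_spec) * not_stalled
    bad_speculation = (1 - op_retired / op_spec) * not_stalled
    backend_bound = 1 - frontend_bound - retiring - bad_speculation  # remainder, for this sketch only
    return {"frontend_bound": frontend_bound, "bad_speculation": bad_speculation,
            "retiring": retiring, "backend_bound": backend_bound}

print(topdown_level1(slots=5, cpu_cycles=1_000_000, stall_slot=2_600_000,
                     stall_slot_frontend=1_400_000, op_retired=2_000_000, op_spec=2_400_000))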
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json
new file mode 100644
index 000000000000..20f2165c85fe
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/spe.json
@@ -0,0 +1,14 @@
+[
+ {
+ "ArchStdEvent": "SAMPLE_POP"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FEED"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_FILTRATE"
+ },
+ {
+ "ArchStdEvent": "SAMPLE_COLLISION"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json
new file mode 100644
index 000000000000..3116135c59e2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-n2-v2/trace.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "TRB_WRAP"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT0"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT1"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT2"
+ },
+ {
+ "ArchStdEvent": "TRCEXTOUT3"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT4"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT5"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT6"
+ },
+ {
+ "ArchStdEvent": "CTI_TRIGOUT7"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json
new file mode 100644
index 000000000000..79f2016c53b0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/branch.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json
new file mode 100644
index 000000000000..579c1c993d17
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/bus.json
@@ -0,0 +1,20 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS"
+ },
+ {
+ "ArchStdEvent": "BUS_CYCLES"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "BUS_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "CNT_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json
new file mode 100644
index 000000000000..0141f749bff3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_ALLOCATE"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "DTLB_WALK"
+ },
+ {
+ "ArchStdEvent": "ITLB_WALK"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "LL_CACHE_MISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_INNER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL_OUTER"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_VICTIM"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB_CLEAN"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_INVAL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL_WR"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_RD"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_WR"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_RD"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE_LMISS"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_LMISS_RD"
+ },
+ {
+ "ArchStdEvent": "L3D_CACHE_LMISS_RD"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json
new file mode 100644
index 000000000000..344a2d552ad5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/exception.json
@@ -0,0 +1,47 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "MEMORY_ERROR"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_OTHER"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_TRAP_FIQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json
new file mode 100644
index 000000000000..e29b88fb7f24
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/instruction.json
@@ -0,0 +1,119 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "TTBR_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_RETIRED"
+ },
+ {
+ "ArchStdEvent": "BR_MIS_PRED_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_RETIRED"
+ },
+ {
+ "ArchStdEvent": "OP_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_PASS_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_FAIL_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "RC_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_EMPTY_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_FULL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_PARTIAL_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_FAULT_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json
new file mode 100644
index 000000000000..5aff6e93c1ad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/memory.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "MEM_ACCESS"
+ },
+ {
+ "ArchStdEvent": "REMOTE_ACCESS"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_RD"
+ },
+ {
+ "ArchStdEvent": "MEM_ACCESS_WR"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "UNALIGNED_LDST_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json
new file mode 100644
index 000000000000..f9fae15f7555
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/neoverse-v1/pipeline.json
@@ -0,0 +1,23 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_BACKEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_SLOT"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND_MEM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/common-and-microarch.json b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
new file mode 100644
index 000000000000..492083b99256
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/common-and-microarch.json
@@ -0,0 +1,812 @@
+[
+ {
+ "PublicDescription": "Instruction architecturally executed, Condition code check pass, software increment",
+ "EventCode": "0x00",
+ "EventName": "SW_INCR",
+ "BriefDescription": "Instruction architecturally executed, Condition code check pass, software increment"
+ },
+ {
+ "PublicDescription": "Level 1 instruction cache refill",
+ "EventCode": "0x01",
+ "EventName": "L1I_CACHE_REFILL",
+ "BriefDescription": "Level 1 instruction cache refill"
+ },
+ {
+ "PublicDescription": "Attributable Level 1 instruction TLB refill",
+ "EventCode": "0x02",
+ "EventName": "L1I_TLB_REFILL",
+ "BriefDescription": "Attributable Level 1 instruction TLB refill"
+ },
+ {
+ "PublicDescription": "Level 1 data cache refill",
+ "EventCode": "0x03",
+ "EventName": "L1D_CACHE_REFILL",
+ "BriefDescription": "Level 1 data cache refill"
+ },
+ {
+ "PublicDescription": "Level 1 data cache access",
+ "EventCode": "0x04",
+ "EventName": "L1D_CACHE",
+ "BriefDescription": "Level 1 data cache access"
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data TLB refill",
+ "EventCode": "0x05",
+ "EventName": "L1D_TLB_REFILL",
+ "BriefDescription": "Attributable Level 1 data TLB refill"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, load",
+ "EventCode": "0x06",
+ "EventName": "LD_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, load"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, store",
+ "EventCode": "0x07",
+ "EventName": "ST_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, store"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed",
+ "EventCode": "0x08",
+ "EventName": "INST_RETIRED",
+ "BriefDescription": "Instruction architecturally executed"
+ },
+ {
+ "PublicDescription": "Exception taken",
+ "EventCode": "0x09",
+ "EventName": "EXC_TAKEN",
+ "BriefDescription": "Exception taken"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition check pass, exception return",
+ "EventCode": "0x0a",
+ "EventName": "EXC_RETURN",
+ "BriefDescription": "Instruction architecturally executed, condition check pass, exception return"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR",
+ "EventCode": "0x0b",
+ "EventName": "CID_WRITE_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, write to CONTEXTIDR"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, software change of the PC",
+ "EventCode": "0x0C",
+ "EventName": "PC_WRITE_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, software change of the PC"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, immediate branch",
+ "EventCode": "0x0D",
+ "EventName": "BR_IMMED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, immediate branch"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, procedure return",
+ "EventCode": "0x0E",
+ "EventName": "BR_RETURN_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, procedure return"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, condition code check pass, unaligned",
+ "EventCode": "0x0F",
+ "EventName": "UNALIGNED_LDST_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, condition code check pass, unaligned"
+ },
+ {
+ "PublicDescription": "Mispredicted or not predicted branch speculatively executed",
+ "EventCode": "0x10",
+ "EventName": "BR_MIS_PRED",
+ "BriefDescription": "Mispredicted or not predicted branch speculatively executed"
+ },
+ {
+ "PublicDescription": "Cycle",
+ "EventCode": "0x11",
+ "EventName": "CPU_CYCLES",
+ "BriefDescription": "Cycle"
+ },
+ {
+ "PublicDescription": "Predictable branch speculatively executed",
+ "EventCode": "0x12",
+ "EventName": "BR_PRED",
+ "BriefDescription": "Predictable branch speculatively executed"
+ },
+ {
+ "PublicDescription": "Data memory access",
+ "EventCode": "0x13",
+ "EventName": "MEM_ACCESS",
+ "BriefDescription": "Data memory access"
+ },
+ {
+ "PublicDescription": "Attributable Level 1 instruction cache access",
+ "EventCode": "0x14",
+ "EventName": "L1I_CACHE",
+ "BriefDescription": "Attributable Level 1 instruction cache access"
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data cache write-back",
+ "EventCode": "0x15",
+ "EventName": "L1D_CACHE_WB",
+ "BriefDescription": "Attributable Level 1 data cache write-back"
+ },
+ {
+ "PublicDescription": "Level 2 data cache access",
+ "EventCode": "0x16",
+ "EventName": "L2D_CACHE",
+ "BriefDescription": "Level 2 data cache access"
+ },
+ {
+ "PublicDescription": "Level 2 data refill",
+ "EventCode": "0x17",
+ "EventName": "L2D_CACHE_REFILL",
+ "BriefDescription": "Level 2 data refill"
+ },
+ {
+ "PublicDescription": "Attributable Level 2 data cache write-back",
+ "EventCode": "0x18",
+ "EventName": "L2D_CACHE_WB",
+ "BriefDescription": "Attributable Level 2 data cache write-back"
+ },
+ {
+ "PublicDescription": "Attributable Bus access",
+ "EventCode": "0x19",
+ "EventName": "BUS_ACCESS",
+ "BriefDescription": "Attributable Bus access"
+ },
+ {
+ "PublicDescription": "Local memory error",
+ "EventCode": "0x1a",
+ "EventName": "MEMORY_ERROR",
+ "BriefDescription": "Local memory error"
+ },
+ {
+ "PublicDescription": "Operation speculatively executed",
+ "EventCode": "0x1b",
+ "EventName": "INST_SPEC",
+ "BriefDescription": "Operation speculatively executed"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, Condition code check pass, write to TTBR",
+ "EventCode": "0x1c",
+ "EventName": "TTBR_WRITE_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, Condition code check pass, write to TTBR"
+ },
+ {
+ "PublicDescription": "Bus cycle",
+ "EventCode": "0x1D",
+ "EventName": "BUS_CYCLES",
+ "BriefDescription": "Bus cycle"
+ },
+ {
+ "PublicDescription": "Level 1 data cache allocation without refill",
+ "EventCode": "0x1F",
+ "EventName": "L1D_CACHE_ALLOCATE",
+ "BriefDescription": "Level 1 data cache allocation without refill"
+ },
+ {
+ "PublicDescription": "Attributable Level 2 data cache allocation without refill",
+ "EventCode": "0x20",
+ "EventName": "L2D_CACHE_ALLOCATE",
+ "BriefDescription": "Attributable Level 2 data cache allocation without refill"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, branch",
+ "EventCode": "0x21",
+ "EventName": "BR_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, branch"
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, mispredicted branch",
+ "EventCode": "0x22",
+ "EventName": "BR_MIS_PRED_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, mispredicted branch"
+ },
+ {
+ "PublicDescription": "No operation issued because of the frontend",
+ "EventCode": "0x23",
+ "EventName": "STALL_FRONTEND",
+ "BriefDescription": "No operation issued because of the frontend"
+ },
+ {
+ "PublicDescription": "No operation issued due to the backend",
+ "EventCode": "0x24",
+ "EventName": "STALL_BACKEND",
+ "BriefDescription": "No operation issued due to the backend"
+ },
+ {
+ "PublicDescription": "Attributable Level 1 data or unified TLB access",
+ "EventCode": "0x25",
+ "EventName": "L1D_TLB",
+ "BriefDescription": "Attributable Level 1 data or unified TLB access"
+ },
+ {
+ "PublicDescription": "Attributable Level 1 instruction TLB access",
+ "EventCode": "0x26",
+ "EventName": "L1I_TLB",
+ "BriefDescription": "Attributable Level 1 instruction TLB access"
+ },
+ {
+ "PublicDescription": "Attributable Level 3 data cache allocation without refill",
+ "EventCode": "0x29",
+ "EventName": "L3D_CACHE_ALLOCATE",
+ "BriefDescription": "Attributable Level 3 data cache allocation without refill"
+ },
+ {
+ "PublicDescription": "Attributable Level 3 data cache refill",
+ "EventCode": "0x2A",
+ "EventName": "L3D_CACHE_REFILL",
+ "BriefDescription": "Attributable Level 3 data cache refill"
+ },
+ {
+ "PublicDescription": "Attributable Level 3 data cache access",
+ "EventCode": "0x2B",
+ "EventName": "L3D_CACHE",
+ "BriefDescription": "Attributable Level 3 data cache access"
+ },
+ {
+ "PublicDescription": "Attributable Level 2 data TLB refill",
+ "EventCode": "0x2D",
+ "EventName": "L2D_TLB_REFILL",
+ "BriefDescription": "Attributable Level 2 data TLB refill"
+ },
+ {
+ "PublicDescription": "Attributable Level 2 instruction TLB refill.",
+ "EventCode": "0x2E",
+ "EventName": "L2I_TLB_REFILL",
+ "BriefDescription": "Attributable Level 2 instruction TLB refill."
+ },
+ {
+ "PublicDescription": "Attributable Level 2 data or unified TLB access",
+ "EventCode": "0x2F",
+ "EventName": "L2D_TLB",
+ "BriefDescription": "Attributable Level 2 data or unified TLB access"
+ },
+ {
+ "PublicDescription": "Attributable Level 2 instruction TLB access.",
+ "EventCode": "0x30",
+ "EventName": "L2I_TLB",
+ "BriefDescription": "Attributable Level 2 instruction TLB access."
+ },
+ {
+ "PublicDescription": "Access to another socket in a multi-socket system",
+ "EventCode": "0x31",
+ "EventName": "REMOTE_ACCESS",
+ "BriefDescription": "Access to another socket in a multi-socket system"
+ },
+ {
+ "PublicDescription": "Access to data TLB causes a translation table walk",
+ "EventCode": "0x34",
+ "EventName": "DTLB_WALK",
+ "BriefDescription": "Access to data TLB causes a translation table walk"
+ },
+ {
+ "PublicDescription": "Access to instruction TLB that causes a translation table walk",
+ "EventCode": "0x35",
+ "EventName": "ITLB_WALK",
+ "BriefDescription": "Access to instruction TLB that causes a translation table walk"
+ },
+ {
+ "PublicDescription": "Attributable Last level cache memory read",
+ "EventCode": "0x36",
+ "EventName": "LL_CACHE_RD",
+ "BriefDescription": "Attributable Last level cache memory read"
+ },
+ {
+ "PublicDescription": "Last level cache miss, read",
+ "EventCode": "0x37",
+ "EventName": "LL_CACHE_MISS_RD",
+ "BriefDescription": "Last level cache miss, read"
+ },
+ {
+ "PublicDescription": "Attributable memory read access to another socket in a multi-socket system",
+ "EventCode": "0x38",
+ "EventName": "REMOTE_ACCESS_RD",
+ "BriefDescription": "Attributable memory read access to another socket in a multi-socket system"
+ },
+ {
+ "PublicDescription": "Level 1 data cache long-latency read miss. The counter counts each memory read access counted by L1D_CACHE that incurs additional latency because it returns data from outside the Level 1 data or unified cache of this processing element.",
+ "EventCode": "0x39",
+ "EventName": "L1D_CACHE_LMISS_RD",
+ "BriefDescription": "Level 1 data cache long-latency read miss"
+ },
+ {
+ "PublicDescription": "Micro-operation architecturally executed. The counter counts each operation counted by OP_SPEC that would be executed in a simple sequential execution of the program.",
+ "EventCode": "0x3A",
+ "EventName": "OP_RETIRED",
+ "BriefDescription": "Micro-operation architecturally executed"
+ },
+ {
+ "PublicDescription": "Micro-operation speculatively executed. The counter counts the number of operations executed by the processing element, including those that are executed speculatively and would not be executed in a simple sequential execution of the program.",
+ "EventCode": "0x3B",
+ "EventName": "OP_SPEC",
+ "BriefDescription": "Micro-operation speculatively executed"
+ },
+ {
+ "PublicDescription": "No operation sent for execution. The counter counts every attributable cycle on which no attributable instruction or operation was sent for execution on this processing element.",
+ "EventCode": "0x3C",
+ "EventName": "STALL",
+ "BriefDescription": "No operation sent for execution"
+ },
+ {
+ "PublicDescription": "No operation sent for execution on a slot due to the backend. Counts each slot counted by STALL_SLOT where no attributable instruction or operation was sent for execution because the backend is unable to accept it.",
+ "EventCode": "0x3D",
+ "EventName": "STALL_SLOT_BACKEND",
+ "BriefDescription": "No operation sent for execution on a slot due to the backend"
+ },
+ {
+ "PublicDescription": "No operation sent for execution on a slot due to the frontend. Counts each slot counted by STALL_SLOT where no attributable instruction or operation was sent for execution because there was no attributable instruction or operation available to issue from the processing element from the frontend for the slot.",
+ "EventCode": "0x3E",
+ "EventName": "STALL_SLOT_FRONTEND",
+ "BriefDescription": "No operation sent for execution on a slot due to the frontend"
+ },
+ {
+ "PublicDescription": "No operation sent for execution on a slot. The counter counts on each attributable cycle the number of instruction or operation slots that were not occupied by an instruction or operation attributable to the processing element.",
+ "EventCode": "0x3F",
+ "EventName": "STALL_SLOT",
+ "BriefDescription": "No operation sent for execution on a slot"
+ },
+ {
+ "PublicDescription": "Sample Population",
+ "EventCode": "0x4000",
+ "EventName": "SAMPLE_POP",
+ "BriefDescription": "Sample Population"
+ },
+ {
+ "PublicDescription": "Sample Taken",
+ "EventCode": "0x4001",
+ "EventName": "SAMPLE_FEED",
+ "BriefDescription": "Sample Taken"
+ },
+ {
+ "PublicDescription": "Sample Taken and not removed by filtering",
+ "EventCode": "0x4002",
+ "EventName": "SAMPLE_FILTRATE",
+ "BriefDescription": "Sample Taken and not removed by filtering"
+ },
+ {
+ "PublicDescription": "Sample collided with previous sample",
+ "EventCode": "0x4003",
+ "EventName": "SAMPLE_COLLISION",
+ "BriefDescription": "Sample collided with previous sample"
+ },
+ {
+ "PublicDescription": "Constant frequency cycles. The counter increments at a constant frequency equal to the rate of increment of the system counter, CNTPCT_EL0.",
+ "EventCode": "0x4004",
+ "EventName": "CNT_CYCLES",
+ "BriefDescription": "Constant frequency cycles"
+ },
+ {
+ "PublicDescription": "Memory stall cycles. The counter counts each cycle counted by STALL_BACKEND where there is a cache miss in the last level of cache within the processing element clock domain",
+ "EventCode": "0x4005",
+ "EventName": "STALL_BACKEND_MEM",
+ "BriefDescription": "Memory stall cycles"
+ },
+ {
+ "PublicDescription": "Level 1 instruction cache long-latency read miss. If the L1I_CACHE_RD event is implemented, the counter counts each access counted by L1I_CACHE_RD that incurs additional latency because it returns instructions from outside of the Level 1 instruction cache of this PE. If the L1I_CACHE_RD event is not implemented, the counter counts each access counted by L1I_CACHE that incurs additional latency because it returns instructions from outside the Level 1 instruction cache of this PE. The event indicates to software that the access missed in the Level 1 instruction cache and might have a significant performance impact due to the additional latency, compared to the latency of an access that hits in the Level 1 instruction cache.",
+ "EventCode": "0x4006",
+ "EventName": "L1I_CACHE_LMISS",
+ "BriefDescription": "Level 1 instruction cache long-latency read miss"
+ },
+ {
+ "PublicDescription": "Level 2 data cache long-latency read miss. The counter counts each memory read access counted by L2D_CACHE that incurs additional latency because it returns data from outside the Level 2 data or unified cache of this processing element. The event indicates to software that the access missed in the Level 2 data or unified cache and might have a significant performance impact compared to the latency of an access that hits in the Level 2 data or unified cache.",
+ "EventCode": "0x4009",
+ "EventName": "L2D_CACHE_LMISS_RD",
+ "BriefDescription": "Level 2 data cache long-latency read miss"
+ },
+ {
+ "PublicDescription": "Level 3 data cache long-latency read miss. The counter counts each memory read access counted by L3D_CACHE that incurs additional latency because it returns data from outside the Level 3 data or unified cache of this processing element. The event indicates to software that the access missed in the Level 3 data or unified cache and might have a significant performance impact compared to the latency of an access that hits in the Level 3 data or unified cache.",
+ "EventCode": "0x400B",
+ "EventName": "L3D_CACHE_LMISS_RD",
+ "BriefDescription": "Level 3 data cache long-latency read miss"
+ },
+ {
+ "PublicDescription": "Trace buffer current write pointer wrapped",
+ "EventCode": "0x400C",
+ "EventName": "TRB_WRAP",
+ "BriefDescription": "Trace buffer current write pointer wrapped"
+ },
+ {
+ "PublicDescription": "PMU overflow, counters accessible to EL1 and EL0",
+ "EventCode": "0x400D",
+ "EventName": "PMU_OVFS",
+ "BriefDescription": "PMU overflow, counters accessible to EL1 and EL0"
+ },
+ {
+ "PublicDescription": "Trace buffer Trigger Event",
+ "EventCode": "0x400E",
+ "EventName": "TRB_TRIG",
+ "BriefDescription": "Trace buffer Trigger Event"
+ },
+ {
+ "PublicDescription": "PMU overflow, counters reserved for use by EL2",
+ "EventCode": "0x400F",
+ "EventName": "PMU_HOVFS",
+ "BriefDescription": "PMU overflow, counters reserved for use by EL2"
+ },
+ {
+ "PublicDescription": "PE Trace Unit external output 0",
+ "EventCode": "0x4010",
+ "EventName": "TRCEXTOUT0",
+ "BriefDescription": "PE Trace Unit external output 0"
+ },
+ {
+ "PublicDescription": "PE Trace Unit external output 1",
+ "EventCode": "0x4011",
+ "EventName": "TRCEXTOUT1",
+ "BriefDescription": "PE Trace Unit external output 1"
+ },
+ {
+ "PublicDescription": "PE Trace Unit external output 2",
+ "EventCode": "0x4012",
+ "EventName": "TRCEXTOUT2",
+ "BriefDescription": "PE Trace Unit external output 2"
+ },
+ {
+ "PublicDescription": "PE Trace Unit external output 3",
+ "EventCode": "0x4013",
+ "EventName": "TRCEXTOUT3",
+ "BriefDescription": "PE Trace Unit external output 3"
+ },
+ {
+ "PublicDescription": "Cross-trigger Interface output trigger 4",
+ "EventCode": "0x4018",
+ "EventName": "CTI_TRIGOUT4",
+ "BriefDescription": "Cross-trigger Interface output trigger 4"
+ },
+ {
+ "PublicDescription": "Cross-trigger Interface output trigger 5 ",
+ "EventCode": "0x4019",
+ "EventName": "CTI_TRIGOUT5",
+ "BriefDescription": "Cross-trigger Interface output trigger 5 "
+ },
+ {
+ "PublicDescription": "Cross-trigger Interface output trigger 6",
+ "EventCode": "0x401A",
+ "EventName": "CTI_TRIGOUT6",
+ "BriefDescription": "Cross-trigger Interface output trigger 6"
+ },
+ {
+ "PublicDescription": "Cross-trigger Interface output trigger 7",
+ "EventCode": "0x401B",
+ "EventName": "CTI_TRIGOUT7",
+ "BriefDescription": "Cross-trigger Interface output trigger 7"
+ },
+ {
+ "PublicDescription": "Access with additional latency from alignment",
+ "EventCode": "0x4020",
+ "EventName": "LDST_ALIGN_LAT",
+ "BriefDescription": "Access with additional latency from alignment"
+ },
+ {
+ "PublicDescription": "Load with additional latency from alignment",
+ "EventCode": "0x4021",
+ "EventName": "LD_ALIGN_LAT",
+ "BriefDescription": "Load with additional latency from alignment"
+ },
+ {
+ "PublicDescription": "Store with additional latency from alignment",
+ "EventCode": "0x4022",
+ "EventName": "ST_ALIGN_LAT",
+ "BriefDescription": "Store with additional latency from alignment"
+ },
+ {
+ "PublicDescription": "Checked data memory access",
+ "EventCode": "0x4024",
+ "EventName": "MEM_ACCESS_CHECKED",
+ "BriefDescription": "Checked data memory access"
+ },
+ {
+ "PublicDescription": "Checked data memory access, read",
+ "EventCode": "0x4025",
+ "EventName": "MEM_ACCESS_CHECKED_RD",
+ "BriefDescription": "Checked data memory access, read"
+ },
+ {
+ "PublicDescription": "Checked data memory access, write",
+ "EventCode": "0x4026",
+ "EventName": "MEM_ACCESS_CHECKED_WR",
+ "BriefDescription": "Checked data memory access, write"
+ },
+ {
+ "PublicDescription": "SIMD Instruction architecturally executed.",
+ "EventCode": "0x8000",
+ "EventName": "SIMD_INST_RETIRED",
+ "BriefDescription": "SIMD Instruction architecturally executed."
+ },
+ {
+ "PublicDescription": "Instruction architecturally executed, SVE.",
+ "EventCode": "0x8002",
+ "EventName": "SVE_INST_RETIRED",
+ "BriefDescription": "Instruction architecturally executed, SVE."
+ },
+ {
+ "PublicDescription": "ASE operations speculatively executed",
+ "EventCode": "0x8005",
+ "EventName": "ASE_INST_SPEC",
+ "BriefDescription": "ASE operations speculatively executed"
+ },
+ {
+ "PublicDescription": "SVE operations speculatively executed",
+ "EventCode": "0x8006",
+ "EventName": "SVE_INST_SPEC",
+ "BriefDescription": "SVE operations speculatively executed"
+ },
+ {
+ "PublicDescription": "Microarchitectural operation, Operations speculatively executed.",
+ "EventCode": "0x8008",
+ "EventName": "UOP_SPEC",
+ "BriefDescription": "Microarchitectural operation, Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE Math accelerator Operations speculatively executed.",
+ "EventCode": "0x800E",
+ "EventName": "SVE_MATH_SPEC",
+ "BriefDescription": "SVE Math accelerator Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Floating-point Operations speculatively executed.",
+ "EventCode": "0x8010",
+ "EventName": "FP_SPEC",
+ "BriefDescription": "Floating-point Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Floating-point half-precision operations speculatively executed",
+ "EventCode": "0x8014",
+ "EventName": "FP_HP_SPEC",
+ "BriefDescription": "Floating-point half-precision operations speculatively executed"
+ },
+ {
+ "PublicDescription": "Floating-point single-precision operations speculatively executed",
+ "EventCode": "0x8018",
+ "EventName": "FP_SP_SPEC",
+ "BriefDescription": "Floating-point single-precision operations speculatively executed"
+ },
+ {
+ "PublicDescription": "Floating-point double-precision operations speculatively executed",
+ "EventCode": "0x801C",
+ "EventName": "FP_DP_SPEC",
+ "BriefDescription": "Floating-point double-precision operations speculatively executed"
+ },
+ {
+ "PublicDescription": "Floating-point FMA Operations speculatively executed.",
+ "EventCode": "0x8028",
+ "EventName": "FP_FMA_SPEC",
+ "BriefDescription": "Floating-point FMA Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Floating-point reciprocal estimate Operations speculatively executed.",
+ "EventCode": "0x8034",
+ "EventName": "FP_RECPE_SPEC",
+ "BriefDescription": "Floating-point reciprocal estimate Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "floating-point convert Operations speculatively executed.",
+ "EventCode": "0x8038",
+ "EventName": "FP_CVT_SPEC",
+ "BriefDescription": "floating-point convert Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE integer Operations speculatively executed.",
+ "EventCode": "0x8043",
+ "EventName": "ASE_SVE_INT_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE integer Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE predicated Operations speculatively executed.",
+ "EventCode": "0x8074",
+ "EventName": "SVE_PRED_SPEC",
+ "BriefDescription": "SVE predicated Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE predicated operations with no active predicates speculatively executed",
+ "EventCode": "0x8075",
+ "EventName": "SVE_PRED_EMPTY_SPEC",
+ "BriefDescription": "SVE predicated operations with no active predicates speculatively executed"
+ },
+ {
+ "PublicDescription": "SVE predicated operations speculatively executed with all active predicates",
+ "EventCode": "0x8076",
+ "EventName": "SVE_PRED_FULL_SPEC",
+ "BriefDescription": "SVE predicated operations speculatively executed with all active predicates"
+ },
+ {
+ "PublicDescription": "SVE predicated operations speculatively executed with partially active predicates",
+ "EventCode": "0x8077",
+ "EventName": "SVE_PRED_PARTIAL_SPEC",
+ "BriefDescription": "SVE predicated operations speculatively executed with partially active predicates"
+ },
+ {
+ "PublicDescription": "SVE predicated operations with empty or partially active predicates",
+ "EventCode": "0x8079",
+ "EventName": "SVE_PRED_NOT_FULL_SPEC",
+ "BriefDescription": "SVE predicated operations with empty or partially active predicates"
+ },
+ {
+ "PublicDescription": "SVE MOVPRFX Operations speculatively executed.",
+ "EventCode": "0x807C",
+ "EventName": "SVE_MOVPRFX_SPEC",
+ "BriefDescription": "SVE MOVPRFX Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE MOVPRFX unfused Operations speculatively executed.",
+ "EventCode": "0x807F",
+ "EventName": "SVE_MOVPRFX_U_SPEC",
+ "BriefDescription": "SVE MOVPRFX unfused Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE load Operations speculatively executed.",
+ "EventCode": "0x8085",
+ "EventName": "ASE_SVE_LD_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE load Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE store Operations speculatively executed.",
+ "EventCode": "0x8086",
+ "EventName": "ASE_SVE_ST_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE store Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Prefetch Operations speculatively executed.",
+ "EventCode": "0x8087",
+ "EventName": "PRF_SPEC",
+ "BriefDescription": "Prefetch Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "General-purpose register load Operations speculatively executed.",
+ "EventCode": "0x8089",
+ "EventName": "BASE_LD_REG_SPEC",
+ "BriefDescription": "General-purpose register load Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "General-purpose register store Operations speculatively executed.",
+ "EventCode": "0x808A",
+ "EventName": "BASE_ST_REG_SPEC",
+ "BriefDescription": "General-purpose register store Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE unpredicated load register Operations speculatively executed.",
+ "EventCode": "0x8091",
+ "EventName": "SVE_LDR_REG_SPEC",
+ "BriefDescription": "SVE unpredicated load register Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE unpredicated store register Operations speculatively executed.",
+ "EventCode": "0x8092",
+ "EventName": "SVE_STR_REG_SPEC",
+ "BriefDescription": "SVE unpredicated store register Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE load predicate register Operations speculatively executed.",
+ "EventCode": "0x8095",
+ "EventName": "SVE_LDR_PREG_SPEC",
+ "BriefDescription": "SVE load predicate register Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE store predicate register Operations speculatively executed.",
+ "EventCode": "0x8096",
+ "EventName": "SVE_STR_PREG_SPEC",
+ "BriefDescription": "SVE store predicate register Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE contiguous prefetch element Operations speculatively executed.",
+ "EventCode": "0x809F",
+ "EventName": "SVE_PRF_CONTIG_SPEC",
+ "BriefDescription": "SVE contiguous prefetch element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed.",
+ "EventCode": "0x80A5",
+ "EventName": "ASE_SVE_LD_MULTI_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE contiguous load multiple vector Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE contiguous store multiple vector Operations speculatively executed.",
+ "EventCode": "0x80A6",
+ "EventName": "ASE_SVE_ST_MULTI_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE contiguous store multiple vector Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE gather-load Operations speculatively executed.",
+ "EventCode": "0x80AD",
+ "EventName": "SVE_LD_GATHER_SPEC",
+ "BriefDescription": "SVE gather-load Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE scatter-store Operations speculatively executed.",
+ "EventCode": "0x80AE",
+ "EventName": "SVE_ST_SCATTER_SPEC",
+ "BriefDescription": "SVE scatter-store Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE gather-prefetch Operations speculatively executed.",
+ "EventCode": "0x80AF",
+ "EventName": "SVE_PRF_GATHER_SPEC",
+ "BriefDescription": "SVE gather-prefetch Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE First-fault load Operations speculatively executed.",
+ "EventCode": "0x80BC",
+ "EventName": "SVE_LDFF_SPEC",
+ "BriefDescription": "SVE First-fault load Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "SVE first-fault load operations speculatively executed which set FFR bit to 0",
+ "EventCode": "0x80BD",
+ "EventName": "SVE_LDFF_FAULT_SPEC",
+ "BriefDescription": "SVE first-fault load operations speculatively executed which set FFR bit to 0"
+ },
+ {
+ "PublicDescription": "Scalable floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C0",
+ "EventName": "FP_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Non-scalable floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C1",
+ "EventName": "FP_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Scalable half-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C2",
+ "EventName": "FP_HP_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable half-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Non-scalable half-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C3",
+ "EventName": "FP_HP_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable half-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Scalable single-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C4",
+ "EventName": "FP_SP_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable single-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Non-scalable single-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C5",
+ "EventName": "FP_SP_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable single-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Scalable double-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C6",
+ "EventName": "FP_DP_SCALE_OPS_SPEC",
+ "BriefDescription": "Scalable double-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Non-scalable double-precision floating-point element Operations speculatively executed.",
+ "EventCode": "0x80C7",
+ "EventName": "FP_DP_FIXED_OPS_SPEC",
+ "BriefDescription": "Non-scalable double-precision floating-point element Operations speculatively executed."
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed",
+ "EventCode": "0x80E3",
+ "EventName": "ASE_SVE_INT8_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE 8-bit integer operations speculatively executed"
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE 16-bit integer operations speculatively executed",
+ "EventCode": "0x80E7",
+ "EventName": "ASE_SVE_INT16_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE 16-bit integer operations speculatively executed"
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE 32-bit integer operations speculatively executed",
+ "EventCode": "0x80EB",
+ "EventName": "ASE_SVE_INT32_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE 32-bit integer operations speculatively executed"
+ },
+ {
+ "PublicDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed",
+ "EventCode": "0x80EF",
+ "EventName": "ASE_SVE_INT64_SPEC",
+ "BriefDescription": "Advanced SIMD and SVE 64-bit integer operations speculatively executed"
+ }
+]
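The STALL_SLOT, STALL_SLOT_FRONTEND and STALL_SLOT_BACKEND entries above count unused issue slots rather than stalled cycles. A common way to turn them into top-level frontend/backend-bound fractions is to divide by the total slot budget, CPU_CYCLES times the machine width; the sketch below only illustrates that arithmetic. The events file defines no such metric, the slots-per-cycle width is core-specific, and the helper name is invented here.

# Illustrative only: derive frontend/backend-bound fractions from the slot
# events above. 'slots_per_cycle' is core-specific and assumed, and the
# division by the slot budget is a common convention, not defined in this file.

def slot_stall_fractions(stall_slot_frontend: int,
                         stall_slot_backend: int,
                         cpu_cycles: int,
                         slots_per_cycle: int) -> tuple[float, float]:
    total_slots = cpu_cycles * slots_per_cycle        # slot budget over the run
    frontend_bound = stall_slot_frontend / total_slots
    backend_bound = stall_slot_backend / total_slots
    return frontend_bound, backend_bound

# Example: 1e9 cycles on an assumed 5-wide core, 1.2e9 frontend slot stalls
# and 0.8e9 backend slot stalls -> roughly 24% frontend, 16% backend bound.
print(slot_stall_fractions(1_200_000_000, 800_000_000, 1_000_000_000, 5))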
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/ddrc.json b/tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/ddrc.json
new file mode 100644
index 000000000000..3b1cd708f568
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/ddrc.json
@@ -0,0 +1,37 @@
+[
+ {
+ "BriefDescription": "ddr cycles event",
+ "EventCode": "0x00",
+ "EventName": "imx8mm_ddr.cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MM"
+ },
+ {
+ "BriefDescription": "ddr read-cycles event",
+ "EventCode": "0x2a",
+ "EventName": "imx8mm_ddr.read_cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MM"
+ },
+ {
+ "BriefDescription": "ddr write-cycles event",
+ "EventCode": "0x2b",
+ "EventName": "imx8mm_ddr.write_cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MM"
+ },
+ {
+ "BriefDescription": "ddr read event",
+ "EventCode": "0x35",
+ "EventName": "imx8mm_ddr.read",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MM"
+ },
+ {
+ "BriefDescription": "ddr write event",
+ "EventCode": "0x38",
+ "EventName": "imx8mm_ddr.write",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MM"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/metrics.json
new file mode 100644
index 000000000000..f416fa052337
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx8mm/sys/metrics.json
@@ -0,0 +1,18 @@
+[
+ {
+ "BriefDescription": "bytes all masters read from ddr based on read-cycles event",
+ "MetricName": "imx8mm_ddr_read.all",
+ "MetricExpr": "imx8mm_ddr.read_cycles * 4 * 4",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MM"
+ },
+ {
+ "BriefDescription": "bytes all masters write to ddr based on write-cycles event",
+ "MetricName": "imx8mm_ddr_write.all",
+ "MetricExpr": "imx8mm_ddr.write_cycles * 4 * 4",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MM"
+ }
+]
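The two imx8mm metrics above convert raw cycle counts into kilobytes: MetricExpr multiplies imx8mm_ddr.read_cycles or write_cycles by 4 * 4, the bytes moved per counted cycle (the i.MX8MN file further down uses 4 * 2 instead, presumably reflecting its narrower DDR interface), and ScaleUnit 9.765625e-4KB is simply 1/1024, turning bytes into KB. A minimal sketch of that arithmetic follows; the helper name and the bus-width reading are assumptions of this note, not statements from the JSON.

# Sketch of how perf evaluates imx8mm_ddr_read.all from a raw counter value.
# 'bytes_per_cycle' mirrors the 4 * 4 in MetricExpr; the i.MX8MN metrics use
# 4 * 2 instead (assumed to reflect a narrower DDR interface).

def ddr_read_kb(read_cycles: int, bytes_per_cycle: int = 4 * 4) -> float:
    total_bytes = read_cycles * bytes_per_cycle
    return total_bytes * 9.765625e-4      # ScaleUnit: 1/1024, i.e. bytes -> KB

# 1,000,000 read cycles -> 16,000,000 bytes -> 15625.0 KB
print(ddr_read_kb(1_000_000))

The imx8mn and imx8mq metrics below follow the same pattern, differing only in that byte factor.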
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/ddrc.json b/tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/ddrc.json
new file mode 100644
index 000000000000..8352e73d6d35
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/ddrc.json
@@ -0,0 +1,37 @@
+[
+ {
+ "BriefDescription": "ddr cycles event",
+ "EventCode": "0x00",
+ "EventName": "imx8mn_ddr.cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MN"
+ },
+ {
+ "BriefDescription": "ddr read-cycles event",
+ "EventCode": "0x2a",
+ "EventName": "imx8mn_ddr.read_cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MN"
+ },
+ {
+ "BriefDescription": "ddr write-cycles event",
+ "EventCode": "0x2b",
+ "EventName": "imx8mn_ddr.write_cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MN"
+ },
+ {
+ "BriefDescription": "ddr read event",
+ "EventCode": "0x35",
+ "EventName": "imx8mn_ddr.read",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MN"
+ },
+ {
+ "BriefDescription": "ddr write event",
+ "EventCode": "0x38",
+ "EventName": "imx8mn_ddr.write",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MN"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/metrics.json
new file mode 100644
index 000000000000..2bbba4d8ea5b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx8mn/sys/metrics.json
@@ -0,0 +1,18 @@
+[
+ {
+ "BriefDescription": "bytes all masters read from ddr based on read-cycles event",
+ "MetricName": "imx8mn_ddr_read.all",
+ "MetricExpr": "imx8mn_ddr.read_cycles * 4 * 2",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MN"
+ },
+ {
+ "BriefDescription": "bytes all masters write to ddr based on write-cycles event",
+ "MetricName": "imx8mn_ddr_write.all",
+ "MetricExpr": "imx8mn_ddr.write_cycles * 4 * 2",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MN"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/ddrc.json b/tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/ddrc.json
new file mode 100644
index 000000000000..f9a89efc9b24
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/ddrc.json
@@ -0,0 +1,37 @@
+[
+ {
+ "BriefDescription": "ddr cycles event",
+ "EventCode": "0x00",
+ "EventName": "imx8mp_ddr.cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "ddr read-cycles event",
+ "EventCode": "0x2a",
+ "EventName": "imx8mp_ddr.read_cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "ddr write-cycles event",
+ "EventCode": "0x2b",
+ "EventName": "imx8mp_ddr.write_cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "ddr read event",
+ "EventCode": "0x35",
+ "EventName": "imx8mp_ddr.read",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "ddr write event",
+ "EventCode": "0x38",
+ "EventName": "imx8mp_ddr.write",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/metrics.json
new file mode 100644
index 000000000000..8b9544424b3f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx8mp/sys/metrics.json
@@ -0,0 +1,466 @@
+[
+ {
+ "BriefDescription": "bytes of all masters read from ddr",
+ "MetricName": "imx8mp_ddr_read.all",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0xffff\\,axi_id\\=0x0000@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of all masters write to ddr",
+ "MetricName": "imx8mp_ddr_write.all",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0xffff\\,axi_id\\=0x0000@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of a53 core read from ddr",
+ "MetricName": "imx8mp_ddr_read.a53",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0000@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of a53 core write to ddr",
+ "MetricName": "imx8mp_ddr_write.a53",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0000@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of supermix(m7) core read from ddr",
+ "MetricName": "imx8mp_ddr_read.supermix",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x000f\\,axi_id\\=0x0020@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of supermix(m7) write to ddr",
+ "MetricName": "imx8mp_ddr_write.supermix",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x000f\\,axi_id\\=0x0020@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of gpu 3d read from ddr",
+ "MetricName": "imx8mp_ddr_read.3d",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0070@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of gpu 3d write to ddr",
+ "MetricName": "imx8mp_ddr_write.3d",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0070@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of gpu 2d read from ddr",
+ "MetricName": "imx8mp_ddr_read.2d",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0071@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of gpu 2d write to ddr",
+ "MetricName": "imx8mp_ddr_write.2d",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0071@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display lcdif1 read from ddr",
+ "MetricName": "imx8mp_ddr_read.lcdif1",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0068@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display lcdif1 write to ddr",
+ "MetricName": "imx8mp_ddr_write.lcdif1",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0068@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display lcdif2 read from ddr",
+ "MetricName": "imx8mp_ddr_read.lcdif2",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0069@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display lcdif2 write to ddr",
+ "MetricName": "imx8mp_ddr_write.lcdif2",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0069@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isi1 read from ddr",
+ "MetricName": "imx8mp_ddr_read.isi1",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x006a@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isi1 write to ddr",
+ "MetricName": "imx8mp_ddr_write.isi1",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x006a@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isi2 read from ddr",
+ "MetricName": "imx8mp_ddr_read.isi2",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x006b@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isi2 write to ddr",
+ "MetricName": "imx8mp_ddr_write.isi2",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x006b@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isi3 read from ddr",
+ "MetricName": "imx8mp_ddr_read.isi3",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x006c@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isi3 write to ddr",
+ "MetricName": "imx8mp_ddr_write.isi3",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x006c@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isp1 read from ddr",
+ "MetricName": "imx8mp_ddr_read.isp1",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x006d@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isp1 write to ddr",
+ "MetricName": "imx8mp_ddr_write.isp1",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x006d@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isp2 read from ddr",
+ "MetricName": "imx8mp_ddr_read.isp2",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x006e@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display isp2 write to ddr",
+ "MetricName": "imx8mp_ddr_write.isp2",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x006e@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display dewarp read from ddr",
+ "MetricName": "imx8mp_ddr_read.dewarp",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x006f@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of display dewarp write to ddr",
+ "MetricName": "imx8mp_ddr_write.dewarp",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x006f@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of vpu1 read from ddr",
+ "MetricName": "imx8mp_ddr_read.vpu1",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x007c@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of vpu1 write to ddr",
+ "MetricName": "imx8mp_ddr_write.vpu1",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x007c@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of vpu2 read from ddr",
+ "MetricName": "imx8mp_ddr_read.vpu2",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x007d@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of vpu2 write to ddr",
+ "MetricName": "imx8mp_ddr_write.vpu2",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x007d@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of vpu3 read from ddr",
+ "MetricName": "imx8mp_ddr_read.vpu3",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x007e@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of vpu3 write to ddr",
+ "MetricName": "imx8mp_ddr_write.vpu3",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x007e@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of npu read from ddr",
+ "MetricName": "imx8mp_ddr_read.npu",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0073@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of npu write to ddr",
+ "MetricName": "imx8mp_ddr_write.npu",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0073@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hsio usb1 read from ddr",
+ "MetricName": "imx8mp_ddr_read.usb1",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0078@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hsio usb1 write to ddr",
+ "MetricName": "imx8mp_ddr_write.usb1",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0078@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hsio usb2 read from ddr",
+ "MetricName": "imx8mp_ddr_read.usb2",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0079@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hsio usb2 write to ddr",
+ "MetricName": "imx8mp_ddr_write.usb2",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0079@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hsio pci read from ddr",
+ "MetricName": "imx8mp_ddr_read.pci",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x007a@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hsio pci write to ddr",
+ "MetricName": "imx8mp_ddr_write.pci",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x007a@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hdmi_tx hrv_mwr read from ddr",
+ "MetricName": "imx8mp_ddr_read.hdmi_hrv_mwr",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0074@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hdmi_tx hrv_mwr write to ddr",
+ "MetricName": "imx8mp_ddr_write.hdmi_hrv_mwr",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0074@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hdmi_tx lcdif read from ddr",
+ "MetricName": "imx8mp_ddr_read.hdmi_lcdif",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0075@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hdmi_tx lcdif write to ddr",
+ "MetricName": "imx8mp_ddr_write.hdmi_lcdif",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0075@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hdmi_tx tx_hdcp read from ddr",
+ "MetricName": "imx8mp_ddr_read.hdmi_hdcp",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0076@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of hdmi_tx tx_hdcp write to ddr",
+ "MetricName": "imx8mp_ddr_write.hdmi_hdcp",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0076@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio dsp read from ddr",
+ "MetricName": "imx8mp_ddr_read.audio_dsp",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0041@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio dsp write to ddr",
+ "MetricName": "imx8mp_ddr_write.audio_dsp",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0041@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma2_per read from ddr",
+ "MetricName": "imx8mp_ddr_read.audio_sdma2_per",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0062@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma2_per write to ddr",
+ "MetricName": "imx8mp_ddr_write.audio_sdma2_per",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0062@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma2_burst read from ddr",
+ "MetricName": "imx8mp_ddr_read.audio_sdma2_burst",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0063@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma2_burst write to ddr",
+ "MetricName": "imx8mp_ddr_write.audio_sdma2_burst",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0063@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma3_per read from ddr",
+ "MetricName": "imx8mp_ddr_read.audio_sdma3_per",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0064@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma3_per write to ddr",
+ "MetricName": "imx8mp_ddr_write.audio_sdma3_per",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0064@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma3_burst read from ddr",
+ "MetricName": "imx8mp_ddr_read.audio_sdma3_burst",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0065@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma3_burst write to ddr",
+ "MetricName": "imx8mp_ddr_write.audio_sdma3_burst",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0065@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma_pif read from ddr",
+ "MetricName": "imx8mp_ddr_read.audio_sdma_pif",
+ "MetricExpr": "imx8_ddr0@axid\\-read\\,axi_mask\\=0x0000\\,axi_id\\=0x0066@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ },
+ {
+ "BriefDescription": "bytes of audio sdma_pif write to ddr",
+ "MetricName": "imx8mp_ddr_write.audio_sdma_pif",
+ "MetricExpr": "imx8_ddr0@axid\\-write\\,axi_mask\\=0x0000\\,axi_id\\=0x0066@",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MP"
+ }
+]
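Unlike the cycle-based files, the imx8mp metrics above program the DDR PMU's axid-read or axid-write event with an axi_id/axi_mask pair so that only traffic from one bus master is counted. From the values used here, axi_mask=0xffff with axi_id=0x0000 matches every master, axi_mask=0x0000 with a specific axi_id isolates a single master, and the supermix entries (axi_mask=0x000f, axi_id=0x0020) cover a small range of IDs. The sketch below spells out that masked comparison; treat it as an inference from these expressions rather than a description of the driver's exact filter semantics, and the function name is invented for illustration.

# Assumption: a set bit in axi_mask marks an AXI ID bit as "don't care"; this
# is inferred from how the JSON pairs mask=0xffff/id=0x0000 for "all masters"
# and mask=0x0000/id=<master> for single masters, not from PMU documentation.

def axi_id_matches(transaction_id: int, axi_id: int, axi_mask: int) -> bool:
    """Return True if a transaction's AXI ID passes the assumed filter."""
    compare_bits = 0xFFFF & ~axi_mask          # bits that must match exactly
    return (transaction_id & compare_bits) == (axi_id & compare_bits)

# Examples mirroring entries above:
assert axi_id_matches(0x0070, axi_id=0x0000, axi_mask=0xFFFF)   # .all: everything matches
assert axi_id_matches(0x0070, axi_id=0x0070, axi_mask=0x0000)   # .3d: GPU 3D only
assert axi_id_matches(0x0025, axi_id=0x0020, axi_mask=0x000F)   # .supermix: IDs 0x0020-0x002f
assert not axi_id_matches(0x0071, axi_id=0x0070, axi_mask=0x0000)

In practice these metrics can typically be selected by name (for example, perf stat -M imx8mp_ddr_read.3d) rather than by writing out the raw event string.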
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/ddrc.json b/tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/ddrc.json
new file mode 100644
index 000000000000..c8682728ddad
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/ddrc.json
@@ -0,0 +1,37 @@
+[
+ {
+ "BriefDescription": "ddr cycles event",
+ "EventCode": "0x00",
+ "EventName": "imx8mq_ddr.cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MQ"
+ },
+ {
+ "BriefDescription": "ddr read-cycles event",
+ "EventCode": "0x2a",
+ "EventName": "imx8mq_ddr.read_cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MQ"
+ },
+ {
+ "BriefDescription": "ddr write-cycles event",
+ "EventCode": "0x2b",
+ "EventName": "imx8mq_ddr.write_cycles",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MQ"
+ },
+ {
+ "BriefDescription": "ddr read event",
+ "EventCode": "0x35",
+ "EventName": "imx8mq_ddr.read",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MQ"
+ },
+ {
+ "BriefDescription": "ddr write event",
+ "EventCode": "0x38",
+ "EventName": "imx8mq_ddr.write",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/metrics.json b/tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/metrics.json
new file mode 100644
index 000000000000..862c98171e0d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/freescale/imx8mq/sys/metrics.json
@@ -0,0 +1,18 @@
+[
+ {
+ "BriefDescription": "bytes all masters read from ddr based on read-cycles event",
+ "MetricName": "imx8mq_ddr_read.all",
+ "MetricExpr": "imx8mq_ddr.read_cycles * 4 * 4",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MQ"
+ },
+ {
+ "BriefDescription": "bytes all masters write to ddr based on write-cycles event",
+ "MetricName": "imx8mq_ddr_write.all",
+ "MetricExpr": "imx8mq_ddr.write_cycles * 4 * 4",
+ "ScaleUnit": "9.765625e-4KB",
+ "Unit": "imx8_ddr",
+ "Compat": "i.MX8MQ"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/branch.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/branch.json
new file mode 100644
index 000000000000..b011af11bf94
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/branch.json
@@ -0,0 +1,8 @@
+[
+ {
+ "ArchStdEvent": "BR_MIS_PRED"
+ },
+ {
+ "ArchStdEvent": "BR_PRED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/bus.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/bus.json
new file mode 100644
index 000000000000..084e88d7df73
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/bus.json
@@ -0,0 +1,62 @@
+[
+ {
+ "PublicDescription": "This event counts read transactions from tofu controller to measured CMG.",
+ "EventCode": "0x314",
+ "EventName": "BUS_READ_TOTAL_TOFU",
+ "BriefDescription": "This event counts read transactions from tofu controller to measured CMG."
+ },
+ {
+ "PublicDescription": "This event counts read transactions from PCI controller to measured CMG.",
+ "EventCode": "0x315",
+ "EventName": "BUS_READ_TOTAL_PCI",
+ "BriefDescription": "This event counts read transactions from PCI controller to measured CMG."
+ },
+ {
+ "PublicDescription": "This event counts read transactions from measured CMG local memory to measured CMG.",
+ "EventCode": "0x316",
+ "EventName": "BUS_READ_TOTAL_MEM",
+ "BriefDescription": "This event counts read transactions from measured CMG local memory to measured CMG."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to CMG0, if measured CMG is not CMG0.",
+ "EventCode": "0x318",
+ "EventName": "BUS_WRITE_TOTAL_CMG0",
+ "BriefDescription": "This event counts write transactions from measured CMG to CMG0, if measured CMG is not CMG0."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to CMG1, if measured CMG is not CMG1.",
+ "EventCode": "0x319",
+ "EventName": "BUS_WRITE_TOTAL_CMG1",
+ "BriefDescription": "This event counts write transactions from measured CMG to CMG1, if measured CMG is not CMG1."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to CMG2, if measured CMG is not CMG2.",
+ "EventCode": "0x31A",
+ "EventName": "BUS_WRITE_TOTAL_CMG2",
+ "BriefDescription": "This event counts write transactions from measured CMG to CMG2, if measured CMG is not CMG2."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to CMG3, if measured CMG is not CMG3.",
+ "EventCode": "0x31B",
+ "EventName": "BUS_WRITE_TOTAL_CMG3",
+ "BriefDescription": "This event counts write transactions from measured CMG to CMG3, if measured CMG is not CMG3."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to tofu controller.",
+ "EventCode": "0x31C",
+ "EventName": "BUS_WRITE_TOTAL_TOFU",
+ "BriefDescription": "This event counts write transactions from measured CMG to tofu controller."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to PCI controller.",
+ "EventCode": "0x31D",
+ "EventName": "BUS_WRITE_TOTAL_PCI",
+ "BriefDescription": "This event counts write transactions from measured CMG to PCI controller."
+ },
+ {
+ "PublicDescription": "This event counts write transactions from measured CMG to measured CMG local memory.",
+ "EventCode": "0x31E",
+ "EventName": "BUS_WRITE_TOTAL_MEM",
+ "BriefDescription": "This event counts write transactions from measured CMG to measured CMG local memory."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cache.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cache.json
new file mode 100644
index 000000000000..2e341a951a10
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cache.json
@@ -0,0 +1,128 @@
+[
+ {
+ "ArchStdEvent": "L1I_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L1I_CACHE"
+ },
+ {
+ "ArchStdEvent": "L1D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_CACHE_WB"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2I_TLB_REFILL"
+ },
+ {
+ "ArchStdEvent": "L2D_TLB"
+ },
+ {
+ "ArchStdEvent": "L2I_TLB"
+ },
+ {
+ "PublicDescription": "This event counts L1D_CACHE_REFILL caused by software or hardware prefetch.",
+ "EventCode": "0x49",
+ "EventName": "L1D_CACHE_REFILL_PRF",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by software or hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts L2D_CACHE_REFILL caused by software or hardware prefetch.",
+ "EventCode": "0x59",
+ "EventName": "L2D_CACHE_REFILL_PRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by software or hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts L1D_CACHE_REFILL caused by demand access.",
+ "EventCode": "0x200",
+ "EventName": "L1D_CACHE_REFILL_DM",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by demand access."
+ },
+ {
+ "PublicDescription": "This event counts L1D_CACHE_REFILL caused by hardware prefetch.",
+ "EventCode": "0x202",
+ "EventName": "L1D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "This event counts L1D_CACHE_REFILL caused by hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts outstanding L1D cache miss requests per cycle.",
+ "EventCode": "0x208",
+ "EventName": "L1_MISS_WAIT",
+ "BriefDescription": "This event counts outstanding L1D cache miss requests per cycle."
+ },
+ {
+ "PublicDescription": "This event counts outstanding L1I cache miss requests per cycle.",
+ "EventCode": "0x209",
+ "EventName": "L1I_MISS_WAIT",
+ "BriefDescription": "This event counts outstanding L1I cache miss requests per cycle."
+ },
+ {
+ "PublicDescription": "This event counts L2D_CACHE_REFILL caused by demand access.",
+ "EventCode": "0x300",
+ "EventName": "L2D_CACHE_REFILL_DM",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by demand access."
+ },
+ {
+ "PublicDescription": "This event counts L2D_CACHE_REFILL caused by hardware prefetch.",
+ "EventCode": "0x302",
+ "EventName": "L2D_CACHE_REFILL_HWPRF",
+ "BriefDescription": "This event counts L2D_CACHE_REFILL caused by hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts outstanding L2 cache miss requests per cycle.",
+ "EventCode": "0x308",
+ "EventName": "L2_MISS_WAIT",
+ "BriefDescription": "This event counts outstanding L2 cache miss requests per cycle."
+ },
+ {
+ "PublicDescription": "This event counts the number of times of L2 cache miss.",
+ "EventCode": "0x309",
+ "EventName": "L2_MISS_COUNT",
+ "BriefDescription": "This event counts the number of times of L2 cache miss."
+ },
+ {
+ "PublicDescription": "This event counts operations where demand access hits an L2 cache refill buffer allocated by software or hardware prefetch.",
+ "EventCode": "0x325",
+ "EventName": "L2D_SWAP_DM",
+ "BriefDescription": "This event counts operations where demand access hits an L2 cache refill buffer allocated by software or hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access.",
+ "EventCode": "0x326",
+ "EventName": "L2D_CACHE_MIBMCH_PRF",
+ "BriefDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access."
+ },
+ {
+ "PublicDescription": "This event counts operations where demand access hits an L2 cache refill buffer allocated by software or hardware prefetch.",
+ "EventCode": "0x396",
+ "EventName": "L2D_CACHE_SWAP_LOCAL",
+ "BriefDescription": "This event counts operations where demand access hits an L2 cache refill buffer allocated by software or hardware prefetch."
+ },
+ {
+ "PublicDescription": "This event counts energy consumption per cycle of L2 cache.",
+ "EventCode": "0x3E0",
+ "EventName": "EA_L2",
+ "BriefDescription": "This event counts energy consumption per cycle of L2 cache."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cycle.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cycle.json
new file mode 100644
index 000000000000..b16484628290
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/cycle.json
@@ -0,0 +1,5 @@
+[
+ {
+ "ArchStdEvent": "CPU_CYCLES"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/exception.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/exception.json
new file mode 100644
index 000000000000..348749c154c0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/exception.json
@@ -0,0 +1,29 @@
+[
+ {
+ "ArchStdEvent": "EXC_TAKEN"
+ },
+ {
+ "ArchStdEvent": "EXC_UNDEF"
+ },
+ {
+ "ArchStdEvent": "EXC_SVC"
+ },
+ {
+ "ArchStdEvent": "EXC_PABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_DABORT"
+ },
+ {
+ "ArchStdEvent": "EXC_IRQ"
+ },
+ {
+ "ArchStdEvent": "EXC_FIQ"
+ },
+ {
+ "ArchStdEvent": "EXC_SMC"
+ },
+ {
+ "ArchStdEvent": "EXC_HVC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/instruction.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/instruction.json
new file mode 100644
index 000000000000..6d258b1080cf
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/instruction.json
@@ -0,0 +1,131 @@
+[
+ {
+ "ArchStdEvent": "SW_INCR"
+ },
+ {
+ "ArchStdEvent": "INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "EXC_RETURN"
+ },
+ {
+ "ArchStdEvent": "CID_WRITE_RETIRED"
+ },
+ {
+ "ArchStdEvent": "INST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "STREX_SPEC"
+ },
+ {
+ "ArchStdEvent": "LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "LDST_SPEC"
+ },
+ {
+ "ArchStdEvent": "DP_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SPEC"
+ },
+ {
+ "ArchStdEvent": "VFP_SPEC"
+ },
+ {
+ "ArchStdEvent": "PC_WRITE_SPEC"
+ },
+ {
+ "ArchStdEvent": "CRYPTO_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_IMMED_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_RETURN_SPEC"
+ },
+ {
+ "ArchStdEvent": "BR_INDIRECT_SPEC"
+ },
+ {
+ "ArchStdEvent": "ISB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DSB_SPEC"
+ },
+ {
+ "ArchStdEvent": "DMB_SPEC"
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed zero blocking operations due to the 'DC ZVA' instruction.",
+ "EventCode": "0x9F",
+ "EventName": "DCZVA_SPEC",
+ "BriefDescription": "This event counts architecturally executed zero blocking operations due to the 'DC ZVA' instruction."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed floating-point move operations.",
+ "EventCode": "0x105",
+ "EventName": "FP_MV_SPEC",
+ "BriefDescription": "This event counts architecturally executed floating-point move operations."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed operations that using predicate register.",
+ "EventCode": "0x108",
+ "EventName": "PRD_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that using predicate register."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed inter-element manipulation operations.",
+ "EventCode": "0x109",
+ "EventName": "IEL_SPEC",
+ "BriefDescription": "This event counts architecturally executed inter-element manipulation operations."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed inter-register manipulation operations.",
+ "EventCode": "0x10A",
+ "EventName": "IREG_SPEC",
+ "BriefDescription": "This event counts architecturally executed inter-register manipulation operations."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed NOSIMD load operations that using SIMD&FP registers.",
+ "EventCode": "0x112",
+ "EventName": "FP_LD_SPEC",
+ "BriefDescription": "This event counts architecturally executed NOSIMD load operations that using SIMD&FP registers."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed NOSIMD store operations that using SIMD&FP registers.",
+ "EventCode": "0x113",
+ "EventName": "FP_ST_SPEC",
+ "BriefDescription": "This event counts architecturally executed NOSIMD store operations that using SIMD&FP registers."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed SIMD broadcast floating-point load operations.",
+ "EventCode": "0x11A",
+ "EventName": "BC_LD_SPEC",
+ "BriefDescription": "This event counts architecturally executed SIMD broadcast floating-point load operations."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed instructions, excluding the MOVPRFX instruction.",
+ "EventCode": "0x121",
+ "EventName": "EFFECTIVE_INST_SPEC",
+ "BriefDescription": "This event counts architecturally executed instructions, excluding the MOVPRFX instruction."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed operations that uses 'pre-index' as its addressing mode.",
+ "EventCode": "0x123",
+ "EventName": "PRE_INDEX_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that uses 'pre-index' as its addressing mode."
+ },
+ {
+ "PublicDescription": "This event counts architecturally executed operations that uses 'post-index' as its addressing mode.",
+ "EventCode": "0x124",
+ "EventName": "POST_INDEX_SPEC",
+ "BriefDescription": "This event counts architecturally executed operations that uses 'post-index' as its addressing mode."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/memory.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/memory.json
new file mode 100644
index 000000000000..c1f6479e92b4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/memory.json
@@ -0,0 +1,8 @@
+[
+ {
+ "PublicDescription": "This event counts energy consumption per cycle of CMG local memory.",
+ "EventCode": "0x3E8",
+ "EventName": "EA_MEMORY",
+ "BriefDescription": "This event counts energy consumption per cycle of CMG local memory."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/other.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/other.json
new file mode 100644
index 000000000000..10c823ac26cc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/other.json
@@ -0,0 +1,188 @@
+[
+ {
+ "PublicDescription": "This event counts the occurrence count of the micro-operation split.",
+ "EventCode": "0x139",
+ "EventName": "UOP_SPLIT",
+ "BriefDescription": "This event counts the occurrence count of the micro-operation split."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no operation was committed because the oldest and uncommitted load/store/prefetch operation waits for memory access.",
+ "EventCode": "0x180",
+ "EventName": "LD_COMP_WAIT_L2_MISS",
+ "BriefDescription": "This event counts every cycle that no operation was committed because the oldest and uncommitted load/store/prefetch operation waits for memory access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for memory access.",
+ "EventCode": "0x181",
+ "EventName": "LD_COMP_WAIT_L2_MISS_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for memory access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L2 cache access.",
+ "EventCode": "0x182",
+ "EventName": "LD_COMP_WAIT_L1_MISS",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L2 cache access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L2 cache access.",
+ "EventCode": "0x183",
+ "EventName": "LD_COMP_WAIT_L1_MISS_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L2 cache access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L1D cache, L2 cache and memory access.",
+ "EventCode": "0x184",
+ "EventName": "LD_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted load/store/prefetch operation waits for L1D cache, L2 cache and memory access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L1D cache, L2 cache and memory access.",
+ "EventCode": "0x185",
+ "EventName": "LD_COMP_WAIT_EX",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the oldest and uncommitted integer load operation waits for L1D cache, L2 cache and memory access."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed due to the lack of an available prefetch port.",
+ "EventCode": "0x186",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed due to the lack of an available prefetch port."
+ },
+ {
+ "PublicDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by an integer load operation.",
+ "EventCode": "0x187",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY_EX",
+ "BriefDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by an integer load operation."
+ },
+ {
+ "PublicDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by a software prefetch instruction.",
+ "EventCode": "0x188",
+ "EventName": "LD_COMP_WAIT_PFP_BUSY_SWPF",
+ "BriefDescription": "This event counts the LD_COMP_WAIT_PFP_BUSY caused by a software prefetch instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is an integer or floating-point/SIMD instruction.",
+ "EventCode": "0x189",
+ "EventName": "EU_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is an integer or floating-point/SIMD instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a floating-point/SIMD instruction.",
+ "EventCode": "0x18A",
+ "EventName": "FL_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a floating-point/SIMD instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a branch instruction.",
+ "EventCode": "0x18B",
+ "EventName": "BR_COMP_WAIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed and the oldest and uncommitted instruction is a branch instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the CSE is empty.",
+ "EventCode": "0x18C",
+ "EventName": "ROB_EMPTY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the CSE is empty."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed because the CSE is empty and the store port (SP) is full.",
+ "EventCode": "0x18D",
+ "EventName": "ROB_EMPTY_STQ_BUSY",
+ "BriefDescription": "This event counts every cycle that no instruction was committed because the CSE is empty and the store port (SP) is full."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that the instruction unit is halted by the WFE/WFI instruction.",
+ "EventCode": "0x18E",
+ "EventName": "WFE_WFI_CYCLE",
+ "BriefDescription": "This event counts every cycle that the instruction unit is halted by the WFE/WFI instruction."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that no instruction was committed, including cycles in which only the MOVPRFX instruction was committed.",
+ "EventCode": "0x190",
+ "EventName": "_0INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that no instruction was committed, including cycles in which only the MOVPRFX instruction was committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that one instruction is committed.",
+ "EventCode": "0x191",
+ "EventName": "_1INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that one instruction is committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that two instructions are committed.",
+ "EventCode": "0x192",
+ "EventName": "_2INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that two instructions are committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that three instructions are committed.",
+ "EventCode": "0x193",
+ "EventName": "_3INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that three instructions are committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that four instructions are committed.",
+ "EventCode": "0x194",
+ "EventName": "_4INST_COMMIT",
+ "BriefDescription": "This event counts every cycle that four instructions are committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that only micro-operations are committed.",
+ "EventCode": "0x198",
+ "EventName": "UOP_ONLY_COMMIT",
+ "BriefDescription": "This event counts every cycle that only micro-operations are committed."
+ },
+ {
+ "PublicDescription": "This event counts every cycle that only the MOVPRFX instruction is committed.",
+ "EventCode": "0x199",
+ "EventName": "SINGLE_MOVPRFX_COMMIT",
+ "BriefDescription": "This event counts every cycle that only the MOVPRFX instruction is committed."
+ },
+ {
+ "PublicDescription": "This event counts energy consumption per cycle of core.",
+ "EventCode": "0x1E0",
+ "EventName": "EA_CORE",
+ "BriefDescription": "This event counts energy consumption per cycle of core."
+ },
+ {
+ "PublicDescription": "This event counts streaming prefetch requests to L1D cache generated by hardware prefetcher.",
+ "EventCode": "0x230",
+ "EventName": "L1HWPF_STREAM_PF",
+ "BriefDescription": "This event counts streaming prefetch requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts allocation type prefetch injection requests to L1D cache generated by hardware prefetcher.",
+ "EventCode": "0x231",
+ "EventName": "L1HWPF_INJ_ALLOC_PF",
+ "BriefDescription": "This event counts allocation type prefetch injection requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts non-allocation type prefetch injection requests to L1D cache generated by hardware prefetcher.",
+ "EventCode": "0x232",
+ "EventName": "L1HWPF_INJ_NOALLOC_PF",
+ "BriefDescription": "This event counts non-allocation type prefetch injection requests to L1D cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts streaming prefetch requests to L2 cache generated by hardware prefetcher.",
+ "EventCode": "0x233",
+ "EventName": "L2HWPF_STREAM_PF",
+ "BriefDescription": "This event counts streaming prefetch requests to L2 cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts allocation type prefetch injection requests to L2 cache generated by hardware prefetcher.",
+ "EventCode": "0x234",
+ "EventName": "L2HWPF_INJ_ALLOC_PF",
+ "BriefDescription": "This event counts allocation type prefetch injection requests to L2 cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts non-allocation type prefetch injection requests to L2 cache generated by hardware prefetcher.",
+ "EventCode": "0x235",
+ "EventName": "L2HWPF_INJ_NOALLOC_PF",
+ "BriefDescription": "This event counts non-allocation type prefetch injection requests to L2 cache generated by hardware prefetcher."
+ },
+ {
+ "PublicDescription": "This event counts prefetch requests to L2 cache generated by other causes.",
+ "EventCode": "0x236",
+ "EventName": "L2HWPF_OTHER",
+ "BriefDescription": "This event counts prefetch requests to L2 cache generated by other causes."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/pipeline.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/pipeline.json
new file mode 100644
index 000000000000..dd7c97a9972b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/pipeline.json
@@ -0,0 +1,194 @@
+[
+ {
+ "ArchStdEvent": "STALL_FRONTEND"
+ },
+ {
+ "ArchStdEvent": "STALL_BACKEND"
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of EAGA pipeline.",
+ "EventCode": "0x1A0",
+ "EventName": "EAGA_VAL",
+ "BriefDescription": "This event counts valid cycles of EAGA pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of EAGB pipeline.",
+ "EventCode": "0x1A1",
+ "EventName": "EAGB_VAL",
+ "BriefDescription": "This event counts valid cycles of EAGB pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of EXA pipeline.",
+ "EventCode": "0x1A2",
+ "EventName": "EXA_VAL",
+ "BriefDescription": "This event counts valid cycles of EXA pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of EXB pipeline.",
+ "EventCode": "0x1A3",
+ "EventName": "EXB_VAL",
+ "BriefDescription": "This event counts valid cycles of EXB pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of FLA pipeline.",
+ "EventCode": "0x1A4",
+ "EventName": "FLA_VAL",
+ "BriefDescription": "This event counts valid cycles of FLA pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of FLB pipeline.",
+ "EventCode": "0x1A5",
+ "EventName": "FLB_VAL",
+ "BriefDescription": "This event counts valid cycles of FLB pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of PRX pipeline.",
+ "EventCode": "0x1A6",
+ "EventName": "PRX_VAL",
+ "BriefDescription": "This event counts valid cycles of PRX pipeline."
+ },
+ {
+ "PublicDescription": "This event counts the number of 1's in the predicate bits of requests in the FLA pipeline, corrected so that the count becomes 16 when all bits are 1.",
+ "EventCode": "0x1B4",
+ "EventName": "FLA_VAL_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of requests in the FLA pipeline, corrected so that the count becomes 16 when all bits are 1."
+ },
+ {
+ "PublicDescription": "This event counts the number of 1's in the predicate bits of requests in the FLB pipeline, corrected so that the count becomes 16 when all bits are 1.",
+ "EventCode": "0x1B5",
+ "EventName": "FLB_VAL_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of requests in the FLB pipeline, corrected so that the count becomes 16 when all bits are 1."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of L1D cache pipeline#0.",
+ "EventCode": "0x240",
+ "EventName": "L1_PIPE0_VAL",
+ "BriefDescription": "This event counts valid cycles of L1D cache pipeline#0."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of L1D cache pipeline#1.",
+ "EventCode": "0x241",
+ "EventName": "L1_PIPE1_VAL",
+ "BriefDescription": "This event counts valid cycles of L1D cache pipeline#1."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#0 whose sce bit of the tagged address is 1.",
+ "EventCode": "0x250",
+ "EventName": "L1_PIPE0_VAL_IU_TAG_ADRS_SCE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#0 whose sce bit of the tagged address is 1."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#0 whose pfe bit of the tagged address is 1.",
+ "EventCode": "0x251",
+ "EventName": "L1_PIPE0_VAL_IU_TAG_ADRS_PFE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#0 whose pfe bit of the tagged address is 1."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#1 whose sce bit of the tagged address is 1.",
+ "EventCode": "0x252",
+ "EventName": "L1_PIPE1_VAL_IU_TAG_ADRS_SCE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#1 whose sce bit of the tagged address is 1."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#1 whose pfe bit of the tagged address is 1.",
+ "EventCode": "0x253",
+ "EventName": "L1_PIPE1_VAL_IU_TAG_ADRS_PFE",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#1 whose pfe bit of the tagged address is 1."
+ },
+ {
+ "PublicDescription": "This event counts completed requests in L1D cache pipeline#0.",
+ "EventCode": "0x260",
+ "EventName": "L1_PIPE0_COMP",
+ "BriefDescription": "This event counts completed requests in L1D cache pipeline#0."
+ },
+ {
+ "PublicDescription": "This event counts completed requests in L1D cache pipeline#1.",
+ "EventCode": "0x261",
+ "EventName": "L1_PIPE1_COMP",
+ "BriefDescription": "This event counts completed requests in L1D cache pipeline#1."
+ },
+ {
+ "PublicDescription": "This event counts completed requests in L1I cache pipeline.",
+ "EventCode": "0x268",
+ "EventName": "L1I_PIPE_COMP",
+ "BriefDescription": "This event counts completed requests in L1I cache pipeline."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of L1I cache pipeline.",
+ "EventCode": "0x269",
+ "EventName": "L1I_PIPE_VAL",
+ "BriefDescription": "This event counts valid cycles of L1I cache pipeline."
+ },
+ {
+ "PublicDescription": "This event counts aborted requests in L1D pipelines due to store-load interlock.",
+ "EventCode": "0x274",
+ "EventName": "L1_PIPE_ABORT_STLD_INTLK",
+ "BriefDescription": "This event counts aborted requests in L1D pipelines due to store-load interlock."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#0 whose sector cache ID is not 0.",
+ "EventCode": "0x2A0",
+ "EventName": "L1_PIPE0_VAL_IU_NOT_SEC0",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#0 whose sector cache ID is not 0."
+ },
+ {
+ "PublicDescription": "This event counts requests in L1D cache pipeline#1 whose sector cache ID is not 0.",
+ "EventCode": "0x2A1",
+ "EventName": "L1_PIPE1_VAL_IU_NOT_SEC0",
+ "BriefDescription": "This event counts requests in L1D cache pipeline#1 whose sector cache ID is not 0."
+ },
+ {
+ "PublicDescription": "This event counts the number of times where 2 elements of the gather instructions became 2 flows because 2 elements could not be combined.",
+ "EventCode": "0x2B0",
+ "EventName": "L1_PIPE_COMP_GATHER_2FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 2 flows because 2 elements could not be combined."
+ },
+ {
+ "PublicDescription": "This event counts the number of times where 2 elements of the gather instructions became 1 flow because 2 elements could be combined.",
+ "EventCode": "0x2B1",
+ "EventName": "L1_PIPE_COMP_GATHER_1FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 1 flow because 2 elements could be combined."
+ },
+ {
+ "PublicDescription": "This event counts the number of times where 2 elements of the gather instructions became 0 flow because both predicate values are 0.",
+ "EventCode": "0x2B2",
+ "EventName": "L1_PIPE_COMP_GATHER_0FLOW",
+ "BriefDescription": "This event counts the number of times where 2 elements of the gather instructions became 0 flow because both predicate values are 0."
+ },
+ {
+ "PublicDescription": "This event counts the number of flows of the scatter instructions.",
+ "EventCode": "0x2B3",
+ "EventName": "L1_PIPE_COMP_SCATTER_1FLOW",
+ "BriefDescription": "This event counts the number of flows of the scatter instructions."
+ },
+ {
+ "PublicDescription": "This event counts the number of 1's in the predicate bits of requests in L1D cache pipeline#0, corrected so that the count becomes 16 when all bits are 1.",
+ "EventCode": "0x2B8",
+ "EventName": "L1_PIPE0_COMP_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of requests in L1D cache pipeline#0, corrected so that the count becomes 16 when all bits are 1."
+ },
+ {
+ "PublicDescription": "This event counts the number of 1's in the predicate bits of requests in L1D cache pipeline#1, corrected so that the count becomes 16 when all bits are 1.",
+ "EventCode": "0x2B9",
+ "EventName": "L1_PIPE1_COMP_PRD_CNT",
+ "BriefDescription": "This event counts the number of 1's in the predicate bits of requests in L1D cache pipeline#1, corrected so that the count becomes 16 when all bits are 1."
+ },
+ {
+ "PublicDescription": "This event counts valid cycles of L2 cache pipeline.",
+ "EventCode": "0x330",
+ "EventName": "L2_PIPE_VAL",
+ "BriefDescription": "This event counts valid cycles of L2 cache pipeline."
+ },
+ {
+ "PublicDescription": "This event counts completed requests in L2 cache pipeline.",
+ "EventCode": "0x350",
+ "EventName": "L2_PIPE_COMP_ALL",
+ "BriefDescription": "This event counts completed requests in L2 cache pipeline."
+ },
+ {
+ "PublicDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access.",
+ "EventCode": "0x370",
+ "EventName": "L2_PIPE_COMP_PF_L2MIB_MCH",
+ "BriefDescription": "This event counts operations where software or hardware prefetch hits an L2 cache refill buffer allocated by demand access."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/sve.json b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/sve.json
new file mode 100644
index 000000000000..dc1b95e42c32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/sve.json
@@ -0,0 +1,110 @@
+[
+ {
+ "ArchStdEvent": "SIMD_INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "SVE_INST_RETIRED"
+ },
+ {
+ "ArchStdEvent": "UOP_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_MATH_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FMA_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_RECPE_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_CVT_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_INT_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRED_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_MOVPRFX_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_MOVPRFX_U_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_LD_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_ST_SPEC"
+ },
+ {
+ "ArchStdEvent": "PRF_SPEC"
+ },
+ {
+ "ArchStdEvent": "BASE_LD_REG_SPEC"
+ },
+ {
+ "ArchStdEvent": "BASE_ST_REG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDR_REG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_STR_REG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDR_PREG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_STR_PREG_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRF_CONTIG_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_LD_MULTI_SPEC"
+ },
+ {
+ "ArchStdEvent": "ASE_SVE_ST_MULTI_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LD_GATHER_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_ST_SCATTER_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_PRF_GATHER_SPEC"
+ },
+ {
+ "ArchStdEvent": "SVE_LDFF_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_HP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_SP_FIXED_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_SCALE_OPS_SPEC"
+ },
+ {
+ "ArchStdEvent": "FP_DP_FIXED_OPS_SPEC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
new file mode 100644
index 000000000000..6443a061e22a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/metrics.json
@@ -0,0 +1,233 @@
+[
+ {
+ "MetricExpr": "FETCH_BUBBLE / (4 * CPU_CYCLES)",
+ "PublicDescription": "Frontend bound L1 topdown metric",
+ "BriefDescription": "Frontend bound L1 topdown metric",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "frontend_bound"
+ },
+ {
+ "MetricExpr": "(INST_SPEC - INST_RETIRED) / (4 * CPU_CYCLES)",
+ "PublicDescription": "Bad Speculation L1 topdown metric",
+ "BriefDescription": "Bad Speculation L1 topdown metric",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "bad_speculation"
+ },
+ {
+ "MetricExpr": "INST_RETIRED / (CPU_CYCLES * 4)",
+ "PublicDescription": "Retiring L1 topdown metric",
+ "BriefDescription": "Retiring L1 topdown metric",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "retiring"
+ },
+ {
+ "MetricExpr": "1 - (frontend_bound + bad_speculation + retiring)",
+ "PublicDescription": "Backend Bound L1 topdown metric",
+ "BriefDescription": "Backend Bound L1 topdown metric",
+ "MetricGroup": "TopDownL1",
+ "MetricName": "backend_bound"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x201d@ / CPU_CYCLES",
+ "PublicDescription": "Fetch latency bound L2 topdown metric",
+ "BriefDescription": "Fetch latency bound L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "fetch_latency_bound"
+ },
+ {
+ "MetricExpr": "frontend_bound - fetch_latency_bound",
+ "PublicDescription": "Fetch bandwidth bound L2 topdown metric",
+ "BriefDescription": "Fetch bandwidth bound L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "fetch_bandwidth_bound"
+ },
+ {
+ "MetricExpr": "(bad_speculation * BR_MIS_PRED) / (BR_MIS_PRED + armv8_pmuv3_0@event\\=0x2013@)",
+ "PublicDescription": "Branch mispredicts L2 topdown metric",
+ "BriefDescription": "Branch mispredicts L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "branch_mispredicts"
+ },
+ {
+ "MetricExpr": "bad_speculation - branch_mispredicts",
+ "PublicDescription": "Machine clears L2 topdown metric",
+ "BriefDescription": "Machine clears L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "machine_clears"
+ },
+ {
+ "MetricExpr": "(EXE_STALL_CYCLE - (MEM_STALL_ANYLOAD + armv8_pmuv3_0@event\\=0x7005@)) / CPU_CYCLES",
+ "PublicDescription": "Core bound L2 topdown metric",
+ "BriefDescription": "Core bound L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "core_bound"
+ },
+ {
+ "MetricExpr": "(MEM_STALL_ANYLOAD + armv8_pmuv3_0@event\\=0x7005@) / CPU_CYCLES",
+ "PublicDescription": "Memory bound L2 topdown metric",
+ "BriefDescription": "Memory bound L2 topdown metric",
+ "MetricGroup": "TopDownL2",
+ "MetricName": "memory_bound"
+ },
+ {
+ "MetricExpr": "(((L2I_TLB - L2I_TLB_REFILL) * 15) + (L2I_TLB_REFILL * 100)) / CPU_CYCLES",
+ "PublicDescription": "Idle by itlb miss L3 topdown metric",
+ "BriefDescription": "Idle by itlb miss L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "idle_by_itlb_miss"
+ },
+ {
+ "MetricExpr": "(((L2I_CACHE - L2I_CACHE_REFILL) * 15) + (L2I_CACHE_REFILL * 100)) / CPU_CYCLES",
+ "PublicDescription": "Idle by icache miss L3 topdown metric",
+ "BriefDescription": "Idle by icache miss L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "idle_by_icache_miss"
+ },
+ {
+ "MetricExpr": "(BR_MIS_PRED * 5) / CPU_CYCLES",
+ "PublicDescription": "BP misp flush L3 topdown metric",
+ "BriefDescription": "BP misp flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "bp_misp_flush"
+ },
+ {
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x2013@ * 5) / CPU_CYCLES",
+ "PublicDescription": "OOO flush L3 topdown metric",
+ "BriefDescription": "OOO flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "ooo_flush"
+ },
+ {
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x1001@ * 5) / CPU_CYCLES",
+ "PublicDescription": "Static predictor flush L3 topdown metric",
+ "BriefDescription": "Static predictor flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "sp_flush"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x1010@ / BR_MIS_PRED",
+ "PublicDescription": "Indirect branch L3 topdown metric",
+ "BriefDescription": "Indirect branch L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "indirect_branch"
+ },
+ {
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x1013@ + armv8_pmuv3_0@event\\=0x1016@) / BR_MIS_PRED",
+ "PublicDescription": "Push branch L3 topdown metric",
+ "BriefDescription": "Push branch L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "push_branch"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x100d@ / BR_MIS_PRED",
+ "PublicDescription": "Pop branch L3 topdown metric",
+ "BriefDescription": "Pop branch L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "pop_branch"
+ },
+ {
+ "MetricExpr": "(BR_MIS_PRED - armv8_pmuv3_0@event\\=0x1010@ - armv8_pmuv3_0@event\\=0x1013@ - armv8_pmuv3_0@event\\=0x1016@ - armv8_pmuv3_0@event\\=0x100d@) / BR_MIS_PRED",
+ "PublicDescription": "Other branch L3 topdown metric",
+ "BriefDescription": "Other branch L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "other_branch"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x2012@ / armv8_pmuv3_0@event\\=0x2013@",
+ "PublicDescription": "Nuke flush L3 topdown metric",
+ "BriefDescription": "Nuke flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "nuke_flush"
+ },
+ {
+ "MetricExpr": "1 - nuke_flush",
+ "PublicDescription": "Other flush L3 topdown metric",
+ "BriefDescription": "Other flush L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "other_flush"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x2010@ / CPU_CYCLES",
+ "PublicDescription": "Sync stall L3 topdown metric",
+ "BriefDescription": "Sync stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "sync_stall"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x2004@ / CPU_CYCLES",
+ "PublicDescription": "Rob stall L3 topdown metric",
+ "BriefDescription": "Rob stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "rob_stall"
+ },
+ {
+ "MetricExpr": "(armv8_pmuv3_0@event\\=0x2006@ + armv8_pmuv3_0@event\\=0x2007@ + armv8_pmuv3_0@event\\=0x2008@) / CPU_CYCLES",
+ "PublicDescription": "Ptag stall L3 topdown metric",
+ "BriefDescription": "Ptag stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "ptag_stall"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x201e@ / CPU_CYCLES",
+ "PublicDescription": "SaveOpQ stall L3 topdown metric",
+ "BriefDescription": "SaveOpQ stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "saveopq_stall"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x2005@ / CPU_CYCLES",
+ "PublicDescription": "PC buffer stall L3 topdown metric",
+ "BriefDescription": "PC buffer stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "pc_buffer_stall"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x7002@ / CPU_CYCLES",
+ "PublicDescription": "Divider L3 topdown metric",
+ "BriefDescription": "Divider L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "divider"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x7003@ / CPU_CYCLES",
+ "PublicDescription": "FSU stall L3 topdown metric",
+ "BriefDescription": "FSU stall L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "fsu_stall"
+ },
+ {
+ "MetricExpr": "core_bound - divider - fsu_stall",
+ "PublicDescription": "EXE ports util L3 topdown metric",
+ "BriefDescription": "EXE ports util L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "exe_ports_util"
+ },
+ {
+ "MetricExpr": "(MEM_STALL_ANYLOAD - MEM_STALL_L1MISS) / CPU_CYCLES",
+ "PublicDescription": "L1 bound L3 topdown metric",
+ "BriefDescription": "L1 bound L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "l1_bound"
+ },
+ {
+ "MetricExpr": "(MEM_STALL_L1MISS - MEM_STALL_L2MISS) / CPU_CYCLES",
+ "PublicDescription": "L2 bound L3 topdown metric",
+ "BriefDescription": "L2 bound L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "l2_bound"
+ },
+ {
+ "MetricExpr": "MEM_STALL_L2MISS / CPU_CYCLES",
+ "PublicDescription": "Mem bound L3 topdown metric",
+ "BriefDescription": "Mem bound L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "mem_bound"
+ },
+ {
+ "MetricExpr": "armv8_pmuv3_0@event\\=0x7005@ / CPU_CYCLES",
+ "PublicDescription": "Store bound L3 topdown metric",
+ "BriefDescription": "Store bound L3 topdown metric",
+ "MetricGroup": "TopDownL3",
+ "MetricName": "store_bound"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
index 61514d38601b..2b3cb55df288 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-ddrc.json
@@ -1,56 +1,56 @@
[
{
- "EventCode": "0x00",
- "EventName": "uncore_hisi_ddrc.flux_wr",
+ "ConfigCode": "0x00",
+ "EventName": "flux_wr",
"BriefDescription": "DDRC total write operations",
"PublicDescription": "DDRC total write operations",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x01",
- "EventName": "uncore_hisi_ddrc.flux_rd",
+ "ConfigCode": "0x01",
+ "EventName": "flux_rd",
"BriefDescription": "DDRC total read operations",
"PublicDescription": "DDRC total read operations",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x02",
- "EventName": "uncore_hisi_ddrc.flux_wcmd",
+ "ConfigCode": "0x02",
+ "EventName": "flux_wcmd",
"BriefDescription": "DDRC write commands",
"PublicDescription": "DDRC write commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x03",
- "EventName": "uncore_hisi_ddrc.flux_rcmd",
+ "ConfigCode": "0x03",
+ "EventName": "flux_rcmd",
"BriefDescription": "DDRC read commands",
"PublicDescription": "DDRC read commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x04",
- "EventName": "uncore_hisi_ddrc.pre_cmd",
+ "ConfigCode": "0x04",
+ "EventName": "pre_cmd",
"BriefDescription": "DDRC precharge commands",
"PublicDescription": "DDRC precharge commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x05",
- "EventName": "uncore_hisi_ddrc.act_cmd",
+ "ConfigCode": "0x05",
+ "EventName": "act_cmd",
"BriefDescription": "DDRC active commands",
"PublicDescription": "DDRC active commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x06",
- "EventName": "uncore_hisi_ddrc.rnk_chg",
+ "ConfigCode": "0x06",
+ "EventName": "rnk_chg",
"BriefDescription": "DDRC rank commands",
"PublicDescription": "DDRC rank commands",
"Unit": "hisi_sccl,ddrc"
},
{
- "EventCode": "0x07",
- "EventName": "uncore_hisi_ddrc.rw_chg",
+ "ConfigCode": "0x07",
+ "EventName": "rw_chg",
"BriefDescription": "DDRC read and write changes",
"PublicDescription": "DDRC read and write changes",
"Unit": "hisi_sccl,ddrc"
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
index ada86782933f..9a7ec7af2060 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-hha.json
@@ -1,72 +1,152 @@
[
{
- "EventCode": "0x00",
- "EventName": "uncore_hisi_hha.rx_ops_num",
+ "ConfigCode": "0x00",
+ "EventName": "rx_ops_num",
"BriefDescription": "The number of all operations received by the HHA",
"PublicDescription": "The number of all operations received by the HHA",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x01",
- "EventName": "uncore_hisi_hha.rx_outer",
+ "ConfigCode": "0x01",
+ "EventName": "rx_outer",
"BriefDescription": "The number of all operations received by the HHA from another socket",
"PublicDescription": "The number of all operations received by the HHA from another socket",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x02",
- "EventName": "uncore_hisi_hha.rx_sccl",
+ "ConfigCode": "0x02",
+ "EventName": "rx_sccl",
"BriefDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"PublicDescription": "The number of all operations received by the HHA from another SCCL in this socket",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x03",
- "EventName": "uncore_hisi_hha.rx_ccix",
+ "ConfigCode": "0x03",
+ "EventName": "rx_ccix",
"BriefDescription": "Count of the number of operations that HHA has received from CCIX",
"PublicDescription": "Count of the number of operations that HHA has received from CCIX",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x1c",
- "EventName": "uncore_hisi_hha.rd_ddr_64b",
+ "ConfigCode": "0x4",
+ "EventName": "rx_wbi",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x5",
+ "EventName": "rx_wbip",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x11",
+ "EventName": "rx_wtistash",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x1c",
+ "EventName": "rd_ddr_64b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 64bytes",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x1d",
- "EventName": "uncore_hisi_hha.wr_ddr_64b",
+ "ConfigCode": "0x1d",
+ "EventName": "wr_ddr_64b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 64 bytes",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x1e",
- "EventName": "uncore_hisi_hha.rd_ddr_128b",
+ "ConfigCode": "0x1e",
+ "EventName": "rd_ddr_128b",
"BriefDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of read operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x1f",
- "EventName": "uncore_hisi_hha.wr_ddr_128b",
+ "ConfigCode": "0x1f",
+ "EventName": "wr_ddr_128b",
"BriefDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"PublicDescription": "The number of write operations sent by HHA to DDRC which size is 128 bytes",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x20",
- "EventName": "uncore_hisi_hha.spill_num",
+ "ConfigCode": "0x20",
+ "EventName": "spill_num",
"BriefDescription": "Count of the number of spill operations that the HHA has sent",
"PublicDescription": "Count of the number of spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
},
{
- "EventCode": "0x21",
- "EventName": "uncore_hisi_hha.spill_success",
+ "ConfigCode": "0x21",
+ "EventName": "spill_success",
"BriefDescription": "Count of the number of successful spill operations that the HHA has sent",
"PublicDescription": "Count of the number of successful spill operations that the HHA has sent",
"Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x23",
+ "EventName": "bi_num",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x32",
+ "EventName": "mediated_num",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x33",
+ "EventName": "tx_snp_num",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x34",
+ "EventName": "tx_snp_outer",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x35",
+ "EventName": "tx_snp_ccix",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x38",
+ "EventName": "rx_snprspdata",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x3c",
+ "EventName": "rx_snprsp_outer",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x40",
+ "EventName": "sdir-lookup",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x41",
+ "EventName": "edir-lookup",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x42",
+ "EventName": "sdir-hit",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x43",
+ "EventName": "edir-hit",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x4c",
+ "EventName": "sdir-home-migrate",
+ "Unit": "hisi_sccl,hha"
+ },
+ {
+ "ConfigCode": "0x4d",
+ "EventName": "edir-home-migrate",
+ "Unit": "hisi_sccl,hha"
}
]
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
index 67ab19e8cf3a..e3479b65be9a 100644
--- a/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip08/uncore-l3c.json
@@ -1,91 +1,91 @@
[
{
- "EventCode": "0x00",
- "EventName": "uncore_hisi_l3c.rd_cpipe",
+ "ConfigCode": "0x00",
+ "EventName": "rd_cpipe",
"BriefDescription": "Total read accesses",
"PublicDescription": "Total read accesses",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x01",
- "EventName": "uncore_hisi_l3c.wr_cpipe",
+ "ConfigCode": "0x01",
+ "EventName": "wr_cpipe",
"BriefDescription": "Total write accesses",
"PublicDescription": "Total write accesses",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x02",
- "EventName": "uncore_hisi_l3c.rd_hit_cpipe",
+ "ConfigCode": "0x02",
+ "EventName": "rd_hit_cpipe",
"BriefDescription": "Total read hits",
"PublicDescription": "Total read hits",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x03",
- "EventName": "uncore_hisi_l3c.wr_hit_cpipe",
+ "ConfigCode": "0x03",
+ "EventName": "wr_hit_cpipe",
"BriefDescription": "Total write hits",
"PublicDescription": "Total write hits",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x04",
- "EventName": "uncore_hisi_l3c.victim_num",
+ "ConfigCode": "0x04",
+ "EventName": "victim_num",
"BriefDescription": "l3c precharge commands",
"PublicDescription": "l3c precharge commands",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x20",
- "EventName": "uncore_hisi_l3c.rd_spipe",
+ "ConfigCode": "0x20",
+ "EventName": "rd_spipe",
"BriefDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"PublicDescription": "Count of the number of read lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x21",
- "EventName": "uncore_hisi_l3c.wr_spipe",
+ "ConfigCode": "0x21",
+ "EventName": "wr_spipe",
"BriefDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"PublicDescription": "Count of the number of write lines that come from this cluster of CPU core in spipe",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x22",
- "EventName": "uncore_hisi_l3c.rd_hit_spipe",
+ "ConfigCode": "0x22",
+ "EventName": "rd_hit_spipe",
"BriefDescription": "Count of the number of read lines that hits in spipe of this L3C",
"PublicDescription": "Count of the number of read lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x23",
- "EventName": "uncore_hisi_l3c.wr_hit_spipe",
+ "ConfigCode": "0x23",
+ "EventName": "wr_hit_spipe",
"BriefDescription": "Count of the number of write lines that hits in spipe of this L3C",
"PublicDescription": "Count of the number of write lines that hits in spipe of this L3C",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x29",
- "EventName": "uncore_hisi_l3c.back_invalid",
+ "ConfigCode": "0x29",
+ "EventName": "back_invalid",
"BriefDescription": "Count of the number of L3C back invalid operations",
"PublicDescription": "Count of the number of L3C back invalid operations",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x40",
- "EventName": "uncore_hisi_l3c.retry_cpu",
+ "ConfigCode": "0x40",
+ "EventName": "retry_cpu",
"BriefDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"PublicDescription": "Count of the number of retry that L3C suppresses the CPU operations",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x41",
- "EventName": "uncore_hisi_l3c.retry_ring",
+ "ConfigCode": "0x41",
+ "EventName": "retry_ring",
"BriefDescription": "Count of the number of retry that L3C suppresses the ring operations",
"PublicDescription": "Count of the number of retry that L3C suppresses the ring operations",
"Unit": "hisi_sccl,l3c"
},
{
- "EventCode": "0x42",
- "EventName": "uncore_hisi_l3c.prefetch_drop",
+ "ConfigCode": "0x42",
+ "EventName": "prefetch_drop",
"BriefDescription": "Count of the number of prefetch drops from this L3C",
"PublicDescription": "Count of the number of prefetch drops from this L3C",
"Unit": "hisi_sccl,l3c"
diff --git a/tools/perf/pmu-events/arch/arm64/hisilicon/hip09/sys/uncore-cpa.json b/tools/perf/pmu-events/arch/arm64/hisilicon/hip09/sys/uncore-cpa.json
new file mode 100644
index 000000000000..7bcddec8a84f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/hisilicon/hip09/sys/uncore-cpa.json
@@ -0,0 +1,81 @@
+[
+ {
+ "ConfigCode": "0x00",
+ "EventName": "cpa_cycles",
+ "BriefDescription": "count of CPA cycles",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "ConfigCode": "0x61",
+ "EventName": "cpa_p1_wr_dat",
+ "BriefDescription": "Number of write ops transmitted by the P1 port",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "ConfigCode": "0x62",
+ "EventName": "cpa_p1_rd_dat",
+ "BriefDescription": "Number of read ops transmitted by the P1 port",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "ConfigCode": "0x3",
+ "EventName": "cpa_p1_rd_dat_64b",
+ "BriefDescription": "Number of read ops transmitted by the P1 port which size is 64 bytes",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "ConfigCode": "0x4",
+ "EventName": "cpa_p1_rd_dat_32b",
+ "BriefDescription": "Number of read ops transmitted by the P1 port which size is 32 bytes",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "ConfigCode": "0xE1",
+ "EventName": "cpa_p0_wr_dat",
+ "BriefDescription": "Number of write ops transmitted by the P0 port",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "ConfigCode": "0xE2",
+ "EventName": "cpa_p0_rd_dat",
+ "BriefDescription": "Number of read ops transmitted by the P0 port",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "ConfigCode": "0x83",
+ "EventName": "cpa_p0_rd_dat_64b",
+ "BriefDescription": "Number of read ops transmitted by the P0 port which size is 64 bytes",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "ConfigCode": "0x84",
+ "EventName": "cpa_p0_rd_dat_32b",
+ "BriefDescription": "Number of read ops transmitted by the P0 port which size is 32 bytes",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "MetricExpr": "(cpa_p1_wr_dat * 64 + cpa_p1_rd_dat_64b * 64 + cpa_p1_rd_dat_32b * 32) / cpa_cycles",
+ "BriefDescription": "Average bandwidth of CPA Port 1",
+ "MetricGroup": "CPA",
+ "MetricName": "cpa_p1_avg_bw",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ },
+ {
+ "MetricExpr": "(cpa_p0_wr_dat * 64 + cpa_p0_rd_dat_64b * 64 + cpa_p0_rd_dat_32b * 32) / cpa_cycles",
+ "BriefDescription": "Average bandwidth of CPA Port 0",
+ "MetricGroup": "CPA",
+ "MetricName": "cpa_p0_avg_bw",
+ "Compat": "0x00000030",
+ "Unit": "hisi_sicl,cpa"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv
index 0d609149b82a..9d400785d195 100644
--- a/tools/perf/pmu-events/arch/arm64/mapfile.csv
+++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv
@@ -12,13 +12,32 @@
#
#
#Family-model,Version,Filename,EventType
+0x00000000410fd020,v1,arm/cortex-a34,core
0x00000000410fd030,v1,arm/cortex-a53,core
0x00000000420f1000,v1,arm/cortex-a53,core
+0x00000000410fd040,v1,arm/cortex-a35,core
+0x00000000410fd050,v1,arm/cortex-a55,core
+0x00000000410fd060,v1,arm/cortex-a65-e1,core
+0x00000000410fd4a0,v1,arm/cortex-a65-e1,core
0x00000000410fd070,v1,arm/cortex-a57-a72,core
0x00000000410fd080,v1,arm/cortex-a57-a72,core
-0x00000000410fd0b0,v1,arm/cortex-a76-n1,core
-0x00000000410fd0c0,v1,arm/cortex-a76-n1,core
+0x00000000410fd090,v1,arm/cortex-a73,core
+0x00000000410fd0a0,v1,arm/cortex-a75,core
+0x00000000410fd0b0,v1,arm/cortex-a76,core
+0x00000000410fd0c0,v1,arm/neoverse-n1,core
+0x00000000410fd0d0,v1,arm/cortex-a77,core
+0x00000000410fd400,v1,arm/neoverse-v1,core
+0x00000000410fd410,v1,arm/cortex-a78,core
+0x00000000410fd4b0,v1,arm/cortex-a78,core
+0x00000000410fd440,v1,arm/cortex-x1,core
+0x00000000410fd4c0,v1,arm/cortex-x1,core
+0x00000000410fd460,v1,arm/cortex-a510,core
+0x00000000410fd470,v1,arm/cortex-a710,core
+0x00000000410fd480,v1,arm/cortex-x2,core
+0x00000000410fd490,v1,arm/neoverse-n2-v2,core
+0x00000000410fd4f0,v1,arm/neoverse-n2-v2,core
0x00000000420f5160,v1,cavium/thunderx2,core
0x00000000430f0af0,v1,cavium/thunderx2,core
+0x00000000460f0010,v1,fujitsu/a64fx,core
0x00000000480fd010,v1,hisilicon/hip08,core
0x00000000500f0000,v1,ampere/emag,core
diff --git a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json b/tools/perf/pmu-events/arch/arm64/recommended.json
index d0a19866563d..210afa856091 100644
--- a/tools/perf/pmu-events/arch/arm64/armv8-recommended.json
+++ b/tools/perf/pmu-events/arch/arm64/recommended.json
@@ -148,305 +148,305 @@
"EventCode": "0x60",
"EventName": "BUS_ACCESS_RD",
"BriefDescription": "Bus access read"
- },
- {
+ },
+ {
"PublicDescription": "Bus access write",
"EventCode": "0x61",
"EventName": "BUS_ACCESS_WR",
"BriefDescription": "Bus access write"
- },
- {
+ },
+ {
"PublicDescription": "Bus access, Normal, Cacheable, Shareable",
"EventCode": "0x62",
"EventName": "BUS_ACCESS_SHARED",
"BriefDescription": "Bus access, Normal, Cacheable, Shareable"
- },
- {
+ },
+ {
"PublicDescription": "Bus access, not Normal, Cacheable, Shareable",
"EventCode": "0x63",
"EventName": "BUS_ACCESS_NOT_SHARED",
"BriefDescription": "Bus access, not Normal, Cacheable, Shareable"
- },
- {
+ },
+ {
"PublicDescription": "Bus access, Normal",
"EventCode": "0x64",
"EventName": "BUS_ACCESS_NORMAL",
"BriefDescription": "Bus access, Normal"
- },
- {
+ },
+ {
"PublicDescription": "Bus access, peripheral",
"EventCode": "0x65",
"EventName": "BUS_ACCESS_PERIPH",
"BriefDescription": "Bus access, peripheral"
- },
- {
+ },
+ {
"PublicDescription": "Data memory access, read",
"EventCode": "0x66",
"EventName": "MEM_ACCESS_RD",
"BriefDescription": "Data memory access, read"
- },
- {
+ },
+ {
"PublicDescription": "Data memory access, write",
"EventCode": "0x67",
"EventName": "MEM_ACCESS_WR",
"BriefDescription": "Data memory access, write"
- },
- {
+ },
+ {
"PublicDescription": "Unaligned access, read",
"EventCode": "0x68",
"EventName": "UNALIGNED_LD_SPEC",
"BriefDescription": "Unaligned access, read"
- },
- {
+ },
+ {
"PublicDescription": "Unaligned access, write",
"EventCode": "0x69",
"EventName": "UNALIGNED_ST_SPEC",
"BriefDescription": "Unaligned access, write"
- },
- {
+ },
+ {
"PublicDescription": "Unaligned access",
"EventCode": "0x6a",
"EventName": "UNALIGNED_LDST_SPEC",
"BriefDescription": "Unaligned access"
- },
- {
+ },
+ {
"PublicDescription": "Exclusive operation speculatively executed, LDREX or LDX",
"EventCode": "0x6c",
"EventName": "LDREX_SPEC",
"BriefDescription": "Exclusive operation speculatively executed, LDREX or LDX"
- },
- {
+ },
+ {
"PublicDescription": "Exclusive operation speculatively executed, STREX or STX pass",
"EventCode": "0x6d",
"EventName": "STREX_PASS_SPEC",
"BriefDescription": "Exclusive operation speculatively executed, STREX or STX pass"
- },
- {
+ },
+ {
"PublicDescription": "Exclusive operation speculatively executed, STREX or STX fail",
"EventCode": "0x6e",
"EventName": "STREX_FAIL_SPEC",
"BriefDescription": "Exclusive operation speculatively executed, STREX or STX fail"
- },
- {
+ },
+ {
"PublicDescription": "Exclusive operation speculatively executed, STREX or STX",
"EventCode": "0x6f",
"EventName": "STREX_SPEC",
"BriefDescription": "Exclusive operation speculatively executed, STREX or STX"
- },
- {
+ },
+ {
"PublicDescription": "Operation speculatively executed, load",
"EventCode": "0x70",
"EventName": "LD_SPEC",
"BriefDescription": "Operation speculatively executed, load"
- },
- {
+ },
+ {
"PublicDescription": "Operation speculatively executed, store",
"EventCode": "0x71",
"EventName": "ST_SPEC",
"BriefDescription": "Operation speculatively executed, store"
- },
- {
+ },
+ {
"PublicDescription": "Operation speculatively executed, load or store",
"EventCode": "0x72",
"EventName": "LDST_SPEC",
"BriefDescription": "Operation speculatively executed, load or store"
- },
- {
+ },
+ {
"PublicDescription": "Operation speculatively executed, integer data processing",
"EventCode": "0x73",
"EventName": "DP_SPEC",
"BriefDescription": "Operation speculatively executed, integer data processing"
- },
- {
+ },
+ {
"PublicDescription": "Operation speculatively executed, Advanced SIMD instruction",
"EventCode": "0x74",
"EventName": "ASE_SPEC",
"BriefDescription": "Operation speculatively executed, Advanced SIMD instruction"
- },
- {
+ },
+ {
"PublicDescription": "Operation speculatively executed, floating-point instruction",
"EventCode": "0x75",
"EventName": "VFP_SPEC",
"BriefDescription": "Operation speculatively executed, floating-point instruction"
- },
- {
+ },
+ {
"PublicDescription": "Operation speculatively executed, software change of the PC",
"EventCode": "0x76",
"EventName": "PC_WRITE_SPEC",
"BriefDescription": "Operation speculatively executed, software change of the PC"
- },
- {
+ },
+ {
"PublicDescription": "Operation speculatively executed, Cryptographic instruction",
"EventCode": "0x77",
"EventName": "CRYPTO_SPEC",
"BriefDescription": "Operation speculatively executed, Cryptographic instruction"
- },
- {
+ },
+ {
"PublicDescription": "Branch speculatively executed, immediate branch",
"EventCode": "0x78",
"EventName": "BR_IMMED_SPEC",
"BriefDescription": "Branch speculatively executed, immediate branch"
- },
- {
+ },
+ {
"PublicDescription": "Branch speculatively executed, procedure return",
"EventCode": "0x79",
"EventName": "BR_RETURN_SPEC",
"BriefDescription": "Branch speculatively executed, procedure return"
- },
- {
+ },
+ {
"PublicDescription": "Branch speculatively executed, indirect branch",
"EventCode": "0x7a",
"EventName": "BR_INDIRECT_SPEC",
"BriefDescription": "Branch speculatively executed, indirect branch"
- },
- {
+ },
+ {
"PublicDescription": "Barrier speculatively executed, ISB",
"EventCode": "0x7c",
"EventName": "ISB_SPEC",
"BriefDescription": "Barrier speculatively executed, ISB"
- },
- {
+ },
+ {
"PublicDescription": "Barrier speculatively executed, DSB",
"EventCode": "0x7d",
"EventName": "DSB_SPEC",
"BriefDescription": "Barrier speculatively executed, DSB"
- },
- {
+ },
+ {
"PublicDescription": "Barrier speculatively executed, DMB",
"EventCode": "0x7e",
"EventName": "DMB_SPEC",
"BriefDescription": "Barrier speculatively executed, DMB"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Other synchronous",
"EventCode": "0x81",
"EventName": "EXC_UNDEF",
"BriefDescription": "Exception taken, Other synchronous"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Supervisor Call",
"EventCode": "0x82",
"EventName": "EXC_SVC",
"BriefDescription": "Exception taken, Supervisor Call"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Instruction Abort",
"EventCode": "0x83",
"EventName": "EXC_PABORT",
"BriefDescription": "Exception taken, Instruction Abort"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Data Abort and SError",
"EventCode": "0x84",
"EventName": "EXC_DABORT",
"BriefDescription": "Exception taken, Data Abort and SError"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, IRQ",
"EventCode": "0x86",
"EventName": "EXC_IRQ",
"BriefDescription": "Exception taken, IRQ"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, FIQ",
"EventCode": "0x87",
"EventName": "EXC_FIQ",
"BriefDescription": "Exception taken, FIQ"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Secure Monitor Call",
"EventCode": "0x88",
"EventName": "EXC_SMC",
"BriefDescription": "Exception taken, Secure Monitor Call"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Hypervisor Call",
"EventCode": "0x8a",
"EventName": "EXC_HVC",
"BriefDescription": "Exception taken, Hypervisor Call"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Instruction Abort not taken locally",
"EventCode": "0x8b",
"EventName": "EXC_TRAP_PABORT",
"BriefDescription": "Exception taken, Instruction Abort not taken locally"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Data Abort or SError not taken locally",
"EventCode": "0x8c",
"EventName": "EXC_TRAP_DABORT",
"BriefDescription": "Exception taken, Data Abort or SError not taken locally"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, Other traps not taken locally",
"EventCode": "0x8d",
"EventName": "EXC_TRAP_OTHER",
"BriefDescription": "Exception taken, Other traps not taken locally"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, IRQ not taken locally",
"EventCode": "0x8e",
"EventName": "EXC_TRAP_IRQ",
"BriefDescription": "Exception taken, IRQ not taken locally"
- },
- {
+ },
+ {
"PublicDescription": "Exception taken, FIQ not taken locally",
"EventCode": "0x8f",
"EventName": "EXC_TRAP_FIQ",
"BriefDescription": "Exception taken, FIQ not taken locally"
- },
- {
+ },
+ {
"PublicDescription": "Release consistency operation speculatively executed, Load-Acquire",
"EventCode": "0x90",
"EventName": "RC_LD_SPEC",
"BriefDescription": "Release consistency operation speculatively executed, Load-Acquire"
- },
- {
+ },
+ {
"PublicDescription": "Release consistency operation speculatively executed, Store-Release",
"EventCode": "0x91",
"EventName": "RC_ST_SPEC",
"BriefDescription": "Release consistency operation speculatively executed, Store-Release"
- },
- {
+ },
+ {
"PublicDescription": "Attributable Level 3 data or unified cache access, read",
"EventCode": "0xa0",
"EventName": "L3D_CACHE_RD",
"BriefDescription": "Attributable Level 3 data or unified cache access, read"
- },
- {
+ },
+ {
"PublicDescription": "Attributable Level 3 data or unified cache access, write",
"EventCode": "0xa1",
"EventName": "L3D_CACHE_WR",
"BriefDescription": "Attributable Level 3 data or unified cache access, write"
- },
- {
+ },
+ {
"PublicDescription": "Attributable Level 3 data or unified cache refill, read",
"EventCode": "0xa2",
"EventName": "L3D_CACHE_REFILL_RD",
"BriefDescription": "Attributable Level 3 data or unified cache refill, read"
- },
- {
+ },
+ {
"PublicDescription": "Attributable Level 3 data or unified cache refill, write",
"EventCode": "0xa3",
"EventName": "L3D_CACHE_REFILL_WR",
"BriefDescription": "Attributable Level 3 data or unified cache refill, write"
- },
- {
+ },
+ {
"PublicDescription": "Attributable Level 3 data or unified cache Write-Back, victim",
"EventCode": "0xa6",
"EventName": "L3D_CACHE_WB_VICTIM",
"BriefDescription": "Attributable Level 3 data or unified cache Write-Back, victim"
- },
- {
+ },
+ {
"PublicDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean",
"EventCode": "0xa7",
"EventName": "L3D_CACHE_WB_CLEAN",
"BriefDescription": "Attributable Level 3 data or unified cache Write-Back, cache clean"
- },
- {
+ },
+ {
"PublicDescription": "Attributable Level 3 data or unified cache access, invalidate",
"EventCode": "0xa8",
"EventName": "L3D_CACHE_INVAL",
"BriefDescription": "Attributable Level 3 data or unified cache access, invalidate"
- }
+ }
]
diff --git a/tools/perf/pmu-events/arch/arm64/sbsa.json b/tools/perf/pmu-events/arch/arm64/sbsa.json
new file mode 100644
index 000000000000..f678c37ea9c3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/sbsa.json
@@ -0,0 +1,30 @@
+[
+ {
+ "MetricExpr": "stall_slot_frontend / (#slots * cpu_cycles)",
+ "BriefDescription": "Frontend bound L1 topdown metric",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "frontend_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "(1 - op_retired / op_spec) * (1 - stall_slot / (#slots * cpu_cycles))",
+ "BriefDescription": "Bad speculation L1 topdown metric",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "bad_speculation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "(op_retired / op_spec) * (1 - stall_slot / (#slots * cpu_cycles))",
+ "BriefDescription": "Retiring L1 topdown metric",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "retiring",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricExpr": "stall_slot_backend / (#slots * cpu_cycles)",
+ "BriefDescription": "Backend Bound L1 topdown metric",
+ "MetricGroup": "TopdownL1",
+ "MetricName": "backend_bound",
+ "ScaleUnit": "100%"
+ }
+]
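
The four MetricExpr entries above implement the Arm topdown level-1 breakdown (frontend bound, bad speculation, retiring, backend bound) over a slot budget of #slots * cpu_cycles. As a hedged illustration only, the Python sketch below evaluates those same expressions from hypothetical raw counter values; the slot count and all numbers are assumptions, and in practice perf's metric engine computes this from real PMU data.

# Evaluate the four TopdownL1 MetricExpr formulas from sbsa.json above.
# All values are made up for illustration; '#slots' is assumed to be 5 here.
SLOTS = 5
counters = {
    "cpu_cycles": 1_000_000,
    "stall_slot": 2_600_000,
    "stall_slot_frontend": 1_200_000,
    "stall_slot_backend": 1_400_000,
    "op_retired": 1_800_000,
    "op_spec": 2_400_000,
}

total_slots = SLOTS * counters["cpu_cycles"]
retire_fraction = counters["op_retired"] / counters["op_spec"]
useful_slots = 1 - counters["stall_slot"] / total_slots

metrics = {
    "frontend_bound": counters["stall_slot_frontend"] / total_slots,
    "bad_speculation": (1 - retire_fraction) * useful_slots,
    "retiring": retire_fraction * useful_slots,
    "backend_bound": counters["stall_slot_backend"] / total_slots,
}
for name, value in metrics.items():
    print(f"{name}: {value * 100:.1f}%")   # ScaleUnit is "100%"
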
diff --git a/tools/perf/pmu-events/arch/nds32/n13/atcpmu.json b/tools/perf/pmu-events/arch/nds32/n13/atcpmu.json
index 5347350c360c..3e7ac409d894 100644
--- a/tools/perf/pmu-events/arch/nds32/n13/atcpmu.json
+++ b/tools/perf/pmu-events/arch/nds32/n13/atcpmu.json
@@ -286,5 +286,5 @@
"EventCode": "0x21e",
"EventName": "pop25_inst",
"BriefDescription": "V3 POP25 instructions"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
index 229150e7ab7d..4abdfc3f9692 100644
--- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv
+++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv
@@ -15,3 +15,4 @@
# Power8 entries
004[bcd][[:xdigit:]]{4},1,power8,core
004e[[:xdigit:]]{4},1,power9,core
+0080[[:xdigit:]]{4},1,power10,core
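
The new mapfile.csv row maps any PowerPC PVR whose upper halfword is 0x0080, followed by four hex digits, to the power10 event directory. A minimal sketch of that regex match follows; the sample identifier strings are hypothetical, and the exact string perf matches against comes from the running system, not from this script.

# Check identifier strings against the power10 pattern added to mapfile.csv.
# '[0-9a-fA-F]' stands in for the POSIX [[:xdigit:]] class; inputs are made up.
import re

POWER10 = re.compile(r"0080[0-9a-fA-F]{4}")

for cpuid in ("00800100", "00801203", "004e1202"):   # hypothetical PVR values
    family = "power10" if POWER10.fullmatch(cpuid) else "not power10"
    print(f"{cpuid}: {family}")
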
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/cache.json b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
new file mode 100644
index 000000000000..605be14f441c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/cache.json
@@ -0,0 +1,57 @@
+[
+ {
+ "EventCode": "0x1003C",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from either the local L2 or local L3."
+ },
+ {
+ "EventCode": "0x1E054",
+ "EventName": "PM_EXEC_STALL_DMISS_L21_L31",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from another core's L2 or L3 on the same chip."
+ },
+ {
+ "EventCode": "0x34054",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, without a dispatch conflict."
+ },
+ {
+ "EventCode": "0x34056",
+ "EventName": "PM_EXEC_STALL_LOAD_FINISH",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was finishing a load after its data was reloaded from a data source beyond the local L1; cycles in which the LSU was processing an L1-hit; cycles in which the NTF instruction merged with another load in the LMQ; cycles in which the NTF instruction is waiting for a data reload for a load miss, but the data comes back with a non-NTF instruction."
+ },
+ {
+ "EventCode": "0x3006C",
+ "EventName": "PM_RUN_CYC_SMT2_MODE",
+ "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT2 mode."
+ },
+ {
+ "EventCode": "0x300F4",
+ "EventName": "PM_RUN_INST_CMPL_CONC",
+ "BriefDescription": "PowerPC instructions completed by this thread when all threads in the core had the run-latch set."
+ },
+ {
+ "EventCode": "0x4C016",
+ "EventName": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local L2 or local L3, with a dispatch conflict."
+ },
+ {
+ "EventCode": "0x4D014",
+ "EventName": "PM_EXEC_STALL_LOAD",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a load instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "0x4D016",
+ "EventName": "PM_EXEC_STALL_PTESYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a PTESYNC instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "0x401EA",
+ "EventName": "PM_THRESH_EXC_128",
+ "BriefDescription": "Threshold counter exceeded a value of 128."
+ },
+ {
+ "EventCode": "0x400F6",
+ "EventName": "PM_BR_MPRED_CMPL",
+ "BriefDescription": "A mispredicted branch completed. Includes direction and target."
+ }
+]
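
Several of the events above (PM_EXEC_STALL_DMISS_L2L3 and its *_CONFLICT/*_NOCONFLICT variants) feed the DMISS_* stall-CPI metrics defined later in metrics.json, which normalise the stall cycles by PM_RUN_INST_CMPL. A hedged sketch of that normalisation, using invented counter values:

# Normalise a data-miss stall counter into a CPI contribution, as the
# DMISS_L2L3_STALL_CPI MetricExpr in metrics.json does.  Counts are made up.
pm_exec_stall_dmiss_l2l3 = 350_000   # hypothetical stall cycles
pm_run_inst_cmpl = 1_000_000         # hypothetical completed instructions

dmiss_l2l3_stall_cpi = pm_exec_stall_dmiss_l2l3 / pm_run_inst_cmpl
print(f"DMISS_L2L3_STALL_CPI: {dmiss_l2l3_stall_cpi:.3f} cycles/instruction")
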
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
new file mode 100644
index 000000000000..54acb55e2c8c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/floating_point.json
@@ -0,0 +1,7 @@
+[
+ {
+ "EventCode": "0x4016E",
+ "EventName": "PM_THRESH_NOT_MET",
+ "BriefDescription": "Threshold counter did not meet threshold."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/frontend.json b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
new file mode 100644
index 000000000000..558f9530f54e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/frontend.json
@@ -0,0 +1,247 @@
+[
+ {
+ "EventCode": "0x10004",
+ "EventName": "PM_EXEC_STALL_TRANSLATION",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss or ERAT miss and waited for it to resolve."
+ },
+ {
+ "EventCode": "0x10006",
+ "EventName": "PM_DISP_STALL_HELD_OTHER_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any other reason."
+ },
+ {
+ "EventCode": "0x10010",
+ "EventName": "PM_PMC4_OVERFLOW",
+ "BriefDescription": "The event selected for PMC4 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x10020",
+ "EventName": "PM_PMC4_REWIND",
+ "BriefDescription": "The speculative event selected for PMC4 rewinds and the counter for PMC4 is not charged."
+ },
+ {
+ "EventCode": "0x10038",
+ "EventName": "PM_DISP_STALL_TRANSLATION",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread because the MMU was handling a translation miss."
+ },
+ {
+ "EventCode": "0x1003A",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L2",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2 after suffering a branch mispredict."
+ },
+ {
+ "EventCode": "0x1D05E",
+ "EventName": "PM_DISP_STALL_HELD_HALT_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of power management."
+ },
+ {
+ "EventCode": "0x1E050",
+ "EventName": "PM_DISP_STALL_HELD_STF_MAPPER_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the STF mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR."
+ },
+ {
+ "EventCode": "0x1F054",
+ "EventName": "PM_DTLB_HIT",
+ "BriefDescription": "The PTE required by the instruction was resident in the TLB (data TLB access). When MMCR1[16]=0 this event counts only demand hits. When MMCR1[16]=1 this event includes demand and prefetch. Applies to both HPT and RPT."
+ },
+ {
+ "EventCode": "0x10064",
+ "EventName": "PM_DISP_STALL_IC_L2",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L2."
+ },
+ {
+ "EventCode": "0x101E8",
+ "EventName": "PM_THRESH_EXC_256",
+ "BriefDescription": "Threshold counter exceeded a count of 256."
+ },
+ {
+ "EventCode": "0x101EC",
+ "EventName": "PM_THRESH_MET",
+ "BriefDescription": "Threshold exceeded."
+ },
+ {
+ "EventCode": "0x100F2",
+ "EventName": "PM_1PLUS_PPC_CMPL",
+ "BriefDescription": "Cycles in which at least one instruction is completed by this thread."
+ },
+ {
+ "EventCode": "0x100F6",
+ "EventName": "PM_IERAT_MISS",
+ "BriefDescription": "IERAT Reloaded to satisfy an IERAT miss. All page sizes are counted by this event."
+ },
+ {
+ "EventCode": "0x100F8",
+ "EventName": "PM_DISP_STALL_CYC",
+ "BriefDescription": "Cycles the ICT has no itags assigned to this thread (no instructions were dispatched during these cycles)."
+ },
+ {
+ "EventCode": "0x20006",
+ "EventName": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch due to Issue queue full. Includes issue queue and branch queue."
+ },
+ {
+ "EventCode": "0x20114",
+ "EventName": "PM_MRK_L2_RC_DISP",
+ "BriefDescription": "Marked instruction RC dispatched in L2."
+ },
+ {
+ "EventCode": "0x2C010",
+ "EventName": "PM_EXEC_STALL_LSU",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Load Store Unit. This does not include simple fixed point instructions."
+ },
+ {
+ "EventCode": "0x2C016",
+ "EventName": "PM_DISP_STALL_IERAT_ONLY_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction ERAT miss."
+ },
+ {
+ "EventCode": "0x2C01E",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3 after suffering a branch mispredict."
+ },
+ {
+ "EventCode": "0x2D01A",
+ "EventName": "PM_DISP_STALL_IC_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread due to an Icache Miss."
+ },
+ {
+ "EventCode": "0x2E018",
+ "EventName": "PM_DISP_STALL_FETCH",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread because Fetch was being held."
+ },
+ {
+ "EventCode": "0x2E01A",
+ "EventName": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the XVFC mapper/SRB was full."
+ },
+ {
+ "EventCode": "0x2C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC2",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x24050",
+ "EventName": "PM_IOPS_DISP",
+ "BriefDescription": "Internal Operations dispatched. PM_IOPS_DISP / PM_INST_DISP will show the average number of internal operations per PowerPC instruction."
+ },
+ {
+ "EventCode": "0x2405E",
+ "EventName": "PM_ISSUE_CANCEL",
+ "BriefDescription": "An instruction issued and the issue was later cancelled. Only one cancel per PowerPC instruction."
+ },
+ {
+ "EventCode": "0x200FA",
+ "EventName": "PM_BR_TAKEN_CMPL",
+ "BriefDescription": "Branch Taken instruction completed."
+ },
+ {
+ "EventCode": "0x30004",
+ "EventName": "PM_DISP_STALL_FLUSH",
+ "BriefDescription": "Cycles when dispatch was stalled because of a flush that happened to an instruction(s) that was not yet NTC. PM_EXEC_STALL_NTC_FLUSH only includes instructions that were flushed after becoming NTC."
+ },
+ {
+ "EventCode": "0x3000A",
+ "EventName": "PM_DISP_STALL_ITLB_MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while waiting to resolve an instruction TLB miss."
+ },
+ {
+ "EventCode": "0x30012",
+ "EventName": "PM_FLUSH_COMPLETION",
+ "BriefDescription": "The instruction that was next to complete (oldest in the pipeline) did not complete because it suffered a flush."
+ },
+ {
+ "EventCode": "0x30014",
+ "EventName": "PM_EXEC_STALL_STORE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "0x30018",
+ "EventName": "PM_DISP_STALL_HELD_SCOREBOARD_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch while waiting on the Scoreboard. This event combines VSCR and FPSCR together."
+ },
+ {
+ "EventCode": "0x30026",
+ "EventName": "PM_EXEC_STALL_STORE_MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a store whose cache line was not resident in the L1 and was waiting for allocation of the missing line into the L1."
+ },
+ {
+ "EventCode": "0x3012A",
+ "EventName": "PM_MRK_L2_RC_DONE",
+ "BriefDescription": "L2 RC machine completed the transaction for the marked instruction."
+ },
+ {
+ "EventCode": "0x3F046",
+ "EventName": "PM_ITLB_HIT_1G",
+ "BriefDescription": "Instruction TLB hit (IERAT reload) page size 1G, which implies Radix Page Table translation is in use. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x34058",
+ "EventName": "PM_DISP_STALL_BR_MPRED_ICMISS",
+ "BriefDescription": "Cycles when dispatch was stalled after a mispredicted branch resulted in an instruction cache miss."
+ },
+ {
+ "EventCode": "0x3D05C",
+ "EventName": "PM_DISP_STALL_HELD_RENAME_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because the mapper/SRB was full. Includes GPR (count, link, tar), VSR, VMR, FPR and XVFC."
+ },
+ {
+ "EventCode": "0x3E052",
+ "EventName": "PM_DISP_STALL_IC_L3",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from the local L3."
+ },
+ {
+ "EventCode": "0x3E054",
+ "EventName": "PM_LD_MISS_L1",
+ "BriefDescription": "Load Missed L1, counted at execution time (can be greater than loads finished). LMQ merges are not included in this count. i.e. if a load instruction misses on an address that is already allocated on the LMQ, this event will not increment for that load). Note that this count is per slice, so if a load spans multiple slices this event will increment multiple times for a single load."
+ },
+ {
+ "EventCode": "0x301EA",
+ "EventName": "PM_THRESH_EXC_1024",
+ "BriefDescription": "Threshold counter exceeded a value of 1024."
+ },
+ {
+ "EventCode": "0x300FA",
+ "EventName": "PM_INST_FROM_L3MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x40006",
+ "EventName": "PM_ISSUE_KILL",
+ "BriefDescription": "Cycles in which an instruction or group of instructions were cancelled after being issued. This event increments once per occurrence, regardless of how many instructions are included in the issue group."
+ },
+ {
+ "EventCode": "0x40116",
+ "EventName": "PM_MRK_LARX_FIN",
+ "BriefDescription": "Marked load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "0x4C010",
+ "EventName": "PM_DISP_STALL_BR_MPRED_IC_L3MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from sources beyond the local L3 after suffering a mispredicted branch."
+ },
+ {
+ "EventCode": "0x4D01E",
+ "EventName": "PM_DISP_STALL_BR_MPRED",
+ "BriefDescription": "Cycles when dispatch was stalled for this thread due to a mispredicted branch."
+ },
+ {
+ "EventCode": "0x4E010",
+ "EventName": "PM_DISP_STALL_IC_L3MISS",
+ "BriefDescription": "Cycles when dispatch was stalled while the instruction was fetched from any source beyond the local L3."
+ },
+ {
+ "EventCode": "0x4E01A",
+ "EventName": "PM_DISP_STALL_HELD_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch for any reason."
+ },
+ {
+ "EventCode": "0x4003C",
+ "EventName": "PM_DISP_STALL_HELD_SYNC_CYC",
+ "BriefDescription": "Cycles in which the NTC instruction is held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch."
+ },
+ {
+ "EventCode": "0x44056",
+ "EventName": "PM_VECTOR_ST_CMPL",
+ "BriefDescription": "Vector store instructions completed."
+ }
+]
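
The PM_IOPS_DISP entry above spells out a derived ratio: dividing it by PM_INST_DISP gives the average number of internal operations per PowerPC instruction. A tiny sketch of that arithmetic, with made-up counter readings:

# Ratio suggested in the PM_IOPS_DISP description; both counts are fictitious.
pm_iops_disp = 5_400_000   # internal operations dispatched
pm_inst_disp = 4_000_000   # PowerPC instructions dispatched

print(f"internal ops per instruction: {pm_iops_disp / pm_inst_disp:.2f}")
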
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/locks.json b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
new file mode 100644
index 000000000000..b5a0d6521963
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/locks.json
@@ -0,0 +1,12 @@
+[
+ {
+ "EventCode": "0x1E058",
+ "EventName": "PM_STCX_FAIL_FIN",
+ "BriefDescription": "Conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "0x4E050",
+ "EventName": "PM_STCX_PASS_FIN",
+ "BriefDescription": "Conditional store instruction (STCX) passed. LARX and STCX are instructions used to acquire a lock."
+ }
+]
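
PM_STCX_FAIL_FIN and PM_STCX_PASS_FIN together cover every finished conditional store, so their ratio is a natural lock-contention indicator. The ratio below is an illustration only (it is not a metric defined in these files), and the counts are invented.

# Derived STCX failure ratio from the two lock events above; values are made up.
pm_stcx_fail_fin = 1_250
pm_stcx_pass_fin = 48_750

total = pm_stcx_fail_fin + pm_stcx_pass_fin
print(f"STCX failure ratio: {100 * pm_stcx_fail_fin / total:.2f}%")
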
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/marked.json b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
new file mode 100644
index 000000000000..58b5dfe3a273
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/marked.json
@@ -0,0 +1,142 @@
+[
+ {
+ "EventCode": "0x1002C",
+ "EventName": "PM_LD_PREFETCH_CACHE_LINE_MISS",
+ "BriefDescription": "The L1 cache was reloaded with a line that fulfills a prefetch request."
+ },
+ {
+ "EventCode": "0x10132",
+ "EventName": "PM_MRK_INST_ISSUED",
+ "BriefDescription": "Marked instruction issued. Note that stores always get issued twice, the address gets issued to the LSU and the data gets issued to the VSU. Also, issues can sometimes get killed/cancelled and cause multiple sequential issues for the same instruction."
+ },
+ {
+ "EventCode": "0x101E0",
+ "EventName": "PM_MRK_INST_DISP",
+ "BriefDescription": "The thread has dispatched a randomly sampled marked instruction."
+ },
+ {
+ "EventCode": "0x101E2",
+ "EventName": "PM_MRK_BR_TAKEN_CMPL",
+ "BriefDescription": "Marked Branch Taken instruction completed."
+ },
+ {
+ "EventCode": "0x20112",
+ "EventName": "PM_MRK_NTF_FIN",
+ "BriefDescription": "The marked instruction became the oldest in the pipeline before it finished. It excludes instructions that finish at dispatch."
+ },
+ {
+ "EventCode": "0x2C01C",
+ "EventName": "PM_EXEC_STALL_DMISS_OFF_CHIP",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a remote chip."
+ },
+ {
+ "EventCode": "0x20138",
+ "EventName": "PM_MRK_ST_NEST",
+ "BriefDescription": "A store has been sampled/marked and is at the point of execution where it has completed in the core and can no longer be flushed. At this point the store is sent to the L2."
+ },
+ {
+ "EventCode": "0x2013A",
+ "EventName": "PM_MRK_BRU_FIN",
+ "BriefDescription": "Marked Branch instruction finished."
+ },
+ {
+ "EventCode": "0x2C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC2",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[15:27]."
+ },
+ {
+ "EventCode": "0x24156",
+ "EventName": "PM_MRK_STCX_FIN",
+ "BriefDescription": "Marked conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "0x24158",
+ "EventName": "PM_MRK_INST",
+ "BriefDescription": "An instruction was marked. Includes both Random Instruction Sampling (RIS) at decode time and Random Event Sampling (RES) at the time the configured event happens."
+ },
+ {
+ "EventCode": "0x2415C",
+ "EventName": "PM_MRK_BR_CMPL",
+ "BriefDescription": "A marked branch completed. All branches are included."
+ },
+ {
+ "EventCode": "0x200FD",
+ "EventName": "PM_L1_ICACHE_MISS",
+ "BriefDescription": "Demand iCache Miss."
+ },
+ {
+ "EventCode": "0x30130",
+ "EventName": "PM_MRK_INST_FIN",
+ "BriefDescription": "marked instruction finished. Excludes instructions that finish at dispatch. Note that stores always finish twice since the address gets issued to the LSU and the data gets issued to the VSU."
+ },
+ {
+ "EventCode": "0x34146",
+ "EventName": "PM_MRK_LD_CMPL",
+ "BriefDescription": "Marked loads completed."
+ },
+ {
+ "EventCode": "0x3E158",
+ "EventName": "PM_MRK_STCX_FAIL",
+ "BriefDescription": "Marked conditional store instruction (STCX) failed. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "0x3E15A",
+ "EventName": "PM_MRK_ST_FIN",
+ "BriefDescription": "The marked instruction was a store of any kind."
+ },
+ {
+ "EventCode": "0x30068",
+ "EventName": "PM_L1_ICACHE_RELOADED_PREF",
+ "BriefDescription": "Counts all Icache prefetch reloads ( includes demand turned into prefetch)."
+ },
+ {
+ "EventCode": "0x301E4",
+ "EventName": "PM_MRK_BR_MPRED_CMPL",
+ "BriefDescription": "Marked Branch Mispredicted. Includes direction and target."
+ },
+ {
+ "EventCode": "0x300F6",
+ "EventName": "PM_LD_DEMAND_MISS_L1",
+ "BriefDescription": "The L1 cache was reloaded with a line that fulfills a demand miss request. Counted at reload time, before finish."
+ },
+ {
+ "EventCode": "0x300FE",
+ "EventName": "PM_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss."
+ },
+ {
+ "EventCode": "0x40012",
+ "EventName": "PM_L1_ICACHE_RELOADED_ALL",
+ "BriefDescription": "Counts all Icache reloads includes demand, prefetch, prefetch turned into demand and demand turned into prefetch."
+ },
+ {
+ "EventCode": "0x40134",
+ "EventName": "PM_MRK_INST_TIMEO",
+ "BriefDescription": "Marked instruction finish timeout (instruction was lost)."
+ },
+ {
+ "EventCode": "0x4505A",
+ "EventName": "PM_SP_FLOP_CMPL",
+ "BriefDescription": "Single Precision floating point instructions completed."
+ },
+ {
+ "EventCode": "0x4D058",
+ "EventName": "PM_VECTOR_FLOP_CMPL",
+ "BriefDescription": "Vector floating point instructions completed."
+ },
+ {
+ "EventCode": "0x4D05A",
+ "EventName": "PM_NON_MATH_FLOP_CMPL",
+ "BriefDescription": "Non Math instructions completed."
+ },
+ {
+ "EventCode": "0x401E0",
+ "EventName": "PM_MRK_INST_CMPL",
+ "BriefDescription": "marked instruction completed."
+ },
+ {
+ "EventCode": "0x400FE",
+ "EventName": "PM_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/memory.json b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
new file mode 100644
index 000000000000..843b51f531e9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/memory.json
@@ -0,0 +1,187 @@
+[
+ {
+ "EventCode": "0x1000A",
+ "EventName": "PM_PMC3_REWIND",
+ "BriefDescription": "The speculative event selected for PMC3 rewinds and the counter for PMC3 is not charged."
+ },
+ {
+ "EventCode": "0x1C040",
+ "EventName": "PM_XFER_FROM_SRC_PMC1",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x1C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC1",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[0:12]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x1C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC1",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[0:12]."
+ },
+ {
+ "EventCode": "0x1C056",
+ "EventName": "PM_DERAT_MISS_4K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 4K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x1C058",
+ "EventName": "PM_DTLB_MISS_16G",
+ "BriefDescription": "Data TLB reload (after a miss) page size 16G. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x1C05C",
+ "EventName": "PM_DTLB_MISS_2M",
+ "BriefDescription": "Data TLB reload (after a miss) page size 2M. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x1E056",
+ "EventName": "PM_EXEC_STALL_STORE_PIPE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the store unit. This does not include cycles spent handling store misses, PTESYNC instructions or TLBIE instructions."
+ },
+ {
+ "EventCode": "0x1F150",
+ "EventName": "PM_MRK_ST_L2_CYC",
+ "BriefDescription": "Cycles from L2 RC dispatch to L2 RC completion."
+ },
+ {
+ "EventCode": "0x10062",
+ "EventName": "PM_LD_L3MISS_PEND_CYC",
+ "BriefDescription": "Cycles L3 miss was pending for this thread."
+ },
+ {
+ "EventCode": "0x20010",
+ "EventName": "PM_PMC1_OVERFLOW",
+ "BriefDescription": "The event selected for PMC1 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x2001A",
+ "EventName": "PM_ITLB_HIT",
+ "BriefDescription": "The PTE required to translate the instruction address was resident in the TLB (instruction TLB access/IERAT reload). Applies to both HPT and RPT. When MMCR1[17]=0 this event counts only for demand misses. When MMCR1[17]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x2003E",
+ "EventName": "PM_PTESYNC_FIN",
+ "BriefDescription": "Ptesync instruction finished in the store unit. Only one ptesync can finish at a time."
+ },
+ {
+ "EventCode": "0x2C040",
+ "EventName": "PM_XFER_FROM_SRC_PMC2",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[15:27]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x2C054",
+ "EventName": "PM_DERAT_MISS_64K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x2C056",
+ "EventName": "PM_DTLB_MISS_4K",
+ "BriefDescription": "Data TLB reload (after a miss) page size 4K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x2D154",
+ "EventName": "PM_MRK_DERAT_MISS_64K",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 64K for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x200F6",
+ "EventName": "PM_DERAT_MISS",
+ "BriefDescription": "DERAT Reloaded to satisfy a DERAT miss. All page sizes are counted by this event. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x30016",
+ "EventName": "PM_EXEC_STALL_DERAT_DTLB_MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered a TLB miss and waited for it resolve."
+ },
+ {
+ "EventCode": "0x3C040",
+ "EventName": "PM_XFER_FROM_SRC_PMC3",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x3C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC3",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[30:42]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x3C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC3",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[30:42]."
+ },
+ {
+ "EventCode": "0x3C054",
+ "EventName": "PM_DERAT_MISS_16M",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 16M. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x3C056",
+ "EventName": "PM_DTLB_MISS_64K",
+ "BriefDescription": "Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x3C058",
+ "EventName": "PM_LARX_FIN",
+ "BriefDescription": "Load and reserve instruction (LARX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "0x301E2",
+ "EventName": "PM_MRK_ST_CMPL",
+ "BriefDescription": "Marked store completed and sent to nest. Note that this count excludes cache-inhibited stores."
+ },
+ {
+ "EventCode": "0x300FC",
+ "EventName": "PM_DTLB_MISS",
+ "BriefDescription": "The DPTEG required for the load/store instruction in execution was missing from the TLB. It includes pages of all sizes for demand and prefetch activity."
+ },
+ {
+ "EventCode": "0x4D02C",
+ "EventName": "PM_PMC1_REWIND",
+ "BriefDescription": "The speculative event selected for PMC1 rewinds and the counter for PMC1 is not charged."
+ },
+ {
+ "EventCode": "0x4003E",
+ "EventName": "PM_LD_CMPL",
+ "BriefDescription": "Loads completed."
+ },
+ {
+ "EventCode": "0x4C040",
+ "EventName": "PM_XFER_FROM_SRC_PMC4",
+ "BriefDescription": "The processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x4C142",
+ "EventName": "PM_MRK_XFER_FROM_SRC_PMC4",
+ "BriefDescription": "For a marked data transfer instruction, the processor's L1 data cache was reloaded from the source specified in MMCR3[45:57]. If MMCR1[16|17] is 0 (default), this count includes only lines that were reloaded to satisfy a demand miss. If MMCR1[16|17] is 1, this count includes both demand misses and prefetch reloads."
+ },
+ {
+ "EventCode": "0x4C144",
+ "EventName": "PM_MRK_XFER_FROM_SRC_CYC_PMC4",
+ "BriefDescription": "Cycles taken for a marked demand miss to reload a line from the source specified in MMCR3[45:57]."
+ },
+ {
+ "EventCode": "0x4C056",
+ "EventName": "PM_DTLB_MISS_16M",
+ "BriefDescription": "Data TLB reload (after a miss) page size 16M. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x4C05A",
+ "EventName": "PM_DTLB_MISS_1G",
+ "BriefDescription": "Data TLB reload (after a miss) page size 1G. Implies radix translation was used. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x4C15E",
+ "EventName": "PM_MRK_DTLB_MISS_64K",
+ "BriefDescription": "Marked Data TLB reload (after a miss) page size 64K. When MMCR1[16]=0 this event counts only for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x4D056",
+ "EventName": "PM_NON_FMA_FLOP_CMPL",
+ "BriefDescription": "Non FMA instruction completed."
+ },
+ {
+ "EventCode": "0x40164",
+ "EventName": "PM_MRK_DERAT_MISS_2M",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M for a marked instruction. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ }
+]
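
The per-page-size DERAT miss events above are combined by metrics.json (below) into DERAT_*_MISS_RATIO metrics, each dividing one page-size counter by the all-sizes PM_DERAT_MISS total. A short sketch of that breakdown, using hypothetical counts:

# Per-page-size share of DERAT misses, mirroring the DERAT_*_MISS_RATIO
# MetricExpr definitions in metrics.json.  All counts are invented.
pm_derat_miss = 10_000
derat_miss_by_size = {"4K": 7_000, "64K": 2_500, "16M": 400, "2M": 100}

for size, count in derat_miss_by_size.items():
    print(f"DERAT_{size}_MISS_RATIO: {count / pm_derat_miss:.3f}")
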
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
new file mode 100644
index 000000000000..6f53583a0c62
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/metrics.json
@@ -0,0 +1,676 @@
+[
+ {
+ "BriefDescription": "Percentage of cycles that are run cycles",
+ "MetricExpr": "PM_RUN_CYC / PM_CYC * 100",
+ "MetricGroup": "General",
+ "MetricName": "RUN_CYCLES_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction",
+ "MetricExpr": "PM_CYC / PM_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "CYCLES_PER_INSTRUCTION"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled for any reason",
+ "MetricExpr": "PM_DISP_STALL_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI;CPI_STALL_RATIO",
+ "MetricName": "DISPATCHED_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because there was a flush",
+ "MetricExpr": "PM_DISP_STALL_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_FLUSH_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because the MMU was handling a translation miss",
+ "MetricExpr": "PM_DISP_STALL_TRANSLATION / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_TRANSLATION_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled waiting to resolve an instruction ERAT miss",
+ "MetricExpr": "PM_DISP_STALL_IERAT_ONLY_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IERAT_ONLY_MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled waiting to resolve an instruction TLB miss",
+ "MetricExpr": "PM_DISP_STALL_ITLB_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_ITLB_MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to an icache miss",
+ "MetricExpr": "PM_DISP_STALL_IC_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IC_MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from the local L2",
+ "MetricExpr": "PM_DISP_STALL_IC_L2 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IC_L2_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from the local L3",
+ "MetricExpr": "PM_DISP_STALL_IC_L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IC_L3_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while the instruction was fetched from any source beyond the local L3",
+ "MetricExpr": "PM_DISP_STALL_IC_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_IC_L3MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to an icache miss after a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED_ICMISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_ICMISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from the local L2 after suffering a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L2 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_IC_L2_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from the local L3 after suffering a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_IC_L3_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled while instruction was fetched from any source beyond the local L3 after suffering a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED_IC_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_IC_L3MISS_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled due to a branch mispredict",
+ "MetricExpr": "PM_DISP_STALL_BR_MPRED / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_BR_MPRED_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch for any reason",
+ "MetricExpr": "PM_DISP_STALL_HELD_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because of a synchronizing instruction that requires the ICT to be empty before dispatch",
+ "MetricExpr": "PM_DISP_STALL_HELD_SYNC_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISP_HELD_STALL_SYNC_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch while waiting on the scoreboard",
+ "MetricExpr": "PM_DISP_STALL_HELD_SCOREBOARD_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISP_HELD_STALL_SCOREBOARD_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch due to issue queue full",
+ "MetricExpr": "PM_DISP_STALL_HELD_ISSQ_FULL_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISP_HELD_STALL_ISSQ_FULL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the mapper/SRB was full",
+ "MetricExpr": "PM_DISP_STALL_HELD_RENAME_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_RENAME_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the STF mapper/SRB was full",
+ "MetricExpr": "PM_DISP_STALL_HELD_STF_MAPPER_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_STF_MAPPER_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because the XVFC mapper/SRB was full",
+ "MetricExpr": "PM_DISP_STALL_HELD_XVFC_MAPPER_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_XVFC_MAPPER_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch for any other reason",
+ "MetricExpr": "PM_DISP_STALL_HELD_OTHER_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_OTHER_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction has been dispatched but not issued for any reason",
+ "MetricExpr": "PM_ISSUE_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI;CPI_STALL_RATIO",
+ "MetricName": "ISSUE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting to be finished in one of the execution units",
+ "MetricExpr": "PM_EXEC_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI;CPI_STALL_RATIO",
+ "MetricName": "EXECUTION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction spent executing an NTC instruction that gets flushed some time after dispatch",
+ "MetricExpr": "PM_EXEC_STALL_NTC_FLUSH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "NTC_FLUSH_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTF instruction finishes at dispatch",
+ "MetricExpr": "PM_EXEC_STALL_FIN_AT_DISP / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "FIN_AT_DISP_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing in the branch unit",
+ "MetricExpr": "PM_EXEC_STALL_BRU / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "BRU_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a simple fixed point instruction that is executing in the LSU",
+ "MetricExpr": "PM_EXEC_STALL_SIMPLE_FX / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "SIMPLE_FX_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing in the VSU",
+ "MetricExpr": "PM_EXEC_STALL_VSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "VSU_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting to be finished in one of the execution units",
+ "MetricExpr": "PM_EXEC_STALL_TRANSLATION / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "TRANSLATION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a load or store that suffered a translation miss",
+ "MetricExpr": "PM_EXEC_STALL_DERAT_ONLY_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DERAT_ONLY_MISS_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is recovering from a TLB miss",
+ "MetricExpr": "PM_EXEC_STALL_DERAT_DTLB_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DERAT_DTLB_MISS_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing in the LSU",
+ "MetricExpr": "PM_EXEC_STALL_LSU / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "LSU_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a load that is executing in the LSU",
+ "MetricExpr": "PM_EXEC_STALL_LOAD / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "LOAD_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from either the local L2 or local L3",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L2L3 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L2L3_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from either the local L2 or local L3, with an RC dispatch conflict",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L2L3_CONFLICT / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L2L3_CONFLICT_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from either the local L2 or local L3, without an RC dispatch conflict",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L2L3_NOCONFLICT / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L2L3_NOCONFLICT_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from a source beyond the local L2 and local L3",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L3MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L3MISS_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from a neighbor chiplet's L2 or L3 in the same chip",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_L21_L31 / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_L21_L31_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from L4, local memory or OpenCAPI chip",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_LMEM / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_LMEM_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from a remote chip (cache, L4, memory or OpenCAPI) in the same group",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_OFF_CHIP / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_OFF_CHIP_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is waiting for a load miss to resolve from a distant chip (cache, L4, memory or OpenCAPI chip)",
+ "MetricExpr": "PM_EXEC_STALL_DMISS_OFF_NODE / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DMISS_OFF_NODE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing a TLBIEL instruction",
+ "MetricExpr": "PM_EXEC_STALL_TLBIEL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "TLBIEL_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is finishing a load after its data has been reloaded from a data source beyond the local L1, OR when the LSU is processing an L1-hit, OR when the NTF instruction merged with another load in the LMQ",
+ "MetricExpr": "PM_EXEC_STALL_LOAD_FINISH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "LOAD_FINISH_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a store that is executing in the LSU",
+ "MetricExpr": "PM_EXEC_STALL_STORE / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "STORE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is in the store unit outside of handling store misses or other special store operations",
+ "MetricExpr": "PM_EXEC_STALL_STORE_PIPE / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "STORE_PIPE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a store whose cache line was not resident in the L1 and had to wait for allocation of the missing line into the L1",
+ "MetricExpr": "PM_EXEC_STALL_STORE_MISS / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "STORE_MISS_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a TLBIE instruction waiting for a response from the L2",
+ "MetricExpr": "PM_EXEC_STALL_TLBIE / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "TLBIE_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is executing a PTESYNC instruction",
+ "MetricExpr": "PM_EXEC_STALL_PTESYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "PTESYNC_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction cannot complete because the thread was blocked",
+ "MetricExpr": "PM_CMPL_STALL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI;CPI_STALL_RATIO",
+ "MetricName": "COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction cannot complete because it was interrupted by ANY exception",
+ "MetricExpr": "PM_CMPL_STALL_EXCEPTION / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "EXCEPTION_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is stuck at finish waiting for the non-speculative finish of either a STCX instruction waiting for its result or a load waiting for non-critical sectors of data and ECC",
+ "MetricExpr": "PM_CMPL_STALL_MEM_ECC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "MEM_ECC_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a STCX instruction waiting for resolution from the nest",
+ "MetricExpr": "PM_CMPL_STALL_STCX / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "STCX_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a LWSYNC instruction waiting to complete",
+ "MetricExpr": "PM_CMPL_STALL_LWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "LWSYNC_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction is a HWSYNC instruction stuck at finish waiting for a response from the L2",
+ "MetricExpr": "PM_CMPL_STALL_HWSYNC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "HWSYNC_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction required special handling before completion",
+ "MetricExpr": "PM_CMPL_STALL_SPECIAL / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "SPECIAL_COMPLETION_STALL_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when dispatch was stalled because fetch was being held, so there was nothing in the pipeline for this thread",
+ "MetricExpr": "PM_DISP_STALL_FETCH / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_FETCH_CPI"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTC instruction was held at dispatch because of power management",
+ "MetricExpr": "PM_DISP_STALL_HELD_HALT_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "CPI",
+ "MetricName": "DISPATCHED_HELD_HALT_CPI"
+ },
+ {
+ "BriefDescription": "Percentage of flushes per completed instruction",
+ "MetricExpr": "PM_FLUSH / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Others",
+ "MetricName": "FLUSH_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of flushes due to a branch mispredict per completed instruction",
+ "MetricExpr": "PM_FLUSH_MPRED / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Others",
+ "MetricName": "BR_MPRED_FLUSH_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of branch mispredictions per completed instruction",
+ "MetricExpr": "PM_BR_MPRED_CMPL / PM_RUN_INST_CMPL",
+ "MetricGroup": "Others",
+ "MetricName": "BRANCH_MISPREDICTION_RATE"
+ },
+ {
+ "BriefDescription": "Percentage of finished loads that missed in the L1",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_LD_REF_L1 * 100",
+ "MetricGroup": "Others",
+ "MetricName": "L1_LD_MISS_RATIO",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were loads that missed the L1",
+ "MetricExpr": "PM_LD_MISS_L1 / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Others",
+ "MetricName": "L1_LD_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions when the DPTEG required for the load/store instruction in execution was missing from the TLB",
+ "MetricExpr": "PM_DTLB_MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Others",
+ "MetricName": "DTLB_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of completed instructions dispatched per instruction completed",
+ "MetricExpr": "PM_INST_DISP / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "DISPATCH_PER_INST_CMPL"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were a demand load that did not hit in the L1 or L2",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "General",
+ "MetricName": "L2_LD_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were demand fetches that missed the L1 icache",
+ "MetricExpr": "PM_L1_ICACHE_MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "L1_INST_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were demand fetches that reloaded from beyond the L3 icache",
+ "MetricExpr": "PM_INST_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "General",
+ "MetricName": "L3_INST_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of completed instructions per cycle",
+ "MetricExpr": "PM_INST_CMPL / PM_CYC",
+ "MetricGroup": "General",
+ "MetricName": "IPC"
+ },
+ {
+ "BriefDescription": "Average number of cycles per completed instruction group",
+ "MetricExpr": "PM_CYC / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "CYCLES_PER_COMPLETED_INSTRUCTIONS_SET"
+ },
+ {
+ "BriefDescription": "Percentage of cycles when at least 1 instruction dispatched",
+ "MetricExpr": "PM_1PLUS_PPC_DISP / PM_RUN_CYC * 100",
+ "MetricGroup": "General",
+ "MetricName": "CYCLES_ATLEAST_ONE_INST_DISPATCHED",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of finished loads per completed instruction",
+ "MetricExpr": "PM_LD_REF_L1 / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "LOADS_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of finished stores per completed instruction",
+ "MetricExpr": "PM_ST_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "STORES_PER_INST"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded from beyond the L2 per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L2_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand loads that reloaded from beyond the L3 per completed instruction",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L3_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses with 4k page size per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_4K_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses with 64k page size per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_64K_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of run cycles per completed instruction",
+ "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "RUN_CPI"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of completed instructions per run cycle",
+ "MetricExpr": "PM_RUN_INST_CMPL / PM_RUN_CYC",
+ "MetricGroup": "General",
+ "MetricName": "RUN_IPC"
+ },
+ {
+ "BriefDescription": "Average number of completed instructions per instruction group",
+ "MetricExpr": "PM_RUN_INST_CMPL / PM_1PLUS_PPC_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "AVERAGE_COMPLETED_INSTRUCTION_SET_SIZE"
+ },
+ {
+ "BriefDescription": "Average number of finished instructions per completed instructions",
+ "MetricExpr": "PM_INST_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "INST_FIN_PER_CMPL"
+ },
+ {
+ "BriefDescription": "Average cycles per completed instruction when the NTF instruction is completing and the finish was overlooked",
+ "MetricExpr": "PM_EXEC_STALL_UNKNOWN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "EXEC_STALL_UNKOWN_CPI"
+ },
+ {
+ "BriefDescription": "Percentage of finished branches that were taken",
+ "MetricExpr": "PM_BR_TAKEN_CMPL / PM_BR_FIN * 100",
+ "MetricGroup": "General",
+ "MetricName": "TAKEN_BRANCHES",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of completed instructions that were a demand load that did not hit in the L1, L2, or the L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "General",
+ "MetricName": "L3_LD_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Average number of finished branches per completed instruction",
+ "MetricExpr": "PM_BR_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "BRANCHES_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of instructions finished in the LSU per completed instruction",
+ "MetricExpr": "PM_LSU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "LSU_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of instructions finished in the VSU per completed instruction",
+ "MetricExpr": "PM_VSU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "VSU_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of TLBIE instructions finished in the LSU per completed instruction",
+ "MetricExpr": "PM_TLBIE_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "TLBIE_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of STCX instructions finshed per completed instruction",
+ "MetricExpr": "PM_STCX_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "STXC_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of LARX instructions finshed per completed instruction",
+ "MetricExpr": "PM_LARX_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "LARX_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of PTESYNC instructions finshed per completed instruction",
+ "MetricExpr": "PM_PTESYNC_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "PTESYNC_PER_INST"
+ },
+ {
+ "BriefDescription": "Average number of simple fixed-point instructions finshed in the store unit per completed instruction",
+ "MetricExpr": "PM_FX_LSU_FIN / PM_RUN_INST_CMPL",
+ "MetricGroup": "General",
+ "MetricName": "FX_PER_INST"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded the L1 cache",
+ "MetricExpr": "PM_LD_DEMAND_MISS_L1 / PM_LD_MISS_L1 * 100",
+ "MetricGroup": "General",
+ "MetricName": "DL1_MISS_RELOADS",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from beyond the local L2",
+ "MetricExpr": "PM_DATA_FROM_L2MISS / PM_LD_DEMAND_MISS_L1 * 100",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L2_MISS",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of demand load misses that reloaded from beyond the local L3",
+ "MetricExpr": "PM_DATA_FROM_L3MISS / PM_LD_DEMAND_MISS_L1 * 100",
+ "MetricGroup": "dL1_Reloads",
+ "MetricName": "DL1_RELOAD_FROM_L3_MISS",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles stalled due to the NTC instruction waiting for a load miss to resolve from a source beyond the local L2 and local L3",
+ "MetricExpr": "DMISS_L3MISS_STALL_CPI / RUN_CPI * 100",
+ "MetricGroup": "General",
+ "MetricName": "DCACHE_MISS_CPI",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses with 2M page size per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS_2M / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_2M_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses with 16M page size per completed instruction",
+ "MetricExpr": "PM_DERAT_MISS_16M / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_16M_MISS_RATE",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 4K page size",
+ "MetricExpr": "PM_DERAT_MISS_4K / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_4K_MISS_RATIO"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 2M page size",
+ "MetricExpr": "PM_DERAT_MISS_2M / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_2M_MISS_RATIO"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 16M page size",
+ "MetricExpr": "PM_DERAT_MISS_16M / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_16M_MISS_RATIO"
+ },
+ {
+ "BriefDescription": "DERAT miss ratio for 64K page size",
+ "MetricExpr": "PM_DERAT_MISS_64K / PM_DERAT_MISS",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_64K_MISS_RATIO"
+ },
+ {
+ "BriefDescription": "Percentage of DERAT misses that resulted in TLB reloads",
+ "MetricExpr": "PM_DTLB_MISS / PM_DERAT_MISS * 100",
+ "MetricGroup": "Translation",
+ "MetricName": "DERAT_MISS_RELOAD",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of icache misses that were reloaded from beyond the local L3",
+ "MetricExpr": "PM_INST_FROM_L3MISS / PM_L1_ICACHE_MISS * 100",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_L3_MISS",
+ "ScaleUnit": "1%"
+ },
+ {
+ "BriefDescription": "Percentage of icache reloads from the beyond the L3 per completed instruction",
+ "MetricExpr": "PM_INST_FROM_L3MISS / PM_RUN_INST_CMPL * 100",
+ "MetricGroup": "Instruction_Misses",
+ "MetricName": "INST_FROM_L3_MISS_RATE",
+ "ScaleUnit": "1%"
+ }
+]
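
The metric entries added above are named arithmetic over event counts, with ScaleUnit applied when the value is printed. A minimal sketch (not part of the patch) of evaluating one of them by hand, using invented counter values; the entry itself is copied from the file above:

import json

# One metric definition copied from the file above.
entry = json.loads("""
{
    "BriefDescription": "Average number of run cycles per completed instruction",
    "MetricExpr": "PM_RUN_CYC / PM_RUN_INST_CMPL",
    "MetricGroup": "General",
    "MetricName": "RUN_CPI"
}
""")

# Hypothetical event counts for one measurement interval (illustration only).
counts = {
    "PM_RUN_CYC": 2_400_000_000,
    "PM_RUN_INST_CMPL": 1_600_000_000,
}

# The expressions in this file are plain arithmetic over event names, so a
# restricted eval over the counts dictionary is enough for a demonstration.
value = eval(entry["MetricExpr"], {"__builtins__": {}}, counts)
print(f"{entry['MetricName']} = {value:.2f}")   # prints: RUN_CPI = 1.50
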
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json b/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json
new file mode 100644
index 000000000000..fe050d44374b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/nest_metrics.json
@@ -0,0 +1,424 @@
+[
+ {
+ "MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / (1 + hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "VEC_GROUP_PUMP_RETRY_RATIO_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / (1 + hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "LOCAL_NODE_PUMP_RETRY_RATIO_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_LNS_PUMP01\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "LOCAL_NODE_PUMP_RETRY_RATIO_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_LNS_PUMP23\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "GROUP_PUMP_RETRY_RATIO_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PB_GROUP_PUMP01\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "GROUP_PUMP_RETRY_RATIO_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PB_GROUP_PUMP23\\,chip\\=?@) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_GROUP_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_GROUP_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_GROUP_PUMPS_RETRIES_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_GROUP_PUMPS_RETRIES_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_GROUP_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP01\\,chip\\=?@ / (1 + hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "REMOTE_NODE_PUMPS_RETRIES_RATIO_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_RNS_PUMP23\\,chip\\=?@ / (1 + hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_LOCAL_NODE_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_LOCAL_NODE_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_RETRIES_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_VECTOR_GROUP_PUMPS_RETRIES_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_VG_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_LOCAL_NODE_PUMPS_RETRIES_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_LOCAL_NODE_PUMPS_RETRIES_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RTY_LNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_REMOTE_NODE_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_RNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_REMOTE_NODE_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_RNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_NEAR_NODE_PUMPS_P01",
+ "MetricExpr": "(hv_24x7@PM_PB_NNS_PUMP01\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_NEAR_NODE_PUMPS_P23",
+ "MetricExpr": "(hv_24x7@PM_PB_NNS_PUMP23\\,chip\\=?@ / hv_24x7@PM_PAU_CYC\\,chip\\=?@)",
+ "ScaleUnit": "4",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_INT_PB_BW",
+ "MetricExpr": "(hv_24x7@PM_PB_INT_DATA_XFER\\,chip\\=?@)",
+ "ScaleUnit": "2.09MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK0_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK1_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK2_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK3_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK4_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK5_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK6_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK7_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK0_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK1_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK2_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK3_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK4_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK5_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK6_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "XLINK7_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_XLINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_XLINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_XLINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK0_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK1_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK2_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK3_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK4_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK5_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK6_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK7_OUT_TOTAL_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_TOTAL_UTIL\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_TOTAL_UTIL\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK0_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK0_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK0_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK0_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK1_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK1_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK1_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK1_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK2_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK2_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK2_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK2_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK3_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK3_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK3_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK3_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK4_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK4_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK4_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK4_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK5_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK5_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK5_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK5_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK6_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK6_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK6_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK6_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "ALINK7_OUT_DATA_UTILIZATION",
+ "MetricExpr": "((hv_24x7@PM_ALINK7_OUT_ODD_DATA\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_DATA\\,chip\\=?@) / (1 + hv_24x7@PM_ALINK7_OUT_ODD_AVLBL_CYCLES\\,chip\\=?@ + hv_24x7@PM_ALINK7_OUT_EVEN_AVLBL_CYCLES\\,chip\\=?@)) * 100",
+ "ScaleUnit": "1.063%",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_DATA_BANDWIDTH_TRANSFERRED_OVER_PB_PCI1",
+ "MetricExpr": "(hv_24x7@PM_PCI1_32B_INOUT\\,chip\\=?@)",
+ "ScaleUnit": "3.28e-2MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_DATA_BANDWIDTH_TRANSFERRED_OVER_PB_PCI0",
+ "MetricExpr": "(hv_24x7@PM_PCI0_32B_INOUT\\,chip\\=?@)",
+ "ScaleUnit": "3.28e-2MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_READ_BW_MC0_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_READ_BW_MC1_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_READ_BW_MC2_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_READ_BW_MC3_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_WRITE_BW_MC0_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_WRITE_BW_MC1_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_WRITE_BW_MC2_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "TOTAL_MCS_WRITE_BW_MC3_CHAN01",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "Memory_RD_BW_Chip",
+ "MetricExpr": "(hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_128B_RD_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@)",
+ "MetricGroup": "Memory_BW",
+ "ScaleUnit": "5.24e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "Memory_WR_BW_Chip",
+ "MetricExpr": "(hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC0_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC1_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC2_CHAN01\\,chip\\=?@ + hv_24x7@PM_MCS_64B_WR_DATA_BLOCKS_MC3_CHAN01\\,chip\\=?@ )",
+ "MetricGroup": "Memory_BW",
+ "ScaleUnit": "2.6e-1MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricName": "PowerBUS_Frequency",
+ "MetricExpr": "(hv_24x7@PM_PAU_CYC\\,chip\\=?@ )",
+ "ScaleUnit": "2.56e-7GHz",
+ "AggregationMode": "PerChip"
+ }
+]
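
For these nest metrics, the chip number is substituted for the "chip=?" placeholder and the result is reported per chip ("AggregationMode": "PerChip"), scaled by ScaleUnit. A rough sketch of that post-processing with invented raw counts (illustration only, not part of the patch; the 3.28e-2/MB factor is taken from TOTAL_DATA_BANDWIDTH_TRANSFERRED_OVER_PB_PCI0 above):

# Invented per-chip raw readings of hv_24x7 PM_PCI0_32B_INOUT, one value per
# chip, matching "AggregationMode": "PerChip".
raw_pci0_32b_inout = {0: 1_000_000, 1: 250_000}

# The computed value is multiplied by the numeric part of ScaleUnit and
# labelled with its unit; for TOTAL_DATA_BANDWIDTH_TRANSFERRED_OVER_PB_PCI0
# that is 3.28e-2 and "MB".
SCALE, UNIT = 3.28e-2, "MB"

for chip, count in sorted(raw_pci0_32b_inout.items()):
    # MetricExpr here is the single counter hv_24x7@PM_PCI0_32B_INOUT,chip=?@,
    # with "?" replaced by this chip's number at collection time.
    print(f"chip {chip}: {count * SCALE:,.1f} {UNIT}")
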
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/others.json b/tools/perf/pmu-events/arch/powerpc/power10/others.json
new file mode 100644
index 000000000000..a771e4b6bec5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/others.json
@@ -0,0 +1,272 @@
+[
+ {
+ "EventCode": "0x10016",
+ "EventName": "PM_VSU0_ISSUE",
+ "BriefDescription": "VSU instructions issued to VSU pipe 0."
+ },
+ {
+ "EventCode": "0x1001C",
+ "EventName": "PM_ULTRAVISOR_INST_CMPL",
+ "BriefDescription": "PowerPC instructions that completed while the thread was in ultravisor state."
+ },
+ {
+ "EventCode": "0x100F0",
+ "EventName": "PM_CYC",
+ "BriefDescription": "Processor cycles."
+ },
+ {
+ "EventCode": "0x10134",
+ "EventName": "PM_MRK_ST_DONE_L2",
+ "BriefDescription": "Marked stores completed in L2 (RC machine done)."
+ },
+ {
+ "EventCode": "0x1505E",
+ "EventName": "PM_LD_HIT_L1",
+ "BriefDescription": "Loads that finished without experiencing an L1 miss."
+ },
+ {
+ "EventCode": "0x1F056",
+ "EventName": "PM_DISP_SS0_2_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 1 or 2 instructions."
+ },
+ {
+ "EventCode": "0x1F15C",
+ "EventName": "PM_MRK_STCX_L2_CYC",
+ "BriefDescription": "Cycles spent in the nest portion of a marked Stcx instruction. It starts counting when the operation starts to drain to the L2 and it stops counting when the instruction retires from the Instruction Completion Table (ICT) in the Instruction Sequencing Unit (ISU)."
+ },
+ {
+ "EventCode": "0x10066",
+ "EventName": "PM_ADJUNCT_CYC",
+ "BriefDescription": "Cycles in which the thread is in Adjunct state. MSR[S HV PR] bits = 011."
+ },
+ {
+ "EventCode": "0x101E4",
+ "EventName": "PM_MRK_L1_ICACHE_MISS",
+ "BriefDescription": "Marked Instruction suffered an icache Miss."
+ },
+ {
+ "EventCode": "0x101EA",
+ "EventName": "PM_MRK_L1_RELOAD_VALID",
+ "BriefDescription": "Marked demand reload."
+ },
+ {
+ "EventCode": "0x100F4",
+ "EventName": "PM_FLOP_CMPL",
+ "BriefDescription": "Floating Point Operations Completed. Includes any type. It counts once for each 1, 2, 4 or 8 flop instruction. Use PM_1|2|4|8_FLOP_CMPL events to count flops."
+ },
+ {
+ "EventCode": "0x100FA",
+ "EventName": "PM_RUN_LATCH_ANY_THREAD_CYC",
+ "BriefDescription": "Cycles when at least one thread has the run latch set."
+ },
+ {
+ "EventCode": "0x100FC",
+ "EventName": "PM_LD_REF_L1",
+ "BriefDescription": "All L1 D cache load references counted at finish, gated by reject. In P9 and earlier this event counted only cacheable loads but in P10 both cacheable and non-cacheable loads are included."
+ },
+ {
+ "EventCode": "0x2000C",
+ "EventName": "PM_RUN_LATCH_ALL_THREADS_CYC",
+ "BriefDescription": "Cycles when the run latch is set for all threads."
+ },
+ {
+ "EventCode": "0x2E010",
+ "EventName": "PM_ADJUNCT_INST_CMPL",
+ "BriefDescription": "PowerPC instructions that completed while the thread is in Adjunct state."
+ },
+ {
+ "EventCode": "0x2E014",
+ "EventName": "PM_STCX_FIN",
+ "BriefDescription": "Conditional store instruction (STCX) finished. LARX and STCX are instructions used to acquire a lock."
+ },
+ {
+ "EventCode": "0x20130",
+ "EventName": "PM_MRK_INST_DECODED",
+ "BriefDescription": "An instruction was marked at decode time. Random Instruction Sampling (RIS) only."
+ },
+ {
+ "EventCode": "0x20132",
+ "EventName": "PM_MRK_DFU_ISSUE",
+ "BriefDescription": "The marked instruction was a decimal floating point operation issued to the VSU. Measured at issue time."
+ },
+ {
+ "EventCode": "0x20134",
+ "EventName": "PM_MRK_FXU_ISSUE",
+ "BriefDescription": "The marked instruction was a fixed point operation issued to the VSU. Measured at issue time."
+ },
+ {
+ "EventCode": "0x2505C",
+ "EventName": "PM_VSU_ISSUE",
+ "BriefDescription": "At least one VSU instruction was issued to one of the VSU pipes. Up to 4 per cycle. Includes fixed point operations."
+ },
+ {
+ "EventCode": "0x2F054",
+ "EventName": "PM_DISP_SS1_2_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 1 dispatches either 1 or 2 instructions."
+ },
+ {
+ "EventCode": "0x2F056",
+ "EventName": "PM_DISP_SS1_4_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 1 dispatches either 3 or 4 instructions."
+ },
+ {
+ "EventCode": "0x2006C",
+ "EventName": "PM_RUN_CYC_SMT4_MODE",
+ "BriefDescription": "Cycles when this thread's run latch is set and the core is in SMT4 mode."
+ },
+ {
+ "EventCode": "0x201E0",
+ "EventName": "PM_MRK_DATA_FROM_MEMORY",
+ "BriefDescription": "The processor's data cache was reloaded from local, remote, or distant memory due to a demand miss for a marked load."
+ },
+ {
+ "EventCode": "0x201E4",
+ "EventName": "PM_MRK_DATA_FROM_L3MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked load."
+ },
+ {
+ "EventCode": "0x201E8",
+ "EventName": "PM_THRESH_EXC_512",
+ "BriefDescription": "Threshold counter exceeded a value of 512."
+ },
+ {
+ "EventCode": "0x200F2",
+ "EventName": "PM_INST_DISP",
+ "BriefDescription": "PowerPC instructions dispatched."
+ },
+ {
+ "EventCode": "0x30132",
+ "EventName": "PM_MRK_VSU_FIN",
+ "BriefDescription": "VSU marked instructions finished. Excludes simple FX instructions issued to the Store Unit."
+ },
+ {
+ "EventCode": "0x30038",
+ "EventName": "PM_EXEC_STALL_DMISS_LMEM",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from the local memory, local OpenCapp cache, or local OpenCapp memory."
+ },
+ {
+ "EventCode": "0x3F04A",
+ "EventName": "PM_LSU_ST5_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST2 port."
+ },
+ {
+ "EventCode": "0x3405A",
+ "EventName": "PM_PRIVILEGED_INST_CMPL",
+ "BriefDescription": "PowerPC Instructions that completed while the thread is in Privileged state."
+ },
+ {
+ "EventCode": "0x3F150",
+ "EventName": "PM_MRK_ST_DRAIN_CYC",
+ "BriefDescription": "cycles to drain st from core to L2."
+ },
+ {
+ "EventCode": "0x3F054",
+ "EventName": "PM_DISP_SS0_4_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 3 or 4 instructions."
+ },
+ {
+ "EventCode": "0x3F056",
+ "EventName": "PM_DISP_SS0_8_INSTR_CYC",
+ "BriefDescription": "Cycles in which Superslice 0 dispatches either 5, 6, 7 or 8 instructions."
+ },
+ {
+ "EventCode": "0x30162",
+ "EventName": "PM_MRK_ISSUE_DEPENDENT_LOAD",
+ "BriefDescription": "The marked instruction was dependent on a load. It is eligible for issue kill."
+ },
+ {
+ "EventCode": "0x40114",
+ "EventName": "PM_MRK_START_PROBE_NOP_DISP",
+ "BriefDescription": "Marked Start probe nop dispatched. Instruction AND R0,R0,R0."
+ },
+ {
+ "EventCode": "0x4001C",
+ "EventName": "PM_VSU_FIN",
+ "BriefDescription": "VSU instructions finished."
+ },
+ {
+ "EventCode": "0x4C01A",
+ "EventName": "PM_EXEC_STALL_DMISS_OFF_NODE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a distant chip."
+ },
+ {
+ "EventCode": "0x4D012",
+ "EventName": "PM_PMC3_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC3 are met and PMC3 is charged."
+ },
+ {
+ "EventCode": "0x4D022",
+ "EventName": "PM_HYPERVISOR_INST_CMPL",
+ "BriefDescription": "PowerPC instructions that completed while the thread is in hypervisor state."
+ },
+ {
+ "EventCode": "0x4D026",
+ "EventName": "PM_ULTRAVISOR_CYC",
+ "BriefDescription": "Cycles when the thread is in Ultravisor state. MSR[S HV PR]=110."
+ },
+ {
+ "EventCode": "0x4D028",
+ "EventName": "PM_PRIVILEGED_CYC",
+ "BriefDescription": "Cycles when the thread is in Privileged state. MSR[S HV PR]=x00."
+ },
+ {
+ "EventCode": "0x40030",
+ "EventName": "PM_INST_FIN",
+ "BriefDescription": "Instructions finished."
+ },
+ {
+ "EventCode": "0x44146",
+ "EventName": "PM_MRK_STCX_CORE_CYC",
+ "BriefDescription": "Cycles spent in the core portion of a marked Stcx instruction. It starts counting when the instruction is decoded and stops counting when it drains into the L2."
+ },
+ {
+ "EventCode": "0x44054",
+ "EventName": "PM_VECTOR_LD_CMPL",
+ "BriefDescription": "Vector load instructions completed."
+ },
+ {
+ "EventCode": "0x45054",
+ "EventName": "PM_FMA_CMPL",
+ "BriefDescription": "Two floating point instructions completed (FMA class of instructions: fmadd, fnmadd, fmsub, fnmsub). Scalar instructions only."
+ },
+ {
+ "EventCode": "0x45056",
+ "EventName": "PM_SCALAR_FLOP_CMPL",
+ "BriefDescription": "Scalar floating point instructions completed."
+ },
+ {
+ "EventCode": "0x4505C",
+ "EventName": "PM_MATH_FLOP_CMPL",
+ "BriefDescription": "Math floating point instructions completed."
+ },
+ {
+ "EventCode": "0x4D05E",
+ "EventName": "PM_BR_CMPL",
+ "BriefDescription": "A branch completed. All branches are included."
+ },
+ {
+ "EventCode": "0x4E15E",
+ "EventName": "PM_MRK_INST_FLUSHED",
+ "BriefDescription": "The marked instruction was flushed."
+ },
+ {
+ "EventCode": "0x401E6",
+ "EventName": "PM_MRK_INST_FROM_L3MISS",
+ "BriefDescription": "The processor's instruction cache was reloaded from a source other than the local core's L1, L2, or L3 due to a demand miss for a marked instruction."
+ },
+ {
+ "EventCode": "0x401E8",
+ "EventName": "PM_MRK_DATA_FROM_L2MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss for a marked load."
+ },
+ {
+ "EventCode": "0x400F0",
+ "EventName": "PM_LD_DEMAND_MISS_L1_FIN",
+ "BriefDescription": "Load Missed L1, counted at finish time."
+ },
+ {
+ "EventCode": "0x500FA",
+ "EventName": "PM_RUN_INST_CMPL",
+ "BriefDescription": "Completed PowerPC instructions gated by the run latch."
+ }
+]
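
Each entry in these event files is a flat object with a hex EventCode, a unique EventName and a BriefDescription, which the perf build turns into event tables. A small self-contained sketch of the kind of shape check that catches malformed entries (the two entries are copied from the file above; the check is only an illustration, not the actual perf build tooling):

import json

# Two entries copied from the file above; in practice the whole JSON file
# would be loaded with json.load(open(path)).
events = json.loads("""
[
    {"EventCode": "0x100F0", "EventName": "PM_CYC",
     "BriefDescription": "Processor cycles."},
    {"EventCode": "0x500FA", "EventName": "PM_RUN_INST_CMPL",
     "BriefDescription": "Completed PowerPC instructions gated by the run latch."}
]
""")

seen = set()
for ev in events:
    int(ev["EventCode"], 16)                  # must parse as a hex literal
    assert ev["EventName"] not in seen        # event names must be unique
    seen.add(ev["EventName"])
    assert ev["BriefDescription"].strip()     # description must be non-empty
print(f"{len(events)} entries look well formed")
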
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
new file mode 100644
index 000000000000..b8aded6045fa
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pipeline.json
@@ -0,0 +1,292 @@
+[
+ {
+ "EventCode": "0x100FE",
+ "EventName": "PM_INST_CMPL",
+ "BriefDescription": "PowerPC instructions completed."
+ },
+ {
+ "EventCode": "0x1000C",
+ "EventName": "PM_LSU_LD0_FIN",
+ "BriefDescription": "LSU Finished an internal operation in LD0 port."
+ },
+ {
+ "EventCode": "0x1000E",
+ "EventName": "PM_MMA_ISSUED",
+ "BriefDescription": "MMA instructions issued."
+ },
+ {
+ "EventCode": "0x10012",
+ "EventName": "PM_LSU_ST0_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST0 port."
+ },
+ {
+ "EventCode": "0x10014",
+ "EventName": "PM_LSU_ST4_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST4 port."
+ },
+ {
+ "EventCode": "0x10018",
+ "EventName": "PM_IC_DEMAND_CYC",
+ "BriefDescription": "Cycles in which an instruction reload is pending to satisfy a demand miss."
+ },
+ {
+ "EventCode": "0x10022",
+ "EventName": "PM_PMC2_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC2 are met and PMC2 is charged."
+ },
+ {
+ "EventCode": "0x10024",
+ "EventName": "PM_PMC5_OVERFLOW",
+ "BriefDescription": "The event selected for PMC5 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x10058",
+ "EventName": "PM_EXEC_STALL_FIN_AT_DISP",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline finished at dispatch and did not require execution in the LSU, BRU or VSU."
+ },
+ {
+ "EventCode": "0x1005A",
+ "EventName": "PM_FLUSH_MPRED",
+ "BriefDescription": "A flush occurred due to a mispredicted branch. Includes target and direction."
+ },
+ {
+ "EventCode": "0x1C05A",
+ "EventName": "PM_DERAT_MISS_2M",
+ "BriefDescription": "Data ERAT Miss (Data TLB Access) page size 2M. Implies radix translation. When MMCR1[16]=0 this event counts only DERAT reloads for demand misses. When MMCR1[16]=1 this event includes demand misses and prefetches."
+ },
+ {
+ "EventCode": "0x1E05A",
+ "EventName": "PM_CMPL_STALL_LWSYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a lwsync waiting to complete."
+ },
+ {
+ "EventCode": "0x10068",
+ "EventName": "PM_BR_FIN",
+ "BriefDescription": "A branch instruction finished. Includes predicted/mispredicted/unconditional."
+ },
+ {
+ "EventCode": "0x1006A",
+ "EventName": "PM_FX_LSU_FIN",
+ "BriefDescription": "Simple fixed point instruction issued to the store unit. Measured at finish time."
+ },
+ {
+ "EventCode": "0x1006C",
+ "EventName": "PM_RUN_CYC_ST_MODE",
+ "BriefDescription": "Cycles when the run latch is set and the core is in ST mode."
+ },
+ {
+ "EventCode": "0x20004",
+ "EventName": "PM_ISSUE_STALL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was dispatched but not issued yet."
+ },
+ {
+ "EventCode": "0x2000A",
+ "EventName": "PM_HYPERVISOR_CYC",
+ "BriefDescription": "Cycles when the thread is in Hypervisor state. MSR[S HV PR]=010."
+ },
+ {
+ "EventCode": "0x2000E",
+ "EventName": "PM_LSU_LD1_FIN",
+ "BriefDescription": "LSU Finished an internal operation in LD1 port."
+ },
+ {
+ "EventCode": "0x2C014",
+ "EventName": "PM_CMPL_STALL_SPECIAL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline required special handling before completing."
+ },
+ {
+ "EventCode": "0x2C018",
+ "EventName": "PM_EXEC_STALL_DMISS_L3MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for a load miss to resolve from a source beyond the local L2 or local L3."
+ },
+ {
+ "EventCode": "0x2D010",
+ "EventName": "PM_LSU_ST1_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST1 port."
+ },
+ {
+ "EventCode": "0x2D012",
+ "EventName": "PM_VSU1_ISSUE",
+ "BriefDescription": "VSU instructions issued to VSU pipe 1."
+ },
+ {
+ "EventCode": "0x2D018",
+ "EventName": "PM_EXEC_STALL_VSU",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the VSU (includes FXU, VSU, CRU)."
+ },
+ {
+ "EventCode": "0x2D01C",
+ "EventName": "PM_CMPL_STALL_STCX",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a stcx waiting for resolution from the nest before completing."
+ },
+ {
+ "EventCode": "0x2E01E",
+ "EventName": "PM_EXEC_STALL_NTC_FLUSH",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in any unit before it was flushed. Note that if the flush of the oldest instruction happens after finish, the cycles from dispatch to issue will be included in PM_DISP_STALL and the cycles from issue to finish will be included in PM_EXEC_STALL and its corresponding children. This event will also count cycles when the previous NTF instruction is still completing and the new NTF instruction is stalled at dispatch."
+ },
+ {
+ "EventCode": "0x2013C",
+ "EventName": "PM_MRK_FX_LSU_FIN",
+ "BriefDescription": "The marked instruction was simple fixed point that was issued to the store unit. Measured at finish time."
+ },
+ {
+ "EventCode": "0x2405A",
+ "EventName": "PM_NTC_FIN",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline (NTC) finishes. Note that instructions can finish out of order, therefore not all the instructions that finish have a Next-to-complete status."
+ },
+ {
+ "EventCode": "0x201E2",
+ "EventName": "PM_MRK_LD_MISS_L1",
+ "BriefDescription": "Marked DL1 Demand Miss counted at finish time."
+ },
+ {
+ "EventCode": "0x200F4",
+ "EventName": "PM_RUN_CYC",
+ "BriefDescription": "Processor cycles gated by the run latch."
+ },
+ {
+ "EventCode": "0x30008",
+ "EventName": "PM_EXEC_STALL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting to finish in one of the execution units (BRU, LSU, VSU). Only cycles between issue and finish are counted in this category."
+ },
+ {
+ "EventCode": "0x3001A",
+ "EventName": "PM_LSU_ST2_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST2 port."
+ },
+ {
+ "EventCode": "0x30020",
+ "EventName": "PM_PMC2_REWIND",
+ "BriefDescription": "The speculative event selected for PMC2 rewinds and the counter for PMC2 is not charged."
+ },
+ {
+ "EventCode": "0x30022",
+ "EventName": "PM_PMC4_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC4 are met and PMC4 is charged."
+ },
+ {
+ "EventCode": "0x30024",
+ "EventName": "PM_PMC6_OVERFLOW",
+ "BriefDescription": "The event selected for PMC6 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x30028",
+ "EventName": "PM_CMPL_STALL_MEM_ECC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was waiting for the non-speculative finish of either a stcx waiting for its result or a load waiting for non-critical sectors of data and ECC."
+ },
+ {
+ "EventCode": "0x30036",
+ "EventName": "PM_EXEC_STALL_SIMPLE_FX",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a simple fixed point instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "0x3003A",
+ "EventName": "PM_CMPL_STALL_EXCEPTION",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was not allowed to complete because it was interrupted by ANY exception, which has to be serviced before the instruction can complete."
+ },
+ {
+ "EventCode": "0x3F044",
+ "EventName": "PM_VSU2_ISSUE",
+ "BriefDescription": "VSU instructions issued to VSU pipe 2."
+ },
+ {
+ "EventCode": "0x30058",
+ "EventName": "PM_TLBIE_FIN",
+ "BriefDescription": "TLBIE instructions finished in the LSU. Two TLBIEs can finish each cycle. All will be counted."
+ },
+ {
+ "EventCode": "0x3D058",
+ "EventName": "PM_SCALAR_FSQRT_FDIV_ISSUE",
+ "BriefDescription": "Scalar versions of four floating point operations: fdiv,fsqrt (xvdivdp, xvdivsp, xvsqrtdp, xvsqrtsp)."
+ },
+ {
+ "EventCode": "0x30066",
+ "EventName": "PM_LSU_FIN",
+ "BriefDescription": "LSU Finished an internal operation (up to 4 per cycle)."
+ },
+ {
+ "EventCode": "0x40004",
+ "EventName": "PM_FXU_ISSUE",
+ "BriefDescription": "A fixed point instruction was issued to the VSU."
+ },
+ {
+ "EventCode": "0x40008",
+ "EventName": "PM_NTC_ALL_FIN",
+ "BriefDescription": "Cycles in which both instructions in the ICT entry pair show as finished. These are the cycles between finish and completion for the oldest pair of instructions in the pipeline."
+ },
+ {
+ "EventCode": "0x40010",
+ "EventName": "PM_PMC3_OVERFLOW",
+ "BriefDescription": "The event selected for PMC3 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x4C012",
+ "EventName": "PM_EXEC_STALL_DERAT_ONLY_MISS",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline suffered an ERAT miss and waited for it resolve."
+ },
+ {
+ "EventCode": "0x4C018",
+ "EventName": "PM_CMPL_STALL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline cannot complete because the thread was blocked for any reason."
+ },
+ {
+ "EventCode": "0x4C01E",
+ "EventName": "PM_LSU_ST3_FIN",
+ "BriefDescription": "LSU Finished an internal operation in ST3 port."
+ },
+ {
+ "EventCode": "0x4D018",
+ "EventName": "PM_EXEC_STALL_BRU",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was executing in the Branch unit."
+ },
+ {
+ "EventCode": "0x4D01A",
+ "EventName": "PM_CMPL_STALL_HWSYNC",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a hwsync waiting for response from L2 before completing."
+ },
+ {
+ "EventCode": "0x4D01C",
+ "EventName": "PM_EXEC_STALL_TLBIEL",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIEL instruction executing in the Load Store Unit. TLBIEL instructions have lower overhead than TLBIE instructions because they don't get set to the nest."
+ },
+ {
+ "EventCode": "0x4E012",
+ "EventName": "PM_EXEC_STALL_UNKNOWN",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline completed without an ntf_type pulse. The ntf_pulse was missed by the ISU because the NTF finishes and completions came too close together."
+ },
+ {
+ "EventCode": "0x4D020",
+ "EventName": "PM_VSU3_ISSUE",
+ "BriefDescription": "VSU instruction was issued to VSU pipe 3."
+ },
+ {
+ "EventCode": "0x40132",
+ "EventName": "PM_MRK_LSU_FIN",
+ "BriefDescription": "LSU marked instruction finish."
+ },
+ {
+ "EventCode": "0x45058",
+ "EventName": "PM_IC_MISS_CMPL",
+ "BriefDescription": "Non-speculative icache miss, counted at completion."
+ },
+ {
+ "EventCode": "0x4D050",
+ "EventName": "PM_VSU_NON_FLOP_CMPL",
+ "BriefDescription": "Non-floating point VSU instructions completed."
+ },
+ {
+ "EventCode": "0x4D052",
+ "EventName": "PM_2FLOP_CMPL",
+ "BriefDescription": "Double Precision vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg completed."
+ },
+ {
+ "EventCode": "0x400F2",
+ "EventName": "PM_1PLUS_PPC_DISP",
+ "BriefDescription": "Cycles at least one Instr Dispatched."
+ },
+ {
+ "EventCode": "0x400F8",
+ "EventName": "PM_FLUSH",
+ "BriefDescription": "Flush (any type)."
+ }
+]
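
The PM_*_STALL events above all count cycles in which the oldest (next-to-complete) instruction is held up for a particular reason, so dividing each by PM_RUN_CYC gives a per-cause share of run cycles; that is how the CPI-breakdown metrics earlier in this patch are built. A toy calculation with invented counts (not measured data, not part of the patch):

# Invented counter values for one run; real numbers would come from counting
# the events defined above.
counts = {
    "PM_ISSUE_STALL": 120_000_000,
    "PM_EXEC_STALL": 450_000_000,
    "PM_CMPL_STALL": 200_000_000,
}
run_cyc = 1_000_000_000   # hypothetical PM_RUN_CYC for the same interval

for name, value in counts.items():
    print(f"{name:<15} {100.0 * value / run_cyc:5.1f}% of run cycles")
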
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/pmc.json b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
new file mode 100644
index 000000000000..b5d1bd39cfb2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/pmc.json
@@ -0,0 +1,22 @@
+[
+ {
+ "EventCode": "0x301E8",
+ "EventName": "PM_THRESH_EXC_64",
+ "BriefDescription": "Threshold counter exceeded a value of 64."
+ },
+ {
+ "EventCode": "0x45050",
+ "EventName": "PM_1FLOP_CMPL",
+ "BriefDescription": "One floating point instruction completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ },
+ {
+ "EventCode": "0x45052",
+ "EventName": "PM_4FLOP_CMPL",
+ "BriefDescription": "Four floating point instructions completed (fadd, fmul, fsub, fcmp, fsel, fabs, fnabs, fres, fsqrte, fneg)."
+ },
+ {
+ "EventCode": "0x4D054",
+ "EventName": "PM_8FLOP_CMPL",
+ "BriefDescription": "Four Double Precision vector instructions completed."
+ }
+]
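
The PM_FLOP_CMPL description earlier in this patch notes that it counts once per 1-, 2-, 4- or 8-flop instruction and points at PM_1|2|4|8_FLOP_CMPL for counting flops. A sketch of one plausible way to combine those counters; the values are invented and the weighting itself is an assumption, not something the patch states:

# Invented completion counts for the four flop-class events.
counts = {
    "PM_1FLOP_CMPL": 5_000_000,
    "PM_2FLOP_CMPL": 3_000_000,
    "PM_4FLOP_CMPL": 1_000_000,
    "PM_8FLOP_CMPL": 250_000,
}

# Assumed weighting: each event contributes its flop width per completion.
weights = {"PM_1FLOP_CMPL": 1, "PM_2FLOP_CMPL": 2,
           "PM_4FLOP_CMPL": 4, "PM_8FLOP_CMPL": 8}

total_flops = sum(weights[name] * n for name, n in counts.items())
print(f"estimated flops: {total_flops:,}")   # 17,000,000
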
diff --git a/tools/perf/pmu-events/arch/powerpc/power10/translation.json b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
new file mode 100644
index 000000000000..db3766dca07c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power10/translation.json
@@ -0,0 +1,57 @@
+[
+ {
+ "EventCode": "0x1F15E",
+ "EventName": "PM_MRK_START_PROBE_NOP_CMPL",
+ "BriefDescription": "Marked Start probe nop (AND R0,R0,R0) completed."
+ },
+ {
+ "EventCode": "0x20016",
+ "EventName": "PM_ST_FIN",
+ "BriefDescription": "Store finish count. Includes speculative activity."
+ },
+ {
+ "EventCode": "0x20018",
+ "EventName": "PM_ST_FWD",
+ "BriefDescription": "Store forwards that finished."
+ },
+ {
+ "EventCode": "0x2011C",
+ "EventName": "PM_MRK_NTF_CYC",
+ "BriefDescription": "Cycles during which the marked instruction is the oldest in the pipeline (NTF or NTC)."
+ },
+ {
+ "EventCode": "0x2E01C",
+ "EventName": "PM_EXEC_STALL_TLBIE",
+ "BriefDescription": "Cycles in which the oldest instruction in the pipeline was a TLBIE instruction executing in the Load Store Unit."
+ },
+ {
+ "EventCode": "0x201E6",
+ "EventName": "PM_THRESH_EXC_32",
+ "BriefDescription": "Threshold counter exceeded a value of 32."
+ },
+ {
+ "EventCode": "0x200F0",
+ "EventName": "PM_ST_CMPL",
+ "BriefDescription": "Stores completed from S2Q (2nd-level store queue). This event includes regular stores, stcx and cache inhibited stores. The following operations are excluded (pteupdate, snoop tlbie complete, store atomics, miso, load atomic payloads, tlbie, tlbsync, slbieg, isync, msgsnd, slbiag, cpabort, copy, tcheck, tend, stsync, dcbst, icbi, dcbf, hwsync, lwsync, ptesync, eieio, msgsync)."
+ },
+ {
+ "EventCode": "0x200FE",
+ "EventName": "PM_DATA_FROM_L2MISS",
+ "BriefDescription": "The processor's data cache was reloaded from a source other than the local core's L1 or L2 due to a demand miss."
+ },
+ {
+ "EventCode": "0x30010",
+ "EventName": "PM_PMC2_OVERFLOW",
+ "BriefDescription": "The event selected for PMC2 caused the event counter to overflow."
+ },
+ {
+ "EventCode": "0x4D010",
+ "EventName": "PM_PMC1_SAVED",
+ "BriefDescription": "The conditions for the speculative event selected for PMC1 are met and PMC1 is charged."
+ },
+ {
+ "EventCode": "0x4D05C",
+ "EventName": "PM_DPP_FLOP_CMPL",
+ "BriefDescription": "Double-Precision or Quad-Precision instructions completed."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/cache.json b/tools/perf/pmu-events/arch/powerpc/power8/cache.json
index 6b792b2c87e2..05a17084d939 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/cache.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/cache.json
@@ -32,8 +32,8 @@
{
"EventCode": "0x1c04e",
"EventName": "PM_DATA_FROM_L2MISS_MOD",
- "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to a demand load",
- "PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
{
"EventCode": "0x3c040",
@@ -74,8 +74,8 @@
{
"EventCode": "0x4c04e",
"EventName": "PM_DATA_FROM_L3MISS_MOD",
- "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to a demand load",
- "PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to a demand load",
+ "PublicDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
{
"EventCode": "0x3c042",
@@ -134,7 +134,7 @@
{
"EventCode": "0x4e04e",
"EventName": "PM_DPTEG_FROM_L3MISS",
- "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a data side request",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a data side request",
"PublicDescription": ""
},
{
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/frontend.json b/tools/perf/pmu-events/arch/powerpc/power8/frontend.json
index 1ddc30655d43..1c902a8263b6 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/frontend.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/frontend.json
@@ -116,8 +116,8 @@
{
"EventCode": "0x1404e",
"EventName": "PM_INST_FROM_L2MISS",
- "BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to an instruction fetch (not prefetch)",
- "PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ "BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to an instruction fetch (not prefetch)",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
{
"EventCode": "0x34040",
@@ -158,8 +158,8 @@
{
"EventCode": "0x4404e",
"EventName": "PM_INST_FROM_L3MISS_MOD",
- "BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to a instruction fetch",
- "PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ "BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to a instruction fetch",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
{
"EventCode": "0x34042",
@@ -320,7 +320,7 @@
{
"EventCode": "0x1504e",
"EventName": "PM_IPTEG_FROM_L2MISS",
- "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a instruction side request",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a instruction side request",
"PublicDescription": ""
},
{
@@ -344,7 +344,7 @@
{
"EventCode": "0x4504e",
"EventName": "PM_IPTEG_FROM_L3MISS",
- "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a instruction side request",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a instruction side request",
"PublicDescription": ""
},
{
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/marked.json b/tools/perf/pmu-events/arch/powerpc/power8/marked.json
index 94dc58b83b7e..6de61a797bbd 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/marked.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/marked.json
@@ -92,7 +92,7 @@
{
"EventCode": "0x4c12e",
"EventName": "PM_MRK_DATA_FROM_L2MISS_CYC",
- "BriefDescription": "Duration in cycles to reload from a localtion other than the local core's L2 due to a marked load",
+ "BriefDescription": "Duration in cycles to reload from a location other than the local core's L2 due to a marked load",
"PublicDescription": ""
},
{
@@ -158,13 +158,13 @@
{
"EventCode": "0x201e4",
"EventName": "PM_MRK_DATA_FROM_L3MISS",
- "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to a marked load",
+ "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to a marked load",
"PublicDescription": ""
},
{
"EventCode": "0x2d12e",
"EventName": "PM_MRK_DATA_FROM_L3MISS_CYC",
- "BriefDescription": "Duration in cycles to reload from a localtion other than the local core's L3 due to a marked load",
+ "BriefDescription": "Duration in cycles to reload from a location other than the local core's L3 due to a marked load",
"PublicDescription": ""
},
{
@@ -392,7 +392,7 @@
{
"EventCode": "0x1f14e",
"EventName": "PM_MRK_DPTEG_FROM_L2MISS",
- "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a marked data side request",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a marked data side request",
"PublicDescription": ""
},
{
@@ -416,7 +416,7 @@
{
"EventCode": "0x4f14e",
"EventName": "PM_MRK_DPTEG_FROM_L3MISS",
- "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L3 due to a marked data side request",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L3 due to a marked data side request",
"PublicDescription": ""
},
{
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
index bffb2d4a6420..4e25525b7da6 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/metrics.json
@@ -169,7 +169,7 @@
},
{
"BriefDescription": "Cycles GCT empty where dispatch was held",
- "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL)",
+ "MetricExpr": "(PM_GCT_NOSLOT_DISP_HELD_MAP + PM_GCT_NOSLOT_DISP_HELD_SRQ + PM_GCT_NOSLOT_DISP_HELD_ISSQ + PM_GCT_NOSLOT_DISP_HELD_OTHER) / PM_RUN_INST_CMPL",
"MetricGroup": "cpi_breakdown",
"MetricName": "gct_empty_disp_held_cpi"
},
@@ -885,37 +885,37 @@
"MetricName": "flush_rate_percent"
},
{
- "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (11 to 14) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_11_14_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_11to14_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (15 to 17) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_15_17_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_15to17_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization 18+ as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_18_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_18plus_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (1 to 2) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_1_2_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_1to2_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (3 to 6) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_3_6_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_3to6_slots_percent"
},
{
- "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had atleast 1 slot valid",
+ "BriefDescription": "GCT slot utilization (7 to 10) as a % of cycles this thread had at least 1 slot valid",
"MetricExpr": "PM_GCT_UTIL_7_10_ENTRIES / ( PM_RUN_CYC - PM_GCT_NOSLOT_CYC) * 100",
"MetricGroup": "general",
"MetricName": "gct_util_7to10_slots_percent"
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index f4e760cab111..f1f2965f6775 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -410,8 +410,8 @@
{
"EventCode": "0x61c04e",
"EventName": "PM_DATA_ALL_FROM_L2MISS_MOD",
- "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either demand loads or data prefetch",
- "PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from a location other than the local core's L2 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
{
"EventCode": "0x63c040",
@@ -470,8 +470,8 @@
{
"EventCode": "0x64c04e",
"EventName": "PM_DATA_ALL_FROM_L3MISS_MOD",
- "BriefDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either demand loads or data prefetch",
- "PublicDescription": "The processor's data cache was reloaded from a localtion other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
+ "BriefDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to either demand loads or data prefetch",
+ "PublicDescription": "The processor's data cache was reloaded from a location other than the local core's L3 due to either only demand loads or demand loads plus prefetches if MMCR1[16] is 1"
},
{
"EventCode": "0x63c042",
@@ -1046,7 +1046,7 @@
{
"EventCode": "0x4e010",
"EventName": "PM_GCT_NOSLOT_IC_L3MISS",
- "BriefDescription": "Gct empty for this thread due to icach l3 miss",
+ "BriefDescription": "Gct empty for this thread due to icache l3 miss",
"PublicDescription": ""
},
{
@@ -1280,8 +1280,8 @@
{
"EventCode": "0x51404e",
"EventName": "PM_INST_ALL_FROM_L2MISS",
- "BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to instruction fetches and prefetches",
- "PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ "BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to instruction fetches and prefetches",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L2 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
{
"EventCode": "0x534040",
@@ -1340,8 +1340,8 @@
{
"EventCode": "0x54404e",
"EventName": "PM_INST_ALL_FROM_L3MISS_MOD",
- "BriefDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to a instruction fetch",
- "PublicDescription": "The processor's Instruction cache was reloaded from a localtion other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
+ "BriefDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to a instruction fetch",
+ "PublicDescription": "The processor's Instruction cache was reloaded from a location other than the local core's L3 due to either an instruction fetch or instruction fetch plus prefetch if MMCR1[17] is 1"
},
{
"EventCode": "0x534042",
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/translation.json b/tools/perf/pmu-events/arch/powerpc/power8/translation.json
index 623e7475b010..a1657f5fdc6b 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/translation.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/translation.json
@@ -44,7 +44,7 @@
{
"EventCode": "0x1e04e",
"EventName": "PM_DPTEG_FROM_L2MISS",
- "BriefDescription": "A Page Table Entry was loaded into the TLB from a localtion other than the local core's L2 due to a data side request",
+ "BriefDescription": "A Page Table Entry was loaded into the TLB from a location other than the local core's L2 due to a data side request",
"PublicDescription": ""
},
{
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
index 811c2a8c1c9e..db86ba36224d 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/metrics.json
@@ -60,7 +60,7 @@
},
{
"BriefDescription": "Stalls due to short latency decimal floating ops.",
- "MetricExpr": "(PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_DFLONG)/PM_RUN_INST_CMPL",
+ "MetricExpr": "dfu_stall_cpi - dflong_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "dfu_other_stall_cpi"
},
@@ -72,7 +72,7 @@
},
{
"BriefDescription": "Completion stall by Dcache miss which resolved off node memory/cache",
- "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM - PM_CMPLU_STALL_DMISS_REMOTE)/PM_RUN_INST_CMPL",
+ "MetricExpr": "dmiss_non_local_stall_cpi - dmiss_remote_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "dmiss_distant_stall_cpi"
},
@@ -90,7 +90,7 @@
},
{
"BriefDescription": "Completion stall due to cache miss that resolves in the L2 or L3 without conflict",
- "MetricExpr": "(PM_CMPLU_STALL_DMISS_L2L3 - PM_CMPLU_STALL_DMISS_L2L3_CONFLICT)/PM_RUN_INST_CMPL",
+ "MetricExpr": "dmiss_l2l3_stall_cpi - dmiss_l2l3_conflict_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "dmiss_l2l3_noconflict_stall_cpi"
},
@@ -114,7 +114,7 @@
},
{
"BriefDescription": "Completion stall by Dcache miss which resolved outside of local memory",
- "MetricExpr": "(PM_CMPLU_STALL_DMISS_L3MISS - PM_CMPLU_STALL_DMISS_L21_L31 - PM_CMPLU_STALL_DMISS_LMEM)/PM_RUN_INST_CMPL",
+ "MetricExpr": "dmiss_l3miss_stall_cpi - dmiss_l21_l31_stall_cpi - dmiss_lmem_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "dmiss_non_local_stall_cpi"
},
@@ -126,7 +126,7 @@
},
{
"BriefDescription": "Stalls due to short latency double precision ops.",
- "MetricExpr": "(PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DPLONG)/PM_RUN_INST_CMPL",
+ "MetricExpr": "dp_stall_cpi - dplong_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "dp_other_stall_cpi"
},
@@ -155,7 +155,7 @@
"MetricName": "emq_full_stall_cpi"
},
{
- "MetricExpr": "(PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL)/PM_RUN_INST_CMPL",
+ "MetricExpr": "erat_miss_stall_cpi + emq_full_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "emq_stall_cpi"
},
@@ -173,7 +173,7 @@
},
{
"BriefDescription": "Completion stall due to execution units for other reasons.",
- "MetricExpr": "(PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_DP - PM_CMPLU_STALL_DFU - PM_CMPLU_STALL_PM - PM_CMPLU_STALL_CRYPTO - PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricExpr": "exec_unit_stall_cpi - scalar_stall_cpi - vector_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "exec_unit_other_stall_cpi"
},
@@ -197,7 +197,7 @@
},
{
"BriefDescription": "Stalls due to short latency integer ops",
- "MetricExpr": "(PM_CMPLU_STALL_FXU - PM_CMPLU_STALL_FXLONG)/PM_RUN_INST_CMPL",
+ "MetricExpr": "fxu_stall_cpi - fxlong_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "fxu_other_stall_cpi"
},
@@ -208,7 +208,85 @@
"MetricName": "fxu_stall_cpi"
},
{
- "MetricExpr": "(PM_NTC_ISSUE_HELD_DARQ_FULL + PM_NTC_ISSUE_HELD_ARB + PM_NTC_ISSUE_HELD_OTHER)/PM_RUN_INST_CMPL",
+ "BriefDescription": "Instruction Completion Table empty for this thread due to branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_br_mpred_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to Icache Miss and branch mispred",
+ "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED_ICMISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_br_mpred_icmiss_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table other stalls",
+ "MetricExpr": "nothing_dispatched_cpi - ict_noslot_ic_miss_cpi - ict_noslot_br_mpred_icmiss_cpi - ict_noslot_br_mpred_cpi - ict_noslot_disp_held_cpi",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_cyc_other_cpi"
+ },
+ {
+ "BriefDescription": "Cycles in which the NTC instruciton is held at dispatch for any reason",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_hb_full_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_ISSQ/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_issq_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
+ "MetricExpr": "ict_noslot_disp_held_cpi - ict_noslot_disp_held_hb_full_cpi - ict_noslot_disp_held_sync_cpi - ict_noslot_disp_held_tbegin_cpi - ict_noslot_disp_held_issq_cpi",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_other_cpi"
+ },
+ {
+ "BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_SYNC/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_sync_cpi"
+ },
+ {
+ "BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch",
+ "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_disp_held_tbegin_cpi"
+ },
+ {
+ "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
+ "MetricExpr": "ict_noslot_ic_miss_cpi - ict_noslot_ic_l3_cpi - ict_noslot_ic_l3miss_cpi",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_ic_l2_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to icache misses that were sourced from the local L3",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_ic_l3_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_L3MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_ic_l3miss_cpi"
+ },
+ {
+ "BriefDescription": "Instruction Completion Table empty for this thread due to Icache Miss",
+ "MetricExpr": "PM_ICT_NOSLOT_IC_MISS/PM_RUN_INST_CMPL",
+ "MetricGroup": "cpi_breakdown",
+ "MetricName": "ict_noslot_ic_miss_cpi"
+ },
+ {
+ "MetricExpr": "ntc_issue_held_darq_full_cpi + ntc_issue_held_arb_cpi + ntc_issue_held_other_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "issue_hold_cpi"
},
@@ -249,7 +327,7 @@
"MetricName": "lrq_other_stall_cpi"
},
{
- "MetricExpr": "(PM_CMPLU_STALL_LMQ_FULL + PM_CMPLU_STALL_ST_FWD + PM_CMPLU_STALL_LHS + PM_CMPLU_STALL_LSU_MFSPR + PM_CMPLU_STALL_LARX + PM_CMPLU_STALL_LRQ_OTHER)/PM_RUN_INST_CMPL",
+ "MetricExpr": "lmq_full_stall_cpi + st_fwd_stall_cpi + lhs_stall_cpi + lsu_mfspr_stall_cpi + larx_stall_cpi + lrq_other_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "lrq_stall_cpi"
},
@@ -260,7 +338,7 @@
"MetricName": "lsaq_arb_stall_cpi"
},
{
- "MetricExpr": "(PM_CMPLU_STALL_LRQ_FULL + PM_CMPLU_STALL_SRQ_FULL + PM_CMPLU_STALL_LSAQ_ARB)/PM_RUN_INST_CMPL",
+ "MetricExpr": "lrq_full_stall_cpi + srq_full_stall_cpi + lsaq_arb_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "lsaq_stall_cpi"
},
@@ -284,7 +362,7 @@
},
{
"BriefDescription": "Completion LSU stall for other reasons",
- "MetricExpr": "(PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_LSU_FIN - PM_CMPLU_STALL_STORE_FINISH - PM_CMPLU_STALL_STORE_DATA - PM_CMPLU_STALL_EIEIO - PM_CMPLU_STALL_STCX - PM_CMPLU_STALL_SLB - PM_CMPLU_STALL_TEND - PM_CMPLU_STALL_PASTE - PM_CMPLU_STALL_TLBIE - PM_CMPLU_STALL_STORE_PIPE_ARB - PM_CMPLU_STALL_STORE_FIN_ARB - PM_CMPLU_STALL_LOAD_FINISH + PM_CMPLU_STALL_DCACHE_MISS - PM_CMPLU_STALL_LMQ_FULL - PM_CMPLU_STALL_ST_FWD - PM_CMPLU_STALL_LHS - PM_CMPLU_STALL_LSU_MFSPR - PM_CMPLU_STALL_LARX - PM_CMPLU_STALL_LRQ_OTHER + PM_CMPLU_STALL_ERAT_MISS + PM_CMPLU_STALL_EMQ_FULL - PM_CMPLU_STALL_LRQ_FULL - PM_CMPLU_STALL_SRQ_FULL - PM_CMPLU_STALL_LSAQ_ARB) / PM_RUN_INST_CMPL",
+ "MetricExpr": "lsu_stall_cpi - lsu_fin_stall_cpi - store_finish_stall_cpi - srq_stall_cpi - load_finish_stall_cpi + lsu_stall_dcache_miss_cpi - lrq_stall_cpi + emq_stall_cpi - lsaq_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "lsu_other_stall_cpi"
},
@@ -313,7 +391,7 @@
"MetricName": "nested_tend_stall_cpi"
},
{
- "BriefDescription": "Number of cycles the ICT has no itags assigned to this thread",
+ "BriefDescription": "Number of cycles the Instruction Completion Table has no itags assigned to this thread",
"MetricExpr": "PM_ICT_NOSLOT_CYC/PM_RUN_INST_CMPL",
"MetricGroup": "cpi_breakdown",
"MetricName": "nothing_dispatched_cpi"
@@ -356,13 +434,13 @@
},
{
"BriefDescription": "Cycles unaccounted for.",
- "MetricExpr": "(PM_RUN_CYC - PM_1PLUS_PPC_CMPL - PM_CMPLU_STALL_THRD - PM_CMPLU_STALL - PM_ICT_NOSLOT_CYC)/PM_RUN_INST_CMPL",
+ "MetricExpr": "run_cpi - completion_cpi - thread_block_stall_cpi - stall_cpi - nothing_dispatched_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "other_cpi"
},
{
"BriefDescription": "Completion stall for other reasons",
- "MetricExpr": "PM_CMPLU_STALL - PM_CMPLU_STALL_NTC_DISP_FIN - PM_CMPLU_STALL_NTC_FLUSH - PM_CMPLU_STALL_LSU - PM_CMPLU_STALL_EXEC_UNIT - PM_CMPLU_STALL_BRU)/PM_RUN_INST_CMPL",
+ "MetricExpr": "stall_cpi - ntc_disp_fin_stall_cpi - ntc_flush_stall_cpi - lsu_stall_cpi - exec_unit_stall_cpi - bru_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "other_stall_cpi"
},
@@ -391,7 +469,7 @@
"MetricName": "run_cyc_cpi"
},
{
- "MetricExpr": "(PM_CMPLU_STALL_FXU + PM_CMPLU_STALL_DP + PM_CMPLU_STALL_DFU + PM_CMPLU_STALL_PM + PM_CMPLU_STALL_CRYPTO)/PM_RUN_INST_CMPL",
+ "MetricExpr": "fxu_stall_cpi + dp_stall_cpi + dfu_stall_cpi + pm_stall_cpi + crypto_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "scalar_stall_cpi"
},
@@ -414,7 +492,7 @@
"MetricName": "srq_full_stall_cpi"
},
{
- "MetricExpr": "(PM_CMPLU_STALL_STORE_DATA + PM_CMPLU_STALL_EIEIO + PM_CMPLU_STALL_STCX + PM_CMPLU_STALL_SLB + PM_CMPLU_STALL_TEND + PM_CMPLU_STALL_PASTE + PM_CMPLU_STALL_TLBIE + PM_CMPLU_STALL_STORE_PIPE_ARB + PM_CMPLU_STALL_STORE_FIN_ARB)/PM_RUN_INST_CMPL",
+ "MetricExpr": "store_data_stall_cpi + eieio_stall_cpi + stcx_stall_cpi + slb_stall_cpi + tend_stall_cpi + paste_stall_cpi + tlbie_stall_cpi + store_pipe_arb_stall_cpi + store_fin_arb_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "srq_stall_cpi"
},
@@ -425,7 +503,7 @@
"MetricName": "st_fwd_stall_cpi"
},
{
- "BriefDescription": "Nothing completed and ICT not empty",
+ "BriefDescription": "Nothing completed and Instruction Completion Table not empty",
"MetricExpr": "PM_CMPLU_STALL/PM_RUN_INST_CMPL",
"MetricGroup": "cpi_breakdown",
"MetricName": "stall_cpi"
@@ -480,7 +558,7 @@
},
{
"BriefDescription": "Vector stalls due to small latency double precision ops",
- "MetricExpr": "(PM_CMPLU_STALL_VDP - PM_CMPLU_STALL_VDPLONG)/PM_RUN_INST_CMPL",
+ "MetricExpr": "vdp_stall_cpi - vdplong_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "vdp_other_stall_cpi"
},
@@ -497,7 +575,7 @@
"MetricName": "vdplong_stall_cpi"
},
{
- "MetricExpr": "(PM_CMPLU_STALL_VFXU + PM_CMPLU_STALL_VDP)/PM_RUN_INST_CMPL",
+ "MetricExpr": "vfxu_stall_cpi + vdp_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "vector_stall_cpi"
},
@@ -509,7 +587,7 @@
},
{
"BriefDescription": "Vector stalls due to small latency integer ops",
- "MetricExpr": "(PM_CMPLU_STALL_VFXU - PM_CMPLU_STALL_VFXLONG)/PM_RUN_INST_CMPL",
+ "MetricExpr": "vfxu_stall_cpi - vfxlong_stall_cpi",
"MetricGroup": "cpi_breakdown",
"MetricName": "vfxu_other_stall_cpi"
},
@@ -1132,156 +1210,24 @@
"MetricName": "inst_from_rmem_percent"
},
{
- "BriefDescription": "%L2 Modified CO Cache read Utilization (4 pclks per disp attempt)",
- "MetricExpr": "((PM_L2_CASTOUT_MOD/2)*4)/ PM_RUN_CYC * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_co_m_rd_util"
- },
- {
- "BriefDescription": "L2 dcache invalidates per run inst (per core)",
- "MetricExpr": "(PM_L2_DC_INV / 2) / PM_RUN_INST_CMPL * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_dc_inv_rate_percent"
- },
- {
"BriefDescription": "Demand load misses as a % of L2 LD dispatches (per thread)",
"MetricExpr": "PM_L1_DCACHE_RELOAD_VALID / (PM_L2_LD / 2) * 100",
"MetricGroup": "l2_stats",
"MetricName": "l2_dem_ld_disp_percent"
},
{
- "BriefDescription": "L2 Icache invalidates per run inst (per core)",
- "MetricExpr": "(PM_L2_IC_INV / 2) / PM_RUN_INST_CMPL * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ic_inv_rate_percent"
- },
- {
- "BriefDescription": "L2 Inst misses as a % of total L2 Inst dispatches (per thread)",
- "MetricExpr": "PM_L2_INST_MISS / PM_L2_INST * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_inst_miss_ratio_percent"
- },
- {
- "BriefDescription": "Average number of cycles between L2 Load hits",
- "MetricExpr": "(PM_L2_LD_HIT / PM_RUN_CYC) / 2",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ld_hit_frequency"
- },
- {
- "BriefDescription": "Average number of cycles between L2 Load misses",
- "MetricExpr": "(PM_L2_LD_MISS / PM_RUN_CYC) / 2",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ld_miss_frequency"
- },
- {
- "BriefDescription": "L2 Load misses as a % of total L2 Load dispatches (per thread)",
- "MetricExpr": "PM_L2_LD_MISS / PM_L2_LD * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ld_miss_ratio_percent"
- },
- {
- "BriefDescription": "% L2 load disp attempts Cache read Utilization (4 pclks per disp attempt)",
- "MetricExpr": "((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ld_rd_util"
- },
- {
- "BriefDescription": "L2 load misses that require a cache write (4 pclks per disp attempt) % of pclks",
- "MetricExpr": "((( PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4)/ PM_RUN_CYC * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_ldmiss_wr_util"
- },
- {
- "BriefDescription": "L2 local pump prediction success",
- "MetricExpr": "PM_L2_LOC_GUESS_CORRECT / (PM_L2_LOC_GUESS_CORRECT + PM_L2_LOC_GUESS_WRONG) * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_local_pred_correct_percent"
- },
- {
- "BriefDescription": "L2 COs that were in M,Me,Mu state as a % of all L2 COs",
- "MetricExpr": "PM_L2_CASTOUT_MOD / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_mod_co_percent"
- },
- {
- "BriefDescription": "% of L2 Load RC dispatch atampts that failed because of address collisions and cclass conflicts",
- "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR )/ PM_L2_RCLD_DISP * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rc_ld_disp_addr_fail_percent"
- },
- {
- "BriefDescription": "% of L2 Load RC dispatch attempts that failed",
- "MetricExpr": "(PM_L2_RCLD_DISP_FAIL_ADDR + PM_L2_RCLD_DISP_FAIL_OTHER)/ PM_L2_RCLD_DISP * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rc_ld_disp_fail_percent"
- },
- {
- "BriefDescription": "% of L2 Store RC dispatch atampts that failed because of address collisions and cclass conflicts",
- "MetricExpr": "PM_L2_RCST_DISP_FAIL_ADDR / PM_L2_RCST_DISP * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rc_st_disp_addr_fail_percent"
- },
- {
- "BriefDescription": "% of L2 Store RC dispatch attempts that failed",
- "MetricExpr": "(PM_L2_RCST_DISP_FAIL_ADDR + PM_L2_RCST_DISP_FAIL_OTHER)/ PM_L2_RCST_DISP * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rc_st_disp_fail_percent"
- },
- {
- "BriefDescription": "L2 Cache Read Utilization (per core)",
- "MetricExpr": "(((PM_L2_RCLD_DISP/2)*4)/ PM_RUN_CYC * 100) + (((PM_L2_RCST_DISP/2)*4)/PM_RUN_CYC * 100) + (((PM_L2_CASTOUT_MOD/2)*4)/PM_RUN_CYC * 100)",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_rd_util_percent"
- },
- {
- "BriefDescription": "L2 COs that were in T,Te,Si,S state as a % of all L2 COs",
- "MetricExpr": "PM_L2_CASTOUT_SHR / (PM_L2_CASTOUT_MOD + PM_L2_CASTOUT_SHR) * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_shr_co_percent"
- },
- {
"BriefDescription": "L2 Store misses as a % of total L2 Store dispatches (per thread)",
"MetricExpr": "PM_L2_ST_MISS / PM_L2_ST * 100",
"MetricGroup": "l2_stats",
"MetricName": "l2_st_miss_ratio_percent"
},
{
- "BriefDescription": "% L2 store disp attempts Cache read Utilization (4 pclks per disp attempt)",
- "MetricExpr": "((PM_L2_RCST_DISP/2)*4) / PM_RUN_CYC * 100",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_st_rd_util"
- },
- {
"BriefDescription": "L2 stores that require a cache write (4 pclks per disp attempt) % of pclks",
"MetricExpr": "((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100",
"MetricGroup": "l2_stats",
"MetricName": "l2_st_wr_util"
},
{
- "BriefDescription": "L2 Cache Write Utilization (per core)",
- "MetricExpr": "((((PM_L2_LD_DISP - PM_L2_LD_HIT)/2)*4) / PM_RUN_CYC * 100) + (((PM_L2_ST_DISP/2)*4) / PM_RUN_CYC * 100)",
- "MetricGroup": "l2_stats",
- "MetricName": "l2_wr_util_percent"
- },
- {
- "BriefDescription": "Average number of cycles between L3 Load hits",
- "MetricExpr": "(PM_L3_LD_HIT / PM_RUN_CYC) / 2",
- "MetricGroup": "l3_stats",
- "MetricName": "l3_ld_hit_frequency"
- },
- {
- "BriefDescription": "Average number of cycles between L3 Load misses",
- "MetricExpr": "(PM_L3_LD_MISS / PM_RUN_CYC) / 2",
- "MetricGroup": "l3_stats",
- "MetricName": "l3_ld_miss_frequency"
- },
- {
- "BriefDescription": "Average number of Write-in machines used. 1 of 8 WI machines is sampled every L3 cycle",
- "MetricExpr": "(PM_L3_WI_USAGE / PM_RUN_CYC) * 8",
- "MetricGroup": "l3_stats",
- "MetricName": "l3_wi_usage"
- },
- {
"BriefDescription": "Average icache miss latency",
"MetricExpr": "PM_IC_DEMAND_CYC / PM_IC_DEMAND_REQ",
"MetricGroup": "latency",
@@ -1745,7 +1691,7 @@
"MetricName": "custom_secs"
},
{
- "BriefDescription": "Percentage Cycles atleast one instruction dispatched",
+ "BriefDescription": "Percentage Cycles at least one instruction dispatched",
"MetricExpr": "PM_1PLUS_PPC_DISP / PM_CYC * 100",
"MetricName": "cycles_atleast_one_inst_dispatched_percent"
},
@@ -1766,7 +1712,7 @@
},
{
"BriefDescription": "% of DL1 reloads from Private L3, other core per Inst",
- "MetricExpr": "(PM_DATA_FROM_L31_MOD + PM_DATA_FROM_L31_SHR) * 100 / PM_RUN_INST_CMPL",
+ "MetricExpr": "dl1_reload_from_l31_mod_rate_percent + dl1_reload_from_l31_shr_rate_percent",
"MetricName": "dl1_reload_from_l31_rate_percent"
},
{
@@ -1820,71 +1766,6 @@
"MetricName": "fxu_all_idle"
},
{
- "BriefDescription": "Ict empty for this thread due to branch mispred",
- "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_br_mpred_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to Icache Miss and branch mispred",
- "MetricExpr": "PM_ICT_NOSLOT_BR_MPRED_ICMISS/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_br_mpred_icmiss_cpi"
- },
- {
- "BriefDescription": "ICT other stalls",
- "MetricExpr": "(PM_ICT_NOSLOT_CYC - PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_BR_MPRED_ICMISS - PM_ICT_NOSLOT_BR_MPRED - PM_ICT_NOSLOT_DISP_HELD)/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_cyc_other_cpi"
- },
- {
- "BriefDescription": "Cycles in which the NTC instruciton is held at dispatch for any reason",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to dispatch holds because the History Buffer was full. Could be GPR/VSR/VMR/FPR/CR/XVF",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_HB_FULL/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_hb_full_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to dispatch hold on this thread due to Issue q full, BRQ full, XVCF Full, Count cache, Link, Tar full",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_ISSQ/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_issq_cpi"
- },
- {
- "BriefDescription": "ICT_NOSLOT_DISP_HELD_OTHER_CPI",
- "MetricExpr": "(PM_ICT_NOSLOT_DISP_HELD - PM_ICT_NOSLOT_DISP_HELD_HB_FULL - PM_ICT_NOSLOT_DISP_HELD_SYNC - PM_ICT_NOSLOT_DISP_HELD_TBEGIN - PM_ICT_NOSLOT_DISP_HELD_ISSQ)/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_other_cpi"
- },
- {
- "BriefDescription": "Dispatch held due to a synchronizing instruction at dispatch",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_SYNC/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_sync_cpi"
- },
- {
- "BriefDescription": "the NTC instruction is being held at dispatch because it is a tbegin instruction and there is an older tbegin in the pipeline that must complete before the younger tbegin can dispatch",
- "MetricExpr": "PM_ICT_NOSLOT_DISP_HELD_TBEGIN/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_disp_held_tbegin_cpi"
- },
- {
- "BriefDescription": "ICT_NOSLOT_IC_L2_CPI",
- "MetricExpr": "(PM_ICT_NOSLOT_IC_MISS - PM_ICT_NOSLOT_IC_L3 - PM_ICT_NOSLOT_IC_L3MISS)/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_ic_l2_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from the local L3",
- "MetricExpr": "PM_ICT_NOSLOT_IC_L3/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_ic_l3_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to icache misses that were sourced from beyond the local L3. The source could be local/remote/distant memory or another core's cache",
- "MetricExpr": "PM_ICT_NOSLOT_IC_L3MISS/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_ic_l3miss_cpi"
- },
- {
- "BriefDescription": "Ict empty for this thread due to Icache Miss",
- "MetricExpr": "PM_ICT_NOSLOT_IC_MISS/PM_RUN_INST_CMPL",
- "MetricName": "ict_noslot_ic_miss_cpi"
- },
- {
"BriefDescription": "Rate of IERAT reloads from L2",
"MetricExpr": "PM_IPTEG_FROM_L2 * 100 / PM_RUN_INST_CMPL",
"MetricName": "ipteg_from_l2_rate_percent"
@@ -1966,7 +1847,7 @@
},
{
"BriefDescription": "Completion stall because a different thread was using the completion pipe",
- "MetricExpr": "(PM_CMPLU_STALL_THRD - PM_CMPLU_STALL_EXCEPTION - PM_CMPLU_STALL_ANY_SYNC - PM_CMPLU_STALL_SYNC_PMU_INT - PM_CMPLU_STALL_SPEC_FINISH - PM_CMPLU_STALL_FLUSH_ANY_THREAD - PM_CMPLU_STALL_LSU_FLUSH_NEXT - PM_CMPLU_STALL_NESTED_TBEGIN - PM_CMPLU_STALL_NESTED_TEND - PM_CMPLU_STALL_MTFPSCR)/PM_RUN_INST_CMPL",
+ "MetricExpr": "thread_block_stall_cpi - exception_stall_cpi - any_sync_stall_cpi - sync_pmu_int_stall_cpi - spec_finish_stall_cpi - flush_any_thread_stall_cpi - lsu_flush_next_stall_cpi - nested_tbegin_stall_cpi - nested_tend_stall_cpi - mtfpscr_stall_cpi",
"MetricName": "other_thread_cmpl_stall"
},
{
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/nest_metrics.json b/tools/perf/pmu-events/arch/powerpc/power9/nest_metrics.json
new file mode 100644
index 000000000000..7a5d1bf543f8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/powerpc/power9/nest_metrics.json
@@ -0,0 +1,63 @@
+[
+ {
+ "MetricExpr": "(hv_24x7@PM_MCS01_128B_RD_DISP_PORT01\\,chip\\=?@ + hv_24x7@PM_MCS01_128B_RD_DISP_PORT23\\,chip\\=?@ + hv_24x7@PM_MCS23_128B_RD_DISP_PORT01\\,chip\\=?@ + hv_24x7@PM_MCS23_128B_RD_DISP_PORT23\\,chip\\=?@)",
+ "MetricName": "Memory_RD_BW_Chip",
+ "MetricGroup": "Memory_BW",
+ "ScaleUnit": "1.6e-2MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricExpr": "(hv_24x7@PM_MCS01_128B_WR_DISP_PORT01\\,chip\\=?@ + hv_24x7@PM_MCS01_128B_WR_DISP_PORT23\\,chip\\=?@ + hv_24x7@PM_MCS23_128B_WR_DISP_PORT01\\,chip\\=?@ + hv_24x7@PM_MCS23_128B_WR_DISP_PORT23\\,chip\\=?@ )",
+ "MetricName": "Memory_WR_BW_Chip",
+ "MetricGroup": "Memory_BW",
+ "ScaleUnit": "1.6e-2MB",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricExpr": "(hv_24x7@PM_PB_CYC\\,chip\\=?@ )",
+ "MetricName": "PowerBUS_Frequency",
+ "ScaleUnit": "2.5e-7GHz",
+ "AggregationMode": "PerChip"
+ },
+ {
+ "MetricExpr": "(hv_24x7@CPM_CS_32MHZ_CYC\\,domain\\=3\\,core\\=?@ )",
+ "MetricName": "CPM_CS_32MHZ_CYC",
+ "ScaleUnit": "1MHz",
+ "AggregationMode": "PerCore"
+ },
+ {
+ "MetricExpr" : "nest_mcs01_imc@PM_MCS01_128B_RD_DISP_PORT01@ + nest_mcs01_imc@PM_MCS01_128B_RD_DISP_PORT23@",
+ "MetricName" : "mcs01-read",
+ "MetricGroup" : "memory-bandwidth",
+ "ScaleUnit": "6.1e-5MB"
+ },
+ {
+ "MetricExpr" : "nest_mcs23_imc@PM_MCS23_128B_RD_DISP_PORT01@ + nest_mcs23_imc@PM_MCS23_128B_RD_DISP_PORT23@",
+ "MetricName" : "mcs23-read",
+ "MetricGroup" : "memory-bandwidth",
+ "ScaleUnit": "6.1e-5MB"
+ },
+ {
+ "MetricExpr" : "nest_mcs01_imc@PM_MCS01_128B_WR_DISP_PORT01@ + nest_mcs01_imc@PM_MCS01_128B_WR_DISP_PORT23@",
+ "MetricName" : "mcs01-write",
+ "MetricGroup" : "memory-bandwidth",
+ "ScaleUnit": "6.1e-5MB"
+ },
+ {
+ "MetricExpr" : "nest_mcs23_imc@PM_MCS23_128B_WR_DISP_PORT01@ + nest_mcs23_imc@PM_MCS23_128B_WR_DISP_PORT23@",
+ "MetricName" : "mcs23-write",
+ "MetricGroup" : "memory-bandwidth",
+ "ScaleUnit": "6.1e-5MB"
+ },
+ {
+ "MetricExpr" : "nest_powerbus0_imc@PM_PB_CYC@",
+ "MetricName" : "powerbus_freq",
+ "ScaleUnit": "1e-9GHz"
+ },
+ {
+ "MetricExpr" : "(nest_mcs01_imc@PM_MCS01_128B_RD_DISP_PORT01@ + nest_mcs01_imc@PM_MCS01_128B_RD_DISP_PORT23@ + nest_mcs23_imc@PM_MCS23_128B_RD_DISP_PORT01@ + nest_mcs23_imc@PM_MCS23_128B_RD_DISP_PORT23@ + nest_mcs01_imc@PM_MCS01_128B_WR_DISP_PORT01@ + nest_mcs01_imc@PM_MCS01_128B_WR_DISP_PORT23@ + nest_mcs23_imc@PM_MCS23_128B_WR_DISP_PORT01@ + nest_mcs23_imc@PM_MCS23_128B_WR_DISP_PORT23@)",
+ "MetricName" : "Memory-bandwidth-MCS",
+ "MetricGroup" : "memory-bandwidth",
+ "ScaleUnit": "6.1e-5MB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/other.json b/tools/perf/pmu-events/arch/powerpc/power9/other.json
index 3f69422c21f9..f10bd554521a 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/other.json
@@ -1417,7 +1417,7 @@
{
"EventCode": "0x45054",
"EventName": "PM_FMA_CMPL",
- "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only. "
+ "BriefDescription": "two flops operation completed (fmadd, fnmadd, fmsub, fnmsub) Scalar instructions only."
},
{
"EventCode": "0x201E8",
@@ -2017,7 +2017,7 @@
{
"EventCode": "0xC0BC",
"EventName": "PM_LSU_FLUSH_OTHER",
- "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the “bad dval” back and flush all younger ops)"
+ "BriefDescription": "Other LSU flushes including: Sync (sync ack from L2 caused search of LRQ for oldest snooped load, This will either signal a Precise Flush of the oldest snooped loa or a Flush Next PPC); Data Valid Flush Next (several cases of this, one example is store and reload are lined up such that a store-hit-reload scenario exists and the CDF has already launched and has gotten bad/stale data); Bad Data Valid Flush Next (might be a few cases of this, one example is a larxa (D$ hit) return data and dval but can't allocate to LMQ (LMQ full or other reason). Already gave dval but can't watch it for snoop_hit_larx. Need to take the 'bad dval' back and flush all younger ops)"
},
{
"EventCode": "0x5094",
diff --git a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
index d0265f255de2..723bffa41c44 100644
--- a/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
+++ b/tools/perf/pmu-events/arch/powerpc/power9/pipeline.json
@@ -442,7 +442,7 @@
{
"EventCode": "0x4D052",
"EventName": "PM_2FLOP_CMPL",
- "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg "
+ "BriefDescription": "DP vector version of fmul, fsub, fcmp, fsel, fabs, fnabs, fres ,fsqrte, fneg"
},
{
"EventCode": "0x1F142",
diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv
new file mode 100644
index 000000000000..c61b3d6ef616
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv
@@ -0,0 +1,17 @@
+# Format:
+# MVENDORID-MARCHID-MIMPID,Version,JSON/file/pathname,Type
+#
+# where
+# MVENDORID JEDEC code of the core provider
+# MARCHID base microarchitecture of the hart
+# MIMPID unique encoding of the version
+# of the processor implementation
+# Version could be used to track version of JSON file
+# but currently unused.
+# JSON/file/pathname is the path to JSON file, relative
+# to tools/perf/pmu-events/arch/riscv/.
+# Type is core, uncore etc
+#
+#
+#MVENDORID-MARCHID-MIMPID,Version,Filename,EventType
+0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core
diff --git a/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json b/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
new file mode 100644
index 000000000000..a9939823b14b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/riscv-sbi-firmware.json
@@ -0,0 +1,134 @@
+[
+ {
+ "PublicDescription": "Misaligned load trap",
+ "ConfigCode": "0x8000000000000000",
+ "EventName": "FW_MISALIGNED_LOAD",
+ "BriefDescription": "Misaligned load trap event"
+ },
+ {
+ "PublicDescription": "Misaligned store trap",
+ "ConfigCode": "0x8000000000000001",
+ "EventName": "FW_MISALIGNED_STORE",
+ "BriefDescription": "Misaligned store trap event"
+ },
+ {
+ "PublicDescription": "Load access trap",
+ "ConfigCode": "0x8000000000000002",
+ "EventName": "FW_ACCESS_LOAD",
+ "BriefDescription": "Load access trap event"
+ },
+ {
+ "PublicDescription": "Store access trap",
+ "ConfigCode": "0x8000000000000003",
+ "EventName": "FW_ACCESS_STORE",
+ "BriefDescription": "Store access trap event"
+ },
+ {
+ "PublicDescription": "Illegal instruction trap",
+ "ConfigCode": "0x8000000000000004",
+ "EventName": "FW_ILLEGAL_INSN",
+ "BriefDescription": "Illegal instruction trap event"
+ },
+ {
+ "PublicDescription": "Set timer event",
+ "ConfigCode": "0x8000000000000005",
+ "EventName": "FW_SET_TIMER",
+ "BriefDescription": "Set timer event"
+ },
+ {
+ "PublicDescription": "Sent IPI to other HART event",
+ "ConfigCode": "0x8000000000000006",
+ "EventName": "FW_IPI_SENT",
+ "BriefDescription": "Sent IPI to other HART event"
+ },
+ {
+ "PublicDescription": "Received IPI from other HART event",
+ "ConfigCode": "0x8000000000000007",
+ "EventName": "FW_IPI_RECEIVED",
+ "BriefDescription": "Received IPI from other HART event"
+ },
+ {
+ "PublicDescription": "Sent FENCE.I request to other HART event",
+ "ConfigCode": "0x8000000000000008",
+ "EventName": "FW_FENCE_I_SENT",
+ "BriefDescription": "Sent FENCE.I request to other HART event"
+ },
+ {
+ "PublicDescription": "Received FENCE.I request from other HART event",
+ "ConfigCode": "0x8000000000000009",
+ "EventName": "FW_FENCE_I_RECEIVED",
+ "BriefDescription": "Received FENCE.I request from other HART event"
+ },
+ {
+ "PublicDescription": "Sent SFENCE.VMA request to other HART event",
+ "ConfigCode": "0x800000000000000a",
+ "EventName": "FW_SFENCE_VMA_SENT",
+ "BriefDescription": "Sent SFENCE.VMA request to other HART event"
+ },
+ {
+ "PublicDescription": "Received SFENCE.VMA request from other HART event",
+ "ConfigCode": "0x800000000000000b",
+ "EventName": "FW_SFENCE_VMA_RECEIVED",
+ "BriefDescription": "Received SFENCE.VMA request from other HART event"
+ },
+ {
+ "PublicDescription": "Sent SFENCE.VMA with ASID request to other HART event",
+ "ConfigCode": "0x800000000000000c",
+ "EventName": "FW_SFENCE_VMA_RECEIVED",
+ "BriefDescription": "Sent SFENCE.VMA with ASID request to other HART event"
+ },
+ {
+ "PublicDescription": "Received SFENCE.VMA with ASID request from other HART event",
+ "ConfigCode": "0x800000000000000d",
+ "EventName": "FW_SFENCE_VMA_ASID_RECEIVED",
+ "BriefDescription": "Received SFENCE.VMA with ASID request from other HART event"
+ },
+ {
+ "PublicDescription": "Sent HFENCE.GVMA request to other HART event",
+ "ConfigCode": "0x800000000000000e",
+ "EventName": "FW_HFENCE_GVMA_SENT",
+ "BriefDescription": "Sent HFENCE.GVMA request to other HART event"
+ },
+ {
+ "PublicDescription": "Received HFENCE.GVMA request from other HART event",
+ "ConfigCode": "0x800000000000000f",
+ "EventName": "FW_HFENCE_GVMA_RECEIVED",
+ "BriefDescription": "Received HFENCE.GVMA request from other HART event"
+ },
+ {
+ "PublicDescription": "Sent HFENCE.GVMA with VMID request to other HART event",
+ "ConfigCode": "0x8000000000000010",
+ "EventName": "FW_HFENCE_GVMA_VMID_SENT",
+ "BriefDescription": "Sent HFENCE.GVMA with VMID request to other HART event"
+ },
+ {
+ "PublicDescription": "Received HFENCE.GVMA with VMID request from other HART event",
+ "ConfigCode": "0x8000000000000011",
+ "EventName": "FW_HFENCE_GVMA_VMID_RECEIVED",
+ "BriefDescription": "Received HFENCE.GVMA with VMID request from other HART event"
+ },
+ {
+ "PublicDescription": "Sent HFENCE.VVMA request to other HART event",
+ "ConfigCode": "0x8000000000000012",
+ "EventName": "FW_HFENCE_VVMA_SENT",
+ "BriefDescription": "Sent HFENCE.VVMA request to other HART event"
+ },
+ {
+ "PublicDescription": "Received HFENCE.VVMA request from other HART event",
+ "ConfigCode": "0x8000000000000013",
+ "EventName": "FW_HFENCE_VVMA_RECEIVED",
+ "BriefDescription": "Received HFENCE.VVMA request from other HART event"
+ },
+ {
+ "PublicDescription": "Sent HFENCE.VVMA with ASID request to other HART event",
+ "ConfigCode": "0x8000000000000014",
+ "EventName": "FW_HFENCE_VVMA_ASID_SENT",
+ "BriefDescription": "Sent HFENCE.VVMA with ASID request to other HART event"
+ },
+ {
+ "PublicDescription": "Received HFENCE.VVMA with ASID request from other HART event",
+ "ConfigCode": "0x8000000000000015",
+ "EventName": "FW_HFENCE_VVMA_ASID_RECEIVED",
+ "BriefDescription": "Received HFENCE.VVMA with ASID request from other HART event"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json b/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
new file mode 100644
index 000000000000..9b4a032186a7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/sifive/u74/firmware.json
@@ -0,0 +1,68 @@
+[
+ {
+ "ArchStdEvent": "FW_MISALIGNED_LOAD"
+ },
+ {
+ "ArchStdEvent": "FW_MISALIGNED_STORE"
+ },
+ {
+ "ArchStdEvent": "FW_ACCESS_LOAD"
+ },
+ {
+ "ArchStdEvent": "FW_ACCESS_STORE"
+ },
+ {
+ "ArchStdEvent": "FW_ILLEGAL_INSN"
+ },
+ {
+ "ArchStdEvent": "FW_SET_TIMER"
+ },
+ {
+ "ArchStdEvent": "FW_IPI_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_IPI_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_FENCE_I_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_FENCE_I_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT"
+ },
+ {
+ "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/riscv/sifive/u74/instructions.json b/tools/perf/pmu-events/arch/riscv/sifive/u74/instructions.json
new file mode 100644
index 000000000000..5eab718c9256
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/sifive/u74/instructions.json
@@ -0,0 +1,92 @@
+[
+ {
+ "EventName": "EXCEPTION_TAKEN",
+ "EventCode": "0x0000100",
+ "BriefDescription": "Exception taken"
+ },
+ {
+ "EventName": "INTEGER_LOAD_RETIRED",
+ "EventCode": "0x0000200",
+ "BriefDescription": "Integer load instruction retired"
+ },
+ {
+ "EventName": "INTEGER_STORE_RETIRED",
+ "EventCode": "0x0000400",
+ "BriefDescription": "Integer store instruction retired"
+ },
+ {
+ "EventName": "ATOMIC_MEMORY_RETIRED",
+ "EventCode": "0x0000800",
+ "BriefDescription": "Atomic memory operation retired"
+ },
+ {
+ "EventName": "SYSTEM_INSTRUCTION_RETIRED",
+ "EventCode": "0x0001000",
+ "BriefDescription": "System instruction retired"
+ },
+ {
+ "EventName": "INTEGER_ARITHMETIC_RETIRED",
+ "EventCode": "0x0002000",
+ "BriefDescription": "Integer arithmetic instruction retired"
+ },
+ {
+ "EventName": "CONDITIONAL_BRANCH_RETIRED",
+ "EventCode": "0x0004000",
+ "BriefDescription": "Conditional branch retired"
+ },
+ {
+ "EventName": "JAL_INSTRUCTION_RETIRED",
+ "EventCode": "0x0008000",
+ "BriefDescription": "JAL instruction retired"
+ },
+ {
+ "EventName": "JALR_INSTRUCTION_RETIRED",
+ "EventCode": "0x0010000",
+ "BriefDescription": "JALR instruction retired"
+ },
+ {
+ "EventName": "INTEGER_MULTIPLICATION_RETIRED",
+ "EventCode": "0x0020000",
+ "BriefDescription": "Integer multiplication instruction retired"
+ },
+ {
+ "EventName": "INTEGER_DIVISION_RETIRED",
+ "EventCode": "0x0040000",
+ "BriefDescription": "Integer division instruction retired"
+ },
+ {
+ "EventName": "FP_LOAD_RETIRED",
+ "EventCode": "0x0080000",
+ "BriefDescription": "Floating-point load instruction retired"
+ },
+ {
+ "EventName": "FP_STORE_RETIRED",
+ "EventCode": "0x0100000",
+ "BriefDescription": "Floating-point store instruction retired"
+ },
+ {
+ "EventName": "FP_ADDITION_RETIRED",
+ "EventCode": "0x0200000",
+ "BriefDescription": "Floating-point addition retired"
+ },
+ {
+ "EventName": "FP_MULTIPLICATION_RETIRED",
+ "EventCode": "0x0400000",
+ "BriefDescription": "Floating-point multiplication retired"
+ },
+ {
+ "EventName": "FP_FUSEDMADD_RETIRED",
+ "EventCode": "0x0800000",
+ "BriefDescription": "Floating-point fused multiply-add retired"
+ },
+ {
+ "EventName": "FP_DIV_SQRT_RETIRED",
+ "EventCode": "0x1000000",
+ "BriefDescription": "Floating-point division or square-root retired"
+ },
+ {
+ "EventName": "OTHER_FP_RETIRED",
+ "EventCode": "0x2000000",
+ "BriefDescription": "Other floating-point instruction retired"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/riscv/sifive/u74/memory.json b/tools/perf/pmu-events/arch/riscv/sifive/u74/memory.json
new file mode 100644
index 000000000000..be1a46312ac3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/sifive/u74/memory.json
@@ -0,0 +1,32 @@
+[
+ {
+ "EventName": "ICACHE_RETIRED",
+ "EventCode": "0x0000102",
+ "BriefDescription": "Instruction cache miss"
+ },
+ {
+ "EventName": "DCACHE_MISS_MMIO_ACCESSES",
+ "EventCode": "0x0000202",
+ "BriefDescription": "Data cache miss or memory-mapped I/O access"
+ },
+ {
+ "EventName": "DCACHE_WRITEBACK",
+ "EventCode": "0x0000402",
+ "BriefDescription": "Data cache write-back"
+ },
+ {
+ "EventName": "INST_TLB_MISS",
+ "EventCode": "0x0000802",
+ "BriefDescription": "Instruction TLB miss"
+ },
+ {
+ "EventName": "DATA_TLB_MISS",
+ "EventCode": "0x0001002",
+ "BriefDescription": "Data TLB miss"
+ },
+ {
+ "EventName": "UTLB_MISS",
+ "EventCode": "0x0002002",
+ "BriefDescription": "UTLB miss"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/riscv/sifive/u74/microarch.json b/tools/perf/pmu-events/arch/riscv/sifive/u74/microarch.json
new file mode 100644
index 000000000000..50ffa55418cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/riscv/sifive/u74/microarch.json
@@ -0,0 +1,57 @@
+[
+ {
+ "EventName": "ADDRESSGEN_INTERLOCK",
+ "EventCode": "0x0000101",
+ "BriefDescription": "Address-generation interlock"
+ },
+ {
+ "EventName": "LONGLAT_INTERLOCK",
+ "EventCode": "0x0000201",
+ "BriefDescription": "Long-latency interlock"
+ },
+ {
+ "EventName": "CSR_READ_INTERLOCK",
+ "EventCode": "0x0000401",
+ "BriefDescription": "CSR read interlock"
+ },
+ {
+ "EventName": "ICACHE_ITIM_BUSY",
+ "EventCode": "0x0000801",
+ "BriefDescription": "Instruction cache/ITIM busy"
+ },
+ {
+ "EventName": "DCACHE_DTIM_BUSY",
+ "EventCode": "0x0001001",
+ "BriefDescription": "Data cache/DTIM busy"
+ },
+ {
+ "EventName": "BRANCH_DIRECTION_MISPREDICTION",
+ "EventCode": "0x0002001",
+ "BriefDescription": "Branch direction misprediction"
+ },
+ {
+ "EventName": "BRANCH_TARGET_MISPREDICTION",
+ "EventCode": "0x0004001",
+ "BriefDescription": "Branch/jump target misprediction"
+ },
+ {
+ "EventName": "PIPE_FLUSH_CSR_WRITE",
+ "EventCode": "0x0008001",
+ "BriefDescription": "Pipeline flush from CSR write"
+ },
+ {
+ "EventName": "PIPE_FLUSH_OTHER_EVENT",
+ "EventCode": "0x0010001",
+ "BriefDescription": "Pipeline flush from other event"
+ },
+ {
+ "EventName": "INTEGER_MULTIPLICATION_INTERLOCK",
+ "EventCode": "0x0020001",
+ "BriefDescription": "Integer multiplication interlock"
+ },
+ {
+ "EventName": "FP_INTERLOCK",
+ "EventCode": "0x0040001",
+ "BriefDescription": "Floating-point interlock"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
index 2dd8dafff2ef..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
- },
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
index db286f19e7b6..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ }
]
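
The crypto counters above come in a fixed pattern: the *_FUNCTIONS events count operations issued by this CPU, the *_CYCLES events count coprocessor-busy cycles spent on them, and the *_BLOCKED_* variants count work stalled because the shared coprocessor was serving another CPU. As a rough illustration (not part of the patch), the Python sketch below derives a contention ratio from such counts; the numbers and the perf invocation in the comment are assumptions, not taken from this diff.

# Hypothetical readings for the AES counter family defined above, e.g. from
# something like "perf stat -e AES_FUNCTIONS,AES_BLOCKED_FUNCTIONS,..." on an
# s390 system (event names as in these JSON files; values invented).
counts = {
    "AES_FUNCTIONS": 120_000,
    "AES_BLOCKED_FUNCTIONS": 3_000,
    "AES_CYCLES": 9_600_000,
    "AES_BLOCKED_CYCLES": 450_000,
}

# Share of AES requests that found the shared DEA/AES coprocessor busy with
# work issued by another CPU.
blocked_ratio = counts["AES_BLOCKED_FUNCTIONS"] / counts["AES_FUNCTIONS"]

# Average coprocessor-busy cycles per AES function issued by this CPU.
cycles_per_function = counts["AES_CYCLES"] / counts["AES_FUNCTIONS"]

print(f"blocked: {blocked_ratio:.1%}, cycles/function: {cycles_per_function:.1f}")
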
diff --git a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
index b6b7f29ca831..bf6a9811e014 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z10/extended.json
@@ -4,125 +4,125 @@
"EventCode": "128",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from the Level-2 (L1.5) cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from the Level-2 (L1.5) cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "L1I_L3_LOCAL_WRITES",
"BriefDescription": "L1I L3 Local Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the installed cache line was sourced from the Level-3 cache that is on the same book as the Instruction cache (Local L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1D_L3_LOCAL_WRITES",
"BriefDescription": "L1D L3 Local Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installtion cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was source from the Level-3 cache that is on the same book as the Data cache (Local L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1I_L3_REMOTE_WRITES",
"BriefDescription": "L1I L3 Remote Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Instruction cache (Remote L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L3_REMOTE_WRITES",
"BriefDescription": "L1D L3 Remote Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from a Level-3 cache that is not on the same book as the Data cache (Remote L2 cache)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache where the installed cache line was sourced from memory that is attached to the s ame book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_CACHELINE_INVALIDATES",
"BriefDescription": "L1I Cacheline Invalidates",
- "PublicDescription": "A cache line in the Level-1 I-Cache has been invalidated by a store on the same CPU as the Level-1 I-Cache"
+ "PublicDescription": "A cache line in the Level-1 Instruction Cache has been invalidated by a store on the same CPU as the Level-1 Instruction Cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written into the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
- "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
+ "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "DTLB1_MISSES",
"BriefDescription": "DTLB1 Misses",
- "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress"
+ "PublicDescription": "Level-1 Data TLB miss in progress. Incremented by one for every cycle an DTLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
- "PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache"
- },
+ "PublicDescription": "Incremented by one for every store sent to Level-2 (L1.5) cache."
+ }
]
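
One subtlety in the z10 events above: ITLB1_MISSES and DTLB1_MISSES count cycles with a miss in progress, not the number of misses, while ITLB1_WRITES and DTLB1_WRITES count translation entries installed. Dividing the two gives a rough average miss-handling time. A minimal sketch with invented counter values (assumptions for illustration only, simplified by assuming each miss installs exactly one TLB1 entry):

# Invented sample counts for the TLB1 events defined in this file.
itlb1_miss_cycles = 1_800_000   # ITLB1_MISSES: cycles an ITLB1 miss was in progress
itlb1_writes = 60_000           # ITLB1_WRITES: entries written into the ITLB1
dtlb1_miss_cycles = 5_400_000   # DTLB1_MISSES: cycles a DTLB1 miss was in progress
dtlb1_writes = 150_000          # DTLB1_WRITES: entries written into the DTLB1

# Approximate average cycles spent resolving one TLB1 miss.
avg_itlb1_miss = itlb1_miss_cycles / itlb1_writes
avg_dtlb1_miss = dtlb1_miss_cycles / dtlb1_writes
print(f"~{avg_itlb1_miss:.0f} cycles per ITLB1 miss, ~{avg_dtlb1_miss:.0f} per DTLB1 miss")
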
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
index 2dd8dafff2ef..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
- },
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
index db286f19e7b6..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
index 5da8296b667e..99c1b93a7e36 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/extended.json
@@ -11,7 +11,7 @@
"EventCode": "129",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
@@ -25,7 +25,7 @@
"EventCode": "131",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
@@ -39,63 +39,63 @@
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB1_MISSES",
"BriefDescription": "ITLB1 Misses",
- "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress"
+ "PublicDescription": "Level-1 Instruction TLB miss in progress. Incremented by one for every cycle an ITLB1 miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Combined Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
@@ -109,273 +109,273 @@
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Node L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Node L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Same-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Node Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Node memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1D_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1D_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1D_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONNODE_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Node L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONNODE_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Node L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Node Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_ONDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Same-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Same-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Far-Column L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Far-Column Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "176",
"EventName": "L1I_ONNODE_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Node Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Node memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "L1I_ONDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "L1I_OFFDRAWER_MEM_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "L1I_ONCHIP_MEM_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "218",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "219",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "220",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
@@ -390,5 +390,5 @@
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
"PublicDescription": "Cycle count with two threads active"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json
index 1a0034f79f73..b941a7212a4d 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z13/transaction.json
@@ -3,5 +3,75 @@
"BriefDescription": "Transaction count",
"MetricName": "transaction",
"MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ },
+ {
+ "BriefDescription": "Cycles per Instruction",
+ "MetricName": "cpi",
+ "MetricExpr": "CPU_CYCLES / INSTRUCTIONS"
+ },
+ {
+ "BriefDescription": "Problem State Instruction Ratio",
+ "MetricName": "prbstate",
+ "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100"
+ },
+ {
+ "BriefDescription": "Level One Miss per 100 Instructions",
+ "MetricName": "l1mp",
+ "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 2 cache",
+ "MetricName": "l2p",
+ "MetricExpr": "((L1D_L2D_SOURCED_WRITES + L1I_L2I_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 3 on same chip cache",
+ "MetricName": "l3p",
+ "MetricExpr": "((L1D_ONCHIP_L3_SOURCED_WRITES + L1D_ONCHIP_L3_SOURCED_WRITES_IV + L1I_ONCHIP_L3_SOURCED_WRITES + L1I_ONCHIP_L3_SOURCED_WRITES_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Local cache on same book",
+ "MetricName": "l4lp",
+ "MetricExpr": "((L1D_ONNODE_L4_SOURCED_WRITES + L1D_ONNODE_L3_SOURCED_WRITES_IV + L1D_ONNODE_L3_SOURCED_WRITES + L1I_ONNODE_L4_SOURCED_WRITES + L1I_ONNODE_L3_SOURCED_WRITES_IV + L1I_ONNODE_L3_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Remote cache on different book",
+ "MetricName": "l4rp",
+ "MetricExpr": "((L1D_ONDRAWER_L4_SOURCED_WRITES + L1D_ONDRAWER_L3_SOURCED_WRITES_IV + L1D_ONDRAWER_L3_SOURCED_WRITES + L1D_OFFDRAWER_SCOL_L4_SOURCED_WRITES + L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV + L1D_OFFDRAWER_SCOL_L3_SOURCED_WRITES + L1D_OFFDRAWER_FCOL_L4_SOURCED_WRITES + L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV + L1D_OFFDRAWER_FCOL_L3_SOURCED_WRITES + L1I_ONDRAWER_L4_SOURCED_WRITES + L1I_ONDRAWER_L3_SOURCED_WRITES_IV + L1I_ONDRAWER_L3_SOURCED_WRITES + L1I_OFFDRAWER_SCOL_L4_SOURCED_WRITES + L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES_IV + L1I_OFFDRAWER_SCOL_L3_SOURCED_WRITES + L1I_OFFDRAWER_FCOL_L4_SOURCED_WRITES + L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES_IV + L1I_OFFDRAWER_FCOL_L3_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from memory",
+ "MetricName": "memp",
+ "MetricExpr": "((L1D_ONNODE_MEM_SOURCED_WRITES + L1D_ONDRAWER_MEM_SOURCED_WRITES + L1D_OFFDRAWER_MEM_SOURCED_WRITES + L1D_ONCHIP_MEM_SOURCED_WRITES + L1I_ONNODE_MEM_SOURCED_WRITES + L1I_ONDRAWER_MEM_SOURCED_WRITES + L1I_OFFDRAWER_MEM_SOURCED_WRITES + L1I_ONCHIP_MEM_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Cycles per Instructions from Finite cache/memory",
+ "MetricName": "finite_cpi",
+ "MetricExpr": "L1C_TLB1_MISSES / INSTRUCTIONS"
+ },
+ {
+ "BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
+ "MetricName": "est_cpi",
+ "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB1_MISSES / INSTRUCTIONS)"
+ },
+ {
+ "BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
+ "MetricName": "scpl1m",
+ "MetricExpr": "L1C_TLB1_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES)"
+ },
+ {
+ "BriefDescription": "Estimated TLB CPU percentage of Total CPU",
+ "MetricName": "tlb_percent",
+ "MetricExpr": "((DTLB1_MISSES + ITLB1_MISSES) / CPU_CYCLES) * (L1C_TLB1_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100"
+ },
+ {
+ "BriefDescription": "Estimated Cycles per TLB Miss",
+ "MetricName": "tlb_miss",
+ "MetricExpr": "((DTLB1_MISSES + ITLB1_MISSES) / (DTLB1_WRITES + ITLB1_WRITES)) * (L1C_TLB1_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES))"
+ },
+ {
+ "BriefDescription": "Page Table Entry misses",
+ "MetricName": "pte_miss",
+ "MetricExpr": "(TLB2_PTE_WRITES / (DTLB1_WRITES + ITLB1_WRITES)) * 100"
}
]
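
The metrics added to transaction.json above are plain arithmetic over counters from the basic and extended counter sets; a sufficiently recent perf can evaluate them by name (for example with perf stat -M cpi), but each MetricExpr reduces to a simple ratio. A hedged sketch of how three of them evaluate, using made-up raw counts (the values are assumptions; only the formulas come from the JSON):

# Made-up raw counter readings; the key names match the events referenced by
# the MetricExpr strings in this file.
c = {
    "CPU_CYCLES": 2_000_000_000,
    "INSTRUCTIONS": 1_600_000_000,
    "L1I_DIR_WRITES": 4_000_000,
    "L1D_DIR_WRITES": 12_000_000,
    "TLB2_PTE_WRITES": 300_000,
    "DTLB1_WRITES": 900_000,
    "ITLB1_WRITES": 250_000,
}

# "cpi": CPU_CYCLES / INSTRUCTIONS
cpi = c["CPU_CYCLES"] / c["INSTRUCTIONS"]

# "l1mp": level-1 directory writes (a proxy for L1 misses) per 100 instructions
l1mp = (c["L1I_DIR_WRITES"] + c["L1D_DIR_WRITES"]) / c["INSTRUCTIONS"] * 100

# "pte_miss": share of TLB1 writes that required a Level-2 TLB page-table-entry write
pte_miss = c["TLB2_PTE_WRITES"] / (c["DTLB1_WRITES"] + c["ITLB1_WRITES"]) * 100

print(f"cpi={cpi:.2f}  l1mp={l1mp:.2f}  pte_miss={pte_miss:.1f}%")
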
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
index 17fb5241928b..1023d47028ce 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/basic.json
@@ -3,56 +3,56 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
- },
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
index db286f19e7b6..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
index 89e070727e1b..ad40cc4f9727 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
@@ -4,357 +4,357 @@
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB2_WRITES",
"BriefDescription": "DTLB2 Writes",
- "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB2_MISSES",
"BriefDescription": "DTLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB2_HPAGE_WRITES",
"BriefDescription": "DTLB2 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB2_GPAGE_WRITES",
"BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
- "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB2_WRITES",
"BriefDescription": "ITLB2 Writes",
- "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB2_MISSES",
"BriefDescription": "ITLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_ENGINES_BUSY",
"BriefDescription": "TLB2 Engines Busy",
- "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB2_MISSES",
"BriefDescription": "L1C TLB2 Misses",
- "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
"BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "224",
"EventName": "BCD_DFP_EXECUTION_SLOTS",
"BriefDescription": "BCD DFP Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
},
{
"Unit": "CPU-M-CF",
"EventCode": "225",
"EventName": "VX_BCD_EXECUTION_SLOTS",
"BriefDescription": "VX BCD Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG."
},
{
"Unit": "CPU-M-CF",
"EventCode": "226",
"EventName": "DECIMAL_INSTRUCTIONS",
"BriefDescription": "Decimal Instructions",
- "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+ "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
},
{
"Unit": "CPU-M-CF",
"EventCode": "232",
"EventName": "LAST_HOST_TRANSLATIONS",
"BriefDescription": "Last host translation done",
- "PublicDescription": "Last Host Translation done"
+ "PublicDescription": "Last Host Translation done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "243",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "244",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "245",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
@@ -369,5 +369,5 @@
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
"PublicDescription": "Cycle count with two threads active"
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json
index 1a0034f79f73..ce814ea93396 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z14/transaction.json
@@ -3,5 +3,70 @@
"BriefDescription": "Transaction count",
"MetricName": "transaction",
"MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ },
+ {
+ "BriefDescription": "Cycles per Instruction",
+ "MetricName": "cpi",
+ "MetricExpr": "CPU_CYCLES / INSTRUCTIONS"
+ },
+ {
+ "BriefDescription": "Problem State Instruction Ratio",
+ "MetricName": "prbstate",
+ "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100"
+ },
+ {
+ "BriefDescription": "Level One Miss per 100 Instructions",
+ "MetricName": "l1mp",
+ "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 2 cache",
+ "MetricName": "l2p",
+ "MetricExpr": "((L1D_L2D_SOURCED_WRITES + L1I_L2I_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 3 on same chip cache",
+ "MetricName": "l3p",
+ "MetricExpr": "((L1D_ONCHIP_L3_SOURCED_WRITES + L1D_ONCHIP_L3_SOURCED_WRITES_IV + L1I_ONCHIP_L3_SOURCED_WRITES + L1I_ONCHIP_L3_SOURCED_WRITES_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Local cache on same book",
+ "MetricName": "l4lp",
+ "MetricExpr": "((L1D_ONCLUSTER_L3_SOURCED_WRITES + L1D_ONCLUSTER_L3_SOURCED_WRITES_IV + L1D_ONDRAWER_L4_SOURCED_WRITES + L1I_ONCLUSTER_L3_SOURCED_WRITES + L1I_ONCLUSTER_L3_SOURCED_WRITES_IV + L1I_ONDRAWER_L4_SOURCED_WRITES + L1D_OFFCLUSTER_L3_SOURCED_WRITES + L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV + L1D_ONCHIP_L3_SOURCED_WRITES_RO + L1I_OFFCLUSTER_L3_SOURCED_WRITES + L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Remote cache on different book",
+ "MetricName": "l4rp",
+ "MetricExpr": "((L1D_OFFDRAWER_L3_SOURCED_WRITES + L1D_OFFDRAWER_L3_SOURCED_WRITES_IV + L1D_OFFDRAWER_L4_SOURCED_WRITES + L1I_OFFDRAWER_L3_SOURCED_WRITES + L1I_OFFDRAWER_L3_SOURCED_WRITES_IV + L1I_OFFDRAWER_L4_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from memory",
+ "MetricName": "memp",
+ "MetricExpr": "((L1D_ONCHIP_MEMORY_SOURCED_WRITES + L1D_ONCLUSTER_MEMORY_SOURCED_WRITES + L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES + L1D_OFFDRAWER_MEMORY_SOURCED_WRITES + L1I_ONCHIP_MEMORY_SOURCED_WRITES + L1I_ONCLUSTER_MEMORY_SOURCED_WRITES + L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES + L1I_OFFDRAWER_MEMORY_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Cycles per Instructions from Finite cache/memory",
+ "MetricName": "finite_cpi",
+ "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS"
+ },
+ {
+ "BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
+ "MetricName": "est_cpi",
+ "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS)"
+ },
+ {
+ "BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
+ "MetricName": "scpl1m",
+ "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES)"
+ },
+ {
+ "BriefDescription": "Estimated TLB CPU percentage of Total CPU",
+ "MetricName": "tlb_percent",
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100"
+ },
+ {
+ "BriefDescription": "Estimated Cycles per TLB Miss",
+ "MetricName": "tlb_miss",
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES))"
}
]
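The MetricExpr strings added above are plain arithmetic over the raw counter names defined in basic.json and extended.json. A minimal sketch, with hypothetical counter values chosen only for illustration (not measured data), of how the cpi and l1mp metrics evaluate:

    # Illustrative only: hypothetical counter readings, not measured data.
    counters = {
        "CPU_CYCLES": 1_200_000_000,
        "INSTRUCTIONS": 800_000_000,
        "L1I_DIR_WRITES": 3_000_000,
        "L1D_DIR_WRITES": 9_000_000,
    }

    # cpi: CPU_CYCLES / INSTRUCTIONS
    cpi = counters["CPU_CYCLES"] / counters["INSTRUCTIONS"]

    # l1mp: ((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100
    l1mp = ((counters["L1I_DIR_WRITES"] + counters["L1D_DIR_WRITES"])
            / counters["INSTRUCTIONS"]) * 100

    print(f"cpi  = {cpi:.2f}")    # 1.50 cycles per instruction
    print(f"l1mp = {l1mp:.2f}")   # 1.50 level-1 misses per 100 instructions

The remaining metrics follow the same pattern: each is a ratio or percentage built from the directory-write, penalty-cycle and sourced-write counters defined for this CPU measurement facility.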
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
index 17fb5241928b..1023d47028ce 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
@@ -3,56 +3,56 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
- },
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
deleted file mode 100644
index db286f19e7b6..000000000000
--- a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
+++ /dev/null
@@ -1,114 +0,0 @@
-[
- {
- "Unit": "CPU-M-CF",
- "EventCode": "64",
- "EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "65",
- "EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "66",
- "EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "67",
- "EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "68",
- "EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "69",
- "EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "70",
- "EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "71",
- "EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "72",
- "EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "73",
- "EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "74",
- "EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "75",
- "EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "76",
- "EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "77",
- "EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "78",
- "EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
- {
- "Unit": "CPU-M-CF",
- "EventCode": "79",
- "EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
-]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
index c998e4f1d1d2..8b4380b8e489 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
@@ -1,6 +1,118 @@
[
{
"Unit": "CPU-M-CF",
+ "EventCode": "64",
+ "EventName": "PRNG_FUNCTIONS",
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "65",
+ "EventName": "PRNG_CYCLES",
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "66",
+ "EventName": "PRNG_BLOCKED_FUNCTIONS",
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "67",
+ "EventName": "PRNG_BLOCKED_CYCLES",
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "68",
+ "EventName": "SHA_FUNCTIONS",
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "69",
+ "EventName": "SHA_CYCLES",
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "70",
+ "EventName": "SHA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "71",
+ "EventName": "SHA_BLOCKED_CYCLES",
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "72",
+ "EventName": "DEA_FUNCTIONS",
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "73",
+ "EventName": "DEA_CYCLES",
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "74",
+ "EventName": "DEA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "75",
+ "EventName": "DEA_BLOCKED_CYCLES",
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "76",
+ "EventName": "AES_FUNCTIONS",
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "77",
+ "EventName": "AES_CYCLES",
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "78",
+ "EventName": "AES_BLOCKED_FUNCTIONS",
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "79",
+ "EventName": "AES_BLOCKED_CYCLES",
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
"EventCode": "80",
"EventName": "ECC_FUNCTION_COUNT",
"BriefDescription": "ECC Function Count",
@@ -26,5 +138,5 @@
"EventName": "ECC_BLOCKED_CYCLES_COUNT",
"BriefDescription": "ECC Blocked Cycles Count",
"PublicDescription": "This counter counts the total number of CPU cycles blocked for the elliptic-curve cryptography (ECC) functions issued by the CPU because the ECC coprocessor is busy performing a function issued by another CPU."
- },
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
index 2df2e231e9ee..9c691c391086 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
@@ -4,357 +4,357 @@
"EventCode": "128",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "DTLB2_WRITES",
"BriefDescription": "DTLB2 Writes",
- "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "130",
"EventName": "DTLB2_MISSES",
"BriefDescription": "DTLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "DTLB2_HPAGE_WRITES",
"BriefDescription": "DTLB2 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page"
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "DTLB2_GPAGE_WRITES",
"BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
- "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "ITLB2_WRITES",
"BriefDescription": "ITLB2 Writes",
- "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "ITLB2_MISSES",
"BriefDescription": "ITLB2 Misses",
- "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on prior machines."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "TLB2_ENGINES_BUSY",
"BriefDescription": "TLB2 Engines Busy",
- "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1C_TLB2_MISSES",
"BriefDescription": "L1C TLB2 Misses",
- "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
"BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "162",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "163",
"EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "164",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "165",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "166",
"EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I On-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "167",
"EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "168",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "169",
"EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "170",
"EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "171",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "172",
"EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
},
{
"Unit": "CPU-M-CF",
"EventCode": "173",
"EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "174",
"EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "175",
"EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "224",
"EventName": "BCD_DFP_EXECUTION_SLOTS",
"BriefDescription": "BCD DFP Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
},
{
"Unit": "CPU-M-CF",
"EventCode": "225",
"EventName": "VX_BCD_EXECUTION_SLOTS",
"BriefDescription": "VX BCD Execution Slots",
- "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG."
},
{
"Unit": "CPU-M-CF",
"EventCode": "226",
"EventName": "DECIMAL_INSTRUCTIONS",
"BriefDescription": "Decimal Instructions",
- "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+ "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
},
{
"Unit": "CPU-M-CF",
"EventCode": "232",
"EventName": "LAST_HOST_TRANSLATIONS",
"BriefDescription": "Last host translation done",
- "PublicDescription": "Last Host Translation done"
+ "PublicDescription": "Last Host Translation done."
},
{
"Unit": "CPU-M-CF",
"EventCode": "243",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "244",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "245",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
@@ -374,15 +374,15 @@
"Unit": "CPU-M-CF",
"EventCode": "264",
"EventName": "DFLT_CC",
- "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed",
+ "BriefDescription": "Increments DEFLATE CONVERSION CALL",
"PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed"
},
{
"Unit": "CPU-M-CF",
"EventCode": "265",
- "EventName": "DFLT_CCERROR",
- "BriefDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2",
- "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2"
+ "EventName": "DFLT_CCFINISH",
+ "BriefDescription": "Increments completed DEFLATE CONVERSION CALL",
+ "PublicDescription": " Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2 complete. "
},
{
"Unit": "CPU-M-CF",
@@ -397,5 +397,5 @@
"EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
"BriefDescription": "Cycle count with two threads active",
"PublicDescription": "Cycle count with two threads active"
- },
+ }
]
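
The MT_DIAG_CYCLES_ONE_THR_ACTIVE and MT_DIAG_CYCLES_TWO_THR_ACTIVE counters above together describe how often one or both SMT threads were running. As an illustrative sketch only (Python, not a metric shipped in these files), the average number of active threads over the measured interval can be estimated as:

    def avg_active_threads(one_thr_cycles: int, two_thr_cycles: int) -> float:
        """Cycle-weighted average of active threads; 0.0 if nothing was counted."""
        total = one_thr_cycles + two_thr_cycles
        return (one_thr_cycles + 2 * two_thr_cycles) / total if total else 0.0

    # Hypothetical counter readings, not real perf output.
    print(f"avg active threads: {avg_active_threads(400_000, 600_000):.2f}")
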
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json
index 1a0034f79f73..ce814ea93396 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json
@@ -3,5 +3,70 @@
"BriefDescription": "Transaction count",
"MetricName": "transaction",
"MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ },
+ {
+ "BriefDescription": "Cycles per Instruction",
+ "MetricName": "cpi",
+ "MetricExpr": "CPU_CYCLES / INSTRUCTIONS"
+ },
+ {
+ "BriefDescription": "Problem State Instruction Ratio",
+ "MetricName": "prbstate",
+ "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100"
+ },
+ {
+ "BriefDescription": "Level One Miss per 100 Instructions",
+ "MetricName": "l1mp",
+ "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 2 cache",
+ "MetricName": "l2p",
+ "MetricExpr": "((L1D_L2D_SOURCED_WRITES + L1I_L2I_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 3 on same chip cache",
+ "MetricName": "l3p",
+ "MetricExpr": "((L1D_ONCHIP_L3_SOURCED_WRITES + L1D_ONCHIP_L3_SOURCED_WRITES_IV + L1I_ONCHIP_L3_SOURCED_WRITES + L1I_ONCHIP_L3_SOURCED_WRITES_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Local cache on same book",
+ "MetricName": "l4lp",
+ "MetricExpr": "((L1D_ONCLUSTER_L3_SOURCED_WRITES + L1D_ONCLUSTER_L3_SOURCED_WRITES_IV + L1D_ONDRAWER_L4_SOURCED_WRITES + L1I_ONCLUSTER_L3_SOURCED_WRITES + L1I_ONCLUSTER_L3_SOURCED_WRITES_IV + L1I_ONDRAWER_L4_SOURCED_WRITES + L1D_OFFCLUSTER_L3_SOURCED_WRITES + L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV + L1D_ONCHIP_L3_SOURCED_WRITES_RO + L1I_OFFCLUSTER_L3_SOURCED_WRITES + L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Remote cache on different book",
+ "MetricName": "l4rp",
+ "MetricExpr": "((L1D_OFFDRAWER_L3_SOURCED_WRITES + L1D_OFFDRAWER_L3_SOURCED_WRITES_IV + L1D_OFFDRAWER_L4_SOURCED_WRITES + L1I_OFFDRAWER_L3_SOURCED_WRITES + L1I_OFFDRAWER_L3_SOURCED_WRITES_IV + L1I_OFFDRAWER_L4_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from memory",
+ "MetricName": "memp",
+ "MetricExpr": "((L1D_ONCHIP_MEMORY_SOURCED_WRITES + L1D_ONCLUSTER_MEMORY_SOURCED_WRITES + L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES + L1D_OFFDRAWER_MEMORY_SOURCED_WRITES + L1I_ONCHIP_MEMORY_SOURCED_WRITES + L1I_ONCLUSTER_MEMORY_SOURCED_WRITES + L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES + L1I_OFFDRAWER_MEMORY_SOURCED_WRITES) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Cycles per Instructions from Finite cache/memory",
+ "MetricName": "finite_cpi",
+ "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS"
+ },
+ {
+ "BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
+ "MetricName": "est_cpi",
+ "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS)"
+ },
+ {
+ "BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
+ "MetricName": "scpl1m",
+ "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES)"
+ },
+ {
+ "BriefDescription": "Estimated TLB CPU percentage of Total CPU",
+ "MetricName": "tlb_percent",
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100"
+ },
+ {
+ "BriefDescription": "Estimated Cycles per TLB Miss",
+ "MetricName": "tlb_miss",
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES))"
}
]
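
The metric expressions added above are plain ratios over the raw counter values defined earlier in this patch. A minimal sketch (Python, with made-up counter readings rather than real perf output) of how the cpi, l1mp and l2p expressions reduce to simple arithmetic:

    # Hypothetical counter readings; real values would come from the CPU-MF counters.
    c = {
        "CPU_CYCLES": 1_200_000,
        "INSTRUCTIONS": 800_000,
        "L1I_DIR_WRITES": 3_000,
        "L1D_DIR_WRITES": 9_000,
        "L1D_L2D_SOURCED_WRITES": 7_000,
        "L1I_L2I_SOURCED_WRITES": 2_000,
    }

    cpi = c["CPU_CYCLES"] / c["INSTRUCTIONS"]              # Cycles per Instruction
    l1_misses = c["L1I_DIR_WRITES"] + c["L1D_DIR_WRITES"]
    l1mp = l1_misses / c["INSTRUCTIONS"] * 100             # Level-1 misses per 100 instructions
    l2p = (c["L1D_L2D_SOURCED_WRITES"] + c["L1I_L2I_SOURCED_WRITES"]) / l1_misses * 100

    print(f"cpi={cpi:.2f}  l1mp={l1mp:.2f}  l2p={l2p:.1f}%")
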
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/basic.json b/tools/perf/pmu-events/arch/s390/cf_z16/basic.json
new file mode 100644
index 000000000000..1023d47028ce
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/basic.json
@@ -0,0 +1,58 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "0",
+ "EventName": "CPU_CYCLES",
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "1",
+ "EventName": "INSTRUCTIONS",
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "2",
+ "EventName": "L1I_DIR_WRITES",
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "3",
+ "EventName": "L1I_PENALTY_CYCLES",
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "4",
+ "EventName": "L1D_DIR_WRITES",
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "5",
+ "EventName": "L1D_PENALTY_CYCLES",
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "32",
+ "EventName": "PROBLEM_STATE_CPU_CYCLES",
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "33",
+ "EventName": "PROBLEM_STATE_INSTRUCTIONS",
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
+ }
+]
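
Every entry in these JSON files uses the same five-field schema: Unit, EventCode, EventName, BriefDescription and PublicDescription. A minimal sketch (Python, assuming a local kernel source tree so the path added by this patch exists) that loads the new basic.json and indexes the events by code:

    import json

    # Path as introduced by this patch, relative to a kernel source tree (assumption).
    path = "tools/perf/pmu-events/arch/s390/cf_z16/basic.json"

    with open(path) as f:
        events = json.load(f)

    # Map numeric event codes to names, e.g. {0: 'CPU_CYCLES', 1: 'INSTRUCTIONS', ...}
    by_code = {int(e["EventCode"]): e["EventName"] for e in events}

    for code in sorted(by_code):
        print(f"{code:3d}  {by_code[code]}")
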
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json
new file mode 100644
index 000000000000..8b4380b8e489
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/crypto6.json
@@ -0,0 +1,142 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "64",
+ "EventName": "PRNG_FUNCTIONS",
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "65",
+ "EventName": "PRNG_CYCLES",
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "66",
+ "EventName": "PRNG_BLOCKED_FUNCTIONS",
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "67",
+ "EventName": "PRNG_BLOCKED_CYCLES",
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "68",
+ "EventName": "SHA_FUNCTIONS",
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "69",
+ "EventName": "SHA_CYCLES",
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "70",
+ "EventName": "SHA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "71",
+ "EventName": "SHA_BLOCKED_CYCLES",
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "72",
+ "EventName": "DEA_FUNCTIONS",
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "73",
+ "EventName": "DEA_CYCLES",
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "74",
+ "EventName": "DEA_BLOCKED_FUNCTIONS",
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "75",
+ "EventName": "DEA_BLOCKED_CYCLES",
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "76",
+ "EventName": "AES_FUNCTIONS",
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "77",
+ "EventName": "AES_CYCLES",
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "78",
+ "EventName": "AES_BLOCKED_FUNCTIONS",
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "79",
+ "EventName": "AES_BLOCKED_CYCLES",
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "80",
+ "EventName": "ECC_FUNCTION_COUNT",
+ "BriefDescription": "ECC Function Count",
+ "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "81",
+ "EventName": "ECC_CYCLES_COUNT",
+ "BriefDescription": "ECC Cycles Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the ECC coprocessor is busy performing the elliptic-curve cryptography (ECC) functions issued by the CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "82",
+ "EventName": "ECC_BLOCKED_FUNCTION_COUNT",
+ "BriefDescription": "Ecc Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the elliptic-curve cryptography (ECC) functions that are issued by the CPU and are blocked because the ECC coprocessor is busy performing a function issued by another CPU."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "83",
+ "EventName": "ECC_BLOCKED_CYCLES_COUNT",
+ "BriefDescription": "ECC Blocked Cycles Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the elliptic-curve cryptography (ECC) functions issued by the CPU because the ECC coprocessor is busy performing a function issued by another CPU."
+ }
+]
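
The crypto counters above come in matched pairs: a FUNCTIONS/CYCLES counter for work the coprocessor performs on behalf of this CPU, and a BLOCKED variant for requests stalled because the coprocessor is busy for another CPU. As a hedged illustration (Python, not a metric defined in these files), a blocked-time percentage for one such pair could be derived as:

    def blocked_pct(busy_cycles: int, blocked_cycles: int) -> float:
        """Share of coprocessor-related cycles spent blocked, as a percentage."""
        total = busy_cycles + blocked_cycles
        return 100.0 * blocked_cycles / total if total else 0.0

    # Hypothetical AES_CYCLES and AES_BLOCKED_CYCLES readings.
    print(f"AES blocked: {blocked_pct(50_000, 5_000):.1f}%")
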
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/extended.json b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
new file mode 100644
index 000000000000..c2b10ec1c6e0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/extended.json
@@ -0,0 +1,492 @@
+[
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "128",
+ "EventName": "L1D_RO_EXCL_WRITES",
+ "BriefDescription": "L1D Read-only Exclusive Writes",
+ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "129",
+ "EventName": "DTLB2_WRITES",
+ "BriefDescription": "DTLB2 Writes",
+ "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the Level-1 Data cache. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "130",
+ "EventName": "DTLB2_MISSES",
+ "BriefDescription": "DTLB2 Misses",
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle. This is a replacement for what was provided for the DTLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "131",
+ "EventName": "CRSTE_1MB_WRITES",
+ "BriefDescription": "One Megabyte CRSTE writes",
+ "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "132",
+ "EventName": "DTLB2_GPAGE_WRITES",
+ "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
+ "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "134",
+ "EventName": "ITLB2_WRITES",
+ "BriefDescription": "ITLB2 Writes",
+ "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "135",
+ "EventName": "ITLB2_MISSES",
+ "BriefDescription": "ITLB2 Misses",
+ "PublicDescription": "A TLB2 miss is in progress for a request made by the Level-1 Instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle. This is a replacement for what was provided for the ITLB on z13 and prior machines."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "137",
+ "EventName": "TLB2_PTE_WRITES",
+ "BriefDescription": "TLB2 Page Table Entry Writes",
+ "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "138",
+ "EventName": "TLB2_CRSTE_WRITES",
+ "BriefDescription": "TLB2 Combined Region and Segment Entry Writes",
+ "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "139",
+ "EventName": "TLB2_ENGINES_BUSY",
+ "BriefDescription": "TLB2 Engines Busy",
+ "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "140",
+ "EventName": "TX_C_TEND",
+ "BriefDescription": "Completed TEND instructions in constrained TX mode",
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "141",
+ "EventName": "TX_NC_TEND",
+ "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
+ "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "143",
+ "EventName": "L1C_TLB2_MISSES",
+ "BriefDescription": "L1C TLB2 Misses",
+ "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "145",
+ "EventName": "DCW_REQ",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "146",
+ "EventName": "DCW_REQ_IV",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "147",
+ "EventName": "DCW_REQ_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "148",
+ "EventName": "DCW_REQ_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the requestors Level-2 cache after using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "149",
+ "EventName": "DCW_ON_CHIP",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "150",
+ "EventName": "DCW_ON_CHIP_IV",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "151",
+ "EventName": "DCW_ON_CHIP_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache after using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "152",
+ "EventName": "DCW_ON_CHIP_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "153",
+ "EventName": "DCW_ON_MODULE",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "154",
+ "EventName": "DCW_ON_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "155",
+ "EventName": "DCW_OFF_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "156",
+ "EventName": "DCW_ON_CHIP_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Chip Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "157",
+ "EventName": "DCW_ON_MODULE_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Module Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Module memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "158",
+ "EventName": "DCW_ON_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from On-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "159",
+ "EventName": "DCW_OFF_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Data Cache from Off-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "160",
+ "EventName": "IDCW_ON_MODULE_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "161",
+ "EventName": "IDCW_ON_MODULE_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using chip horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "162",
+ "EventName": "IDCW_ON_MODULE_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Module Memory Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "163",
+ "EventName": "IDCW_ON_DRAWER_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "164",
+ "EventName": "IDCW_ON_DRAWER_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "165",
+ "EventName": "IDCW_ON_DRAWER_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from On-Drawer Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an On-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "166",
+ "EventName": "IDCW_OFF_DRAWER_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "167",
+ "EventName": "IDCW_OFF_DRAWER_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Chip Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "168",
+ "EventName": "IDCW_OFF_DRAWER_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction and Data Cache from Off-Drawer Cache with Drawer Hit",
+ "PublicDescription": "A directory write to the Level-1 Data or Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "169",
+ "EventName": "ICW_REQ",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced the requestors Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "170",
+ "EventName": "ICW_REQ_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "171",
+ "EventName": "ICW_REQ_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "172",
+ "EventName": "ICW_REQ_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the requestors Level-2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "173",
+ "EventName": "ICW_ON_CHIP",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "174",
+ "EventName": "ICW_ON_CHIP_IV",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Intervention",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an On-Chip Level-2 cache with intervention."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "175",
+ "EventName": "ICW_ON_CHIP_CHIP_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Chip HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-2 cache using chip level horizontal persistence, Chip-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "176",
+ "EventName": "ICW_ON_CHIP_DRAWER_HIT",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Cache with Drawer HP Hit",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip level 2 cache using drawer level horizontal persistence, Drawer-HP hit."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "177",
+ "EventName": "ICW_ON_MODULE",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Module Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "178",
+ "EventName": "ICW_ON_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an On-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "179",
+ "EventName": "ICW_OFF_DRAWER",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Cache",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced an Off-Drawer Level-2 cache."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "180",
+ "EventName": "ICW_ON_CHIP_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Chip Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "181",
+ "EventName": "ICW_ON_MODULE_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Module Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Module memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "182",
+ "EventName": "ICW_ON_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from On-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "183",
+ "EventName": "ICW_OFF_DRAWER_MEMORY",
+ "BriefDescription": "Directory Write Level 1 Instruction Cache from Off-Drawer Memory",
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "224",
+ "EventName": "BCD_DFP_EXECUTION_SLOTS",
+ "BriefDescription": "Binary Coded Decimal to Decimal Floating Point conversions",
+ "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "225",
+ "EventName": "VX_BCD_EXECUTION_SLOTS",
+ "BriefDescription": "Count finished vector arithmetic Binary Coded Decimal instructions",
+ "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMP, VMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOP, VCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVD, VCVDG."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "226",
+ "EventName": "DECIMAL_INSTRUCTIONS",
+ "BriefDescription": "Decimal instruction dispatched",
+ "PublicDescription": "Decimal instruction dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "232",
+ "EventName": "LAST_HOST_TRANSLATIONS",
+ "BriefDescription": "Last host translation done",
+ "PublicDescription": "Last Host Translation done"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "244",
+ "EventName": "TX_NC_TABORT",
+ "BriefDescription": "Aborted transactions in unconstrained TX mode",
+ "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "245",
+ "EventName": "TX_C_TABORT_NO_SPECIAL",
+ "BriefDescription": "Aborted transactions in constrained TX mode",
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "246",
+ "EventName": "TX_C_TABORT_SPECIAL",
+ "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "248",
+ "EventName": "DFLT_ACCESS",
+ "BriefDescription": "Cycles CPU spent obtaining access to Deflate unit",
+ "PublicDescription": "Cycles CPU spent obtaining access to Deflate unit"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "253",
+ "EventName": "DFLT_CYCLES",
+ "BriefDescription": "Cycles CPU is using Deflate unit",
+ "PublicDescription": "Cycles CPU is using Deflate unit"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "256",
+ "EventName": "SORTL",
+ "BriefDescription": "Count SORTL instructions",
+ "PublicDescription": "Increments by one for every SORT LISTS instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "265",
+ "EventName": "DFLT_CC",
+ "BriefDescription": "Increments DEFLATE CONVERSION CALL",
+ "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "266",
+ "EventName": "DFLT_CCFINISH",
+ "BriefDescription": "Increments completed DEFLATE CONVERSION CALL",
+ "PublicDescription": "Increments by one for every DEFLATE CONVERSION CALL instruction executed that ended in Condition Codes 0, 1 or 2."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "267",
+ "EventName": "NNPA_INVOCATIONS",
+ "BriefDescription": "NNPA Total invocations",
+ "PublicDescription": "Increments by one for every Neural Network Processing Assist instruction executed."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "268",
+ "EventName": "NNPA_COMPLETIONS",
+ "BriefDescription": "NNPA Total completions",
+ "PublicDescription": "Increments by one for every Neural Network Processing Assist instruction executed that ended in Condition Codes 0, 1 or 2."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "269",
+ "EventName": "NNPA_WAIT_LOCK",
+ "BriefDescription": "Cycles spent obtaining NNPA lock",
+ "PublicDescription": "Cycles CPU spent obtaining access to IBM Z Integrated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "270",
+ "EventName": "NNPA_HOLD_LOCK",
+ "BriefDescription": "Cycles spent holding NNPA lock",
+ "PublicDescription": "Cycles CPU is using IBM Z Integrated Accelerator for AI."
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "448",
+ "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
+ "BriefDescription": "Cycle count with one thread active",
+ "PublicDescription": "Cycle count with one thread active"
+ },
+ {
+ "Unit": "CPU-M-CF",
+ "EventCode": "449",
+ "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
+ "BriefDescription": "Cycle count with two threads active",
+ "PublicDescription": "Cycle count with two threads active"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json b/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json
new file mode 100644
index 000000000000..cf8563d059b9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/pai_crypto.json
@@ -0,0 +1,1101 @@
+[
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4096",
+ "EventName": "CRYPTO_ALL",
+ "BriefDescription": "CRYPTO ALL",
+ "PublicDescription": "Sums of all non zero cryptography counters"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4097",
+ "EventName": "KM_DEA",
+ "BriefDescription": "KM DEA",
+ "PublicDescription": "KM-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4098",
+ "EventName": "KM_TDEA_128",
+ "BriefDescription": "KM TDEA 128",
+ "PublicDescription": "KM-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4099",
+ "EventName": "KM_TDEA_192",
+ "BriefDescription": "KM TDEA 192",
+ "PublicDescription": "KM-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4100",
+ "EventName": "KM_ENCRYPTED_DEA",
+ "BriefDescription": "KM ENCRYPTED DEA",
+ "PublicDescription": "KM-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4101",
+ "EventName": "KM_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KM ENCRYPTED TDEA 128",
+ "PublicDescription": "KM-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4102",
+ "EventName": "KM_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KM ENCRYPTED TDEA 192",
+ "PublicDescription": "KM-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4103",
+ "EventName": "KM_AES_128",
+ "BriefDescription": "KM AES 128",
+ "PublicDescription": "KM-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4104",
+ "EventName": "KM_AES_192",
+ "BriefDescription": "KM AES 192",
+ "PublicDescription": "KM-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4105",
+ "EventName": "KM_AES_256",
+ "BriefDescription": "KM AES 256",
+ "PublicDescription": "KM-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4106",
+ "EventName": "KM_ENCRYPTED_AES_128",
+ "BriefDescription": "KM ENCRYPTED AES 128",
+ "PublicDescription": "KM-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4107",
+ "EventName": "KM_ENCRYPTED_AES_192",
+ "BriefDescription": "KM ENCRYPTED AES 192",
+ "PublicDescription": "KM-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4108",
+ "EventName": "KM_ENCRYPTED_AES_256",
+ "BriefDescription": "KM ENCRYPTED AES 256",
+ "PublicDescription": "KM-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4109",
+ "EventName": "KM_XTS_AES_128",
+ "BriefDescription": "KM XTS AES 128",
+ "PublicDescription": "KM-XTS-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4110",
+ "EventName": "KM_XTS_AES_256",
+ "BriefDescription": "KM XTS AES 256",
+ "PublicDescription": "KM-XTS-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4111",
+ "EventName": "KM_XTS_ENCRYPTED_AES_128",
+ "BriefDescription": "KM XTS ENCRYPTED AES 128",
+ "PublicDescription": "KM-XTS-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4112",
+ "EventName": "KM_XTS_ENCRYPTED_AES_256",
+ "BriefDescription": "KM XTS ENCRYPTED AES 256",
+ "PublicDescription": "KM-XTS-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4113",
+ "EventName": "KMC_DEA",
+ "BriefDescription": "KMC DEA",
+ "PublicDescription": "KMC-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4114",
+ "EventName": "KMC_TDEA_128",
+ "BriefDescription": "KMC TDEA 128",
+ "PublicDescription": "KMC-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4115",
+ "EventName": "KMC_TDEA_192",
+ "BriefDescription": "KMC TDEA 192",
+ "PublicDescription": "KMC-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4116",
+ "EventName": "KMC_ENCRYPTED_DEA",
+ "BriefDescription": "KMC ENCRYPTED DEA",
+ "PublicDescription": "KMC-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4117",
+ "EventName": "KMC_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMC ENCRYPTED TDEA 128",
+ "PublicDescription": "KMC-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4118",
+ "EventName": "KMC_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMC ENCRYPTED TDEA 192",
+ "PublicDescription": "KMC-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4119",
+ "EventName": "KMC_AES_128",
+ "BriefDescription": "KMC AES 128",
+ "PublicDescription": "KMC-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4120",
+ "EventName": "KMC_AES_192",
+ "BriefDescription": "KMC AES 192",
+ "PublicDescription": "KMC-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4121",
+ "EventName": "KMC_AES_256",
+ "BriefDescription": "KMC AES 256",
+ "PublicDescription": "KMC-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4122",
+ "EventName": "KMC_ENCRYPTED_AES_128",
+ "BriefDescription": "KMC ENCRYPTED AES 128",
+ "PublicDescription": "KMC-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4123",
+ "EventName": "KMC_ENCRYPTED_AES_192",
+ "BriefDescription": "KMC ENCRYPTED AES 192",
+ "PublicDescription": "KMC-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4124",
+ "EventName": "KMC_ENCRYPTED_AES_256",
+ "BriefDescription": "KMC ENCRYPTED AES 256",
+ "PublicDescription": "KMC-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4125",
+ "EventName": "KMC_PRNG",
+ "BriefDescription": "KMC PRNG",
+ "PublicDescription": "KMC-PRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4126",
+ "EventName": "KMA_GCM_AES_128",
+ "BriefDescription": "KMA GCM AES 128",
+ "PublicDescription": "KMA-GCM-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4127",
+ "EventName": "KMA_GCM_AES_192",
+ "BriefDescription": "KMA GCM AES 192",
+ "PublicDescription": "KMA-GCM-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4128",
+ "EventName": "KMA_GCM_AES_256",
+ "BriefDescription": "KMA GCM AES 256",
+ "PublicDescription": "KMA-GCM-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4129",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_128",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 128",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4130",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_192",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 192",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4131",
+ "EventName": "KMA_GCM_ENCRYPTED_AES_256",
+ "BriefDescription": "KMA GCM ENCRYPTED AES 256",
+ "PublicDescription": "KMA-GCM-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4132",
+ "EventName": "KMF_DEA",
+ "BriefDescription": "KMF DEA",
+ "PublicDescription": "KMF-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4133",
+ "EventName": "KMF_TDEA_128",
+ "BriefDescription": "KMF TDEA 128",
+ "PublicDescription": "KMF-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4134",
+ "EventName": "KMF_TDEA_192",
+ "BriefDescription": "KMF TDEA 192",
+ "PublicDescription": "KMF-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4135",
+ "EventName": "KMF_ENCRYPTED_DEA",
+ "BriefDescription": "KMF ENCRYPTED DEA",
+ "PublicDescription": "KMF-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4136",
+ "EventName": "KMF_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMF ENCRYPTED TDEA 128",
+ "PublicDescription": "KMF-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4137",
+ "EventName": "KMF_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMF ENCRYPTED TDEA 192",
+ "PublicDescription": "KMF-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4138",
+ "EventName": "KMF_AES_128",
+ "BriefDescription": "KMF AES 128",
+ "PublicDescription": "KMF-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4139",
+ "EventName": "KMF_AES_192",
+ "BriefDescription": "KMF AES 192",
+ "PublicDescription": "KMF-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4140",
+ "EventName": "KMF_AES_256",
+ "BriefDescription": "KMF AES 256",
+ "PublicDescription": "KMF-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4141",
+ "EventName": "KMF_ENCRYPTED_AES_128",
+ "BriefDescription": "KMF ENCRYPTED AES 128",
+ "PublicDescription": "KMF-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4142",
+ "EventName": "KMF_ENCRYPTED_AES_192",
+ "BriefDescription": "KMF ENCRYPTED AES 192",
+ "PublicDescription": "KMF-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4143",
+ "EventName": "KMF_ENCRYPTED_AES_256",
+ "BriefDescription": "KMF ENCRYPTED AES 256",
+ "PublicDescription": "KMF-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4144",
+ "EventName": "KMCTR_DEA",
+ "BriefDescription": "KMCTR DEA",
+ "PublicDescription": "KMCTR-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4145",
+ "EventName": "KMCTR_TDEA_128",
+ "BriefDescription": "KMCTR TDEA 128",
+ "PublicDescription": "KMCTR-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4146",
+ "EventName": "KMCTR_TDEA_192",
+ "BriefDescription": "KMCTR TDEA 192",
+ "PublicDescription": "KMCTR-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4147",
+ "EventName": "KMCTR_ENCRYPTED_DEA",
+ "BriefDescription": "KMCTR ENCRYPTED DEA",
+ "PublicDescription": "KMCTR-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4148",
+ "EventName": "KMCTR_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMCTR ENCRYPTED TDEA 128",
+ "PublicDescription": "KMCTR-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4149",
+ "EventName": "KMCTR_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMCTR ENCRYPTED TDEA 192",
+ "PublicDescription": "KMCTR-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4150",
+ "EventName": "KMCTR_AES_128",
+ "BriefDescription": "KMCTR AES 128",
+ "PublicDescription": "KMCTR-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4151",
+ "EventName": "KMCTR_AES_192",
+ "BriefDescription": "KMCTR AES 192",
+ "PublicDescription": "KMCTR-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4152",
+ "EventName": "KMCTR_AES_256",
+ "BriefDescription": "KMCTR AES 256",
+ "PublicDescription": "KMCTR-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4153",
+ "EventName": "KMCTR_ENCRYPTED_AES_128",
+ "BriefDescription": "KMCTR ENCRYPTED AES 128",
+ "PublicDescription": "KMCTR-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4154",
+ "EventName": "KMCTR_ENCRYPTED_AES_192",
+ "BriefDescription": "KMCTR ENCRYPTED AES 192",
+ "PublicDescription": "KMCTR-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4155",
+ "EventName": "KMCTR_ENCRYPTED_AES_256",
+ "BriefDescription": "KMCTR ENCRYPTED AES 256",
+ "PublicDescription": "KMCTR-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4156",
+ "EventName": "KMO_DEA",
+ "BriefDescription": "KMO DEA",
+ "PublicDescription": "KMO-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4157",
+ "EventName": "KMO_TDEA_128",
+ "BriefDescription": "KMO TDEA 128",
+ "PublicDescription": "KMO-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4158",
+ "EventName": "KMO_TDEA_192",
+ "BriefDescription": "KMO TDEA 192",
+ "PublicDescription": "KMO-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4159",
+ "EventName": "KMO_ENCRYPTED_DEA",
+ "BriefDescription": "KMO ENCRYPTED DEA",
+ "PublicDescription": "KMO-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4160",
+ "EventName": "KMO_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMO ENCRYPTED TDEA 128",
+ "PublicDescription": "KMO-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4161",
+ "EventName": "KMO_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMO ENCRYPTED TDEA 192",
+ "PublicDescription": "KMO-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4162",
+ "EventName": "KMO_AES_128",
+ "BriefDescription": "KMO AES 128",
+ "PublicDescription": "KMO-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4163",
+ "EventName": "KMO_AES_192",
+ "BriefDescription": "KMO AES 192",
+ "PublicDescription": "KMO-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4164",
+ "EventName": "KMO_AES_256",
+ "BriefDescription": "KMO AES 256",
+ "PublicDescription": "KMO-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4165",
+ "EventName": "KMO_ENCRYPTED_AES_128",
+ "BriefDescription": "KMO ENCRYPTED AES 128",
+ "PublicDescription": "KMO-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4166",
+ "EventName": "KMO_ENCRYPTED_AES_192",
+ "BriefDescription": "KMO ENCRYPTED AES 192",
+ "PublicDescription": "KMO-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4167",
+ "EventName": "KMO_ENCRYPTED_AES_256",
+ "BriefDescription": "KMO ENCRYPTED AES 256",
+ "PublicDescription": "KMO-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4168",
+ "EventName": "KIMD_SHA_1",
+ "BriefDescription": "KIMD SHA 1",
+ "PublicDescription": "KIMD-SHA-1 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4169",
+ "EventName": "KIMD_SHA_256",
+ "BriefDescription": "KIMD SHA 256",
+ "PublicDescription": "KIMD-SHA-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4170",
+ "EventName": "KIMD_SHA_512",
+ "BriefDescription": "KIMD SHA 512",
+ "PublicDescription": "KIMD-SHA-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4171",
+ "EventName": "KIMD_SHA3_224",
+ "BriefDescription": "KIMD SHA3 224",
+ "PublicDescription": "KIMD-SHA3-224 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4172",
+ "EventName": "KIMD_SHA3_256",
+ "BriefDescription": "KIMD SHA3 256",
+ "PublicDescription": "KIMD-SHA3-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4173",
+ "EventName": "KIMD_SHA3_384",
+ "BriefDescription": "KIMD SHA3 384",
+ "PublicDescription": "KIMD-SHA3-384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4174",
+ "EventName": "KIMD_SHA3_512",
+ "BriefDescription": "KIMD SHA3 512",
+ "PublicDescription": "KIMD-SHA3-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4175",
+ "EventName": "KIMD_SHAKE_128",
+ "BriefDescription": "KIMD SHAKE 128",
+ "PublicDescription": "KIMD-SHAKE-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4176",
+ "EventName": "KIMD_SHAKE_256",
+ "BriefDescription": "KIMD SHAKE 256",
+ "PublicDescription": "KIMD-SHAKE-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4177",
+ "EventName": "KIMD_GHASH",
+ "BriefDescription": "KIMD GHASH",
+ "PublicDescription": "KIMD-GHASH function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4178",
+ "EventName": "KLMD_SHA_1",
+ "BriefDescription": "KLMD SHA 1",
+ "PublicDescription": "KLMD-SHA-1 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4179",
+ "EventName": "KLMD_SHA_256",
+ "BriefDescription": "KLMD SHA 256",
+ "PublicDescription": "KLMD-SHA-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4180",
+ "EventName": "KLMD_SHA_512",
+ "BriefDescription": "KLMD SHA 512",
+ "PublicDescription": "KLMD-SHA-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4181",
+ "EventName": "KLMD_SHA3_224",
+ "BriefDescription": "KLMD SHA3 224",
+ "PublicDescription": "KLMD-SHA3-224 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4182",
+ "EventName": "KLMD_SHA3_256",
+ "BriefDescription": "KLMD SHA3 256",
+ "PublicDescription": "KLMD-SHA3-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4183",
+ "EventName": "KLMD_SHA3_384",
+ "BriefDescription": "KLMD SHA3 384",
+ "PublicDescription": "KLMD-SHA3-384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4184",
+ "EventName": "KLMD_SHA3_512",
+ "BriefDescription": "KLMD SHA3 512",
+ "PublicDescription": "KLMD-SHA3-512 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4185",
+ "EventName": "KLMD_SHAKE_128",
+ "BriefDescription": "KLMD SHAKE 128",
+ "PublicDescription": "KLMD-SHAKE-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4186",
+ "EventName": "KLMD_SHAKE_256",
+ "BriefDescription": "KLMD SHAKE 256",
+ "PublicDescription": "KLMD-SHAKE-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4187",
+ "EventName": "KMAC_DEA",
+ "BriefDescription": "KMAC DEA",
+ "PublicDescription": "KMAC-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4188",
+ "EventName": "KMAC_TDEA_128",
+ "BriefDescription": "KMAC TDEA 128",
+ "PublicDescription": "KMAC-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4189",
+ "EventName": "KMAC_TDEA_192",
+ "BriefDescription": "KMAC TDEA 192",
+ "PublicDescription": "KMAC-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4190",
+ "EventName": "KMAC_ENCRYPTED_DEA",
+ "BriefDescription": "KMAC ENCRYPTED DEA",
+ "PublicDescription": "KMAC-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4191",
+ "EventName": "KMAC_ENCRYPTED_TDEA_128",
+ "BriefDescription": "KMAC ENCRYPTED TDEA 128",
+ "PublicDescription": "KMAC-Encrypted-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4192",
+ "EventName": "KMAC_ENCRYPTED_TDEA_192",
+ "BriefDescription": "KMAC ENCRYPTED TDEA 192",
+ "PublicDescription": "KMAC-Encrypted-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4193",
+ "EventName": "KMAC_AES_128",
+ "BriefDescription": "KMAC AES 128",
+ "PublicDescription": "KMAC-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4194",
+ "EventName": "KMAC_AES_192",
+ "BriefDescription": "KMAC AES 192",
+ "PublicDescription": "KMAC-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4195",
+ "EventName": "KMAC_AES_256",
+ "BriefDescription": "KMAC AES 256",
+ "PublicDescription": "KMAC-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4196",
+ "EventName": "KMAC_ENCRYPTED_AES_128",
+ "BriefDescription": "KMAC ENCRYPTED AES 128",
+ "PublicDescription": "KMAC-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4197",
+ "EventName": "KMAC_ENCRYPTED_AES_192",
+ "BriefDescription": "KMAC ENCRYPTED AES 192",
+ "PublicDescription": "KMAC-Encrypted-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4198",
+ "EventName": "KMAC_ENCRYPTED_AES_256",
+ "BriefDescription": "KMAC ENCRYPTED AES 256",
+ "PublicDescription": "KMAC-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4199",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING DEA",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4200",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING TDEA 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-TDEA-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4201",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING TDEA 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-TDEA-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4202",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED DEA",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-DEA function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4203",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA- 128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4204",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED TDEA 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-TDEA- 192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4205",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4206",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4207",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING AES 256",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4208",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 128",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4209",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 192",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 192 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4210",
+ "EventName": "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
+ "BriefDescription": "PCC COMPUTE LAST BLOCK CMAC USING ENCRYPTED AES 256A",
+ "PublicDescription": "PCC-Compute-Last-Block-CMAC-Using-Encrypted-AES- 256A function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4211",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING AES 128",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4212",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING AES 256",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4213",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING ENCRYPTED AES 128",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-Encrypted-AES-128 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4214",
+ "EventName": "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
+ "BriefDescription": "PCC COMPUTE XTS PARAMETER USING ENCRYPTED AES 256",
+ "PublicDescription": "PCC-Compute-XTS-Parameter-Using-Encrypted-AES-256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4215",
+ "EventName": "PCC_SCALAR_MULTIPLY_P256",
+ "BriefDescription": "PCC SCALAR MULTIPLY P256",
+ "PublicDescription": "PCC-Scalar-Multiply-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4216",
+ "EventName": "PCC_SCALAR_MULTIPLY_P384",
+ "BriefDescription": "PCC SCALAR MULTIPLY P384",
+ "PublicDescription": "PCC-Scalar-Multiply-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4217",
+ "EventName": "PCC_SCALAR_MULTIPLY_P521",
+ "BriefDescription": "PCC SCALAR MULTIPLY P521",
+ "PublicDescription": "PCC-Scalar-Multiply-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4218",
+ "EventName": "PCC_SCALAR_MULTIPLY_ED25519",
+ "BriefDescription": "PCC SCALAR MULTIPLY ED25519",
+ "PublicDescription": "PCC-Scalar-Multiply-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4219",
+ "EventName": "PCC_SCALAR_MULTIPLY_ED448",
+ "BriefDescription": "PCC SCALAR MULTIPLY ED448",
+ "PublicDescription": "PCC-Scalar-Multiply-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4220",
+ "EventName": "PCC_SCALAR_MULTIPLY_X25519",
+ "BriefDescription": "PCC SCALAR MULTIPLY X25519",
+ "PublicDescription": "PCC-Scalar-Multiply-X25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4221",
+ "EventName": "PCC_SCALAR_MULTIPLY_X448",
+ "BriefDescription": "PCC SCALAR MULTIPLY X448",
+ "PublicDescription": "PCC-Scalar-Multiply-X448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4222",
+ "EventName": "PRNO_SHA_512_DRNG",
+ "BriefDescription": "PRNO SHA 512 DRNG",
+ "PublicDescription": "PRNO-SHA-512-DRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4223",
+ "EventName": "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
+ "BriefDescription": "PRNO TRNG QUERY RAW TO CONDITIONED RATIO",
+ "PublicDescription": "PRNO-TRNG-Query-Raw-to-Conditioned-Ratio function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4224",
+ "EventName": "PRNO_TRNG",
+ "BriefDescription": "PRNO TRNG",
+ "PublicDescription": "PRNO-TRNG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4225",
+ "EventName": "KDSA_ECDSA_VERIFY_P256",
+ "BriefDescription": "KDSA ECDSA VERIFY P256",
+ "PublicDescription": "KDSA-ECDSA-Verify-P256 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4226",
+ "EventName": "KDSA_ECDSA_VERIFY_P384",
+ "BriefDescription": "KDSA ECDSA VERIFY P384",
+ "PublicDescription": "KDSA-ECDSA-Verify-P384 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4227",
+ "EventName": "KDSA_ECDSA_VERIFY_P521",
+ "BriefDescription": "KDSA ECDSA VERIFY P521",
+ "PublicDescription": "KDSA-ECDSA-Verify-P521 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4228",
+ "EventName": "KDSA_ECDSA_SIGN_P256",
+ "BriefDescription": "KDSA ECDSA SIGN P256",
+ "PublicDescription": "KDSA-ECDSA-Sign-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4229",
+ "EventName": "KDSA_ECDSA_SIGN_P384",
+ "BriefDescription": "KDSA ECDSA SIGN P384",
+ "PublicDescription": "KDSA-ECDSA-Sign-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4230",
+ "EventName": "KDSA_ECDSA_SIGN_P521",
+ "BriefDescription": "KDSA ECDSA SIGN P521",
+ "PublicDescription": "KDSA-ECDSA-Sign-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4231",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P256",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P256 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4232",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P384",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P384 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4233",
+ "EventName": "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
+ "BriefDescription": "KDSA ENCRYPTED ECDSA SIGN P521",
+ "PublicDescription": "KDSA-Encrypted-ECDSA-Sign-P521 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4234",
+ "EventName": "KDSA_EDDSA_VERIFY_ED25519",
+ "BriefDescription": "KDSA EDDSA VERIFY ED25519",
+ "PublicDescription": "KDSA-EdDSA-Verify-Ed25519 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4235",
+ "EventName": "KDSA_EDDSA_VERIFY_ED448",
+ "BriefDescription": "KDSA EDDSA VERIFY ED448",
+ "PublicDescription": "KDSA-EdDSA-Verify-Ed448 function ending with CC=0 or CC=2"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4236",
+ "EventName": "KDSA_EDDSA_SIGN_ED25519",
+ "BriefDescription": "KDSA EDDSA SIGN ED25519",
+ "PublicDescription": "KDSA-EdDSA-Sign-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4237",
+ "EventName": "KDSA_EDDSA_SIGN_ED448",
+ "BriefDescription": "KDSA EDDSA SIGN ED448",
+ "PublicDescription": "KDSA-EdDSA-Sign-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4238",
+ "EventName": "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
+ "BriefDescription": "KDSA ENCRYPTED EDDSA SIGN ED25519",
+ "PublicDescription": "KDSA-Encrypted-EdDSA-Sign-Ed25519 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4239",
+ "EventName": "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
+ "BriefDescription": "KDSA ENCRYPTED EDDSA SIGN ED448",
+ "PublicDescription": "KDSA-Encrypted-EdDSA-Sign-Ed448 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4240",
+ "EventName": "PCKMO_ENCRYPT_DEA_KEY",
+ "BriefDescription": "PCKMO ENCRYPT DEA KEY",
+ "PublicDescription": "PCKMO-Encrypt-DEA-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4241",
+ "EventName": "PCKMO_ENCRYPT_TDEA_128_KEY",
+ "BriefDescription": "PCKMO ENCRYPT TDEA 128 KEY",
+ "PublicDescription": "PCKMO-Encrypt-TDEA-128-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4242",
+ "EventName": "PCKMO_ENCRYPT_TDEA_192_KEY",
+ "BriefDescription": "PCKMO ENCRYPT TDEA 192 KEY",
+ "PublicDescription": "PCKMO-Encrypt-TDEA-192-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4243",
+ "EventName": "PCKMO_ENCRYPT_AES_128_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 128 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-128-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4244",
+ "EventName": "PCKMO_ENCRYPT_AES_192_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 192 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-192-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4245",
+ "EventName": "PCKMO_ENCRYPT_AES_256_KEY",
+ "BriefDescription": "PCKMO ENCRYPT AES 256 KEY",
+ "PublicDescription": "PCKMO-Encrypt-AES-256-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4246",
+ "EventName": "PCKMO_ENCRYPT_ECC_P256_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P256 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P256-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4247",
+ "EventName": "PCKMO_ENCRYPT_ECC_P384_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P384 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P384-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4248",
+ "EventName": "PCKMO_ENCRYPT_ECC_P521_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC P521 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-P521-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4249",
+ "EventName": "PCKMO_ENCRYPT_ECC_ED25519_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC ED25519 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-Ed25519-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4250",
+ "EventName": "PCKMO_ENCRYPT_ECC_ED448_KEY",
+ "BriefDescription": "PCKMO ENCRYPT ECC ED448 KEY",
+ "PublicDescription": "PCKMO-Encrypt-ECC-Ed448-key function"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4251",
+ "EventName": "IBM_RESERVED_155",
+ "BriefDescription": "IBM RESERVED_155",
+ "PublicDescription": "Reserved for IBM use"
+ },
+ {
+ "Unit": "PAI-CRYPTO",
+ "EventCode": "4252",
+ "EventName": "IBM_RESERVED_156",
+ "BriefDescription": "IBM RESERVED_156",
+ "PublicDescription": "Reserved for IBM use"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/pai_ext.json b/tools/perf/pmu-events/arch/s390/cf_z16/pai_ext.json
new file mode 100644
index 000000000000..7ccbded95dc9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/pai_ext.json
@@ -0,0 +1,178 @@
+[
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6144",
+ "EventName": "NNPA_ALL",
+ "BriefDescription": "NNPA ALL Sum of all non zero counters",
+ "PublicDescription": "Sum of all non zero NNPA (Neural Networks Processing Assist) counters"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6145",
+ "EventName": "NNPA_ADD",
+ "BriefDescription": "NNPA ADD function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6146",
+ "EventName": "NNPA_SUB",
+ "BriefDescription": "NNPA SUB function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6147",
+ "EventName": "NNPA_MUL",
+ "BriefDescription": "NNPA MUL function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6148",
+ "EventName": "NNPA_DIV",
+ "BriefDescription": "NNPA DIV function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6149",
+ "EventName": "NNPA_MIN",
+ "BriefDescription": "NNPA MIN function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6150",
+ "EventName": "NNPA_MAX",
+ "BriefDescription": "NNPA MAX function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6151",
+ "EventName": "NNPA_LOG",
+ "BriefDescription": "NNPA LOG function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6152",
+ "EventName": "NNPA_EXP",
+ "BriefDescription": "NNPA EXP function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6153",
+ "EventName": "NNPA_IBM_RESERVED_9",
+ "BriefDescription": "Reserved for IBM use",
+ "PublicDescription": "Reserved for IBM use"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6154",
+ "EventName": "NNPA_RELU",
+ "BriefDescription": "NNPA RELU function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6155",
+ "EventName": "NNPA_TANH",
+ "BriefDescription": "NNPA TANH function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6156",
+ "EventName": "NNPA_SIGMOID",
+ "BriefDescription": "NNPA SIGMOID function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6157",
+ "EventName": "NNPA_SOFTMAX",
+ "BriefDescription": "NNPA SOFTMAX function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6158",
+ "EventName": "NNPA_BATCHNORM",
+ "BriefDescription": "NNPA BATCHNORM function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6159",
+ "EventName": "NNPA_MAXPOOL2D",
+ "BriefDescription": "NNPA MAXPOOL2D function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6160",
+ "EventName": "NNPA_AVGPOOL2D",
+ "BriefDescription": "NNPA AVGPOOL2D function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6161",
+ "EventName": "NNPA_LSTMACT",
+ "BriefDescription": "NNPA LSTMACT function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6162",
+ "EventName": "NNPA_GRUACT",
+ "BriefDescription": "NNPA GRUACT function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6163",
+ "EventName": "NNPA_CONVOLUTION",
+ "BriefDescription": "NNPA CONVOLUTION function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6164",
+ "EventName": "NNPA_MATMUL_OP",
+ "BriefDescription": "NNPA MATMUL OP function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6165",
+ "EventName": "NNPA_MATMUL_OP_BCAST23",
+ "BriefDescription": "NNPA NNPA MATMUL OP BCAST23 function ending with CC=0"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6166",
+ "EventName": "NNPA_SMALLBATCH",
+ "BriefDescription": "NNPA SMALLBATCH OP function ending with CC=0",
+ "PublicDescription": "NNPA function with conditions as described in Common Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6167",
+ "EventName": "NNPA_LARGEDIM",
+ "BriefDescription": "NNPA LARGEDIM OP function ending with CC=0",
+ "PublicDescription": "NNPA function with conditions as described in Common Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6168",
+ "EventName": "NNPA_SMALLTENSOR",
+ "BriefDescription": "NNPA SMALLTENSOR OP function ending with CC=0",
+ "PublicDescription": "NNPA function with conditions as described in Common Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6169",
+ "EventName": "NNPA_1MFRAME",
+ "BriefDescription": "NNPA 1MFRAME OP function ending with CC=0",
+ "PublicDescription": "NNPA function with conditions as described in Common Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6170",
+ "EventName": "NNPA_2GFRAME",
+ "BriefDescription": "NNPA 2GFRAME OP function ending with CC=0",
+ "PublicDescription": "NNPA function with conditions as described in Common Operation"
+ },
+ {
+ "Unit": "PAI-EXT",
+ "EventCode": "6171",
+ "EventName": "NNPA_ACCESSEXCEPT",
+ "BriefDescription": "NNPA ACCESSEXCEPT OP function ending with CC=0",
+ "PublicDescription": "NNPA function with conditions as described in Common Operation"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
new file mode 100644
index 000000000000..ec2ff78e2b5f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/s390/cf_z16/transaction.json
@@ -0,0 +1,72 @@
+[
+ {
+ "BriefDescription": "Transaction count",
+ "MetricName": "transaction",
+ "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+ },
+ {
+ "BriefDescription": "Cycles per Instruction",
+ "MetricName": "cpi",
+ "MetricExpr": "CPU_CYCLES / INSTRUCTIONS"
+ },
+ {
+ "BriefDescription": "Problem State Instruction Ratio",
+ "MetricName": "prbstate",
+ "MetricExpr": "(PROBLEM_STATE_INSTRUCTIONS / INSTRUCTIONS) * 100"
+ },
+ {
+ "BriefDescription": "Level One Miss per 100 Instructions",
+ "MetricName": "l1mp",
+ "MetricExpr": "((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 2 cache",
+ "MetricName": "l2p",
+ "MetricExpr": "((DCW_REQ + DCW_REQ_IV + ICW_REQ + ICW_REQ_IV) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 3 on same chip cache",
+ "MetricName": "l3p",
+ "MetricExpr": "((DCW_REQ_CHIP_HIT + DCW_ON_CHIP + DCW_ON_CHIP_IV + DCW_ON_CHIP_CHIP_HIT + ICW_REQ_CHIP_HIT + ICW_ON_CHIP + ICW_ON_CHIP_IV + ICW_ON_CHIP_CHIP_HIT) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Local cache on same book",
+ "MetricName": "l4lp",
+ "MetricExpr": "((DCW_REQ_DRAWER_HIT + DCW_ON_CHIP_DRAWER_HIT + DCW_ON_MODULE + DCW_ON_DRAWER + IDCW_ON_MODULE_IV + IDCW_ON_MODULE_CHIP_HIT + IDCW_ON_MODULE_DRAWER_HIT + IDCW_ON_DRAWER_IV + IDCW_ON_DRAWER_CHIP_HIT + IDCW_ON_DRAWER_DRAWER_HIT + ICW_REQ_DRAWER_HIT + ICW_ON_CHIP_DRAWER_HIT + ICW_ON_MODULE + ICW_ON_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from Level 4 Remote cache on different book",
+ "MetricName": "l4rp",
+ "MetricExpr": "((DCW_OFF_DRAWER + IDCW_OFF_DRAWER_IV + IDCW_OFF_DRAWER_CHIP_HIT + IDCW_OFF_DRAWER_DRAWER_HIT + ICW_OFF_DRAWER) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Percentage sourced from memory",
+ "MetricName": "memp",
+ "MetricExpr": "((DCW_ON_CHIP_MEMORY + DCW_ON_MODULE_MEMORY + DCW_ON_DRAWER_MEMORY + DCW_OFF_DRAWER_MEMORY + ICW_ON_CHIP_MEMORY + ICW_ON_MODULE_MEMORY + ICW_ON_DRAWER_MEMORY + ICW_OFF_DRAWER_MEMORY) / (L1I_DIR_WRITES + L1D_DIR_WRITES)) * 100"
+ },
+ {
+ "BriefDescription": "Cycles per Instructions from Finite cache/memory",
+ "MetricName": "finite_cpi",
+ "MetricExpr": "L1C_TLB2_MISSES / INSTRUCTIONS"
+ },
+ {
+ "BriefDescription": "Estimated Instruction Complexity CPI infinite Level 1",
+ "MetricName": "est_cpi",
+ "MetricExpr": "(CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS)"
+ },
+ {
+ "BriefDescription": "Estimated Sourcing Cycles per Level 1 Miss",
+ "MetricName": "scpl1m",
+ "MetricExpr": "L1C_TLB2_MISSES / (L1I_DIR_WRITES + L1D_DIR_WRITES)"
+ },
+ {
+ "BriefDescription": "Estimated TLB CPU percentage of Total CPU",
+ "MetricName": "tlb_percent",
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / CPU_CYCLES) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES)) * 100"
+ },
+ {
+ "BriefDescription": "Estimated Cycles per TLB Miss",
+ "MetricName": "tlb_miss",
+ "MetricExpr": "((DTLB2_MISSES + ITLB2_MISSES) / (DTLB2_WRITES + ITLB2_WRITES)) * (L1C_TLB2_MISSES / (L1I_PENALTY_CYCLES + L1D_PENALTY_CYCLES))"
+ }
+]
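
As a minimal sketch of how the MetricExpr formulas above combine the basic counters, the Python snippet below plugs made-up counter values into the cpi, l1mp and est_cpi expressions exactly as written; the values are purely illustrative, not measured data.

# Evaluate a few of the cf_z16 metric expressions with hypothetical
# counter values; real values come from the CPU-MF counter sets.
counters = {
    "CPU_CYCLES": 1_200_000,
    "INSTRUCTIONS": 800_000,
    "L1I_DIR_WRITES": 3_000,
    "L1D_DIR_WRITES": 9_000,
    "L1C_TLB2_MISSES": 150_000,
}

# cpi = CPU_CYCLES / INSTRUCTIONS
cpi = counters["CPU_CYCLES"] / counters["INSTRUCTIONS"]

# l1mp = ((L1I_DIR_WRITES + L1D_DIR_WRITES) / INSTRUCTIONS) * 100
l1mp = 100 * (counters["L1I_DIR_WRITES"] + counters["L1D_DIR_WRITES"]) / counters["INSTRUCTIONS"]

# est_cpi = (CPU_CYCLES / INSTRUCTIONS) - (L1C_TLB2_MISSES / INSTRUCTIONS),
# i.e. the estimated CPI with an infinite level-1 cache
est_cpi = cpi - counters["L1C_TLB2_MISSES"] / counters["INSTRUCTIONS"]

print(f"cpi={cpi:.3f}  l1mp={l1mp:.3f}  est_cpi={est_cpi:.3f}")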
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
index 2dd8dafff2ef..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
- },
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
index db286f19e7b6..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
index b7b42a870bb0..6ebbdbaf7951 100644
--- a/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_z196/extended.json
@@ -4,14 +4,14 @@
"EventCode": "128",
"EventName": "L1D_L2_SOURCED_WRITES",
"BriefDescription": "L1D L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from the Level-2 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from the Level-2 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "129",
"EventName": "L1I_L2_SOURCED_WRITES",
"BriefDescription": "L1I L2 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from the Level-2 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from the Level-2 cache."
},
{
"Unit": "CPU-M-CF",
@@ -32,139 +32,139 @@
"EventCode": "133",
"EventName": "L2C_STORES_SENT",
"BriefDescription": "L2C Stores Sent",
- "PublicDescription": "Incremented by one for every store sent to Level-2 cache"
+ "PublicDescription": "Incremented by one for every store sent to Level-2 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "134",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "136",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an On Chip Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Data Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an On Chip Level-3 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an On Chip Level-3 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 I-Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache"
- },
+ "PublicDescription": "A directory write to the Level-1 Instruction Cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
index 2dd8dafff2ef..9bd20a5f47af 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/basic.json
@@ -3,84 +3,84 @@
"Unit": "CPU-M-CF",
"EventCode": "0",
"EventName": "CPU_CYCLES",
- "BriefDescription": "CPU Cycles",
- "PublicDescription": "Cycle Count"
+ "BriefDescription": "Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "1",
"EventName": "INSTRUCTIONS",
- "BriefDescription": "Instructions",
- "PublicDescription": "Instruction Count"
+ "BriefDescription": "Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "2",
"EventName": "L1I_DIR_WRITES",
- "BriefDescription": "L1I Directory Writes",
- "PublicDescription": "Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "3",
"EventName": "L1I_PENALTY_CYCLES",
- "BriefDescription": "L1I Penalty Cycles",
- "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 instruction cache or unified cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "4",
"EventName": "L1D_DIR_WRITES",
- "BriefDescription": "L1D Directory Writes",
- "PublicDescription": "Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes."
},
{
"Unit": "CPU-M-CF",
"EventCode": "5",
"EventName": "L1D_PENALTY_CYCLES",
- "BriefDescription": "L1D Penalty Cycles",
- "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of cache penalty cycles for level-1 data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "32",
"EventName": "PROBLEM_STATE_CPU_CYCLES",
- "BriefDescription": "Problem-State CPU Cycles",
- "PublicDescription": "Problem-State Cycle Count"
+ "BriefDescription": "Problem-State Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the CPU is in the problem state, excluding the number of cycles while the CPU is in the wait state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "33",
"EventName": "PROBLEM_STATE_INSTRUCTIONS",
- "BriefDescription": "Problem-State Instructions",
- "PublicDescription": "Problem-State Instruction Count"
+ "BriefDescription": "Problem-State Instruction Count",
+ "PublicDescription": "This counter counts the total number of instructions executed by the CPU while in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "34",
"EventName": "PROBLEM_STATE_L1I_DIR_WRITES",
- "BriefDescription": "Problem-State L1I Directory Writes",
- "PublicDescription": "Problem-State Level-1 I-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 I-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 instruction-cache or unified-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "35",
"EventName": "PROBLEM_STATE_L1I_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1I Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 I-Cache Penalty Cycle Count"
+ "BriefDescription": "Level-1 I-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 instruction cache or unified cache while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "36",
"EventName": "PROBLEM_STATE_L1D_DIR_WRITES",
- "BriefDescription": "Problem-State L1D Directory Writes",
- "PublicDescription": "Problem-State Level-1 D-Cache Directory Write Count"
+ "BriefDescription": "Problem-State Level-1 D-Cache Directory Write Count",
+ "PublicDescription": "This counter counts the total number of level-1 data-cache directory writes while the CPU is in the problem state."
},
{
"Unit": "CPU-M-CF",
"EventCode": "37",
"EventName": "PROBLEM_STATE_L1D_PENALTY_CYCLES",
- "BriefDescription": "Problem-State L1D Penalty Cycles",
- "PublicDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count"
- },
+ "BriefDescription": "Problem-State Level-1 D-Cache Penalty Cycle Count",
+ "PublicDescription": "This counter counts the total number of penalty cycles for level-1 data cache while the CPU is in the problem state."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
index db286f19e7b6..a8d391ddeb8c 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/crypto.json
@@ -3,112 +3,112 @@
"Unit": "CPU-M-CF",
"EventCode": "64",
"EventName": "PRNG_FUNCTIONS",
- "BriefDescription": "PRNG Functions",
- "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "65",
"EventName": "PRNG_CYCLES",
- "BriefDescription": "PRNG Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+ "BriefDescription": "PRNG Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES/SHA coprocessor is busy performing the pseudorandom- number-generation functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "66",
"EventName": "PRNG_BLOCKED_FUNCTIONS",
- "BriefDescription": "PRNG Blocked Functions",
- "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the pseudorandom-number-generation functions that are issued by the CPU and are blocked because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "67",
"EventName": "PRNG_BLOCKED_CYCLES",
- "BriefDescription": "PRNG Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "PRNG Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the pseudorandom-number-generation functions issued by the CPU because the DEA/AES/SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "68",
"EventName": "SHA_FUNCTIONS",
- "BriefDescription": "SHA Functions",
- "PublicDescription": "Total number of SHA functions issued by the CPU"
+ "BriefDescription": "SHA Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "69",
"EventName": "SHA_CYCLES",
- "BriefDescription": "SHA Cycles",
- "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+ "BriefDescription": "SHA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "70",
"EventName": "SHA_BLOCKED_FUNCTIONS",
- "BriefDescription": "SHA Blocked Functions",
- "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "71",
"EventName": "SHA_BLOCKED_CYCLES",
- "BriefDescription": "SHA Bloced Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "SHA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "72",
"EventName": "DEA_FUNCTIONS",
- "BriefDescription": "DEA Functions",
- "PublicDescription": "Total number of the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "73",
"EventName": "DEA_CYCLES",
- "BriefDescription": "DEA Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+ "BriefDescription": "DEA Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "74",
"EventName": "DEA_BLOCKED_FUNCTIONS",
- "BriefDescription": "DEA Blocked Functions",
- "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "75",
"EventName": "DEA_BLOCKED_CYCLES",
- "BriefDescription": "DEA Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "DEA Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "76",
"EventName": "AES_FUNCTIONS",
- "BriefDescription": "AES Functions",
- "PublicDescription": "Total number of AES functions issued by the CPU"
+ "BriefDescription": "AES Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "77",
"EventName": "AES_CYCLES",
- "BriefDescription": "AES Cycles",
- "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+ "BriefDescription": "AES Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "78",
"EventName": "AES_BLOCKED_FUNCTIONS",
- "BriefDescription": "AES Blocked Functions",
- "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+ "BriefDescription": "AES Blocked Function Count",
+ "PublicDescription": "This counter counts the total number of the AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU."
},
{
"Unit": "CPU-M-CF",
"EventCode": "79",
"EventName": "AES_BLOCKED_CYCLES",
- "BriefDescription": "AES Blocked Cycles",
- "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
- },
+ "BriefDescription": "AES Blocked Cycle Count",
+ "PublicDescription": "This counter counts the total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
index 162251037219..9e765581382b 100644
--- a/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
+++ b/tools/perf/pmu-events/arch/s390/cf_zec12/extended.json
@@ -18,230 +18,230 @@
"EventCode": "130",
"EventName": "L1D_L2I_SOURCED_WRITES",
"BriefDescription": "L1D L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "131",
"EventName": "L1I_L2I_SOURCED_WRITES",
"BriefDescription": "L1I L2I Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "132",
"EventName": "L1D_L2D_SOURCED_WRITES",
"BriefDescription": "L1D L2D Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "133",
"EventName": "DTLB1_WRITES",
"BriefDescription": "DTLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer (DTLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "135",
"EventName": "L1D_LMEM_SOURCED_WRITES",
"BriefDescription": "L1D Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Data cache where the installed cache line was sourced from memory that is attached to the same book as the Data cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "137",
"EventName": "L1I_LMEM_SOURCED_WRITES",
"BriefDescription": "L1I Local Memory Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache where the installed cache line was sourced from memory that is attached to the same book as the Instruction cache (Local Memory)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "138",
"EventName": "L1D_RO_EXCL_WRITES",
"BriefDescription": "L1D Read-only Exclusive Writes",
- "PublicDescription": "A directory write to the Level-1 D-Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ "PublicDescription": "A directory write to the Level-1 Data Cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line."
},
{
"Unit": "CPU-M-CF",
"EventCode": "139",
"EventName": "DTLB1_HPAGE_WRITES",
"BriefDescription": "DTLB1 One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page"
+ "PublicDescription": "A translation entry has been written to the Level-1 Data Translation Lookaside Buffer for a one-megabyte page."
},
{
"Unit": "CPU-M-CF",
"EventCode": "140",
"EventName": "ITLB1_WRITES",
"BriefDescription": "ITLB1 Writes",
- "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer"
+ "PublicDescription": "A translation entry has been written to the Level-1 Instruction Translation Lookaside Buffer (ITLB1)."
},
{
"Unit": "CPU-M-CF",
"EventCode": "141",
"EventName": "TLB2_PTE_WRITES",
"BriefDescription": "TLB2 PTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Page Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "142",
"EventName": "TLB2_CRSTE_HPAGE_WRITES",
"BriefDescription": "TLB2 CRSTE One-Megabyte Page Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays for a one-megabyte large page translation."
},
{
"Unit": "CPU-M-CF",
"EventCode": "143",
"EventName": "TLB2_CRSTE_WRITES",
"BriefDescription": "TLB2 CRSTE Writes",
- "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays"
+ "PublicDescription": "A translation entry has been written to the Level-2 TLB Common Region Segment Table Entry arrays."
},
{
"Unit": "CPU-M-CF",
"EventCode": "144",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "145",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "146",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "147",
"EventName": "L1D_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "148",
"EventName": "L1D_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1D Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "149",
"EventName": "TX_NC_TEND",
"BriefDescription": "Completed TEND instructions in non-constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a nonconstrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "150",
"EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from a On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "151",
"EventName": "L1D_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "152",
"EventName": "L1D_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1D Off-Book L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "153",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I On-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "154",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "155",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L3 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache without intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "156",
"EventName": "L1I_ONBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I On-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "157",
"EventName": "L1I_OFFBOOK_L4_SOURCED_WRITES",
"BriefDescription": "L1I Off-Book L4 Sourced Writes",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-4 cache."
},
{
"Unit": "CPU-M-CF",
"EventCode": "158",
"EventName": "TX_C_TEND",
"BriefDescription": "Completed TEND instructions in constrained TX mode",
- "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+ "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "159",
"EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On Chip Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "160",
"EventName": "L1I_OFFCHIP_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Chip L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Chip/On Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "161",
"EventName": "L1I_OFFBOOK_L3_SOURCED_WRITES_IV",
"BriefDescription": "L1I Off-Book L3 Sourced Writes with Intervention",
- "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention"
+ "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off Book Level-3 cache with intervention."
},
{
"Unit": "CPU-M-CF",
"EventCode": "177",
"EventName": "TX_NC_TABORT",
"BriefDescription": "Aborted transactions in non-constrained TX mode",
- "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode"
+ "PublicDescription": "A transaction abort has occurred in a nonconstrained transactional-execution mode."
},
{
"Unit": "CPU-M-CF",
"EventCode": "178",
"EventName": "TX_C_TABORT_NO_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete."
},
{
"Unit": "CPU-M-CF",
"EventCode": "179",
"EventName": "TX_C_TABORT_SPECIAL",
"BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
- "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
- },
+ "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete."
+ }
]
diff --git a/tools/perf/pmu-events/arch/s390/mapfile.csv b/tools/perf/pmu-events/arch/s390/mapfile.csv
index 61641a3480e0..a918e1af77a5 100644
--- a/tools/perf/pmu-events/arch/s390/mapfile.csv
+++ b/tools/perf/pmu-events/arch/s390/mapfile.csv
@@ -5,3 +5,4 @@ Family-model,Version,Filename,EventType
^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
+^IBM.393[12].*3\.7.[[:xdigit:]]+$,3,cf_z16,core
diff --git a/tools/perf/pmu-events/arch/test/arch-std-events.json b/tools/perf/pmu-events/arch/test/arch-std-events.json
new file mode 100644
index 000000000000..43f6f729d6ae
--- /dev/null
+++ b/tools/perf/pmu-events/arch/test/arch-std-events.json
@@ -0,0 +1,8 @@
+[
+ {
+ "PublicDescription": "Attributable Level 3 cache access, read",
+ "EventCode": "0x40",
+ "EventName": "L3_CACHE_RD",
+ "BriefDescription": "L3 cache access, read"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/test/test_cpu/uncore.json b/tools/perf/pmu-events/arch/test/test_cpu/uncore.json
deleted file mode 100644
index d0a890cc814d..000000000000
--- a/tools/perf/pmu-events/arch/test/test_cpu/uncore.json
+++ /dev/null
@@ -1,21 +0,0 @@
-[
- {
- "EventCode": "0x02",
- "EventName": "uncore_hisi_ddrc.flux_wcmd",
- "BriefDescription": "DDRC write commands",
- "PublicDescription": "DDRC write commands",
- "Unit": "hisi_sccl,ddrc"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x81",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
- "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- }
-]
diff --git a/tools/perf/pmu-events/arch/test/test_cpu/branch.json b/tools/perf/pmu-events/arch/test/test_soc/cpu/branch.json
index 93ddfd8053ca..93ddfd8053ca 100644
--- a/tools/perf/pmu-events/arch/test/test_cpu/branch.json
+++ b/tools/perf/pmu-events/arch/test/test_soc/cpu/branch.json
diff --git a/tools/perf/pmu-events/arch/test/test_soc/cpu/cache.json b/tools/perf/pmu-events/arch/test/test_soc/cpu/cache.json
new file mode 100644
index 000000000000..036d0efdb2bb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/test/test_soc/cpu/cache.json
@@ -0,0 +1,5 @@
+[
+ {
+ "ArchStdEvent": "L3_CACHE_RD"
+ }
+] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/test/test_soc/cpu/metrics.json b/tools/perf/pmu-events/arch/test/test_soc/cpu/metrics.json
new file mode 100644
index 000000000000..70ec8caaaf6f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/test/test_soc/cpu/metrics.json
@@ -0,0 +1,64 @@
+[
+ {
+ "MetricExpr": "1 / IPC",
+ "MetricName": "CPI"
+ },
+ {
+ "MetricExpr": "inst_retired.any / cpu_clk_unhalted.thread",
+ "MetricName": "IPC",
+ "MetricGroup": "group1"
+ },
+ {
+ "MetricExpr": "idq_uops_not_delivered.core / (4 * (( ( cpu_clk_unhalted.thread / 2 ) * ( 1 + cpu_clk_unhalted.one_thread_active / cpu_clk_unhalted.ref_xclk ) )))",
+ "MetricName": "Frontend_Bound_SMT"
+ },
+ {
+ "MetricExpr": "l1d\\-loads\\-misses / inst_retired.any",
+ "MetricName": "dcache_miss_cpi"
+ },
+ {
+ "MetricExpr": "l1i\\-loads\\-misses / inst_retired.any",
+ "MetricName": "icache_miss_cycles"
+ },
+ {
+ "MetricExpr": "(dcache_miss_cpi + icache_miss_cycles)",
+ "MetricName": "cache_miss_cycles",
+ "MetricGroup": "group1"
+ },
+ {
+ "MetricExpr": "l2_rqsts.demand_data_rd_hit + l2_rqsts.pf_hit + l2_rqsts.rfo_hit",
+ "MetricName": "DCache_L2_All_Hits"
+ },
+ {
+ "MetricExpr": "max(l2_rqsts.all_demand_data_rd - l2_rqsts.demand_data_rd_hit, 0) + l2_rqsts.pf_miss + l2_rqsts.rfo_miss",
+ "MetricName": "DCache_L2_All_Miss"
+ },
+ {
+ "MetricExpr": "DCache_L2_All_Hits + DCache_L2_All_Miss",
+ "MetricName": "DCache_L2_All"
+ },
+ {
+ "MetricExpr": "d_ratio(DCache_L2_All_Hits, DCache_L2_All)",
+ "MetricName": "DCache_L2_Hits"
+ },
+ {
+ "MetricExpr": "d_ratio(DCache_L2_All_Miss, DCache_L2_All)",
+ "MetricName": "DCache_L2_Misses"
+ },
+ {
+ "MetricExpr": "ipc + M2",
+ "MetricName": "M1"
+ },
+ {
+ "MetricExpr": "ipc + M1",
+ "MetricName": "M2"
+ },
+ {
+ "MetricExpr": "1/M3",
+ "MetricName": "M3"
+ },
+ {
+ "MetricExpr": "64 * l1d.replacement / 1000000000 / duration_time",
+ "MetricName": "L1D_Cache_Fill_BW"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/test/test_cpu/other.json b/tools/perf/pmu-events/arch/test/test_soc/cpu/other.json
index 7d53d7ecd723..7d53d7ecd723 100644
--- a/tools/perf/pmu-events/arch/test/test_cpu/other.json
+++ b/tools/perf/pmu-events/arch/test/test_soc/cpu/other.json
diff --git a/tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json
new file mode 100644
index 000000000000..41bac1c6a008
--- /dev/null
+++ b/tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json
@@ -0,0 +1,58 @@
+[
+ {
+ "EventCode": "0x02",
+ "EventName": "uncore_hisi_ddrc.flux_wcmd",
+ "BriefDescription": "DDRC write commands",
+ "PublicDescription": "DDRC write commands",
+ "Unit": "hisi_sccl,ddrc"
+ },
+ {
+ "Unit": "CBO",
+ "EventCode": "0x22",
+ "UMask": "0x81",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "Counter": "0,1",
+ "CounterMask": "0",
+ "Invert": "0",
+ "EdgeDetect": "0"
+ },
+ {
+ "Unit": "CBO",
+ "EventCode": "0xE0",
+ "UMask": "0x00",
+ "EventName": "event-hyphen",
+ "BriefDescription": "UNC_CBO_HYPHEN",
+ "PublicDescription": "UNC_CBO_HYPHEN"
+ },
+ {
+ "Unit": "CBO",
+ "EventCode": "0xC0",
+ "UMask": "0x00",
+ "EventName": "event-two-hyph",
+ "BriefDescription": "UNC_CBO_TWO_HYPH",
+ "PublicDescription": "UNC_CBO_TWO_HYPH"
+ },
+ {
+ "EventCode": "0x7",
+ "EventName": "uncore_hisi_l3c.rd_hit_cpipe",
+ "BriefDescription": "Total read hits",
+ "PublicDescription": "Total read hits",
+ "Unit": "hisi_sccl,l3c"
+ },
+ {
+ "EventCode": "0x12",
+ "EventName": "uncore_imc_free_running.cache_miss",
+ "BriefDescription": "Total cache misses",
+ "PublicDescription": "Total cache misses",
+ "Unit": "imc_free_running"
+ },
+ {
+ "EventCode": "0x34",
+ "EventName": "uncore_imc.cache_hits",
+ "BriefDescription": "Total cache hits",
+ "PublicDescription": "Total cache hits",
+ "Unit": "imc"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
new file mode 100644
index 000000000000..c7e7528db315
--- /dev/null
+++ b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
@@ -0,0 +1,16 @@
+[
+ {
+ "BriefDescription": "ddr write-cycles event",
+ "EventCode": "0x2b",
+ "EventName": "sys_ddr_pmu.write_cycles",
+ "Unit": "sys_ddr_pmu",
+ "Compat": "v8"
+ },
+ {
+ "BriefDescription": "ccn read-cycles event",
+ "ConfigCode": "0x2c",
+ "EventName": "sys_ccn_pmu.read_cycles",
+ "Unit": "sys_ccn_pmu",
+ "Compat": "0x01"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
new file mode 100644
index 000000000000..75d80e70e5cd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
@@ -0,0 +1,2464 @@
+[
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_alloc_restriction",
+ "MetricThreshold": "tma_alloc_restriction > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALL / tma_info_slots",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.1",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound. The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "tma_backend_bound",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound_aux",
+ "MetricThreshold": "tma_backend_bound_aux > 0.2",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
+ "MetricExpr": "(tma_info_slots - (TOPDOWN_FE_BOUND.ALL + TOPDOWN_BE_BOUND.ALL + TOPDOWN_RETIRING.ALL)) / tma_info_slots",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are not from the microsequencer.",
+ "MetricExpr": "(TOPDOWN_RETIRING.ALL - UOPS_RETIRED.MS) / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_base",
+ "MetricThreshold": "tma_base > 0.6",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_detect",
+ "MetricThreshold": "tma_branch_detect > 0.05",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteer",
+ "MetricThreshold": "tma_branch_resteer > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "MetricExpr": "TOPDOWN_FE_BOUND.CISC / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles due to backend bound stalls that are core execution bound and not attributed to outstanding demand load or store stalls.",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_decode",
+ "MetricThreshold": "tma_decode > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to memory disambiguation.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.DISAMBIGUATION / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_disambiguation",
+ "MetricThreshold": "tma_disambiguation > 0.02",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / tma_info_clks - MEM_BOUND_STALLS_AT_RET_CORRECTION * MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.15",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to FP assists.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.FP_ASSIST / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_fp_assist",
+ "MetricThreshold": "tma_fp_assist > 0.02",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divide operations per uop.",
+ "MetricExpr": "UOPS_RETIRED.FPDIV / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
+ "MetricName": "tma_fpdiv_uops",
+ "MetricThreshold": "tma_fpdiv_uops > 0.2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ALL / tma_info_slots",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a address aliasing block",
+ "MetricExpr": "100 * LD_BLOCKS.4K_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_address_alias_blocks",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Ratio of all branches which mispredict",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_branch_mispredict_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_branch_mispredict_to_unknown_branch_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_clks",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE_P",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_clks_p",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction",
+ "MetricExpr": "tma_info_clks / INST_RETIRED.ANY",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cpi",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cpu_utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycle cost per DRAM hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cycles_per_demand_load_dram_hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycle cost per L2 hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cycles_per_demand_load_l2_hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycle cost per LLC hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cycles_per_demand_load_l3_hit",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are FPDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.FPDIV / UOPS_RETIRED.ALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_fpdiv_uop_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are IDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.IDIV / UOPS_RETIRED.ALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_idiv_uop_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in DRAM",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_DRAM_HIT / MEM_BOUND_STALLS.IFETCH",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_inst_miss_cost_dramhit_percent",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_L2_HIT / MEM_BOUND_STALLS.IFETCH",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_inst_miss_cost_l2hit_percent",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_LLC_HIT / MEM_BOUND_STALLS.IFETCH",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_inst_miss_cost_l3hit_percent",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipbranch",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipc",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipcall",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch",
+ "MetricExpr": "INST_RETIRED.ANY / (BR_INST_RETIRED.FAR_BRANCH / 2)",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipfarbranch",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Load",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipload",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
+ "MetricExpr": "INST_RETIRED.ANY / (BR_MISP_RETIRED.COND - BR_MISP_RETIRED.COND_TAKEN)",
+ "MetricName": "tma_info_ipmisp_cond_ntaken",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricName": "tma_info_ipmisp_cond_taken",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired return Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN",
+ "MetricName": "tma_info_ipmisp_ret",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per retired Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipmispredict",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instructions per Store",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipstore",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "cpu_atom@CPU_CLK_UNHALTED.CORE@k / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_kernel_utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that are splits",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.SPLIT_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_load_splits",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "load ops retired per 1000 instruction",
+ "MetricExpr": "1e3 * MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_memloadpki",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are ucode ops",
+ "MetricExpr": "100 * UOPS_RETIRED.MS / UOPS_RETIRED.ALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_microcode_uop_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "5 * tma_info_clks",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_slots",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_store_fwd_blocks",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_turbo_utilization",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.ALL / INST_RETIRED.ANY",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_upi",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are x87 uops",
+ "MetricExpr": "100 * UOPS_RETIRED.X87 / UOPS_RETIRED.ALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_x87_uop_ratio",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ITLB / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.",
+ "MetricExpr": "LD_HEAD.L1_BOUND_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / tma_info_clks - MEM_BOUND_STALLS_AT_RET_CORRECTION * MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / tma_info_clks - MEM_BOUND_STALLS_AT_RET_CORRECTION * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to load buffer full",
+ "MetricExpr": "tma_mem_scheduler * MEM_SCHEDULER_BLOCK.LD_BUF / MEM_SCHEDULER_BLOCK.ALL",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
+ "MetricName": "tma_ld_buffer",
+ "MetricThreshold": "tma_ld_buffer > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to stores or loads.",
+ "MetricExpr": "min(tma_backend_bound, LD_HEAD.ANY_AT_RET / tma_info_clks + tma_store_bound)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to memory ordering.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_memory_ordering",
+ "MetricThreshold": "tma_memory_ordering > 0.02",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS)",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_ms_uops",
+ "MetricThreshold": "tma_ms_uops > 0.05",
+ "PublicDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear (slow nuke).",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.",
+ "MetricExpr": "LD_HEAD.OTHER_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_other_l1",
+ "MetricThreshold": "tma_other_l1 > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hits in the L2, LLC, DRAM or MMIO (Non-DRAM) but could not be correctly attributed or cycles in which the load miss is waiting on a request buffer.",
+ "MetricExpr": "max(0, tma_memory_bound - (tma_store_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound))",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_other_load_store",
+ "MetricThreshold": "tma_other_load_store > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops retired excluding ms and fp div uops.",
+ "MetricExpr": "(TOPDOWN_RETIRING.ALL - UOPS_RETIRED.MS - UOPS_RETIRED.FPDIV) / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
+ "MetricName": "tma_other_ret",
+ "MetricThreshold": "tma_other_ret > 0.3",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to page faults.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.PAGE_FAULT / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_page_fault",
+ "MetricThreshold": "tma_page_fault > 0.02",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "tma_backend_bound",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_aux_group",
+ "MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the numer of issue slots that result in retirement slots.",
+ "MetricExpr": "TOPDOWN_RETIRING.ALL / tma_info_slots",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to RSV full relative",
+ "MetricExpr": "tma_mem_scheduler * MEM_SCHEDULER_BLOCK.RSV / MEM_SCHEDULER_BLOCK.ALL",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
+ "MetricName": "tma_rsv",
+ "MetricThreshold": "tma_rsv > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to SMC.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.SMC / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_smc",
+ "MetricThreshold": "tma_smc > 0.02",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to store buffer full",
+ "MetricExpr": "tma_store_bound",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
+ "MetricName": "tma_st_buffer",
+ "MetricThreshold": "tma_st_buffer > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.",
+ "MetricExpr": "LD_HEAD.DTLB_MISS_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_stlb_hit",
+ "MetricThreshold": "tma_stlb_hit > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a second level TLB miss requiring a page walk.",
+ "MetricExpr": "LD_HEAD.PGWALK_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_stlb_miss",
+ "MetricThreshold": "tma_stlb_miss > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full.",
+ "MetricExpr": "tma_mem_scheduler * (MEM_SCHEDULER_BLOCK.ST_BUF / MEM_SCHEDULER_BLOCK.ALL)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
+ "MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.05",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5_11 + UOPS_DISPATCHED.PORT_6) / (5 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * cpu_core@ASSISTS.ANY\\,umask\\=0x1B@ / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
+ "MetricExpr": "63 * ASSISTS.SSE_AVX_MIX / tma_info_slots",
+ "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_avx_assists",
+ "MetricThreshold": "tma_avx_assists > 0.1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources. Sample with: FRONTEND_RETIRED.MS_FLOWS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(25 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 24 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "24 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu_core@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu_core@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIV_ACTIVE / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "min(7 * cpu_core@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * cpu_core@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "28 * tma_info_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "max(0, tma_heavy_operations - tma_microcode_sequencer)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists",
+ "MetricExpr": "30 * ASSISTS.FP / tma_info_slots",
+ "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_fp_assists",
+ "MetricThreshold": "tma_fp_assists > 0.1",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.MACRO_FUSED / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fused_instructions",
+ "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. Sample with: UOPS_RETIRED.HEAVY",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "ICACHE_DATA.STALLS / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_big_code",
+ "MetricThreshold": "tma_info_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_branching_overhead",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_mispredictions, tma_mispredicts_resteers",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_branching_overhead",
+ "MetricThreshold": "tma_info_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_big_code",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_callret",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_code_stlb_mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_nt",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_tk",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_core_bound_likely",
+ "MetricThreshold": "tma_info_core_bound_likely > 0.5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 6 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_dsb_misses",
+ "MetricThreshold": "tma_info_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu_core@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_dsb_switch_cost",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu_core@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_fb_hpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu_core@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_fetch_upc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common).",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_ic_misses",
+ "MetricThreshold": "tma_info_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: ",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_DATA.STALLS / cpu_core@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_icache_miss_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_instruction_fetch_bw > 20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu_core@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu_core@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting.",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per a microcode Assist invocation",
+ "MetricExpr": "INST_RETIRED.ANY / cpu_core@ASSISTS.ANY\\,umask\\=0x1B@",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_ipassist",
+ "MetricThreshold": "tma_info_ipassist < 100e3",
+ "PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_ipdsb_miss_ret < 50",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_ntaken",
+ "MetricThreshold": "tma_info_ipmisp_cond_ntaken < 200",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_taken",
+ "MetricThreshold": "tma_info_ipmisp_cond_taken < 200",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "cpu_core@BR_MISP_RETIRED.INDIRECT_CALL\\,umask\\=0x80@ / BR_MISP_RETIRED.INDIRECT",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_ret",
+ "MetricThreshold": "tma_info_ipmisp_ret < 500",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu_core@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_ipswpf",
+ "MetricThreshold": "tma_info_ipswpf < 100",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 13",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_lcp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_jump",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki_load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code_all",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu_core@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l3_miss_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_load_stlb_mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
+ "MetricExpr": "LSD.UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "Fed;LSD",
+ "MetricName": "tma_info_lsd_coverage",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / cpu_core@UNC_ARB_DAT_OCCUPANCY.RD\\,cmask\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.RD + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.RD",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.ALL",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_request_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_memory_bandwidth",
+ "MetricThreshold": "tma_info_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_memory_data_tlbs",
+ "MetricThreshold": "tma_info_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_memory_latency",
+ "MetricThreshold": "tma_info_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_mispredictions",
+ "MetricThreshold": "tma_info_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - (tma_info_cond_nt + tma_info_cond_tk + tma_info_callret + tma_info_jump)",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_other_branches",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (4 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_slots / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "(tma_info_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
+ "MetricGroup": "SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots_utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_store_stlb_mpki",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu_core@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_strings_cycles",
+ "MetricThreshold": "tma_info_strings_cycles > 0.1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_turbo_utilization",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "tma_retiring * tma_info_slots / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_slots / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 9",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b + tma_shuffles",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_int_operations",
+ "MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired",
+ "MetricExpr": "(INT_VEC_RETIRED.ADD_128 + INT_VEC_RETIRED.VNNI_128) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
+ "MetricName": "tma_int_vector_128b",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired",
+ "MetricExpr": "(INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
+ "MetricName": "tma_int_vector_256b",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_TAG.STALLS / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((EXE_ACTIVITY.BOUND_ON_LOADS - MEMORY_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "9 * tma_info_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "DECODE.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_2_3_10 / (3 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3_10",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit",
+ "MetricExpr": "(LSD.CYCLES_ACTIVE - LSD.CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_lsd",
+ "MetricThreshold": "tma_lsd > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu_core@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_sq_full",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricExpr": "min(tma_backend_bound, LD_HEAD.ANY_AT_RET / tma_info_clks + tma_store_bound)",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
+ "MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_memory_fence",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.MS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_info_mispredictions",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "160 * ASSISTS.SSE_AVX_MIX / tma_info_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * cpu_core@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (tma_retiring * tma_info_slots / UOPS_ISSUED.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
+ "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - INST_RETIRED.MACRO_FUSED) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_non_fused_branches",
+ "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults",
+ "MetricExpr": "99 * ASSISTS.PAGE_FAULT / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_page_faults",
+ "MetricThreshold": "tma_page_faults > 0.05",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((cpu_core@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu_core@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / tma_info_clks if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu_core@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / tma_info_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "cpu_core@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Shuffle (cross \"vector lane\" data transfers) uops fraction the CPU has retired.",
+ "MetricExpr": "INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group",
+ "MetricName": "tma_shuffles",
+ "MetricThreshold": "tma_shuffles > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(XQ.FULL_CYCLES + L1D_PEND_MISS.L2_STALLS) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricExpr": "(MEM_STORE_RETIRED.L2_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_4_9 + UOPS_DISPATCHED.PORT_7_8) / (4 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores",
+ "MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
+ "MetricName": "tma_streaming_stores",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/cache.json b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
new file mode 100644
index 000000000000..51770416bcc2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/cache.json
@@ -0,0 +1,1064 @@
+[
+ {
+ "BriefDescription": "L1D.HWPF_MISS",
+ "EventCode": "0x51",
+ "EventName": "L1D.HWPF_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event L1D_PEND_MISS.L2_STALLS",
+ "Deprecated": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALLS",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_RQSTS.MISS]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_HWPF",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf0",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HWPF_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_REQUEST.MISS]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x38",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the LLC or other core with HITE/F/M.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "EventCode": "0x43",
+ "EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfd",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.ALL",
+ "SampleAfterValue": "20003",
+ "UMask": "0x7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.RSV",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "EventCode": "0x44",
+ "EventName": "MEM_STORE_RETIRED.L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of load uops retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of store uops retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split load uops.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled. If PEBS is enabled and a PEBS record is generated, will populate PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Retired memory uops for any access",
+ "EventCode": "0xe5",
+ "EventName": "MEM_UOP_RETIRED.ANY",
+ "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another cores caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Deprecated": "1",
+ "Errata": "ADL038",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "Errata": "ADL038",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "For every cycle where the core is waiting on at least 1 outstanding Demand RFO request, increments by 1.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Errata": "ADL038",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ }
+]
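As an illustration of how one of the entries above maps onto a raw hardware counter, the following sketch opens OFFCORE_REQUESTS.DEMAND_DATA_RD (EventCode 0x21, UMask 0x1, Unit cpu_core) via perf_event_open(2). It assumes the conventional x86 core-PMU raw encoding (event select in bits 0-7, umask in bits 8-15 of attr.config) and a hybrid system exposing the dynamic PMU type id under /sys/bus/event_source/devices/cpu_core/type; the perf tool derives this encoding automatically from the JSON, so this is only a rough hand-coded equivalent.

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count = 0;
	int pmu_type, fd;
	FILE *f;

	/* Hybrid CPUs expose one PMU per core type; read its dynamic type id. */
	f = fopen("/sys/bus/event_source/devices/cpu_core/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%d", &pmu_type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;			/* cpu_core PMU */
	attr.config = 0x21 | (0x1 << 8);	/* EventCode | (UMask << 8) */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("OFFCORE_REQUESTS.DEMAND_DATA_RD: %lld\n", count);
	close(fd);
	return 0;
}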
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json b/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json
new file mode 100644
index 000000000000..c8ba96c4a7f8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/floating-point.json
@@ -0,0 +1,151 @@
+[
+ {
+ "BriefDescription": "ARITH.FPDIV_ACTIVE",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.SSE_AVX_MIX",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ }
+]
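The FP_ARITH_INST_RETIRED descriptions above state fixed per-count operation weights (1 per scalar count, 2 per 128-bit packed double, 4 per 128-bit packed single and 256-bit packed double, 8 per 256-bit packed single), which is how a total FP-operation figure is typically derived from these counters. A small illustrative helper, with placeholder values standing in for real counter readings:

#include <stdint.h>
#include <stdio.h>

/*
 * Combine FP_ARITH_INST_RETIRED.* counts into total FP operations using the
 * per-element weights stated in the event descriptions above.
 */
static uint64_t fp_ops_total(uint64_t scalar, uint64_t pd128, uint64_t ps128,
			     uint64_t pd256, uint64_t ps256)
{
	return scalar * 1 +	/* .SCALAR: 1 op per count              */
	       pd128  * 2 +	/* .128B_PACKED_DOUBLE: 2 ops per count */
	       ps128  * 4 +	/* .128B_PACKED_SINGLE: 4 ops per count */
	       pd256  * 4 +	/* .256B_PACKED_DOUBLE: 4 ops per count */
	       ps256  * 8;	/* .256B_PACKED_SINGLE: 8 ops per count */
}

int main(void)
{
	/* Placeholder values; in practice these come from the counters above. */
	printf("%llu FP ops\n",
	       (unsigned long long)fp_ops_total(1000, 200, 300, 400, 500));
	return 0;
}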
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/frontend.json b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
new file mode 100644
index 000000000000..81349100fe32
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/frontend.json
@@ -0,0 +1,426 @@
+[
+ {
+ "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Clears due to Unknown Branches.",
+ "EventCode": "0x60",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "EventCode": "0x87",
+ "EventName": "DECODE.MS_BUSY",
+ "SampleAfterValue": "500009",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "EventCode": "0x61",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x608006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x601006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x610006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x602006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x620006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x604006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MS_FLOWS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x17",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of requests to the instruction cache for one or more bytes of a cache line.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction cache misses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/memory.json b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
new file mode 100644
index 000000000000..55827b276e6e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/memory.json
@@ -0,0 +1,305 @@
+[
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ANY_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_BOUND_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xf4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_MISS_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.OTHER_AT_RET",
+ "PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc0",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.PGWALK_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xa0",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ST_ADDR_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x84",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "3",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "CounterMask": "9",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PEBS": "2",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/other.json b/tools/perf/pmu-events/arch/x86/alderlake/other.json
new file mode 100644
index 000000000000..1db73e020215
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/other.json
@@ -0,0 +1,174 @@
+[
+ {
+ "BriefDescription": "ASSISTS.HARDWARE",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.HARDWARE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "ASSISTS.PAGE_FAULT",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.PAGE_FAULT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "CORE_POWER.LICENSE_1",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LICENSE_1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "CORE_POWER.LICENSE_2",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LICENSE_2",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "CORE_POWER.LICENSE_3",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LICENSE_3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.COUNT",
+ "Invert": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Deprecated": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.CYCLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles the uncore cannot take further requests",
+ "CounterMask": "1",
+ "EventCode": "0x2d",
+ "EventName": "XQ.FULL_CYCLES",
+ "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
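
Several of the other.json entries above (RS_EMPTY.COUNT, RS_EMPTY.CYCLES) are marked "Deprecated": "1" with a BriefDescription naming the replacement event, and the pipeline.json file added next uses the same pattern extensively. Below is a hedged sketch of how a consumer script might skip the deprecated aliases and recover the suggested replacements; parsing the replacement name out of the description string is purely an illustration, not how perf itself handles the Deprecated field:

#!/usr/bin/env python3
# Illustrative sketch, not part of the patch: separate current event names from
# deprecated aliases in one of the alderlake JSON files added by this series.
import json
import re

REFER = re.compile(r"Refer to new event ([A-Z0-9_.]+)")

def split_events(path):
    # Returns (current_names, deprecated_map) where deprecated_map maps an old
    # name to the replacement parsed from its BriefDescription, or None.
    with open(path) as f:
        events = json.load(f)
    current, deprecated = [], {}
    for ev in events:
        if ev.get("Deprecated") == "1":
            m = REFER.search(ev.get("BriefDescription", ""))
            deprecated[ev["EventName"]] = m.group(1) if m else None
        else:
            current.append(ev["EventName"])
    return current, deprecated

if __name__ == "__main__":
    # Illustrative path into a checked-out kernel tree.
    current, deprecated = split_events("tools/perf/pmu-events/arch/x86/alderlake/other.json")
    for old, new in deprecated.items():
        print(f"{old} is deprecated" + (f"; use {new}" if new else ""))
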
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
new file mode 100644
index 000000000000..cb5b8611064b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/pipeline.json
@@ -0,0 +1,1658 @@
+[
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.FPDIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FP_DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.IDIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.INT_DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1b",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_CALL",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf9",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xbf",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf9",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_RETURN",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND_TAKEN",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT_CALL",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND_TAKEN",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C01",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C02",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C0_WAIT",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x70",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "5",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x21",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CounterMask": "2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x75",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of instructions retired.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired NOP instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 instructions",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Iterations of Repeat string retired instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.REP_ITERATION",
+ "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.128BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x13",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.256BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xac",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_128",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_256",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.MUL_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.SHUFFLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_128",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event LD_BLOCKS.ADDRESS_ALIAS",
+ "Deprecated": "1",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x88",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CounterMask": "6",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "SampleAfterValue": "20003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machines clears due to memory renaming.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MRN_NUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SLOW",
+ "SampleAfterValue": "20003",
+ "UMask": "0x6f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "LFENCE instructions retired",
+ "EventCode": "0xe0",
+ "EventName": "MISC2_RETIRED.LFENCE",
+ "PublicDescription": "number of LFENCE retired instructions",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.NON_C01_MS_SCB",
+ "PublicDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires. The most commonly executed instruction with an MS scoreboard is PAUSE.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "Number of slots in TMA method where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BAD_SPEC_SLOTS",
+ "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of specualtive operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to branch mispredicts.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to backend stalls.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REGISTER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.CISC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stalls.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8d",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to a latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x72",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ITLB",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to wrong predecodes.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the total number of consumed retirement slots.",
+ "EventCode": "0xc2",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "UOPS_DECODED.DEC0_UOPS",
+ "EventCode": "0x76",
+ "EventName": "UOPS_DECODED.DEC0_UOPS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on port 0",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Number of uops dispatch to execution port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on port 1",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Number of uops dispatch to execution port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3_10",
+ "PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 4 and 9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 5 and 11",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_5_11",
+ "PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on port 6",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Number of uops dispatch to execution port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 7 and 8",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UOPS_EXECUTED.STALLS",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Cycles with retired uop(s).",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.CYCLES",
+ "PublicDescription": "Counts cycles where at least one uop has retired.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired uops except the last uop of each instruction.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.HEAVY",
+ "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "UOPS_RETIRED.MS",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UOPS_RETIRED.STALLS",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.X87",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
new file mode 100644
index 000000000000..34fc052d00e4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-interconnect.json
@@ -0,0 +1,90 @@
+[
+ {
+ "BriefDescription": "Number of requests allocated in Coherency Tracker.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_DAT_REQUESTS.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_DAT_OCCUPANCY.ALL",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_IFA_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_OCCUPANCY.RD]",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_REQUESTS.RD]",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_OCCUPANCY.DRD]",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_REQUEST.DRD]",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json
new file mode 100644
index 000000000000..163d7e7755c4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-memory.json
@@ -0,0 +1,183 @@
+[
+ {
+ "BriefDescription": "Counts every 64B read request entering the Memory Controller 0 to DRAM (sum of all channels).",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every 64B read request entering the Memory Controller 0 to DRAM (sum of all channels).",
+ "UMask": "0x20",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every 64B write request entering the Memory Controller 0 to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every 64B read request entering the Memory Controller 1 to DRAM (sum of all channels).",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every 64B read entering the Memory Controller 1 to DRAM (sum of all channels).",
+ "UMask": "0x20",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "Counts every 64B write request entering the Memory Controller 1 to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "ACT command for a read request sent to DRAM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_ACT_COUNT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command sent to DRAM",
+ "EventCode": "0x26",
+ "EventName": "UNC_M_ACT_COUNT_TOTAL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command for a write request sent to DRAM",
+ "EventCode": "0x25",
+ "EventName": "UNC_M_ACT_COUNT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS command sent to DRAM",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_CAS_COUNT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write CAS command sent to DRAM",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_CAS_COUNT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of clocks",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming read request page status is Page Empty",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M_DRAM_PAGE_EMPTY_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming write request page status is Page Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_DRAM_PAGE_EMPTY_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming read request page status is Page Hit",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M_DRAM_PAGE_HIT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming write request page status is Page Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M_DRAM_PAGE_HIT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming read request page status is Page Miss",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M_DRAM_PAGE_MISS_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming write request page status is Page Miss",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_DRAM_PAGE_MISS_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Any Rank at Hot state",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_DRAM_THERMAL_HOT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Any Rank at Warm state",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M_DRAM_THERMAL_WARM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming read prefetch request from IA.",
+ "EventCode": "0x0A",
+ "EventName": "UNC_M_PREFETCH_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command sent to DRAM due to page table idle timer expiration",
+ "EventCode": "0x28",
+ "EventName": "UNC_M_PRE_COUNT_IDLE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command sent to DRAM for a read/write request",
+ "EventCode": "0x27",
+ "EventName": "UNC_M_PRE_COUNT_PAGE_MISS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming VC0 read request",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_VC0_REQUESTS_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming VC0 write request",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_VC0_REQUESTS_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming VC1 read request",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_VC1_REQUESTS_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming VC1 write request",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_VC1_REQUESTS_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json
new file mode 100644
index 000000000000..2af92e43b28a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/uncore-other.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json
new file mode 100644
index 000000000000..3827d292da80
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlake/virtual-memory.json
@@ -0,0 +1,236 @@
+[
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CounterMask": "1",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CounterMask": "1",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CounterMask": "1",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.DTLB_MISS_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x90",
+ "Unit": "cpu_atom"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
new file mode 100644
index 000000000000..1a85d935c733
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
@@ -0,0 +1,698 @@
+[
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to certain allocation restrictions.",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_alloc_restriction",
+ "MetricThreshold": "tma_alloc_restriction > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "TOPDOWN_BE_BOUND.ALL / tma_info_slots",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.1",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound. The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "tma_backend_bound",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound_aux",
+ "MetricThreshold": "tma_backend_bound_aux > 0.2",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that UOPS must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count. All of these subevents count backend stalls, in slots, due to a resource limitation. These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based. These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear",
+ "MetricExpr": "(tma_info_slots - (TOPDOWN_FE_BOUND.ALL + TOPDOWN_BE_BOUND.ALL + TOPDOWN_RETIRING.ALL)) / tma_info_slots",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are not from the microsequencer.",
+ "MetricExpr": "(TOPDOWN_RETIRING.ALL - UOPS_RETIRED.MS) / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_base",
+ "MetricThreshold": "tma_base > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_DETECT / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_detect",
+ "MetricThreshold": "tma_branch_detect > 0.05",
+ "PublicDescription": "Counts the number of issue slots that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to branch mispredicts.",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MISPREDICT / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.BRANCH_RESTEER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteer",
+ "MetricThreshold": "tma_branch_resteer > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "MetricExpr": "TOPDOWN_FE_BOUND.CISC / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles due to backend bound stalls that are core execution bound and not attributed to outstanding demand load or store stalls.",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to decode stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.DECODE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_decode",
+ "MetricThreshold": "tma_decode > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to memory disambiguation.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.DISAMBIGUATION / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_disambiguation",
+ "MetricThreshold": "tma_disambiguation > 0.02",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / tma_info_clks - MEM_BOUND_STALLS_AT_RET_CORRECTION * MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear classified as a fast nuke due to memory ordering, memory disambiguation and memory renaming.",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.FASTNUKE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_fast_nuke",
+ "MetricThreshold": "tma_fast_nuke > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.15",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to FP assists.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.FP_ASSIST / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_fp_assist",
+ "MetricThreshold": "tma_fp_assist > 0.02",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divide operations per uop.",
+ "MetricExpr": "UOPS_RETIRED.FPDIV / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
+ "MetricName": "tma_fpdiv_uops",
+ "MetricThreshold": "tma_fpdiv_uops > 0.2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to frontend stalls.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ALL / tma_info_slots",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to instruction cache misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ICACHE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a address aliasing block",
+ "MetricExpr": "100 * LD_BLOCKS.4K_ALIAS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_address_alias_blocks"
+ },
+ {
+ "BriefDescription": "Ratio of all branches which mispredict",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_branch_mispredict_ratio"
+ },
+ {
+ "BriefDescription": "Ratio between Mispredicted branches and unknown branches",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / BACLEARS.ANY",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_branch_mispredict_to_unknown_branch_ratio"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_clks"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "CPU_CLK_UNHALTED.CORE_P",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_clks_p"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction",
+ "MetricExpr": "tma_info_clks / INST_RETIRED.ANY",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Cycle cost per DRAM hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_DRAM_HIT / MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cycles_per_demand_load_dram_hit"
+ },
+ {
+ "BriefDescription": "Cycle cost per L2 hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cycles_per_demand_load_l2_hit"
+ },
+ {
+ "BriefDescription": "Cycle cost per LLC hit",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_cycles_per_demand_load_l3_hit"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are FPDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.FPDIV / UOPS_RETIRED.ALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_fpdiv_uop_ratio"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are IDiv uops",
+ "MetricExpr": "100 * UOPS_RETIRED.IDIV / UOPS_RETIRED.ALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_idiv_uop_ratio"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in DRAM",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_DRAM_HIT / MEM_BOUND_STALLS.IFETCH",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_inst_miss_cost_dramhit_percent"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in the L2",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_L2_HIT / MEM_BOUND_STALLS.IFETCH",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_inst_miss_cost_l2hit_percent"
+ },
+ {
+ "BriefDescription": "Percent of instruction miss cost that hit in the L3",
+ "MetricExpr": "100 * MEM_BOUND_STALLS.IFETCH_LLC_HIT / MEM_BOUND_STALLS.IFETCH",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_inst_miss_cost_l3hit_percent"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipbranch"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipc"
+ },
+ {
+ "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.CALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipcall"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch",
+ "MetricExpr": "INST_RETIRED.ANY / (BR_INST_RETIRED.FAR_BRANCH / 2)",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipfarbranch"
+ },
+ {
+ "BriefDescription": "Instructions per Load",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipload"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was not taken",
+ "MetricExpr": "INST_RETIRED.ANY / (BR_MISP_RETIRED.COND - BR_MISP_RETIRED.COND_TAKEN)",
+ "MetricName": "tma_info_ipmisp_cond_ntaken"
+ },
+ {
+ "BriefDescription": "Instructions per retired conditional Branch Misprediction where the branch was taken",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricName": "tma_info_ipmisp_cond_taken"
+ },
+ {
+ "BriefDescription": "Instructions per retired indirect call or jump Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricName": "tma_info_ipmisp_indirect"
+ },
+ {
+ "BriefDescription": "Instructions per retired return Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RETURN",
+ "MetricName": "tma_info_ipmisp_ret"
+ },
+ {
+ "BriefDescription": "Instructions per retired Branch Misprediction",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipmispredict"
+ },
+ {
+ "BriefDescription": "Instructions per Store",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_ipstore"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in Kernel mode",
+ "MetricExpr": "cpu@CPU_CLK_UNHALTED.CORE@k / CPU_CLK_UNHALTED.CORE",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_kernel_utilization"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads that are splits",
+ "MetricExpr": "100 * MEM_UOPS_RETIRED.SPLIT_LOADS / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_load_splits"
+ },
+ {
+ "BriefDescription": "load ops retired per 1000 instruction",
+ "MetricExpr": "1e3 * MEM_UOPS_RETIRED.ALL_LOADS / INST_RETIRED.ANY",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_memloadpki"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are ucode ops",
+ "MetricExpr": "100 * UOPS_RETIRED.MS / UOPS_RETIRED.ALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_microcode_uop_ratio"
+ },
+ {
+ "BriefDescription": "",
+ "MetricExpr": "5 * tma_info_clks",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Percentage of total non-speculative loads with a store forward or unknown store address block",
+ "MetricExpr": "100 * LD_BLOCKS.DATA_UNKNOWN / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricName": "tma_info_store_fwd_blocks"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.ALL / INST_RETIRED.ANY",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_upi"
+ },
+ {
+ "BriefDescription": "Percentage of all uops which are x87 uops",
+ "MetricExpr": "100 * UOPS_RETIRED.X87 / UOPS_RETIRED.ALL",
+ "MetricGroup": " ",
+ "MetricName": "tma_info_x87_uop_ratio"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.ITLB / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a load block.",
+ "MetricExpr": "LD_HEAD.L1_BOUND_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the L2 Cache.",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_L2_HIT / tma_info_clks - MEM_BOUND_STALLS_AT_RET_CORRECTION * MEM_BOUND_STALLS.LOAD_L2_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles a core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "MetricExpr": "MEM_BOUND_STALLS.LOAD_LLC_HIT / tma_info_clks - MEM_BOUND_STALLS_AT_RET_CORRECTION * MEM_BOUND_STALLS.LOAD_LLC_HIT / MEM_BOUND_STALLS.LOAD",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to load buffer full",
+ "MetricExpr": "tma_mem_scheduler * MEM_SCHEDULER_BLOCK.LD_BUF / MEM_SCHEDULER_BLOCK.ALL",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
+ "MetricName": "tma_ld_buffer",
+ "MetricThreshold": "tma_ld_buffer > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "MetricExpr": "TOPDOWN_BE_BOUND.MEM_SCHEDULER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_mem_scheduler",
+ "MetricThreshold": "tma_mem_scheduler > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to stores or loads.",
+ "MetricExpr": "min(tma_backend_bound, LD_HEAD.ANY_AT_RET / tma_info_clks + tma_store_bound)",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to memory ordering.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.MEMORY_ORDERING / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_memory_ordering",
+ "MetricThreshold": "tma_memory_ordering > 0.02",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS)",
+ "MetricExpr": "UOPS_RETIRED.MS / tma_info_slots",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_ms_uops",
+ "MetricThreshold": "tma_ms_uops > 0.05",
+ "PublicDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "MetricExpr": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_non_mem_scheduler",
+ "MetricThreshold": "tma_non_mem_scheduler > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to a machine clear (slow nuke).",
+ "MetricExpr": "TOPDOWN_BAD_SPECULATION.NUKE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_machine_clears_group",
+ "MetricName": "tma_nuke",
+ "MetricThreshold": "tma_nuke > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.OTHER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_other_fb",
+ "MetricThreshold": "tma_other_fb > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a number of other load blocks.",
+ "MetricExpr": "LD_HEAD.OTHER_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_other_l1",
+ "MetricThreshold": "tma_other_l1 > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hits in the L2, LLC, DRAM or MMIO (Non-DRAM) but could not be correctly attributed or cycles in which the load miss is waiting on a request buffer.",
+ "MetricExpr": "max(0, tma_memory_bound - (tma_store_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_dram_bound))",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_other_load_store",
+ "MetricThreshold": "tma_other_load_store > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of uops retired excluding ms and fp div uops.",
+ "MetricExpr": "(TOPDOWN_RETIRING.ALL - UOPS_RETIRED.MS - UOPS_RETIRED.FPDIV) / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_base_group",
+ "MetricName": "tma_other_ret",
+ "MetricThreshold": "tma_other_ret > 0.3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to page faults.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.PAGE_FAULT / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_page_fault",
+ "MetricThreshold": "tma_page_fault > 0.02",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not delivered by the frontend due to wrong predecodes.",
+ "MetricExpr": "TOPDOWN_FE_BOUND.PREDECODE / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_predecode",
+ "MetricThreshold": "tma_predecode > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REGISTER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_register",
+ "MetricThreshold": "tma_register > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "MetricExpr": "TOPDOWN_BE_BOUND.REORDER_BUFFER / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_reorder_buffer",
+ "MetricThreshold": "tma_reorder_buffer > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls",
+ "MetricExpr": "tma_backend_bound",
+ "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_aux_group",
+ "MetricName": "tma_resource_bound",
+ "MetricThreshold": "tma_resource_bound > 0.2",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend due to backend stalls. Note that uops must be available for consumption in order for this event to count. If a uop is not available (IQ is empty), this event will not count.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the numer of issue slots that result in retirement slots.",
+ "MetricExpr": "TOPDOWN_RETIRING.ALL / tma_info_slots",
+ "MetricGroup": "TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.75",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to RSV full relative",
+ "MetricExpr": "tma_mem_scheduler * MEM_SCHEDULER_BLOCK.RSV / MEM_SCHEDULER_BLOCK.ALL",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
+ "MetricName": "tma_rsv",
+ "MetricThreshold": "tma_rsv > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "MetricExpr": "TOPDOWN_BE_BOUND.SERIALIZATION / tma_info_slots",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_resource_bound_group",
+ "MetricName": "tma_serialization",
+ "MetricThreshold": "tma_serialization > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears relative to the number of nuke slots due to SMC.",
+ "MetricExpr": "tma_nuke * (MACHINE_CLEARS.SMC / MACHINE_CLEARS.SLOW)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_nuke_group",
+ "MetricName": "tma_smc",
+ "MetricThreshold": "tma_smc > 0.02",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles, relative to the number of mem_scheduler slots, in which uops are blocked due to store buffer full",
+ "MetricExpr": "tma_store_bound",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_mem_scheduler_group",
+ "MetricName": "tma_st_buffer",
+ "MetricThreshold": "tma_st_buffer > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a first level TLB miss.",
+ "MetricExpr": "LD_HEAD.DTLB_MISS_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_stlb_hit",
+ "MetricThreshold": "tma_stlb_hit > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a second level TLB miss requiring a page walk.",
+ "MetricExpr": "LD_HEAD.PGWALK_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_stlb_miss",
+ "MetricThreshold": "tma_stlb_miss > 0.05",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to store buffer full.",
+ "MetricExpr": "tma_mem_scheduler * (MEM_SCHEDULER_BLOCK.ST_BUF / MEM_SCHEDULER_BLOCK.ALL)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the oldest load of the load buffer is stalled at retirement due to a store forward block.",
+ "MetricExpr": "LD_HEAD.ST_ADDR_AT_RET / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.05",
+ "ScaleUnit": "100%"
+ }
+]
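Note on the metrics above: each "MetricExpr" is a plain ratio over event counts, evaluated by perf's metric engine. Below is a minimal Python sketch of how two of them reduce to arithmetic, assuming hypothetical counter values and assuming tma_info_clks corresponds to CPU_CLK_UNHALTED.CORE (the 5x factor follows tma_info_slots = 5 * tma_info_clks); this is an illustration, not part of the patch.

# Hypothetical counts, as if collected with perf stat on this CPU.
counts = {
    "INST_RETIRED.ANY": 1_200_000_000,
    "BR_MISP_RETIRED.ALL_BRANCHES": 4_000_000,
    "CPU_CLK_UNHALTED.CORE": 900_000_000,
    "TOPDOWN_RETIRING.ALL": 2_700_000_000,
}
clks = counts["CPU_CLK_UNHALTED.CORE"]            # assumed tma_info_clks
slots = 5 * clks                                  # tma_info_slots = 5 * tma_info_clks
ipmispredict = counts["INST_RETIRED.ANY"] / counts["BR_MISP_RETIRED.ALL_BRANCHES"]
retiring = counts["TOPDOWN_RETIRING.ALL"] / slots  # tma_retiring as a fraction of slots
print(f"tma_info_ipmispredict = {ipmispredict:.1f}")
print(f"tma_retiring          = {retiring:.1%}")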
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/cache.json b/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
new file mode 100644
index 000000000000..043445ae14a8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/cache.json
@@ -0,0 +1,330 @@
+[
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x38"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the LLC or other core with HITE/F/M.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked for any of the following reasons: load buffer, store buffer or RSV full.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.ALL",
+ "SampleAfterValue": "20003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to a load buffer full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.LD_BUF",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to an RSV full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.RSV",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that uops are blocked due to a store buffer full condition.",
+ "EventCode": "0x04",
+ "EventName": "MEM_SCHEDULER_BLOCK.ST_BUF",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of load uops retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of store uops retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 128 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 16 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 256 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 32 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 4 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 512 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 64 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of tagged loads with an instruction latency that exceeds or equals the threshold of 8 cycles as defined in MEC_CR_PEBS_LD_LAT_THRESHOLD (3F6H). Only counts with PEBS enabled. If a PEBS record is generated, will populate the PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split load uops.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of stores uops retired. Counts with or without PEBS enabled. If PEBS is enabled and a PEBS record is generated, will populate PEBS Latency and PEBS Data Source fields accordingly.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ }
+]
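Each entry in these event files pairs an EventCode with a UMask; perf's jevents tooling turns them into named events, but the same fields can also be read directly. A minimal sketch follows, using the file path from the hunk above; the "cpu" PMU name and the chosen event are assumptions for illustration only.

import json

# Read the event table added by this patch and build a raw perf event spec.
with open("tools/perf/pmu-events/arch/x86/alderlaken/cache.json") as f:
    events = json.load(f)

entry = next(e for e in events if e["EventName"] == "LONGEST_LAT_CACHE.MISS")
# Produces e.g. cpu/event=0x2e,umask=0x41/, usable with perf stat -e on a matching PMU.
print(f"cpu/event={entry['EventCode']},umask={entry['UMask']}/")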
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json b/tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json
new file mode 100644
index 000000000000..30e8ca3c1485
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/floating-point.json
@@ -0,0 +1,18 @@
+[
+ {
+ "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/frontend.json b/tools/perf/pmu-events/arch/x86/alderlaken/frontend.json
new file mode 100644
index 000000000000..36898bab2bba
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/frontend.json
@@ -0,0 +1,26 @@
+[
+ {
+ "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of requests to the instruction cache for one or more bytes of a cache line.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction cache misses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/memory.json b/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
new file mode 100644
index 000000000000..37259d38a222
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/memory.json
@@ -0,0 +1,88 @@
+[
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to any number of reasons, including an L1 miss, WCB full, pagewalk, store address block or store data block, on a load that retires.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ANY_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer is stalled due to a core bound stall including a store address match, a DTLB miss or a page walk that detains the load from retiring.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_BOUND_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xf4"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DL1 miss.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.L1_MISS_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.OTHER_AT_RET",
+ "PublicDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to other block cases such as pipeline conflicts, fences, etc.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc0"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a pagewalk.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.PGWALK_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xa0"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a store address match.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.ST_ADDR_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x84"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/other.json b/tools/perf/pmu-events/arch/x86/alderlaken/other.json
new file mode 100644
index 000000000000..6336de61f628
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/other.json
@@ -0,0 +1,38 @@
+[
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
new file mode 100644
index 000000000000..fa53ff11a509
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/pipeline.json
@@ -0,0 +1,533 @@
+[
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_CALL",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf9"
+ },
+ {
+ "BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
+ },
+ {
+ "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
+ },
+ {
+ "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xbf"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT_CALL",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
+ },
+ {
+ "BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf9"
+ },
+ {
+ "BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.INDIRECT",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.NEAR_RETURN",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_INST_RETIRED.COND_TAKEN",
+ "Deprecated": "1",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT_CALL",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.INDIRECT",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BR_MISP_RETIRED.COND_TAKEN",
+ "Deprecated": "1",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of instructions retired.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event LD_BLOCKS.ADDRESS_ALIAS",
+ "Deprecated": "1",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "SampleAfterValue": "20003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of machines clears due to memory renaming.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MRN_NUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears that flush the pipeline and restart the machine with the use of microcode due to SMC, MEMORY_ORDERING, FP_ASSISTS, PAGE_FAULT, DISAMBIGUATION, and FPC_VIRTUAL_TRAP.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SLOW",
+ "SampleAfterValue": "20003",
+ "UMask": "0x6f"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires.",
+ "EventCode": "0x75",
+ "EventName": "SERIALIZATION.NON_C01_MS_SCB",
+ "PublicDescription": "Counts the number of issue slots not consumed by the backend due to a micro-sequencer (MS) scoreboard, which stalls the front-end from issuing from the UROM until a specified older uop retires. The most commonly executed instruction with an MS scoreboard is PAUSE.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to branch mispredicts.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to a machine clear (nuke).",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.NUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to backend stalls.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REGISTER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.CISC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stalls.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to frontend bandwidth restrictions due to decode, predecode, cisc, and other limitations.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.FRONTEND_BANDWIDTH",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8d"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to a latency related stalls including BACLEARs, BTCLEARs, ITLB misses, and ICache misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.FRONTEND_LATENCY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x72"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ITLB",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to wrong predecodes.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the total number of consumed retirement slots.",
+ "EventCode": "0xc2",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the total number of uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.X87",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
new file mode 100644
index 000000000000..4af695a5e755
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-interconnect.json
@@ -0,0 +1,26 @@
+[
+ {
+ "BriefDescription": "Number of requests allocated in Coherency Tracker.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json
new file mode 100644
index 000000000000..163d7e7755c4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-memory.json
@@ -0,0 +1,183 @@
+[
+ {
+ "BriefDescription": "Counts every 64B read request entering the Memory Controller 0 to DRAM (sum of all channels).",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every 64B read request entering the Memory Controller 0 to DRAM (sum of all channels).",
+ "UMask": "0x20",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every 64B write request entering the Memory Controller 0 to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every 64B read request entering the Memory Controller 1 to DRAM (sum of all channels).",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts every 64B read entering the Memory Controller 1 to DRAM (sum of all channels).",
+ "UMask": "0x20",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "Counts every 64B write request entering the Memory Controller 1 to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "ACT command for a read request sent to DRAM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_ACT_COUNT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command sent to DRAM",
+ "EventCode": "0x26",
+ "EventName": "UNC_M_ACT_COUNT_TOTAL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command for a write request sent to DRAM",
+ "EventCode": "0x25",
+ "EventName": "UNC_M_ACT_COUNT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS command sent to DRAM",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_CAS_COUNT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write CAS command sent to DRAM",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_CAS_COUNT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of clocks",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming read request page status is Page Empty",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M_DRAM_PAGE_EMPTY_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming write request page status is Page Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_DRAM_PAGE_EMPTY_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming read request page status is Page Hit",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M_DRAM_PAGE_HIT_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming write request page status is Page Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M_DRAM_PAGE_HIT_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming read request page status is Page Miss",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M_DRAM_PAGE_MISS_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "incoming write request page status is Page Miss",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_DRAM_PAGE_MISS_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Any Rank at Hot state",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_DRAM_THERMAL_HOT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Any Rank at Warm state",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M_DRAM_THERMAL_WARM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming read prefetch request from IA.",
+ "EventCode": "0x0A",
+ "EventName": "UNC_M_PREFETCH_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command sent to DRAM due to page table idle timer expiration",
+ "EventCode": "0x28",
+ "EventName": "UNC_M_PRE_COUNT_IDLE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command sent to DRAM for a read/write request",
+ "EventCode": "0x27",
+ "EventName": "UNC_M_PRE_COUNT_PAGE_MISS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming VC0 read request",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_VC0_REQUESTS_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming VC0 write request",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_VC0_REQUESTS_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming VC1 read request",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_VC1_REQUESTS_RD",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Incoming VC1 write request",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_VC1_REQUESTS_WR",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json
new file mode 100644
index 000000000000..2af92e43b28a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/uncore-other.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json b/tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json
new file mode 100644
index 000000000000..67fd640f790e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/alderlaken/virtual-memory.json
@@ -0,0 +1,47 @@
+[
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks initiated by a instruction fetch that missed the first and second level TLBs.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSED_WALK",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the head (oldest load) of the load buffer and retirement are both stalled due to a DTLB miss.",
+ "EventCode": "0x05",
+ "EventName": "LD_HEAD.DTLB_MISS_AT_RET",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x90"
+ }
+]
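
For reference, each entry above pairs an EventCode with a UMask, which is what perf ultimately programs into the core counter once these JSON files are built in. Below is a minimal sketch of that mapping, assuming the usual x86 raw encoding (event select in bits 0-7, unit mask in bits 8-15); the helper name is made up and the snippet is only a sanity check, not part of the patch.

    # Sketch only: derive the raw perf event string for an x86 core event from a
    # pmu-events JSON entry, assuming config = (umask << 8) | event_select.
    def raw_config(entry: dict) -> int:
        event = int(entry["EventCode"], 16)
        umask = int(entry.get("UMask", "0"), 16)
        return (umask << 8) | event

    entry = {"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
             "EventCode": "0x08", "UMask": "0xe"}
    print(f"perf stat -e r{raw_config(entry):04x}")   # prints: perf stat -e r0e08

Once the JSON is built into perf, the same counter is normally requested by its event name rather than the raw form.
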
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/branch.json b/tools/perf/pmu-events/arch/x86/amdzen1/branch.json
index a9943eeb8d6b..4ceb67a0db21 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/branch.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/branch.json
@@ -19,5 +19,10 @@
"EventName": "bp_de_redirect",
"EventCode": "0x91",
"BriefDescription": "Decoder Overrides Existing Branch Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB."
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
index 404d4c569c01..0d46cb82bd52 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
@@ -38,31 +38,31 @@
"EventName": "ic_fetch_stall.ic_stall_any",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ic_fetch_stall.ic_stall_dq_empty",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_fetch_stall.ic_stall_back_pressure",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ic_cache_inval.l2_invalidating_probe",
"EventCode": "0x8c",
"BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_cache_inval.fill_invalidated",
"EventCode": "0x8c",
"BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "bp_tlb_rel",
@@ -97,25 +97,30 @@
"EventName": "l2_request_g1.change_to_x",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_request_g1.prefetch_l2_cmd",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_request_g1.l2_hw_pf",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_request_g1.group2",
"EventCode": "0x60",
"BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
- "UMask": "0x1"
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_request_g1.all_no_prefetch",
+ "EventCode": "0x60",
+ "UMask": "0xf9"
},
{
"EventName": "l2_request_g2.group1",
@@ -145,31 +150,31 @@
"EventName": "l2_request_g2.ic_rd_sized_nc",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_request_g2.smc_inval",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_request_g2.bus_locks_originator",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_request_g2.bus_locks_responses",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_latency.l2_cycles_waiting_on_fills",
"EventCode": "0x62",
"BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_wcb_req.wcb_write",
@@ -187,13 +192,13 @@
"EventName": "l2_wcb_req.zero_byte_store",
"EventCode": "0x63",
"BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_wcb_req.cl_zero",
"EventCode": "0x63",
"BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_cache_req_stat.ls_rd_blk_cs",
@@ -223,31 +228,67 @@
"EventName": "l2_cache_req_stat.ls_rd_blk_c",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types).",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_cache_req_stat.ic_fill_hit_x",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_cache_req_stat.ic_fill_hit_s",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit clean line in L2.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_cache_req_stat.ic_fill_miss",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2.",
- "UMask": "0x1"
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_access_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache requests in L2.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2 and Data cache request miss in L2 (all types).",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request hit in L2 and Data cache request hit in L2 (all types).",
+ "UMask": "0xf6"
},
{
"EventName": "l2_fill_pending.l2_fill_busy",
"EventCode": "0x6d",
"BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
- "UMask": "0x1"
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_pf_hit_l2",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetcher hits in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit the L3.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetcher misses in L3. All L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches.",
+ "UMask": "0xff"
},
{
"EventName": "l3_request_g1.caching_l3_cache_accesses",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/core.json b/tools/perf/pmu-events/arch/x86/amdzen1/core.json
index 7e1aa8273935..4dceeabc4a9f 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/core.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/core.json
@@ -61,28 +61,28 @@
{
"EventName": "ex_ret_brn_ind_misp",
"EventCode": "0xca",
- "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted."
},
{
"EventName": "ex_ret_mmx_fp_instr.sse_instr",
"EventCode": "0xcb",
"BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ex_ret_mmx_fp_instr.mmx_instr",
"EventCode": "0xcb",
"BriefDescription": "MMX instructions.",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ex_ret_mmx_fp_instr.x87_instr",
"EventCode": "0xcb",
"BriefDescription": "x87 instructions.",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ex_ret_cond",
@@ -103,19 +103,19 @@
"EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ex_ret_fus_brnch_inst",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/data-fabric.json b/tools/perf/pmu-events/arch/x86/amdzen1/data-fabric.json
new file mode 100644
index 000000000000..40271df40015
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/data-fabric.json
@@ -0,0 +1,98 @@
+[
+ {
+ "EventName": "remote_outbound_data_controller_0",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 0",
+ "EventCode": "0x7c7",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_1",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 1",
+ "EventCode": "0x807",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_2",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 2",
+ "EventCode": "0x847",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_3",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 3",
+ "EventCode": "0x887",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_0",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x07",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_1",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x47",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_2",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x87",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_3",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0xc7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_4",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x107",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_5",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x147",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_6",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x187",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_7",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x1c7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
index a35542bd3b36..3995b528ebd6 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/floating-point.json
@@ -39,35 +39,35 @@
"EventCode": "0x00",
"BriefDescription": "Total number uOps assigned to all fpu pipes.",
"PublicDescription": "The number of operations (uOps) and dual-pipe uOps dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to all pipes.",
- "UMask": "0xf"
+ "UMask": "0x0f"
},
{
"EventName": "fpu_pipe_assignment.total3",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 3.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fpu_pipe_assignment.total2",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 2.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fpu_pipe_assignment.total1",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 1.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fpu_pipe_assignment.total0",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 0.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_sched_empty",
@@ -79,28 +79,28 @@
"EventCode": "0x02",
"BriefDescription": "All Ops.",
"PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8.",
- "UMask": "0x7"
+ "UMask": "0x07"
},
{
"EventName": "fp_retx87_fp_ops.div_sqr_r_ops",
"EventCode": "0x02",
"BriefDescription": "Divide and square root Ops.",
"PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Divide and square root Ops.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_retx87_fp_ops.mul_ops",
"EventCode": "0x02",
"BriefDescription": "Multiply Ops.",
"PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Multiply Ops.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_retx87_fp_ops.add_sub_ops",
"EventCode": "0x02",
"BriefDescription": "Add/subtract Ops.",
"PublicDescription": "The number of x87 floating-point Ops that have retired. The number of events logged per cycle can vary from 0 to 8. Add/subtract Ops.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_ret_sse_avx_ops.all",
@@ -142,83 +142,83 @@
"EventCode": "0x03",
"BriefDescription": "Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
"PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single precision multiply-add FLOPS. Multiply-add counts as 2 FLOPS.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_ret_sse_avx_ops.sp_div_flops",
"EventCode": "0x03",
"BriefDescription": "Single-precision divide/square root FLOPS.",
"PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision divide/square root FLOPS.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_ret_sse_avx_ops.sp_mult_flops",
"EventCode": "0x03",
"BriefDescription": "Single-precision multiply FLOPS.",
"PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision multiply FLOPS.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_ret_sse_avx_ops.sp_add_sub_flops",
"EventCode": "0x03",
"BriefDescription": "Single-precision add/subtract FLOPS.",
"PublicDescription": "This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15. Single-precision add/subtract FLOPS.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_num_mov_elim_scal_op.optimized",
"EventCode": "0x04",
"BriefDescription": "Number of Scalar Ops optimized.",
"PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Scalar Ops optimized.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_num_mov_elim_scal_op.opt_potential",
"EventCode": "0x04",
"BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
"PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of Ops that are candidates for optimization (have Z-bit either set or pass).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
"EventCode": "0x04",
"BriefDescription": "Number of SSE Move Ops eliminated.",
"PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops eliminated.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
"EventCode": "0x04",
"BriefDescription": "Number of SSE Move Ops.",
"PublicDescription": "This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes. Number of SSE Move Ops.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_retired_ser_ops.x87_ctrl_ret",
"EventCode": "0x05",
"BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
"PublicDescription": "The number of serializing Ops retired. x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_retired_ser_ops.x87_bot_ret",
"EventCode": "0x05",
"BriefDescription": "x87 bottom-executing uOps retired.",
"PublicDescription": "The number of serializing Ops retired. x87 bottom-executing uOps retired.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_retired_ser_ops.sse_ctrl_ret",
"EventCode": "0x05",
"BriefDescription": "SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
"PublicDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_retired_ser_ops.sse_bot_ret",
"EventCode": "0x05",
"BriefDescription": "SSE bottom-executing uOps retired.",
"PublicDescription": "The number of serializing Ops retired. SSE bottom-executing uOps retired.",
- "UMask": "0x1"
+ "UMask": "0x01"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/memory.json b/tools/perf/pmu-events/arch/x86/amdzen1/memory.json
index b33a3c308019..385022fb026e 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/memory.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/memory.json
@@ -3,25 +3,25 @@
"EventName": "ls_locks.bus_lock",
"EventCode": "0x25",
"BriefDescription": "Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_dispatch.ld_st_dispatch",
"EventCode": "0x29",
"BriefDescription": "Counts the number of operations dispatched to the LS unit. Unit Masks ADDed. Load-op-Stores.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_dispatch.store_dispatch",
"EventCode": "0x29",
"BriefDescription": "Counts the number of stores dispatched to the LS unit. Unit Masks ADDed.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_dispatch.ld_dispatch",
"EventCode": "0x29",
"BriefDescription": "Counts the number of loads dispatched to the LS unit. Unit Masks ADDed.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_stlf",
@@ -37,13 +37,13 @@
"EventName": "ls_mab_alloc.dc_prefetcher",
"EventCode": "0x41",
"BriefDescription": "LS MAB allocates by type - DC prefetcher.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_mab_alloc.stores",
"EventCode": "0x41",
"BriefDescription": "LS MAB allocates by type - stores.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_mab_alloc.loads",
@@ -85,61 +85,61 @@
"EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 1G size.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 2M size.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_32k_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 32K size.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Reload of a page of 4K size.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_tablewalker.iside",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks on I-side.",
- "UMask": "0xc"
+ "UMask": "0x0c"
},
{
"EventName": "ls_tablewalker.ic_type1",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks IC Type 1.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_tablewalker.ic_type0",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks IC Type 0.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_tablewalker.dside",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks on D-side.",
- "UMask": "0x3"
+ "UMask": "0x03"
},
{
"EventName": "ls_tablewalker.dc_type1",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks DC Type 1.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_tablewalker.dc_type0",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks DC Type 0.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_misal_accesses",
@@ -150,31 +150,31 @@
"EventName": "ls_pref_instr_disp.prefetch_nta",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions (PREFETCHNTA instruction) Dispatched.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_pref_instr_disp.store_prefetch_w",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions (3DNow PREFETCHW instruction) Dispatched.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_pref_instr_disp.load_prefetch_w",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions Dispatched. Prefetch, Prefetch_T0_T1_T2.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_inef_sw_pref.mab_mch_cnt",
"EventCode": "0x52",
"BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
"EventCode": "0x52",
"BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_not_halted_cyc",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/other.json b/tools/perf/pmu-events/arch/x86/amdzen1/other.json
index ff780098d36e..7626986ce1fb 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen1/other.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/other.json
@@ -3,13 +3,13 @@
"EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
"EventCode": "0x28a",
"BriefDescription": "OC Mode Switch. OC to IC mode switch.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
"EventCode": "0x28a",
"BriefDescription": "OC Mode Switch. IC to OC mode switch.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "de_dis_dispatch_token_stalls0.retire_token_stall",
@@ -33,24 +33,24 @@
"EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3_0 Tokens unavailable.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq3_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 3 Tokens unavailable.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
- "UMask": "0x1"
+ "UMask": "0x01"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
new file mode 100644
index 000000000000..bf5083c1c260
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
@@ -0,0 +1,178 @@
+[
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "BriefDescription": "Execution-Time Branch Misprediction Ratio (Non-Speculative)",
+ "MetricExpr": "d_ratio(ex_ret_brn_misp, ex_ret_brn)",
+ "MetricGroup": "branch_prediction",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "all_dc_accesses",
+ "EventCode": "0x29",
+ "BriefDescription": "All L1 Data Cache Accesses",
+ "UMask": "0x07"
+ },
+ {
+ "MetricName": "all_l2_cache_accesses",
+ "BriefDescription": "All L2 Cache Accesses",
+ "MetricExpr": "l2_request_g1.all_no_prefetch + l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_ic_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Instruction Cache Misses (including prefetch)",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_dc_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Data Cache Misses (including prefetch)",
+ "UMask": "0xc8"
+ },
+ {
+ "MetricName": "l2_cache_accesses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Accesses from L2 HWPF",
+ "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_misses",
+ "BriefDescription": "All L2 Cache Misses",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_misses_from_ic_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Instruction Cache Misses",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_misses_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Data Cache Misses",
+ "UMask": "0x08"
+ },
+ {
+ "MetricName": "l2_cache_misses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Misses from L2 HWPF",
+ "MetricExpr": "l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_hits",
+ "BriefDescription": "All L2 Cache Hits",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_hits_from_ic_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Instruction Cache Misses",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "l2_cache_hits_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Data Cache Misses",
+ "UMask": "0x70"
+ },
+ {
+ "EventName": "l2_cache_hits_from_l2_hwpf",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 Cache Hits from L2 HWPF",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l3_accesses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Accesses",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_misses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Misses (includes Chg2X)",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "MetricName": "l3_read_miss_latency",
+ "BriefDescription": "Average L3 Read Miss Latency (in core clocks)",
+ "MetricExpr": "(xi_sys_fill_latency * 16) / xi_ccx_sdp_req1.all_l3_miss_req_typs",
+ "MetricGroup": "l3_cache",
+ "ScaleUnit": "1core clocks"
+ },
+ {
+ "MetricName": "ic_fetch_miss_ratio",
+ "BriefDescription": "L1 Instruction Cache (32B) Fetch Miss Ratio",
+ "MetricExpr": "d_ratio(l2_cache_req_stat.ic_access_in_l2, bp_l1_tlb_fetch_hit + bp_l1_tlb_miss_l2_hit + bp_l1_tlb_miss_l2_miss)",
+ "MetricGroup": "l2_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "l1_itlb_misses",
+ "BriefDescription": "L1 ITLB Misses",
+ "MetricExpr": "bp_l1_tlb_miss_l2_hit + bp_l1_tlb_miss_l2_miss",
+ "MetricGroup": "tlb"
+ },
+ {
+ "EventName": "l2_itlb_misses",
+ "EventCode": "0x85",
+ "BriefDescription": "L2 ITLB Misses & Instruction page walks",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l1_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Misses",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L2 DTLB Misses & Data page walks",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "all_tlbs_flushed",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLBs Flushed",
+ "UMask": "0xdf"
+ },
+ {
+ "EventName": "uops_dispatched",
+ "EventCode": "0xaa",
+ "BriefDescription": "Micro-ops Dispatched",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "sse_avx_stalls",
+ "EventCode": "0x0e",
+ "BriefDescription": "Mixed SSE/AVX Stalls",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "uops_retired",
+ "EventCode": "0xc1",
+ "BriefDescription": "Micro-ops Retired"
+ },
+ {
+ "MetricName": "all_remote_links_outbound",
+ "BriefDescription": "Approximate: Outbound data bytes for all Remote Links for a node (die)",
+ "MetricExpr": "remote_outbound_data_controller_0 + remote_outbound_data_controller_1 + remote_outbound_data_controller_2 + remote_outbound_data_controller_3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "3e-5MiB"
+ },
+ {
+ "MetricName": "nps1_die_to_dram",
+ "BriefDescription": "Approximate: Combined DRAM B/bytes of all channels on a NPS1 node (die) (may need --metric-no-group)",
+ "MetricExpr": "dram_channel_data_controller_0 + dram_channel_data_controller_1 + dram_channel_data_controller_2 + dram_channel_data_controller_3 + dram_channel_data_controller_4 + dram_channel_data_controller_5 + dram_channel_data_controller_6 + dram_channel_data_controller_7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.1e-5MiB"
+ }
+]
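
The recommended metrics above only combine the raw events: d_ratio(a, b) is understood here as a guarded division (a / b, or 0 when b is 0), and the ScaleUnit fields encode the bytes carried per event (32 B per remote-link data packet, roughly 3e-5 MiB; 64 B per DRAM channel request, roughly 6.1e-5 MiB). A small sketch of that arithmetic, under those assumptions and with made-up counter values:

    # Sketch of the metric arithmetic, assuming d_ratio is a zero-guarded division
    # and ScaleUnit simply scales the summed counts (64 B/event ~= 6.1e-5 MiB).
    def d_ratio(a: float, b: float) -> float:
        return a / b if b else 0.0

    # hypothetical counts read from the counters
    ex_ret_brn_misp, ex_ret_brn = 1_200, 150_000
    dram_channels = [4_000_000] * 8      # dram_channel_data_controller_0..7

    branch_misprediction_ratio = d_ratio(ex_ret_brn_misp, ex_ret_brn)
    nps1_die_to_dram_mib = sum(dram_channels) * 64 / (1 << 20)
    print(f"{branch_misprediction_ratio:.1%}, {nps1_die_to_dram_mib:.1f} MiB")   # 0.8%, 1953.1 MiB
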
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/branch.json b/tools/perf/pmu-events/arch/x86/amdzen2/branch.json
index ef4166a66288..84fb43fa59ad 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/branch.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/branch.json
@@ -24,25 +24,25 @@
"EventName": "bp_l1_tlb_fetch_hit",
"EventCode": "0x94",
"BriefDescription": "The number of instruction fetches that hit in the L1 ITLB.",
- "UMask": "0xFF"
+ "UMask": "0xff"
},
{
"EventName": "bp_l1_tlb_fetch_hit.if1g",
"EventCode": "0x94",
"BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 1GB page.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "bp_l1_tlb_fetch_hit.if2m",
"EventCode": "0x94",
"BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 2MB page.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "bp_l1_tlb_fetch_hit.if4k",
"EventCode": "0x94",
"BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. Instruction fetches to a 4KB page.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "bp_tlb_rel",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
index 1c60bfa0f00b..c858fb9477e3 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
@@ -27,25 +27,30 @@
"EventName": "l2_request_g1.change_to_x",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_request_g1.prefetch_l2_cmd",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_request_g1.l2_hw_pf",
"EventCode": "0x60",
"BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_request_g1.group2",
"EventCode": "0x60",
"BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
- "UMask": "0x1"
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_request_g1.all_no_prefetch",
+ "EventCode": "0x60",
+ "UMask": "0xf9"
},
{
"EventName": "l2_request_g2.group1",
@@ -75,31 +80,31 @@
"EventName": "l2_request_g2.ic_rd_sized_nc",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_request_g2.smc_inval",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_request_g2.bus_locks_originator",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_request_g2.bus_locks_responses",
"EventCode": "0x61",
"BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_latency.l2_cycles_waiting_on_fills",
"EventCode": "0x62",
"BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_wcb_req.wcb_write",
@@ -117,13 +122,13 @@
"EventName": "l2_wcb_req.zero_byte_store",
"EventCode": "0x63",
"BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_wcb_req.cl_zero",
"EventCode": "0x63",
"BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_cache_req_stat.ls_rd_blk_cs",
@@ -153,36 +158,54 @@
"EventName": "l2_cache_req_stat.ls_rd_blk_c",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types).",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "l2_cache_req_stat.ic_fill_hit_x",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "l2_cache_req_stat.ic_fill_hit_s",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit clean line in L2.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "l2_cache_req_stat.ic_fill_miss",
"EventCode": "0x64",
"BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2.",
- "UMask": "0x1"
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_access_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache requests in L2.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2 and Data cache request miss in L2 (all types).",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request hit in L2 and Data cache request hit in L2 (all types).",
+ "UMask": "0xf6"
},
{
"EventName": "l2_fill_pending.l2_fill_busy",
"EventCode": "0x6d",
"BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l2_pf_hit_l2",
"EventCode": "0x70",
- "BriefDescription": "L2 prefetch hit in L2.",
+ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
"UMask": "0xff"
},
{
@@ -232,19 +255,19 @@
"EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g",
"EventCode": "0x85",
"BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 1GB page.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m",
"EventCode": "0x85",
"BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 2MB page.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k",
"EventCode": "0x85",
"BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs. Instruction fetches to a 4KB page.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "bp_snp_re_sync",
@@ -255,43 +278,43 @@
"EventName": "ic_fetch_stall.ic_stall_any",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ic_fetch_stall.ic_stall_dq_empty",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_fetch_stall.ic_stall_back_pressure",
"EventCode": "0x87",
"BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ic_cache_inval.l2_invalidating_probe",
"EventCode": "0x8c",
"BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_cache_inval.fill_invalidated",
"EventCode": "0x8c",
"BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
"EventCode": "0x28a",
"BriefDescription": "OC Mode Switch. OC to IC mode switch.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
"EventCode": "0x28a",
"BriefDescription": "OC Mode Switch. IC to OC mode switch.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "l3_request_g1.caching_l3_cache_accesses",
@@ -330,7 +353,7 @@
},
{
"EventName": "xi_ccx_sdp_req1.all_l3_miss_req_typs",
- "EventCode": "0x9A",
+ "EventCode": "0x9a",
"BriefDescription": "All L3 Miss Request Types. Ignores SliceMask and ThreadMask.",
"UMask": "0x3f",
"Unit": "L3PMC"
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/core.json b/tools/perf/pmu-events/arch/x86/amdzen2/core.json
index de89e5a44ff1..bed14829f0bc 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/core.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/core.json
@@ -68,21 +68,21 @@
"EventCode": "0xcb",
"BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ex_ret_mmx_fp_instr.mmx_instr",
"EventCode": "0xcb",
"BriefDescription": "MMX instructions.",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ex_ret_mmx_fp_instr.x87_instr",
"EventCode": "0xcb",
"BriefDescription": "x87 instructions.",
"PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ex_ret_cond",
@@ -108,23 +108,23 @@
"EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
"EventCode": "0x1cf",
"BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ex_ret_fus_brnch_inst",
"EventCode": "0x1d0",
- "BriefDescription": "Retired Fused Instructions. The number of fuse-branch instructions retired per cycle. The number of events logged per cycle can vary from 0-8.",
+ "BriefDescription": "Retired Fused Instructions. The number of fuse-branch instructions retired per cycle. The number of events logged per cycle can vary from 0-8."
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/data-fabric.json b/tools/perf/pmu-events/arch/x86/amdzen2/data-fabric.json
new file mode 100644
index 000000000000..40271df40015
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/data-fabric.json
@@ -0,0 +1,98 @@
+[
+ {
+ "EventName": "remote_outbound_data_controller_0",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 0",
+ "EventCode": "0x7c7",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_1",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 1",
+ "EventCode": "0x807",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_2",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 2",
+ "EventCode": "0x847",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_3",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 3",
+ "EventCode": "0x887",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_0",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x07",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_1",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x47",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_2",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x87",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_3",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0xc7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_4",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x107",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_5",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x147",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_6",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x187",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_7",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x1c7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
index 622a0c420e46..91ed96f2580b 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/floating-point.json
@@ -4,35 +4,35 @@
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps.",
"PublicDescription": "Total number of fp uOps. The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS.",
- "UMask": "0xf"
+ "UMask": "0x0f"
},
{
"EventName": "fpu_pipe_assignment.total3",
"EventCode": "0x00",
"BriefDescription": "Total number uOps assigned to pipe 3.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fpu_pipe_assignment.total2",
"EventCode": "0x00",
"BriefDescription": "Total number uOps assigned to pipe 2.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fpu_pipe_assignment.total1",
"EventCode": "0x00",
"BriefDescription": "Total number uOps assigned to pipe 1.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fpu_pipe_assignment.total0",
"EventCode": "0x00",
"BriefDescription": "Total number of fp uOps on pipe 0.",
"PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_ret_sse_avx_ops.all",
@@ -45,96 +45,96 @@
"EventCode": "0x03",
"BriefDescription": "Multiply-add FLOPS. Multiply-add counts as 2 FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
"PublicDescription": "",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_ret_sse_avx_ops.div_flops",
"EventCode": "0x03",
"BriefDescription": "Divide/square root FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_ret_sse_avx_ops.mult_flops",
"EventCode": "0x03",
"BriefDescription": "Multiply FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_ret_sse_avx_ops.add_sub_flops",
"EventCode": "0x03",
"BriefDescription": "Add/subtract FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_num_mov_elim_scal_op.optimized",
"EventCode": "0x04",
"BriefDescription": "Number of Scalar Ops optimized. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_num_mov_elim_scal_op.opt_potential",
"EventCode": "0x04",
"BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass). This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
"EventCode": "0x04",
"BriefDescription": "Number of SSE Move Ops eliminated. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
"EventCode": "0x04",
"BriefDescription": "Number of SSE Move Ops. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_retired_ser_ops.sse_bot_ret",
"EventCode": "0x05",
"BriefDescription": "SSE bottom-executing uOps retired. The number of serializing Ops retired.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_retired_ser_ops.sse_ctrl_ret",
"EventCode": "0x05",
"BriefDescription": "The number of serializing Ops retired. SSE control word mispredict traps due to mispredictions in RC, FTZ or DAZ, or changes in mask bits.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_retired_ser_ops.x87_bot_ret",
"EventCode": "0x05",
"BriefDescription": "x87 bottom-executing uOps retired. The number of serializing Ops retired.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_retired_ser_ops.x87_ctrl_ret",
"EventCode": "0x05",
"BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits. The number of serializing Ops retired.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "fp_disp_faults.ymm_spill_fault",
"EventCode": "0x0e",
"BriefDescription": "Floating Point Dispatch Faults. YMM spill fault.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "fp_disp_faults.ymm_fill_fault",
"EventCode": "0x0e",
"BriefDescription": "Floating Point Dispatch Faults. YMM fill fault.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "fp_disp_faults.xmm_fill_fault",
"EventCode": "0x0e",
"BriefDescription": "Floating Point Dispatch Faults. XMM fill fault.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "fp_disp_faults.x87_fill_fault",
"EventCode": "0x0e",
"BriefDescription": "Floating Point Dispatch Faults. x87 fill fault.",
- "UMask": "0x1"
+ "UMask": "0x01"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/memory.json b/tools/perf/pmu-events/arch/x86/amdzen2/memory.json
index 715046b339cb..89822b9ddb79 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/memory.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/memory.json
@@ -4,31 +4,31 @@
"EventCode": "0x24",
"BriefDescription": "Non-forwardable conflict; used to reduce STLI's via software. All reasons. Store To Load Interlock (STLI) are loads that were unable to complete because of a possible match with an older store, and the older store could not do STLF for some reason.",
"PublicDescription" : "Store-to-load conflicts: A load was unable to complete due to a non-forwardable conflict with an older store. Most commonly, a load's address range partially but not completely overlaps with an uncompleted older store. Software can avoid this problem by using same-size and same-alignment loads and stores when accessing the same data. Vector/SIMD code is particularly susceptible to this problem; software should construct wide vector stores by manipulating vector elements in registers using shuffle/blend/swap instructions prior to storing to memory, instead of using narrow element-by-element stores.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_locks.spec_lock_hi_spec",
"EventCode": "0x25",
"BriefDescription": "Retired lock instructions. High speculative cacheable lock speculation succeeded.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_locks.spec_lock_lo_spec",
"EventCode": "0x25",
"BriefDescription": "Retired lock instructions. Low speculative cacheable lock speculation succeeded.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_locks.non_spec_lock",
"EventCode": "0x25",
"BriefDescription": "Retired lock instructions. Non-speculative lock succeeded.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_locks.bus_lock",
"EventCode": "0x25",
"BriefDescription": "Retired lock instructions. Bus lock when a locked operations crosses a cache boundary or is done on an uncacheable memory type. Comparable to legacy bus lock.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_ret_cl_flush",
@@ -44,33 +44,33 @@
"EventName": "ls_dispatch.ld_st_dispatch",
"EventCode": "0x29",
"BriefDescription": "Dispatch of a single op that performs a load from and store to the same memory address. Number of single ops that do load/store to an address.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_dispatch.store_dispatch",
"EventCode": "0x29",
"BriefDescription": "Number of stores dispatched. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_dispatch.ld_dispatch",
"EventCode": "0x29",
"BriefDescription": "Number of loads dispatched. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_smi_rx",
- "EventCode": "0x2B",
+ "EventCode": "0x2b",
"BriefDescription": "Number of SMIs received."
},
{
"EventName": "ls_int_taken",
- "EventCode": "0x2C",
+ "EventCode": "0x2c",
"BriefDescription": "Number of interrupts taken."
},
{
"EventName": "ls_rdtsc",
- "EventCode": "0x2D",
+ "EventCode": "0x2d",
"BriefDescription": "Number of reads of the TSC (RDTSC instructions). The count is speculative."
},
{
@@ -93,19 +93,19 @@
"EventName": "ls_mab_alloc.dc_prefetcher",
"EventCode": "0x41",
"BriefDescription": "LS MAB Allocates by Type. DC prefetcher.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_mab_alloc.stores",
"EventCode": "0x41",
"BriefDescription": "LS MAB Allocates by Type. Stores.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_mab_alloc.loads",
"EventCode": "0x41",
"BriefDescription": "LS MAB Allocates by Type. Loads.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_refills_from_sys.ls_mabresp_rmt_dram",
@@ -123,19 +123,19 @@
"EventName": "ls_refills_from_sys.ls_mabresp_lcl_dram",
"EventCode": "0x43",
"BriefDescription": "Demand Data Cache Fills by Data Source. DRAM or IO from this thread's die.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_refills_from_sys.ls_mabresp_lcl_cache",
"EventCode": "0x43",
"BriefDescription": "Demand Data Cache Fills by Data Source. Hit in cache; local CCX (not Local L2), or Remote CCX and the address's Home Node is on this thread's die.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_refills_from_sys.ls_mabresp_lcl_l2",
"EventCode": "0x43",
"BriefDescription": "Demand Data Cache Fills by Data Source. Local L2 hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_l1_d_tlb_miss.all",
@@ -171,61 +171,61 @@
"EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that hit in the L2 TLB.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that hit in the L2 TLB.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss. DTLB reload hit a coalesced page.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
"EventCode": "0x45",
"BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that hit in the L2 TLB.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_tablewalker.iside",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks on I-side.",
- "UMask": "0xc"
+ "UMask": "0x0c"
},
{
"EventName": "ls_tablewalker.ic_type1",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks IC Type 1.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_tablewalker.ic_type0",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks IC Type 0.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_tablewalker.dside",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks on D-side.",
- "UMask": "0x3"
+ "UMask": "0x03"
},
{
"EventName": "ls_tablewalker.dc_type1",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks DC Type 1.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_tablewalker.dc_type0",
"EventCode": "0x46",
"BriefDescription": "Total Page Table Walks DC Type 0.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_misal_accesses",
@@ -242,31 +242,31 @@
"EventName": "ls_pref_instr_disp.prefetch_nta",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchNTA instruction. See docAPM3 PREFETCHlevel.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "ls_pref_instr_disp.prefetch_w",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). See docAPM3 PREFETCHW.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_pref_instr_disp.prefetch",
"EventCode": "0x4b",
"BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). Prefetch_T0_T1_T2. PrefetchT0, T1 and T2 instructions. See docAPM3 PREFETCHlevel.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_inef_sw_pref.mab_mch_cnt",
"EventCode": "0x52",
"BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
"EventCode": "0x52",
"BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_sw_pf_dc_fill.ls_mabresp_rmt_dram",
@@ -284,49 +284,49 @@
"EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_dram",
"EventCode": "0x59",
"BriefDescription": "Software Prefetch Data Cache Fills by Data Source. DRAM or IO from this thread's die. From DRAM (home node local).",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_cache",
"EventCode": "0x59",
"BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From another cache (home node local).",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_sw_pf_dc_fill.ls_mabresp_lcl_l2",
"EventCode": "0x59",
"BriefDescription": "Software Prefetch Data Cache Fills by Data Source. Local L2 hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_rmt_dram",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM (home node remote).",
"UMask": "0x40"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_rmt_cache",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From another cache (home node remote).",
"UMask": "0x10"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_dram",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM (home node local).",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_cache",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From another cache (home node local).",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "ls_hw_pf_dc_fill.ls_mabresp_lcl_l2",
- "EventCode": "0x5A",
+ "EventCode": "0x5a",
"BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. Local L2 hit.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "ls_not_halted_cyc",
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/other.json b/tools/perf/pmu-events/arch/x86/amdzen2/other.json
index e94994d4a60e..1bdf106ca785 100644
--- a/tools/perf/pmu-events/arch/x86/amdzen2/other.json
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/other.json
@@ -14,13 +14,13 @@
"EventName": "de_dis_uops_from_decoder.opcache_dispatched",
"EventCode": "0xaa",
"BriefDescription": "Count of dispatched Ops from OpCache.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "de_dis_uops_from_decoder.decoder_dispatched",
"EventCode": "0xaa",
"BriefDescription": "Count of dispatched Ops from Decoder.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "de_dis_dispatch_token_stalls1.fp_misc_rsrc_stall",
@@ -50,25 +50,25 @@
"EventName": "de_dis_dispatch_token_stalls1.int_sched_misc_token_stall",
"EventCode": "0xae",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Scheduler miscellaneous resource stall.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "de_dis_dispatch_token_stalls1.store_queue_token_stall",
"EventCode": "0xae",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Store queue resource stall. Applies to all ops with store semantics.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "de_dis_dispatch_token_stalls1.load_queue_token_stall",
"EventCode": "0xae",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Load queue resource stall. Applies to all ops with load semantics.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "de_dis_dispatch_token_stalls1.int_phy_reg_file_token_stall",
"EventCode": "0xae",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Physical Register File resource stall. Applies to all ops that have an integer destination register.",
- "UMask": "0x1"
+ "UMask": "0x01"
},
{
"EventName": "de_dis_dispatch_token_stalls0.sc_agu_dispatch_stall",
@@ -92,24 +92,24 @@
"EventName": "de_dis_dispatch_token_stalls0.alu_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALU tokens total unavailable.",
- "UMask": "0x8"
+ "UMask": "0x08"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq3_0_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ3_0_TokenStall.",
- "UMask": "0x4"
+ "UMask": "0x04"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq2_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 2 Tokens unavailable.",
- "UMask": "0x2"
+ "UMask": "0x02"
},
{
"EventName": "de_dis_dispatch_token_stalls0.alsq1_token_stall",
"EventCode": "0xaf",
"BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. ALSQ 1 Tokens unavailable.",
- "UMask": "0x1"
+ "UMask": "0x01"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
new file mode 100644
index 000000000000..a71694a043ba
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
@@ -0,0 +1,178 @@
+[
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "BriefDescription": "Execution-Time Branch Misprediction Ratio (Non-Speculative)",
+ "MetricExpr": "d_ratio(ex_ret_brn_misp, ex_ret_brn)",
+ "MetricGroup": "branch_prediction",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "all_dc_accesses",
+ "EventCode": "0x29",
+ "BriefDescription": "All L1 Data Cache Accesses",
+ "UMask": "0x07"
+ },
+ {
+ "MetricName": "all_l2_cache_accesses",
+ "BriefDescription": "All L2 Cache Accesses",
+ "MetricExpr": "l2_request_g1.all_no_prefetch + l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_ic_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Instruction Cache Misses (including prefetch)",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_dc_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Data Cache Misses (including prefetch)",
+ "UMask": "0xc8"
+ },
+ {
+ "MetricName": "l2_cache_accesses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Accesses from L2 HWPF",
+ "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_misses",
+ "BriefDescription": "All L2 Cache Misses",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_misses_from_ic_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Instruction Cache Misses",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_misses_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Data Cache Misses",
+ "UMask": "0x08"
+ },
+ {
+ "MetricName": "l2_cache_misses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Misses from L2 HWPF",
+ "MetricExpr": "l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_hits",
+ "BriefDescription": "All L2 Cache Hits",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_hits_from_ic_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Instruction Cache Misses",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "l2_cache_hits_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Data Cache Misses",
+ "UMask": "0x70"
+ },
+ {
+ "EventName": "l2_cache_hits_from_l2_hwpf",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 Cache Hits from L2 HWPF",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l3_accesses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Accesses",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_misses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Misses (includes Chg2X)",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "MetricName": "l3_read_miss_latency",
+ "BriefDescription": "Average L3 Read Miss Latency (in core clocks)",
+ "MetricExpr": "(xi_sys_fill_latency * 16) / xi_ccx_sdp_req1.all_l3_miss_req_typs",
+ "MetricGroup": "l3_cache",
+ "ScaleUnit": "1core clocks"
+ },
+ {
+ "MetricName": "ic_fetch_miss_ratio",
+ "BriefDescription": "L1 Instruction Cache (32B) Fetch Miss Ratio",
+ "MetricExpr": "d_ratio(l2_cache_req_stat.ic_access_in_l2, bp_l1_tlb_fetch_hit + bp_l1_tlb_miss_l2_hit + bp_l1_tlb_miss_l2_tlb_miss)",
+ "MetricGroup": "l2_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "l1_itlb_misses",
+ "BriefDescription": "L1 ITLB Misses",
+ "MetricExpr": "bp_l1_tlb_miss_l2_hit + bp_l1_tlb_miss_l2_tlb_miss",
+ "MetricGroup": "tlb"
+ },
+ {
+ "EventName": "l2_itlb_misses",
+ "EventCode": "0x85",
+ "BriefDescription": "L2 ITLB Misses & Instruction page walks",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l1_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Misses",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L2 DTLB Misses & Data page walks",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "all_tlbs_flushed",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLBs Flushed",
+ "UMask": "0xdf"
+ },
+ {
+ "EventName": "uops_dispatched",
+ "EventCode": "0xaa",
+ "BriefDescription": "Micro-ops Dispatched",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "sse_avx_stalls",
+ "EventCode": "0x0e",
+ "BriefDescription": "Mixed SSE/AVX Stalls",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "uops_retired",
+ "EventCode": "0xc1",
+ "BriefDescription": "Micro-ops Retired"
+ },
+ {
+ "MetricName": "all_remote_links_outbound",
+ "BriefDescription": "Approximate: Outbound data bytes for all Remote Links for a node (die)",
+ "MetricExpr": "remote_outbound_data_controller_0 + remote_outbound_data_controller_1 + remote_outbound_data_controller_2 + remote_outbound_data_controller_3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "3e-5MiB"
+ },
+ {
+ "MetricName": "nps1_die_to_dram",
+ "BriefDescription": "Approximate: Combined DRAM B/bytes of all channels on a NPS1 node (die) (may need --metric-no-group)",
+ "MetricExpr": "dram_channel_data_controller_0 + dram_channel_data_controller_1 + dram_channel_data_controller_2 + dram_channel_data_controller_3 + dram_channel_data_controller_4 + dram_channel_data_controller_5 + dram_channel_data_controller_6 + dram_channel_data_controller_7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.1e-5MiB"
+ }
+]
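The ScaleUnit values above fold the per-event transfer size into the metric: each remote-link event represents a 32B packet and each DRAM-channel event a 64B request (per the PublicDescription fields), expressed as a fraction of a MiB. A minimal sketch of that arithmetic, assuming exactly those transfer sizes:

    # Derivation of the data-fabric ScaleUnit factors (assumes 32B per
    # remote-link event and 64B per DRAM-channel event, as stated in the
    # PublicDescription fields above).
    MIB = 1024 * 1024

    remote_link_mib_per_event = 32 / MIB   # ~3.05e-05 -> "ScaleUnit": "3e-5MiB"
    dram_channel_mib_per_event = 64 / MIB  # ~6.10e-05 -> "ScaleUnit": "6.1e-5MiB"

    print(f"remote link:  {remote_link_mib_per_event:.2e} MiB/event")
    print(f"dram channel: {dram_channel_mib_per_event:.2e} MiB/event")

Multiplying the summed counts in all_remote_links_outbound and nps1_die_to_dram by these factors is what turns raw event counts into the approximate MiB totals the metrics report.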
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/branch.json b/tools/perf/pmu-events/arch/x86/amdzen3/branch.json
new file mode 100644
index 000000000000..018a7fe94fb9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/branch.json
@@ -0,0 +1,53 @@
+[
+ {
+ "EventName": "bp_l1_btb_correct",
+ "EventCode": "0x8a",
+ "BriefDescription": "L1 Branch Prediction Overrides Existing Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 Branch Prediction Overrides Existing Prediction (speculative)."
+ },
+ {
+ "EventName": "bp_dyn_ind_pred",
+ "EventCode": "0x8e",
+ "BriefDescription": "Dynamic Indirect Predictions.",
+ "PublicDescription": "The number of times a branch used the indirect predictor to make a prediction."
+ },
+ {
+ "EventName": "bp_de_redirect",
+ "EventCode": "0x91",
+ "BriefDescription": "Decode Redirects",
+ "PublicDescription": "The number of times the instruction decoder overrides the predicted target."
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if1g",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instruction TLB hit (1G page size).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if2m",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instruction TLB hit (2M page size).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if4k",
+ "EventCode": "0x94",
+ "BriefDescription": "The number of instruction fetches that hit in the L1 ITLB. L1 Instrcution TLB hit (4K or 16K page size).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "bp_tlb_rel",
+ "EventCode": "0x99",
+ "BriefDescription": "The number of ITLB reload requests."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/cache.json b/tools/perf/pmu-events/arch/x86/amdzen3/cache.json
new file mode 100644
index 000000000000..fa1d7499a2e3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/cache.json
@@ -0,0 +1,402 @@
+[
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache reads (including hardware and software prefetch).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache stores.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache shared reads.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Instruction cache reads.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). Data cache state change requests. Request change to writable, check L2 for current state.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2_cmd",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). PrefetchL2Cmd.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 1 - Common). L2 Prefetcher. All prefetches accepted by L2 pipeline, hit or miss. Types of PF and L2 hit/miss broken out in a separate perfmon event.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_request_g1.group2",
+ "EventCode": "0x60",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g2 (PMCx061).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_request_g1.all_no_prefetch",
+ "EventCode": "0x60",
+ "UMask": "0xf9"
+ },
+ {
+ "EventName": "l2_request_g2.group1",
+ "EventCode": "0x61",
+ "BriefDescription": "Miscellaneous events covered in more detail by l2_request_g1 (PMCx060).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g2.ls_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Data cache read sized non-cacheable.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g2.ic_rd_sized_nc",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Instruction cache read sized non-cacheable.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_request_g2.smc_inval",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Self-modifying code invalidates.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_originator",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus locks.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_request_g2.bus_locks_responses",
+ "EventCode": "0x61",
+ "BriefDescription": "All L2 Cache Requests (Breakdown 2 - Rare). Bus lock response.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_latency.l2_cycles_waiting_on_fills",
+ "EventCode": "0x62",
+ "BriefDescription": "Total cycles spent waiting for L2 fills to complete from L3 or memory, divided by four. Event counts are for both threads. To calculate average latency, the number of fills from both threads must be used.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_write",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB write requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) write requests.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_wcb_req.wcb_close",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB close requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) close requests.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_wcb_req.zero_byte_store",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB zero byte store requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) zero byte store requests.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_wcb_req.cl_zero",
+ "EventCode": "0x63",
+ "BriefDescription": "LS to L2 WCB cache line zeroing requests. LS (Load/Store unit) to L2 WCB (Write Combining Buffer) cache line zeroing requests.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache shared read hit in L2",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit in L2. Modifiable.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache read hit non-modifiable line in L2.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache store or state change hit in L2.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Data cache request miss in L2 (all types). Use l2_cache_misses_from_dc_misses instead.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit modifiable line in L2.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache hit non-modifiable line in L2.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2. Use l2_cache_misses_from_ic_miss instead.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_access_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache requests in L2.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request miss in L2 and Data cache request miss in L2 (all types).",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cacheable request access status (not including L2 Prefetch). Instruction cache request hit in L2 and Data cache request hit in L2 (all types).",
+ "UMask": "0xf6"
+ },
+ {
+ "EventName": "l2_fill_pending.l2_fill_busy",
+ "EventCode": "0x6d",
+ "BriefDescription": "Cycles with fill pending from L2. Total cycles spent with one or more fill requests in flight from L2.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_pf_hit_l2",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetcher hits in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit the L3.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetcher misses in L3. Counts all L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ic_fw32",
+ "EventCode": "0x80",
+ "BriefDescription": "The number of 32B fetch windows transferred from IC pipe to DE instruction decoder (includes non-cacheable and cacheable fill responses)."
+ },
+ {
+ "EventName": "ic_fw32_miss",
+ "EventCode": "0x81",
+ "BriefDescription": "The number of 32B fetch windows tried to read the L1 IC and missed in the full tag."
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "Instruction Cache Refills from L2. The number of 64 byte instruction cache line was fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "Instruction Cache Refills from System. The number of 64 byte instruction cache line fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "L1 ITLB Miss, L2 ITLB Hit. The number of instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of instruction fetches that miss in both the L1 and L2 TLBs.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.coalesced_4k",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for >4K Coalesced page.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for 1G page.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk for 2M page.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k",
+ "EventCode": "0x85",
+ "BriefDescription": "The number of valid fills into the ITLB originating from the LS Page-Table Walker. Tablewalk requests are issued for L1-ITLB and L2-ITLB misses. Walk to 4K page.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "bp_snp_re_sync",
+ "EventCode": "0x86",
+ "BriefDescription": "The number of pipeline restarts caused by invalidating probes that hit on the instruction stream currently being executed. This would happen if the active instruction stream was being modified by another processor in an MP system - typically a highly unlikely event."
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_any",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle for any reason (nothing valid in pipe ICM1).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_dq_empty",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to DQ empty.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_fetch_stall.ic_stall_back_pressure",
+ "EventCode": "0x87",
+ "BriefDescription": "Instruction Pipe Stall. IC pipe was stalled during this clock cycle (including IC to OC fetches) due to back-pressure.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ic_cache_inval.l2_invalidating_probe",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to L2 invalidating probe (external or LS). The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_cache_inval.fill_invalidated",
+ "EventCode": "0x8c",
+ "BriefDescription": "IC line invalidated due to overwriting fill response. The number of instruction cache lines invalidated. A non-SMC event is CMC (cross modifying code), either from the other thread of the core or another core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.all_instruction_cache_accesses",
+ "EventCode": "0x18e",
+ "BriefDescription": "All Instruction Cache Accesses. Counts various IC tag related hit and miss events.",
+ "UMask": "0x1f"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.instruction_cache_miss",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction Cache Miss. Counts various IC tag related hit and miss events.",
+ "UMask": "0x18"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.instruction_cache_hit",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction Cache Hit. Counts various IC tag related hit and miss events.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.oc_ic_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. OC to IC mode switch.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ic_oc_mode_switch.ic_oc_mode_switch",
+ "EventCode": "0x28a",
+ "BriefDescription": "OC Mode Switch. IC to OC mode switch.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "op_cache_hit_miss.all_op_cache_accesses",
+ "EventCode": "0x28f",
+ "BriefDescription": "All Op Cache accesses. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "op_cache_hit_miss.op_cache_miss",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op Cache Miss. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "op_cache_hit_miss.op_cache_hit",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op Cache Hit. Counts Op Cache micro-tag hit/miss events",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "l3_request_g1.caching_l3_cache_accesses",
+ "EventCode": "0x01",
+ "BriefDescription": "Caching: L3 cache accesses",
+ "UMask": "0x80",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_lookup_state.all_l3_req_typs",
+ "EventCode": "0x04",
+ "BriefDescription": "All L3 Request Types. All L3 cache Requests",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.other_l3_miss_typs",
+ "EventCode": "0x06",
+ "BriefDescription": "Other L3 Miss Request Types",
+ "UMask": "0xfe",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_comb_clstr_state.request_miss",
+ "EventCode": "0x06",
+ "BriefDescription": "L3 cache misses",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_sys_fill_latency",
+ "EventCode": "0x90",
+ "BriefDescription": "L3 Cache Miss Latency. Total cycles for all transactions divided by 16. Ignores SliceMask and ThreadMask.",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "xi_ccx_sdp_req1",
+ "EventCode": "0x9a",
+ "BriefDescription": "L3 Misses by Request Type. Ignores SliceID, EnAllSlices, CoreID, EnAllCores and ThreadMask. Requires unit mask 0xFF to engage event for counting.",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ }
+]
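xi_sys_fill_latency above counts total cycles pre-divided by 16 in hardware, which is why the l3_read_miss_latency metric in the amdzen2 recommended.json earlier multiplies it back by 16 before dividing by xi_ccx_sdp_req1.all_l3_miss_req_typs. A small sketch of that reconstruction, using made-up sample counts purely for illustration:

    # Average L3 read miss latency from the two L3PMC counts
    # (hypothetical sample values; the *16 undoes the hardware divide-by-16).
    xi_sys_fill_latency = 1_000_000        # total cycles / 16, as counted
    all_l3_miss_req_typs = 40_000          # xi_ccx_sdp_req1.all_l3_miss_req_typs

    avg_latency_clk = (xi_sys_fill_latency * 16) / all_l3_miss_req_typs
    print(f"average L3 read miss latency: {avg_latency_clk:.1f} core clocks")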
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/core.json b/tools/perf/pmu-events/arch/x86/amdzen3/core.json
new file mode 100644
index 000000000000..4e27a2be359e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/core.json
@@ -0,0 +1,137 @@
+[
+ {
+ "EventName": "ex_ret_instr",
+ "EventCode": "0xc0",
+ "BriefDescription": "Retired Instructions."
+ },
+ {
+ "EventName": "ex_ret_ops",
+ "EventCode": "0xc1",
+ "BriefDescription": "Retired Ops. Use macro_ops_retired instead.",
+ "PublicDescription": "The number of macro-ops retired."
+ },
+ {
+ "EventName": "ex_ret_brn",
+ "EventCode": "0xc2",
+ "BriefDescription": "Retired Branch Instructions.",
+ "PublicDescription": "The number of branch instructions retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_misp",
+ "EventCode": "0xc3",
+ "BriefDescription": "Retired Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired branch instructions, that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn",
+ "EventCode": "0xc4",
+ "BriefDescription": "Retired Taken Branch Instructions.",
+ "PublicDescription": "The number of taken branches that were retired. This includes all types of architectural control flow changes, including exceptions and interrupts."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn_misp",
+ "EventCode": "0xc5",
+ "BriefDescription": "Retired Taken Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of retired taken branch instructions that were mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_far",
+ "EventCode": "0xc6",
+ "BriefDescription": "Retired Far Control Transfers.",
+ "PublicDescription": "The number of far control transfers retired including far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts. Far control transfers are not subject to branch prediction."
+ },
+ {
+ "EventName": "ex_ret_brn_resync",
+ "EventCode": "0xc7",
+ "BriefDescription": "Retired Branch Resyncs.",
+ "PublicDescription": "The number of resync branches. These reflect pipeline restarts due to certain microcode assists and events such as writes to the active instruction stream, among other things. Each occurrence reflects a restart penalty similar to a branch mispredict. This is relatively rare."
+ },
+ {
+ "EventName": "ex_ret_near_ret",
+ "EventCode": "0xc8",
+ "BriefDescription": "Retired Near Returns.",
+ "PublicDescription": "The number of near return instructions (RET or RET Iw) retired."
+ },
+ {
+ "EventName": "ex_ret_near_ret_mispred",
+ "EventCode": "0xc9",
+ "BriefDescription": "Retired Near Returns Mispredicted.",
+ "PublicDescription": "The number of near returns retired that were not correctly predicted by the return address predictor. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_brn_ind_misp",
+ "EventCode": "0xca",
+ "BriefDescription": "Retired Indirect Branch Instructions Mispredicted.",
+ "PublicDescription": "The number of indirect branches retired that were not correctly predicted. Each such mispredict incurs the same penalty as a mispredicted conditional branch instruction. Note that only EX mispredicts are counted."
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.sse_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "SSE instructions (SSE, SSE2, SSE3, SSSE3, SSE4A, SSE41, SSE42, AVX).",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.mmx_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "MMX instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. MMX instructions.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ex_ret_mmx_fp_instr.x87_instr",
+ "EventCode": "0xcb",
+ "BriefDescription": "x87 instructions.",
+ "PublicDescription": "The number of MMX, SSE or x87 instructions retired. The UnitMask allows the selection of the individual classes of instructions as given in the table. Each increment represents one complete instruction. Since this event includes non-numeric instructions it is not suitable for measuring MFLOPS. x87 instructions.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ex_ret_ind_brch_instr",
+ "EventCode": "0xcc",
+ "BriefDescription": "Retired Indirect Branch Instructions. The number of indirect branches retired."
+ },
+ {
+ "EventName": "ex_ret_cond",
+ "EventCode": "0xd1",
+ "BriefDescription": "Retired Conditional Branch Instructions."
+ },
+ {
+ "EventName": "ex_div_busy",
+ "EventCode": "0xd3",
+ "BriefDescription": "Div Cycles Busy count."
+ },
+ {
+ "EventName": "ex_div_count",
+ "EventCode": "0xd4",
+ "BriefDescription": "Div Op Count."
+ },
+ {
+ "EventName": "ex_ret_msprd_brnch_instr_dir_msmtch",
+ "EventCode": "0x1c7",
+ "BriefDescription": "Retired Mispredicted Branch Instructions due to Direction Mismatch",
+ "PublicDescription": "The number of retired conditional branch instructions that were not correctly predicted because of a branch direction mismatch."
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_count_rollover",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of times an op could not be tagged by IBS because of a previous tagged op that has not retired.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS that retired.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Tagged IBS Ops. Number of Ops tagged by IBS.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ex_ret_fused_instr",
+ "EventCode": "0x1d0",
+ "BriefDescription": "Counts retired Fused Instructions."
+ }
+]
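Usage sketch (illustrative only, not part of the JSON above): once perf is built with this amdzen3 branch event table, the retired-branch events can be requested by name with perf stat. The event names are taken from the file above; the options and the one-second workload are just an example.

    # count conditional branches, near returns and mispredicted near returns
    # on all CPUs for one second
    perf stat -e ex_ret_cond,ex_ret_near_ret,ex_ret_near_ret_mispred -a -- sleep 1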
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json b/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json
new file mode 100644
index 000000000000..40271df40015
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/data-fabric.json
@@ -0,0 +1,98 @@
+[
+ {
+ "EventName": "remote_outbound_data_controller_0",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 0",
+ "EventCode": "0x7c7",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_1",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 1",
+ "EventCode": "0x807",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_2",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 2",
+ "EventCode": "0x847",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_outbound_data_controller_3",
+ "PublicDescription": "Remote Link Controller Outbound Packet Types: Data (32B): Remote Link Controller 3",
+ "EventCode": "0x887",
+ "UMask": "0x02",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_0",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x07",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_1",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x47",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_2",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x87",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_3",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0xc7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_4",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x107",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_5",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x147",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_6",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x187",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "dram_channel_data_controller_7",
+ "PublicDescription": "DRAM Channel Controller Request Types: Requests with Data (64B): DRAM Channel Controller 0",
+ "EventCode": "0x1c7",
+ "UMask": "0x38",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json
new file mode 100644
index 000000000000..98cfcb9c78ec
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/floating-point.json
@@ -0,0 +1,139 @@
+[
+ {
+ "EventName": "fpu_pipe_assignment.total",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps.",
+ "PublicDescription": "Total number of fp uOps. The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total3",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 3.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one-cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 3.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total2",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 2.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 2.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total1",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number uOps assigned to pipe 1.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 1.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fpu_pipe_assignment.total0",
+ "EventCode": "0x00",
+ "BriefDescription": "Total number of fp uOps on pipe 0.",
+ "PublicDescription": "The number of operations (uOps) dispatched to each of the 4 FPU execution pipelines. This event reflects how busy the FPU pipelines are and may be used for workload characterization. This includes all operations performed by x87, MMX, and SSE instructions, including moves. Each increment represents a one- cycle dispatch event. This event is a speculative event. Since this event includes non-numeric operations it is not suitable for measuring MFLOPS. Total number uOps assigned to pipe 0.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.all",
+ "EventCode": "0x03",
+ "BriefDescription": "All FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPS. The number of events logged per cycle can vary from 0 to 64. This event can count above 15.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mac_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Multiply-Accumulate FLOPs. Each MAC operation is counted as 2 FLOPS. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Divide/square root FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Multiply FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Add/subtract FLOPs. This is a retire-based event. The number of retired SSE/AVX FLOPs. The number of events logged per cycle can vary from 0 to 64. This event requires the use of the MergeEvent since it can count above 15 events per cycle. See 2.1.17.3 [Large Increment per Cycle Events]. It does not provide a useful count without the use of the MergeEvent.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.optimized",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Scalar Ops optimized. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.opt_potential",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of Ops that are candidates for optimization (have Z-bit either set or pass). This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops_elim",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops eliminated. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_num_mov_elim_scal_op.sse_mov_ops",
+ "EventCode": "0x04",
+ "BriefDescription": "Number of SSE Move Ops. This is a dispatch based speculative event, and is useful for measuring the effectiveness of the Move elimination and Scalar code optimization schemes.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE/AVX bottom-executing ops retired. The number of serializing Ops retired.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "SSE/AVX control word mispredict traps. The number of serializing Ops retired.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 bottom-executing ops retired. The number of serializing Ops retired.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "x87 control word mispredict traps due to mispredictions in RC or PC, or changes in mask bits. The number of serializing Ops retired.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_spill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. YMM spill fault.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. YMM fill fault.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_disp_faults.xmm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. XMM fill fault.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_disp_faults.x87_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating Point Dispatch Faults. x87 fill fault.",
+ "UMask": "0x01"
+ }
+]
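Usage sketch (illustrative only, assuming a perf build that includes this amdzen3 floating-point table): the retire-based FLOP events above can be read with perf stat and divided by elapsed time to estimate FLOP/s. "./fp_workload" is a placeholder; the MergeEvent requirement mentioned in the descriptions is documented in the PPR section they cite.

    # retired SSE/AVX FLOPs for a workload; divide the count by the elapsed
    # seconds reported by perf stat to estimate FLOP/s
    perf stat -e fp_ret_sse_avx_ops.all -- ./fp_workload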
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/memory.json b/tools/perf/pmu-events/arch/x86/amdzen3/memory.json
new file mode 100644
index 000000000000..a2833955dcd2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/memory.json
@@ -0,0 +1,428 @@
+[
+ {
+ "EventName": "ls_bad_status2.stli_other",
+ "EventCode": "0x24",
+ "BriefDescription": "Non-forwardable conflict; used to reduce STLI's via software. All reasons. Store To Load Interlock (STLI) are loads that were unable to complete because of a possible match with an older store, and the older store could not do STLF for some reason.",
+ "PublicDescription" : "Store-to-load conflicts: A load was unable to complete due to a non-forwardable conflict with an older store. Most commonly, a load's address range partially but not completely overlaps with an uncompleted older store. Software can avoid this problem by using same-size and same-alignment loads and stores when accessing the same data. Vector/SIMD code is particularly susceptible to this problem; software should construct wide vector stores by manipulating vector elements in registers using shuffle/blend/swap instructions prior to storing to memory, instead of using narrow element-by-element stores.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_locks.spec_lock_hi_spec",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. High speculative cacheable lock speculation succeeded.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_locks.spec_lock_lo_spec",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Low speculative cacheable lock speculation succeeded.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_locks.non_spec_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Non-speculative lock succeeded.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_locks.bus_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired lock instructions. Comparable to legacy bus lock.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_ret_cl_flush",
+ "EventCode": "0x26",
+ "BriefDescription": "The number of retired CLFLUSH instructions. This is a non-speculative event."
+ },
+ {
+ "EventName": "ls_ret_cpuid",
+ "EventCode": "0x27",
+ "BriefDescription": "The number of CPUID instructions retired."
+ },
+ {
+ "EventName": "ls_dispatch.ld_st_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Load-op-Store Dispatch. Dispatch of a single op that performs a load from and store to the same memory address. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_dispatch.store_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Dispatch of a single op that performs a memory store. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dispatch.ld_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Dispatch of a single op that performs a memory load. Counts the number of operations dispatched to the LS unit. Unit Masks ADDed.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_smi_rx",
+ "EventCode": "0x2b",
+ "BriefDescription": "Counts the number of SMIs received."
+ },
+ {
+ "EventName": "ls_int_taken",
+ "EventCode": "0x2c",
+ "BriefDescription": "Counts the number of interrupts taken."
+ },
+ {
+ "EventName": "ls_rdtsc",
+ "EventCode": "0x2d",
+ "BriefDescription": "Number of reads of the TSC (RDTSC instructions). The count is speculative."
+ },
+ {
+ "EventName": "ls_stlf",
+ "EventCode": "0x35",
+ "BriefDescription": "Number of STLF hits."
+ },
+ {
+ "EventName": "ls_st_commit_cancel2.st_commit_cancel_wcb_full",
+ "EventCode": "0x37",
+ "BriefDescription": "A non-cacheable store and the non-cacheable commit buffer is full.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_dc_accesses",
+ "EventCode": "0x40",
+ "BriefDescription": "Number of accesses to the dcache for load/store references.",
+ "PublicDescription": "The number of accesses to the data cache for load and store references. This may include certain microcode scratchpad accesses, although these are generally rare. Each increment represents an eight-byte access, although the instruction may only be accessing a portion of that. This event is a speculative event."
+ },
+ {
+ "EventName": "ls_mab_alloc.all_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "All Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x7f"
+ },
+ {
+ "EventName": "ls_mab_alloc.hardware_prefetcher_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Hardware Prefetcher Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_mab_alloc.load_store_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Load Store Allocations. Counts when a LS pipe allocates a MAB entry.",
+ "UMask": "0x3f"
+ },
+ {
+ "EventName": "ls_mab_alloc.dc_prefetcher",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. DC prefetcher.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_mab_alloc.stores",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. Stores.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_mab_alloc.loads",
+ "EventCode": "0x41",
+ "BriefDescription": "LS MAB Allocates by Type. Loads.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.mem_io_remote",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.ext_cache_remote",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.mem_io_local",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.ext_cache_local",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.int_cache",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.lcl_l2",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.mem_io_remote",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.ext_cache_remote",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.mem_io_local",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.ext_cache_local",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.int_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.lcl_l2",
+ "EventCode": "0x44",
+ "BriefDescription": "Any Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all",
+ "EventCode": "0x45",
+ "BriefDescription": "All L1 DTLB Misses or Reloads. Use l1_dtlb_misses instead.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that also missed in the L2 TLB.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that also missed in the L2 TLB.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload coalesced page that also missed in the L2 TLB.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that missed the L2 TLB.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 1G page that hit in the L2 TLB.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 2M page that hit in the L2 TLB.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a coalesced page that hit in the L2 TLB.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Miss. DTLB reload to a 4K page that hit in the L2 TLB.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_tablewalker.iside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on I-side.",
+ "UMask": "0x0c"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 1.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_tablewalker.ic_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks IC Type 0.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_tablewalker.dside",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks on D-side.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type1",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 1.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_tablewalker.dc_type0",
+ "EventCode": "0x46",
+ "BriefDescription": "Total Page Table Walks DC Type 0.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_misal_loads.ma4k",
+ "EventCode": "0x47",
+ "BriefDescription": "The number of 4KB misaligned (i.e., page crossing) loads.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_misal_loads.ma64",
+ "EventCode": "0x47",
+ "BriefDescription": "The number of 64B misaligned (i.e., cacheline crossing) loads.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_pref_instr_disp",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative).",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_nta",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchNTA instruction. See docAPM3 PREFETCHlevel.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchW instruction. See docAPM3 PREFETCHW.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software Prefetch Instructions Dispatched (Speculative). PrefetchT0, T1 and T2 instructions. See docAPM3 PREFETCHlevel.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a match on an already-allocated miss request buffer.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+ "EventCode": "0x52",
+ "BriefDescription": "The number of software prefetches that did not fetch data outside of the processor core. Software PREFETCH instruction saw a DC hit.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.mem_io_remote",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.ext_cache_remote",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.mem_io_local",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.ext_cache_local",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.int_cache",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.lcl_l2",
+ "EventCode": "0x59",
+ "BriefDescription": "Software Prefetch Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.mem_io_remote",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in different Node.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.ext_cache_remote",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From CCX Cache in different Node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.mem_io_local",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From DRAM or IO connected in same node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.ext_cache_local",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From cache of different CCX in same node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.int_cache",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From L3 or different L2 in same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.lcl_l2",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware Prefetch Data Cache Fills by Data Source. From Local L2 to the core.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_alloc_mab_count",
+ "EventCode": "0x5f",
+ "BriefDescription": "Count of Allocated Mabs",
+ "PublicDescription": "This event counts the in-flight L1 data cache misses (allocated Miss Address Buffers) divided by 4 and rounded down each cycle unless used with the MergeEvent functionality. If the MergeEvent is used, it counts the exact number of outstanding L1 data cache misses. See 2.1.17.3 [Large Increment per Cycle Events]."
+ },
+ {
+ "EventName": "ls_not_halted_cyc",
+ "EventCode": "0x76",
+ "BriefDescription": "Cycles not in Halt."
+ },
+ {
+ "EventName": "ls_tlb_flush.all_tlb_flushes",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLB Flushes. Requires unit mask 0xFF to engage event for counting. Use all_tlbs_flushed instead",
+ "UMask": "0xff"
+ }
+]
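Usage sketch (illustrative only, not part of the JSON above): the per-source fill events in this table can be combined in a single perf stat invocation to see where demand loads are being serviced from. Event names come from the file above; "./workload" is a placeholder.

    # break down demand data cache fills by data source
    perf stat -e ls_dmnd_fills_from_sys.lcl_l2,ls_dmnd_fills_from_sys.int_cache,ls_dmnd_fills_from_sys.ext_cache_local,ls_dmnd_fills_from_sys.mem_io_local,ls_dmnd_fills_from_sys.mem_io_remote -- ./workload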
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/other.json b/tools/perf/pmu-events/arch/x86/amdzen3/other.json
new file mode 100644
index 000000000000..7da5d0791ea3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/other.json
@@ -0,0 +1,103 @@
+[
+ {
+ "EventName": "de_dis_uop_queue_empty_di0",
+ "EventCode": "0xa9",
+ "BriefDescription": "Cycles where the Micro-Op Queue is empty."
+ },
+ {
+ "EventName": "de_dis_cops_from_decoder.disp_op_type.any_integer_dispatch",
+ "EventCode": "0xab",
+ "BriefDescription": "Any Integer dispatch. Types of Oops Dispatched from Decoder.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_cops_from_decoder.disp_op_type.any_fp_dispatch",
+ "EventCode": "0xab",
+ "BriefDescription": "Any FP dispatch. Types of Oops Dispatched from Decoder.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_flush_recovery_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. FP Flush recovery stall.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_sch_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. FP scheduler resource stall. Applies to ops that use the FP scheduler.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Floating point register file resource stall. Applies to all FP ops that have a destination register.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.taken_brnch_buffer_rsrc",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Taken branch buffer resource stall.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_sched_misc_token_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Integer Scheduler miscellaneous resource stall.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.store_queue_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Store Queue resource stall. Applies to all ops with store semantics.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.load_queue_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Load Queue resource stall. Applies to all ops with load semantics.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_phy_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a Token Stall. Also counts cycles when the thread is not selected to dispatch but would have been stalled due to a Token Stall. Integer Physical Register File resource stall. Integer Physical Register File, applies to all ops that have an integer destination register.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. Insufficient Retire Queue tokens available.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.agsq_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. AGSQ Tokens unavailable.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch3_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 3 available.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 2 available.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 1 available.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Cycles where a dispatch group is valid but does not get dispatched due to a token stall. No tokens for Integer Scheduler Queue 0 available.",
+ "UMask": "0x01"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json
new file mode 100644
index 000000000000..988cf68ae825
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen3/recommended.json
@@ -0,0 +1,214 @@
+[
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "BriefDescription": "Execution-Time Branch Misprediction Ratio (Non-Speculative)",
+ "MetricExpr": "d_ratio(ex_ret_brn_misp, ex_ret_brn)",
+ "MetricGroup": "branch_prediction",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "all_data_cache_accesses",
+ "EventCode": "0x29",
+ "BriefDescription": "All L1 Data Cache Accesses",
+ "UMask": "0x07"
+ },
+ {
+ "MetricName": "all_l2_cache_accesses",
+ "BriefDescription": "All L2 Cache Accesses",
+ "MetricExpr": "l2_request_g1.all_no_prefetch + l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_ic_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Instruction Cache Misses (including prefetch)",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_accesses_from_dc_misses",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 Cache Accesses from L1 Data Cache Misses (including prefetch)",
+ "UMask": "0xe8"
+ },
+ {
+ "MetricName": "l2_cache_accesses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Accesses from L2 HWPF",
+ "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_misses",
+ "BriefDescription": "All L2 Cache Misses",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_misses_from_ic_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Instruction Cache Misses",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_misses_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Misses from L1 Data Cache Misses",
+ "UMask": "0x08"
+ },
+ {
+ "MetricName": "l2_cache_misses_from_l2_hwpf",
+ "BriefDescription": "L2 Cache Misses from L2 Cache HWPF",
+ "MetricExpr": "l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_hits",
+ "BriefDescription": "All L2 Cache Hits",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "EventName": "l2_cache_hits_from_ic_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Instruction Cache Misses",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "l2_cache_hits_from_dc_misses",
+ "EventCode": "0x64",
+ "BriefDescription": "L2 Cache Hits from L1 Data Cache Misses",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "l2_cache_hits_from_l2_hwpf",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 Cache Hits from L2 Cache HWPF",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l3_cache_accesses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Cache Accesses",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_misses",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 Misses (includes cacheline state change requests)",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "MetricName": "l3_read_miss_latency",
+ "BriefDescription": "Average L3 Read Miss Latency (in core clocks)",
+ "MetricExpr": "(xi_sys_fill_latency * 16) / xi_ccx_sdp_req1",
+ "MetricGroup": "l3_cache",
+ "ScaleUnit": "1core clocks"
+ },
+ {
+ "MetricName": "op_cache_fetch_miss_ratio",
+ "BriefDescription": "Op Cache (64B) Fetch Miss Ratio",
+ "MetricExpr": "d_ratio(op_cache_hit_miss.op_cache_miss, op_cache_hit_miss.all_op_cache_accesses)",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "ic_fetch_miss_ratio",
+ "BriefDescription": "Instruction Cache (32B) Fetch Miss Ratio",
+ "MetricExpr": "d_ratio(ic_tag_hit_miss.instruction_cache_miss, ic_tag_hit_miss.all_instruction_cache_accesses)",
+ "MetricGroup": "l2_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_memory",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From Memory",
+ "UMask": "0x48"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_remote_node",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From Remote Node",
+ "UMask": "0x50"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_within_same_ccx",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From within same CCX",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "l1_data_cache_fills_from_external_ccx_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: From External CCX Cache",
+ "UMask": "0x14"
+ },
+ {
+ "EventName": "l1_data_cache_fills_all",
+ "EventCode": "0x44",
+ "BriefDescription": "L1 Data Cache Fills: All",
+ "UMask": "0xff"
+ },
+ {
+ "MetricName": "l1_itlb_misses",
+ "BriefDescription": "L1 ITLB Misses",
+ "MetricExpr": "bp_l1_tlb_miss_l2_tlb_hit + bp_l1_tlb_miss_l2_tlb_miss",
+ "MetricGroup": "tlb"
+ },
+ {
+ "EventName": "l2_itlb_misses",
+ "EventCode": "0x85",
+ "BriefDescription": "L2 ITLB Misses & Instruction page walks",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l1_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB Misses",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_dtlb_misses",
+ "EventCode": "0x45",
+ "BriefDescription": "L2 DTLB Misses & Data page walks",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "all_tlbs_flushed",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLBs Flushed",
+ "UMask": "0xff"
+ },
+ {
+ "MetricName": "macro_ops_dispatched",
+ "BriefDescription": "Macro-ops Dispatched",
+ "MetricExpr": "de_dis_cops_from_decoder.disp_op_type.any_integer_dispatch + de_dis_cops_from_decoder.disp_op_type.any_fp_dispatch",
+ "MetricGroup": "decoder"
+ },
+ {
+ "EventName": "sse_avx_stalls",
+ "EventCode": "0x0e",
+ "BriefDescription": "Mixed SSE/AVX Stalls",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "macro_ops_retired",
+ "EventCode": "0xc1",
+ "BriefDescription": "Macro-ops Retired"
+ },
+ {
+ "MetricName": "all_remote_links_outbound",
+ "BriefDescription": "Approximate: Outbound data bytes for all Remote Links for a node (die)",
+ "MetricExpr": "remote_outbound_data_controller_0 + remote_outbound_data_controller_1 + remote_outbound_data_controller_2 + remote_outbound_data_controller_3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "3e-5MiB"
+ },
+ {
+ "MetricName": "nps1_die_to_dram",
+ "BriefDescription": "Approximate: Combined DRAM B/bytes of all channels on a NPS1 node (die) (may need --metric-no-group)",
+ "MetricExpr": "dram_channel_data_controller_0 + dram_channel_data_controller_1 + dram_channel_data_controller_2 + dram_channel_data_controller_3 + dram_channel_data_controller_4 + dram_channel_data_controller_5 + dram_channel_data_controller_6 + dram_channel_data_controller_7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.1e-5MiB"
+ }
+]
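Usage sketch for the metrics above (illustrative only, assuming a perf build that includes this file): perf stat -M resolves a MetricName's MetricExpr into the underlying events, and --metric-no-group is the option already referenced in the nps1_die_to_dram description.

    # named metrics are selected with -M; system-wide for one second
    perf stat -M branch_misprediction_ratio -a -- sleep 1
    # data-fabric metrics may need their events ungrouped, as noted above
    perf stat -M nps1_die_to_dram --metric-no-group -a -- sleep 1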
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/branch.json b/tools/perf/pmu-events/arch/x86/amdzen4/branch.json
new file mode 100644
index 000000000000..208c646c59ca
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/branch.json
@@ -0,0 +1,82 @@
+[
+ {
+ "EventName": "bp_l2_btb_correct",
+ "EventCode": "0x8b",
+ "BriefDescription": "L2 branch prediction overrides existing prediction (speculative)."
+ },
+ {
+ "EventName": "bp_dyn_ind_pred",
+ "EventCode": "0x8e",
+ "BriefDescription": "Dynamic indirect predictions (branch used the indirect predictor to make a prediction)."
+ },
+ {
+ "EventName": "bp_de_redirect",
+ "EventCode": "0x91",
+ "BriefDescription": "Instruction decoder corrects the predicted target and resteers the branch predictor."
+ },
+ {
+ "EventName": "ex_ret_brn",
+ "EventCode": "0xc2",
+ "BriefDescription": "Retired branch instructions (all types of architectural control flow changes, including exceptions and interrupts)."
+ },
+ {
+ "EventName": "ex_ret_brn_misp",
+ "EventCode": "0xc3",
+ "BriefDescription": "Retired branch instructions mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn",
+ "EventCode": "0xc4",
+ "BriefDescription": "Retired taken branch instructions (all types of architectural control flow changes, including exceptions and interrupts)."
+ },
+ {
+ "EventName": "ex_ret_brn_tkn_misp",
+ "EventCode": "0xc5",
+ "BriefDescription": "Retired taken branch instructions mispredicted."
+ },
+ {
+ "EventName": "ex_ret_brn_far",
+ "EventCode": "0xc6",
+ "BriefDescription": "Retired far control transfers (far call/jump/return, IRET, SYSCALL and SYSRET, plus exceptions and interrupts). Far control transfers are not subject to branch prediction."
+ },
+ {
+ "EventName": "ex_ret_near_ret",
+ "EventCode": "0xc8",
+ "BriefDescription": "Retired near returns (RET or RET Iw)."
+ },
+ {
+ "EventName": "ex_ret_near_ret_mispred",
+ "EventCode": "0xc9",
+ "BriefDescription": "Retired near returns mispredicted. Each misprediction incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_brn_ind_misp",
+ "EventCode": "0xca",
+ "BriefDescription": "Retired indirect branch instructions mispredicted (only EX mispredicts). Each misprediction incurs the same penalty as a mispredicted conditional branch instruction."
+ },
+ {
+ "EventName": "ex_ret_ind_brch_instr",
+ "EventCode": "0xcc",
+ "BriefDescription": "Retired indirect branch instructions."
+ },
+ {
+ "EventName": "ex_ret_cond",
+ "EventCode": "0xd1",
+ "BriefDescription": "Retired conditional branch instructions."
+ },
+ {
+ "EventName": "ex_ret_msprd_brnch_instr_dir_msmtch",
+ "EventCode": "0x1c7",
+ "BriefDescription": "Retired branch instructions mispredicted due to direction mismatch."
+ },
+ {
+ "EventName": "ex_ret_uncond_brnch_instr_mispred",
+ "EventCode": "0x1c8",
+ "BriefDescription": "Retired unconditional indirect branch instructions mispredicted."
+ },
+ {
+ "EventName": "ex_ret_uncond_brnch_instr",
+ "EventCode": "0x1c9",
+ "BriefDescription": "Retired unconditional branch instructions."
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/cache.json b/tools/perf/pmu-events/arch/x86/amdzen4/cache.json
new file mode 100644
index 000000000000..ecbe9660b2b3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/cache.json
@@ -0,0 +1,772 @@
+[
+ {
+ "EventName": "ls_mab_alloc.load_store_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for load-store allocations.",
+ "UMask": "0x3f"
+ },
+ {
+ "EventName": "ls_mab_alloc.hardware_prefetcher_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for hardware prefetcher allocations.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_mab_alloc.all_allocations",
+ "EventCode": "0x41",
+ "BriefDescription": "Miss Address Buffer (MAB) entries allocated by a Load-Store (LS) pipe for all types of allocations.",
+ "UMask": "0x7f"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.local_l2",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand data cache fills from local L2 cache.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.local_ccx",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand data cache fills from L3 cache or different L2 cache in the same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.near_cache",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand data cache fills from cache of another CCX when the address was in the same NUMA node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.dram_io_near",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand data cache fills from either DRAM or MMIO in the same NUMA node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.far_cache",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand data cache fills from cache of another CCX when the address was in a different NUMA node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.dram_io_far",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.alternate_memories",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand data cache fills from extension memory.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_dmnd_fills_from_sys.all",
+ "EventCode": "0x43",
+ "BriefDescription": "Demand data cache fills from all types of data sources.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.local_l2",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from local L2 cache.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.local_ccx",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from L3 cache or different L2 cache in the same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.local_all",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from local L2 cache or L3 cache or different L2 cache in the same CCX.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.near_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from cache of another CCX when the address was in the same NUMA node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.dram_io_near",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from either DRAM or MMIO in the same NUMA node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.far_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from cache of another CCX when the address was in a different NUMA node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.remote_cache",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from cache of another CCX when the address was in the same or a different NUMA node.",
+ "UMask": "0x14"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.dram_io_far",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.dram_io_all",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from either DRAM or MMIO in any NUMA node (same or different socket).",
+ "UMask": "0x48"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.far_all",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from either cache of another CCX, DRAM or MMIO when the address was in a different NUMA node (same or different socket).",
+ "UMask": "0x50"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.all_dram_io",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from either DRAM or MMIO in any NUMA node (same or different socket).",
+ "UMask": "0x48"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.alternate_memories",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from extension memory.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_any_fills_from_sys.all",
+ "EventCode": "0x44",
+ "BriefDescription": "Any data cache fills from all types of data sources.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchT0 (move data to all cache levels), T1 (move data to all cache levels except L1) and T2 (move data to all cache levels except L1 and L2).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_w",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchW (move data to L1 cache and mark it modifiable).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.prefetch_nta",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software prefetch instructions dispatched (speculative) of type PrefetchNTA (move data with minimum cache pollution i.e. non-temporal access).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_pref_instr_disp.all",
+ "EventCode": "0x4b",
+ "BriefDescription": "Software prefetch instructions dispatched (speculative) of all types.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.data_pipe_sw_pf_dc_hit",
+ "EventCode": "0x52",
+ "BriefDescription": "Software prefetches that did not fetch data outside of the processor core as the PREFETCH instruction saw a data cache hit.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.mab_mch_cnt",
+ "EventCode": "0x52",
+ "BriefDescription": "Software prefetches that did not fetch data outside of the processor core as the PREFETCH instruction saw a match on an already allocated Miss Address Buffer (MAB).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_inef_sw_pref.all",
+ "EventCode": "0x52",
+ "BriefDescript6ion": "Software prefetches that did not fetch data outside of the processor core for any reason.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.local_l2",
+ "EventCode": "0x59",
+ "BriefDescription": "Software prefetch data cache fills from local L2 cache.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.local_ccx",
+ "EventCode": "0x59",
+ "BriefDescription": "Software prefetch data cache fills from L3 cache or different L2 cache in the same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.near_cache",
+ "EventCode": "0x59",
+ "BriefDescription": "Software prefetch data cache fills from cache of another CCX in the same NUMA node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.dram_io_near",
+ "EventCode": "0x59",
+ "BriefDescription": "Software prefetch data cache fills from either DRAM or MMIO in the same NUMA node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.far_cache",
+ "EventCode": "0x59",
+ "BriefDescription": "Software prefetch data cache fills from cache of another CCX in a different NUMA node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.dram_io_far",
+ "EventCode": "0x59",
+ "BriefDescription": "Software prefetch data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.alternate_memories",
+ "EventCode": "0x59",
+ "BriefDescription": "Software prefetch data cache fills from extension memory.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_sw_pf_dc_fills.all",
+ "EventCode": "0x59",
+ "BriefDescription": "Software prefetch data cache fills from all types of data sources.",
+ "UMask": "0xdf"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.local_l2",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware prefetch data cache fills from local L2 cache.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.local_ccx",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware prefetch data cache fills from L3 cache or different L2 cache in the same CCX.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.near_cache",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware prefetch data cache fills from cache of another CCX when the address was in the same NUMA node.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.dram_io_near",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware prefetch data cache fills from either DRAM or MMIO in the same NUMA node.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.far_cache",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware prefetch data cache fills from cache of another CCX when the address was in a different NUMA node.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.dram_io_far",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware prefetch data cache fills from either DRAM or MMIO in a different NUMA node (same or different socket).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.alternate_memories",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware prefetch data cache fills from extension memory.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_hw_pf_dc_fills.all",
+ "EventCode": "0x5a",
+ "BriefDescription": "Hardware prefetch data cache fills from all types of data sources.",
+ "UMask": "0xdf"
+ },
+ {
+ "EventName": "ls_alloc_mab_count",
+ "EventCode": "0x5f",
+ "BriefDescription": "In-flight L1 data cache misses i.e. Miss Address Buffer (MAB) allocations each cycle."
+ },
+ {
+ "EventName": "l2_request_g1.group2",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests of non-cacheable type (non-cached data and instructions reads, self-modifying code checks).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_request_g1.l2_hw_pf",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests: from hardware prefetchers to prefetch directly into L2 (hit or miss).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_request_g1.prefetch_l2_cmd",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests: prefetch directly into L2.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_request_g1.change_to_x",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests: data cache state change to writable, check L2 for current state.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_request_g1.cacheable_ic_read",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests: instruction cache reads.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_request_g1.ls_rd_blk_c_s",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests: data cache shared reads.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_x",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests: data cache stores.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_request_g1.rd_blk_l",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests: data cache reads including hardware and software prefetch.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_request_g1.all_dc",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests of common types from L1 data cache (including prefetches).",
+ "UMask": "0xe8"
+ },
+ {
+ "EventName": "l2_request_g1.all_no_prefetch",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests of common types not including prefetches.",
+ "UMask": "0xf9"
+ },
+ {
+ "EventName": "l2_request_g1.all",
+ "EventCode": "0x60",
+ "BriefDescription": "L2 cache requests of all types.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_miss",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache request miss in L2.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache hit non-modifiable line in L2.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_fill_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: instruction cache hit modifiable line in L2.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_hit_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for instruction cache hits.",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_access_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for instruction cache access.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_c",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache request miss in L2.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_miss_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data and instruction cache misses.",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache store or state change hit in L2.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_s",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache read hit non-modifiable line in L2.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_l_hit_x",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache read hit modifiable line in L2.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ls_rd_blk_cs",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) with status: data cache shared read hit in L2.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_cache_req_stat.dc_hit_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data cache hits.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "l2_cache_req_stat.ic_dc_hit_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data and instruction cache hits.",
+ "UMask": "0xf6"
+ },
+ {
+ "EventName": "l2_cache_req_stat.dc_access_in_l2",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data cache access.",
+ "UMask": "0xf8"
+ },
+ {
+ "EventName": "l2_cache_req_stat.all",
+ "EventCode": "0x64",
+ "BriefDescription": "Core to L2 cache requests (not including L2 prefetch) for data and instruction cache access.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.l2_stream",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of type L2Stream (fetch additional sequential lines into L2 cache).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.l2_next_line",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of type L2NextLine (fetch the next line into L2 cache).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.l2_up_down",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of type L2UpDown (fetch the next or previous line into L2 cache for all memory accesses).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.l2_burst",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of type L2Burst (aggressively fetch additional sequential lines into L2 cache).",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.l2_stride",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of type L2Stride (fetch additional lines into L2 cache when each access is at a constant distance from the previous).",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.l1_stream",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of type L1Stream (fetch additional sequential lines into L1 cache).",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.l1_stride",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of type L1Stride (fetch additional lines into L1 cache when each access is a constant distance from the previous).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.l1_region",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of type L1Region (fetch additional lines into L1 cache when the data access for a given instruction tends to be followed by a consistent pattern of other accesses within a localized region).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_pf_hit_l2.all",
+ "EventCode": "0x70",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which hit in the L2 cache of all types.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.l2_stream",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache of type L2Stream (fetch additional sequential lines into L2 cache).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.l2_next_line",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache of type L2NextLine (fetch the next line into L2 cache).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.l2_up_down",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache of type L2UpDown (fetch the next or previous line into L2 cache for all memory accesses).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.l2_burst",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache of type L2Burst (aggressively fetch additional sequential lines into L2 cache).",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.l2_stride",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache of type L2Stride (fetch additional lines into L2 cache when each access is a constant distance from the previous).",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.l1_stream",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache of type L1Stream (fetch additional sequential lines into L1 cache).",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.l1_stride",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache of type L1Stride (fetch additional lines into L1 cache when each access is a constant distance from the previous).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.l1_region",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache of type L1Region (fetch additional lines into L1 cache when the data access for a given instruction tends to be followed by a consistent pattern of other accesses within a localized region).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_hit_l3.all",
+ "EventCode": "0x71",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 cache and hit in the L3 cache cache of all types.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.l2_stream",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of type L2Stream (fetch additional sequential lines into L2 cache).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.l2_next_line",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of type L2NextLine (fetch the next line into L2 cache).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.l2_up_down",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of type L2UpDown (fetch the next or previous line into L2 cache for all memory accesses).",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.l2_burst",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of type L2Burst (aggressively fetch additional sequential lines into L2 cache).",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.l2_stride",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of type L2Stride (fetch additional lines into L2 cache when each access is a constant distance from the previous).",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.l1_stream",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of type L1Stream (fetch additional sequential lines into L1 cache).",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.l1_stride",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of type L1Stride (fetch additional lines into L1 cache when each access is a constant distance from the previous).",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.l1_region",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of type L1Region (fetch additional lines into L1 cache when the data access for a given instruction tends to be followed by a consistent pattern of other accesses within a localized region).",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "l2_pf_miss_l2_l3.all",
+ "EventCode": "0x72",
+ "BriefDescription": "L2 prefetches accepted by the L2 pipeline which miss the L2 and the L3 caches of all types.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ic_cache_fill_l2",
+ "EventCode": "0x82",
+ "BriefDescription": "Instruction cache lines (64 bytes) fulfilled from the L2 cache."
+ },
+ {
+ "EventName": "ic_cache_fill_sys",
+ "EventCode": "0x83",
+ "BriefDescription": "Instruction cache lines (64 bytes) fulfilled from system memory or another cache."
+ },
+ {
+ "EventName": "ic_tag_hit_miss.instruction_cache_hit",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction cache hits.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.instruction_cache_miss",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction cache misses.",
+ "UMask": "0x18"
+ },
+ {
+ "EventName": "ic_tag_hit_miss.all_instruction_cache_accesses",
+ "EventCode": "0x18e",
+ "BriefDescription": "Instruction cache accesses of all types.",
+ "UMask": "0x1f"
+ },
+ {
+ "EventName": "op_cache_hit_miss.op_cache_hit",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op cache hits.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "op_cache_hit_miss.op_cache_miss",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op cache misses.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "op_cache_hit_miss.all_op_cache_accesses",
+ "EventCode": "0x28f",
+ "BriefDescription": "Op cache accesses of all types.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "l3_lookup_state.l3_miss",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 cache misses.",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_lookup_state.l3_hit",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 cache hits.",
+ "UMask": "0xfe",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_lookup_state.all_coherent_accesses_to_l3",
+ "EventCode": "0x04",
+ "BriefDescription": "L3 cache requests for all coherent accesses.",
+ "UMask": "0xff",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency.dram_near",
+ "EventCode": "0xac",
+ "BriefDescription": "Average sampled latency when data is sourced from DRAM in the same NUMA node.",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency.dram_far",
+ "EventCode": "0xac",
+ "BriefDescription": "Average sampled latency when data is sourced from DRAM in a different NUMA node.",
+ "UMask": "0x02",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency.near_cache",
+ "EventCode": "0xac",
+ "BriefDescription": "Average sampled latency when data is sourced from another CCX's cache when the address was in the same NUMA node.",
+ "UMask": "0x04",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency.far_cache",
+ "EventCode": "0xac",
+ "BriefDescription": "Average sampled latency when data is sourced from another CCX's cache when the address was in a different NUMA node.",
+ "UMask": "0x08",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency.ext_near",
+ "EventCode": "0xac",
+ "BriefDescription": "Average sampled latency when data is sourced from extension memory (CXL) in the same NUMA node.",
+ "UMask": "0x10",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency.ext_far",
+ "EventCode": "0xac",
+ "BriefDescription": "Average sampled latency when data is sourced from extension memory (CXL) in a different NUMA node.",
+ "UMask": "0x20",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency.all",
+ "EventCode": "0xac",
+ "BriefDescription": "Average sampled latency from all data sources.",
+ "UMask": "0x3f",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency_requests.dram_near",
+ "EventCode": "0xad",
+ "BriefDescription": "L3 cache fill requests sourced from DRAM in the same NUMA node.",
+ "UMask": "0x01",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency_requests.dram_far",
+ "EventCode": "0xad",
+ "BriefDescription": "L3 cache fill requests sourced from DRAM in a different NUMA node.",
+ "UMask": "0x02",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency_requests.near_cache",
+ "EventCode": "0xad",
+ "BriefDescription": "L3 cache fill requests sourced from another CCX's cache when the address was in the same NUMA node.",
+ "UMask": "0x04",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency_requests.far_cache",
+ "EventCode": "0xad",
+ "BriefDescription": "L3 cache fill requests sourced from another CCX's cache when the address was in a different NUMA node.",
+ "UMask": "0x08",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency_requests.ext_near",
+ "EventCode": "0xad",
+ "BriefDescription": "L3 cache fill requests sourced from extension memory (CXL) in the same NUMA node.",
+ "UMask": "0x10",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency_requests.ext_far",
+ "EventCode": "0xad",
+ "BriefDescription": "L3 cache fill requests sourced from extension memory (CXL) in a different NUMA node.",
+ "UMask": "0x20",
+ "Unit": "L3PMC"
+ },
+ {
+ "EventName": "l3_xi_sampled_latency_requests.all",
+ "EventCode": "0xad",
+ "BriefDescription": "L3 cache fill requests sourced from all data sources.",
+ "UMask": "0x3f",
+ "Unit": "L3PMC"
+ }
+]
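
Editorial note (not part of the patch): once these JSON definitions are built into perf's event tables, the aliases above become usable directly by name. As a minimal sketch, assuming the events are exposed under exactly these names on a Zen 4 machine, the L2 prefetcher hit rate can be estimated from the three prefetch event groups defined above:

    # Hypothetical one-off measurement; event names are taken from the
    # definitions above, availability depends on the perf build and CPU.
    perf stat -a -e l2_pf_hit_l2.all,l2_pf_miss_l2_hit_l3.all,l2_pf_miss_l2_l3.all -- sleep 10
    # hit rate ~= l2_pf_hit_l2.all /
    #             (l2_pf_hit_l2.all + l2_pf_miss_l2_hit_l3.all + l2_pf_miss_l2_l3.all)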
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/core.json b/tools/perf/pmu-events/arch/x86/amdzen4/core.json
new file mode 100644
index 000000000000..a56a41828bd4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/core.json
@@ -0,0 +1,122 @@
+[
+ {
+ "EventName": "ls_locks.bus_lock",
+ "EventCode": "0x25",
+ "BriefDescription": "Retired Lock instructions which caused a bus lock.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_ret_cl_flush",
+ "EventCode": "0x26",
+ "BriefDescription": "Retired CLFLUSH instructions."
+ },
+ {
+ "EventName": "ls_ret_cpuid",
+ "EventCode": "0x27",
+ "BriefDescription": "Retired CPUID instructions."
+ },
+ {
+ "EventName": "ls_smi_rx",
+ "EventCode": "0x2b",
+ "BriefDescription": "SMIs received."
+ },
+ {
+ "EventName": "ls_int_taken",
+ "EventCode": "0x2c",
+ "BriefDescription": "Interrupts taken."
+ },
+ {
+ "EventName": "ls_not_halted_cyc",
+ "EventCode": "0x76",
+ "BriefDescription": "Core cycles not in halt."
+ },
+ {
+ "EventName": "ex_ret_instr",
+ "EventCode": "0xc0",
+ "BriefDescription": "Retired instructions."
+ },
+ {
+ "EventName": "ex_ret_ops",
+ "EventCode": "0xc1",
+ "BriefDescription": "Retired macro-ops."
+ },
+ {
+ "EventName": "ex_div_busy",
+ "EventCode": "0xd3",
+ "BriefDescription": "Number of cycles the divider is busy."
+ },
+ {
+ "EventName": "ex_div_count",
+ "EventCode": "0xd4",
+ "BriefDescription": "Divide ops executed."
+ },
+ {
+ "EventName": "ex_no_retire.empty",
+ "EventCode": "0xd6",
+ "BriefDescription": "Cycles with no retire due to the lack of valid ops in the retire queue (may be caused by front-end bottlenecks or pipeline redirects).",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ex_no_retire.not_complete",
+ "EventCode": "0xd6",
+ "BriefDescription": "Cycles with no retire while the oldest op is waiting to be executed.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ex_no_retire.other",
+ "EventCode": "0xd6",
+ "BriefDescription": "Cycles with no retire caused by other reasons (retire breaks, traps, faults, etc.).",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ex_no_retire.thread_not_selected",
+ "EventCode": "0xd6",
+ "BriefDescription": "Cycles with no retire because thread arbitration did not select the thread.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ex_no_retire.load_not_complete",
+ "EventCode": "0xd6",
+ "BriefDescription": "Cycles with no retire while the oldest op is waiting for load data.",
+ "UMask": "0xa2"
+ },
+ {
+ "EventName": "ex_no_retire.all",
+ "EventCode": "0xd6",
+ "BriefDescription": "Cycles with no retire for any reason.",
+ "UMask": "0x1b"
+ },
+ {
+ "EventName": "ls_not_halted_p0_cyc.p0_freq_cyc",
+ "EventCode": "0x120",
+ "BriefDescription": "Reference cycles (P0 frequency) not in halt .",
+ "UMask": "0x1"
+ },
+ {
+ "EventName": "ex_ret_ucode_instr",
+ "EventCode": "0x1c1",
+ "BriefDescription": "Retired microcoded instructions."
+ },
+ {
+ "EventName": "ex_ret_ucode_ops",
+ "EventCode": "0x1c2",
+ "BriefDescription": "Retired microcode ops."
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Ops tagged by IBS.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ex_tagged_ibs_ops.ibs_tagged_ops_ret",
+ "EventCode": "0x1cf",
+ "BriefDescription": "Ops tagged by IBS that retired.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ex_ret_fused_instr",
+ "EventCode": "0x1d0",
+ "BriefDescription": "Retired fused instructions."
+ }
+]
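
Editorial note (not part of the patch): the core events above combine in the usual way; for example, a rough instructions-per-cycle figure follows from ex_ret_instr and ls_not_halted_cyc. A minimal sketch, assuming the aliases are available to perf on the target machine:

    # Hypothetical usage; ./workload is a placeholder for the command under test.
    # IPC ~= ex_ret_instr / ls_not_halted_cyc
    perf stat -e ex_ret_instr,ls_not_halted_cyc -- ./workload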
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/data-fabric.json b/tools/perf/pmu-events/arch/x86/amdzen4/data-fabric.json
new file mode 100644
index 000000000000..cf8f13075e62
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/data-fabric.json
@@ -0,0 +1,1090 @@
+[
+ {
+ "EventName": "local_processor_read_data_beats_cs0",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 0.",
+ "EventCode": "0x1f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs1",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 1.",
+ "EventCode": "0x5f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs2",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 2.",
+ "EventCode": "0x9f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs3",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 3.",
+ "EventCode": "0xdf",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs4",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 4.",
+ "EventCode": "0x11f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs5",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 5.",
+ "EventCode": "0x15f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs6",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 6.",
+ "EventCode": "0x19f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs7",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 7.",
+ "EventCode": "0x1df",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs8",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 8.",
+ "EventCode": "0x21f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs9",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 9.",
+ "EventCode": "0x25f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs10",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 10.",
+ "EventCode": "0x29f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_read_data_beats_cs11",
+ "PublicDescription": "Read data beats (64 bytes) for local processor at Coherent Station (CS) 11.",
+ "EventCode": "0x2df",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs0",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 0.",
+ "EventCode": "0x1f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs1",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 1.",
+ "EventCode": "0x5f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs2",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 2.",
+ "EventCode": "0x9f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs3",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 3.",
+ "EventCode": "0xdf",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs4",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 4.",
+ "EventCode": "0x11f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs5",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 5.",
+ "EventCode": "0x15f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs6",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 6.",
+ "EventCode": "0x19f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs7",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 7.",
+ "EventCode": "0x1df",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs8",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 8.",
+ "EventCode": "0x21f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs9",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 9.",
+ "EventCode": "0x25f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs10",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 10.",
+ "EventCode": "0x29f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_processor_write_data_beats_cs11",
+ "PublicDescription": "Write data beats (64 bytes) for local processor at Coherent Station (CS) 11.",
+ "EventCode": "0x2df",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs0",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 0.",
+ "EventCode": "0x1f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs1",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 1.",
+ "EventCode": "0x5f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs2",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 2.",
+ "EventCode": "0x9f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs3",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 3.",
+ "EventCode": "0xdf",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs4",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 4.",
+ "EventCode": "0x11f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs5",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 5.",
+ "EventCode": "0x15f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs6",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 6.",
+ "EventCode": "0x19f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs7",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 7.",
+ "EventCode": "0x1df",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs8",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 8.",
+ "EventCode": "0x21f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs9",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 9.",
+ "EventCode": "0x25f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs10",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 10.",
+ "EventCode": "0x29f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_read_data_beats_cs11",
+ "PublicDescription": "Read data beats (64 bytes) for remote processor at Coherent Station (CS) 11.",
+ "EventCode": "0x2df",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs0",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 0.",
+ "EventCode": "0x1f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs1",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 1.",
+ "EventCode": "0x5f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs2",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 2.",
+ "EventCode": "0x9f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs3",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 3.",
+ "EventCode": "0xdf",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs4",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 4.",
+ "EventCode": "0x11f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs5",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 5.",
+ "EventCode": "0x15f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs6",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 6.",
+ "EventCode": "0x19f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs7",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 7.",
+ "EventCode": "0x1df",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs8",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 8.",
+ "EventCode": "0x21f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs9",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 9.",
+ "EventCode": "0x25f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs10",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 10.",
+ "EventCode": "0x29f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_processor_write_data_beats_cs11",
+ "PublicDescription": "Write data beats (64 bytes) for remote processor at Coherent Station (CS) 11.",
+ "EventCode": "0x2df",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_upstream_read_beats_iom0",
+ "PublicDescription": "Read data beats (64 bytes) for local socket upstream DMA at IO Moderator (IOM) 0.",
+ "EventCode": "0x81f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_upstream_read_beats_iom1",
+ "PublicDescription": "Read data beats (64 bytes) for local socket upstream DMA at IO Moderator (IOM) 1.",
+ "EventCode": "0x85f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_upstream_read_beats_iom2",
+ "PublicDescription": "Read data beats (64 bytes) for local socket upstream DMA at IO Moderator (IOM) 2.",
+ "EventCode": "0x89f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_upstream_read_beats_iom3",
+ "PublicDescription": "Read data beats (64 bytes) for local socket upstream DMA at IO Moderator (IOM) 3.",
+ "EventCode": "0x8df",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_upstream_write_beats_iom0",
+ "PublicDescription": "Write data beats (64 bytes) for local socket upstream DMA at IO Moderator (IOM) 0.",
+ "EventCode": "0x81f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_upstream_write_beats_iom1",
+ "PublicDescription": "Write data beats (64 bytes) for local socket upstream DMA at IO Moderator (IOM) 1.",
+ "EventCode": "0x85f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_upstream_write_beats_iom2",
+ "PublicDescription": "Write data beats (64 bytes) for local socket upstream DMA at IO Moderator (IOM) 2.",
+ "EventCode": "0x89f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_upstream_write_beats_iom3",
+ "PublicDescription": "Write data beats (64 bytes) for local socket upstream DMA at IO Moderator (IOM) 3.",
+ "EventCode": "0x8df",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_upstream_read_beats_iom0",
+ "PublicDescription": "Read data beats (64 bytes) for remote socket upstream DMA at IO Moderator (IOM) 0.",
+ "EventCode": "0x81f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_upstream_read_beats_iom1",
+ "PublicDescription": "Read data beats (64 bytes) for remote socket upstream DMA at IO Moderator (IOM) 1.",
+ "EventCode": "0x85f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_upstream_read_beats_iom2",
+ "PublicDescription": "Read data beats (64 bytes) for remote socket upstream DMA at IO Moderator (IOM) 2.",
+ "EventCode": "0x89f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_upstream_read_beats_iom3",
+ "PublicDescription": "Read data beats (64 bytes) for remote socket upstream DMA at IO Moderator (IOM) 3.",
+ "EventCode": "0x8df",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_upstream_write_beats_iom0",
+ "PublicDescription": "Write data beats (64 bytes) for remote socket upstream DMA at IO Moderator (IOM) 0.",
+ "EventCode": "0x81f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_upstream_write_beats_iom1",
+ "PublicDescription": "Write data beats (64 bytes) for remote socket upstream DMA at IO Moderator (IOM) 1.",
+ "EventCode": "0x85f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_upstream_write_beats_iom2",
+ "PublicDescription": "Write data beats (64 bytes) for remote socket upstream DMA at IO Moderator (IOM) 2.",
+ "EventCode": "0x89f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_upstream_write_beats_iom3",
+ "PublicDescription": "Write data beats (64 bytes) for remote socket upstream DMA at IO Moderator (IOM) 3.",
+ "EventCode": "0x8df",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_inbound_data_beats_ccm0",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for local socket inbound data to CPU Moderator (CCM) 0.",
+ "EventCode": "0x41e",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_inbound_data_beats_ccm1",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for local socket inbound data to CPU Moderator (CCM) 1.",
+ "EventCode": "0x45e",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_inbound_data_beats_ccm2",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for local socket inbound data to CPU Moderator (CCM) 2.",
+ "EventCode": "0x49e",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_inbound_data_beats_ccm3",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for local socket inbound data to CPU Moderator (CCM) 3.",
+ "EventCode": "0x4de",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_inbound_data_beats_ccm4",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for local socket inbound data to CPU Moderator (CCM) 4.",
+ "EventCode": "0x51e",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_inbound_data_beats_ccm5",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for local socket inbound data to CPU Moderator (CCM) 5.",
+ "EventCode": "0x55e",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_inbound_data_beats_ccm6",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for local socket inbound data to CPU Moderator (CCM) 6.",
+ "EventCode": "0x59e",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_inbound_data_beats_ccm7",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for local socket inbound data to CPU Moderator (CCM) 7.",
+ "EventCode": "0x5de",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_inbound_data_beats_ccm0",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for local socket inbound data to CPU Moderator (CCM) 0.",
+ "EventCode": "0x41f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_inbound_data_beats_ccm1",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for local socket inbound data to CPU Moderator (CCM) 1.",
+ "EventCode": "0x45f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_inbound_data_beats_ccm2",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for local socket inbound data to CPU Moderator (CCM) 2.",
+ "EventCode": "0x49f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_inbound_data_beats_ccm3",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for local socket inbound data to CPU Moderator (CCM) 3.",
+ "EventCode": "0x4df",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_inbound_data_beats_ccm4",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for local socket inbound data to CPU Moderator (CCM) 4.",
+ "EventCode": "0x51f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_inbound_data_beats_ccm5",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for local socket inbound data to CPU Moderator (CCM) 5.",
+ "EventCode": "0x55f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_inbound_data_beats_ccm6",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for local socket inbound data to CPU Moderator (CCM) 6.",
+ "EventCode": "0x59f",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_inbound_data_beats_ccm7",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for local socket inbound data to CPU Moderator (CCM) 7.",
+ "EventCode": "0x5df",
+ "UMask": "0x7fe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_outbound_data_beats_ccm0",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for local socket outbound data from CPU Moderator (CCM) 0.",
+ "EventCode": "0x41e",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_outbound_data_beats_ccm1",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for local socket outbound data from CPU Moderator (CCM) 1.",
+ "EventCode": "0x45e",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_outbound_data_beats_ccm2",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for local socket outbound data from CPU Moderator (CCM) 2.",
+ "EventCode": "0x49e",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_outbound_data_beats_ccm3",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for local socket outbound data from CPU Moderator (CCM) 3.",
+ "EventCode": "0x4de",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_outbound_data_beats_ccm4",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for local socket outbound data from CPU Moderator (CCM) 4.",
+ "EventCode": "0x51e",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_outbound_data_beats_ccm5",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for local socket outbound data from CPU Moderator (CCM) 5.",
+ "EventCode": "0x55e",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_outbound_data_beats_ccm6",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for local socket outbound data from CPU Moderator (CCM) 6.",
+ "EventCode": "0x59e",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf0_outbound_data_beats_ccm7",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for local socket outbound data from CPU Moderator (CCM) 7.",
+ "EventCode": "0x5de",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_outbound_data_beats_ccm0",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for local socket outbound data from CPU Moderator (CCM) 0.",
+ "EventCode": "0x41f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_outbound_data_beats_ccm1",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for local socket outbound data from CPU Moderator (CCM) 1.",
+ "EventCode": "0x45f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_outbound_data_beats_ccm2",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for local socket outbound data from CPU Moderator (CCM) 2.",
+ "EventCode": "0x49f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_outbound_data_beats_ccm3",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for local socket outbound data from CPU Moderator (CCM) 3.",
+ "EventCode": "0x4df",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_outbound_data_beats_ccm4",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for local socket outbound data from CPU Moderator (CCM) 4.",
+ "EventCode": "0x51f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_outbound_data_beats_ccm5",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for local socket outbound data from CPU Moderator (CCM) 5.",
+ "EventCode": "0x55f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_outbound_data_beats_ccm6",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for local socket outbound data from CPU Moderator (CCM) 6.",
+ "EventCode": "0x59f",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_inf1_outbound_data_beats_ccm7",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for local socket outbound data from CPU Moderator (CCM) 7.",
+ "EventCode": "0x5df",
+ "UMask": "0x7ff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_inbound_data_beats_ccm0",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for remote socket inbound data to CPU Moderator (CCM) 0.",
+ "EventCode": "0x41e",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_inbound_data_beats_ccm1",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for remote socket inbound data to CPU Moderator (CCM) 1.",
+ "EventCode": "0x45e",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_inbound_data_beats_ccm2",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for remote socket inbound data to CPU Moderator (CCM) 2.",
+ "EventCode": "0x49e",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_inbound_data_beats_ccm3",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for remote socket inbound data to CPU Moderator (CCM) 3.",
+ "EventCode": "0x4de",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_inbound_data_beats_ccm4",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for remote socket inbound data to CPU Moderator (CCM) 4.",
+ "EventCode": "0x51e",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_inbound_data_beats_ccm5",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for remote socket inbound data to CPU Moderator (CCM) 5.",
+ "EventCode": "0x55e",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_inbound_data_beats_ccm6",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for remote socket inbound data to CPU Moderator (CCM) 6.",
+ "EventCode": "0x59e",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_inbound_data_beats_ccm7",
+ "PublicDescription": "Data beats (32 bytes) at interface 0 for remote socket inbound data to CPU Moderator (CCM) 7.",
+ "EventCode": "0x5de",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_inbound_data_beats_ccm0",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for remote socket inbound data to CPU Moderator (CCM) 0.",
+ "EventCode": "0x41f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_inbound_data_beats_ccm1",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for remote socket inbound data to CPU Moderator (CCM) 1.",
+ "EventCode": "0x45f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_inbound_data_beats_ccm2",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for remote socket inbound data to CPU Moderator (CCM) 2.",
+ "EventCode": "0x49f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_inbound_data_beats_ccm3",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for remote socket inbound data to CPU Moderator (CCM) 3.",
+ "EventCode": "0x4df",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_inbound_data_beats_ccm4",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for remote socket inbound data to CPU Moderator (CCM) 4.",
+ "EventCode": "0x51f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_inbound_data_beats_ccm5",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for remote socket inbound data to CPU Moderator (CCM) 5.",
+ "EventCode": "0x55f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_inbound_data_beats_ccm6",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for remote socket inbound data to CPU Moderator (CCM) 6.",
+ "EventCode": "0x59f",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_inbound_data_beats_ccm7",
+ "PublicDescription": "Data beats (32 bytes) at interface 1 for remote socket inbound data to CPU Moderator (CCM) 7.",
+ "EventCode": "0x5df",
+ "UMask": "0xbfe",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_outbound_data_beats_ccm0",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for remote socket outbound data from CPU Moderator (CCM) 0.",
+ "EventCode": "0x41e",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_outbound_data_beats_ccm1",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for remote socket outbound data from CPU Moderator (CCM) 1.",
+ "EventCode": "0x45e",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_outbound_data_beats_ccm2",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for remote socket outbound data from CPU Moderator (CCM) 2.",
+ "EventCode": "0x49e",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_outbound_data_beats_ccm3",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for remote socket outbound data from CPU Moderator (CCM) 3.",
+ "EventCode": "0x4de",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_outbound_data_beats_ccm4",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for remote socket outbound data from CPU Moderator (CCM) 4.",
+ "EventCode": "0x51e",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_outbound_data_beats_ccm5",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for remote socket outbound data from CPU Moderator (CCM) 5.",
+ "EventCode": "0x55e",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_outbound_data_beats_ccm6",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for remote socket outbound data from CPU Moderator (CCM) 6.",
+ "EventCode": "0x59e",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf0_outbound_data_beats_ccm7",
+ "PublicDescription": "Data beats (64 bytes) at interface 0 for remote socket outbound data from CPU Moderator (CCM) 7.",
+ "EventCode": "0x5de",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_outbound_data_beats_ccm0",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for remote socket outbound data from CPU Moderator (CCM) 0.",
+ "EventCode": "0x41f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_outbound_data_beats_ccm1",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for remote socket outbound data from CPU Moderator (CCM) 1.",
+ "EventCode": "0x45f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_outbound_data_beats_ccm2",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for remote socket outbound data from CPU Moderator (CCM) 2.",
+ "EventCode": "0x49f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_outbound_data_beats_ccm3",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for remote socket outbound data from CPU Moderator (CCM) 3.",
+ "EventCode": "0x4df",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_outbound_data_beats_ccm4",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for remote socket outbound data from CPU Moderator (CCM) 4.",
+ "EventCode": "0x51f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_outbound_data_beats_ccm5",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for remote socket outbound data from CPU Moderator (CCM) 5.",
+ "EventCode": "0x55f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_outbound_data_beats_ccm6",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for remote socket outbound data from CPU Moderator (CCM) 6.",
+ "EventCode": "0x59f",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "remote_socket_inf1_outbound_data_beats_ccm7",
+ "PublicDescription": "Data beats (64 bytes) at interface 1 for remote socket outbound data from CPU Moderator (CCM) 7.",
+ "EventCode": "0x5df",
+ "UMask": "0xbff",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_outbound_data_beats_link0",
+ "PublicDescription": "Data beats (64 bytes) for local socket outbound data from inter-socket xGMI link 0.",
+ "EventCode": "0xb5f",
+ "UMask": "0xf3e",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_outbound_data_beats_link1",
+ "PublicDescription": "Data beats (64 bytes) for local socket outbound data from inter-socket xGMI link 1.",
+ "EventCode": "0xb9f",
+ "UMask": "0xf3e",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_outbound_data_beats_link2",
+ "PublicDescription": "Data beats (64 bytes) for local socket outbound data from inter-socket xGMI link 2.",
+ "EventCode": "0xbdf",
+ "UMask": "0xf3e",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_outbound_data_beats_link3",
+ "PublicDescription": "Data beats (64 bytes) for local socket outbound data from inter-socket xGMI link 3.",
+ "EventCode": "0xc1f",
+ "UMask": "0xf3e",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_outbound_data_beats_link4",
+ "PublicDescription": "Data beats (64 bytes) for local socket outbound data from inter-socket xGMI link 4.",
+ "EventCode": "0xc5f",
+ "UMask": "0xf3e",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_outbound_data_beats_link5",
+ "PublicDescription": "Data beats (64 bytes) for local socket outbound data from inter-socket xGMI link 5.",
+ "EventCode": "0xc9f",
+ "UMask": "0xf3e",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_outbound_data_beats_link6",
+ "PublicDescription": "Data beats (64 bytes) for local socket outbound data from inter-socket xGMI link 6.",
+ "EventCode": "0xcdf",
+ "UMask": "0xf3e",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ },
+ {
+ "EventName": "local_socket_outbound_data_beats_link7",
+ "PublicDescription": "Data beats (64 bytes) for local socket outbound data from inter-socket xGMI link 7.",
+ "EventCode": "0xd1f",
+ "UMask": "0xf3e",
+ "PerPkg": "1",
+ "Unit": "DFPMC"
+ }
+]
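
Editorial note (not part of the patch): each data-fabric event above counts 64-byte data beats at one Coherent Station, so aggregate local-processor read bandwidth can be approximated by summing the per-CS counts and multiplying by 64 bytes over the measurement interval. A minimal sketch, assuming the DFPMC aliases are exposed to perf as named above:

    # Hypothetical one-off measurement over 10 seconds; repeat the -e option
    # for cs2 through cs11 to cover all Coherent Stations, then
    # bytes/s ~= (total beats across all CS events) * 64 / 10.
    perf stat -a -e local_processor_read_data_beats_cs0,local_processor_read_data_beats_cs1 -- sleep 10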
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/floating-point.json b/tools/perf/pmu-events/arch/x86/amdzen4/floating-point.json
new file mode 100644
index 000000000000..cd7328fb7998
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/floating-point.json
@@ -0,0 +1,818 @@
+[
+ {
+ "EventName": "fp_ret_x87_fp_ops.add_sub_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Retired x87 floating-point add and subtract ops.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_ret_x87_fp_ops.mul_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Retired x87 floating-point multiply ops.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_ret_x87_fp_ops.div_sqrt_ops",
+ "EventCode": "0x02",
+ "BriefDescription": "Retired x87 floating-point divide and square root ops.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_ret_x87_fp_ops.all",
+ "EventCode": "0x02",
+ "BriefDescription": "Retired x87 floating-point ops of all types.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.add_sub_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Retired SSE and AVX floating-point add and subtract ops.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mult_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Retired SSE and AVX floating-point multiply ops.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.div_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Retired SSE and AVX floating-point divide and square root ops.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.mac_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Retired SSE and AVX floating-point multiply-accumulate ops (each operation is counted as 2 ops).",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.bfloat_mac_flops",
+ "EventCode": "0x03",
+ "BriefDescription": "Retired SSE and AVX floating-point bfloat multiply-accumulate ops (each operation is counted as 2 ops).",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "fp_ret_sse_avx_ops.all",
+ "EventCode": "0x03",
+ "BriefDescription": "Retired SSE and AVX floating-point ops of all types.",
+ "UMask": "0x1f"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "Retired x87 control word mispredict traps due to mispredictions in RC or PC, or changes in exception mask bits.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.x87_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "Retired x87 bottom-executing ops. Bottom-executing ops wait for all older ops to retire before executing.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_ctrl_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "Retired SSE and AVX control word mispredict traps.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.sse_bot_ret",
+ "EventCode": "0x05",
+ "BriefDescription": "Retired SSE and AVX bottom-executing ops. Bottom-executing ops wait for all older ops to retire before executing.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_retired_ser_ops.all",
+ "EventCode": "0x05",
+ "BriefDescription": "Retired SSE and AVX serializing ops of all types.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "fp_ops_retired_by_width.x87_uops_retired",
+ "EventCode": "0x08",
+ "BriefDescription": "Retired x87 floating-point ops.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_ops_retired_by_width.mmx_uops_retired",
+ "EventCode": "0x08",
+ "BriefDescription": "Retired MMX floating-point ops.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_ops_retired_by_width.scalar_uops_retired",
+ "EventCode": "0x08",
+ "BriefDescription": "Retired scalar floating-point ops.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_ops_retired_by_width.pack_128_uops_retired",
+ "EventCode": "0x08",
+ "BriefDescription": "Retired packed 128-bit floating-point ops.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_ops_retired_by_width.pack_256_uops_retired",
+ "EventCode": "0x08",
+ "BriefDescription": "Retired packed 256-bit floating-point ops.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "fp_ops_retired_by_width.pack_512_uops_retired",
+ "EventCode": "0x08",
+ "BriefDescription": "Retired packed 512-bit floating-point ops.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "fp_ops_retired_by_width.all",
+ "EventCode": "0x08",
+ "BriefDescription": "Retired floating-point ops of all widths.",
+ "UMask": "0x3f"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_add",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point add ops.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_sub",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point subtract ops.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_mul",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point multiply ops.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_mac",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point multiply-accumulate ops.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_div",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point divide ops.",
+ "UMask": "0x05"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_sqrt",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point square root ops.",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_cmp",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point compare ops.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_cvt",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point convert ops.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_blend",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point blend ops.",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_other",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point ops of other types.",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.scalar_all",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired scalar floating-point ops of all types.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_add",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point add ops.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_sub",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point subtract ops.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_mul",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point multiply ops.",
+ "UMask": "0x30"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_mac",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point multiply-accumulate ops.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_div",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point divide ops.",
+ "UMask": "0x50"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_sqrt",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point square root ops.",
+ "UMask": "0x60"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_cmp",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point compare ops.",
+ "UMask": "0x70"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_cvt",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point convert ops.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_blend",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point blend ops.",
+ "UMask": "0x90"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_shuffle",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).",
+ "UMask": "0xb0"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_logical",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point logical ops.",
+ "UMask": "0xd0"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_other",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point ops of other types.",
+ "UMask": "0xe0"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.vector_all",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired vector floating-point ops of all types.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "fp_ops_retired_by_type.all",
+ "EventCode": "0x0a",
+ "BriefDescription": "Retired floating-point ops of all types.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_add",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer add.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_sub",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer subtract ops.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_mul",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer multiply ops.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_mac",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer multiply-accumulate ops.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_cmp",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer compare ops.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_shift",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer shift ops.",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_mov",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer MOV ops.",
+ "UMask": "0x0a"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_shuffle",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).",
+ "UMask": "0x0b"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_pack",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer pack ops.",
+ "UMask": "0x0c"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_logical",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer logical ops.",
+ "UMask": "0x0d"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_other",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer multiply ops of other types.",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.mmx_all",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired MMX integer ops of all types.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_add",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer add ops.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_sub",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer subtract ops.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_mul",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer multiply ops.",
+ "UMask": "0x30"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_mac",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer multiply-accumulate ops.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_aes",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer AES ops.",
+ "UMask": "0x50"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_sha",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer SHA ops.",
+ "UMask": "0x60"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_cmp",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer compare ops.",
+ "UMask": "0x70"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_clm",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer CLM ops.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_shift",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer shift ops.",
+ "UMask": "0x90"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_mov",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer MOV ops.",
+ "UMask": "0xa0"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_shuffle",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).",
+ "UMask": "0xb0"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_pack",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer pack ops.",
+ "UMask": "0xc0"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_logical",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer logical ops.",
+ "UMask": "0xd0"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_other",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer ops of other types.",
+ "UMask": "0xe0"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.sse_avx_all",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE and AVX integer ops of all types.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "sse_avx_ops_retired.all",
+ "EventCode": "0x0b",
+ "BriefDescription": "Retired SSE, AVX and MMX integer ops of all types.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_add",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point add ops.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_sub",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point subtract ops.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_mul",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point multiply ops.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_mac",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point multiply-accumulate ops.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_div",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point divide ops.",
+ "UMask": "0x05"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_sqrt",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point square root ops.",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_cmp",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point compare ops.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_cvt",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point convert ops.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_blend",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point blend ops.",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_shuffle",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).",
+ "UMask": "0x0b"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_logical",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point logical ops.",
+ "UMask": "0x0d"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_other",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point ops of other types.",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp128_all",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 128-bit packed floating-point ops of all types.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_add",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point add ops.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_sub",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point subtract ops.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_mul",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point multiply ops.",
+ "UMask": "0x30"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_mac",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point multiply-accumulate ops.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_div",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point divide ops.",
+ "UMask": "0x50"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_sqrt",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point square root ops.",
+ "UMask": "0x60"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_cmp",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point compare ops.",
+ "UMask": "0x70"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_cvt",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point convert ops.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_blend",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point blend ops.",
+ "UMask": "0x90"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_shuffle",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).",
+ "UMask": "0xb0"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_logical",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point logical ops.",
+ "UMask": "0xd0"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_other",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point ops of other types.",
+ "UMask": "0xe0"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.fp256_all",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired 256-bit packed floating-point ops of all types.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "fp_pack_ops_retired.all",
+ "EventCode": "0x0c",
+ "BriefDescription": "Retired packed floating-point ops of all types.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_add",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer add ops.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_sub",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer subtract ops.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_mul",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer multiply ops.",
+ "UMask": "0x03"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_mac",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer multiply-accumulate ops.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_aes",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer AES ops.",
+ "UMask": "0x05"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_sha",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer SHA ops.",
+ "UMask": "0x06"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_cmp",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer compare ops.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_clm",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer CLM ops.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_shift",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer shift ops.",
+ "UMask": "0x09"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_mov",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer MOV ops.",
+ "UMask": "0x0a"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_shuffle",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).",
+ "UMask": "0x0b"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_pack",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer pack ops.",
+ "UMask": "0x0c"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_logical",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer logical ops.",
+ "UMask": "0x0d"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_other",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer ops of other types.",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "packed_int_op_type.int128_all",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 128-bit packed integer ops of all types.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_add",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer add ops.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_sub",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer subtract ops.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_mul",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer multiply ops.",
+ "UMask": "0x30"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_mac",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer multiply-accumulate ops.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_cmp",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer compare ops.",
+ "UMask": "0x70"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_shift",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer shift ops.",
+ "UMask": "0x90"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_mov",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer MOV ops.",
+ "UMask": "0xa0"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_shuffle",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer shuffle ops (may include instructions not necessarily thought of as including shuffles e.g. horizontal add, dot product, and certain MOV instructions).",
+ "UMask": "0xb0"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_pack",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer pack ops.",
+ "UMask": "0xc0"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_logical",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer logical ops.",
+ "UMask": "0xd0"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_other",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer ops of other types.",
+ "UMask": "0xe0"
+ },
+ {
+ "EventName": "packed_int_op_type.int256_all",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired 256-bit packed integer ops of all types.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "packed_int_op_type.all",
+ "EventCode": "0x0d",
+ "BriefDescription": "Retired packed integer ops of all types.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "fp_disp_faults.x87_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating-point dispatch faults for x87 fills.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "fp_disp_faults.xmm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating-point dispatch faults for XMM fills.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_fill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating-point dispatch faults for YMM fills.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "fp_disp_faults.ymm_spill_fault",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating-point dispatch faults for YMM spills.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "fp_disp_faults.sse_avx_all",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating-point dispatch faults of all types for SSE and AVX ops.",
+ "UMask": "0x0e"
+ },
+ {
+ "EventName": "fp_disp_faults.all",
+ "EventCode": "0x0e",
+ "BriefDescription": "Floating-point dispatch faults of all types.",
+ "UMask": "0x0f"
+ }
+]
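
A minimal usage sketch for the retired-FLOP events above (assuming a Zen 4 machine and a perf binary built with this event table; ./workload is a placeholder command):

  $ perf stat -e fp_ret_sse_avx_ops.all,fp_ret_sse_avx_ops.mac_flops -- ./workload

The .all mask includes the multiply-accumulate sub-events, which the descriptions above count as 2 ops each, so FMA-heavy code is already weighted accordingly.
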
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/memory.json b/tools/perf/pmu-events/arch/x86/amdzen4/memory.json
new file mode 100644
index 000000000000..cb1517f8f399
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/memory.json
@@ -0,0 +1,174 @@
+[
+ {
+ "EventName": "ls_bad_status2.stli_other",
+ "EventCode": "0x24",
+ "BriefDescription": "Store-to-load conflicts (load unable to complete due to a non-forwardable conflict with an older store).",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dispatch.ld_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Number of memory load operations dispatched to the load-store unit.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_dispatch.store_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Number of memory store operations dispatched to the load-store unit.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_dispatch.ld_st_dispatch",
+ "EventCode": "0x29",
+ "BriefDescription": "Number of memory load-store operations dispatched to the load-store unit.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_stlf",
+ "EventCode": "0x35",
+ "BriefDescription": "Store-to-load-forward (STLF) hits."
+ },
+ {
+ "EventName": "ls_st_commit_cancel2.st_commit_cancel_wcb_full",
+ "EventCode": "0x37",
+ "BriefDescription": "Non-cacheable store commits cancelled due to the non-cacheable commit buffer being full.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 4k pages.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB hits for coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 2M pages.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_hit",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB hits for 1G pages.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_4k_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 4k pages.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_coalesced_page_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_2m_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 2M pages.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.tlb_reload_1g_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for 1G pages.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all_l2_miss",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses with L2 DTLB misses (page-table walks are requested) for all page sizes.",
+ "UMask": "0xf0"
+ },
+ {
+ "EventName": "ls_l1_d_tlb_miss.all",
+ "EventCode": "0x45",
+ "BriefDescription": "L1 DTLB misses for all page sizes.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "ls_misal_loads.ma64",
+ "EventCode": "0x47",
+ "BriefDescription": "64B misaligned (cacheline crossing) loads.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "ls_misal_loads.ma4k",
+ "EventCode": "0x47",
+ "BriefDescription": "4kB misaligned (page crossing) loads.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "ls_tlb_flush.all",
+ "EventCode": "0x78",
+ "BriefDescription": "All TLB Flushes.",
+ "UMask": "0xff"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_hit",
+ "EventCode": "0x84",
+ "BriefDescription": "Instruction fetches that miss in the L1 ITLB but hit in the L2 ITLB."
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if4k",
+ "EventCode": "0x85",
+ "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 4k pages.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if2m",
+ "EventCode": "0x85",
+ "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 2M pages.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.if1g",
+ "EventCode": "0x85",
+ "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for 1G pages.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.coalesced_4k",
+ "EventCode": "0x85",
+ "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "bp_l1_tlb_miss_l2_tlb_miss.all",
+ "EventCode": "0x85",
+ "BriefDescription": "Instruction fetches that miss in both the L1 and L2 ITLBs (page-table walks are requested) for all page sizes.",
+ "UMask": "0x0f"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if4k",
+ "EventCode": "0x94",
+ "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 4k or coalesced pages. A coalesced page is a 16k page created from four adjacent 4k pages.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if2m",
+ "EventCode": "0x94",
+ "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 2M pages.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.if1g",
+ "EventCode": "0x94",
+ "BriefDescription": "Instruction fetches that hit in the L1 ITLB for 1G pages.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "bp_l1_tlb_fetch_hit.all",
+ "EventCode": "0x94",
+ "BriefDescription": "Instruction fetches that hit in the L1 ITLB for all page sizes.",
+ "UMask": "0x07"
+ }
+]
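
A minimal sketch for the TLB events above (same assumptions: Zen 4 hardware, perf built with this table, ./workload as a placeholder):

  $ perf stat -e ls_l1_d_tlb_miss.all,ls_l1_d_tlb_miss.all_l2_miss,bp_l1_tlb_miss_l2_tlb_miss.all -- ./workload

The difference between ls_l1_d_tlb_miss.all and ls_l1_d_tlb_miss.all_l2_miss gives the L1 DTLB misses that were satisfied by the L2 DTLB without a page-table walk.
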
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/other.json b/tools/perf/pmu-events/arch/x86/amdzen4/other.json
new file mode 100644
index 000000000000..a02a9c807289
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/other.json
@@ -0,0 +1,138 @@
+[
+ {
+ "EventName": "resyncs_or_nc_redirects",
+ "EventCode": "0x96",
+ "BriefDescription": "Pipeline restarts not caused by branch mispredicts."
+ },
+ {
+ "EventName": "de_op_queue_empty",
+ "EventCode": "0xa9",
+ "BriefDescription": "Cycles when the op queue is empty. Such cycles indicate that the front-end is not delivering instructions fast enough."
+ },
+ {
+ "EventName": "de_src_op_disp.decoder",
+ "EventCode": "0xaa",
+ "BriefDescription": "Ops fetched from instruction cache and dispatched.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "de_src_op_disp.op_cache",
+ "EventCode": "0xaa",
+ "BriefDescription": "Ops fetched from op cache and dispatched.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_src_op_disp.loop_buffer",
+ "EventCode": "0xaa",
+ "BriefDescription": "Ops dispatched from loop buffer.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_src_op_disp.all",
+ "EventCode": "0xaa",
+ "BriefDescription": "Ops dispatched from any source.",
+ "UMask": "0x07"
+ },
+ {
+ "EventName": "de_dis_ops_from_decoder.any_fp_dispatch",
+ "EventCode": "0xab",
+ "BriefDescription": "Number of ops dispatched to the floating-point unit.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_ops_from_decoder.disp_op_type.any_integer_dispatch",
+ "EventCode": "0xab",
+ "BriefDescription": "Number of ops dispatched to the integer execution unit.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.int_phy_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Number of cycles dispatch is stalled for integer physical register file tokens.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.load_queue_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Number of cycles dispatch is stalled for Load queue token.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.store_queue_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Number of cycles dispatch is stalled for store queue tokens.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.taken_brnch_buffer_rsrc",
+ "EventCode": "0xae",
+ "BriefDescription": "Number of cycles dispatch is stalled for taken branch buffer tokens.",
+ "UMask": "0x10"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_reg_file_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Number of cycles dispatch is stalled for floating-point register file tokens.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_sch_rsrc_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Number of cycles dispatch is stalled for floating-point scheduler tokens.",
+ "UMask": "0x40"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls1.fp_flush_recovery_stall",
+ "EventCode": "0xae",
+ "BriefDescription": "Number of cycles dispatch is stalled for floating-point flush recovery.",
+ "UMask": "0x80"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch0_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Number of cycles dispatch is stalled for integer scheduler queue 0 tokens.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch1_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Number of cycles dispatch is stalled for integer scheduler queue 1 tokens.",
+ "UMask": "0x02"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch2_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Number of cycles dispatch is stalled for integer scheduler queue 2 tokens.",
+ "UMask": "0x04"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.int_sch3_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Number of cycles dispatch is stalled for integer scheduler queue 3 tokens.",
+ "UMask": "0x08"
+ },
+ {
+ "EventName": "de_dis_dispatch_token_stalls2.retire_token_stall",
+ "EventCode": "0xaf",
+ "BriefDescription": "Number of cycles dispatch is stalled for retire queue tokens.",
+ "UMask": "0x20"
+ },
+ {
+ "EventName": "de_no_dispatch_per_slot.no_ops_from_frontend",
+ "EventCode": "0x1a0",
+ "BriefDescription": "In each cycle counts dispatch slots left empty because the front-end did not supply ops.",
+ "UMask": "0x01"
+ },
+ {
+ "EventName": "de_no_dispatch_per_slot.backend_stalls",
+ "EventCode": "0x1a0",
+ "BriefDescription": "In each cycle counts ops unable to dispatch because of back-end stalls.",
+ "UMask": "0x1e"
+ },
+ {
+ "EventName": "de_no_dispatch_per_slot.smt_contention",
+ "EventCode": "0x1a0",
+ "BriefDescription": "In each cycle counts ops unable to dispatch because the dispatch cycle was granted to the other SMT thread.",
+ "UMask": "0x60"
+ }
+]
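
A hedged sketch combining the dispatch events above (placeholder workload):

  $ perf stat -e de_src_op_disp.all,de_no_dispatch_per_slot.no_ops_from_frontend,de_no_dispatch_per_slot.backend_stalls -- ./workload

The de_no_dispatch_per_slot.* counts are per dispatch slot, so they are normally compared against total dispatch slots (6 per cycle), which is how the pipeline.json metrics below use them.
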
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/pipeline.json b/tools/perf/pmu-events/arch/x86/amdzen4/pipeline.json
new file mode 100644
index 000000000000..4ae8316c7507
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/pipeline.json
@@ -0,0 +1,98 @@
+[
+ {
+ "MetricName": "total_dispatch_slots",
+ "BriefDescription": "Total dispatch slots (upto 6 instructions can be dispatched in each cycle).",
+ "MetricExpr": "6 * ls_not_halted_cyc"
+ },
+ {
+ "MetricName": "frontend_bound",
+ "BriefDescription": "Fraction of dispatch slots that remained unused because the frontend did not supply enough instructions/ops.",
+ "MetricExpr": "d_ratio(de_no_dispatch_per_slot.no_ops_from_frontend, total_dispatch_slots)",
+ "MetricGroup": "PipelineL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "bad_speculation",
+ "BriefDescription": "Fraction of dispatched ops that did not retire.",
+ "MetricExpr": "d_ratio(de_src_op_disp.all - ex_ret_ops, total_dispatch_slots)",
+ "MetricGroup": "PipelineL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "backend_bound",
+ "BriefDescription": "Fraction of dispatch slots that remained unused because of backend stalls.",
+ "MetricExpr": "d_ratio(de_no_dispatch_per_slot.backend_stalls, total_dispatch_slots)",
+ "MetricGroup": "PipelineL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "smt_contention",
+ "BriefDescription": "Fraction of dispatch slots that remained unused because the other thread was selected.",
+ "MetricExpr": "d_ratio(de_no_dispatch_per_slot.smt_contention, total_dispatch_slots)",
+ "MetricGroup": "PipelineL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "retiring",
+ "BriefDescription": "Fraction of dispatch slots used by ops that retired.",
+ "MetricExpr": "d_ratio(ex_ret_ops, total_dispatch_slots)",
+ "MetricGroup": "PipelineL1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "frontend_bound_latency",
+ "BriefDescription": "Fraction of dispatch slots that remained unused because of a latency bottleneck in the frontend (such as instruction cache or TLB misses).",
+ "MetricExpr": "d_ratio((6 * cpu@de_no_dispatch_per_slot.no_ops_from_frontend\\,cmask\\=0x6@), total_dispatch_slots)",
+ "MetricGroup": "PipelineL2;frontend_bound_group",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "frontend_bound_bandwidth",
+ "BriefDescription": "Fraction of dispatch slots that remained unused because of a bandwidth bottleneck in the frontend (such as decode or op cache fetch bandwidth).",
+ "MetricExpr": "d_ratio(de_no_dispatch_per_slot.no_ops_from_frontend - (6 * cpu@de_no_dispatch_per_slot.no_ops_from_frontend\\,cmask\\=0x6@), total_dispatch_slots)",
+ "MetricGroup": "PipelineL2;frontend_bound_group",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "bad_speculation_mispredicts",
+ "BriefDescription": "Fraction of dispatched ops that were flushed due to branch mispredicts.",
+ "MetricExpr": "d_ratio(bad_speculation * ex_ret_brn_misp, ex_ret_brn_misp + resyncs_or_nc_redirects)",
+ "MetricGroup": "PipelineL2;bad_speculation_group",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "bad_speculation_pipeline_restarts",
+ "BriefDescription": "Fraction of dispatched ops that were flushed due to pipeline restarts (resyncs).",
+ "MetricExpr": "d_ratio(bad_speculation * resyncs_or_nc_redirects, ex_ret_brn_misp + resyncs_or_nc_redirects)",
+ "MetricGroup": "PipelineL2;bad_speculation_group",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "backend_bound_memory",
+ "BriefDescription": "Fraction of dispatch slots that remained unused because of stalls due to the memory subsystem.",
+ "MetricExpr": "backend_bound * d_ratio(ex_no_retire.load_not_complete, ex_no_retire.not_complete)",
+ "MetricGroup": "PipelineL2;backend_bound_group",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "backend_bound_cpu",
+ "BriefDescription": "Fraction of dispatch slots that remained unused because of stalls not related to the memory subsystem.",
+ "MetricExpr": "backend_bound * (1 - d_ratio(ex_no_retire.load_not_complete, ex_no_retire.not_complete))",
+ "MetricGroup": "PipelineL2;backend_bound_group",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "retiring_fastpath",
+ "BriefDescription": "Fraction of dispatch slots used by fastpath ops that retired.",
+ "MetricExpr": "retiring * (1 - d_ratio(ex_ret_ucode_ops, ex_ret_ops))",
+ "MetricGroup": "PipelineL2;retiring_group",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "retiring_microcode",
+ "BriefDescription": "Fraction of dispatch slots used by microcode ops that retired.",
+ "MetricExpr": "retiring * d_ratio(ex_ret_ucode_ops, ex_ret_ops)",
+ "MetricGroup": "PipelineL2;retiring_group",
+ "ScaleUnit": "100%"
+ }
+]
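
These metrics form a top-down pipeline breakdown: the five level-1 fractions decompose total_dispatch_slots into slots lost to the front-end, to flushed ops, to back-end stalls, to the sibling SMT thread, and to retired ops. As a usage sketch (assuming perf is built with this table and runs on Zen 4; ./workload is a placeholder), the groups can be requested by name:

  $ perf stat -M PipelineL1 -- ./workload
  $ perf stat -M frontend_bound_group,backend_bound_group -- ./workload
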
diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json
new file mode 100644
index 000000000000..5e6a793acf7b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json
@@ -0,0 +1,334 @@
+[
+ {
+ "MetricName": "branch_misprediction_ratio",
+ "BriefDescription": "Execution-time branch misprediction ratio (non-speculative).",
+ "MetricExpr": "d_ratio(ex_ret_brn_misp, ex_ret_brn)",
+ "MetricGroup": "branch_prediction",
+ "ScaleUnit": "100%"
+ },
+ {
+ "EventName": "all_data_cache_accesses",
+ "EventCode": "0x29",
+ "BriefDescription": "All data cache accesses.",
+ "UMask": "0x07"
+ },
+ {
+ "MetricName": "all_l2_cache_accesses",
+ "BriefDescription": "All L2 cache accesses.",
+ "MetricExpr": "l2_request_g1.all_no_prefetch + l2_pf_hit_l2.all + l2_pf_miss_l2_hit_l3.all + l2_pf_miss_l2_l3.all",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_accesses_from_l1_ic_misses",
+ "BriefDescription": "L2 cache accesses from L1 instruction cache misses (including prefetch).",
+ "MetricExpr": "l2_request_g1.cacheable_ic_read",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_accesses_from_l1_dc_misses",
+ "BriefDescription": "L2 cache accesses from L1 data cache misses (including prefetch).",
+ "MetricExpr": "l2_request_g1.all_dc",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_accesses_from_l2_hwpf",
+ "BriefDescription": "L2 cache accesses from L2 cache hardware prefetcher.",
+ "MetricExpr": "l2_pf_hit_l2.all + l2_pf_miss_l2_hit_l3.all + l2_pf_miss_l2_l3.all",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_misses",
+ "BriefDescription": "All L2 cache misses.",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_miss_in_l2 + l2_pf_miss_l2_hit_l3.all + l2_pf_miss_l2_l3.all",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_misses_from_l1_ic_miss",
+ "BriefDescription": "L2 cache misses from L1 instruction cache misses.",
+ "MetricExpr": "l2_cache_req_stat.ic_fill_miss",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_misses_from_l1_dc_miss",
+ "BriefDescription": "L2 cache misses from L1 data cache misses.",
+ "MetricExpr": "l2_cache_req_stat.ls_rd_blk_c",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_misses_from_l2_hwpf",
+ "BriefDescription": "L2 cache misses from L2 cache hardware prefetcher.",
+ "MetricExpr": "l2_pf_miss_l2_hit_l3.all + l2_pf_miss_l2_l3.all",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "all_l2_cache_hits",
+ "BriefDescription": "All L2 cache hits.",
+ "MetricExpr": "l2_cache_req_stat.ic_dc_hit_in_l2 + l2_pf_hit_l2.all",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_hits_from_l1_ic_miss",
+ "BriefDescription": "L2 cache hits from L1 instruction cache misses.",
+ "MetricExpr": "l2_cache_req_stat.ic_hit_in_l2",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_hits_from_l1_dc_miss",
+ "BriefDescription": "L2 cache hits from L1 data cache misses.",
+ "MetricExpr": "l2_cache_req_stat.dc_hit_in_l2",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l2_cache_hits_from_l2_hwpf",
+ "BriefDescription": "L2 cache hits from L2 cache hardware prefetcher.",
+ "MetricExpr": "l2_pf_hit_l2.all",
+ "MetricGroup": "l2_cache"
+ },
+ {
+ "MetricName": "l3_cache_accesses",
+ "BriefDescription": "L3 cache accesses.",
+ "MetricExpr": "l3_lookup_state.all_coherent_accesses_to_l3",
+ "MetricGroup": "l3_cache"
+ },
+ {
+ "MetricName": "l3_misses",
+ "BriefDescription": "L3 misses (including cacheline state change requests).",
+ "MetricExpr": "l3_lookup_state.l3_miss",
+ "MetricGroup": "l3_cache"
+ },
+ {
+ "MetricName": "l3_read_miss_latency",
+ "BriefDescription": "Average L3 read miss latency (in core clocks).",
+ "MetricExpr": "(l3_xi_sampled_latency.all * 10) / l3_xi_sampled_latency_requests.all",
+ "MetricGroup": "l3_cache",
+ "ScaleUnit": "1core clocks"
+ },
+ {
+ "MetricName": "op_cache_fetch_miss_ratio",
+ "BriefDescription": "Op cache miss ratio for all fetches.",
+ "MetricExpr": "d_ratio(op_cache_hit_miss.op_cache_miss, op_cache_hit_miss.all_op_cache_accesses)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "ic_fetch_miss_ratio",
+ "BriefDescription": "Instruction cache miss ratio for all fetches. An instruction cache miss will not be counted by this metric if it is an OC hit.",
+ "MetricExpr": "d_ratio(ic_tag_hit_miss.instruction_cache_miss, ic_tag_hit_miss.all_instruction_cache_accesses)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "MetricName": "l1_data_cache_fills_from_memory",
+ "BriefDescription": "L1 data cache fills from DRAM or MMIO in any NUMA node.",
+ "MetricExpr": "ls_any_fills_from_sys.dram_io_all",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_data_cache_fills_from_remote_node",
+ "BriefDescription": "L1 data cache fills from a different NUMA node.",
+ "MetricExpr": "ls_any_fills_from_sys.far_all",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_data_cache_fills_from_same_ccx",
+ "BriefDescription": "L1 data cache fills from within the same CCX.",
+ "MetricExpr": "ls_any_fills_from_sys.local_all",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_data_cache_fills_from_different_ccx",
+ "BriefDescription": "L1 data cache fills from another CCX cache in any NUMA node.",
+ "MetricExpr": "ls_any_fills_from_sys.remote_cache",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "all_l1_data_cache_fills",
+ "BriefDescription": "All L1 data cache fills.",
+ "MetricExpr": "ls_any_fills_from_sys.all",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_demand_data_cache_fills_from_local_l2",
+ "BriefDescription": "L1 demand data cache fills from local L2 cache.",
+ "MetricExpr": "ls_dmnd_fills_from_sys.local_l2",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_demand_data_cache_fills_from_same_ccx",
+ "BriefDescription": "L1 demand data cache fills from within the same CCX.",
+ "MetricExpr": "ls_dmnd_fills_from_sys.local_ccx",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_demand_data_cache_fills_from_near_cache",
+ "BriefDescription": "L1 demand data cache fills from another CCX cache in the same NUMA node.",
+ "MetricExpr": "ls_dmnd_fills_from_sys.near_cache",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_demand_data_cache_fills_from_near_memory",
+ "BriefDescription": "L1 demand data cache fills from DRAM or MMIO in the same NUMA node.",
+ "MetricExpr": "ls_dmnd_fills_from_sys.dram_io_near",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_demand_data_cache_fills_from_far_cache",
+ "BriefDescription": "L1 demand data cache fills from another CCX cache in a different NUMA node.",
+ "MetricExpr": "ls_dmnd_fills_from_sys.far_cache",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_demand_data_cache_fills_from_far_memory",
+ "BriefDescription": "L1 demand data cache fills from DRAM or MMIO in a different NUMA node.",
+ "MetricExpr": "ls_dmnd_fills_from_sys.dram_io_far",
+ "MetricGroup": "l1_dcache"
+ },
+ {
+ "MetricName": "l1_itlb_misses",
+ "BriefDescription": "L1 instruction TLB misses.",
+ "MetricExpr": "bp_l1_tlb_miss_l2_tlb_hit + bp_l1_tlb_miss_l2_tlb_miss.all",
+ "MetricGroup": "tlb"
+ },
+ {
+ "MetricName": "l2_itlb_misses",
+ "BriefDescription": "L2 instruction TLB misses and instruction page walks.",
+ "MetricExpr": "bp_l1_tlb_miss_l2_tlb_miss.all",
+ "MetricGroup": "tlb"
+ },
+ {
+ "MetricName": "l1_dtlb_misses",
+ "BriefDescription": "L1 data TLB misses.",
+ "MetricExpr": "ls_l1_d_tlb_miss.all",
+ "MetricGroup": "tlb"
+ },
+ {
+ "MetricName": "l2_dtlb_misses",
+ "BriefDescription": "L2 data TLB misses and data page walks.",
+ "MetricExpr": "ls_l1_d_tlb_miss.all_l2_miss",
+ "MetricGroup": "tlb"
+ },
+ {
+ "MetricName": "all_tlbs_flushed",
+ "BriefDescription": "All TLBs flushed.",
+ "MetricExpr": "ls_tlb_flush.all",
+ "MetricGroup": "tlb"
+ },
+ {
+ "MetricName": "macro_ops_dispatched",
+ "BriefDescription": "Macro-ops dispatched.",
+ "MetricExpr": "de_src_op_disp.all",
+ "MetricGroup": "decoder"
+ },
+ {
+ "MetricName": "sse_avx_stalls",
+ "BriefDescription": "Mixed SSE/AVX stalls.",
+ "MetricExpr": "fp_disp_faults.sse_avx_all"
+ },
+ {
+ "MetricName": "macro_ops_retired",
+ "BriefDescription": "Macro-ops retired.",
+ "MetricExpr": "ex_ret_ops"
+ },
+ {
+ "MetricName": "dram_read_data_for_local_processor",
+ "BriefDescription": "DRAM read data for local processor.",
+ "MetricExpr": "local_processor_read_data_beats_cs0 + local_processor_read_data_beats_cs1 + local_processor_read_data_beats_cs2 + local_processor_read_data_beats_cs3 + local_processor_read_data_beats_cs4 + local_processor_read_data_beats_cs5 + local_processor_read_data_beats_cs6 + local_processor_read_data_beats_cs7 + local_processor_read_data_beats_cs8 + local_processor_read_data_beats_cs9 + local_processor_read_data_beats_cs10 + local_processor_read_data_beats_cs11",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "dram_write_data_for_local_processor",
+ "BriefDescription": "DRAM write data for local processor.",
+ "MetricExpr": "local_processor_write_data_beats_cs0 + local_processor_write_data_beats_cs1 + local_processor_write_data_beats_cs2 + local_processor_write_data_beats_cs3 + local_processor_write_data_beats_cs4 + local_processor_write_data_beats_cs5 + local_processor_write_data_beats_cs6 + local_processor_write_data_beats_cs7 + local_processor_write_data_beats_cs8 + local_processor_write_data_beats_cs9 + local_processor_write_data_beats_cs10 + local_processor_write_data_beats_cs11",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "dram_read_data_for_remote_processor",
+ "BriefDescription": "DRAM read data for remote processor.",
+ "MetricExpr": "remote_processor_read_data_beats_cs0 + remote_processor_read_data_beats_cs1 + remote_processor_read_data_beats_cs2 + remote_processor_read_data_beats_cs3 + remote_processor_read_data_beats_cs4 + remote_processor_read_data_beats_cs5 + remote_processor_read_data_beats_cs6 + remote_processor_read_data_beats_cs7 + remote_processor_read_data_beats_cs8 + remote_processor_read_data_beats_cs9 + remote_processor_read_data_beats_cs10 + remote_processor_read_data_beats_cs11",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "dram_write_data_for_remote_processor",
+ "BriefDescription": "DRAM write data for remote processor.",
+ "MetricExpr": "remote_processor_write_data_beats_cs0 + remote_processor_write_data_beats_cs1 + remote_processor_write_data_beats_cs2 + remote_processor_write_data_beats_cs3 + remote_processor_write_data_beats_cs4 + remote_processor_write_data_beats_cs5 + remote_processor_write_data_beats_cs6 + remote_processor_write_data_beats_cs7 + remote_processor_write_data_beats_cs8 + remote_processor_write_data_beats_cs9 + remote_processor_write_data_beats_cs10 + remote_processor_write_data_beats_cs11",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "local_socket_upstream_dma_read_data",
+ "BriefDescription": "Local socket upstream DMA read data.",
+ "MetricExpr": "local_socket_upstream_read_beats_iom0 + local_socket_upstream_read_beats_iom1 + local_socket_upstream_read_beats_iom2 + local_socket_upstream_read_beats_iom3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "local_socket_upstream_dma_write_data",
+ "BriefDescription": "Local socket upstream DMA write data.",
+ "MetricExpr": "local_socket_upstream_write_beats_iom0 + local_socket_upstream_write_beats_iom1 + local_socket_upstream_write_beats_iom2 + local_socket_upstream_write_beats_iom3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "remote_socket_upstream_dma_read_data",
+ "BriefDescription": "Remote socket upstream DMA read data.",
+ "MetricExpr": "remote_socket_upstream_read_beats_iom0 + remote_socket_upstream_read_beats_iom1 + remote_socket_upstream_read_beats_iom2 + remote_socket_upstream_read_beats_iom3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "remote_socket_upstream_dma_write_data",
+ "BriefDescription": "Remote socket upstream DMA write data.",
+ "MetricExpr": "remote_socket_upstream_write_beats_iom0 + remote_socket_upstream_write_beats_iom1 + remote_socket_upstream_write_beats_iom2 + remote_socket_upstream_write_beats_iom3",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "local_socket_inbound_data_to_cpu",
+ "BriefDescription": "Local socket inbound data to the CPU (e.g. read data).",
+ "MetricExpr": "local_socket_inf0_inbound_data_beats_ccm0 + local_socket_inf1_inbound_data_beats_ccm0 + local_socket_inf0_inbound_data_beats_ccm1 + local_socket_inf1_inbound_data_beats_ccm1 + local_socket_inf0_inbound_data_beats_ccm2 + local_socket_inf1_inbound_data_beats_ccm2 + local_socket_inf0_inbound_data_beats_ccm3 + local_socket_inf1_inbound_data_beats_ccm3 + local_socket_inf0_inbound_data_beats_ccm4 + local_socket_inf1_inbound_data_beats_ccm4 + local_socket_inf0_inbound_data_beats_ccm5 + local_socket_inf1_inbound_data_beats_ccm5 + local_socket_inf0_inbound_data_beats_ccm6 + local_socket_inf1_inbound_data_beats_ccm6 + local_socket_inf0_inbound_data_beats_ccm7 + local_socket_inf1_inbound_data_beats_ccm7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "3.0517578125e-5MiB"
+ },
+ {
+ "MetricName": "local_socket_outbound_data_from_cpu",
+ "BriefDescription": "Local socket outbound data from the CPU (e.g. write data).",
+ "MetricExpr": "local_socket_inf0_outbound_data_beats_ccm0 + local_socket_inf1_outbound_data_beats_ccm0 + local_socket_inf0_outbound_data_beats_ccm1 + local_socket_inf1_outbound_data_beats_ccm1 + local_socket_inf0_outbound_data_beats_ccm2 + local_socket_inf1_outbound_data_beats_ccm2 + local_socket_inf0_outbound_data_beats_ccm3 + local_socket_inf1_outbound_data_beats_ccm3 + local_socket_inf0_outbound_data_beats_ccm4 + local_socket_inf1_outbound_data_beats_ccm4 + local_socket_inf0_outbound_data_beats_ccm5 + local_socket_inf1_outbound_data_beats_ccm5 + local_socket_inf0_outbound_data_beats_ccm6 + local_socket_inf1_outbound_data_beats_ccm6 + local_socket_inf0_outbound_data_beats_ccm7 + local_socket_inf1_outbound_data_beats_ccm7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "remote_socket_inbound_data_to_cpu",
+ "BriefDescription": "Remote socket inbound data to the CPU (e.g. read data).",
+ "MetricExpr": "remote_socket_inf0_inbound_data_beats_ccm0 + remote_socket_inf1_inbound_data_beats_ccm0 + remote_socket_inf0_inbound_data_beats_ccm1 + remote_socket_inf1_inbound_data_beats_ccm1 + remote_socket_inf0_inbound_data_beats_ccm2 + remote_socket_inf1_inbound_data_beats_ccm2 + remote_socket_inf0_inbound_data_beats_ccm3 + remote_socket_inf1_inbound_data_beats_ccm3 + remote_socket_inf0_inbound_data_beats_ccm4 + remote_socket_inf1_inbound_data_beats_ccm4 + remote_socket_inf0_inbound_data_beats_ccm5 + remote_socket_inf1_inbound_data_beats_ccm5 + remote_socket_inf0_inbound_data_beats_ccm6 + remote_socket_inf1_inbound_data_beats_ccm6 + remote_socket_inf0_inbound_data_beats_ccm7 + remote_socket_inf1_inbound_data_beats_ccm7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "3.0517578125e-5MiB"
+ },
+ {
+ "MetricName": "remote_socket_outbound_data_from_cpu",
+ "BriefDescription": "Remote socket outbound data from the CPU (e.g. write data).",
+ "MetricExpr": "remote_socket_inf0_outbound_data_beats_ccm0 + remote_socket_inf1_outbound_data_beats_ccm0 + remote_socket_inf0_outbound_data_beats_ccm1 + remote_socket_inf1_outbound_data_beats_ccm1 + remote_socket_inf0_outbound_data_beats_ccm2 + remote_socket_inf1_outbound_data_beats_ccm2 + remote_socket_inf0_outbound_data_beats_ccm3 + remote_socket_inf1_outbound_data_beats_ccm3 + remote_socket_inf0_outbound_data_beats_ccm4 + remote_socket_inf1_outbound_data_beats_ccm4 + remote_socket_inf0_outbound_data_beats_ccm5 + remote_socket_inf1_outbound_data_beats_ccm5 + remote_socket_inf0_outbound_data_beats_ccm6 + remote_socket_inf1_outbound_data_beats_ccm6 + remote_socket_inf0_outbound_data_beats_ccm7 + remote_socket_inf1_outbound_data_beats_ccm7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ },
+ {
+ "MetricName": "local_socket_outbound_data_from_all_links",
+ "BriefDescription": "Outbound data from all links (local socket).",
+ "MetricExpr": "local_socket_outbound_data_beats_link0 + local_socket_outbound_data_beats_link1 + local_socket_outbound_data_beats_link2 + local_socket_outbound_data_beats_link3 + local_socket_outbound_data_beats_link4 + local_socket_outbound_data_beats_link5 + local_socket_outbound_data_beats_link6 + local_socket_outbound_data_beats_link7",
+ "MetricGroup": "data_fabric",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625e-5MiB"
+ }
+]
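
As with pipeline.json, these metrics can be requested individually or via their MetricGroup; a sketch with a placeholder workload (the uncore data_fabric metrics are PerPkg and are normally collected system-wide with -a):

  $ perf stat -M branch_misprediction_ratio,ic_fetch_miss_ratio -- ./workload
  $ perf stat -a -M data_fabric -- sleep 10

The data_fabric expressions sum per-channel beat counters, and the ScaleUnit values (6.103515625e-5MiB for 64-byte beats, 3.0517578125e-5MiB for 32-byte beats) convert beat counts into MiB.
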
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/cache.json b/tools/perf/pmu-events/arch/x86/bonnell/cache.json
index ffab90c5891c..1ca95a70d48a 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/cache.json
@@ -1,746 +1,653 @@
[
{
- "EventCode": "0x21",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "L2_ADS.SELF",
- "SampleAfterValue": "200000",
- "BriefDescription": "Cycles L2 address bus is in use."
+ "BriefDescription": "L1 Data Cacheable reads and writes",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE.ALL_CACHE_REF",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xa3"
},
{
- "EventCode": "0x22",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "L2_DBUS_BUSY.SELF",
- "SampleAfterValue": "200000",
- "BriefDescription": "Cycles the L2 cache data bus is busy."
+ "BriefDescription": "L1 Data reads and writes",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE.ALL_REF",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x83"
},
{
- "EventCode": "0x23",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "L2_DBUS_BUSY_RD.SELF",
+ "BriefDescription": "Modified cache lines evicted from the L1 data cache",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE.EVICT",
"SampleAfterValue": "200000",
- "BriefDescription": "Cycles the L2 transfers data to the core."
+ "UMask": "0x10"
},
{
- "EventCode": "0x24",
- "Counter": "0,1",
- "UMask": "0x70",
- "EventName": "L2_LINES_IN.SELF.ANY",
+ "BriefDescription": "L1 Cacheable Data Reads",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE.LD",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xa1"
+ },
+ {
+ "BriefDescription": "L1 Data line replacements",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE.REPL",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache misses."
+ "UMask": "0x8"
},
{
- "EventCode": "0x24",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "L2_LINES_IN.SELF.DEMAND",
+ "BriefDescription": "Modified cache lines allocated in the L1 data cache",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE.REPLM",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache misses."
+ "UMask": "0x48"
},
{
- "EventCode": "0x24",
- "Counter": "0,1",
- "UMask": "0x50",
- "EventName": "L2_LINES_IN.SELF.PREFETCH",
+ "BriefDescription": "L1 Cacheable Data Writes",
+ "EventCode": "0x40",
+ "EventName": "L1D_CACHE.ST",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xa2"
+ },
+ {
+ "BriefDescription": "Cycles L2 address bus is in use.",
+ "EventCode": "0x21",
+ "EventName": "L2_ADS.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache misses."
+ "UMask": "0x40"
},
{
- "EventCode": "0x25",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "L2_M_LINES_IN.SELF",
+ "BriefDescription": "All data requests from the L1 data cache",
+ "EventCode": "0x2C",
+ "EventName": "L2_DATA_RQSTS.SELF.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache line modifications."
+ "UMask": "0x44"
},
{
- "EventCode": "0x26",
- "Counter": "0,1",
- "UMask": "0x70",
- "EventName": "L2_LINES_OUT.SELF.ANY",
+ "BriefDescription": "All data requests from the L1 data cache",
+ "EventCode": "0x2C",
+ "EventName": "L2_DATA_RQSTS.SELF.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache lines evicted."
+ "UMask": "0x41"
},
{
- "EventCode": "0x26",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "L2_LINES_OUT.SELF.DEMAND",
+ "BriefDescription": "All data requests from the L1 data cache",
+ "EventCode": "0x2C",
+ "EventName": "L2_DATA_RQSTS.SELF.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache lines evicted."
+ "UMask": "0x4f"
},
{
- "EventCode": "0x26",
- "Counter": "0,1",
- "UMask": "0x50",
- "EventName": "L2_LINES_OUT.SELF.PREFETCH",
+ "BriefDescription": "All data requests from the L1 data cache",
+ "EventCode": "0x2C",
+ "EventName": "L2_DATA_RQSTS.SELF.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache lines evicted."
+ "UMask": "0x48"
},
{
- "EventCode": "0x27",
- "Counter": "0,1",
- "UMask": "0x70",
- "EventName": "L2_M_LINES_OUT.SELF.ANY",
+ "BriefDescription": "All data requests from the L1 data cache",
+ "EventCode": "0x2C",
+ "EventName": "L2_DATA_RQSTS.SELF.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Modified lines evicted from the L2 cache"
+ "UMask": "0x42"
},
{
- "EventCode": "0x27",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "L2_M_LINES_OUT.SELF.DEMAND",
+ "BriefDescription": "Cycles the L2 cache data bus is busy.",
+ "EventCode": "0x22",
+ "EventName": "L2_DBUS_BUSY.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Modified lines evicted from the L2 cache"
+ "UMask": "0x40"
},
{
- "EventCode": "0x27",
- "Counter": "0,1",
- "UMask": "0x50",
- "EventName": "L2_M_LINES_OUT.SELF.PREFETCH",
+ "BriefDescription": "Cycles the L2 transfers data to the core.",
+ "EventCode": "0x23",
+ "EventName": "L2_DBUS_BUSY_RD.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Modified lines evicted from the L2 cache"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "L2 cacheable instruction fetch requests",
"EventCode": "0x28",
- "Counter": "0,1",
- "UMask": "0x44",
"EventName": "L2_IFETCH.SELF.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cacheable instruction fetch requests"
+ "UMask": "0x44"
},
{
+ "BriefDescription": "L2 cacheable instruction fetch requests",
"EventCode": "0x28",
- "Counter": "0,1",
- "UMask": "0x41",
"EventName": "L2_IFETCH.SELF.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cacheable instruction fetch requests"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "L2 cacheable instruction fetch requests",
"EventCode": "0x28",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L2_IFETCH.SELF.M_STATE",
+ "EventName": "L2_IFETCH.SELF.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cacheable instruction fetch requests"
+ "UMask": "0x4f"
},
{
+ "BriefDescription": "L2 cacheable instruction fetch requests",
"EventCode": "0x28",
- "Counter": "0,1",
- "UMask": "0x42",
- "EventName": "L2_IFETCH.SELF.S_STATE",
+ "EventName": "L2_IFETCH.SELF.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cacheable instruction fetch requests"
+ "UMask": "0x48"
},
{
+ "BriefDescription": "L2 cacheable instruction fetch requests",
"EventCode": "0x28",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_IFETCH.SELF.MESI",
+ "EventName": "L2_IFETCH.SELF.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cacheable instruction fetch requests"
+ "UMask": "0x42"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x74",
"EventName": "L2_LD.SELF.ANY.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x74"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x71",
"EventName": "L2_LD.SELF.ANY.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x71"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x78",
- "EventName": "L2_LD.SELF.ANY.M_STATE",
+ "EventName": "L2_LD.SELF.ANY.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x7f"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x72",
- "EventName": "L2_LD.SELF.ANY.S_STATE",
+ "EventName": "L2_LD.SELF.ANY.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x78"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x7f",
- "EventName": "L2_LD.SELF.ANY.MESI",
+ "EventName": "L2_LD.SELF.ANY.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x72"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x44",
"EventName": "L2_LD.SELF.DEMAND.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x44"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x41",
"EventName": "L2_LD.SELF.DEMAND.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L2_LD.SELF.DEMAND.M_STATE",
+ "EventName": "L2_LD.SELF.DEMAND.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x4f"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x42",
- "EventName": "L2_LD.SELF.DEMAND.S_STATE",
+ "EventName": "L2_LD.SELF.DEMAND.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x48"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_LD.SELF.DEMAND.MESI",
+ "EventName": "L2_LD.SELF.DEMAND.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x42"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x54",
"EventName": "L2_LD.SELF.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x54"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x51",
"EventName": "L2_LD.SELF.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x51"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x58",
- "EventName": "L2_LD.SELF.PREFETCH.M_STATE",
+ "EventName": "L2_LD.SELF.PREFETCH.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x5f"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x52",
- "EventName": "L2_LD.SELF.PREFETCH.S_STATE",
+ "EventName": "L2_LD.SELF.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
+ "UMask": "0x58"
},
{
+ "BriefDescription": "L2 cache reads",
"EventCode": "0x29",
- "Counter": "0,1",
- "UMask": "0x5f",
- "EventName": "L2_LD.SELF.PREFETCH.MESI",
- "SampleAfterValue": "200000",
- "BriefDescription": "L2 cache reads"
- },
- {
- "EventCode": "0x2A",
- "Counter": "0,1",
- "UMask": "0x44",
- "EventName": "L2_ST.SELF.E_STATE",
- "SampleAfterValue": "200000",
- "BriefDescription": "L2 store requests"
- },
- {
- "EventCode": "0x2A",
- "Counter": "0,1",
- "UMask": "0x41",
- "EventName": "L2_ST.SELF.I_STATE",
+ "EventName": "L2_LD.SELF.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 store requests"
+ "UMask": "0x52"
},
{
- "EventCode": "0x2A",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L2_ST.SELF.M_STATE",
+ "BriefDescription": "All read requests from L1 instruction and data caches",
+ "EventCode": "0x2D",
+ "EventName": "L2_LD_IFETCH.SELF.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 store requests"
+ "UMask": "0x44"
},
{
- "EventCode": "0x2A",
- "Counter": "0,1",
- "UMask": "0x42",
- "EventName": "L2_ST.SELF.S_STATE",
+ "BriefDescription": "All read requests from L1 instruction and data caches",
+ "EventCode": "0x2D",
+ "EventName": "L2_LD_IFETCH.SELF.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 store requests"
+ "UMask": "0x41"
},
{
- "EventCode": "0x2A",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_ST.SELF.MESI",
+ "BriefDescription": "All read requests from L1 instruction and data caches",
+ "EventCode": "0x2D",
+ "EventName": "L2_LD_IFETCH.SELF.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 store requests"
+ "UMask": "0x4f"
},
{
- "EventCode": "0x2B",
- "Counter": "0,1",
- "UMask": "0x44",
- "EventName": "L2_LOCK.SELF.E_STATE",
+ "BriefDescription": "All read requests from L1 instruction and data caches",
+ "EventCode": "0x2D",
+ "EventName": "L2_LD_IFETCH.SELF.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 locked accesses"
+ "UMask": "0x48"
},
{
- "EventCode": "0x2B",
- "Counter": "0,1",
- "UMask": "0x41",
- "EventName": "L2_LOCK.SELF.I_STATE",
+ "BriefDescription": "All read requests from L1 instruction and data caches",
+ "EventCode": "0x2D",
+ "EventName": "L2_LD_IFETCH.SELF.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 locked accesses"
+ "UMask": "0x42"
},
{
- "EventCode": "0x2B",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L2_LOCK.SELF.M_STATE",
+ "BriefDescription": "L2 cache misses.",
+ "EventCode": "0x24",
+ "EventName": "L2_LINES_IN.SELF.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 locked accesses"
+ "UMask": "0x70"
},
{
- "EventCode": "0x2B",
- "Counter": "0,1",
- "UMask": "0x42",
- "EventName": "L2_LOCK.SELF.S_STATE",
+ "BriefDescription": "L2 cache misses.",
+ "EventCode": "0x24",
+ "EventName": "L2_LINES_IN.SELF.DEMAND",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 locked accesses"
+ "UMask": "0x40"
},
{
- "EventCode": "0x2B",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_LOCK.SELF.MESI",
+ "BriefDescription": "L2 cache misses.",
+ "EventCode": "0x24",
+ "EventName": "L2_LINES_IN.SELF.PREFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 locked accesses"
+ "UMask": "0x50"
},
{
- "EventCode": "0x2C",
- "Counter": "0,1",
- "UMask": "0x44",
- "EventName": "L2_DATA_RQSTS.SELF.E_STATE",
+ "BriefDescription": "L2 cache lines evicted.",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SELF.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All data requests from the L1 data cache"
+ "UMask": "0x70"
},
{
- "EventCode": "0x2C",
- "Counter": "0,1",
- "UMask": "0x41",
- "EventName": "L2_DATA_RQSTS.SELF.I_STATE",
+ "BriefDescription": "L2 cache lines evicted.",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SELF.DEMAND",
"SampleAfterValue": "200000",
- "BriefDescription": "All data requests from the L1 data cache"
+ "UMask": "0x40"
},
{
- "EventCode": "0x2C",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L2_DATA_RQSTS.SELF.M_STATE",
+ "BriefDescription": "L2 cache lines evicted.",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SELF.PREFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "All data requests from the L1 data cache"
+ "UMask": "0x50"
},
{
- "EventCode": "0x2C",
- "Counter": "0,1",
- "UMask": "0x42",
- "EventName": "L2_DATA_RQSTS.SELF.S_STATE",
+ "BriefDescription": "L2 locked accesses",
+ "EventCode": "0x2B",
+ "EventName": "L2_LOCK.SELF.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "All data requests from the L1 data cache"
+ "UMask": "0x44"
},
{
- "EventCode": "0x2C",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_DATA_RQSTS.SELF.MESI",
+ "BriefDescription": "L2 locked accesses",
+ "EventCode": "0x2B",
+ "EventName": "L2_LOCK.SELF.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "All data requests from the L1 data cache"
+ "UMask": "0x41"
},
{
- "EventCode": "0x2D",
- "Counter": "0,1",
- "UMask": "0x44",
- "EventName": "L2_LD_IFETCH.SELF.E_STATE",
+ "BriefDescription": "L2 locked accesses",
+ "EventCode": "0x2B",
+ "EventName": "L2_LOCK.SELF.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "All read requests from L1 instruction and data caches"
+ "UMask": "0x4f"
},
{
- "EventCode": "0x2D",
- "Counter": "0,1",
- "UMask": "0x41",
- "EventName": "L2_LD_IFETCH.SELF.I_STATE",
+ "BriefDescription": "L2 locked accesses",
+ "EventCode": "0x2B",
+ "EventName": "L2_LOCK.SELF.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "All read requests from L1 instruction and data caches"
+ "UMask": "0x48"
},
{
- "EventCode": "0x2D",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L2_LD_IFETCH.SELF.M_STATE",
+ "BriefDescription": "L2 locked accesses",
+ "EventCode": "0x2B",
+ "EventName": "L2_LOCK.SELF.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "All read requests from L1 instruction and data caches"
+ "UMask": "0x42"
},
{
- "EventCode": "0x2D",
- "Counter": "0,1",
- "UMask": "0x42",
- "EventName": "L2_LD_IFETCH.SELF.S_STATE",
+ "BriefDescription": "L2 cache line modifications.",
+ "EventCode": "0x25",
+ "EventName": "L2_M_LINES_IN.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "All read requests from L1 instruction and data caches"
+ "UMask": "0x40"
},
{
- "EventCode": "0x2D",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_LD_IFETCH.SELF.MESI",
+ "BriefDescription": "Modified lines evicted from the L2 cache",
+ "EventCode": "0x27",
+ "EventName": "L2_M_LINES_OUT.SELF.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All read requests from L1 instruction and data caches"
+ "UMask": "0x70"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x74",
- "EventName": "L2_RQSTS.SELF.ANY.E_STATE",
+ "BriefDescription": "Modified lines evicted from the L2 cache",
+ "EventCode": "0x27",
+ "EventName": "L2_M_LINES_OUT.SELF.DEMAND",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x40"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x71",
- "EventName": "L2_RQSTS.SELF.ANY.I_STATE",
+ "BriefDescription": "Modified lines evicted from the L2 cache",
+ "EventCode": "0x27",
+ "EventName": "L2_M_LINES_OUT.SELF.PREFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x50"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x78",
- "EventName": "L2_RQSTS.SELF.ANY.M_STATE",
+ "BriefDescription": "Cycles no L2 cache requests are pending",
+ "EventCode": "0x32",
+ "EventName": "L2_NO_REQ.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x40"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x72",
- "EventName": "L2_RQSTS.SELF.ANY.S_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x74"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x7f",
- "EventName": "L2_RQSTS.SELF.ANY.MESI",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x71"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x44",
- "EventName": "L2_RQSTS.SELF.DEMAND.E_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x7f"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L2_RQSTS.SELF.DEMAND.M_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x78"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x42",
- "EventName": "L2_RQSTS.SELF.DEMAND.S_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.ANY.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x72"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x54",
- "EventName": "L2_RQSTS.SELF.PREFETCH.E_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x44"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x51",
- "EventName": "L2_RQSTS.SELF.PREFETCH.I_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x41"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x58",
- "EventName": "L2_RQSTS.SELF.PREFETCH.M_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x4f"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x52",
- "EventName": "L2_RQSTS.SELF.PREFETCH.S_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x48"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x5f",
- "EventName": "L2_RQSTS.SELF.PREFETCH.MESI",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x42"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x41",
- "EventName": "L2_RQSTS.SELF.DEMAND.I_STATE",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache demand requests from this core that missed the L2"
+ "UMask": "0x54"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_RQSTS.SELF.DEMAND.MESI",
+ "BriefDescription": "Rejected L2 cache requests",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 cache demand requests from this core"
+ "UMask": "0x51"
},
{
+ "BriefDescription": "Rejected L2 cache requests",
"EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x74",
- "EventName": "L2_REJECT_BUSQ.SELF.ANY.E_STATE",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x5f"
},
{
+ "BriefDescription": "Rejected L2 cache requests",
"EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x71",
- "EventName": "L2_REJECT_BUSQ.SELF.ANY.I_STATE",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x58"
},
{
+ "BriefDescription": "Rejected L2 cache requests",
"EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x78",
- "EventName": "L2_REJECT_BUSQ.SELF.ANY.M_STATE",
+ "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x52"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x72",
- "EventName": "L2_REJECT_BUSQ.SELF.ANY.S_STATE",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.ANY.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x74"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x7f",
- "EventName": "L2_REJECT_BUSQ.SELF.ANY.MESI",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.ANY.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x71"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x44",
- "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.E_STATE",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.ANY.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x7f"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x41",
- "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.I_STATE",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.ANY.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x78"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.M_STATE",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.ANY.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x72"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x42",
- "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.S_STATE",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.DEMAND.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x44"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_REJECT_BUSQ.SELF.DEMAND.MESI",
+ "BriefDescription": "L2 cache demand requests from this core that missed the L2",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.DEMAND.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x41"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x54",
- "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.E_STATE",
+ "BriefDescription": "L2 cache demand requests from this core",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.DEMAND.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x4f"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x51",
- "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.I_STATE",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.DEMAND.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x48"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x58",
- "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.M_STATE",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.DEMAND.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x42"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x52",
- "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.S_STATE",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x54"
},
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x5f",
- "EventName": "L2_REJECT_BUSQ.SELF.PREFETCH.MESI",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Rejected L2 cache requests"
+ "UMask": "0x51"
},
{
- "EventCode": "0x32",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "L2_NO_REQ.SELF",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "Cycles no L2 cache requests are pending"
+ "UMask": "0x5f"
},
{
- "EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0xa1",
- "EventName": "L1D_CACHE.LD",
- "SampleAfterValue": "2000000",
- "BriefDescription": "L1 Cacheable Data Reads"
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.M_STATE",
+ "SampleAfterValue": "200000",
+ "UMask": "0x58"
},
{
- "EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0xa2",
- "EventName": "L1D_CACHE.ST",
- "SampleAfterValue": "2000000",
- "BriefDescription": "L1 Cacheable Data Writes"
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "L2_RQSTS.SELF.PREFETCH.S_STATE",
+ "SampleAfterValue": "200000",
+ "UMask": "0x52"
},
{
- "EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x83",
- "EventName": "L1D_CACHE.ALL_REF",
- "SampleAfterValue": "2000000",
- "BriefDescription": "L1 Data reads and writes"
+ "BriefDescription": "L2 store requests",
+ "EventCode": "0x2A",
+ "EventName": "L2_ST.SELF.E_STATE",
+ "SampleAfterValue": "200000",
+ "UMask": "0x44"
},
{
- "EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0xa3",
- "EventName": "L1D_CACHE.ALL_CACHE_REF",
- "SampleAfterValue": "2000000",
- "BriefDescription": "L1 Data Cacheable reads and writes"
+ "BriefDescription": "L2 store requests",
+ "EventCode": "0x2A",
+ "EventName": "L2_ST.SELF.I_STATE",
+ "SampleAfterValue": "200000",
+ "UMask": "0x41"
},
{
- "EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "L1D_CACHE.REPL",
+ "BriefDescription": "L2 store requests",
+ "EventCode": "0x2A",
+ "EventName": "L2_ST.SELF.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L1 Data line replacements"
+ "UMask": "0x4f"
},
{
- "EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x48",
- "EventName": "L1D_CACHE.REPLM",
+ "BriefDescription": "L2 store requests",
+ "EventCode": "0x2A",
+ "EventName": "L2_ST.SELF.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Modified cache lines allocated in the L1 data cache"
+ "UMask": "0x48"
},
{
- "EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "L1D_CACHE.EVICT",
+ "BriefDescription": "L2 store requests",
+ "EventCode": "0x2A",
+ "EventName": "L2_ST.SELF.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "Modified cache lines evicted from the L1 data cache"
+ "UMask": "0x42"
},
{
+ "BriefDescription": "Retired loads that hit the L2 cache (precise event).",
"EventCode": "0xCB",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "MEM_LOAD_RETIRED.L2_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that hit the L2 cache (precise event)."
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired loads that miss the L2 cache",
"EventCode": "0xCB",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
"SampleAfterValue": "10000",
- "BriefDescription": "Retired loads that miss the L2 cache"
+ "UMask": "0x2"
}
-] \ No newline at end of file
+]
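For context on the bonnell entries: each EventCode/UMask pair maps onto the raw x86 event encoding that perf programs into the counter (low byte = event select, next byte = unit mask; cmask/inv/edge bits are left out of this sketch). A hedged example, using L2_RQSTS.SELF.DEMAND.I_STATE from the hunk above:

    # Hedged sketch: raw x86 event encoding from an entry's EventCode and UMask.
    def raw_config(event_code: str, umask: str) -> int:
        """config = (umask << 8) | event_select, other bits left at zero."""
        return (int(umask, 16) << 8) | int(event_code, 16)

    # L2_RQSTS.SELF.DEMAND.I_STATE: EventCode 0x2E, UMask 0x41
    assert raw_config("0x2E", "0x41") == 0x412e
    # equivalent perf spellings: -e cpu/event=0x2e,umask=0x41/  or  -e r412e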
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json b/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
index f0e090cdb9f0..18bf5ec47e72 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/floating-point.json
@@ -1,261 +1,224 @@
[
{
- "EventCode": "0x10",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "X87_COMP_OPS_EXE.ANY.S",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Floating point computational micro-ops executed."
+ "BriefDescription": "Floating point assists for retired operations.",
+ "EventCode": "0x11",
+ "EventName": "FP_ASSIST.AR",
+ "SampleAfterValue": "10000",
+ "UMask": "0x81"
},
{
- "PEBS": "2",
- "EventCode": "0x10",
- "Counter": "0,1",
- "UMask": "0x81",
- "EventName": "X87_COMP_OPS_EXE.ANY.AR",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Floating point computational micro-ops retired."
+ "BriefDescription": "Floating point assists.",
+ "EventCode": "0x11",
+ "EventName": "FP_ASSIST.S",
+ "SampleAfterValue": "10000",
+ "UMask": "0x1"
},
{
- "EventCode": "0x10",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "X87_COMP_OPS_EXE.FXCH.S",
+ "BriefDescription": "SIMD assists invoked.",
+ "EventCode": "0xCD",
+ "EventName": "SIMD_ASSIST",
+ "SampleAfterValue": "100000"
+ },
+ {
+ "BriefDescription": "Retired computational Streaming SIMD Extensions (SSE) packed-single instructions.",
+ "EventCode": "0xCA",
+ "EventName": "SIMD_COMP_INST_RETIRED.PACKED_SINGLE",
"SampleAfterValue": "2000000",
- "BriefDescription": "FXCH uops executed."
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0x10",
- "Counter": "0,1",
- "UMask": "0x82",
- "EventName": "X87_COMP_OPS_EXE.FXCH.AR",
+ "BriefDescription": "Retired computational Streaming SIMD Extensions 2 (SSE2) scalar-double instructions.",
+ "EventCode": "0xCA",
+ "EventName": "SIMD_COMP_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000000",
- "BriefDescription": "FXCH uops retired."
+ "UMask": "0x8"
},
{
- "EventCode": "0x11",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "FP_ASSIST.S",
- "SampleAfterValue": "10000",
- "BriefDescription": "Floating point assists."
+ "BriefDescription": "Retired computational Streaming SIMD Extensions (SSE) scalar-single instructions.",
+ "EventCode": "0xCA",
+ "EventName": "SIMD_COMP_INST_RETIRED.SCALAR_SINGLE",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
},
{
- "EventCode": "0x11",
- "Counter": "0,1",
- "UMask": "0x81",
- "EventName": "FP_ASSIST.AR",
- "SampleAfterValue": "10000",
- "BriefDescription": "Floating point assists for retired operations."
+ "BriefDescription": "SIMD Instructions retired.",
+ "EventCode": "0xCE",
+ "EventName": "SIMD_INSTR_RETIRED",
+ "SampleAfterValue": "2000000"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "SIMD_UOPS_EXEC.S",
+ "BriefDescription": "Retired Streaming SIMD Extensions (SSE) packed-single instructions.",
+ "EventCode": "0xC7",
+ "EventName": "SIMD_INST_RETIRED.PACKED_SINGLE",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD micro-ops executed (excluding stores)."
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xB0",
- "Counter": "0,1",
- "UMask": "0x80",
- "EventName": "SIMD_UOPS_EXEC.AR",
+ "BriefDescription": "Retired Streaming SIMD Extensions 2 (SSE2) scalar-double instructions.",
+ "EventCode": "0xC7",
+ "EventName": "SIMD_INST_RETIRED.SCALAR_DOUBLE",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD micro-ops retired (excluding stores)."
+ "UMask": "0x8"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "SIMD_SAT_UOP_EXEC.S",
+ "BriefDescription": "Retired Streaming SIMD Extensions (SSE) scalar-single instructions.",
+ "EventCode": "0xC7",
+ "EventName": "SIMD_INST_RETIRED.SCALAR_SINGLE",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD saturated arithmetic micro-ops executed."
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1",
- "UMask": "0x80",
- "EventName": "SIMD_SAT_UOP_EXEC.AR",
+ "BriefDescription": "Retired Streaming SIMD Extensions 2 (SSE2) vector instructions.",
+ "EventCode": "0xC7",
+ "EventName": "SIMD_INST_RETIRED.VECTOR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD saturated arithmetic micro-ops retired."
+ "UMask": "0x10"
},
{
- "EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "SIMD_UOP_TYPE_EXEC.MUL.S",
- "SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed multiply micro-ops executed"
+ "BriefDescription": "Saturated arithmetic instructions retired.",
+ "EventCode": "0xCF",
+ "EventName": "SIMD_SAT_INSTR_RETIRED",
+ "SampleAfterValue": "2000000"
},
{
- "EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x81",
- "EventName": "SIMD_UOP_TYPE_EXEC.MUL.AR",
+ "BriefDescription": "SIMD saturated arithmetic micro-ops retired.",
+ "EventCode": "0xB1",
+ "EventName": "SIMD_SAT_UOP_EXEC.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed multiply micro-ops retired"
+ "UMask": "0x80"
},
{
- "EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "SIMD_UOP_TYPE_EXEC.SHIFT.S",
- "SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed shift micro-ops executed"
+ "BriefDescription": "SIMD saturated arithmetic micro-ops executed.",
+ "EventCode": "0xB1",
+ "EventName": "SIMD_SAT_UOP_EXEC.S",
+ "SampleAfterValue": "2000000"
},
{
- "EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x82",
- "EventName": "SIMD_UOP_TYPE_EXEC.SHIFT.AR",
+ "BriefDescription": "SIMD micro-ops retired (excluding stores).",
+ "EventCode": "0xB0",
+ "EventName": "SIMD_UOPS_EXEC.AR",
+ "PEBS": "2",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed shift micro-ops retired"
+ "UMask": "0x80"
},
{
- "EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "SIMD_UOP_TYPE_EXEC.PACK.S",
- "SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed micro-ops executed"
+ "BriefDescription": "SIMD micro-ops executed (excluding stores).",
+ "EventCode": "0xB0",
+ "EventName": "SIMD_UOPS_EXEC.S",
+ "SampleAfterValue": "2000000"
},
{
+ "BriefDescription": "SIMD packed arithmetic micro-ops retired",
"EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x84",
- "EventName": "SIMD_UOP_TYPE_EXEC.PACK.AR",
+ "EventName": "SIMD_UOP_TYPE_EXEC.ARITHMETIC.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed micro-ops retired"
+ "UMask": "0xa0"
},
{
+ "BriefDescription": "SIMD packed arithmetic micro-ops executed",
"EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "SIMD_UOP_TYPE_EXEC.UNPACK.S",
+ "EventName": "SIMD_UOP_TYPE_EXEC.ARITHMETIC.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD unpacked micro-ops executed"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "SIMD packed logical micro-ops retired",
"EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x88",
- "EventName": "SIMD_UOP_TYPE_EXEC.UNPACK.AR",
+ "EventName": "SIMD_UOP_TYPE_EXEC.LOGICAL.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD unpacked micro-ops retired"
+ "UMask": "0x90"
},
{
+ "BriefDescription": "SIMD packed logical micro-ops executed",
"EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x10",
"EventName": "SIMD_UOP_TYPE_EXEC.LOGICAL.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed logical micro-ops executed"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "SIMD packed multiply micro-ops retired",
"EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x90",
- "EventName": "SIMD_UOP_TYPE_EXEC.LOGICAL.AR",
+ "EventName": "SIMD_UOP_TYPE_EXEC.MUL.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed logical micro-ops retired"
+ "UMask": "0x81"
},
{
+ "BriefDescription": "SIMD packed multiply micro-ops executed",
"EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "SIMD_UOP_TYPE_EXEC.ARITHMETIC.S",
+ "EventName": "SIMD_UOP_TYPE_EXEC.MUL.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed arithmetic micro-ops executed"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "SIMD packed micro-ops retired",
"EventCode": "0xB3",
- "Counter": "0,1",
- "UMask": "0xa0",
- "EventName": "SIMD_UOP_TYPE_EXEC.ARITHMETIC.AR",
+ "EventName": "SIMD_UOP_TYPE_EXEC.PACK.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD packed arithmetic micro-ops retired"
+ "UMask": "0x84"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "SIMD_INST_RETIRED.PACKED_SINGLE",
+ "BriefDescription": "SIMD packed micro-ops executed",
+ "EventCode": "0xB3",
+ "EventName": "SIMD_UOP_TYPE_EXEC.PACK.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired Streaming SIMD Extensions (SSE) packed-single instructions."
+ "UMask": "0x4"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "SIMD_INST_RETIRED.SCALAR_SINGLE",
+ "BriefDescription": "SIMD packed shift micro-ops retired",
+ "EventCode": "0xB3",
+ "EventName": "SIMD_UOP_TYPE_EXEC.SHIFT.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired Streaming SIMD Extensions (SSE) scalar-single instructions."
+ "UMask": "0x82"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "SIMD_INST_RETIRED.SCALAR_DOUBLE",
+ "BriefDescription": "SIMD packed shift micro-ops executed",
+ "EventCode": "0xB3",
+ "EventName": "SIMD_UOP_TYPE_EXEC.SHIFT.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired Streaming SIMD Extensions 2 (SSE2) scalar-double instructions."
+ "UMask": "0x2"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "SIMD_INST_RETIRED.VECTOR",
+ "BriefDescription": "SIMD unpacked micro-ops retired",
+ "EventCode": "0xB3",
+ "EventName": "SIMD_UOP_TYPE_EXEC.UNPACK.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired Streaming SIMD Extensions 2 (SSE2) vector instructions."
+ "UMask": "0x88"
},
{
- "EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "SIMD_COMP_INST_RETIRED.PACKED_SINGLE",
+ "BriefDescription": "SIMD unpacked micro-ops executed",
+ "EventCode": "0xB3",
+ "EventName": "SIMD_UOP_TYPE_EXEC.UNPACK.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired computational Streaming SIMD Extensions (SSE) packed-single instructions."
+ "UMask": "0x8"
},
{
- "EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "SIMD_COMP_INST_RETIRED.SCALAR_SINGLE",
+ "BriefDescription": "Floating point computational micro-ops retired.",
+ "EventCode": "0x10",
+ "EventName": "X87_COMP_OPS_EXE.ANY.AR",
+ "PEBS": "2",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired computational Streaming SIMD Extensions (SSE) scalar-single instructions."
+ "UMask": "0x81"
},
{
- "EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "SIMD_COMP_INST_RETIRED.SCALAR_DOUBLE",
+ "BriefDescription": "Floating point computational micro-ops executed.",
+ "EventCode": "0x10",
+ "EventName": "X87_COMP_OPS_EXE.ANY.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired computational Streaming SIMD Extensions 2 (SSE2) scalar-double instructions."
- },
- {
- "EventCode": "0xCD",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "SIMD_ASSIST",
- "SampleAfterValue": "100000",
- "BriefDescription": "SIMD assists invoked."
+ "UMask": "0x1"
},
{
- "EventCode": "0xCE",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "SIMD_INSTR_RETIRED",
+ "BriefDescription": "FXCH uops retired.",
+ "EventCode": "0x10",
+ "EventName": "X87_COMP_OPS_EXE.FXCH.AR",
+ "PEBS": "2",
"SampleAfterValue": "2000000",
- "BriefDescription": "SIMD Instructions retired."
+ "UMask": "0x82"
},
{
- "EventCode": "0xCF",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "SIMD_SAT_INSTR_RETIRED",
+ "BriefDescription": "FXCH uops executed.",
+ "EventCode": "0x10",
+ "EventName": "X87_COMP_OPS_EXE.FXCH.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "Saturated arithmetic instructions retired."
+ "UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
index ef69540ab61d..8d2f4edfb597 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/frontend.json
@@ -1,83 +1,80 @@
[
{
- "EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x3",
- "EventName": "ICACHE.ACCESSES",
- "SampleAfterValue": "200000",
- "BriefDescription": "Instruction fetches."
- },
- {
- "EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
- "SampleAfterValue": "200000",
- "BriefDescription": "Icache hit"
- },
- {
- "EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200000",
- "BriefDescription": "Icache miss"
+ "BriefDescription": "BACLEARS asserted.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles during which instruction fetches are stalled.",
"EventCode": "0x86",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "CYCLES_ICACHE_MEM_STALLED.ICACHE_MEM_STALLED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles during which instruction fetches are stalled."
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Decode stall due to IQ full",
"EventCode": "0x87",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "DECODE_STALL.PFB_EMPTY",
+ "EventName": "DECODE_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Decode stall due to PFB empty"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Decode stall due to PFB empty",
"EventCode": "0x87",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "DECODE_STALL.IQ_FULL",
+ "EventName": "DECODE_STALL.PFB_EMPTY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Decode stall due to IQ full"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Instruction fetches.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200000",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Icache hit",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Icache miss",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "All Instructions decoded",
"EventCode": "0xAA",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "MACRO_INSTS.NON_CISC_DECODED",
+ "EventName": "MACRO_INSTS.ALL_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Non-CISC nacro instructions decoded"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "CISC macro instructions decoded",
"EventCode": "0xAA",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "MACRO_INSTS.CISC_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "CISC macro instructions decoded"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Non-CISC nacro instructions decoded",
"EventCode": "0xAA",
- "Counter": "0,1",
- "UMask": "0x3",
- "EventName": "MACRO_INSTS.ALL_DECODED",
+ "EventName": "MACRO_INSTS.NON_CISC_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "All Instructions decoded"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ.",
+ "CounterMask": "1",
"EventCode": "0xA9",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "UOPS.MS_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "This event counts the cycles where 1 or more uops are issued by the micro-sequencer (MS), including microcode assists and inserted flows, and written to the IQ.",
- "CounterMask": "1"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/memory.json b/tools/perf/pmu-events/arch/x86/bonnell/memory.json
index 3ae843b20c8a..ac02dc2482c8 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/memory.json
@@ -1,154 +1,135 @@
[
{
+ "BriefDescription": "Nonzero segbase 1 bubble",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0xf",
- "EventName": "MISALIGN_MEM_REF.SPLIT",
- "SampleAfterValue": "200000",
- "BriefDescription": "Memory references that cross an 8-byte boundary."
- },
- {
- "EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x9",
- "EventName": "MISALIGN_MEM_REF.LD_SPLIT",
+ "EventName": "MISALIGN_MEM_REF.BUBBLE",
"SampleAfterValue": "200000",
- "BriefDescription": "Load splits"
+ "UMask": "0x97"
},
{
+ "BriefDescription": "Nonzero segbase load 1 bubble",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0xa",
- "EventName": "MISALIGN_MEM_REF.ST_SPLIT",
+ "EventName": "MISALIGN_MEM_REF.LD_BUBBLE",
"SampleAfterValue": "200000",
- "BriefDescription": "Store splits"
+ "UMask": "0x91"
},
{
+ "BriefDescription": "Load splits",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x8f",
- "EventName": "MISALIGN_MEM_REF.SPLIT.AR",
+ "EventName": "MISALIGN_MEM_REF.LD_SPLIT",
"SampleAfterValue": "200000",
- "BriefDescription": "Memory references that cross an 8-byte boundary (At Retirement)"
+ "UMask": "0x9"
},
{
+ "BriefDescription": "Load splits (At Retirement)",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x89",
"EventName": "MISALIGN_MEM_REF.LD_SPLIT.AR",
"SampleAfterValue": "200000",
- "BriefDescription": "Load splits (At Retirement)"
+ "UMask": "0x89"
},
{
+ "BriefDescription": "Nonzero segbase ld-op-st 1 bubble",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x8a",
- "EventName": "MISALIGN_MEM_REF.ST_SPLIT.AR",
+ "EventName": "MISALIGN_MEM_REF.RMW_BUBBLE",
"SampleAfterValue": "200000",
- "BriefDescription": "Store splits (Ar Retirement)"
+ "UMask": "0x94"
},
{
+ "BriefDescription": "ld-op-st splits",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x8c",
"EventName": "MISALIGN_MEM_REF.RMW_SPLIT",
"SampleAfterValue": "200000",
- "BriefDescription": "ld-op-st splits"
+ "UMask": "0x8c"
},
{
+ "BriefDescription": "Memory references that cross an 8-byte boundary.",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x97",
- "EventName": "MISALIGN_MEM_REF.BUBBLE",
+ "EventName": "MISALIGN_MEM_REF.SPLIT",
"SampleAfterValue": "200000",
- "BriefDescription": "Nonzero segbase 1 bubble"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "Memory references that cross an 8-byte boundary (At Retirement)",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x91",
- "EventName": "MISALIGN_MEM_REF.LD_BUBBLE",
+ "EventName": "MISALIGN_MEM_REF.SPLIT.AR",
"SampleAfterValue": "200000",
- "BriefDescription": "Nonzero segbase load 1 bubble"
+ "UMask": "0x8f"
},
{
+ "BriefDescription": "Nonzero segbase store 1 bubble",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x92",
"EventName": "MISALIGN_MEM_REF.ST_BUBBLE",
"SampleAfterValue": "200000",
- "BriefDescription": "Nonzero segbase store 1 bubble"
+ "UMask": "0x92"
},
{
+ "BriefDescription": "Store splits",
"EventCode": "0x5",
- "Counter": "0,1",
- "UMask": "0x94",
- "EventName": "MISALIGN_MEM_REF.RMW_BUBBLE",
+ "EventName": "MISALIGN_MEM_REF.ST_SPLIT",
"SampleAfterValue": "200000",
- "BriefDescription": "Nonzero segbase ld-op-st 1 bubble"
+ "UMask": "0xa"
},
{
- "EventCode": "0x7",
- "Counter": "0,1",
- "UMask": "0x81",
- "EventName": "PREFETCH.PREFETCHT0",
+ "BriefDescription": "Store splits (Ar Retirement)",
+ "EventCode": "0x5",
+ "EventName": "MISALIGN_MEM_REF.ST_SPLIT.AR",
"SampleAfterValue": "200000",
- "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT0 instructions executed."
+ "UMask": "0x8a"
},
{
+ "BriefDescription": "L1 hardware prefetch request",
"EventCode": "0x7",
- "Counter": "0,1",
- "UMask": "0x82",
- "EventName": "PREFETCH.PREFETCHT1",
- "SampleAfterValue": "200000",
- "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT1 instructions executed."
+ "EventName": "PREFETCH.HW_PREFETCH",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Streaming SIMD Extensions (SSE) Prefetch NTA instructions executed",
"EventCode": "0x7",
- "Counter": "0,1",
- "UMask": "0x84",
- "EventName": "PREFETCH.PREFETCHT2",
+ "EventName": "PREFETCH.PREFETCHNTA",
"SampleAfterValue": "200000",
- "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT2 instructions executed."
+ "UMask": "0x88"
},
{
+ "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT0 instructions executed.",
"EventCode": "0x7",
- "Counter": "0,1",
- "UMask": "0x86",
- "EventName": "PREFETCH.SW_L2",
+ "EventName": "PREFETCH.PREFETCHT0",
"SampleAfterValue": "200000",
- "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT1 and PrefetchT2 instructions executed"
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT1 instructions executed.",
"EventCode": "0x7",
- "Counter": "0,1",
- "UMask": "0x88",
- "EventName": "PREFETCH.PREFETCHNTA",
+ "EventName": "PREFETCH.PREFETCHT1",
"SampleAfterValue": "200000",
- "BriefDescription": "Streaming SIMD Extensions (SSE) Prefetch NTA instructions executed"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT2 instructions executed.",
"EventCode": "0x7",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "PREFETCH.HW_PREFETCH",
- "SampleAfterValue": "2000000",
- "BriefDescription": "L1 hardware prefetch request"
+ "EventName": "PREFETCH.PREFETCHT2",
+ "SampleAfterValue": "200000",
+ "UMask": "0x84"
},
{
+ "BriefDescription": "Any Software prefetch",
"EventCode": "0x7",
- "Counter": "0,1",
- "UMask": "0xf",
"EventName": "PREFETCH.SOFTWARE_PREFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "Any Software prefetch"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "Any Software prefetch",
"EventCode": "0x7",
- "Counter": "0,1",
- "UMask": "0x8f",
"EventName": "PREFETCH.SOFTWARE_PREFETCH.AR",
"SampleAfterValue": "200000",
- "BriefDescription": "Any Software prefetch"
+ "UMask": "0x8f"
+ },
+ {
+ "BriefDescription": "Streaming SIMD Extensions (SSE) PrefetchT1 and PrefetchT2 instructions executed",
+ "EventCode": "0x7",
+ "EventName": "PREFETCH.SW_L2",
+ "SampleAfterValue": "200000",
+ "UMask": "0x86"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/other.json b/tools/perf/pmu-events/arch/x86/bonnell/other.json
index 4bc1c582d1cd..782594c8bda5 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/other.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/other.json
@@ -1,450 +1,388 @@
[
{
- "EventCode": "0x6",
- "Counter": "0,1",
- "UMask": "0x80",
- "EventName": "SEGMENT_REG_LOADS.ANY",
+ "BriefDescription": "Bus queue is empty.",
+ "EventCode": "0x7D",
+ "EventName": "BUSQ_EMPTY.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Number of segment register loads."
+ "UMask": "0x40"
},
{
- "EventCode": "0x9",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "DISPATCH_BLOCKED.ANY",
+ "BriefDescription": "Number of Bus Not Ready signals asserted.",
+ "EventCode": "0x61",
+ "EventName": "BUS_BNR_DRV.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Memory cluster signals to block micro-op dispatch for any reason"
+ "UMask": "0x20"
},
{
- "EventCode": "0x3A",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "EIST_TRANS",
- "SampleAfterValue": "200000",
- "BriefDescription": "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions"
+ "BriefDescription": "Number of Bus Not Ready signals asserted.",
+ "EventCode": "0x61",
+ "EventName": "BUS_BNR_DRV.THIS_AGENT",
+ "SampleAfterValue": "200000"
},
{
- "EventCode": "0x3B",
- "Counter": "0,1",
- "UMask": "0xc0",
- "EventName": "THERMAL_TRIP",
+ "BriefDescription": "Bus cycles while processor receives data.",
+ "EventCode": "0x64",
+ "EventName": "BUS_DATA_RCV.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Number of thermal trips"
+ "UMask": "0x40"
},
{
- "EventCode": "0x60",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_REQUEST_OUTSTANDING.ALL_AGENTS",
+ "BriefDescription": "Bus cycles when data is sent on the bus.",
+ "EventCode": "0x62",
+ "EventName": "BUS_DRDY_CLOCKS.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Outstanding cacheable data read bus requests duration."
+ "UMask": "0x20"
},
{
- "EventCode": "0x60",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_REQUEST_OUTSTANDING.SELF",
- "SampleAfterValue": "200000",
- "BriefDescription": "Outstanding cacheable data read bus requests duration."
+ "BriefDescription": "Bus cycles when data is sent on the bus.",
+ "EventCode": "0x62",
+ "EventName": "BUS_DRDY_CLOCKS.THIS_AGENT",
+ "SampleAfterValue": "200000"
},
{
- "EventCode": "0x61",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "BUS_BNR_DRV.ALL_AGENTS",
+ "BriefDescription": "HITM signal asserted.",
+ "EventCode": "0x7B",
+ "EventName": "BUS_HITM_DRV.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Number of Bus Not Ready signals asserted."
+ "UMask": "0x20"
},
{
- "EventCode": "0x61",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "BUS_BNR_DRV.THIS_AGENT",
- "SampleAfterValue": "200000",
- "BriefDescription": "Number of Bus Not Ready signals asserted."
+ "BriefDescription": "HITM signal asserted.",
+ "EventCode": "0x7B",
+ "EventName": "BUS_HITM_DRV.THIS_AGENT",
+ "SampleAfterValue": "200000"
},
{
- "EventCode": "0x62",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "BUS_DRDY_CLOCKS.ALL_AGENTS",
+ "BriefDescription": "HIT signal asserted.",
+ "EventCode": "0x7A",
+ "EventName": "BUS_HIT_DRV.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus cycles when data is sent on the bus."
+ "UMask": "0x20"
},
{
- "EventCode": "0x62",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "BUS_DRDY_CLOCKS.THIS_AGENT",
+ "BriefDescription": "HIT signal asserted.",
+ "EventCode": "0x7A",
+ "EventName": "BUS_HIT_DRV.THIS_AGENT",
+ "SampleAfterValue": "200000"
+ },
+ {
+ "BriefDescription": "IO requests waiting in the bus queue.",
+ "EventCode": "0x7F",
+ "EventName": "BUS_IO_WAIT.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus cycles when data is sent on the bus."
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Bus cycles when a LOCK signal is asserted.",
"EventCode": "0x63",
- "Counter": "0,1",
- "UMask": "0xe0",
"EventName": "BUS_LOCK_CLOCKS.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus cycles when a LOCK signal is asserted."
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "Bus cycles when a LOCK signal is asserted.",
"EventCode": "0x63",
- "Counter": "0,1",
- "UMask": "0x40",
"EventName": "BUS_LOCK_CLOCKS.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus cycles when a LOCK signal is asserted."
+ "UMask": "0x40"
},
{
- "EventCode": "0x64",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_DATA_RCV.SELF",
+ "BriefDescription": "Outstanding cacheable data read bus requests duration.",
+ "EventCode": "0x60",
+ "EventName": "BUS_REQUEST_OUTSTANDING.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus cycles while processor receives data."
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "Outstanding cacheable data read bus requests duration.",
+ "EventCode": "0x60",
+ "EventName": "BUS_REQUEST_OUTSTANDING.SELF",
+ "SampleAfterValue": "200000",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "All bus transactions.",
+ "EventCode": "0x70",
+ "EventName": "BUS_TRANS_ANY.ALL_AGENTS",
+ "SampleAfterValue": "200000",
+ "UMask": "0xe0"
+ },
+ {
+ "BriefDescription": "All bus transactions.",
+ "EventCode": "0x70",
+ "EventName": "BUS_TRANS_ANY.SELF",
+ "SampleAfterValue": "200000",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Burst read bus transactions.",
"EventCode": "0x65",
- "Counter": "0,1",
- "UMask": "0xe0",
"EventName": "BUS_TRANS_BRD.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Burst read bus transactions."
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "Burst read bus transactions.",
"EventCode": "0x65",
- "Counter": "0,1",
- "UMask": "0x40",
"EventName": "BUS_TRANS_BRD.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Burst read bus transactions."
+ "UMask": "0x40"
},
{
- "EventCode": "0x66",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_TRANS_RFO.ALL_AGENTS",
+ "BriefDescription": "Burst (full cache-line) bus transactions.",
+ "EventCode": "0x6E",
+ "EventName": "BUS_TRANS_BURST.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "RFO bus transactions."
+ "UMask": "0xe0"
},
{
- "EventCode": "0x66",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_TRANS_RFO.SELF",
+ "BriefDescription": "Burst (full cache-line) bus transactions.",
+ "EventCode": "0x6E",
+ "EventName": "BUS_TRANS_BURST.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "RFO bus transactions."
+ "UMask": "0x40"
},
{
- "EventCode": "0x67",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_TRANS_WB.ALL_AGENTS",
+ "BriefDescription": "Deferred bus transactions.",
+ "EventCode": "0x6D",
+ "EventName": "BUS_TRANS_DEF.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Explicit writeback bus transactions."
+ "UMask": "0xe0"
},
{
- "EventCode": "0x67",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_TRANS_WB.SELF",
+ "BriefDescription": "Deferred bus transactions.",
+ "EventCode": "0x6D",
+ "EventName": "BUS_TRANS_DEF.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Explicit writeback bus transactions."
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Instruction-fetch bus transactions.",
"EventCode": "0x68",
- "Counter": "0,1",
- "UMask": "0xe0",
"EventName": "BUS_TRANS_IFETCH.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Instruction-fetch bus transactions."
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "Instruction-fetch bus transactions.",
"EventCode": "0x68",
- "Counter": "0,1",
- "UMask": "0x40",
"EventName": "BUS_TRANS_IFETCH.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Instruction-fetch bus transactions."
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Invalidate bus transactions.",
"EventCode": "0x69",
- "Counter": "0,1",
- "UMask": "0xe0",
"EventName": "BUS_TRANS_INVAL.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Invalidate bus transactions."
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "Invalidate bus transactions.",
"EventCode": "0x69",
- "Counter": "0,1",
- "UMask": "0x40",
"EventName": "BUS_TRANS_INVAL.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Invalidate bus transactions."
- },
- {
- "EventCode": "0x6A",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_TRANS_PWR.ALL_AGENTS",
- "SampleAfterValue": "200000",
- "BriefDescription": "Partial write bus transaction."
- },
- {
- "EventCode": "0x6A",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_TRANS_PWR.SELF",
- "SampleAfterValue": "200000",
- "BriefDescription": "Partial write bus transaction."
- },
- {
- "EventCode": "0x6B",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_TRANS_P.ALL_AGENTS",
- "SampleAfterValue": "200000",
- "BriefDescription": "Partial bus transactions."
- },
- {
- "EventCode": "0x6B",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_TRANS_P.SELF",
- "SampleAfterValue": "200000",
- "BriefDescription": "Partial bus transactions."
+ "UMask": "0x40"
},
{
+ "BriefDescription": "IO bus transactions.",
"EventCode": "0x6C",
- "Counter": "0,1",
- "UMask": "0xe0",
"EventName": "BUS_TRANS_IO.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "IO bus transactions."
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "IO bus transactions.",
"EventCode": "0x6C",
- "Counter": "0,1",
- "UMask": "0x40",
"EventName": "BUS_TRANS_IO.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "IO bus transactions."
+ "UMask": "0x40"
},
{
- "EventCode": "0x6D",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_TRANS_DEF.ALL_AGENTS",
+ "BriefDescription": "Memory bus transactions.",
+ "EventCode": "0x6F",
+ "EventName": "BUS_TRANS_MEM.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Deferred bus transactions."
+ "UMask": "0xe0"
},
{
- "EventCode": "0x6D",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_TRANS_DEF.SELF",
+ "BriefDescription": "Memory bus transactions.",
+ "EventCode": "0x6F",
+ "EventName": "BUS_TRANS_MEM.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Deferred bus transactions."
+ "UMask": "0x40"
},
{
- "EventCode": "0x6E",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_TRANS_BURST.ALL_AGENTS",
+ "BriefDescription": "Partial bus transactions.",
+ "EventCode": "0x6B",
+ "EventName": "BUS_TRANS_P.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Burst (full cache-line) bus transactions."
+ "UMask": "0xe0"
},
{
- "EventCode": "0x6E",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_TRANS_BURST.SELF",
+ "BriefDescription": "Partial bus transactions.",
+ "EventCode": "0x6B",
+ "EventName": "BUS_TRANS_P.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Burst (full cache-line) bus transactions."
+ "UMask": "0x40"
},
{
- "EventCode": "0x6F",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_TRANS_MEM.ALL_AGENTS",
+ "BriefDescription": "Partial write bus transaction.",
+ "EventCode": "0x6A",
+ "EventName": "BUS_TRANS_PWR.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Memory bus transactions."
+ "UMask": "0xe0"
},
{
- "EventCode": "0x6F",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_TRANS_MEM.SELF",
+ "BriefDescription": "Partial write bus transaction.",
+ "EventCode": "0x6A",
+ "EventName": "BUS_TRANS_PWR.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Memory bus transactions."
+ "UMask": "0x40"
},
{
- "EventCode": "0x70",
- "Counter": "0,1",
- "UMask": "0xe0",
- "EventName": "BUS_TRANS_ANY.ALL_AGENTS",
+ "BriefDescription": "RFO bus transactions.",
+ "EventCode": "0x66",
+ "EventName": "BUS_TRANS_RFO.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "All bus transactions."
+ "UMask": "0xe0"
},
{
- "EventCode": "0x70",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_TRANS_ANY.SELF",
+ "BriefDescription": "RFO bus transactions.",
+ "EventCode": "0x66",
+ "EventName": "BUS_TRANS_RFO.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "All bus transactions."
+ "UMask": "0x40"
},
{
- "EventCode": "0x77",
- "Counter": "0,1",
- "UMask": "0xb",
- "EventName": "EXT_SNOOP.THIS_AGENT.ANY",
+ "BriefDescription": "Explicit writeback bus transactions.",
+ "EventCode": "0x67",
+ "EventName": "BUS_TRANS_WB.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "External snoops."
+ "UMask": "0xe0"
},
{
- "EventCode": "0x77",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "EXT_SNOOP.THIS_AGENT.CLEAN",
+ "BriefDescription": "Explicit writeback bus transactions.",
+ "EventCode": "0x67",
+ "EventName": "BUS_TRANS_WB.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "External snoops."
+ "UMask": "0x40"
},
{
- "EventCode": "0x77",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "EXT_SNOOP.THIS_AGENT.HIT",
- "SampleAfterValue": "200000",
- "BriefDescription": "External snoops."
+ "BriefDescription": "Cycles during which interrupts are disabled.",
+ "EventCode": "0xC6",
+ "EventName": "CYCLES_INT_MASKED.CYCLES_INT_MASKED",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
},
{
- "EventCode": "0x77",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "EXT_SNOOP.THIS_AGENT.HITM",
+ "BriefDescription": "Cycles during which interrupts are pending and disabled.",
+ "EventCode": "0xC6",
+ "EventName": "CYCLES_INT_MASKED.CYCLES_INT_PENDING_AND_MASKED",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Memory cluster signals to block micro-op dispatch for any reason",
+ "EventCode": "0x9",
+ "EventName": "DISPATCH_BLOCKED.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "External snoops."
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of Enhanced Intel SpeedStep(R) Technology (EIST) transitions",
+ "EventCode": "0x3A",
+ "EventName": "EIST_TRANS",
+ "SampleAfterValue": "200000"
+ },
+ {
+ "BriefDescription": "External snoops.",
"EventCode": "0x77",
- "Counter": "0,1",
- "UMask": "0x2b",
"EventName": "EXT_SNOOP.ALL_AGENTS.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "External snoops."
+ "UMask": "0x2b"
},
{
+ "BriefDescription": "External snoops.",
"EventCode": "0x77",
- "Counter": "0,1",
- "UMask": "0x21",
"EventName": "EXT_SNOOP.ALL_AGENTS.CLEAN",
"SampleAfterValue": "200000",
- "BriefDescription": "External snoops."
+ "UMask": "0x21"
},
{
+ "BriefDescription": "External snoops.",
"EventCode": "0x77",
- "Counter": "0,1",
- "UMask": "0x22",
"EventName": "EXT_SNOOP.ALL_AGENTS.HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "External snoops."
+ "UMask": "0x22"
},
{
+ "BriefDescription": "External snoops.",
"EventCode": "0x77",
- "Counter": "0,1",
- "UMask": "0x28",
"EventName": "EXT_SNOOP.ALL_AGENTS.HITM",
"SampleAfterValue": "200000",
- "BriefDescription": "External snoops."
+ "UMask": "0x28"
},
{
- "EventCode": "0x7A",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "BUS_HIT_DRV.ALL_AGENTS",
+ "BriefDescription": "External snoops.",
+ "EventCode": "0x77",
+ "EventName": "EXT_SNOOP.THIS_AGENT.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "HIT signal asserted."
+ "UMask": "0xb"
},
{
- "EventCode": "0x7A",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "BUS_HIT_DRV.THIS_AGENT",
+ "BriefDescription": "External snoops.",
+ "EventCode": "0x77",
+ "EventName": "EXT_SNOOP.THIS_AGENT.CLEAN",
"SampleAfterValue": "200000",
- "BriefDescription": "HIT signal asserted."
+ "UMask": "0x1"
},
{
- "EventCode": "0x7B",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "BUS_HITM_DRV.ALL_AGENTS",
+ "BriefDescription": "External snoops.",
+ "EventCode": "0x77",
+ "EventName": "EXT_SNOOP.THIS_AGENT.HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "HITM signal asserted."
+ "UMask": "0x2"
},
{
- "EventCode": "0x7B",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "BUS_HITM_DRV.THIS_AGENT",
+ "BriefDescription": "External snoops.",
+ "EventCode": "0x77",
+ "EventName": "EXT_SNOOP.THIS_AGENT.HITM",
"SampleAfterValue": "200000",
- "BriefDescription": "HITM signal asserted."
+ "UMask": "0x8"
},
{
- "EventCode": "0x7D",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUSQ_EMPTY.SELF",
+ "BriefDescription": "Hardware interrupts received.",
+ "EventCode": "0xC8",
+ "EventName": "HW_INT_RCV",
+ "SampleAfterValue": "200000"
+ },
+ {
+ "BriefDescription": "Number of segment register loads.",
+ "EventCode": "0x6",
+ "EventName": "SEGMENT_REG_LOADS.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus queue is empty."
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Bus stalled for snoops.",
"EventCode": "0x7E",
- "Counter": "0,1",
- "UMask": "0xe0",
"EventName": "SNOOP_STALL_DRV.ALL_AGENTS",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus stalled for snoops."
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "Bus stalled for snoops.",
"EventCode": "0x7E",
- "Counter": "0,1",
- "UMask": "0x40",
"EventName": "SNOOP_STALL_DRV.SELF",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus stalled for snoops."
+ "UMask": "0x40"
},
{
- "EventCode": "0x7F",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "BUS_IO_WAIT.SELF",
- "SampleAfterValue": "200000",
- "BriefDescription": "IO requests waiting in the bus queue."
- },
- {
- "EventCode": "0xC6",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "CYCLES_INT_MASKED.CYCLES_INT_MASKED",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Cycles during which interrupts are disabled."
- },
- {
- "EventCode": "0xC6",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "CYCLES_INT_MASKED.CYCLES_INT_PENDING_AND_MASKED",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Cycles during which interrupts are pending and disabled."
- },
- {
- "EventCode": "0xC8",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "HW_INT_RCV",
+ "BriefDescription": "Number of thermal trips",
+ "EventCode": "0x3B",
+ "EventName": "THERMAL_TRIP",
"SampleAfterValue": "200000",
- "BriefDescription": "Hardware interrupts received."
+ "UMask": "0xc0"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
index 09c6de13de20..91b98ee8ba9a 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/pipeline.json
@@ -1,364 +1,305 @@
[
{
- "EventCode": "0x2",
- "Counter": "0,1",
- "UMask": "0x83",
- "EventName": "STORE_FORWARDS.ANY",
- "SampleAfterValue": "200000",
- "BriefDescription": "All store forwards"
- },
- {
- "EventCode": "0x2",
- "Counter": "0,1",
- "UMask": "0x81",
- "EventName": "STORE_FORWARDS.GOOD",
- "SampleAfterValue": "200000",
- "BriefDescription": "Good store forwards"
- },
- {
- "EventCode": "0x3",
- "Counter": "0,1",
- "UMask": "0x7f",
- "EventName": "REISSUE.ANY",
- "SampleAfterValue": "200000",
- "BriefDescription": "Micro-op reissues for any cause"
- },
- {
- "EventCode": "0x3",
- "Counter": "0,1",
- "UMask": "0xff",
- "EventName": "REISSUE.ANY.AR",
- "SampleAfterValue": "200000",
- "BriefDescription": "Micro-op reissues for any cause (At Retirement)"
- },
- {
- "EventCode": "0x12",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "MUL.S",
+ "BriefDescription": "Bogus branches",
+ "EventCode": "0xE4",
+ "EventName": "BOGUS_BR",
"SampleAfterValue": "2000000",
- "BriefDescription": "Multiply operations executed."
+ "UMask": "0x1"
},
{
- "EventCode": "0x12",
- "Counter": "0,1",
- "UMask": "0x81",
- "EventName": "MUL.AR",
+ "BriefDescription": "Branch instructions decoded",
+ "EventCode": "0xE0",
+ "EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Multiply operations retired"
+ "UMask": "0x1"
},
{
- "EventCode": "0x13",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "DIV.S",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Divide operations executed."
+ "BriefDescription": "Retired branch instructions.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000"
},
{
- "EventCode": "0x13",
- "Counter": "0,1",
- "UMask": "0x81",
- "EventName": "DIV.AR",
+ "BriefDescription": "Retired branch instructions.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ANY1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Divide operations retired"
+ "UMask": "0xf"
},
{
- "EventCode": "0x14",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "CYCLES_DIV_BUSY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Cycles the divider is busy."
+ "BriefDescription": "Retired mispredicted branch instructions (precise event).",
+ "EventCode": "0xC5",
+ "EventName": "BR_INST_RETIRED.MISPRED",
+ "PEBS": "1",
+ "SampleAfterValue": "200000"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.CORE_P",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Core cycles when core is not halted"
+ "BriefDescription": "Retired branch instructions that were mispredicted not-taken.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.MISPRED_NOT_TAKEN",
+ "SampleAfterValue": "200000",
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.BUS",
+ "BriefDescription": "Retired branch instructions that were mispredicted taken.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.MISPRED_TAKEN",
"SampleAfterValue": "200000",
- "BriefDescription": "Bus cycles when core is not halted"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA",
- "Counter": "Fixed counter 2",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.CORE",
+ "BriefDescription": "Retired branch instructions that were predicted not-taken.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.PRED_NOT_TAKEN",
"SampleAfterValue": "2000000",
- "BriefDescription": "Core cycles when core is not halted"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA",
- "Counter": "Fixed counter 3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.REF",
+ "BriefDescription": "Retired branch instructions that were predicted taken.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.PRED_TAKEN",
"SampleAfterValue": "2000000",
- "BriefDescription": "Reference cycles when core is not halted."
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired taken branch instructions.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.TAKEN",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "All macro conditional branch instructions.",
"EventCode": "0x88",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "BR_INST_TYPE_RETIRED.COND",
"SampleAfterValue": "2000000",
- "BriefDescription": "All macro conditional branch instructions."
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Only taken macro conditional branch instructions",
"EventCode": "0x88",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "BR_INST_TYPE_RETIRED.UNCOND",
+ "EventName": "BR_INST_TYPE_RETIRED.COND_TAKEN",
"SampleAfterValue": "2000000",
- "BriefDescription": "All macro unconditional branch instructions, excluding calls and indirects"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "All non-indirect calls",
"EventCode": "0x88",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "BR_INST_TYPE_RETIRED.IND",
+ "EventName": "BR_INST_TYPE_RETIRED.DIR_CALL",
"SampleAfterValue": "2000000",
- "BriefDescription": "All indirect branches that are not calls."
+ "UMask": "0x10"
},
{
+ "BriefDescription": "All indirect branches that are not calls.",
"EventCode": "0x88",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "BR_INST_TYPE_RETIRED.RET",
+ "EventName": "BR_INST_TYPE_RETIRED.IND",
"SampleAfterValue": "2000000",
- "BriefDescription": "All indirect branches that have a return mnemonic"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "All indirect calls, including both register and memory indirect.",
"EventCode": "0x88",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "BR_INST_TYPE_RETIRED.DIR_CALL",
+ "EventName": "BR_INST_TYPE_RETIRED.IND_CALL",
"SampleAfterValue": "2000000",
- "BriefDescription": "All non-indirect calls"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "All indirect branches that have a return mnemonic",
"EventCode": "0x88",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "BR_INST_TYPE_RETIRED.IND_CALL",
+ "EventName": "BR_INST_TYPE_RETIRED.RET",
"SampleAfterValue": "2000000",
- "BriefDescription": "All indirect calls, including both register and memory indirect."
+ "UMask": "0x8"
},
{
+ "BriefDescription": "All macro unconditional branch instructions, excluding calls and indirects",
"EventCode": "0x88",
- "Counter": "0,1",
- "UMask": "0x41",
- "EventName": "BR_INST_TYPE_RETIRED.COND_TAKEN",
+ "EventName": "BR_INST_TYPE_RETIRED.UNCOND",
"SampleAfterValue": "2000000",
- "BriefDescription": "Only taken macro conditional branch instructions"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Mispredicted cond branch instructions retired",
"EventCode": "0x89",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "BR_MISSP_TYPE_RETIRED.COND",
"SampleAfterValue": "200000",
- "BriefDescription": "Mispredicted cond branch instructions retired"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Mispredicted and taken cond branch instructions retired",
"EventCode": "0x89",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "BR_MISSP_TYPE_RETIRED.IND",
+ "EventName": "BR_MISSP_TYPE_RETIRED.COND_TAKEN",
"SampleAfterValue": "200000",
- "BriefDescription": "Mispredicted ind branches that are not calls"
+ "UMask": "0x11"
},
{
+ "BriefDescription": "Mispredicted ind branches that are not calls",
"EventCode": "0x89",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "BR_MISSP_TYPE_RETIRED.RETURN",
+ "EventName": "BR_MISSP_TYPE_RETIRED.IND",
"SampleAfterValue": "200000",
- "BriefDescription": "Mispredicted return branches"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect.",
"EventCode": "0x89",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "BR_MISSP_TYPE_RETIRED.IND_CALL",
"SampleAfterValue": "200000",
- "BriefDescription": "Mispredicted indirect calls, including both register and memory indirect."
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Mispredicted return branches",
"EventCode": "0x89",
- "Counter": "0,1",
- "UMask": "0x11",
- "EventName": "BR_MISSP_TYPE_RETIRED.COND_TAKEN",
+ "EventName": "BR_MISSP_TYPE_RETIRED.RETURN",
"SampleAfterValue": "200000",
- "BriefDescription": "Mispredicted and taken cond branch instructions retired"
+ "UMask": "0x4"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired (precise event)."
+ "BriefDescription": "Bus cycles when core is not halted",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.BUS",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Core cycles when core is not halted",
"EventCode": "0xA",
- "Counter": "Fixed counter 1",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired."
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000000"
},
{
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "UOPS_RETIRED.ANY",
+ "BriefDescription": "Core cycles when core is not halted",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000000"
+ },
+ {
+ "BriefDescription": "Reference cycles when core is not halted.",
+ "EventCode": "0xA",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "SampleAfterValue": "2000000"
+ },
+ {
+ "BriefDescription": "Cycles the divider is busy.",
+ "EventCode": "0x14",
+ "EventName": "CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Micro-ops retired."
+ "UMask": "0x1"
},
{
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "UOPS_RETIRED.STALLED_CYCLES",
+ "BriefDescription": "Divide operations retired",
+ "EventCode": "0x13",
+ "EventName": "DIV.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no micro-ops retired."
+ "UMask": "0x81"
},
{
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "UOPS_RETIRED.STALLS",
+ "BriefDescription": "Divide operations executed.",
+ "EventCode": "0x13",
+ "EventName": "DIV.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "Periods no micro-ops retired."
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Instructions retired.",
+ "EventCode": "0xA",
+ "EventName": "INST_RETIRED.ANY",
+ "SampleAfterValue": "2000000"
+ },
+ {
+ "BriefDescription": "Instructions retired (precise event).",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "2",
+ "SampleAfterValue": "2000000"
+ },
+ {
+ "BriefDescription": "Self-Modifying Code detected.",
"EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "200000",
- "BriefDescription": "Self-Modifying Code detected."
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ANY",
+ "BriefDescription": "Multiply operations retired",
+ "EventCode": "0x12",
+ "EventName": "MUL.AR",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired branch instructions."
+ "UMask": "0x81"
},
{
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.PRED_NOT_TAKEN",
+ "BriefDescription": "Multiply operations executed.",
+ "EventCode": "0x12",
+ "EventName": "MUL.S",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired branch instructions that were predicted not-taken."
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.MISPRED_NOT_TAKEN",
+ "BriefDescription": "Micro-op reissues for any cause",
+ "EventCode": "0x3",
+ "EventName": "REISSUE.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired branch instructions that were mispredicted not-taken."
- },
- {
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.PRED_TAKEN",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Retired branch instructions that were predicted taken."
+ "UMask": "0x7f"
},
{
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.MISPRED_TAKEN",
+ "BriefDescription": "Micro-op reissues for any cause (At Retirement)",
+ "EventCode": "0x3",
+ "EventName": "REISSUE.ANY.AR",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired branch instructions that were mispredicted taken."
- },
- {
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xc",
- "EventName": "BR_INST_RETIRED.TAKEN",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Retired taken branch instructions."
+ "UMask": "0xff"
},
{
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xf",
- "EventName": "BR_INST_RETIRED.ANY1",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Retired branch instructions."
+ "BriefDescription": "Micro-op reissues on a store-load collision",
+ "EventCode": "0x3",
+ "EventName": "REISSUE.OVERLAP_STORE",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.MISPRED",
+ "BriefDescription": "Micro-op reissues on a store-load collision (At Retirement)",
+ "EventCode": "0x3",
+ "EventName": "REISSUE.OVERLAP_STORE.AR",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired mispredicted branch instructions (precise event)."
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Cycles issue is stalled due to div busy.",
"EventCode": "0xDC",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "RESOURCE_STALLS.DIV_BUSY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles issue is stalled due to div busy."
+ "UMask": "0x2"
},
{
- "EventCode": "0xE0",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "BR_INST_DECODED",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Branch instructions decoded"
+ "BriefDescription": "All store forwards",
+ "EventCode": "0x2",
+ "EventName": "STORE_FORWARDS.ANY",
+ "SampleAfterValue": "200000",
+ "UMask": "0x83"
},
{
- "EventCode": "0xE4",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "BOGUS_BR",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Bogus branches"
+ "BriefDescription": "Good store forwards",
+ "EventCode": "0x2",
+ "EventName": "STORE_FORWARDS.GOOD",
+ "SampleAfterValue": "200000",
+ "UMask": "0x81"
},
{
- "EventCode": "0xE6",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "BACLEARS.ANY",
+ "BriefDescription": "Micro-ops retired.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "BACLEARS asserted."
+ "UMask": "0x10"
},
{
- "EventCode": "0x3",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "REISSUE.OVERLAP_STORE",
- "SampleAfterValue": "200000",
- "BriefDescription": "Micro-op reissues on a store-load collision"
+ "BriefDescription": "Cycles no micro-ops retired.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALLED_CYCLES",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x10"
},
{
- "EventCode": "0x3",
- "Counter": "0,1",
- "UMask": "0x81",
- "EventName": "REISSUE.OVERLAP_STORE.AR",
- "SampleAfterValue": "200000",
- "BriefDescription": "Micro-op reissues on a store-load collision (At Retirement)"
+ "BriefDescription": "Periods no micro-ops retired.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
index 7bb817588721..82e07c73cff0 100644
--- a/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/bonnell/virtual-memory.json
@@ -1,124 +1,109 @@
[
{
+ "BriefDescription": "Memory accesses that missed the DTLB.",
"EventCode": "0x8",
- "Counter": "0,1",
- "UMask": "0x7",
"EventName": "DATA_TLB_MISSES.DTLB_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "Memory accesses that missed the DTLB."
+ "UMask": "0x7"
},
{
+ "BriefDescription": "DTLB misses due to load operations.",
"EventCode": "0x8",
- "Counter": "0,1",
- "UMask": "0x5",
"EventName": "DATA_TLB_MISSES.DTLB_MISS_LD",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB misses due to load operations."
+ "UMask": "0x5"
},
{
+ "BriefDescription": "DTLB misses due to store operations.",
"EventCode": "0x8",
- "Counter": "0,1",
- "UMask": "0x9",
- "EventName": "DATA_TLB_MISSES.L0_DTLB_MISS_LD",
+ "EventName": "DATA_TLB_MISSES.DTLB_MISS_ST",
"SampleAfterValue": "200000",
- "BriefDescription": "L0 DTLB misses due to load operations."
+ "UMask": "0x6"
},
{
+ "BriefDescription": "L0 DTLB misses due to load operations.",
"EventCode": "0x8",
- "Counter": "0,1",
- "UMask": "0x6",
- "EventName": "DATA_TLB_MISSES.DTLB_MISS_ST",
+ "EventName": "DATA_TLB_MISSES.L0_DTLB_MISS_LD",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB misses due to store operations."
+ "UMask": "0x9"
},
{
+ "BriefDescription": "L0 DTLB misses due to store operations",
"EventCode": "0x8",
- "Counter": "0,1",
- "UMask": "0xa",
"EventName": "DATA_TLB_MISSES.L0_DTLB_MISS_ST",
"SampleAfterValue": "200000",
- "BriefDescription": "L0 DTLB misses due to store operations"
+ "UMask": "0xa"
},
{
- "EventCode": "0xC",
- "Counter": "0,1",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.WALKS",
+ "BriefDescription": "ITLB flushes.",
+ "EventCode": "0x82",
+ "EventName": "ITLB.FLUSH",
"SampleAfterValue": "200000",
- "BriefDescription": "Number of page-walks executed."
+ "UMask": "0x4"
},
{
- "EventCode": "0xC",
- "Counter": "0,1",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.CYCLES",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Duration of page-walks in core cycles"
+ "BriefDescription": "ITLB hits.",
+ "EventCode": "0x82",
+ "EventName": "ITLB.HIT",
+ "SampleAfterValue": "200000",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.D_SIDE_WALKS",
+ "BriefDescription": "ITLB misses.",
+ "EventCode": "0x82",
+ "EventName": "ITLB.MISSES",
+ "PEBS": "2",
"SampleAfterValue": "200000",
- "BriefDescription": "Number of D-side only page walks"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Duration of D-side only page walks"
+ "BriefDescription": "Retired loads that miss the DTLB (precise event).",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Duration of page-walks in core cycles",
"EventCode": "0xC",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "PAGE_WALKS.I_SIDE_WALKS",
- "SampleAfterValue": "200000",
- "BriefDescription": "Number of I-Side page walks"
+ "EventName": "PAGE_WALKS.CYCLES",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Duration of D-side only page walks",
"EventCode": "0xC",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Duration of I-Side page walks"
+ "UMask": "0x1"
},
{
- "EventCode": "0x82",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "ITLB.HIT",
+ "BriefDescription": "Number of D-side only page walks",
+ "EventCode": "0xC",
+ "EventName": "PAGE_WALKS.D_SIDE_WALKS",
"SampleAfterValue": "200000",
- "BriefDescription": "ITLB hits."
+ "UMask": "0x1"
},
{
- "EventCode": "0x82",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "ITLB.FLUSH",
- "SampleAfterValue": "200000",
- "BriefDescription": "ITLB flushes."
+ "BriefDescription": "Duration of I-Side page walks",
+ "EventCode": "0xC",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "EventCode": "0x82",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "ITLB.MISSES",
+ "BriefDescription": "Number of I-Side page walks",
+ "EventCode": "0xC",
+ "EventName": "PAGE_WALKS.I_SIDE_WALKS",
"SampleAfterValue": "200000",
- "BriefDescription": "ITLB misses."
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "BriefDescription": "Number of page-walks executed.",
+ "EventCode": "0xC",
+ "EventName": "PAGE_WALKS.WALKS",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that miss the DTLB (precise event)."
+ "UMask": "0x3"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
index 8cdc7c13dc2a..51cf8560a8d3 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
@@ -1,352 +1,1136 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "MACHINE_CLEARS.COUNT * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
},
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / cycles",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization"
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
},
{
- "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "Average number of parallel requests to external memory",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_parallel_requests",
+ "PublicDescription": "Average number of parallel requests to external memory. Accounts for all requests"
+ },
+ {
+ "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_request_latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * (DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED)) / tma_info_core_clks",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + RESOURCE_STALLS.SB) / (CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
+ "MetricName": "tma_port_7",
+ "MetricThreshold": "tma_port_7 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address). Sample with: UOPS_DISPATCHED_PORT.PORT_7",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "INST_RETIRED.X87 * tma_info_uoppi / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
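
The metric entries above are plain JSON objects that perf's pmu-events build turns into built-in metrics: MetricExpr is a formula over raw event names (and other metrics), MetricThreshold decides when the metric is flagged as interesting, and a ScaleUnit of "100%" marks it as a percentage. A minimal Python sketch of reading one of these files follows, assuming a kernel checkout; the bdw-metrics.json filename is an assumption based on the broadwell directory named in the next diff header.

import json

# Assumed path inside a kernel source tree; adjust to your checkout.
PATH = "tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json"

with open(PATH) as f:
    metrics = json.load(f)  # the file is a flat JSON array of metric objects

for m in metrics:
    # Each object pairs a named formula over PMU events with the threshold
    # perf uses to decide whether the metric is worth highlighting.
    print(m.get("MetricName"), "-", m.get("MetricThreshold", "no threshold"))
    print("  expr:", m.get("MetricExpr"))
    print("  unit:", m.get("ScaleUnit", "1"))

On a machine whose CPU matches this event list, something like 'perf stat -M tma_retiring -a sleep 1' should then report the corresponding metric directly from these definitions.
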
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/cache.json b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
index 7938bf5689ab..f8ee5aefccea 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/cache.json
@@ -1,3399 +1,2453 @@
[
{
- "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1D miss outstandings duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "EventCode": "0x27",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x50"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
},
{
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "This event counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
+ "BriefDescription": "Demand Data Read requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe1"
},
{
- "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
+ "BriefDescription": "Demand requests that miss L2 cache.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.L2_PF_MISS",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 prefetch requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x27"
},
{
+ "BriefDescription": "Demand requests to L2 cache.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "L2_RQSTS.MISS",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe7"
},
{
- "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf8"
},
{
+ "BriefDescription": "RFO requests to L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe2"
},
{
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
- "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
+ "BriefDescription": "L2 cache misses when fetching instructions.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "L2_RQSTS.L2_PF_HIT",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 prefetch requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe1",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
- "PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe2",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x21"
},
{
- "PublicDescription": "This event counts the total number of L2 code requests.",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe4",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "BriefDescription": "All requests that miss L2 cache.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xf8",
- "EventName": "L2_RQSTS.ALL_PF",
+ "EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3f"
},
{
+ "BriefDescription": "All L2 requests.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x50",
- "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc2"
},
{
- "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss oustandings duration in cycles",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "BDM76",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
+ "BriefDescription": "Cycles when L1D is locked",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
- "EventCode": "0xb2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "PublicDescription": "This event counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Data_LA": "1",
+ "Errata": "BDE70, BDM100",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "Errata": "BDM35",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This event counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Data_LA": "1",
"Errata": "BDM35",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Data_LA": "1",
"Errata": "BDM100",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
- "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDE70",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Retired load uops.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "PublicDescription": "Counts all retired load uops. This event accounts for SW prefetch uops of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Retired store uops.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "Errata": "BDM100, BDE70",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "PublicDescription": "Counts all retired store uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Retired load uops with locked access.",
+ "Data_LA": "1",
+ "Errata": "BDM35",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts load uops with locked access retired to the architected path.",
"SampleAfterValue": "100007",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "BDM100",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
},
{
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "BDM100",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "PublicDescription": "This event counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
},
{
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM100",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "PublicDescription": "This event counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "BDM100",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "BDE70, BDM100",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
- "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cacheable and non-cacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "This event counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_TRANS.RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_TRANS.CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache accesses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_TRANS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "EventCode": "0xb2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_TRANS.L1D_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1D writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_TRANS.L2_FILL",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 fill requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Transactions accessing L2 pipe",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_IN.I",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in I state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_IN.S",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in S state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_IN.E",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in E state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "L2_LINES_IN.ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by demand.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of split locks in the super queue.",
- "EventCode": "0xf4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
+ "BriefDescription": "Counts all demand & prefetch data reads have any response type.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10091",
"SampleAfterValue": "100003",
- "BriefDescription": "Split locks in SQ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads have any response type.",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all prefetch code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads have any response type.",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive) have any response type.",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive) have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive) have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts any other requests have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000018000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads have any response type.",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads have any response type.",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs have any response type.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F80020122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00803C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01003C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02003C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "BriefDescription": "Split locks in SQ",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "This event counts the number of split locks in the super queue.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
index 15291239c128..e4826dc7f797 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/floating-point.json
@@ -1,172 +1,160 @@
[
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "BDM30",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable (Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "Errata": "BDM30",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable (Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "UMask": "0x15"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* packed double and single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "UMask": "0x3c"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation operation. Applies to SSE* and AVX* scalar double and single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x3"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x15",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"EventCode": "0xc7",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x2a",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2a"
},
{
- "EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
+ "EventName": "FP_ASSIST.ANY",
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
- "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result (Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1e"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ASSIST.X87_INPUT",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
- "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand (Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
"SampleAfterValue": "100003",
- "BriefDescription": "SSE* FP micro-code assist when output value is invalid. (Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts any input SSE* floating-point (FP) assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "BriefDescription": "Number of X87 assists due to input value.",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
- "BriefDescription": "Any input SSE* FP Assist - (Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1. Uses PEBS.",
+ "BriefDescription": "Number of X87 assists due to output value.",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1e",
- "EventName": "FP_ASSIST.ANY",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any FP_ASSIST umask was incrementing (Precise Event)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Errata": "BDM30",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Errata": "BDM30",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "EventCode": "0xA0",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
}
-]
\ No newline at end of file
+]
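
Each object in these pmu-events JSON files describes one hardware event: EventCode and UMask give the raw encoding, EventName the symbolic name perf exposes, SampleAfterValue the default sampling period, and optional fields such as CounterMask, Invert, EdgeDetect, Errata and MSRIndex/MSRValue carry extra programming details. As a rough illustration only (the field handling below is an assumption drawn from the fields visible in this patch; the kernel's own conversion is done by the jevents tool under tools/perf/pmu-events), a small Python sketch that turns one entry into a raw perf event string could look like this:

    # Minimal sketch, not the kernel's jevents conversion: map one JSON entry
    # to a raw perf event string such as cpu/event=0xc7,umask=0x1/.
    import json

    def event_to_perf_string(entry):
        # Offcore entries list two codes ("0xB7, 0xBB"); take the first one here.
        parts = ["event=" + entry["EventCode"].split(",")[0].strip().lower(),
                 "umask=" + entry["UMask"].lower()]
        if "CounterMask" in entry:
            parts.append("cmask=" + entry["CounterMask"])
        if entry.get("EdgeDetect") == "1":
            parts.append("edge=1")
        if entry.get("Invert") == "1":
            parts.append("inv=1")
        return "cpu/" + ",".join(parts) + "/"

    entry = json.loads('{"EventCode": "0xc7", "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE", "SampleAfterValue": "2000003", "UMask": "0x1"}')
    print(event_to_perf_string(entry))  # cpu/event=0xc7,umask=0x1/

With an encoding string like that, perf stat -e cpu/event=0xc7,umask=0x1/ counts the same event that perf otherwise exposes by name once the JSON is compiled in.
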
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
index aa4a5d762f21..bd5da39564e1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/frontend.json
@@ -1,286 +1,239 @@
[
{
- "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "IDQ.EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
"EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "IDQ.MITE_ALL_UOPS",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE.IFDATA_STALL",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "4",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
"CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "SampleAfterValue": "2000003",
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
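
Several of the frontend entries above reuse one EventCode/UMask pair and differ only in counter modifiers: for example IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE, CYCLES_LE_1_UOP_DELIV.CORE, CYCLES_LE_2_UOP_DELIV.CORE, CYCLES_LE_3_UOP_DELIV.CORE and CYCLES_FE_WAS_OK all program event 0x9C umask 0x1 with different CounterMask settings (plus Invert for CYCLES_FE_WAS_OK). A small sketch, assuming the in-tree path touched by this patch, that lists such families from the updated file:

    # Sketch: list events in the updated frontend.json that share an
    # EventCode/UMask encoding and differ only in cmask/edge/inv modifiers.
    import json
    from collections import defaultdict

    with open("tools/perf/pmu-events/arch/x86/broadwell/frontend.json") as f:
        events = json.load(f)

    by_encoding = defaultdict(list)
    for e in events:
        by_encoding[(e["EventCode"], e["UMask"])].append(e["EventName"])

    for (code, umask), names in sorted(by_encoding.items()):
        if len(names) > 1:
            print(code, umask, ", ".join(sorted(names)))
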
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/memory.json b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
index b6b5247d3d5a..ac7cdb831960 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/memory.json
@@ -1,3045 +1,2163 @@
[
{
- "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a TSX line had a cache conflict",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times we could not allocate Lock Buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_EXEC.MISC2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_EXEC.MISC3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "RTM region detected inside HLE.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_EXEC.MISC4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_EXEC.MISC5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
- "EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "HLE_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times HLE commit succeeded.",
+ "BriefDescription": "Number of times HLE abort was triggered",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "HLE_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE commit succeeded",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
- "EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times HLE abort was triggered.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
+ "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Number of times HLE caused a fault.",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "HLE_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times HLE caused a fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
+ "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RTM_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of times RTM commit succeeded.",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RTM_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times RTM commit succeeded",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RTM_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RTM_RETIRED.ABORTED_MISC1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RTM_RETIRED.ABORTED_MISC2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "RTM_RETIRED.ABORTED_MISC3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Number of times a RTM caused a fault.",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "BriefDescription": "Number of times HLE commit succeeded",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads with latency value being above four.",
- "EventCode": "0xCD",
- "MSRValue": "0x4",
- "Counter": "3",
- "UMask": "0x1",
- "Errata": "BDM100, BDM35",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
- "MSRIndex": "0x3F6",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
"SampleAfterValue": "100003",
- "BriefDescription": "Randomly selected loads with latency value being above 4",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
- "EventCode": "0xCD",
- "MSRValue": "0x8",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 128",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "50021",
- "BriefDescription": "Randomly selected loads with latency value being above 8",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
- "EventCode": "0xCD",
- "MSRValue": "0x10",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 16",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
"SampleAfterValue": "20011",
- "BriefDescription": "Randomly selected loads with latency value being above 16",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Randomly selected loads with latency value being above 256",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
"PEBS": "2",
- "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
- "EventCode": "0xCD",
- "MSRValue": "0x20",
- "Counter": "3",
- "UMask": "0x1",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Randomly selected loads with latency value being above 32",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
"SampleAfterValue": "100007",
- "BriefDescription": "Randomly selected loads with latency value being above 32",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
- "EventCode": "0xCD",
- "MSRValue": "0x40",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 4",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Randomly selected loads with latency value being above 64",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above four.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
- "EventCode": "0xCD",
- "MSRValue": "0x80",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 512",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Randomly selected loads with latency value being above 128",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
- "EventCode": "0xCD",
- "MSRValue": "0x100",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 64",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Randomly selected loads with latency value being above 256",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
- "EventCode": "0xCD",
- "MSRValue": "0x200",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 8",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Randomly selected loads with latency value being above 512",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts all prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts writebacks (modified to exclusive)",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts writebacks (modified to exclusive)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts all demand & prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts writebacks (modified to exclusive)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch data reads",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch code reads",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20003C0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20003C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F84000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C000122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times RTM abort was triggered",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered .",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times a RTM caused a fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times RTM commit succeeded",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a TSX line had a cache conflict",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/other.json b/tools/perf/pmu-events/arch/x86/broadwell/other.json
index 4f829c5febbe..1c2a5b001949 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/other.json
@@ -1,44 +1,36 @@
[
{
- "PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPL_CYCLES.RING0",
+ "PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "CounterMask": "1",
"EdgeDetect": "1",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0_TRANS",
+ "PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
"SampleAfterValue": "100007",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
index bb25574b8d21..9a902d2160e6 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/pipeline.json
@@ -1,1429 +1,1116 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "Counter": "Fixed counter 0",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 0"
- },
- {
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "Counter": "Fixed counter 2",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "BriefDescription": "Cycles when divider is busy executing divide operations",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_ISSUED.FLAGS_MERGE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired direct near calls",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd0"
},
{
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_ISSUED.SINGLE_MUL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.FPU_DIV_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divider is busy executing divide operations",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken macro-conditional branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts not taken macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
},
{
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired macro-conditional branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
},
{
- "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired direct near calls",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x84"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect calls",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x88"
},
{
- "EventCode": "0x3c",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "Errata": "BDW98",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
- "EventCode": "0x4c",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Far branch instructions retired.",
+ "Errata": "BDW98",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "This event counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "EventCode": "0x5E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts not taken macro-conditional branch instructions.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
- "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
- "PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "BriefDescription": "Speculative mispredicted indirect branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
- "PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "PublicDescription": "This event counts taken speculative and retired direct near calls.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "EventCode": "0xA0",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Errata": "BDM61",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Errata": "BDM11, BDM55",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts resource-related stall cycles.",
- "EventCode": "0xa2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "False dependencies in MOB due to partial compare",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "BriefDescription": "Resource-related stall cycles",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "This event counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.RS",
+ "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "BriefDescription": "Count cases of saving new LBR",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of Uops delivered by the LSD.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.THREAD",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Number of uops executed from any thread.",
+ "BriefDescription": "Number of uops executed on the core.",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Number of uops executed from any thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
"EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
"EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
"EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"EventCode": "0xb1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "BDM61",
- "EventName": "INST_RETIRED.ANY_P",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "Errata": "BDM11, BDM55",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "CounterHTOff": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "INST_RETIRED.X87",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts cycles without actually retired uops.",
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles no executable uops retired (Precise Event)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to PEBS uops retired event.",
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles using always true condition applied to PEBS uops retired event.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MACHINE_CLEARS.CYCLES",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDW98",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Counts all not taken macro branch instructions retired. (Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "Errata": "BDW98",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of far branch instructions retired.(Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Actually retired uops.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_MISP_RETIRED.RET",
- "SampleAfterValue": "100007",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xe6",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
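
As an illustration (not part of the patch): once these JSON files are compiled into perf's pmu-events tables, the core events above can be requested by name, but they also map directly onto the x86 raw encoding config = (UMask << 8) | EventCode. The sketch below counts UOPS_ISSUED.ANY (EventCode 0x0E, UMask 0x01) through perf_event_open(); the raw-encoding shortcut and the workload placeholder are illustrative assumptions, not something this patch introduces.

/*
 * Minimal sketch: count UOPS_ISSUED.ANY for the calling thread using the
 * raw encoding config = (UMask << 8) | EventCode from the JSON above.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = (0x01 << 8) | 0x0e;	/* UOPS_ISSUED.ANY */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("UOPS_ISSUED.ANY: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

With the JSON in place, `perf stat -e uops_issued.any` would be expected to resolve to the same encoding via the event tables.
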
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json
new file mode 100644
index 000000000000..c5cc43825cb9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/uncore-cache.json
@@ -0,0 +1,133 @@
+[
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "UMask": "0x86",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "UMask": "0x8f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "UMask": "0x16",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "UMask": "0x18",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in M-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in M-state.",
+ "UMask": "0x11",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "UMask": "0x1f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "UMask": "0x26",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "UMask": "0x2f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json
new file mode 100644
index 000000000000..64af685274a2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/uncore-interconnect.json
@@ -0,0 +1,61 @@
+[
+ {
+ "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.;",
+ "CounterMask": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.DRD_DIRECT",
+ "PerPkg": "1",
+ "PublicDescription": "Each cycle count number of valid coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Core coherent Data Read entries allocated in DirectData mode",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Core coherent Data Read entries allocated in DirectData mode.",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "ARB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json
new file mode 100644
index 000000000000..58be90d7cc93
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwell/uncore-other.json
@@ -0,0 +1,10 @@
+[
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Unit": "CLOCK"
+ }
+]
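
As an illustration (not part of the patch): the uncore events added in the files above are programmed through the kernel's dynamic uncore PMUs rather than PERF_TYPE_RAW. The sketch below opens UNC_CBO_CACHE_LOOKUP.ANY_M (EventCode 0x34, UMask 0x81, Unit CBOX) system-wide on one CPU; the PMU name "uncore_cbox_0" and the event/umask bit positions in config are assumptions about a typical client-uncore sysfs layout, not something these JSON files define.

/*
 * Minimal sketch: read one CBox lookup counter for roughly one second.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	FILE *f;
	int type, fd;

	/* Each uncore PMU exports its dynamic type id via sysfs. */
	f = fopen("/sys/bus/event_source/devices/uncore_cbox_0/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1) {
		fprintf(stderr, "no uncore_cbox_0 PMU on this system\n");
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	/* Assumed format: event in config[7:0], umask in config[15:8]. */
	attr.config = (0x81 << 8) | 0x34;	/* UNC_CBO_CACHE_LOOKUP.ANY_M */

	/* Uncore counters are per package: open system-wide on one CPU. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(1);	/* measurement window */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("UNC_CBO_CACHE_LOOKUP.ANY_M: %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}
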
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/uncore.json b/tools/perf/pmu-events/arch/x86/broadwell/uncore.json
deleted file mode 100644
index 28e1e159a3cb..000000000000
--- a/tools/perf/pmu-events/arch/x86/broadwell/uncore.json
+++ /dev/null
@@ -1,278 +0,0 @@
-[
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x41",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x81",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
- "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x44",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x48",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x11",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
- "BriefDescription": "L3 Lookup read request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x21",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
- "BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x81",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
- "BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x18",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
- "BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x88",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
- "BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x1f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
- "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x2f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
- "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x8f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
- "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x86",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
- "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x16",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
- "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x26",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
- "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0,",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x02",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.DRD_DIRECT",
- "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
- "PublicDescription": "Each cycle count number of 'valid' coherent Data Read entries that are in DirectData mode. Such entry is defined as valid when it is allocated till data sent to Core (first chunk, IDI0). Applicable for IA Cores' requests in normal case.",
- "Counter": "0,",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
- "BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x02",
- "EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
- "BriefDescription": "Number of Core coherent Data Read entries allocated in DirectData mode",
- "PublicDescription": "Number of Core coherent Data Read entries allocated in DirectData mode.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x20",
- "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
- "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "PublicDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x84",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
- "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "PublicDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
- "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.;",
- "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "Counter": "0,",
- "CounterMask": "1",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "NCU",
- "EventCode": "0x0",
- "UMask": "0x01",
- "EventName": "UNC_CLOCK.SOCKET",
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Counter": "FIXED",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- }
-]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
index 2a015e4c7e21..93621e004d88 100644
--- a/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwell/virtual-memory.json
@@ -1,388 +1,312 @@
[
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
"Errata": "BDM69",
+ "EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "BDM69",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM69",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "BDM69",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
"Errata": "BDM69",
+ "EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
"Errata": "BDM69",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
"Errata": "BDM69",
+ "EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "BDM69",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM69",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "BDM69",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
"Errata": "BDM69",
+ "EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
"Errata": "BDM69",
- "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
+ "BriefDescription": "Cycle count for an Extended Page table walk.",
"EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "EPT.WALK_CYCLES",
+ "PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"Errata": "BDM69",
+ "EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "BDM69",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "BDM69",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "BDM69",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
"Errata": "BDM69",
+ "EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
"Errata": "BDM69",
- "EventName": "ITLB_MISSES.WALK_DURATION",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
+ "BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x11"
},
{
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
+ "BriefDescription": "Number of DTLB page walker hits in the L2.",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L2.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x12"
},
{
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x14"
},
{
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory.",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in Memory.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x18"
},
{
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21"
},
{
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
+ "BriefDescription": "Number of ITLB page walker hits in the L2.",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L2.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x22"
},
{
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x24"
},
{
- "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
+ "BriefDescription": "STLB flush attempts",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
index 16fd8a7490fc..fb57c7382408 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
@@ -1,164 +1,1102 @@
[
{
- "BriefDescription": "Instructions Per Cycle (per logical thread)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline",
- "MetricName": "UPI"
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "MACHINE_CLEARS.COUNT * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely consumed by program instructions",
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "Frontend",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded Icache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / ( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS )",
- "MetricGroup": "DSB; Frontend_Bandwidth",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (threaded)",
- "MetricExpr": "1 / INST_RETIRED.ANY / cycles",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Per-thread actual clocks when the logical processor is active. This is called 'Clockticks' in VTune.",
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Total issue-pipeline slots",
- "MetricExpr": "4*( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / ( cpu@uops_executed.core\\,cmask\\=1@ / 2) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "MetricGroup": "Pipeline;Ports_Utilization",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Average Branch Address Clear Cost (fraction of cycles)",
- "MetricExpr": "2* ( RS_EVENTS.EMPTY_CYCLES - ICACHE.IFDATA_STALL - ( 14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7* ITLB_MISSES.WALK_COMPLETED ) ) / RS_EVENTS.EMPTY_END",
- "MetricGroup": "Unknown_Branches",
- "MetricName": "BAClear_Cost"
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Core actual clocks when any thread is active on the physical core",
- "MetricExpr": "( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least 1 such miss)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( cpu@l1d_pend_miss.pending_cycles\\,any\\=1@ / 2) if #SMT_on else L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
"BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7*(DTLB_STORE_MISSES.WALK_COMPLETED+DTLB_LOAD_MISSES.WALK_COMPLETED+ITLB_MISSES.WALK_COMPLETED)) / ( CPU_CLK_UNHALTED.THREAD_ANY / 2 ) if #SMT_on else cycles",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization"
+ "MetricExpr": "(cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * (DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED)) / tma_info_core_clks",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( 1*( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2* FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4*( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8* FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE ) / 1000000000 / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware threads were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3_10",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + RESOURCE_STALLS.SB) / (CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
+ "MetricName": "tma_port_7",
+ "MetricThreshold": "tma_port_7 > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "INST_RETIRED.X87 * tma_info_uoppi / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
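The MetricExpr strings above are arithmetic over perf event counts; the perf metric engine evaluates them and reports the result scaled by ScaleUnit ("100%" means a percentage). A minimal sketch of how the tma_l3_bound expression evaluates, using made-up counts and assuming, for illustration only, that tma_info_clks resolves to CPU_CLK_UNHALTED.THREAD:

# Minimal sketch: evaluates the tma_l3_bound MetricExpr with made-up event
# counts; tma_info_clks is assumed to be CPU_CLK_UNHALTED.THREAD here.
counts = {
    "MEM_LOAD_UOPS_RETIRED.L3_HIT": 4.0e6,
    "MEM_LOAD_UOPS_RETIRED.L3_MISS": 1.0e6,
    "CYCLE_ACTIVITY.STALLS_L2_MISS": 3.0e8,
    "CPU_CLK_UNHALTED.THREAD": 1.0e9,  # stand-in for tma_info_clks
}

def tma_l3_bound(c):
    hit = c["MEM_LOAD_UOPS_RETIRED.L3_HIT"]
    miss = c["MEM_LOAD_UOPS_RETIRED.L3_MISS"]
    # MetricExpr: L3_HIT / (L3_HIT + 7 * L3_MISS) * STALLS_L2_MISS / tma_info_clks
    return hit / (hit + 7 * miss) * c["CYCLE_ACTIVITY.STALLS_L2_MISS"] / c["CPU_CLK_UNHALTED.THREAD"]

print(f"tma_l3_bound = {tma_l3_bound(counts):.1%}")  # about 10.9% with the counts above

On hardware that exposes these events, the same metric can be requested by name, e.g. "perf stat -M tma_l3_bound -- <workload>".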
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
index bf243fe2a0ec..6784331ac1cb 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/cache.json
@@ -1,809 +1,665 @@
[
{
- "EventCode": "0x24",
- "UMask": "0x21",
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D miss outstandings duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x30",
- "BriefDescription": "L2 prefetch requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.L2_PF_MISS",
- "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "EventCode": "0x27",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x50"
},
{
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "UMask": "0x41",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "This event counts the number of demand Data Read requests that hit L2 cache. Only not rejected loads are counted.",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "This event counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
+ "BriefDescription": "Demand Data Read requests",
"EventCode": "0x24",
- "UMask": "0x42",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe1"
},
{
+ "BriefDescription": "Demand requests that miss L2 cache.",
"EventCode": "0x24",
- "UMask": "0x44",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x27"
},
{
+ "BriefDescription": "Demand requests to L2 cache.",
"EventCode": "0x24",
- "UMask": "0x50",
- "BriefDescription": "L2 prefetch requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.L2_PF_HIT",
- "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe7"
},
{
+ "BriefDescription": "Requests from L2 hardware prefetchers",
"EventCode": "0x24",
- "UMask": "0xe1",
- "BriefDescription": "Demand Data Read requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf8"
},
{
- "EventCode": "0x24",
- "UMask": "0xe2",
"BriefDescription": "RFO requests to L2 cache",
- "Counter": "0,1,2,3",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe2"
},
{
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
- "UMask": "0xe4",
- "BriefDescription": "L2 code requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "PublicDescription": "This event counts the total number of L2 code requests.",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
+ "BriefDescription": "L2 cache misses when fetching instructions.",
"EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"EventCode": "0x24",
- "UMask": "0xf8",
- "BriefDescription": "Requests from L2 hardware prefetchers",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_PF",
- "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
"EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x21"
},
{
- "EventCode": "0x27",
- "UMask": "0x50",
- "BriefDescription": "Not rejected writebacks that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_DEMAND_RQSTS.WB_HIT",
- "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
- "EventCode": "0x2E",
- "UMask": "0x41",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x2E",
- "UMask": "0x4f",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "L1D miss oustandings duration in cycles",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING",
- "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30"
},
{
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "BriefDescription": "All requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
},
{
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "BriefDescription": "All L2 requests.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x51",
- "UMask": "0x1",
- "BriefDescription": "L1D data line replacements",
- "Counter": "0,1,2,3",
- "EventName": "L1D.REPLACEMENT",
- "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "BDM76",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x60",
- "UMask": "0x2",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
- "UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
- "Counter": "0,1,2,3",
+ "EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB0",
- "UMask": "0x1",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB0",
- "UMask": "0x2",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB0",
- "UMask": "0x4",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0xB0",
- "UMask": "0x8",
- "BriefDescription": "Demand and prefetch data reads",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "EventCode": "0xb2",
- "UMask": "0x1",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD0",
- "UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
"Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD0",
- "UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS)",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
+ "Errata": "BDE70, BDM100",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "Errata": "BDM35",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.(Precise Event - PEBS)",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
+ "Errata": "BDE70",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD0",
- "UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
"Data_LA": "1",
+ "Errata": "BDE70",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "SampleAfterValue": "100003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
},
{
- "EventCode": "0xD0",
- "UMask": "0x81",
- "BriefDescription": "All retired load uops. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
"Data_LA": "1",
+ "Errata": "BDE70",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
},
{
- "EventCode": "0xD0",
- "UMask": "0x82",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event ?ounts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
- "SampleAfterValue": "2000003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xD1",
- "UMask": "0x1",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data source were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD1",
- "UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "Errata": "BDM35",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD1",
- "UMask": "0x4",
- "BriefDescription": "Hit in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"Data_LA": "1",
+ "Errata": "BDM35",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
- "SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xD1",
- "UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources. Uses PEBS.",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
},
{
- "EventCode": "0xD1",
- "UMask": "0x10",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources. Uses PEBS.",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
"Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This event counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "EventCode": "0xD1",
- "UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source. (Precise Event - PEBS).",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"Errata": "BDM100, BDE70",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xD1",
- "UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Counts all retired load uops. This event accounts for SW prefetch uops of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
- "EventCode": "0xD2",
- "UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS)",
+ "BriefDescription": "Retired store uops.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
- "Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Counts all retired store uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
},
{
- "EventCode": "0xD2",
- "UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops with locked access.",
"Data_LA": "1",
+ "Errata": "BDM35",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
- "Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts load uops with locked access retired to the architected path.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
{
- "EventCode": "0xD2",
- "UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
- "Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
- "EventCode": "0xD2",
- "UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required. (Precise Event - PEBS)",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
- "Errata": "BDM100",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x42"
},
{
- "EventCode": "0xD3",
- "UMask": "0x1",
+ "BriefDescription": "Retired load uops that miss the STLB.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
- "Errata": "BDE70, BDM100",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
},
{
- "EventCode": "0xD3",
- "UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI) (Precise Event)",
+ "BriefDescription": "Retired store uops that miss the STLB.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
- "Errata": "BDE70",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
},
{
- "EventCode": "0xD3",
- "UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM (Precise Event)",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
- "Errata": "BDE70",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xD3",
- "UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache (Precise Event)",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
- "Errata": "BDE70",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xF0",
- "UMask": "0x1",
- "BriefDescription": "Demand Data Read requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cacheable and non-cacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "This event counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF0",
- "UMask": "0x2",
- "BriefDescription": "RFO requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.RFO",
- "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
- "UMask": "0x4",
- "BriefDescription": "L2 cache accesses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.CODE_RD",
- "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF0",
- "UMask": "0x8",
- "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.ALL_PF",
- "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "EventCode": "0xb2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
- "UMask": "0x10",
- "BriefDescription": "L1D writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L1D_WB",
- "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF0",
- "UMask": "0x20",
- "BriefDescription": "L2 fill requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L2_FILL",
- "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF0",
- "UMask": "0x40",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L2_WB",
- "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
- "UMask": "0x80",
- "BriefDescription": "Transactions accessing L2 pipe",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF1",
- "UMask": "0x1",
- "BriefDescription": "L2 cache lines in I state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.I",
- "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF1",
- "UMask": "0x2",
- "BriefDescription": "L2 cache lines in S state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.S",
- "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
- "UMask": "0x4",
- "BriefDescription": "L2 cache lines in E state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.E",
- "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
- "UMask": "0x7",
- "BriefDescription": "L2 cache lines filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.ALL",
- "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
- "UMask": "0x5",
- "BriefDescription": "Clean L2 cache lines evicted by demand.",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xf4",
- "UMask": "0x10",
"BriefDescription": "Split locks in SQ",
- "Counter": "0,1,2,3",
+ "EventCode": "0xf4",
"EventName": "SQ_MISC.SPLIT_LOCK",
"PublicDescription": "This event counts the number of split locks in the super queue.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
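
For context only (not part of the patch): once perf picks up the updated cache.json above, the listed events are usable by name, but the raw EventCode/UMask pairs shown in the entries can also be programmed directly. A minimal C sketch, assuming a Broadwell-DE machine and using LONGEST_LAT_CACHE.MISS (EventCode 0x2E, UMask 0x41) from the hunk above; the busy loop is a placeholder workload:

/* count LONGEST_LAT_CACHE.MISS as a raw event via perf_event_open() */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	volatile uint64_t sink = 0;
	uint64_t count, i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x41 << 8) | 0x2e;	/* UMask << 8 | EventCode */
	attr.disabled = 1;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* this thread, any CPU */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	for (i = 0; i < (1UL << 24); i++)	/* placeholder workload */
		sink += i;

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) != sizeof(count)) {
		perror("read");
		return 1;
	}
	printf("LONGEST_LAT_CACHE.MISS: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

The same raw encoding is what `perf stat -e r412e` would program; with the JSON update applied, `perf stat -e LONGEST_LAT_CACHE.MISS` resolves to the identical configuration.
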
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
index d7b9d9c9c518..e4826dc7f797 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/floating-point.json
@@ -1,165 +1,160 @@
[
{
- "EventCode": "0xC1",
- "UMask": "0x8",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
- "Errata": "BDM30",
- "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC1",
- "UMask": "0x10",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
- "Errata": "BDM30",
- "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC7",
- "UMask": "0x1",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xC7",
- "UMask": "0x2",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC7",
- "UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xC7",
- "UMask": "0x4",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "UMask": "0x15"
},
{
- "EventCode": "0xC7",
- "UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* packed double and single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "UMask": "0x3c"
},
{
- "EventCode": "0xC7",
- "UMask": "0x10",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation operation. Applies to SSE* and AVX* scalar double and single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x3"
},
{
- "EventCode": "0xC7",
- "UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"EventCode": "0xc7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC7",
- "UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. ?.",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2a"
},
{
- "EventCode": "0xC7",
- "UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1e"
},
{
+ "BriefDescription": "Number of SIMD FP assists due to input values",
"EventCode": "0xCA",
- "UMask": "0x2",
- "BriefDescription": "Number of X87 assists due to output value.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
"EventCode": "0xCA",
- "UMask": "0x4",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Number of X87 assists due to input value.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of X87 assists due to output value.",
"EventCode": "0xCA",
- "UMask": "0x8",
- "BriefDescription": "Number of SIMD FP assists due to Output values",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
- "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCA",
- "UMask": "0x10",
- "BriefDescription": "Number of SIMD FP assists due to input values",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Errata": "BDM30",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xCA",
- "UMask": "0x1e",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.ANY",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Errata": "BDM30",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "EventCode": "0xA0",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
index 72781e1e3362..bd5da39564e1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/frontend.json
@@ -1,286 +1,239 @@
[
{
- "EventCode": "0x79",
- "UMask": "0x2",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.EMPTY",
- "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
},
{
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
"EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
- "EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
"EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"EventCode": "0x79",
- "UMask": "0x3c",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x80",
- "UMask": "0x1",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.HIT",
- "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x80",
- "UMask": "0x2",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.MISSES",
- "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.IFDATA_STALL",
- "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
"EventCode": "0x9C",
- "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Invert": "1",
"EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xAB",
- "UMask": "0x2",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "Counter": "0,1,2,3",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
index e44f73c24ac8..041b6ff4062e 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/memory.json
@@ -1,432 +1,354 @@
[
{
- "EventCode": "0x05",
- "UMask": "0x1",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
- "Counter": "0,1,2,3",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
+ "BriefDescription": "Number of times HLE abort was triggered",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times HLE abort was triggered.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x05",
- "UMask": "0x2",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
- "Counter": "0,1,2,3",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x54",
- "UMask": "0x1",
- "BriefDescription": "Number of times a TSX line had a cache conflict",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x54",
- "UMask": "0x2",
- "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x54",
- "UMask": "0x4",
- "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times HLE caused a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0x54",
- "UMask": "0x8",
- "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0x54",
- "UMask": "0x10",
- "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "BriefDescription": "Number of times HLE commit succeeded",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x54",
- "UMask": "0x20",
- "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x54",
- "UMask": "0x40",
- "BriefDescription": "Number of times we could not allocate Lock Buffer",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x5d",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Randomly selected loads with latency value being above 128",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC2",
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Randomly selected loads with latency value being above 16",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x4",
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC3",
- "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Randomly selected loads with latency value being above 256",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x8",
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC4",
- "PublicDescription": "RTM region detected inside HLE.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Randomly selected loads with latency value being above 32",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0x5d",
- "UMask": "0x10",
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Randomly selected loads with latency value being above 4",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above four.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC3",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Randomly selected loads with latency value being above 512",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc8",
- "UMask": "0x1",
- "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.START",
- "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Randomly selected loads with latency value being above 64",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 64.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc8",
- "UMask": "0x2",
- "BriefDescription": "Number of times HLE commit succeeded",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.COMMIT",
- "PublicDescription": "Number of times HLE commit succeeded.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Randomly selected loads with latency value being above 8",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc8",
- "UMask": "0x4",
- "BriefDescription": "Number of times HLE abort was triggered (PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered (PEBS).",
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xc8",
- "UMask": "0x8",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MISC1",
- "PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xc8",
- "UMask": "0x10",
- "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MISC2",
- "PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
+ "BriefDescription": "Number of times RTM abort was triggered",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered .",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xc8",
- "UMask": "0x20",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MISC3",
- "PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xc8",
- "UMask": "0x40",
- "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MISC4",
- "PublicDescription": "Number of times HLE caused a fault.",
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xc8",
- "UMask": "0x80",
- "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MISC5",
- "PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
"EventCode": "0xc9",
- "UMask": "0x1",
- "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.START",
- "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times a RTM caused a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
"EventCode": "0xc9",
- "UMask": "0x2",
- "BriefDescription": "Number of times RTM commit succeeded",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.COMMIT",
- "PublicDescription": "Number of times RTM commit succeeded.",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Number of times RTM commit succeeded",
"EventCode": "0xc9",
- "UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered (PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered (PEBS).",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
"EventCode": "0xc9",
- "UMask": "0x8",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC1",
- "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x10",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC2",
- "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x20",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC3",
- "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xc9",
- "UMask": "0x40",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC4",
- "PublicDescription": "Number of times a RTM caused a fault.",
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "EventCode": "0xc9",
- "UMask": "0x80",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC5",
- "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 4",
- "PEBS": "2",
- "MSRValue": "0x4",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above four.",
- "TakenAlone": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "3"
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 8",
- "PEBS": "2",
- "MSRValue": "0x8",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above eight.",
- "TakenAlone": "1",
- "SampleAfterValue": "50021",
- "CounterHTOff": "3"
+ "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 16",
- "PEBS": "2",
- "MSRValue": "0x10",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 16.",
- "TakenAlone": "1",
- "SampleAfterValue": "20011",
- "CounterHTOff": "3"
+ "BriefDescription": "Number of times a TSX line had a cache conflict",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 32",
- "PEBS": "2",
- "MSRValue": "0x20",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 32.",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "3"
+ "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 64",
- "PEBS": "2",
- "MSRValue": "0x40",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 64.",
- "TakenAlone": "1",
- "SampleAfterValue": "2003",
- "CounterHTOff": "3"
+ "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 128",
- "PEBS": "2",
- "MSRValue": "0x80",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 128.",
- "TakenAlone": "1",
- "SampleAfterValue": "1009",
- "CounterHTOff": "3"
+ "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 256",
- "PEBS": "2",
- "MSRValue": "0x100",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 256.",
- "TakenAlone": "1",
- "SampleAfterValue": "503",
- "CounterHTOff": "3"
+ "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Loads with latency value being above 512",
- "PEBS": "2",
- "MSRValue": "0x200",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "This event counts loads with latency value being above 512.",
- "TakenAlone": "1",
- "SampleAfterValue": "101",
- "CounterHTOff": "3"
+ "BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/other.json b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
index 4475249ea9da..1c2a5b001949 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/other.json
@@ -1,44 +1,36 @@
[
{
- "EventCode": "0x5C",
- "UMask": "0x1",
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
- "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
- "UMask": "0x1",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
- "Counter": "0,1,2,3",
"EventName": "CPL_CYCLES.RING0_TRANS",
- "CounterMask": "1",
"PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
- "UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
- "Counter": "0,1,2,3",
+ "EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
index e2f0540625a2..9a902d2160e6 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/pipeline.json
@@ -1,1423 +1,1116 @@
[
{
- "UMask": "0x1",
- "BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 0",
- "EventName": "INST_RETIRED.ANY",
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 0"
- },
- {
- "UMask": "0x2",
- "BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "UMask": "0x3",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x03",
- "UMask": "0x2",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x03",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x07",
- "UMask": "0x1",
- "BriefDescription": "False dependencies in MOB due to partial compare",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x8",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.ANY",
- "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x0E",
- "UMask": "0x10",
- "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.FLAGS_MERGE",
- "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0E",
- "UMask": "0x20",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0E",
- "UMask": "0x40",
- "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.SINGLE_MUL",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x14",
- "UMask": "0x1",
"BriefDescription": "Cycles when divider is busy executing divide operations",
- "Counter": "0,1,2,3",
+ "EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3c",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4c",
- "UMask": "0x1",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
- "Counter": "0,1,2,3",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4C",
- "UMask": "0x2",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
- "Counter": "0,1,2,3",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "UMask": "0x1",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x58",
- "UMask": "0x2",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x58",
- "UMask": "0x4",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "EventCode": "0x58",
- "UMask": "0x8",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired direct near calls",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd0"
},
{
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
- "EventCode": "0x87",
- "UMask": "0x1",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "Counter": "0,1,2,3",
- "EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "EventCode": "0x88",
- "UMask": "0x41",
"BriefDescription": "Not taken macro-conditional branches",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken macro-conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x88",
- "UMask": "0x81",
"BriefDescription": "Taken speculative and retired macro-conditional branches",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "EventCode": "0x88",
- "UMask": "0x82",
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Taken speculative and retired direct near calls",
"EventCode": "0x88",
- "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
+ },
+ {
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
+ "BriefDescription": "Taken speculative and retired indirect calls",
"EventCode": "0x88",
- "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
+ },
+ {
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "EventCode": "0x88",
- "UMask": "0x90",
- "BriefDescription": "Taken speculative and retired direct near calls",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "PublicDescription": "This event counts taken speculative and retired direct near calls.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x88",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired indirect calls",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "Errata": "BDW98",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x88",
- "UMask": "0xc1",
- "BriefDescription": "Speculative and retired macro-conditional branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "UMask": "0xc2",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Far branch instructions retired.",
+ "Errata": "BDW98",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "This event counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "EventCode": "0x88",
- "UMask": "0xc4",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x88",
- "UMask": "0xc8",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "EventCode": "0x88",
- "UMask": "0xd0",
- "BriefDescription": "Speculative and retired direct near calls",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
- "EventCode": "0x88",
- "UMask": "0xff",
- "BriefDescription": "Speculative and retired branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
+ "BriefDescription": "Speculative mispredicted indirect branches",
"EventCode": "0x89",
- "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x89",
- "UMask": "0x81",
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "EventCode": "0x89",
- "UMask": "0x84",
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
- "Counter": "0,1,2,3",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
"EventCode": "0x89",
- "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
+ },
+ {
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
- "Counter": "0,1,2,3",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x89",
- "UMask": "0xc1",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
- "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x89",
- "UMask": "0xc4",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "UMask": "0xff",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0xA0",
- "UMask": "0x3",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "Counter": "0,1,2,3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
"AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA2",
- "UMask": "0x1",
- "BriefDescription": "Resource-related stall cycles",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "This event counts resource-related stall cycles. Reasons for stalls can be as follows:\n - *any* u-arch structure got full (LB, SB, RS, ROB, BOB, LM, Physical Register Reclaim Table (PRRT), or Physical History Table (PHT) slots)\n - *any* u-arch structure got empty (like INT/SIMD FreeLists)\n - FPU control word (FPCW), MXCSR\nand others. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Errata": "BDM61",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA2",
- "UMask": "0x4",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.RS",
- "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Errata": "BDM11, BDM55",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA2",
- "UMask": "0x8",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.SB",
- "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA2",
- "UMask": "0x10",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.ROB",
- "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"CounterMask": "1",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
- "CounterMask": "2",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
- "CounterMask": "4",
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "False dependencies in MOB due to partial compare",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
"CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
- "CounterMask": "5",
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
- "CounterMask": "6",
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "6",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Resource-related stall cycles",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "This event counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x10"
},
{
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
- "CounterMask": "12",
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.RS",
+ "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.UOPS",
+ "BriefDescription": "Count cases of saving new LBR",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.THREAD",
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
"BriefDescription": "Number of uops executed on the core.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Number of uops executed from any thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "BDM61",
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC0",
- "UMask": "0x1",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "PEBS": "2",
- "Counter": "1",
- "EventName": "INST_RETIRED.PREC_DIST",
- "Errata": "BDM11, BDM55",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "CounterHTOff": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC0",
- "UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
- "Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC1",
- "UMask": "0x40",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Actually retired uops. (Precise Event - PEBS)",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Cycles without actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles without actually retired uops.",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "CounterMask": "10",
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used. (Precise Event - PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts the number of retirement slots used.",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.CYCLES",
- "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC3",
- "UMask": "0x4",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.SMC",
- "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC3",
- "UMask": "0x20",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xC4",
- "UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xC4",
- "UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC4",
- "UMask": "0x2",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect macro near call instructions retired (captured in ring 3).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "BDW98",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xC4",
- "UMask": "0x8",
- "BriefDescription": "Return instructions retired. (Precise Event - PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xC4",
- "UMask": "0x10",
- "BriefDescription": "Not taken branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This event counts not taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC4",
- "UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC4",
- "UMask": "0x40",
- "BriefDescription": "Far branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "Errata": "BDW98",
- "PublicDescription": "This event counts far branch instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xC5",
- "UMask": "0x0",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC5",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired.(Precise Event)",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Actually retired uops.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted return instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken. (Precise Event - PEBS).",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xe6",
- "UMask": "0x1f",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "Counter": "0,1,2,3",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
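
As a rough illustration of how the core-event fields in the table above (EventCode, UMask, CounterMask, Invert, EdgeDetect) map onto a raw counter configuration, the sketch below programs UOPS_RETIRED.ALL (EventCode 0xC2, UMask 0x1) through perf_event_open(2). It is a minimal standalone example, not part of the patch itself, and it assumes the usual x86 raw-config bit layout (event in bits 7:0, umask in 15:8, edge at bit 18, any at 21, inv at 23, cmask in 31:24).

    /* Sketch only: count UOPS_RETIRED.ALL (event 0xC2, umask 0x1) for this thread. */
    #include <linux/perf_event.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count = 0;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;
            /* event 0xC2 in bits 7:0, umask 0x1 in bits 15:8 */
            attr.config = 0xc2 | (0x1UL << 8);
            attr.disabled = 1;
            attr.exclude_kernel = 1;

            fd = perf_event_open(&attr, 0 /* this thread */, -1 /* any cpu */, -1, 0);
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... run the code being measured here ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("uops retired: %llu\n", (unsigned long long)count);
            close(fd);
            return 0;
    }

Once these JSON files are built into perf, the same event can normally be requested by name, e.g. perf stat -e uops_retired.all, or equivalently as a raw event with perf stat -e cpu/event=0xc2,umask=0x1/.
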
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
index 58ed6d33d1f4..56bba6d4e0f6 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-cache.json
@@ -1,316 +1,3373 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Bounce Control",
+ "EventCode": "0xA",
+ "EventName": "UNC_C_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1F",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "EventCode": "0x9",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local distress or incoming distress signals are asserted. Incoming distress includes both up and dn.",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Request",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x11",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "UMask": "0x9",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x5",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.I_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_READ",
- "Filter": "filter_opc=0x182",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Number of times that an RFO hit in S state. This is useful for determining if it might be good for a workload to use RspIWB instead of RspSWB.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE0",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 0",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE1",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 2",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE2",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 2",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE3",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 3",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.LRU_DECREMENT",
+ "PerPkg": "1",
+ "PublicDescription": "How often all LRU bits were decremented by 1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
+ "PerPkg": "1",
+ "PublicDescription": "How often we picked a victim that had a non-zero age",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; All",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
+ "BriefDescription": "AD Ring In Use; Down and Even",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; All",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "filter_opc=0x187,filter_nc=1",
+ "BriefDescription": "AK Ring In Use; Down and Even",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "filter_opc=0x18f,filter_nc=1",
+ "BriefDescription": "BL Ring in Use; Down and Even",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters for Down polarity",
+ "UMask": "0xcc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "AK",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "IV",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.",
+ "EventCode": "0x7",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IPQ is externally startved and therefore we are blocking the IRQ.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IRQ is externally starved and therefore we are blocking the IPQ.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; Number of times that the ISMQ Bid.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IPQ in Internal Starvation.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IRQ in Internal Starvation.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the ISMQ in Internal Starvation.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from an address conflicts. Address conflicts out of the IPQ should be rare. They will generally only occur if two different sockets are sending requests to the same address at the same time. This is a true conflict case, unlike the IPQ Address Conflict which is commonly caused by prefetching characteristics.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject. TOR rejects from the IPQ can be caused by the Egress being full or Address Conflicts.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from the Egress being full. IPQ requests make use of the AD Egress for regular responses, the BL egress to forward data, and the AK egress to return credits.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request from the IPQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because of an address match in the TOR. In order to maintain coherency, requests to the same address are not allowed to pass each other up in the Cbo. Therefore, if there is an outstanding request to a given address, one cannot issue another request to that address until it is complete. This comes up most commonly with prefetches. Outstanding prefetches occasionally will not complete their memory fetch and a demand request to the same address will then sit in the IRQ and get retried until the prefetch fills the data into the LLC. Therefore, it will not be uncommon to see this case in high bandwidth streaming workloads when the LLC Prefetcher in the core is enabled.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of IRQ retries that occur. Requests from the IRQ are retried if they are rejected from the TOR pipeline for a variety of reasons. Some of the most common reasons include if the Egress is full, there are no RTIDs, or there is a Physical Address match to another outstanding request.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because it failed to acquire an entry in the Egress. The egress is the buffer that queues up for allocating onto the ring. IRQ requests can make use of all four rings and all four Egresses. If any of the queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of requests rejects because of lack of QPI Ingress credits. These credits are required in order to send transactions to the QPI agent. Please see the QPI_IGR_CREDITS events for more information.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that requests from the IRQ were retried because there were no RTIDs available. RTIDs are required after a request misses the LLC and needs to send snoops and/or requests to memory. If there are no RTIDs available, requests will queue up in the IRQ and retry until one becomes available. Note that there are multiple RTID pools for the different sockets. There may be cases where the local RTIDs are all used, but requests destined for remote memory can still acquire an RTID because there are remote RTIDs available. This event does not provide any filtering for this case.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an BL packet to the Sbo.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the total number of times that a request from the ISMQ retried because of a TOR reject. ISMQ requests generally will not need to retry (or at least ISMQ retries are less common than IRQ retries). ISMQ requests will retry if they are not able to acquire a needed Egress credit to get onto the ring, or for cache evictions that need to acquire an RTID. Most ISMQ requests already have an RTID, so eviction retries will be less common here.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by a lack of Egress credits. The egress is the buffer that queues up for allocating onto the ring. If any of the Egress queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by no RTIDs. M-state cache evictions are serviced through the ISMQ, and must acquire an RTID in order to write back to memory. If no RTIDs are available, they will be retried.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x80",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried because of it lacked credits to send an BL packet to the Sbo.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
- "Filter": "filter_opc=0x190",
+ "EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; Evictions",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x191",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Eviction transactions inserted into the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; Local Memory",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
- "Filter": "filter_opc=0x192",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x28",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; Misses to Local Memory",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x2a",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x180,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
"PerPkg": "1",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x8a",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x181",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; NID Matched",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched (matches an RTID destination) transactions inserted into the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched eviction transactions inserted into the TOR.",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched miss requests that were inserted into the TOR.",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched write transactions inserted into the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Opcode Match",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Inserts; Writebacks",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Write transactions inserted into the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All valid TOR entries. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding eviction transactions in the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding miss requests in the TOR. 'Miss' means the allocation requires an RTID. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0xa",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x2a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries for miss transactions that match an opcode. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x8a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of NID matched outstanding requests in the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid.In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding NID matched eviction transactions in the TOR .",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID.",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); NID matched write transactions int the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc).",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
+ "BriefDescription": "TOR Occupancy",
"EventCode": "0x36",
- "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
- "Filter": "filter_opc=0x182",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Write transactions in the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto AD Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto AK Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto BL Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AK ring. This is commonly used for snoop responses coming from the core and destined for a Cachebo.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the core AD egress spent in starvation",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both AK egresses spent in starvation",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both BL egresses spent in starvation",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the cachebo IV egress spent in starvation",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming snoop hazard",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the bypass.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Reads where Direct2Core overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lat Opt Return",
+ "EventCode": "0x41",
+ "EventName": "UNC_H_DIRECTORY_LAT_OPT",
"PerPkg": "1",
+ "PublicDescription": "Directory Latency Optimization Data Return Path Taken. When directory mode is enabled and the directory returned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that did not have to send any snoops because the directory bit was clear.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that had to send one or more snoops because the directory bit was set.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory clears. This occurs when snoops were sent and all returned with RspI.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory sets. This occurs when a remote read transaction requests memory, bringing it to a remote cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
},
{
- "BriefDescription": "read requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "EventCode": "0x1E",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.CANCELLED",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.; OSB Snoop broadcast cancelled due to D2C or Other. OSB cancel is counted when OSB local read is not allowed even when the transaction in local InItoE. It also counts D2C OSB cancel, but also includes the cases were D2C was not set in the first place for the transaction coming from the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE_USEFUL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; All",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from the local socket.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming ead requests. This is a good proxy for LLC Read Misses (including RFOs).",
"UMask": "0x3",
"Unit": "HA"
},
{
- "BriefDescription": "read requests to local home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Local Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the local socket. This is a good proxy for LLC Read Misses (including RFOs) from the local socket.",
"UMask": "0x1",
"Unit": "HA"
},
{
- "BriefDescription": "read requests to remote home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Remote Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the remote socket. This is a good proxy for LLC Read Misses (including RFOs) from the remote socket.",
"UMask": "0x2",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
- "UMask": "0xC",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming write requests.",
+ "UMask": "0xc",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to local home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Local Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from the local socket.",
"UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to remote home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Remote Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from remote sockets.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; All",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; All",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
"UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously)",
- "Counter": "0,1,2,3",
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of reads when the snoop was on the critical path to the data return.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of reads when the snoop was on the critical path to the data return.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; Tracked for snoops from both local and remote sockets.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Snoop Responses Received; RspI",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x20",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache with no writeback to memory",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Snoop Responses Received; RspIFwd",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line response from remote cache",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Snoop Responses Received; RspS",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line forwarded from remote cache",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Snoop Responses Received; RspSFwd",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*Fwd*WB",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Other",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for all other snoop responses.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 2",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 3",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 4",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 5",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 6",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 7",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 10",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 11",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 8",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 9",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Counts the number of cycles when the HA tracker pool (HT) is completely used including reserved HT entries. It will not return valid count when BT is disabled.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Counts the number of cycles when the general purpose (GP) HA tracker pool (HT) is completely used. It will not return valid count when BT is disabled.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Requests coming from both local and remote sockets.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local InvItoE Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote InvItoE Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local Read Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote Read Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local Write Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote Write Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumulator; Local Requests",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumulator; Remote Requests",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "EventCode": "0xF",
+ "EventName": "UNC_H_TxR_AD.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.; Filter for outbound NDR transactions sent on the AD ring. NDR stands for non-data response and is generally used for completions that do not include data. AD NDR is used for transactions to remote sockets.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to the cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent directly to the requesting core.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to a remote socket over QPI.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For AK Ring",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For BL Ring",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
"UMask": "0x8",
"Unit": "HA"
}
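
The home agent (HA) egress events above follow a fixed pattern: each queue exposes INSERTS, CYCLES_NE and CYCLES_FULL counters, and the .ALL umask (0x3) is simply the OR of the per-scheduler umasks (0x1 and 0x2). Below is a minimal Python sketch (an editorial illustration, not part of the patch) that loads any one of the JSON files added here and groups its event aliases by Unit and EventCode so that umask layout is easy to inspect; the only assumption is the file path passed on the command line.

#!/usr/bin/env python3
# Sketch: group the uncore event aliases defined in one of the JSON files
# added by this patch by (Unit, EventCode), so the umask breakdown
# (e.g. UNC_H_TxR_AD_INSERTS.ALL = SCHED0 | SCHED1) is visible at a glance.
import json
import sys
from collections import defaultdict

# Path to e.g. .../broadwellde/uncore-interconnect.json, given on the command line.
events = json.load(open(sys.argv[1]))

by_code = defaultdict(list)
for ev in events:
    # Free-running counters such as UNC_I_CLOCKTICKS carry no EventCode/UMask.
    key = (ev.get("Unit", "?"), ev.get("EventCode", "n/a"))
    by_code[key].append((ev["EventName"], ev.get("UMask", "-")))

for (unit, code), entries in sorted(by_code.items()):
    print(f"{unit} EventCode {code}")
    for name, umask in entries:
        print(f"  UMask {umask:>5}  {name}")
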
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json
new file mode 100644
index 000000000000..8a327e0f1441
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-interconnect.json
@@ -0,0 +1,614 @@
+[
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIItoM",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; RFO",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; WbMtoI",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_TIMEOUT",
+ "PerPkg": "1",
+ "PublicDescription": "Indicates the fetch for a previous prefetch wasn't accepted by the prefetch. This happens in the case of a prefetch TimeOut",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.DATA_THROTTLE",
+ "PerPkg": "1",
+ "PublicDescription": "IRP throttled switch data",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit E or S",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit I",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit M",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Miss",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpCode",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpData",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpInv",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of write prefetches.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; PREQ, PSMI, P2U, Thermal, PCUSMI, PMI",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ }
+]
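
Several of the IRP descriptions above (for example UNC_I_TxR_REQUEST_OCCUPANCY and UNC_I_CACHE_TOTAL_OCCUPANCY) note that an occupancy accumulator can be combined with the matching allocations event to compute an average latency, and with UNC_I_CLOCKTICKS to compute an average queue depth. A small sketch of that arithmetic follows; the counter readings are made-up placeholders, not measured values.

#!/usr/bin/env python3
# Sketch of the occupancy/allocations arithmetic referenced by the IRP event
# descriptions: an *_OCCUPANCY event adds the current queue depth every cycle,
# so dividing by the inserts/allocations count gives average residency per
# request (in uncore cycles), and dividing by clockticks gives average depth.

def avg_latency_cycles(occupancy_sum: int, inserts: int) -> float:
    """Average residency per request, in uncore clock cycles."""
    return occupancy_sum / inserts if inserts else 0.0

def avg_depth(occupancy_sum: int, clockticks: int) -> float:
    """Average number of outstanding requests per cycle."""
    return occupancy_sum / clockticks if clockticks else 0.0

# Hypothetical counter readings standing in for real perf measurements.
occ, ins, clk = 1_200_000, 40_000, 2_000_000
print(f"avg latency  : {avg_latency_cycles(occ, ins):.1f} uncore cycles")
print(f"avg occupancy: {avg_depth(occ, clk):.2f} requests/cycle")
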
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json
new file mode 100644
index 000000000000..01e04daf03da
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-io.json
@@ -0,0 +1,555 @@
+[
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; All",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Dn",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Up",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; All",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; All",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ }
+]
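
The event descriptions above repeatedly note that an *_OCCUPANCY accumulator can be combined with the matching *_INSERTS and *_CYCLES_NE events to derive average queue latency and occupancy, and the replaced LLC_MISSES.MEM_READ/MEM_WRITE aliases below scale UNC_M_CAS_COUNT.RD/WR by 64 bytes per CAS. The following is a minimal Python sketch of those derivations only; it assumes the raw counter deltas and the measurement interval have already been collected (for example with perf stat), and all function and variable names are illustrative, not part of the event files or the perf tool.

    # Sketch: derive queue statistics and DRAM bandwidth from raw uncore
    # counter deltas, per the calculations suggested in the descriptions above.

    def avg_queue_occupancy(occupancy_sum, cycles_not_empty):
        """Average entries while the queue was busy (OCCUPANCY / CYCLES_NE)."""
        return occupancy_sum / cycles_not_empty if cycles_not_empty else 0.0

    def avg_queue_latency_cycles(occupancy_sum, inserts):
        """Little's law: average residency in cycles (OCCUPANCY / INSERTS)."""
        return occupancy_sum / inserts if inserts else 0.0

    def dram_read_bandwidth_bytes_per_sec(cas_count_rd, seconds):
        """UNC_M_CAS_COUNT.RD scaled by the 64-byte cache-line size."""
        return cas_count_rd * 64 / seconds

    if __name__ == "__main__":
        # Illustrative numbers only.
        print(avg_queue_latency_cycles(occupancy_sum=1_200_000, inserts=30_000))
        print(dram_read_bandwidth_bytes_per_sec(cas_count_rd=5_000_000, seconds=1.0))
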
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
index f4b0745cdbbf..a764234a3584 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-memory.json
@@ -1,86 +1,2852 @@
[
{
- "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_READ",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Read CAS commands issued on this channel (including underfills).",
"UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the number of underfill reads that are issued by the memory controller. This will generally be about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ. While it is possible for underfills to be issed in both WMM and RMM, this event counts both.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Write CAS commands issued on this channel.",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_WRITE",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Clockticks",
"EventName": "UNC_M_DCLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_DCLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
- "Counter": "0,1,2,3",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_DCLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_DCLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charges due to page misses",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of page misses. This does not include explicit precharge commands sent with CAS commands in Auto-Precharge mode. This does not include PRE commands sent as a result of the page close counter expiration.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for reads",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to read",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for writes",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to write",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
"UMask": "0x8",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE MXB write buffer occupancy",
+ "EventCode": "0x91",
+ "EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.RMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.WMM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
}
]
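
[Editorial aside, not part of the patch.] The UNC_M_RPQ_CYCLES_NE and UNC_M_WPQ_CYCLES_NE descriptions above note that the "not empty" cycle counts are meant to be combined with the corresponding occupancy-accumulation counts to derive an average queue depth. A minimal sketch of that arithmetic follows; it assumes UNC_M_RPQ_OCCUPANCY is the occupancy-accumulation event the description refers to, and the counter values are made up for illustration (on real hardware they would come from reading the iMC events, e.g. via perf stat).

    # Illustrative only -- shows the average-occupancy calculation hinted at
    # in the RPQ/WPQ event descriptions: accumulated occupancy divided by the
    # number of cycles the queue was not empty.

    def average_queue_occupancy(occupancy_sum: float, cycles_not_empty: float) -> float:
        """Average number of entries in the queue while it held at least one request."""
        if cycles_not_empty == 0:
            return 0.0
        return occupancy_sum / cycles_not_empty

    # Hypothetical sample readings (assumed event names, invented values):
    rpq_occupancy_sum = 1_250_000     # e.g. UNC_M_RPQ_OCCUPANCY accumulation
    rpq_cycles_not_empty = 400_000    # e.g. UNC_M_RPQ_CYCLES_NE

    print(f"avg RPQ depth while non-empty: "
          f"{average_queue_occupancy(rpq_occupancy_sum, rpq_cycles_not_empty):.2f}")

The same ratio applies to the write side (WPQ occupancy over UNC_M_WPQ_CYCLES_NE), with the caveat the description itself gives: writes post to the iMC before reaching DRAM, so the resulting average is not a DRAM write latency.
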
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
index dd1b95655d1d..83d20130c217 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/uncore-power.json
@@ -1,92 +1,457 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
- "Counter": "0,1,2,3",
+ "BriefDescription": "pclk Cycles",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6A",
+ "EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6B",
+ "EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
- "Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6C",
+ "EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6D",
+ "EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6E",
+ "EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6F",
+ "EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x70",
+ "EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x71",
+ "EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x61",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x62",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x63",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x64",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x65",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x66",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x67",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x68",
+ "EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x69",
+ "EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x31",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3A",
+ "EventName": "UNC_P_DEMOTIONS_CORE10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3B",
+ "EventName": "UNC_P_DEMOTIONS_CORE11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3C",
+ "EventName": "UNC_P_DEMOTIONS_CORE12",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3D",
+ "EventName": "UNC_P_DEMOTIONS_CORE13",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3E",
+ "EventName": "UNC_P_DEMOTIONS_CORE14",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3F",
+ "EventName": "UNC_P_DEMOTIONS_CORE15",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x40",
+ "EventName": "UNC_P_DEMOTIONS_CORE16",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x41",
+ "EventName": "UNC_P_DEMOTIONS_CORE17",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x33",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x34",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x35",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x36",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x37",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x38",
+ "EventName": "UNC_P_DEMOTIONS_CORE8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x39",
+ "EventName": "UNC_P_DEMOTIONS_CORE9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C1E",
+ "EventCode": "0x4E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C1E. This event can be used in conjunction with edge detect to count C1E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C7 State Residency",
+ "EventCode": "0x2E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C7. This event can be used in conjunction with edge detect to count C7 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "PerPkg": "1",
+ "PublicDescription": "Ring GV with same final and initial frequency",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
"Unit": "PCU"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
index 7d79c707c6d1..93621e004d88 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellde/virtual-memory.json
@@ -1,388 +1,312 @@
[
{
- "EventCode": "0x08",
- "UMask": "0x1",
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"Errata": "BDM69",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"EventCode": "0x08",
- "UMask": "0x2",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "Errata": "BDM69",
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x08",
- "UMask": "0x4",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "Errata": "BDM69",
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x08",
- "UMask": "0x8",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "Errata": "BDM69",
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x08",
- "UMask": "0xe",
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"Errata": "BDM69",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0x08",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
"Errata": "BDM69",
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "UMask": "0x20",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "UMask": "0x40",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x49",
- "UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"Errata": "BDM69",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"EventCode": "0x49",
- "UMask": "0x2",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x49",
- "UMask": "0x4",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x49",
- "UMask": "0x8",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x49",
- "UMask": "0xe",
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"Errata": "BDM69",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0x49",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
"Errata": "BDM69",
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "UMask": "0x20",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "UMask": "0x40",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x4F",
- "UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0x85",
- "UMask": "0x1",
"BriefDescription": "Misses at all ITLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"Errata": "BDM69",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
"EventCode": "0x85",
- "UMask": "0x2",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x85",
- "UMask": "0x4",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x85",
- "UMask": "0x8",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x85",
- "UMask": "0xe",
"BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
"Errata": "BDM69",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0x85",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_DURATION",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
"Errata": "BDM69",
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "UMask": "0x20",
- "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "UMask": "0x40",
- "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xAE",
- "UMask": "0x1",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB.ITLB_FLUSH",
- "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xBC",
- "UMask": "0x11",
"BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x11"
},
{
- "EventCode": "0xBC",
- "UMask": "0x12",
"BriefDescription": "Number of DTLB page walker hits in the L2.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x12"
},
{
- "EventCode": "0xBC",
- "UMask": "0x14",
"BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x14"
},
{
- "EventCode": "0xBC",
- "UMask": "0x18",
"BriefDescription": "Number of DTLB page walker hits in Memory.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x18"
},
{
- "EventCode": "0xBC",
- "UMask": "0x21",
"BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21"
},
{
- "EventCode": "0xBC",
- "UMask": "0x22",
"BriefDescription": "Number of ITLB page walker hits in the L2.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x22"
},
{
- "EventCode": "0xBC",
- "UMask": "0x24",
"BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x24"
},
{
- "EventCode": "0xBD",
- "UMask": "0x1",
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xBD",
- "UMask": "0x20",
"BriefDescription": "STLB flush attempts",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
index 1eb0415fa11a..65ec0c9e55d1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
@@ -1,370 +1,1167 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "MACHINE_CLEARS.COUNT * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + cpu@DTLB_LOAD_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_LOAD_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + cpu@DTLB_STORE_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * DTLB_STORE_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
+ },
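
An illustrative note on the Topdown level-1 arithmetic used by the entries above: tma_frontend_bound divides undelivered front-end slots by tma_info_slots, which on this (pre-ICL) core is 4 slots per core clock. The Python sketch below is not part of this patch or of perf's metric engine; the helper name and counter values are placeholders chosen only to make the formula concrete.

# Illustrative only (not part of the patch): Topdown level-1 Frontend Bound,
# computed the same way as the JSON expressions above, assuming a pre-ICL
# core where slots = 4 * core clocks.
def frontend_bound(idq_uops_not_delivered_core, core_clks):
    slots = 4 * core_clks                       # tma_info_slots
    return idq_uops_not_delivered_core / slots  # tma_frontend_bound

# Placeholder counter values for demonstration only.
print(frontend_bound(idq_uops_not_delivered_core=1.2e9, core_clks=1.0e9))  # ~0.30
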
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_mispredicts_resteers"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
},
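
The renamed CPI/IPC pair above is just a reciprocal relationship over two counters. A minimal sketch, assuming made-up counter values (illustrative only, not part of the patch):

# Illustrative only: tma_info_ipc and tma_info_cpi from the two raw events
# referenced in the expressions above. Counter values are placeholders.
inst_retired_any = 4.0e9         # INST_RETIRED.ANY
cpu_clk_unhalted_thread = 2.0e9  # CPU_CLK_UNHALTED.THREAD (tma_info_clks)

tma_info_ipc = inst_retired_any / cpu_clk_unhalted_thread
tma_info_cpi = 1 / tma_info_ipc
print(tma_info_ipc, tma_info_cpi)  # 2.0 0.5
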
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ },
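
The tma_info_dram_bw_use expression above is plain unit arithmetic: each CAS command transfers one 64-byte cache line. A minimal sketch with placeholder uncore counts and a measured wall-clock interval (illustrative only, not part of the patch):

# Illustrative only: external memory bandwidth in GB/s from IMC CAS counts.
unc_m_cas_count_rd = 3.0e8   # UNC_M_CAS_COUNT.RD (placeholder)
unc_m_cas_count_wr = 1.0e8   # UNC_M_CAS_COUNT.WR (placeholder)
duration_time_s = 1.0        # measurement interval in seconds

dram_bw_gbps = 64 * (unc_m_cas_count_rd + unc_m_cas_count_wr) / 1e9 / duration_time_s
print(dram_bw_gbps)  # 25.6 GB/s for these placeholder counts
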
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_iptb, tma_lcp"
+ },
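
tma_info_dsb_coverage above is the DSB's share of all delivered uops across the four front-end uop sources named in its expression. A hypothetical sketch with placeholder counts (illustrative only, not part of the patch):

# Illustrative only: fraction of uops delivered by the DSB (decoded uop cache)
# out of the four front-end sources in the expression above.
idq_dsb_uops  = 6.0e9  # IDQ.DSB_UOPS   (placeholder)
lsd_uops      = 1.0e9  # LSD.UOPS       (placeholder)
idq_mite_uops = 2.5e9  # IDQ.MITE_UOPS  (placeholder)
idq_ms_uops   = 0.5e9  # IDQ.MS_UOPS    (placeholder)

tma_info_dsb_coverage = idq_dsb_uops / (idq_dsb_uops + lsd_uops + idq_mite_uops + idq_ms_uops)
print(tma_info_dsb_coverage)  # 0.6 -> below the 0.7 threshold flagged above
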
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (12 * ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * cycles )",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization"
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * ( DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED ) ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
},
{
- "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
- "MetricGroup": "Memory_Lat",
- "MetricName": "DRAM_Read_Latency"
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION + 7 * (DTLB_STORE_MISSES.WALK_COMPLETED + DTLB_LOAD_MISSES.WALK_COMPLETED + ITLB_MISSES.WALK_COMPLETED)) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_Parallel_Reads"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
},
{
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cbox_0@event\\=0x0@",
- "MetricGroup": "",
- "MetricName": "Socket_CLKS"
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + cpu@ITLB_MISSES.WALK_DURATION\\,cmask\\=1@ + 7 * ITLB_MISSES.WALK_COMPLETED) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_MISS / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_local_dram",
+ "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + RESOURCE_STALLS.SB) / (CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES * tma_branch_resteers / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY)",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
+ "MetricName": "tma_port_7",
+ "MetricThreshold": "tma_port_7 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address). Sample with: UOPS_DISPATCHED_PORT.PORT_7",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_TOTAL + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (CYCLE_ACTIVITY.STALLS_TOTAL - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 180 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_clks",
+ "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
+ "MetricName": "tma_remote_cache",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "310 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_remote_dram",
+ "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "tma_branch_resteers - tma_mispredicts_resteers - tma_clears_resteers",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "INST_RETIRED.X87 * tma_info_uoppi / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
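
The metric entries above are purely declarative: a consumer substitutes per-event counts into MetricExpr, scales the result by ScaleUnit, and compares the named metric against MetricThreshold to decide whether to flag it (perf's real expression handling lives in tools/perf/util/expr.c and metricgroup.c). As a rough illustration only - a toy Python stand-in, not perf's metric engine, with invented event counts and a threshold simplified from the full tma_lcp expression above:

import json

# Toy stand-in for perf's metric engine (illustration only). The event counts
# are invented, and the threshold is simplified from the full expression in
# the tma_lcp entry above.
metric = json.loads("""
{
    "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
    "MetricName": "tma_lcp",
    "MetricThreshold": "tma_lcp > 0.05",
    "ScaleUnit": "100%"
}
""")

# Hypothetical resolved inputs: one raw event count plus a helper metric that
# would itself come from another JSON entry.
values = {
    "ILD_STALL.LCP": 1_200_000,
    "tma_info_clks": 40_000_000,
}

def substitute(expr, vals):
    # Replace names with numbers, longest names first so a short name never
    # clobbers part of a longer identifier.
    for name in sorted(vals, key=len, reverse=True):
        expr = expr.replace(name, repr(vals[name]))
    return expr

value = eval(substitute(metric["MetricExpr"], values))        # arithmetic only
values[metric["MetricName"]] = value
flagged = eval(substitute(metric["MetricThreshold"], values))
print(f"{metric['MetricName']} = {value:.1%}, flagged: {flagged}")

With these invented counts the metric comes out at 3.0%, below the 5% threshold, so it would not be highlighted.
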
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
index 75a3098d5775..781e7c64e71f 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/cache.json
@@ -1,966 +1,773 @@
[
{
- "EventCode": "0x24",
- "UMask": "0x21",
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D miss outstandings duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x30",
- "BriefDescription": "L2 prefetch requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.L2_PF_MISS",
- "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "EventCode": "0x27",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x50"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x5"
},
{
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "UMask": "0xc1",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "This event counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
+ "BriefDescription": "Demand Data Read requests",
"EventCode": "0x24",
- "UMask": "0xc2",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe1"
},
{
+ "BriefDescription": "Demand requests that miss L2 cache.",
"EventCode": "0x24",
- "UMask": "0xc4",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x27"
},
{
+ "BriefDescription": "Demand requests to L2 cache.",
"EventCode": "0x24",
- "UMask": "0xd0",
- "BriefDescription": "L2 prefetch requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.L2_PF_HIT",
- "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe7"
},
{
+ "BriefDescription": "Requests from L2 hardware prefetchers",
"EventCode": "0x24",
- "UMask": "0xe1",
- "BriefDescription": "Demand Data Read requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "PublicDescription": "This event counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf8"
},
{
- "EventCode": "0x24",
- "UMask": "0xe2",
"BriefDescription": "RFO requests to L2 cache",
- "Counter": "0,1,2,3",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.ALL_RFO",
"PublicDescription": "This event counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe2"
},
{
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
- "UMask": "0xe4",
- "BriefDescription": "L2 code requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "PublicDescription": "This event counts the total number of L2 code requests.",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
+ "BriefDescription": "L2 cache misses when fetching instructions.",
"EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"EventCode": "0x24",
- "UMask": "0xf8",
- "BriefDescription": "Requests from L2 hardware prefetchers",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_PF",
- "PublicDescription": "This event counts the total number of requests from the L2 hardware prefetchers.",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
"EventCode": "0x24",
- "UMask": "0xff",
- "BriefDescription": "All L2 requests.",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.REFERENCES",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "This event counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x21"
},
{
- "EventCode": "0x27",
- "UMask": "0x50",
- "BriefDescription": "Not rejected writebacks that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_DEMAND_RQSTS.WB_HIT",
- "PublicDescription": "This event counts the number of WB requests that hit L2 cache.",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that hit L2 cache. L3 prefetch new types.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
- "EventCode": "0x2E",
- "UMask": "0x41",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x2E",
- "UMask": "0x4f",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "L1D miss oustandings duration in cycles",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING",
- "PublicDescription": "This event counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand; from the demand Hit FB, if it is allocated by hardware or software prefetch.\nNote: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "PublicDescription": "This event counts the number of requests from the L2 hardware prefetchers that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30"
},
{
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts duration of L1D miss outstanding in cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "BriefDescription": "All requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
},
{
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "BriefDescription": "All L2 requests.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x51",
- "UMask": "0x1",
- "BriefDescription": "L1D data line replacements",
- "Counter": "0,1,2,3",
- "EventName": "L1D.REPLACEMENT",
- "PublicDescription": "This event counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "BDM76",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x60",
- "UMask": "0x2",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "Errata": "BDM76",
- "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "CounterMask": "1",
- "Errata": "BDM76",
- "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
- "UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
- "Counter": "0,1,2,3",
+ "EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB0",
- "UMask": "0x1",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB0",
- "UMask": "0x2",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "PublicDescription": "This event counts both cacheable and noncachaeble code read requests.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB0",
- "UMask": "0x4",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that miss the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0xB0",
- "UMask": "0x8",
- "BriefDescription": "Demand and prefetch data reads",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts core-originated cacheable demand requests that refer to the last level cache (LLC). Demand requests include loads, RFOs, and hardware prefetches from L1D, and instruction fetches from IFU.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "EventCode": "0xb2",
- "UMask": "0x1",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD0",
- "UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "This event counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
"Data_LA": "1",
+ "Errata": "BDM100",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "This event counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
"SampleAfterValue": "100003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD0",
- "UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
+ "Errata": "BDE70, BDM100",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "Errata": "BDM35",
- "PublicDescription": "This event counts load uops with locked access retired to the architected path.",
+ "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
+ "Errata": "BDE70",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD0",
- "UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
"Data_LA": "1",
+ "Errata": "BDE70",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "SampleAfterValue": "100003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
},
{
- "EventCode": "0xD0",
- "UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
"Data_LA": "1",
+ "Errata": "BDE70",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "PublicDescription": "This event counts load uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement. This event also counts SW prefetches.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
},
{
- "EventCode": "0xD0",
- "UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "PublicDescription": "This event counts store uops retired to the architected path with a filter on bits 0 and 1 applied.\nNote: This event counts AVX-256bit load/store double-pump memory uops as a single uop at retirement.",
- "SampleAfterValue": "2000003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xD1",
- "UMask": "0x1",
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
"PublicDescription": "This event counts retired load uops which data sources were hits in the nearest-level (L1) cache.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load. This event also counts SW prefetches independent of the actual data source.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD1",
- "UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "Errata": "BDM35",
- "PublicDescription": "This event counts retired load uops which data sources were hits in the mid-level (L2) cache.",
+ "PublicDescription": "This event counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD1",
- "UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "Errata": "BDM100",
- "PublicDescription": "This event counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
- "SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "Errata": "BDM35",
"EventCode": "0xD1",
- "UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
- "Data_LA": "1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "PublicDescription": "This event counts retired load uops which data sources were misses in the nearest-level (L1) cache. Counting excludes unknown and UC data source.",
+ "PublicDescription": "This event counts retired load uops which data sources were hits in the mid-level (L2) cache.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xD1",
- "UMask": "0x10",
"BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
"PublicDescription": "This event counts retired load uops which data sources were misses in the mid-level (L2) cache. Counting excludes unknown and UC data source.",
"SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "Errata": "BDM100",
"EventCode": "0xD1",
- "UMask": "0x20",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops which data sources were data hits in the last-level (L3) cache without snoops required.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"Errata": "BDM100, BDE70",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xD1",
- "UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "BriefDescription": "Retired load uops.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "PublicDescription": "This event counts retired load uops which data sources were load uops missed L1 but hit a fill buffer due to a preceding miss to the same cache line with the data not ready.\nNote: Only two data-sources of L1/FB are applicable for AVX-256bit even though the corresponding AVX load could be serviced by a deeper level in the memory hierarchy. Data source is reported for the Low-half load.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Counts all retired load uops. This event accounts for SW prefetch uops of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
- "EventCode": "0xD2",
- "UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Retired store uops.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
- "Errata": "BDM100",
- "PublicDescription": "This event counts retired load uops which data sources were L3 Hit and a cross-core snoop missed in the on-pkg core cache.",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Counts all retired store uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
},
{
- "EventCode": "0xD2",
- "UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops with locked access.",
"Data_LA": "1",
+ "Errata": "BDM35",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
- "Errata": "BDM100",
- "PublicDescription": "This event counts retired load uops which data sources were L3 hit and a cross-core snoop hit in the on-pkg core cache.",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts load uops with locked access retired to the architected path.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
{
- "EventCode": "0xD2",
- "UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
- "Errata": "BDM100",
- "PublicDescription": "This event counts retired load uops which data sources were HitM responses from a core on same socket (shared L3).",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
- "EventCode": "0xD2",
- "UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
- "Errata": "BDM100",
- "PublicDescription": "This event counts retired load uops which data sources were hits in the last-level (L3) cache without snoops required.",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x42"
},
{
- "EventCode": "0xD3",
- "UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired load uops that miss the STLB.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
- "Errata": "BDE70, BDM100",
- "PublicDescription": "Retired load uop whose Data Source was: local DRAM either Snoop not needed or Snoop Miss (RspI).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts load uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
},
{
- "EventCode": "0xD3",
- "UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired store uops that miss the STLB.",
"Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
- "Errata": "BDE70",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts store uops with true STLB miss retired to the architected path. True STLB miss is an uop triggering page walk that gets completed without blocks, and later gets retired. This page walk can end up with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
},
{
- "EventCode": "0xD3",
- "UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
- "Errata": "BDE70",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "This event counts the demand and prefetch data reads. All Core Data Reads include cacheable Demands and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xD3",
- "UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
- "Errata": "BDE70",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "This event counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, and so on.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xF0",
- "UMask": "0x1",
- "BriefDescription": "Demand Data Read requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "PublicDescription": "This event counts Demand Data Read requests that access L2 cache, including rejects.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cacheable and non-cacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "This event counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF0",
- "UMask": "0x2",
- "BriefDescription": "RFO requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.RFO",
- "PublicDescription": "This event counts Read for Ownership (RFO) requests that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
- "UMask": "0x4",
- "BriefDescription": "L2 cache accesses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.CODE_RD",
- "PublicDescription": "This event counts the number of L2 cache accesses when fetching instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "This event counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF0",
- "UMask": "0x8",
- "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.ALL_PF",
- "PublicDescription": "This event counts L2 or L3 HW prefetches that access L2 cache including rejects.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "EventCode": "0xb2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "This event counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.\nNote: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
- "UMask": "0x10",
- "BriefDescription": "L1D writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L1D_WB",
- "PublicDescription": "This event counts L1D writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF0",
- "UMask": "0x20",
- "BriefDescription": "L2 fill requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L2_FILL",
- "PublicDescription": "This event counts L2 fill requests that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "This event counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF0",
- "UMask": "0x40",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L2_WB",
- "PublicDescription": "This event counts L2 writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "This event counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF0",
- "UMask": "0x80",
- "BriefDescription": "Transactions accessing L2 pipe",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "PublicDescription": "This event counts transactions that access the L2 pipe including snoops, pagewalks, and so on.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "This event counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF1",
- "UMask": "0x1",
- "BriefDescription": "L2 cache lines in I state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.I",
- "PublicDescription": "This event counts the number of L2 cache lines in the Invalidate state filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The Offcore outstanding state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF1",
- "UMask": "0x2",
- "BriefDescription": "L2 cache lines in S state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.S",
- "PublicDescription": "This event counts the number of L2 cache lines in the Shared state filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "This event counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.\nNote: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
- "UMask": "0x4",
- "BriefDescription": "L2 cache lines in E state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.E",
- "PublicDescription": "This event counts the number of L2 cache lines in the Exclusive state filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
- "UMask": "0x7",
- "BriefDescription": "L2 cache lines filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.ALL",
- "PublicDescription": "This event counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "BDM76",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "This event counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
- "UMask": "0x5",
- "BriefDescription": "Clean L2 cache lines evicted by demand.",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xf4",
- "UMask": "0x10",
- "BriefDescription": "Split locks in SQ",
- "Counter": "0,1,2,3",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "PublicDescription": "This event counts the number of split locks in the super queue.",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0244",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all requests hit in the L3",
- "MSRValue": "0x3F803C8FFF",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests hit in the L3",
+ "MSRValue": "0x10003C0091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0091",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C07F7",
- "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C07F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C07F7",
- "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x4003C07F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all requests hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0244",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x3F803C8FFF",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C0122",
- "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0122",
- "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x4003C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C0091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x3F803C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x10003C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
- "MSRValue": "0x3F803C0200",
- "Counter": "0,1,2,3",
+ "EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x3F803C0200",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
- "MSRValue": "0x3F803C0100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Offcore": "1",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x3F803C0100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3",
- "MSRValue": "0x3F803C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3",
+ "BriefDescription": "Split locks in SQ",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "This event counts the number of split locks in the super queue.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
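
(Editor's note, not part of the patch: the entries above describe raw Broadwell-X core PMU events by EventCode/UMask. Once these JSON files are built into perf, such an event can typically be selected by name, e.g. "perf stat -e mem_load_uops_retired.l1_hit"; it can also be programmed directly as a raw event. The following minimal sketch, assuming the conventional x86 raw encoding config = (UMask << 8) | EventCode and an illustrative workload, counts MEM_LOAD_UOPS_RETIRED.L1_HIT (EventCode 0xD1, UMask 0x01) via perf_event_open(2).)

/* Illustrative sketch only; not from the patch. Encoding and workload are assumptions. */
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x01d1;            /* UMask 0x01 << 8 | EventCode 0xD1 */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = perf_event_open(&attr, 0, -1, -1, 0);  /* this thread, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run the workload of interest here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("MEM_LOAD_UOPS_RETIRED.L1_HIT: %llu\n",
                       (unsigned long long)count);
        close(fd);
        return 0;
}
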
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
index ba0e0c4e74eb..e4826dc7f797 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/floating-point.json
@@ -1,165 +1,160 @@
[
{
- "EventCode": "0xC1",
- "UMask": "0x8",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
- "Errata": "BDM30",
- "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC1",
- "UMask": "0x10",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
- "Errata": "BDM30",
- "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC7",
- "UMask": "0x1",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xC7",
- "UMask": "0x2",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 4 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC7",
- "UMask": "0x3",
- "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RSQRT RCP SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single precision?)",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 8 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xC7",
- "UMask": "0x4",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
+ "SampleAfterValue": "2000006",
+ "UMask": "0x15"
},
{
- "EventCode": "0xC7",
- "UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* packed double and single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.PACKED",
+ "SampleAfterValue": "2000004",
+ "UMask": "0x3c"
},
{
- "EventCode": "0xC7",
- "UMask": "0x10",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation operation. Applies to SSE* and AVX* scalar double and single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x3"
},
{
- "EventCode": "0xC7",
- "UMask": "0x15",
- "BriefDescription": "Number of SSE/AVX computational double precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.DOUBLE",
- "SampleAfterValue": "2000006",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
"EventCode": "0xc7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC7",
- "UMask": "0x2a",
- "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired. Applies to SSE* and AVX*scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Number of SSE/AVX computational single precision floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar and packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
"EventName": "FP_ARITH_INST_RETIRED.SINGLE",
"SampleAfterValue": "2000005",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2a"
},
{
- "EventCode": "0xC7",
- "UMask": "0x3c",
- "BriefDescription": "Number of SSE/AVX computational packed floating-point instructions retired. Applies to SSE* and AVX*, packed, double and single precision floating-point: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element. (RSQRT for single-precision?)",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.PACKED",
- "SampleAfterValue": "2000004",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
+ "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1e"
},
{
+ "BriefDescription": "Number of SIMD FP assists due to input values",
"EventCode": "0xCA",
- "UMask": "0x2",
- "BriefDescription": "Number of X87 assists due to output value.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
"EventCode": "0xCA",
- "UMask": "0x4",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Number of X87 assists due to input value.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
"EventName": "FP_ASSIST.X87_INPUT",
"PublicDescription": "This event counts x87 floating point (FP) micro-code assist (invalid operation, denormal operand, SNaN operand) when the input value (one of the source operands to an FP instruction) is invalid.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of X87 assists due to output value.",
"EventCode": "0xCA",
- "UMask": "0x8",
- "BriefDescription": "Number of SIMD FP assists due to Output values",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
- "PublicDescription": "This event counts the number of SSE* floating point (FP) micro-code assist (numeric overflow/underflow) when the output value (destination register) is invalid. Counting covers only cases involving penalties that require micro-code assist intervention.",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "This event counts the number of x87 floating point (FP) micro-code assist (numeric overflow/underflow, inexact result) when the output value (destination register) is invalid.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCA",
- "UMask": "0x10",
- "BriefDescription": "Number of SIMD FP assists due to input values",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "This event counts any input SSE* FP assist - invalid operation, denormal operand, dividing by zero, SNaN operand. Counting includes only cases involving penalties that required micro-code assist intervention.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Errata": "BDM30",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "PublicDescription": "This event counts the number of transitions from AVX-256 to legacy SSE when penalty is applicable.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xCA",
- "UMask": "0x1e",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.ANY",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Errata": "BDM30",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "PublicDescription": "This event counts the number of transitions from legacy SSE to AVX-256 when penalty is applicable.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
+ "EventCode": "0xA0",
+ "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
+ "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
}
-]
\ No newline at end of file
+]
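
The floating-point entries above carry only EventCode/UMask pairs; on an Intel core PMU those two bytes are what ends up in the event-select register, and for a raw perf event they combine as config = (UMask << 8) | EventCode. A minimal sketch, assuming a Broadwell-class core PMU and that standard raw encoding (illustration only, not part of the patch), of counting FP_ARITH_INST_RETIRED.SCALAR_DOUBLE (EventCode 0xc7, UMask 0x1) directly through perf_event_open(2):

/* Sketch: count scalar double FP ops via a raw event, config = (umask << 8) | event. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x1ULL << 8) | 0xc7;	/* UMask 0x1, EventCode 0xc7 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

	/* Workload to measure: a little scalar double-precision math. */
	volatile double x = 1.0;
	for (int i = 0; i < 1000000; i++)
		x = x * 1.000001 + 0.5;

	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("scalar double FP ops retired: %llu\n",
		       (unsigned long long)count);
	close(fd);
	return 0;
}

With the JSON above installed, the same event is of course reachable symbolically, e.g. perf stat -e fp_arith_inst_retired.scalar_double.
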
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
index 72781e1e3362..bd5da39564e1 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/frontend.json
@@ -1,286 +1,239 @@
[
{
- "EventCode": "0x79",
- "UMask": "0x2",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.EMPTY",
- "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
},
{
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFDATA_STALL",
+ "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
"EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ.",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "This counts the number of cycles that the instruction decoder queue is empty and can indicate that the application may be bound in the front end. It does not determine whether there are uops being delivered to the Alloc stage since uops can be delivered by bypass skipping the Instruction Decode Queue (IDQ) when it is empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "This event counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
- "EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
"EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequenser (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "This event counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "PublicDescription": "This event counts the number of deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"EventCode": "0x79",
- "UMask": "0x3c",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "This event counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may bypass the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x80",
- "UMask": "0x1",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.HIT",
- "PublicDescription": "This event counts the number of both cacheable and noncacheable Instruction Cache, Streaming Buffer and Victim Cache Reads including UC fetches.",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "This event counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x80",
- "UMask": "0x2",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.MISSES",
- "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes UC accesses.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.IFDATA_STALL",
- "PublicDescription": "This event counts cycles during which the demand fetch waits for data (wfdM104H) from L2 or iSB (opportunistic hit).",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts the total number of uops delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may bypass the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
+ "EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when:\n a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread;\n b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions); \n c. Instruction Decode Queue (IDQ) delivers four uops.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"PublicDescription": "This event counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
"EventCode": "0x9C",
- "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"PublicDescription": "This event counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >=3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Invert": "1",
"EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xAB",
- "UMask": "0x2",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "Counter": "0,1,2,3",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "This event counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. \nMM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.\nPenalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
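
Several of the frontend entries above also use CounterMask, EdgeDetect and Invert qualifiers. On the Intel core PMU these fold into the same raw config word perf programs (event select bits 0-7, umask bits 8-15, edge bit 18, inv bit 23, cmask bits 24-31, matching the fields exposed under /sys/bus/event_source/devices/cpu/format). A small sketch, assuming that layout (illustration only, not part of the patch), of how entries such as IDQ.MS_SWITCHES end up encoded:

/* Sketch: fold EventCode/UMask/CounterMask/EdgeDetect/Invert into a raw config word. */
#include <stdio.h>
#include <stdint.h>

static uint64_t raw_config(uint8_t event, uint8_t umask,
			   uint8_t cmask, int edge, int inv)
{
	return (uint64_t)event |
	       ((uint64_t)umask << 8) |
	       ((uint64_t)!!edge << 18) |
	       ((uint64_t)!!inv << 23) |
	       ((uint64_t)cmask << 24);
}

int main(void)
{
	/* IDQ.MS_SWITCHES: EventCode 0x79, UMask 0x30, CounterMask 1, EdgeDetect 1 */
	printf("idq.ms_switches             -> r%llx\n",
	       (unsigned long long)raw_config(0x79, 0x30, 1, 1, 0));
	/* IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK: 0x9C/0x1, CounterMask 1, Invert 1 */
	printf("cycles_fe_was_ok            -> r%llx\n",
	       (unsigned long long)raw_config(0x9c, 0x01, 1, 0, 1));
	/* ICACHE.IFDATA_STALL: plain EventCode 0x80, UMask 0x4 */
	printf("icache.ifdata_stall         -> r%llx\n",
	       (unsigned long long)raw_config(0x80, 0x04, 0, 0, 0));
	return 0;
}

The printed rNNNN values could be handed to perf stat -e as raw events, though with this patch applied the symbolic names above are the intended interface.
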
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
index ecb413bb67ca..a7449e5b68dc 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/memory.json
@@ -1,679 +1,525 @@
[
{
- "EventCode": "0x05",
- "UMask": "0x1",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
- "Counter": "0,1,2,3",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x05",
- "UMask": "0x2",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
- "Counter": "0,1,2,3",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x1",
- "BriefDescription": "Number of times a TSX line had a cache conflict",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x2",
- "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x4",
- "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x8",
- "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x10",
- "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x20",
- "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x40",
- "BriefDescription": "Number of times we could not allocate Lock Buffer",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC2",
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x4",
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC3",
- "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x8",
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC4",
- "PublicDescription": "RTM region detected inside HLE.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x10",
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xc8",
- "UMask": "0x1",
- "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.START",
- "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xc8",
- "UMask": "0x2",
- "BriefDescription": "Number of times HLE commit succeeded",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.COMMIT",
- "PublicDescription": "Number of times HLE commit succeeded.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xc8",
- "UMask": "0x4",
"BriefDescription": "Number of times HLE abort was triggered",
- "PEBS": "1",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
"PublicDescription": "Number of times HLE abort was triggered.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xc8",
- "UMask": "0x8",
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
"PublicDescription": "Number of times an HLE abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xc8",
- "UMask": "0x10",
"BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
"PublicDescription": "Number of times the TSX watchdog signaled an HLE abort.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xc8",
- "UMask": "0x20",
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
"PublicDescription": "Number of times a disallowed operation caused an HLE abort.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xc8",
- "UMask": "0x40",
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC4",
"PublicDescription": "Number of times HLE caused a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xc8",
- "UMask": "0x80",
"BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times HLE aborted and was not due to the abort conditions in subevents 3-6.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xc9",
- "UMask": "0x1",
- "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.START",
- "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "EventCode": "0xc9",
- "UMask": "0x2",
- "BriefDescription": "Number of times RTM commit succeeded",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.COMMIT",
- "PublicDescription": "Number of times RTM commit succeeded.",
+ "BriefDescription": "Number of times HLE commit succeeded",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xc9",
- "UMask": "0x4",
- "BriefDescription": "Number of times RTM abort was triggered",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered .",
+ "BriefDescription": "Number of times we entered an HLE region; does not count nested transactions",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region\n does not count nested transactions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x8",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC1",
- "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:\n1. memory disambiguation,\n2. external snoop, or\n3. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xc9",
- "UMask": "0x10",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC2",
- "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Randomly selected loads with latency value being above 128",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x20",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC3",
- "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Randomly selected loads with latency value being above 16",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x40",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC4",
- "PublicDescription": "Number of times a RTM caused a fault.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Randomly selected loads with latency value being above 256",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x80",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC5",
- "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Randomly selected loads with latency value being above 32",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
"BriefDescription": "Randomly selected loads with latency value being above 4",
- "PEBS": "2",
- "MSRValue": "0x4",
- "Counter": "3",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
+ "MSRValue": "0x4",
+ "PEBS": "2",
"PublicDescription": "Counts randomly selected loads with latency value being above four.",
- "TakenAlone": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 8",
- "PEBS": "2",
- "MSRValue": "0x8",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
+ "BriefDescription": "Randomly selected loads with latency value being above 512",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
- "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
- "TakenAlone": "1",
- "SampleAfterValue": "50021",
- "CounterHTOff": "3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 16",
- "PEBS": "2",
- "MSRValue": "0x10",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "Counts randomly selected loads with latency value being above 16.",
- "TakenAlone": "1",
- "SampleAfterValue": "20011",
- "CounterHTOff": "3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 32",
+ "MSRValue": "0x200",
"PEBS": "2",
- "MSRValue": "0x20",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "Counts randomly selected loads with latency value being above 32.",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "3"
+ "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
"BriefDescription": "Randomly selected loads with latency value being above 64",
- "PEBS": "2",
- "MSRValue": "0x40",
- "Counter": "3",
+ "Data_LA": "1",
+ "Errata": "BDM100, BDM35",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
+ "MSRValue": "0x40",
+ "PEBS": "2",
"PublicDescription": "Counts randomly selected loads with latency value being above 64.",
- "TakenAlone": "1",
"SampleAfterValue": "2003",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 128",
- "PEBS": "2",
- "MSRValue": "0x80",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
+ "BriefDescription": "Randomly selected loads with latency value being above 8",
+ "Data_LA": "1",
"Errata": "BDM100, BDM35",
- "PublicDescription": "Counts randomly selected loads with latency value being above 128.",
- "TakenAlone": "1",
- "SampleAfterValue": "1009",
- "CounterHTOff": "3"
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads with latency value being above eight.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 256",
- "PEBS": "2",
- "MSRValue": "0x100",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "Counts randomly selected loads with latency value being above 256.",
- "TakenAlone": "1",
- "SampleAfterValue": "503",
- "CounterHTOff": "3"
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "This event counts speculative cache-line split load uops dispatched to the L1 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 512",
- "PEBS": "2",
- "MSRValue": "0x200",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "Errata": "BDM100, BDM35",
- "PublicDescription": "Counts randomly selected loads with latency value being above 512.",
- "TakenAlone": "1",
- "SampleAfterValue": "101",
- "CounterHTOff": "3"
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "This event counts speculative cache line split store-address (STA) uops dispatched to the L1 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all requests miss in the L3",
- "MSRValue": "0x3FBFC08FFF",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests miss in the L3",
+ "MSRValue": "0x3FBFC00244",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x087FC007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x604000244",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103FC007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x3FBFC00091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063BC007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x604000091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
- "MSRValue": "0x06040007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x63BC00091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
- "MSRValue": "0x3FBFC007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+ "MSRValue": "0x103FC00091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0604000244",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x87FC00091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
- "MSRValue": "0x3FBFC00244",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "MSRValue": "0x3FBFC007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0604000122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x6040007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
- "MSRValue": "0x3FBFC00122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "MSRValue": "0x63BC007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x087FC00091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x103FC007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103FC00091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x87FC007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all requests miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063BC00091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x3FBFC08FFF",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0604000091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x3FBFC00122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
- "MSRValue": "0x3FBFC00091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "MSRValue": "0x604000122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
- "MSRValue": "0x3FBFC00200",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x3FBFC00002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
- "MSRValue": "0x3FBFC00100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "MSRValue": "0x103FC00002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103FC00002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x3FBFC00200",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
- "MSRValue": "0x3FBFC00002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "MSRValue": "0x3FBFC00100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times RTM abort was triggered",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered .",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an RTM abort was attributed to a Memory condition (See TSX_Memory event for additional details).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "PublicDescription": "Number of times the TSX watchdog signaled an RTM abort.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "PublicDescription": "Number of times a disallowed operation caused an RTM abort.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "PublicDescription": "Number of times a RTM caused a fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times RTM aborted and was not due to the abort conditions in subevents 3-6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times RTM commit succeeded",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times we entered an RTM region; does not count nested transactions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region\n does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to an evicted line caused by a transaction overflow.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a TSX line had a cache conflict",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times we could not allocate Lock Buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/other.json b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
index 4475249ea9da..1c2a5b001949 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/other.json
@@ -1,44 +1,36 @@
[
{
- "EventCode": "0x5C",
- "UMask": "0x1",
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
- "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
- "UMask": "0x1",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
- "Counter": "0,1,2,3",
"EventName": "CPL_CYCLES.RING0_TRANS",
- "CounterMask": "1",
"PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
- "UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
- "Counter": "0,1,2,3",
+ "EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
index c2f6932a5817..9a902d2160e6 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/pipeline.json
@@ -1,1423 +1,1116 @@
[
{
- "UMask": "0x1",
- "BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 0",
- "EventName": "INST_RETIRED.ANY",
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 0"
- },
- {
- "UMask": "0x2",
- "BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "UMask": "0x3",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x03",
- "UMask": "0x2",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x03",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x07",
- "UMask": "0x1",
- "BriefDescription": "False dependencies in MOB due to partial compare",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x8",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.ANY",
- "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x0E",
- "UMask": "0x10",
- "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.FLAGS_MERGE",
- "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0E",
- "UMask": "0x20",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0E",
- "UMask": "0x40",
- "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.SINGLE_MUL",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x14",
- "UMask": "0x1",
"BriefDescription": "Cycles when divider is busy executing divide operations",
- "Counter": "0,1,2,3",
+ "EventCode": "0x14",
"EventName": "ARITH.FPU_DIV_ACTIVE",
"PublicDescription": "This event counts the number of the divide operations executed. Uses edge-detect and a cmask value of 1 on ARITH.FPU_DIV_ACTIVE to get the number of the divide operations executed.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3c",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4c",
- "UMask": "0x1",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
- "Counter": "0,1,2,3",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4C",
- "UMask": "0x2",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
- "Counter": "0,1,2,3",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "UMask": "0x1",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "UMask": "0x2",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x58",
- "UMask": "0x4",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "EventCode": "0x58",
- "UMask": "0x8",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired direct near calls",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd0"
},
{
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
- "EventCode": "0x87",
- "UMask": "0x1",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "Counter": "0,1,2,3",
- "EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts stalls occured due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "EventCode": "0x88",
- "UMask": "0x41",
"BriefDescription": "Not taken macro-conditional branches",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken macro-conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x88",
- "UMask": "0x81",
"BriefDescription": "Taken speculative and retired macro-conditional branches",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "EventCode": "0x88",
- "UMask": "0x82",
"BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"PublicDescription": "This event counts taken speculative and retired macro-conditional branch instructions excluding calls and indirect branches.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Taken speculative and retired direct near calls",
"EventCode": "0x88",
- "UMask": "0x84",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired direct near calls.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
+ },
+ {
"BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired indirect branches excluding calls and return branches.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
+ "BriefDescription": "Taken speculative and retired indirect calls",
"EventCode": "0x88",
- "UMask": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
+ },
+ {
"BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
- "Counter": "0,1,2,3",
+ "EventCode": "0x88",
"EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"PublicDescription": "This event counts taken speculative and retired indirect branches that have a return mnemonic.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "EventCode": "0x88",
- "UMask": "0x90",
- "BriefDescription": "Taken speculative and retired direct near calls",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "PublicDescription": "This event counts taken speculative and retired direct near calls.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x88",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired indirect calls",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "PublicDescription": "This event counts taken speculative and retired indirect calls including both register and memory indirect.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
+ "Errata": "BDW98",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x88",
- "UMask": "0xc1",
- "BriefDescription": "Speculative and retired macro-conditional branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "PublicDescription": "This event counts both taken and not taken speculative and retired macro-conditional branch instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "UMask": "0xc2",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "PublicDescription": "This event counts both taken and not taken speculative and retired macro-unconditional branch instructions, excluding calls and indirects.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Far branch instructions retired.",
+ "Errata": "BDW98",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "This event counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "EventCode": "0x88",
- "UMask": "0xc4",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches excluding calls and return branches.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x88",
- "UMask": "0xc8",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "PublicDescription": "This event counts both taken and not taken speculative and retired indirect branches that have a return mnemonic.",
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect macro near call instructions retired (captured in ring 3).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "EventCode": "0x88",
- "UMask": "0xd0",
- "BriefDescription": "Speculative and retired direct near calls",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "PublicDescription": "This event counts both taken and not taken speculative and retired direct near calls.",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
- "EventCode": "0x88",
- "UMask": "0xff",
- "BriefDescription": "Speculative and retired branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "PublicDescription": "This event counts both taken and not taken speculative and retired branch instructions.",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
+ "BriefDescription": "Speculative mispredicted indirect branches",
"EventCode": "0x89",
- "UMask": "0x41",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
"BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"PublicDescription": "This event counts not taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x89",
- "UMask": "0x81",
"BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"PublicDescription": "This event counts taken speculative and retired mispredicted macro conditional branch instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "EventCode": "0x89",
- "UMask": "0x84",
"BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
- "Counter": "0,1,2,3",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
"EventCode": "0x89",
- "UMask": "0x88",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
+ },
+ {
"BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
- "Counter": "0,1,2,3",
+ "EventCode": "0x89",
"EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"PublicDescription": "This event counts taken speculative and retired mispredicted indirect branches that have a return mnemonic.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x89",
- "UMask": "0xc1",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
- "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted macro conditional branch instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x89",
- "UMask": "0xc4",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "PublicDescription": "This event counts both taken and not taken mispredicted indirect branches excluding calls and returns.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "UMask": "0xff",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0xA0",
- "UMask": "0x3",
- "BriefDescription": "Micro-op dispatches cancelled due to insufficient SIMD physical register file read ports",
- "Counter": "0,1,2,3",
- "EventName": "UOP_DISPATCHES_CANCELLED.SIMD_PRF",
- "PublicDescription": "This event counts the number of micro-operations cancelled after they were dispatched from the scheduler to the execution units when the total number of physical register read ports across all dispatch ports exceeds the read bandwidth of the physical register file. The SIMD_PRF subevent applies to the following instructions: VDPPS, DPPS, VPCMPESTRI, PCMPESTRI, VPCMPESTRM, PCMPESTRM, VFMADD*, VFMADDSUB*, VFMSUB*, VMSUBADD*, VFNMADD*, VFNMSUB*. See the Broadwell Optimization Guide for more information.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "This is a fixed-frequency event programmed to general counters. It counts when the core is unhalted at 100 Mhz.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are exectuted in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. \nNote: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. This event is clocked by base clock (100 Mhz) on Sandy Bridge. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are exectuted in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
"AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are exectuted in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are exectuted in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are exectuted in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "This event counts stalls occurred due to changing prefix length (66, 67 or REX.W when they change the length of the decoded instruction). Occurrences counting is proportional to the number of prefixes in a 16B-line. This may result in the following penalties: three-cycle penalty for each LCP in a 16-byte chunk.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. \nNotes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. \nCounting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xa2",
- "UMask": "0x1",
- "BriefDescription": "Resource-related stall cycles",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "This event counts resource-related stall cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Errata": "BDM61",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA2",
- "UMask": "0x4",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.RS",
- "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Errata": "BDM11, BDM55",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA2",
- "UMask": "0x8",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.SB",
- "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA2",
- "UMask": "0x10",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.ROB",
- "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "PublicDescription": "This event counts the number of cycles during which Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the current thread. This also includes the cycles during which the Allocator is serving another thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
"CounterMask": "1",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand* load request missing the L2 cache.",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Cycles checkpoints in Resource Allocation Table (RAT) are recovering from JEClear or machine clear.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
- "CounterMask": "2",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request (that is cycles with non-completed load waiting for its data from memory subsystem).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:\n - preceding store conflicts with the load (incomplete overlap);\n - store forwarding is impossible due to u-arch limitations;\n - preceding lock RMW operations are not forwarded;\n - store has the no-forward bit set (uncacheable/page-split/masked stores);\n - all-blocking stores are used (mostly, fences and port I/O);\nand others.\nThe most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events.\nSee the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
- "CounterMask": "4",
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "False dependencies in MOB due to partial compare",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "This event counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the hardware prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "This event counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by asm inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
"CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
- "CounterMask": "5",
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand* load request missing the L2 cache.(as a footprint) * includes also L1 HW prefetch requests that may or may not be required by demands.",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
- "CounterMask": "6",
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "6",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.CYCLES",
+ "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Counts number of cycles the CPU has at least one pending demand load request missing the L1 data cache.",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Resource-related stall cycles",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "This event counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "PublicDescription": "This event counts ROB full stall cycles. This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x10"
},
{
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
- "CounterMask": "12",
- "PublicDescription": "Counts number of cycles nothing is executed on any execution port, while there was at least one pending demand load request missing the L1 data cache.",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.RS",
+ "PublicDescription": "This event counts stall cycles caused by absence of eligible entries in the reservation station (RS). This may result from RS overflow, or from RS deallocation because of the RS array Write Port allocation scheme (each RS entry has two write ports instead of four. As a result, empty entries could not be used, although RS is not really full). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "This event counts stall cycles caused by the store buffer (SB) overflow (excluding draining from synch). This counts cycles that the pipeline backend blocked uop delivery from the front end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.UOPS",
+ "BriefDescription": "Count cases of saving new LBR",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "This event counts cycles during which the reservation station (RS) is empty for the thread.\nNote: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.THREAD",
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
"BriefDescription": "Number of uops executed on the core.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
"PublicDescription": "Number of uops executed from any thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
"BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "BDM61",
- "PublicDescription": "This event counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC0",
- "UMask": "0x1",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "PEBS": "2",
- "Counter": "1",
- "EventName": "INST_RETIRED.PREC_DIST",
- "Errata": "BDM11, BDM55",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts instructions retired.",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "CounterHTOff": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC0",
- "UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions:",
- "Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This event counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC1",
- "UMask": "0x40",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Actually retired uops.",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Cycles without actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts cycles without actually retired uops.",
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "CounterMask": "10",
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This event counts the number of retirement slots used.",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.CYCLES",
- "PublicDescription": "This event counts both thread-specific (TS) and all-thread (AT) nukes.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC3",
- "UMask": "0x4",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.SMC",
- "PublicDescription": "This event counts self-modifying code (SMC) detected, which causes a machine clear.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC3",
- "UMask": "0x20",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xC4",
- "UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "This event counts conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xC4",
- "UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 4.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC4",
- "UMask": "0x2",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "PublicDescription": "This event counts both direct and indirect macro near call instructions retired (captured in ring 3).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "BDW98",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xC4",
- "UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "This event counts return instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xC4",
- "UMask": "0x10",
- "BriefDescription": "Not taken branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This event counts not taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC4",
- "UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "This event counts taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC4",
- "UMask": "0x40",
- "BriefDescription": "Far branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "Errata": "BDW98",
- "PublicDescription": "This event counts far branch instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "This event counts, on the per-thread basis, cycles during which uops are dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xC5",
- "UMask": "0x0",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "This event counts all mispredicted macro branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive\n added by GSR u-arch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC5",
- "UMask": "0x8",
- "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Actually retired uops.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RET",
- "PublicDescription": "This event counts mispredicted return instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts all actually retired uops. Counting increments by two for micro-fused uops, and by one for macro-fused and other uops. Maximal increment value for one cycle is eight.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts the number of retirement slots used.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "This event counts cases of saving new LBR records by hardware. This assumes proper enabling of LBRs and takes into account LBR filtering done by the LBR_SELECT register.",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xe6",
- "UMask": "0x1f",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "Counter": "0,1,2,3",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
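
[Aside: the records in these pmu-events files are flat JSON arrays of objects keyed by fields visible in this diff, such as EventName, EventCode, UMask and BriefDescription (plus PerPkg/Unit/Filter for the uncore events below). As a rough, hypothetical illustration only, not part of this patch, the short Python sketch below shows one way such a file could be inspected after the series is applied; the path is an assumption taken from the files touched here.]

    #!/usr/bin/env python3
    # Minimal sketch (not part of this patch): list event names, codes and
    # umasks from one pmu-events JSON file.  The path is an assumption based
    # on the files touched by this diff.
    import json

    path = "tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json"
    with open(path) as f:
        events = json.load(f)   # each file is a flat JSON array of objects

    for ev in events:
        # Some records (e.g. UNC_C_CLOCKTICKS) carry no EventCode, so use
        # .get() with a fallback rather than direct indexing.
        name = ev.get("EventName", "?")
        code = ev.get("EventCode", "n/a")
        umask = ev.get("UMask", "n/a")
        print(f"{name:45} EventCode={code:5} UMask={umask}")

[Run from the top of the tree; it only reads and prints the event metadata shown in the hunks below.]
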
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
index 58ed6d33d1f4..400d784d1457 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-cache.json
@@ -1,206 +1,1334 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
- "EventName": "UNC_C_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "CBO"
- },
- {
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
- "Counter": "0,1,2,3",
- "EventCode": "0x34",
- "EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
+ "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
+ "Filter": "filter_opc=0x191",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
- "UMask": "0x11",
- "Unit": "CBO"
+ "UMask": "0x3",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+ "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
+ "Filter": "filter_opc=0x192",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "UMask": "0x3",
+ "Unit": "CBOX"
},
{
"BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.DATA_READ",
"Filter": "filter_opc=0x182",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
- },
- {
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
"BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_READ",
"Filter": "filter_opc=0x187,filter_nc=1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
"BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_WRITE",
"Filter": "filter_opc=0x18f,filter_nc=1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
- "Filter": "filter_opc=0x190",
+ "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
+ "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x191",
+ "EventName": "LLC_MISSES.PCIE_READ",
+ "Filter": "filter_opc=0x19e",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
- "Filter": "filter_opc=0x192",
+ "EventName": "LLC_MISSES.PCIE_WRITE",
+ "Filter": "filter_opc=0x1c8",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
+ "Filter": "filter_opc=0x190",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "LLC_MISSES.UNCACHEABLE",
+ "Filter": "filter_opc=0x187",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
+ "Filter": "filter_opc=0x181",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "UMask": "0x1",
+ "Unit": "CBOX"
},
{
"BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
"Filter": "filter_opc=0x180,filter_tid=0x3e",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x181",
+ "EventName": "LLC_REFERENCES.PCIE_READ",
+ "Filter": "filter_opc=0x19e",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"ScaleUnit": "64Bytes",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.PCIE_WRITE",
+ "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBOX"
},
{
"BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_FULL",
"Filter": "filter_opc=0x18c",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"ScaleUnit": "64Bytes",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
"Filter": "filter_opc=0x18d",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"ScaleUnit": "64Bytes",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "BriefDescription": "Bounce Control",
+ "EventCode": "0xA",
+ "EventName": "UNC_C_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1F",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "EventCode": "0x9",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local distress or incoming distress signals are asserted. Incoming distress includes both up and dn.",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.ANY",
+ "Filter": "filter_state=0x1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x11",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "UMask": "0x9",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x5",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.I_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "M line evictions from LLC (writebacks to memory)",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"ScaleUnit": "64Bytes",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Number of times that an RFO hit in S state. This is useful for determining if it might be good for a workload to use RspIWB instead of RspSWB.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE0",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 0",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE1",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 2",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE2",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 2",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE3",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 3",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.LRU_DECREMENT",
+ "PerPkg": "1",
+ "PublicDescription": "How often all LRU bits were decremented by 1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
+ "PerPkg": "1",
+ "PublicDescription": "How often we picked a victim that had a non-zero age",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; All",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX-- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; All",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters for Down polarity",
+ "UMask": "0xcc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in BDX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "IV",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.",
+ "EventCode": "0x7",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IPQ is externally startved and therefore we are blocking the IRQ.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IRQ is externally starved and therefore we are blocking the IPQ.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; Number of times that the ISMQ Bid.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IPQ in Internal Starvation.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IRQ in Internal Starvation.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the ISMQ in Internal Starvation.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from an address conflicts. Address conflicts out of the IPQ should be rare. They will generally only occur if two different sockets are sending requests to the same address at the same time. This is a true conflict case, unlike the IPQ Address Conflict which is commonly caused by prefetching characteristics.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject. TOR rejects from the IPQ can be caused by the Egress being full or Address Conflicts.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from the Egress being full. IPQ requests make use of the AD Egress for regular responses, the BL egress to forward data, and the AK egress to return credits.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request from the IPQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because of an address match in the TOR. In order to maintain coherency, requests to the same address are not allowed to pass each other up in the Cbo. Therefore, if there is an outstanding request to a given address, one cannot issue another request to that address until it is complete. This comes up most commonly with prefetches. Outstanding prefetches occasionally will not complete their memory fetch and a demand request to the same address will then sit in the IRQ and get retried until the prefetch fills the data into the LLC. Therefore, it will not be uncommon to see this case in high bandwidth streaming workloads when the LLC Prefetcher in the core is enabled.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of IRQ retries that occur. Requests from the IRQ are retried if they are rejected from the TOR pipeline for a variety of reasons. Some of the most common reasons include if the Egress is full, there are no RTIDs, or there is a Physical Address match to another outstanding request.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because it failed to acquire an entry in the Egress. The egress is the buffer that queues up for allocating onto the ring. IRQ requests can make use of all four rings and all four Egresses. If any of the queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of requests rejects because of lack of QPI Ingress credits. These credits are required in order to send transactions to the QPI agent. Please see the QPI_IGR_CREDITS events for more information.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that requests from the IRQ were retried because there were no RTIDs available. RTIDs are required after a request misses the LLC and needs to send snoops and/or requests to memory. If there are no RTIDs available, requests will queue up in the IRQ and retry until one becomes available. Note that there are multiple RTID pools for the different sockets. There may be cases where the local RTIDs are all used, but requests destined for remote memory can still acquire an RTID because there are remote RTIDs available. This event does not provide any filtering for this case.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an BL packet to the Sbo.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the total number of times that a request from the ISMQ retried because of a TOR reject. ISMQ requests generally will not need to retry (or at least ISMQ retries are less common than IRQ retries). ISMQ requests will retry if they are not able to acquire a needed Egress credit to get onto the ring, or for cache evictions that need to acquire an RTID. Most ISMQ requests already have an RTID, so eviction retries will be less common here.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by a lack of Egress credits. The egress is the buffer that queues up for allocating onto the ring. If any of the Egress queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by no RTIDs. M-state cache evictions are serviced through the ISMQ, and must acquire an RTID in order to write back to memory. If no RTIDs are available, they will be retried.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x80",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried because of it lacked credits to send an BL packet to the Sbo.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Eviction transactions inserted into the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Local Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x2a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x8a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched (matches an RTID destination) transactions inserted into the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched eviction transactions inserted into the TOR.",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched miss requests that were inserted into the TOR.",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched write transactions inserted into the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Opcode Match",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Writebacks",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Write transactions inserted into the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All valid TOR entries. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding eviction transactions in the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
},
{
"BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
@@ -208,109 +1336,2230 @@
"EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
"Filter": "filter_opc=0x182",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries for miss transactions that match an opcode. This generally means that the request was sent to memory or MMIO.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding miss requests in the TOR. 'Miss' means the allocation requires an RTID. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0xa",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x2a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries for miss transactions that match an opcode. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x8a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of NID matched outstanding requests in the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid.In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding NID matched eviction transactions in the TOR .",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID.",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); NID matched write transactions int the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc).",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Write transactions in the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto AD Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto AK Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto BL Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AK ring. This is commonly used for snoop responses coming from the core and destined for a Cachebo.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the core AD egress spent in starvation",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both AK egresses spent in starvation",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both BL egresses spent in starvation",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the cachebo IV egress spent in starvation",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming snoop hazard",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the bypass.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Reads where Direct2Core overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lat Opt Return",
+ "EventCode": "0x41",
+ "EventName": "UNC_H_DIRECTORY_LAT_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "Directory Latency Optimization Data Return Path Taken. When directory mode is enabled and the directory returned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that did not have to send any snoops because the directory bit was clear.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that had to send one or more snoops because the directory bit was set.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory clears. This occurs when snoops were sent and all returned with RspI.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory sets. This occurs when a remote read transaction requests memory, bringing it to a remote cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "EventCode": "0x1E",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.CANCELLED",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.; OSB Snoop broadcast cancelled due to D2C or Other. OSB cancel is counted when OSB local read is not allowed even when the transaction in local InItoE. It also counts D2C OSB cancel, but also includes the cases were D2C was not set in the first place for the transaction coming from the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE_USEFUL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; All",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x4",
+ "Unit": "HA"
},
{
- "BriefDescription": "read requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from the local socket.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming ead requests. This is a good proxy for LLC Read Misses (including RFOs).",
"UMask": "0x3",
"Unit": "HA"
},
{
- "BriefDescription": "read requests to local home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Local Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the local socket. This is a good proxy for LLC Read Misses (including RFOs) from the local socket.",
"UMask": "0x1",
"Unit": "HA"
},
{
- "BriefDescription": "read requests to remote home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Remote Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the remote socket. This is a good proxy for LLC Read Misses (including RFOs) from the remote socket.",
"UMask": "0x2",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
- "UMask": "0xC",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming write requests.",
+ "UMask": "0xc",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to local home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Local Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from the local socket.",
"UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to remote home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Remote Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from remote sockets.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; All",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
"UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously)",
- "Counter": "0,1,2,3",
+ "BriefDescription": "HA AK Ring in Use; Clockwise",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; All",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of reads when the snoop was on the critical path to the data return.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of reads when the snoop was on the critical path to the data return.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; Tracked for snoops from both local and remote sockets.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Snoop Responses Received; RspI",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x20",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
"Unit": "HA"
},
{
"BriefDescription": "M line forwarded from remote cache with no writeback to memory",
- "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"ScaleUnit": "64Bytes",
"UMask": "0x4",
"Unit": "HA"
},
{
"BriefDescription": "Shared line response from remote cache",
- "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"ScaleUnit": "64Bytes",
"UMask": "0x2",
"Unit": "HA"
},
{
"BriefDescription": "Shared line forwarded from remote cache",
- "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"ScaleUnit": "64Bytes",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Other",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for all other snoop responses.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 2",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 3",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 4",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 5",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 6",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 7",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 10",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 11",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 8",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 9",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Counts the number of cycles when the HA tracker pool (HT) is completely used including reserved HT entries. It will not return valid count when BT is disabled.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Counts the number of cycles when the general purpose (GP) HA tracker pool (HT) is completely used. It will not return valid count when BT is disabled.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Requests coming from both local and remote sockets.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local InvItoE Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote InvItoE Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local Read Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote Read Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local Write Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote Write Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumulator; Local Requests",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumulator; Remote Requests",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "EventCode": "0xF",
+ "EventName": "UNC_H_TxR_AD.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.; Filter for outbound NDR transactions sent on the AD ring. NDR stands for non-data response and is generally used for completions that do not include data. AD NDR is used for transactions to remote sockets.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to the cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent directly to the requesting core.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to a remote socket over QPI.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For AK Ring",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For BL Ring",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
"UMask": "0x8",
"Unit": "HA"
}
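
The HA tracker event descriptions in the hunk above note that the occupancy accumulator can be combined with the not-empty cycle count and an allocation count to derive average queue occupancy and latency. A minimal Python sketch of that arithmetic, assuming the raw counter values for one measurement interval have already been collected (the variable names and the separate allocation count are illustrative assumptions, not part of this file):

    # Hedged sketch: combining the HA tracker counters described above.
    # occupancy_sum    ~ UNC_H_TRACKER_OCCUPANCY.*  (occupancy accumulated every cycle)
    # cycles_not_empty ~ UNC_H_TRACKER_CYCLES_NE.*  (cycles the tracker pool was not empty)
    # allocations      ~ tracker allocations over the same interval (assumed to be
    #                    collected separately; no allocation event appears in this hunk)
    def tracker_stats(occupancy_sum, cycles_not_empty, allocations):
        # Average entries in the tracker while it was busy.
        avg_occupancy_when_busy = occupancy_sum / cycles_not_empty if cycles_not_empty else 0.0
        # Average residency per allocated request, in cycles (Little's law style).
        avg_latency_cycles = occupancy_sum / allocations if allocations else 0.0
        return avg_occupancy_when_busy, avg_latency_cycles

    # Example with made-up counter readings:
    print(tracker_stats(occupancy_sum=1.2e9, cycles_not_empty=4.0e8, allocations=5.0e7))
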
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
index 824961318c1e..e61a23f68899 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-interconnect.json
@@ -1,28 +1,4018 @@
[
{
- "BriefDescription": "QPI clock ticks",
- "Counter": "0,1,2,3",
- "EventCode": "0x14",
- "EventName": "UNC_Q_CLOCKTICKS",
+ "BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
+ "EventName": "QPI_CTL_BANDWIDTH_TX",
"PerPkg": "1",
- "Unit": "QPI LL"
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
+ "ScaleUnit": "8Bytes",
+ "UMask": "0x4",
+ "Unit": "QPI"
},
{
"BriefDescription": "Number of data flits transmitted . Derived from unc_q_txl_flits_g0.data",
- "Counter": "0,1,2,3",
"EventName": "QPI_DATA_BANDWIDTH_TX",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
"ScaleUnit": "8Bytes",
"UMask": "0x2",
- "Unit": "QPI LL"
+ "Unit": "QPI"
},
{
- "BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
- "Counter": "0,1,2,3",
- "EventName": "QPI_CTL_BANDWIDTH_TX",
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIItoM",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; RFO",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; WbMtoI",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_TIMEOUT",
+ "PerPkg": "1",
+ "PublicDescription": "Indicates the fetch for a previous prefetch wasn't accepted by the prefetch. This happens in the case of a prefetch TimeOut",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.DATA_THROTTLE",
+ "PerPkg": "1",
+ "PublicDescription": "IRP throttled switch data",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit E or S",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit I",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit M",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Miss",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpCode",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpData",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpInv",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of write prefetches.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of qfclks",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/4th the GT/s speed of the QPI link. For example, a 4GT/s link will have qfclk or 1GHz. BDX does not support dynamic link speeds, so this frequency is fixed.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Count of CTO Events",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits. Had there been enough credits, the spawn would have worked as the RBT bit was set and the RBT tag matched.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and there weren't enough Egress credits. The valid bit was set.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits AND the RBT bit was not set, but the RBT tag matched.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match, the valid bit was not set and there weren't enough Egress credits.",
+ "UMask": "0x80",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match although the valid bit was set and there were enough Egress credits.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the route-back table (RBT) specified that the transaction should not trigger a direct2core transaction. This is common for IO transactions. There were enough Egress credits and the RBT tag matched but the valid bit was not set.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and the valid bit was not set although there were enough Egress credits.",
+ "UMask": "0x40",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn was successful. There were sufficient credits, the RBT valid bit was set and there was an RBT tag match. The message was marked to spawn direct2core.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during link initialization.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the HOM message class.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCB message class.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCS message class.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NDR message class.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the SNP message class.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; DRS",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; HOM",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the HOM message class.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCB",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCB message class.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCS",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCS message class.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NDR",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NDR message class.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; SNP",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the SNP message class.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x1D",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of flits received over QPI that do not hold protocol payload. When QPI is not in a power saving state, it continuously transmits flits across the link. When there are no protocol flits to send, it will send IDLE and NULL flits across. These flits sometimes do carry a payload, such as credit returns, but are generally not considered part of the QPI bandwidth.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data.",
+ "UMask": "0x18",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits received over QPI on the home channel.",
+ "UMask": "0x6",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits received over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request received over QPI on the home channel. This basically counts the number of remote memory requests received over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits received over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are received on the home channel.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
+ "UMask": "0xc",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits received over QPI. This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet because there were insufficient BGF credits. For details on a message class granularity, use the Egress Credit Occupancy events.",
+ "UMask": "0x40",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.GV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled because a GV transition (frequency transition) was taking place.",
+ "UMask": "0x80",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is almost full, we block some but not all packets.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is totally full, we are not allowed to send any packets.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency.",
+ "UMask": "0x18",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits transmitted over QPI on the home channel.",
+ "UMask": "0x6",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits transmitted over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request transmitted over QPI on the home channel. This basically counts the number of remote memory requests transmitted over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits transmitted over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are transmitted on the home channel.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
+ "UMask": "0xc",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits transmitted over QPI. This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "EventCode": "0x2B",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "EventCode": "0x1C",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits returned.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x1B",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 10",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 11",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 12",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 13",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14_16",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 14&16",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 8",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 9",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO_15_17",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 15&17",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 0",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 2",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 3",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 4",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 5",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 6",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 7",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA0",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCB Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCS Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0xB",
+ "EventName": "UNC_R3_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0xB",
+ "EventName": "UNC_R3_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0xD",
+ "EventName": "UNC_R3_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0xD",
+ "EventName": "UNC_R3_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0xC",
+ "EventName": "UNC_R3_IOT_CTS_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0xC",
+ "EventName": "UNC_R3_IOT_CTS_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; All",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; All",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; All",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Any",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Clockwise",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ring Stop Starved; AK",
+ "EventCode": "0xE",
+ "EventName": "UNC_R3_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the ringstop is in starvation (per ring)",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; DRS",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; HOM",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NDR",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; SNP",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; DRS",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; HOM",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NDR",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; SNP",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2B",
+ "EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2B",
+ "EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_AD",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_BL",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Standard (NCS).",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Bounce Control",
+ "EventCode": "0xA",
+ "EventName": "UNC_S_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "EventName": "UNC_S_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "EventCode": "0x9",
+ "EventName": "UNC_S_FAST_ASSERTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; All",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Event",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; All",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Event",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; All",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Event",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in BDX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_S_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xc",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_S_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0x3",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Bounces",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Credits",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Bounces",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Credits",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
"UMask": "0x4",
- "Unit": "QPI LL"
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; AD - Bounces",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; AD - Credits",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; AK",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; BL - Bounces",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; BL - Credits",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; IV",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Bounces",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Credits",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AK",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Bounces",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Credits",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; IVF Credit",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x40",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; IV",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AD - Bounces",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AD - Credits",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AK",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; BL - Bounces",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; BL - Credits",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IV",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AD - Bounces",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AD - Credits",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AK",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; BL - Bounces",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; BL - Credits",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IV",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.AD",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.AK",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.BL",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Bounces",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Credits",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Bounces",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Credits",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AD - Bounces",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AD - Credits",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AK",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; BL - Bounces",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; BL - Credits",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; IV",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; PREQ, PSMI, P2U, Thermal, PCUSMI, PMI",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
}
]
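
Each object in the SBOX/UBOX arrays above defines one uncore PMU event: EventName is the alias perf exposes, Unit names the PMU type, and EventCode/UMask are the raw values programmed into that unit's counter control register. As a rough, non-authoritative sketch of how one entry maps onto perf's raw event syntax (the PMU instance name uncore_sbox_0 and the helper function below are illustrative assumptions, not something this patch defines):

# Minimal Python sketch; assumes the SBOX PMU instance is exposed as
# "uncore_sbox_0" on the target system (an assumption, not defined here).
def event_to_perf_string(entry, pmu):
    # Build perf's raw "pmu/event=...,umask=.../" selector from a JSON entry.
    terms = ["event=" + entry["EventCode"]]
    if "UMask" in entry:
        terms.append("umask=" + entry["UMask"])
    return pmu + "/" + ",".join(terms) + "/"

entry = {"EventName": "UNC_S_RxR_BYPASS.AD_CRD", "EventCode": "0x12",
         "UMask": "0x1", "Unit": "SBOX"}
print(event_to_perf_string(entry, "uncore_sbox_0"))
# prints: uncore_sbox_0/event=0x12,umask=0x1/

Once perf's jevents tooling compiles these JSON files in, the same event is also selectable by its EventName alias instead of the raw encoding.
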
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json
new file mode 100644
index 000000000000..01e04daf03da
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-io.json
@@ -0,0 +1,555 @@
+[
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; All",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Dn",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Up",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; All",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; All",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ }
+]
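
The R2PCIe descriptions above spell out the intended queue math: the occupancy accumulator divided by the not-empty cycle count gives the average queue depth while busy, and divided by the allocations (inserts) count it gives the average queueing latency in uncore cycles (Little's law). A minimal worked sketch with made-up counter values, assuming all three counts target the same ingress queue:

# Hypothetical sample counts for one measurement interval (illustrative only).
occupancy_sum = 1_200_000    # occupancy accumulator, summed every uncore cycle
cycles_not_empty = 400_000   # cycles the same ingress queue was not empty
inserts = 150_000            # allocations into that queue

avg_depth_while_busy = occupancy_sum / cycles_not_empty   # 3.0 entries
avg_latency_cycles = occupancy_sum / inserts              # 8.0 uncore cycles
print(avg_depth_while_busy, avg_latency_cycles)
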
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
index 66eed399724c..b5a33e7a68c6 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json
@@ -1,86 +1,2885 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Read CAS commands issued on this channel (including underfills).",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Write CAS commands issued on this channel.",
"ScaleUnit": "64Bytes",
- "UMask": "0xC",
+ "UMask": "0xc",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Read CAS commands issued on this channel (including underfills).",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the number of underfill reads that are issued by the memory controller. This will generally be about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ. While it is possible for underfills to be issed in both WMM and RMM, this event counts both.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Write CAS commands issued on this channel.",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "EventCode": "0xff",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Clockticks in the Memory Controller using one of the programmable counters",
+ "EventName": "UNC_M_CLOCKTICKS_P",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_CLOCKTICKS_P",
+ "EventName": "UNC_M_DCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
- "Counter": "0,1,2,3",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charges due to page misses",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of page misses. This does not include explicit precharge commands sent with CAS commands in Auto-Precharge mode. This does not include PRE commands sent as a result of the page close counter expiration.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for reads",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to read",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for writes",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to write",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE MXB write buffer occupancy",
+ "EventCode": "0x91",
+ "EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.RMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.WMM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
"UMask": "0x8",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
}
]
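
The WR_CAS_RANKn entries above all follow one pattern: EventCode selects the rank (0xB8 for rank 0, 0xB9 for rank 1, 0xBC-0xBF for ranks 4-7), while UMask selects a single bank (0x1-0xF, with bank 0 carrying no UMask field at all), all banks (0x10), or a bank group (0x11-0x14). A minimal Python sketch, not part of the patch, that loads the diffed JSON and groups these events by code and umask; the file path is an assumption matching the file diffed just above.

import json
from collections import defaultdict

# Assumed path inside a kernel tree; adjust as needed.
PATH = "tools/perf/pmu-events/arch/x86/broadwellx/uncore-memory.json"

with open(PATH) as f:
    events = json.load(f)  # each pmu-events file is a flat JSON array of event objects

by_code = defaultdict(list)
for ev in events:
    name = ev.get("EventName", "")
    if name.startswith("UNC_M_WR_CAS_RANK"):
        # Bank 0 entries carry no "UMask" key and fall back to umask 0.
        by_code[ev["EventCode"]].append((ev.get("UMask", "0x0"), name))

for code in sorted(by_code):
    print(code)
    for umask, name in sorted(by_code[code], key=lambda t: int(t[0], 16)):
        print(f"  umask={umask:>5}  {name}")
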
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
index dd1b95655d1d..83d20130c217 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/uncore-power.json
@@ -1,92 +1,457 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
- "Counter": "0,1,2,3",
+ "BriefDescription": "pclk Cycles",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6A",
+ "EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6B",
+ "EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
- "Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6C",
+ "EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6D",
+ "EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6E",
+ "EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6F",
+ "EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x70",
+ "EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x71",
+ "EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x61",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x62",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x63",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x64",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x65",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x66",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x67",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x68",
+ "EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x69",
+ "EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x31",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3A",
+ "EventName": "UNC_P_DEMOTIONS_CORE10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3B",
+ "EventName": "UNC_P_DEMOTIONS_CORE11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3C",
+ "EventName": "UNC_P_DEMOTIONS_CORE12",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3D",
+ "EventName": "UNC_P_DEMOTIONS_CORE13",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3E",
+ "EventName": "UNC_P_DEMOTIONS_CORE14",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3F",
+ "EventName": "UNC_P_DEMOTIONS_CORE15",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x40",
+ "EventName": "UNC_P_DEMOTIONS_CORE16",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x41",
+ "EventName": "UNC_P_DEMOTIONS_CORE17",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x33",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x34",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x35",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x36",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x37",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x38",
+ "EventName": "UNC_P_DEMOTIONS_CORE8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x39",
+ "EventName": "UNC_P_DEMOTIONS_CORE9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C1E",
+ "EventCode": "0x4E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C1E. This event can be used in conjunction with edge detect to count C1E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C7 State Residency",
+ "EventCode": "0x2E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C7. This event can be used in conjunction with edge detect to count C7 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "PerPkg": "1",
+ "PublicDescription": "Ring GV with same final and initial frequency",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
"Unit": "PCU"
}
]
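
The removed uncore-power.json entries carried MetricExpr strings such as "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100."; the raw PCU events remain, so the same percentages can still be derived by hand, since UNC_P_CLOCKTICKS counts a fixed 1 GHz pclk. A minimal sketch, not part of the patch, with made-up counter values for illustration.

def pcu_percent(event_cycles: float, clockticks: float) -> float:
    """Percentage of PCU time a condition held, mirroring the dropped MetricExpr form.

    UNC_P_CLOCKTICKS counts a constant-rate 1 GHz clock, so the ratio is also a
    wall-time fraction.
    """
    if clockticks == 0:
        return 0.0
    return event_cycles / clockticks * 100.0

# Example with made-up counts (e.g. read from `perf stat` output):
print(pcu_percent(event_cycles=2.5e8, clockticks=1.0e9))  # -> 25.0
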
diff --git a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
index 7d79c707c6d1..93621e004d88 100644
--- a/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/broadwellx/virtual-memory.json
@@ -1,388 +1,312 @@
[
{
- "EventCode": "0x08",
- "UMask": "0x1",
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"Errata": "BDM69",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "This event counts load misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"EventCode": "0x08",
- "UMask": "0x2",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "Errata": "BDM69",
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x08",
- "UMask": "0x4",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "Errata": "BDM69",
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x08",
- "UMask": "0x8",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "Errata": "BDM69",
- "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x08",
- "UMask": "0xe",
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"Errata": "BDM69",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0x08",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
"Errata": "BDM69",
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "UMask": "0x20",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "UMask": "0x40",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts load misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x49",
- "UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"Errata": "BDM69",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"EventCode": "0x49",
- "UMask": "0x2",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x49",
- "UMask": "0x4",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x49",
- "UMask": "0x8",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x49",
- "UMask": "0xe",
"BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"Errata": "BDM69",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0x49",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (1G)",
"Errata": "BDM69",
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "UMask": "0x20",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "UMask": "0x40",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x4F",
- "UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x4F",
"EventName": "EPT.WALK_CYCLES",
"PublicDescription": "This event counts cycles for an extended page table walk. The Extended Page directory cache differs from standard TLB caches by the operating system that use it. Virtual machine operating systems use the extended page directory cache, while guest operating systems use the standard TLB caches.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0x85",
- "UMask": "0x1",
"BriefDescription": "Misses at all ITLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"Errata": "BDM69",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "This event counts store misses in all DTLB levels that cause page walks of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
"EventCode": "0x85",
- "UMask": "0x2",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
"EventCode": "0x85",
- "UMask": "0x4",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
"EventCode": "0x85",
- "UMask": "0x8",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
- "Errata": "BDM69",
- "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x85",
- "UMask": "0xe",
"BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
"Errata": "BDM69",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0x85",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_DURATION",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
"Errata": "BDM69",
- "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "UMask": "0x20",
- "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K).",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "UMask": "0x40",
- "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M).",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "This event counts store misses in all DTLB levels that cause a completed page walk (4K page size). The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "Errata": "BDM69",
"EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts the number of cycles while PMH is busy with the page walk.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xAE",
- "UMask": "0x1",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB.ITLB_FLUSH",
- "PublicDescription": "This event counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xBC",
- "UMask": "0x11",
"BriefDescription": "Number of DTLB page walker hits in the L1+FB.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x11"
},
{
- "EventCode": "0xBC",
- "UMask": "0x12",
"BriefDescription": "Number of DTLB page walker hits in the L2.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x12"
},
{
- "EventCode": "0xBC",
- "UMask": "0x14",
"BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x14"
},
{
- "EventCode": "0xBC",
- "UMask": "0x18",
"BriefDescription": "Number of DTLB page walker hits in Memory.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x18"
},
{
- "EventCode": "0xBC",
- "UMask": "0x21",
"BriefDescription": "Number of ITLB page walker hits in the L1+FB.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21"
},
{
- "EventCode": "0xBC",
- "UMask": "0x22",
"BriefDescription": "Number of ITLB page walker hits in the L2.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x22"
},
{
- "EventCode": "0xBC",
- "UMask": "0x24",
"BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"Errata": "BDM69, BDM98",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x24"
},
{
- "EventCode": "0xBD",
- "UMask": "0x1",
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "This event counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xBD",
- "UMask": "0x20",
"BriefDescription": "STLB flush attempts",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "This event counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, and so on).",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
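
(Reading aid, not part of the patch: each entry in these perf pmu-events JSON files is a flat object whose fields, such as EventCode, EventName, UMask, SampleAfterValue and optionally Errata, PublicDescription or MSRIndex/MSRValue, describe one PMU event. The bulk of the churn in these hunks drops the Counter/CounterHTOff fields, re-sorts the remaining fields alphabetically, and replaces deprecated OFFCORE_RESPONSE.* aliases with the corresponding OCR.* events. A minimal sketch of how such a file can be inspected, assuming Python; the path and the choice of printed fields below are illustrative only.)

import json

# One of the files touched by this patch; any pmu-events JSON array works here.
PATH = "tools/perf/pmu-events/arch/x86/cascadelakex/cache.json"

with open(PATH) as f:
    events = json.load(f)  # each file is a flat JSON array of event objects

for ev in events:
    # EventName appears on every entry shown in this diff; other fields are
    # optional, so fall back to a placeholder rather than assuming they exist.
    print(f'{ev.get("EventName", "?"):45s}'
          f' code={ev.get("EventCode", "n/a")}'
          f' umask={ev.get("UMask", "n/a")}')
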
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
index 3fba310a5012..a842f05cb60d 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/cache.json
@@ -1,941 +1,5039 @@
[
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1D miss outstandings duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
"Deprecated": "1",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe7"
+ },
+ {
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf8"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "All requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "All requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd8"
+ },
+ {
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x38"
+ },
+ {
+ "BriefDescription": "All L2 requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "All L2 requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Errata": "SKL057",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Errata": "SKL057",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
+ "Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was remote HITM",
+ "Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Data_LA": "1",
+ "EventCode": "0xD4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "PublicDescription": "Demand requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "UMask": "0x27"
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD1",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
- "SampleAfterValue": "50021",
- "UMask": "0x4"
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "L2 writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xF0",
- "EventName": "L2_TRANS.L2_WB",
- "PublicDescription": "Counts L2 writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "UMask": "0x40"
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "L2 cache lines filling L2",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xF1",
- "EventName": "L2_LINES_IN.ALL",
- "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040491",
"SampleAfterValue": "100003",
- "UMask": "0x1f"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xF2",
- "EventName": "L2_LINES_OUT.SILENT",
- "SampleAfterValue": "200003",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100491",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT.ANY_SNOOP OCR.ALL_READS.L3_HIT.ANY_SNOOP OCR.ALL_READS.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C07F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C07F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C07F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C07F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C07F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C07F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT.SNOOP_MISS OCR.ALL_READS.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C07F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT.SNOOP_NONE OCR.ALL_READS.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C07F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_E.ANY_SNOOP OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F800807F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000807F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8000807F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4000807F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000807F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000807F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800807F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_F.ANY_SNOOP OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F802007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8002007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4002007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2002007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x802007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_M.ANY_SNOOP OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F800407F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000407F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8000407F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4000407F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000407F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000407F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800407F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_S.ANY_SNOOP OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F801007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8001007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4001007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x801007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT.ANY_SNOOP OCR.ALL_RFO.L3_HIT.ANY_SNOOP OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT.SNOOP_MISS OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT.SNOOP_NONE OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F804007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "All requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.MISS",
- "PublicDescription": "All requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "UMask": "0x3f"
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
- "SampleAfterValue": "100007",
- "UMask": "0x8"
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all demand data writes (RFOs)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.ANY_SNOOP OCR.OTHER.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.HITM_OTHER_CORE OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80088000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000088000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800088000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400088000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100088000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200088000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80088000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80048000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000048000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800048000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400048000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100048000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200048000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80048000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80108000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000108000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800108000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400108000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100108000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200108000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts any other requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80108000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F803C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "SampleAfterValue": "20011",
- "UMask": "0x2"
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD1",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0400",
"SampleAfterValue": "100003",
- "UMask": "0x8"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "AnyThread": "1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x48",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040010",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100080",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80080100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80200100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800200100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100200100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200200100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
"BriefDescription": "Any memory transaction that reached the SQ.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xB0",
"EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
"PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
@@ -943,9034 +5041,6727 @@
"UMask": "0x80"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Cacheable and non-cacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD4",
- "EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "PEBS": "1",
- "SampleAfterValue": "100007",
- "UMask": "0x4"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions with remote Intel\u00ae Optane\u2122 DC persistent memory as the data source where the data request missed all caches. Precise event.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "ELLC": "1",
- "EventCode": "0xD3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with remote Intel\u00ae Optane\u2122 DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
- "SampleAfterValue": "100007",
- "UMask": "0x10"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803C0491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200491",
"SampleAfterValue": "100003",
- "UMask": "0x8"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand Data Read requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
- "SampleAfterValue": "200003",
- "UMask": "0xe1"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400200491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of cache line split locks sent to uncore.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xF4",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040491",
"SampleAfterValue": "100003",
- "UMask": "0x10"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
- "SampleAfterValue": "200003",
- "UMask": "0xc4"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80100491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
- "SampleAfterValue": "20011",
- "UMask": "0x4"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020491",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD0",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "L1_Hit_Indication": "1",
- "PEBS": "1",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10490",
"SampleAfterValue": "100003",
- "UMask": "0x42"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x48",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800100490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB2",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400490",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD1",
- "EventName": "MEM_LOAD_RETIRED.L2_MISS",
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
- "SampleAfterValue": "50021",
- "UMask": "0x10"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020490",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.PF_MISS",
- "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
- "SampleAfterValue": "200003",
- "UMask": "0x38"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00800207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD1",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
- "SampleAfterValue": "100007",
- "UMask": "0x20"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80020120",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x107F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C07F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C07F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL057",
- "EventCode": "0x2E",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C07F7",
"SampleAfterValue": "100003",
- "UMask": "0x41"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C07F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C07F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C07F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C07F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C07F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F800807F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10000807F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8000807F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4000807F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000807F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2000807F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800807F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F802007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10002007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01004007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8002007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4002007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2002007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x802007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F800407F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10000407F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8000407F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xF2",
- "EventName": "L2_LINES_OUT.NON_SILENT",
- "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
- "SampleAfterValue": "200003",
- "UMask": "0x2"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4000407F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000407F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00804007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2000407F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800407F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F801007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "RFO requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.RFO_HIT",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
- "SampleAfterValue": "200003",
- "UMask": "0xc2"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8001007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4001007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1001007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001007F7",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x801007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00000107F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F804007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F800807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F800207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080408000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F801007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "L1D data line replacements",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x51",
- "EventName": "L1D.REPLACEMENT",
- "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040122",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
- "SampleAfterValue": "200003",
- "UMask": "0xc1"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "All retired load instructions.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD0",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "PEBS": "1",
- "SampleAfterValue": "2000003",
- "UMask": "0x81"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80400122",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040004",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD0",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800020004",
"SampleAfterValue": "100003",
- "UMask": "0x41"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "6",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020004",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired store instructions that miss the STLB.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD0",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "L1_Hit_Indication": "1",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
"SampleAfterValue": "100003",
- "UMask": "0x12"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "RFO requests to L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.ALL_RFO",
- "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
- "SampleAfterValue": "200003",
- "UMask": "0xe2"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.ALL_PF",
- "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
- "SampleAfterValue": "200003",
- "UMask": "0xf8"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800020001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "L2 code requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "PublicDescription": "Counts the total number of L2 code requests.",
- "SampleAfterValue": "200003",
- "UMask": "0xe4"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand requests to L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "PublicDescription": "Demand requests to L2 cache.",
- "SampleAfterValue": "200003",
- "UMask": "0xe7"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
- "PEBS": "1",
- "SampleAfterValue": "100007",
- "UMask": "0x2"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F800207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "L2 cache misses when fetching instructions",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "PublicDescription": "Counts L2 cache misses when fetching instructions.",
- "SampleAfterValue": "200003",
- "UMask": "0x24"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000200002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80200002",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
- "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL057",
- "EventCode": "0x2E",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80400002",
"SampleAfterValue": "100003",
- "UMask": "0x4f"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x18000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C8000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions that miss the STLB.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD0",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PEBS": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C8000",
"SampleAfterValue": "100003",
- "UMask": "0x11"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C8000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C8000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C8000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C8000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C8000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C8000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions with local Intel\u00ae Optane\u2122 DC persistent memory as the data source where the data request missed all caches. Precise event.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "ELLC": "1",
- "EventCode": "0xD1",
- "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with local Intel\u00ae Optane\u2122 DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode). Precise event",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.ANY_SNOOP",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80088000",
"SampleAfterValue": "100003",
- "UMask": "0x80"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000088000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800088000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400088000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100088000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200088000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80088000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
- "EventCode": "0xF2",
- "EventName": "L2_LINES_OUT.USELESS_PREF",
- "SampleAfterValue": "200003",
- "UMask": "0x4"
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000208000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800208000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "SampleAfterValue": "200003",
- "UMask": "0x21"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400208000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100208000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200208000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "PEBS": "1",
- "SampleAfterValue": "20011",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80208000",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80048000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000048000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800048000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "All L2 requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.REFERENCES",
- "PublicDescription": "All L2 requests.",
- "SampleAfterValue": "200003",
- "UMask": "0xff"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400048000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100048000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200048000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x48",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80048000",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80108000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000108000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800108000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400108000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100108000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200108000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80108000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80408000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80408000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100408000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00801007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions with locked access.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD0",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "PEBS": "1",
- "SampleAfterValue": "100007",
- "UMask": "0x21"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "All retired store instructions.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD0",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "L1_Hit_Indication": "1",
- "PEBS": "1",
- "SampleAfterValue": "2000003",
- "UMask": "0x82"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
- "SampleAfterValue": "100007",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100400",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB0",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800080010",
"SampleAfterValue": "100003",
- "UMask": "0x4"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB0",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F80040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions whose data sources was remote HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
- "PEBS": "1",
- "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
- "SampleAfterValue": "100007",
- "UMask": "0x4"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000018000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "RFO requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.RFO_MISS",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
- "SampleAfterValue": "200003",
- "UMask": "0x22"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800020010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xF2",
- "EventName": "L2_LINES_OUT.USELESS_HWPF",
- "SampleAfterValue": "200003",
- "UMask": "0x4"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x24",
- "EventName": "L2_RQSTS.PF_HIT",
- "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
- "SampleAfterValue": "200003",
- "UMask": "0xd8"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB0",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8007C0020",
"SampleAfterValue": "100003",
- "UMask": "0x2"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100408000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80408000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00800407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00800807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD1",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
- "SampleAfterValue": "100007",
- "UMask": "0x40"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F800407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD1",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040080",
"SampleAfterValue": "100003",
- "UMask": "0x2"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800040080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400040080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00802007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "L1D miss outstandings duration in cycles",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x48",
- "EventName": "L1D_PEND_MISS.PENDING",
- "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040080",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80100080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.ANY_RESPONSE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F803C0100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10003C0100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8003C0100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4003C0100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1003C0100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8007C0100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2003C0100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x803C0100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80080100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000080100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800080100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400080100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100080100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200080100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80080100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80200100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000200100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800200100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400200100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100200100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200200100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80200100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80040100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000040100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "EventCode": "0xD1",
- "EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800040100",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand and prefetch data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB0",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040100",
"SampleAfterValue": "100003",
- "UMask": "0x8"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100040100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200040100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80040100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80100100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000100100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800100100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400100100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100100100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200100100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F802007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.ANY_RESPONSE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
index 7fde0d2943cd..8f7dc72accd0 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
@@ -1,413 +1,1608 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
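All of the residency metrics above share one shape: a per-core or per-package C-state residency counter divided by the TSC over the same interval, scaled to a percentage by the "100%" ScaleUnit. A minimal sketch of that arithmetic, using hypothetical counter deltas:

    # Hypothetical deltas over one measurement interval.
    c6_pkg_residency = 1_200_000_000   # cstate_pkg/c6-residency/
    tsc = 3_000_000_000                # TSC ticks in the same interval

    # MetricExpr: cstate_pkg@c6\-residency@ / TSC, ScaleUnit 100%
    pct = 100 * c6_pkg_residency / tsc
    print(f"C6_Pkg_Residency = {pct:.1f}%")   # 40.0%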
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
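smi_cycles is a guarded expression: only when the SMI-count MSR increased does it report the fraction of APERF cycles not counted as unhalted thread cycles; otherwise it evaluates to 0. A small sketch of the same conditional, with hypothetical MSR/counter deltas:

    # Hypothetical deltas for one interval.
    aperf = 2_000_000_000    # msr/aperf/
    cycles = 1_950_000_000   # CPU_CLK_UNHALTED.THREAD
    smi = 3                  # msr/smi/ (SMI count)

    # MetricExpr: ((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)
    smi_cycles = (aperf - cycles) / aperf if smi > 0 else 0
    print(f"smi_cycles = {100 * smi_cycles:.2f}%")   # 2.50%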
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
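The 4K-aliasing condition tma_4k_aliasing describes can be illustrated directly: a load conflicts with an earlier store when the two addresses share the same offset within a 4 KiB page while being different addresses, which is the false match that forces the load to re-issue. A hypothetical check in Python:

    def may_4k_alias(store_addr: int, load_addr: int) -> bool:
        # Same low 12 bits (offset within a 4 KiB page) but not the same address.
        return store_addr != load_addr and (store_addr & 0xFFF) == (load_addr & 0xFFF)

    print(may_4k_alias(0x7f001234, 0x7f002234))  # True: 4 KiB apart, same page offset
    print(may_4k_alias(0x7f001234, 0x7f001238))  # False: different page offsets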
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "MetricExpr": "1 - tma_frontend_bound - (UOPS_ISSUED.ANY + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
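The rewritten tma_backend_bound expression leans on the level-1 Top-down identity: Frontend Bound + Bad Speculation + Retiring + Backend Bound accounts for all issue slots, and (UOPS_ISSUED.ANY + 4 * recovery cycles) / slots equals Bad Speculation + Retiring, since Bad Speculation counts issued-but-not-retired uops plus the recovery slots while Retiring counts the retired ones. A sketch of that accounting with hypothetical counter deltas (SMT off, so the plain INT_MISC.RECOVERY_CYCLES form applies):

    # Hypothetical deltas; 4-wide issue, SMT off.
    clks = 1_000_000_000
    slots = 4 * clks                           # tma_info_slots
    idq_uops_not_delivered = 600_000_000       # IDQ_UOPS_NOT_DELIVERED.CORE
    uops_issued = 2_400_000_000                # UOPS_ISSUED.ANY
    uops_retired_slots = 2_000_000_000         # UOPS_RETIRED.RETIRE_SLOTS
    recovery_cycles = 50_000_000               # INT_MISC.RECOVERY_CYCLES

    frontend_bound = idq_uops_not_delivered / slots
    bad_speculation = (uops_issued - uops_retired_slots + 4 * recovery_cycles) / slots
    retiring = uops_retired_slots / slots
    backend_bound = 1 - frontend_bound - (uops_issued + 4 * recovery_cycles) / slots

    assert abs(frontend_bound + bad_speculation + retiring + backend_bound - 1) < 1e-9
    print(frontend_bound, bad_speculation, retiring, backend_bound)  # 0.15 0.15 0.5 0.2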
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(44 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) + 44 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "44 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound - tma_pmm_bound if #has_pmem > 0 else CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound)",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
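The tma_dtlb_load expression is a small cost model: each load that hit the STLB is charged roughly 9 cycles, hardware page walks are charged by their active cycles, and the sum is capped at the cycles the thread was stalled on memory but not on an outstanding L1D miss. A hypothetical evaluation of the same min/max arithmetic:

    clks = 1_000_000_000
    stlb_hit = 5_000_000            # DTLB_LOAD_MISSES.STLB_HIT (cmask=1)
    walk_active = 20_000_000        # DTLB_LOAD_MISSES.WALK_ACTIVE
    cycles_mem_any = 300_000_000    # CYCLE_ACTIVITY.CYCLES_MEM_ANY
    cycles_l1d_miss = 250_000_000   # CYCLE_ACTIVITY.CYCLES_L1D_MISS

    cost = min(9 * stlb_hit + walk_active, max(cycles_mem_any - cycles_l1d_miss, 0))
    print(f"tma_dtlb_load = {cost / clks:.3f}")   # 0.050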
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(110 * tma_info_average_frequency * (OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM + OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_average_frequency * (OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_512b",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
+ "MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fused_instructions",
+ "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "(UOPS_RETIRED.RETIRE_SLOTS + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY) / tma_info_slots",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_big_code",
+ "MetricThreshold": "tma_info_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_branching_overhead"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_mispredictions, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.CONDITIONAL + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_branching_overhead",
+ "MetricThreshold": "tma_info_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_big_code"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_callret"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_code_stlb_mpki"
},
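The *_mpki info metrics all use the same normalization: an event count scaled to misses per thousand retired instructions, which keeps the value comparable across runs of different lengths. For example, with hypothetical counts:

    itlb_walks_completed = 150_000   # ITLB_MISSES.WALK_COMPLETED
    inst_retired = 3_000_000_000     # INST_RETIRED.ANY

    code_stlb_mpki = 1e3 * itlb_walks_completed / inst_retired
    print(f"tma_info_code_stlb_mpki = {code_stlb_mpki:.3f}")   # 0.050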
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.NOT_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_nt"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "(BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_tk"
},
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_core_bound_likely",
+ "MetricThreshold": "tma_info_core_bound_likely > 0.5"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_misses, tma_info_iptb, tma_lcp"
+ },
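tma_info_dsb_coverage is the DSB share of all uops delivered through the three front-end paths counted here (DSB, MITE and the microcode sequencer); note the LSD term from the old DSB_Coverage formula is gone. Metrics like this are selected by name with perf stat's -M option (e.g. perf stat -M tma_info_dsb_coverage). A hypothetical evaluation:

    # Hypothetical uop-delivery counts per front-end path.
    dsb_uops = 1_800_000_000    # IDQ.DSB_UOPS
    mite_uops = 500_000_000     # IDQ.MITE_UOPS
    ms_uops = 100_000_000       # IDQ.MS_UOPS

    dsb_coverage = dsb_uops / (dsb_uops + mite_uops + ms_uops)
    print(f"tma_info_dsb_coverage = {dsb_coverage:.2f}")   # 0.75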
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_dsb_misses",
+ "MetricThreshold": "tma_info_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_fb_hpki"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_fetch_upc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
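tma_info_flopc and tma_info_gflops weight each FP_ARITH_INST_RETIRED event by the number of floating-point operations it represents: 1 per scalar op, 2 per 128-bit double vector, 4 per 128-bit single or 256-bit double, 8 per 256-bit single or 512-bit double, and 16 per 512-bit single. A hypothetical GFLOPS calculation with the same weights:

    # Hypothetical retired counts over a 1.0 s interval.
    scalar = 2e8                  # SCALAR_SINGLE + SCALAR_DOUBLE
    pd128, ps128 = 2e8, 1e8       # 128B_PACKED_DOUBLE / 128B_PACKED_SINGLE
    pd256, ps256 = 1e8, 5e7       # 256B_PACKED_DOUBLE / 256B_PACKED_SINGLE
    pd512, ps512 = 0, 0           # 512B_PACKED_DOUBLE / 512B_PACKED_SINGLE
    duration_time = 1.0           # seconds

    flops = scalar + 2 * pd128 + 4 * (ps128 + pd256) + 8 * (ps256 + pd512) + 16 * ps512
    print(f"tma_info_gflops = {flops / 1e9 / duration_time:.2f}")   # 1.80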
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_ic_misses",
+ "MetricThreshold": "tma_info_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ + 2",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]",
+ "MetricExpr": "(UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3) * 4 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;Mem;Server;SoC",
+ "MetricName": "tma_info_io_read_bw"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3) * 4 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;Mem;Server;SoC",
+ "MetricName": "tma_info_io_write_bw"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx512",
+ "MetricThreshold": "tma_info_iparith_avx512 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_ipswpf",
+ "MetricThreshold": "tma_info_ipswpf < 100"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_lcp"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization",
- "MetricConstraint": "NO_NMI_WATCHDOG"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_jump"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Access_BW"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
},
{
- "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki_load"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / tma_info_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_l2_evictions_nonsilent_pki"
+ },
+ {
+ "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / tma_info_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_l2_evictions_silent_pki"
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
- "MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
- "MetricGroup": "",
- "MetricName": "L2_Evictions_Silent_PKI"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
- "MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
- "MetricGroup": "",
- "MetricName": "L2_Evictions_NonSilent_PKI"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_load_stlb_mpki"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]",
+ "MetricExpr": "1e9 * (UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_INSERTS) / imc_0@event\\=0x0@",
+ "MetricGroup": "Mem;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_mem_dram_read_latency",
+ "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]",
+ "MetricExpr": "(1e9 * (UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS) / imc_0@event\\=0x0@ if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_mem_pmm_read_latency",
+ "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
},
{
- "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
- "MetricGroup": "Memory_Lat",
- "MetricName": "DRAM_Read_Latency"
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_memory_bandwidth",
+ "MetricThreshold": "tma_info_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
- "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_Parallel_Reads"
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_memory_data_tlbs",
+ "MetricThreshold": "tma_info_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store"
},
{
- "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches",
- "MetricExpr": "( 1000000000 * ( imc@event\\=0xe0\\\\\\,umask\\=0x1@ / imc@event\\=0xe3@ ) / imc_0@event\\=0x0@ ) if 1 if 0 == 1 else 0 else 0",
- "MetricGroup": "Memory_Lat",
- "MetricName": "MEM_PMM_Read_Latency"
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_memory_latency",
+ "MetricThreshold": "tma_info_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
- "MetricExpr": "( ( 64 * imc@event\\=0xe3@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
- "MetricGroup": "Memory_BW",
- "MetricName": "PMM_Read_BW"
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_mispredictions",
+ "MetricThreshold": "tma_info_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
- "MetricExpr": "( ( 64 * imc@event\\=0xe7@ / 1000000000 ) / duration_time ) if 1 if 0 == 1 else 0 else 0",
- "MetricGroup": "Memory_BW",
- "MetricName": "PMM_Write_BW"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "Socket actual clocks when any core is active on that socket",
- "MetricExpr": "cha_0@event\\=0x0@",
- "MetricGroup": "",
- "MetricName": "Socket_CLKS"
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions. )",
- "MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
- "MetricGroup": "",
- "MetricName": "IpFarBranch"
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_pmm_read_bw"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_pmm_write_bw"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0",
+ "MetricExpr": "(CORE_POWER.LVL0_TURBO_LICENSE / 2 / tma_info_core_clks if #SMT_on else CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_clks)",
"MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "MetricName": "tma_info_power_license0_utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
+ "MetricExpr": "(CORE_POWER.LVL1_TURBO_LICENSE / 2 / tma_info_core_clks if #SMT_on else CORE_POWER.LVL1_TURBO_LICENSE / tma_info_core_clks)",
"MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "MetricName": "tma_info_power_license1_utilization",
+ "MetricThreshold": "tma_info_power_license1_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
+ "MetricExpr": "(CORE_POWER.LVL2_TURBO_LICENSE / 2 / tma_info_core_clks if #SMT_on else CORE_POWER.LVL2_TURBO_LICENSE / tma_info_core_clks)",
"MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "MetricName": "tma_info_power_license2_utilization",
+ "MetricThreshold": "tma_info_power_license2_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cha_0@event\\=0x0@",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_store_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "MetricName": "tma_info_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "17 * tma_info_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricExpr": "59.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_local_dram",
+ "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricExpr": "(12 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (11 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_info_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
+ "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_non_fused_branches",
+ "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 0)) * (CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_pmm_bound",
+ "MetricThreshold": "tma_pmm_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for Persistent Memory Module.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
+ "MetricName": "tma_port_7",
+ "MetricThreshold": "tma_port_7 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address). Sample with: UOPS_DISPATCHED_PORT.PORT_7",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_NONE / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_1 - UOPS_EXECUTED.CORE_CYCLES_GE_2) / 2 if #SMT_on else EXE_ACTIVITY.1_PORTS_UTIL) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_2 - UOPS_EXECUTED.CORE_CYCLES_GE_3) / 2 if #SMT_on else EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(89.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 89.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
+ "MetricName": "tma_remote_cache",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricExpr": "127 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_remote_dram",
+ "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricExpr": "40 * ROB_MISC_EVENTS.PAUSE_INST / tma_info_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 11 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "9 * BACLEARS.ANY / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
index 3c0b95fd60ad..1f46e6b33856 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/floating-point.json
@@ -1,85 +1,99 @@
[
{
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "UMask": "0x4"
},
{
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instruction retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
+ "BriefDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "EventCode": "0xC7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "EventCode": "0xC7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
"BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x40"
},
{
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xC7",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "UMask": "0x20"
+ "UMask": "0x80"
},
{
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
"EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Intel AVX-512 computational 512-bit packed BFloat16 instructions retired.",
+ "EventCode": "0xCF",
+ "EventName": "FP_ARITH_INST_RETIRED2.128BIT_PACKED_BF16",
+ "PublicDescription": "Counts once for each Intel AVX-512 computational 512-bit packed BFloat16 floating-point instruction retired. Applies to the ZMM based VDPBF16PS instruction. Each count represents 64 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Intel AVX-512 computational 128-bit packed BFloat16 instructions retired.",
+ "EventCode": "0xCF",
+ "EventName": "FP_ARITH_INST_RETIRED2.256BIT_PACKED_BF16",
+ "PublicDescription": "Counts once for each Intel AVX-512 computational 128-bit packed BFloat16 floating-point instruction retired. Applies to the XMM based VDPBF16PS instruction. Each count represents 16 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Intel AVX-512 computational 256-bit packed BFloat16 instructions retired.",
+ "EventCode": "0xCF",
+ "EventName": "FP_ARITH_INST_RETIRED2.512BIT_PACKED_BF16",
+ "PublicDescription": "Counts once for each Intel AVX-512 computational 256-bit packed BFloat16 floating-point instruction retired. Applies to the YMM based VDPBF16PS instruction. Each count represents 32 computation operations. This event is only supported on products formerly named Cooper Lake and is not supported on products formerly named Cascade Lake.",
"SampleAfterValue": "2000003",
"UMask": "0x80"
},
{
"BriefDescription": "Cycles with any input/output SSE or FP assist",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0xCA",
"EventName": "FP_ASSIST.ANY",
"PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
"UMask": "0x1e"
- },
- {
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC7",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC7",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
index 3553472ad266..04f08e4d2402 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/frontend.json
@@ -1,347 +1,229 @@
[
{
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "4",
- "EventCode": "0x9C",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.L2_MISS",
- "MSRIndex": "0x3F7",
- "MSRValue": "0x13",
- "PEBS": "1",
- "SampleAfterValue": "100007",
- "TakenAlone": "1",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses.\nNote: Invoking MITE requires two or three cycles delay.",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x79",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
- "UMask": "0x20"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
- "MSRValue": "0x200206",
+ "MSRValue": "0x1",
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
- "MSRValue": "0x300206",
+ "MSRValue": "0x11",
"PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
- "MSRValue": "0x100206",
+ "MSRValue": "0x14",
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x9C",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "Invert": "1",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x79",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
- "SampleAfterValue": "2000003",
- "UMask": "0x30"
- },
- {
"BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
"MSRValue": "0x12",
"PEBS": "1",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x9C",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x79",
- "EventName": "IDQ.DSB_CYCLES",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x400106",
+ "PEBS": "2",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
- "MSRValue": "0x11",
+ "MSRValue": "0x408006",
"PEBS": "1",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x79",
- "EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x401006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x79",
- "EventName": "IDQ.MS_CYCLES",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "SampleAfterValue": "2000003",
- "UMask": "0x30"
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x400206",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x79",
- "EventName": "IDQ.MITE_CYCLES",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x410006",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x9C",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
"UMask": "0x1"
},
{
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xAB",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x200206",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
- "MSRValue": "0x400806",
+ "MSRValue": "0x300206",
"PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
- "MSRValue": "0x400206",
+ "MSRValue": "0x402006",
"PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
"MSRValue": "0x400406",
"PEBS": "1",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x79",
- "EventName": "IDQ.MS_DSB_CYCLES",
- "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x83",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
- "MSRValue": "0x408006",
+ "MSRValue": "0x420006",
"PEBS": "1",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "3",
- "EventCode": "0x9C",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
- "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x83",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
"UMask": "0x1"
},
{
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
"MSRValue": "0x404006",
"PEBS": "1",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
- "MSRValue": "0x15",
+ "MSRValue": "0x400806",
"PEBS": "1",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x79",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
- "MSRValue": "0x410006",
+ "MSRValue": "0x15",
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"SampleAfterValue": "100007",
- "TakenAlone": "1",
"UMask": "0x1"
},
{
"BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x80",
"EventName": "ICACHE_16B.IFDATA_STALL",
"PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
@@ -349,9 +231,55 @@
"UMask": "0x4"
},
{
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
+ },
+ {
"BriefDescription": "Cycles MITE is delivering any Uop",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x79",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
@@ -360,48 +288,67 @@
"UMask": "0x24"
},
{
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
- "MSRIndex": "0x3F7",
- "MSRValue": "0x401006",
- "PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
- "SampleAfterValue": "100007",
- "TakenAlone": "1",
- "UMask": "0x1"
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "2",
- "EventCode": "0x9C",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
- "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x8"
},
{
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
- "MSRIndex": "0x3F7",
- "MSRValue": "0x402006",
- "PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
- "SampleAfterValue": "100007",
- "TakenAlone": "1",
- "UMask": "0x1"
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
"BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x79",
@@ -411,72 +358,64 @@
"UMask": "0x30"
},
{
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
- "MSRIndex": "0x3F7",
- "MSRValue": "0x14",
- "PEBS": "1",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
- "SampleAfterValue": "100007",
- "TakenAlone": "1",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x79",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
"SampleAfterValue": "2000003",
- "UMask": "0x18"
+ "UMask": "0x1"
},
{
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xC6",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
- "MSRIndex": "0x3F7",
- "MSRValue": "0x420006",
- "PEBS": "1",
- "SampleAfterValue": "100007",
- "TakenAlone": "1",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "4",
- "EventCode": "0x79",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
"SampleAfterValue": "2000003",
- "UMask": "0x24"
+ "UMask": "0x1"
},
{
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "4",
- "EventCode": "0x79",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
"SampleAfterValue": "2000003",
- "UMask": "0x18"
+ "UMask": "0x1"
},
{
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x83",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "UMask": "0x4"
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
index cc66a51c6a7b..a00ad0aaf1ba 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/memory.json
@@ -1,9909 +1,7023 @@
[
{
- "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x06040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times HLE abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution successfully committed",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an HLE execution started.",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Errata": "SKL089",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
"SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x06040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC9",
- "EventName": "RTM_RETIRED.ABORTED_MEM",
- "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F840007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x110000490",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x5d",
- "EventName": "TX_EXEC.MISC5",
- "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x5d",
- "EventName": "TX_EXEC.MISC4",
- "PublicDescription": "RTM region detected inside HLE.",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x5d",
- "EventName": "TX_EXEC.MISC3",
- "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x5d",
- "EventName": "TX_EXEC.MISC2",
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x5d",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C000120",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B8007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.ANY_SNOOP OCR.ALL_READS.L3_MISS.ANY_SNOOP OCR.ALL_READS.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_READS.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00840007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC9",
- "EventName": "RTM_RETIRED.ABORTED_TIMER",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.REMOTE_HITM OCR.ALL_READS.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.SNOOP_MISS OCR.ALL_READS.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS.SNOOP_NONE OCR.ALL_READS.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F900007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F840007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x6040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x840007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B8007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F900007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x900007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.ANY_SNOOP OCR.ALL_RFO.L3_MISS.ANY_SNOOP OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an HLE execution successfully committed",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC8",
- "EventName": "HLE_RETIRED.COMMIT",
- "PublicDescription": "Number of times HLE commit succeeded.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.REMOTE_HITM OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.SNOOP_MISS OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS.SNOOP_NONE OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F840007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F90000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xCD",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
- "MSRValue": "0x80",
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
- "SampleAfterValue": "1009",
- "TakenAlone": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C000001",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x54",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "SampleAfterValue": "2000003",
- "UMask": "0x40"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC9",
- "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
- "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
- "SampleAfterValue": "2000003",
- "UMask": "0x40"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00900007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.ANY_SNOOP OCR.OTHER.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.HITM_OTHER_CORE OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B8007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC08000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC08000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.OTHER.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.OTHER.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B808000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B808000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B808000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC08000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC9",
- "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
- "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
- "SampleAfterValue": "2000003",
- "UMask": "0x20"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC9",
- "EventName": "RTM_RETIRED.ABORTED",
- "PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered.",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL089",
- "EventCode": "0xC3",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000080",
"SampleAfterValue": "100003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC9",
- "EventName": "RTM_RETIRED.ABORTED_EVENTS",
- "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "SampleAfterValue": "2000003",
- "UMask": "0x80"
- },
- {
- "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "6",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
- "SampleAfterValue": "2000003",
- "UMask": "0x6"
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x804000100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC8",
- "EventName": "HLE_RETIRED.ABORTED_TIMER",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "ALL_RFO & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xCD",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
- "MSRIndex": "0x3F6",
- "MSRValue": "0x20",
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
- "SampleAfterValue": "100007",
- "TakenAlone": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC000490",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC8",
- "EventName": "HLE_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC08000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HITM",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC0007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F840007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x6040007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x840007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B8007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F900007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2100007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x900007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xCD",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "MSRValue": "0x100",
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
- "SampleAfterValue": "503",
- "TakenAlone": "1",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xCD",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
- "MSRIndex": "0x3F6",
- "MSRValue": "0x10",
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
- "SampleAfterValue": "20011",
- "TakenAlone": "1",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "SampleAfterValue": "2000003",
- "UMask": "0x20"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x54",
- "EventName": "TX_MEM.ABORT_CAPACITY",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC8",
- "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
- "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
- "SampleAfterValue": "2000003",
- "UMask": "0x40"
- },
- {
- "BriefDescription": "Number of times an RTM execution started.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC9",
- "EventName": "RTM_RETIRED.START",
- "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xCD",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "MSRValue": "0x200",
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
- "SampleAfterValue": "101",
- "TakenAlone": "1",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC8",
- "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "UMask": "0x20"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "6",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xCD",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
- "MSRIndex": "0x3F6",
- "MSRValue": "0x40",
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
- "SampleAfterValue": "2003",
- "TakenAlone": "1",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000001",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "2",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC08000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC08000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
"EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC08000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC08000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an RTM execution successfully committed",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC9",
- "EventName": "RTM_RETIRED.COMMIT",
- "PublicDescription": "Number of times RTM commit succeeded.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B808000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC8",
- "EventName": "HLE_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "UMask": "0x80"
- },
- {
- "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x60",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_LOCAL_DRAM & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F900007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90008000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_DRAM & SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Demand Data Read requests who miss L3 cache",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB0",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x110000010",
"SampleAfterValue": "100003",
- "UMask": "0x10"
+ "UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01100007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00900007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00BC000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08040007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_LOCAL_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0084000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC8",
- "EventName": "HLE_RETIRED.ABORTED",
- "PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered.",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x013C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0604000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x063B800120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_LOCAL_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F84000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3FBC000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1010000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_MISS_LOCAL_DRAM & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0104000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_MISS & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x043C000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3FBC000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_LOCAL_DRAM & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0804000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS_REMOTE_HOP1_DRAM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0210000122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x43C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810000001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x13C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F90000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x103FC00100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0090000490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x83FC00100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x23C000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0110000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0xBC000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F84000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of times an HLE execution started.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC8",
- "EventName": "HLE_RETIRED.START",
- "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000100",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1004000020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1010000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x404000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083FC00004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x104000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_MISS_REMOTE_HOP1_DRAM & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x204000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C000120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x604000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0410000400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x84000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x63B800100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0404000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F90000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_MISS_REMOTE_HOP1_DRAM & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1010000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_MISS.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x810000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0810008000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x410000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_MISS & REMOTE_HITM",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x103FC00120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x110000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0204000010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x210000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS_LOCAL_DRAM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
+ "Deprecated": "1",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_HOP1_DRAM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00840007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x90000100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xCD",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
- "MSRIndex": "0x3F6",
- "MSRValue": "0x4",
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
- "SampleAfterValue": "100003",
- "TakenAlone": "1",
- "UMask": "0x1"
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "BriefDescription": "This event is deprecated. Refer to new event OCR.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Deprecated": "1",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x023C000100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xCD",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
- "MSRValue": "0x8",
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
- "SampleAfterValue": "50021",
- "TakenAlone": "1",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_MISS & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_MISS.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x083C0007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
}
-] \ No newline at end of file
+]
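
Every OCR/OFFCORE_RESPONSE entry above uses the same programming model its descriptions mention: event select 0xB7 or 0xBB with umask 0x1, plus a request/response filter written to MSR 0x1a6 or 0x1a7, which perf exposes as attr.config1. A minimal user-space sketch follows, assuming a raw PERF_TYPE_RAW encoding, a CPU/kernel that supports the offcore-response extra register, and one MSRValue copied from the entries above (0x3FBC000100, the PF_L3_RFO.L3_MISS.ANY_SNOOP filter); in practice the perf tool selects these events by the symbolic names generated from this JSON and performs the same encoding.

/* Sketch: count one OFFCORE_RESPONSE event with perf_event_open(2).
 * Assumptions: x86 CPU with the offcore response MSRs (0x1a6/0x1a7) and a
 * kernel that maps the offcore filter to perf_event_attr.config1.
 * The MSRValue below is copied from an entry above; adjust as needed.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;           /* EventCode 0xB7, UMask 0x1 */
	attr.config1 = 0x3FBC000100ULL; /* offcore response filter (MSRValue) */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
		     -1 /* any cpu */, -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %llu\n", (unsigned long long)count);

	close(fd);
	return 0;
}
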
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
index 05d13d53c374..3ab5e91a4c1c 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/other.json
@@ -1,463 +1,95 @@
[
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x18"
},
{
- "BriefDescription": "ALL_READS & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server michroarchtecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "Core cycles the core was throttled due to a pending power level request.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.THROTTLE",
+ "PublicDescription": "Core cycles the out-of-order engine was throttled due to a pending power level request.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
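The CORE_POWER.* entries added above all share EventCode 0x28 and differ only in UMask, so they need no extra MSR programming and can be requested as plain raw hardware events. The sketch below is a minimal, hypothetical example (not part of this patch) that counts CORE_POWER.LVL2_TURBO_LICENSE for the calling thread via perf_event_open(2), assuming the usual x86 raw encoding of (UMask << 8) | EventCode:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x2028;        /* (UMask 0x20 << 8) | EventCode 0x28: CORE_POWER.LVL2_TURBO_LICENSE */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        /* count on the calling thread, any CPU; glibc has no wrapper for this syscall */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... run the AVX-512-heavy workload of interest here ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("license level 2 cycles: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}

Once this JSON is built into perf, the same counter should also be reachable by its symbolic name, e.g. perf stat -e core_power.lvl2_turbo_license.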
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "BriefDescription": "Counts any other requests OTHER & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80408000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "BriefDescription": "ALL_READS & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F804007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
+ "EventCode": "0xCB",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
+ "SampleAfterValue": "203",
"UMask": "0x1"
},
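SampleAfterValue in these entries is the default sampling period perf applies to the event. Purely as an illustrative sketch (again outside this patch), HW_INTERRUPTS.RECEIVED could be opened in sampling mode by encoding EventCode 0xCB / UMask 0x1 as raw config 0x01cb and reusing the 203 period from the table; a real profiler would additionally mmap the fd and parse PERF_RECORD_SAMPLE records from the ring buffer:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x01cb;          /* (UMask 0x1 << 8) | EventCode 0xCB: HW_INTERRUPTS.RECEIVED */
        attr.sample_period = 203;      /* SampleAfterValue from the event table */
        attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
        attr.disabled = 1;

        /* system-wide on CPU 0; needs CAP_PERFMON/CAP_SYS_ADMIN or perf_event_paranoid <= 0 */
        fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }
        /* mmap() the fd here and consume PERF_RECORD_SAMPLE records before closing */
        close(fd);
        return 0;
}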
{
"BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xFE",
"EventName": "IDI_MISC.WB_DOWNGRADE",
"PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
@@ -465,8198 +97,1397 @@
"UMask": "0x4"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
+ "EventCode": "0xFE",
+ "EventName": "IDI_MISC.WB_UPGRADE",
+ "PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
"SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.ANY_RESPONSE have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
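Unlike ordinary events, the OCR.* entries carry a separate MSRValue because the offcore-response facility needs a request/response bit mask written to MSR 0x1A6 or 0x1A7 in addition to the 0xB7/0xBB event select. With the perf_event_open(2) interface that mask is passed in attr.config1, which the x86 PMU driver routes to the offcore-response MSR. The following hypothetical sketch (not part of this patch) counts OCR.ALL_DATA_RD.ANY_RESPONSE using the MSRValue 0x10491 shown above:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count = 0;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;
        attr.config = 0x01b7;        /* (UMask 0x1 << 8) | EventCode 0xB7: OFFCORE_RESPONSE_0 */
        attr.config1 = 0x10491;      /* MSRValue: ALL_DATA_RD request bits | ANY_RESPONSE */
        attr.disabled = 1;
        attr.exclude_kernel = 1;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);  /* current thread, any CPU */
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload of interest ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("all data reads, any response: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}

When the symbolic OCR.* name is used instead, perf derives the same config/config1 pair from this JSON automatically.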
{
- "BriefDescription": "ALL_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_F & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_M & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020491",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & ANY_RESPONSE have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.ANY_RESPONSE have any response type.",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_S & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F800807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & ANY_RESPONSE have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.ANY_RESPONSE",
+ "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020490",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.ANY_RESPONSE have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020120",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.ANY_RESPONSE have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_E.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x107F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F804007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x804007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1004007F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F800207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_NONE",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x8000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x4000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x2000207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & SUPPLIER_NONE & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00800207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800207F7",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.ANY_RESPONSE have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_E & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_M & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OCR.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020122",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x28",
- "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
- "PublicDescription": "Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
- "SampleAfterValue": "200003",
- "UMask": "0x7"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_F & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & ANY_RESPONSE have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_M & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Core cycles the core was throttled due to a pending power level request.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x28",
- "EventName": "CORE_POWER.THROTTLE",
- "PublicDescription": "Core cycles the out-of-order engine was throttled due to a pending power level request.",
- "SampleAfterValue": "200003",
- "UMask": "0x40"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_M & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000018000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020004",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts demand data reads have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT_E & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_MISS",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & ANY_RESPONSE have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_S & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT_F & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020001",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs) OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_NONE",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020002",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x18000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80408000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80408000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100408000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.OTHER.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80028000",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.SNOOP_MISS",
+ "EventName": "OCR.PF_L1D_AND_SW.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_READS & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.ANY_SNOOP",
+ "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020400",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xFE",
- "EventName": "IDI_MISC.WB_UPGRADE",
- "PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
- "SampleAfterValue": "100003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT_F & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020010",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OCR.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080408000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x32",
- "EventName": "SW_PREFETCH_ACCESS.T1_T2",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
+ "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "EventName": "OCR.PF_L2_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.ANY_SNOOP",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x1000020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_RFO & L3_HIT_F & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020020",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100408000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00804007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT & SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F800407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C07F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & ANY_RESPONSE have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00000107F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800028000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_E & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F802007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_S & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00801007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_S & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_E & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00800807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & SUPPLIER_NONE & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Number of PREFETCHT0 instructions executed.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x32",
- "EventName": "SW_PREFETCH_ACCESS.T0",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_M & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00800407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x28",
- "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
- "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server michroarchtecture). This includes high current AVX 512-bit instructions.",
- "SampleAfterValue": "200003",
- "UMask": "0x20"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
"MSRValue": "0x3F80400080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_F & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_F & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00802007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_S & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_M & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_E & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_S & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10000807F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & PMM_HIT_LOCAL_PMM & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80400004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_HIT_WITH_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01004007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.PF_L3_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all demand code reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_MISS",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_NONE",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.SNOOP_MISS",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10000407F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10000207F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200088000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_M & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Number of PREFETCHW instructions executed.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x32",
- "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
- "SampleAfterValue": "2000003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_M & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200040120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_E & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x28",
- "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
- "PublicDescription": "Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
- "SampleAfterValue": "200003",
- "UMask": "0x18"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080400001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_E & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_S & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Number of hardware interrupts received by the processor.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xCB",
- "EventName": "HW_INTERRUPTS.RECEIVED",
- "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
- "SampleAfterValue": "203",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000108000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & SUPPLIER_NONE & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400020002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020080",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
"EventName": "OCR.PF_L3_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0000010100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_E & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02003C0491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400048000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F803C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x00803C0490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & SUPPLIER_NONE & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80020120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800200491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_S & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_S.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000100400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_M & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_M.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000040010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200020001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04002007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_E & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08007C0020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs) DEMAND_RFO & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080020400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x10100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.ANY_SNOOP",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80400100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "ALL_PF_RFO & L3_HIT_F & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_F.ANY_SNOOP",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80200120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80400100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L3_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x01003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100400100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.ANY_SNOOP",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x3F80020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x1000020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_M.NO_SNOOP_NEEDED",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x800020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts any other requests OTHER & L3_HIT & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT.HITM_OTHER_CORE",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x10003C8000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x400020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_M.HIT_OTHER_CORE_FWD",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x100020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200100100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_F & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_F.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000200001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Number of PREFETCHNTA instructions executed.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x32",
- "EventName": "SW_PREFETCH_ACCESS.NTA",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100200004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads DEMAND_CODE_RD & L3_HIT_M & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_M.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800040004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & L3_HIT_S & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand code reads",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080100004",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_F.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200200400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_E & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080080122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs PF_L2_RFO & L3_HIT_S & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_RFO.L3_HIT_S.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100100020",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & L3_HIT_F & SNOOP_NONE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x08003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts demand data reads DEMAND_DATA_RD & L3_HIT_M & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_DATA_RD.L3_HIT_M.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100040001",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_S & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_S.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800100010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & HITM_OTHER_CORE",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x1000020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080490",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts any other requests OTHER & L3_HIT_F & NO_SNOOP_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.OTHER.L3_HIT_F.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100208000",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_S & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F801007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0200080002",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x04003C0080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x200020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
},
{
"BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
"EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_F.SNOOP_NONE",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0080200100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400122",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs PF_L3_RFO & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_RFO.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080100",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads PF_L3_DATA_RD & L3_HIT_M & ANY_SNOOP",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L3_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x3F80040080",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_PF_RFO & PMM_HIT_LOCAL_PMM & SNOOP_NOT_NEEDED",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_PF_RFO.PMM_HIT_LOCAL_PMM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0100400120",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_DATA_RD & SUPPLIER_NONE & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_DATA_RD.SUPPLIER_NONE.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800020491",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests PF_L1D_AND_SW & L3_HIT_E & HIT_OTHER_CORE_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L1D_AND_SW.L3_HIT_E.HIT_OTHER_CORE_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0800080400",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads PF_L2_DATA_RD & L3_HIT_F & HIT_OTHER_CORE_NO_FWD",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.PF_L2_DATA_RD.L3_HIT_F.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x0400200010",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
- },
- {
- "BriefDescription": "ALL_READS & L3_HIT_S & SNOOP_MISS",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xB7, 0xBB",
- "EventName": "OCR.ALL_READS.L3_HIT_S.SNOOP_MISS",
+ "EventName": "OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE",
"MSRIndex": "0x1a6,0x1a7",
- "MSRValue": "0x02001007F7",
- "Offcore": "1",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "MSRValue": "0x80020100",
"SampleAfterValue": "100003",
"UMask": "0x1"
}
-]
\ No newline at end of file
+]
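
For context on how the OCR.* entries in this file are consumed: perf programs an offcore-response event by putting the 0xB7/0xBB event select with umask 0x1 into the raw event config, and the listed MSRValue into config1, which it writes to MSR_OFFCORE_RSP_0/1 (0x1a6/0x1a7). Below is a minimal C sketch using perf_event_open(), assuming a Cascade Lake X machine and borrowing the MSRValue from the SNOOP_NONE entry above; it is an illustration, not code from this commit.

/*
 * Sketch: count an offcore-response event such as
 * OCR.PF_L3_RFO.SUPPLIER_NONE.SNOOP_NONE directly via perf_event_open().
 * The event select/umask pair (0xB7/0x1) goes in attr.config and the JSON
 * "MSRValue" (here 0x80020100, taken from the entry above) goes in
 * attr.config1, which perf writes to MSR_OFFCORE_RSP_0 (0x1a6).
 * Assumes a Cascade Lake X CPU and sufficient perf_event privileges.
 */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x01b7;		/* umask 0x1, event 0xB7 (OFFCORE_RESPONSE_0) */
	attr.config1 = 0x80020100ULL;	/* request/response bits for MSR_OFFCORE_RSP_0 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */,
		     -1 /* no group */, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("offcore responses: %lld\n", count);
	close(fd);
	return 0;
}

The same configuration can be requested from the command line with a raw event (for example via perf stat on a cpu/event=0xb7,umask=0x1,offcore_rsp=.../ style specifier) once the regenerated event tables are installed.
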
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
index 5ec668f46ac1..0f06e314fe36 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/pipeline.json
@@ -1,154 +1,150 @@
[
{
- "BriefDescription": "Far branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "All (macro) branch instructions retired.",
"Errata": "SKL091",
"EventCode": "0xC4",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PEBS": "1",
- "PublicDescription": "This event counts far branch instructions retired.",
- "SampleAfterValue": "100007",
- "UMask": "0x40"
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "4",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
"UMask": "0x4"
},
{
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x0E",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "UMask": "0x20"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "10",
- "EventCode": "0xC2",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "Invert": "1",
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x3C",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003"
+ "BriefDescription": "Far branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "This event counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "BriefDescription": "Cycles without actually retired uops.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0xC2",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "Invert": "1",
- "PublicDescription": "This event counts cycles without actually retired uops.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
"UMask": "0x2"
},
{
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "Invert": "1",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "BriefDescription": "Return instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "AnyThread": "1",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x0D",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "BriefDescription": "Taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "4",
- "EventCode": "0xA8",
- "EventName": "LSD.CYCLES_4_UOPS",
- "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
- "SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 1",
- "CounterHTOff": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "AnyThread": "1",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x3C",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "25003",
- "UMask": "0x1"
+ "BriefDescription": "Speculative mispredicted indirect branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
},
{
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL091",
- "EventCode": "0xC4",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "PEBS": "1",
- "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
- "SampleAfterValue": "100007",
- "UMask": "0x2"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009"
},
{
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x14",
- "EventName": "ARITH.DIVIDER_ACTIVE",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
"UMask": "0x1"
},
{
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x3C",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "25003",
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
"UMask": "0x2"
},
{
"BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xC5",
"EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
"PEBS": "1",
@@ -156,222 +152,176 @@
"UMask": "0x20"
},
{
- "BriefDescription": "Increments whenever there is an update to the LBR array.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xCC",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
- "SampleAfterValue": "2000003",
- "UMask": "0x20"
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 0",
- "CounterHTOff": "Fixed counter 0",
- "EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "25003",
"UMask": "0x1"
},
{
- "BriefDescription": "Conditional branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL091",
- "EventCode": "0xC4",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PEBS": "1",
- "PublicDescription": "This event counts conditional branch instructions retired.",
- "SampleAfterValue": "400009",
+ "AnyThread": "1",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "25003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x0D",
- "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
- "SampleAfterValue": "2000003",
- "UMask": "0x80"
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
},
{
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x3"
},
{
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "Invert": "1",
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "25003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA6",
- "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
- "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
+ "AnyThread": "1",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "5",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "UMask": "0x5"
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "SampleAfterValue": "100007"
},
{
- "BriefDescription": "Mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "EventCode": "0xC5",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "SampleAfterValue": "400009",
- "UMask": "0x4"
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "2",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"EventCode": "0x3C",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "25003",
- "UMask": "0x1"
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
},
{
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"EventCode": "0x3C",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "25003",
- "UMask": "0x2"
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "BriefDescription": "All (macro) branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL091",
- "EventCode": "0xC4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009"
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "UMask": "0x10"
},
{
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "UMask": "0x4"
+ "UMask": "0xc"
},
{
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "UMask": "0x8"
+ "UMask": "0x5"
},
{
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "UMask": "0x10"
+ "UMask": "0x14"
},
{
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "UMask": "0x20"
+ "UMask": "0x4"
},
{
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "UMask": "0x40"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "UMask": "0x80"
+ "UMask": "0x4"
},
{
"BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xA6",
"EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
"PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
@@ -379,44 +329,30 @@
"UMask": "0x8"
},
{
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0x5E",
- "EventName": "RS_EVENTS.EMPTY_END",
- "Invert": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x10"
},
{
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0x0E",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "Invert": "1",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x40"
},
{
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA8",
- "EventName": "LSD.UOPS",
- "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
"BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x87",
"EventName": "ILD_STALL.LCP",
"PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
@@ -424,122 +360,108 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Taken branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL091",
- "EventCode": "0xC4",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PEBS": "1",
- "PublicDescription": "This event counts taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "UMask": "0x20"
- },
- {
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x0E",
- "EventName": "UOPS_ISSUED.ANY",
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Resource-related stall cycles",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xa2",
- "EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "Counts resource-related stall cycles.",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "CounterMask": "20",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Number of all retired NOP instructions.",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "UMask": "0x14"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA6",
- "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
- "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA2",
- "EventName": "RESOURCE_STALLS.SB",
- "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "CounterMask": "10",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "Invert": "1",
+ "PEBS": "2",
+ "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
"SampleAfterValue": "2000003",
- "UMask": "0x8"
+ "UMask": "0x1"
},
{
- "BriefDescription": "Not taken branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL091",
- "EventCode": "0xC4",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "This event counts not taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "UMask": "0x10"
+ "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "12",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
"SampleAfterValue": "2000003",
- "UMask": "0xc"
+ "UMask": "0x1"
},
{
- "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA6",
- "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
- "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "UMask": "0x4"
+ "UMask": "0x1"
},
{
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL091, SKL044",
- "EventCode": "0xC0",
- "EventName": "INST_RETIRED.ANY_P",
- "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "SampleAfterValue": "2000003"
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "BriefDescription": "Counts the number of x87 uops dispatched.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.X87",
- "PublicDescription": "Counts the number of x87 uops executed.",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
"BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x4C",
"EventName": "LOAD_HIT_PRE.SW_PF",
"PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
@@ -547,224 +469,195 @@
"UMask": "0x1"
},
{
- "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC5",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "PEBS": "1",
- "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
- "SampleAfterValue": "400009",
- "UMask": "0x2"
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.THREAD",
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "3",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"CounterMask": "1",
- "EventCode": "0xA8",
- "EventName": "LSD.CYCLES_ACTIVE",
- "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
- "SampleAfterValue": "2000003",
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x0D",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x3C",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "25003",
+ "BriefDescription": "Resource-related stall cycles",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "Counts resource-related stall cycles.",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "8",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
"SampleAfterValue": "2000003",
"UMask": "0x8"
},
{
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "Counter": "1",
- "CounterHTOff": "1",
- "Errata": "SKL091, SKL044",
- "EventCode": "0xC0",
- "EventName": "INST_RETIRED.PREC_DIST",
- "PEBS": "2",
- "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x20"
},
{
- "BriefDescription": "Not taken branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL091",
- "EventCode": "0xc4",
- "EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "PublicDescription": "This event counts not taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "UMask": "0x10"
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "3",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
"SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "UMask": "0x1"
},
{
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
"SampleAfterValue": "2000003",
- "UMask": "0x2"
+ "UMask": "0x1"
},
{
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "4",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC1",
- "EventName": "OTHER_ASSISTS.ANY",
- "SampleAfterValue": "100003",
- "UMask": "0x3f"
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA6",
- "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
"SampleAfterValue": "2000003",
- "UMask": "0x40"
+ "UMask": "0x8"
},
{
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "16",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
"SampleAfterValue": "2000003",
"UMask": "0x10"
},
{
- "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xCC",
- "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "UMask": "0x40"
+ "UMask": "0x20"
},
{
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "4",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x40"
},
{
- "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x59",
- "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
- "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x80"
},
{
- "BriefDescription": "Retirement slots used.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "Counts the retirement slots used.",
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Number of uops executed from any thread.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x0E",
- "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "Return instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Errata": "SKL091",
- "EventCode": "0xC4",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PEBS": "1",
- "PublicDescription": "This event counts return instructions retired.",
- "SampleAfterValue": "100007",
- "UMask": "0x8"
- },
- {
"BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "2",
"EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
@@ -772,198 +665,159 @@
"UMask": "0x2"
},
{
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC5",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PEBS": "1",
- "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "UMask": "0x1"
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
- "Counter": "0,2,3",
- "CounterHTOff": "0,2,3",
- "CounterMask": "10",
- "Errata": "SKL091, SKL044",
- "EventCode": "0xC0",
- "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
- "Invert": "1",
- "PEBS": "2",
- "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x2"
},
{
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x07",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "SampleAfterValue": "100003",
- "UMask": "0x1"
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
"CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "AnyThread": "1",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 1",
- "CounterHTOff": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
"SampleAfterValue": "2000003",
- "UMask": "0x2"
- },
- {
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xE6",
- "EventName": "BACLEARS.ANY",
- "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
- "SampleAfterValue": "100003",
"UMask": "0x1"
},
{
- "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x03",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
- "SampleAfterValue": "100003",
- "UMask": "0x2"
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "AnyThread": "1",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x3C",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "25003",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 2",
- "CounterHTOff": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
"SampleAfterValue": "2000003",
- "UMask": "0x3"
+ "UMask": "0x1"
},
{
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC5",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "SampleAfterValue": "400009"
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x03",
- "EventName": "LD_BLOCKS.NO_SR",
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "SampleAfterValue": "100003",
- "UMask": "0x8"
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xC3",
- "EventName": "MACHINE_CLEARS.SMC",
- "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
- "SampleAfterValue": "100003",
- "UMask": "0x4"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "AnyThread": "1",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x3C",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003"
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x5E",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
"SampleAfterValue": "2000003",
"UMask": "0x1"
},
{
- "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EdgeDetect": "1",
- "EventCode": "0x3C",
- "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
- "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
- "SampleAfterValue": "100007"
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "BriefDescription": "All (macro) branch instructions retired.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3",
- "Errata": "SKL091",
- "EventCode": "0xC4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
+ "BriefDescription": "Number of macro-fused uops retired. (non precise)",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "PublicDescription": "Counts the number of macro-fused uops retired. (non precise)",
+ "SampleAfterValue": "2000003",
"UMask": "0x4"
},
{
- "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xA6",
- "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
- "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "Counts the retirement slots used.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "Number of uops executed on the core.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xB1",
- "EventName": "UOPS_EXECUTED.CORE",
- "PublicDescription": "Number of uops executed from any thread.",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
- "EventCode": "0xA3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
"SampleAfterValue": "2000003",
- "UMask": "0x1"
+ "UMask": "0x2"
}
-]
\ No newline at end of file
+]
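
Every entry in these pmu-events JSON files, both the core pipeline events above and the uncore CHA events added below, is a flat object built from a small set of keys (EventName, EventCode, UMask, SampleAfterValue, PublicDescription, plus uncore-only fields such as Unit, PerPkg and Filter); at build time perf's jevents tool converts each array into C tables. As a minimal illustrative sketch of that schema (the fallback labels below are placeholders, not values taken from the patch), the following Python loads one of these files with the standard json module and prints the selector fields of every event:

    import json
    import sys

    # Illustrative default path; any pmu-events JSON file added or updated by
    # this patch follows the same flat schema.
    path = sys.argv[1] if len(sys.argv) > 1 else \
        "tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json"

    with open(path) as f:
        events = json.load(f)  # each file is a single JSON array of event objects

    for ev in events:
        # EventCode and UMask may be absent (e.g. fixed-counter or clocktick
        # events), and Unit is only present for uncore events, so use fallback
        # placeholders instead of assuming every key exists.
        print("{:<45} code={:<6} umask={:<6} unit={}".format(
            ev["EventName"],
            ev.get("EventCode", "-"),
            ev.get("UMask", "-"),
            ev.get("Unit", "core")))

Run against the file added below, this lists each CHA event with its EventCode, UMask and Unit; the same loop works unchanged on the core pipeline.json whose diff ends here.
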
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
new file mode 100644
index 000000000000..2c880535cc82
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-cache.json
@@ -0,0 +1,10764 @@
+[
+ {
+ "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.MMIO_READ",
+ "Filter": "config1=0x40040e33",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.MMIO_WRITE",
+ "Filter": "config1=0x40041e33",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.UNCACHEABLE",
+ "Filter": "config1=0x40e33",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_FULL",
+ "Filter": "config1=0x41833",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+ "Filter": "config1=0x41a33",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Intermediate bypass Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the intermediate bypass.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Not Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the full bypass.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Uncore cache clock ticks",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the clock controlling the uncore caching and home agent (CHA).",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xC0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C1 State",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C1_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C1 Transition",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C6 State",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C6_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C6 Transition",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; GV",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.GV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Cycle with Multiple Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xe2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Single Snoop",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xe1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Snoop to Remote Node",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xe4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Single Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Core Request to Remote Node",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Single Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Eviction to Remote Node",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Single External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; External Snoop to Remote Node",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1F",
+ "EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "EventCode": "0xAE",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "EventCode": "0xAE",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; No SF/LLC HitS/F and op is RdInvOwn",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; SF/LLC HitS/F and op is RdInvOwn",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Deallocate HitME$ on Reads without RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "PerPkg": "1",
+ "PublicDescription": "Received RspFwdI* for a local request, but converted HitME$ to SF entry",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "PublicDescription": "Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache to SHARed",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued; ISOCH",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Full Line MIG",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Full Line",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Partial Non-ISOCH",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Partial MIG",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.; Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Partial",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.INVITOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations dropped due to IODC Full",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.IODCFULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IDOC allocation dropped due to OSB gate",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.OSBGATED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to any reason",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to conflicting transaction",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.SNPOUT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoE",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoI",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbPushMtoI",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Moved to Cbo section",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Local",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Remote",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
+ "UMask": "0x91",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; External Snoop Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "UMask": "0x9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Write Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x5",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in E State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in F State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in M State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in E State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in F State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in M State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in F State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; CV0 Prefetch Miss",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; CV0 Prefetch Victim",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state.",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in IODC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.IODC",
+ "PerPkg": "1",
+ "PublicDescription": "2LM related events; Counts the number of times CHA saw NM Set conflict in IODC",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "NM evictions due to another read to the same near memory set in the LLC.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "PerPkg": "1",
+ "PublicDescription": "NM evictions due to another read to the same near memory set in the SF.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw NM Set conflict in TOR",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "No Reject in the CHA due to a pending read to the same near memory set in the TOR.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory mode related events; Counts the number of times CHA saw NM Set conflict in TOR and the transaction was rejected",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR_REJECT",
+ "PerPkg": "1",
+ "PublicDescription": "Rejects in the CHA due to a pending read to the same near memory set in the TOR.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC0_SMI2",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC1_SMI3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC2_SMI4",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC3_SMI5",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; MC0_SMI0",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; MC1_SMI1",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a unit on this socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a remote socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write requests",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write Requests from a unit on this socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Writes Remote",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; RRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; WBQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; AD REQ on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; AD RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Non UPI AK Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL NCB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL NCS on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL WB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Non UPI IV Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Allow Snoop",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; ANY0",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; HA",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; LLC Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; PhyAddr Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; SF Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; AD REQ on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; AD RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; Non UPI AK Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL NCB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL NCS on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL WB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; Non UPI IV Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; AD REQ on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; AD RSP on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Non UPI AK Request",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL NCB on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL NCS on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL RSP on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL WB on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Non UPI IV Request",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; ANY0",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; HA",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; ANY0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; HA",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; RRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; WBQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; AD REQ on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; AD RSP on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Non UPI AK Request",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL NCB on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL NCS on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL RSP on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL WB on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Non UPI IV Request",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Allow Snoop",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; ANY0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; HA",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; LLC Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; PhyAddr Match",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; SF Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC OR SF Way",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; AD REQ on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; AD RSP on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Non UPI AK Request",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL NCB on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL NCS on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL RSP on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL WB on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Non UPI IV Request",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Allow Snoop",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; ANY0",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; HA",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; LLC Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; PhyAddr Match",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; SF Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; AD REQ on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; AD RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Non UPI AK Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL NCB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL NCS on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL WB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Non UPI IV Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Allow Snoop",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; ANY0",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; HA",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; LLC Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; PhyAddr Match",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; SF Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; AD REQ on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; AD RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Non UPI AK Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL NCB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL NCS on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL WB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Non UPI IV Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Allow Snoop",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; ANY0",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; HA",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; LLC Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; PhyAddr Match",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; SF Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; All",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast snoop for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from local sockets.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast snoops issued by the HA.This filter includes only requests coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Directed snoops for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of directed snoops issued by the HA. This filter includes only requests coming from local sockets.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Directed snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of directed snoops issued by the HA. This filter includes only requests coming from remote sockets.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the local socket.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the remote socket.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspCnflct* Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspCnflct* Snoop Response was received. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent. This triggers conflict resolution hardware. This covers both the opcode RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspFwd",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspI Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspIFwd Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspS",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : RspS : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspSFwd Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to its home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to its home socket to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Rsp*WB Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to its home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0xff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x15",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local iA and IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_IO_IA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally initiated requests",
+ "UMask": "0x35",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from Local",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x25",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; SF/LLC Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hit (Not a Miss)",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; HITs (hit is defined to be not a miss [see below], as a result for any request allocated into the TOR, one of either HIT or MISS must be true)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally initiated requests from iA Cores",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally generated IO traffic",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM misses from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "Filter": "config1=0x49033",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM request is used by IIO to request a data write without first reading the data for ownership.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur misses from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR",
+ "Filter": "config1=0x43C33",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RdCur requests and miss the LLC. A RdCur request is used by IIO to read data without changing state.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests a cache line to be cached in E state with the intent to modify.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; IPQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; IRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Miss",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; Misses. (a miss is defined to be any transaction from the IRQ, PRQ, RRQ, IPQ or (in the victim case) the ISMQ, that required the CHA to spawn a new UPI/SMI3 request on the UPI fabric (including UPI snoops and/or any RD/WR to a local memory controller, in the event that the CHA is the home node)). Basically, if the LLC/SF/MLC complex were not able to service the request without involving another agent...it is a miss. If only IDI snoops were required, it is not a miss (that means the SF/MLC com",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; PRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x60",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All remotely generated requests",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x17",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x27",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hit (Not a Miss)",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; HITs (hit is defined to be not a miss [see below], as a result for any request allocated into the TOR, one of either HIT or MISS must be true)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; All locally initiated requests from iA Cores",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; All locally generated IO traffic",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM Misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "Filter": "config1=0x49033",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM is used by IIO to request a data write without first reading the data for ownership.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RDCUR misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR",
+ "Filter": "config1=0x43C33",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RdCur requests that miss the LLC. A RdCur request is used by IIO to read data without changing state.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests data to be cached in E state with the intent to modify.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; IRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; Misses. (a miss is defined to be any transaction from the IRQ, PRQ, RRQ, IPQ or (in the victim case) the ISMQ, that required the CHA to spawn a new UPI/SMI3 request on the UPI fabric (including UPI snoops and/or any RD/WR to a local memory controller, in the event that the CHA is the home node)). Basically, if the LLC/SF/MLC complex were not able to service the request without involving another agent...it is a miss. If only IDI snoops were required, it is not a miss (that means the SF/MLC com",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; PRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; AD REQ Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; AD RSP VN0 Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL NCB Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL NCS Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL RSP Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL DRS Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; VN0 Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; VNA Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD REQ VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD RSP VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCB VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCS VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL RSP VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL DRS VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD VNA Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL VNA Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI; Pushed to LLC",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was received WbPushMtoI; Counts the number of times when the CHA was able to push WbPushMToI to LLC",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI; Pushed to Memory",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was received WbPushMtoI; Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC0_SMI2",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC1_SMI3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC2_SMI4",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC3_SMI5",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC0_SMI0",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC1_SMI1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspIFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response I to Fwd F/E",
+ "UMask": "0xe4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response I to Fwd M",
+ "UMask": "0xf0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspSFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response S to Fwd F/E",
+ "UMask": "0xe2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspSFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response S to Fwd M",
+ "UMask": "0xe8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspHitFSE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response any to Hit F/S/E",
+ "UMask": "0xe1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspIFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response I to Fwd F/E",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspIFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response I to Fwd M",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspSFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response S to Fwd F/E",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspSFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response S to Fwd M",
+ "UMask": "0x48",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspHitFSE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response any to Hit F/S/E",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response I to Fwd F/E",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response I to Fwd M",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response S to Fwd F/E",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response S to Fwd M",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspHitFSE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response any to Hit F/S/E",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspIFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response I to Fwd F/E",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspIFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response I to Fwd M",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspSFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response S to Fwd F/E",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspSFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response S to Fwd M",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspHitFSE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response any to Hit F/S/E",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CLOCKTICKS",
+ "Deprecated": "1",
+ "EventName": "UNC_C_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_FAST_ASSERTED.HORZ",
+ "Deprecated": "1",
+ "EventCode": "0xA5",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.ANY",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x91",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x2f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SRC_THRTL",
+ "Deprecated": "1",
+ "EventCode": "0xA4",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.EVICT",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.HIT",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.MISS",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.PRQ",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REM_ALL",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.RRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.RRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x60",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WBQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WBQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Deprecated": "1",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Deprecated": "1",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Deprecated": "1",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CMS_CLOCKTICKS",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_H_CLOCK",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C1_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.C1_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.C1_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C6_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.C6_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.C6_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.GV",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.GV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "UMask": "0xe2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_ONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "UMask": "0xe1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.ANY_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xe4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_ONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.CORE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EVICT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_ONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EXT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_COUNTER0_OCCUPANCY",
+ "Deprecated": "1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_H_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.SNP",
+ "Deprecated": "1",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.HA",
+ "Deprecated": "1",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.TOR",
+ "Deprecated": "1",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Deprecated": "1",
+ "EventCode": "0xAE",
+ "EventName": "UNC_H_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Deprecated": "1",
+ "EventCode": "0xAE",
+ "EventName": "UNC_H_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.EX_RDS",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.WBMTOE",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_LOOKUP.READ",
+ "Deprecated": "1",
+ "EventCode": "0x5E",
+ "EventName": "UNC_H_HITME_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_LOOKUP.WRITE",
+ "Deprecated": "1",
+ "EventCode": "0x5E",
+ "EventName": "UNC_H_HITME_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Deprecated": "1",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Deprecated": "1",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Deprecated": "1",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_HITME_MISS.SHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.SHARED",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "Deprecated": "1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_H_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "Deprecated": "1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_H_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "Deprecated": "1",
+ "EventCode": "0x59",
+ "EventName": "UNC_H_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Deprecated": "1",
+ "EventCode": "0x59",
+ "EventName": "UNC_H_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.FULL_MIG",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.INVITOM",
+ "Deprecated": "1",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IODC_ALLOC.INVITOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.IODCFULL",
+ "Deprecated": "1",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IODC_ALLOC.IODCFULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.OSBGATED",
+ "Deprecated": "1",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IODC_ALLOC.OSBGATED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.ALL",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.SNPOUT",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.SNPOUT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.WBPUSHMTOI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.CV0_PREF_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.CV0_PREF_VIC",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RFO_HIT_S",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.WC_ALIASING",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_OSB",
+ "Deprecated": "1",
+ "EventCode": "0x55",
+ "EventName": "UNC_H_OSB",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from local home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from remote home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from local home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from remote home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "Deprecated": "1",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "Deprecated": "1",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "Deprecated": "1",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "Deprecated": "1",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.AD",
+ "Deprecated": "1",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.AK",
+ "Deprecated": "1",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.BL",
+ "Deprecated": "1",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.IV",
+ "Deprecated": "1",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "Deprecated": "1",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "Deprecated": "1",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "Deprecated": "1",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "Deprecated": "1",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.PRQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.RRQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.WBQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.ANY_IPQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x25",
+ "EventName": "UNC_H_RxC_ISMQ1_REJECT.ANY_ISMQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x25",
+ "EventName": "UNC_H_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_H_RxC_ISMQ1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Deprecated": "1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_H_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.ANY_PRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.ANY_RRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.ANY_WBQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.IFV",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.E_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x3D",
+ "EventName": "UNC_H_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.M_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x3D",
+ "EventName": "UNC_H_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.S_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x3D",
+ "EventName": "UNC_H_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.ALL",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.BCST_LOC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.BCST_REM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.DIRECT_LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.DIRECT_REM",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPI",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPS",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.IV",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.IV",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.IV",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.IV",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "Deprecated": "1",
+ "EventCode": "0xAC",
+ "EventName": "UNC_H_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "Deprecated": "1",
+ "EventCode": "0xAC",
+ "EventName": "UNC_H_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Deprecated": "1",
+ "EventCode": "0x56",
+ "EventName": "UNC_H_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Deprecated": "1",
+ "EventCode": "0x56",
+ "EventName": "UNC_H_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0xe4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0xf0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0xe2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0xe8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0xe1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ }
+]
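
Note (not part of the patch itself): every deprecated UNC_H_* entry above names its UNC_CHA_* replacement only inside its BriefDescription text, so the old-to-new mapping is recoverable mechanically from any of these JSON files. A minimal sketch follows, assuming a local checkout and a hypothetical file path; it is plain Python, not kernel code, and only relies on the EventName, Deprecated and BriefDescription fields visible in the entries above.

```python
# Sketch: extract {deprecated UNC_H_* name: replacement UNC_CHA_* name}
# from a perf uncore event JSON file. The path is a placeholder; point it
# at one of the files under tools/perf/pmu-events/arch/x86/cascadelakex/.
import json
import re
from pathlib import Path

EVENT_FILE = Path("path/to/uncore-events.json")  # hypothetical path

def deprecated_aliases(path: Path) -> dict:
    """Return {old_event_name: new_event_name} for entries marked Deprecated."""
    aliases = {}
    for entry in json.loads(path.read_text()):
        if entry.get("Deprecated") != "1":
            continue
        # The replacement name is embedded in the BriefDescription text, e.g.
        # "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.ALL".
        match = re.search(r"Refer to new event (\S+)", entry.get("BriefDescription", ""))
        if match:
            aliases[entry["EventName"]] = match.group(1)
    return aliases

if __name__ == "__main__":
    for old, new in sorted(deprecated_aliases(EVENT_FILE).items()):
        print(f"{old} -> {new}")
```
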
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json
new file mode 100644
index 000000000000..725780fb3990
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-interconnect.json
@@ -0,0 +1,11334 @@
+[
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Snoops",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests.",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "Total IRP occupancy of inbound read and write requests. This is effectively the sum of read occupancy and write occupancy.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "IRP Clocks",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "PublicDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline to coherent memory, without a RFO. PCIITOM is a speculative Invalidate to Modified command that requests ownership of the cacheline and does not move data from the mesh to IRP cache.",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline to coherent memory. RFO is a Read For Ownership command that requests ownership of the cacheline and moves data from the mesh to IRP cache.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; WbMtoI",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound read requests to coherent memory, received by the IRP and inserted into the Fire and Forget queue (FAF), a queue used for processing inbound reads in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue.",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy of the IRP Fire and Forget (FAF) queue, a queue used for processing inbound reads in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "All Inserts Inbound (p2p + faf + cset)",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.UNKNOWN",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Lost Forward",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Requests",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_P2P_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "P2P requests from the ITC",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Occupancy",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_P2P_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "P2P B & S Queue Occupancy",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P completions",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if local only",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if local and target matches",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P Message",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P reads",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; Match if remote only",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if remote and target matches",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P Writes",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
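The combined UNC_I_SNOOP_RESP.ALL_* umasks above appear to be the bitwise OR of one line-state bit (MISS, HIT_I, HIT_ES or HIT_M) with the three snoop-type bits (SNPCODE, SNPDATA, SNPINV). A minimal Python sketch checking that against the umask values listed here; the bit-field split is inferred from the values, not stated in the file:

    STATE = {"MISS": 0x1, "HIT_I": 0x2, "HIT_ES": 0x4, "HIT_M": 0x8}
    SNOOP_TYPES = 0x10 | 0x20 | 0x40  # SNPCODE | SNPDATA | SNPINV
    ALL = {"ALL_MISS": 0x71, "ALL_HIT_I": 0x72, "ALL_HIT_ES": 0x74,
           "ALL_HIT_M": 0x78, "ALL_HIT": 0x7e}
    assert ALL["ALL_MISS"] == STATE["MISS"] | SNOOP_TYPES
    assert ALL["ALL_HIT_I"] == STATE["HIT_I"] | SNOOP_TYPES
    assert ALL["ALL_HIT_ES"] == STATE["HIT_ES"] | SNOOP_TYPES
    assert ALL["ALL_HIT_M"] == STATE["HIT_M"] | SNOOP_TYPES
    # ALL_HIT covers every hit state (I, E/S, M) for every snoop type
    assert ALL["ALL_HIT"] == STATE["HIT_I"] | STATE["HIT_ES"] | STATE["HIT_M"] | SNOOP_TYPES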
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
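The UNC_I_TRANSACTIONS descriptions above note that the request-type umask bits are OR-reduced, so several request types can be counted together in one counter by OR-ing their umasks. A short Python sketch of that combination using the umask values listed above; the combined values are illustrative, not events defined in this file:

    UMASK = {"READS": 0x1, "WRITES": 0x2, "RD_PREF": 0x4, "WR_PREF": 0x8,
             "ATOMIC": 0x10, "OTHER": 0x20}
    reads_and_writes = UMASK["READS"] | UMASK["WRITES"]  # 0x3: demand reads or writes
    all_types = 0
    for bit in UMASK.values():
        all_types |= bit  # 0x3f: any inbound request type
    print(hex(reads_and_writes), hex(all_types))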
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "EventCode": "0xB",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_TxR2_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x1B",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0xC",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
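As the UNC_I_TxS_REQUEST_OCCUPANCY description above says, the accumulated occupancy can be combined with the matching allocations (inserts) count to estimate the average latency of outbound requests. A minimal Python sketch of that arithmetic; the counter values are hypothetical placeholders, and pairing the occupancy with a specific inserts event is left as an assumption:

    # Average latency = accumulated occupancy / number of allocations,
    # both measured over the same interval (values below are hypothetical).
    occupancy_sum = 1_200_000  # request-cycles accumulated by the occupancy event
    inserts = 30_000           # outbound requests allocated in the same interval
    avg_latency_cycles = occupancy_sum / inserts
    print(f"average outbound request latency: {avg_latency_cycles:.1f} uncore cycles")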
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts traffic in which the M2M (Mesh to Memory) to iMC (Memory Controller) bypass was not taken",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Taken",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Not Taken",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Taken",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles - at UCLK",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when messages were sent direct to core (bypassing the CHA)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts reads in which direct to core transactions (which would have bypassed the CHA) were overridden",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel(R) UPI transactions were overridden",
+ "EventCode": "0x28",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts reads in which direct to Intel(R) Ultra Path Interconnect (UPI) transactions (which would have bypassed the CHA) were overridden",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel(R) UPI was disabled",
+ "EventCode": "0x27",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the ability to send messages direct to the Intel(R) Ultra Path Interconnect (bypassing the CHA) was disabled",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to the Intel(R) UPI",
+ "EventCode": "0x26",
+ "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when messages were sent direct to the Intel(R) Ultra Path Interconnect (bypassing the CHA)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel(R) UPI was overridden",
+ "EventCode": "0x29",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a read message that was sent direct to the Intel(R) Ultra Path Interconnect (bypassing the CHA) was overridden",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in A State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in I State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in L State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in S State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in A State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in I State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in L State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in S State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in Any State (A, I, S or unused)",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in the A (SnoopAll) state, indicating the cacheline is stored in another socket in any state, and we must snoop the other sockets to make sure we get the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the I (Invalid) state indicating the cacheline is not stored in another socket, and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the S (Shared) state indicating the cacheline is either stored in another socket in the S(hared) state , and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
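The directory lookup events above can be combined into a simple derived ratio, for example the share of lookups that found the line in the A (SnoopAll) state and therefore required snooping the other sockets. A small Python sketch, with hypothetical counter values standing in for numbers read from perf:

    # Share of multi-socket directory lookups that required cross-socket snoops,
    # from UNC_M2M_DIRECTORY_LOOKUP.STATE_A and UNC_M2M_DIRECTORY_LOOKUP.ANY.
    lookups_any = 500_000      # hypothetical count for .ANY
    lookups_state_a = 40_000   # hypothetical count for .STATE_A
    snoop_all_ratio = lookups_state_a / lookups_any if lookups_any else 0.0
    print(f"{snoop_all_ratio:.1%} of directory lookups required cross-socket snoops")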
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in A State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in I State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in L State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in S State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in A State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in I State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in L State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in S State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from A (SnoopAll) to I (Invalid)",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from A (SnoopAll) to S (Shared)",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory to a new state",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from I (Invalid) to A (SnoopAll)",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from I (Invalid) to S (Shared)",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from S (Shared) to A (SnoopAll)",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from S (Shared) to I (Invalid)",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller).",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC; All, regardless of priority.",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TRANSGRESS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC; Critical Priority",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller). It only counts normal priority non-isochronous reads.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Read requests to Intel(R) Optane(TM) DC persistent memory issued to the iMC from M2M",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Reads Issued to iMC; All, regardless of priority.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Writes to iMC issued",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues writes to the iMC (Memory Controller).",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TRANSGRESS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; Full Line Non-ISOCH",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; ISOCH Full Line",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues partial writes to the iMC (Memory Controller). It only counts normal priority non-isochronous writes.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; ISOCH Partial",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write requests to Intel(R) Optane(TM) DC persistent memory issued to the iMC from M2M",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches; MC Match",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches; Mesh Match",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_PMM_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_PMM_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full",
+ "EventCode": "0x53",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch requests that got turn into a demand request",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) promotes a outstanding request in the prefetch queue due to a subsequent demand read request that entered the M2M with the same address. Explanatory Side Note: The Prefetch queue is made of CAM (Content Addressable Memory)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "EventCode": "0x57",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) receives a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 0",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Full",
+ "EventCode": "0x4",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "EventCode": "0x3",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x1",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the a new entry is Received(RxC) and then added to the AD (Address Ring) Ingress Queue from the CMS (Common Mesh Stop). This is generally used for reads, and",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "EventCode": "0x2",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Full",
+ "EventCode": "0x8",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "EventCode": "0x7",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "EventCode": "0x5",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "EventCode": "0x6",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clean line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode and regular reads to DRAM in 1LM",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit; Read Hit from NearMem, Clean Line",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Dirty line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit; Read Hit from NearMem, Dirty Line",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clean line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit; Underfill Rd Hit from NearMem, Clean Line",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Dirty line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit; Underfill Rd Hit from NearMem, Dirty Line",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 0",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 1",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 0",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 1",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 0",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 1",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 0",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 1",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2M_TRACKER_PENDING_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "EventCode": "0xD",
+ "EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "EventCode": "0xE",
+ "EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Full",
+ "EventCode": "0xC",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Not Empty",
+ "EventCode": "0xB",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "EventCode": "0x9",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "EventCode": "0xF",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "EventCode": "0xA",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK; CRD Transactions to Cbo",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK; NDR Transactions",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.NDR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; All",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; All",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Read Credit Request",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Write Compare Request",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Write Credit Request",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; All",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Prefetch Read Cam Hit",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Read Credit Request",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Write Compare Request",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Write Credit Request",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; All",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Read Credit Request",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Write Compare Request",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Write Credit Request",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Sideband",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_TxC_AK_SIDEBAND.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Sideband",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_TxC_AK_SIDEBAND.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; All",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; All",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 0",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 1",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 0",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 1",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 0",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 1",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 0",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 1",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 0",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 1",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 2",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Requests",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Snoops",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; VNA Messages",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Writebacks",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the M3 uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2C Sent",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M3UPI_D2C_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases BL sends direct to core",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2U Sent",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "Cases where SMI3 sends D2U command",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO0_IIO1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO4",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO5",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; All IIO targets for NCS are in single mask. ORs them together",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; Selected M2p BL NCS credits",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 1",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AK - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AK - Slot 2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; BL - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; REQ on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; RSP on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; SNP on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; NCB on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; NCS on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; RSP on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; WB on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; REQ on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; RSP on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; SNP on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; NCB on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; NCS on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; RSP on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; WB on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; AD, BL Parallel Win",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN",
+ "PerPkg": "1",
+ "PublicDescription": "AD and BL messages won arbitration concurrently / in parallel",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; Parallel Bias to VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "VN0/VN1 arbiter gave second, consecutive win to vn0, delaying vn1 win, because vn0 offered parallel ad/bl",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; Parallel Bias to VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "VN0/VN1 arbiter gave second, consecutive win to vn1, delaying vn0 win, because vn1 offered parallel ad/bl",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; REQ on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; RSP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; SNP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; NCB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; NCS on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; RSP on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; WB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; REQ on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; RSP on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; SNP on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; NCB on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; NCS on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; RSP on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; WB on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; REQ on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; RSP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; SNP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; NCB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; NCS on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; RSP on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; WB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; REQ on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; RSP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; SNP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; NCB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; NCS on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; RSP on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; WB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on BL Arb",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on Idle",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to slot 0 of independent flit while pipeline is idle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to flit slot 1 while merging with bl message in same flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to flit slot 2 while merging with bl message in same flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; REQ on AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; RSP on AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; SNP on AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; NCB on BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; NCS on BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; RSP on BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; WB on BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; REQ on AD",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; RSP on AD",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; SNP on AD",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; NCB on BL",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; NCS on BL",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; RSP on BL",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; WB on BL",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; Any In BGF FIFO",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Indication that at least one packet (flit) is in the bgf (fifo only)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; Any in BGF Path",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; No D2K For Arb",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 or VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; D2K Credits",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Packets in BGF FIFO",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Packets in BGF Path",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "count of bl messages in pump-1-pending state, in completion fifo only",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "PerPkg": "1",
+ "PublicDescription": "count of bl messages in pump-1-pending state, in marker table and in fifo",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Transmit Credits",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; VNA In Use",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; All",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Data flit is ready for transmission but could not be sent",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; No BGF Credits",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_BGF",
+ "PerPkg": "1",
+ "PublicDescription": "Data flit is ready for transmission but could not be sent",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; No TxQ Credits",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_TXQ",
+ "PerPkg": "1",
+ "PublicDescription": "Data flit is ready for transmission but could not be sent",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 0",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "generating bl data flit sequence; waiting for data pump 0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "PerPkg": "1",
+ "PublicDescription": "pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "PerPkg": "1",
+ "PublicDescription": "pump-1-pending logic is tracking at least one message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "pump-1-pending completion fifo is full",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "PerPkg": "1",
+ "PublicDescription": "pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "PerPkg": "1",
+ "PublicDescription": "a bl message finished but is in limbo and moved to pump-1-pending logic",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 1",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "generating bl data flit sequence; waiting for data pump 1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; One Message",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "One message in flit; VNA or non-VNA flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; One Message in non-VNA",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG_VNX",
+ "PerPkg": "1",
+ "PublicDescription": "One message in flit; non-VNA flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; Two Messages",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.2_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Two messages in flit; VNA flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; Three Messages",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.3_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Three messages in flit; VNA flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_3",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; All",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Needs Data Flit",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "BL message requires data flit sequence",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 0",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Waiting for header pump 0",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Header pump 1 is not required for flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Bubble",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "PerPkg": "1",
+ "PublicDescription": "Header pump 1 is not required for flit but flit transmission delayed",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Not Avail",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "PerPkg": "1",
+ "PublicDescription": "Header pump 1 is not required for flit and not available",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 1",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Waiting for header pump 1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate Ready",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate Wasted",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Run-Ahead - Blocked",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Run-Ahead - Message",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting is in run-ahead to start new flit, and message is actually slotted into new flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Ok",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; New header flit construction may proceed in parallel with data flit sequence",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Flit Finished",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit finished assembly in parallel with data flit sequence",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Message",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Message is slotted into header flit in parallel with data flit sequence",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2; Rate-matching Stall",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 2; Rate-matching stall injected",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2; Rate-matching Stall - No Message",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 2; Rate matching stall injected, but no additional message slotted during stall cycle",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; All",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No BGF Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; No BGF credits available",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No BGF Credits + No Extra Message Slotted",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; No BGF credits available; no additional message slotted into flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No TxQ Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; No TxQ credits available",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No TxQ Credits + No Extra Message Slotted",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; No TxQ credits available; no additional message slotted into flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - One Slot Taken",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ONE_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with only one slot taken (two slots free)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - Three Slots Taken",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.THREE_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with three slots taken (no slots free)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - Two Slots Taken",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.TWO_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with only two slots taken (one slots free)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Can't Slot AD",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "PerPkg": "1",
+ "PublicDescription": "some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Can't Slot BL",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "PerPkg": "1",
+ "PublicDescription": "some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel AD Lost",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_AD_LOST",
+ "PerPkg": "1",
+ "PublicDescription": "some AD message lost contest for slot 0 (logical OR of all AD events under INGR_SLOT_LOST_MC_VN{0,1})",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel Attempt",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "PerPkg": "1",
+ "PublicDescription": "ad and bl messages attempted to slot into the same flit in parallel",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel BL Lost",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_BL_LOST",
+ "PerPkg": "1",
+ "PublicDescription": "some BL message lost contest for slot 0 (logical OR of all BL events under INGR_SLOT_LOST_MC_VN{0,1})",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel Success",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "PerPkg": "1",
+ "PublicDescription": "ad and bl messages were actually slotted into the same flit in paralle",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; VN0",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; VN1",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; REQ on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; RSP on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; SNP on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; NCB on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; NCS on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; RSP on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; WB on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; REQ on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; RSP on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; SNP on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; NCB on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; NCS on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; RSP on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; WB on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Lost Arbitration",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARB_LOST",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Arrived",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARRIVED",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Dropped - Old",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_OLD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Dropped - Wrap",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_WRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Dropped because it was overwritten by new message while prefetch queue was full",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Slotted",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.SLOTTED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Any In Use",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "At least one remote vna credit is in use",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Corrected",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of remote vna credits corrected (local return) per cycle",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "PerPkg": "1",
+ "PublicDescription": "Remote vna credit level is less than 1 (i.e. no vna credits available)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 4",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "PerPkg": "1",
+ "PublicDescription": "Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 5",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "PerPkg": "1",
+ "PublicDescription": "Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Used",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.USED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of remote vna credits consumed per cycle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 REQ Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 RSP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 SNP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 WB Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 REQ Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 RSP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 SNP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 WB Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; CHA on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_CHA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to CHA",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_NON_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of non-idle cycles in issuing Vn0 Snpf",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to peer UPI0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to peer UPI1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; CHA on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_CHA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to CHA",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_NON_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of non-idle cycles in issuing Vn1 Snpf",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to peer UPI0",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to peer UPI1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_NONSNP",
+ "PerPkg": "1",
+ "PublicDescription": "Outcome of SnpF pending arbitration; FlowQ txn issued when SnpF pending on Vn0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_VN2SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Outcome of SnpF pending arbitration; FlowQ Vn0 SnpF issued when SnpF pending on Vn1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_NONSNP",
+ "PerPkg": "1",
+ "PublicDescription": "Outcome of SnpF pending arbitration; FlowQ txn issued when SnpF pending on Vn1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_VN0SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Outcome of SnpF pending arbitration; FlowQ Vn1 SnpF issued when SnpF pending on Vn0",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 REQ Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 SNP Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 WB Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 REQ Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 SNP Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 WB Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 REQ Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 SNP Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 WB Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 REQ Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 SNP Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 WB Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 REQ Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 RSP Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 SNP Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 WB Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 REQ Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 RSP Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 SNP Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 WB Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Inserts",
+ "EventCode": "0x2F",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Occupancy",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 RSP Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 WB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 NCS Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 NCB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1 RSP Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1 WB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1_NCB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1_NCS Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1_NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1_NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 NCS Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 WB Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 NCB Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 RSP Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCB Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCS Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 RSP Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 WB Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCS Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCB Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 RSP Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 WB Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VNA",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VNA",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where flow control queue that sits between the Intel(R) Ultra Path Interconnect (UPI) and the mesh spawns a prefetch to the iMC (Memory Controller)",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; WB on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; REQ on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; RSP on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; RSP on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; WB on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; NCB on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; REQ on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; RSP on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; SNP on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; RSP on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; WB on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; REQ on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; RSP on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; RSP on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; WB on BL",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; NCB on BL",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; REQ on AD",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; RSP on AD",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; SNP on AD",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; RSP on BL",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_TxC_BL.DRS_UPI",
+ "Deprecated": "1",
+ "EventCode": "0x40",
+ "EventName": "UNC_NoUnit_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clocks of the Intel(R) Ultra Path Interconnect (UPI)",
+ "EventCode": "0x1",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the fixed frequency clock controlling the Intel(R) Ultra Path Interconnect (UPI). This clock runs at1/8th the 'GT/s' speed of the UPI link. For example, a 9.6GT/s link will have a fixed Frequency of 1.2 Ghz.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to core",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to core bypassing the CHA.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "Deprecated": "1",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to Intel(R) UPI",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to Intel(R) Ultra Path Interconnect (UPI) bypassing the CHA .",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles Intel(R) UPI is in L1 power mode (shutdown)",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the Intel(R) Ultra Path Interconnect (UPI) is in L1 power mode. L1 is a mode that totally shuts down the UPI link. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another, this event only coutns when both links are shutdown.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "EventCode": "0x16",
+ "EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "EventCode": "0x20",
+ "EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req Nack",
+ "EventCode": "0x23",
+ "EventName": "UNC_UPI_POWER_L1_NACK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req (same as L1 Ack).",
+ "EventCode": "0x22",
+ "EventName": "UNC_UPI_POWER_L1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles the Rx of the Intel(R) UPI is in L0p power mode",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the receive side (Rx) of the Intel(R) Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0. Receive side.",
+ "EventCode": "0x24",
+ "EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCB",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCB",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCS",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCS",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Request",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "REQ Message Class",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Request Opcode",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match REQ Opcodes - Specified in Umask[7:4]",
+ "UMask": "0x108",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Conflict",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1aa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Invalid",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x12a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0x10c",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - RSP",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - RSP",
+ "UMask": "0x10a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Snoop",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "SNP Message Class",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Snoop Opcode",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match SNP Opcodes - Specified in Umask[7:4]",
+ "UMask": "0x109",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0x10d",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed",
+ "EventCode": "0x39",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed",
+ "EventCode": "0x3A",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x38",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid data FLITs received from any slot",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) received from any of the 3 Intel(R) Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) received from any of the 3 Intel(R) Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Data",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Idle",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; LLCRD Not Empty",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; LLCTRL",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) received from any of the 3 UPI slots on this UPI unit.",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Deprecated": "1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Protocol Header",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.PROTHDR",
+ "Deprecated": "1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.PROT_HDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 0",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 2",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.RSP",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 0",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 1",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 2",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 2",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in which the Tx of the Intel(R) Ultra Path Interconnect (UPI) is in L0p power mode",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the transmit side (Tx) of the Intel(R) Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "EventCode": "0x28",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "EventCode": "0x29",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0. Transmit side.",
+ "EventCode": "0x26",
+ "EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCB",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCB",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCS",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCS",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Request",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "REQ Message Class",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Request Opcode",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match REQ Opcodes - Specified in Umask[7:4]",
+ "UMask": "0x108",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Conflict",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1aa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Invalid",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x12a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0x10c",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - RSP",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - RSP",
+ "UMask": "0x10a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Snoop",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "SNP Message Class",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Snoop Opcode",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match SNP Opcodes - Specified in Umask[7:4]",
+ "UMask": "0x109",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0x10d",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the TxL(transmit) FLIT buffer and pass directly out the UPI Link. Generally, when data is transmitted across the Intel(R) Ultra Path Interconnect (UPI), it will bypass the TxQ and pass directly to the link. However, the TxQ will be used in L0p (Low Power) mode and (Link Layer Retry) LLR mode, increasing latency to transfer out to the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid data FLITs transmitted via any slot",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) transmitted (TxL) via any of the 3 Intel(R) Ultra Path Interconnect (UPI) slots on this UPI unit.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Null FLITs transmitted from any slot",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) transmitted via any of the 3 Intel(R) Ulra Path Interconnect (UPI) slots on this UPI unit.",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Data",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Idle FLITs transmitted",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the Intel Ultra Path Interconnect(UPI) transmits an idle FLIT(80 bit FLow control unITs). Every UPI cycle must be sending either data FLITs, protocol/credit FLITs or idle FLITs.",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; LLCRD Not Empty",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; LLCTRL",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) transmitted across any of the 3 UPI (Ultra Path Interconnect) slots on this UPI unit.",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.ALL_NULL",
+ "Deprecated": "1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Protocol Header",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.PROTHDR",
+ "Deprecated": "1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.PROT_HDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 0",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 2",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.DATA_HDR",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.DUAL_SLOT_HDR",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.LOC",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NON_DATA_HDR",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.REM",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.SGL_SLOT_HDR",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "EventCode": "0x45",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x44",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; IPI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.; Inter Processor Interrupts",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; MSI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.; Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; VLW",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "EventCode": "0x2",
+ "EventName": "UPI_DATA_BANDWIDTH_TX",
+ "PerPkg": "1",
+ "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) transmitted (TxL) via any of the 3 Intel(R) Ultra Path Interconnect (UPI) slots on this UPI unit.",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ }
+]
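
Editorial aside, not part of the patch content: the UNC_UPI_TxL_INSERTS and UNC_UPI_TxL_OCCUPANCY descriptions above note that the average Tx flit buffer lifetime can be derived by combining the occupancy accumulation with the allocation (inserts) count. Below is a minimal sketch of that calculation in Python, assuming the two raw counter values have already been collected (for example with something along the lines of perf stat -e unc_upi_txl_inserts,unc_upi_txl_occupancy); the numeric values used are hypothetical placeholders, not real measurements.

    # Sketch only: derive average TxQ flit lifetime from the two UPI counters
    # described above (occupancy accumulates flits resident per cycle,
    # inserts counts allocations into the buffer).

    def avg_txq_lifetime(occupancy_count: float, inserts_count: float) -> float:
        """Average Tx flit buffer lifetime in UPI clock cycles per flit.

        Dividing accumulated occupancy by the number of allocations gives the
        average number of cycles a flit spends in the Tx flit buffer.
        """
        if inserts_count == 0:
            return 0.0
        return occupancy_count / inserts_count

    # Hypothetical counter readings:
    print(avg_txq_lifetime(occupancy_count=1_250_000, inserts_count=50_000))  # 25.0 cycles

The PCIe completion buffer inserts/occupancy events in the uncore-io.json file that follows could presumably be combined in the same way.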
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json
new file mode 100644
index 000000000000..743c91f3d2f0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-io.json
@@ -0,0 +1,4250 @@
+[
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_READ",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_WRITE",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "EventCode": "0x1",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the 1GHz traffic controller clock in the IIO unit.",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x0f",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 0",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 1",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 2",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num Link Correctable Errors",
+ "EventCode": "0xF",
+ "EventName": "UNC_IIO_LINK_NUM_CORR_ERR",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num Link Retries",
+ "EventCode": "0xE",
+ "EventName": "UNC_IIO_LINK_NUM_RETRIES",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number packets that passed the Mask/Match Filter",
+ "EventCode": "0x21",
+ "EventName": "UNC_IIO_MASK_MATCH",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; PCIE bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; PCIE bus",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Counting disabled",
+ "EventName": "UNC_IIO_NOTHING",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Symbol Times on Link",
+ "EventCode": "0x82",
+ "EventName": "UNC_IIO_SYMBOL_TIMES",
+ "PerPkg": "1",
+ "PublicDescription": "Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part1 to the MMIO space of an IIO target.In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; context cache miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.CTXT_MISS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L1 miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L1_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L2 miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L2_MISS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L3 miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L3_MISS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; Vtd hit",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L4_PAGE_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB1_MISS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB is full",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Occupancy",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_VTD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ }
+]
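Usage note (illustrative only, not part of the patch): once perf is built with these JSON files installed, the IIO event names above appear as lowercase aliases in perf list and can be passed directly to perf stat. The exact alias spelling, the set of IIO PMU instances, and whether a given event is exposed all depend on the perf build and the machine, so the command below is a sketch under those assumptions rather than a verified recipe.

    # Count CPU-initiated MMIO reads to IIO Part0, system-wide, for one second.
    # Event name taken from the JSON above; alias availability is assumed.
    perf stat -e unc_iio_txn_req_by_cpu.mem_read.part0 -a -- sleep 1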
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
index 3fb5cdce842f..f761856d738e 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-memory.json
@@ -1,247 +1,2200 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Access Select) read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every read. This event includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"ScaleUnit": "64Bytes",
- "UMask": "0xC",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Bypass",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts DRAM Page Activate commands sent on this channel due to a write request to the iMC (Memory Controller). Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS (Column Access Select) command.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS Commands issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, so this event increments for every read and write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Read CAS Commands issued (including underfills)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Access Select) read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every read. This event includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Read ISOCH Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "Counts CAS (Column Access Select) regular read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every regular read. This event only counts regular reads and does not includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in RMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts CAS (Column Access Select) underfill read commands issued to DRAM due to a partial write, on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this command counts underfill reads. Partial writes must be completed by first reading in the underfill from DRAM and then merging in the partial write data before writing the full line back to DRAM. This event will generally count about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ (due to a previous write request).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in WMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Write CAS commands issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Write ISOCH Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Memory controller clock ticks",
- "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the fixed frequency clock of the memory controller using one of the programmable counters.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_CLOCKTICKS_F",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_MAJMODE2.DRAM_CYC",
+ "EventCode": "0xED",
+ "EventName": "UNC_M_MAJMODE2.DRAM_CYC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_MAJMODE2.DRAM_ENTER",
+ "EventCode": "0xED",
+ "EventName": "UNC_M_MAJMODE2.DRAM_ENTER",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Major Mode 2 : Cycles in PMM major mode",
+ "EventCode": "0xED",
+ "EventName": "UNC_M_MAJMODE2.PMM_CYC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Major Mode 2 : Entered PMM major mode",
+ "EventCode": "0xED",
+ "EventName": "UNC_M_MAJMODE2.PMM_ENTER",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_BANDWIDTH.READ",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec). Derived from unc_m_pmm_rpq_inserts",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
+ "MetricName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec). Derived from unc_m_pmm_wpq_inserts",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_BANDWIDTH.WRITE",
+ "PerPkg": "1",
+ "ScaleUnit": "6.103515625E-5MB/sec",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All commands for Intel(R) Optane(TM) DC persistent memory",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Misc Commands (error, flow ACKs)",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.MISC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Misc GNTs",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.MISC_GNT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Regular reads(RPQ) commands for Intel(R) Optane(TM) DC persistent memory",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RD",
+ "PerPkg": "1",
+ "PublicDescription": "All Reads - RPQ or Ufill",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RPQ GNTs",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RPQ_GNTS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill read commands for Intel(R) Optane(TM) DC persistent memory",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.UFILL_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Underfill reads",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill GNTs",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WPQ_GNTS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write commands for Intel(R) Optane(TM) DC persistent memory",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Writes",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Expected No data packet (ERID matched NDP encoding)",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.NODATA_EXP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Unexpected No data packet (ERID matched a Read, but data was a NDP)",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.NODATA_UNEXP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Opportunistic Reads",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.OPP_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM ECC Errors",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ECC_ERROR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM ERID detectable parity error",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ERID_ERROR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Requests - Slot 0",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.REQS_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Requests - Slot 1",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.REQS_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode; Cycles PMM is in Partial Write Major Mode",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_CYC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_ENTER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.PARTIAL_WR_EXIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode; Cycles PMM is in Read Major Mode",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.RD_CYC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Major Mode; Cycles PMM is in Write Major Mode",
+ "EventCode": "0xEC",
+ "EventName": "UNC_M_PMM_MAJMODE1.WR_CYC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Intel Optane DC persistent memory read latency (ns). Derived from unc_m_pmm_rpq_occupancy.all",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_READ_LATENCY",
+ "MetricExpr": "UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS / UNC_M_CLOCKTICKS",
+ "MetricName": "UNC_M_PMM_READ_LATENCY",
+ "PerPkg": "1",
+ "ScaleUnit": "6000000000ns",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Cycles Full",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M_PMM_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Cycles Not Empty",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M_PMM_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy of all read requests for Intel Optane DC persistent memory",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Occupancy",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Cycles Full",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Cycles Not Empty",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy of all write requests for Intel(R) Optane(TM) DC persistent memory",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PMM_WPQ_PCOMMIT",
+ "EventCode": "0xE8",
+ "EventName": "UNC_M_PMM_WPQ_PCOMMIT",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PMM_WPQ_PCOMMIT_CYC",
+ "EventCode": "0xE9",
+ "EventName": "UNC_M_PMM_WPQ_PCOMMIT_CYC",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode+C37",
- "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
+ "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100",
+ "MetricName": "power_channel_ppd",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when all the ranks in the channel are in PPD (PreCharge Power Down) mode. If IBT (Input Buffer Terminators)=off is enabled, then this event counts the cycles in PPD mode. If IBT=off is not enabled, then this event counts the number of cycles when being in PPD mode could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles Memory is in self refresh power mode",
- "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
+ "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100",
+ "MetricName": "power_self_refresh",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC (memory controller) is in self-refresh and has a clock. This happens in some ACPI CPU package C-states for the sleep levels. For example, the PCU (Power Control Unit) may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Intel? Dynamic Power Technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Pre-charges due to page misses",
- "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of explicit DRAM Precharge commands sent on this channel as a result of a DRAM page miss. This does not include the implicit precharge commands sent with CAS commands in Auto-Precharge mode. This does not include Precharge commands sent as a result of a page close counter expiration.",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Pre-charge for reads",
- "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of explicit DRAM Precharge commands issued on a per channel basis due to a read, so as to close the previous DRAM page, before opening the requested page.",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Pre-charge for writes",
- "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
- "Counter": "0,1,2,3",
- "EventCode": "0xE3",
- "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
"PerPkg": "1",
+ "UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Write requests allocated in the PMM Write Pending Queue for Intel Optane DC persistent memory",
- "Counter": "0,1,2,3",
- "EventCode": "0xE7",
- "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
"PerPkg": "1",
+ "UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth read (MB/sec). Derived from unc_m_pmm_rpq_inserts",
- "Counter": "0,1,2,3",
- "EventCode": "0xE3",
- "EventName": "UNC_M_PMM_BANDWIDTH.READ",
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth write (MB/sec). Derived from unc_m_pmm_wpq_inserts",
- "Counter": "0,1,2,3",
- "EventCode": "0xE7",
- "EventName": "UNC_M_PMM_BANDWIDTH.WRITE",
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory bandwidth total (MB/sec). Derived from unc_m_pmm_rpq_inserts",
- "Counter": "0,1,2,3",
- "EventCode": "0xE3",
- "EventName": "UNC_M_PMM_BANDWIDTH.TOTAL",
- "MetricExpr": "UNC_M_PMM_RPQ_INSERTS + UNC_M_PMM_WPQ_INSERTS",
- "MetricName": "UNC_M_PMM_BANDWIDTH.TOTAL",
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
"PerPkg": "1",
- "ScaleUnit": "6.103515625E-5MB/sec",
+ "UMask": "0x10",
"Unit": "iMC"
},
{
- "BriefDescription": "Read Pending Queue Occupancy of all read requests for Intel Optane DC persistent memory",
- "Counter": "0,1,2,3",
- "EventCode": "0xE0",
- "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
"PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Intel Optane DC persistent memory read latency (ns). Derived from unc_m_pmm_rpq_occupancy.all",
- "Counter": "0,1,2,3",
- "EventCode": "0xE0",
- "EventName": "UNC_M_PMM_READ_LATENCY",
- "MetricExpr": "UNC_M_PMM_RPQ_OCCUPANCY.ALL / UNC_M_PMM_RPQ_INSERTS / UNC_M_CLOCKTICKS",
- "MetricName": "UNC_M_PMM_READ_LATENCY",
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
"PerPkg": "1",
- "ScaleUnit": "6000000000ns",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM Page Activate commands sent due to a write request",
- "Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_M_ACT_COUNT.WR",
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
"PerPkg": "1",
- "PublicDescription": "Counts DRAM Page Activate commands sent on this channel due to a write request to the iMC (Memory Controller). Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS (Column Access Select) command.",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "All DRAM CAS Commands issued",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.ALL",
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
"PerPkg": "1",
- "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, so this event increments for every read and write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
- "UMask": "0xF",
+ "UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; All Banks",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK1",
"PerPkg": "1",
- "PublicDescription": "Counts CAS (Column Access Select) regular read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every regular read. This event only counts regular reads and does not includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM Underfill Read CAS Commands issued",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 10",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 11",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 12",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 13",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 14",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 15",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK2",
"PerPkg": "1",
- "PublicDescription": "Counts CAS (Column Access Select) underfill read commands issued to DRAM due to a partial write, on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this command counts underfill reads. Partial writes must be completed by first reading in the underfill from DRAM and then merging in the partial write data before writing the full line back to DRAM. This event will generally count about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ (due to a previous write request).",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK4",
"PerPkg": "1",
- "PublicDescription": "Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "All commands for Intel Optane DC persistent memory",
- "Counter": "0,1,2,3",
- "EventCode": "0xEA",
- "EventName": "UNC_M_PMM_CMD1.ALL",
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 8",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 9",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; All Banks",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK1",
"PerPkg": "1",
- "PublicDescription": "All commands for Intel Optane DC persistent memory",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Regular reads(RPQ) commands for Intel Optane DC persistent memory",
- "Counter": "0,1,2,3",
- "EventCode": "0xEA",
- "EventName": "UNC_M_PMM_CMD1.RD",
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 10",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 11",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 12",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 13",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 14",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 15",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK2",
"PerPkg": "1",
- "PublicDescription": "All Reads - RPQ or Ufill",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Underfill read commands for Intel Optane DC persistent memory",
- "Counter": "0,1,2,3",
- "EventCode": "0xEA",
- "EventName": "UNC_M_PMM_CMD1.UFILL_RD",
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 8",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK8",
"PerPkg": "1",
- "PublicDescription": "Underfill reads",
"UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Write commands for Intel Optane DC persistent memory",
- "Counter": "0,1,2,3",
- "EventCode": "0xEA",
- "EventName": "UNC_M_PMM_CMD1.WR",
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 9",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
"PerPkg": "1",
- "PublicDescription": "Writes",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
- "Counter": "0,1,2,3",
- "EventCode": "0xE4",
- "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
"PerPkg": "1",
- "PublicDescription": "Write Pending Queue Occupancy of all write requests for Intel Optane DC persistent memory",
"UMask": "0x1",
"Unit": "iMC"
},
{
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Read Pending Queue Allocations",
- "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
@@ -250,7 +2203,6 @@
},
{
"BriefDescription": "Read Pending Queue Occupancy",
- "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
@@ -258,8 +2210,453 @@
"Unit": "iMC"
},
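The RPQ descriptions above already state how these counters combine: the occupancy counter accumulates the number of queued entries each cycle, so dividing by not-empty cycles gives the average depth while the queue is busy, and dividing by allocations gives the average residency per read (Little's law). A minimal sketch of that arithmetic, assuming the three raw counter totals have already been collected (for example from a perf run); the function and variable names here are illustrative only and are not part of the event files:

    # Derive average RPQ depth and residency from the iMC counters described above.
    # Inputs are raw totals of UNC_M_RPQ_OCCUPANCY, UNC_M_RPQ_CYCLES_NE and
    # UNC_M_RPQ_INSERTS; the sample values below are made up for illustration.
    def rpq_stats(occupancy_sum: int, cycles_not_empty: int, inserts: int):
        avg_depth_while_busy = occupancy_sum / cycles_not_empty if cycles_not_empty else 0.0
        avg_residency_cycles = occupancy_sum / inserts if inserts else 0.0  # Little's law
        return avg_depth_while_busy, avg_residency_cycles

    depth, residency = rpq_stats(occupancy_sum=1_200_000, cycles_not_empty=300_000, inserts=40_000)
    print(f"avg RPQ depth while non-empty: {depth:.1f} entries")
    print(f"avg read residency in RPQ:     {residency:.1f} iMC cycles")
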
{
+ "BriefDescription": "Scoreboard Accesses; Write Accepts",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; Write Rejects",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; FM read completions",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; FM write completions",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; Read Accepts",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; Read Rejects",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; NM read completions",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses; NM write completions",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Alloc",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.ALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Dealloc",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Read Starved",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FMRD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Write Starved",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FMWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Read Starved",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.NMRD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Write Starved",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.NMWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Reject",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.REJ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Valid",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.VLD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Full",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M_SB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Not-Empty",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M_SB_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Block region reads",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Block region writes",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Dealloc all commands (for error flows)",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Patrol inserts",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PATROL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Persistent Mem reads",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Persistent Mem writes",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Reads",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts; Writes",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.WRS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Block region reads",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Block region writes",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Patrol",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PATROL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Persistent Mem reads",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Persistent Mem writes",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Reads",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy; Writes",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.WRS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected; FM requests rejected due to full address conflict",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected; NM requests rejected due to set conflict",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected; Patrol requests rejected due to set conflict",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Read - Clear",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMRD_CLR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Read - Set",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMRD_SET",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Write - Clear",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMWR_CLR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Write - Set",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMWR_SET",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Read - Clear",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMRD_CLR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Read - Set",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMRD_SET",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Write - Clear",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMWR_CLR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Write - Set",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMWR_SET",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Read",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Far Mem Write",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMWR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Read",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.NMRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Near Mem Write",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.NMWR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.NEW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.OCC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "All hits to Near Memory(DRAM cache) in Memory Mode",
- "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.HIT",
"PerPkg": "1",
@@ -269,7 +2666,6 @@
},
{
"BriefDescription": "All Clean line misses to Near Memory(DRAM cache) in Memory Mode",
- "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.MISS_CLEAN",
"PerPkg": "1",
@@ -279,7 +2675,6 @@
},
{
"BriefDescription": "All dirty line misses to Near Memory(DRAM cache) in Memory Mode",
- "Counter": "0,1,2,3",
"EventCode": "0xD3",
"EventName": "UNC_M_TAGCHK.MISS_DIRTY",
"PerPkg": "1",
@@ -288,8 +2683,47 @@
"Unit": "iMC"
},
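The UNC_M_TAGCHK umasks kept here partition near-memory (DRAM cache) tag checks in Memory Mode into hits, clean misses and dirty misses, so a near-memory hit rate falls out directly from their sum. A small sketch of that ratio, again with purely illustrative names and sample values:

    # Near-memory (DRAM cache) hit rate in Memory Mode from the UNC_M_TAGCHK
    # counters above: HIT / (HIT + MISS_CLEAN + MISS_DIRTY).
    def nm_hit_rate(hits: int, miss_clean: int, miss_dirty: int) -> float:
        total = hits + miss_clean + miss_dirty
        return hits / total if total else 0.0

    print(f"NM hit rate: {nm_hit_rate(hits=900_000, miss_clean=60_000, miss_dirty=40_000):.1%}")
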
{
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Write Pending Queue Allocations",
- "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
@@ -298,11 +2732,1369 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
- "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts???",
+ "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts?",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; All Banks",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 10",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 11",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 12",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 13",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 14",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 15",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 8",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 9",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; All Banks",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 10",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 11",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 12",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 13",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 14",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 15",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 8",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 9",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
"Unit": "iMC"
}
]
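(Aside for reviewers, not part of the patch: the EventCode/UMask pairs in the JSON above map directly onto a raw uncore counter configuration. Below is a minimal, hypothetical C sketch counting one of the listed events, UNC_M_WR_CAS_RANK5.ALLBANKS (EventCode 0xBD, UMask 0x10), via perf_event_open(). The PMU instance name "uncore_imc_0" and the event=config:0-7 / umask=config:8-15 bit layout are assumptions based on the usual Intel uncore sysfs format files and should be checked under /sys/bus/event_source/devices/ on the target machine.)

/*
 * Hypothetical sketch only: how an EventCode/UMask pair from the JSON above
 * could be programmed as a raw uncore counter.  The PMU name "uncore_imc_0"
 * and the event/umask field positions are assumptions; real systems expose
 * them under /sys/bus/event_source/devices/<pmu>/format/.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        FILE *f;
        int type, fd;

        /* Dynamic PMU type id, exported by the kernel for each uncore box. */
        f = fopen("/sys/bus/event_source/devices/uncore_imc_0/type", "r");
        if (!f || fscanf(f, "%d", &type) != 1) {
                perror("uncore_imc_0 type");
                return 1;
        }
        fclose(f);

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = type;
        /* UNC_M_WR_CAS_RANK5.ALLBANKS: EventCode 0xBD, UMask 0x10, assuming
         * the common event=config:0-7, umask=config:8-15 layout. */
        attr.config = 0xbd | (0x10ULL << 8);
        attr.disabled = 1;

        /* Uncore events are socket-wide: count system-wide on one CPU. */
        fd = perf_event_open(&attr, -1, 0, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        sleep(1);
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("UNC_M_WR_CAS_RANK5.ALLBANKS: %llu\n",
                       (unsigned long long)count);
        close(fd);
        return 0;
}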
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
deleted file mode 100644
index df355ba7acc8..000000000000
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-other.json
+++ /dev/null
@@ -1,1770 +0,0 @@
-[
- {
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
- "EventName": "UNC_CHA_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "config1=0x40e33",
- "PerPkg": "1",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "config1=0x40040e33",
- "PerPkg": "1",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "config1=0x40041e33",
- "PerPkg": "1",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "config1=0x41833",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "config1=0x41a33",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "read requests from home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS",
- "PerPkg": "1",
- "UMask": "0x03",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "read requests from local home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
- "PerPkg": "1",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "read requests from remote home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
- "PerPkg": "1",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "write requests from home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES",
- "PerPkg": "1",
- "UMask": "0x0C",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "write requests from local home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
- "PerPkg": "1",
- "UMask": "0x04",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "write requests from remote home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
- "PerPkg": "1",
- "UMask": "0x08",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UPI_DATA_BANDWIDTH_TX",
- "PerPkg": "1",
- "ScaleUnit": "7.11E-06Bytes",
- "UMask": "0xf",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_READ",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
- "Counter": "0,1,2,3",
- "EventCode": "0x33",
- "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
- "UMask": "0x42",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
- "Counter": "0,1,2,3",
- "EventCode": "0x33",
- "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
- "UMask": "0x82",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
- "Counter": "0,1,2,3",
- "EventCode": "0x53",
- "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
- "PerPkg": "1",
- "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
- "Counter": "0,1,2,3",
- "EventCode": "0x53",
- "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
- "PerPkg": "1",
- "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
- "Counter": "0,1,2,3",
- "EventCode": "0x54",
- "EventName": "UNC_CHA_DIR_UPDATE.HA",
- "PerPkg": "1",
- "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
- "Counter": "0,1,2,3",
- "EventCode": "0x54",
- "EventName": "UNC_CHA_DIR_UPDATE.TOR",
- "PerPkg": "1",
- "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "FaST wire asserted; Horizontal",
- "Counter": "0,1,2,3",
- "EventCode": "0xA5",
- "EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
- "Counter": "0,1,2,3",
- "EventCode": "0x5F",
- "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
- "PerPkg": "1",
- "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*)",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
- "Counter": "0,1,2,3",
- "EventCode": "0x59",
- "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
- "PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
- "Counter": "0,1,2,3",
- "EventCode": "0x5B",
- "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
- "PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Lines Victimized; Lines in E state",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Lines Victimized; Lines in F State",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x08",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Lines Victimized; Lines in M state",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Lines Victimized; Lines in S State",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
- "UMask": "0x04",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Number of times that an RFO hit in S state.",
- "Counter": "0,1,2,3",
- "EventCode": "0x39",
- "EventName": "UNC_CHA_MISC.RFO_HIT_S",
- "PerPkg": "1",
- "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
- "UMask": "0x08",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
- "PerPkg": "1",
- "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
- "UMask": "0x10",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
- "PerPkg": "1",
- "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
- "UMask": "0x20",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Ingress (from CMS) Allocations; IRQ",
- "Counter": "0,1,2,3",
- "EventCode": "0x13",
- "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
- "PerPkg": "1",
- "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
- "Counter": "0,1,2,3",
- "EventCode": "0x19",
- "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
- "PerPkg": "1",
- "PublicDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
- "UMask": "0x80",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
- "EventCode": "0x11",
- "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
- "PerPkg": "1",
- "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
- "Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
- "PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
- "Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
- "PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
- "Counter": "0,1,2,3",
- "EventCode": "0x3D",
- "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
- "PerPkg": "1",
- "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a cores cache replaces a tracked cacheline with a new cacheline.",
- "UMask": "0x04",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "RspCnflct* Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
- "PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspCnflct* Snoop Response was received. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent. This triggers conflict resolution hardware. This covers both the opcode RspCnflct and RspCnflctWbI.",
- "UMask": "0x40",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "RspI Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
- "PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "RspIFwd Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
- "PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
- "UMask": "0x04",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "RspSFwd Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
- "PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
- "UMask": "0x08",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
- "PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to it's home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to it's home socket to be written back to memory.",
- "UMask": "0x20",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Rsp*WB Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
- "PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This reponse will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
- "UMask": "0x10",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Clockticks of the IIO Traffic Controller",
- "Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_IIO_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Counts clockticks of the 1GHz trafiic controller clock in the IIO unit.",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x08",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part1 to the MMIO space of an IIO target.In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x02",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
- "Counter": "0,1,2,3",
- "EventCode": "0x22",
- "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
- "PerPkg": "1",
- "PublicDescription": "Counts traffic in which the M2M (Mesh to Memory) to iMC (Memory Controller) bypass was not taken",
- "UMask": "0x2",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
- "Counter": "0,1,2,3",
- "EventCode": "0x24",
- "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when direct to core mode (which bypasses the CHA) was disabled",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
- "Counter": "0,1,2,3",
- "EventCode": "0x23",
- "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
- "PerPkg": "1",
- "PublicDescription": "Counts when messages were sent direct to core (bypassing the CHA)",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Number of reads in which direct to core transaction were overridden",
- "Counter": "0,1,2,3",
- "EventCode": "0x25",
- "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
- "PerPkg": "1",
- "PublicDescription": "Counts reads in which direct to core transactions (which would have bypassed the CHA) were overridden",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
- "Counter": "0,1,2,3",
- "EventCode": "0x28",
- "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
- "PerPkg": "1",
- "PublicDescription": "Counts reads in which direct to Intel Ultra Path Interconnect (UPI) transactions (which would have bypassed the CHA) were overridden",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Cycles when direct to Intel UPI was disabled",
- "Counter": "0,1,2,3",
- "EventCode": "0x27",
- "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when the ability to send messages direct to the Intel Ultra Path Interconnect (bypassing the CHA) was disabled",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Messages sent direct to the Intel UPI",
- "Counter": "0,1,2,3",
- "EventCode": "0x26",
- "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
- "PerPkg": "1",
- "PublicDescription": "Counts when messages were sent direct to the Intel Ultra Path Interconnect (bypassing the CHA)",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
- "Counter": "0,1,2,3",
- "EventCode": "0x29",
- "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
- "PerPkg": "1",
- "PublicDescription": "Counts when a read message that was sent direct to the Intel Ultra Path Interconnect (bypassing the CHA) was overridden",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
- "Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in Any State (A, I, S or unused)",
- "UMask": "0x1",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
- "Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in the A (SnoopAll) state, indicating the cacheline is stored in another socket in any state, and we must snoop the other sockets to make sure we get the latest data. The data may be stored in any state in the local socket.",
- "UMask": "0x8",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
- "Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the I (Invalid) state indicating the cacheline is not stored in another socket, and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
- "UMask": "0x2",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
- "Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the S (Shared) state indicating the cacheline is either stored in another socket in the S(hared) state , and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
- "UMask": "0x4",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from A to I",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to I (Invalid)",
- "UMask": "0x20",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from A to S",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to S (Shared)",
- "UMask": "0x40",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory to a new state",
- "UMask": "0x1",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from I to A",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to A (SnoopAll)",
- "UMask": "0x4",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from I to S",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to S (Shared)",
- "UMask": "0x2",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from S to A",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to A (SnoopAll)",
- "UMask": "0x10",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from S to I",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to I (Invalid)",
- "UMask": "0x8",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Reads to iMC issued",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.ALL",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller).",
- "UMask": "0x4",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.NORMAL",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller). It only counts normal priority non-isochronous reads.",
- "UMask": "0x1",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Read requests to Intel Optane DC persistent memory issued to the iMC from M2M",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.TO_PMM",
- "PerPkg": "1",
- "PublicDescription": "M2M Reads Issued to iMC; All, regardless of priority.",
- "UMask": "0x8",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Writes to iMC issued",
- "Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.ALL",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues writes to the iMC (Memory Controller).",
- "UMask": "0x10",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
- "Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.NI",
- "PerPkg": "1",
- "PublicDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
- "UMask": "0x80",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Partial Non-Isochronous writes to the iMC",
- "Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues partial writes to the iMC (Memory Controller). It only counts normal priority non-isochronous writes.",
- "UMask": "0x2",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Write requests to Intel Optane DC persistent memory issued to the iMC from M2M",
- "Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
- "PerPkg": "1",
- "PublicDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
- "UMask": "0x20",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Prefecth requests that got turn into a demand request",
- "Counter": "0,1,2,3",
- "EventCode": "0x56",
- "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) promotes a outstanding request in the prefetch queue due to a subsequent demand read request that entered the M2M with the same address. Explanatory Side Note: The Prefecth queue is made of CAM (Content Addressable Memory)",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
- "Counter": "0,1,2,3",
- "EventCode": "0x57",
- "EventName": "UNC_M2M_PREFCAM_INSERTS",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) recieves a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
- "Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_M2M_RxC_AD_INSERTS",
- "PerPkg": "1",
- "PublicDescription": "Counts when the a new entry is Received(RxC) and then added to the AD (Address Ring) Ingress Queue from the CMS (Common Mesh Stop). This is generally used for reads, and",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "AD Ingress (from CMS) Occupancy",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
- "PerPkg": "1",
- "PublicDescription": "AD Ingress (from CMS) Occupancy",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "BL Ingress (from CMS) Allocations",
- "Counter": "0,1,2,3",
- "EventCode": "0x5",
- "EventName": "UNC_M2M_RxC_BL_INSERTS",
- "PerPkg": "1",
- "PublicDescription": "BL Ingress (from CMS) Allocations",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "BL Ingress (from CMS) Occupancy",
- "Counter": "0,1,2,3",
- "EventCode": "0x6",
- "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
- "PerPkg": "1",
- "PublicDescription": "BL Ingress (from CMS) Occupancy",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Dirty line read hits(Regular and RFO) to Near Memory(DRAM cache) in Memory Mode",
- "Counter": "0,1,2,3",
- "EventCode": "0x2C",
- "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
- "PerPkg": "1",
- "PublicDescription": "Tag Hit; Read Hit from NearMem, Dirty Line",
- "UMask": "0x02",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Clean line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
- "Counter": "0,1,2,3",
- "EventCode": "0x2C",
- "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
- "PerPkg": "1",
- "PublicDescription": "Tag Hit; Underfill Rd Hit from NearMem, Clean Line",
- "UMask": "0x04",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Dirty line underfill read hits to Near Memory(DRAM cache) in Memory Mode",
- "Counter": "0,1,2,3",
- "EventCode": "0x2C",
- "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
- "PerPkg": "1",
- "PublicDescription": "Tag Hit; Underfill Rd Hit from NearMem, Dirty Line",
- "UMask": "0x08",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "AD Egress (to CMS) Allocations",
- "Counter": "0,1,2,3",
- "EventCode": "0x9",
- "EventName": "UNC_M2M_TxC_AD_INSERTS",
- "PerPkg": "1",
- "PublicDescription": "AD Egress (to CMS) Allocations",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "AD Egress (to CMS) Occupancy",
- "Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
- "PerPkg": "1",
- "PublicDescription": "AD Egress (to CMS) Occupancy",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "BL Egress (to CMS) Allocations; All",
- "Counter": "0,1,2,3",
- "EventCode": "0x15",
- "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
- "PerPkg": "1",
- "PublicDescription": "BL Egress (to CMS) Allocations; All",
- "UMask": "0x03",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "BL Egress (to CMS) Occupancy; All",
- "Counter": "0,1,2,3",
- "EventCode": "0x16",
- "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
- "PerPkg": "1",
- "PublicDescription": "BL Egress (to CMS) Occupancy; All",
- "UMask": "0x03",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
- "Counter": "0,1,2",
- "EventCode": "0x29",
- "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
- "PerPkg": "1",
- "PublicDescription": "Count cases where flow control queue that sits between the Intel Ultra Path Interconnect (UPI) and the mesh spawns a prefetch to the iMC (Memory Controller)",
- "Unit": "M3UPI"
- },
- {
- "BriefDescription": "Clocks of the Intel Ultra Path Interconnect (UPI)",
- "Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_UPI_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Counts clockticks of the fixed frequency clock controlling the Intel Ultra Path Interconnect (UPI). This clock runs at1/8th the 'GT/s' speed of the UPI link. For example, a 9.6GT/s link will have a fixed Frequency of 1.2 Ghz.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Data Response packets that go direct to core",
- "Counter": "0,1,2,3",
- "EventCode": "0x12",
- "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
- "PerPkg": "1",
- "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to core bypassing the CHA.",
- "UMask": "0x1",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Data Response packets that go direct to Intel UPI",
- "Counter": "0,1,2,3",
- "EventCode": "0x12",
- "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
- "PerPkg": "1",
- "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to Intel Ultra Path Interconnect (UPI) bypassing the CHA .",
- "UMask": "0x2",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Cycles Intel UPI is in L1 power mode (shutdown)",
- "Counter": "0,1,2,3",
- "EventCode": "0x21",
- "EventName": "UNC_UPI_L1_POWER_CYCLES",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when the Intel Ultra Path Interconnect (UPI) is in L1 power mode. L1 is a mode that totally shuts down the UPI link. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another, this event only coutns when both links are shutdown.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Cycles the Rx of the Intel UPI is in L0p power mode",
- "Counter": "0,1,2,3",
- "EventCode": "0x25",
- "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when the the receive side (Rx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
- "Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
- "PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x1",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
- "Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
- "PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x2",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "FLITs received which bypassed the Slot0 Recieve Buffer",
- "Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
- "PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) whcih bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transfered, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x4",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Valid data FLITs received from any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
- "PerPkg": "1",
- "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
- "UMask": "0x0F",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Null FLITs received from any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
- "PerPkg": "1",
- "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
- "UMask": "0x27",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Protocol header and credit FLITs received from any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
- "PerPkg": "1",
- "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) received from any of the 3 UPI slots on this UPI unit.",
- "UMask": "0x97",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Cycles in which the Tx of the Intel Ultra Path Interconnect (UPI) is in L0p power mode",
- "Counter": "0,1,2,3",
- "EventCode": "0x27",
- "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when the transmit side (Tx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "FLITs that bypassed the TxL Buffer",
- "Counter": "0,1,2,3",
- "EventCode": "0x41",
- "EventName": "UNC_UPI_TxL_BYPASSED",
- "PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the TxL(transmit) FLIT buffer and pass directly out the UPI Link. Generally, when data is transmitted across the Intel Ultra Path Interconnect (UPI), it will bypass the TxQ and pass directly to the link. However, the TxQ will be used in L0p (Low Power) mode and (Link Layer Retry) LLR mode, increasing latency to transfer out to the link.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Null FLITs transmitted from any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
- "PerPkg": "1",
- "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) transmitted via any of the 3 Intel Ulra Path Interconnect (UPI) slots on this UPI unit.",
- "UMask": "0x27",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Valid Flits Sent; Data",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.DATA",
- "PerPkg": "1",
- "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
- "UMask": "0x8",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Idle FLITs transmitted",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.IDLE",
- "PerPkg": "1",
- "PublicDescription": "Counts when the Intel Ultra Path Interconnect(UPI) transmits an idle FLIT(80 bit FLow control unITs). Every UPI cycle must be sending either data FLITs, protocol/credit FLITs or idle FLITs.",
- "UMask": "0x47",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
- "PerPkg": "1",
- "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) transmitted across any of the 3 UPI (Ultra Path Interconnect) slots on this UPI unit.",
- "UMask": "0x97",
- "Unit": "UPI LL"
- }
-]
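The UPI event descriptions above carry two pieces of arithmetic worth making explicit: a valid data FLIT carries 64 bits (8 bytes) of payload, and the fixed clock behind UNC_UPI_CLOCKTICKS runs at 1/8th of the link's GT/s rate, so a 9.6 GT/s link gives a 1.2 GHz clock. The Python sketch below only restates that arithmetic; the event names come from the file above, while the helper functions and the example counter readings are illustrative assumptions, not part of the kernel sources.

# Minimal sketch, assuming the 8-bytes-per-data-FLIT and clock-at-GT/s-over-8
# relationships described in the event text above; inputs are made-up readings.

def upi_rx_bandwidth_bytes_per_sec(data_flits, elapsed_sec):
    # Each valid data FLIT (UNC_UPI_RxL_FLITS.ALL_DATA) carries 64 bits = 8 bytes.
    return data_flits * 8 / elapsed_sec

def upi_fixed_clock_hz(link_gt_per_sec):
    # UNC_UPI_CLOCKTICKS counts a fixed clock at 1/8th of the link GT/s rate,
    # e.g. 9.6 GT/s -> 1.2 GHz.
    return link_gt_per_sec * 1e9 / 8

if __name__ == "__main__":
    print(upi_rx_bandwidth_bytes_per_sec(1_500_000_000, 1.0))  # 12e9 bytes/s (example)
    print(upi_fixed_clock_hz(9.6))                             # 1.2e9 Hz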
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json
new file mode 100644
index 000000000000..c6254af7a468
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/uncore-power.json
@@ -0,0 +1,196 @@
+[
+ {
+ "BriefDescription": "pclk Cycles",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent in phase-shedding power state 0",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent in phase-shedding power state 1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent in phase-shedding power state 2",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent in phase-shedding power state 3",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "EventCode": "0x4",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "EventCode": "0x5",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_MCP_PROCHOT_CYCLES",
+ "EventCode": "0x6",
+ "EventName": "UNC_P_MCP_PROCHOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ }
+]
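The PCU occupancy descriptions above note that UNC_P_POWER_STATE_OCCUPANCY accumulates, once per pclk cycle, the number of cores currently in the selected C-state, so dividing it by UNC_P_CLOCKTICKS over the same interval yields the average number of cores in that state. A minimal Python sketch of that calculation follows; the counter values shown are hypothetical and the helper is not part of the kernel sources.

# Minimal sketch, assuming the occupancy/clockticks relationship described in
# the UNC_P_POWER_STATE_OCCUPANCY text above; readings are made-up examples.

def avg_cores_in_cstate(occupancy, pcu_clockticks):
    # occupancy adds (cores in the chosen C-state) every pclk cycle, so the
    # ratio is the time-averaged core count over the measurement interval.
    return occupancy / pcu_clockticks if pcu_clockticks else 0.0

# Example: 24e9 accumulated core-cycles of C0 occupancy over 1e9 pclk cycles
# corresponds to an average of 24 cores resident in C0.
print(avg_cores_in_cstate(24_000_000_000, 1_000_000_000))  # 24.0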
diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
index d13b4111eb52..f59405877ae8 100644
--- a/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/cascadelakex/virtual-memory.json
@@ -1,38 +1,6 @@
[
{
- "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x49",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "100003",
- "UMask": "0x4"
- },
- {
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x85",
- "EventName": "ITLB_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
- "SampleAfterValue": "100003",
- "UMask": "0x10"
- },
- {
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x85",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
- "SampleAfterValue": "100003",
- "UMask": "0xe"
- },
- {
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
@@ -40,19 +8,15 @@
"UMask": "0x1"
},
{
- "BriefDescription": "STLB flush attempts",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xBD",
- "EventName": "TLB_FLUSH.STLB_ANY",
- "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
- "SampleAfterValue": "100007",
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "2000003",
"UMask": "0x20"
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
@@ -61,69 +25,39 @@
"UMask": "0x10"
},
{
- "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x49",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "100003",
- "UMask": "0x8"
- },
- {
- "BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x49",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
- "PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0xe"
},
{
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0xAE",
- "EventName": "ITLB.ITLB_FLUSH",
- "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "SampleAfterValue": "100007",
- "UMask": "0x1"
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x49",
- "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "UMask": "0x10"
+ "UMask": "0x4"
},
{
"BriefDescription": "Page walk completed due to a demand data load to a 4K page",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
"UMask": "0x2"
},
{
- "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x08",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "2000003",
- "UMask": "0x4"
- },
- {
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
"PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
@@ -131,19 +65,23 @@
"UMask": "0x10"
},
{
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x4F",
- "EventName": "EPT.WALK_PENDING",
- "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
- "SampleAfterValue": "2000003",
- "UMask": "0x10"
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
"BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"CounterMask": "1",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
@@ -152,133 +90,139 @@
"UMask": "0x10"
},
{
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x85",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
- "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "UMask": "0x1"
+ "UMask": "0xe"
},
{
- "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"EventCode": "0x49",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "UMask": "0x20"
+ "UMask": "0x8"
},
{
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x08",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "UMask": "0xe"
+ "UMask": "0x4"
},
{
"BriefDescription": "Page walk completed due to a demand data store to a 4K page",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0x2"
},
{
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x85",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
- "SampleAfterValue": "100003",
- "UMask": "0x2"
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.WALK_PENDING",
+ "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"EventCode": "0x85",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "UMask": "0x8"
+ "UMask": "0x1"
},
{
"BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
"UMask": "0x20"
},
{
- "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x08",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "2000003",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
"UMask": "0x8"
},
{
"BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
"UMask": "0x4"
},
{
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x49",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "UMask": "0xe"
+ "UMask": "0x2"
},
{
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "CounterMask": "1",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"EventCode": "0x85",
- "EventName": "ITLB_MISSES.WALK_ACTIVE",
- "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
"SampleAfterValue": "100003",
"UMask": "0x10"
},
{
- "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "EventCode": "0x08",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
- "SampleAfterValue": "2000003",
- "UMask": "0x20"
- },
- {
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "Counter": "0,1,2,3",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
"EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
"UMask": "0x1"
+ },
+ {
+ "BriefDescription": "STLB flush attempts",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json b/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
new file mode 100644
index 000000000000..0ab90e3bf76b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/cache.json
@@ -0,0 +1,886 @@
+[
+ {
+ "BriefDescription": "Counts the number of core requests (demand and L1 prefetchers) rejected by the L2 queue (L2Q) due to a full condition.",
+ "EventCode": "0x31",
+ "EventName": "CORE_REJECT_L2Q.ANY",
+ "PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2 queue (L2Q) due to a full or nearly full condition, which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the External Queue (XQ), but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a cores dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event). Counts on a per core basis.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
+ "EventCode": "0x51",
+ "EventName": "DL1.DIRTY_EVICTION",
+ "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition.",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_XQ.ANY",
+ "PublicDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBL (L2 misses) and WOB (L2 write-back victims).",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the total number of L2 Cache accesses. Counts on a per core basis.",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts the total number of L2 Cache Accesses, includes hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only. Counts on a per core basis.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache accesses that resulted in a hit. Counts on a per core basis.",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "PublicDescription": "Counts the number of L2 Cache accesses that resulted in a hit from a front door request only (does not include rejects or recycles), Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache accesses that resulted in a miss. Counts on a per core basis.",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PublicDescription": "Counts the number of L2 Cache accesses that resulted in a miss from a front door request only (does not include rejects or recycles). Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache accesses that miss the L2 and get rejected. Counts on a per core basis.",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.REJECTS",
+ "PublicDescription": "Counts the number of L2 Cache accesses that miss the L2 and get BBL reject short and long rejects (includes those counted in L2_reject_XQ.any). Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH",
+ "SampleAfterValue": "200003",
+ "UMask": "0x38"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the LLC or other core with HITE/F/M.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a store buffer being full.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.STORE_BUFFER_FULL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core or module.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L1 data cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that miss in the L1 data cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that miss in the L2 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)",
+ "SampleAfterValue": "200003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of load uops retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of store uops retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that performed one or more locks.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x43"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split load uops.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split store uops.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3001F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x801F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x401F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x101F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x201F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/ehl-metrics.json b/tools/perf/pmu-events/arch/x86/elkhartlake/ehl-metrics.json
new file mode 100644
index 000000000000..b6f7126be1fd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/ehl-metrics.json
@@ -0,0 +1,57 @@
+[
+ {
+ "MetricExpr": "INST_RETIRED.ANY / cycles",
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricName": "IPC"
+ },
+ {
+ "MetricExpr": "1 / IPC",
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricName": "CPI"
+ },
+ {
+ "MetricExpr": "cycles",
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricName": "CLKS"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "MetricName": "IpMispredict"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricName": "IpBranch"
+ },
+ {
+ "MetricExpr": "INST_RETIRED.ANY",
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricName": "Instructions"
+ },
+ {
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 ",
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricName": "L3_Cache_Fill_BW"
+ },
+ {
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
+ "BriefDescription": "Average CPU Utilization",
+ "MetricName": "CPU_Utilization"
+ },
+ {
+ "MetricExpr": "(cycles / CPU_CLK_UNHALTED.REF_TSC) * msr@tsc@ / 1000000000 ",
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricName": "Average_Frequency"
+ },
+ {
+ "MetricExpr": "cycles / CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricName": "Turbo_Utilization"
+ },
+ {
+ "MetricExpr": "cycles:k / cycles",
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricName": "Kernel_Utilization"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json b/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
new file mode 100644
index 000000000000..88522244b760
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/floating-point.json
@@ -0,0 +1,26 @@
+[
+ {
+ "BriefDescription": "Counts the number of cycles the floating point divider is busy.",
+ "EventCode": "0xcd",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "PublicDescription": "Counts the number of cycles the floating point divider is busy. Does not imply a stall waiting for the divider.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json b/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json
new file mode 100644
index 000000000000..5ba998e06592
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/frontend.json
@@ -0,0 +1,69 @@
+[
+ {
+ "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a conditional jump.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.COND",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to an indirect branch.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.INDIRECT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a return branch.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a direct, unconditional jump.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.UNCOND",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a decode restriction reduces the decode throughput due to wrong instruction length prediction.",
+ "EventCode": "0xe9",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of requests to the instruction cache for one or more bytes of a cache line.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction cache hits.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "Counts the number of requests that hit in the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction cache misses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json b/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
new file mode 100644
index 000000000000..18621909d1a9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/memory.json
@@ -0,0 +1,358 @@
+[
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of misaligned load uops that are 4K page splits.",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of misaligned store uops that are 4K page splits.",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x802184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x802184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x402184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x402184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all hardware and software prefetches that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PREFETCHES.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000470",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x102184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x102184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x202184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x202184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
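
Every OCR.* entry in memory.json above pairs event code 0xB7 with an off-core response encoding carried in MSRValue and programmed through the MSRs listed in MSRIndex; the *.L3_MISS and *.L3_MISS_LOCAL pairs reuse the same encoding. A rough sketch, assuming a checkout that contains the file added above, of grouping the entries by that encoding so the duplicated configurations become visible.

    import json
    from collections import defaultdict

    # Path of the file added by this patch, relative to the kernel tree (assumed cwd).
    path = "tools/perf/pmu-events/arch/x86/elkhartlake/memory.json"

    by_encoding = defaultdict(list)
    for event in json.load(open(path)):
        value = event.get("MSRValue")
        if value is None:
            continue  # not an off-core response (OCR) event
        # Event names that share an MSRValue select the same off-core response filter.
        by_encoding[value].append(event["EventName"])

    for value, names in sorted(by_encoding.items()):
        print(f"{value}: {', '.join(names)}")
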
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/other.json b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
new file mode 100644
index 000000000000..00ae180ded25
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/other.json
@@ -0,0 +1,532 @@
+[
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.SELF_LOCKS",
+ "EdgeDetect": "1",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.ALL",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock issued by other cores.",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.BLOCK_CYCLES",
+ "PublicDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock issued by other cores. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.BLOCK_CYCLES",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.CYCLES_OTHER_BLOCK",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.LOCK_CYCLES",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.CYCLES_SELF_BLOCK",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock it issued.",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.LOCK_CYCLES",
+ "PublicDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock it issued. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of bus locks a core issued its self (e.g. lock to UC or Split Lock) and does not include cache locks.",
+ "EdgeDetect": "1",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.SELF_LOCKS",
+ "PublicDescription": "Counts the number of bus locks a core issued its self (e.g. lock to UC or Split Lock) and does not include cache locks. Counts on a per core basis.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_DRAM_HIT",
+ "EventCode": "0x34",
+ "EventName": "C0_STALLS.LOAD_DRAM_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_L2_HIT",
+ "EventCode": "0x34",
+ "EventName": "C0_STALLS.LOAD_L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_LLC_HIT",
+ "EventCode": "0x34",
+ "EventName": "C0_STALLS.LOAD_LLC_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of core cycles during which interrupts are masked (disabled).",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.MASKED",
+ "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled).",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "PublicDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled). Increments by 1 each core cycle that both EFLAGS.IF is 0 and an INTR is pending (which means the APIC is telling the ROB to cause an INTR). This event does not increment if EFLAGS.IF is 0 but all interrupt in the APICs Interrupt Request Register (IRR) are inhibited by the PPR (thus either by ISRV or TPR) because in these cases the interrupts would be held up in the APIC and would not be pended to the ROB. This event does count when an interrupt is only inhibited by MOV/POP SS state machines or the STI state machine. These extra inhibits only last for a single instructions and would not be important.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of hardware interrupts received by the processor.",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "SampleAfterValue": "203",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3000000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8003000000000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all hardware and software prefetches that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PREFETCHES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10470",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000100000000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
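
Several entries in other.json above (and in the earlier files) exist only to keep an old name working and point at the replacement in their BriefDescription. A small sketch, again assuming the path added by this patch, that extracts those deprecated aliases together with the event each one defers to.

    import json
    import re

    path = "tools/perf/pmu-events/arch/x86/elkhartlake/other.json"

    for event in json.load(open(path)):
        desc = event.get("BriefDescription", "")
        # Deprecated entries name their replacement after "Refer to new event".
        match = re.search(r"Refer to new event (\S+)", desc)
        if desc.startswith("This event is deprecated") and match:
            print(f"{event['EventName']} -> {match.group(1)}")
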
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
new file mode 100644
index 000000000000..9dd8c909facc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/pipeline.json
@@ -0,0 +1,450 @@
+[
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf9"
+ },
+ {
+ "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xbf"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7"
+ },
+ {
+ "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
+ },
+ {
+ "BriefDescription": "Counts the total number of BTCLEARS.",
+ "EventCode": "0xe8",
+ "EventName": "BTCLEAR.ANY",
+ "PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "EventCode": "0xcd",
+ "EventName": "CYCLES_DIV_BUSY.ANY",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the integer divider is busy.",
+ "EventCode": "0xcd",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "PublicDescription": "Counts the number of cycles the integer divider is busy. Does not imply a stall waiting for the divider.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of instructions retired.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks).",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of machine clears for any reason including, but not limited to, memory ordering, memory disambiguation, SMC, and FP assist.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.ANY",
+ "SampleAfterValue": "20003"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "SampleAfterValue": "20003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to branch mispredicts.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MONUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to backend stalls.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REGISTER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.STORE_BUFFER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.CISC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stalls.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ITLB",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to wrong predecodes.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the total number of consumed retirement slots.",
+ "EventCode": "0xc2",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the total number of uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.X87",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ }
+]
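
pipeline.json above defines both BR_INST_RETIRED.ALL_BRANCHES and BR_MISP_RETIRED.ALL_BRANCHES, so their ratio gives a branch misprediction rate. A hedged sketch of collecting the two counters with perf stat from Python; it assumes perf was built with these event tables so the lowercase aliases resolve, and that perf stat -x emits CSV lines in the order count, unit, event name.

    import csv
    import subprocess

    # Lowercase aliases are how perf normally exposes JSON event names once the
    # tables are generated; the exact spellings below are an assumption.
    events = "br_inst_retired.all_branches,br_misp_retired.all_branches"

    # perf stat -x, emits CSV-formatted counter lines on stderr.
    result = subprocess.run(
        ["perf", "stat", "-x", ",", "-e", events, "--", "sleep", "1"],
        capture_output=True, text=True, check=True,
    )

    counts = {}
    for row in csv.reader(result.stderr.splitlines()):
        if len(row) > 2 and row[0].strip().isdigit():
            counts[row[2]] = int(row[0])  # assumed order: count, unit, event name

    branches = counts.get("br_inst_retired.all_branches", 0)
    mispredicts = counts.get("br_misp_retired.all_branches", 0)
    if branches:
        print(f"branch mispredict rate: {mispredicts / branches:.2%}")
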
diff --git a/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
new file mode 100644
index 000000000000..cabe29e70e79
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/elkhartlake/virtual-memory.json
@@ -0,0 +1,247 @@
+[
+ {
+ "BriefDescription": "Counts the number of page walks due to loads that miss the PDE (Page Directory Entry) cache.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Account for all page sizes. Will result in a DTLB write from STLB.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1GB pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks due to stores that miss the PDE (Page Directory Entry) cache.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Account for all pages sizes. Will result in a DTLB write from STLB.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of Extended Page Directory Entry hits.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.EPDE_HIT",
+ "PublicDescription": "Counts the number of Extended Page Directory Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of Extended Page Directory Entry misses.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.EPDE_MISS",
+ "PublicDescription": "Counts the number Extended Page Directory Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of Extended Page Directory Pointer Entry hits.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.EPDPE_HIT",
+ "PublicDescription": "Counts the number Extended Page Directory Pointer Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of Extended Page Directory Pointer Entry misses.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.EPDPE_MISS",
+ "PublicDescription": "Counts the number Extended Page Directory Pointer Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of times there was an ITLB miss and a new translation was filled into the ITLB.",
+ "EventCode": "0x81",
+ "EventName": "ITLB.FILLS",
+ "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) and a new translation was filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 1G page.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DTLB_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x13"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x12"
+ }
+]
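
As a rough illustration of how an entry from the table above maps onto a raw hardware counter (not part of the patch): on Intel cores, Linux perf packs UMask and EventCode into a raw config as (UMask << 8) | EventCode, so DTLB_LOAD_MISSES.WALK_COMPLETED_4K (EventCode 0x08, UMask 0x2) would correspond to raw config 0x0208. The sketch below is a minimal perf_event_open() counter built on that assumption; it counts only the calling thread and is illustrative only.

#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = 0x0208;	/* assumed: (UMask 0x2 << 8) | EventCode 0x08 */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* count for the calling thread on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... run the workload of interest here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) != sizeof(count))
		perror("read");
	printf("walk_completed_4k (raw 0x0208): %llu\n",
	       (unsigned long long)count);
	close(fd);
	return 0;
}

Once the JSON above is installed, the same counter should also be reachable by name from the perf tool, e.g. perf stat -e dtlb_load_misses.walk_completed_4k -- <workload>, which is usually the more convenient path.
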
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/cache.json b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
index 52a105666afc..ee47a09172a1 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/cache.json
@@ -1,1305 +1,1015 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
+ "BriefDescription": "Requests rejected by the L2Q",
+ "EventCode": "0x31",
+ "EventName": "CORE_REJECT_L2Q.ALL",
+ "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "L1 Cache evictions for dirty data",
+ "EventCode": "0x51",
+ "EventName": "DL1.DIRTY_EVICTION",
+ "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache request misses"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss.",
+ "EventCode": "0x86",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
+ "BriefDescription": "Requests rejected by the XQ",
"EventCode": "0x30",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
"EventName": "L2_REJECT_XQ.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests rejected by the XQ"
+ "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
+ "SampleAfterValue": "200003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
- "EventCode": "0x31",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CORE_REJECT_L2Q.ALL",
+ "BriefDescription": "L2 cache request misses",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests rejected by the L2Q"
+ "UMask": "0x41"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DL1.DIRTY_EVICTION",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L1 Cache evictions for dirty data"
+ "UMask": "0x4f"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
- "EventCode": "0x86",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "PEBS": "2",
+ "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
- },
- {
- "CollectPEBSRecord": "1",
- "EventCode": "0xB7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100007",
- "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "Counts load uops retired that hit the L1 data cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x43",
- "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of load uops retired.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "Counts load uops retired that miss in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of store uops retired.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x40"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
+ "BriefDescription": "Memory uops retired (Precise event capable)",
+ "Data_LA": "1",
"EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x83",
"EventName": "MEM_UOPS_RETIRED.ALL",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x83"
},
{
+ "BriefDescription": "Load uops retired (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that hit the L1 data cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PublicDescription": "Counts the number of load uops retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Store uops retired (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PublicDescription": "Counts the number of store uops retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Locked load uops retired (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that miss in the L2 cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x43"
},
{
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
+ "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
+ "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x42"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
- "Data_LA": "1"
+ "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x36000032b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000043091",
+ "PublicDescription": "Counts data reads (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x10000032b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600003091",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x04000032b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000003091",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x02000032b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400003091",
+ "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x00000432b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200003091",
+ "PublicDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000043010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600003010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000003010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400003010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200003010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00000432b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x36000032b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000032b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x04000032b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0000043091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x02000032b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x3600003010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000018000",
+ "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000003010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000048000",
+ "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400003010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000008000",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200003010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400008000",
+ "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0000043010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200008000",
+ "PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000040022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0400008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0000048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0000018000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x3600004800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010400",
+ "PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000044800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x0000040008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600004000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x3600000008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x1000004000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x1000000008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400004000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x0400000008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0200004000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x0200000008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000044000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000040004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600002000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x1000002000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0400002000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x0200002000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000042000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000040001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600001000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x1000001000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400001000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0200001000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x0000041000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000040002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests that have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000040800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000080",
+ "PublicDescription": "Counts demand data partial reads, including data in uncacheable (UC) or uncacheable write combining (USWC) memory types that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000044000",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600004000",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000004000",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400004000",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200004000",
+ "PublicDescription": "Counts partial cache line data writes to uncacheable write combining (USWC) memory region that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000100",
+ "PublicDescription": "Counts the number of demand write requests (RFO) generated by a write to partial data cache line, including the writes to uncacheable (UC) and write through (WT), and write protected (WP) types of memory that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000042000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600002000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000002000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400002000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200002000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000040010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000040020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600000020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000044800",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600004800",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x3600000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000041000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3600001000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000001000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400001000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that miss the L2 cache with a snoop hit in the other processor module, no data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200001000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions that true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines that hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json b/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json
new file mode 100644
index 000000000000..a3f03855ca05
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmont/floating-point.json
@@ -0,0 +1,27 @@
+[
+ {
+ "BriefDescription": "Cycles the FP divide unit is busy",
+ "EventCode": "0xCD",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Machine clears due to FP assists",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Floating point divide uops retired. (Precise Event Capable)",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of floating point divide uops retired.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ }
+]
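The new floating-point.json above follows the normalized layout used throughout this diff: per-event keys sorted alphabetically, events ordered by EventName, and a trailing newline. As a minimal sketch (hypothetical, not part of the patch or of the perf jevents tooling), the following Python re-serializes one of these files in that same ordering:

import json

# Path as it appears in this diff; run from the top of a kernel tree.
path = "tools/perf/pmu-events/arch/x86/goldmont/floating-point.json"

with open(path) as f:
    events = json.load(f)

# The regenerated files keep events ordered by EventName with keys sorted
# alphabetically; sort_keys=True shows the same key ordering (exact
# indentation may differ from the checked-in files).
events.sort(key=lambda e: e["EventName"])
print(json.dumps(events, indent=4, sort_keys=True))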
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/frontend.json b/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
index 9ba08518649e..ace2a114b546 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/frontend.json
@@ -1,52 +1,66 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
+ "BriefDescription": "BACLEARs asserted for any branch type",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ALL",
+ "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
+ "BriefDescription": "BACLEARs asserted for conditional branch",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.COND",
+ "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture"
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "BriefDescription": "BACLEARs asserted for return branch",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.RETURN",
+ "PublicDescription": "Counts BACLEARS on return instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Decode restrictions due to predicting wrong instruction length",
+ "EventCode": "0xE9",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EventName": "ICACHE.ACCESSES",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
"SampleAfterValue": "200003",
- "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture"
+ "UMask": "0x3"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
- "EventCode": "0xE7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MS_DECODED.MS_ENTRY",
+ "BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
"SampleAfterValue": "200003",
- "BriefDescription": "MS decode starts"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
- "EventCode": "0xE9",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "MS decode starts",
+ "EventCode": "0xE7",
+ "EventName": "MS_DECODED.MS_ENTRY",
+ "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
"SampleAfterValue": "200003",
- "BriefDescription": "Decode restrictions due to predicting wrong instruction length"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/memory.json b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
index 197dc76d49dd..b97642a109ee 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/memory.json
@@ -1,34 +1,28 @@
[
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
- "EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "BriefDescription": "Machine clears due to memory ordering issue",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved as another core is in the process of modifying the data.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops that split a page (Precise event capable)"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.",
+ "BriefDescription": "Load uops that split a page (Precise event capable)",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "PEBS": "2",
+ "PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops that split a page (Precise event capable)"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved as another core is in the process of modifying the data.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "BriefDescription": "Store uops that split a page (Precise event capable)",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "PEBS": "2",
+ "PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Machine clears due to memory ordering issue"
+ "UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/other.json b/tools/perf/pmu-events/arch/x86/goldmont/other.json
index 959cadd7cb0e..c4fd0acb15bc 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/other.json
@@ -1,82 +1,41 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "BriefDescription": "Cycles code-fetch stalled due to any reason.",
"EventCode": "0x86",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
"EventName": "FETCH_STALL.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles code-fetch stalled due to any reason."
+ "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "SampleAfterValue": "200003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss.",
"EventCode": "0x86",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss."
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
- "SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "BriefDescription": "Cycles hardware interrupts are masked",
+ "EventCode": "0xCB",
+ "EventName": "HW_INTERRUPTS.MASKED",
+ "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "BriefDescription": "Cycles pending interrupts are masked",
+ "EventCode": "0xCB",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle to recover"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts hardware interrupts received by the processor.",
+ "BriefDescription": "Hardware interrupts received",
"EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts hardware interrupts received by the processor.",
"SampleAfterValue": "203",
- "BriefDescription": "Hardware interrupts received"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "HW_INTERRUPTS.MASKED",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles hardware interrupts are masked"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles pending interrupts are masked"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
index 6342368accf8..acb897483a87 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/pipeline.json
@@ -1,452 +1,334 @@
[
{
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "Counter": "Fixed counter 0",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Fixed event)"
- },
- {
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted (Fixed event)"
- },
- {
- "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "Counter": "Fixed counter 2",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
+ "BriefDescription": "Retired branch instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "2",
+ "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
+ "SampleAfterValue": "200003"
},
{
+ "BriefDescription": "Retired taken branch instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PublicDescription": "Counts the number of taken branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Retired near call instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CALL",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts near CALL branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
+ "UMask": "0xf9"
},
{
+ "BriefDescription": "Retired far branch instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "LD_BLOCKS.4K_ALIAS",
+ "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
+ "UMask": "0xbf"
},
{
+ "BriefDescription": "Retired near indirect call instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.UTLB_MISS",
+ "PublicDescription": "Counts near indirect CALL branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
+ "UMask": "0xfb"
},
{
+ "BriefDescription": "Retired conditional branch instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.JCC",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked (Precise event capable)"
+ "UMask": "0x7e"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_ISSUED.ANY",
+ "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "2",
+ "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops issued to the back end per cycle"
+ "UMask": "0xeb"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.CORE_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Reference cycles when core is not halted. This event uses a programmable general purpose performance counter.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted"
+ "BriefDescription": "Retired near relative call instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PEBS": "2",
+ "PublicDescription": "Counts near relative CALL branch instructions retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_NOT_DELIVERED.ANY",
+ "BriefDescription": "Retired near return instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "PEBS": "2",
+ "PublicDescription": "Counts near return branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
+ "UMask": "0xf7"
},
{
+ "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Precise event capable)"
+ "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
},
{
+ "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts uops which retired.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "UOPS_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops retired (Precise event capable)"
+ "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
+ "SampleAfterValue": "200003"
},
{
+ "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.MS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "MS uops retired (Precise event capable)"
+ "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
},
{
+ "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of floating point divide uops retired.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_RETIRED.FPDIV",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Floating point divide uops retired. (Precise Event Capable)"
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
},
{
+ "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of integer divide uops retired.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_RETIRED.IDIV",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Integer divide uops retired. (Precise Event Capable)"
+ "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears for any reason.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "MACHINE_CLEARS.ALL",
+ "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "2",
+ "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
"SampleAfterValue": "200003",
- "BriefDescription": "All machine clears"
+ "UMask": "0xf7"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MACHINE_CLEARS.SMC",
+ "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "2",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
"SampleAfterValue": "200003",
- "BriefDescription": "Self-Modifying Code detected"
+ "UMask": "0xfe"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.FP_ASSIST",
- "SampleAfterValue": "200003",
- "BriefDescription": "Machine clears due to FP assists"
+ "BriefDescription": "Core cycles when core is not halted (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
- "SampleAfterValue": "200003",
- "BriefDescription": "Machine clears due to memory disambiguation"
+ "BriefDescription": "Core cycles when core is not halted",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired branch instructions (Precise event capable)"
+ "BriefDescription": "Reference cycles when core is not halted",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "PublicDescription": "Reference cycles when core is not halted. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "EventName": "BR_INST_RETIRED.JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
+ "BriefDescription": "Reference cycles when core is not halted (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired taken branch instructions (Precise event capable)"
+ "BriefDescription": "Cycles a divider is busy",
+ "EventCode": "0xCD",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "PublicDescription": "Counts core cycles if either divide unit is busy.",
+ "SampleAfterValue": "2000003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xbf",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "BriefDescription": "Cycles the integer divide unit is busy",
+ "EventCode": "0xCD",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "PublicDescription": "Counts core cycles the integer divide unit is busy.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired far branch instructions (Precise event capable)"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xeb",
- "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
+ "BriefDescription": "Instructions retired (Fixed event)",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Instructions retired (Precise event capable)",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near return branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xf7",
- "EventName": "BR_INST_RETIRED.RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired near return instructions (Precise event capable)"
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
+ "SampleAfterValue": "2000003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xf9",
- "EventName": "BR_INST_RETIRED.CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired near call instructions (Precise event capable)"
+ "BriefDescription": "Unfilled issue slots per cycle",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
+ "SampleAfterValue": "200003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfb",
- "EventName": "BR_INST_RETIRED.IND_CALL",
+ "BriefDescription": "Unfilled issue slots per cycle to recover",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near relative CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfd",
- "EventName": "BR_INST_RETIRED.REL_CALL",
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near relative call instructions (Precise event capable)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfe",
- "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Loads blocked (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "EventName": "BR_MISP_RETIRED.JCC",
+ "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0xeb",
- "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call. (Precise event capable)"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.UTLB_MISS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0xf7",
- "EventName": "BR_MISP_RETIRED.RETURN",
+ "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
+ "UMask": "0x8"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0xfb",
- "EventName": "BR_MISP_RETIRED.IND_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
+ "BriefDescription": "All machine clears",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "PublicDescription": "Counts machine clears for any reason.",
+ "SampleAfterValue": "200003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0xfe",
- "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "BriefDescription": "Machine clears due to memory disambiguation",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles if either divide unit is busy.",
- "EventCode": "0xCD",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CYCLES_DIV_BUSY.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a divider is busy"
+ "BriefDescription": "Self-Modifying Code detected",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel(R) architecture processors.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the integer divide unit is busy.",
- "EventCode": "0xCD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLES_DIV_BUSY.IDIV",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles the integer divide unit is busy"
+ "BriefDescription": "Uops issued to the back end per cycle",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
+ "SampleAfterValue": "200003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
- "EventCode": "0xCD",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLES_DIV_BUSY.FPDIV",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles the FP divide unit is busy"
+ "BriefDescription": "Uops requested but not-delivered to the back-end per cycle",
+ "EventCode": "0x9C",
+ "EventName": "UOPS_NOT_DELIVERED.ANY",
+ "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
+ "SampleAfterValue": "200003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BACLEARS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for any branch type"
+ "BriefDescription": "Uops retired (Precise event capable)",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ANY",
+ "PEBS": "2",
+ "PublicDescription": "Counts uops which retired.",
+ "SampleAfterValue": "2000003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on return instructions.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BACLEARS.RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for return branch"
+ "BriefDescription": "Integer divide uops retired. (Precise Event Capable)",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of integer divide uops retired.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BACLEARS.COND",
- "SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for conditional branch"
+ "BriefDescription": "MS uops retired (Precise event capable)",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PEBS": "2",
+ "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
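
For reference, the entries rewritten above can be consumed directly: each record's EventCode/UMask pair maps onto a raw perf event specification of the form cpu/event=...,umask=.../. The snippet below is a minimal sketch of that mapping under the assumption that the file follows the layout shown in this hunk; the path and the handling of fixed-counter entries (which carry no EventCode) are illustrative, not part of this patch.

#!/usr/bin/env python3
# Minimal sketch (not part of this patch): print a raw perf event spec for each
# entry of a pmu-events JSON file laid out like the hunk above.
import json

PATH = "tools/perf/pmu-events/arch/x86/goldmont/pipeline.json"  # illustrative path

with open(PATH) as f:
    events = json.load(f)

for ev in events:
    name = ev.get("EventName", "?")
    code = ev.get("EventCode")        # fixed-counter events carry no EventCode
    umask = ev.get("UMask", "0x0")
    if code is None:
        print(f"{name}: fixed-counter event, no programmable encoding")
    else:
        print(f"{name}: cpu/event={code},umask={umask}/")

The printed specifications can then be handed to perf, e.g. perf stat -e cpu/event=0x3c,umask=0x1/ for CPU_CLK_UNHALTED.REF above.
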
diff --git a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
index 343d66bbd777..8c4929a517fa 100644
--- a/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json
@@ -1,78 +1,64 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "BriefDescription": "ITLB misses",
+ "EventCode": "0x81",
+ "EventName": "ITLB.MISS",
+ "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"SampleAfterValue": "200003",
- "BriefDescription": "Duration of D-side page-walks in cycles"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
+ "PEBS": "2",
+ "PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
"SampleAfterValue": "200003",
- "BriefDescription": "Duration of I-side pagewalks in cycles"
+ "UMask": "0x13"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.CYCLES",
+ "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "PEBS": "2",
+ "PublicDescription": "Counts load uops retired that caused a DTLB miss.",
"SampleAfterValue": "200003",
- "BriefDescription": "Duration of page-walks in cycles"
+ "UMask": "0x11"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
- "EventCode": "0x81",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB.MISS",
+ "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
+ "PEBS": "2",
+ "PublicDescription": "Counts store uops retired that caused a DTLB miss.",
"SampleAfterValue": "200003",
- "BriefDescription": "ITLB misses"
+ "UMask": "0x12"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that caused a DTLB miss.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "BriefDescription": "Duration of page-walks in cycles",
+ "EventCode": "0x05",
+ "EventName": "PAGE_WALKS.CYCLES",
+ "PublicDescription": "Counts every core cycle a page-walk is in progress due to either a data memory operation or an instruction fetch.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x3"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts store uops retired that caused a DTLB miss.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
+ "BriefDescription": "Duration of D-side page-walks in cycles",
+ "EventCode": "0x05",
+ "EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "PublicDescription": "Counts every core cycle when a Data-side (walks due to a data operation) page walk is in progress.",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x13",
- "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
+ "BriefDescription": "Duration of I-side pagewalks in cycles",
+ "EventCode": "0x05",
+ "EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "PublicDescription": "Counts every core cycle when a Instruction-side (walks due to an instruction fetch) page walk is in progress.",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x2"
}
-] \ No newline at end of file
+]
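
The rewrite in these hunks appears to drop perf-internal bookkeeping fields (CollectPEBSRecord, Counter, and, in the goldmontplus hunks below, PEBScounters and PDIR_COUNTER), alphabetize the remaining keys within each entry, and sort entries by EventName. The sketch below is a hypothetical sanity check built on that observation; the path is taken from the hunk above and the expectations are inferred from the diff, not stated by the patch itself.

#!/usr/bin/env python3
# Hypothetical consistency check matching the ordering seen in the hunks above:
# keys alphabetized within each entry, entries sorted by EventName.
import json
import sys

PATH = "tools/perf/pmu-events/arch/x86/goldmont/virtual-memory.json"

with open(PATH) as f:
    events = json.load(f)  # dicts preserve key order (Python 3.7+)

names = [ev.get("EventName", "") for ev in events]
if names != sorted(names):
    sys.exit("entries are not sorted by EventName")

for ev in events:
    keys = list(ev)
    if keys != sorted(keys):
        sys.exit(f"{ev.get('EventName')}: keys are not alphabetized")

print(f"checked {len(events)} events")
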
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
index 5a6ac8285ad4..a7f80fd1b1df 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/cache.json
@@ -1,1467 +1,995 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "PEBScounters": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "Requests rejected by the L2Q",
+ "EventCode": "0x31",
+ "EventName": "CORE_REJECT_L2Q.ALL",
+ "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "L1 Cache evictions for dirty data",
+ "EventCode": "0x51",
+ "EventName": "DL1.REPLACEMENT",
+ "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache request misses"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "PEBScounters": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss.",
+ "EventCode": "0x86",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache requests"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
+ "BriefDescription": "Requests rejected by the XQ",
"EventCode": "0x30",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
"EventName": "L2_REJECT_XQ.ALL",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests rejected by the XQ"
+ "PublicDescription": "Counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the intra-die interconnect (IDI) fabric. The XQ may reject transactions from the L2Q (non-cacheable requests), L2 misses and L2 write-back victims.",
+ "SampleAfterValue": "200003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of demand and L1 prefetcher requests rejected by the L2Q due to a full or nearly full condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core's dirty eviction when the address conflicts with incoming external snoops.",
- "EventCode": "0x31",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "CORE_REJECT_L2Q.ALL",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "L2 cache request misses",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts memory requests originating from the core that miss in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests rejected by the L2Q"
+ "UMask": "0x41"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts when a modified (dirty) cache line is evicted from the data L1 cache and needs to be written back to memory. No count will occur if the evicted line is clean, and hence does not require a writeback.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "DL1.REPLACEMENT",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "L2 cache requests",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts memory requests originating from the core that reference a cache line in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L1 Cache evictions for dirty data"
+ "UMask": "0x4f"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.",
- "EventCode": "0x86",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "PEBS": "2",
+ "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
- },
- {
- "CollectPEBSRecord": "1",
- "EventCode": "0xB7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "100007",
- "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Locked load uops retired (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "Counts load uops retired that hit the L1 data cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x43",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of load uops retired.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "Counts load uops retired that miss in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of store uops retired.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x40"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
+ "BriefDescription": "Memory uops retired (Precise event capable)",
+ "Data_LA": "1",
"EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x83",
- "PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.ALL",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of memory uops retired that is either a loads or a store or both.",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x83"
},
{
+ "BriefDescription": "Load uops retired (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that hit the L1 data cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PublicDescription": "Counts the number of load uops retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L1 data cache (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Store uops retired (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that hit in the L2 cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PublicDescription": "Counts the number of store uops retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that hit L2 (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Locked load uops retired (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that miss the L1 data cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PublicDescription": "Counts locked memory uops retired. This includes regular locks and bus locks. (To specifically count bus locks only, see the Offcore response event.) A locked access is one with a lock prefix, or an exchange to memory. See the SDM for a complete description of which memory load accesses are locks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L1 data cache (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Memory uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that miss in the L2 cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PublicDescription": "Counts memory uops retired where the data requested spans a 64 byte cache line boundary.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed L2 (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x43"
},
{
+ "BriefDescription": "Load uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired where the cache line containing the data was in the modified state of another core or modules cache (HITM). More specifically, this means that when the load address was checked by other caching agents (typically another processor) in the system, one of those caching agents indicated that they had a dirty copy of the data. Loads that obtain a HITM response incur greater latency than most is typical for a load. In addition, since HITM indicates that some other processor had this data in its cache, it implies that the data was shared between processors, or potentially was a lock or semaphore value. This event is useful for locating sharing, false sharing, and contended locks.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
+ "PublicDescription": "Counts load uops retired where the data requested spans a 64 byte cache line boundary.",
"SampleAfterValue": "200003",
- "BriefDescription": "Memory uop retired where cross core or cross module HITM occurred (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Stores uops retired that split a cache-line (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory load uops retired where the data is retrieved from the WCB (or fill buffer), indicating that the load found its data while that data was in the process of being brought into the L1 cache. Typically a load will receive this indication when some other load or prefetch missed the L1 cache and was in the process of retrieving the cache line containing the data, but that process had not yet finished (and written the data back to the cache). For example, consider load X and Y, both referencing the same cache line that is not in the L1 cache. If load X misses cache first, it obtains and WCB (or fill buffer) and begins the process of requesting the data. When load Y requests the data, it will either hit the WCB, or the L1 cache, depending on exactly what time the request to Y occurs.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.WCB_HIT",
+ "PublicDescription": "Counts store uops retired where the data requested spans a 64 byte cache line boundary.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that hit WCB (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x42"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory load uops retired where the data is retrieved from DRAM. Event is counted at retirement, so the speculative loads are ignored. A memory load can hit (or miss) the L1 cache, hit (or miss) the L2 cache, hit DRAM, hit in the WCB or receive a HITM response.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Loads retired that came from DRAM (Precise event capable)",
- "Data_LA": "1"
+ "BriefDescription": "Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "EventCode": "0xB7",
+ "EventName": "OFFCORE_RESPONSE",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000013091",
+ "PublicDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000043091",
+ "PublicDescription": "Counts data reads (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000003091",
+ "PublicDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200003091",
+ "PublicDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000003091",
+ "PublicDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000013010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000043010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000003010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200003010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000003010",
+ "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x00000132b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x00000432b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x10000032b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x02000032b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x40000032b7",
+ "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000018000",
+ "PublicDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000048000",
+ "PublicDescription": "Counts requests to the uncore subsystem hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000008000",
+ "PublicDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200008000",
+ "PublicDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000008000",
+ "PublicDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000022",
+ "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010400",
+ "PublicDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts bus lock and split lock requests hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040400",
+ "PublicDescription": "Counts bus lock and split lock requests hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000400",
+ "PublicDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000400",
+ "PublicDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000400",
+ "PublicDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.COREWB.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000008",
+ "PublicDescription": "Counts the number of writeback transactions caused by L1 or L2 cache evictions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts bus lock and split lock requests outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000004",
+ "PublicDescription": "Counts demand instruction cacheline and I-side prefetch requests that miss the instruction cache outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000011000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000041000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200001000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000001000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000001000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000001",
+ "PublicDescription": "Counts demand cacheable data reads of full cache lines outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000012000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000042000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200002000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000002000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000002000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000002",
+ "PublicDescription": "Counts demand reads for ownership (RFO) requests generated by a write to full data cache line outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000014800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000044800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200004800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000004800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000004800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000800",
+ "PublicDescription": "Counts full cache line data writes to uncacheable write combining (USWC) memory region and full cache-line non-temporal writes outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000018000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000012000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000042000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000002000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200002000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000002000",
+ "PublicDescription": "Counts data cache line reads generated by hardware L1 data cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts requests to the uncore subsystem outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000013010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000043010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200003010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000003010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000003010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_DATA_RD.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000010",
+ "PublicDescription": "Counts data cacheline reads generated by hardware L2 cache prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads generated by L1 or L2 prefetchers outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000013091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000010020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000043091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000040020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000000020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200000020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000003091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000020",
+ "PublicDescription": "Counts reads for ownership (RFO) requests generated by L2 prefetcher outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data reads (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000014800",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0000040022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000044800",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000004800",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200004800",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000022",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000004800",
+ "PublicDescription": "Counts any data writes to uncacheable write combining (USWC) memory region outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts reads for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem.",
"EventCode": "0xB7",
- "MSRValue": "0x00000132b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000011000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions have any transaction responses from the uncore subsystem. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) have any transaction responses from the uncore subsystem.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x00000432b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_HIT",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0000041000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions hit the L2 cache. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) hit the L2 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
"EventCode": "0xB7",
- "MSRValue": "0x02000032b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x1000001000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) true miss for the L2 cache with a snoop miss in the other processor module.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module.",
"EventCode": "0xB7",
- "MSRValue": "0x10000032b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_MISS.HITM_OTHER_CORE",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.L2_MISS.SNOOP_MISS_OR_NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6, 0x1a7",
+ "MSRValue": "0x0200001000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions true miss for the L2 cache with a snoop miss in the other processor module. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) miss the L2 cache with a snoop hit in the other processor module, data forwarding is required.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
+ "BriefDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x40000032b7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
- "PDIR_COUNTER": "na",
+ "EventName": "OFFCORE_RESPONSE.SW_PREFETCH.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000001000",
+ "PublicDescription": "Counts data cache lines requests by software prefetch instructions outstanding, per cycle, from the time of the L2 miss to when any response is received. Requires MSR_OFFCORE_RESP[0,1] to specify request type and response. (duplicated for both MSRs)",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data read, code read, and read for ownership (RFO) requests (demand & prefetch) outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json b/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json
new file mode 100644
index 000000000000..822a7a6bcaeb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/floating-point.json
@@ -0,0 +1,27 @@
+[
+ {
+ "BriefDescription": "Cycles the FP divide unit is busy",
+ "EventCode": "0xCD",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Machine clears due to FP assists",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Floating point divide uops retired (Precise Event Capable)",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of floating point divide uops retired.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
index a7878965ceab..ace2a114b546 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/frontend.json
@@ -1,62 +1,66 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE.HIT",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "BACLEARs asserted for any branch type",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ALL",
+ "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE.MISSES",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "BACLEARs asserted for conditional branch",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.COND",
+ "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture"
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "BriefDescription": "BACLEARs asserted for return branch",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.RETURN",
+ "PublicDescription": "Counts BACLEARS on return instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Decode restrictions due to predicting wrong instruction length",
+ "EventCode": "0xE9",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "PEBScounters": "0,1,2,3",
"EventName": "ICACHE.ACCESSES",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.\r\nThis event counts differently than Intel processors based on Silvermont microarchitecture.",
"SampleAfterValue": "200003",
- "BriefDescription": "References per ICache line. This event counts differently than Intel processors based on Silvermont microarchitecture"
+ "UMask": "0x3"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
- "EventCode": "0xE7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "MS_DECODED.MS_ENTRY",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "References per ICache line that are available in the ICache (hit). This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is in the ICache (hit). The event strives to count on a cache line basis, so that multiple accesses which hit in a single cache line count as one ICACHE.HIT. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
"SampleAfterValue": "200003",
- "BriefDescription": "MS decode starts"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times the prediction (from the predecode cache) for instruction length is incorrect.",
- "EventCode": "0xE9",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "References per ICache line that are not available in the ICache (miss). This event counts differently than Intel processors based on Silvermont microarchitecture",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache. This event counts differently than Intel processors based on Silvermont microarchitecture.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "MS decode starts",
+ "EventCode": "0xE7",
+ "EventName": "MS_DECODED.MS_ENTRY",
+ "PublicDescription": "Counts the number of times the Microcode Sequencer (MS) starts a flow of uops from the MSROM. It does not count every time a uop is read from the MSROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort that initiates a flow of uops. The event will count MS startups for uops that are speculative, and subsequently cleared by branch mispredict or a machine clear.",
"SampleAfterValue": "200003",
- "BriefDescription": "Decode restrictions due to predicting wrong instruction length"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
index 91e0815f3ffb..7038873a5c8d 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/memory.json
@@ -1,38 +1,28 @@
[
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
+ "BriefDescription": "Machine clears due to memory ordering issue",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Load uops that split a page (Precise event capable)",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
"EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "PEBS": "2",
+ "PublicDescription": "Counts when a memory load of a uop spans a page boundary (a split) is retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops that split a page (Precise event capable)"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.",
+ "BriefDescription": "Store uops that split a page (Precise event capable)",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
"EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "PEBS": "2",
+ "PublicDescription": "Counts when a memory store of a uop spans a page boundary (a split) is retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops that split a page (Precise event capable)"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to memory ordering issues. This occurs when a snoop request happens and the machine is uncertain if memory ordering will be preserved - as another core is in the process of modifying the data.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "20003",
- "BriefDescription": "Machine clears due to memory ordering issue"
+ "UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
index b860374418ab..ec0ce9078c98 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/other.json
@@ -1,98 +1,41 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "BriefDescription": "Cycles code-fetch stalled due to any reason.",
"EventCode": "0x86",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
"EventName": "FETCH_STALL.ALL",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles code-fetch stalled due to any reason."
+ "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "SampleAfterValue": "200003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding.",
"EventCode": "0x86",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
"EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles the code-fetch stalls and an ITLB miss is outstanding."
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "Cycles hardware interrupts are masked",
+ "EventCode": "0xCB",
+ "EventName": "HW_INTERRUPTS.MASKED",
+ "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "Cycles pending interrupts are masked",
+ "EventCode": "0xCB",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
"SampleAfterValue": "200003",
- "BriefDescription": "Unfilled issue slots per cycle to recover"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts hardware interrupts received by the processor.",
+ "BriefDescription": "Hardware interrupts received",
"EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
"EventName": "HW_INTERRUPTS.RECEIVED",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts hardware interrupts received by the processor.",
"SampleAfterValue": "203",
- "BriefDescription": "Hardware interrupts received"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "HW_INTERRUPTS.MASKED",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles hardware interrupts are masked"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts core cycles during which there are pending interrupts, but interrupts are masked (EFLAGS.IF = 0).",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles pending interrupts are masked"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
index e3fa1a0ba71b..33ef331e77e0 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json
@@ -1,541 +1,351 @@
[
{
+ "BriefDescription": "Retired branch instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
- "Counter": "Fixed counter 0",
- "UMask": "0x1",
- "PEBScounters": "32",
- "EventName": "INST_RETIRED.ANY",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Fixed event)"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "PEBScounters": "33",
- "EventName": "CPU_CLK_UNHALTED.CORE",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted (Fixed event)"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
- "Counter": "Fixed counter 2",
- "UMask": "0x3",
- "PEBScounters": "34",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted (Fixed event)"
+ "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
+ "SampleAfterValue": "200003"
},
{
+ "BriefDescription": "Retired taken branch instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PublicDescription": "Counts the number of taken branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Retired near call instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CALL",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts near CALL branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)"
+ "UMask": "0xf9"
},
{
+ "BriefDescription": "Retired far branch instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.4K_ALIAS",
+ "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)"
+ "UMask": "0xbf"
},
{
+ "BriefDescription": "Retired near indirect call instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.UTLB_MISS",
+ "PublicDescription": "Counts near indirect CALL branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)"
+ "UMask": "0xfb"
},
{
+ "BriefDescription": "Retired conditional branch instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.JCC",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked (Precise event capable)"
+ "UMask": "0x7e"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "UOPS_ISSUED.ANY",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "2",
+ "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops issued to the back end per cycle"
+ "UMask": "0xeb"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.CORE_P",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted"
- },
- {
- "CollectPEBSRecord": "1",
- "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted"
+ "BriefDescription": "Retired near relative call instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PEBS": "2",
+ "PublicDescription": "Counts near relative CALL branch instructions retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "UOPS_NOT_DELIVERED.ANY",
- "PDIR_COUNTER": "na",
+ "BriefDescription": "Retired near return instructions (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "PEBS": "2",
+ "PublicDescription": "Counts near return branch instructions retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops requested but not-delivered to the back-end per cycle"
+ "UMask": "0xf7"
},
{
+ "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired (Precise event capable)"
+ "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
},
{
+ "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts INST_RETIRED.ANY using the Reduced Skid PEBS feature that reduces the shadow in which events aren't counted allowing for a more unbiased distribution of samples across instructions retired.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.PREC_DIST",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired - using Reduced Skid PEBS feature"
+ "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
+ "SampleAfterValue": "200003"
},
{
+ "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts uops which retired.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "UOPS_RETIRED.ANY",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops retired (Precise event capable)"
+ "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
},
{
+ "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.JCC",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "UOPS_RETIRED.MS",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "MS uops retired (Precise event capable)"
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
},
{
+ "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of floating point divide uops retired.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "UOPS_RETIRED.FPDIV",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Floating point divide uops retired (Precise Event Capable)"
+ "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
},
{
+ "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RETURN",
"PEBS": "2",
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of integer divide uops retired.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "UOPS_RETIRED.IDIV",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Integer divide uops retired (Precise Event Capable)"
+ "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears for any reason.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.ALL",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "20003",
- "BriefDescription": "All machine clears"
+ "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "2",
+ "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel\u00ae architecture processors.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.SMC",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "20003",
- "BriefDescription": "Self-Modifying Code detected"
+ "BriefDescription": "Core cycles when core is not halted (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1. You cannot collect a PEBs record for this event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to floating point (FP) operations needing assists. For instance, if the result was a floating point denormal, the hardware clears the pipeline and reissues uops to produce the correct IEEE compliant denormal result.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.FP_ASSIST",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "20003",
- "BriefDescription": "Machine clears due to FP assists"
+ "BriefDescription": "Core cycles when core is not halted",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "Core cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "20003",
- "BriefDescription": "Machine clears due to memory disambiguation"
+ "BriefDescription": "Reference cycles when core is not halted",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "PublicDescription": "Reference cycles when core is not halted. This event uses a (_P)rogrammable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times that the machines clears due to a page fault. Covers both I-side and D-side(Loads/Stores) page faults. A page fault occurs when either page is not present, or an access violation",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.PAGE_FAULT",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "20003",
- "BriefDescription": "Machines clear due to a page fault"
+ "BriefDescription": "Reference cycles when core is not halted (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. This event uses fixed counter 2. You cannot collect a PEBs record for this event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts branch instructions retired for all branch types. This is an architectural performance event.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired branch instructions (Precise event capable)"
+ "BriefDescription": "Cycles a divider is busy",
+ "EventCode": "0xCD",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "PublicDescription": "Counts core cycles if either divide unit is busy.",
+ "SampleAfterValue": "2000003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was taken and when it was not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.JCC",
+ "BriefDescription": "Cycles the integer divide unit is busy",
+ "EventCode": "0xCD",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "PublicDescription": "Counts core cycles the integer divide unit is busy.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions (Precise event capable)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Instructions retired (Fixed event)",
+ "EventName": "INST_RETIRED.ANY",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired taken branch instructions (Precise event capable)"
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0. You cannot collect a PEBs record for this event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Instructions retired (Precise event capable)",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts far branch instructions retired. This includes far jump, far call and return, and Interrupt call and return.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xbf",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired far branch instructions (Precise event capable)"
+ "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a (_P)rogrammable general purpose performance counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event. Note: Because PEBS records can be collected only on IA32_PMC0, only one event can use the PEBS facility at a time.",
+ "SampleAfterValue": "2000003"
},
{
+ "BriefDescription": "Instructions retired - using Reduced Skid PEBS feature",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect call or near indirect jmp branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xeb",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired instructions of near indirect Jmp or call (Precise event capable)"
+ "PublicDescription": "Counts INST_RETIRED.ANY using the Reduced Skid PEBS feature that reduces the shadow in which events aren't counted allowing for a more unbiased distribution of samples across instructions retired.",
+ "SampleAfterValue": "2000003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near return branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xf7",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired near return instructions (Precise event capable)"
+ "BriefDescription": "Unfilled issue slots per cycle",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.ANY",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend due to either a full resource in the backend (RESOURCE_FULL) or due to the processor recovering from some event (RECOVERY).",
+ "SampleAfterValue": "200003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xf9",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.CALL",
+ "BriefDescription": "Unfilled issue slots per cycle to recover",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RECOVERY",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed by the backend because allocation is stalled waiting for a mispredicted jump to retire or other branch-like conditions (e.g. the event is relevant during certain microcode flows). Counts all issue slots blocked while within this window including slots where uops were not available in the Instruction Queue.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near call instructions (Precise event capable)"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near indirect CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfb",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.IND_CALL",
+ "BriefDescription": "Unfilled issue slots per cycle because of a full resource in the backend",
+ "EventCode": "0xCA",
+ "EventName": "ISSUE_SLOTS_NOT_CONSUMED.RESOURCE_FULL",
+ "PublicDescription": "Counts the number of issue slots per core cycle that were not consumed because of a full resource in the backend. Including but not limited to resources such as the Re-order Buffer (ROB), reservation stations (RS), load/store buffers, physical registers, or any other needed machine resource that is currently unavailable. Note that uops must be available for consumption in order for this event to fire. If a uop is not available (Instruction Queue is empty), this event will not count.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near indirect call instructions (Precise event capable)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads blocked because address has 4k partial address false dependence (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts near relative CALL branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfd",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PublicDescription": "Counts loads that block because their address modulo 4K matches a pending store.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired near relative call instructions (Precise event capable)"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Loads blocked (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were taken and does not count when the Jcc branch instruction were not taken.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0xfe",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "PublicDescription": "Counts anytime a load that retires is blocked for any reason.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired conditional branch instructions that were taken (Precise event capable)"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Loads blocked due to store data not ready (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired including all branch types.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts a load blocked from using a store forward, but did not occur because the store data was not available at the right time. The forward might occur subsequently when the data is available.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted branch instructions (Precise event capable)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads blocked due to store forward restriction (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired, including both when the branch was supposed to be taken and when it was not supposed to be taken (but the processor predicted the opposite condition).",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x7e",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.JCC",
+ "PublicDescription": "Counts a load blocked from using a store forward because of an address/size mismatch, only one of the loads blocked from each store will be counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions (Precise event capable)"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Loads blocked because address in not in the UTLB (Precise event capable)",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.UTLB_MISS",
"PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired that were near indirect call or near indirect jmp, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0xeb",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PublicDescription": "Counts loads blocked because they are unable to find their physical address in the micro TLB (UTLB).",
"SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted instructions of near indirect Jmp or near indirect call (Precise event capable)"
+ "UMask": "0x8"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near RET branch instructions retired, where the return address taken was not what the processor predicted.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0xf7",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near return instructions (Precise event capable)"
+ "BriefDescription": "All machine clears",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.ALL",
+ "PublicDescription": "Counts machine clears for any reason.",
+ "SampleAfterValue": "20003"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted near indirect CALL branch instructions retired, where the target address taken was not what the processor predicted.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0xfb",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.IND_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted near indirect call instructions (Precise event capable)"
+ "BriefDescription": "Machine clears due to memory disambiguation",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "PublicDescription": "Counts machine clears due to memory disambiguation. Memory disambiguation happens when a load which has been issued conflicts with a previous unretired store in the pipeline whose address was not known at issue time, but is later resolved to be the same as the load address.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x8"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted retired Jcc (Jump on Conditional Code/Jump if Condition is Met) branch instructions retired that were supposed to be taken but the processor predicted that it would not be taken.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0xfe",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Retired mispredicted conditional branch instructions that were taken (Precise event capable)"
+ "BriefDescription": "Machines clear due to a page fault",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "PublicDescription": "Counts the number of times that the machines clears due to a page fault. Covers both I-side and D-side(Loads/Stores) page faults. A page fault occurs when either page is not present, or an access violation",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles if either divide unit is busy.",
- "EventCode": "0xCD",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLES_DIV_BUSY.ALL",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a divider is busy"
+ "BriefDescription": "Self-Modifying Code detected",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts the number of times that the processor detects that a program is writing to a code section and has to perform a machine clear because of that modification. Self-modifying code (SMC) causes a severe penalty in all Intel(R) architecture processors.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the integer divide unit is busy.",
- "EventCode": "0xCD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLES_DIV_BUSY.IDIV",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles the integer divide unit is busy"
+ "BriefDescription": "Uops issued to the back end per cycle",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts uops issued by the front end and allocated into the back end of the machine. This event counts uops that retire as well as uops that were speculatively executed but didn't retire. The sort of speculative uops that might be counted includes, but is not limited to those uops issued in the shadow of a miss-predicted branch, those uops that are inserted during an assist (such as for a denormal floating point result), and (previously allocated) uops that might be canceled during a machine clear.",
+ "SampleAfterValue": "200003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts core cycles the floating point divide unit is busy.",
- "EventCode": "0xCD",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLES_DIV_BUSY.FPDIV",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles the FP divide unit is busy"
+ "BriefDescription": "Uops requested but not-delivered to the back-end per cycle",
+ "EventCode": "0x9C",
+ "EventName": "UOPS_NOT_DELIVERED.ANY",
+ "PublicDescription": "This event used to measure front-end inefficiencies. I.e. when front-end of the machine is not delivering uops to the back-end and the back-end has is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into uops in machine understandable format and putting them into a uop queue to be consumed by back end. The back-end then takes these uops, allocates the required resources. When all resources are ready, uops are executed. If the back-end is not ready to accept uops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more uops. This event counts only when back-end is requesting more uops and front-end is not able to provide them. When 3 uops are requested and no uops are delivered, the event counts 3. When 3 are requested, and only 1 is delivered, the event counts 2. When only 2 are delivered, the event counts 1. Alternatively stated, the event will not count if 3 uops are delivered, or if the back end is stalled and not requesting any uops at all. Counts indicate missed opportunities for the front-end to deliver a uop to the back end. Some examples of conditions that cause front-end efficiencies are: ICache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth. Known Issues: Some uops require multiple allocation slots. These uops will not be charged as a front end 'not delivered' opportunity, and will be regarded as a back end problem. For example, the INC instruction has one uop that requires 2 issue slots. A stream of INC instructions will not count as UOPS_NOT_DELIVERED, even though only one instruction can be issued per clock. The low uop issue rate for a stream of INC instructions is considered to be a back end issue.",
+ "SampleAfterValue": "200003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times a BACLEAR is signaled for any reason, including, but not limited to indirect branch/call, Jcc (Jump on Conditional Code/Jump if Condition is Met) branch, unconditional branch/call, and returns.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "BACLEARS.ALL",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for any branch type"
+ "BriefDescription": "Uops retired (Precise event capable)",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ANY",
+ "PEBS": "2",
+ "PublicDescription": "Counts uops which retired.",
+ "SampleAfterValue": "2000003"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on return instructions.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "BACLEARS.RETURN",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for return branch"
+ "BriefDescription": "Integer divide uops retired (Precise Event Capable)",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PEBS": "2",
+ "PublicDescription": "Counts the number of integer divide uops retired.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts BACLEARS on Jcc (Jump on Conditional Code/Jump if Condition is Met) branches.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "BACLEARS.COND",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "BACLEARs asserted for conditional branch"
+ "BriefDescription": "MS uops retired (Precise event capable)",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PEBS": "2",
+ "PublicDescription": "Counts uops retired that are from the complex flows issued by the micro-sequencer (MS). Counts both the uops from a micro-coded instruction, and the uops that might be generated from a micro-coded assist.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
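The hunks above reformat goldmontplus/pipeline.json so that each event object keeps its fields in alphabetical order and no longer carries tool-internal fields such as CollectPEBSRecord, Counter, PEBScounters and PDIR_COUNTER. A minimal sketch of how the result could be sanity-checked from a kernel checkout; the path and the checks are illustrative assumptions, not part of the patch:

#!/usr/bin/env python3
# Illustrative only: verify that every event object in the reformatted JSON
# keeps alphabetically ordered keys and contains none of the dropped fields.
import json

DROPPED = {"CollectPEBSRecord", "Counter", "PEBScounters", "PDIR_COUNTER"}
PATH = "tools/perf/pmu-events/arch/x86/goldmontplus/pipeline.json"  # assumed local checkout

with open(PATH) as f:
    events = json.load(f)

for ev in events:
    keys = list(ev)
    assert keys == sorted(keys), f"unsorted keys in {ev.get('EventName')}"
    assert not (DROPPED & ev.keys()), f"stale field in {ev.get('EventName')}"

print(f"{len(events)} events look consistent")

The same check applies to the other JSON files reformatted in this patch.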
diff --git a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
index 0d32fd26ded1..3d6feb45a50b 100644
--- a/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/goldmontplus/virtual-memory.json
@@ -1,221 +1,152 @@
[
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to a demand load to a 1GB page",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB",
+ "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "200003",
- "BriefDescription": "Page walk completed due to a demand load to a 4K page"
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "200003",
- "BriefDescription": "Page walk completed due to a demand load to a 2M or 4M page"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 1GB pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to a demand load to a 4K page",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1GB",
- "PDIR_COUNTER": "na",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts page walks completed due to demand data loads (including SW prefetches) whose address translations missed in all TLB levels and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "200003",
- "BriefDescription": "Page walk completed due to a demand load to a 1GB page"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
+ "BriefDescription": "Page walks outstanding due to a demand load every cycle.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts once per cycle for each page walk occurring due to a load (demand data loads or SW prefetches). Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Page walks outstanding due to a demand load every cycle."
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1GB page",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to a demand data store to a 4K page"
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to a demand data store to a 2M or 4M page"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1GB",
- "PDIR_COUNTER": "na",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to a demand data store to a 1GB page"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
+ "BriefDescription": "Page walks outstanding due to a demand data store every cycle.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts once per cycle for each page walk occurring due to a demand data store. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Page walks outstanding due to a demand data store every cycle."
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by number of walks.",
+ "BriefDescription": "Page walks outstanding due to walking the EPT every cycle",
"EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
"EventName": "EPT.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts once per cycle for each page walk only while traversing the Extended Page Table (EPT), and does not count during the rest of the translation. The EPT is used for translating Guest-Physical Addresses to Physical Addresses for Virtual Machine Monitors (VMMs). Average cycles per walk can be calculated by dividing the count by number of walks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Page walks outstanding due to walking the EPT every cycle"
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
+ "BriefDescription": "ITLB misses",
"EventCode": "0x81",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
"EventName": "ITLB.MISS",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) for a linear address of an instruction fetch. It counts when new translation are filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
"SampleAfterValue": "200003",
- "BriefDescription": "ITLB misses"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "PDIR_COUNTER": "na",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1GB",
+ "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to an instruction fetch in a 4K page"
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 2M or 4M pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to an instruction fetch in a 2M or 4M page"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 1GB pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Page walk completed due to an instruction fetch in a 4K page",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1GB",
- "PDIR_COUNTER": "na",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts page walks completed due to instruction fetches whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to an instruction fetch in a 1GB page"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
+ "BriefDescription": "Page walks outstanding due to an instruction fetch every cycle.",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_PENDING",
- "PDIR_COUNTER": "na",
+ "PublicDescription": "Counts once per cycle for each page walk occurring due to an instruction fetch. Includes cycles spent traversing the Extended Page Table (EPT). Average cycles per walk can be calculated by dividing by the number of walks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Page walks outstanding due to an instruction fetch every cycle."
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "1",
- "PublicDescription": "Counts STLB flushes. The TLBs are flushed on instructions like INVLPG and MOV to CR3.",
- "EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
- "EventName": "TLB_FLUSHES.STLB_ANY",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "20003",
- "BriefDescription": "STLB flushes"
+ "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
+ "PEBS": "2",
+ "PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x13"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts load uops retired that caused a DTLB miss.",
+ "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1",
"EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "PEBS": "2",
+ "PublicDescription": "Counts load uops retired that caused a DTLB miss.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops retired that missed the DTLB (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x11"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts store uops retired that caused a DTLB miss.",
+ "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
+ "Data_LA": "1",
"EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "PEBScounters": "0,1,2,3",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
+ "PEBS": "2",
+ "PublicDescription": "Counts store uops retired that caused a DTLB miss.",
"SampleAfterValue": "200003",
- "BriefDescription": "Store uops retired that missed the DTLB (Precise event capable)",
- "Data_LA": "1"
+ "UMask": "0x12"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts uops retired that had a DTLB miss on load, store or either. Note that when two distinct memory operations to the same page miss the DTLB, only one of them will be recorded as a DTLB miss.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x13",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Memory uops retired that missed the DTLB (Precise event capable)",
- "Data_LA": "1"
+ "BriefDescription": "STLB flushes",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSHES.STLB_ANY",
+ "PublicDescription": "Counts STLB flushes. The TLBs are flushed on instructions like INVLPG and MOV to CR3.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
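Several descriptions in virtual-memory.json note that average cycles per page walk can be derived by dividing a WALK_PENDING count by the number of completed walks. A minimal sketch of that arithmetic, using made-up counter values purely for illustration (not measured data):

# Hypothetical counts, e.g. read from a perf stat run; the numbers are invented.
walk_pending_cycles = 1_200_000          # DTLB_LOAD_MISSES.WALK_PENDING (cycles)
walks_completed = 40_000 + 2_500 + 10    # WALK_COMPLETED_4K + _2M_4M + _1GB

avg_cycles_per_walk = walk_pending_cycles / walks_completed
print(f"~{avg_cycles_per_walk:.1f} cycles per demand-load page walk")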
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/cache.json b/tools/perf/pmu-events/arch/x86/grandridge/cache.json
new file mode 100644
index 000000000000..7f0dc65a55d2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Counts the number of store ops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ }
+]
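
Each entry above describes one hardware event: EventCode and UMask select the counter programming, MSRIndex 0x3F6 together with MSRValue carries the load-latency threshold (0x80 = 128 cycles for the GT_128 variant), SampleAfterValue is the default sample period and PEBS the precise level. Once these tables are built into perf the EventName can be used directly; the sketch below only illustrates how such an entry maps onto a perf event descriptor, and it assumes a PMU named "cpu" with the usual Intel core encoding (event code in the low config byte, umask in the next) and an "ldlat" format term for the threshold, none of which this file itself defines.

def perf_event_string(ev):
    """Illustrative helper: turn one pmu-events JSON entry into a perf
    event descriptor string.  Assumes 'event'/'umask' config encoding and
    that MSRIndex 0x3F6 thresholds are exposed via the 'ldlat' term."""
    terms = ["event=" + ev["EventCode"], "umask=" + ev.get("UMask", "0")]
    if ev.get("MSRIndex") == "0x3F6":
        terms.append("ldlat=%d" % int(ev["MSRValue"], 16))
    return "cpu/" + ",".join(terms) + "/"

entry = {
    "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
    "EventCode": "0xd0",
    "UMask": "0x5",
    "MSRIndex": "0x3F6",
    "MSRValue": "0x80",
}
print(perf_event_string(entry))  # cpu/event=0xd0,umask=0x5,ldlat=128/
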
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/frontend.json b/tools/perf/pmu-events/arch/x86/grandridge/frontend.json
new file mode 100644
index 000000000000..be8f1c7e195c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/frontend.json
@@ -0,0 +1,16 @@
+[
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/memory.json b/tools/perf/pmu-events/arch/x86/grandridge/memory.json
new file mode 100644
index 000000000000..79d8af45100c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/memory.json
@@ -0,0 +1,20 @@
+[
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
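
The OCR (offcore response) events above pair an event code with a 64-bit request/response filter: MSRIndex names the MSR pair (0x1a6/0x1a7) behind the filter and MSRValue is the bit pattern to program into it. In the perf tool that value is normally passed through the PMU's offcore_rsp format term (config1); the sketch below builds the equivalent descriptor under that assumption, which is about the target machine's sysfs format attributes rather than anything stated in this file.

def ocr_descriptor(ev):
    # Assumes an 'offcore_rsp' format term that maps MSRValue into config1.
    # EventCode may list alternates (e.g. "0x2A,0x2B" on other parts), so
    # take the first one for the descriptor.
    event = ev["EventCode"].split(",")[0]
    return "cpu/event=%s,umask=%s,offcore_rsp=%s/" % (
        event, ev["UMask"], ev["MSRValue"])

print(ocr_descriptor({
    "EventCode": "0xB7",
    "UMask": "0x1",
    "MSRValue": "0x3FBFC00001",
}))  # cpu/event=0xB7,umask=0x1,offcore_rsp=0x3FBFC00001/
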
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/other.json b/tools/perf/pmu-events/arch/x86/grandridge/other.json
new file mode 100644
index 000000000000..2414f6ff53b0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/other.json
@@ -0,0 +1,20 @@
+[
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
new file mode 100644
index 000000000000..41212957ef21
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/pipeline.json
@@ -0,0 +1,96 @@
+[
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL",
+ "EventCode": "0x72",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003"
+ }
+]
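
The four TOPDOWN_*.ALL events in this file each count slots for one top-level category directly (frontend bound, backend bound, bad speculation, retiring). One simple way to turn raw counts into a level-1 breakdown, sketched below, is to normalise each category by the sum of the four; dividing by the sum is an approximation chosen here to avoid hard-coding the machine's slots-per-cycle, not something the event file prescribes.

def topdown_level1(fe_bound, be_bound, bad_spec, retiring):
    """Normalise the four TOPDOWN_*.ALL slot counts into fractions.

    Using the category sum as the denominator is an approximation; the
    JSON does not specify the allocation width."""
    total = fe_bound + be_bound + bad_spec + retiring
    return {
        "frontend_bound": fe_bound / total,
        "backend_bound": be_bound / total,
        "bad_speculation": bad_spec / total,
        "retiring": retiring / total,
    }
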
diff --git a/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json
new file mode 100644
index 000000000000..bd5f2b634c98
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/grandridge/virtual-memory.json
@@ -0,0 +1,24 @@
+[
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/cache.json b/tools/perf/pmu-events/arch/x86/graniterapids/cache.json
new file mode 100644
index 000000000000..56212827870c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/cache.json
@@ -0,0 +1,54 @@
+[
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json b/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
new file mode 100644
index 000000000000..c6d5016e7337
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/frontend.json
@@ -0,0 +1,10 @@
+[
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CORE",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.\nThe count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/memory.json b/tools/perf/pmu-events/arch/x86/graniterapids/memory.json
new file mode 100644
index 000000000000..1c0e0e86e58e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/memory.json
@@ -0,0 +1,174 @@
+[
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PEBS": "2",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/other.json b/tools/perf/pmu-events/arch/x86/graniterapids/other.json
new file mode 100644
index 000000000000..5e799bae03ea
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/other.json
@@ -0,0 +1,29 @@
+[
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json b/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json
new file mode 100644
index 000000000000..764c0435d1d2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/pipeline.json
@@ -0,0 +1,102 @@
+[
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.\nThe count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.\nSoftware can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ }
+]
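
On this part the level-1 Top-down breakdown is built the other way around: TOPDOWN.SLOTS is the denominator, and the PublicDescription text of the events names IDQ_BUBBLES.CORE (added in frontend.json above) as the Frontend Bound numerator, TOPDOWN.BACKEND_BOUND_SLOTS as the Backend Bound numerator and UOPS_RETIRED.SLOTS as the Retiring numerator. Attributing the remainder to Bad Speculation is the usual TMA convention rather than something stated in these files; a minimal sketch under that assumption:

def tma_level1(slots, idq_bubbles, backend_bound_slots, retired_slots):
    # Numerator/denominator pairing taken from the events' PublicDescription
    # text; the Bad Speculation remainder is the conventional TMA identity,
    # clamped at zero to absorb counter noise.
    frontend = idq_bubbles / slots
    backend = backend_bound_slots / slots
    retiring = retired_slots / slots
    bad_speculation = max(0.0, 1.0 - frontend - backend - retiring)
    return {
        "frontend_bound": frontend,
        "backend_bound": backend,
        "retiring": retiring,
        "bad_speculation": bad_speculation,
    }
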
diff --git a/tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json b/tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json
new file mode 100644
index 000000000000..8784c97b7534
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/graniterapids/virtual-memory.json
@@ -0,0 +1,26 @@
+[
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/cache.json b/tools/perf/pmu-events/arch/x86/haswell/cache.json
index 7fb0ad8d8ca1..0831f14b3cc6 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/cache.json
@@ -1,1063 +1,832 @@
[
{
- "PublicDescription": "Demand data read requests that missed L2, no rejects.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1D miss outstanding duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "EventCode": "0x27",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "PublicDescription": "Not rejected writebacks that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x50"
},
{
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "L2 cache lines in E state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "L2 cache lines in I state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "L2 cache lines in S state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by demand",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "PublicDescription": "Clean L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts all L2 code requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
- "PublicDescription": "Demand requests that miss L2 cache.",
+ "BriefDescription": "Demand Data Read requests",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe1"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.L2_PF_MISS",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 prefetch requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x27"
},
{
- "PublicDescription": "All requests that missed L2.",
+ "BriefDescription": "Demand requests to L2 cache",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.MISS",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Demand requests to L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe7"
},
{
- "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts all L2 HW prefetcher requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf8"
},
{
- "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "BriefDescription": "RFO requests to L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts all L2 store RFO requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe2"
},
{
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
"EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
+ "BriefDescription": "L2 cache misses when fetching instructions",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "L2_RQSTS.L2_PF_HIT",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 prefetch requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe1",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
- "PublicDescription": "Counts all L2 store RFO requests.",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe2",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Demand data read requests that missed L2, no rejects.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x21"
},
{
- "PublicDescription": "Counts all L2 code requests.",
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe4",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
- "PublicDescription": "Demand requests to L2 cache.",
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "Errata": "HSD78",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests.",
+ "BriefDescription": "All requests that miss L2 cache",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xf8",
- "EventName": "L2_RQSTS.ALL_PF",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "All requests that missed L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3f"
},
{
- "PublicDescription": "All requests to L2 cache.",
+ "BriefDescription": "All L2 requests",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "Errata": "HSD78",
"EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "All requests to L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "PublicDescription": "Not rejected writebacks that hit L2 cache.",
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x50",
- "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss oustandings duration in cycles",
- "CounterHTOff": "2"
+ "UMask": "0xc2"
},
{
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
},
{
- "PublicDescription": "Offcore outstanding demand data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "Any MLC or L3 HW prefetch accessing L2, including rejects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "Transactions accessing L2 pipe.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78, HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "L2 cache accesses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand data read requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "HSD62, HSD61",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "RFO requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles in which the L1D is locked.",
+ "BriefDescription": "Cycles when L1D is locked",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "Cycles in which the L1D is locked.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand data read requests sent to uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand code read requests sent to uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xb2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "Errata": "HSD29, HSM30",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB. (precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "Errata": "HSD29, HSM30",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB. (precise Event)",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "Data_LA": "1",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "Errata": "HSD76, HSD29, HSM30",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with locked access. (precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "Data_LA": "1",
+ "Errata": "HSD74, HSD29, HSD25, HSM30",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"PEBS": "1",
- "PublicDescription": "This event counts load uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "Errata": "HSD29, HSM30",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary. (precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "Data_LA": "1",
+ "Errata": "HSM30",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
- "PublicDescription": "This event counts store uops retired which had memory addresses spilt across 2 cache lines. A line split is across 64B cache-lines which may include a page split (4K). This is a precise event.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "Errata": "HSD29, HSM30",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "Data_LA": "1",
"Errata": "HSD29, HSM30",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops. (precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
- "PublicDescription": "This event counts all store uops retired. This is a precise event.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "Errata": "HSD29, HSM30",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops. (precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
+ "Data_LA": "1",
+ "Errata": "HSM30",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD29, HSM30",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops missed L1 cache as data sources.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD29, HSM30",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops in which data sources were data hits in the L3 cache without snoops required. This does not include hardware prefetches. This is a precise event.",
+ "BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSM30",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "Data_LA": "1",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
"SampleAfterValue": "50021",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops in which data sources missed in the L1 cache. This does not include hardware prefetches. This is a precise event.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "Data_LA": "1",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "HSM30",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
+ "BriefDescription": "Retired load uops.",
+ "Data_LA": "1",
"Errata": "HSD29, HSM30",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load uops. This event accounts for SW prefetch uops of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Retired store uops.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "PublicDescription": "Counts all retired store uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Retired load uops with locked access.",
+ "Data_LA": "1",
+ "Errata": "HSD76, HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "Errata": "HSM30",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD29, HSD25, HSM26, HSM30",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HIT in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD29, HSD25, HSM26, HSM30",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
},
{
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the L3 cache, but required a cross-core snoop which resulted in a HITM (hit modified) in an on-pkg core cache. This does not include hardware prefetches. This is a precise event.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "HSD29, HSD25, HSM26, HSM30",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
},
{
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x12"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches. This is a precise event.",
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD74, HSD29, HSD25, HSM30",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Demand data read requests that access L2 cache.",
- "EventCode": "0xf0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cacheable and noncacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Demand code read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "RFO requests that access L2 cache.",
- "EventCode": "0xf0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_TRANS.RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "Errata": "HSD78, HSM80",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand data read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "L2 cache accesses when fetching instructions.",
- "EventCode": "0xf0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_TRANS.CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache accesses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Any MLC or L3 HW prefetch accessing L2, including rejects.",
- "EventCode": "0xf0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_TRANS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "EventCode": "0xb2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "L1D writebacks that access L2 cache.",
- "EventCode": "0xf0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_TRANS.L1D_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1D writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "L2 fill requests that access L2 cache.",
- "EventCode": "0xf0",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_TRANS.L2_FILL",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 fill requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "L2 writebacks that access L2 cache.",
- "EventCode": "0xf0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Transactions accessing L2 pipe.",
- "EventCode": "0xf0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Transactions accessing L2 pipe",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterMask": "1",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "L2 cache lines in I state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_IN.I",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in I state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "L2 cache lines in S state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_IN.S",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in S state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Offcore outstanding demand data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "L2 cache lines in E state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_IN.E",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in E state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "L2_LINES_IN.ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Clean L2 cache lines evicted by demand.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by demand",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Dirty L2 cache lines evicted by demand.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0244",
"SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by demand",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xf4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Split locks in SQ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all requests hit in the L3",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C8FFF",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all requests hit in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C07F7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C07F7",
"SampleAfterValue": "100003",
- "BriefDescription": "hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C07F7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C07F7",
"SampleAfterValue": "100003",
- "BriefDescription": "hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all requests hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C8FFF",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3F803C0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "SampleAfterValue": "100003",
"BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04003C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "BriefDescription": "Split locks in SQ",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
index f5a3beaa19fc..8fcc10f74ad9 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/floating-point.json
@@ -1,92 +1,83 @@
[
{
- "PEBS": "1",
- "PublicDescription": "",
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "HSD56, HSM57",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "PublicDescription": "",
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "Errata": "HSD56, HSM57",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from legacy SSE to AVX-256 when penalty applicable",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
+ "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
"EventCode": "0xC6",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "AVX_INSTS.ALL",
+ "PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x7"
},
{
- "PEBS": "1",
- "PublicDescription": "",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
+ "EventName": "FP_ASSIST.ANY",
+ "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
"SampleAfterValue": "100003",
- "BriefDescription": "output - Numeric Overflow, Numeric Underflow, Inexact Result",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1e"
},
{
- "PEBS": "1",
- "PublicDescription": "",
+ "BriefDescription": "Number of SIMD FP assists due to input values",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ASSIST.X87_INPUT",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "Number of SIMD FP assists due to input values.",
"SampleAfterValue": "100003",
- "BriefDescription": "input - Invalid Operation, Denormal Operand, SNaN Operand",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PEBS": "1",
- "PublicDescription": "",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "Number of SIMD FP assists due to output values.",
"SampleAfterValue": "100003",
- "BriefDescription": "SSE* FP micro-code assist when output value is invalid.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "PublicDescription": "",
+ "BriefDescription": "Number of X87 assists due to input value.",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "PublicDescription": "Number of X87 FP assists due to input values.",
"SampleAfterValue": "100003",
- "BriefDescription": "Any input SSE* FP Assist",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "PublicDescription": "",
+ "BriefDescription": "Number of X87 assists due to output value.",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1e",
- "EventName": "FP_ASSIST.ANY",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "Number of X87 FP assists due to output values.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any FP_ASSIST umask was incrementing",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "PublicDescription": "Number of SIMD move elimination candidate uops that were eliminated.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "PublicDescription": "Number of SIMD move elimination candidate uops that were not eliminated.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Errata": "HSD56, HSM57",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Errata": "HSD56, HSM57",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/frontend.json b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
index c0a5bedcc15c..73d6d681dfa7 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/frontend.json
@@ -1,294 +1,246 @@
[
{
- "PublicDescription": "Counts cycles the IDQ is empty.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "HSD135",
- "EventName": "IDQ.EMPTY",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFDATA_STALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFETCH_STALL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts Instruction Cache (ICACHE) misses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts cycles MITE is delivered at least one uop. Set Cmask = 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Errata": "HSD135",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "Counts cycles the IDQ is empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "Number of uops delivered to IDQ from any path.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
- "PublicDescription": "Counts cycles MITE is delivered at least one uop. Set Cmask = 1.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
"EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of uops delivered to IDQ from any path.",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "IDQ.MITE_ALL_UOPS",
+ "EventName": "IDQ.MS_DSB_OCCUR",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts Instruction Cache (ICACHE) misses.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE.IFETCH_STALL",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE.IFDATA_STALL",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "This event count the number of undelivered (unallocated) uops from the Front-end to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. The Front-end can allocate up to 4 uops per cycle so this event can increment 0-4 times per cycle depending on the number of unallocated uops. This event is counted on a per-core basis.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"Errata": "HSD135",
+ "EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "This event count the number of undelivered (unallocated) uops from the Front-end to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. The Front-end can allocate up to 4 uops per cycle so this event can increment 0-4 times per cycle depending on the number of unallocated uops. This event is counted on a per-core basis.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number cycles during which the Front-end allocated exactly zero uops to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. This event is counted on a per-core basis.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "4",
"Errata": "HSD135",
+ "EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "This event counts the number cycles during which the Front-end allocated exactly zero uops to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. This event is counted on a per-core basis.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
"Errata": "HSD135",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
"CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "Errata": "HSD135",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
"Errata": "HSD135",
+ "EventCode": "0x9C",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD135",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "SampleAfterValue": "2000003",
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"Errata": "HSD135",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
index f57c5f3506c2..2528418200bb 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
@@ -1,322 +1,953 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS)))) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "10 * ARITH.DIVIDER_UOPS / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.REQUEST_FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * min(CPU_CLK_UNHALTED.THREAD, IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
},
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "(UOPS_EXECUTED.CORE / 2 / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) if #SMT_on else UOPS_EXECUTED.CORE / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@))",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization"
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
},
{
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
},
{
- "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Average number of parallel requests to external memory",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_parallel_requests",
+ "PublicDescription": "Average number of parallel requests to external memory. Accounts for all requests"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_request_latency"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_core_clks",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "MetricName": "tma_info_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.L3_MISS))) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
+ "MetricName": "tma_port_7",
+ "MetricThreshold": "tma_port_7 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address). Sample with: UOPS_DISPATCHED_PORT.PORT_7",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING)) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "INST_RETIRED.X87 * tma_info_uoppi / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/memory.json b/tools/perf/pmu-events/arch/x86/haswell/memory.json
index ef13ed88e2ea..2fc25e22a42a 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/memory.json
@@ -1,676 +1,514 @@
[
{
- "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Speculative cache-line split store-address uops dispatched to L1D.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional writes.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_EXEC.MISC2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_EXEC.MISC3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_EXEC.MISC4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_EXEC.MISC5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of memory ordering machine clears detected. Memory ordering machine clears can result from memory address aliasing or snoops from another hardware thread or core to data inflight in the pipeline. Machine clears can have a significant performance impact if they are happening frequently.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "HLE_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution started.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "HLE_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution successfully committed.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions.",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions.",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
"Errata": "HSD65",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts).",
+ "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
"EventCode": "0xc8",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RTM_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution started.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RTM_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution successfully committed.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RTM_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RTM_RETIRED.ABORTED_MISC1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RTM_RETIRED.ABORTED_MISC2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "RTM_RETIRED.ABORTED_MISC3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "Errata": "HSD65",
- "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "BriefDescription": "Number of times an HLE execution successfully committed.",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "EventCode": "0xc9",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "BriefDescription": "Number of times an HLE execution started.",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.START",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x4",
- "Counter": "3",
- "UMask": "0x1",
- "Errata": "HSD76, HSD25, HSM26",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
- "MSRIndex": "0x3F6",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering machine clears detected. Memory ordering machine clears can result from memory address aliasing or snoops from another hardware thread or core to data inflight in the pipeline. Machine clears can have a significant performance impact if they are happening frequently.",
"SampleAfterValue": "100003",
- "BriefDescription": "Randomly selected loads with latency value being above 4.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x8",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 128.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "50021",
- "BriefDescription": "Randomly selected loads with latency value being above 8.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x10",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 16.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
"SampleAfterValue": "20011",
- "BriefDescription": "Randomly selected loads with latency value being above 16.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Randomly selected loads with latency value being above 256.",
+ "Data_LA": "1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
"PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x20",
- "Counter": "3",
- "UMask": "0x1",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Randomly selected loads with latency value being above 32.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
"SampleAfterValue": "100003",
- "BriefDescription": "Randomly selected loads with latency value being above 32.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x40",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 4.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Randomly selected loads with latency value being above 64.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x80",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 512.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Randomly selected loads with latency value being above 128.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x100",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 64.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Randomly selected loads with latency value being above 256.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x200",
- "Counter": "3",
- "UMask": "0x1",
+ "BriefDescription": "Randomly selected loads with latency value being above 8.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Randomly selected loads with latency value being above 512.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "Speculative cache-line split store-address uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts all requests miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC08FFF",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all requests miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01004007F7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400244",
"SampleAfterValue": "100003",
- "BriefDescription": "miss the L3 and the data is returned from local dram",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC007F7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00091",
"SampleAfterValue": "100003",
- "BriefDescription": "miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "BriefDescription": "miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC007F7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
+ "BriefDescription": "miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004007F7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "BriefDescription": "Counts all requests miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC08FFF",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "BriefDescription": "Counts all demand code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
+ "BriefDescription": "Counts demand data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
+ "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand code reads miss in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_CODE_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.LOCAL_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads miss in the L3",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC00001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads miss in the L3",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "Errata": "HSD65",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
}
-]
\ No newline at end of file
+]
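The mem.json hunks above rewrite each event with its JSON keys in alphabetical order, reorder the entries by EventName, and drop scheduling metadata such as "Counter", "CounterHTOff", "Offcore", and "TakenAlone". As an illustrative sketch only (this is not the converter actually used to regenerate these files, and the file-path argument is hypothetical), the same mechanical normalization could be expressed in Python as:

#!/usr/bin/env python3
# Sketch: normalize a pmu-events JSON file the way the hunks above do,
# i.e. drop per-counter scheduling fields, sort events by EventName,
# and serialize keys alphabetically. Not the real conversion script.
import json
import sys

DROPPED_KEYS = {"Counter", "CounterHTOff", "Offcore", "TakenAlone"}

def normalize(path):
    with open(path) as f:
        events = json.load(f)
    cleaned = [
        {k: v for k, v in ev.items() if k not in DROPPED_KEYS}
        for ev in events
    ]
    cleaned.sort(key=lambda ev: ev.get("EventName", ""))
    # sort_keys=True yields the alphabetical key order seen in the '+' lines.
    return json.dumps(cleaned, indent=4, sort_keys=True) + "\n"

if __name__ == "__main__":
    sys.stdout.write(normalize(sys.argv[1]))

Note that the hunks also adjust some description strings (for example, BriefDescription wording), which a purely mechanical pass like this would not change.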
diff --git a/tools/perf/pmu-events/arch/x86/haswell/other.json b/tools/perf/pmu-events/arch/x86/haswell/other.json
index 8a4d898d76c1..2395ebf112db 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/other.json
@@ -1,43 +1,35 @@
[
{
- "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPL_CYCLES.RING0",
+ "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "CounterMask": "1",
"EdgeDetect": "1",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0_TRANS",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
index 734d3873729e..540f4372623c 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/pipeline.json
@@ -1,1343 +1,1050 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "Counter": "Fixed counter 0",
- "UMask": "0x1",
- "Errata": "HSD140, HSD143",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 0"
- },
- {
- "PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
- "Counter": "Fixed counter 2",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "loads blocked by overlapping with store buffer that cannot be forwarded",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline which can have a performance impact.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of cycles spent waiting for a recovery after an event such as a processor nuke, JEClear, assist, hle/rtm abort etc.",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "PublicDescription": "This event counts the number of uops issued by the Front-end of the pipeline to the Back-end. This event is counted at the allocation stage and will count both retired and non-retired uops.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd0"
},
{
- "PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_ISSUED.FLAGS_MERGE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (for example, 2 sources + immediate) regardless of whether it is a result of LEA instruction or not.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_ISSUED.SINGLE_MUL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
},
{
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ARITH.DIVIDER_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
},
{
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
},
{
- "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0x84"
},
{
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x88"
},
{
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Branch instructions at retirement.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x3c",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
- "EventCode": "0x4c",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "Number of far branches retired.",
"SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
- "EventCode": "0x4c",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of integer move elimination candidate uops that were eliminated.",
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of SIMD move elimination candidate uops that were eliminated.",
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of integer move elimination candidate uops that were not eliminated.",
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of near return instructions retired.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "Number of SIMD move elimination candidate uops that were not eliminated.",
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Number of near taken branches retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts cycles when the Reservation Station ( RS ) is empty for the thread. The RS is a structure that buffers allocated micro-ops from the Front-end. If there are many cycles when the RS is empty, it may represent an underflow of instructions delivered from the Front-end.",
- "EventCode": "0x5E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "PublicDescription": "Stall cycles due to IQ is full.",
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ILD_STALL.IQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stall cycles because IQ is full",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "BriefDescription": "Speculative mispredicted indirect branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all near executed branches (not necessarily retired).",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "Counts all near executed branches (not necessarily retired).",
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "Cycles which a uop is dispatched on port 0 in this thread.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
+ "BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "CounterMask": "1",
+ "Errata": "HSD78, HSM63, HSM80",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "PublicDescription": "Cycles with pending L2 miss loads. Set Cmask=2 to count cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are executed in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "BriefDescription": "Cycles with pending memory loads.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "BriefDescription": "Execution stalls due to L1 data cache misses",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are executed in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "BriefDescription": "Execution stalls due to L2 cache misses.",
+ "CounterMask": "5",
+ "Errata": "HSM63, HSM80",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "PublicDescription": "Number of loads missed L2.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "BriefDescription": "Execution stalls due to memory subsystem.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline and there were memory instructions pending (waiting for data).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
+ "BriefDescription": "Stall cycles because IQ is full",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "PublicDescription": "Stall cycles due to IQ is full.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "BriefDescription": "Instructions retired from execution.",
+ "Errata": "HSD140, HSD143",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Errata": "HSD11, HSD140",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Number of instructions at retirement.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Errata": "HSD140",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "This event counts the number of cycles spent waiting for a recovery after an event such as a processor nuke, JEClear, assist, hle/rtm abort etc.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are executed in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "loads blocked by overlapping with store buffer that cannot be forwarded",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are executed in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline which can have a performance impact.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are executed in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of uops delivered by the LSD.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles allocation is stalled due to resource related reason.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD135",
- "EventName": "RESOURCE_STALLS.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts cycles during which no instructions were allocated because no Store Buffers (SB) were available.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "PublicDescription": "Number of integer move elimination candidate uops that were eliminated.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "PublicDescription": "Number of integer move elimination candidate uops that were not eliminated.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles with pending L2 miss loads. Set Cmask=2 to count cycle.",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD78",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L2 cache miss loads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "BriefDescription": "Resource-related stall cycles",
+ "Errata": "HSD135",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "Cycles allocation is stalled due to resource related reason.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending memory loads.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline.",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
- "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of loads missed L2.",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls due to L2 cache misses.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline and there were memory instructions pending (waiting for data).",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "This event counts cycles during which no instructions were allocated because no Store Buffers (SB) were available.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls due to memory subsystem.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "BriefDescription": "Count cases of saving new LBR",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "This event counts cycles when the Reservation Station ( RS ) is empty for the thread. The RS is a structure that buffers allocated micro-ops from the Front-end. If there are many cycles when the RS is empty, it may represent an underflow of instructions delivered from the Front-end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls due to L1 data cache misses",
- "CounterMask": "12",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of uops delivered by the LSD.",
- "EventCode": "0xa8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "HSD144, HSD30, HSM31",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
+ "BriefDescription": "Number of uops executed on the core.",
"Errata": "HSD30, HSM31",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
"Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
"Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
"Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of instructions at retirement.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "HSD11, HSD140",
- "EventName": "INST_RETIRED.ANY_P",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "Errata": "HSD140",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "CounterHTOff": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "INST_RETIRED.X87",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "",
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "CounterMask": "4",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops.",
- "CounterHTOff": "0,1,2,3,4,5,6,7",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "",
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "Cycles which a uop is dispatched on port 0 in this thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles no executable uops retired",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "",
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles using always true condition applied to PEBS uops retired event.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "",
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles no executable uops retired on core",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MACHINE_CLEARS.CYCLES",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Branch instructions at retirement.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "SampleAfterValue": "100003",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Return instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "PublicDescription": "",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Counts all not taken macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of uops issued by the Front-end of the pipeline to the Back-end. This event is counted at the allocation stage and will count both retired and non-retired uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of far branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (for example, 2 sources + immediate) regardless of whether it is a result of LEA instruction or not.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Actually retired uops.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
- "EventCode": "0xe6",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json b/tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json
new file mode 100644
index 000000000000..be9a3ed1a940
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/uncore-cache.json
@@ -0,0 +1,202 @@
+[
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
+ "PerPkg": "1",
+ "UMask": "0x86",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
+ "PerPkg": "1",
+ "UMask": "0x8f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_ES",
+ "PerPkg": "1",
+ "UMask": "0x46",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_I",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_M",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_MESI",
+ "PerPkg": "1",
+ "UMask": "0x4f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
+ "PerPkg": "1",
+ "UMask": "0x16",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
+ "PerPkg": "1",
+ "UMask": "0x1f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_I",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
+ "PerPkg": "1",
+ "UMask": "0x2f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ }
+]
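
The uncore-cache.json entries above all follow the flat schema the perf pmu-events build consumes: a JSON array of objects keyed by EventCode, EventName, UMask, PerPkg and Unit (UMask is absent for fixed counters such as the socket clock further below). A minimal sanity-check sketch, assuming only the Python standard library and a caller-supplied path; it is not part of this patch and only mirrors the fields visible in these hunks:

#!/usr/bin/env python3
# Minimal sanity check for a pmu-events JSON array such as the
# uncore-cache.json added above. The field list is read off the entries
# shown in this patch; perf's real build-time validation lives elsewhere.
import json
import sys
from collections import Counter

REQUIRED = ("EventCode", "EventName", "PerPkg", "Unit")

def check(path):
    with open(path) as f:
        events = json.load(f)                    # flat JSON array of event objects
    missing = [e.get("EventName", "<unnamed>")
               for e in events if any(k not in e for k in REQUIRED)]
    # An event is identified by its unit, event code and umask
    # (umask defaults to 0 for fixed counters).
    keys = Counter((e.get("Unit"), e.get("EventCode"), e.get("UMask", "0x0"))
                   for e in events)
    dupes = [k for k, n in keys.items() if n > 1]
    print(f"{path}: {len(events)} events, {len(missing)} incomplete, {len(dupes)} duplicated")
    return not missing and not dupes

if __name__ == "__main__":
    sys.exit(0 if check(sys.argv[1]) else 1)
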
diff --git a/tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json
new file mode 100644
index 000000000000..8da28239ebf9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/uncore-interconnect.json
@@ -0,0 +1,52 @@
+[
+ {
+ "BriefDescription": "Each cycle count number of valid entries in Coherency Tracker queue from allocation till deallocation. Aperture requests (snoops) appear as NC decoded internally and become coherent (snoop L3, access memory)",
+ "EventCode": "0x83",
+ "EventName": "UNC_ARB_COH_TRK_OCCUPANCY.All",
+ "PerPkg": "1",
+ "PublicDescription": "Each cycle count number of valid entries in Coherency Tracker queue from allocation till deallocation. Aperture requests (snoops) appear as NC decoded internally and become coherent (snoop L3, access memory).",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "CounterMask": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "ARB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json b/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json
new file mode 100644
index 000000000000..2af92e43b28a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswell/uncore-other.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "CLOCK"
+ }
+]
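
The hunk below then removes the old combined uncore.json; its CBO, iMPH-U and NCU events reappear in the three files added above under the CBOX, ARB and CLOCK units. One hedged way to confirm nothing is lost in the split is a set comparison of event names; the file names here are placeholders (a saved copy of the removed file alongside the new ones), not paths this patch provides:

#!/usr/bin/env python3
# Hypothetical round-trip check for the uncore.json split: every EventName
# from the removed combined file should reappear in one of the new
# per-topic files. File names below are placeholders, not kernel paths.
import json

def names(path):
    with open(path) as f:
        return {e["EventName"] for e in json.load(f)}

old = names("uncore.json.orig")   # copy of the file deleted below
new_files = ("uncore-cache.json", "uncore-interconnect.json", "uncore-other.json")
new = set().union(*(names(p) for p in new_files))

print("in old file only:", sorted(old - new))
print("in new files only:", sorted(new - old))
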
diff --git a/tools/perf/pmu-events/arch/x86/haswell/uncore.json b/tools/perf/pmu-events/arch/x86/haswell/uncore.json
deleted file mode 100644
index 3ef5c21fef56..000000000000
--- a/tools/perf/pmu-events/arch/x86/haswell/uncore.json
+++ /dev/null
@@ -1,374 +0,0 @@
-[
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x21",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EXTERNAL",
- "BriefDescription": "An external snoop misses in some processor core.",
- "PublicDescription": "An external snoop misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x41",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x81",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
- "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x24",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EXTERNAL",
- "BriefDescription": "An external snoop hits a non-modified line in some processor core.",
- "PublicDescription": "An external snoop hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x44",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x84",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EVICTION",
- "BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a non-modified line in some processor core.",
- "PublicDescription": "A cross-core snoop resulted from L3 Eviction which hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x28",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EXTERNAL",
- "BriefDescription": "An external snoop hits a modified line in some processor core.",
- "PublicDescription": "An external snoop hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x48",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x88",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EVICTION",
- "BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a modified line in some processor core.",
- "PublicDescription": "A cross-core snoop resulted from L3 Eviction which hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x11",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
- "BriefDescription": "L3 Lookup read request that access cache and found line in M-state.",
- "PublicDescription": "L3 Lookup read request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x21",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
- "BriefDescription": "L3 Lookup write request that access cache and found line in M-state.",
- "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x41",
- "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_M",
- "BriefDescription": "L3 Lookup external snoop request that access cache and found line in M-state.",
- "PublicDescription": "L3 Lookup external snoop request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x81",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
- "BriefDescription": "L3 Lookup any request that access cache and found line in M-state.",
- "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x18",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
- "BriefDescription": "L3 Lookup read request that access cache and found line in I-state.",
- "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x28",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_I",
- "BriefDescription": "L3 Lookup write request that access cache and found line in I-state.",
- "PublicDescription": "L3 Lookup write request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x48",
- "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_I",
- "BriefDescription": "L3 Lookup external snoop request that access cache and found line in I-state.",
- "PublicDescription": "L3 Lookup external snoop request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x88",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
- "BriefDescription": "L3 Lookup any request that access cache and found line in I-state.",
- "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x1f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
- "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
- "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x2f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
- "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
- "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x4f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_MESI",
- "BriefDescription": "L3 Lookup external snoop request that access cache and found line in MESI-state.",
- "PublicDescription": "L3 Lookup external snoop request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x8f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
- "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
- "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x86",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
- "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
- "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x46",
- "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_ES",
- "BriefDescription": "L3 Lookup external snoop request that access cache and found line in E or S-state.",
- "PublicDescription": "L3 Lookup external snoop request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x16",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
- "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
- "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x26",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
- "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
- "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from it's allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
- "BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x20",
- "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
- "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "PublicDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x83",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_OCCUPANCY.All",
- "BriefDescription": "Each cycle count number of valid entries in Coherency Tracker queue from allocation till deallocation. Aperture requests (snoops) appear as NC decoded internally and become coherent (snoop L3, access memory)",
- "PublicDescription": "Each cycle count number of valid entries in Coherency Tracker queue from allocation till deallocation. Aperture requests (snoops) appear as NC decoded internally and become coherent (snoop L3, access memory).",
- "Counter": "0",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x84",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
- "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "PublicDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "NCU",
- "EventCode": "0x0",
- "UMask": "0x01",
- "EventName": "UNC_CLOCK.SOCKET",
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Counter": "FIXED",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
index 777b500a5c9f..87a4ec1ee7d7 100644
--- a/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswell/virtual-memory.json
@@ -1,484 +1,386 @@
[
{
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size.",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size.",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Completed page walks due to demand load misses that caused 4K page walks in any TLB levels.",
+ "BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
+ "PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
- "PublicDescription": "Completed page walks due to demand load misses that caused 2M/4M page walks in any TLB levels.",
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M)",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "PublicDescription": "This event counts load operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K)",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "PublicDescription": "This event counts load operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts load operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to demand load misses that caused 2M/4M page walks in any TLB levels.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts load operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to demand load misses that caused 4K page walks in any TLB levels.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 4K page structure.",
+ "BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
+ "PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 2M/4M page structure.",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M)",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "PublicDescription": "This event counts store operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K)",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "PublicDescription": "This event counts store operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "PublicDescription": "This event counts store operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks. (1G)",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts store operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 2M/4M page structure.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 4K page structure.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
"SampleAfterValue": "100003",
- "BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Cycle count for an Extended Page table walk.",
"EventCode": "0x4f",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Misses in ITLB that causes a page walk of any page size.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xae",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Completed page walks due to misses in ITLB 4K page entries.",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Misses in ITLB that causes a page walk of any page size.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Completed page walks due to misses in ITLB 2M/4M page entries.",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "PublicDescription": "ITLB misses that hit STLB (2M).",
"SampleAfterValue": "100003",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Completed page walks in ITLB of any page size.",
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "PublicDescription": "ITLB misses that hit STLB (4K).",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.WALK_DURATION",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in ITLB of any page size.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "PublicDescription": "ITLB misses that hit STLB (4K).",
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "ITLB misses that hit STLB (2M).",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to misses in ITLB 2M/4M page entries.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "ITLB misses that hit STLB. No page walk.",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x60",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to misses in ITLB 4K page entries.",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
- "EventCode": "0xae",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
"SampleAfterValue": "100003",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L1+FB.",
+ "BriefDescription": "Number of DTLB page walker hits in the L1+FB",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
"EventName": "PAGE_WALKER_LOADS.DTLB_L1",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L1+FB.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L1+FB",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x11"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
+ "BriefDescription": "Number of DTLB page walker hits in the L2",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
"EventName": "PAGE_WALKER_LOADS.DTLB_L2",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x12"
},
{
- "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x14",
+ "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
"Errata": "HSD25",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L3",
+ "PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x14"
},
{
- "PublicDescription": "Number of DTLB page walker loads from memory.",
- "EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
+ "BriefDescription": "Number of DTLB page walker hits in Memory",
"Errata": "HSD25",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
+ "PublicDescription": "Number of DTLB page walker loads from memory.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of DTLB page walker hits in Memory",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x18"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L2.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x42"
},
{
- "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L3.",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x44"
},
{
- "PublicDescription": "Number of ITLB page walker loads from memory.",
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x28",
- "Errata": "HSD25",
- "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of ITLB page walker hits in Memory",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x48"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x44",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L3.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x84"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in memory.",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x48",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_MEMORY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x88"
},
{
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Number of ITLB page walker hits in the L2",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L2.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x22"
},
{
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "Errata": "HSD25",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "Errata": "HSD25",
"EventCode": "0xBC",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in memory.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x28"
},
{
- "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100003",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Count number of STLB flush attempts.",
+ "BriefDescription": "STLB flush attempts",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Count number of STLB flush attempts.",
"SampleAfterValue": "100003",
- "BriefDescription": "STLB flush attempts",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-] \ No newline at end of file
+]
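
The virtual-memory.json hunk above keeps every event's EventCode, UMask and descriptions but drops the Counter and CounterHTOff fields, emits keys in alphabetical order within each object, and sorts the array by EventName. A rough sketch of that normalization, reading the array from stdin; the actual converter used to regenerate these files is not shown in this patch:

#!/usr/bin/env python3
# Sketch of the JSON normalization visible in the hunks above: drop fields
# the regenerated lists no longer carry, alphabetize keys within each event,
# and order events by EventName. This approximates, not reproduces, the
# tool that generated the new files.
import json
import sys

DROPPED = {"Counter", "CounterHTOff"}

def normalize(events):
    kept = [{k: v for k, v in sorted(e.items()) if k not in DROPPED}
            for e in events]
    return sorted(kept, key=lambda e: e.get("EventName", ""))

if __name__ == "__main__":
    json.dump(normalize(json.load(sys.stdin)), sys.stdout, indent=4)
    sys.stdout.write("\n")
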
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/cache.json b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
index a9e62d4357af..a6c81010b394 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/cache.json
@@ -1,1097 +1,862 @@
[
{
- "EventCode": "0x24",
- "UMask": "0x21",
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "Errata": "HSD78",
- "PublicDescription": "Demand data read requests that missed L2, no rejects.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1D miss outstanding duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Not rejected writebacks that hit L2 cache",
+ "EventCode": "0x27",
+ "EventName": "L2_DEMAND_RQSTS.WB_HIT",
+ "PublicDescription": "Not rejected writebacks that hit L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x50"
},
{
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "L2 cache lines in E state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "L2 cache lines in I state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "L2 cache lines in S state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by demand",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "PublicDescription": "Clean L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts all L2 code requests.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
+ "BriefDescription": "Demand Data Read requests",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "Errata": "HSD78",
- "PublicDescription": "Demand requests that miss L2 cache.",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe1"
},
{
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "UMask": "0x30",
- "BriefDescription": "L2 prefetch requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.L2_PF_MISS",
- "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x27"
},
{
+ "BriefDescription": "Demand requests to L2 cache",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "Errata": "HSD78",
- "PublicDescription": "All requests that missed L2.",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Demand requests to L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe7"
},
{
+ "BriefDescription": "Requests from L2 hardware prefetchers",
"EventCode": "0x24",
- "UMask": "0xc1",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "Errata": "HSD78",
- "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts all L2 HW prefetcher requests.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf8"
},
{
+ "BriefDescription": "RFO requests to L2 cache",
"EventCode": "0x24",
- "UMask": "0xc2",
- "BriefDescription": "RFO requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts all L2 store RFO requests.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe2"
},
{
- "EventCode": "0x24",
- "UMask": "0xc4",
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
+ "BriefDescription": "L2 cache misses when fetching instructions",
"EventCode": "0x24",
- "UMask": "0xd0",
- "BriefDescription": "L2 prefetch requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.L2_PF_HIT",
- "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "UMask": "0xe1",
- "BriefDescription": "Demand Data Read requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "Errata": "HSD78",
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "UMask": "0xe2",
- "BriefDescription": "RFO requests to L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_RFO",
- "PublicDescription": "Counts all L2 store RFO requests.",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Demand data read requests that missed L2, no rejects.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x21"
},
{
+ "BriefDescription": "L2 prefetch requests that hit L2 cache",
"EventCode": "0x24",
- "UMask": "0xe4",
- "BriefDescription": "L2 code requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "PublicDescription": "Counts all L2 code requests.",
+ "EventName": "L2_RQSTS.L2_PF_HIT",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
+ "BriefDescription": "L2 prefetch requests that miss L2 cache",
"EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "Errata": "HSD78",
- "PublicDescription": "Demand requests to L2 cache.",
+ "EventName": "L2_RQSTS.L2_PF_MISS",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "All requests that miss L2 cache",
+ "Errata": "HSD78, HSM80",
"EventCode": "0x24",
- "UMask": "0xf8",
- "BriefDescription": "Requests from L2 hardware prefetchers",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_PF",
- "PublicDescription": "Counts all L2 HW prefetcher requests.",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "All requests that missed L2.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3f"
},
{
- "EventCode": "0x24",
- "UMask": "0xff",
"BriefDescription": "All L2 requests",
- "Counter": "0,1,2,3",
+ "Errata": "HSD78, HSM80",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
- "Errata": "HSD78",
"PublicDescription": "All requests to L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "EventCode": "0x27",
- "UMask": "0x50",
- "BriefDescription": "Not rejected writebacks that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_DEMAND_RQSTS.WB_HIT",
- "PublicDescription": "Not rejected writebacks that hit L2 cache.",
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the number of store RFO requests that hit the L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x2E",
- "UMask": "0x41",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x2E",
- "UMask": "0x4f",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "L1D miss oustandings duration in cycles",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING",
- "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "2",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch. HWP are e.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.REQUEST_FB_FULL",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc2"
},
{
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x51",
- "UMask": "0x1",
- "BriefDescription": "L1D data line replacements",
- "Counter": "0,1,2,3",
- "EventName": "L1D.REPLACEMENT",
- "PublicDescription": "This event counts when new data lines are brought into the L1 Data cache, which cause other lines to be evicted from the cache.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "Errata": "HSD78, HSD62, HSD61",
- "PublicDescription": "Offcore outstanding demand data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "Any MLC or L3 HW prefetch accessing L2, including rejects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "Errata": "HSD78, HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "Transactions accessing L2 pipe.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "Errata": "HSD78, HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "L2 cache accesses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x60",
- "UMask": "0x2",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "Errata": "HSD62, HSD61",
- "PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand data read requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "Errata": "HSD62, HSD61",
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "Errata": "HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "Errata": "HSD62, HSD61",
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "CounterMask": "1",
- "Errata": "HSD62, HSD61",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "RFO requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
- "UMask": "0x2",
"BriefDescription": "Cycles when L1D is locked",
- "Counter": "0,1,2,3",
+ "EventCode": "0x63",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D is locked.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB0",
- "UMask": "0x1",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "Errata": "HSD78",
- "PublicDescription": "Demand data read requests sent to uncore.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB0",
- "UMask": "0x2",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "PublicDescription": "Demand code read requests sent to uncore.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB0",
- "UMask": "0x4",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0xB0",
- "UMask": "0x8",
- "BriefDescription": "Demand and prefetch data reads",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "EventCode": "0xb2",
- "UMask": "0x1",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "Data_LA": "1",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD0",
- "UMask": "0x11",
- "BriefDescription": "Retired load uops that miss the STLB.",
+ "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
"Data_LA": "1",
+ "Errata": "HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "Errata": "HSD29, HSM30",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x12",
- "BriefDescription": "Retired store uops that miss the STLB.",
+ "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
"Data_LA": "1",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "Errata": "HSD29, HSM30",
- "L1_Hit_Indication": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD0",
- "UMask": "0x21",
- "BriefDescription": "Retired load uops with locked access.",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
+ "Errata": "HSD74, HSD29, HSD25, HSM30",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "Errata": "HSD76, HSD29, HSM30",
+ "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x41",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"Errata": "HSD29, HSM30",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "EventCode": "0xD0",
- "UMask": "0x42",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
"Data_LA": "1",
+ "Errata": "HSM30",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "Errata": "HSD29, HSM30",
- "L1_Hit_Indication": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xD0",
- "UMask": "0x81",
- "BriefDescription": "All retired load uops.",
+ "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
"Data_LA": "1",
+ "Errata": "HSM30",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "Errata": "HSD29, HSM30",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xD0",
- "UMask": "0x82",
- "BriefDescription": "All retired store uops.",
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
"Data_LA": "1",
+ "Errata": "HSM30",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "Errata": "HSD29, HSM30",
- "L1_Hit_Indication": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xD1",
- "UMask": "0x1",
"BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"Errata": "HSD29, HSM30",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD1",
- "UMask": "0x2",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
"Data_LA": "1",
+ "Errata": "HSM30",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "Errata": "HSD76, HSD29, HSM30",
+ "PublicDescription": "Retired load uops missed L1 cache as data sources.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD1",
- "UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
- "SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "Errata": "HSD76, HSD29, HSM30",
"EventCode": "0xD1",
- "UMask": "0x8",
- "BriefDescription": "Retired load uops misses in L1 cache as data sources.",
- "Data_LA": "1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "Errata": "HSM30",
- "PublicDescription": "Retired load uops missed L1 cache as data sources.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xD1",
- "UMask": "0x10",
"BriefDescription": "Miss in mid-level (L2) cache. Excludes Unknown data-source.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"Errata": "HSD29, HSM30",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
"PublicDescription": "Retired load uops missed L2. Unknown data source excluded.",
"SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xD1",
- "UMask": "0x20",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "BriefDescription": "Retired load uops which data sources were data hits in L3 without snoops required.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
- "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0xD1",
- "UMask": "0x40",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "Data_LA": "1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "Errata": "HSM30",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Retired load uops with L3 cache hits as data sources.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD2",
- "UMask": "0x1",
- "BriefDescription": "Retired load uops which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"Data_LA": "1",
+ "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS",
- "Errata": "HSD29, HSD25, HSM26, HSM30",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Retired load uops missed L3. Excludes unknown data source .",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xD2",
- "UMask": "0x2",
- "BriefDescription": "Retired load uops which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "BriefDescription": "Retired load uops.",
"Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT",
- "Errata": "HSD29, HSD25, HSM26, HSM30",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Counts all retired load uops. This event accounts for SW prefetch uops of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
- "EventCode": "0xD2",
- "UMask": "0x4",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared L3.",
+ "BriefDescription": "Retired store uops.",
"Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM",
- "Errata": "HSD29, HSD25, HSM26, HSM30",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Counts all retired store uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
},
{
- "EventCode": "0xD2",
- "UMask": "0x8",
- "BriefDescription": "Retired load uops which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Retired load uops with locked access.",
"Data_LA": "1",
+ "Errata": "HSD76, HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_NONE",
- "Errata": "HSD74, HSD29, HSD25, HSM26, HSM30",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21"
},
{
- "EventCode": "0xD3",
- "UMask": "0x1",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
"Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM",
- "Errata": "HSD74, HSD29, HSD25, HSM30",
- "PublicDescription": "This event counts retired load uops where the data came from local DRAM. This does not include hardware prefetches.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x41"
},
{
- "EventCode": "0xD3",
- "UMask": "0x4",
- "BriefDescription": "Retired load uop whose Data Source was: remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM",
"Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x42"
},
{
- "EventCode": "0xD3",
- "UMask": "0x10",
- "BriefDescription": "Retired load uop whose Data Source was: Remote cache HITM",
+ "BriefDescription": "Retired load uops that miss the STLB.",
"Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM",
- "Errata": "HSM30",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x11"
},
{
- "EventCode": "0xD3",
- "UMask": "0x20",
- "BriefDescription": "Retired load uop whose Data Source was: forwarded from remote cache",
+ "BriefDescription": "Retired store uops that miss the STLB.",
"Data_LA": "1",
+ "Errata": "HSD29, HSM30",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD",
- "Errata": "HSM30",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x12"
},
{
- "EventCode": "0xf0",
- "UMask": "0x1",
- "BriefDescription": "Demand Data Read requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "PublicDescription": "Demand data read requests that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xf0",
- "UMask": "0x2",
- "BriefDescription": "RFO requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.RFO",
- "PublicDescription": "RFO requests that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cacheable and noncacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Demand code read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xf0",
- "UMask": "0x4",
- "BriefDescription": "L2 cache accesses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.CODE_RD",
- "PublicDescription": "L2 cache accesses when fetching instructions.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "Errata": "HSD78, HSM80",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand data read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xf0",
- "UMask": "0x8",
- "BriefDescription": "L2 or L3 HW prefetches that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.ALL_PF",
- "PublicDescription": "Any MLC or L3 HW prefetch accessing L2, including rejects.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xf0",
- "UMask": "0x10",
- "BriefDescription": "L1D writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L1D_WB",
- "PublicDescription": "L1D writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "EventCode": "0xb2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xf0",
- "UMask": "0x20",
- "BriefDescription": "L2 fill requests that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L2_FILL",
- "PublicDescription": "L2 fill requests that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xf0",
- "UMask": "0x40",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L2_WB",
- "PublicDescription": "L2 writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xf0",
- "UMask": "0x80",
- "BriefDescription": "Transactions accessing L2 pipe",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "PublicDescription": "Transactions accessing L2 pipe.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
- "UMask": "0x1",
- "BriefDescription": "L2 cache lines in I state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.I",
- "PublicDescription": "L2 cache lines in I state filling L2.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterMask": "1",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF1",
- "UMask": "0x2",
- "BriefDescription": "L2 cache lines in S state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.S",
- "PublicDescription": "L2 cache lines in S state filling L2.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Offcore outstanding Demand code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF1",
- "UMask": "0x4",
- "BriefDescription": "L2 cache lines in E state filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.E",
- "PublicDescription": "L2 cache lines in E state filling L2.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Offcore outstanding demand data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
- "UMask": "0x7",
- "BriefDescription": "L2 cache lines filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.ALL",
- "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "Errata": "HSD78, HSD62, HSD61, HSM63, HSM80",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF2",
- "UMask": "0x5",
- "BriefDescription": "Clean L2 cache lines evicted by demand",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
- "PublicDescription": "Clean L2 cache lines evicted by demand.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "Errata": "HSD62, HSD61, HSM63",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
- "UMask": "0x6",
- "BriefDescription": "Dirty L2 cache lines evicted by demand",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
- "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xf4",
- "UMask": "0x10",
- "BriefDescription": "Split locks in SQ",
- "Counter": "0,1,2,3",
- "EventName": "SQ_MISC.SPLIT_LOCK",
+ "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0244",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x10003C0091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C0001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x4003C0091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x10003C07F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x4003C07F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all requests hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x3F803C8FFF",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C0004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x10003C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
- "MSRValue": "0x3F803C0010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
+ "MSRValue": "0x4003C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
- "MSRValue": "0x3F803C0020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
+ "MSRValue": "0x10003C0004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
- "MSRValue": "0x3F803C0040",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x4003C0004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
- "MSRValue": "0x3F803C0080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
+ "MSRValue": "0x10003C0001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
- "MSRValue": "0x3F803C0100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
+ "MSRValue": "0x4003C0001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
- "MSRValue": "0x3F803C0200",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
+ "MSRValue": "0x10003C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x4003C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C0091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x3F803C0040",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x3F803C0010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C0122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x3F803C0020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C0244",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x3F803C0200",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "MSRValue": "0x04003C07F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
+ "MSRValue": "0x3F803C0080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs hit in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "MSRValue": "0x10003C07F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
+ "MSRValue": "0x3F803C0100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all requests hit in the L3",
- "MSRValue": "0x3F803C8FFF",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_HIT.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests hit in the L3",
+ "BriefDescription": "Split locks in SQ",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
index bc08cc1f2f7e..8fcc10f74ad9 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/floating-point.json
@@ -1,83 +1,83 @@
[
{
- "EventCode": "0xC1",
- "UMask": "0x8",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
- "Errata": "HSD56, HSM57",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC1",
- "UMask": "0x10",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
- "Errata": "HSD56, HSM57",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC6",
- "UMask": "0x7",
"BriefDescription": "Approximate counts of AVX & AVX2 256-bit instructions, including non-arithmetic instructions, loads, and stores. May count non-AVX instructions that employ 256-bit operations, including (but not necessarily limited to) rep string instructions that use 256-bit loads and stores for optimized performance, XSAVE* and XRSTOR*, and operations that transition the x87 FPU data registers between x87 and MMX.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xC6",
"EventName": "AVX_INSTS.ALL",
"PublicDescription": "Note that a whole rep string only counts AVX_INST.ALL once.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
"EventCode": "0xCA",
- "UMask": "0x2",
- "BriefDescription": "Number of X87 assists due to output value.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.X87_OUTPUT",
- "PublicDescription": "Number of X87 FP assists due to output values.",
+ "EventName": "FP_ASSIST.ANY",
+ "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1e"
},
{
+ "BriefDescription": "Number of SIMD FP assists due to input values",
"EventCode": "0xCA",
- "UMask": "0x4",
- "BriefDescription": "Number of X87 assists due to input value.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.X87_INPUT",
- "PublicDescription": "Number of X87 FP assists due to input values.",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "Number of SIMD FP assists due to input values.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xCA",
- "UMask": "0x8",
"BriefDescription": "Number of SIMD FP assists due to Output values",
- "Counter": "0,1,2,3",
+ "EventCode": "0xCA",
"EventName": "FP_ASSIST.SIMD_OUTPUT",
"PublicDescription": "Number of SIMD FP assists due to output values.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of X87 assists due to input value.",
"EventCode": "0xCA",
- "UMask": "0x10",
- "BriefDescription": "Number of SIMD FP assists due to input values",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "PublicDescription": "Number of X87 FP assists due to input values.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of X87 assists due to output value.",
"EventCode": "0xCA",
- "UMask": "0x1e",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.ANY",
- "CounterMask": "1",
- "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "Number of X87 FP assists due to output values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "PublicDescription": "Number of SIMD move elimination candidate uops that were eliminated.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "PublicDescription": "Number of SIMD move elimination candidate uops that were not eliminated.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "Errata": "HSD56, HSM57",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "Errata": "HSD56, HSM57",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
index a4d9f1fcf940..73d6d681dfa7 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/frontend.json
@@ -1,294 +1,246 @@
[
{
- "EventCode": "0x79",
- "UMask": "0x2",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.EMPTY",
- "Errata": "HSD135",
- "PublicDescription": "Counts cycles the IDQ is empty.",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
- "CounterMask": "1",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFDATA_STALL",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFETCH_STALL",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts Instruction Cache (ICACHE) misses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_CYCLES",
- "CounterMask": "1",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
"CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts cycles MITE is delivered at least one uop. Set Cmask = 1.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
+ "Errata": "HSD135",
"EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "Counts cycles the IDQ is empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "Number of uops delivered to IDQ from any path.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
- "EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"CounterMask": "1",
- "PublicDescription": "Counts cycles MITE is delivered at least one uop. Set Cmask = 1.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
"EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
"PublicDescription": "This event counts cycles during which the microcode sequencer assisted the Front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
"CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0x79",
- "UMask": "0x3c",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_ALL_UOPS",
- "PublicDescription": "Number of uops delivered to IDQ from any path.",
+ "EventName": "IDQ.MS_DSB_OCCUR",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x80",
- "UMask": "0x1",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.HIT",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x80",
- "UMask": "0x2",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes Uncacheable accesses.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.MISSES",
- "PublicDescription": "This event counts Instruction Cache (ICACHE) misses.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.IFETCH_STALL",
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction-cache miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE.IFDATA_STALL",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "This event counts uops delivered by the Front-end with the assistance of the microcode sequencer. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"Errata": "HSD135",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
"PublicDescription": "This event count the number of undelivered (unallocated) uops from the Front-end to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. The Front-end can allocate up to 4 uops per cycle so this event can increment 0-4 times per cycle depending on the number of unallocated uops. This event is counted on a per-core basis.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"CounterMask": "4",
"Errata": "HSD135",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"PublicDescription": "This event counts the number cycles during which the Front-end allocated exactly zero uops to the Resource Allocation Table (RAT) while the Back-end of the processor is not stalled. This event is counted on a per-core basis.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "Errata": "HSD135",
"EventCode": "0x9C",
- "UMask": "0x1",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"CounterMask": "3",
"Errata": "HSD135",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"CounterMask": "2",
"Errata": "HSD135",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "UMask": "0x1",
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"CounterMask": "1",
"Errata": "HSD135",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "Invert": "1",
"EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "CounterMask": "1",
- "Errata": "HSD135",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xAB",
- "UMask": "0x2",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "Counter": "0,1,2,3",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
index 311a005dc35b..11f152c346eb 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
@@ -1,340 +1,984 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , IDQ.MITE_UOPS / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 16 * ( ICACHE.HIT + ICACHE.MISSES ) / 4.0 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(60 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "43 * (MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "10 * ARITH.DIVIDER_UOPS / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(8 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(8 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.REQUEST_FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * min(CPU_CLK_UNHALTED.THREAD, IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "ICACHE.IFDATA_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
},
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "(UOPS_EXECUTED.CORE / 2 / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) if #SMT_on else UOPS_EXECUTED.CORE / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@))",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "( UOPS_EXECUTED.CORE / 2 / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@) ) if #SMT_on else UOPS_EXECUTED.CORE / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization"
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
},
{
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
},
{
- "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
},
{
- "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "1000000000 * ( cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x35\\,umask\\=0x3\\,filter_opc\\=0x182@ ) / ( cbox_0@event\\=0x0@ / duration_time )",
- "MetricGroup": "Memory_Lat",
- "MetricName": "DRAM_Read_Latency"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182@ / cbox@event\\=0x36\\,umask\\=0x3\\,filter_opc\\=0x182\\,thresh\\=1@",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_Parallel_Reads"
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_core_clks",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "Socket actual clocks when any core is active on that socket",
- "MetricExpr": "cbox_0@event\\=0x0@",
- "MetricGroup": "",
- "MetricName": "Socket_CLKS"
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(14 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.L3_HIT / (MEM_LOAD_UOPS_RETIRED.L3_HIT + 7 * MEM_LOAD_UOPS_RETIRED.L3_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.L3_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_local_dram",
+ "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
+ "MetricName": "tma_port_7",
+ "MetricThreshold": "tma_port_7 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address). Sample with: UOPS_DISPATCHED_PORT.PORT_7",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@)) / 2 - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB if #SMT_on else min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - (cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING)) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(200 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) + 180 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD)))) / tma_info_clks",
+ "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
+ "MetricName": "tma_remote_cache",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "310 * (MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.L3_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_L3_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_remote_dram",
+ "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "INST_RETIRED.X87 * tma_info_uoppi / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
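
The metric entries above pair a MetricExpr, written in terms of raw event names and helper metrics, with a ScaleUnit of 100%. As a rough illustration only (the counter values below are made-up numbers, and a real perf build resolves expressions such as #SMT_on and the tma_info_* helpers internally), a metric like tma_split_stores could be evaluated along these lines:

# Illustrative sketch: evaluate a simplified TMA metric expression with
# hypothetical event counts. Real perf parses and resolves MetricExpr itself.
metric = {
    "MetricName": "tma_split_stores",
    "MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_clks",
    "ScaleUnit": "100%",
}

# Made-up counter values standing in for one measurement interval.
counts = {
    "MEM_UOPS_RETIRED.SPLIT_STORES": 1_200_000,
    "tma_info_core_clks": 900_000_000,  # normally itself a derived metric
}

value = 2 * counts["MEM_UOPS_RETIRED.SPLIT_STORES"] / counts["tma_info_core_clks"]
if metric["ScaleUnit"] == "100%":
    value *= 100  # report as a percentage of core clocks
print(f"{metric['MetricName']} = {value:.3f}%")
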
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/memory.json b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
index a42d5ce86b6f..2d212cf59e92 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/memory.json
@@ -1,767 +1,577 @@
[
{
- "EventCode": "0x05",
- "UMask": "0x1",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
- "Counter": "0,1,2,3",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x05",
- "UMask": "0x2",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
- "Counter": "0,1,2,3",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "PublicDescription": "Speculative cache-line split store-address uops dispatched to L1D.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x1",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x2",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional writes.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x4",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x8",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x10",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x20",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x40",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x4",
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x8",
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x10",
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "PublicDescription": "This event counts the number of memory ordering machine clears detected. Memory ordering machine clears can result from memory address aliasing or snoops from another hardware thread or core to data inflight in the pipeline. Machine clears can have a significant performance impact if they are happening frequently.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC8",
- "UMask": "0x1",
- "BriefDescription": "Number of times an HLE execution started.",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.START",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xc8",
- "UMask": "0x2",
- "BriefDescription": "Number of times an HLE execution successfully committed.",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xc8",
- "UMask": "0x4",
"BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
- "PEBS": "1",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xc8",
- "UMask": "0x8",
"BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xc8",
- "UMask": "0x10",
"BriefDescription": "Number of times an HLE execution aborted due to uncommon conditions.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xc8",
- "UMask": "0x20",
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xc8",
- "UMask": "0x40",
"BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MISC4",
"Errata": "HSD65",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MISC4",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xc8",
- "UMask": "0x80",
"BriefDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts)",
- "Counter": "0,1,2,3",
+ "EventCode": "0xc8",
"EventName": "HLE_RETIRED.ABORTED_MISC5",
"PublicDescription": "Number of times an HLE execution aborted due to none of the previous 4 categories (e.g. interrupts).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x1",
- "BriefDescription": "Number of times an RTM execution started.",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.START",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "EventCode": "0xc9",
- "UMask": "0x2",
- "BriefDescription": "Number of times an RTM execution successfully committed.",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.COMMIT",
+ "BriefDescription": "Number of times an HLE execution successfully committed.",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xc9",
- "UMask": "0x4",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED",
+ "BriefDescription": "Number of times an HLE execution started.",
+ "EventCode": "0xC8",
+ "EventName": "HLE_RETIRED.START",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x8",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC1",
- "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering machine clears detected. Memory ordering machine clears can result from memory address aliasing or snoops from another hardware thread or core to data inflight in the pipeline. Machine clears can have a significant performance impact if they are happening frequently.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xc9",
- "UMask": "0x10",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Randomly selected loads with latency value being above 128.",
+ "Data_LA": "1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x20",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC3",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Randomly selected loads with latency value being above 16.",
+ "Data_LA": "1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x40",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC4",
- "Errata": "HSD65",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Randomly selected loads with latency value being above 256.",
+ "Data_LA": "1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
},
{
- "EventCode": "0xc9",
- "UMask": "0x80",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MISC5",
- "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Randomly selected loads with latency value being above 32.",
+ "Data_LA": "1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
"BriefDescription": "Randomly selected loads with latency value being above 4.",
- "PEBS": "2",
- "MSRValue": "0x4",
- "Counter": "3",
+ "Data_LA": "1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "Errata": "HSD76, HSD25, HSM26",
- "TakenAlone": "1",
+ "MSRValue": "0x4",
+ "PEBS": "2",
"SampleAfterValue": "100003",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 8.",
- "PEBS": "2",
- "MSRValue": "0x8",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
+ "BriefDescription": "Randomly selected loads with latency value being above 512.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
- "TakenAlone": "1",
- "SampleAfterValue": "50021",
- "CounterHTOff": "3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 16.",
- "PEBS": "2",
- "MSRValue": "0x10",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "Errata": "HSD76, HSD25, HSM26",
- "TakenAlone": "1",
- "SampleAfterValue": "20011",
- "CounterHTOff": "3"
- },
- {
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 32.",
+ "MSRValue": "0x200",
"PEBS": "2",
- "MSRValue": "0x20",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
- "MSRIndex": "0x3F6",
- "Errata": "HSD76, HSD25, HSM26",
- "TakenAlone": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "3"
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
"BriefDescription": "Randomly selected loads with latency value being above 64.",
- "PEBS": "2",
- "MSRValue": "0x40",
- "Counter": "3",
+ "Data_LA": "1",
+ "Errata": "HSD76, HSD25, HSM26",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "Errata": "HSD76, HSD25, HSM26",
- "TakenAlone": "1",
+ "MSRValue": "0x40",
+ "PEBS": "2",
"SampleAfterValue": "2003",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 128.",
- "PEBS": "2",
- "MSRValue": "0x80",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
+ "BriefDescription": "Randomly selected loads with latency value being above 8.",
+ "Data_LA": "1",
"Errata": "HSD76, HSD25, HSM26",
- "TakenAlone": "1",
- "SampleAfterValue": "1009",
- "CounterHTOff": "3"
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 256.",
- "PEBS": "2",
- "MSRValue": "0x100",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "Errata": "HSD76, HSD25, HSM26",
- "TakenAlone": "1",
- "SampleAfterValue": "503",
- "CounterHTOff": "3"
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Randomly selected loads with latency value being above 512.",
- "PEBS": "2",
- "MSRValue": "0x200",
- "Counter": "3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "Errata": "HSD76, HSD25, HSM26",
- "TakenAlone": "1",
- "SampleAfterValue": "101",
- "CounterHTOff": "3"
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "Speculative cache-line split store-address uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads miss in the L3",
- "MSRValue": "0x3FBFC00001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads miss in the L3",
+ "MSRValue": "0x3FBFC00244",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0600400001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x600400244",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
- "MSRValue": "0x3FBFC00002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) miss in the L3",
+ "MSRValue": "0x3FBFC00091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0600400002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x600400091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103FC00002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x63F800091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads miss in the L3",
- "MSRValue": "0x3FBFC00004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads miss in the L3",
+ "MSRValue": "0x103FC00091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0600400004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x83FC00091",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
- "MSRValue": "0x3FBFC00010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
+ "MSRValue": "0x3FBFC007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
- "MSRValue": "0x3FBFC00020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
+ "MSRValue": "0x6004007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
- "MSRValue": "0x3FBFC00040",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x63F8007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
- "MSRValue": "0x3FBFC00080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
+ "MSRValue": "0x103FC007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
- "MSRValue": "0x3FBFC00100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
+ "MSRValue": "0x83FC007F7",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all requests miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
- "MSRValue": "0x3FBFC00200",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
+ "MSRValue": "0x3FBFC08FFF",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss in the L3",
- "MSRValue": "0x3FBFC00091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss in the L3",
+ "MSRValue": "0x3FBFC00122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0600400091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x600400122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063F800091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x3FBFC00004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103FC00091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x600400004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x083FC00091",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch data reads miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x3FBFC00001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs miss in the L3",
- "MSRValue": "0x3FBFC00122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs miss in the L3",
+ "MSRValue": "0x600400001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0600400122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch RFOs miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x3FBFC00002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the data is returned from local dram",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads miss in the L3",
- "MSRValue": "0x3FBFC00244",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads miss in the L3",
+ "MSRValue": "0x600400002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) miss the L3 and the modified data is transferred from remote cache",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
- "MSRValue": "0x0600400244",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all demand & prefetch code reads miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x103FC00002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
- "MSRValue": "0x3FBFC007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss in the L3",
+ "MSRValue": "0x3FBFC00040",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
- "MSRValue": "0x06004007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from local dram",
+ "MSRValue": "0x3FBFC00010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
- "MSRValue": "0x063F8007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the data is returned from remote dram",
+ "MSRValue": "0x3FBFC00020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
- "MSRValue": "0x103FC007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and the modified data is transferred from remote cache",
+ "MSRValue": "0x3FBFC00200",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
- "MSRValue": "0x083FC007F7",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all data/code/rfo reads (demand & prefetch) miss the L3 and clean or shared data is transferred from remote cache",
+ "MSRValue": "0x3FBFC00080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs miss in the L3",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all requests miss in the L3",
- "MSRValue": "0x3FBFC08FFF",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_REQUESTS.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
- "PublicDescription": "Counts all requests miss in the L3",
+ "MSRValue": "0x3FBFC00100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC1",
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "Errata": "HSD65",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC4",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MISC5",
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.START",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
}
-]
\ No newline at end of file
+]
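
The memory.json hunks above largely reorder each event's fields into alphabetical key order, re-sort the entries by event name, and drop per-counter bookkeeping such as Counter and CounterHTOff. A minimal sketch of that kind of normalization, applied to one entry copied from the "-" lines above (the dropped-field set is inferred from these hunks, not taken from the converter that generates the files):

import json

# Fields visibly removed in the hunks above; treat this set as illustrative.
DROPPED = {"Counter", "CounterHTOff", "Offcore", "TakenAlone"}

# One entry in its old shape, copied from the removed lines above.
old_entry = {
    "EventCode": "0x54",
    "UMask": "0x1",
    "BriefDescription": "Number of times a transactional abort was signaled "
                        "due to a data conflict on a transactionally accessed address.",
    "Counter": "0,1,2,3",
    "EventName": "TX_MEM.ABORT_CONFLICT",
    "SampleAfterValue": "2000003",
    "CounterHTOff": "0,1,2,3,4,5,6,7",
}

def normalize(entry):
    # Alphabetical key order, minus the dropped bookkeeping fields.
    return {k: entry[k] for k in sorted(entry) if k not in DROPPED}

print(json.dumps(normalize(old_entry), indent=4))
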
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/other.json b/tools/perf/pmu-events/arch/x86/haswellx/other.json
index 800e65df31bc..2395ebf112db 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/other.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/other.json
@@ -1,43 +1,35 @@
[
{
- "EventCode": "0x5C",
- "UMask": "0x1",
"BriefDescription": "Unhalted core cycles when the thread is in ring 0",
- "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0",
"PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "CounterMask": "1",
"EdgeDetect": "1",
"EventCode": "0x5C",
- "UMask": "0x1",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
- "Counter": "0,1,2,3",
"EventName": "CPL_CYCLES.RING0_TRANS",
- "CounterMask": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5C",
- "UMask": "0x2",
"BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "Counter": "0,1,2,3",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING123",
"PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
- "UMask": "0x1",
"BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
- "Counter": "0,1,2,3",
+ "EventCode": "0x63",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
index 26f2888341ee..540f4372623c 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/pipeline.json
@@ -1,1340 +1,1050 @@
[
{
- "UMask": "0x1",
- "BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 0",
- "EventName": "INST_RETIRED.ANY",
- "Errata": "HSD140, HSD143",
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 0"
- },
- {
- "UMask": "0x2",
- "BriefDescription": "Core cycles when the thread is not in halt state.",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "UMask": "0x3",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "EventCode": "0x03",
- "UMask": "0x2",
- "BriefDescription": "loads blocked by overlapping with store buffer that cannot be forwarded",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x03",
- "UMask": "0x8",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.NO_SR",
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x07",
- "UMask": "0x1",
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline which can have a performance impact.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "This event counts the number of cycles spent waiting for a recovery after an event such as a processor nuke, JEClear, assist, hle/rtm abort etc.",
+ "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0D",
- "UMask": "0x3",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.ANY",
- "PublicDescription": "This event counts the number of uops issued by the Front-end of the pipeline to the Back-end. This event is counted at the allocation stage and will count both retired and non-retired uops.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd0"
},
{
- "EventCode": "0x0E",
- "UMask": "0x10",
- "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.FLAGS_MERGE",
- "PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "EventCode": "0x0E",
- "UMask": "0x20",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (for example, 2 sources + immediate) regardless of whether it is a result of LEA instruction or not.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "EventCode": "0x0E",
- "UMask": "0x40",
- "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.SINGLE_MUL",
- "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
},
{
- "EventCode": "0x14",
- "UMask": "0x2",
- "BriefDescription": "Any uop executed by the Divider. (This includes all divide uops, sqrt, ...)",
- "Counter": "0,1,2,3",
- "EventName": "ARITH.DIVIDER_UOPS",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
},
{
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
},
{
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0x84"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x88"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Branch instructions at retirement.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x3c",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x4c",
- "UMask": "0x1",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
- "Counter": "0,1,2,3",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "Number of far branches retired.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0x4c",
- "UMask": "0x2",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
- "Counter": "0,1,2,3",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "UMask": "0x1",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
- "PublicDescription": "Number of integer move elimination candidate uops that were eliminated.",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x58",
- "UMask": "0x2",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
- "PublicDescription": "Number of SIMD move elimination candidate uops that were eliminated.",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x58",
- "UMask": "0x4",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
- "PublicDescription": "Number of integer move elimination candidate uops that were not eliminated.",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of near return instructions retired.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x58",
- "UMask": "0x8",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
- "Counter": "0,1,2,3",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
- "PublicDescription": "Number of SIMD move elimination candidate uops that were not eliminated.",
- "SampleAfterValue": "1000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Number of near taken branches retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "This event counts cycles when the Reservation Station ( RS ) is empty for the thread. The RS is a structure that buffers allocated micro-ops from the Front-end. If there are many cycles when the RS is empty, it may represent an underflow of instructions delivered from the Front-end.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "EventCode": "0x87",
- "UMask": "0x1",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "Counter": "0,1,2,3",
- "EventName": "ILD_STALL.LCP",
- "PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "EventCode": "0x87",
- "UMask": "0x4",
- "BriefDescription": "Stall cycles because IQ is full",
- "Counter": "0,1,2,3",
- "EventName": "ILD_STALL.IQ_FULL",
- "PublicDescription": "Stall cycles due to IQ is full.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "EventCode": "0x88",
- "UMask": "0x41",
- "BriefDescription": "Not taken macro-conditional branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "BriefDescription": "Speculative mispredicted indirect branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
- "EventCode": "0x88",
- "UMask": "0x81",
- "BriefDescription": "Taken speculative and retired macro-conditional branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x88",
- "UMask": "0x82",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "EventCode": "0x88",
- "UMask": "0x84",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "EventCode": "0x88",
- "UMask": "0x88",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "EventCode": "0x88",
- "UMask": "0x90",
- "BriefDescription": "Taken speculative and retired direct near calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "EventCode": "0x88",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x88",
- "UMask": "0xc1",
- "BriefDescription": "Speculative and retired macro-conditional branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x88",
- "UMask": "0xc2",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "UMask": "0xc4",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0x88",
- "UMask": "0xc8",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x88",
- "UMask": "0xd0",
- "BriefDescription": "Speculative and retired direct near calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "UMask": "0xff",
- "BriefDescription": "Speculative and retired branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "PublicDescription": "Counts all near executed branches (not necessarily retired).",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "UMask": "0x41",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
- "UMask": "0x81",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "EventCode": "0x89",
- "UMask": "0x84",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "UMask": "0x88",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "PublicDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "UMask": "0xa0",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
- "UMask": "0xc1",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
- "UMask": "0xc4",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0x89",
- "UMask": "0xff",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "PublicDescription": "Counts all near executed branches (not necessarily retired).",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0",
- "PublicDescription": "Cycles which a uop is dispatched on port 0 in this thread.",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per core when uops are executed in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
- "AnyThread": "1",
- "PublicDescription": "Cycles per core when uops are exectuted in port 0.",
+ "BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "CounterMask": "1",
+ "Errata": "HSD78, HSM63, HSM80",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "PublicDescription": "Cycles with pending L2 miss loads. Set Cmask=2 to count cycle.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "BriefDescription": "Cycles with pending memory loads.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1",
- "PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per core when uops are executed in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
- "AnyThread": "1",
- "PublicDescription": "Cycles per core when uops are exectuted in port 1.",
+ "BriefDescription": "Execution stalls due to L1 data cache misses",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "BriefDescription": "Execution stalls due to L2 cache misses.",
+ "CounterMask": "5",
+ "Errata": "HSM63, HSM80",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "PublicDescription": "Number of loads missed L2.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2",
- "PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
+ "BriefDescription": "Execution stalls due to memory subsystem.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline and there were memory instructions pending (waiting for data).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Stall cycles because IQ is full",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "PublicDescription": "Stall cycles due to IQ is full.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "This event counts cycles where the decoder is stalled on an instruction with a length changing prefix (LCP).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3",
- "PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
+ "BriefDescription": "Instructions retired from execution.",
+ "Errata": "HSD140, HSD143",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. INST_RETIRED.ANY is counted by a designated fixed counter, leaving the programmable counters available for other events. Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
- "AnyThread": "1",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Errata": "HSD11, HSD140",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Number of instructions at retirement.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Errata": "HSD140",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.X87",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4",
- "PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "This event counts the number of cycles spent waiting for a recovery after an event such as a processor nuke, JEClear, assist, hle/rtm abort etc.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per core when uops are executed in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
"AnyThread": "1",
- "PublicDescription": "Cycles per core when uops are exectuted in port 4.",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke)",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "PublicDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5",
- "PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "loads blocked by overlapping with store buffer that cannot be forwarded",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. The penalty for blocked store forwarding is that the load must wait for the store to write its value to the cache before it can be issued.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per core when uops are executed in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
- "AnyThread": "1",
- "PublicDescription": "Cycles per core when uops are exectuted in port 5.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline which can have a performance impact.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6",
- "PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per core when uops are executed in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
- "AnyThread": "1",
- "PublicDescription": "Cycles per core when uops are exectuted in port 6.",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7",
- "PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of uops delivered by the LSD.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "PublicDescription": "Number of integer move elimination candidate uops that were eliminated.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "PublicDescription": "Number of integer move elimination candidate uops that were not eliminated.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xA2",
- "UMask": "0x1",
"BriefDescription": "Resource-related stall cycles",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.ANY",
"Errata": "HSD135",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ANY",
"PublicDescription": "Cycles allocation is stalled due to resource related reason.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
"EventCode": "0xA2",
- "UMask": "0x4",
+ "EventName": "RESOURCE_STALLS.ROB",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA2",
- "UMask": "0x8",
"BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "Counter": "0,1,2,3",
+ "EventCode": "0xA2",
"EventName": "RESOURCE_STALLS.SB",
"PublicDescription": "This event counts cycles during which no instructions were allocated because no Store Buffers (SB) were available.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA2",
- "UMask": "0x10",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.ROB",
+ "BriefDescription": "Count cases of saving new LBR",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles with pending L2 cache miss loads.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
- "CounterMask": "1",
- "Errata": "HSD78",
- "PublicDescription": "Cycles with pending L2 miss loads. Set Cmask=2 to count cycle.",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "This event counts cycles when the Reservation Station ( RS ) is empty for the thread. The RS is a structure that buffers allocated micro-ops from the Front-end. If there are many cycles when the RS is empty, it may represent an underflow of instructions delivered from the Front-end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x2",
- "BriefDescription": "Cycles with pending memory loads.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
- "CounterMask": "2",
- "PublicDescription": "Cycles with pending memory loads. Set Cmask=2 to count cycle.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
- "CounterMask": "4",
- "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls due to L2 cache misses.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
- "CounterMask": "5",
- "PublicDescription": "Number of loads missed L2.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA3",
- "UMask": "0x6",
- "BriefDescription": "Execution stalls due to memory subsystem.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
- "CounterMask": "6",
- "PublicDescription": "This event counts cycles during which no instructions were executed in the execution stage of the pipeline and there were memory instructions pending (waiting for data).",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "CounterMask": "8",
- "PublicDescription": "Cycles with pending L1 data cache miss loads. Set Cmask=8 to count cycle.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls due to L1 data cache misses",
- "Counter": "2",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
- "CounterMask": "12",
- "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "CounterHTOff": "2"
+ "UMask": "0x10"
},
{
- "EventCode": "0xa8",
- "UMask": "0x1",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.UOPS",
- "PublicDescription": "Number of uops delivered by the LSD.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
- "CounterMask": "1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
- "CounterMask": "4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "Invert": "1",
+ "BriefDescription": "Number of uops executed on the core.",
+ "Errata": "HSD30, HSM31",
"EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "CounterMask": "1",
- "Errata": "HSD144, HSD30, HSM31",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CounterMask": "1",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
+ "Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
"CounterMask": "2",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
+ "Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
"CounterMask": "3",
- "Errata": "HSD144, HSD30, HSM31",
- "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
+ "Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
"CounterMask": "4",
- "Errata": "HSD144, HSD30, HSM31",
+ "Errata": "HSD30, HSM31",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Number of uops executed on the core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE",
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"Errata": "HSD30, HSM31",
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
"CounterMask": "1",
- "Errata": "HSD30, HSM31",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "PublicDescription": "This events counts the cycles where at least one uop was executed. It is counted per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
"CounterMask": "2",
- "Errata": "HSD30, HSM31",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "PublicDescription": "This events counts the cycles where at least two uop were executed. It is counted per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
"CounterMask": "3",
- "Errata": "HSD30, HSM31",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "PublicDescription": "This events counts the cycles where at least three uop were executed. It is counted per thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread.",
"CounterMask": "4",
- "Errata": "HSD30, HSM31",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "Errata": "HSD144, HSD30, HSM31",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
"Invert": "1",
- "EventCode": "0xb1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "Errata": "HSD30, HSM31",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "HSD11, HSD140",
- "PublicDescription": "Number of instructions at retirement.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0",
+ "PublicDescription": "Cycles which a uop is dispatched on port 0 in this thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC0",
- "UMask": "0x1",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "PEBS": "2",
- "Counter": "1",
- "EventName": "INST_RETIRED.PREC_DIST",
- "Errata": "HSD140",
- "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC0",
- "UMask": "0x2",
- "BriefDescription": "FP operations retired. X87 FP operations that have no exceptions: Counts also flows that have several X87 or flows that use X87 uops in the exception handling.",
- "Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.X87",
- "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts FP operations retired. For X87 FP operations that have no exceptions counting also includes flows that have several X87, or flows that use X87 uops in the exception handling.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1",
+ "PublicDescription": "Cycles which a uop is dispatched on port 1 in this thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC1",
- "UMask": "0x40",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
- "PublicDescription": "Number of microcode assists invoked by HW upon uop writeback.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_1_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Actually retired uops.",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.ALL",
- "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2",
+ "PublicDescription": "Cycles which a uop is dispatched on port 2 in this thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Cycles without actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
- "CounterMask": "1",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "CounterMask": "10",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3",
+ "PublicDescription": "Cycles which a uop is dispatched on port 3 in this thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x1",
- "BriefDescription": "Cycles without actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
"AnyThread": "1",
- "CounterMask": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4",
+ "PublicDescription": "Cycles which a uop is dispatched on port 4 in this thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Cycles there was a Nuke. Account for both thread-specific and All Thread Nukes.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.CYCLES",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC3",
- "UMask": "0x4",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.SMC",
- "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5",
+ "PublicDescription": "Cycles which a uop is dispatched on port 5 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xC3",
- "UMask": "0x20",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_5_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Branch instructions at retirement.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6",
+ "PublicDescription": "Cycles which a uop is dispatched on port 6 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC4",
- "UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "PublicDescription": "Counts the number of conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are executed in port 6.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_6_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC4",
- "UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7",
+ "PublicDescription": "Cycles which a uop is dispatched on port 7 in this thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xC4",
- "UMask": "0x2",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 7.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_EXECUTED_PORT.PORT_7_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired.",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of uops issued by the Front-end of the pipeline to the Back-end. This event is counted at the allocation stage and will count both retired and non-retired uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "PublicDescription": "Counts the number of near return instructions retired.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x10",
- "BriefDescription": "Not taken branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "PublicDescription": "Counts the number of not taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of flags-merge uops being allocated. Such uops considered perf sensitive; added by GSR u-arch.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops allocated. Such uops add delay.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC4",
- "UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near taken branches retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0xC4",
- "UMask": "0x40",
- "BriefDescription": "Far branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "PublicDescription": "Number of far branches retired.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (for example, 2 sources + immediate) regardless of whether it is a result of LEA instruction or not.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xC5",
- "UMask": "0x0",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "BriefDescription": "Actually retired uops.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "Counts the number of micro-ops retired. Use Cmask=1 and invert to count active cycles or stalled cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired.",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "PublicDescription": "This event counts all mispredicted branch instructions retired. This is a precise event.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x20",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were taken but mispredicted.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 uops or 4 instructions could retire each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Count cases of saving new LBR",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xe6",
- "UMask": "0x1f",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "Counter": "0,1,2,3",
- "EventName": "BACLEARS.ANY",
- "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
index 58ed6d33d1f4..9227cc226002 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-cache.json
@@ -1,206 +1,1343 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
- "EventName": "UNC_C_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "CBO"
- },
- {
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
- "Counter": "0,1,2,3",
- "EventCode": "0x34",
- "EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
+ "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
+ "Filter": "filter_opc=0x191",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
- "UMask": "0x11",
- "Unit": "CBO"
+ "UMask": "0x3",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+ "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
+ "Filter": "filter_opc=0x192",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "UMask": "0x3",
+ "Unit": "CBOX"
},
{
"BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.DATA_READ",
"Filter": "filter_opc=0x182",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
- },
- {
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
"BriefDescription": "MMIO reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_READ",
"Filter": "filter_opc=0x187,filter_nc=1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
"BriefDescription": "MMIO writes. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_MISSES.MMIO_WRITE",
"Filter": "filter_opc=0x18f,filter_nc=1",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
- "Filter": "filter_opc=0x190",
+ "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
+ "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x191",
+ "EventName": "LLC_MISSES.PCIE_READ",
+ "Filter": "filter_opc=0x19e",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
- "Filter": "filter_opc=0x192",
+ "EventName": "LLC_MISSES.PCIE_WRITE",
+ "Filter": "filter_opc=0x1c8",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
+ "Filter": "filter_opc=0x190",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "ItoM write misses (as part of fast string memcpy stores) + PCIe full line writes. Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_c_tor_inserts.miss_opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "LLC_MISSES.UNCACHEABLE",
+ "Filter": "filter_opc=0x187",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe write misses (full cache line). Derived from unc_c_tor_inserts.miss_opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
+ "Filter": "filter_opc=0x181",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "UMask": "0x1",
+ "Unit": "CBOX"
},
{
"BriefDescription": "PCIe writes (partial cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
"Filter": "filter_opc=0x180,filter_tid=0x3e",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "L2 demand and L2 prefetch code references to LLC. Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x181",
+ "EventName": "LLC_REFERENCES.PCIE_READ",
+ "Filter": "filter_opc=0x19e",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"ScaleUnit": "64Bytes",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.PCIE_WRITE",
+ "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x1",
+ "Unit": "CBOX"
},
{
"BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_FULL",
"Filter": "filter_opc=0x18c",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"ScaleUnit": "64Bytes",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
"BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
"EventCode": "0x35",
"EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
"Filter": "filter_opc=0x18d",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"ScaleUnit": "64Bytes",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "BriefDescription": "Bounce Control",
+ "EventCode": "0xA",
+ "EventName": "UNC_C_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1F",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "EventCode": "0x9",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local distress or incoming distress signals are asserted. Incoming distress includes both up and dn.",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.ANY",
+ "Filter": "filter_state=0x1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x11",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "UMask": "0x9",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x5",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.I_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "M line evictions from LLC (writebacks to memory)",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"ScaleUnit": "64Bytes",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe write references (full cache line). Derived from unc_c_tor_inserts.opcode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; DRd hitting non-M with raw CV=0",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Clean Victim with raw CV=0",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.CVZERO_PREFETCH_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Number of times that an RFO hit in S state. This is useful for determining if it might be good for a workload to use RspIWB instead of RspSWB.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE0",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 0",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE1",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 2",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE2",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 2",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 3",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.AGE3",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 3",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.LRU_DECREMENT",
+ "PerPkg": "1",
+ "PublicDescription": "How often all LRU bits were decremented by 1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "EventCode": "0x3C",
+ "EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
+ "PerPkg": "1",
+ "PublicDescription": "How often we picked a victim that had a non-zero age",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; All",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; All",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AD",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; AK",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; BL",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters for Down polarity",
+ "UMask": "0xcc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_C_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in HSX Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "UNC_C_RING_SINK_STARVED.AD",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "UNC_C_RING_SINK_STARVED.AK",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "UNC_C_RING_SINK_STARVED.BL",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "UNC_C_RING_SINK_STARVED.IV",
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of cycles the Cbo is actively throttling traffic onto the Ring in order to limit bounce traffic.",
+ "EventCode": "0x7",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IPQ is externally startved and therefore we are blocking the IRQ.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IRQ is externally starved and therefore we are blocking the IPQ.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; Number of times that the ISMQ Bid.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; PRQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IPQ in Internal Starvation.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IRQ in Internal Starvation.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the ISMQ in Internal Starvation.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; PRQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from an address conflicts. Address conflicts out of the IPQ should be rare. They will generally only occur if two different sockets are sending requests to the same address at the same time. This is a true conflict case, unlike the IPQ Address Conflict which is commonly caused by prefetching characteristics.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject. TOR rejects from the IPQ can be caused by the Egress being full or Address Conflicts.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from the Egress being full. IPQ requests make use of the AD Egress for regular responses, the BL egress to forward data, and the AK egress to return credits.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No AD Sbo Credits",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Target Node Filter",
+ "EventCode": "0x28",
+ "EventName": "UNC_C_RxR_IPQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request from the IPQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because of an address match in the TOR. In order to maintain coherency, requests to the same address are not allowed to pass each other up in the Cbo. Therefore, if there is an outstanding request to a given address, one cannot issue another request to that address until it is complete. This comes up most commonly with prefetches. Outstanding prefetches occasionally will not complete their memory fetch and a demand request to the same address will then sit in the IRQ and get retried until the prefetch fills the data into the LLC. Therefore, it will not be uncommon to see this case in high bandwidth streaming workloads when the LLC Prefetcher in the core is enabled.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of IRQ retries that occur. Requests from the IRQ are retried if they are rejected from the TOR pipeline for a variety of reasons. Some of the most common reasons include if the Egress is full, there are no RTIDs, or there is a Physical Address match to another outstanding request.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because it failed to acquire an entry in the Egress. The egress is the buffer that queues up for allocating onto the ring. IRQ requests can make use of all four rings and all four Egresses. If any of the queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of requests rejects because of lack of QPI Ingress credits. These credits are required in order to send transactions to the QPI agent. Please see the QPI_IGR_CREDITS events for more information.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that requests from the IRQ were retried because there were no RTIDs available. RTIDs are required after a request misses the LLC and needs to send snoops and/or requests to memory. If there are no RTIDs available, requests will queue up in the IRQ and retry until one becomes available. Note that there are multiple RTID pools for the different sockets. There may be cases where the local RTIDs are all used, but requests destined for remote memory can still acquire an RTID because there are remote RTIDs available. This event does not provide any filtering for this case.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No AD Sbo Credits",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No BL Sbo Credits",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried because of it lacked credits to send an BL packet to the Sbo.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Target Node Filter",
+ "EventCode": "0x29",
+ "EventName": "UNC_C_RxR_IRQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IPQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the total number of times that a request from the ISMQ retried because of a TOR reject. ISMQ requests generally will not need to retry (or at least ISMQ retries are less common than IRQ retries). ISMQ requests will retry if they are not able to acquire a needed Egress credit to get onto the ring, or for cache evictions that need to acquire an RTID. Most ISMQ requests already have an RTID, so eviction retries will be less common here.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by a lack of Egress credits. The egress is the buffer that queues up for allocating onto the ring. If any of the Egress queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by no RTIDs. M-state cache evictions are serviced through the ISMQ, and must acquire an RTID in order to write back to memory. If no RTIDs are available, they will be retried.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Qualify one of the other subevents by a given RTID destination NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER1.nid.",
+ "UMask": "0x80",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No AD Sbo Credits",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.AD_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried because of it lacked credits to send an AD packet to the Sbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; No BL Sbo Credits",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.BL_SBO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried because of it lacked credits to send an BL packet to the Sbo.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Request Queue Rejects; Target Node Filter",
+ "EventCode": "0x2A",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY2.TARGET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the ISMQ was retried filtered by the Target NodeID as specified in the Cbox's Filter register.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; PRQ Rejects",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For AD Ring",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Acquired; For BL Ring",
+ "EventCode": "0x3D",
+ "EventName": "UNC_C_SBO_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits acquired in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For AD Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "SBo Credits Occupancy; For BL Ring",
+ "EventCode": "0x3E",
+ "EventName": "UNC_C_SBO_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo credits in use in a given cycle, per ring. Each Cbo is assigned an Sbo it can communicate with.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x1c8,filter_tid=0x3e",
+ "EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Eviction transactions inserted into the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Local Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x2a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x8a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched (matches an RTID destination) transactions inserted into the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched eviction transactions inserted into the TOR.",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched miss requests that were inserted into the TOR.",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched write transactions inserted into the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Opcode Match",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Writebacks",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Write transactions inserted into the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All valid TOR entries. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding eviction transactions in the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
},
{
"BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode",
@@ -208,109 +1345,2212 @@
"EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
"Filter": "filter_opc=0x182",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries for miss transactions that match an opcode. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding miss requests in the TOR. 'Miss' means the allocation requires an RTID. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0xa",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x2a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries for miss transactions that match an opcode. This generally means that the request was sent to memory or MMIO.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x8a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of NID matched outstanding requests in the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid.In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "read requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding NID matched eviction transactions in the TOR .",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID.",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); NID matched write transactions int the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc).",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Write transactions in the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto AD Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto AK Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto BL Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AK ring. This is commonly used for snoop responses coming from the core and destined for a Cachebo.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the core AD egress spent in starvation",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both AK egresses spent in starvation",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both BL egresses spent in starvation",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the cachebo IV egress spent in starvation",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming snoop hazard",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the bypass.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Reads where Direct2Core overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lat Opt Return",
+ "EventCode": "0x41",
+ "EventName": "UNC_H_DIRECTORY_LAT_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "Directory Latency Optimization Data Return Path Taken. When directory mode is enabled and the directory returned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that did not have to send any snoops because the directory bit was clear.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "EventCode": "0xC",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that had to send one or more snoops because the directory bit was set.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory clears. This occurs when snoops were sent and all returned with RspI.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory sets. This occurs when a remote read transaction requests memory, bringing it to a remote cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is AckCnfltWbI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; All Requests",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Allocations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; HOM Requests",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Invalidations",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE or WbMtoS",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI",
+ "EventCode": "0x71",
+ "EventName": "UNC_H_HITME_HIT.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is AckCnfltWbI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; All Requests",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; HOM Requests",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoE or WbMtoS",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Accumulates Number of PV bits set on HitMe Cache Hits; op is WbMtoI",
+ "EventCode": "0x72",
+ "EventName": "UNC_H_HITME_HIT_PV_BITS_SET.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is AckCnfltWbI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ACKCNFLTWBI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; All Requests",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Allocations",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.ALLOCS",
+ "PerPkg": "1",
+ "UMask": "0x70",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; HOM Requests",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.HOM",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; Invalidations",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.INVALS",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdInvOwn, RdCur or InvItoE",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.READ_OR_INVITOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspI, RspIWb, RspS, RspSWb, RspCnflt or RspCnfltWbI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSP",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDI_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RsSFwd or RspSFwdWb",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.RSPFWDS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE or WbMtoS",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOE_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoI",
+ "EventCode": "0x70",
+ "EventName": "UNC_H_HITME_LOOKUP.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "EventCode": "0x1E",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "EventCode": "0x1A",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x64",
+ "EventName": "UNC_H_IOT_CTS_EAST_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0x65",
+ "EventName": "UNC_H_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IOT_CTS_WEST_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Cancelled",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.CANCELLED",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.; OSB Snoop broadcast cancelled due to D2C or Other. OSB cancel is counted when OSB local read is not allowed even when the transaction in local InItoE. It also counts D2C OSB cancel, but also includes the cases were D2C was not set in the first place for the transaction coming from the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Reads Local - Useful",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL_USEFUL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote - Useful",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE_USEFUL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; All",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from the local socket.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming ead requests. This is a good proxy for LLC Read Misses (including RFOs).",
"UMask": "0x3",
"Unit": "HA"
},
{
- "BriefDescription": "read requests to local home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Local Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the local socket. This is a good proxy for LLC Read Misses (including RFOs) from the local socket.",
"UMask": "0x1",
"Unit": "HA"
},
{
- "BriefDescription": "read requests to remote home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Remote Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the remote socket. This is a good proxy for LLC Read Misses (including RFOs) from the remote socket.",
"UMask": "0x2",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
- "UMask": "0xC",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming write requests.",
+ "UMask": "0xc",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to local home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Local Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from the local socket.",
"UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to remote home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Remote Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from remote sockets.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
"UMask": "0x8",
"Unit": "HA"
},
{
- "BriefDescription": "Conflict requests (requests for same address from multiple agents simultaneously)",
- "Counter": "0,1,2,3",
+ "BriefDescription": "HA BL Ring in Use; Clockwise",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "EventCode": "0x68",
+ "EventName": "UNC_H_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x6A",
+ "EventName": "UNC_H_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "EventCode": "0x69",
+ "EventName": "UNC_H_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "EventCode": "0x6B",
+ "EventName": "UNC_H_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Local Requests",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of reads when the snoop was on the critical path to the data return.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data beat the Snoop Responses; Remote Requests",
+ "EventCode": "0xA",
+ "EventName": "UNC_H_SNOOPS_RSP_AFTER_DATA.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of reads when the snoop was on the critical path to the data return.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; All Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; Tracked for snoops from both local and remote sockets.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Local Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles with Snoops Outstanding; Remote Requests",
+ "EventCode": "0x8",
+ "EventName": "UNC_H_SNOOP_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when one or more snoops are outstanding.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Local Requests",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Snoops Outstanding Accumulator; Remote Requests",
+ "EventCode": "0x9",
+ "EventName": "UNC_H_SNOOP_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of either the local HA tracker pool that have snoops pending in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if an HT (HomeTracker) entry is available and this occupancy is decremented when all the snoop responses have returned.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
"UMask": "0x40",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Snoop Responses Received; RspI",
"EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x20",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
"Unit": "HA"
},
{
"BriefDescription": "M line forwarded from remote cache with no writeback to memory",
- "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"ScaleUnit": "64Bytes",
"UMask": "0x4",
"Unit": "HA"
},
{
"BriefDescription": "Shared line response from remote cache",
- "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"ScaleUnit": "64Bytes",
"UMask": "0x2",
"Unit": "HA"
},
{
"BriefDescription": "Shared line forwarded from remote cache",
- "Counter": "0,1,2,3",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its currently copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
"ScaleUnit": "64Bytes",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Other",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for all other snoop responses.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its currently copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "EventCode": "0x6C",
+ "EventName": "UNC_H_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 2",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 3",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 4",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 5",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 6",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "EventCode": "0x1B",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 7",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 10",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 11",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 8",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "EventCode": "0x1C",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 9",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles Completely Used",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Counts the number of cycles when the HA tracker pool (HT) is completely used including reserved HT entries. It will not return valid count when BT is disabled.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Cycles GP Completely Used",
+ "EventCode": "0x2",
+ "EventName": "UNC_H_TRACKER_CYCLES_FULL.GP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is completely used. This can be used with edge detect to identify the number of situations when the pool became fully utilized. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, the system could be starved for RTIDs but not fill up the HA trackers. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Counts the number of cycles when the general purpose (GP) HA tracker pool (HT) is completely used. It will not return valid count when BT is disabled.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; All Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; Requests coming from both local and remote sockets.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Local Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Remote Requests",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local InvItoE Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote InvItoE Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local Read Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote Read Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Local Write Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy Accumulator; Remote Write Requests",
+ "EventCode": "0x4",
+ "EventName": "UNC_H_TRACKER_OCCUPANCY.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the local HA tracker pool in every cycle. This can be used in conjection with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA trackers are allocated as soon as a request enters the HA if a HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumulator; Local Requests",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.; This filter includes only requests coming from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy Accumulator; Remote Requests",
+ "EventCode": "0x5",
+ "EventName": "UNC_H_TRACKER_PENDING_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of transactions that have data from the memory controller until they get scheduled to the Egress. This can be used to calculate the queuing latency for two things. (1) If the system is waiting for snoops, this will increase. (2) If the system can't schedule to the Egress because of either (a) Egress Credits or (b) QPI BL IGR credits for remote requests.; This filter includes only requests coming from remote sockets.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "EventCode": "0xF",
+ "EventName": "UNC_H_TxR_AD.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.; Filter for outbound NDR transactions sent on the AD ring. NDR stands for non-data response and is generally used for completions that do not include data. AD NDR is used for transactions to remote sockets.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to the cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent directly to the requesting core.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to a remote socket over QPI.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For AK Ring",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Injection Starvation; For BL Ring",
+ "EventCode": "0x6D",
+ "EventName": "UNC_H_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
"UMask": "0x8",
"Unit": "HA"
}
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
index 824961318c1e..954e8198c7a5 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-interconnect.json
@@ -1,28 +1,3964 @@
[
{
- "BriefDescription": "QPI clock ticks",
- "Counter": "0,1,2,3",
- "EventCode": "0x14",
- "EventName": "UNC_Q_CLOCKTICKS",
+ "BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
+ "EventName": "QPI_CTL_BANDWIDTH_TX",
"PerPkg": "1",
- "Unit": "QPI LL"
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
+ "ScaleUnit": "8Bytes",
+ "UMask": "0x4",
+ "Unit": "QPI"
},
{
"BriefDescription": "Number of data flits transmitted . Derived from unc_q_txl_flits_g0.data",
- "Counter": "0,1,2,3",
"EventName": "QPI_DATA_BANDWIDTH_TX",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
"ScaleUnit": "8Bytes",
"UMask": "0x2",
- "Unit": "QPI LL"
+ "Unit": "QPI"
},
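The two flit descriptions above spell out the bandwidth arithmetic in prose: link bandwidth is flits * 80 bits / time, and data bandwidth is data flits * 8 bytes / time in L0 (4 bytes per data flit in L0p). Below is a minimal Python sketch of that arithmetic, assuming raw counts of QPI_DATA_BANDWIDTH_TX (data flits) and QPI_CTL_BANDWIDTH_TX (non-data flits) collected over a known interval; the function and variable names are illustrative, not part of the event files.

# Illustrative helper following the formulas quoted in the descriptions above.
def qpi_bandwidth(data_flits: int, nondata_flits: int, seconds: float,
                  half_width: bool = False) -> dict:
    """Return link and data bandwidth in bytes/second.

    data_flits    -- raw count of QPI_DATA_BANDWIDTH_TX (data flits)
    nondata_flits -- raw count of QPI_CTL_BANDWIDTH_TX (non-data flits)
    seconds       -- length of the measurement interval
    half_width    -- True if the link ran in L0p (half-width) mode
    """
    total_flits = data_flits + nondata_flits
    link_bw = total_flits * 80 / 8 / seconds                    # flits * 80b / time, in bytes/s
    data_bw = data_flits * (4 if half_width else 8) / seconds   # 8B per data flit in L0, 4B in L0p
    return {"link_bytes_per_sec": link_bw, "data_bytes_per_sec": data_bw}

# Example: 1e9 data flits and 2e8 control flits over a 1-second sample.
print(qpi_bandwidth(1_000_000_000, 200_000_000, 1.0))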
{
- "BriefDescription": "Number of non data (control) flits transmitted . Derived from unc_q_txl_flits_g0.non_data",
- "Counter": "0,1,2,3",
- "EventName": "QPI_CTL_BANDWIDTH_TX",
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIItoM",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; RFO",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; WbMtoI",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch TimeOut",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_MISC0.PF_TIMEOUT",
+ "PerPkg": "1",
+ "PublicDescription": "Indicates the fetch for a previous prefetch wasn't accepted by the prefetch. This happens in the case of a prefetch TimeOut",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Data Throttled",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.DATA_THROTTLE",
+ "PerPkg": "1",
+ "PublicDescription": "IRP throttled switch data",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit E or S",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit I",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Hit M",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : Miss",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpCode",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpData",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses : SnpInv",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Write Prefetches",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of write prefetches.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
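The occupancy description above suggests pairing this accumulator with the matching allocations count to derive the average latency of outbound requests (a Little's Law style ratio). A minimal sketch of that division follows; the matching inserts event is not listed in this hunk, so its raw count is passed in as a plain argument and the names are illustrative.

# Average residency of outbound requests, in IRP clocks:
# accumulated occupancy / number of allocations over the same interval.
def avg_outbound_latency_clocks(occupancy_sum: int, inserts: int) -> float:
    if inserts == 0:
        return 0.0
    return occupancy_sum / inserts

# Example: occupancy accumulated to 5,000,000 over 100,000 requests -> 50 clocks each.
print(avg_outbound_latency_clocks(5_000_000, 100_000))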
+ {
+ "BriefDescription": "Number of qfclks",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/4th the GT/s speed of the QPI link. For example, a 4GT/s link will have qfclk or 1GHz. HSX does not support dynamic link speeds, so this frequency is fixed.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Count of CTO Events",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits. Had there been enough credits, the spawn would have worked as the RBT bit was set and the RBT tag matched.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and there weren't enough Egress credits. The valid bit was set.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits AND the RBT bit was not set, but the RBT tag matched.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match, the valid bit was not set and there weren't enough Egress credits.",
+ "UMask": "0x80",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match although the valid bit was set and there were enough Egress credits.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the route-back table (RBT) specified that the transaction should not trigger a direct2core transaction. This is common for IO transactions. There were enough Egress credits and the RBT tag matched but the valid bit was not set.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and the valid bit was not set although there were enough Egress credits.",
+ "UMask": "0x40",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn was successful. There were sufficient credits, the RBT valid bit was set and there was an RBT tag match. The message was marked to spawn direct2core.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
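Since the Direct2Core umasks above are described as mutually exclusive (one success bucket plus the failure buckets), a spawn success rate can be formed by dividing the success count by the sum of all buckets. A small illustrative sketch in Python; the dictionary layout is an assumption and the keys simply mirror the umask names.

# Fraction of Direct2Core candidate packets that spawned successfully.
def d2c_success_rate(counts: dict) -> float:
    """counts maps UNC_Q_DIRECT2CORE.* umask names to raw counts."""
    success = counts.get("SUCCESS_RBT_HIT", 0)
    total = sum(counts.values())
    return success / total if total else 0.0

example = {
    "SUCCESS_RBT_HIT": 900_000,
    "FAILURE_CREDITS": 50_000,
    "FAILURE_RBT_HIT": 30_000,
    "FAILURE_MISS": 20_000,
}
print(f"{d2c_success_rate(example):.1%}")  # -> 90.0%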
+ {
+ "BriefDescription": "Cycles in L1",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during link initialization.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; Normal Operations",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during normal operation.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the HOM message class.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCB message class.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCS message class.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NDR message class.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "EventCode": "0x1E",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the SNP message class.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; DRS",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; HOM",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the HOM message class.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCB",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCB message class.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCS",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCS message class.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NDR",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NDR message class.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; SNP",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the SNP message class.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x1D",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
+ "Unit": "QPI"
+ },
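As the description above notes, the not-empty cycle count can be combined with the flit buffer occupancy accumulator to derive the average RxQ occupancy. A short sketch of that division follows; the occupancy accumulator event is referenced but not listed in this hunk, so its raw count is passed in as a plain argument.

# Average RxQ depth while non-empty:
# occupancy accumulator / UNC_Q_RxL_CYCLES_NE
# (divide by total qfclks instead for the overall average across all cycles).
def avg_rxq_occupancy(occupancy_accum: int, cycles_not_empty: int) -> float:
    if cycles_not_empty == 0:
        return 0.0
    return occupancy_accum / cycles_not_empty

print(avg_rxq_occupancy(12_000_000, 3_000_000))  # -> 4.0 entries on average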
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of flits received over QPI that do not hold protocol payload. When QPI is not in a power saving state, it continuously transmits flits across the link. When there are no protocol flits to send, it will send IDLE and NULL flits across. These flits sometimes do carry a payload, such as credit returns, but are generally not considered part of the QPI bandwidth.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data.",
+ "UMask": "0x18",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits received over QPI on the home channel.",
+ "UMask": "0x6",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits received over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request received over QPI on the home channel. This basically counts the number of remote memory requests received over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits received over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are received on the home channel.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
+ "UMask": "0xc",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits received over QPI. This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet because there were insufficient BGF credits. For details on a message class granularity, use the Egress Credit Occupancy events.",
+ "UMask": "0x40",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.GV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled because a GV transition (frequency transition) was taking place.",
+ "UMask": "0x80",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "EventCode": "0x3A",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is almost full, we block some but not all packets.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is totally full, we are not allowed to send any packets.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency.",
+ "UMask": "0x18",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits transmitted over QPI on the home channel.",
+ "UMask": "0x6",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits transmitted over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request transmitted over QPI on the home channel. This basically counts the number of remote memory requests transmitted over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits transmitted over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are transmitted on the home channel.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
+ "UMask": "0xc",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits transmitted over QPI. This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "EventCode": "0x2A",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "EventCode": "0x1F",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "EventCode": "0x2B",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "EventCode": "0x1C",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits returned.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x1B",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 10",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 11",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 12",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 13",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14_16",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 14&16",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 8",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 9",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x1F",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO_15_17",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 15&17",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 0",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 2",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 3",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 4",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 5",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 6",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 7",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA0",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCB Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCS Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0xB",
+ "EventName": "UNC_R3_IOT_BACKPRESSURE.HUB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Backpressure",
+ "EventCode": "0xB",
+ "EventName": "UNC_R3_IOT_BACKPRESSURE.SAT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0xD",
+ "EventName": "UNC_R3_IOT_CTS_HI.CTS2",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Hi",
+ "EventCode": "0xD",
+ "EventName": "UNC_R3_IOT_CTS_HI.CTS3",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0xC",
+ "EventName": "UNC_R3_IOT_CTS_LO.CTS0",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "IOT Common Trigger Sequencer - Lo",
+ "EventCode": "0xC",
+ "EventName": "UNC_R3_IOT_CTS_LO.CTS1",
+ "PerPkg": "1",
+ "PublicDescription": "Debug Mask/Match Tie-Ins",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2E",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2F",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
"UMask": "0x4",
- "Unit": "QPI LL"
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Any",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Clockwise",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ring Stop Starved; AK",
+ "EventCode": "0xE",
+ "EventName": "UNC_R3_RING_SINK_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the ringstop is in starvation (per ring)",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; DRS",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; HOM",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; NDR",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Cycles Not Empty; SNP",
+ "EventCode": "0x14",
+ "EventName": "UNC_R3_RxR_CYCLES_NE_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI VN1 Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; DRS",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; HOM",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; NDR",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Allocations; SNP",
+ "EventCode": "0x15",
+ "EventName": "UNC_R3_RxR_INSERTS_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI VN1 Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI VN1 Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI VN1 Ingress Not Empty event to calculate average occupancy or the QPI VN1 Ingress Allocations event in order to calculate average queuing latency.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R3_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For AD Ring",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Acquired; For BL Ring",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_SBO1_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2B",
+ "EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "SBo1 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2B",
+ "EventName": "UNC_R3_SBO1_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 1 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R3_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.DN_BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_AD",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK.UP_BL",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Standard (NCS).",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Bounce Control",
+ "EventCode": "0xA",
+ "EventName": "UNC_S_BOUNCE_CONTROL",
+ "PerPkg": "1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "EventName": "UNC_S_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "FaST wire asserted",
+ "EventCode": "0x9",
+ "EventName": "UNC_S_FAST_ASSERTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Event",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "EventCode": "0x1B",
+ "EventName": "UNC_S_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Event",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "EventCode": "0x1C",
+ "EventName": "UNC_S_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Event",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Event ring polarity.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x3",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "EventCode": "0x1D",
+ "EventName": "UNC_S_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. We really have two rings in HSX -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "EventCode": "0x5",
+ "EventName": "UNC_S_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_S_RING_IV_USED.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0xc",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1E",
+ "EventName": "UNC_S_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. There is only 1 IV ring in HSX. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.; Filters any polarity",
+ "UMask": "0x3",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "EventCode": "0x6",
+ "EventName": "UNC_S_RING_SINK_STARVED.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Bounces",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Credits",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Bounces",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Credits",
+ "EventCode": "0x15",
+ "EventName": "UNC_S_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress because a message (credited/bounceable) is being sent.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; AD - Bounces",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; AD - Credits",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; AK",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; BL - Bounces",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; BL - Credits",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Bypass; IV",
+ "EventCode": "0x12",
+ "EventName": "UNC_S_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Bypass the Sbo Ingress.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Bounces",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AD - Credits",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; AK",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Bounces",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; BL - Credits",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; IVF Credit",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x40",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; IV",
+ "EventCode": "0x14",
+ "EventName": "UNC_S_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Ingress cannot send a transaction onto the ring for a long period of time. In this case, the Ingress but unable to forward to Egress due to lack of credit.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AD - Bounces",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AD - Credits",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; AK",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; BL - Bounces",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; BL - Credits",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IV",
+ "EventCode": "0x13",
+ "EventName": "UNC_S_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Ingress The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AD - Bounces",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AD - Credits",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; AK",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; BL - Bounces",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; BL - Credits",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IV",
+ "EventCode": "0x11",
+ "EventName": "UNC_S_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the Sbo. The Ingress is used to queue up requests received from the ring.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.AD",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.AK",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_S_TxR_ADS_USED.BL",
+ "EventCode": "0x4",
+ "EventName": "UNC_S_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Bounces",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Credits",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Bounces",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Credits",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV",
+ "EventCode": "0x2",
+ "EventName": "UNC_S_TxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Sbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AD - Bounces",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AD - Credits",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; AK",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x10",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; BL - Bounces",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; BL - Credits",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Egress Occupancy; IV",
+ "EventCode": "0x1",
+ "EventName": "UNC_S_TxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Sbo. The egress is used to queue up requests destined for the ring.",
+ "UMask": "0x20",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_S_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "SBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_CLOCKTICKS",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; PREQ, PSMI, P2U, Thermal, PCUSMI, PMI",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
}
]
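Aside on the SBox queue events above: the paired UNC_S_RxR_INSERTS / UNC_S_RxR_OCCUPANCY (and the matching UNC_S_TxR_*) counters are the usual ingredients for derived queue metrics -- accumulated occupancy divided by elapsed uncore cycles gives the average queue depth, and divided by allocations gives the average residency (Little's law). The following is only a minimal Python sketch of that arithmetic; it is not part of the patch, and the counter values, variable names and the clocktick figure are illustrative placeholders rather than measured data.

def avg_queue_depth(occupancy_sum, cycles):
    # Occupancy events accumulate the number of queued entries once per
    # cycle, so dividing by the cycle count over the same window yields
    # the average queue depth.
    return occupancy_sum / cycles

def avg_queue_latency(occupancy_sum, inserts):
    # Little's law: accumulated occupancy divided by the number of
    # allocations gives the average time an entry spends queued,
    # expressed in uncore cycles.
    return occupancy_sum / inserts

# Illustrative placeholder counts (not measured values).
occ_ad_crd = 1_200_000   # e.g. UNC_S_RxR_OCCUPANCY.AD_CRD
ins_ad_crd = 300_000     # e.g. UNC_S_RxR_INSERTS.AD_CRD
sbox_cycles = 2_000_000  # SBox clockticks over the same interval (assumed available)

print("avg depth   :", avg_queue_depth(occ_ad_crd, sbox_cycles))
print("avg latency :", avg_queue_latency(occ_ad_crd, ins_ad_crd))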
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json
new file mode 100644
index 000000000000..bd64a8a1625f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-io.json
@@ -0,0 +1,528 @@
+[
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.ISOCH_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_R2_IIO_CREDIT.PRQ_QPI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Dn",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Up",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RING_AK_BOUNCES.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0xc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x3",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For AD Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Acquired; For BL Ring",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_SBO0_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits acquired in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For AD Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "SBo0 Credits Occupancy; For BL Ring",
+ "EventCode": "0x2A",
+ "EventName": "UNC_R2_SBO0_CREDIT_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Sbo 0 credits in use in a given cycle, per ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo0, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO0_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, AD Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No Sbo Credits; For SBo1, BL Ring",
+ "EventCode": "0x2C",
+ "EventName": "UNC_R2_STALL_NO_SBO_CREDIT.SBO1_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles Egress is stalled waiting for an Sbo credit to become available. Per Sbo, per Ring.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.DN_BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AD",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.UP_BL",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ }
+]
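The UNC_R2_RxR_CYCLES_NE, UNC_R2_RxR_INSERTS and UNC_R2_RxR_OCCUPANCY descriptions in the file above spell out the intended derived metrics: accumulated occupancy divided by not-empty cycles gives the average occupancy while the Ingress is busy, and occupancy divided by allocations gives the average queuing latency. A small Python sketch of those two ratios follows; it is not part of the patch, the sample counts are made up, and the "<queue>" placeholders stand for whichever matching umask variants are actually programmed (only some appear in this hunk).

def avg_occupancy_when_busy(occupancy_sum, cycles_not_empty):
    # Average number of queued entries during the cycles in which the
    # R2PCIe Ingress queue actually held something.
    return occupancy_sum / cycles_not_empty

def avg_queuing_latency(occupancy_sum, inserts):
    # Average number of cycles an entry spends in the Ingress
    # (Little's law: occupancy integral / arrival count).
    return occupancy_sum / inserts

# Illustrative placeholder counts (not measured values).
occ = 800_000   # UNC_R2_RxR_OCCUPANCY.<queue> accumulated occupancy
ne = 150_000    # UNC_R2_RxR_CYCLES_NE.<queue> not-empty cycles, same queue
ins = 40_000    # UNC_R2_RxR_INSERTS.<queue> allocations, same queue

print("avg occupancy while busy     :", avg_occupancy_when_busy(occ, ne))
print("avg queuing latency (cycles) :", avg_queuing_latency(occ, ins))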
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
index 66eed399724c..c005f5115722 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-memory.json
@@ -1,86 +1,2878 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Read CAS commands issued on this channel (including underfills).",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Write CAS commands issued on this channel.",
"ScaleUnit": "64Bytes",
- "UMask": "0xC",
+ "UMask": "0xc",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Read CAS commands issued on this channel (including underfills).",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the number of underfill reads that are issued by the memory controller. This will generally be about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ. While it is possible for underfills to be issed in both WMM and RMM, this event counts both.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Write CAS commands issued on this channel.",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Clockticks",
+ "EventName": "UNC_M_DCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
- "Counter": "0,1,2,3",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charges due to page misses",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of page misses. This does not include explicit precharge commands sent with CAS commands in Auto-Precharge mode. This does not include PRE commands sent as a result of the page close counter expiration.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for reads",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to read",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x4",
"Unit": "iMC"
},
{
- "BriefDescription": "Pre-charge for writes",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to write",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "RD_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE MXB write buffer occupancy",
+ "EventCode": "0x91",
+ "EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.RMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.WMM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
"UMask": "0x8",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : All Banks",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 10",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 11",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 12",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 13",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 14",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 15",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 2",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 3",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 4",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 5",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 6",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 7",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 8",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank 9",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 0 (Banks 0-3)",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 1 (Banks 4-7)",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 2 (Banks 8-11)",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "PublicDescription": "WR_CAS Access to Rank 0 : Bank Group 3 (Banks 12-15)",
+ "UMask": "0x14",
+ "Unit": "iMC"
}
]
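
Note: the RPQ/WPQ descriptions above spell out how these iMC events are meant to be
combined, e.g. average queue occupancy = occupancy accumulation count divided by the
matching not-empty cycle count. Below is a minimal Python sketch of that arithmetic
only; the perf command in the comment, the lower-case event aliases, and the
placeholder numbers are assumptions, not taken from this file, and should be checked
with perf list on the target machine.

# Average iMC read pending queue occupancy, per the event descriptions above.
def average_occupancy(occupancy_sum: int, not_empty_cycles: int) -> float:
    """Average number of queue entries over the cycles the queue held entries."""
    if not_empty_cycles == 0:
        return 0.0
    return occupancy_sum / not_empty_cycles

# Placeholder counter values; substitute readings collected with something like:
#   perf stat -a -e unc_m_rpq_occupancy,unc_m_rpq_cycles_ne -- sleep 1
rpq_occupancy_sum = 1_200_000
rpq_not_empty_cycles = 150_000

print(f"avg RPQ occupancy while non-empty: "
      f"{average_occupancy(rpq_occupancy_sum, rpq_not_empty_cycles):.2f} entries")
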
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
index dd1b95655d1d..daebf1050acb 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/uncore-power.json
@@ -1,92 +1,497 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
- "Counter": "0,1,2,3",
+ "BriefDescription": "pclk Cycles",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6A",
+ "EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6B",
+ "EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
- "Counter": "0,1,2,3",
- "EventCode": "0xA",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6C",
+ "EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6D",
+ "EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6E",
+ "EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6F",
+ "EventName": "UNC_P_CORE15_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x70",
+ "EventName": "UNC_P_CORE16_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x71",
+ "EventName": "UNC_P_CORE17_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x61",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x62",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x63",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x64",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x65",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x66",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x67",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x68",
+ "EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x69",
+ "EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x31",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3A",
+ "EventName": "UNC_P_DEMOTIONS_CORE10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3B",
+ "EventName": "UNC_P_DEMOTIONS_CORE11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3C",
+ "EventName": "UNC_P_DEMOTIONS_CORE12",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3D",
+ "EventName": "UNC_P_DEMOTIONS_CORE13",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3E",
+ "EventName": "UNC_P_DEMOTIONS_CORE14",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x3F",
+ "EventName": "UNC_P_DEMOTIONS_CORE15",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x40",
+ "EventName": "UNC_P_DEMOTIONS_CORE16",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x41",
+ "EventName": "UNC_P_DEMOTIONS_CORE17",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x33",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x34",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x35",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x36",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x37",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x38",
+ "EventName": "UNC_P_DEMOTIONS_CORE8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x39",
+ "EventName": "UNC_P_DEMOTIONS_CORE9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xB",
+ "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xC",
+ "EventName": "UNC_P_FREQ_BAND1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xD",
+ "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xE",
+ "EventName": "UNC_P_FREQ_BAND3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs of the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
"EventCode": "0x74",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This cannot be filtered by thread ID. One can also use it with the occupancy counter that monitors the number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C1E",
+ "EventCode": "0x4E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C1E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C1E. This event can be used in conjunction with edge detect to count C1E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C7 State Residency",
+ "EventCode": "0x2E",
+ "EventName": "UNC_P_PKG_RESIDENCY_C7_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C7. This event can be used in conjunction with edge detect to count C7 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_UFS_TRANSITIONS_NO_CHANGE",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_UFS_TRANSITIONS_NO_CHANGE",
+ "PerPkg": "1",
+ "PublicDescription": "Ring GV with same final and initial frequency",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_UFS_TRANSITIONS_RING_GV",
+ "PerPkg": "1",
+ "PublicDescription": "Ring GV with same final and initial frequency",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
"Unit": "PCU"
}
]
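The PCU entries above use the same flat schema as the rest of the pmu-events JSON files: each event is an object of string-valued fields (EventCode, EventName, PerPkg, Unit, PublicDescription, ...). As a quick illustration of that layout -- a sketch for this page only, not part of the patch -- the Python below indexes a file such as the rewritten uncore-power.json by Unit and EventCode, which makes it easy to see, for example, that the three UNC_P_POWER_STATE_OCCUPANCY.* aliases all share event code 0x80. The file name passed on the command line is an assumption.

#!/usr/bin/env python3
# Illustrative sketch only; not part of the patch above.
# Index a pmu-events JSON file (e.g. .../haswellx/uncore-power.json) by
# (Unit, EventCode) and list the event aliases that share each code.
import json
import sys
from collections import defaultdict

def index_by_code(path):
    with open(path) as f:
        events = json.load(f)              # each file is a flat JSON array
    by_code = defaultdict(list)
    for ev in events:
        unit = ev.get("Unit", "core")      # core events carry no "Unit" field
        by_code[(unit, ev["EventCode"].lower())].append(ev["EventName"])
    return by_code

if __name__ == "__main__":
    for (unit, code), names in sorted(index_by_code(sys.argv[1]).items()):
        print(f"{unit} {code}: {', '.join(names)}")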
diff --git a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
index 168df552b1a8..87a4ec1ee7d7 100644
--- a/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/haswellx/virtual-memory.json
@@ -1,484 +1,386 @@
[
{
- "EventCode": "0x08",
- "UMask": "0x1",
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
+ "EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in all TLB levels that cause a page walk of any page size.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
"EventCode": "0x08",
- "UMask": "0x2",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Completed page walks due to demand load misses that caused 4K page walks in any TLB levels.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
+ "PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
"EventCode": "0x08",
- "UMask": "0x4",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Completed page walks due to demand load misses that caused 2M/4M page walks in any TLB levels.",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M)",
"EventCode": "0x08",
- "UMask": "0x8",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
+ "PublicDescription": "This event counts load operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K)",
"EventCode": "0x08",
- "UMask": "0xe",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
+ "PublicDescription": "This event counts load operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"PublicDescription": "Completed page walks in any TLB of any page size due to demand load misses.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (1G)",
"EventCode": "0x08",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (2M/4M).",
"EventCode": "0x08",
- "UMask": "0x20",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (4K)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_4K",
- "PublicDescription": "This event counts load operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to demand load misses that caused 2M/4M page walks in any TLB levels.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes (4K).",
"EventCode": "0x08",
- "UMask": "0x40",
- "BriefDescription": "Load misses that miss the DTLB and hit the STLB (2M)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT_2M",
- "PublicDescription": "This event counts load operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to demand load misses that caused 4K page walks in any TLB levels.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
"EventCode": "0x08",
- "UMask": "0x60",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x08",
- "UMask": "0x80",
- "BriefDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
- "PublicDescription": "DTLB demand load misses with low part of linear-to-physical address translation missed.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
+ "EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
"EventCode": "0x49",
- "UMask": "0x2",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 4K page structure.",
+ "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
+ "PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
"EventCode": "0x49",
- "UMask": "0x4",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 2M/4M page structure.",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M)",
"EventCode": "0x49",
- "UMask": "0x8",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks. (1G)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
+ "PublicDescription": "This event counts store operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K)",
"EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
+ "PublicDescription": "This event counts store operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
"EventCode": "0x49",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Completed page walks due to store miss in any TLB levels of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks. (1G)",
"EventCode": "0x49",
- "UMask": "0x20",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (4K)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_4K",
- "PublicDescription": "This event counts store operations from a 4K page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks (2M/4M)",
"EventCode": "0x49",
- "UMask": "0x40",
- "BriefDescription": "Store misses that miss the DTLB and hit the STLB (2M)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT_2M",
- "PublicDescription": "This event counts store operations from a 2M page that miss the first DTLB level but hit the second and do not cause page walks.",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 2M/4M page structure.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (4K)",
"EventCode": "0x49",
- "UMask": "0x60",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to store misses in one or more TLB levels of 4K page structure.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
"EventCode": "0x49",
- "UMask": "0x80",
- "BriefDescription": "DTLB store misses with low part of linear-to-physical address translation missed",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
- "PublicDescription": "DTLB store misses with low part of linear-to-physical address translation missed.",
+ "EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB store misses.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x4f",
- "UMask": "0x10",
"BriefDescription": "Cycle count for an Extended Page table walk.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x4f",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xae",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x85",
- "UMask": "0x1",
"BriefDescription": "Misses at all ITLB levels that cause page walks",
- "Counter": "0,1,2,3",
+ "EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Misses in ITLB that causes a page walk of any page size.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
"EventCode": "0x85",
- "UMask": "0x2",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Completed page walks due to misses in ITLB 4K page entries.",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "ITLB misses that hit STLB. No page walk.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x60"
},
{
+ "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M)",
"EventCode": "0x85",
- "UMask": "0x4",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Completed page walks due to misses in ITLB 2M/4M page entries.",
+ "EventName": "ITLB_MISSES.STLB_HIT_2M",
+ "PublicDescription": "ITLB misses that hit STLB (2M).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K)",
"EventCode": "0x85",
- "UMask": "0x8",
- "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "EventName": "ITLB_MISSES.STLB_HIT_4K",
+ "PublicDescription": "ITLB misses that hit STLB (4K).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x85",
- "UMask": "0xe",
"BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "Counter": "0,1,2,3",
+ "EventCode": "0x85",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"PublicDescription": "Completed page walks in ITLB of any page size.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Store miss in all TLB levels causes a page walk that completes. (1G)",
"EventCode": "0x85",
- "UMask": "0x10",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_DURATION",
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"EventCode": "0x85",
- "UMask": "0x20",
- "BriefDescription": "Core misses that miss the DTLB and hit the STLB (4K)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT_4K",
- "PublicDescription": "ITLB misses that hit STLB (4K).",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Completed page walks due to misses in ITLB 2M/4M page entries.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"EventCode": "0x85",
- "UMask": "0x40",
- "BriefDescription": "Code misses that miss the DTLB and hit the STLB (2M)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT_2M",
- "PublicDescription": "ITLB misses that hit STLB (2M).",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Completed page walks due to misses in ITLB 4K page entries.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks",
"EventCode": "0x85",
- "UMask": "0x60",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
- "PublicDescription": "ITLB misses that hit STLB. No page walk.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xae",
- "UMask": "0x1",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB.ITLB_FLUSH",
- "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by ITLB misses.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xBC",
- "UMask": "0x11",
"BriefDescription": "Number of DTLB page walker hits in the L1+FB",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L1",
"PublicDescription": "Number of DTLB page walker loads that hit in the L1+FB.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x11"
},
{
- "EventCode": "0xBC",
- "UMask": "0x12",
"BriefDescription": "Number of DTLB page walker hits in the L2",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBC",
"EventName": "PAGE_WALKER_LOADS.DTLB_L2",
"PublicDescription": "Number of DTLB page walker loads that hit in the L2.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x12"
},
{
- "EventCode": "0xBC",
- "UMask": "0x14",
"BriefDescription": "Number of DTLB page walker hits in the L3 + XSNP",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"Errata": "HSD25",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_L3",
"PublicDescription": "Number of DTLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x14"
},
{
- "EventCode": "0xBC",
- "UMask": "0x18",
"BriefDescription": "Number of DTLB page walker hits in Memory",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"Errata": "HSD25",
+ "EventCode": "0xBC",
+ "EventName": "PAGE_WALKER_LOADS.DTLB_MEMORY",
"PublicDescription": "Number of DTLB page walker loads from memory.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
"EventCode": "0xBC",
- "UMask": "0x21",
- "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
"EventCode": "0xBC",
- "UMask": "0x22",
- "BriefDescription": "Number of ITLB page walker hits in the L2",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L2.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x42"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L3.",
"EventCode": "0xBC",
- "UMask": "0x24",
- "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
- "Errata": "HSD25",
- "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x44"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
"EventCode": "0xBC",
- "UMask": "0x28",
- "BriefDescription": "Number of ITLB page walker hits in Memory",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
- "Errata": "HSD25",
- "PublicDescription": "Number of ITLB page walker loads from memory.",
+ "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x48"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
"EventCode": "0xBC",
- "UMask": "0x41",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L1 and FB.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"EventCode": "0xBC",
- "UMask": "0x42",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L2.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
"EventCode": "0xBC",
- "UMask": "0x44",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in the L3.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x84"
},
{
+ "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in memory.",
"EventCode": "0xBC",
- "UMask": "0x48",
- "BriefDescription": "Counts the number of Extended Page Table walks from the DTLB that hit in memory.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_DTLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_MEMORY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x88"
},
{
+ "BriefDescription": "Number of ITLB page walker hits in the L1+FB",
"EventCode": "0xBC",
- "UMask": "0x81",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L1 and FB.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L1",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L1",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L1+FB.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Number of ITLB page walker hits in the L2",
"EventCode": "0xBC",
- "UMask": "0x82",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L2",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L2",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L2.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x22"
},
{
+ "BriefDescription": "Number of ITLB page walker hits in the L3 + XSNP",
+ "Errata": "HSD25",
"EventCode": "0xBC",
- "UMask": "0x84",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in the L2.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_L3",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_L3",
+ "PublicDescription": "Number of ITLB page walker loads that hit in the L3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Number of ITLB page walker hits in Memory",
+ "Errata": "HSD25",
"EventCode": "0xBC",
- "UMask": "0x88",
- "BriefDescription": "Counts the number of Extended Page Table walks from the ITLB that hit in memory.",
- "Counter": "0,1,2,3",
- "EventName": "PAGE_WALKER_LOADS.EPT_ITLB_MEMORY",
+ "EventName": "PAGE_WALKER_LOADS.ITLB_MEMORY",
+ "PublicDescription": "Number of ITLB page walker loads from memory.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x28"
},
{
- "EventCode": "0xBD",
- "UMask": "0x1",
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xBD",
- "UMask": "0x20",
"BriefDescription": "STLB flush attempts",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Count number of STLB flush attempts.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
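The virtual-memory.json hunk above is largely a mechanical rewrite: the per-counter scheduling fields (Counter, CounterHTOff, and in later files CollectPEBSRecord, PEBScounters and L1_Hit_Indication) are dropped, and the remaining fields are re-emitted in alphabetical order, so BriefDescription comes first and UMask last. Some descriptions and SampleAfterValue values also change, so the sketch below only covers the formatting side; it is a minimal illustration under the field list inferred from these hunks, not the converter actually used to generate the patch.

#!/usr/bin/env python3
# Illustrative normalization sketch; not the tool used to produce this patch.
# Drop the scheduling-related fields and re-emit each event with its fields
# sorted alphabetically, matching the field order visible in the new hunks.
import json
import sys

# Field set inferred from the hunks above; treat it as an assumption.
DROPPED = {"Counter", "CounterHTOff", "CollectPEBSRecord", "PEBScounters",
           "L1_Hit_Indication"}

def normalize(path):
    with open(path) as f:
        events = json.load(f)
    kept = [{k: v for k, v in ev.items() if k not in DROPPED} for ev in events]
    # sort_keys=True reproduces the alphabetical ordering (BriefDescription
    # first, UMask last); the indent width is approximate.
    json.dump(kept, sys.stdout, indent=4, sort_keys=True)
    print()

if __name__ == "__main__":
    normalize(sys.argv[1])   # e.g. .../haswellx/virtual-memory.json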
diff --git a/tools/perf/pmu-events/arch/x86/icelake/cache.json b/tools/perf/pmu-events/arch/x86/icelake/cache.json
index 3529fc338c17..a9174a0837f0 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/cache.json
@@ -1,552 +1,876 @@
[
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read miss L2, no rejects"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "EventCode": "0xf2",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions"
+ "UMask": "0xe4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "BriefDescription": "Demand Data Read requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache"
+ "UMask": "0xe1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "BriefDescription": "Demand requests that miss L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x28",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.SWPF_MISS",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "SW prefetch requests that miss L2 cache."
+ "UMask": "0x27"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "BriefDescription": "Demand requests to L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Counts demand requests to L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache"
+ "UMask": "0xe7"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "BriefDescription": "RFO requests to L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache"
+ "UMask": "0xe2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "PEBScounters": "0,1,2,3",
"EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads."
+ "UMask": "0xc4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. This event accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions.",
+ "BriefDescription": "L2 cache misses when fetching instructions",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc8",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.SWPF_HIT",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "SW prefetch requests that hit L2 cache."
+ "UMask": "0x24"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe1",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests"
+ "UMask": "0xc1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe2",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache"
+ "UMask": "0x21"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of L2 code requests.",
+ "BriefDescription": "All requests that miss L2 cache. This event is not supported on ICL and ICX products, only supported on RKL products.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe4",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts all requests that miss L2 cache. This event is not supported on ICL and ICX products, only supported on RKL products.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests"
+ "UMask": "0x3f"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand requests to L2 cache.",
+ "BriefDescription": "All L2 requests. This event is not supported on ICL and ICX products, only supported on RKL products.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all L2 requests. This event is not supported on ICL and ICX products, only supported on RKL products.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache"
+ "UMask": "0xff"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of L1D misses that are outstanding"
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1"
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailablability."
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailablability.",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.L2_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources."
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of cache lines replaced in L1 data cache."
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1"
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore"
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1"
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore"
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM"
+ "UMask": "0x41"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads"
+ "UMask": "0x42"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Any memory transaction that reached the SQ."
+ "UMask": "0x11"
},
{
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions that true miss the STLB.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that miss the STLB.",
- "Data_LA": "1"
+ "UMask": "0x12"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired store instructions that true miss the STLB.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that miss the STLB.",
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with locked access.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions with locked access.",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
- "Data_LA": "1"
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions for loads.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load instructions.",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts all retired store instructions. This event account for SW prefetch instructions and PREFETCHW instruction for stores.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store instructions.",
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
"PEBS": "1",
- "CollectPEBSRecord": "2",
"PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "Data_LA": "1"
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "Data_LA": "1"
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "Data_LA": "1"
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "Data_LA": "1"
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
- "EventCode": "0xd1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "SampleAfterValue": "100007",
- "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
- "Data_LA": "1"
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "EventCode": "0xd2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "Data_LA": "1"
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "EventCode": "0xd2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "Data_LA": "1"
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
- "EventCode": "0xd2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
- "Data_LA": "1"
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
- "EventCode": "0xd2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "PEBScounters": "0,1,2,3",
- "EventName": "L2_LINES_IN.ALL",
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0400",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles for which the thread is active and the superQ cannot take any more entries.",
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C2380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop hit in another core, data forwarding is not required.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent but no other cores had the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was not needed to satisfy the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that hit a cacheline in the L3 where a snoop was sent.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_HIT.SNOOP_SENT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1E003C8000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that hit a cacheline in the L3 where a snoop was sent or not.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_HIT.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC03C0800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts memory transactions sent to the uncore.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions sent to the uncore including requests initiated by the core, all L3 prefetches, reads resulting from page walks, and snoop responses.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding data read requests pending.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding data read requests pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding data read request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Cycles where at least 1 outstanding data read request is pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding Demand RFO request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Cycles where at least 1 outstanding Demand RFO request is pending. RFOs are initiated by a core as part of a data store operation. Demand RFO requests include RFOs, locks, and ItoM transactions. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles the queue waiting for offcore responses is full.",
+ "EventCode": "0xf4",
"EventName": "SQ_MISC.SQ_FULL",
+ "PublicDescription": "Counts the cycles for which the thread is active and the queue waiting for responses from the uncore cannot take any more entries.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles the thread is active and superQ cannot take any more entries."
+ "UMask": "0x4"
}
-]
\ No newline at end of file
+]
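
The OFFCORE_REQUESTS and OFFCORE_REQUESTS_OUTSTANDING descriptions above suggest deriving the average uncore latency of demand data reads by dividing the per-cycle occupancy count by the number of completed requests. A minimal sketch of that calculation follows, assuming a Linux perf that resolves these events under their lowercase names as printed by `perf list`; the helper function and workload are illustrative only, not part of the patch.

    # Minimal sketch (assumption: perf exposes these events by lowercase name):
    # estimate average demand data read latency, in core cycles,
    # as outstanding-occupancy / completed requests.
    import subprocess

    EVENTS = ["offcore_requests_outstanding.demand_data_rd",
              "offcore_requests.demand_data_rd"]

    def avg_demand_read_latency_cycles(workload):
        # `perf stat -x,` writes CSV records to stderr: value,unit,event-name,...
        res = subprocess.run(
            ["perf", "stat", "-x", ",", "-e", ",".join(EVENTS), "--", *workload],
            text=True, capture_output=True, check=True)
        counts = {}
        for line in res.stderr.splitlines():
            parts = line.split(",")
            if len(parts) >= 3 and parts[2] in EVENTS:
                try:
                    counts[parts[2]] = float(parts[0])
                except ValueError:  # e.g. "<not counted>"
                    pass
        # Each cycle the outstanding event adds the number of in-flight reads,
        # so its total equals requests * average residency in cycles.
        return counts[EVENTS[0]] / counts[EVENTS[1]]

    if __name__ == "__main__":
        print(avg_demand_read_latency_cycles(["sleep", "1"]))

The ratio works because the outstanding counter accumulates the number of in-flight reads every cycle, so its total is the request count multiplied by the average time each request stays outstanding.
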
diff --git a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
index 594c5551f610..85c26c889088 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/floating-point.json
@@ -1,102 +1,105 @@
[
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts all microcode Floating Point assists.",
- "EventCode": "0xC1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Counts all microcode FP assists.",
+ "EventCode": "0xc1",
"EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all microcode FP assists.",
- "CounterMask": "1"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
- "EventCode": "0xc7",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
"EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3,4,5,6,7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
"EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
- "PEBScounters": "0,1,2,3,4,5,6,7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xc7",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
"EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 RANGE FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element."
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x60"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc"
}
-]
\ No newline at end of file
+]
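
The FP_ARITH_INST_RETIRED descriptions above state how many floating-point operations each count represents (1 for scalar, 2 for 128-bit packed double, up to 16 for 512-bit packed single, with FMA-class instructions already counted twice by the event itself). A small sketch, not part of the patch, that turns raw counts collected by any means into a FLOP total using exactly those weights:

    # Minimal sketch: convert raw FP_ARITH_INST_RETIRED.* counts into a total
    # FLOP count using the per-count weights stated in the event descriptions.
    # FM(N)ADD/SUB already count twice at the event level, so no extra factor.
    FLOPS_PER_COUNT = {
        "FP_ARITH_INST_RETIRED.SCALAR_SINGLE": 1,
        "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1,
        "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 2,
        "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 4,
        "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 8,
        "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE": 8,
        "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": 16,
    }

    def total_flops(counts):
        """counts: mapping of event name -> raw counter value."""
        return sum(FLOPS_PER_COUNT[name] * value
                   for name, value in counts.items()
                   if name in FLOPS_PER_COUNT)

    # Example: 1e9 scalar double ops plus 1e8 256-bit packed double ops.
    print(total_flops({
        "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1_000_000_000,
        "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 100_000_000,
    }))

Dividing the result by elapsed seconds gives FLOP/s; the caveat in the descriptions that the DAZ and FTZ flags in MXCSR need to be set when using these events still applies.
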
diff --git a/tools/perf/pmu-events/arch/x86/icelake/frontend.json b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
index 9c3cfbfcec0f..3e3d2b002170 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/frontend.json
@@ -1,424 +1,353 @@
[
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
- "CounterMask": "5"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
- "CounterMask": "5"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1"
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
"CounterMask": "1",
- "EdgeDetect": "1"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to IDQ while MS is busy"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "PEBScounters": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
- "CounterMask": "1"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_16B.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
- "EventCode": "0x83",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
- "EventCode": "0x83",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
- "EventCode": "0x83",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss."
+ "EdgeDetect": "1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "Counts the number of Decode Stream Buffer (DSB a.k.a. Uop Cache)-to-MITE speculative transitions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9c",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
- "CounterMask": "5"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
- "EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
- "CounterMask": "1"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "EventCode": "0xab",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "DSB-to-MITE switch true penalty cycles."
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
"PEBS": "1",
- "CollectPEBSRecord": "2",
"PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x11",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced DSB miss.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
"PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x12",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x13",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x14",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x15",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x508006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x500206",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x501006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x500406",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x500806",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x510006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x501006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
- "EventCode": "0xC6",
- "MSRValue": "0x502006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x502006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x504006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x508006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x520006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x510006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x504006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x520006",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x500806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
- "EventCode": "0xC6",
- "MSRValue": "0x100206",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1"
+ "UMask": "0x1"
+ },
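Read together, the FRONTEND_RETIRED.LATENCY_GE_* entries above suggest a simple pattern for the MSR at index 0x3F7: the MSRValue appears to carry the cycle threshold in bits [19:8] over a fixed 0x500006 base, with the *_BUBBLES_GE_1 variant (MSRValue 0x100206) fitting the same shape over a 0x100006 base. A minimal Python sketch of that assumed encoding; the helper name is illustrative and not part of the perf tree:

    def latency_ge_msr_value(threshold_cycles, base=0x500006):
        """Assumed encoding inferred from the MSRValue fields above."""
        return base | (threshold_cycles << 8)

    for cycles in (1, 2, 4, 8, 16, 32, 64, 128, 256, 512):
        # Reproduces e.g. 0x500806 for LATENCY_GE_8 and 0x501006 for LATENCY_GE_16.
        print(f"FRONTEND_RETIRED.LATENCY_GE_{cycles}: MSRValue {latency_ge_msr_value(cycles):#x}")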
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CounterMask": "5",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
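For readers wiring these events up by hand, the EventCode/UMask/CounterMask/EdgeDetect/Invert fields above map onto the core PMU's format terms in perf's event syntax. A small illustrative sketch, assuming the "cpu" PMU name used on these parts; the helper is not part of the perf tree:

    def to_perf_event_string(event):
        """Build a perf event string from the JSON fields used above (illustrative)."""
        terms = [f"event={event['EventCode']}", f"umask={event['UMask']}"]
        if "CounterMask" in event:
            terms.append(f"cmask={event['CounterMask']}")
        if event.get("EdgeDetect") == "1":
            terms.append("edge=1")
        if event.get("Invert") == "1":
            terms.append("inv=1")
        return "cpu/" + ",".join(terms) + "/"

    idq_dsb_cycles_any = {"EventCode": "0x79", "UMask": "0x8", "CounterMask": "1"}
    print(to_perf_event_string(idq_dsb_cycles_any))
    # -> cpu/event=0x79,umask=0x8,cmask=1/  (usable as: perf stat -e <string>)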
diff --git a/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
new file mode 100644
index 000000000000..f45ae3483df4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
@@ -0,0 +1,1518 @@
+[
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
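As a quick sanity check on the smi_cycles expression above, the same arithmetic with made-up counter values (msr@aperf@, cycles and msr@smi@ become plain variables here; the numbers are hypothetical):

    aperf, cycles, smi = 1_000_000_000, 985_000_000, 3  # hypothetical counts
    smi_cycles = (aperf - cycles) / aperf if smi > 0 else 0
    print(f"smi_cycles = {smi_cycles:.2%}")  # 1.50%; the MetricThreshold above fires at 10%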
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * ASSISTS.ANY / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=1\\,edge@ / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
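At their core, tma_backend_bound above and tma_frontend_bound later in this file are one topdown-* slot count divided by the total slots, with small correction terms (INT_MISC recovery cycles, INT_MISC.UOP_DROPPING) added on top, while tma_bad_speculation is the remainder. A simplified sketch with hypothetical counts that drops those correction terms:

    # Simplified level-1 breakdown with hypothetical slot counts; the real
    # MetricExpr entries in this file add correction terms omitted here.
    slots = {"fe-bound": 3.1e9, "bad-spec": 0.8e9, "retiring": 4.6e9, "be-bound": 3.5e9}
    total = sum(slots.values())
    for name, count in slots.items():
        print(f"topdown-{name}: {count / total:.1%}")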
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
+ "MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_branch_instructions",
+ "MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(29 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 23.5 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "23.5 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
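Plugging hypothetical counts into the tma_dsb expression above ties it back to the IDQ.DSB_CYCLES_ANY and IDQ.DSB_CYCLES_OK events added earlier in this patch (CounterMask 1 vs 5 on the same EventCode/UMask); the numbers below are made up:

    # Hypothetical counts; per the MetricExpr above, tma_dsb reflects cycles
    # where the DSB delivered uops but fewer than the optimal number.
    dsb_cycles_any, dsb_cycles_ok, core_clks = 6.0e8, 4.5e8, 1.0e9
    tma_dsb = (dsb_cycles_any - dsb_cycles_ok) / core_clks / 2
    print(f"tma_dsb = {tma_dsb:.1%}")  # flagged in this file when > 15%, given its parents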
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "32.5 * tma_info_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "(5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_512b",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=1@) / IDQ.MITE_UOPS",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_big_code",
+ "MetricThreshold": "tma_info_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_branching_overhead"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_mispredictions, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_branching_overhead",
+ "MetricThreshold": "tma_info_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_big_code"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_callret"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_code_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_nt"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_tk"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_core_bound_likely",
+ "MetricThreshold": "tma_info_core_bound_likely > 0.5"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 5 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_misses, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_dsb_misses",
+ "MetricThreshold": "tma_info_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_fb_hpki"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_fetch_upc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_ic_misses",
+ "MetricThreshold": "tma_info_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_instruction_fetch_bw > 20"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx512",
+ "MetricThreshold": "tma_info_iparith_avx512 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_ntaken",
+ "MetricThreshold": "tma_info_ipmisp_cond_ntaken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_taken",
+ "MetricThreshold": "tma_info_ipmisp_cond_taken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_ret",
+ "MetricThreshold": "tma_info_ipmisp_ret < 500"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_ipswpf",
+ "MetricThreshold": "tma_info_ipswpf < 100"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 11",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_jump"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (OFFCORE_REQUESTS.ALL_DATA_RD - OFFCORE_REQUESTS.DEMAND_DATA_RD + L2_RQSTS.ALL_DEMAND_MISS + L2_RQSTS.SWPF_MISS) / tma_info_instructions",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l3_miss_latency"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_load_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
+ "MetricExpr": "LSD.UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "Fed;LSD",
+ "MetricName": "tma_info_lsd_coverage"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_memory_bandwidth",
+ "MetricThreshold": "tma_info_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_memory_data_tlbs",
+ "MetricThreshold": "tma_info_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_memory_latency",
+ "MetricThreshold": "tma_info_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_mispredictions",
+ "MetricThreshold": "tma_info_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - (tma_info_cond_nt + tma_info_cond_tk + tma_info_callret + tma_info_jump)",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_other_branches"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0",
+ "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license0_utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
+ "MetricExpr": "CORE_POWER.LVL1_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license1_utilization",
+ "MetricThreshold": "tma_info_power_license1_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
+ "MetricExpr": "CORE_POWER.LVL2_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license2_utilization",
+ "MetricThreshold": "tma_info_power_license2_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "(tma_info_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
+ "MetricGroup": "SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots_utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_store_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "tma_retiring * tma_info_slots / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_slots / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 7.5"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "9 * tma_info_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_2_3 / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit",
+ "MetricExpr": "(LSD.CYCLES_ACTIVE - LSD.CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_lsd",
+ "MetricThreshold": "tma_lsd > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "tma_retiring * tma_info_slots / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_info_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline",
+ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group",
+ "MetricName": "tma_mite_4wide",
+ "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_4_9 + UOPS_DISPATCHED.PORT_7_8) / (4 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores",
+ "MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
+ "MetricName": "tma_streaming_stores",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "10 * BACLEARS.ANY / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
+ }
+]
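
Side note (not part of the patch): the metric entries above are plain JSON records, so the array can be inspected mechanically once the file is in place. Below is a minimal Python sketch, assuming a local copy of the metrics array saved under the hypothetical name icl-metrics.json; it only relies on the MetricGroup and MetricName fields visible in this hunk, grouping metrics by their Topdown level (TopdownL1 ... TopdownL6).

    # Sketch: group TMA metrics by Topdown level from a pmu-events metrics JSON.
    # Assumes a local copy of the array above, saved as "icl-metrics.json" (hypothetical name).
    import json
    import re
    from collections import defaultdict

    with open("icl-metrics.json") as f:
        metrics = json.load(f)

    by_level = defaultdict(list)
    for m in metrics:
        groups = m.get("MetricGroup", "").split(";")
        # The Topdown level is encoded as e.g. "TopdownL3" inside MetricGroup.
        level = next((g for g in groups if re.fullmatch(r"TopdownL\d", g)), "other")
        by_level[level].append(m["MetricName"])

    for level in sorted(by_level):
        print(level, len(by_level[level]), ", ".join(sorted(by_level[level])[:5]))

A listing like this makes it easy to eyeball, per level, which tma_* metrics the update adds or renames.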
diff --git a/tools/perf/pmu-events/arch/x86/icelake/memory.json b/tools/perf/pmu-events/arch/x86/icelake/memory.json
index f158366b9dd6..e8d2ec1c029b 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/memory.json
@@ -1,410 +1,394 @@
[
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address"
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Speculatively counts the number Transactional Synchronization Extensions (TSX) Aborts due to a data capacity limitation for transactional writes.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculatively counts the number TSX Aborts due to a data capacity limitation for transactional writes."
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PublicDescription": "Counts the number of times HLE abort was triggered.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero."
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer"
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer."
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "PEBScounters": "0,1,2,3",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero."
+ "BriefDescription": "Number of times an HLE execution successfully committed",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times HLE commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TX_EXEC.MISC2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region"
+ "BriefDescription": "Number of times an HLE execution started.",
+ "EventCode": "0xc8",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TX_EXEC.MISC3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded"
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "CounterMask": "2"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "CounterMask": "6"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests who miss L3 cache"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
- "EventCode": "0xc3",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00004",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears due to memory ordering conflicts."
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we entered an HLE region. Does not count nested transactions.",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution started."
+ "BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times HLE commit succeeded.",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution successfully committed",
- "Data_LA": "1"
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times HLE abort was triggered.",
- "EventCode": "0xc8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one)."
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts)."
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.)."
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "HLE_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts)."
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC08000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.START",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution started."
+ "BriefDescription": "Counts streaming stores that was not supplied by the L3 cache.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC00800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times RTM commit succeeded.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution successfully committed"
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "BriefDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known to have missed the L3 cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
"EventCode": "0xc9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
"EventName": "RTM_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted.",
- "Data_LA": "1"
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)"
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
"EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions"
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type"
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RTM_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)"
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
- "MSRIndex": "0x3F6",
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
- "TakenAlone": "1"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x8",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "50021",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
- "TakenAlone": "1"
+ "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x10",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "20011",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
- "TakenAlone": "1"
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x20",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
- "TakenAlone": "1"
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x40",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
- "TakenAlone": "1"
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x80",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
- "TakenAlone": "1"
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x100",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
- "TakenAlone": "1"
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xcd",
- "MSRValue": "0x200",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
- "TakenAlone": "1"
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Counts the number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Counts the number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/other.json b/tools/perf/pmu-events/arch/x86/icelake/other.json
index f8dfdb847224..cfb590632918 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/other.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/other.json
@@ -1,121 +1,242 @@
[
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the Top-down Microarchitecture Analysis method. This event is counted on a designated fixed counter (Fixed Counter 3) and is an architectural event.",
- "Counter": "35",
- "UMask": "0x4",
- "PEBScounters": "35",
- "EventName": "TOPDOWN.SLOTS",
- "SampleAfterValue": "10000003",
- "BriefDescription": "Counts the number of available slots for an unhalted logical processor."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "PEBScounters": "0,1,2,3",
"EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
"SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule."
+ "UMask": "0x7"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "PEBScounters": "0,1,2,3",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule."
+ "UMask": "0x18"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
- "EventCode": "0x32",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.NTA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHNTA instructions executed."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
- "EventCode": "0x32",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHT0 instructions executed."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
- "EventCode": "0x32",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T1_T2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
- "EventCode": "0x32",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHW instructions executed."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
- "EventCode": "0xa4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TOPDOWN.SLOTS_P",
- "SampleAfterValue": "10000003",
- "BriefDescription": "Counts the number of available slots for an unhalted logical processor."
- },
- {
- "CollectPEBSRecord": "2",
- "EventCode": "0xA4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
- "SampleAfterValue": "10000003",
- "BriefDescription": "Issue slots where no uops were being issued due to lack of back end resources."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
- "EventCode": "0xc1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x7",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "ASSISTS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware."
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch data reads (which bring data to L2) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch RFOs (which bring data to L2) that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that DRAM supplied the request.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
index 6d8311e634aa..154fee4b60fb 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/pipeline.json
@@ -1,892 +1,801 @@
[
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
- "Counter": "32",
- "UMask": "0x1",
- "PEBScounters": "32",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event"
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
},
{
- "PEBS": "2",
- "CollectPEBSRecord": "3",
- "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
- "Counter": "32",
- "UMask": "0x1",
- "PEBScounters": "32",
- "EventName": "INST_RETIRED.PREC_DIST",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution"
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "33",
- "UMask": "0x2",
- "PEBScounters": "33",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state"
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "Counter": "34",
- "UMask": "0x3",
- "PEBScounters": "34",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state."
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when: a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations, c. preceding lock RMW operations are not forwarded, d. store has the no-forward bit set (uncacheable/page-split/masked stores), e. all-blocking stores are used (mostly, fences and port I/O), and others. The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded."
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use."
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
"SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address."
+ "UMask": "0x80"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread"
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x3",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
- "CounterMask": "1"
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
- "EventCode": "0x0d",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path."
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_ISSUED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops that RAT issues to RS"
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "50021"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
- "CounterMask": "1"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x11"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "EventCode": "0x14",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x9",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "ARITH.DIVIDER_ACTIVE",
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "50021",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
- "CounterMask": "1"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"EventCode": "0x3C",
- "Counter": "0,1,2,3,4,5,6,7",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state"
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"EventCode": "0x3C",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
"SampleAfterValue": "25003",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted."
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "25003",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted."
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
- "EventCode": "0x4c",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LOAD_HIT_PREFETCH.SWPF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch."
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
- "EventCode": "0x5E",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread"
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RS_EVENTS.EMPTY_END",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
"CounterMask": "1",
- "EdgeDetect": "1"
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction."
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xa1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 0"
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xa1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 1"
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
- "EventCode": "0xa1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_2_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 2 and 3"
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x14"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
- "EventCode": "0xa1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_4_9",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 4 and 9"
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xa1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_5",
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 5"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xa1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_6",
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 6"
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
- "EventCode": "0xa1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on port 7 and 8"
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xa2",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations."
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "RESOURCE_STALLS.SB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync)."
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CounterMask": "2",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1"
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8"
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "BriefDescription": "Number of all retired NOP instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "16"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xA3",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x14",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "20"
+ "BriefDescription": "Cycles without actually retired instructions.",
+ "CounterMask": "1",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty."
+ "UMask": "0x3"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty."
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
- "EventCode": "0xA6",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
- "CounterMask": "2"
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xa6",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load."
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "LSD.UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD."
+ "BriefDescription": "TMA slots where uops got dropped",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "False dependencies due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
"EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
"EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CounterMask": "5",
"EventCode": "0xa8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
"EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
- "CounterMask": "5"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.THREAD",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle."
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1"
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xb1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1"
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xb1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2"
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR to be enabled properly.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xb1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3"
+ "BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.PAUSE_INST",
+ "PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xb1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4"
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of uops executed from any thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core."
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1"
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2"
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3"
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4"
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the specualtive path as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of x87 uops executed.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_EXECUTED.X87",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of x87 uops dispatched."
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3,4,5,6,7",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event"
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of cycles using always true condition (uops_ret &amp;lt; 16) applied to non PEBS uops retired event.",
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "BriefDescription": "Number of uops decoded out of instructions exclusively fetched by decoder 0",
+ "EventCode": "0x56",
+ "EventName": "UOPS_DECODED.DEC0",
+ "PublicDescription": "Uops exclusively fetched by decoder 0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 0",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the retirement slots used each cycle.",
- "EventCode": "0xc2",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "UOPS_RETIRED.SLOTS",
+ "BriefDescription": "Number of uops executed on port 1",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used."
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "BriefDescription": "Number of uops executed on port 2 and 3",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected."
+ "BriefDescription": "Number of uops executed on port 4 and 9",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts all branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3,4,5,6,7",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All branch instructions retired."
+ "BriefDescription": "Number of uops executed on port 5",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken conditional branch instructions retired.",
- "EventCode": "0xc4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken conditional branch instructions retired."
+ "BriefDescription": "Number of uops executed on port 6",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts both direct and indirect near call instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired."
+ "BriefDescription": "Number of uops executed on port 7 and 8",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts return instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired."
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts not taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND_NTAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired."
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts conditional branch instructions retired.",
- "EventCode": "0xc4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x11",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.COND",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired."
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired."
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts far branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired."
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts all indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "EventCode": "0xc4",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_INST_RETIRED.INDIRECT",
- "SampleAfterValue": "100003",
- "BriefDescription": "All indirect branch instructions retired (excluding RETs. TSX aborts are considered indirect branch)."
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3,4,5,6,7",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted branch instructions retired.",
- "Data_LA": "1"
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
- "EventCode": "0xc5",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.COND_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "number of branch instructions retired that were mispredicted and taken. Non PEBS",
- "Data_LA": "1"
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xc5",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x11",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.COND",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "Data_LA": "1"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "Data_LA": "1"
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x80",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "BR_MISP_RETIRED.INDIRECT",
- "SampleAfterValue": "100003",
- "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
- "Data_LA": "1"
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
- "EventCode": "0xcc",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Increments whenever there is an update to the LBR array."
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted).",
- "EventCode": "0xcc",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x40",
- "EventName": "MISC_RETIRED.PAUSE_INST",
+ "BriefDescription": "Uops that RAT issues to RS",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of retired PAUSE instructions."
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "BACLEARS.ANY",
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to 'Mixing Intel AVX and Intel SSE Code' section of the Optimization Guide.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end."
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
- "EventCode": "0xec",
- "Counter": "0,1,2,3,4,5,6,7",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3,4,5,6,7",
- "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core."
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json
new file mode 100644
index 000000000000..8027590f1776
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelake/uncore-interconnect.json
@@ -0,0 +1,74 @@
+[
+ {
+ "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, etc.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from its allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Total number of all outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches. This event is not supported on ICL products but is supported on RKL products.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json b/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json
new file mode 100644
index 000000000000..c6596ba09195
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelake/uncore-other.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "UNC_CLOCK.SOCKET",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
index 7180a900c175..b28f62ce1f39 100644
--- a/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/icelake/virtual-memory.json
@@ -1,236 +1,165 @@
[
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Page walks completed due to a demand data load to a 4K page."
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CounterMask": "1",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page."
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "PEBScounters": "0,1,2,3",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)"
+ "UMask": "0xe"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle."
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
- "CounterMask": "1"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Loads that miss the DTLB and hit the STLB."
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walks completed due to a demand data store to a 4K page."
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CounterMask": "1",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page."
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "PEBScounters": "0,1,2,3",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)"
+ "UMask": "0xe"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle."
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
- "CounterMask": "1"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
"SampleAfterValue": "100003",
- "BriefDescription": "Stores that miss the DTLB and hit the STLB."
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)"
+ "UMask": "0x20"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts code misses in all ITLB (Instruction TLB) levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CounterMask": "1",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)"
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "PEBScounters": "0,1,2,3",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)"
+ "UMask": "0xe"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_PENDING",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle."
+ "UMask": "0x4"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
- "CounterMask": "1"
+ "UMask": "0x2"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
"SampleAfterValue": "100003",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages."
+ "UMask": "0x10"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
"EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries"
+ "UMask": "0x1"
},
{
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "BriefDescription": "STLB flush attempts",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "PEBScounters": "0,1,2,3",
"EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
"SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts"
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/cache.json b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
new file mode 100644
index 000000000000..3bdc56a75097
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/cache.json
@@ -0,0 +1,876 @@
+[
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by the L2 cache due to L2 cache fills. Evicted lines are delivered to the L3, which may or may not cache them, according to system load and priorities.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "Data_LA": "1",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "Data_LA": "1",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was remote HITM",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or Bus Lock.",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3 (AppDirect or Memory Mode) and DRAM cache(Memory Mode).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1030000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x830000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80082380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware and software prefetches to all cache levels that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PREFETCHES.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C27F0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1830000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1030000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x830000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts memory transactions sent to the uncore.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions sent to the uncore including requests initiated by the core, all L3 prefetches, reads resulting from page walks, and snoop responses.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts cacheable and non-cacheable code reads to the core.",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code reads to the core.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding data read requests pending.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding data read requests pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding data read request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Cycles where at least 1 outstanding data read request is pending. Data read requests include cacheable demand reads and L2 prefetches, but do not include RFOs, code reads or prefetches to the L3. Reads due to page walks resulting from any request type will also be counted. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles with outstanding code read requests pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Cycles with outstanding code read requests pending. Code Read requests include both cacheable and non-cacheable Code Reads. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding Demand RFO request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Cycles where at least 1 outstanding Demand RFO request is pending. RFOs are initiated by a core as part of a data store operation. Demand RFO requests include RFOs, locks, and ItoM transactions. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding code read requests pending.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding code read requests pending. Code Read requests include both cacheable and non-cacheable Code Reads. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.BUS_LOCK",
+ "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles the queue waiting for offcore responses is full.",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SQ_FULL",
+ "PublicDescription": "Counts the cycles for which the thread is active and the queue waiting for responses from the uncore cannot take any more entries.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ }
+]
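Side note, not part of the patch: the OFFCORE_REQUESTS.DEMAND_DATA_RD description above suggests pairing it with OFFCORE_REQUESTS_OUTSTANDING to determine average uncore latency. Since the OUTSTANDING event adds the number of in-flight demand data reads on every cycle, dividing it by the number of requests approximates the average latency in core cycles. A minimal Python sketch of that arithmetic (the counter values are invented, not measured):

def avg_demand_read_latency(outstanding_sum: int, requests: int) -> float:
    # OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD
    return outstanding_sum / requests if requests else 0.0

# e.g. 1,200,000 cycle-accumulated outstanding reads over 10,000 requests -> ~120 cycles each
print(avg_demand_read_latency(1_200_000, 10_000))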
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
new file mode 100644
index 000000000000..85c26c889088
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/floating-point.json
@@ -0,0 +1,105 @@
+[
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x60"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc"
+ }
+]
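Side note, not part of the patch: the descriptions above spell out how many operations each FP_ARITH_INST_RETIRED count represents (1 for scalar, 2/4/8/16 per 128/256/512-bit packed count, with FM(N)ADD/SUB already counted twice by the events themselves). A minimal Python sketch combining those weights into a FLOP rate, under the assumption that the counts come from these events; the sample count and runtime are invented:

FLOPS_PER_COUNT = {
    "FP_ARITH_INST_RETIRED.SCALAR_SINGLE": 1,
    "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE": 1,
    "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE": 2,
    "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE": 4,
    "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 4,
    "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE": 8,
    "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE": 8,
    "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE": 16,
}

def gflops(counts: dict, seconds: float) -> float:
    # weighted event counts divided by elapsed time, in GFLOP/s
    return sum(FLOPS_PER_COUNT[name] * value for name, value in counts.items()) / seconds / 1e9

# e.g. 5e9 counts of 256-bit packed double over 2 s -> 10.0 GFLOP/s
print(gflops({"FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE": 5_000_000_000}, 2.0))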
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/frontend.json b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
new file mode 100644
index 000000000000..71498044f1cb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/frontend.json
@@ -0,0 +1,344 @@
+[
+ {
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "Counts the number of Decode Stream Buffer (DSB a.k.a. Uop Cache)-to-MITE speculative transitions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x508006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x501006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x510006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x502006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x520006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x504006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CounterMask": "5",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
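
The frontend.json file above is consumed at perf build time by the pmu-events machinery (jevents), which turns each entry into a named event alias. As a quick sanity check of what the file adds, here is a short Python sketch (not part of the patch; the tree-relative path and the plain event=/umask= encoding it prints are assumptions, and it ignores the MSRIndex/MSRValue programming that the FRONTEND_RETIRED.* entries additionally require):

    #!/usr/bin/env python3
    # Sketch: list the events defined in the JSON file added by this patch,
    # together with a raw cpu/event=...,umask=.../ encoding and the default
    # sample period (SampleAfterValue). Path below is an assumption.
    import json

    PATH = "tools/perf/pmu-events/arch/x86/icelakex/frontend.json"

    with open(PATH) as f:
        events = json.load(f)

    for ev in events:
        name = ev.get("EventName", "?")
        code = ev.get("EventCode", "0x0")
        umask = ev.get("UMask", "0x0")
        period = ev.get("SampleAfterValue", "n/a")
        # e.g. ICACHE_64B.IFTAG_MISS -> cpu/event=0x83,umask=0x2/
        print(f"{name:45} cpu/event={code},umask={umask}/  (default period {period})")

Once the aliases are built into perf, the events should also be usable by name, e.g. "perf stat -e FRONTEND_RETIRED.L1I_MISS,ICACHE_64B.IFTAG_MISS -a sleep 1" on an Icelake server part.
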
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
new file mode 100644
index 000000000000..0f9b174dfc22
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
@@ -0,0 +1,1568 @@
+[
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * ASSISTS.ANY / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=1\\,edge@ / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
+ "MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_branch_instructions",
+ "MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(44 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 43.5 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "43.5 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound - tma_pmm_bound if #has_pmem > 0 else CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound)",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "48 * tma_info_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "(5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_512b",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=1@) / IDQ.MITE_UOPS",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_big_code",
+ "MetricThreshold": "tma_info_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_branching_overhead"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_mispredictions, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_branching_overhead",
+ "MetricThreshold": "tma_info_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_big_code"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_callret"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_code_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_nt"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_tk"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_core_bound_likely",
+ "MetricThreshold": "tma_info_core_bound_likely > 0.5"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 5 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_misses, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_dsb_misses",
+ "MetricThreshold": "tma_info_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_fb_hpki"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_fetch_upc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_ic_misses",
+ "MetricThreshold": "tma_info_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_instruction_fetch_bw > 20"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
+ },
+ {
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]",
+ "MetricExpr": "(UNC_CHA_TOR_INSERTS.IO_HIT_ITOM + UNC_CHA_TOR_INSERTS.IO_MISS_ITOM + UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR + UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR) * 64 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;Mem;Server;SoC",
+ "MetricName": "tma_info_io_read_bw"
+ },
+ {
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;Mem;Server;SoC",
+ "MetricName": "tma_info_io_write_bw"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx512",
+ "MetricThreshold": "tma_info_iparith_avx512 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_ntaken",
+ "MetricThreshold": "tma_info_ipmisp_cond_ntaken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_taken",
+ "MetricThreshold": "tma_info_ipmisp_cond_taken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_ret",
+ "MetricThreshold": "tma_info_ipmisp_ret < 500"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_ipswpf",
+ "MetricThreshold": "tma_info_ipswpf < 100"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 11",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_jump"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / tma_info_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_l2_evictions_nonsilent_pki"
+ },
+ {
+ "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / tma_info_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_l2_evictions_silent_pki"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (OFFCORE_REQUESTS.ALL_DATA_RD - OFFCORE_REQUESTS.DEMAND_DATA_RD + L2_RQSTS.ALL_DEMAND_MISS + L2_RQSTS.SWPF_MISS) / tma_info_instructions",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l3_miss_latency"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_load_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / cha_0@event\\=0x0@",
+ "MetricGroup": "Mem;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_mem_dram_read_latency",
+ "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]",
+ "MetricExpr": "(1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM) / cha_0@event\\=0x0@ if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_mem_pmm_read_latency",
+ "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_memory_bandwidth",
+ "MetricThreshold": "tma_info_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_memory_data_tlbs",
+ "MetricThreshold": "tma_info_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_memory_latency",
+ "MetricThreshold": "tma_info_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_mispredictions",
+ "MetricThreshold": "tma_info_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - (tma_info_cond_nt + tma_info_cond_tk + tma_info_callret + tma_info_jump)",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_other_branches"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_pmm_read_bw"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_pmm_write_bw"
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0",
+ "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license0_utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
+ "MetricExpr": "CORE_POWER.LVL1_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license1_utilization",
+ "MetricThreshold": "tma_info_power_license1_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
+ "MetricExpr": "CORE_POWER.LVL2_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license2_utilization",
+ "MetricThreshold": "tma_info_power_license2_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "(tma_info_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
+ "MetricGroup": "SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots_utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cha_0@event\\=0x0@",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_store_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "tma_retiring * tma_info_slots / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_slots / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 7.5"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "19 * tma_info_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_2_3 / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricExpr": "43.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_local_dram",
+ "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "tma_retiring * tma_info_slots / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_info_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline",
+ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group",
+ "MetricName": "tma_mite_4wide",
+ "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a",
+ "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 0)) * (CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_pmm_bound",
+ "MetricThreshold": "tma_pmm_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for Persistent Memory Module.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
+ "MetricExpr": "(97 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 97 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
+ "MetricName": "tma_remote_cache",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricExpr": "108 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_remote_dram",
+ "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricExpr": "37 * MISC_RETIRED.PAUSE_INST / tma_info_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_4_9 + UOPS_DISPATCHED.PORT_7_8) / (4 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores",
+ "MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
+ "MetricName": "tma_streaming_stores",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "10 * BACLEARS.ANY / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
+ }
+]
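The TopDown (TMA) entries above are pure data: the metric engine substitutes the named event counts into each MetricExpr and compares the result against MetricThreshold. As a rough illustration only, not part of the patch, the Python sketch below evaluates the tma_retiring expression from this file with invented counter values; the trailing "+ 0 * tma_info_slots" term of the original expression is value-neutral and is dropped here.

# Illustrative sketch only: evaluate the tma_retiring MetricExpr above with
# placeholder counter values.  Only the formula comes from the JSON; the
# numbers passed in below are made up for the example.

def tma_retiring(retiring, fe_bound, bad_spec, be_bound):
    # topdown-retiring / (topdown-fe-bound + topdown-bad-spec +
    #                     topdown-retiring + topdown-be-bound)
    return retiring / (fe_bound + bad_spec + retiring + be_bound)

value = tma_retiring(retiring=6.0e9, fe_bound=2.0e9, bad_spec=0.5e9, be_bound=1.5e9)
print(f"tma_retiring = {value:.1%}")      # ScaleUnit "100%": report as a percentage
print("above threshold:", value > 0.7)    # first clause of MetricThreshold
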
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/memory.json b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
new file mode 100644
index 000000000000..f36ac04f8d76
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/memory.json
@@ -0,0 +1,414 @@
+[
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by the local socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F04400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84400400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x94002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts full cacheline writes (ItoM) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ITOM.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC08000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F84408000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware and software prefetches to all cache levels that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.PREFETCHES.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F844027F0",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC00477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by the local socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F04400477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70CC00477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x94000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Cycles where at least one demand data read request known to have missed the L3 cache is pending. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known to have missed the L3 cache.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles where the core is waiting on at least 6 outstanding demand data read requests known to have missed the L3 cache.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "PublicDescription": "Cycles where the core is waiting on at least 6 outstanding demand data read requests known to have missed the L3 cache. Note that this event does not capture all elapsed cycles while the requests are outstanding - only cycles from when the requests were known to have missed the L3 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
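The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries in this file differ only in MSRValue, which in these definitions is the latency threshold in cycles programmed through MSR 0x3F6. A small sketch, assuming the file path taken from the diff header above, that extracts that mapping:

# Sketch: list the load-latency sampling events defined above and the cycle
# threshold each one programs (MSRValue is the threshold, e.g. 0x80 = 128).
import json

PATH = "tools/perf/pmu-events/arch/x86/icelakex/memory.json"  # path from the diff header

with open(PATH) as f:
    events = json.load(f)

for ev in events:
    if ev.get("EventName", "").startswith("MEM_TRANS_RETIRED.LOAD_LATENCY_GT_"):
        threshold = int(ev["MSRValue"], 16)
        print(f'{ev["EventName"]}: latency > {threshold} cycles, '
              f'SampleAfterValue {ev["SampleAfterValue"]}')
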
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json
new file mode 100644
index 000000000000..63d5faf2fc43
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json
@@ -0,0 +1,460 @@
+[
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Hit snoop reply with data, line invalidated.",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.I_FWD_FE",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's cache, after the data is forwarded back to the requestor and indicating the data was found unmodified in the (FE) Forward or Exclusive State in this cores caches cache. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "HitM snoop reply with data, line invalidated.",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.I_FWD_M",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated: removed from this core's caches, after the data is forwarded back to the requestor, and indicating the data was found modified(M) in this cores caches cache (aka HitM response). A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Hit snoop reply without sending the data, line invalidated.",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.I_HIT_FSE",
+ "PublicDescription": "Counts responses to snoops indicating the line will now be (I)nvalidated in this core's caches without being forwarded back to the requestor. The line was in Forward, Shared or Exclusive (FSE) state in this cores caches. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Line not found snoop reply",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.MISS",
+ "PublicDescription": "Counts responses to snoops indicating that the data was not found (IHitI) in this core's caches. A single snoop response from the core counts on all hyperthreads of the Core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Hit snoop reply with data, line kept in Shared state.",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.S_FWD_FE",
+ "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (FS) Forward or Shared state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "HitM snoop reply with data, line kept in Shared state",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.S_FWD_M",
+ "PublicDescription": "Counts responses to snoops indicating the line may be kept on this core in the (S)hared state, after the data is forwarded back to the requestor, initially the data was found in the cache in the (M)odified state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Hit snoop reply without sending the data, line kept in Shared state.",
+ "EventCode": "0xef",
+ "EventName": "CORE_SNOOP_RESPONSE.S_HIT_FSE",
+ "PublicDescription": "Counts responses to snoops indicating the line was kept on this core in the (S)hared state, and that the data was found unmodified but not forwarded back to the requestor, initially the data was found in the cache in the (FSE) Forward, Shared state or Exclusive state. A single snoop response from the core counts on all hyperthreads of the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700800001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703C00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM attached to another socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.SNC_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700800002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache prefetch requests and software prefetches (except PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetch (which bring data to L2) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10070",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x12380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.HWPF_L3.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts full cacheline writes (ItoM) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.ITOM.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O and un-cacheable accesses that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those PMM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70C000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C00477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F33000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x731800477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.READS_TO_CORE.SNC_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700800477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
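As a brief aside (not part of the patch itself): the MSRValue fields in the OCR.* entries above are raw bitmasks that get programmed into the off-core response MSRs named by MSRIndex (0x1a6/0x1a7). Below is a minimal Python sketch, using only integer arithmetic from the standard language, that prints which bits one of those masks sets, taking the OCR.READS_TO_CORE.REMOTE_MEMORY value from the table above; the meaning of each bit is defined in Intel's off-core response documentation and is not reproduced here.

# Illustrative sketch only: list the set bits of one OCR MSR value.
# 0x731800477 is the MSRValue of OCR.READS_TO_CORE.REMOTE_MEMORY above.
value = 0x731800477
set_bits = [bit for bit in range(value.bit_length()) if (value >> bit) & 1]
print(f"MSR value {value:#x} sets bits: {set_bits}")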
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
new file mode 100644
index 000000000000..442a4c7539dd
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json
@@ -0,0 +1,775 @@
+[
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "50021"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "50021",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) calls, including both register and memory indirect.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x14"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CounterMask": "2",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Number of all retired NOP instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "False dependencies due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CounterMask": "5",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR to be enabled properly.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.PAUSE_INST",
+ "PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops decoded out of instructions exclusively fetched by decoder 0",
+ "EventCode": "0x56",
+ "EventName": "UOPS_DECODED.DEC0",
+ "PublicDescription": "Uops exclusively fetched by decoder 0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 0",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 1",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 2 and 3",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 4 and 9",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 5",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 6",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 7 and 8",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to 'Mixing Intel AVX and Intel SSE Code' section of the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ }
+]
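The pipeline.json file added above is plain JSON that perf's pmu-events build turns into named events; a few entries (e.g. INST_RETIRED.ANY, CPU_CLK_UNHALTED.THREAD, TOPDOWN.SLOTS) deliberately carry no "EventCode" because they are counted on fixed counters. A small Python sketch, assuming the patch is applied in the current kernel tree so the path below exists, that separates the two groups:

#!/usr/bin/env python3
# Sketch: split the icelakex pipeline events into fixed-counter events
# (no "EventCode" field) and programmable-counter events.
import json

path = "tools/perf/pmu-events/arch/x86/icelakex/pipeline.json"
with open(path) as f:
    events = json.load(f)

fixed = [e["EventName"] for e in events if "EventCode" not in e]
programmable = [e["EventName"] for e in events if "EventCode" in e]

print("fixed-counter events:", ", ".join(fixed))
print(f"{len(programmable)} programmable-counter events")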
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json
new file mode 100644
index 000000000000..b6ce14ebf844
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-cache.json
@@ -0,0 +1,9860 @@
+[
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_2LM_NM_INVITOX.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_2LM_NM_INVITOX.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "Deprecated": "1",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_2LM_NM_INVITOX.SETCONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "Deprecated": "1",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.LLC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "Deprecated": "1",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.SF",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "Deprecated": "1",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS.TOR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "Deprecated": "1",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS2.MEMWR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "Deprecated": "1",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_2LM_NM_SETCONFLICTS2.MEMWRNI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Intermediate bypass Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the intermediate bypass.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Not Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the full bypass.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the uncore caching and home agent (CHA)",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xf2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Any Single Snoop : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xf1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.REMOTE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x12",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.REMOTE_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1F",
+ "EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Counter 0 Occupancy : Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline directory state lookups : Snoop Not Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Multi-socket cacheline directory state lookups : Snoop Not Needed : Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to. : Filters for transactions that did not have to send any snoops because the directory was clean.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline directory state lookups : Snoop Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Multi-socket cacheline directory state lookups : Snoop Needed : Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to. : Filters for transactions that had to send one or more snoops because the directory was not clean.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline directory state updates; memory write due to directory update from the home agent (HA) pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline directory state updates memory writes issued from the home agent (HA) pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline directory state updates; memory write due to directory update from (table of requests) TOR pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline directory state updates due to memory writes issued from the table of requests (TOR) pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Local",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.PMM_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : PMM Local : Counts the number of cycles either the local or incoming distress signals are asserted. : If the CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Remote",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : PMM Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : If another CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xBA",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xBA",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket ownership read requests that hit in S state.",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Number of Hits in HitMe Cache : Remote socket ownership read requests that hit in S state. : Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket WBMtoE requests",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : Remote socket writeback to I or S requests",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Number of Hits in HitMe Cache : Remote socket writeback to I or S requests : op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed : Remote socket read requests",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Number of times HitMe Cache is accessed : Remote socket read requests : op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed : Remote socket write (i.e. writeback) requests",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Number of times HitMe Cache is accessed : Remote socket write (i.e. writeback) requests : op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests that are not to shared line",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests that are not to shared line : No SF/LLC HitS/F and op is RdInvOwn",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket read or invalidate requests",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Number of Misses in HitMe Cache : Remote socket read or invalidate requests : op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests to shared line",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Number of Misses in HitMe Cache : Remote socket RdInvOwn requests to shared line : SF/LLC HitS/F and op is RdInvOwn",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Deallocate HitME$ on Reads without RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request : Received RspFwdI* for a local request, but converted HitME$ to SF entry",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request : Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache to SHARed",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "EventCode": "0xB9",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "EventCode": "0xB9",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "HA to iMC Reads Issued : ISOCH : Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Full Line Non-ISOCH",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to any of the memory controller channels.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x1fffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All transactions from Remote Agents",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All transactions from Remote Agents : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1e20ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local or remote transaction to the LLC, including prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "PerPkg": "1",
+ "UMask": "0x1bd0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x19d0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Code Reads",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Code Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x19d0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Code Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Code Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1a10ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x1a10ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Local request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local transaction to the LLC, including prefetches from the Core",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "PerPkg": "1",
+ "UMask": "0x1bc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1bc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "PerPkg": "1",
+ "UMask": "0x1fc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Read transactions.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x19c1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bc101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Requests that come from a Remote socket",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1a01ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DMND_READ_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x841ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : E State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Exclusive State",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : F State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : F State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Forward State",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1a44ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1844ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1a04ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush or Invalidate Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : I State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : I State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Miss",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Prefetch requests to the LLC that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x189dff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local LLC prefetch to the LLC",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LLC_PF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x189dff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOC_HOM",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "PerPkg": "1",
+ "UMask": "0xbdfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed locally Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOC_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
+ "UMask": "0xbdfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : M State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : M State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Modified State",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1fe001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Write Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Write Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Writeback transactions to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote non-snoop request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.PREF_OR_DMND_REMOTE_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remote non-snoop request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Non-snoop transactions to the LLC from remote agent",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Reads",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd9ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_LOC_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x9d9ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_REM_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x11d9ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd901",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally HOMed Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_LOC_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Locally HOMed Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0xbd901",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely HOMed Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_REM_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remotely HOMed Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x13d901",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_OR_SNOOP_REMOTE_MISS_REM_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x161901",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_REMOTE_LOC_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0xa19ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Reads that Hit the Snoop Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_SF_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Reads that Hit the Snoop Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd90e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REM_HOM",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "PerPkg": "1",
+ "UMask": "0x15dfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed remotely Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed remotely Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote snoop request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remote snoop request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Snoop transactions to the LLC from remote agent",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Snoop Requests from a Remote Socket",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x1c19ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REM_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
+ "UMask": "0x15dfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1bc8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x19c8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bc801",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x888ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1a08ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : S State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Shared State",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit Exclusive State",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - H State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit HitMe State",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit Shared State",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x1a42ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x842ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x17c2ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : All Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : All Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0xf",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in E state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - All Lines : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x200f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in E State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in E State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in M State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in M State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local Only",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in M state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in M state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote - All Lines : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x800f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote - Lines in E State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote - Lines in E State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote - Lines in M State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote - Lines in M State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote Only",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote - Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote - Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : CV0 Prefetch Miss : Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : CV0 Prefetch Victim : Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state.",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : Silent Snoop Eviction : Miscellaneous events in the Cbo. : Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : Write Combining Aliasing : Miscellaneous events in the Cbo. : Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "EventCode": "0xE6",
+ "EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "EventCode": "0xE6",
+ "EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local InvItoE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local Rd",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Off",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd InvItoE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READINVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Remote Rd InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ADEGRCREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.AKEGRCREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ALLRSFWAYS_RES",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.BLEGRCREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.FSF_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLOWSNP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLWAYRSV",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_PAMATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_WAYMATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.HACREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IDX_INPIPE",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IPQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IRQ_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IRQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ISMQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IVEGRCREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.LLC_WAYS_RES",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.NOTALLOWSNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ONE_FSF_VIC",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ONE_RSP_CON",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PMM_MEMMODE_TORMATCH_MULTI",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PMM_MEMMODE_TOR_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PRQ_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PTL_INPIPE",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.RMW_SETMATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.RRQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.SETMATCHENTRYWSCT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.SF_WAYS_RES",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.TOPA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.TORID_MATCH_GO_P",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.WAY_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC : NM evictions due to another read to the same near memory set in the LLC.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in SF/LLC : NM evictions due to another read to the same near memory set in the SF.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in TOR",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Memory Mode related events : Counts the number of times CHA saw NM Set conflict in TOR : No Reject in the CHA due to a pending read to the same near memory set in the TOR.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "EventCode": "0x67",
+ "EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": ": count # of FAST TOR Request inserted to ha_tor_req_fifo",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "EventCode": "0x67",
+ "EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": ": count # of SLOW TOR Request inserted to ha_pmm_tor_req_fifo",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC0 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC1 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC10",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC10",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC10 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 10 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC11",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC11",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC11 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 11 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC12",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC12",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC12 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 12 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC13",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC13",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC13 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 13 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC2 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC3 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC4 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC5 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC6",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC6",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC6 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 6 only.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC7",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC7",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC7 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 7 only.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC8",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC8",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC8 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 8 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC9",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC9",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC9 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 9 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and remote INVITOE requests sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and are sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote INVITOE requests (exclusive ownership of a cache line without receiving data) sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local read requests that miss the SF/LLC and remote read requests sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local read requests that miss the SF/LLC and are sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote read requests sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local write requests that miss the SF/LLC and remote write requests sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local write requests that miss the SF/LLC and are sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote write requests sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xae",
+ "EventName": "UNC_CHA_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IPQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IRQ Rejected : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : RRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : RRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : WBQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : WBQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IPQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : IPQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : IRQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : RRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : RRQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : WBQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : WBQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : AD REQ on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : AD RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : Non UPI AK Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL NCB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL NCS on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL WB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : Non UPI IV Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : Allow Snoop : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : ANY0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : ANY0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Any condition listed in the Other0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : HA",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : HA : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : LLC OR SF Way : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : LLC Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : PhyAddr Match : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : SF Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the PRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : AD REQ on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : AD RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : Non UPI AK Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL NCB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL NCS on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL WB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : Non UPI IV Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : Allow Snoop : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : ANY0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Any condition listed in the WBQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : HA : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : LLC OR SF Way : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : LLC Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : PhyAddr Match : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : SF Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : Allow Snoop",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Any condition listed in the RRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : HA",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : HA : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : LLC OR SF Way",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : LLC Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : PhyAddr Match",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : SF Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : Allow Snoop",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Any condition listed in the WBQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : HA",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : HA : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : LLC OR SF Way",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : LLC Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : PhyAddr Match",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : SF Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "EventCode": "0xe4",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : All",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : All : Counts the number of snoops issued by the HA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoops for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA responding to local requests",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA responding to remote requests",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Directed snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA responding to local requests",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Directed snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA responding to remote requests",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Snoops sent for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Snoops sent for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA responding to local requests",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Snoops sent for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Snoops sent for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA responding to remote requests",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RSPCNFLCT*",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : RSPCNFLCT* : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspFwd",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : RspFwd : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : Rsp*Fwd*WB",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWDWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : Rsp*Fwd*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspI",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspIFwd",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspS",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type RspS Snoop Response was received which indicates when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspSFwd",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : Rsp*WB",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : Rsp*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspCnflct : Number of snoop responses received for a Local request : Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : Rsp*FWD*WB : Number of snoop responses received for a Local request : Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspI",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspI : Number of snoop responses received for a Local request : Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspIFwd : Number of snoop responses received for a Local request : Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspS",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspS : Number of snoop responses received for a Local request : Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspSFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its currently copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : Rsp*WB : Number of snoop responses received for a Local request : Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DDR4 Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DDR4 Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.DDR",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR4",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : SF/LLC Evictions : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Hits",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Hits : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CLFlushes issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushOpts issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CLFlushOpts issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8d7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRDs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRDs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc817ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc897ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc817fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to page walks that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc897fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCRD",
+ "PerPkg": "1",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDRD",
+ "PerPkg": "1",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccc7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores that hit in the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc57fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcccfff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccd7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccc7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc817fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8178601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc816fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8178a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc897fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc896fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read prefetch from remote IA that misses in the snoop filter",
+ "UMask": "0xc8977e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8177e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_REMOTE_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefCode issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcccffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefData issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccd7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccc7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_DRAM",
+ "PerPkg": "1",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remote memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc806fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc886fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8877e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC - HOMed remotely : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8077e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc57fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SpecItoMs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : SpecItoMs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc57ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc3fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoIs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc37ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBMtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "PerPkg": "1",
+ "PublicDescription": "WbMtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc2fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbMtoIs issued by iA Cores . (Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBStoIs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbStoIs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc67ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CLFlushes issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices to locally HOMed memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd42ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices to remotely HOMed memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd437f04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices to locally HOMed memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc42ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices to remotely HOMed memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc437f04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WbMtoIs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IPQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IPQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IRQ - iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From an iA Core",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IRQ - Non iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just ISOC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just ISOC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Local Targets",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Local Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA and IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests from iA Cores",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally generated IO traffic",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Misses",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Misses : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMCFG Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : MMCFG Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NearMem",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NonCoherent",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NonCoherent : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NotNearMem",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NotNearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PMM Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PMM Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PRQ - IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From a PCIe Device",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PRQ - Non IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Remote Targets",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REMOTE_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Remote Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RRQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WBQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DDR4 Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DDR4 Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : SF/LLC Evictions : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Hits",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Hits : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8d7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRDs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRDs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc817ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc897ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc817fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc897fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccc7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcccfff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccd7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccc7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc817fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8178601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc816fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8178a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc897fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978601",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc896fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978a01",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8977e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8177e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Full cache line streaming writes (WCiLF) from local IA that missed the LLC",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiLF misses from local IA targeting DDR memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_DDR",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Full cache line streaming writes (WCiLF) from local IA targeting DDR memory that missed the LLC",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiLF misses from local IA targeting locally HOMed DDR memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_LOCAL_DDR",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Full cache line streaming writes (WCiLF) from local IA targeting locally HOMed DDR memory that missed the LLC",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiLF misses from local IA targeting locally HOMed PMM memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_LOCAL_PMM",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Full cache line streaming writes (WCiLF) from local IA targeting locally HOMed PMM memory that missed the LLC",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiLF misses from local IA targeting PMM memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_PMM",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Full cache line streaming writes (WCiLF) from local IA targeting PMM memory that missed the LLC",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiLF misses from local IA targeting remotely HOMed DDR memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_REMOTE_DDR",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Full cache line streaming writes (WCiLF) from local IA targeting remotely HOMed DDR memory that missed the LLC",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiLF misses from local IA targeting remotely HOMed PMM memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR_REMOTE_PMM",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Full cache line streaming writes (WCiLF) from local IA targeting remotely HOMed PMM memory that missed the LLC",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefCode issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcccffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefData issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccd7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xccc7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Partial cache line streaming writes (WCiL) from local IA that missed the LLC",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiL misses from local IA targeting DDR memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_DDR",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Partial cache line streaming writes (WCiL) from local IA targeting DDR memory that missed the LLC",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiL misses from local IA targeting locally HOMed DDR memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_DDR",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Partial cache line streaming writes (WCiL) from local IA targeting locally HOMed DDR memory that missed the LLC",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiL misses from local IA targeting locally HOMed PMM memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_LOCAL_PMM",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Partial cache line streaming writes (WCiL) from local IA targeting locally HOMed PMM memory that missed the LLC",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiL misses from local IA targeting PMM memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_PMM",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Partial cache line streaming writes (WCiL) from local IA targeting PMM memory that missed the LLC",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiL misses from local IA targeting remotely HOMed DDR memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_DDR",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Partial cache line streaming writes (WCiL) from local IA targeting remotely HOMed DDR memory that missed the LLC",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "TOR Occupancy; WCiL misses from local IA targeting remotely HOMed PMM memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR_REMOTE_PMM",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy; Partial cache line streaming writes (WCiL) from local IA targeting remotely HOMed PMM memory that missed the LLC",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc806fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc886fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8877e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8077e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_SPECITOM",
+ "PerPkg": "1",
+        "PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc57fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc57ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IPQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IRQ - iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From an iA Core",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IRQ - Non iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just ISOC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just ISOC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Local Targets",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Local Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA and IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests from iA Cores",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally generated IO traffic",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Misses",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Misses : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMCFG Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : MMCFG Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NearMem",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NonCoherent : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NotNearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PMM Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PMM Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PRQ - IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From a PCIe Device",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PRQ - Non IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Remote Targets",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REMOTE_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Remote Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "WbPushMtoI : Pushed to LLC : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was able to push WbPushMToI to LLC",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "WbPushMtoI : Pushed to Memory : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC0 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC1 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC10",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC10",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC10 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 10 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC11",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC11",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC11 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 11 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC12",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC12",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC12 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 12 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC13",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC13",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC13 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 13 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC2 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC3 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC4 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC5 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC6",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC6",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC6 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 6 only.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC7",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC7",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC7 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 7 only.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC8",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC8",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC8 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 8 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC9",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC9",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC9 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 9 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 0?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 0?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 1?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 1?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Sent (on 0?) : Number of XPT prefetches sent",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Sent (on 1?) : Number of XPT prefetches sent",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ }
+]
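For readers unfamiliar with the pmu-events layout: each entry in these files is a flat JSON object whose EventCode/UMask pair is the raw selector for the uncore PMU named by "Unit", and the perf build later converts the whole array into C tables via its jevents tooling. As a minimal, hypothetical sketch of the record shape only (this snippet is not part of the patch; the file path and event name below are simply examples taken from the hunk that follows), one could load and inspect such a file like this:

#!/usr/bin/env python3
# Illustrative sketch, not part of this patch: load one of the pmu-events
# JSON files added here and print the raw selector fields for one event.
# The path and event name are assumptions for demonstration only.
import json

def find_event(path, name):
    with open(path) as f:
        events = json.load(f)          # each file is one flat JSON array
    for ev in events:
        if ev.get("EventName") == name:
            return ev
    return None

if __name__ == "__main__":
    path = "tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json"
    ev = find_event(path, "UNC_I_SNOOP_RESP.ALL_HIT")
    if ev:
        sel = "event=" + ev["EventCode"]
        # UMask is optional; some entries above (e.g. the MC8..MC13
        # write-credit events) omit it.
        if "UMask" in ev:
            sel += ",umask=" + ev["UMask"]
        print("%s -> %s (unit %s)" % (ev["EventName"], sel, ev["Unit"]))

The in-tree conversion does considerably more than this, so treat the snippet purely as a reader's aid for the field layout.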
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
new file mode 100644
index 000000000000..8ac5907762e1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json
@@ -0,0 +1,14571 @@
+[
+ {
+ "BriefDescription": "Total Write Cache Occupancy : Any Source",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Total Write Cache Occupancy : Any Source : Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events. : Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy : Snoops",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "PerPkg": "1",
+ "PublicDescription": "Total Write Cache Occupancy : Snoops : Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
+ "EventCode": "0x0f",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "Total IRP occupancy of inbound read and write requests to coherent memory. This is effectively the sum of read occupancy and write occupancy.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : CLFlush",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "PublicDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline to coherent memory, without a RFO. PCIITOM is a speculative Invalidate to Modified command that requests ownership of the cacheline and does not move data from the mesh to IRP cache.",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline to coherent memory. RFO is a Read For Ownership command that requests ownership of the cacheline and moves data from the mesh to IRP cache.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : WbMtoI",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound read requests to coherent memory, received by the IRP and inserted into the Fire and Forget queue (FAF), a queue used for processing inbound reads in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue.",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy of the IRP Fire and Forget (FAF) queue, a queue used for processing inbound reads in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Received Valid : Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of E Line : Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of I Line : Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of M Line : Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of S Line : Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Requests",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_P2P_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "P2P Requests : P2P requests from the ITC",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Occupancy",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_P2P_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "P2P Occupancy : P2P B & S Queue Occupancy",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P completions",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if local only",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if local and target matches",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P Message",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P reads",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : Match if remote only",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if remote and target matches",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P Writes",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit E or S",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit I",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit M",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Miss",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpCode",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpData",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpInv",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Atomic",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound Transaction Count : Atomic : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks the number of atomic transactions",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Other",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound Transaction Count : Other : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks the number of 'other' kinds of transactions.",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Writes",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "EventCode": "0x0B",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "EventCode": "0x05",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "EventCode": "0x02",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "EventCode": "0x08",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "EventCode": "0x06",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "EventCode": "0x03",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "EventCode": "0x09",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "EventCode": "0x07",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "EventCode": "0x04",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "EventCode": "0x0A",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": ": Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0&AD1 both. Stalls on both AD0 and AD1 will count as 2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD0 Egress Credits Stalls",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No AD0 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD1 Egress Credits Stalls",
+ "EventCode": "0x1B",
+ "EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No AD1 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No BL Egress Credit Stalls : Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0x0D",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0x0E",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0x0C",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Request Queue Occupancy : Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_EGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Taken",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_EGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Taken",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "EventCode": "0x28",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled",
+ "EventCode": "0x27",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "EventCode": "0x29",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the mesh to PCI (M2P)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in any state",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in A state",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in I state",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Lookups : Found in S state",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates : From/to any state. Note: event counts are incorrect in 2LM mode.",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Local",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.PMM_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : PMM Local : Counts the number of cycles either the local or incoming distress signals are asserted. : If the CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Remote",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : PMM Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : If another CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_DISTRESS_PMM",
+ "EventCode": "0xF2",
+ "EventName": "UNC_M2M_DISTRESS_PMM",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_DISTRESS_PMM_MEMMODE",
+ "EventCode": "0xF1",
+ "EventName": "UNC_M2M_DISTRESS_PMM_MEMMODE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x704",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x140",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x102",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x101",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x110",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : PMM - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Reads Issued to iMC : PMM - Ch0 : Counts all PMM dimm read requests(full line) sent from M2M to iMC",
+ "UMask": "0x120",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "PerPkg": "1",
+ "UMask": "0x204",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x240",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x202",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x201",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x210",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x208",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : PMM - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Reads Issued to iMC : PMM - Ch1 : Counts all PMM dimm read requests(full line) sent from M2M to iMC",
+ "UMask": "0x220",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch2",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH2_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x440",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x740",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x702",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x701",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR, acting as Cache - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x710",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : DDR - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x708",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : PMM - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x720",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1c10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "PerPkg": "1",
+ "UMask": "0x410",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "PerPkg": "1",
+ "UMask": "0x401",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x404",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x402",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x408",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x440",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x420",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : PMM - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Writes Issued to iMC : PMM - Ch0 : Counts all PMM dimm writes requests(full line and partial) sent from M2M to iMC",
+ "UMask": "0x480",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "PerPkg": "1",
+ "UMask": "0x810",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "PerPkg": "1",
+ "UMask": "0x801",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x804",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x802",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x808",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x840",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x820",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : PMM - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "M2M Writes Issued to iMC : PMM - Ch1 : Counts all PMM dimm writes requests(full line and partial) sent from M2M to iMC",
+ "UMask": "0x880",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1c01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1c04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1c02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1c08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR, acting as Cache - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1c40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : DDR - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x1c20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : PMM - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x1c80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts",
+ "EventCode": "0x64",
+ "EventName": "UNC_M2M_MIRR_WRQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "EventCode": "0x65",
+ "EventName": "UNC_M2M_MIRR_WRQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches : MC Match",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches : Mesh Match",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "EventCode": "0x73",
+ "EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : All Channels",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 0",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 1",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 2",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : All Channels",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 0",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 1",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 2",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA0_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_MISS_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA0_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_MISS_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_HITA0_INVAL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_HITA1_INVAL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_MISS_INVAL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH2_RSP_PDRESET",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - Ch 0",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 0",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - Ch 1",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 1",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - Ch 2",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH2_UPI",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 2",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH2_XPT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "EventCode": "0x6f",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x2a",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - All Channels",
+ "EventCode": "0x6f",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 0",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 0",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 2",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH2_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 2",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- All Channels",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPTUPI_ALLCH",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - All Channels",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 0",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI- Ch 0",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI- Ch 1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 2",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH2_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - All Channels",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPTUPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.RPQ_PROXY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.UPI_THRESH",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.XPT_THRESH",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.RPQ_PROXY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.UPI_THRESH",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.XPT_THRESH",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.RPQ_PROXY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.UPI_THRESH",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch2 - Reasons",
+ "EventCode": "0x72",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH2.XPT_THRESH",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 2",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH2_UPI",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 2",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH2_XPT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "EventCode": "0x6d",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x2a",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 2",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": All Channels",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 0",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 1",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 2",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "EventCode": "0x79",
+ "EventName": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "EventCode": "0x78",
+ "EventName": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "EventCode": "0x77",
+ "EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xae",
+ "EventName": "UNC_M2M_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 0",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 1",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 0",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 1",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC RPQ Cycles w/Credits - PMM : Channel 2",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD_PMM.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 0",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Full",
+ "EventCode": "0x04",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Allocations",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "EventCode": "0x77",
+ "EventName": "UNC_M2M_RxC_AD_PREF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M2M_RxC_AK_WR_CMP",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Full",
+ "EventCode": "0x08",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "EventCode": "0x07",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "EventCode": "0x05",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "EventCode": "0x06",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_SCOREBOARD_AD_RETRY_ACCEPTS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2M_SCOREBOARD_AD_RETRY_ACCEPTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_SCOREBOARD_AD_RETRY_REJECTS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2M_SCOREBOARD_AD_RETRY_REJECTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Retry - Mem Mirroring Mode",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_SCOREBOARD_BL_RETRY_ACCEPTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Retry - Mem Mirroring Mode",
+ "EventCode": "0x36",
+ "EventName": "UNC_M2M_SCOREBOARD_BL_RETRY_REJECTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Scoreboard Accepts",
+ "EventCode": "0x2F",
+ "EventName": "UNC_M2M_SCOREBOARD_RD_ACCEPTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Scoreboard Rejects",
+ "EventCode": "0x30",
+ "EventName": "UNC_M2M_SCOREBOARD_RD_REJECTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Scoreboard Accepts",
+ "EventCode": "0x31",
+ "EventName": "UNC_M2M_SCOREBOARD_WR_ACCEPTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Scoreboard Rejects",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2M_SCOREBOARD_WR_REJECTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Clean NearMem Read Hit",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit : Clean NearMem Read Hit : Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts clean full line read hits (reads and RFOs).",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Dirty NearMem Read Hit",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit : Dirty NearMem Read Hit : Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts dirty full line read hits (reads and RFOs).",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Clean NearMem Underfill Hit",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit : Clean NearMem Underfill Hit : Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts clean underfill hits due to a partial write",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Dirty NearMem Underfill Hit",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit : Dirty NearMem Underfill Hit : Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts dirty underfill read hits due to a partial write",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Miss",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_TAG_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 0",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 1",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 0",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 1",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "EventCode": "0x0d",
+ "EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "EventCode": "0x0e",
+ "EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Full",
+ "EventCode": "0x0c",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Not Empty",
+ "EventCode": "0x0b",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "EventCode": "0x09",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "EventCode": "0x0A",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK : CRD Transactions to Cbo",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK : NDR Transactions",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.NDR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AKC Credits",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M2M_TxC_AKC_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : All",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : All",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : All",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : All",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Cache",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Core",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to QPI",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : All",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : All",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : All",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 0",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 1",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 2",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 0",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - PMM : Channel 2",
+ "EventCode": "0x51",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD_PMM.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 0",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 1",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Mirror",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 2",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 2",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 2",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 0",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 1",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 2",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Mirror",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 2",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 2",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x81",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x81",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x81",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x83",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x83",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x83",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x89",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x89",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x89",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x85",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x85",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x85",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x87",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x87",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x87",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Requests",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Requests : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Snoops",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Snoops : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : VNA Messages",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : VNA Messages : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Writebacks",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Writebacks : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to UPI (M3UPI)",
+ "EventCode": "0x01",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the mesh to UPI (M3UPI) : Counts the number of uclks in the M3 uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2C Sent",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M3UPI_D2C_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "D2C Sent : Count cases BL sends direct to core",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2U Sent",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "D2U Sent : Cases where SMI3 sends D2U command",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Local",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.PMM_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : PMM Local : Counts the number of cycles either the local or incoming distress signals are asserted. : If the CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Remote",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : PMM Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : If another CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M3UPI_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M3UPI_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only) : No vn0 and vna credits available to send to M2",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO2 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO3 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO4",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO4 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO5",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together : No vn0 and vna credits available to send to M2",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits : No vn0 and vna credits available to send to M2",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO5",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.UBOX_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M3UPI_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M3UPI_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 1",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 1 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AK - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AK - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AK - Slot 2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AK - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : BL - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : BL - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xae",
+ "EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : REQ on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : REQ on AD : VN0 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : RSP on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : RSP on AD : VN0 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : SNP on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : SNP on AD : VN0 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : NCB on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : NCB on BL : VN0 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : NCS on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : NCS on BL : VN0 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : RSP on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : RSP on BL : VN0 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : WB on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : WB on BL : VN0 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : REQ on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : REQ on AD : VN1 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : RSP on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : RSP on AD : VN1 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : SNP on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : SNP on AD : VN1 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : NCB on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : NCB on BL : VN1 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : NCS on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : NCS on BL : VN1 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : RSP on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : RSP on BL : VN1 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : WB on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : WB on BL : VN1 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0 : AD and BL messages won arbitration concurrently / in parallel",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1 : AD and BL messages won arbitration concurrently / in parallel",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : Max Parallel Win",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ALL_PARALLEL_WIN",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : Max Parallel Win : VN0 and VN1 arbitration sub-pipelines both produced AD and BL winners (maximum possible parallel winners)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN0 : Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN1 : Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN0 : Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN1 : Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.VN01_PARALLEL_WIN",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win : VN0 and VN1 arbitration sub-pipelines had parallel winners (at least one AD or BL on each side)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : REQ on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : REQ on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : RSP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : RSP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : SNP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : SNP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : NCB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : NCB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : NCS on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : NCS on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : RSP on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : RSP on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : WB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : WB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : REQ on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : REQ on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : RSP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : RSP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : SNP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : SNP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : NCB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : NCB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : NCS on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : NCS on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : RSP on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : RSP on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : WB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : WB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : REQ on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : REQ on AD : VN0 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : RSP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : RSP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : SNP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : SNP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : NCB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : NCB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : NCS on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : NCS on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : RSP on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : RSP on BL : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : WB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : WB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : REQ on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : REQ on AD : VN1 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : RSP on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : RSP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : SNP on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : SNP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : NCB on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : NCB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : NCS on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : NCS on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : RSP on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : RSP on BL : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : WB on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : WB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while pipeline is idle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 1 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 1 while merging with bl message in same flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 2 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 2 while merging with bl message in same flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : Any In BGF FIFO",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : Any In BGF FIFO : Indication that at least one packet (flit) is in the bgf (fifo only)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : Any in BGF Path",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : Any in BGF Path : Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.LT1_FOR_D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.LT2_FOR_D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 2",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : No D2K For Arb",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.VN0_NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : No D2K For Arb : VN0 BL RSP message was blocked from arbitration request due to lack of D2K CMP credit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.VN1_NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Credits Consumed",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.CONSUMED",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Credits Consumed : number of remote vna credits consumed per cycle",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : D2K Credits",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : D2K Credits : D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Packets in BGF FIFO",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Packets in BGF FIFO : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Packets in BGF Path",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Packets in BGF Path : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in completion fifo only",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in marker table and in fifo",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Transmit Credits",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Transmit Credits : Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : VNA In Use",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : VNA In Use : Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : All",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : All : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but could not be sent for any reason, e.g. low credits, low tsv, stall injection",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : No BGF Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_BGF",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : No BGF Credits : Data flit is ready for transmission but could not be sent",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : No TxQ Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_TXQ",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : No TxQ Credits : Data flit is ready for transmission but could not be sent",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : TSV High",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.TSV_HI",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : TSV High : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while tsv high",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : Cycle valid for Flit",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.VALID_FOR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : Cycle valid for Flit : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while cycle is valid for flit transmission",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 0",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 0 : generating bl data flit sequence; waiting for data pump 0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is tracking at least one message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending completion fifo is full",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : a bl message finished but is in limbo and moved to pump-1-pending logic",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 1",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 1 : generating bl data flit sequence; waiting for data pump 1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request naturally serviced during hold-off period",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request forcibly serviced during service window",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request received from link layer while idle (with no slot 2 request active immediately prior)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request withdrawn during hold-off period or service window",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : All",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Needs Data Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Needs Data Flit : BL message requires data flit sequence",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0 : Waiting for header pump 0",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 : Header pump 1 is not required for flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble : Header pump 1 is not required for flit but flit transmission delayed",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail : Header pump 1 is not required for flit and not available",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1 : Waiting for header pump 1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate : Events related to Header Flit Generation - Set 1 : Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate Ready",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate Ready : Events related to Header Flit Generation - Set 1 : header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate Wasted",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate Wasted : Events related to Header Flit Generation - Set 1 : Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked : Events related to Header Flit Generation - Set 1 : Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_AFTER",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: message was slotted only after run-ahead was over; run-ahead mode definitely wasted",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Message",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_DURING",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Message : Events related to Header Flit Generation - Set 1 : run-ahead mode: one message slotted during run-ahead",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_AFTER",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: second message slotted immediately after run-ahead; potential run-ahead success",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: two (or three) message flit sent immediately after run-ahead; complete run-ahead success",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Ok",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Ok : Events related to Header Flit Generation - Set 2 : new header flit construction may proceed in parallel with data flit sequence",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Flit Finished",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Flit Finished : Events related to Header Flit Generation - Set 2 : header flit finished assembly in parallel with data flit sequence",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Message",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Message : Events related to Header Flit Generation - Set 2 : message is slotted into header flit in parallel with data flit sequence",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall : Events related to Header Flit Generation - Set 2 : Rate-matching stall injected",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message : Events related to Header Flit Generation - Set 2 : Rate matching stall injected, but no additional message slotted during stall cycle",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Message",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : One Message : One message in flit; VNA or non-VNA flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Message in non-VNA",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG_VNX",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : One Message in non-VNA : One message in flit; non-VNA flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Two Messages",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.2_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : Two Messages : Two messages in flit; VNA flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Three Messages",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.3_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : Three Messages : Three messages in flit; VNA flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Slot Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Two Slots Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : All Slots Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_3",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : All",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : All : header flit is ready for transmission but could not be sent : header flit is ready for transmission but could not be sent for any reason, e.g. no credits, low tsv, stall injection",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No BGF Credits",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No BGF Credits : header flit is ready for transmission but could not be sent : No BGF credits available",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No BGF credits available; no additional message slotted into flit",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No TxQ Credits",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No TxQ Credits : header flit is ready for transmission but could not be sent : No TxQ credits available",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No TxQ credits available; no additional message slotted into flit",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : TSV High",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.TSV_HI",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : TSV High : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while tsv high",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : Cycle valid for Flit",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.VALID_FOR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : Cycle valid for Flit : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while cycle is valid for flit transmission",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Can't Slot AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Can't Slot AD : some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Can't Slot BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Can't Slot BL : some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Parallel Attempt",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Parallel Attempt : ad and bl messages attempted to slot into the same flit in parallel",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Parallel Success",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in paralle",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : VN0",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : VN0 : vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : VN1",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : VN1 : vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : REQ on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : REQ on AD : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on AD : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : SNP on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : SNP on AD : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCB on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCB on BL : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCS on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : NCS on BL : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : RSP on BL : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts : WB on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Inserts : WB on BL : Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : REQ on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : REQ on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : SNP on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : SNP on AD : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCB on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCB on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCS on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : NCS on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : RSP on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts : WB on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Inserts : WB on BL : Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : REQ on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : REQ on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : SNP on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : SNP on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCB on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCB on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCS on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : NCS on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : RSP on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy : WB on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Occupancy : WB on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : REQ on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : REQ on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : SNP on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : SNP on AD : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCB on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCB on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCS on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : NCS on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : RSP on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy : WB on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Ingress (from CMS) Queue - Occupancy : WB on BL : Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : REQ on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : RSP on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : SNP on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : NCB on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : NCS on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : RSP on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : WB on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : REQ on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : RSP on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : SNP on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : NCB on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : NCS on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : RSP on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : WB on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Any In Use",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Any In Use : At least one remote vna credit is in use",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Corrected",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Corrected : Number of remote vna credits corrected (local return) per cycle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 1 : Remote vna credit level is less than 1 (i.e. no vna credits available)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 10",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT10",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 10 : remote vna credit level is less than 10; parallel vn0/vn1 arb not possible",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 4",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 4 : Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 5",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 5 : Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credit count was less than 5 and allocation to ad or bl messages was required",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credit count was less than 10 and allocation to vn0 or vn1 was required",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn0, remote vna credits were allocated only to ad messages, not to bl",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn0, remote vna credits were allocated only to bl messages, not to ad",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credits were allocated only to vn0, not to vn1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn1, remote vna credits were allocated only to ad messages, not to bl",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn1, remote vna credits were allocated only to bl messages, not to ad",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credits were allocated only to vn1, not to vn0",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M3UPI_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 REQ Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 RSP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 SNP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 WB Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 REQ Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 RSP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 SNP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 WB Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 WB Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 WB Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 REQ Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 RSP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 SNP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 WB Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 REQ Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 RSP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 SNP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Inserts",
+ "EventCode": "0x2F",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Occupancy",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 NCB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 NCS Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 RSP Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 WB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 NCS Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 NCB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 RSP Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 WB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 WB Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 WB Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 RSP Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 WB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 NCS Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 NCB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1 RSP Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1 WB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1_NCB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1_NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1_NCS Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1_NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_THROUGH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_WRPULL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_THROUGH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_WRPULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 REQ Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 RSP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 SNP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 REQ Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 RSP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 SNP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VNA",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VNA : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VNA",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VNA : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FlowQ Generated Prefetch",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "PublicDescription": "FlowQ Generated Prefetch : Count cases where FlowQ causes spawn of Prefetch to iMC/SMI3 target",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M3UPI_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : WB on BL",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : WB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : NCB on BL",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : NCB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : REQ on AD",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : REQ on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : RSP on AD",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : RSP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : SNP on AD",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : SNP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : RSP on BL",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : RSP on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : WB on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : WB on BL : Number of Cycles there were no VN0 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : NCB on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : NCB on BL : Number of Cycles there were no VN0 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : REQ on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : REQ on AD : Number of Cycles there were no VN0 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : RSP on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : RSP on AD : Number of Cycles there were no VN0 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : SNP on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : SNP on AD : Number of Cycles there were no VN0 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : RSP on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : RSP on BL : Number of Cycles there were no VN0 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : WB on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : WB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : NCB on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : NCB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : REQ on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : REQ on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : RSP on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : RSP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : SNP on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : SNP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : RSP on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : RSP on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : WB on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : WB on BL : Number of Cycles there were no VN1 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : NCB on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : NCB on BL : Number of Cycles there were no VN1 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : REQ on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : REQ on AD : Number of Cycles there were no VN1 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : RSP on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : RSP on AD : Number of Cycles there were no VN1 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : SNP on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : SNP on AD : Number of Cycles there were no VN1 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : RSP on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : RSP on BL : Number of Cycles there were no VN1 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0xc0",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "EventCode": "0x7E",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "EventCode": "0x7D",
+ "EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message is making arbitration request",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message arrived in ingress pipeline",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message took bypass path",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was slotted into flit (non bypass)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message lost arbitration",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was dropped because it became too old",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was dropped because it was overwritten by new message while prefetch queue was full",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of kfclks",
+ "EventCode": "0x01",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of kfclks : Counts the number of clocks in the UPI LL. This clock runs at 1/8th the GT/s speed of the UPI link. For example, a 8GT/s link will have qfclk or 1GHz. Current products do not support dynamic link speeds, so this frequency is fixed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2C",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "PublicDescription": "Direct packet attempts : D2C : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2K",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Direct packet attempts : D2K : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L1 : Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "EventCode": "0x16",
+ "EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "EventCode": "0x20",
+ "EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req Nack",
+ "EventCode": "0x23",
+ "EventName": "UNC_UPI_POWER_L1_NACK",
+ "PerPkg": "1",
+ "PublicDescription": "L1 Req Nack : Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req (same as L1 Ack).",
+ "EventCode": "0x22",
+ "EventName": "UNC_UPI_POWER_L1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "L1 Req (same as L1 Ack). : Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0x24",
+ "EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Request",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Request : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Request, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x108",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Conflict",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Response - Conflict : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x1aa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Invalid",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Response - Invalid : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x12a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Response - Data : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Response - Data, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10c",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Response - No Data : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Response - No Data, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Snoop",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Snoop : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Snoop, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x109",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Writeback",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Writeback : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Writeback, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10d",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 0",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 0 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 1",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 1 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 2",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 2 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected",
+ "EventCode": "0x0B",
+ "EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "CRC Errors Detected : Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "LLR Requests Sent",
+ "EventCode": "0x08",
+ "EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
+ "PerPkg": "1",
+ "PublicDescription": "LLR Requests Sent : Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed",
+ "EventCode": "0x39",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Consumed : Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed",
+ "EventCode": "0x3A",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Consumed : Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x38",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "PublicDescription": "VNA Credit Consumed : Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : All Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Null FLITs received from any slot : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Null FLITs received from any slot",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Null FLITs received from any slot : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCRD Not Empty",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCTRL",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Non Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Protocol Header",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 0",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 1",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 2",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 0",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 1",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 2",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 0 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 1 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 2 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "EventCode": "0x28",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "EventCode": "0x29",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0x26",
+ "EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Request",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Request : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Request, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x108",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Conflict",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Conflict : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x1aa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Invalid",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Invalid : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x12a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Data : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Response - Data, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10c",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Response - No Data : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Response - No Data, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Snoop : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Snoop, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x109",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Writeback : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Writeback, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10d",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Bypassed : Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the UPI Link. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Null FLITs transmitted to any slot",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Null FLITs transmitted to any slot : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Idle",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCRD Not Empty",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCTRL",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Non Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Protocol Header",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 0",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 1",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 2",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "EventCode": "0x45",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x44",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "VNA Credits Pending Return - Occupancy : Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Doorbell",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Interrupt",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : Interrupt : Interrupts",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : IPI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : VLW",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "IDI Lock/SplitLock Cycles : Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "EventCode": "0x4F",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "EventCode": "0x4F",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles PHOLD Assert to Ack : Assert to ACK : PHOLD cycles.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "RACU Request : Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json
new file mode 100644
index 000000000000..9cef8862c428
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-io.json
@@ -0,0 +1,9270 @@
+[
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x23",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x25",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the integrated IO (IIO) traffic controller : Increments counter once every Traffic Controller clock, the LSCLK (500MHz)",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for IIO clocktick",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Free running counter that increments for integrated IO (IIO) traffic controller clockticks",
+ "UMask": "0x10",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts : All Ports",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0-7",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 1",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 2",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 3",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 4",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 5",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 6",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 7",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 0-7",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 0-7",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 3 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 4 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 5 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 5",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 6 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 7 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Passing data to be written : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Issuing final read or write of line : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Processing response from IOMMU : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Issuing to IOMMU : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Request Ownership : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Writing line",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Writing line : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Passing data to be written : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Issuing final read or write of line : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Processing response from IOMMU : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Issuing to IOMMU : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Request Ownership : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Writing line : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 1G Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 2M Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 4K Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups all",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB lookups all : Some transactions have to look up IOTLB multiple times. Counts every time a request looks up IOTLB.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache hits",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": Context cache hits : Counts each time a first look up of the transaction hits the RCC.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache lookups",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": Context cache lookups : Counts each time a transaction looks up root context cache.",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups first",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB lookups first : Some transactions have to look up IOTLB multiple times. Counts the first time a request looks up IOTLB.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Fills (same as IOTLB miss)",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.MISSES",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB Fills (same as IOTLB miss) : When a transaction misses IOTLB, it does a page walk to look up memory and bring in the relevant page translation. Counts when this page translation is written to IOTLB.",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Cycles PWT full",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.CYC_PWT_FULL",
+ "PerPkg": "1",
+ "PublicDescription": ": Cycles PWT full : Counts cycles the IOMMU has reached its maximum limit for outstanding page walks.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOMMU memory access",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "PerPkg": "1",
+ "PublicDescription": ": IOMMU memory access : IOMMU sends out memory fetches when it misses the cache look up which is indicated by this signal. M2IOSF only uses low priority channel",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 1G page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 4K page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_4K_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 4K page : Counts each time a transaction's first look up hits the SLPWC at the 4K level",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWT Hit to a 256T page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWT Hit to a 256T page : Counts each time a transaction's first look up hits the SLPWC at the 512G level",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache fill",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "PerPkg": "1",
+ "PublicDescription": ": PageWalk cache fill : When a transaction misses SLPWC, it does a page walk to look up memory and bring in the relevant page translation. When this page translation is written to SLPWC, ObsPwcFillValid_nnnH is asserted.",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache lookup",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": PageWalk cache lookup : Counts each time a transaction looks up second level page walk cache.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Interrupt Entry cache hit",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": Interrupt Entry cache hit : Counts each time a transaction's first look up hits the IEC.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Interrupt Entry cache lookup",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": Interrupt Entry cache lookup : Counts the number of transaction looks up that interrupt remapping cache.",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Device-selective Context cache invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DEVICE",
+ "PerPkg": "1",
+ "PublicDescription": ": Device-selective Context cache invalidation cycles : Counts number of Device selective context cache invalidation events",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Domain-selective Context cache invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DOMAIN",
+ "PerPkg": "1",
+ "PublicDescription": ": Domain-selective Context cache invalidation cycles : Counts number of Domain selective context cache invalidation events",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache global invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_GBL",
+ "PerPkg": "1",
+ "PublicDescription": ": Context cache global invalidation cycles : Counts number of Context Cache global invalidation events",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Domain-selective IOTLB invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_DOMAIN",
+ "PerPkg": "1",
+ "PublicDescription": ": Domain-selective IOTLB invalidation cycles : Counts number of Domain selective invalidation events",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Global IOTLB invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_GBL",
+ "PerPkg": "1",
+ "PublicDescription": ": Global IOTLB invalidation cycles : Indicates that IOMMU is doing global invalidation.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Page-selective IOTLB invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PAGE",
+ "PerPkg": "1",
+ "PublicDescription": ": Page-selective IOTLB invalidation cycles : Counts number of Page-selective within Domain Invalidation events",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if all bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if all bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if any bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if any bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Counting disabled",
+ "EventCode": "0x80",
+ "EventName": "UNC_IIO_NOTHING",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Occupancy of outbound request queue : To device",
+ "EventCode": "0xC5",
+ "EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Passing data to be written",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": ": Passing data to be written : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Issuing final read or write of line",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Processing response from IOMMU",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Issuing to IOMMU",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Request Ownership",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": ": Request Ownership : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Writing line",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": ": Writing line : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : From IRP",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.IRP",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Number requests sent to PCIe from main die : From IRP : Captures Posted/Non-posted allocations from IRP. i.e. either non-confined P2P traffic or from the CPU",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : From ITC",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.ITC",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Number requests sent to PCIe from main die : From ITC : Confined P2P",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : Completion allocations",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.PREALLOC",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : Drop request",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.ALL.DROP",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Number requests PCIe makes of the main die : Drop request : Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU. : Packet error detected, must be dropped",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : All",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Number requests PCIe makes of the main die : All : Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "ITC address map 1",
+ "EventCode": "0x8F",
+ "EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "EventCode": "0xD0",
+ "EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Outbound cacheline requests issued : 64B requests issued to device : Each outbound cacheline granular request may need to make multiple passes through the pipeline. Each time a cacheline completes all its passes it advances line",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "EventCode": "0xD1",
+ "EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Outbound TLP (transaction layer packet) requests issued : To device : Each time an outbound completes all its passes it advances the pointer",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PWT occupancy",
+ "EventCode": "0x42",
+ "EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "PWT occupancy : Indicates how many page walks are outstanding at any point in time.",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Passing data to be written",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - cacheline complete : Passing data to be written : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Issuing final read or write of line",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - cacheline complete : Issuing final read or write of line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Request Ownership",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - cacheline complete : Request Ownership : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Writing line",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - cacheline complete : Writing line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Passing data to be written",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Passing data to be written : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer. : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Issuing final read or write of line",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Issuing final read or write of line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Processing response from IOMMU",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Processing response from IOMMU : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Issuing to IOMMU",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Issuing to IOMMU : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Request Ownership",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Request Ownership : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer. : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Writing line",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Writing line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer. : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - pass complete : Passing data to be written : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - pass complete : Issuing final read or write of line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - pass complete : Request Ownership : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Writing line",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - pass complete : Writing line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Symbol Times on Link",
+ "EventCode": "0x82",
+ "EventName": "UNC_IIO_SYMBOL_TIMES",
+ "PerPkg": "1",
+ "PublicDescription": "Symbol Times on Link : Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+        "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 6",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+        "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+        "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 7",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+        "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the mesh to PCI (M2P) : Counts the number of uclks in the M3 uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Local",
+        "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.PMM_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : PMM Local : Counts the number of cycles either the local or incoming distress signals are asserted. : If the CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : PMM Remote",
+        "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.PMM_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : PMM Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : If another CHA TOR has too many PMM transactions, this signal will throttle outgoing MS2IDI traffic",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+        "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts the number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+        "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts the number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+        "PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "EventCode": "0xb9",
+ "EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "EventCode": "0xb9",
+ "EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "PerPkg": "1",
+        "PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "PerPkg": "1",
+        "PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "PerPkg": "1",
+        "PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "PerPkg": "1",
+        "PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "PerPkg": "1",
+        "PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "PerPkg": "1",
+        "PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "EventCode": "0xe6",
+ "EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "EventCode": "0xe6",
+ "EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : All",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : All",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCS",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : All",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - DRS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCB",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - DRS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCB",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - DRS",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCB",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCS",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - DRS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCB",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - DRS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCB",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - DRS",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCB",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCS",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - DRS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCB",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - DRS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCB",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - DRS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xae",
+ "EventName": "UNC_M2P_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.UPI_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.UPI_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PMM",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2P_TxC_CREDITS.PMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AD_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AD_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AK_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AK_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.BL_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.BL_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AD_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AD_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.BL_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.BL_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "EventCode": "0x9e",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "EventCode": "0x9e",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "EventCode": "0xb3",
+ "EventName": "UNC_M2P_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "EventCode": "0xb3",
+ "EventName": "UNC_M2P_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
new file mode 100644
index 000000000000..814d9599474d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-memory.json
@@ -0,0 +1,1548 @@
+[
+ {
+ "BriefDescription": "DRAM Activate Count : All Activates",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Activate Count : All Activates : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : Activate due to Bypass",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Activate Count : Activate due to Bypass : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS commands issued",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0x3f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM Read CAS commands, w/ and w/o auto-pre, issued on this channel. This includes underfills.",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre : DRAM RD_CAS and WR_CAS Commands : Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with explicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM underfill read CAS commands issued",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total of DRAM Read CAS commands issued due to an underfill",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM write CAS commands issued",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM Write CAS commands issued, w/ and w/o auto-pre, on this channel.",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for the Memory Controller",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "imc_free_running"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge All Commands : Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.OPPORTUNISTIC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Half clockticks for IMC",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PARITY_ERRORS",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M_PARITY_ERRORS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.RD",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.TOTAL",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.TOTAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.WR",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : All",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Commands : All : Counts all commands issued to PMM",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Misc Commands (error, flow ACKs)",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.MISC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Misc GNTs",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.MISC_GNT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Reads - RPQ",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RD",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Commands : Reads - RPQ : Counts read requests issued to the PMM RPQ",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : RPQ GNTs",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.RPQ_GNTS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Underfill reads",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.UFILL_RD",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Commands : Underfill reads : Counts underfill read commands, due to a partial write, issued to PMM",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Underfill GNTs",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WPQ_GNTS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands : Writes",
+ "EventCode": "0xEA",
+ "EventName": "UNC_M_PMM_CMD1.WR",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Commands : Writes : Counts write commands issued to PMM",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Expected No data packet (ERID matched NDP encoding)",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.NODATA_EXP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Unexpected No data packet (ERID matched a Read, but data was a NDP)",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.NODATA_UNEXP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Opportunistic Reads",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.OPP_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : ECC Errors",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ECC_ERROR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : ERID detectable parity error",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ERID_ERROR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.PMM_ERID_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Read Requests - Slot 0",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.REQS_SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Commands - Part 2 : Read Requests - Slot 1",
+ "EventCode": "0xEB",
+ "EventName": "UNC_M_PMM_CMD2.REQS_SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Cycles Full",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M_PMM_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Cycles Not Empty",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M_PMM_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Queue Inserts",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Read Queue Inserts : Counts number of read requests allocated in the PMM Read Pending Queue. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Cycles Full",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Cycles Not Empty",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PMM_WPQ_FLUSH",
+ "EventCode": "0xe8",
+ "EventName": "UNC_M_PMM_WPQ_FLUSH",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PMM_WPQ_FLUSH_CYC",
+ "EventCode": "0xe9",
+ "EventName": "UNC_M_PMM_WPQ_FLUSH_CYC",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Queue Inserts",
+ "EventCode": "0xE7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Queue Inserts : Counts number of write requests allocated in the PMM Write Pending Queue.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
+ "EventCode": "0x85",
+ "EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "PerPkg": "1",
+ "PublicDescription": "Channel PPD Cycles : Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clock-Enabled Self-Refresh",
+ "EventCode": "0x43",
+ "EventName": "UNC_M_POWER_SELF_REFRESH",
+ "PerPkg": "1",
+ "PublicDescription": "Clock-Enabled Self-Refresh : Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x1c",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to page miss",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to page miss : Counts the number of DRAM Precharge commands sent on this channel. : Pages Misses are due to precharges from bank scheduler (rd/wr requests)",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to page table : Counts the number of DRAM Precharge commands sent on this channel. : Precharges from Page Table",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to read : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from read bank scheduler",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to write : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from write bank scheduler",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Full",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_RDB_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Not Empty",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NOT_EMPTY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Occupancy",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M_RDB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "EventCode": "0x15",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Scoreboard Accesses Accepted",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.FMRD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.FMWR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Write Accepts",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Write Rejects",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.NMRD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.NMWR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : FM read completions",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : FM write completions",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Read Accepts",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Read Rejects",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Scoreboard Accesses Rejected",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.REJECTS",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : NM read completions",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : NM write completions",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Alloc",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.ALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Dealloc",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_RD_STARVED",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FMRD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FMTGRWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.FM_WR_STARVED",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FMWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write Starved",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FM_RD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write Starved",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read Starved",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.FM_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.NM_RD_STARVED",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.NMRD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_CANARY.NM_WR_STARVED",
+ "Deprecated": "1",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.NMWR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Valid",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.NM_RD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read Starved",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.NM_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Reject",
+ "EventCode": "0xD9",
+ "EventName": "UNC_M_SB_CANARY.VLD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Full",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M_SB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Not-Empty",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M_SB_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Block region reads",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Block region writes",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Persistent Mem reads",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Persistent Mem writes",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Reads",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Writes",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M_SB_INSERTS.WRS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Block region reads",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Block region writes",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Persistent Mem reads",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Persistent Mem writes",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Reads",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : All",
+ "EventCode": "0xDA",
+ "EventName": "UNC_M_SB_PREF_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : DDR4",
+ "EventCode": "0xDA",
+ "EventName": "UNC_M_SB_PREF_INSERTS.DDR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : Persistent Mem",
+ "EventCode": "0xDA",
+ "EventName": "UNC_M_SB_PREF_INSERTS.PMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : All",
+ "EventCode": "0xDB",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : DDR4",
+ "EventCode": "0xDB",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "Deprecated": "1",
+ "EventCode": "0xdb",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.PMEM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : Persistent Mem",
+ "EventCode": "0xdb",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.CANARY",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.DDR_EARLY_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : FM requests rejected due to full address conflict",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : NM requests rejected due to set conflict",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : Patrol requests rejected due to set conflict",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_RD",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.FM_WR",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FMWR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read - Set",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write - Set",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.NM_RD",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_ALLOC.NM_WR",
+ "Deprecated": "1",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NMWR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Set",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write - Set",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FMRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FMTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FMWR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read - Set",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write - Set",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NMRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "Deprecated": "1",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NMWR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Set",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write - Set",
+ "EventCode": "0xDE",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_RD",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_TGR",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.FM_WR",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FMWR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.NM_RD",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.NMRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M_SB_STRV_OCC.NM_WR",
+ "Deprecated": "1",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.NMWR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write",
+ "EventCode": "0xD8",
+ "EventName": "UNC_M_SB_STRV_OCC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.NEW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.OCC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "EventCode": "0xDD",
+ "EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Hit in Near Memory Cache",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Miss, no data in this line",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Miss, existing data may be evicted to Far Memory",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.MISS_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Read Hit in Near Memory Cache",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.NM_RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag Check : Write Hit in Near Memory Cache",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M_TAGCHK.NM_WR_HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Full Cycles : Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x16",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Full Cycles : Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Not Empty : Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Not Empty : Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ }
+]
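
The WPQ occupancy and insert descriptions above boil down to Little's law: dividing the accumulated occupancy by the not-empty cycle count gives the average queue depth while the WPQ is busy, and dividing it by the number of allocations gives the average residency per write (which, as the descriptions note, is queueing time in the iMC, not end-to-end write latency). A minimal Python sketch of that arithmetic follows; the counter values are hypothetical, and the UNC_M_WPQ_CYCLES_NE.PCH0 name for the not-empty event is assumed from the description above rather than quoted from the file.

# Hypothetical raw counts, standing in for what a tool like perf stat
# would report for the iMC events defined in this file.
wpq_occupancy_sum = 48_000_000  # UNC_M_WPQ_OCCUPANCY_PCH0: occupancy accumulated each cycle
wpq_not_empty_cyc = 6_000_000   # assumed UNC_M_WPQ_CYCLES_NE.PCH0: cycles with >= 1 entry
wpq_inserts       = 1_500_000   # UNC_M_WPQ_INSERTS.PCH0: allocations into the queue

# Average depth while the queue holds something (occupancy / not-empty cycles).
avg_depth_busy = wpq_occupancy_sum / wpq_not_empty_cyc
# Average residency per write in DCLK cycles (occupancy / inserts, Little's law).
avg_residency  = wpq_occupancy_sum / wpq_inserts

print(f"average WPQ depth while non-empty: {avg_depth_busy:.1f} entries")
print(f"average WPQ residency per write:   {avg_residency:.1f} DCLK cycles")
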
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
new file mode 100644
index 000000000000..ee4dac6fc797
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-power.json
@@ -0,0 +1,204 @@
+[
+ {
+ "BriefDescription": "Clockticks of the power control unit (PCU)",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the power control unit (PCU) : The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 0 Cycles : Cycles spent in phase-shedding power state 0",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 1 Cycles : Cycles spent in phase-shedding power state 1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 2 Cycles : Cycles spent in phase-shedding power state 2",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 3 Cycles : Cycles spent in phase-shedding power state 3",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX256 Frequency Clipping",
+ "EventCode": "0x49",
+ "EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX512 Frequency Clipping",
+ "EventCode": "0x4a",
+ "EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "EventCode": "0x04",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "EventCode": "0x05",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "IO P Limit Strongest Lower Limit Cycles : Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Memory Phase Shedding Cycles : Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C0 : Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C3 : Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "EventCode": "0x06",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C0 and C1",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C-State : C0 and C1 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C-State : C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C6 and C7",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C-State : C6 and C7 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0x0A",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x09",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Total Core C State Transition Cycles : Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
+ "Unit": "PCU"
+ }
+]
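
Because the PCU description above pins UNC_P_CLOCKTICKS to a fixed 1 GHz pclk, any of the cycle-counting events in this file can be converted into wall-clock time or residency fractions by plain division. A small Python sketch, assuming the event names from this file and purely illustrative counts:

PCLK_HZ = 1_000_000_000  # fixed 1 GHz pclk, per the UNC_P_CLOCKTICKS description

# Hypothetical per-package counts gathered over one measurement interval.
pclk_ticks  = 5_000_000_000  # UNC_P_CLOCKTICKS
c0_cycles   = 3_000_000_000  # UNC_P_PKG_RESIDENCY_C0_CYCLES
c6_cycles   = 1_500_000_000  # UNC_P_PKG_RESIDENCY_C6_CYCLES
prochot_ext = 20_000_000     # UNC_P_PROCHOT_EXTERNAL_CYCLES

interval_s = pclk_ticks / PCLK_HZ  # the pclk count doubles as elapsed wall time
print(f"interval:             {interval_s:.2f} s")
print(f"package C0 residency: {100.0 * c0_cycles / pclk_ticks:.1f} %")
print(f"package C6 residency: {100.0 * c6_cycles / pclk_ticks:.1f} %")
print(f"external PROCHOT:     {1e3 * prochot_ext / PCLK_HZ:.1f} ms of throttling")
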
diff --git a/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
new file mode 100644
index 000000000000..e3227c7f2fe9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/icelakex/virtual-memory.json
@@ -0,0 +1,181 @@
+[
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CounterMask": "1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CounterMask": "1",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "STLB flush attempts",
+ "EventCode": "0xBD",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
+ }
+]
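
The DTLB and ITLB walk events above pair naturally into derived metrics: WALK_PENDING over WALK_COMPLETED approximates the average duration of a completed page walk, WALK_ACTIVE over core cycles gives the share of time a walk is in flight, and STLB_HIT against WALK_COMPLETED shows how many first-level misses the STLB absorbs. A hypothetical Python sketch for the demand-load side; the core-cycle count is assumed to come from a separate core event, and all numbers are illustrative.

# Hypothetical counts for one workload, using the demand-load DTLB events above.
walk_pending   = 90_000_000     # DTLB_LOAD_MISSES.WALK_PENDING: walks outstanding, summed per cycle
walk_completed = 2_000_000      # DTLB_LOAD_MISSES.WALK_COMPLETED
walk_active    = 60_000_000     # DTLB_LOAD_MISSES.WALK_ACTIVE: cycles with at least one walk in flight
stlb_hit       = 8_000_000      # DTLB_LOAD_MISSES.STLB_HIT
core_cycles    = 4_000_000_000  # assumed: unhalted core cycles from a core PMU event

avg_walk_cycles  = walk_pending / walk_completed           # average length of a completed walk
walk_cycle_share = walk_active / core_cycles               # fraction of cycles with a walk in flight
stlb_catch_rate  = stlb_hit / (stlb_hit + walk_completed)  # DTLB load misses resolved by the STLB

print(f"avg demand-load page walk: {avg_walk_cycles:.1f} cycles")
print(f"walk-active cycle share:   {100.0 * walk_cycle_share:.2f} %")
print(f"STLB catch rate:           {100.0 * stlb_catch_rate:.1f} %")
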
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
index 5f6cb2abc384..6ddc7d1c61d5 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/cache.json
@@ -1,1102 +1,867 @@
[
{
- "PublicDescription": "Demand Data Read requests that hit L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts the number of lines brought into the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1D miss oustandings duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.)",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "PublicDescription": "Not rejected writebacks that missed LLC.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "L2 cache lines filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "L2 cache lines in E state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "L2 cache lines in I state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "L2 cache lines in S state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by demand",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "PublicDescription": "Clean L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Dirty L2 cache lines filling the L2",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "PublicDescription": "Dirty L2 cache lines filling the L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "PublicDescription": "Clean L2 cache lines evicted by the MLC prefetcher.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "PublicDescription": "Dirty L2 cache lines evicted by the MLC prefetcher.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts all L2 code requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "RFO requests that hit L2 cache.",
+ "BriefDescription": "Demand Data Read requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts all L2 HW prefetcher requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc0"
},
{
- "PublicDescription": "Counts all L2 store RFO requests.",
+ "BriefDescription": "RFO requests to L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
"EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts all L2 store RFO requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "BriefDescription": "L2 cache misses when fetching instructions",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts all L2 code requests.",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Demand Data Read requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_RQSTS.PF_HIT",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "L2_RQSTS.PF_MISS",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests.",
+ "BriefDescription": "RFO requests that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.ALL_PF",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "RFO requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "RFOs that miss cache lines.",
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that miss cache lines",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "RFOs that hit cache lines in M state.",
+ "BriefDescription": "RFOs that access cache lines in any state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "PublicDescription": "RFOs that access cache lines in any state.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in M state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "PublicDescription": "RFOs that access cache lines in any state.",
+ "BriefDescription": "RFOs that hit cache lines in M state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "PublicDescription": "RFOs that hit cache lines in M state.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that access cache lines in any state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Not rejected writebacks that missed LLC.",
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "BriefDescription": "RFOs that miss cache lines",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "PublicDescription": "RFOs that miss cache lines.",
"SampleAfterValue": "200003",
- "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "Any MLC or LLC HW prefetch accessing L2, including rejects.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "Transactions accessing L2 pipe.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "L2 cache accesses when fetching instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed LLC",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to LLC",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss oustandings duration in cycles",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts the number of lines brought into the L1 data cache.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore outstanding Demand Data Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand Data Read requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "RFO requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles in which the L1D is locked.",
+ "BriefDescription": "Cycles when L1D is locked",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "Cycles in which the L1D is locked.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Demand data read requests sent to uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand code read requests sent to uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cases when offcore requests buffer cannot take more entries for core.",
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
+ "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "All retired load uops. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
+ "BriefDescription": "All retired store uops. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Retired load uops with locked access. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Retired load uops whose data source was local memory (cross-socket snoop not needed or missed).",
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops which data sources missed LLC but serviced from local dram.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Demand Data Read requests that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "RFO requests that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_TRANS.RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x42"
},
{
- "PublicDescription": "L2 cache accesses when fetching instructions.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_TRANS.CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache accesses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
},
{
- "PublicDescription": "Any MLC or LLC HW prefetch accessing L2, including rejects.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_TRANS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 or LLC HW prefetches that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
},
{
- "PublicDescription": "L1D writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_TRANS.L1D_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1D writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "L2 fill requests that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_TRANS.L2_FILL",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 fill requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cacheable and noncachaeble code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Demand code read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "L2 writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand data read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Transactions accessing L2 pipe.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Transactions accessing L2 pipe",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "L2 cache lines in I state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_IN.I",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in I state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "L2 cache lines in S state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_IN.S",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in S state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "L2 cache lines in E state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_IN.E",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in E state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "L2 cache lines filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "L2_LINES_IN.ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Clean L2 cache lines evicted by demand.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by demand",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Dirty L2 cache lines evicted by demand.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by demand",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Clean L2 cache lines evicted by the MLC prefetcher.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.PF_CLEAN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Dirty L2 cache lines evicted by the MLC prefetcher.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_LINES_OUT.PF_DIRTY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Offcore outstanding Demand Data Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Dirty L2 cache lines filling the L2.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "L2_LINES_OUT.DIRTY_ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines filling the L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Split locks in SQ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts all demand & prefetch code reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x000105B3",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x000107F7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all writebacks from the core to the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all writebacks from the core to the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x18000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses. It also includes L2 hints sent to LLC to keep a line from being evicted out of the core caches",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand rfo's",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts non-temporal stores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand rfo's",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses. It also includes L2 hints sent to LLC to keep a line from being evicted out of the core caches",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x000105B3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts non-temporal stores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x000107F7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Split locks in SQ",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
index 950b62c0908e..87c958213c7a 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/floating-point.json
@@ -1,151 +1,135 @@
[
{
- "PublicDescription": "Counts number of X87 uops executed.",
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FP_COMP_OPS_EXE.X87",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
+ "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1e"
},
{
- "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "Number of SIMD FP assists due to output values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "PublicDescription": "Number of X87 FP assists due to input values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "Number of X87 FP assists due to output values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts number of SSE* or AVX-128 double precision FP scalar uops executed.",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
+ "PublicDescription": "Counts number of SSE* or AVX-128 double precision FP scalar uops executed.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Counts 256-bit packed single-precision floating-point instructions.",
- "EventCode": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "number of GSSE-256 Computational FP single precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts 256-bit packed double-precision floating-point instructions.",
- "EventCode": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "PublicDescription": "Counts number of X87 uops executed.",
"SampleAfterValue": "2000003",
- "BriefDescription": "number of AVX-256 Computational FP double precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of assists associated with 256-bit AVX store operations.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "OTHER_ASSISTS.AVX_STORE",
+ "PublicDescription": "Number of assists associated with 256-bit AVX store operations.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of X87 FP assists due to output values.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Number of X87 FP assists due to input values.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ASSIST.X87_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of SIMD FP assists due to output values.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of SIMD FP assists due to input values.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "number of AVX-256 Computational FP double precision uops issued this cycle",
+ "EventCode": "0x11",
+ "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "PublicDescription": "Counts 256-bit packed double-precision floating-point instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1e",
- "EventName": "FP_ASSIST.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "number of GSSE-256 Computational FP single precision uops issued this cycle",
+ "EventCode": "0x11",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "PublicDescription": "Counts 256-bit packed single-precision floating-point instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
index efaa949ead31..89004a6c9ed1 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/frontend.json
@@ -1,305 +1,255 @@
[
{
- "PublicDescription": "Counts cycles the IDQ is empty.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "IDQ.EMPTY",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "Number of DSB to MITE switches.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Cycles DSB to MITE switches caused delay.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "PublicDescription": "DSB Fill encountered > 3 DSB lines.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "BriefDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFETCH_STALL",
+ "PublicDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes UC accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts cycles MITE is delivered at least one uops. Set Cmask = 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "Counts cycles the IDQ is empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts cycles MITE is delivered at least one uops. Set Cmask = 1.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "Number of uops delivered to IDQ from any path.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "PublicDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of uops delivered to IDQ from any path.",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "IDQ.MITE_ALL_UOPS",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes UC accesses.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction cache, streaming buffer and victim cache misses",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "PublicDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE.IFETCH_STALL",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "Count issue pipeline slots where no uop was delivered from the front end to the back end when there is no back-end stall.",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Count issue pipeline slots where no uop was delivered from the front end to the back end when there is no back-end stall.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
"CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "SampleAfterValue": "2000003",
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of DSB to MITE switches.",
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DSB2MITE_SWITCHES.COUNT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles DSB to MITE switches caused delay.",
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "DSB Fill encountered > 3 DSB lines.",
- "EventCode": "0xAC",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
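
# Context (not part of this series): the entries above describe raw hardware events by
# EventCode, UMask, CounterMask and Invert. A minimal sketch of how one such event,
# IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK (EventCode 0x9C, UMask 0x1, CounterMask 1,
# Invert 1), could be counted directly via perf_event_open(2) follows; the raw config
# bit layout mirrors the Intel PerfEvtSel format and the perf tool would express the
# same event as cpu/event=0x9c,umask=0x1,cmask=1,inv/. Error handling and the measured
# workload are placeholders.
#
#   #include <linux/perf_event.h>
#   #include <sys/syscall.h>
#   #include <sys/ioctl.h>
#   #include <unistd.h>
#   #include <stdint.h>
#   #include <stdio.h>
#
#   static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
#                               int cpu, int group_fd, unsigned long flags)
#   {
#           return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
#   }
#
#   int main(void)
#   {
#           struct perf_event_attr attr = {
#                   .type = PERF_TYPE_RAW,
#                   .size = sizeof(attr),
#                   /* event=0x9c, umask=0x1, inv (bit 23), cmask=1 (bits 24-31) */
#                   .config = 0x9c | (0x1 << 8) | (1ULL << 23) | (1ULL << 24),
#                   .disabled = 1,
#           };
#           uint64_t count;
#           int fd = perf_event_open(&attr, 0, -1, -1, 0);
#
#           if (fd < 0)
#                   return 1;
#           ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
#           /* ... workload under measurement would run here ... */
#           ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
#           read(fd, &count, sizeof(count));
#           printf("CYCLES_FE_WAS_OK: %llu\n", (unsigned long long)count);
#           close(fd);
#           return 0;
#   }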
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
index 28e25447d3ef..5247f69c13b6 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
@@ -1,340 +1,996 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5) / (3 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(60 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS))) + 43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS)))) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS))) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * min(CPU_CLK_UNHALTED.THREAD, IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "ICACHE.IFETCH_STALL / tma_info_clks - tma_itlb_misses",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
},
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "1 / (tma_fp_scalar + tma_fp_vector)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
},
{
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
},
{
- "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Average number of parallel requests to external memory",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_parallel_requests",
+ "PublicDescription": "Average number of parallel requests to external memory. Accounts for all requests"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_request_latency"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_core_clks",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "29 * (MEM_LOAD_UOPS_RETIRED.LLC_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_RETIRED.LLC_MISS))) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING)) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "13 * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS * FP_COMP_OPS_EXE.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
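Illustrative sketch (not part of the patch): the renamed bandwidth metrics above keep the form 64 * <line-fill event> / 1e9 / duration_time, only switching the literal 1000000000 to 1e9. The Python below shows how such an expression evaluates; the counter values are made up for illustration and do not come from the patch.

  # Sketch of evaluating a fill-bandwidth metric such as tma_info_l1d_cache_fill_bw.
  def l1d_fill_bw_gbps(l1d_replacement: int, duration_time_s: float) -> float:
      """64 bytes per L1D line fill, scaled to GB/s (1e9 bytes)."""
      return 64 * l1d_replacement / 1e9 / duration_time_s

  # Hypothetical counts: 150M L1D line replacements over a 1-second interval.
  print(l1d_fill_bw_gbps(l1d_replacement=150_000_000, duration_time_s=1.0))  # ~9.6 GB/s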
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
index a74d54f56192..fd1fe491c577 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/memory.json
@@ -1,236 +1,182 @@
[
{
- "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Speculative cache-line split Store-address uops dispatched to L1D.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xBE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.LLC_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of any page walk that had a miss in LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 4.",
+ "BriefDescription": "Loads with latency value being above 128",
"EventCode": "0xCD",
- "MSRValue": "0x4",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
+ "MSRValue": "0x80",
"PEBS": "2",
- "PublicDescription": "Loads with latency value being above 8.",
- "EventCode": "0xCD",
- "MSRValue": "0x8",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "PublicDescription": "Loads with latency value being above 128.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 16.",
+ "BriefDescription": "Loads with latency value being above 16",
"EventCode": "0xCD",
- "MSRValue": "0x10",
- "Counter": "3",
- "UMask": "0x1",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 16.",
"SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads with latency value being above 256",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
"PEBS": "2",
- "PublicDescription": "Loads with latency value being above 32.",
+ "PublicDescription": "Loads with latency value being above 256.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Loads with latency value being above 32",
"EventCode": "0xCD",
- "MSRValue": "0x20",
- "Counter": "3",
- "UMask": "0x1",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 32.",
"SampleAfterValue": "100007",
- "BriefDescription": "Loads with latency value being above 32",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 64.",
+ "BriefDescription": "Loads with latency value being above 4",
"EventCode": "0xCD",
- "MSRValue": "0x40",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 4.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 128.",
+ "BriefDescription": "Loads with latency value being above 512",
"EventCode": "0xCD",
- "MSRValue": "0x80",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 512.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 256.",
+ "BriefDescription": "Loads with latency value being above 64",
"EventCode": "0xCD",
- "MSRValue": "0x100",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 64.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 512.",
+ "BriefDescription": "Loads with latency value being above 8",
"EventCode": "0xCD",
- "MSRValue": "0x200",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 8.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
"EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "PEBS": "2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "Speculative cache-line split Store-address uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the LLC and the data returned from dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the LLC and the data returned from dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3004003f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3004003f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts LLC replacements",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x6004001b3",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand code reads that miss the LLC and the data returned from dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads that miss the LLC and the data returned from dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x6004001b3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts LLC replacements",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of any page walk that had a miss in LLC.",
+ "EventCode": "0xBE",
+ "EventName": "PAGE_WALKS.LLC_MISS",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
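Illustrative sketch (not part of the patch): in the MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above, the cycle threshold is encoded directly as MSRValue for MSR 0x3F6 (e.g. GT_128 -> 0x80 = 128), with lower sample-after values for rarer, higher-latency loads. The Python below just restates that mapping using the thresholds and SampleAfterValue strings copied from the JSON above.

  # Threshold (cycles) -> SampleAfterValue, as listed in the entries above.
  THRESHOLDS = {4: "100003", 8: "50021", 16: "20011", 32: "100007",
                64: "2003", 128: "1009", 256: "503", 512: "101"}

  for cycles, sample_after in THRESHOLDS.items():
      # The MSRValue is simply the threshold in hex.
      print(f"MEM_TRANS_RETIRED.LOAD_LATENCY_GT_{cycles}: "
            f"MSRValue={hex(cycles)}, SampleAfterValue={sample_after}")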
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/other.json b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
index 4eb83ee40412..e80e99d064ba 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/other.json
@@ -1,44 +1,36 @@
[
{
- "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPL_CYCLES.RING0",
+ "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "CounterMask": "1",
"EdgeDetect": "1",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0_TRANS",
+ "PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
"SampleAfterValue": "100007",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
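Illustrative sketch (not part of the patch): the CPL_CYCLES events above split unhalted core cycles by privilege level (ring 0 vs. rings 1-3), so a kernel-mode cycle fraction can be derived from the two counts as below. Counter values are made up for illustration.

  # Sketch of a ring-0 cycle fraction from CPL_CYCLES.RING0 and CPL_CYCLES.RING123.
  def ring0_fraction(ring0_cycles: int, ring123_cycles: int) -> float:
      """Fraction of unhalted core cycles spent in ring 0 (kernel mode)."""
      total = ring0_cycles + ring123_cycles
      return ring0_cycles / total if total else 0.0

  # Hypothetical counts: 120M kernel-mode cycles out of 1G unhalted cycles.
  print(ring0_fraction(ring0_cycles=120_000_000, ring123_cycles=880_000_000))  # 0.12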
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
index 2a0aad91d83d..30a3da9cd22b 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json
@@ -1,1305 +1,1030 @@
[
{
- "Counter": "Fixed counter 0",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 0"
- },
- {
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "Counter": "Fixed counter 2",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "False dependencies in MOB due to partial compare on address.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "BriefDescription": "Divide operations executed",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EdgeDetect": "1",
- "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of flags-merge uops allocated. Such uops adds delay.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_ISSUED.FLAGS_MERGE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of flags-merge uops being allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_ISSUED.SINGLE_MUL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles that the divider is active, includes INT and FP. Set 'edge =1, cmask=1' to count the number of divides.",
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.FPU_DIV_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divider is busy executing divide operations",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Divide operations executed.",
"EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EdgeDetect": "1",
"EventName": "ARITH.FPU_DIV",
+ "PublicDescription": "Divide operations executed.",
"SampleAfterValue": "100003",
- "BriefDescription": "Divide operations executed",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "BriefDescription": "Cycles when divider is busy executing divide operations",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "PublicDescription": "Cycles that the divider is active, includes INT and FP. Set 'edge =1, cmask=1' to count the number of divides.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "Speculative and retired macro-conditional branches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "PublicDescription": "Cycles the RS is empty for the thread.",
- "EventCode": "0x5E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "PublicDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "BriefDescription": "Speculative and retired direct near calls",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "PublicDescription": "Speculative and retired direct near calls.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "PublicDescription": "Stall cycles due to IQ is full.",
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ILD_STALL.IQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stall cycles because IQ is full",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "PublicDescription": "Not taken macro-conditional branches.",
+ "BriefDescription": "Not taken macro-conditional branches",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "Not taken macro-conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Taken speculative and retired macro-conditional branches.",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "Taken speculative and retired macro-conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "PublicDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "PublicDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
- "PublicDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "BriefDescription": "Taken speculative and retired direct near calls",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "PublicDescription": "Taken speculative and retired direct near calls.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x90"
},
{
- "PublicDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "PublicDescription": "Taken speculative and retired direct near calls.",
+ "BriefDescription": "Taken speculative and retired indirect calls",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "Taken speculative and retired indirect calls.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "PublicDescription": "Taken speculative and retired indirect calls.",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "Taken speculative and retired indirect branches with return mnemonic.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "PublicDescription": "Speculative and retired macro-conditional branches.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Branch instructions at retirement.",
+ "SampleAfterValue": "400009"
},
{
- "PublicDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Speculative and retired indirect branches excluding calls and returns.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "Number of far branches retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Speculative and retired direct near calls.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "PublicDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "Speculative and retired mispredicted macro conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
- "PublicDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "Mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
- "PublicDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "BriefDescription": "Speculative mispredicted indirect branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
- "PublicDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "Taken speculative and retired mispredicted macro conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "PublicDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "PublicDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "Taken speculative and retired mispredicted indirect calls.",
"SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "PublicDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "SampleAfterValue": "400009"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles Allocation is stalled due to Resource Related reason.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "Cycles stalled due to no store buffers available (not including draining form sync).",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Cycles with pending L2 miss loads. Set AnyThread to count per core.",
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L2 cache miss loads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "CounterMask": "1",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "PublicDescription": "Cycles with pending L2 miss loads. Set AnyThread to count per core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
+ "BriefDescription": "Cycles with pending memory loads.",
+ "CounterMask": "2",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending memory loads.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Total execution stalls.",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "PublicDescription": "Total execution stalls.",
"SampleAfterValue": "2000003",
- "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc"
},
{
- "PublicDescription": "Number of loads missed L2.",
+ "BriefDescription": "Execution stalls due to L1 data cache misses",
+ "CounterMask": "12",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls due to L2 cache misses.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc"
},
{
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x5"
},
{
+ "BriefDescription": "Execution stalls due to L2 cache misses.",
+ "CounterMask": "5",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "PublicDescription": "Number of loads missed L2.",
"SampleAfterValue": "2000003",
+ "UMask": "0x5"
+ },
+ {
"BriefDescription": "Execution stalls due to memory subsystem.",
"CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x6"
},
{
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x6"
},
{
- "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "BriefDescription": "Stall cycles because IQ is full",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "PublicDescription": "Stall cycles due to IQ is full.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls due to L1 data cache misses",
- "CounterMask": "12",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Number of instructions at retirement.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder",
+ "UMask": "0x3"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
"SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "False dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
"CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.THREAD",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Counts the number of executed AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Number of self-modifying-code machine clears detected.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Resource-related stall cycles",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "Cycles Allocation is stalled due to Resource Related reason.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Cycles stalled due to no store buffers available (not including draining form sync).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE",
+ "BriefDescription": "Count cases of saving new LBR",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Cycles the RS is empty for the thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of instructions at retirement.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "PEBS": "2",
- "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "AnyThread": "1",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "CounterHTOff": "1"
+ "UMask": "0xc"
},
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired uops.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of self-modifying-code machine clears detected.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of executed AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Branch instructions at retirement.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the number of not taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of far branches retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of flags-merge uops being allocated.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops allocated. Such uops adds delay.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired uops.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
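
These hunks mostly re-emit existing event objects: the JSON keys come out in alphabetical order, the per-event "Counter"/"CounterHTOff" fields are dropped, the list is sorted by "EventName", and the missing newline at end of file is added. A minimal Python sketch of that kind of normalization is shown below; the script and its file path are illustrative assumptions, not part of this patch.

import json

# Illustrative sketch (not part of the patch): normalize a pmu-events JSON file
# the same way the hunks above do -- drop the per-event Counter/CounterHTOff
# fields, sort events by EventName, emit keys alphabetically, end with a newline.
path = "tools/perf/pmu-events/arch/x86/ivybridge/pipeline.json"  # example path

with open(path) as f:
    events = json.load(f)

DROPPED = {"Counter", "CounterHTOff"}
events = [{k: v for k, v in ev.items() if k not in DROPPED} for ev in events]
events.sort(key=lambda ev: ev.get("EventName", ""))

with open(path, "w") as f:
    json.dump(events, f, indent=4, sort_keys=True)
    f.write("\n")
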
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json
new file mode 100644
index 000000000000..be9a3ed1a940
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json
@@ -0,0 +1,202 @@
+[
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
+ "PerPkg": "1",
+ "UMask": "0x86",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
+ "PerPkg": "1",
+ "UMask": "0x8f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_ES",
+ "PerPkg": "1",
+ "UMask": "0x46",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_I",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_M",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_MESI",
+ "PerPkg": "1",
+ "UMask": "0x4f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
+ "PerPkg": "1",
+ "UMask": "0x16",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
+ "PerPkg": "1",
+ "UMask": "0x1f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_I",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
+ "PerPkg": "1",
+ "UMask": "0x2f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json
new file mode 100644
index 000000000000..c3252c094a9c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json
@@ -0,0 +1,75 @@
+[
+ {
+ "BriefDescription": "Cycles weighted by number of requests pending in Coherency Tracker.",
+ "EventCode": "0x83",
+ "EventName": "UNC_ARB_COH_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of requests allocated in Coherency Tracker.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts cycles weighted by the number of requests waiting for data returning from the memory controller. Accounts for coherent and non-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least half of the requests outstanding are waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "CounterMask": "10",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_OVER_HALF_FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "CounterMask": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC evictions allocated.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.EVICTIONS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts the number of allocated write entries, include full, partial, and LLC evictions.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "ARB"
+ }
+]
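
The two new files above split the client uncore events by PMU: the CBOX (L3 cache-slice) events land in uncore-cache.json and the ARB events in uncore-interconnect.json, while the old filter-style entries are replaced by concrete UMask combinations. As a rough sanity check, a small Python sketch like the one below (paths relative to the kernel source tree; the script itself is not part of the patch) can list which unit each event programs.

import json
from collections import defaultdict

# Rough sketch, not part of the patch: list the events in the two new uncore
# files grouped by the PMU unit they belong to (paths assume a kernel tree).
files = [
    "tools/perf/pmu-events/arch/x86/ivybridge/uncore-cache.json",
    "tools/perf/pmu-events/arch/x86/ivybridge/uncore-interconnect.json",
]

by_unit = defaultdict(list)
for path in files:
    with open(path) as f:
        for event in json.load(f):
            by_unit[event.get("Unit", "unknown")].append(event["EventName"])

for unit, names in sorted(by_unit.items()):
    print(f"{unit}: {len(names)} events")
    for name in sorted(names):
        print("  " + name)

On a machine whose kernel exposes these PMUs (typically as uncore_cbox_* and uncore_arb), the resulting events can then be requested by name through perf once the pmu-events tables are rebuilt.
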
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/uncore.json b/tools/perf/pmu-events/arch/x86/ivybridge/uncore.json
deleted file mode 100644
index 42c70eed05a2..000000000000
--- a/tools/perf/pmu-events/arch/x86/ivybridge/uncore.json
+++ /dev/null
@@ -1,314 +0,0 @@
-[
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x01",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS",
- "BriefDescription": "A snoop misses in some processor core.",
- "PublicDescription": "A snoop misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x02",
- "EventName": "UNC_CBO_XSNP_RESPONSE.INVAL",
- "BriefDescription": "A snoop invalidates a non-modified line in some processor core.",
- "PublicDescription": "A snoop invalidates a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x04",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT",
- "BriefDescription": "A snoop hits a non-modified line in some processor core.",
- "PublicDescription": "A snoop hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x08",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM",
- "BriefDescription": "A snoop hits a modified line in some processor core.",
- "PublicDescription": "A snoop hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x10",
- "EventName": "UNC_CBO_XSNP_RESPONSE.INVAL_M",
- "BriefDescription": "A snoop invalidates a modified line in some processor core.",
- "PublicDescription": "A snoop invalidates a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x20",
- "EventName": "UNC_CBO_XSNP_RESPONSE.EXTERNAL_FILTER",
- "BriefDescription": "Filter on cross-core snoops initiated by this Cbox due to external snoop request.",
- "PublicDescription": "Filter on cross-core snoops initiated by this Cbox due to external snoop request.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x40",
- "EventName": "UNC_CBO_XSNP_RESPONSE.XCORE_FILTER",
- "BriefDescription": "Filter on cross-core snoops initiated by this Cbox due to processor core memory request.",
- "PublicDescription": "Filter on cross-core snoops initiated by this Cbox due to processor core memory request.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x80",
- "EventName": "UNC_CBO_XSNP_RESPONSE.EVICTION_FILTER",
- "BriefDescription": "Filter on cross-core snoops initiated by this Cbox due to LLC eviction.",
- "PublicDescription": "Filter on cross-core snoops initiated by this Cbox due to LLC eviction.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x01",
- "EventName": "UNC_CBO_CACHE_LOOKUP.M",
- "BriefDescription": "LLC lookup request that access cache and found line in M-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x02",
- "EventName": "UNC_CBO_CACHE_LOOKUP.E",
- "BriefDescription": "LLC lookup request that access cache and found line in E-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in E-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x04",
- "EventName": "UNC_CBO_CACHE_LOOKUP.S",
- "BriefDescription": "LLC lookup request that access cache and found line in S-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x08",
- "EventName": "UNC_CBO_CACHE_LOOKUP.I",
- "BriefDescription": "LLC lookup request that access cache and found line in I-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x10",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_FILTER",
- "BriefDescription": "Filter on processor core initiated cacheable read requests.",
- "PublicDescription": "Filter on processor core initiated cacheable read requests.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x20",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_FILTER",
- "BriefDescription": "Filter on processor core initiated cacheable write requests.",
- "PublicDescription": "Filter on processor core initiated cacheable write requests.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x40",
- "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_FILTER",
- "BriefDescription": "Filter on external snoop requests.",
- "PublicDescription": "Filter on external snoop requests.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x80",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_REQUEST_FILTER",
- "BriefDescription": "Filter on any IRQ or IPQ initiated requests including uncacheable, non-coherent requests.",
- "PublicDescription": "Filter on any IRQ or IPQ initiated requests including uncacheable, non-coherent requests.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Counts cycles weighted by the number of requests waiting for data returning from the memory controller. Accounts for coherent and non-coherent requests initiated by IA cores, processor graphic units, or LLC.",
- "PublicDescription": "Counts cycles weighted by the number of requests waiting for data returning from the memory controller. Accounts for coherent and non-coherent requests initiated by IA cores, processor graphic units, or LLC.",
- "Counter": "0",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x81",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
- "BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
- "PublicDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x81",
- "UMask": "0x20",
- "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
- "BriefDescription": "Counts the number of allocated write entries, include full, partial, and LLC evictions.",
- "PublicDescription": "Counts the number of allocated write entries, include full, partial, and LLC evictions.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x81",
- "UMask": "0x80",
- "EventName": "UNC_ARB_TRK_REQUESTS.EVICTIONS",
- "BriefDescription": "Counts the number of LLC evictions allocated.",
- "PublicDescription": "Counts the number of LLC evictions allocated.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x83",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Cycles weighted by number of requests pending in Coherency Tracker.",
- "PublicDescription": "Cycles weighted by number of requests pending in Coherency Tracker.",
- "Counter": "0",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x84",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
- "BriefDescription": "Number of requests allocated in Coherency Tracker.",
- "PublicDescription": "Number of requests allocated in Coherency Tracker.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
- "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "Counter": "0,1",
- "CounterMask": "1",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_OVER_HALF_FULL",
- "BriefDescription": "Cycles with at least half of the requests outstanding are waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "PublicDescription": "Cycles with at least half of the requests outstanding are waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "Counter": "0,1",
- "CounterMask": "10",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x0",
- "UMask": "0x01",
- "EventName": "UNC_CLOCK.SOCKET",
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Counter": "Fixed",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x06",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ES",
- "BriefDescription": "LLC lookup request that access cache and found line in E-state or S-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in E-state or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- }
-]
\ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
index f243551b4d12..b97f15cb20fc 100644
--- a/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivybridge/virtual-memory.json
@@ -1,180 +1,144 @@
[
{
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "BriefDescription": "Page walk for a large page completed for Demand load.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_LOAD_MISSES.LARGE_PAGE_WALK_COMPLETED",
"SampleAfterValue": "100003",
+ "UMask": "0x88"
+ },
+ {
"BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x81"
},
{
- "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "EventCode": "0x5F",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts load operations that missed 1st level DTLB but hit the 2nd level.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
- "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "DTLB_LOAD_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walk for a large page completed for Demand load.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Miss in all TLB levels causes a page walk that completes of any page size (4K/2M/4M/1G).",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Miss in all TLB levels causes a page walk that completes of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles PMH is busy with this walk.",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "PublicDescription": "Cycles PMH is busy with this walk.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts load operations that missed 1st level DTLB but hit the 2nd level.",
- "EventCode": "0x5F",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Misses in all ITLB levels that cause page walks.",
+ "BriefDescription": "Completed page walks in ITLB due to STLB load misses for large pages",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "ITLB_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in ITLB due to STLB load misses for large pages.",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Misses in all ITLB levels that cause page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycle PMH is busy with a walk.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Completed page walks in ITLB due to STLB load misses for large pages.",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "ITLB_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Misses in all ITLB levels that cause completed page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Completed page walks in ITLB due to STLB load misses for large pages",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "Cycle PMH is busy with a walk.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Count number of STLB flush attempts.",
+ "BriefDescription": "STLB flush attempts",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Count number of STLB flush attempts.",
"SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/cache.json b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
index 6dad3ad6b102..c8f7d5e66504 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/cache.json
@@ -1,1260 +1,986 @@
[
{
- "PublicDescription": "Demand Data Read requests that hit L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts the number of lines brought into the L1 data cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1D miss outstanding duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.)",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "PublicDescription": "Not rejected writebacks that missed LLC.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "L2 cache lines filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "L2 cache lines in E state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "PublicDescription": "L2 cache lines in E state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 cache lines in I state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "PublicDescription": "L2 cache lines in I state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines in S state filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "PublicDescription": "L2 cache lines in S state filling L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by demand",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "PublicDescription": "Clean L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Dirty L2 cache lines evicted by demand",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "PublicDescription": "Dirty L2 cache lines evicted by demand.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Dirty L2 cache lines filling the L2",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "PublicDescription": "Dirty L2 cache lines filling the L2.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xa"
+ },
+ {
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "PublicDescription": "Clean L2 cache lines evicted by the MLC prefetcher.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "PublicDescription": "Dirty L2 cache lines evicted by the MLC prefetcher.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts all L2 code requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "RFO requests that hit L2 cache.",
+ "BriefDescription": "Demand Data Read requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts any demand and L1 HW prefetch data load requests to L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
+ "BriefDescription": "Requests from L2 hardware prefetchers",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts all L2 HW prefetcher requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc0"
},
{
- "PublicDescription": "Counts all L2 store RFO requests.",
+ "BriefDescription": "RFO requests to L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
"EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts all L2 store RFO requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Number of instruction fetches that hit the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
+ "BriefDescription": "L2 cache misses when fetching instructions",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Number of instruction fetches that missed the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts all L2 code requests.",
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Demand Data Read requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_RQSTS.PF_HIT",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that hit L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "L2_RQSTS.PF_MISS",
+ "PublicDescription": "Counts all L2 HW prefetcher requests that missed L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Counts all L2 HW prefetcher requests.",
+ "BriefDescription": "RFO requests that hit L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.ALL_PF",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "RFO requests that hit L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "RFOs that miss cache lines.",
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the number of store RFO requests that miss the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that miss cache lines",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "RFOs that hit cache lines in M state.",
+ "BriefDescription": "RFOs that access cache lines in any state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "PublicDescription": "RFOs that access cache lines in any state.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in M state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "PublicDescription": "RFOs that access cache lines in any state.",
+ "BriefDescription": "RFOs that hit cache lines in M state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "PublicDescription": "RFOs that hit cache lines in M state.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that access cache lines in any state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Not rejected writebacks that missed LLC.",
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "BriefDescription": "RFOs that miss cache lines",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "PublicDescription": "RFOs that miss cache lines.",
"SampleAfterValue": "200003",
- "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_PF",
+ "PublicDescription": "Any MLC or LLC HW prefetch accessing L2, including rejects.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "BriefDescription": "Transactions accessing L2 pipe",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "PublicDescription": "Transactions accessing L2 pipe.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "BriefDescription": "L2 cache accesses when fetching instructions",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.CODE_RD",
+ "PublicDescription": "L2 cache accesses when fetching instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed LLC",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to LLC",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increments the number of outstanding L1D misses every cycle. Set Cmask = 1 and Edge =1 to count occurrences.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss oustandings duration in cycles",
- "CounterHTOff": "2"
- },
- {
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "2"
- },
- {
- "PublicDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core",
- "CounterMask": "1",
- "CounterHTOff": "2"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of lines brought into the L1 data cache.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore outstanding Demand Data Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand Data Read requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L1D_WB",
+ "PublicDescription": "L1D writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 fill requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_FILL",
+ "PublicDescription": "L2 fill requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.RFO",
+ "PublicDescription": "RFO requests that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles in which the L1D is locked.",
+ "BriefDescription": "Cycles when L1D is locked",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "Cycles in which the L1D is locked.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand data read requests sent to uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Demand code read requests sent to uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts each cache miss condition for references to the last level cache.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts requests originating from the core that reference a cache line in the last level cache.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cases when offcore requests buffer cannot take more entries for core.",
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "BriefDescription": "Retired load uops whose data source was local DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x3"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load uops whose data source was remote DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "100007",
+ "UMask": "0xc"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Data forwarded from remote cache.",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Remote cache HITM.",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops. (Precise Event)",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
- },
- {
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
"PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources following L1 data-cache miss.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops with L2 cache misses as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
"PEBS": "1",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "All retired load uops. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
+ "BriefDescription": "All retired store uops. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Retired load uops with locked access. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops whose data source was local DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops whose data source was remote DRAM (Snoop not needed, Snoop Miss, or Snoop Hit data not forwarded).",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Remote cache HITM.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD",
- "SampleAfterValue": "100007",
- "BriefDescription": "Data forwarded from remote cache.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Demand Data Read requests that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "RFO requests that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_TRANS.RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x42"
},
{
- "PublicDescription": "L2 cache accesses when fetching instructions.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_TRANS.CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache accesses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
},
{
- "PublicDescription": "Any MLC or LLC HW prefetch accessing L2, including rejects.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_TRANS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 or LLC HW prefetches that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event)",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
},
{
- "PublicDescription": "L1D writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_TRANS.L1D_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1D writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Data read requests sent to uncore (demand and prefetch).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "L2 fill requests that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_TRANS.L2_FILL",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 fill requests that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cacheable and noncacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Demand code read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "L2 writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Demand data read requests sent to uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Transactions accessing L2 pipe.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Transactions accessing L2 pipe",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Demand RFO read requests sent to uncore, including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "L2 cache lines in I state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_IN.I",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in I state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "L2 cache lines in S state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_IN.S",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in S state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Offcore outstanding cacheable data read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "L2 cache lines in E state filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_IN.E",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in E state filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "L2 cache lines filling L2.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "L2_LINES_IN.ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Clean L2 cache lines evicted by demand.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by demand",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Dirty L2 cache lines evicted by demand.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by demand",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Clean L2 cache lines evicted by the MLC prefetcher.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.PF_CLEAN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding code reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Offcore outstanding Demand Code Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Dirty L2 cache lines evicted by the MLC prefetcher.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_LINES_OUT.PF_DIRTY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Offcore outstanding Demand Data Read transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Dirty L2 cache lines filling the L2.",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "L2_LINES_OUT.DIRTY_ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines filling the L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "PublicDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Split locks in SQ",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Offcore outstanding RFO store transactions in SQ to uncore. Set Cmask=1 to count cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch data reads that hit the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all writebacks from the core to the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all writebacks from the core to the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x803c8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803c8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x23ffc08000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23ffc08000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts non-temporal stores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts non-temporal stores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Split locks in SQ",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
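
The hunk above (like the floating-point and frontend hunks below) follows one mechanical regeneration rule: obsolete per-event fields ("Counter", "CounterHTOff", "Offcore") are dropped, the remaining keys are emitted in alphabetical order, and the entries themselves are re-sorted by EventName. A minimal Python sketch of that normalization, purely for illustration (this is not the converter actually used to produce these files):

import json

# Fields no longer present in the regenerated event JSON (as seen in the hunks above).
DROPPED = {"Counter", "CounterHTOff", "Offcore"}

def normalize_event(event):
    # Drop obsolete fields and emit the remaining keys in alphabetical order.
    return {key: event[key] for key in sorted(event) if key not in DROPPED}

def normalize_table(events):
    # Normalize every entry and re-sort the whole file by EventName.
    return sorted((normalize_event(e) for e in events), key=lambda e: e["EventName"])

old_entry = {
    "EventCode": "0xB7, 0xBB",
    "MSRValue": "0x10800",
    "Counter": "0,1,2,3",
    "UMask": "0x1",
    "Offcore": "1",
    "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
    "MSRIndex": "0x1a6,0x1a7",
    "SampleAfterValue": "100003",
    "BriefDescription": "Counts non-temporal stores",
    "CounterHTOff": "0,1,2,3",
}
print(json.dumps(normalize_event(old_entry), indent=4))
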
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json b/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
index 950b62c0908e..89c6d47cc077 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/floating-point.json
@@ -1,151 +1,135 @@
[
{
- "PublicDescription": "Counts number of X87 uops executed.",
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FP_COMP_OPS_EXE.X87",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
+ "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1e"
},
{
- "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of SIMD FP assists due to input values",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "PublicDescription": "Number of SIMD FP assists due to input values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "BriefDescription": "Number of SIMD FP assists due to Output values",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "PublicDescription": "Number of SIMD FP assists due to output values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "PublicDescription": "Number of X87 FP assists due to input values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "PublicDescription": "Number of X87 FP assists due to output values.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts number of SSE* or AVX-128 double precision FP scalar uops executed.",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
+ "PublicDescription": "Counts number of SSE* or AVX-128 double precision FP scalar uops executed.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Counts 256-bit packed single-precision floating-point instructions.",
- "EventCode": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "number of GSSE-256 Computational FP single precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts 256-bit packed double-precision floating-point instructions.",
- "EventCode": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULs and IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.X87",
+ "PublicDescription": "Counts number of X87 uops executed.",
"SampleAfterValue": "2000003",
- "BriefDescription": "number of AVX-256 Computational FP double precision uops issued this cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of assists associated with 256-bit AVX store operations.",
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "OTHER_ASSISTS.AVX_STORE",
+ "PublicDescription": "Number of assists associated with 256-bit AVX store operations.",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of X87 FP assists due to output values.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of X87 FP assists due to input values.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ASSIST.X87_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of SIMD FP assists due to output values.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Number of SIMD FP assists due to input values.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "number of AVX-256 Computational FP double precision uops issued this cycle",
+ "EventCode": "0x11",
+ "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "PublicDescription": "Counts 256-bit packed double-precision floating-point instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles with any input/output SSE* or FP assists.",
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1e",
- "EventName": "FP_ASSIST.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "number of GSSE-256 Computational FP single precision uops issued this cycle",
+ "EventCode": "0x11",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "PublicDescription": "Counts 256-bit packed single-precision floating-point instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
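
After regeneration each of these files is still a flat JSON array of event objects, so it can be inspected outside of perf. A small, hypothetical Python snippet for loading the floating-point file and looking up an event by name (the path assumes a kernel source tree; the printed values match the FP_ASSIST.ANY entry above):

import json
from pathlib import Path

# Path within a kernel source tree (adjust as needed).
path = Path("tools/perf/pmu-events/arch/x86/ivytown/floating-point.json")

# Index the event table by EventName for quick lookup.
events = {e["EventName"]: e for e in json.loads(path.read_text())}

fp_assist = events["FP_ASSIST.ANY"]
# Prints: 0xCA 0x1e 1
print(fp_assist["EventCode"], fp_assist["UMask"], fp_assist["CounterMask"])
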
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
index efaa949ead31..4ee100024ca9 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/frontend.json
@@ -1,305 +1,255 @@
[
{
- "PublicDescription": "Counts cycles the IDQ is empty.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "IDQ.EMPTY",
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "Number of DSB to MITE switches.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Cycles DSB to MITE switches caused delay.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "PublicDescription": "DSB Fill encountered > 3 DSB lines.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "BriefDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.IFETCH_STALL",
+ "PublicDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes UC accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "Counts cycles DSB is delivered four uops. Set Cmask = 4.",
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts cycles MITE is delivered at least one uops. Set Cmask = 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
- "PublicDescription": "Counts cycles DSB is delivered at least one uops. Set Cmask = 1.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Increment each cycle. # of uops delivered to IDQ from DSB path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts cycles MITE is delivered four uops. Set Cmask = 4.",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "EventName": "IDQ.EMPTY",
+ "PublicDescription": "Counts cycles the IDQ is empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts cycles MITE is delivered at least one uops. Set Cmask = 1.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.MITE_ALL_UOPS",
+ "PublicDescription": "Number of uops delivered to IDQ from any path.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
+ "UMask": "0x3c"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MITE path. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
"EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_OCCUR",
+ "PublicDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of uops delivered to IDQ from any path.",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "IDQ.MITE_ALL_UOPS",
+ "EventName": "IDQ.MS_DSB_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by DSB. Set Cmask = 1 to count cycles. Add Edge=1 to count # of delivery.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ when MS_busy by MITE. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Misses. Includes UC accesses.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction cache, streaming buffer and victim cache misses",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "PublicDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE.IFETCH_STALL",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Increment each cycle # of uops delivered to IDQ from MS by either DSB or MITE. Set Cmask = 1 to count cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code-fetch stalled due to L1 instruction-cache miss or an iTLB miss",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "PublicDescription": "Count issue pipeline slots where no uop was delivered from the front end to the back end when there is no back-end stall.",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Count issue pipeline slots where no uop was delivered from the front end to the back end when there is no back-end stall.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
"CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
"EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "SampleAfterValue": "2000003",
"BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of DSB to MITE switches.",
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DSB2MITE_SWITCHES.COUNT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles DSB to MITE switches caused delay.",
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "DSB Fill encountered > 3 DSB lines.",
- "EventCode": "0xAC",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
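
The ivt-metrics.json hunk below moves the Topdown metrics to the tma_* naming scheme and adds MetricThreshold, MetricGroup and ScaleUnit fields, but the level-1 arithmetic is the same set of expressions visible in its removed and added lines (the separate *_SMT variants are folded into #SMT_on conditionals). As orientation, a worked Python sketch of that level-1 breakdown using the single-thread expressions from the hunk; the counter values are invented for illustration:

# Hypothetical raw counter values for one measurement interval.
counters = {
    "CPU_CLK_UNHALTED.THREAD": 1_000_000,
    "IDQ_UOPS_NOT_DELIVERED.CORE": 600_000,
    "UOPS_ISSUED.ANY": 2_600_000,
    "UOPS_RETIRED.RETIRE_SLOTS": 2_400_000,
    "INT_MISC.RECOVERY_CYCLES": 20_000,
}

# 4 issue slots per core clock (the "4 * cycles" term in the metric expressions).
slots = 4 * counters["CPU_CLK_UNHALTED.THREAD"]

frontend_bound = counters["IDQ_UOPS_NOT_DELIVERED.CORE"] / slots
bad_speculation = (counters["UOPS_ISSUED.ANY"]
                   - counters["UOPS_RETIRED.RETIRE_SLOTS"]
                   + 4 * counters["INT_MISC.RECOVERY_CYCLES"]) / slots
retiring = counters["UOPS_RETIRED.RETIRE_SLOTS"] / slots
backend_bound = 1 - (frontend_bound + bad_speculation + retiring)

for name, value in [("tma_frontend_bound", frontend_bound),
                    ("tma_bad_speculation", bad_speculation),
                    ("tma_retiring", retiring),
                    ("tma_backend_bound", backend_bound)]:
    print(f"{name:20s} {value:6.1%}")
# With the numbers above: 15.0%, 7.0%, 60.0%, 18.0% -- the four fractions sum to 1.
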
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
index db23db2e98be..89469b10fa30 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
@@ -1,346 +1,1027 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5) / (3 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * OTHER_ASSISTS.ANY_WB_ASSIST / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(60 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) + 43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD)))) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
- "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "43 * (MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * DTLB_STORE_MISSES.STLB_HIT + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_UOPS_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "(200 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM + 60 * OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * min(CPU_CLK_UNHALTED.THREAD, IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses.",
+ "MetricExpr": "ICACHE.IFETCH_STALL / tma_info_clks - tma_itlb_misses",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
},
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "1 / (tma_fp_scalar + tma_fp_vector)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
- "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_UOPS_RETIRED.L1_MISS + mem_load_uops_retired.hit_lfb )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_UOPS_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / cycles",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION ) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
},
{
"BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "0",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
},
{
- "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_UOPS_RETIRED.LLC_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_UOPS_RETIRED.L1_MISS + MEM_LOAD_UOPS_RETIRED.HIT_LFB)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_DURATION + DTLB_LOAD_MISSES.WALK_DURATION + DTLB_STORE_MISSES.WALK_DURATION) / tma_info_core_clks",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
},
{
"BriefDescription": "Socket actual clocks when any core is active on that socket",
"MetricExpr": "cbox_0@event\\=0x0@",
- "MetricGroup": "",
- "MetricName": "Socket_CLKS"
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) - CYCLE_ACTIVITY.STALLS_L1D_PENDING) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_UOPS_RETIRED.L1_HIT_PS;MEM_LOAD_UOPS_RETIRED.HIT_LFB_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L1D_PENDING - CYCLE_ACTIVITY.STALLS_L2_PENDING) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "41 * (MEM_LOAD_UOPS_RETIRED.LLC_HIT * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS. Related metrics: tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "200 * (MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_local_dram",
+ "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_UOPS_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) + UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - (UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC if tma_info_ipc > 1.8 else UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_LDM_PENDING)) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,inv\\,cmask\\=1@ / 2 if #SMT_on else (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_EXECUTE) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0)) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=1@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC - UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((cpu@UOPS_EXECUTED.CORE\\,cmask\\=2@ - cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@) / 2 if #SMT_on else (UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC - UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks)",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(cpu@UOPS_EXECUTED.CORE\\,cmask\\=3@ / 2 if #SMT_on else UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(200 * (MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) + 180 * (MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD)))) / tma_info_clks",
+ "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
+ "MetricName": "tma_remote_cache",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "310 * (MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_UOPS_RETIRED.HIT_LFB / (MEM_LOAD_UOPS_RETIRED.L2_HIT + MEM_LOAD_UOPS_RETIRED.LLC_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM + MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS + MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_HITM + MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_FWD))) / tma_info_clks",
+ "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_remote_dram",
+ "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_UOPS_L3_MISS_RETIRED.REMOTE_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "13 * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_UOPS_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "2 * MEM_UOPS_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_UOPS_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) + (1 - MEM_UOPS_RETIRED.LOCK_LOADS / MEM_UOPS_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS * FP_COMP_OPS_EXE.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
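
For context on how the MetricExpr strings above get used: perf's metric code substitutes the measured counts of the named events into the expression, evaluates the arithmetic, and applies ScaleUnit. Below is a minimal sketch of that evaluation for the simplest level-1 metric above, tma_retiring; it assumes SMT is disabled so that tma_info_slots reduces to 4 * CPU_CLK_UNHALTED.THREAD (4 being the pipeline width here), and the counter values are invented placeholders rather than measured data.

  # Invented placeholder counts for one measurement interval (not measured data).
  counts = {
      "UOPS_RETIRED.RETIRE_SLOTS": 3_200_000_000,
      "CPU_CLK_UNHALTED.THREAD": 1_000_000_000,
  }

  # With SMT off, the slot count is pipeline width (4) times unhalted thread cycles.
  slots = 4 * counts["CPU_CLK_UNHALTED.THREAD"]

  # "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots", "ScaleUnit": "100%"
  tma_retiring = counts["UOPS_RETIRED.RETIRE_SLOTS"] / slots
  print(f"tma_retiring = {100 * tma_retiring:.1f}%")   # 80.0% with the numbers above

In practice the same value is what perf stat -M tma_retiring (or -M TopdownL1) should report, with the SMT case handled by the full tma_info_core_clks expression rather than the shortcut taken here.
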
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/memory.json b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
index 3a7b86af8816..138d1aa0b32d 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/memory.json
@@ -1,503 +1,382 @@
[
{
- "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Speculative cache-line split Store-address uops dispatched to L1D.",
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 4.",
+ "BriefDescription": "Loads with latency value being above 128",
"EventCode": "0xCD",
- "MSRValue": "0x4",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
+ "MSRValue": "0x80",
"PEBS": "2",
- "PublicDescription": "Loads with latency value being above 8.",
- "EventCode": "0xCD",
- "MSRValue": "0x8",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "PublicDescription": "Loads with latency value being above 128.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 16.",
+ "BriefDescription": "Loads with latency value being above 16",
"EventCode": "0xCD",
- "MSRValue": "0x10",
- "Counter": "3",
- "UMask": "0x1",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 16.",
"SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads with latency value being above 256",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
"PEBS": "2",
- "PublicDescription": "Loads with latency value being above 32.",
+ "PublicDescription": "Loads with latency value being above 256.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Loads with latency value being above 32",
"EventCode": "0xCD",
- "MSRValue": "0x20",
- "Counter": "3",
- "UMask": "0x1",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 32.",
"SampleAfterValue": "100007",
- "BriefDescription": "Loads with latency value being above 32",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 64.",
+ "BriefDescription": "Loads with latency value being above 4",
"EventCode": "0xCD",
- "MSRValue": "0x40",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 4.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 128.",
+ "BriefDescription": "Loads with latency value being above 512",
"EventCode": "0xCD",
- "MSRValue": "0x80",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 512.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 256.",
+ "BriefDescription": "Loads with latency value being above 64",
"EventCode": "0xCD",
- "MSRValue": "0x100",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 64.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Loads with latency value being above 512.",
+ "BriefDescription": "Loads with latency value being above 8",
"EventCode": "0xCD",
- "MSRValue": "0x200",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Loads with latency value being above 8.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
"EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "PEBS": "2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only.",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "PublicDescription": "Speculative cache-line split load uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "PublicDescription": "Speculative cache-line split Store-address uops dispatched to L1D.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc00244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc00244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data forwarded from remote cache",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f800244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f800244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch data reads that hits the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hits the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc203f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc203f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x6004003f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x6004003f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC the data is found in M state in remote cache and forwarded from there",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f8203f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc003f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data forwarded from remote cache",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc003f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f8203f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc00004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67fc00001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67fc00001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc00001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the LLC and the data is found in M state in remote cache and forwarded from there.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc20002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc20002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that miss the LLC and the data is found in M state in remote cache and forwarded from there.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67fc00010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67fc00010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc00010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that miss in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
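
The MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above are the PEBS load-latency events: MSRIndex 0x3F6 names the latency-threshold MSR, MSRValue is the threshold in cycles (0x80 = 128, matching the _GT_128 suffix), and SampleAfterValue shrinks as the threshold grows because fewer loads exceed a higher threshold. The hunk only reorders the entries alphabetically by EventName and drops fields such as Counter, CounterHTOff and TakenAlone; the remaining values are unchanged. A small sketch of pulling the threshold and period back out of the reordered file, assuming it is run from the top of a kernel tree so the path from the diff header resolves:

  import json

  path = "tools/perf/pmu-events/arch/x86/ivytown/memory.json"
  with open(path) as f:
      events = json.load(f)

  for ev in events:
      name = ev.get("EventName", "")
      if name.startswith("MEM_TRANS_RETIRED.LOAD_LATENCY_GT_"):
          # MSRValue: latency threshold in cycles programmed via MSR 0x3F6.
          # SampleAfterValue: default sampling period for the event.
          print(f'{name:45} threshold={int(ev["MSRValue"], 16):4} '
                f'period={ev["SampleAfterValue"]}')
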
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/other.json b/tools/perf/pmu-events/arch/x86/ivytown/other.json
index 4eb83ee40412..e80e99d064ba 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/other.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/other.json
@@ -1,44 +1,36 @@
[
{
- "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPL_CYCLES.RING0",
+ "PublicDescription": "Unhalted core cycles when the thread is in ring 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when the thread is in ring 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
+ "CounterMask": "1",
"EdgeDetect": "1",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0_TRANS",
+ "PublicDescription": "Number of intervals between processor halts while thread is in ring 0.",
"SampleAfterValue": "100007",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CPL_CYCLES.RING123",
+ "PublicDescription": "Unhalted core cycles when the thread is not in ring 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
+ "PublicDescription": "Cycles in which the L1D and L2 are locked, due to a UC lock or split lock.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
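
One detail worth noting in the CPL_CYCLES reshuffle above: RING0_TRANS uses the same EventCode 0x5C and UMask 0x1 as CPL_CYCLES.RING0, but is programmed with CounterMask=1 and EdgeDetect=1, so the counter effectively increments once per entry into ring 0 instead of on every ring-0 cycle. A toy model of that edge-detect behaviour, using an invented cycle trace rather than real data:

  # 1 = a cycle spent in ring 0, 0 = a cycle spent outside it (invented trace).
  in_ring0 = [0, 1, 1, 1, 0, 0, 1, 1, 0, 1]

  # CPL_CYCLES.RING0: count every qualifying cycle.
  ring0_cycles = sum(in_ring0)

  # CPL_CYCLES.RING0_TRANS: with EdgeDetect, count only 0 -> 1 transitions.
  ring0_trans = sum(1 for prev, cur in zip([0] + in_ring0, in_ring0)
                    if cur and not prev)

  print(ring0_cycles, ring0_trans)   # 6 ring-0 cycles, 3 entries into ring 0
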
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
index 2a0aad91d83d..30a3da9cd22b 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/pipeline.json
@@ -1,1305 +1,1030 @@
[
{
- "Counter": "Fixed counter 0",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 0"
- },
- {
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "Fixed counter 1"
- },
- {
- "Counter": "Fixed counter 2",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
- },
- {
- "PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "False dependencies in MOB due to partial compare on address.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "BriefDescription": "Divide operations executed",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EdgeDetect": "1",
- "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "Number of flags-merge uops allocated. Such uops adds delay.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_ISSUED.FLAGS_MERGE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of flags-merge uops being allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_ISSUED.SINGLE_MUL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles that the divider is active, includes INT and FP. Set 'edge =1, cmask=1' to count the number of divides.",
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.FPU_DIV_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divider is busy executing divide operations",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Divide operations executed.",
"EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EdgeDetect": "1",
"EventName": "ARITH.FPU_DIV",
+ "PublicDescription": "Divide operations executed.",
"SampleAfterValue": "100003",
- "BriefDescription": "Divide operations executed",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "BriefDescription": "Cycles when divider is busy executing divide operations",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "PublicDescription": "Cycles that the divider is active, includes INT and FP. Set 'edge =1, cmask=1' to count the number of divides.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MOVE_ELIMINATION.SIMD_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x58",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MOVE_ELIMINATION.SIMD_NOT_ELIMINATED",
- "SampleAfterValue": "1000003",
- "BriefDescription": "Number of SIMD Move Elimination candidate uops that were not eliminated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-conditional branches",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "Speculative and retired macro-conditional branches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "PublicDescription": "Cycles the RS is empty for the thread.",
- "EventCode": "0x5E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "PublicDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "BriefDescription": "Speculative and retired direct near calls",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "PublicDescription": "Speculative and retired direct near calls.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "PublicDescription": "Stall cycles due to IQ is full.",
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ILD_STALL.IQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stall cycles because IQ is full",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "PublicDescription": "Not taken macro-conditional branches.",
+ "BriefDescription": "Not taken macro-conditional branches",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
"EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "Not taken macro-conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Taken speculative and retired macro-conditional branches.",
+ "BriefDescription": "Taken speculative and retired macro-conditional branches",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
"EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "Taken speculative and retired macro-conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "PublicDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
"EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "PublicDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
- "PublicDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "BriefDescription": "Taken speculative and retired direct near calls",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "PublicDescription": "Taken speculative and retired direct near calls.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x90"
},
{
- "PublicDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "PublicDescription": "Taken speculative and retired direct near calls.",
+ "BriefDescription": "Taken speculative and retired indirect calls",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "Taken speculative and retired indirect calls.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "PublicDescription": "Taken speculative and retired indirect calls.",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "PublicDescription": "Taken speculative and retired indirect branches with return mnemonic.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "PublicDescription": "Speculative and retired macro-conditional branches.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Branch instructions at retirement.",
+ "SampleAfterValue": "400009"
},
{
- "PublicDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Speculative and retired indirect branches excluding calls and returns.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PublicDescription": "Number of far branches retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Speculative and retired direct near calls.",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "Counts the number of not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"PublicDescription": "Counts all near executed branches (not necessarily retired).",
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
- "PublicDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "PublicDescription": "Speculative and retired mispredicted macro conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
- "PublicDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "Mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
- "PublicDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "BriefDescription": "Speculative mispredicted indirect branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
- "PublicDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "PublicDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "PublicDescription": "Taken speculative and retired mispredicted macro conditional branches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "PublicDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "PublicDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "PublicDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "PublicDescription": "Taken speculative and retired mispredicted indirect calls.",
"SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "PublicDescription": "Counts all near executed branches (not necessarily retired).",
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "PublicDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Mispredicted branch instructions at retirement.",
+ "SampleAfterValue": "400009"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "PublicDescription": "Increments at the frequency of XCLK (100 MHz) when not halted.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Reference cycles when the thread is unhalted. (counts at 100 MHz rate)",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted. (counts at 100 MHz rate)",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles Allocation is stalled due to Resource Related reason.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "Counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling.",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "PublicDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "Cycles stalled due to no store buffers available (not including draining form sync).",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
+ "BriefDescription": "Cycles with pending L1 cache miss loads.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Cycles with pending L2 miss loads. Set AnyThread to count per core.",
+ "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
+ "CounterMask": "1",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L2 cache miss loads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles with pending L2 cache miss loads.",
+ "CounterMask": "1",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "PublicDescription": "Cycles with pending L2 miss loads. Set AnyThread to count per core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss load* is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
+ "BriefDescription": "Cycles with pending memory loads.",
+ "CounterMask": "2",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CYCLE_ACTIVITY.CYCLES_LDM_PENDING",
+ "PublicDescription": "Cycles with pending memory loads. Set AnyThread to count per core.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending memory loads.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "2",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Total execution stalls.",
+ "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
+ "CounterMask": "4",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "CYCLE_ACTIVITY.CYCLES_NO_EXECUTE",
+ "PublicDescription": "Total execution stalls.",
"SampleAfterValue": "2000003",
- "BriefDescription": "This event increments by 1 for every cycle where there was no execute for this thread.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc"
},
{
- "PublicDescription": "Number of loads missed L2.",
+ "BriefDescription": "Execution stalls due to L1 data cache misses",
+ "CounterMask": "12",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls due to L2 cache misses.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0xc"
},
{
+ "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
+ "CounterMask": "5",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
"EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss load* is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x5"
},
{
+ "BriefDescription": "Execution stalls due to L2 cache misses.",
+ "CounterMask": "5",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "PublicDescription": "Number of loads missed L2.",
"SampleAfterValue": "2000003",
+ "UMask": "0x5"
+ },
+ {
"BriefDescription": "Execution stalls due to memory subsystem.",
"CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_LDM_PENDING",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x6"
},
{
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "6",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x6"
},
{
- "PublicDescription": "Cycles with pending L1 cache miss loads. Set AnyThread to count per core.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
"EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with pending L1 cache miss loads.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "BriefDescription": "Stall cycles because IQ is full",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "PublicDescription": "Stall cycles due to IQ is full.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "2"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Execution stalls due to L1 data cache miss loads. Set Cmask=0CH.",
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls due to L1 data cache misses",
- "CounterMask": "12",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Number of instructions at retirement.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder",
+ "UMask": "0x3"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc.)",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
"SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "False dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for H/W prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Non-SW-prefetch load dispatches that hit fill buffer allocated for S/W prefetch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder",
"CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "PublicDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.THREAD",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Counts the number of executed AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Number of self-modifying-code machine clears detected.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of integer Move Elimination candidate uops that were not eliminated.",
+ "EventCode": "0x58",
+ "EventName": "MOVE_ELIMINATION.INT_NOT_ELIMINATED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Resource-related stall cycles",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "Cycles Allocation is stalled due to Resource Related reason.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Cycles stalled due to no store buffers available (not including draining form sync).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE",
+ "BriefDescription": "Count cases of saving new LBR",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Count cases of saving new LBR records by hardware.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Cycles the RS is empty for the thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of instructions at retirement.",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 2.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "PEBS": "2",
- "PublicDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution.",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "AnyThread": "1",
+ "BriefDescription": "Uops dispatched to port 2, loads and stores per core (speculative and retired).",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "CounterHTOff": "1"
+ "UMask": "0xc"
},
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "OTHER_ASSISTS.ANY_WB_ASSIST",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of times any microcode assist is invoked by HW upon uop writeback.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "PublicDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired uops.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "Cycles which a Uop is dispatched on port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "PublicDescription": "Cycles per core when uops are dispatched to port 5.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts total number of uops to be executed per-core each cycle.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of self-modifying-code machine clears detected.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of executed AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Branch instructions at retirement.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
+ "PublicDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the number of not taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Counts total number of uops to be executed per-thread each cycle. Set Cmask = 1, INV =1 to count stall cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of far branches retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Increments each cycle the # of Uops issued by the RAT to RS. Set Cmask = 1, Inv = 1, Any= 1to count stalled cycles of this core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Mispredicted branch instructions at retirement.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of flags-merge uops being allocated.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.FLAGS_MERGE",
+ "PublicDescription": "Number of flags-merge uops allocated. Such uops adds delay.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of Multiply packed/scalar single precision uops allocated",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SINGLE_MUL",
+ "PublicDescription": "Number of multiply packed/scalar single precision uops allocated.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "PublicDescription": "Number of slow LEA or similar uops allocated. Such uop has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired uops.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "number of near branch instructions retired that were mispredicted and taken.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Count cases of saving new LBR records by hardware.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of front end re-steers due to BPU misprediction.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
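
The core-PMU entries above encode each event as EventCode/UMask plus optional CounterMask, Invert, AnyThread and EdgeDetect modifiers. As a rough illustration (not part of this patch), the Python sketch below shows how such a JSON entry maps onto a raw x86 core-PMU config value and a perf-style event string, assuming the conventional format exposed under /sys/bus/event_source/devices/cpu/format (event bits 0-7, umask 8-15, edge 18, any 21, inv 23, cmask 24-31); the encode() helper and the sample dict are hypothetical, and the uncore CBOX events in the file changed below use a separate per-package PMU that this sketch does not cover.

    #!/usr/bin/env python3
    # Sketch: build a perf config value / event string from one JSON entry above.

    def encode(evt):
        """Return (raw config, 'cpu/.../' string) for a core-PMU JSON entry."""
        config = int(evt["EventCode"], 16)               # event select, bits 0-7
        config |= int(evt["UMask"], 16) << 8             # umask, bits 8-15
        config |= int(evt.get("EdgeDetect", "0")) << 18  # edge detect
        config |= int(evt.get("AnyThread", "0")) << 21   # count both SMT threads
        config |= int(evt.get("Invert", "0")) << 23      # invert cmask comparison
        config |= int(evt.get("CounterMask", "0")) << 24 # counter mask (cmask)

        terms = ["event=" + evt["EventCode"], "umask=" + evt["UMask"]]
        if evt.get("CounterMask", "0") != "0":
            terms.append("cmask=" + evt["CounterMask"])
        if evt.get("Invert", "0") == "1":
            terms.append("inv")
        if evt.get("AnyThread", "0") == "1":
            terms.append("any")
        return config, "cpu/" + ",".join(terms) + "/"

    if __name__ == "__main__":
        # UOPS_RETIRED.STALL_CYCLES from the table above: cycles without retired uops.
        stall_cycles = {"EventCode": "0xC2", "UMask": "0x1",
                        "CounterMask": "1", "Invert": "1"}
        config, event = encode(stall_cycles)
        print(hex(config))  # 0x18001c2
        print(event)        # cpu/event=0xC2,umask=0x1,cmask=1,inv/

The resulting string could then be passed to something like 'perf stat -e cpu/event=0xC2,umask=0x1,cmask=1,inv/', which mirrors what the perf tool itself derives from these JSON files at build time.
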
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
index 267410594833..8bf2706eb6d5 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-cache.json
@@ -1,321 +1,3096 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Uncore Clocks",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
- "Counter": "0,1",
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1f",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Any Request",
"EventCode": "0x34",
"EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
"UMask": "0x11",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Read transactions",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Lookups that Match NID",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
- "Counter": "0,1",
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "UMask": "0x9",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:17] bits correspond to [M'FMESI] state.; Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x5",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode.demand",
- "Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_READ",
- "Filter": "filter_opc=0x182",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.; Qualify one of the other subevents by the Target NID. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Number of times that an RFO hit in S state. This is useful for determining if it might be good for a workload to use RspIWB instead of RspSWB.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 0",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.AGE0",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 0",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 1",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.AGE1",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 2",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.AGE2",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 2",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Age 3",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.AGE3",
+ "PerPkg": "1",
+ "PublicDescription": "How often age was set to 3",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; LRU Bits Decremented",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.LRU_DECREMENT",
+ "PerPkg": "1",
+ "PublicDescription": "How often all LRU bits were decremented by 1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "LRU Queue; Non-0 Aged Victim",
+ "EventCode": "0x3c",
+ "EventName": "UNC_C_QLRU.VICTIM_NON_ZERO",
+ "PerPkg": "1",
+ "PublicDescription": "How often we picked a victim that had a non-zero age",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Counterclockwise",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Clockwise",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads. Derived from unc_c_tor_inserts.miss_opcode.uncacheable",
- "Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
+ "BriefDescription": "AD Ring In Use; Down",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xcc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even on Vring 0",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd on Vring 0",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even on VRing 1",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd on VRing 1",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up",
+ "EventCode": "0x1B",
+ "EventName": "UNC_C_RING_AD_USED.UP",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x33",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even on Vring 0",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd on Vring 0",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even on VRing 1",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd on VRing 1",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Counterclockwise",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Clockwise",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for RFO. Derived from unc_c_tor_inserts.miss_opcode.rfo_prefetch",
- "Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.RFO_LLC_PREFETCH",
- "Filter": "filter_opc=0x190",
+ "BriefDescription": "AK Ring In Use; Down",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xcc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even on Vring 0",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd on Vring 0",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_VR0_ODD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even on VRing 1",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd on VRing 1",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up",
+ "EventCode": "0x1C",
+ "EventName": "UNC_C_RING_AK_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x33",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even on Vring 0",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd on Vring 0",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even on VRing 1",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd on VRing 1",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Counterclockwise",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Clockwise",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0xcc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even on Vring 0",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd on Vring 0",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even on VRing 1",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd on VRing 1",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Down and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up",
+ "EventCode": "0x1D",
+ "EventName": "UNC_C_RING_BL_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x33",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even on Vring 0",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd on Vring 0",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even on VRing 1",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd on VRing 1",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.; Filters for the Up and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AD_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.: Acknowledgements to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.: Data Responses to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.: Snoops of processor's cache.",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "IV Ring in Use; Any",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters any polarity",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "IV Ring in Use; Down",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.DOWN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for Down polarity",
+ "UMask": "0xcc",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "IV Ring in Use; Up",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for Up polarity",
+ "UMask": "0x33",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD_IPQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x7",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IPQ is externally startved and therefore we are blocking the IRQ.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; IRQ is externally starved and therefore we are blocking the IPQ.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.; Number of times that the ISMQ Bid.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ is blocking the ingress queue and causing the starvation.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations: IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; VFIFO",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.; Counts the number of allocations into the IRQ Ordering FIFO. In JKT, it is necessary to keep IO requests in order. Therefore, they are allocated into an ordering FIFO that sits next to the IRQ, and must be satisfied from the FIFO in order (with respect to each other). This event, in conjunction with the Occupancy Accumulator event, can be used to calculate average lifetime in the FIFO. Transactions are allocated into the FIFO as soon as they enter the Cachebo (and the IRQ) and are deallocated from the FIFO as soon as they are deallocated from the IRQ.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IPQ in Internal Starvation.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the IRQ in Internal Starvation.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.; Cycles with the ISMQ in Internal Starvation.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from an address conflicts. Address conflicts out of the IPQ should be rare. They will generally only occur if two different sockets are sending requests to the same address at the same time. This is a true conflict case, unlike the IPQ Address Conflict which is commonly caused by prefetching characteristics.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject. TOR rejects from the IPQ can be caused by the Egress being full or Address Conflicts.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.; Counts the number of times that a request form the IPQ was retried because of a TOR reject from the Egress being full. IPQ requests make use of the AD Egress for regular responses, the BL egress to forward data, and the AK egress to return credits.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because of an address match in the TOR. In order to maintain coherency, requests to the same address are not allowed to pass each other up in the Cbo. Therefore, if there is an outstanding request to a given address, one cannot issue another request to that address until it is complete. This comes up most commonly with prefetches. Outstanding prefetches occasionally will not complete their memory fetch and a demand request to the same address will then sit in the IRQ and get retried until the prefetch fills the data into the LLC. Therefore, it will not be uncommon to see this case in high bandwidth streaming workloads when the LLC Prefetcher in the core is enabled.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of IRQ retries that occur. Requests from the IRQ are retried if they are rejected from the TOR pipeline for a variety of reasons. Some of the most common reasons include if the Egress is full, there are no RTIDs, or there is a Physical Address match to another outstanding request.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request from the IRQ was retried because it failed to acquire an entry in the Egress. The egress is the buffer that queues up for allocating onto the ring. IRQ requests can make use of all four rings and all four Egresses. If any of the queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No IIO Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of requests rejects because of lack of QPI Ingress credits. These credits are required in order to send transactions to the QPI agent. Please see the QPI_IGR_CREDITS events for more information.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that requests from the IRQ were retried because there were no RTIDs available. RTIDs are required after a request misses the LLC and needs to send snoops and/or requests to memory. If there are no RTIDs available, requests will queue up in the IRQ and retry until one becomes available. Note that there are multiple RTID pools for the different sockets. There may be cases where the local RTIDs are all used, but requests destined for remote memory can still acquire an RTID because there are remote RTIDs available. This event does not provide any filtering for this case.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the total number of times that a request from the ISMQ retried because of a TOR reject. ISMQ requests generally will not need to retry (or at least ISMQ retries are less common than IRQ retries). ISMQ requests will retry if they are not able to acquire a needed Egress credit to get onto the ring, or for cache evictions that need to acquire an RTID. Most ISMQ requests already have an RTID, so eviction retries will be less common here.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by a lack of Egress credits. The egress is the buffer that queues up for allocating onto the ring. If any of the Egress queues that a given request needs to make use of are full, the request will be retried.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Number of times a request attempted to acquire the NCS/NCB credit for sending messages on BL to the IIO. There is a single credit in each CBo that is shared between the NCS and NCB message classes for sending transactions on the BL ring (such as read data) to the IIO.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Counts the number of times that a request from the ISMQ retried because of a TOR reject caused by no RTIDs. M-state cache evictions are serviced through the ISMQ, and must acquire an RTID in order to write back to memory. If no RTIDs are available, they will be retried.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No WB Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.WB_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.; Retries of writes to local memory due to lack of HT WB credits",
+ "UMask": "0x80",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for code reads. Derived from unc_c_tor_inserts.miss_opcode.code",
- "Counter": "0,1",
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; VFIFO",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.; Accumulates the number of used entries in the IRQ Ordering FIFO in each cycle. In JKT, it is necessary to keep IO requests in order. Therefore, they are allocated into an ordering FIFO that sits next to the IRQ, and must be satisfied from the FIFO in order (with respect to each other). This event, in conjunction with the Allocations event, can be used to calculate average lifetime in the FIFO. This event can be used in conjunction with the Not Empty event to calculate average queue occupancy. Transactions are allocated into the FIFO as soon as they enter the Cachebo (and the IRQ) and are deallocated from the FIFO as soon as they are deallocated from the IRQ.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.CODE_LLC_PREFETCH",
- "Filter": "filter_opc=0x191",
+ "EventName": "UNC_C_TOR_INSERTS.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC prefetch misses for data reads. Derived from unc_c_tor_inserts.miss_opcode.data_read",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Evictions",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_LLC_PREFETCH",
- "Filter": "filter_opc=0x192",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Eviction transactions inserted into the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe allocating writes that miss LLC - DDIO misses. Derived from unc_c_tor_inserts.miss_opcode.ddio_miss",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Local Memory",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x28",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses for PCIe read current. Derived from unc_c_tor_inserts.miss_opcode.pcie_read",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Local Memory - Opcode Matched",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.LOCAL_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses for ItoM writes (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.miss_opcode.itom_write",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Misses to Local Memory",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x2a",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses for PCIe non-snoop reads. Derived from unc_c_tor_inserts.miss_opcode.pcie_read",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Misses to Local Memory - Opcode Matched",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_NON_SNOOP_READ",
- "Filter": "filter_opc=0x1e4",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_LOCAL_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses for PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.miss_opcode.pcie_write",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_NON_SNOOP_WRITE",
- "Filter": "filter_opc=0x1e6",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match an opcode.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode.streaming_full",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x8a",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode.streaming_partial",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Misses to Remote Memory - Opcode Matched",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_REMOTE_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Partial PCIe reads. Derived from unc_c_tor_inserts.opcode.pcie_partial",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID Matched",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_PARTIAL_READ",
- "Filter": "filter_opc=0x195",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched (matches an RTID destination) transactions inserted into the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid. In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe allocating writes that hit in LLC (DDIO hits). Derived from unc_c_tor_inserts.opcode.ddio_hit",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched eviction transactions inserted into the TOR.",
+ "UMask": "0x44",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode.pcie_read_current",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All NID matched miss requests that were inserted into the TOR.",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "ItoM write hits (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.opcode.itom_write_hit",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Miss transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe non-snoop reads. Derived from unc_c_tor_inserts.opcode.pcie_read",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_READ",
- "Filter": "filter_opc=0x1e4",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe non-snoop writes (partial). Derived from unc_c_tor_inserts.opcode.pcie_partial_write",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x1e5",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; NID matched write transactions inserted into the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.opcode.pcie_full_write",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Opcode Match",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_WRITE",
- "Filter": "filter_opc=0x1e6",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Transactions inserted into the TOR that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc)",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Remote Memory - Opcode Matched",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; All transactions, satisfied by an opcode, inserted into the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Occupancy for all LLC misses that are addressed to local memory",
+ "BriefDescription": "TOR Inserts; Writebacks",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).; Write transactions inserted into the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All valid TOR entries. This includes requests that reside in the TOR for a short time, such as LLC Hits that do not need to snoop cores or requests that get rejected and have to be retried through one of the ingress queues. The TOR is more commonly a bottleneck in skews with smaller core counts, where the ratio of RTIDs to TOR entries is larger. Note that there are reserved TOR entries for various request types, so it is possible that a given request type be blocked with an occupancy that is less than 20. Also note that generally requests will not be able to arbitrate into the TOR pipeline if there are no available TOR slots.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding eviction transactions in the TOR. Evictions can be quick, such as when the line is in the F, S, or E states and no core valid bits are set. They can also be longer if either CV bits are set (so the cores need to be snooped) and/or if there is a HitM (in which case it is necessary to write the request out to memory).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss All",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding miss requests in the TOR. 'Miss' means the allocation requires an RTID. This generally means that the request was sent to memory or MMIO.",
+ "UMask": "0xa",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL",
"PerPkg": "1",
- "UMask": "0x2A",
- "Unit": "CBO"
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x2a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Local Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_LOCAL_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by locally HOMed memory.",
+ "UMask": "0x23",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode.llc_data_read",
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
"EventCode": "0x36",
- "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
- "Filter": "filter_opc=0x182",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries for miss transactions that match an opcode. This generally means that the request was sent to memory or MMIO.",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Occupancy for all LLC misses that are addressed to remote memory",
+ "BriefDescription": "TOR Occupancy",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE",
"PerPkg": "1",
- "UMask": "0x8A",
- "Unit": "CBO"
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x8a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses to Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x83",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of NID matched outstanding requests in the TOR. The NID is programmed in Cn_MSR_PMON_BOX_FILTER.nid.In conjunction with STATE = I, it is possible to monitor misses to specific NIDs in the system.",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding NID matched eviction transactions in the TOR .",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID.",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding Miss requests in the TOR that match a NID and an opcode.",
+ "UMask": "0x43",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match a NID and an opcode.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); NID matched write transactions int the TOR.",
+ "UMask": "0x50",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); TOR entries that match an opcode (matched by Cn_MSR_PMON_BOX_FILTER.opc).",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Remote Memory - Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.REMOTE_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Number of outstanding transactions, satisfied by an opcode, in the TOR that are satisfied by remote caches or remote memory.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Writebacks",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); Write transactions in the TOR. This does not include RFO, but actual operations that contain data being sent from the core.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto AD Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto AK Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Onto BL Ring",
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the AK ring. This is commonly used for snoop responses coming from the core and destined for a Cachebo.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Corebo destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.; Ring transactions from the Cachebo destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AD Ring (to core)",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the core AD egress spent in starvation",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK_BOTH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that both AK egresses spent in starvation",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto IV Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.; cycles that the cachebo IV egress spent in starvation",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BT Bypass",
+ "EventCode": "0x52",
+ "EventName": "UNC_H_BT_BYPASS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of transactions that bypass the BT (fifo) to HT",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty: Local",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Cycles Not Empty: Remote",
+ "EventCode": "0x42",
+ "EventName": "UNC_H_BT_CYCLES_NE.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Backup Tracker (BT) is not empty. The BT is the actual HOM tracker in IVT.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Local",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Reads Local",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Reads Remote",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Remote",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Writes Local",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT Occupancy; Writes Remote",
+ "EventCode": "0x43",
+ "EventName": "UNC_H_BT_OCCUPANCY.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the HA BT pool in every cycle. This can be used with the not empty stat to calculate average queue occupancy or the allocations stat in order to calculate average queue latency. HA BTs are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_BL_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Snoop Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.INCOMING_SNP_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming snoop hazard",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.RSPACKCFLT_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BT to HT Not Issued; Incoming Data Hazard",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_BT_TO_HT_NOT_ISSUED.WBMDATA_HAZARD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not issue transaction from BT to HT.; Cycles unable to issue from BT due to incoming BL data hazard",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the bypass.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Acknowledge Conflicts",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.ACKCNFLTS",
+ "PerPkg": "1",
+ "PublicDescription": "Count the number of Ackcnflts",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Cmp Fwds",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.CMP_FWDS",
+ "PerPkg": "1",
+ "PublicDescription": "Count the number of Cmp_Fwd. This will give the number of late conflicts.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Conflict Detected",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are handling conflicts.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Last in conflict chain",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.LAST",
+ "PerPkg": "1",
+ "PublicDescription": "Count every last conflictor in conflict chain. Can be used to compute the average conflict chain length as (#Ackcnflts/#LastConflictor)+1. This can be used to give a feel for the conflict chain lengths while analyzing lock kernels.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Reads where Direct2Core overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lat Opt Return",
+ "EventCode": "0x41",
+ "EventName": "UNC_H_DIRECTORY_LAT_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "Directory Latency Optimization Data Return Path Taken. When directory mode is enabled and the directory returned for a read is Dir=I, then data can be returned using a faster path if certain conditions are met (credits, free pipeline, etc).",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: Any state",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that did not have to send any snoops because the directory bit was clear.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: Snoop A",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNOOP_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: Snoop S",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNOOP_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.; Filters for transactions that had to send one or more snoops because the directory bit was set.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: A State",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: I State",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups: S State",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: A2I",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: A2S",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory clears. This occurs when snoops were sent and all returned with RspI.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: I2A",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: I2S",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: S2A",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates: S2I",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "EventCode": "0xD",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.; Filter for directory sets. This occurs when a remote read transaction requests memory, bringing it to a remote cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD QPI Link 2 Credit Accumulator",
+ "EventCode": "0x59",
+ "EventName": "UNC_H_IGR_AD_QPI2_ACCUMULATOR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of credits available to the QPI Link 2 AD Ingress buffer.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL QPI Link 2 Credit Accumulator",
+ "EventCode": "0x5a",
+ "EventName": "UNC_H_IGR_BL_QPI2_ACCUMULATOR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of credits available to the QPI Link 2 BL Ingress buffer.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD QPI Link 2 Credit Accumulator",
+ "EventCode": "0x59",
+ "EventName": "UNC_H_IGR_CREDITS_AD_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of credits available to the QPI Link 2 AD Ingress buffer.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL QPI Link 2 Credit Accumulator",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_IGR_CREDITS_BL_QPI2",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of credits available to the QPI Link 2 BL Ingress buffer.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Normal Priority Reads Issued; Normal Priority",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "EventCode": "0x1e",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Conflicts; Any Conflict",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_IODC_CONFLICTS.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Conflicts; Last Conflict",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_IODC_CONFLICTS.LAST",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Conflicts: Remote InvItoE - Same RTID",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_IODC_CONFLICTS.REMOTE_INVI2E_SAME_RTID",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Conflicts: Remote (Other) - Same Addr",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_IODC_CONFLICTS.REMOTE_OTHER_SAME_ADDR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "IODC Inserts",
+ "EventCode": "0x56",
+ "EventName": "UNC_H_IODC_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "IODC Allocations",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Num IODC 0 Length Writes",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_IODC_OLEN_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Num IODC 0 Length Writebacks M to I - All of which are dropped.",
+ "Unit": "HA"
},
{
- "BriefDescription": "Read requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "OSB Snoop Broadcast; Local InvItoE",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Local Reads",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast; Remote",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_OSB.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; All",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local I",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Local S",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote I",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "OSB Early Data Return; Reads to Remote S",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_OSB_EDR.READS_REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that broadcast snoop due to OSB, but found clean data in memory and was able to do early data return",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Local InvItoEs",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from the local socket.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote InvItoEs",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only InvItoEs coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming ead requests. This is a good proxy for LLC Read Misses (including RFOs).",
"UMask": "0x3",
"Unit": "HA"
},
{
- "BriefDescription": "Write requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Local Reads",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the local socket. This is a good proxy for LLC Read Misses (including RFOs) from the local socket.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote Reads",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only read requests coming from the remote socket. This is a good proxy for LLC Read Misses (including RFOs) from the remote socket.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
- "UMask": "0xC",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; Incoming write requests.",
+ "UMask": "0xc",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache along with writeback to memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x21",
- "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "BriefDescription": "Read and Write Requests; Local Writes",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from the local socket.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Remote Writes",
+ "EventCode": "0x1",
+ "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).; This filter includes only writes coming from remote sockets.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even on VRing 1",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd on VRing 1",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise",
+ "EventCode": "0x3E",
+ "EventName": "UNC_H_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even on VRing 1",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd on VRing 1",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even on VRing 1",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd on VRing 1",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise",
+ "EventCode": "0x3F",
+ "EventName": "UNC_H_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even on VRing 1",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd on VRing 1",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_VR0_ODD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even on VRing 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd on VRing 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even on VRing 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd on VRing 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
"UMask": "0x20",
"Unit": "HA"
},
{
- "BriefDescription": "M line forwarded from remote cache with no writeback to memory",
- "Counter": "0,1,2,3",
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and special requests such as ISOCH reads. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RSPCNFLCT*",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspI",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspIFwd",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
"UMask": "0x4",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line response from remote cache",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Snoop Responses Received; RspS",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPS",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
"UMask": "0x2",
"Unit": "HA"
},
{
- "BriefDescription": "Shared line forwarded from remote cache",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Snoop Responses Received; RspSFwd",
"EventCode": "0x21",
"EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its currently copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*Fwd*WB",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; Rsp*WB",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Other",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for all other snoop responses.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoops responses of RspI. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd. This is returned when a remote caching agent forwards data but holds on to its currently copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxFWDxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_SNP_RESP_RECV_LOCAL.RSPxWB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 2",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 3",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 4",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 5",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 6",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 7",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 10",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 11",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 8",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for Monroe systems that use the TAD to enable individual channels to enter self-refresh to save power.; Filters request made to TAD Region 9",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty",
+ "EventCode": "0x3",
+ "EventName": "UNC_H_TRACKER_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the local HA tracker pool is not empty. This can be used with edge detect to identify the number of situations when the pool became empty. This should not be confused with RTID credit usage -- which must be tracked inside each cbo individually -- but represents the actual tracker buffer structure. In other words, this buffer could be completely empty, but there may still be credits in use by the CBos. This stat can be used in conjunction with the occupancy accumulation stat in order to calculate average queue occpancy. HA trackers are allocated as soon as a request enters the HA if an HT (Home Tracker) entry is available and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "EventCode": "0xF",
+ "EventName": "UNC_H_TxR_AD.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.; Filter for outbound NDR transactions sent on the AD ring. NDR stands for non-data response and is generally used for completions that do not include data. AD NDR is used for transactions to remote sockets.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 0",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy; Filter for occupancy from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AD Egress Occupancy; Filter for occupancy from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK: CRD Transactions to Cbo",
+ "EventCode": "0xe",
+ "EventName": "UNC_H_TxR_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 0",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy; Filter for occupancy from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 1",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "AK Egress Occupancy; Filter for occupancy from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to the cache.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent directly to the requesting core.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.; Filter for data being sent to a remote socket over QPI.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Full; Filter for cycles full from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Cycles full from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Not Empty; Filter for cycles not empty from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Allocations from both schedulers",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Allocations; Filter for allocations from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy: All",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 0",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy; Filter for occupancy from scheduler bank 0",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 1",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "PublicDescription": "BL Egress Occupancy; Filter for occupancy from scheduler bank 1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no regular credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 0 only.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 1 only.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 2 only.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no special credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and special requests such as ISOCH writes. This count only tracks the special credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.; Filter for memory controller channel 3 only.",
"UMask": "0x8",
"Unit": "HA"
}
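
Note on using the counters documented above: several of the HA descriptions (for example UNC_H_TRACKER_CYCLES_NE and the UNC_H_TxR_AD/AK/BL _OCCUPANCY, _INSERTS and _CYCLES_NE pairs) point out that the accumulated occupancy can be combined with the inserts or not-empty counts to derive an average queue depth or residency. The Python sketch below shows only that post-processing arithmetic; it assumes the three raw counts were already collected over the same interval (for example with perf stat on the matching uncore PMU), and the sample numbers are illustrative, not taken from this patch.

# Post-processing sketch for the occupancy/inserts event pairs described above.
# Assumed inputs: raw counts gathered over one measurement interval.

def avg_queue_stats(occupancy_sum, inserts, cycles_not_empty):
    """Derive average depth and residency from uncore occupancy counters.

    occupancy_sum    - accumulated per-cycle occupancy (e.g. UNC_H_TxR_AD_OCCUPANCY)
    inserts          - allocations into the queue      (e.g. UNC_H_TxR_AD_INSERTS)
    cycles_not_empty - cycles the queue held entries   (e.g. UNC_H_TxR_AD_CYCLES_NE)
    """
    avg_depth_when_busy = occupancy_sum / cycles_not_empty   # entries, while non-empty
    avg_residency_cycles = occupancy_sum / inserts           # cycles per entry (Little's law)
    return avg_depth_when_busy, avg_residency_cycles


if __name__ == "__main__":
    # Made-up counts purely to show the arithmetic.
    depth, residency = avg_queue_stats(occupancy_sum=1_200_000,
                                       inserts=300_000,
                                       cycles_not_empty=800_000)
    print(f"avg depth while busy: {depth:.2f} entries")
    print(f"avg residency: {residency:.2f} cycles")
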
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
index b798a860bc81..ccf451534d16 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-interconnect.json
@@ -1,48 +1,3287 @@
[
{
- "BriefDescription": "QPI clock ticks. Use to get percentages for QPI cycles events",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Merges",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.MERGE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.; When two requests to the same address from the same source are received back to back, it is possible to merge the two of them together.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Stalls",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.STALL_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.; When it is not possible to merge two conflicting requests, a stall event occurs. This is bad for performance.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Any Source",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Select Source",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.; Tracks all requests from any source port.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Any Source",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Select Source",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Any Source",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Select Source",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Any Source",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Select Source",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.; Tracks only those requests that come from the port specified in the IRP_PmonFilter.OrderingQ register. This register allows one to select one specific queue. It is not possible to monitor multiple queues at a time.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0xb",
+ "EventName": "UNC_I_RxR_AK_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the AK Ingress is full. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "EventCode": "0xa",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0xc",
+ "EventName": "UNC_I_RxR_AK_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the AK Ingress in each cycles. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Ownership Lost",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.LOST_OWNERSHIP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.; Tracks the number of requests that lost ownership as a result of a tickle. When a tickle comes in, if the request is not at the head of the queue in the switch, then that request as well as any requests behind it in the switch queue will lose ownership and have to re-acquire it later when they get to the head of the queue. This will therefore track the number of requests that lost ownership and not just the number of tickles.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Data Returned",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.TOP_OF_QUEUE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.; Tracks the number of cases when a tickle was received but the requests was at the head of the queue in the switch. In this case, data is returned rather than releasing ownership.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count: Read Prefetches",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.PD_PREFETCHES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREFETCHES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xe",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xf",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0xd",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ordering Stalls",
+ "EventCode": "0x1a",
+ "EventName": "UNC_I_WRITE_ORDERING_STALL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are pending write ACK's in the switch but the switch->IRP pipeline is not utilized.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of qfclks",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
- "Unit": "QPI LL"
+ "PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/8th the GT/s speed of the QPI link. For example, a 8GT/s link will have qfclk or 1GHz. JKT does not support dynamic link speeds, so this frequency is fixed.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Count of CTO Events",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits. Had there been enough credits, the spawn would have worked as the RBT bit was set and the RBT tag matched.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and there weren't enough Egress credits. The valid bit was set.",
+ "UMask": "0x20",
+ "Unit": "QPI"
},
{
- "BriefDescription": "Cycles where receiving QPI link is in half-width mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because there were not enough Egress credits AND the RBT bit was not set, but the RBT tag matched.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT Miss, Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match, the valid bit was not set and there weren't enough Egress credits.",
+ "UMask": "0x80",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match although the valid bit was set and there were enough Egress credits.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the route-back table (RBT) specified that the transaction should not trigger a direct2core transaction. This is common for IO transactions. There were enough Egress credits and the RBT tag matched but the valid bit was not set.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Miss and Invalid",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn failed because the RBT tag did not match and the valid bit was not set although there were enough Egress credits.",
+ "UMask": "0x40",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS_RBT_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.; The spawn was successful. There were sufficient credits, the RBT valid bit was set and there was an RBT tag match. The message was marked to spawn direct2core.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MATCH_MASK",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.AnyDataC",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.AnyResp",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.AnyResp11flits",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.AnyResp9flits",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.DataC_E",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.DataC_E_Cmp",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.DataC_E_FrcAckCnflt",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.DataC_F",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.DataC_F_Cmp",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.DataC_F_FrcAckCnflt",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.DataC_M",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.WbEData",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.WbIData",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.DRS.WbSData",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.AnyReq",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.AnyResp",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.RespFwd",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.RespFwdI",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.RespFwdIWb",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.RespFwdS",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.RespFwdSWb",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.RespIWb",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.HOM.RespSWb",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.NCB.AnyInt",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.NCB.AnyMsg",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.NCB.AnyMsg11flits",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.NCB.AnyMsg9flits",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.NCS.AnyMsg1or2flits",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.NCS.AnyMsg3flits",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.NCS.NcRd",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.NDR.AnyCmp",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_MESSAGE.SNP.AnySnp",
+ "PerPkg": "1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_RxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "rxl0p_power_cycles %",
"PerPkg": "1",
- "Unit": "QPI LL"
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0xf",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during link initialization.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; Normal Operations",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).; CRC errors detected during normal operation.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "QPI"
},
{
- "BriefDescription": "Cycles where transmitting QPI link is in half-width mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the HOM message class.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCB message class.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NCS message class.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the NDR message class.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN0 credit for the SNP message class.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; DRS",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; HOM",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the HOM message class.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCB",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCB message class.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NCS",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NCS message class.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; NDR",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the NDR message class.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed; SNP",
+ "EventCode": "0x39",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.; VN1 credit for the SNP message class.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x1d",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN0",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - DRS; for VN1",
+ "EventCode": "0xF",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors DRS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN0",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - HOM; for VN1",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors HOM flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN0",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCB; for VN1",
+ "EventCode": "0x10",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCB flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN0",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NCS; for VN1",
+ "EventCode": "0x11",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NCS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN0",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - NDR; for VN1",
+ "EventCode": "0x14",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors NDR flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN0",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty - SNP; for VN1",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_RxL_CYCLES_NE_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy. This monitors SNP flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits received over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of flits received over QPI that do not hold protocol payload. When QPI is not in a power saving state, it continuously transmits flits across the link. When there are no protocol flits to send, it will send IDLE and NULL flits across. These flits sometimes do carry a payload, such as credit returns, but are generally not considered part of the QPI bandwidth.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits received across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data.",
+ "UMask": "0x18",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits received over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits received over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits received over QPI on the home channel.",
+ "UMask": "0x6",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits received over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request received over QPI on the home channel. This basically counts the number of remote memory requests received over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits received over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are received on the home channel.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
+ "UMask": "0xc",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits received over QPI. This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits received over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN0",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS; for VN1",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN0",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM; for VN1",
+ "EventCode": "0xC",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN0",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB; for VN1",
+ "EventCode": "0xA",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN0",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS; for VN1",
+ "EventCode": "0xB",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR",
+ "EventCode": "0xe",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN0",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR; for VN1",
+ "EventCode": "0xE",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP",
+ "EventCode": "0xd",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN0",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP; for VN1",
+ "EventCode": "0xD",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN0",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS; for VN1",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM; for VN1",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN0",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB; for VN1",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN0",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS; for VN1",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR",
+ "EventCode": "0x1a",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN0",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR; for VN1",
+ "EventCode": "0x1A",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN0",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP; for VN1",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - HOM",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - DRS",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - SNP",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NDR",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCS",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; BGF Stall - NCB",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; Egress Credits",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.EGRESS_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled a packet because there were insufficient BGF credits. For details on a message class granularity, use the Egress Credit Occupancy events.",
+ "UMask": "0x40",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN0; GV",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS_VN0.GV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 0; Stalled because a GV transition (frequency transition) was taking place.",
+ "UMask": "0x80",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - HOM",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the HOM message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - DRS",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the DRS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - SNP",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the SNP message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NDR",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NDR message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCS",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCS message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI on VN1; BGF Stall - NCB",
+ "EventCode": "0x3a",
+ "EventName": "UNC_Q_RxL_STALLS_VN1.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI on Virtual Network 1.; Stalled a packet from the NCB message class because there were not enough BGF credits. In bypass mode, we will stall on the packet boundary, while in RxQ mode we will stall on the flit boundary.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"EventCode": "0xd",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_TxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "txl0p_power_cycles %",
"PerPkg": "1",
- "Unit": "QPI LL"
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI"
},
{
- "BriefDescription": "Number of data flits transmitted ",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is almost full, we block some but not all packets.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.; When LLR is totally full, we are not allowed to send any packets.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of data flits transmitted over QPI. Each flit contains 64b of data. This includes both DRS and NCB data flits (coherent and non-coherent). This can be used to calculate the data bandwidth of the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This does not include the header flits that go in data packets.",
"UMask": "0x2",
- "Unit": "QPI LL"
+ "Unit": "QPI"
},
{
- "BriefDescription": "Number of non data (control) flits transmitted ",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.; Number of non-NULL non-data flits transmitted across QPI. This basically tracks the protocol overhead on the QPI link. One can get a good picture of the QPI-link characteristics by evaluating the protocol flits, data flits, and idle/null flits. This includes the header flits for data packets.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency.",
+ "UMask": "0x18",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of data flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the data flits (not the header).",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of protocol flits transmitted over QPI on the DRS (Data Response) channel. DRS flits are used to transmit data with coherency. This does not count data flits transmitted over the NCB channel which transmits non-coherent data. This includes only the header flits (not the data). This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of flits transmitted over QPI on the home channel.",
+ "UMask": "0x6",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of non-request flits transmitted over QPI on the home channel. These are most commonly snoop responses, and this event can be used as a proxy for that.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of data request transmitted over QPI on the home channel. This basically counts the number of remote memory requests transmitted over QPI. In conjunction with the local read count in the Home Agent, one can calculate the number of LLC Misses.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the number of snoop request flits transmitted over QPI. These requests are contained in the snoop channel. This does not include snoop responses, which are transmitted on the home channel.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass flits. These packets are generally used to transmit non-coherent data across QPI.",
+ "UMask": "0xc",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass data flits. These flits are generally used to transmit non-coherent data across QPI. This does not include a count of the DRS (coherent) data flits. This only counts the data flits, not the NCB headers.",
"UMask": "0x4",
- "Unit": "QPI LL"
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of Non-Coherent Bypass non-data flits. These packets are generally used to transmit non-coherent data across QPI, and the flits counted here are for headers and other non-data flits. This includes extended headers.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Number of NCS (non-coherent standard) flits transmitted over QPI. This includes extended headers.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets to the local socket which use the AK ring.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three groups that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each flit is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four fits, each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI speed (for example, 8.0 GT/s), the transfers here refer to fits. Therefore, in L0, the system will transfer 1 flit at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as data bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual data and an additional 16 bits of other information. To calculate data bandwidth, one should therefore do: data flits * 8B / time.; Counts the total number of flits transmitted over the NDR (Non-Data Response) channel. This channel is used to send a variety of protocol flits including grants and completions. This is only for NDR packets destined for Route-thru to a remote socket.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "QPI"
+ },
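The inserts/occupancy pair above combines in the usual Little's-law fashion: occupancy divided by allocations approximates the average flit lifetime in the TxQ, and occupancy divided by not-empty cycles approximates the average depth while the queue is in use. A minimal Python sketch with placeholder counts (the cycles-not-empty event referenced in the description is not shown in this hunk):

    occupancy = 4_000_000         # accumulated UNC_Q_TxL_OCCUPANCY
    inserts = 500_000             # UNC_Q_TxL_INSERTS
    not_empty_cycles = 2_000_000  # TxQ cycles-not-empty event from the description (illustrative)

    avg_lifetime_cycles = occupancy / inserts   # average cycles a flit spends in the TxQ
    avg_depth = occupancy / not_empty_cycles    # average TxQ depth while non-empty
    print(avg_lifetime_cycles, avg_depth)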
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - HOM; for VN1",
+ "EventCode": "0x26",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Home messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD HOM; for VN1",
+ "EventCode": "0x22",
+ "EventName": "UNC_Q_TxR_AD_HOM_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for HOM messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "EventCode": "0x28",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD NDR; for VN1",
+ "EventCode": "0x24",
+ "EventName": "UNC_Q_TxR_AD_NDR_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for NDR messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN0",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - SNP; for VN1",
+ "EventCode": "0x27",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of link layer credits into the R3 (for transactions across the BGF) acquired each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN0",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AD SNP; for VN1",
+ "EventCode": "0x23",
+ "EventName": "UNC_Q_TxR_AD_SNP_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of link layer credits into the R3 (for transactions across the BGF) available in each cycle. Flow Control FIFO for Snoop messages on AD.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN0",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN1",
+ "EventCode": "0x29",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. Local NDR message class to AK Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN0",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - AK NDR: for VN1",
+ "EventCode": "0x25",
+ "EventName": "UNC_Q_TxR_AK_NDR_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. Local NDR message class to AK Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for VN1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - DRS; for Shared VN",
+ "EventCode": "0x2a",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_ACQUIRED.VN_SHR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. DRS message class to BL Egress.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN0",
+ "EventCode": "0x1f",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for VN1",
+ "EventCode": "0x1f",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL DRS; for Shared VN",
+ "EventCode": "0x1f",
+ "EventName": "UNC_Q_TxR_BL_DRS_CREDIT_OCCUPANCY.VN_SHR",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. DRS message class to BL Egress.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN0",
+ "EventCode": "0x2b",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCB; for VN1",
+ "EventCode": "0x2b",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCB message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCB; for VN1",
+ "EventCode": "0x20",
+ "EventName": "UNC_Q_TxR_BL_NCB_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCB message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - NCS; for VN1",
+ "EventCode": "0x2c",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_ACQUIRED.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of credits into the R3 (for transactions across the BGF) acquired each cycle. NCS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN0",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "R3QPI Egress Credit Occupancy - BL NCS; for VN1",
+ "EventCode": "0x21",
+ "EventName": "UNC_Q_TxR_BL_NCS_CREDIT_OCCUPANCY.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event that tracks the number of credits into the R3 (for transactions across the BGF) available in each cycle. NCS message class to BL Egress.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "EventCode": "0x1c",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits returned.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x1b",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO10",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 10",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO11",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 11",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO12",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 12",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO13",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 13",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO14",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 14&16",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO8",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 8",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2c",
+ "EventName": "UNC_R3_C_HI_AD_CREDITS_EMPTY.CBO9",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes); Cbox 9",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO0",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 0",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO1",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO2",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 2",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO3",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 3",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO4",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 4",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO5",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 5",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO6",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 6",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty",
+ "EventCode": "0x2b",
+ "EventName": "UNC_R3_C_LO_AD_CREDITS_EMPTY.CBO7",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers lower CBoxes); Cbox 7",
+ "UMask": "0x80",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2f",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA0",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA0",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2f",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.HA1",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; HA1",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2f",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCB Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "HA/R2 AD Credits Empty",
+ "EventCode": "0x2f",
+ "EventName": "UNC_R3_HA_R2_BL_CREDITS_EMPTY.R2_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to either HA or R2 on the BL Ring; R2 NCS Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 AD Credits Empty",
+ "EventCode": "0x29",
+ "EventName": "UNC_R3_QPI0_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the AD Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI0 BL Credits Empty",
+ "EventCode": "0x2d",
+ "EventName": "UNC_R3_QPI0_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI0 on the BL Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 AD Credits Empty",
+ "EventCode": "0x2a",
+ "EventName": "UNC_R3_QPI1_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the AD Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 HOM Messages",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 NDR Messages",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN0 SNP Messages",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 HOM Messages",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 NDR Messages",
+ "UMask": "0x40",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VN1 SNP Messages",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "QPI1 BL Credits Empty",
+ "EventCode": "0x2e",
+ "EventName": "UNC_R3_QPI1_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to QPI1 on the BL Ring; VNA",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "EventCode": "0xA",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters any polarity",
+ "UMask": "0xff",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "EventCode": "0xa",
+ "EventName": "UNC_R3_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters for Counterclockwise polarity",
+ "UMask": "0xcc",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "EventCode": "0xa",
+ "EventName": "UNC_R3_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters for Clockwise polarity",
+ "UMask": "0x33",
+ "Unit": "R3QPI"
+ },
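The ring-in-use events above count busy cycles per direction at this stop (umask 0x33 clockwise, 0xcc counterclockwise, 0xff any on the IV ring); dividing by UNC_R3_CLOCKTICKS over the same interval gives a per-direction utilization fraction. A minimal Python sketch with illustrative counts:

    ad_used_cw = 1_200_000   # UNC_R3_RING_AD_USED.CW over the interval (illustrative)
    ad_used_ccw = 900_000    # UNC_R3_RING_AD_USED.CCW over the same interval (illustrative)
    clockticks = 3_000_000   # UNC_R3_CLOCKTICKS over the same interval (illustrative)

    cw_util = ad_used_cw / clockticks    # fraction of uclk cycles the CW AD ring is busy here
    ccw_util = ad_used_ccw / clockticks  # fraction of uclk cycles the CCW AD ring is busy here
    print(f"AD ring: CW busy {cw_util:.1%}, CCW busy {ccw_util:.1%} of uclk cycles at this stop")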
+ {
+ "BriefDescription": "AD Ingress Bypassed",
+ "EventCode": "0x12",
+ "EventName": "UNC_R3_RxR_AD_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the AD Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Bypassed",
+ "EventCode": "0x12",
+ "EventName": "UNC_R3_RxR_BYPASSED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; HOM Ingress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; NDR Ingress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.; SNP Ingress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; AK CCW",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_TxR_NACK_CCW.AD",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; BL CW",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_TxR_NACK_CCW.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; BL CCW",
+ "EventCode": "0x28",
+ "EventName": "UNC_R3_TxR_NACK_CCW.BL",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; AD CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK_CW.AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; AD CCW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK_CW.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Egress NACK; AK CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R3_TxR_NACK_CW.BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Clockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
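The reject events above pair naturally with the credits-used events that follow: a reject means a request could get neither a VNA nor a VN0 credit and stalled, so rejects relative to VN0 credits used give a rough per-message-class credit-pressure indicator. A minimal Python sketch (treating the pair as a simple ratio is this sketch's assumption; all counts are illustrative):

    rejects = {"HOM": 120, "SNP": 40, "NDR": 10, "DRS": 300, "NCB": 5, "NCS": 2}                      # UNC_R3_VN0_CREDITS_REJECT.*
    used = {"HOM": 90_000, "SNP": 30_000, "NDR": 15_000, "DRS": 200_000, "NCB": 4_000, "NCS": 1_000}  # UNC_R3_VN0_CREDITS_USED.*

    for mc in rejects:
        pressure = rejects[mc] / used[mc] if used[mc] else float("inf")
        print(f"{mc}: {pressure:.4%} rejects per VN0 credit used")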
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; DRS Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; HOM Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCB Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NCS Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; NDR Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Acquisition Failed on DRS; SNP Message Class",
+ "EventCode": "0x39",
+ "EventName": "UNC_R3_VN1_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a VN1 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN1 credit and is delayed. This should generally be a rare situation.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; DRS Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; HOM Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCS Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Non-Coherent Standard (NCS). NCS is commonly used for ?",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NDR Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP Message Class",
+ "EventCode": "0x38",
+ "EventName": "UNC_R3_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions; HOM Message Class",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Data Response (DRS). DRS is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using DRS.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for the Home (HOM) message class. HOM is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Broadcast (NCB). NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Non-Coherent Standard (NCS).",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; NDR packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.; Filter for Snoop (SNP) message class. SNP is used for outgoing snoops. Note that snoop responses flow on the HOM message class.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with no VNA credits available",
+ "EventCode": "0x31",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_OUT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles when the transmitted has no VNA credits available and therefore cannot send any requests on this channel. Note that this does not mean that no flits can be transmitted, as those holding VN0 credits will still (potentially) be able to transmit. Generally it is the goal of the uncore that VNA credits should not run out, as this can substantially throttle back useful QPI bandwidth.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with 1 or more VNA credits in use",
+ "EventCode": "0x32",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_USED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles with one or more VNA credits in use. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average number of used VNA credits.",
+ "Unit": "R3QPI"
+ },
+ {
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; Filter by core",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores; PREQ, PSMI, P2U, Thermal, PCUSMI, PMI",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
}
]
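The R3QPI VNA descriptions above all point at the same pair of derived metrics: an in-use accumulator divided by UNC_R3_VNA_CREDITS_ACQUIRED gives the average lifetime of a credit holder, and the same accumulator divided by UNC_R3_VNA_CREDIT_CYCLES_USED gives the average number of credits in use while any are held. Below is only a minimal Python sketch of that arithmetic under stated assumptions: the counter values are made up, the in-use accumulator event is assumed to exist and is not part of the hunk above, and in practice the raw counts would come from a perf stat run against the R3QPI uncore PMU.

    # Sketch of the derived metrics suggested by the UNC_R3_VNA_* descriptions.
    # All values are invented for illustration; real counts would be collected
    # from the R3QPI uncore PMU. The in-use accumulator event is an assumption
    # (it is not one of the events added in the hunk above).

    vna_in_use_accumulator = 1_200_000  # sum over uclk cycles of VNA credits in use (assumed event)
    vna_credits_acquired = 150_000      # UNC_R3_VNA_CREDITS_ACQUIRED
    vna_credit_cycles_used = 400_000    # UNC_R3_VNA_CREDIT_CYCLES_USED (cycles with >= 1 credit in use)

    # Average lifetime of a VNA credit holder, in uclk cycles:
    # accumulator divided by the number of acquisitions.
    avg_credit_lifetime = vna_in_use_accumulator / vna_credits_acquired

    # Average number of VNA credits in use while any are held:
    # accumulator divided by the cycles-with-credits-in-use count.
    avg_credits_in_use = vna_in_use_accumulator / vna_credit_cycles_used

    print(f"average credit lifetime: {avg_credit_lifetime:.1f} uclk cycles")
    print(f"average credits in use : {avg_credits_in_use:.2f}")

The same shape of calculation applies to the R2PCIe ingress events in the next file (occupancy accumulator over not-empty cycles for average occupancy, or over allocations for average queuing latency).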
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json
new file mode 100644
index 000000000000..5887e6ebcfa8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-io.json
@@ -0,0 +1,549 @@
+[
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; DRS",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).; Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even on VRing 1",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd on VRing 1",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even on VRing 1",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd on VRing 1",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even on VRing 1",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd on VRing 1",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even on VRing 1",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd on VRing 1",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0xcc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even on VRing 0",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd on VRing 0",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even on VRing 1",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x40",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd on VRing 1",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Counterclockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x80",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x33",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even on VRing 0",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_VR0_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 0.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd on VRing 0",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_VR0_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 0.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even on VRing 1",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_VR1_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Even ring polarity on Virtual Ring 1.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd on VRing 1",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_VR1_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.; Filters for the Clockwise and Odd ring polarity on Virtual Ring 1.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "EventCode": "0xA",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters any polarity",
+ "UMask": "0xff",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Counterclockwise",
+ "EventCode": "0xa",
+ "EventName": "UNC_R2_RING_IV_USED.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters for Counterclockwise polarity",
+ "UMask": "0xcc",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Clockwise",
+ "EventCode": "0xa",
+ "EventName": "UNC_R2_RING_IV_USED.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.; Filters for Clockwise polarity",
+ "UMask": "0x33",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Counterclockwise",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES.CCW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced; Clockwise",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES.CW",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCB Ingress Queue",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R2_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the R2PCIe Ingress. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; NCS Ingress Queue",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R2_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given R2PCIe Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the R2PCIe Ingress Not Empty event to calculate average occupancy or the R2PCIe Ingress Allocations event in order to calculate average queuing latency.; DRS Ingress Queue",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
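A minimal sketch, alongside the entries above, of the derived statistics their occupancy-accumulator description points to; this is an illustration with hypothetical counter readings for a single ingress queue, not part of the patch itself.

# Hypothetical readings for one R2PCIe ingress queue over a measurement interval.
occupancy_acc = 1_200_000    # UNC_R2_RxR_OCCUPANCY accumulator for the queue
cycles_not_empty = 400_000   # UNC_R2_RxR_CYCLES_NE for the same queue
inserts = 150_000            # UNC_R2_RxR_INSERTS for the same queue

avg_occupancy = occupancy_acc / cycles_not_empty   # average entries while non-empty
avg_latency_cycles = occupancy_acc / inserts       # average queuing latency in cycles
print(avg_occupancy, avg_latency_cycles)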
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AD Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; AK Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.; BL Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AD CCW",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_TxR_NACK_CCW.AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD CounterClockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; AK CCW",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_TxR_NACK_CCW.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK CounterClockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CCW NACK; BL CCW",
+ "EventCode": "0x28",
+ "EventName": "UNC_R2_TxR_NACK_CCW.BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL CounterClockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CW NACK; AD CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD Clockwise Egress Queue",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CW NACK; AK CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK Clockwise Egress Queue",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress CW NACK; BL CW",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACK_CW.BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL Clockwise Egress Queue",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ }
+]
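As a hedged illustration of how one of the raw R2PCIe encodings above could be requested with perf's uncore event syntax: the PMU name "uncore_r2pcie" is an assumption and should be confirmed under /sys/bus/event_source/devices/ on the target system; the event/umask values are taken from the table.

import subprocess

# UNC_R2_TxR_CYCLES_NE.BL from the table above: event 0x23, umask 0x4.
event = "uncore_r2pcie/event=0x23,umask=0x4/"
subprocess.run(["perf", "stat", "-a", "-e", event, "sleep", "1"], check=False)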
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
index df4b43294fa0..65509342d56a 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-memory.json
@@ -1,78 +1,1619 @@
[
{
- "BriefDescription": "Memory page activates for reads and writes",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT.RD",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"UMask": "0x1",
- "Umask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "Read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Activate Count; Activate due to Write",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
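A small sketch of the page-miss estimate the ACT_COUNT descriptions above allude to, stated exactly as that text puts it; the readings are hypothetical and the snippet is illustrative only.

# Per the description: page misses can be estimated by subtracting
# page-miss precharges from activates.
act_count = 2_000_000       # UNC_M_ACT_COUNT.RD + UNC_M_ACT_COUNT.WR
pre_page_miss = 800_000     # UNC_M_PRE_COUNT.PAGE_MISS (defined later in this file)
page_miss_estimate = act_count - pre_page_miss
print(page_miss_estimate)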
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_READ",
+ "EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Read CAS commands issued on this channel (including underfills).",
"UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "Write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in RMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the number of underfill reads that are issued by the memory controller. This will generally be about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ. While it is possible for underfills to be issed in both WMM and RMM, this event counts both.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Read CAS issued in WMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of DRAM Write CAS commands issued on this channel.",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
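The removed LLC_MISSES.MEM_READ and MEM_WRITE aliases in this hunk were scaled by 64 bytes per CAS; the same bandwidth estimate can still be formed from the raw CAS counts. A minimal sketch with hypothetical per-channel readings, for illustration only:

cas_rd = 5_000_000          # UNC_M_CAS_COUNT.RD over the interval
cas_wr = 2_000_000          # UNC_M_CAS_COUNT.WR over the interval
interval_s = 1.0

read_bw_mb_s = cas_rd * 64 / 1e6 / interval_s    # 64 bytes transferred per CAS
write_bw_mb_s = cas_wr * 64 / 1e6 / interval_s
print(read_bw_mb_s, write_bw_mb_s)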
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_WRITE",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands; Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Clockticks",
+ "EventName": "UNC_M_DCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks. Use to generate percentages for memory controller CYCLES events",
- "Counter": "0,1,2,3",
- "EventName": "UNC_M_CLOCKTICKS",
+ "BriefDescription": "ECC Correctable Errors",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
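The MetricExpr removed just above expressed channel PPD residency as a percentage of memory-controller clockticks; the same ratio can be computed by hand. A minimal sketch, noting that the clocktick event is listed as UNC_M_DCLOCKTICKS in the updated table and that the readings below are hypothetical:

ppd_cycles = 30_000_000     # UNC_M_POWER_CHANNEL_PPD
dclk = 100_000_000          # UNC_M_DCLOCKTICKS over the same interval
power_channel_ppd_pct = ppd_cycles / dclk * 100.0
print(power_channel_ppd_pct)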
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
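The CKE_ON_CYCLES description above recommends using Invert to turn CKE-on cycles into power-saving cycles. A hedged sketch of that encoding follows; the inv and thresh format attributes and the uncore_imc_0 PMU name are assumptions to be checked under /sys/bus/event_source/devices/uncore_imc_*/format/ on the target system.

# With inv=1 and thresh=1 the counter would increment on cycles where rank 0 is
# NOT in CKE-on mode, i.e. cycles spent in a power-saving state (assumption).
cke_off_rank0 = "uncore_imc_0/event=0x83,umask=0x1,inv=1,thresh=1/"
print("perf stat -a -e '%s' -- sleep 1" % cke_off_rank0)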
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
- "Counter": "0,1,2,3",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
- "Counter": "0,1,2,3",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory page conflicts",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of page misses. This does not include explicit precharge commands sent with CAS commands in Auto-Precharge mode. This does not include PRE commands sent as a result of the page close counter expiration.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to read",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to write",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
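The RPQ descriptions above spell out how average read-queue latency is meant to be derived: the occupancy accumulator divided by the insert count (the occupancy event itself is referenced by the text but does not appear in this hunk). A minimal sketch with hypothetical readings, for illustration only:

rpq_occupancy_acc = 9_000_000   # RPQ occupancy accumulator over the interval
rpq_inserts = 1_500_000         # UNC_M_RPQ_INSERTS over the same interval
avg_read_queue_latency_dclk = rpq_occupancy_acc / rpq_inserts
print(avg_read_queue_latency_dclk)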
+ {
+ "BriefDescription": "VMSE MXB write buffer occupancy",
+ "EventCode": "0x91",
+ "EventName": "UNC_M_VMSE_MXB_WR_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in RMM",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.RMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "VMSE WR PUSH issued; VMSE write PUSH issued in WMM",
+ "EventCode": "0x90",
+ "EventName": "UNC_M_VMSE_WR_PUSH.WMM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "EventCode": "0xc1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
"UMask": "0x1",
"Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
}
]
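The UNC_M_WR_CAS_RANKn.BANKm entries above all share one event code per rank (0xb8 for rank 0 through 0xbf for rank 7) and select the bank with a single umask bit. As a minimal sketch of how those raw encodings could be exercised from user space, assuming a system whose memory-controller PMU is exposed as uncore_imc_0 (the instance name, the use of the perf tool, and the need for elevated privileges are assumptions, not part of this patch):

#!/usr/bin/env python3
# Sketch: sample WR_CAS accesses to rank 0, banks 0-7, over a 1 second window.
# Hypothetical setup: perf in PATH, an uncore PMU instance named uncore_imc_0,
# and sufficient privileges for system-wide (-a) uncore counting.
import subprocess

# Event 0xb8 = WR_CAS to rank 0; umask bit n selects bank n, as in the JSON above.
EVENTS = [f"uncore_imc_0/event=0xb8,umask={1 << bank:#x}/" for bank in range(8)]

cmd = ["perf", "stat", "-a", "-x", ";"]
for ev in EVENTS:
    cmd += ["-e", ev]
cmd += ["sleep", "1"]

res = subprocess.run(cmd, capture_output=True, text=True)

# perf stat -x writes one "count;unit;event;..." record per event to stderr.
for line in res.stderr.splitlines():
    fields = line.split(";")
    if len(fields) >= 3 and fields[0].strip().isdigit():
        print(f"{fields[2]:<45} {int(fields[0]):>14}")

Per-bank counts like these are mainly useful for spotting bank imbalance; the same loop covers the other ranks by substituting event codes 0xb9 through 0xbf.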
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
index 635c09fda1d9..5df1ebfb89ea 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/uncore-power.json
@@ -1,273 +1,591 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
- "Counter": "0,1,2,3",
+ "BriefDescription": "pclk Cycles",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "BriefDescription": "Core 0 C State Transition Cycles",
+ "EventCode": "0x70",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "BriefDescription": "Core 10 C State Transition Cycles",
+ "EventCode": "0x7a",
+ "EventName": "UNC_P_CORE10_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "BriefDescription": "Core 11 C State Transition Cycles",
+ "EventCode": "0x7b",
+ "EventName": "UNC_P_CORE11_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "BriefDescription": "Core 12 C State Transition Cycles",
+ "EventCode": "0x7c",
+ "EventName": "UNC_P_CORE12_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "BriefDescription": "Core 13 C State Transition Cycles",
+ "EventCode": "0x7d",
+ "EventName": "UNC_P_CORE13_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "BriefDescription": "Core 14 C State Transition Cycles",
+ "EventCode": "0x7e",
+ "EventName": "UNC_P_CORE14_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "BriefDescription": "Core 1 C State Transition Cycles",
+ "EventCode": "0x71",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "BriefDescription": "Core 2 C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "BriefDescription": "Core 3 C State Transition Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "BriefDescription": "Core 4 C State Transition Cycles",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "BriefDescription": "Core 5 C State Transition Cycles",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
- "Counter": "0,1,2,3",
- "EventCode": "0xa",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "BriefDescription": "Core 6 C State Transition Cycles",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 7 C State Transition Cycles",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 8 C State Transition Cycles",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_CORE8_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 9 C State Transition Cycles",
+ "EventCode": "0x79",
+ "EventName": "UNC_P_CORE9_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 0",
+ "EventCode": "0x17",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 1",
+ "EventCode": "0x18",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 10",
+ "EventCode": "0x21",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE10",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 11",
+ "EventCode": "0x22",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE11",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 12",
+ "EventCode": "0x23",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE12",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 13",
+ "EventCode": "0x24",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE13",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 14",
+ "EventCode": "0x25",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE14",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 2",
+ "EventCode": "0x19",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 3",
+ "EventCode": "0x1a",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 4",
+ "EventCode": "0x1b",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 5",
+ "EventCode": "0x1c",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 6",
+ "EventCode": "0x1d",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 7",
+ "EventCode": "0x1e",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 8",
+ "EventCode": "0x1f",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE8",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Deep C State Rejection - Core 9",
+ "EventCode": "0x20",
+ "EventName": "UNC_P_DELAYED_C_STATE_ABORT_CORE9",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times that a deep C state was requested, but the delayed C state algorithm rejected the deep sleep state. In other words, a wake event occurred before the timer expired that causes a transition into the deeper C state.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 0 C State Demotions",
+ "EventCode": "0x1e",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 1 C State Demotions",
+ "EventCode": "0x1f",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 10 C State Demotions",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_DEMOTIONS_CORE10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 11 C State Demotions",
+ "EventCode": "0x43",
+ "EventName": "UNC_P_DEMOTIONS_CORE11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 12 C State Demotions",
+ "EventCode": "0x44",
+ "EventName": "UNC_P_DEMOTIONS_CORE12",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 13 C State Demotions",
+ "EventCode": "0x45",
+ "EventName": "UNC_P_DEMOTIONS_CORE13",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 14 C State Demotions",
+ "EventCode": "0x46",
+ "EventName": "UNC_P_DEMOTIONS_CORE14",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Core 2 C State Demotions",
+ "EventCode": "0x20",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 3 C State Demotions",
+ "EventCode": "0x21",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 4 C State Demotions",
+ "EventCode": "0x22",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 5 C State Demotions",
+ "EventCode": "0x23",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 6 C State Demotions",
+ "EventCode": "0x24",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 7 C State Demotions",
+ "EventCode": "0x25",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 8 C State Demotions",
+ "EventCode": "0x40",
+ "EventName": "UNC_P_DEMOTIONS_CORE8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core 9 C State Demotions",
+ "EventCode": "0x41",
+ "EventName": "UNC_P_DEMOTIONS_CORE9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a configurable cores had a C-state demotion",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xb",
+ "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xc",
+ "EventName": "UNC_P_FREQ_BAND1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xd",
+ "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xe",
+ "EventName": "UNC_P_FREQ_BAND3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Current Strongest Upper Limit Cycles",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when current is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0x7",
- "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_CURRENT_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_current_cycles %",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x61",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x62",
+ "EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop it's frequency down. This is largely to minimize increases in snoop and remote read latencies.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
- "Counter": "0,1,2,3",
"EventCode": "0x60",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2f",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "BriefDescription": "Package C State Exit Latency",
+ "EventCode": "0x26",
+ "EventName": "UNC_P_PKG_C_EXIT_LATENCY",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is transitioning from package C2 to C3.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "BriefDescription": "Package C State Exit Latency",
+ "EventCode": "0x26",
+ "EventName": "UNC_P_PKG_C_EXIT_LATENCY_SEL",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is transitioning from package C2 to C3.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C0_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is in C0",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "BriefDescription": "Package C State Residency - C2",
+ "EventCode": "0x2b",
+ "EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C2_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is in C2",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "BriefDescription": "Package C State Residency - C3",
+ "EventCode": "0x2c",
+ "EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C3_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is in C3",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2d",
+ "EventName": "UNC_P_PKG_C_STATE_RESIDENCY_C6_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the package is in C6",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0xa",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x63",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles Changing Voltage",
+ "EventCode": "0x3",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition. This event is calculated by or'ing together the increasing and decreasing events.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles Decreasing Voltage",
+ "EventCode": "0x2",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is decreasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles Increasing Voltage",
+ "EventCode": "0x1",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is increasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
}
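The descriptions above replace the old MetricExpr/MetricName pairs, which expressed each PCU event as a percentage of UNC_P_CLOCKTICKS; because the pclk runs at a fixed 800 MHz, clockticks double as a wall-time base. A small worked sketch of that same arithmetic, using placeholder counts rather than measured data (the perf invocation in the comment and the lowercase event aliases are assumptions about a perf build that includes this JSON):

# Reconstructing the dropped "freq_trans_cycles %" metric by hand:
#   100 * UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS
# The counts below are placeholders, not measurements; on real hardware they
# would come from something like
#   perf stat -a -e unc_p_freq_trans_cycles,unc_p_clockticks sleep 10
freq_trans_cycles = 120_000_000     # placeholder value
clockticks        = 8_000_000_000   # ~10 s at the fixed 800 MHz pclk

print(f"freq_trans_cycles %: {100.0 * freq_trans_cycles / clockticks:.2f}")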
diff --git a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
index 4645e9d3f460..410763dd4394 100644
--- a/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/ivytown/virtual-memory.json
@@ -1,198 +1,158 @@
[
{
- "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_LOAD_MISSES.DEMAND_LD_WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "DTLB_LOAD_MISSES.DEMAND_LD_WALK_COMPLETED",
+ "EventName": "DTLB_LOAD_MISSES.DEMAND_LD_WALK_DURATION",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x84"
+ },
+ {
+ "BriefDescription": "Page walk for a large page completed for Demand load.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.LARGE_PAGE_WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes an page walk of any page size.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Misses in all TLB levels that cause a page walk of any page size from demand loads.",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
+ "EventCode": "0x5F",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts load operations that missed 1st level DTLB but hit the 2nd level.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Demand load Miss in all translation lookaside buffer (TLB) levels causes a page walk that completes of any page size.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "DTLB_LOAD_MISSES.DEMAND_LD_WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Misses in all TLB levels that caused page walk completed of any size by demand loads.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82"
},
{
- "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
+ "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "Cycle PMH is busy with a walk due to demand loads.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Demand load cycles page miss handler (PMH) is busy with this walk.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "DTLB_LOAD_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walk for a large page completed for Demand load.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Miss in all TLB levels causes a page walk of any page size (4K/2M/4M/1G).",
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Miss in all TLB levels causes a page walk that completes of any page size (4K/2M/4M/1G).",
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Miss in all TLB levels causes a page walk that completes of any page size (4K/2M/4M/1G).",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Cycles PMH is busy with this walk.",
+ "BriefDescription": "Cycles when PMH is busy with page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
+ "PublicDescription": "Cycles PMH is busy with this walk.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts load operations that missed 1st level DTLB but hit the 2nd level.",
- "EventCode": "0x5F",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Misses in all ITLB levels that cause page walks.",
+ "BriefDescription": "Completed page walks in ITLB due to STLB load misses for large pages",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "ITLB_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "PublicDescription": "Completed page walks in ITLB due to STLB load misses for large pages.",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Misses in all ITLB levels that cause page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycle PMH is busy with a walk.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of cache load STLB hits. No page walk.",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Number of cache load STLB hits. No page walk.",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Completed page walks in ITLB due to STLB load misses for large pages.",
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "ITLB_MISSES.LARGE_PAGE_WALK_COMPLETED",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Misses in all ITLB levels that cause completed page walks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Completed page walks in ITLB due to STLB load misses for large pages",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of ITLB flushes, includes 4k/2M/4M pages.",
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when PMH is busy with page walks",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "Cycle PMH is busy with a walk.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Count number of STLB flush attempts.",
+ "BriefDescription": "STLB flush attempts",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Count number of STLB flush attempts.",
"SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/cache.json b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
index 52dc6ef40e63..b9769d39940c 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/cache.json
@@ -1,1290 +1,977 @@
[
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This event counts the number of load uops retired",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "BriefDescription": "Allocated L1D data cache lines in M state.",
+ "EventCode": "0x51",
+ "EventName": "L1D.ALLOCATED_IN_M",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of store uops retired.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+ "EventCode": "0x51",
+ "EventName": "L1D.ALL_M_REPLACEMENT",
"SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "EventCode": "0x51",
+ "EventName": "L1D.EVICTION",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "L1D data line replacements.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "CounterMask": "1",
+ "EventCode": "0xBF",
+ "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x5"
},
{
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xD3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
- "SampleAfterValue": "100007",
- "BriefDescription": "Data from remote DRAM either Snoop not needed or Snoop Miss (RspI)",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "L1D miss outstanding duration in cycles.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D.ALLOCATED_IN_M",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Allocated L1D data cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L1D.EVICTION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf"
},
{
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L1D.ALL_M_REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss oustandings duration in cycles.",
- "CounterHTOff": "2"
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "2"
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_S",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines filling L2.",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines in E state filling L2.",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines in I state filling L2.",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines in S state filling L2.",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "BriefDescription": "Dirty L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "BriefDescription": "Dirty L2 cache lines filling the L2.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
"SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
"SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 code requests.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Demand Data Read requests.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Requests from L2 hardware prefetchers.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.ALL_PF",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc0"
},
{
+ "BriefDescription": "RFO requests to L2 cache.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "EventName": "L2_RQSTS.ALL_RFO",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "L2 cache misses when fetching instructions.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_RQSTS.CODE_RD_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Demand Data Read requests that hit L2 cache.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_RQSTS.PF_HIT",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_RQSTS.PF_MISS",
+ "EventName": "L2_RQSTS.PF_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that miss cache lines.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "RFOs that access cache lines in any state.",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
"EventName": "L2_STORE_LOCK_RQSTS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that access cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_L1D_WB_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_L1D_WB_RQSTS.HIT_S",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_L1D_WB_RQSTS.HIT_E",
+ "BriefDescription": "RFOs that hit cache lines in E state.",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_L1D_WB_RQSTS.HIT_M",
+ "BriefDescription": "RFOs that hit cache lines in M state.",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_L1D_WB_RQSTS.ALL",
+ "BriefDescription": "RFOs that miss cache lines.",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "EventName": "L2_TRANS.ALL_PF",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Transactions accessing L2 pipe.",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_TRANS.RFO",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "L2 cache accesses when fetching instructions.",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_TRANS.CODE_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache accesses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Demand Data Read requests that access L2 cache.",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_TRANS.ALL_PF",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1D writebacks that access L2 cache.",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_TRANS.L1D_WB",
"SampleAfterValue": "200003",
- "BriefDescription": "L1D writebacks that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "L2 fill requests that access L2 cache.",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_TRANS.L2_FILL",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 fill requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "L2 writebacks that access L2 cache.",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_TRANS.L2_WB",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "RFO requests that access L2 cache.",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_TRANS.ALL_REQUESTS",
+ "EventName": "L2_TRANS.RFO",
"SampleAfterValue": "200003",
- "BriefDescription": "Transactions accessing L2 pipe.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_IN.I",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in I state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when L1D is locked.",
+ "EventCode": "0x63",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_IN.S",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in S state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_IN.E",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in E state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "L2_LINES_IN.ALL",
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required.",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "BriefDescription": "Data from local DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.LOCAL_DRAM",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Data from remote DRAM either Snoop not needed or Snoop Miss (RspI)",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_UOPS_LLC_MISS_RETIRED.REMOTE_DRAM",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by demand.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by demand.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.PF_CLEAN",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Miss in last-level (L3) cache. Excludes Unknown data-source.",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.LLC_MISS",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All retired load uops.",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load uops retired",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "All retired store uops.",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of store uops retired.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Retired load uops with locked access.",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load uops that split across a cacheline boundary.",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "BriefDescription": "Retired store uops that split across a cacheline boundary.",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K).",
"SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x42"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "L2_LINES_OUT.DIRTY_ALL",
+ "BriefDescription": "Retired load uops that miss the STLB.",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines filling the L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x11"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
+ "BriefDescription": "Retired store uops that miss the STLB.",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x12"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "BriefDescription": "Demand and prefetch data reads.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
+ "BriefDescription": "Cacheable and non-cacheable code read requests.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "Split locks in SQ.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests sent to uncore.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xBF",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
"EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
"EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
+ "UMask": "0x1"
},
{
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts all demand & prefetch data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x000105B3",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch data reads that hit the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
"BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoop returned a clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x000107F7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all writebacks from the core to the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoop returned a clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all writebacks from the core to the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x803c8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x23ffc08000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoop returned a clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand rfo's",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803c8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23ffc08000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts non-temporal stores",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand rfo's",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x000105B3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts non-temporal stores",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch prefetch RFOs",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x000107F7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "BriefDescription": "Split locks in SQ.",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo references (demand & prefetch)",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
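A minimal sketch of how entries like the ones above can be consumed, assuming a kernel source checkout so the hard-coded path resolves. The field names (EventCode, UMask, EventName, CounterMask, EdgeDetect, Invert) are taken from the JSON in this diff; the script and path are illustrative only, mapping each entry onto perf's standard cpu/event=..,umask=../ raw-event syntax.

import json

def event_spec(entry):
    # Offcore entries list two codes ("0xB7, 0xBB"); use the first one.
    code = entry["EventCode"].split(",")[0].strip()
    spec = f"cpu/event={code},umask={entry['UMask']}"
    if "CounterMask" in entry:
        spec += f",cmask={entry['CounterMask']}"
    if entry.get("EdgeDetect") == "1":
        spec += ",edge"
    if entry.get("Invert") == "1":
        spec += ",inv"
    return spec + "/"

# Assumes every entry carries EventCode/UMask, which holds for this file.
path = "tools/perf/pmu-events/arch/x86/jaketown/floating-point.json"
with open(path) as f:
    for e in json.load(f):
        print(f"{e['EventName']:40} {event_spec(e)}")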
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
index 982eda48785e..79e8f403c426 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/floating-point.json
@@ -1,138 +1,108 @@
[
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OTHER_ASSISTS.AVX_STORE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "BriefDescription": "Cycles with any input/output SSE or FP assist.",
+ "CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1e"
},
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "BriefDescription": "Number of SIMD FP assists due to input values.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of SIMD FP assists due to Output values.",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of X87 assists due to input value.",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "FP_ASSIST.X87_INPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of X87 assists due to output value.",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FP_COMP_OPS_EXE.X87",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULs and IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
+ "EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_STORE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "SIMD_FP_256.PACKED_SINGLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.AVX_TO_SSE",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.SSE_TO_AVX",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
"EventCode": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SIMD_FP_256.PACKED_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1e",
- "EventName": "FP_ASSIST.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
+ "EventCode": "0x11",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
index 1b7b1dd36c68..754ee2749485 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/frontend.json
@@ -1,305 +1,250 @@
[
{
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
},
{
- "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "IDQ.EMPTY",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.ALL_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.OTHER_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x18"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "BriefDescription": "Cycles MITE is delivering 4 Uops.",
"CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "BriefDescription": "Cycles MITE is delivering any Uop.",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x24"
},
{
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xAC",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB_FILL.OTHER_CANCEL",
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.EMPTY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xAC",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_ALL_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "EventName": "IDQ.MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more information.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_DSB_OCCUR",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_OCCUR",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "EventName": "IDQ.MS_SWITCHES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "EventName": "IDQ.MS_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xAC",
- "Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "DSB_FILL.ALL_CANCEL",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
+ "CounterMask": "4",
"EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
- "EventName": "IDQ.MITE_ALL_UOPS",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
index dbb33e00b72a..e8f4e5c01c9f 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
@@ -1,232 +1,511 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * min(CPU_CLK_UNHALTED.THREAD, IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
},
{
- "BriefDescription": "Total number of retired Instructions",
- "MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_mem_bandwidth"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_lcp"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182\\,thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
- "BriefDescription": "Socket actual clocks when any core is active on that socket",
- "MetricExpr": "cbox_0@event\\=0x0@",
- "MetricGroup": "",
- "MetricName": "Socket_CLKS"
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_C_TOR_OCCUPANCY.MISS_OPCODE@filter_opc\\=0x182@ / UNC_C_TOR_INSERTS.MISS_OPCODE@filter_opc\\=0x182@) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cbox_0@event\\=0x0@",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_dram_bw_use",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: ",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - (cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - (cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING)) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS * FP_COMP_OPS_EXE.X87 / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/memory.json b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
index 27e636428f4f..a71e630fd030 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/memory.json
@@ -1,422 +1,319 @@
[
{
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 128.",
"EventCode": "0xCD",
- "MSRValue": "0x4",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4 .",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
+ "MSRValue": "0x80",
"PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x8",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 16.",
"EventCode": "0xCD",
- "MSRValue": "0x10",
- "Counter": "3",
- "UMask": "0x1",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
"SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads with latency value being above 256.",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
"PEBS": "2",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Loads with latency value being above 32.",
"EventCode": "0xCD",
- "MSRValue": "0x20",
- "Counter": "3",
- "UMask": "0x1",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
"SampleAfterValue": "100007",
- "BriefDescription": "Loads with latency value being above 32.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 4 .",
"EventCode": "0xCD",
- "MSRValue": "0x40",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 512.",
"EventCode": "0xCD",
- "MSRValue": "0x80",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 64.",
"EventCode": "0xCD",
- "MSRValue": "0x100",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 8.",
"EventCode": "0xCD",
- "MSRValue": "0x200",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
"EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "PEBS": "2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
"EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MISALIGN_MEM_REF.LOADS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
"EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MISALIGN_MEM_REF.STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "This event counts all LLC misses for all demand and L2 prefetches. LLC prefetches are excluded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFFC20077",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all local dram accesses for all demand and L2 prefetches. LLC prefetches are excluded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400077",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "This event counts all remote cache-to-cache transfers (includes HITM and HIT-Forward) for all demand and L2 prefetches. LLC prefetches are excluded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
+ "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.REMOTE_HITM_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x187FC20077",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67fc00001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc00004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that miss the LLC and the data forwarded from remote cache",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote & local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67fc00001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67fc00010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc00001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data forwarded from remote cache",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from remote & local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote & local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x67f800010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67fc00010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x87f820010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from local dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x107fc00010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x600400010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from remote dram",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x67f800010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC the data is found in M state in remote cache and forwarded from there",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3fffc20080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x107fc00010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data forwarded from remote cache",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x600400077",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.REMOTE_HIT_FORWARD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x87f820010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all local dram accesses for all demand and L2 prefetches. LLC prefetches are excluded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss in the LLC",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFFC20077",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20200",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts all LLC misses for all demand and L2 prefetches. LLC prefetches are excluded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x187FC20077",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DEMAND_MLC_PREF_READS.LLC_MISS.REMOTE_HITM_HIT_FORWARD",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3fffc20080",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts all remote cache-to-cache transfers (includes HITM and HIT-Forward) for all demand and L2 prefetches. LLC prefetches are excluded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/other.json b/tools/perf/pmu-events/arch/x86/jaketown/other.json
index 64b195b82c50..9f96121baef8 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/other.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/other.json
@@ -1,58 +1,46 @@
[
{
- "EventCode": "0x17",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Valid instructions written to IQ per cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPL_CYCLES.RING0",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "CounterMask": "1",
"EdgeDetect": "1",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0_TRANS",
"SampleAfterValue": "100007",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CPL_CYCLES.RING123",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
"EventCode": "0x4E",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "HW_PRE_REQ.DL1_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Valid instructions written to IQ per cycle.",
+ "EventCode": "0x17",
+ "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
index 783a5b4a67b1..d0edfdec9f01 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/pipeline.json
@@ -1,1216 +1,955 @@
[
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
- "Counter": "Fixed counter 1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 1"
+ "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "EventCode": "0xB6",
+ "EventName": "AGU_BYPASS_CANCEL.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "BriefDescription": "Divide operations executed.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV",
+ "PublicDescription": "This event counts the number of the divide operations executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Speculative and retired branches.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc2"
},
{
+ "BriefDescription": "Speculative and retired direct near calls.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
+ "BriefDescription": "Speculative and retired indirect return branches.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc8"
},
{
+ "BriefDescription": "Not taken macro-conditional branches.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Taken speculative and retired direct near calls.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x90"
},
{
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x84"
},
{
+ "BriefDescription": "Taken speculative and retired indirect calls.",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa0"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
"SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x88"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
},
{
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Speculative and retired mispredicted direct near calls.",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd0"
},
{
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
+ "BriefDescription": "Speculative mispredicted indirect branches",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
"SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
"SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0x84"
},
{
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
},
{
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ILD_STALL.IQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Stall cycles because IQ is full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "UMask": "0x88"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Multiply packed/scalar single precision uops allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted not taken branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RESOURCE_STALLS.LB",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted taken branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5B",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "RESOURCE_STALLS2.BOB_FULL",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Thread cycles when thread is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of micro-ops retired.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x6"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect mispredicted near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted not taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Stall cycles because IQ is full.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired instructions experiencing ITLB misses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.FPU_DIV_ACTIVE",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divider is busy executing divide operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of the divide operations executed.",
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "ARITH.FPU_DIV",
- "SampleAfterValue": "100003",
- "BriefDescription": "Divide operations executed.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED.THREAD",
+ "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched per thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED.CORE",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched from any thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "2",
- "CounterHTOff": "2"
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "6",
- "CounterHTOff": "2"
+ "BriefDescription": "False dependencies in MOB due to partial compare.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
"EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
"SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
"EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
"SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel? 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "AGU_BYPASS_CANCEL.COUNT",
+ "BriefDescription": "Retired instructions experiencing ITLB misses.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
"SampleAfterValue": "100003",
- "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
+ "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+ "CounterMask": "1",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "BriefDescription": "Multiply packed/scalar single precision uops allocated.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
+ "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel? 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "BriefDescription": "Resource-related stall cycles.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.LB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.LB_SB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.MEM_RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.OOO_RSRC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf0"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
- "TakenAlone": "1",
- "CounterHTOff": "1"
+ "UMask": "0x4"
},
{
- "EventCode": "0x5B",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls2 control structures full for physical registers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Cycles with either free list is empty.",
"EventCode": "0x5B",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
"EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with either free list is empty.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "RESOURCE_STALLS.MEM_RS",
+ "BriefDescription": "Resource stalls2 control structures full for physical registers.",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.BOB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Resource stalls out of order resources full.",
"EventCode": "0x5B",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
"EventName": "RESOURCE_STALLS2.OOO_RSRC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls out of order resources full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "RESOURCE_STALLS.LB_SB",
+ "BriefDescription": "Count cases of saving new LBR.",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel? 64 and IA-32 Architectures Optimization Reference Manual.",
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "BriefDescription": "Uops dispatched from any thread.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_DISPATCHED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EdgeDetect": "1",
- "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "BriefDescription": "Uops dispatched per thread.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_DISPATCHED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xc3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xc"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
"AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "UMask": "0x30"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
"EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Actually retired uops.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of micro-ops retired.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
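
These event entries are consumed by perf's jevents machinery at build time and are otherwise plain data. Purely as an illustration (not part of this patch), the sketch below shows how the fields used in the hunk above (EventCode, UMask, CounterMask, Invert, EdgeDetect, AnyThread) map onto the architectural IA32_PERFEVTSELx bit layout from the Intel SDM and onto a perf event string. The encode() helper and the example dict are hypothetical, written only for this sketch, and the cpu/event=...,umask=...,cmask=...,inv=...,edge=.../ term names are assumed from the format attributes the core PMU typically exposes via sysfs; nothing here is defined by the patch itself.

    # Minimal sketch, assuming the standard architectural PERFEVTSEL layout
    # (event select bits 7:0, umask 15:8, edge bit 18, any-thread bit 21,
    # invert bit 23, counter mask bits 31:24).
    def encode(event):
        """Build a raw PERFEVTSEL value and a perf event string from one JSON entry."""
        val = int(event["EventCode"], 16)                 # bits 7:0   - event select
        val |= int(event["UMask"], 16) << 8               # bits 15:8  - unit mask
        if event.get("EdgeDetect") == "1":
            val |= 1 << 18                                # bit 18     - edge detect
        if event.get("AnyThread") == "1":
            val |= 1 << 21                                # bit 21     - count for any thread on the core
        if event.get("Invert") == "1":
            val |= 1 << 23                                # bit 23     - invert counter-mask comparison
        val |= int(event.get("CounterMask", "0")) << 24   # bits 31:24 - counter mask (cmask)

        # Enable/privilege bits (USR, OS, EN) are left to the perf tool and kernel.
        terms = ["event=" + event["EventCode"], "umask=" + event["UMask"]]
        if event.get("CounterMask", "0") != "0":
            terms.append("cmask=" + event["CounterMask"])
        if event.get("Invert") == "1":
            terms.append("inv=1")
        if event.get("EdgeDetect") == "1":
            terms.append("edge=1")
        return val, "cpu/" + ",".join(terms) + "/"

    # RS_EVENTS.EMPTY_END as defined in the hunk above.
    rs_empty_end = {"EventCode": "0x5E", "UMask": "0x1", "CounterMask": "1",
                    "Invert": "1", "EdgeDetect": "1"}
    raw, perf_str = encode(rs_empty_end)
    print(hex(raw))    # 0x184015e
    print(perf_str)    # cpu/event=0x5E,umask=0x1,cmask=1,inv=1,edge=1/
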
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
index 3fa61d962607..63395e7ee0ce 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-cache.json
@@ -1,210 +1,1772 @@
[
{
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Uncore Clocks",
"EventName": "UNC_C_CLOCKTICKS",
"PerPkg": "1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "All LLC Misses (code+ data rd + data wr - including demand and prefetch)",
- "Counter": "0,1",
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1f",
+ "EventName": "UNC_C_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x21",
+ "EventName": "UNC_C_ISMQ_DRD_MISS_OCC",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x3",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; RTID",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.NID",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; External Snoop Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x9",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cache Lookups; Write Requests",
"EventCode": "0x34",
- "EventName": "UNC_C_LLC_LOOKUP.ANY",
- "Filter": "filter_state=0x1",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set filter mask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CBoGlCtrl[22:18] bits correspond to [FMESI] state.",
+ "UMask": "0x5",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.MISS",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x11",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "M line evictions from LLC (writebacks to memory)",
- "Counter": "0,1",
+ "BriefDescription": "Lines Victimized; Lines in M state",
"EventCode": "0x37",
"EventName": "UNC_C_LLC_VICTIMS.M_STATE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses - demand and prefetch data reads - excludes LLC prefetches. Derived from unc_c_tor_inserts.miss_opcode.demand",
- "Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.DATA_READ",
- "Filter": "filter_opc=0x182",
+ "BriefDescription": "Lines Victimized; Victimized Lines that Match NID",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.NID",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; RFO HitS",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.STARTED",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_C_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Even",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Down and Odd",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Even",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AD Ring In Use; Up and Odd",
+ "EventCode": "0x1b",
+ "EventName": "UNC_C_RING_AD_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Even",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Down and Odd",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Even",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "AK Ring In Use; Up and Odd",
+ "EventCode": "0x1c",
+ "EventName": "UNC_C_RING_AK_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Even",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Down and Odd",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.DOWN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Even",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "BL Ring in Use; Up and Odd",
+ "EventCode": "0x1d",
+ "EventName": "UNC_C_RING_BL_USED.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the 'UP' direction is on the clockwise ring and 'DN' is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Acknowledgements to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Data Responses to core",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Number of LLC responses that bounced on the Ring.; Snoops of processor's cache.",
+ "EventCode": "0x5",
+ "EventName": "UNC_C_RING_BOUNCES.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses - Uncacheable reads. Derived from unc_c_tor_inserts.miss_opcode.uncacheable",
- "Counter": "0,1",
+ "BriefDescription": "BL Ring in Use; Any",
+ "EventCode": "0x1e",
+ "EventName": "UNC_C_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring in JKT. Therefore, if one wants to monitor the 'Even' ring, they should select both UP_EVEN and DN_EVEN. To monitor the 'Odd' ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0xf",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AD_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.AK_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.BL_CORE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_C_RING_SINK_STARVED.IV_CORE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x7",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IRQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; IPQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Arbiter Blocking Cycles; ISMQ_BID",
+ "EventCode": "0x12",
+ "EventName": "UNC_C_RxR_EXT_STARVED.ISMQ_BIDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in external starvation. This occurs when one of the ingress queues is being starved by the other queues.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; VFIFO",
+ "EventCode": "0x13",
+ "EventName": "UNC_C_RxR_INSERTS.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IPQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; IRQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Internal Starvation Cycles; ISMQ",
+ "EventCode": "0x14",
+ "EventName": "UNC_C_RxR_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles in internal starvation. This occurs when one (or more) of the entries in the ingress queue are being starved out by other entries in that queue.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Address Conflict",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; Any Reject",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No Egress Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Probe Queue Retries; No QPI Credits",
+ "EventCode": "0x31",
+ "EventName": "UNC_C_RxR_IPQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a snoop (probe) request had to retry. Filters exist to cover some of the common cases retries.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Address Conflict",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ADDR_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; Any Reject",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No Egress Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.FULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No QPI Credits",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects; No RTIDs",
+ "EventCode": "0x32",
+ "EventName": "UNC_C_RxR_IRQ_RETRY.RTID",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Any Reject",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No Egress Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No IIO Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.IIO_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No QPI Credits",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.QPI_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; No RTIDs",
+ "EventCode": "0x33",
+ "EventName": "UNC_C_RxR_ISMQ_RETRY.RTID",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.IRQ_REJECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy; VFIFO",
+ "EventCode": "0x11",
+ "EventName": "UNC_C_RxR_OCCUPANCY.VFIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Evictions",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "filter_opc=0x187",
+ "EventName": "UNC_C_TOR_INSERTS.EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x4",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe allocating writes that miss LLC - DDIO misses. Derived from unc_c_tor_inserts.miss_opcode.ddio_miss",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Miss All",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x3",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0xa",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "LLC misses for ItoM writes (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.miss_opcode.itom_write",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Miss Opcode Match",
"EventCode": "0x35",
- "EventName": "LLC_MISSES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_c_tor_inserts.opcode.streaming_full",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID Matched",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "filter_opc=0x18c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x48",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_c_tor_inserts.opcode.streaming_partial",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID Matched Evictions",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "filter_opc=0x18d",
+ "EventName": "UNC_C_TOR_INSERTS.NID_EVICTION",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x44",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Partial PCIe reads. Derived from unc_c_tor_inserts.opcode.pcie_partial",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID Matched Miss All",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_PARTIAL_READ",
- "Filter": "filter_opc=0x195",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe allocating writes that hit in LLC (DDIO hits). Derived from unc_c_tor_inserts.opcode.ddio_hit",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched Miss",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_WRITE",
- "Filter": "filter_opc=0x19c",
+ "EventName": "UNC_C_TOR_INSERTS.NID_MISS_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x43",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe read current. Derived from unc_c_tor_inserts.opcode.pcie_read_current",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID and Opcode Matched",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_READ",
- "Filter": "filter_opc=0x19e",
+ "EventName": "UNC_C_TOR_INSERTS.NID_OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x41",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "ItoM write hits (as part of fast string memcpy stores). Derived from unc_c_tor_inserts.opcode.itom_write_hit",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; NID Matched Writebacks",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.ITOM_WRITE",
- "Filter": "filter_opc=0x1c8",
+ "EventName": "UNC_C_TOR_INSERTS.NID_WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x50",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe non-snoop reads. Derived from unc_c_tor_inserts.opcode.pcie_read",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Opcode Match",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_READ",
- "Filter": "filter_opc=0x1e4",
+ "EventName": "UNC_C_TOR_INSERTS.OPCODE",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
"UMask": "0x1",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe non-snoop writes (partial). Derived from unc_c_tor_inserts.opcode.pcie_partial_write",
- "Counter": "0,1",
+ "BriefDescription": "TOR Inserts; Writebacks",
"EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_PARTIAL_WRITE",
- "Filter": "filter_opc=0x1e5",
+ "EventName": "UNC_C_TOR_INSERTS.WB",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182).",
+ "UMask": "0x10",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "PCIe non-snoop writes (full line). Derived from unc_c_tor_inserts.opcode.pcie_full_write",
- "Counter": "0,1",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.PCIE_NS_WRITE",
- "Filter": "filter_opc=0x1e6",
+ "BriefDescription": "TOR Occupancy; Any",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.ALL",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x1",
- "Unit": "CBO"
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x4",
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Occupancy counter for all LLC misses; we divide this by UNC_C_CLOCKTICKS to get average Q depth",
+ "BriefDescription": "TOR Occupancy; Miss All",
"EventCode": "0x36",
"EventName": "UNC_C_TOR_OCCUPANCY.MISS_ALL",
- "Filter": "filter_opc=0x182",
- "MetricExpr": "(UNC_C_TOR_OCCUPANCY.MISS_ALL / UNC_C_CLOCKTICKS) * 100.",
- "MetricName": "tor_occupancy.miss_all %",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
"UMask": "0xa",
- "Unit": "CBO"
+ "Unit": "CBOX"
},
{
- "BriefDescription": "Occupancy counter for LLC data reads (demand and L2 prefetch). Derived from unc_c_tor_occupancy.miss_opcode.llc_data_read",
+ "BriefDescription": "TOR Occupancy; Miss Opcode Match",
"EventCode": "0x36",
- "EventName": "UNC_C_TOR_OCCUPANCY.LLC_DATA_READ",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS_OPCODE",
"PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
"UMask": "0x3",
- "Unit": "CBO"
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_EVICTION",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x4a",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_MISS_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x43",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; NID and Opcode Matched",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.NID_OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Opcode Match",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.OPCODE",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select 'MISS_OPC_MATCH' and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182)",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "EventCode": "0x4",
+ "EventName": "UNC_C_TxR_ADS_USED",
+ "PerPkg": "1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x1",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AD - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AD_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x10",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; AK - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.AK_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x20",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Cacheno",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; BL - Corebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.BL_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x40",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Egress Allocations; IV - Cachebo",
+ "EventCode": "0x2",
+ "EventName": "UNC_C_TxR_INSERTS.IV_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Cbo Egress. The Egress is used to queue up requests destined for the ring.",
+ "UMask": "0x8",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto AK Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "Injection Starvation; Onto BL Ring",
+ "EventCode": "0x3",
+ "EventName": "UNC_C_TxR_STARVED.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the Egress cannot send a transaction onto the ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "HA to iMC Bypass; Not Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.",
+ "UMask": "0x2",
+ "Unit": "HA"
},
{
- "BriefDescription": "read requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "HA to iMC Bypass; Taken",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_BYPASS_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the HA was able to bypass was attempted. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filted by when the bypass was taken and when it was not.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "uclks",
+ "EventName": "UNC_H_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the HA. This will be slightly different than the count in the Ubox because of enable/freeze delays. The HA is on the other side of the die from the fixed Ubox uclk counter, so the drift could be somewhat larger than in units that are closer like the QPI Agent.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; Conflict Detected",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Conflict Checks; No Conflict",
+ "EventCode": "0xb",
+ "EventName": "UNC_H_CONFLICT_CYCLES.NO_CONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Direct2Core Messages Sent",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_DIRECT2CORE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Direct2Core messages sent",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2Core was Disabled",
+ "EventCode": "0x12",
+ "EventName": "UNC_H_DIRECT2CORE_CYCLES_DISABLED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles in which Direct2Core was disabled",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Number of Reads that had Direct2Core Overridden",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Reads where Direct2Core overridden",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Not Needed",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Lookups; Snoop Needed",
+ "EventCode": "0xc",
+ "EventName": "UNC_H_DIRECTORY_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that looked up the directory. Can be filtered by requests that had to snoop and those that did not have to.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Any Directory Update",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Clear",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.CLEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Directory Updates; Directory Set",
+ "EventCode": "0xd",
+ "EventName": "UNC_H_DIRECTORY_UPDATE.SET",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of directory updates that were required. These result in writes to the memory controller. This can be filtered by directory sets and directory clears.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; AD to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.AD_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 0",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Cycles without QPI Ingress Credits; BL to QPI Link 1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_IGR_NO_CREDIT_CYCLES.BL_QPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the HA does not have credits to send messages to the QPI Agent. This can be filtered by the different credit pools and the different links.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Retry Events",
+ "EventCode": "0x1e",
+ "EventName": "UNC_H_IMC_RETRY",
+ "PerPkg": "1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; All Writes",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0xf",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Full Line",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; Partial Non-ISOCH",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA to iMC Full Line Writes Issued; ISOCH Partial",
+ "EventCode": "0x1a",
+ "EventName": "UNC_H_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of full line writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Reads",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.READS",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
"UMask": "0x3",
"Unit": "HA"
},
{
- "BriefDescription": "write requests to home agent",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read and Write Requests; Writes",
"EventCode": "0x1",
"EventName": "UNC_H_REQUESTS.WRITES",
"PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
"UMask": "0xc",
"Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x3e",
+ "EventName": "UNC_H_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x3f",
+ "EventName": "UNC_H_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x40",
+ "EventName": "UNC_H_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x15",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "iMC RPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x16",
+ "EventName": "UNC_H_RPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting reads from the HA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's RPQ (read pending queue). This queue is broken into regular credits/buffers that are used by general reads, and 'special' requests such as ISOCH reads. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 0",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 1",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 2",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 3",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 4",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x10",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 5",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x20",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 6",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x40",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 0; TAD Region 7",
+ "EventCode": "0x1b",
+ "EventName": "UNC_H_TAD_REQUESTS_G0.REGION7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 0 to 7. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x80",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 10",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION10",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 11",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION11",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 8",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION8",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA Requests to a TAD Region - Group 1; TAD Region 9",
+ "EventCode": "0x1c",
+ "EventName": "UNC_H_TAD_REQUESTS_G1.REGION9",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HA requests to a given TAD region. There are up to 11 TAD (target address decode) regions in each home agent. All requests destined for the memory controller must first be decoded to determine which TAD region they are in. This event is filtered based on the TAD region ID, and covers regions 8 to 10. This event is useful for understanding how applications are using the memory that is spread across the different memory regions. It is particularly useful for 'Monroe' systems that use the TAD to enable individual channels to enter self-refresh to save power.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Tracker Allocations; All Requests",
+ "EventCode": "0x6",
+ "EventName": "UNC_H_TRACKER_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the local HA tracker pool. This can be used in conjunction with the occupancy accumulation event in order to calculate average latency. One cannot filter between reads and writes. HA trackers are allocated as soon as a request enters the HA and is released after the snoop response and data return (or post in the case of a write) and the response is returned on the ring.",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Non-data Responses",
+ "EventCode": "0xf",
+ "EventName": "UNC_H_TxR_AD.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions; Snoops",
+ "EventCode": "0xf",
+ "EventName": "UNC_H_TxR_AD.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound transactions on the AD ring. This can be filtered by the NDR and SNP message classes. See the filter descriptions for more details.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; All",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Full; Scheduler 1",
+ "EventCode": "0x2a",
+ "EventName": "UNC_H_TxR_AD_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; All",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 0",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Not Empty; Scheduler 1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_TxR_AD_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; All",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 0",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Allocations; Scheduler 1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_TxR_AD_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; All",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 0",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AD Egress Occupancy; Scheduler 1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_TxR_AD_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; All",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Full; Scheduler 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_TxR_AK_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; All",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 0",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Not Empty; Scheduler 1",
+ "EventCode": "0x31",
+ "EventName": "UNC_H_TxR_AK_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; All",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 0",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations; Scheduler 1",
+ "EventCode": "0x2f",
+ "EventName": "UNC_H_TxR_AK_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound NDR Ring Transactions",
+ "EventCode": "0xe",
+ "EventName": "UNC_H_TxR_AK_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of outbound NDR transactions sent on the AK ring. NDR stands for 'non-data response' and is generally used for completions that do not include data. AK NDR is used for messages to the local socket.",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; All",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 0",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "AK Egress Occupancy; Scheduler 1",
+ "EventCode": "0x30",
+ "EventName": "UNC_H_TxR_AK_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_CORE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "EventCode": "0x10",
+ "EventName": "UNC_H_TxR_BL.DRS_QPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS messages sent out on the BL ring. This can be filtered by the destination.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; All",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 0",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Full; Scheduler 1",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TxR_BL_CYCLES_FULL.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; All",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 0",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Not Empty; Scheduler 1",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TxR_BL_CYCLES_NE.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; All",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Allocations; Scheduler 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_TxR_BL_INSERTS.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; All",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 0",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "BL Egress Occupancy; Scheduler 1",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_TxR_BL_OCCUPANCY.SCHED1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 0",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 2",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Regular; Channel 3",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_REG_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'regular' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the regular credits Common high banwidth workloads should be able to make use of all of the regular buffers, but it will be difficult (and uncommon) to make use of both the regular and special buffers at the same time. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 0",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x1",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x2",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 2",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x4",
+ "Unit": "HA"
+ },
+ {
+ "BriefDescription": "HA iMC CHN0 WPQ Credits Empty - Special; Channel 3",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_WPQ_CYCLES_NO_SPEC_CREDITS.CHN3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are no 'special' credits available for posting writes from the HA into the iMC. In order to send writes into the memory controller, the HA must first acquire a credit for the iMC's WPQ (write pending queue). This queue is broken into regular credits/buffers that are used by general writes, and 'special' requests such as ISOCH writes. This count only tracks the 'special' credits. This statistic is generally not interesting for general IA workloads, but may be of interest for understanding the characteristics of systems using ISOCH. One can filter based on the memory controller channel. One or more channels can be tracked at a given time.",
+ "UMask": "0x8",
+ "Unit": "HA"
}
]
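The per-channel WPQ credit events above encode each iMC channel as a single umask bit (0x1, 0x2, 0x4 and 0x8 for channels 0-3), and their descriptions note that one or more channels can be tracked at a time. A minimal sketch of how those bits combine, in Python purely for illustration (the helper name is hypothetical, not part of these files):

    # Sketch: build a combined umask for the per-channel WPQ credit events,
    # assuming one umask bit per iMC channel as documented above.
    CHANNEL_BIT = {0: 0x1, 1: 0x2, 2: 0x4, 3: 0x8}

    def wpq_umask(channels):
        """Return the umask value covering the given iMC channels."""
        mask = 0
        for ch in channels:
            mask |= CHANNEL_BIT[ch]
        return mask

    print(hex(wpq_umask([0, 2])))  # 0x5 -> track channels 0 and 2 together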
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
index 1b53c0e609e3..874f15ea8228 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-interconnect.json
@@ -1,48 +1,1787 @@
[
{
- "BriefDescription": "QPI clock ticks. Used to get percentages of QPI cycles events",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Merges",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.MERGE_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Address Match (Conflict) Count; Conflict Stalls",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_ADDRESS_MATCH.STALL_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when an inbound write (from a device to memory or another device) had an address match with another request in the write cache.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Any Source",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ack Pending Occupancy; Select Source",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_CACHE_ACK_PENDING_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes that have acquired ownership but have not yet returned their data to the uncore. These writes are generally queued up in the switch trying to get to the head of their queues so that they can post their data. The queue occuapancy increments when the ACK is received, and decrements when either the data is returned OR a tickle is received and ownership is released. Note that a single tickle can result in multiple decrements.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Any Source",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Ownership Occupancy; Select Source",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_CACHE_OWN_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore trying to acquire ownership in each cycle. This can be used with the write transaction count to calculate the average write latency in the uncore. The occupancy increments when a write request is issued, and decrements when the data is returned.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Any Source",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Read Occupancy; Select Source",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_CACHE_READ_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads that are outstanding in the uncore in each cycle. This can be used with the read transaction count to calculate the average read latency in the uncore. The occupancy increments when a read request is issued, and decrements when the data is returned.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Select Source",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Any Source",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outstanding Write Occupancy; Select Source",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_CACHE_WRITE_OCCUPANCY.SOURCE",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of writes (and write prefetches) that are outstanding in the uncore in each cycle. This can be used with the transaction count event to calculate the average latency in the uncore. The occupancy increments when the ownership fetch/prefetch is issued, and decrements the data is returned to the uncore.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clocks in the IRP",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of clocks in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0xB",
+ "EventName": "UNC_I_RxR_AK_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the AK Ingress is full. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Ingress Occupancy",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_RxR_AK_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the AK Ingress. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0xC",
+ "EventName": "UNC_I_RxR_AK_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the AK Ingress in each cycles. This queue is where the IRP receives responses from R2PCIe (the ring).",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x4",
+ "EventName": "UNC_I_RxR_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - DRS",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_RxR_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x7",
+ "EventName": "UNC_I_RxR_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x5",
+ "EventName": "UNC_I_RxR_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCB",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_RxR_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x8",
+ "EventName": "UNC_I_RxR_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x6",
+ "EventName": "UNC_I_RxR_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the BL Ingress is full. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL Ingress Occupancy - NCS",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_RxR_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the BL Ingress. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "EventCode": "0x9",
+ "EventName": "UNC_I_RxR_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of the BL Ingress in each cycles. This queue is where the IRP receives data from R2PCIe (the ring). It is used for data returns from read requets as well as outbound MMIO writes.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Ownership Lost",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.LOST_OWNERSHIP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Tickle Count; Data Returned",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_TICKLES.TOP_OF_QUEUE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of tickles that are received. This is for both explicit (from Cbo) and implicit (internal conflict) tickles.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.PD_PREFETCHES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of 'Inbound' transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_TxR_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_TxR_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_TxR_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxR_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Write Ordering Stalls",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_WRITE_ORDERING_STALL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when there are pending write ACK's in the switch but the switch->IRP pipeline is not utilized.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Number of qfclks",
"EventCode": "0x14",
"EventName": "UNC_Q_CLOCKTICKS",
"PerPkg": "1",
- "Unit": "QPI LL"
+ "PublicDescription": "Counts the number of clocks in the QPI LL. This clock runs at 1/8th the 'GT/s' speed of the QPI link. For example, a 8GT/s link will have qfclk or 1GHz. JKT does not support dynamic link speeds, so this frequency is fixed.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Count of CTO Events",
+ "EventCode": "0x38",
+ "EventName": "UNC_Q_CTO_COUNT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of CTO (cluster trigger outs) events that were asserted across the two slots. If both slots trigger in a given cycle, the event will increment by 2. You can use edge detect to count the number of cases when both events triggered.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress Credits",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - Egress and RBT",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_CREDITS_RBT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x8",
+ "Unit": "QPI"
},
{
- "BriefDescription": "Cycles where receiving QPI link is in half-width mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Failure - RBT Not Set",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.FAILURE_RBT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Direct 2 Core Spawning; Spawn Success",
+ "EventCode": "0x13",
+ "EventName": "UNC_Q_DIRECT2CORE.SUCCESS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRS packets that we attempted to do direct2core on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "EventCode": "0x12",
+ "EventName": "UNC_Q_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a QPI link. Use edge detect to count the number of instances when the QPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"EventCode": "0x10",
"EventName": "UNC_Q_RxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_RxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "rxl0p_power_cycles %",
"PerPkg": "1",
- "Unit": "QPI LL"
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0xf",
+ "EventName": "UNC_Q_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Bypassed",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; LinkInit",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.LINK_INIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected; Normal Operations",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_CRC_ERRORS.NORMAL_OP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the QPI Agent. Each QPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the QPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; DRS",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; HOM",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCB",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; NCS",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x4",
+ "Unit": "QPI"
},
{
- "BriefDescription": "Cycles where transmitting QPI link is in half-width mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "VN0 Credit Consumed; NDR",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed; SNP",
+ "EventCode": "0x1e",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VN0.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x1d",
+ "EventName": "UNC_Q_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Cycles Not Empty",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the QPI RxQ was not empty. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy Accumulator event to calculate the average occupancy.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Idle and Null Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 0; Non-Data protocol Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_RxL_FLITS_G0.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Flits (both Header and Data)",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x18",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Data Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; DRS Header Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.DRS_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x6",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Non-Request Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_NONREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; HOM Request Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.HOM_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 1; SNP Flits",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_RxL_FLITS_G1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0xc",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent data Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent non-data Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCB_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Coherent standard Rx Flits",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AD",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Received - Group 2; Non-Data Response Rx Flits - AK",
+ "EventCode": "0x3",
+ "EventName": "UNC_Q_RxL_FLITS_G2.NDR_AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits received from the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations",
+ "EventCode": "0x8",
+ "EventName": "UNC_Q_RxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - DRS",
+ "EventCode": "0x9",
+ "EventName": "UNC_Q_RxL_INSERTS_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only DRS flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - HOM",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_RxL_INSERTS_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only HOM flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCB",
+ "EventCode": "0xa",
+ "EventName": "UNC_Q_RxL_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCB flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NCS",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NCS flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - NDR",
+ "EventCode": "0xe",
+ "EventName": "UNC_Q_RxL_INSERTS_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only NDR flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Rx Flit Buffer Allocations - SNP",
+ "EventCode": "0xd",
+ "EventName": "UNC_Q_RxL_INSERTS_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Rx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime. This monitors only SNP flits.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets",
+ "EventCode": "0xb",
+ "EventName": "UNC_Q_RxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - DRS",
+ "EventCode": "0x15",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors DRS flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - HOM",
+ "EventCode": "0x18",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors HOM flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCB flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NCS",
+ "EventCode": "0x17",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NCS flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - NDR",
+ "EventCode": "0x1a",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors NDR flits only.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - SNP",
+ "EventCode": "0x19",
+ "EventName": "UNC_Q_RxL_OCCUPANCY_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the QPI RxQ in each cycle. Generally, when data is transmitted across QPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime. This monitors SNP flits only.",
+ "Unit": "QPI"
+ },
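(Editor's note: the occupancy, not-empty and allocation events described above are meant to be combined; the following is a minimal Python sketch of that arithmetic with placeholder counter values. UNC_Q_RxL_CYCLES_NE is assumed to be the Rx-side "Flit Buffer Not Empty" event defined elsewhere in this file.)

    # Little's-law style derivation from the QPI Rx flit buffer counters.
    occupancy = 1_200_000  # UNC_Q_RxL_OCCUPANCY: element-cycles accumulated
    not_empty = 300_000    # UNC_Q_RxL_CYCLES_NE: cycles the RxQ was not empty (assumed name)
    inserts   = 150_000    # UNC_Q_RxL_INSERTS: allocations into the RxQ
    avg_occupancy = occupancy / not_empty  # average queue depth while non-empty
    avg_lifetime  = occupancy / inserts    # average cycles a flit stays buffered
    print(f"avg occupancy {avg_occupancy:.2f}, avg lifetime {avg_lifetime:.2f} cycles")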
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - HOM",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - DRS",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - SNP",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NDR",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCS",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x20",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; BGF Stall - NCB",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.BGF_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; Egress Credits",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.EGRESS_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x40",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Stalls Sending to R3QPI; GV",
+ "EventCode": "0x35",
+ "EventName": "UNC_Q_RxL_STALLS.GV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of stalls trying to send to R3QPI.",
+ "UMask": "0x80",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
"EventCode": "0xd",
"EventName": "UNC_Q_TxL0P_POWER_CYCLES",
- "MetricExpr": "(UNC_Q_TxL0P_POWER_CYCLES / UNC_Q_CLOCKTICKS) * 100.",
- "MetricName": "txl0p_power_cycles %",
"PerPkg": "1",
- "Unit": "QPI LL"
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the QPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize QPI for snoops and their responses. Use edge detect to count the number of instances when the QPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "QPI"
},
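(Editor's note: the MetricExpr removed above can still be reproduced by hand from the raw counts; a hedged Python sketch with placeholder values follows.)

    # L0p residency as a percentage of qfclk cycles, matching the dropped MetricExpr.
    l0p_cycles = 2_000_000   # UNC_Q_TxL0P_POWER_CYCLES
    clockticks = 50_000_000  # UNC_Q_CLOCKTICKS over the same interval
    print(f"Tx L0p residency: {l0p_cycles / clockticks * 100.0:.1f}%")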
{
- "BriefDescription": "Number of data flits transmitted ",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0xc",
+ "EventName": "UNC_Q_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "EventCode": "0x5",
+ "EventName": "UNC_Q_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the QPI Link. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is almost full",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.ALMOST_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with no LLR Credits; LLR is full",
+ "EventCode": "0x2",
+ "EventName": "UNC_Q_TxL_CRC_NO_CREDITS.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when the Tx side ran out of Link Layer Retry credits, causing the Tx to stall.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Cycles not Empty",
+ "EventCode": "0x6",
+ "EventName": "UNC_Q_TxL_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the TxQ is not empty. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Data Tx Flits",
"EventName": "UNC_Q_TxL_FLITS_G0.DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
"UMask": "0x2",
- "Unit": "QPI LL"
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 0; Idle and Null Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G0.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x1",
+ "Unit": "QPI"
},
{
- "BriefDescription": "Number of non data (control) flits transmitted ",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Flits Transferred - Group 0; Non-Data protocol Tx Flits",
"EventName": "UNC_Q_TxL_FLITS_G0.NON_DATA",
"PerPkg": "1",
- "ScaleUnit": "8Bytes",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. It includes filters for Idle, protocol, and Data Flits. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time (for L0) or 4B instead of 8B for L0p.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
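(Editor's note: the flit descriptions above spell out the link and data bandwidth math; a minimal Python sketch of it follows, using placeholder counts collected over interval_s seconds.)

    # Link vs. data bandwidth as described for UNC_Q_TxL_FLITS_G0.*.
    interval_s  = 1.0
    total_flits = 4_000_000  # sum of all G0 flit counts (data + non-data + idle)
    data_flits  = 1_500_000  # UNC_Q_TxL_FLITS_G0.DATA
    link_bw_bits_per_s  = total_flits * 80 / interval_s  # flits * 80b / time
    data_bw_bytes_per_s = data_flits * 8 / interval_s    # data flits * 8B / time (L0; 4B per flit in L0p)
    print(f"link {link_bw_bits_per_s / 1e9:.2f} Gb/s, data {data_bw_bytes_per_s / 2**20:.1f} MiB/s")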
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Flits (both Header and Data)",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x18",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Data Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; DRS Header Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.DRS_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x6",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Non-Request Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_NONREQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; HOM Request Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.HOM_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 1; SNP Flits",
+ "EventName": "UNC_Q_TxL_FLITS_G1.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for SNP, HOM, and DRS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent Bypass Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0xc",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x4",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent non-data Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCB_NONDATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x8",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Coherent standard Tx Flits",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x10",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AD",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x1",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Flits Transferred - Group 2; Non-Data Response Tx Flits - AK",
+ "EventCode": "0x1",
+ "EventName": "UNC_Q_TxL_FLITS_G2.NDR_AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of flits transmitted across the QPI Link. This is one of three 'groups' that allow us to track flits. It includes filters for NDR, NCB, and NCS message classes. Each 'flit' is made up of 80 bits of information (in addition to some ECC data). In full-width (L0) mode, flits are made up of four 'fits', each of which contains 20 bits of data (along with some additional ECC data). In half-width (L0p) mode, the fits are only 10 bits, and therefore it takes twice as many fits to transmit a flit. When one talks about QPI 'speed' (for example, 8.0 GT/s), the 'transfers' here refer to 'fits'. Therefore, in L0, the system will transfer 1 'flit' at the rate of 1/4th the QPI speed. One can calculate the bandwidth of the link by taking: flits*80b/time. Note that this is not the same as 'data' bandwidth. For example, when we are transferring a 64B cacheline across QPI, we will break it into 9 flits -- 1 with header information and 8 with 64 bits of actual 'data' and an additional 16 bits of other information. To calculate 'data' bandwidth, one should therefore do: data flits * 8B / time.",
+ "UMask": "0x2",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x4",
+ "EventName": "UNC_Q_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the QPI Tx Flit Buffer. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x7",
+ "EventName": "UNC_Q_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across QPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Returned",
+ "EventCode": "0x1c",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURNS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits returned.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x1b",
+ "EventName": "UNC_Q_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "QPI"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R3_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the QPI uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the QPI Agent is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Acquired",
+ "EventCode": "0x20",
+ "EventName": "UNC_R3_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the NCS/NCB/DRS credit is acquired in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit Rejected",
+ "EventCode": "0x21",
+ "EventName": "UNC_R3_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request attempted to acquire an NCS/NCB/DRS credit in the QPI for sending messages on BL to the IIO but was rejected because no credit was available. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "to IIO BL Credit In Use",
+ "EventCode": "0x22",
+ "EventName": "UNC_R3_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the NCS/NCB/DRS credit is in use in the QPI for sending messages on BL to the IIO. There is one credit for each of these three message classes (three credits total). NCS is used for reads to PCIe space, NCB is used for transferring data without coherency, and DRS is used for transferring data with coherency (cacheable PCI transactions). This event can only track one message class at a time.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R3_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R3_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R3_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "R3 IV Ring in Use; Any",
+ "EventCode": "0xa",
+ "EventName": "UNC_R3_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sent, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
+ "UMask": "0xf",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Bypassed",
+ "EventCode": "0x12",
+ "EventName": "UNC_R3_RxR_BYPASSED.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the Ingress was bypassed and an incoming transaction was bypassed directly across the BGF and into the qfclk domain.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; DRS",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; HOM",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NDR",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; SNP",
+ "EventCode": "0x10",
+ "EventName": "UNC_R3_RxR_CYCLES_NE.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the QPI Ingress is not empty. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; DRS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; HOM",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCB",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NCS",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; NDR",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
"UMask": "0x4",
- "Unit": "QPI LL"
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Allocations; SNP",
+ "EventCode": "0x11",
+ "EventName": "UNC_R3_RxR_INSERTS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI Ingress. This tracks one of the three rings that are used by the QPI agent. This can be used in conjunction with the QPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; DRS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; HOM",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCB",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NCS",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; NDR",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy Accumulator; SNP",
+ "EventCode": "0x13",
+ "EventName": "UNC_R3_RxR_OCCUPANCY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given QPI Ingress queue in each cycles. This tracks one of the three ring Ingress buffers. This can be used with the QPI Ingress Not Empty event to calculate average occupancy or the QPI Ingress Allocations event in order to calculate average queuing latency.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; DRS Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; HOM Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCB Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NCS Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; NDR Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Acquisition Failed on DRS; SNP Message Class",
+ "EventCode": "0x37",
+ "EventName": "UNC_R3_VN0_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a request failed to acquire a DRS VN0 credit. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This therefore counts the number of times when a request failed to acquire either a VNA or VN0 credit and is delayed. This should generally be a rare situation.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; DRS Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; HOM Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCS Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NDR Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP Message Class",
+ "EventCode": "0x36",
+ "EventName": "UNC_R3_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA credit Acquisitions",
+ "EventCode": "0x33",
+ "EventName": "UNC_R3_VNA_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI VNA Credit acquisitions. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average lifetime of a credit holder. VNA credits are used by all message classes in order to communicate across QPI. If a packet is unable to acquire credits, it will then attempt to use credits from the VN0 pool. Note that a single packet may require multiple flit buffers (i.e. when data is being transferred). Therefore, this event will increment by the number of credits acquired in each cycle. Filtering based on message class is not provided. One can count the number of packets transferred in a given message class using an qfclk event.",
+ "Unit": "R3QPI"
+ },
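Note: the UNC_R3_VNA_CREDITS_ACQUIRED description above pairs this event with the VNA In-Use Accumulator to derive the average lifetime of a credit holder. A minimal Python sketch of that ratio follows; the accumulator value is a hypothetical input (its event is not part of this hunk) and the counter readings are made up for illustration.

    # Sketch only: average lifetime (in uclk cycles) of a VNA credit holder,
    # per the UNC_R3_VNA_CREDITS_ACQUIRED description. The accumulator value is
    # a hypothetical input; its event definition is not shown in this patch.
    def avg_vna_credit_lifetime(in_use_accumulator: int, credits_acquired: int) -> float:
        """Occupancy sum divided by acquisitions gives the average cycles a credit is held."""
        if credits_acquired == 0:
            return 0.0
        return in_use_accumulator / credits_acquired

    # Made-up counter readings:
    print(avg_vna_credit_lifetime(in_use_accumulator=1_200_000, credits_acquired=300_000))  # 4.0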
+ {
+ "BriefDescription": "VNA Credit Reject; DRS Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x8",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; HOM Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x1",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCB Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x10",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NCS Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x20",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; NDR Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.NDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x4",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Reject; SNP Message Class",
+ "EventCode": "0x34",
+ "EventName": "UNC_R3_VNA_CREDITS_REJECT.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of attempted VNA credit acquisitions that were rejected because the VNA credit pool was full (or almost full). It is possible to filter this event by message class. Some packets use more than one flit buffer, and therefore must acquire multiple credits. Therefore, one could get a reject even if the VNA credits were not fully used up. The VNA pool is generally used to provide the bulk of the QPI bandwidth (as opposed to the VN0 pool which is used to guarantee forward progress). VNA credits can run out if the flit buffer on the receiving side starts to queue up substantially. This can happen if the rest of the uncore is unable to drain the requests fast enough.",
+ "UMask": "0x2",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with no VNA credits available",
+ "EventCode": "0x31",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_OUT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles when the transmitted has no VNA credits available and therefore cannot send any requests on this channel. Note that this does not mean that no flits can be transmitted, as those holding VN0 credits will still (potentially) be able to transmit. Generally it is the goal of the uncore that VNA credits should not run out, as this can substantially throttle back useful QPI bandwidth.",
+ "Unit": "R3QPI"
+ },
+ {
+ "BriefDescription": "Cycles with 1 or more VNA credits in use",
+ "EventCode": "0x32",
+ "EventName": "UNC_R3_VNA_CREDIT_CYCLES_USED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of QPI uclk cycles with one or more VNA credits in use. This event can be used in conjunction with the VNA In-Use Accumulator to calculate the average number of used VNA credits.",
+ "Unit": "R3QPI"
+ },
+ {
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "VLW Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore. Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_DISABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Filter Match",
+ "EventCode": "0x41",
+ "EventName": "UNC_U_FILTER_MATCH.U2C_ENABLE",
+ "PerPkg": "1",
+ "PublicDescription": "Filter match per thread (w/ or w/o Filter Enable). Specify the thread to filter on using NCUPMONCTRLGLCTR.ThreadID.",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "MsgCh Requests by Size; 4B Requests",
+ "EventCode": "0x47",
+ "EventName": "UNC_U_MSG_CHNL_SIZE_COUNT.4B",
+ "PerPkg": "1",
+ "PublicDescription": "Number of transactions on the message channel filtered by request size. This includes both reads and writes.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "MsgCh Requests by Size; 8B Requests",
+ "EventCode": "0x47",
+ "EventName": "UNC_U_MSG_CHNL_SIZE_COUNT.8B",
+ "PerPkg": "1",
+ "PublicDescription": "Number of transactions on the message channel filtered by request size. This includes both reads and writes.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; ACK to Deassert",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ACK_TO_DEASSERT",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles. Filter from source CoreID.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS.COUNT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Correctable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.CMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Livelock",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LIVELOCK",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; LTError",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.LTERROR",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T0",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T0",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Monitor T1",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.MONITOR_T1",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Other",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Trap",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.TRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Monitor Sent to T0; Uncorrectable Machine Check",
+ "EventCode": "0x43",
+ "EventName": "UNC_U_U2C_EVENTS.UMC",
+ "PerPkg": "1",
+ "PublicDescription": "Events coming from Uncore can be sent to one or all cores",
+ "UMask": "0x20",
+ "Unit": "UBOX"
}
]
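For readers unfamiliar with these files: each entry's EventCode and UMask are the raw event-select and unit-mask values that perf programs into the uncore counter, and an event specification is built from them. A rough Python sketch of that mapping is below; the PMU instance name used in the output is an assumption for illustration, not something defined by this patch.

    # Rough sketch, assuming a PMU instance name of "uncore_r3qpi_0": build a
    # perf-style event specification from one of the JSON entries above.
    def to_perf_spec(entry: dict, pmu: str) -> str:
        terms = [f"event={entry['EventCode']}"]
        if "UMask" in entry:
            terms.append(f"umask={entry['UMask']}")
        return f"{pmu}/{','.join(terms)}/"

    entry = {
        "EventName": "UNC_R3_VNA_CREDITS_REJECT.DRS",
        "EventCode": "0x34",
        "UMask": "0x8",
        "Unit": "R3QPI",
    }
    print(to_perf_spec(entry, "uncore_r3qpi_0"))  # uncore_r3qpi_0/event=0x34,umask=0x8/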
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json
new file mode 100644
index 000000000000..b1ce5f77675e
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-io.json
@@ -0,0 +1,324 @@
+[
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_R2_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the R2PCIe uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the R2PCIe is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credit Acquired; NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_R2_IIO_CREDITS_ACQUIRED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of credits that are acquired in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; DRS",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; NCB",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Failed to Acquire a Credit; NCS",
+ "EventCode": "0x34",
+ "EventName": "UNC_R2_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; DRS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCB",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2PCIe IIO Credits in Use; NCS",
+ "EventCode": "0x32",
+ "EventName": "UNC_R2_IIO_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when one or more credits in the R2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly).",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Even",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AD Ring in Use; Clockwise and Odd",
+ "EventCode": "0x7",
+ "EventName": "UNC_R2_RING_AD_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Even",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 AK Ring in Use; Clockwise and Odd",
+ "EventCode": "0x8",
+ "EventName": "UNC_R2_RING_AK_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Counterclockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CCW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Even",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 BL Ring in Use; Clockwise and Odd",
+ "EventCode": "0x9",
+ "EventName": "UNC_R2_RING_BL_USED.CW_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "R2 IV Ring in Use; Any",
+ "EventCode": "0xa",
+ "EventName": "UNC_R2_RING_IV_USED.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sunk into the ring stop. The IV ring is unidirectional. Whether UP or DN is used is dependent on the system programming. Thereofore, one should generally set both the UP and DN bits for a given polarity (or both) at a given time.",
+ "UMask": "0xf",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "AK Ingress Bounced",
+ "EventCode": "0x12",
+ "EventName": "UNC_R2_RxR_AK_BOUNCES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when a request destined for the AK ingress bounced.",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; DRS",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCB",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Cycles Not Empty; NCS",
+ "EventCode": "0x10",
+ "EventName": "UNC_R2_RxR_CYCLES_NE.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Ingress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "R2PCIe"
+ },
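The UNC_R2_RxR_CYCLES_NE descriptions above suggest combining this event with the R2PCIe Ingress Occupancy Accumulator to compute average queue occupancy. A hedged sketch of that division follows; the accumulator event itself is not part of this hunk and the counter readings are hypothetical.

    # Sketch: average ingress occupancy while the queue is non-empty, per the
    # UNC_R2_RxR_CYCLES_NE description. Inputs are hypothetical counter values.
    def avg_ingress_occupancy(occupancy_accumulator: int, cycles_not_empty: int) -> float:
        return occupancy_accumulator / cycles_not_empty if cycles_not_empty else 0.0

    print(avg_ingress_occupancy(occupancy_accumulator=5_000_000, cycles_not_empty=1_250_000))  # 4.0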
+ {
+ "BriefDescription": "Egress Cycles Full; AD",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; AK",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Full; BL",
+ "EventCode": "0x25",
+ "EventName": "UNC_R2_TxR_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress buffer is full.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AD",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; AK",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Cycles Not Empty; BL",
+ "EventCode": "0x23",
+ "EventName": "UNC_R2_TxR_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the R2PCIe Egress is not empty. This tracks one of the three rings that are used by the R2PCIe agent. This can be used in conjunction with the R2PCIe Egress Occupancy Accumulator event in order to calculate average queue occupancy. Only a single Egress queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; AD",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x1",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; AK",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x2",
+ "Unit": "R2PCIe"
+ },
+ {
+ "BriefDescription": "Egress NACK; BL",
+ "EventCode": "0x26",
+ "EventName": "UNC_R2_TxR_NACKS.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the Egress received a NACK from the ring and could not issue a transaction.",
+ "UMask": "0x4",
+ "Unit": "R2PCIe"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
index 8551cebeba23..6dcc9415a462 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-memory.json
@@ -1,82 +1,435 @@
[
{
- "BriefDescription": "Memory page activates",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM Activate Count",
"EventCode": "0x1",
"EventName": "UNC_M_ACT_COUNT",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
"Unit": "iMC"
},
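The UNC_M_ACT_COUNT description above notes that Page Misses can be calculated by subtracting the number of Page Miss precharges (UNC_M_PRE_COUNT.PAGE_MISS, defined further down in this file) from the number of Activates. A small sketch of that arithmetic, with made-up counter values:

    # Sketch of the derivation stated in the UNC_M_ACT_COUNT description:
    # Page Misses = Activates - Page Miss precharges. Values are hypothetical.
    def page_misses(act_count: int, page_miss_precharges: int) -> int:
        return act_count - page_miss_precharges

    print(page_misses(act_count=10_000_000, page_miss_precharges=2_500_000))  # 7500000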
{
- "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (w/ and w/out auto-pre)",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_READ",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM Reads (RD_CAS + Underfills)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
"PerPkg": "1",
"UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM RD_CAS (w/ and w/out auto-pre)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; Underfill Read Issued",
"EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_WRITE",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; All DRAM WR_CAS (both Modes)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
"PerPkg": "1",
"UMask": "0xc",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory controller clock ticks. Used to get percentages of memory controller cycles events",
- "Counter": "0,1,2,3",
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "uclks",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "Uncore Fixed Counter - uclks",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
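The old version of this entry carried a MetricExpr, "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.", which is removed by this hunk. For reference, the same percentage expressed as a small Python helper (counter readings are hypothetical):

    # Sketch of the removed metric: percentage of iMC uclk cycles with every
    # rank of the channel in PPD mode. Inputs are hypothetical counter values.
    def power_channel_ppd_pct(ppd_cycles: int, clockticks: int) -> float:
        return 100.0 * ppd_cycles / clockticks if clockticks else 0.0

    print(power_channel_ppd_pct(ppd_cycles=400_000, clockticks=2_000_000))  # 20.0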
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles all ranks are in critical thermal throttle",
- "Counter": "0,1,2,3",
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
"EventCode": "0x86",
"EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
- "MetricExpr": "(UNC_M_POWER_CRITICAL_THROTTLE_CYCLES / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_critical_throttle_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
"Unit": "iMC"
},
{
- "BriefDescription": "Cycles Memory is in self refresh power mode",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Clock-Enabled Self-Refresh",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "Memory page conflicts",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+    "BriefDescription": "Throttle Cycles for Rank 6; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+    "BriefDescription": "Throttle Cycles for Rank 7; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharges due to page miss",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "Occupancy counter for memory read queue",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar to the count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see the RPQ become full except potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar to the count in the HA which tracks the number of cycles that the HA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstructing intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_WPQ_OCCUPANCY",
+ "PerPkg": "1",
+    "PublicDescription": "Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon as they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstructing intermediate write latencies. So, we provide filtering based on whether the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
"Unit": "iMC"
}
]
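
The RPQ/WPQ descriptions above spell out how the occupancy, not-empty, and insert counts combine into an average occupancy and an average queue latency. A minimal sketch of that arithmetic in Python follows; the counter values and the helper name are placeholders (assumed to have been collected separately, e.g. with perf stat on the uncore iMC PMU), not anything defined by the event files themselves.

def imc_queue_stats(occupancy_sum, cycles_not_empty, inserts):
    # Average entries while the queue is in use, and average DCLK cycles an
    # entry spends queued (occupancy accumulated per cycle / allocations),
    # following the UNC_M_RPQ_* descriptions above.
    avg_occupancy = occupancy_sum / cycles_not_empty if cycles_not_empty else 0.0
    avg_latency_dclk = occupancy_sum / inserts if inserts else 0.0
    return avg_occupancy, avg_latency_dclk

# Placeholder counts standing in for UNC_M_RPQ_OCCUPANCY, UNC_M_RPQ_CYCLES_NE
# and UNC_M_RPQ_INSERTS from a real run.
occ, lat = imc_queue_stats(1.2e9, 4.0e8, 5.0e7)
print(f"avg RPQ occupancy: {occ:.1f} entries, avg read queue latency: {lat:.1f} DCLKs")
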
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
index 8755693d86c6..b3ee5d741015 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/uncore-power.json
@@ -1,272 +1,310 @@
[
{
- "BriefDescription": "PCU clock ticks. Use to get percentages of PCU cycles events",
- "Counter": "0,1,2,3",
+ "BriefDescription": "pclk Cycles",
"EventName": "UNC_P_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 800 MHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x3",
+ "EventName": "UNC_P_CORE0_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x4",
+ "EventName": "UNC_P_CORE1_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x5",
+ "EventName": "UNC_P_CORE2_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x6",
+ "EventName": "UNC_P_CORE3_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned a frequency greater than or equal to the frequency that is configured in the filter. (filter_band0=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band0_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_BAND0_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND0_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band0_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_CORE4_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transistioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band1=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band1_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_BAND1_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND1_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band1_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x8",
+ "EventName": "UNC_P_CORE5_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band2=XXX with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band2_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_BAND2_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND2_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band2_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_CORE6_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to the frequency that is configured in the filter. (filter_band3=XXX, with XXX in 100Mhz units). One can also use inversion (filter_inv=1) to track cycles when we were less than the configured frequency. Derived from unc_p_freq_band3_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_BAND3_TRANSITIONS",
- "Filter": "edge=1",
- "MetricExpr": "(UNC_P_FREQ_BAND3_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_band3_cycles %",
+ "BriefDescription": "Core C State Transition Cycles",
+ "EventCode": "0xa",
+ "EventName": "UNC_P_CORE7_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions. There is one event per core.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
- "Filter": "occ_sel=1",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C0 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c0 %",
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x1e",
+ "EventName": "UNC_P_DEMOTIONS_CORE0",
"PerPkg": "1",
+    "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
- "Filter": "occ_sel=2",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C3 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c3 %",
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x1f",
+ "EventName": "UNC_P_DEMOTIONS_CORE1",
"PerPkg": "1",
+    "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion.",
"Unit": "PCU"
},
{
- "BriefDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C0, with threshholding to generate histograms, or with other PCU events ",
- "Counter": "0,1,2,3",
- "EventCode": "0x80",
- "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
- "Filter": "occ_sel=3",
- "MetricExpr": "(UNC_P_POWER_STATE_OCCUPANCY.CORES_C6 / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "power_state_occupancy.cores_c6 %",
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x20",
+ "EventName": "UNC_P_DEMOTIONS_CORE2",
"PerPkg": "1",
+    "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip",
- "Counter": "0,1,2,3",
- "EventCode": "0xa",
- "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
- "MetricExpr": "(UNC_P_PROCHOT_EXTERNAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "prochot_external_cycles %",
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x21",
+ "EventName": "UNC_P_DEMOTIONS_CORE3",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x22",
+ "EventName": "UNC_P_DEMOTIONS_CORE4",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x23",
+ "EventName": "UNC_P_DEMOTIONS_CORE5",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x24",
+ "EventName": "UNC_P_DEMOTIONS_CORE6",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Core C State Demotions",
+ "EventCode": "0x25",
+ "EventName": "UNC_P_DEMOTIONS_CORE7",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of times when a configurable core had a C-state demotion.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xb",
+ "EventName": "UNC_P_FREQ_BAND0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xc",
+ "EventName": "UNC_P_FREQ_BAND1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xd",
+ "EventName": "UNC_P_FREQ_BAND2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Frequency Residency",
+ "EventCode": "0xe",
+ "EventName": "UNC_P_FREQ_BAND3_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to the frequency that is configured in the filter. One can use all four counters with this event, so it is possible to track up to 4 configurable bands. One can use edge detect in conjunction with this event to track the number of times that we transitioned into a frequency greater than or equal to the configurable frequency. One can also use inversion to track cycles when we were less than the configured frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when temperature is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Current Strongest Upper Limit Cycles",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when current is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
"EventCode": "0x4",
"EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_limit_thermal_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when the OS is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "OS Strongest Upper Limit Cycles",
"EventCode": "0x6",
"EventName": "UNC_P_FREQ_MAX_OS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_OS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_os_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the OS is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when power is the upper limit on frequency",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
"EventCode": "0x5",
"EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_POWER_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_power_cycles %",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles when current is the upper limit on frequency",
- "Counter": "0,1,2,3",
- "EventCode": "0x7",
- "EventName": "UNC_P_FREQ_MAX_CURRENT_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_MAX_CURRENT_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_max_current_cycles %",
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x1",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs of the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Perf P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x2",
+ "EventName": "UNC_P_FREQ_MIN_PERF_P_CYCLES",
"PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles when Perf P Limit is preventing us from dropping the frequency lower. Perf P Limit is an algorithm that takes input from remote sockets when determining if a socket should drop its frequency down. This is largely to minimize increases in snoop and remote read latencies.",
"Unit": "PCU"
},
{
"BriefDescription": "Cycles spent changing Frequency",
- "Counter": "0,1,2,3",
"EventName": "UNC_P_FREQ_TRANS_CYCLES",
- "MetricExpr": "(UNC_P_FREQ_TRANS_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_trans_cycles %",
"PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles when the system is changing frequency. This cannot be filtered by thread ID. One can also use it with the occupancy counter that monitors the number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_CYCLES",
- "Filter": "filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2f",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_CYCLES",
- "Filter": "filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "BriefDescription": "Number of cores in C0",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
"PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C0. It can be used by itself to get the average number of cores in C0, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_CYCLES",
- "Filter": "filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+    "BriefDescription": "Number of cores in C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
"PerPkg": "1",
+    "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C3. It can be used by itself to get the average number of cores in C3, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore was running at a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_CYCLES",
- "Filter": "filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+    "BriefDescription": "Number of cores in C6",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+    "PublicDescription": "This is an occupancy event that tracks the number of cores that are in C6. It can be used by itself to get the average number of cores in C6, with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0xa",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 1.2Ghz. Derived from unc_p_freq_band0_cycles",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
"EventCode": "0xb",
- "EventName": "UNC_P_FREQ_GE_1200MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band0=12",
- "MetricExpr": "(UNC_P_FREQ_GE_1200MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_1200mhz_cycles %",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
"PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of times that the uncore transitioned to a frequency greater than or equal to 2Ghz. Derived from unc_p_freq_band1_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xc",
- "EventName": "UNC_P_FREQ_GE_2000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band1=20",
- "MetricExpr": "(UNC_P_FREQ_GE_2000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_2000mhz_cycles %",
+ "BriefDescription": "Cycles Changing Voltage",
+ "EventCode": "0x3",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_CHANGE",
"PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles when the system is changing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition. This event is calculated by or'ing together the increasing and decreasing events.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 3Ghz. Derived from unc_p_freq_band2_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xd",
- "EventName": "UNC_P_FREQ_GE_3000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band2=30",
- "MetricExpr": "(UNC_P_FREQ_GE_3000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_3000mhz_cycles %",
+ "BriefDescription": "Cycles Decreasing Voltage",
+ "EventCode": "0x2",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_DECREASE",
"PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles when the system is decreasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
"Unit": "PCU"
},
{
- "BriefDescription": "Counts the number of cycles that the uncore transitioned to a frequency greater than or equal to 4Ghz. Derived from unc_p_freq_band3_cycles",
- "Counter": "0,1,2,3",
- "EventCode": "0xe",
- "EventName": "UNC_P_FREQ_GE_4000MHZ_TRANSITIONS",
- "Filter": "edge=1,filter_band3=40",
- "MetricExpr": "(UNC_P_FREQ_GE_4000MHZ_CYCLES / UNC_P_CLOCKTICKS) * 100.",
- "MetricName": "freq_ge_4000mhz_cycles %",
+ "BriefDescription": "Cycles Increasing Voltage",
+ "EventCode": "0x1",
+ "EventName": "UNC_P_VOLT_TRANS_CYCLES_INCREASE",
+ "PerPkg": "1",
+    "PublicDescription": "Counts the number of cycles when the system is increasing voltage. There is no filtering supported with this event. One can use it as a simple event, or use it in conjunction with the occupancy events to monitor the number of cores or threads that were impacted by the transition.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x32",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
"PerPkg": "1",
"Unit": "PCU"
}
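
The MetricExpr entries removed above all normalized a PCU cycle count against UNC_P_CLOCKTICKS, i.e. (EVENT / UNC_P_CLOCKTICKS) * 100. A minimal sketch of the same calculation in Python, with placeholder counter values (the event names match the file above; nothing else here comes from it):

def pcu_pct(event_cycles, clockticks):
    # Percentage of pclk cycles, reproducing the dropped MetricExpr formula.
    return 100.0 * event_cycles / clockticks if clockticks else 0.0

clockticks = 8.0e9  # placeholder UNC_P_CLOCKTICKS sample (800 MHz fixed pclk)
samples = {
    "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES": 1.0e8,
    "UNC_P_FREQ_MAX_POWER_CYCLES": 6.0e8,
    "UNC_P_FREQ_TRANS_CYCLES": 2.0e7,
}
for name, cycles in samples.items():
    print(f"{name}: {pcu_pct(cycles, clockticks):.2f}% of pclk cycles")
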
diff --git a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
index a654ab771fce..fa08d355b97e 100644
--- a/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/jaketown/virtual-memory.json
@@ -1,149 +1,117 @@
[
{
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "EPT.WALK_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "EventCode": "0x4F",
+ "EventName": "EPT.WALK_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Misses at all ITLB levels that cause page walks.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+    "PublicDescription": "This event counts cycles when the Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries.",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "STLB flush attempts.",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "TLB_FLUSH.STLB_ANY",
"SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-] \ No newline at end of file
+]
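
The DTLB_LOAD_MISSES.STLB_HIT description above quotes an approximate 7-cycle penalty, while the *_MISSES.WALK_DURATION events count the cycles the PMH is busy walking. A rough sketch of combining those counts into an estimate of cycles spent on DTLB misses follows; the 7-cycle figure is taken from the event description, and all counter values and the helper name are placeholders of my own, not part of the event files.

STLB_HIT_PENALTY = 7  # approximate cycles per load miss that hits the STLB (per the description)

def dtlb_miss_cycles(stlb_hits, load_walk_cycles, store_walk_cycles):
    # Walk-duration events already count PMH-busy cycles directly; STLB hits
    # avoid the walk but still pay roughly the fixed penalty.
    return stlb_hits * STLB_HIT_PENALTY + load_walk_cycles + store_walk_cycles

total = dtlb_miss_cycles(stlb_hits=2.0e6, load_walk_cycles=5.0e7, store_walk_cycles=1.0e7)
print(f"estimated cycles spent handling DTLB misses: {total:.3g}")
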
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
index e847b0fd696d..d9876cb06b08 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/cache.json
@@ -1,2305 +1,1892 @@
[
{
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "L2_REQUESTS_REJECT.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of MEC requests from the L2Q that reference a cache line (cacheable requests) exlcuding SW prefetches filling only to L2 cache and L1 evictions (automatically exlcudes L2HWP, UC, WC) that were rejected - Multiple repeated rejects should be counted multiple times"
- },
- {
+ "BriefDescription": "Counts the number of MEC requests that were not accepted into the L2Q because of any L2 queue reject condition. There is no concept of at-ret here. It might include requests due to instructions in the speculative path.",
"EventCode": "0x31",
- "Counter": "0,1",
- "UMask": "0x0",
"EventName": "CORE_REJECT_L2Q.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of MEC requests that were not accepted into the L2Q because of any L2 queue reject condition. There is no concept of at-ret here. It might include requests due to instructions in the speculative path."
+ "SampleAfterValue": "200003"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "L2_REQUESTS.REFERENCE",
+ "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of core cycles the fetch stalled for all icache misses.",
+ "EventCode": "0x86",
+ "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the total number of L2 cache references."
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts the number of L2 cache misses",
"EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x41",
"EventName": "L2_REQUESTS.MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of L2 cache misses"
+ "UMask": "0x41"
},
{
- "PublicDescription": "This event counts the number of core cycles the fetch stalls because of an icache miss. This is a cumulative count of cycles the NIP stalled for all icache misses.",
- "EventCode": "0x86",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "BriefDescription": "Counts the total number of L2 cache references.",
+ "EventCode": "0x2E",
+ "EventName": "L2_REQUESTS.REFERENCE",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles the fetch stalls because of an icache miss. This is a cummulative count of core cycles the fetch stalled for all icache misses."
+ "UMask": "0x4f"
},
{
- "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
+    "BriefDescription": "Counts the number of MEC requests from the L2Q that reference a cache line (cacheable requests) excluding SW prefetches filling only to L2 cache and L1 evictions (automatically excludes L2HWP, UC, WC) that were rejected - Multiple repeated rejects should be counted multiple times",
+ "EventCode": "0x30",
+ "EventName": "L2_REQUESTS_REJECT.ALL",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts all the load micro-ops retired",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "MEM_UOPS_RETIRED.L1_MISS_LOADS",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "This event counts the number of load micro-ops retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load micro-ops retired that miss in L1 D cache"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts all the store micro-ops retired",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "MEM_UOPS_RETIRED.L2_HIT_LOADS",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PublicDescription": "This event counts the number of store micro-ops retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load micro-ops retired that hit in the L2",
- "Data_LA": "1"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the loads retired that get the data from the other core in the same tile in M state",
+ "Data_LA": "1",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "MEM_UOPS_RETIRED.L2_MISS_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of load micro-ops retired that miss in the L2",
- "Data_LA": "1"
+ "EventName": "MEM_UOPS_RETIRED.HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Counts the number of load micro-ops retired that miss in L1 D cache",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "MEM_UOPS_RETIRED.UTLB_MISS_LOADS",
+ "EventName": "MEM_UOPS_RETIRED.L1_MISS_LOADS",
+ "PublicDescription": "This event counts the number of load micro-ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load micro-ops retired that caused micro TLB miss"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of load micro-ops retired that hit in the L2",
+ "Data_LA": "1",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "MEM_UOPS_RETIRED.HITM",
+ "EventName": "MEM_UOPS_RETIRED.L2_HIT_LOADS",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the loads retired that get the data from the other core in the same tile in M state",
- "Data_LA": "1"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of load micro-ops retired.",
+ "BriefDescription": "Counts the number of load micro-ops retired that miss in the L2",
+ "Data_LA": "1",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts all the load micro-ops retired"
+ "EventName": "MEM_UOPS_RETIRED.L2_MISS_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of store micro-ops retired.",
+ "BriefDescription": "Counts the number of load micro-ops retired that caused micro TLB miss",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x80",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "EventName": "MEM_UOPS_RETIRED.UTLB_MISS_LOADS",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts all the store micro-ops retired"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts the matrix events specified by MSR_OFFCORE_RESPx",
"EventCode": "0xB7",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts the matrix events specified by MSR_OFFCORE_RESPx"
- },
- {
- "EventCode": "0xB7",
- "MSRValue": "0x4000000070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+    "BriefDescription": "Counts Demand code reads and prefetch code read requests that account for any response",
"EventCode": "0xB7",
- "MSRValue": "0x1000400070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+    "BriefDescription": "Counts Demand code reads and prefetch code read requests that account for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0800400070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800400044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+    "BriefDescription": "Counts Demand code reads and prefetch code read requests that account for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1000080070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+    "BriefDescription": "Counts Demand code reads and prefetch code read requests that account for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800080070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+    "BriefDescription": "Counts Demand code reads and prefetch code read requests that account for responses from a snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0000010070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x40000032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x10004032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x08004032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x10000832f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x08000832f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x00000132f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000044",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x1000400044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000013091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0800400044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800403091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1000080044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800403091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800080044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000403091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0000010044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800183091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800083091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x1000400022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000083091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x0800400022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x1000080022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0800080022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x0000010022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x4000003091",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.OUTSTANDING",
"MSRIndex": "0x1a6",
+ "MSRValue": "0x4000003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x1000403091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0800403091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800400070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1000083091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800083091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0000013091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for any response",
- "Offcore": "1"
- },
- {
- "EventCode": "0xB7",
- "MSRValue": "0x4000008000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
- "MSRIndex": "0x1a6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x1000408000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800408000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x1000088000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x0800088000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0000018000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0000014800",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts all streaming stores (WC and should be programmed on PMC1) that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0000014000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00000132f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial streaming stores (WC and should be programmed on PMC1) that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x4000002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18004032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1000402000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08004032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800402000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x1000082000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18001832f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800082000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x08000832f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0000012000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10000832f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x4000001000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00040032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x1000401000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00100032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0800401000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00020032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x1000081000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00080032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0800081000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_E_F",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x40000032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0000011000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000018000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010800",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800408000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Full streaming stores (WC and should be programmed on PMC1) that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800408000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x1000400400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000408000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0800400400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800188000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x1000080400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800088000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800080400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000088000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x0000010400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004008000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x4000000200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010008000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x1000400200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002008000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x0800400200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008008000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x1000080200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_M",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000008000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0800080200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800400022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1000400100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_M",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800400100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_E_F",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x1000080100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_M",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800080100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_E_F",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x4000000080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x1000400080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0800400080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x1000080080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0800080080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_E_F",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0000010080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800400400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1000400040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800400040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x1000080040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800080040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x1000400020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x0800400020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x1000080020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x0800080020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0000020020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that provides no supplier details",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0000010020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800400004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1000400004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800400004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x1000080004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800080004",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x4000000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x1000400002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0800400002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x1000080002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0800080002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_E_F",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0000010002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The oustanding response should be programmed only on PMC0.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x1000400001",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0800400001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_FAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x1000080001",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x0800080001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_NEAR_TILE_E_F",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x0000010001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for any response",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0002000001",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x0002000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0002000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_M",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0002000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0002000080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800400002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0002000100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_M",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0002000200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0002000400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0002001000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0002002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x0002008000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x0002003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0002000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x0002000044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x00020032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_M",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Full streaming stores (WC and should be programmed on PMC1) that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0002000070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_M",
+ "EventName": "OFFCORE_RESPONSE.FULL_STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010800",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0004000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0004000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800400080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0004000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0004000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0004000040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0004000080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0004000100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_E",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x0004000200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x0004000400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0004001000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x0004002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_E",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0004008000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_E",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial streaming stores (WC and should be programmed on PMC1) that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0004003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_E",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_STREAMING_STORES.ANY_RESPONSE",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0000014000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0004000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_E",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.ANY_RESPONSE",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0000010100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0004000044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_E",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x1800400100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x00040032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_E",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0800400100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0004000070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_E",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE_M",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x1000400100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in E state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0008000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_S",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x1800180100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0008000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_S",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_E_F",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0800080100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0008000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_S",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE_M",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x1000080100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x0008000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_S",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_E",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0004000100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x0008000080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_S",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_F",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0010000100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0008000100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a7",
+ "MSRValue": "0x0002000100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x0008000200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_S",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_S",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0008000100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0008000400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000012000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0008001000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800402000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0008002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800402000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0008008000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000402000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0008003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800182000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0008000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800082000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0008000044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000082000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x00080032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_S",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in S state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x0010000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x0010000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x0010000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0010000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_F",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0010000040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0010000080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800400040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x0010000100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_THIS_TILE_F",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_E_F",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0010000200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x0010000400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x0010001000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0010002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x0010008000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x0010003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x0010000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_THIS_TILE_F",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x0010000044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x00100032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x0010000070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_THIS_TILE_F",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses which hit its own tile's L2 with data in F state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x1800180002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x1800180004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x1800180020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x1800180040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x1800180080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x1800180100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_NEAR_TILE",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x1800180200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that provides no supplier details",
"EventCode": "0xB7",
- "MSRValue": "0x1800180400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.SUPPLIER_NONE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000020020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x1800181000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000011000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1800182000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800401000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1800188000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800401000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x1800183091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000401000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x1800180022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800181000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
"EventCode": "0xB7",
- "MSRValue": "0x1800180044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800081000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x18001832f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000081000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x1800180070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_NEAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004001000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for reponses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x1800400002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010001000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x1800400004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002001000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x1800400040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008001000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
"EventCode": "0xB7",
- "MSRValue": "0x1800400080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_HIT_FAR_TILE",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000001000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all streaming stores (WC and should be programmed on PMC1) that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x1800400100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a7",
+ "MSRValue": "0x0000014800",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for any response",
"EventCode": "0xB7",
- "MSRValue": "0x1800400400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in E/F state. Valid only for SNC4 cluster mode.",
"EventCode": "0xB7",
- "MSRValue": "0x1800401000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800400200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Far(not in the same quadrant as the request)-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x1800402000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_FAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from snoop request hit with data forwarded from its Near-other tile L2 in E/F/M state",
"EventCode": "0xB7",
- "MSRValue": "0x1800408000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1800180200",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in E/F state.",
+ "EventCode": "0xB7",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_E_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0800080200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from a snoop request hit with data forwarded from its Near-other tile's L2 in M state.",
"EventCode": "0xB7",
- "MSRValue": "0x1800403091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_NEAR_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in E state",
"EventCode": "0xB7",
- "MSRValue": "0x1800400022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_E",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0004000200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in F state",
"EventCode": "0xB7",
- "MSRValue": "0x1800400044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_F",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0010000200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in M state",
"EventCode": "0xB7",
- "MSRValue": "0x18004032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_M",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0002000200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses which hit its own tile's L2 with data in S state",
"EventCode": "0xB7",
- "MSRValue": "0x1800400070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.L2_HIT_FAR_TILE",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.L2_HIT_THIS_TILE_S",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0008000200",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that are outstanding, per weighted cycle, from the time of the request to when any response is received. The outstanding response should be programmed only on PMC0.",
+ "EventCode": "0xB7",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for reponses from snoop request hit with data forwarded from it Far(not in the same quadrant as the request)-other tile L2 in E/F/M state. Valid only in SNC4 Cluster mode.",
- "Offcore": "1"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json b/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
new file mode 100644
index 000000000000..ecc96f32f167
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/floating-point.json
@@ -0,0 +1,26 @@
+[
+ {
+ "BriefDescription": "Counts the number of floating operations retired that required microcode assists",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "This event counts the number of times that the pipeline stalled due to FP operations needing assists.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of vector SSE, AVX, AVX2, AVX-512 micro-ops retired. More specifically, it counts packed SSE, AVX, AVX2, AVX-512 micro-ops (both floating point and integer) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.PACKED_SIMD",
+ "PublicDescription": "This event counts the number of packed vector SSE, AVX, AVX2, and AVX-512 micro-ops retired (floating point, integer and store) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops retired. More specifically, it counts scalar SSE, AVX, AVX2, AVX-512 micro-ops except for loads (memory-to-register mov-type micro ops), division, sqrt.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.SCALAR_SIMD",
+ "PublicDescription": "This event counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops retired (floating point, integer and store) except for loads (memory-to-register mov-type micro ops), division, sqrt.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json b/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
index 6d38636689a4..9001f5019848 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/frontend.json
@@ -1,34 +1,51 @@
[
{
+ "BriefDescription": "Counts the number of times the front end resteers for any branch as a result of another branch handling mechanism in the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times the front end resteers for conditional branches as a result of another branch handling mechanism in the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.COND",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of times the front end resteers for RET branches as a result of another branch handling mechanism in the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts all instruction fetches, including uncacheable fetches.",
"EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x3",
"EventName": "ICACHE.ACCESSES",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts all instruction fetches, including uncacheable fetches."
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Counts all instruction fetches that hit the instruction cache.",
"EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "ICACHE.HIT",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts all instruction fetches that hit the instruction cache."
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all instruction fetches that miss the instruction cache or produce memory requests. An instruction fetch miss is counted only once and not once for every cycle it is outstanding.",
"EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "ICACHE.MISSES",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts all instruction fetches that miss the instruction cache or produce memory requests. An instruction fetch miss is counted only once and not once for every cycle it is outstanding."
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts the number of times the MSROM starts a flow of uops.",
"EventCode": "0xE7",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "MS_DECODED.MS_ENTRY",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of times the MSROM starts a flow of uops."
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
index c6bb16ba0f86..b0361f6f0dd9 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/memory.json
@@ -1,1110 +1,909 @@
[
{
+ "BriefDescription": "Counts the number of times the machine clears due to memory ordering hazards",
"EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of times the machine clears due to memory ordering hazards"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0100400070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080200070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0101000070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080800070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x01004032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x00802032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x01010032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181803091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x00808032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080803091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080200044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180603091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0101000044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100403091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080203091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080200022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0101000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Prefetch requests that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0100403091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200070",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080203091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01818032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0101003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01010032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080803091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00808032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0100408000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01806032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080208000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x01004032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any Read request that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0101008000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00802032f7",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080808000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181808000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0100402000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101008000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080202000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080808000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0101002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180608000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080802000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100408000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0100401000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080208000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080201000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0101001000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080801000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0100400400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080200400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data write requests that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0101000400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080800400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080200200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0101000200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Bus locks and split lock requests that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_FAR",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200400",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080200100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_NEAR",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0101000100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_FAR",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_FAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_NEAR",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x2000020080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.NON_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from any NON_DRAM system address. This includes MMIO transactions",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080200080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0101000080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080200040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0101000040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x2000020020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.NON_DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from any NON_DRAM system address. This includes MMIO transactions",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080200020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0101000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0080200004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0101000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0100400002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_FAR",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080200002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM_NEAR",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0101000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_FAR",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from any NON_DRAM system address. This includes MMIO transactions",
"EventCode": "0xB7",
- "MSRValue": "0x0080800002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR_NEAR",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0100400001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_FAR",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_FAR",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0101000100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0080200001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM_NEAR",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.DDR_NEAR",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0080800100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from MCDRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0101000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_FAR",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0180600100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Far.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0080800001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR_NEAR",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_FAR",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0100400100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for data responses from DRAM Local.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0180600001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.MCDRAM",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM_NEAR",
+ "MSRIndex": "0x1a7",
+ "MSRValue": "0x0080200100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0180600002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181802000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0180600004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0180600020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080802000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0180600080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100402000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L1 data HW prefetches that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0180600100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.MCDRAM",
- "MSRIndex": "0x1a7",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.MCDRAM_NEAR",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080202000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial writes (UC or WT or WP and should be programmed on PMC1) that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0180600200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0180600400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0180601000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0180608000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 code HW prefetches that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0180603091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0180600022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0180600044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x01806032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0180600070",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_PF_L2.MCDRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Prefetch requests that accounts for responses from MCDRAM (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0181800001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand cacheable data and L1 prefetch data reads that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0181800002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data writes that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from any NON_DRAM system address. This includes MMIO transactions",
"EventCode": "0xB7",
- "MSRValue": "0x0181800004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.NON_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand code reads and prefetch code reads that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0181800020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181801000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 data RFO prefetches (includes PREFETCHW instruction) that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0181800040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101001000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L2 code HW prefetches that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0181800080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080801000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Partial reads (UC or WC and is valid only for Outstanding response type). that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0181800200",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180601000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0181800400",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.BUS_LOCKS.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100401000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Bus locks and split lock requests that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts Software Prefetches that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0181801000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.DDR",
+ "EventName": "OFFCORE_RESPONSE.PF_SOFTWARE.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080201000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Software Prefetches that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from DDR (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0181802000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.DDR",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0181800200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts L1 data HW prefetches that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Far.",
"EventCode": "0xB7",
- "MSRValue": "0x0181808000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.DDR",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0101000200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from DRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x0181803091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.DDR",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.DDR_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080800200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data and L1 prefetch data read requests that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for responses from MCDRAM (local and far)",
"EventCode": "0xB7",
- "MSRValue": "0x0181800022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.DDR",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0180600200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand cacheable data write requests that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Far or Other tile L2 hit far.",
"EventCode": "0xB7",
- "MSRValue": "0x0181800044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.DDR",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_FAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0100400200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts Demand code reads and prefetch code read requests that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts UC code reads (valid only for Outstanding response type) that accounts for data responses from MCDRAM Local.",
"EventCode": "0xB7",
- "MSRValue": "0x01818032f7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_READ.DDR",
+ "EventName": "OFFCORE_RESPONSE.UC_CODE_READS.MCDRAM_NEAR",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080200200",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any Read request that accounts for responses from DDR (local and far)",
- "Offcore": "1"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
index 92e4ef2e22c6..3dc532107ead 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/pipeline.json
@@ -1,432 +1,333 @@
[
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of branch instructions retired",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x0",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of branch instructions retired"
+ "PEBS": "1",
+ "SampleAfterValue": "200003"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of near CALL branch instructions retired.",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x7e",
- "EventName": "BR_INST_RETIRED.JCC",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps."
+ "UMask": "0xf9"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of far branch instructions retired.",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xfe",
- "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps and predicted taken."
+ "UMask": "0xbf"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xf9",
- "EventName": "BR_INST_RETIRED.CALL",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near CALL branch instructions retired."
+ "UMask": "0xfb"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps.",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xfd",
- "EventName": "BR_INST_RETIRED.REL_CALL",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near relative CALL branch instructions retired."
+ "UMask": "0x7e"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of branch instructions retired that were near indirect CALL or near indirect JMP.",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xfb",
- "EventName": "BR_INST_RETIRED.IND_CALL",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near indirect CALL branch instructions retired."
+ "UMask": "0xeb"
},
{
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
"PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "Counts the number of near RET branch instructions retired.",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xf7",
"EventName": "BR_INST_RETIRED.RETURN",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near RET branch instructions retired."
+ "UMask": "0xf7"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of branch instructions retired that were conditional jumps and predicted taken.",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xeb",
- "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of branch instructions retired that were near indirect CALL or near indirect JMP."
+ "UMask": "0xfe"
},
{
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
"PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xbf",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of far branch instructions retired."
+ "SampleAfterValue": "200003"
},
{
+ "BriefDescription": "Counts the number of mispredicted near CALL branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CALL",
"PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf9"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted far branch instructions retired.",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "EventName": "BR_MISP_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted branch instructions retired"
+ "UMask": "0xbf"
},
{
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps.",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0x7e",
"EventName": "BR_MISP_RETIRED.JCC",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps."
+ "UMask": "0x7e"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were near indirect CALL or near indirect JMP.",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xfe",
- "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps and predicted taken."
+ "UMask": "0xeb"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of mispredicted near relative CALL branch instructions retired.",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xfb",
- "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "EventName": "BR_MISP_RETIRED.REL_CALL",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired."
+ "UMask": "0xfd"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xf7",
"EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired."
+ "UMask": "0xf7"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired that were conditional jumps and predicted taken.",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xeb",
- "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted branch instructions retired that were near indirect CALL or near indirect JMP."
+ "UMask": "0xfe"
},
{
- "PublicDescription": "This event counts the number of micro-ops retired that were supplied from MSROM.",
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.MS",
+ "BriefDescription": "Counts the number of unhalted reference clock cycles",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of micro-ops retired that are from the complex flows issued by the micro-sequencer (MS)."
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists.",
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "UOPS_RETIRED.ALL",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of micro-ops retired"
+ "UMask": "0x3"
},
{
- "PublicDescription": "This event counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops retired (floating point, integer and store) except for loads (memory-to-register mov-type micro ops), division, sqrt.",
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "UOPS_RETIRED.SCALAR_SIMD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of scalar SSE, AVX, AVX2, AVX-512 micro-ops retired. More specifically, it counts scalar SSE, AVX, AVX2, AVX-512 micro-ops except for loads (memory-to-register mov-type micro ops), division, sqrt."
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of packed vector SSE, AVX, AVX2, and AVX-512 micro-ops retired (floating point, integer and store) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies.",
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "UOPS_RETIRED.PACKED_SIMD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of vector SSE, AVX, AVX2, AVX-512 micro-ops retired. More specifically, it counts packed SSE, AVX, AVX2, AVX-512 micro-ops (both floating point and integer) except for loads (memory-to-register mov-type micro-ops), packed byte and word multiplies."
+ "BriefDescription": "Counts the number of unhalted core clock cycles",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of times that the machine clears due to program modifying data within 1K of a recently fetched code page"
+ "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider.",
+ "EventCode": "0xCD",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "PublicDescription": "This event counts cycles when the divider is busy. More specifically cycles when the divide unit is unable to accept a new divide uop because it is busy processing a previously dispatched uop. The cycles will be counted irrespective of whether or not another divide uop is waiting to enter the divide unit (from the RS). This event counts integer divides, x87 divides, divss, divsd, sqrtss, sqrtsd event and does not count vector divides.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of times that the pipeline stalled due to FP operations needing assists.",
- "EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.FP_ASSIST",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of floating operations retired that required microcode assists"
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts the total number of instructions retired",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts all nukes",
"EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "MACHINE_CLEARS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts all nukes"
+ "UMask": "0x8"
},
{
- "EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "NO_ALLOC_CYCLES.ROB_FULL",
+ "BriefDescription": "Counts the number of times that the machine clears due to program modifying data within 1K of a recently fetched code page",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and the ROB is full"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of core cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted branch to retire.",
+ "BriefDescription": "Counts the total number of core cycles when no micro-ops are allocated for any reason.",
"EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
+ "EventName": "NO_ALLOC_CYCLES.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and the alloc pipe is stalled waiting for a mispredicted branch to retire."
+ "UMask": "0x7f"
},
{
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and the alloc pipe is stalled waiting for a mispredicted branch to retire.",
"EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "NO_ALLOC_CYCLES.RAT_STALL",
+ "EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
+ "PublicDescription": "This event counts the number of core cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted branch to retire.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted."
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of core cycles when no uops are allocated, the instruction queue is empty and the alloc pipe is stalled waiting for instructions to be fetched.",
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated, the IQ is empty, and no other condition is blocking allocation.",
"EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x90",
"EventName": "NO_ALLOC_CYCLES.NOT_DELIVERED",
+ "PublicDescription": "This event counts the number of core cycles when no uops are allocated, the instruction queue is empty and the alloc pipe is stalled waiting for instructions to be fetched.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated, the IQ is empty, and no other condition is blocking allocation."
+ "UMask": "0x90"
},
{
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and a RATstall (caused by reservation station full) is asserted.",
"EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x7f",
- "EventName": "NO_ALLOC_CYCLES.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the total number of core cycles when no micro-ops are allocated for any reason."
- },
- {
- "EventCode": "0xCB",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "RS_FULL_STALL.MEC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of core cycles when allocation pipeline is stalled and is waiting for a free MEC reservation station entry."
- },
- {
- "EventCode": "0xCB",
- "Counter": "0,1",
- "UMask": "0x1f",
- "EventName": "RS_FULL_STALL.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full."
- },
- {
- "EventCode": "0xC0",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the total number of instructions retired"
- },
- {
- "PublicDescription": "This event counts cycles when the divider is busy. More specifically cycles when the divide unit is unable to accept a new divide uop because it is busy processing a previously dispatched uop. The cycles will be counted irrespective of whether or not another divide uop is waiting to enter the divide unit (from the RS). This event counts integer divides, x87 divides, divss, divsd, sqrtss, sqrtsd event and does not count vector divides.",
- "EventCode": "0xCD",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "CYCLES_DIV_BUSY.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles the number of core cycles when divider is busy. Does not imply a stall waiting for the divider."
- },
- {
- "PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps.",
- "Counter": "Fixed counter 1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Fixed Counter: Counts the number of instructions retired"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of unhalted core clock cycles"
- },
- {
- "EventCode": "0x3C",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of unhalted reference clock cycles"
- },
- {
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles"
- },
- {
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles"
- },
- {
- "EventCode": "0xE6",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "BACLEARS.ALL",
+ "EventName": "NO_ALLOC_CYCLES.RAT_STALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of times the front end resteers for any branch as a result of another branch handling mechanism in the front end."
+ "UMask": "0x20"
},
{
- "EventCode": "0xE6",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "BACLEARS.RETURN",
+ "BriefDescription": "Counts the number of core cycles when no micro-ops are allocated and the ROB is full",
+ "EventCode": "0xCA",
+ "EventName": "NO_ALLOC_CYCLES.ROB_FULL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of times the front end resteers for RET branches as a result of another branch handling mechanism in the front end."
+ "UMask": "0x1"
},
{
- "EventCode": "0xE6",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "BACLEARS.COND",
+ "BriefDescription": "Counts any retired load that was pushed into the recycle queue for any reason.",
+ "EventCode": "0x03",
+ "EventName": "RECYCLEQ.ANY_LD",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of times the front end resteers for conditional branches as a result of another branch handling mechanism in the front end."
+ "UMask": "0x40"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts any retired store that was pushed into the recycle queue for any reason.",
"EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD",
+ "EventName": "RECYCLEQ.ANY_ST",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address partially overlaps with a store",
- "Data_LA": "1"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address overlaps with a store whose data is not ready",
"EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "RECYCLEQ.LD_BLOCK_STD_NOTREADY",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of occurences a retired load gets blocked because its address overlaps with a store whose data is not ready"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of retired store that experienced a cache line boundary split(Precise Event). Note that each spilt should be counted only once.",
+ "BriefDescription": "Counts the number of occurrences a retired load gets blocked because its address partially overlaps with a store",
+ "Data_LA": "1",
"EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "RECYCLEQ.ST_SPLITS",
+ "EventName": "RECYCLEQ.LD_BLOCK_ST_FORWARD",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of occurences a retired store that is a cache line split. Each split should be counted only once."
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of occurrences a retired load that is a cache line split. Each split should be counted only once.",
+ "Data_LA": "1",
"EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "RECYCLEQ.LD_SPLITS",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of occurences a retired load that is a cache line split. Each split should be counted only once.",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Counts all the retired locked loads. It does not include stores because we would double count if we count stores",
"EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x10",
"EventName": "RECYCLEQ.LOCK",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts all the retired locked loads. It does not include stores because we would double count if we count stores"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts the store micro-ops retired that were pushed in the rehad queue because the store address buffer is full",
"EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x20",
"EventName": "RECYCLEQ.STA_FULL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the store micro-ops retired that were pushed in the rehad queue because the store address buffer is full"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Counts the number of occurrences a retired store that is a cache line split. Each split should be counted only once.",
"EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "RECYCLEQ.ANY_LD",
+ "EventName": "RECYCLEQ.ST_SPLITS",
+ "PublicDescription": "This event counts the number of retired store that experienced a cache line boundary split(Precise Event). Note that each spilt should be counted only once.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts any retired load that was pushed into the recycle queue for any reason."
+ "UMask": "0x4"
},
{
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x80",
- "EventName": "RECYCLEQ.ANY_ST",
+ "BriefDescription": "Counts the total number of core cycles the Alloc pipeline is stalled when any one of the reservation stations is full.",
+ "EventCode": "0xCB",
+ "EventName": "RS_FULL_STALL.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts any retired store that was pushed into the recycle queue for any reason."
+ "UMask": "0x1f"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xf9",
- "EventName": "BR_MISP_RETIRED.CALL",
+ "BriefDescription": "Counts the number of core cycles when allocation pipeline is stalled and is waiting for a free MEC reservation station entry.",
+ "EventCode": "0xCB",
+ "EventName": "RS_FULL_STALL.MEC",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted near CALL branch instructions retired."
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xfd",
- "EventName": "BR_MISP_RETIRED.REL_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted near relative CALL branch instructions retired."
+ "BriefDescription": "Counts the number of micro-ops retired",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PublicDescription": "This event counts the number of micro-ops (uops) retired. The processor decodes complex macro instructions into a sequence of simpler uops. Most instructions are composed of one or two uops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xbf",
- "EventName": "BR_MISP_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted far branch instructions retired."
+ "BriefDescription": "Counts the number of micro-ops retired that are from the complex flows issued by the micro-sequencer (MS).",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PublicDescription": "This event counts the number of micro-ops retired that were supplied from MSROM.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json
new file mode 100644
index 000000000000..1b8dcfa5461c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-cache.json
@@ -0,0 +1,3365 @@
+[
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ or PRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 6",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 7",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 0-7",
+ "EventCode": "0x81",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired For Transgress 8",
+ "EventCode": "0x81",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 6",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 7",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 0-7",
+ "EventCode": "0x83",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy For Transgress 8",
+ "EventCode": "0x83",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 6",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 7",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 0-7",
+ "EventCode": "0x89",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired For Transgress 8",
+ "EventCode": "0x89",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 6",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 7",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 0-7",
+ "EventCode": "0x8B",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy For Transgress 8",
+ "EventCode": "0x8B",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "EventCode": "0xD1",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "EventCode": "0xD1",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "EventCode": "0xD5",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "EventCode": "0xD5",
+ "EventName": "UNC_H_AG0_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 6",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 7",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 0-7",
+ "EventCode": "0x85",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired For Transgress 8",
+ "EventCode": "0x85",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 6",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 7",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 0-7",
+ "EventCode": "0x87",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy For Transgress 8",
+ "EventCode": "0x87",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 6",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 7",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 0-7",
+ "EventCode": "0x8D",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired For Transgress 8",
+ "EventCode": "0x8D",
+ "EventName": "UNC_H_AG1_BL_CRD_ACQUIRED_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 6",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 7",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 0-7",
+ "EventCode": "0x8F",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy For Transgress 8",
+ "EventCode": "0x8F",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "EventCode": "0xD3",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "EventCode": "0xD3",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_AD_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 6",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR6",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 7",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL.TGR7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 0-7",
+ "EventCode": "0xD7",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.ANY_OF_TGR0_THRU_TGR7",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Transgress Credits For Transgress 8",
+ "EventCode": "0xD7",
+ "EventName": "UNC_H_AG1_STALL_NO_CRD_EGRESS_HORZ_BL_EXT.TGR8",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized that Match NID",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Read transactions",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized that Does Not Match NID",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "EventCode": "0x37",
+ "EventName": "UNC_H_CACHE_LINES_VICTIMIZED.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "EventCode": "0xC0",
+ "EventName": "UNC_H_CLOCK",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_EGRESS_HORZ_ADS_USED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_EGRESS_HORZ_ADS_USED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_EGRESS_HORZ_ADS_USED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Bypass. AD ring",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_EGRESS_HORZ_BYPASS.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Bypass. AK ring",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_EGRESS_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Bypass. BL ring",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_EGRESS_HORZ_BYPASS.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Bypass. IV ring",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_EGRESS_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full AD",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full AK",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full BL",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full IV",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty AD",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty AK",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty BL",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty IV",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_EGRESS_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts AD",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_EGRESS_HORZ_INSERTS.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts AK",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_EGRESS_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts BL",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_EGRESS_HORZ_INSERTS.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts IV",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_EGRESS_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_EGRESS_HORZ_NACK.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_EGRESS_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_EGRESS_HORZ_NACK.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_EGRESS_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy AD",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy AK",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy BL",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy IV",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_EGRESS_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_EGRESS_HORZ_STARVED.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_EGRESS_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_EGRESS_HORZ_STARVED.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_EGRESS_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "EventCode": "0xAE",
+ "EventName": "UNC_H_EGRESS_ORDERING.IV_SNP_GO_DN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "EventCode": "0xAE",
+ "EventName": "UNC_H_EGRESS_ORDERING.IV_SNP_GO_UP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_EGRESS_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. AD ring agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. AD ring agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. AK ring agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. AK ring agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. BL ring agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. BL ring agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Bypass. IV ring agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_EGRESS_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full IV - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_FULL.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AD - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AD - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AK - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty AK - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty BL - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty BL - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty IV - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_EGRESS_VERT_CYCLES_NE.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations AD - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations AD - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations AK - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations AK - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations BL - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations BL - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations IV - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_EGRESS_VERT_INSERTS.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs Onto AK Ring",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs Onto BL Ring",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_EGRESS_VERT_NACK.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy IV - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_EGRESS_VERT_OCCUPANCY.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation Onto AK Ring",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation Onto BL Ring",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_EGRESS_VERT_STARVED.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts cycles source throttling is asserted - horizontal",
+ "EventCode": "0xA5",
+ "EventName": "UNC_H_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts cycles source throttling is asserted - vertical",
+ "EventCode": "0xA5",
+ "EventName": "UNC_H_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Left and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Left and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Right and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop - Right and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Left and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Left and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Right and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop - Right and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Left and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Left and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Right and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop - Right and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop - Left",
+ "EventCode": "0xAD",
+ "EventName": "UNC_H_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop - Right",
+ "EventCode": "0xAD",
+ "EventName": "UNC_H_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Allocations. Counts number of allocations per cycle into the specified Ingress queue. - PRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_INGRESS_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles with the IPQ in Internal Starvation.",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_INGRESS_INT_STARVED.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles with the IRQ in Internal Starvation.",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_INGRESS_INT_STARVED.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles with the ISMQ in Internal Starvation.",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_INGRESS_INT_STARVED.ISMQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress internal starvation cycles. Counts cycles in internal starvation. This occurs when one or more of the entries in the ingress queue are being starved out by other entries in the queue.",
+ "EventCode": "0x14",
+ "EventName": "UNC_H_INGRESS_INT_STARVED.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - IRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - PRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Occupancy. Counts number of entries in the specified Ingress queue in each cycle. - PRQ Rejected",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_INGRESS_OCCUPANCY.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.ANY_REJECT_IPQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_INGRESS_RETRY_IPQ1_REJECT.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_INGRESS_RETRY_IRQ1_REJECT.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_INGRESS_RETRY_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Queue Retries",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_INGRESS_RETRY_OTHER1_RETRY.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Request Queue Rejects",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_INGRESS_RETRY_PRQ1_REJECT.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "REQUESTQ'' includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_INGRESS_RETRY_REQ_Q1_RETRY.SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. CV0 Prefetch Miss",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. CV0 Prefetch Victim",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. RFO HitS",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous events in the Cbo. Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Acknowledgements to core",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Data Responses to core.",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Horizontal ring that were bounced, by ring type - Snoops of processor's cache.",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Acknowledgements to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Data Responses to core.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of incoming messages from the Vertical ring that were bounced, by ring type - Snoops of processor's cache.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal ring sink starvation count - AD ring",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal ring sink starvation count - AK ring",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal ring sink starvation count - BL ring",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal ring sink starvation count - IV ring",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical ring sink starvation count - AD ring",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical ring sink starvation count - AK ring",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical ring sink starvation count - BL ring",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical ring sink starvation count - IV ring",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts cycles in throttle mode.",
+ "EventCode": "0xA4",
+ "EventName": "UNC_H_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_SF_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Read transactions",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_SF_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_SF_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups. Counts the number of times the LLC was accessed. Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "EventCode": "0x34",
+ "EventName": "UNC_H_SF_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_TG_INGRESS_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass. Number of packets bypassing the CMS Ingress .",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_TG_INGRESS_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation. Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_TG_INGRESS_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations. Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_TG_INGRESS_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy. Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_TG_INGRESS_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -SF/LLC Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x32",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -Hit (Not a Miss)",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMask": "0x1f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IPQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x38",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -IRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -Miss",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x2f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent -PRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_H_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x32",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -Hit (Not a Miss)",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMask": "0x1f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x38",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ hit",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IPQ miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ hit",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -IRQ or PRQ miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.IRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMask": "0x2f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ hit",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent -PRQ miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_H_TOR_OCCUPANCY.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Uncore Clocks",
+ "EventName": "UNC_H_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Down and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Down and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Up and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop - Up and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Down and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Down and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Up and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop - Up and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Down and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Down and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Up and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop - Up and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop - Down",
+ "EventCode": "0xAC",
+ "EventName": "UNC_H_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop - Up",
+ "EventCode": "0xAC",
+ "EventName": "UNC_H_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ }
+]
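
Aside (editorial sketch, not part of the patch): the UNC_H_TOR_OCCUPANCY.* events above accumulate the number of valid TOR entries each cycle, while the matching UNC_H_TOR_INSERTS.* subevents count allocations, so by Little's law their ratio approximates how many uncore cycles a matching request spends in the TOR. A minimal Python sketch, assuming raw counts collected for one subevent pair:

    def avg_tor_residency_cycles(tor_occupancy_sum, tor_inserts):
        """Average TOR residency in uncore clock cycles, e.g. from
        UNC_H_TOR_OCCUPANCY.MISS and UNC_H_TOR_INSERTS.MISS counts."""
        if tor_inserts == 0:
            return 0.0
        return tor_occupancy_sum / tor_inserts
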
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json
new file mode 100644
index 000000000000..898f7e425cd4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json
@@ -0,0 +1,194 @@
+[
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AD_0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AD_1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AK_0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. AK_1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. BL_0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full. Counts the number of cycles when the M2PCIe Egress is full. BL_1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_FULL.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AD_0",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AD_1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AK_0",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. AK_1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. BL_0",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty. Counts the number of cycles when the M2PCIe Egress is not empty. BL_1",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_EGRESS_CYCLES_NE.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AD_0",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AD_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AD_1",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AD_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_0",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AK_0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_1",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AK_1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_CRD_0",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AK_CRD_0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. AK_CRD_1",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.AK_CRD_1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. BL_0",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.BL_0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress. Counts the number of number of messages inserted into the the M2PCIe Egress queue. BL_1",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_EGRESS_INSERTS.BL_1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.ALL",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_INGRESS_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_IDI",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_IDI",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_NCB",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress Queue Cycles Not Empty. Counts the number of cycles when the M2PCIe Ingress is not empty.CBO_NCS",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_INGRESS_CYCLES_NE.CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ }
+]
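
Aside (editorial sketch, not part of the patch): the JSON event files added here all use the same flat per-event record layout seen above (BriefDescription, EventCode, EventName, PerPkg, UMask, Unit). A minimal Python sanity check, assuming only the keys visible in these entries rather than the full schema enforced by perf's jevents build:

    import json
    import re

    HEX = re.compile(r"^0x[0-9a-fA-F]+$")

    def check_events(path):
        """Flag entries missing common keys or using non-hex code/mask values."""
        with open(path) as f:
            events = json.load(f)
        for ev in events:
            name = ev.get("EventName", "<unnamed>")
            for key in ("BriefDescription", "EventName", "PerPkg", "Unit"):
                if key not in ev:
                    print(f"{name}: missing {key}")
            for key in ("EventCode", "UMask"):
                if key in ev and not HEX.match(ev[key]):
                    print(f"{name}: {key} is not a hex string: {ev[key]}")

    check_events("tools/perf/pmu-events/arch/x86/knightslanding/uncore-io.json")
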
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
index e3bcd86c4f56..fb752974179b 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/uncore-memory.json
@@ -1,42 +1,106 @@
[
{
- "BriefDescription": "ddr bandwidth read (CPU traffic only) (MB/sec). ",
- "Counter": "0,1,2,3",
- "EventCode": "0x03",
- "EventName": "UNC_M_CAS_COUNT.RD",
+ "BriefDescription": "Counts the number of read requests and streaming stores that hit in MCDRAM cache and the data in MCDRAM is clean with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.HIT_CLEAN",
"PerPkg": "1",
- "ScaleUnit": "6.4e-05MiB",
- "UMask": "0x01",
- "Unit": "imc"
+ "UMask": "0x1",
+ "Unit": "EDC_UCLK"
},
{
- "BriefDescription": "ddr bandwidth write (CPU traffic only) (MB/sec). ",
- "Counter": "0,1,2,3",
- "EventCode": "0x03",
- "EventName": "UNC_M_CAS_COUNT.WR",
+ "BriefDescription": "Counts the number of read requests and streaming stores that hit in MCDRAM cache and the data in MCDRAM is dirty with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.HIT_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "Counts the number of read requests and streaming stores that miss in MCDRAM cache and the data evicted from the MCDRAM is clean with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.MISS_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "Counts the number of read requests and streaming stores that miss in MCDRAM cache and the data evicted from the MCDRAM is dirty with respect to DDR. This event is only valid in cache and hybrid memory mode.",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.MISS_DIRTY",
"PerPkg": "1",
- "ScaleUnit": "6.4e-05MiB",
- "UMask": "0x02",
- "Unit": "imc"
+ "UMask": "0x8",
+ "Unit": "EDC_UCLK"
},
{
- "BriefDescription": "mcdram bandwidth read (CPU traffic only) (MB/sec). ",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Number of EDC Hits or Misses. Miss I",
+ "EventCode": "0x02",
+ "EventName": "UNC_E_EDC_ACCESS.MISS_INVALID",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "EDC_UCLK"
+ },
+ {
+ "BriefDescription": "ECLK count",
+ "EventName": "UNC_E_E_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "EDC_ECLK"
+ },
+ {
+ "BriefDescription": "Counts the number of read requests received by the MCDRAM controller. This event is valid in all three memory modes: flat, cache and hybrid. In cache and hybrid memory mode, this event counts all read requests as well as streaming stores that hit or miss in the MCDRAM cache.",
"EventCode": "0x01",
"EventName": "UNC_E_RPQ_INSERTS",
"PerPkg": "1",
- "ScaleUnit": "6.4e-05MiB",
- "UMask": "0x01",
- "Unit": "edc_eclk"
+ "UMask": "0x1",
+ "Unit": "EDC_ECLK"
+ },
+ {
+ "BriefDescription": "UCLK count",
+ "EventName": "UNC_E_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "EDC_UCLK"
},
{
- "BriefDescription": "mcdram bandwidth write (CPU traffic only) (MB/sec). ",
- "Counter": "0,1,2,3",
+ "BriefDescription": "Counts the number of write requests received by the MCDRAM controller. This event is valid in all three memory modes: flat, cache and hybrid. In cache and hybrid memory mode, this event counts all streaming stores, writebacks and, read requests that miss in MCDRAM cache.",
"EventCode": "0x02",
"EventName": "UNC_E_WPQ_INSERTS",
"PerPkg": "1",
- "ScaleUnit": "6.4e-05MiB",
- "UMask": "0x01",
- "Unit": "edc_eclk"
+ "UMask": "0x1",
+ "Unit": "EDC_ECLK"
+ },
+ {
+ "BriefDescription": "CAS All",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC_DCLK"
+ },
+ {
+ "BriefDescription": "CAS Reads",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC_DCLK"
+ },
+ {
+ "BriefDescription": "CAS Writes",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC_DCLK"
+ },
+ {
+ "BriefDescription": "DCLK count",
+ "EventName": "UNC_M_D_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC_DCLK"
+ },
+ {
+ "BriefDescription": "UCLK count",
+ "EventName": "UNC_M_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC_UCLK"
}
]
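
Aside (editorial sketch, not part of the patch): the removed entries carried a ScaleUnit so perf could report bandwidth directly, while the rewritten UNC_M_CAS_COUNT.RD/WR entries expose raw counts only. A hedged Python sketch of the by-hand conversion, assuming each CAS transfers one 64-byte cache line (which is what the dropped 6.4e-05 MB-per-event scale factor encoded); the counts below are hypothetical:

    def ddr_bandwidth_mb_s(cas_rd, cas_wr, interval_s):
        """Approximate DDR read+write bandwidth in MB/s from raw
        UNC_M_CAS_COUNT.RD/WR counts, assuming 64 bytes per CAS."""
        return (cas_rd + cas_wr) * 64 / 1e6 / interval_s

    # Hypothetical counts sampled over a one-second interval:
    print(ddr_bandwidth_mb_s(cas_rd=1_500_000, cas_wr=500_000, interval_s=1.0))
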
diff --git a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
index 9e493977771f..99a8fa8f19cc 100644
--- a/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/knightslanding/virtual-memory.json
@@ -1,65 +1,58 @@
[
{
- "PEBS": "1",
+ "BriefDescription": "Counts the number of load micro-ops retired that cause a DTLB miss",
+ "Data_LA": "1",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "PEBS": "1",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load micro-ops retired that cause a DTLB miss",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Counts the total number of core cycles for all the page walks. The cycles for page walks started in speculative path will also be included.",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.D_SIDE_WALKS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total D-side page walks that are completed or started. The page walks started in the speculative path will also be counted",
- "EdgeDetect": "1"
+ "EventName": "PAGE_WALKS.CYCLES",
+ "PublicDescription": "This event counts every cycle when a data (D) page walk or instruction (I) page walk is in progress.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Counts the total number of core cycles for all the D-side page walks. The cycles for page walks started in speculative path will also be included.",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "PAGE_WALKS.D_SIDE_CYCLES",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the total number of core cycles for all the D-side page walks. The cycles for page walks started in speculative path will also be included."
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts the total D-side page walks that are completed or started. The page walks started in the speculative path will also be counted",
+ "EdgeDetect": "1",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "PAGE_WALKS.I_SIDE_WALKS",
+ "EventName": "PAGE_WALKS.D_SIDE_WALKS",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the total I-side page walks that are completed.",
- "EdgeDetect": "1"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress.",
+ "BriefDescription": "Counts the total number of core cycles for all the I-side page walks. The cycles for page walks started in speculative path will also be included.",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "PublicDescription": "This event counts every cycle when an I-side (walks due to an instruction fetch) page walk is in progress.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the total number of core cycles for all the I-side page walks. The cycles for page walks started in speculative path will also be included."
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts the total I-side page walks that are completed.",
+ "EdgeDetect": "1",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.WALKS",
+ "EventName": "PAGE_WALKS.I_SIDE_WALKS",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the total page walks that are completed (I-side and D-side)",
- "EdgeDetect": "1"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts every cycle when a data (D) page walk or instruction (I) page walk is in progress.",
+ "BriefDescription": "Counts the total page walks that are completed (I-side and D-side)",
+ "EdgeDetect": "1",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the total number of core cycles for all the page walks. The cycles for page walks started in speculative path will also be included."
+ "EventName": "PAGE_WALKS.WALKS",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3"
}
-] \ No newline at end of file
+]
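
Aside (editorial sketch, not part of the patch): the virtual-memory.json hunk above is largely mechanical -- the per-event "Counter" field is dropped, keys within each entry are sorted alphabetically, entries are re-ordered by EventName, and the missing final newline is added. A rough Python approximation of that normalization (not the converter actually used upstream):

    import json

    def normalize(path, drop=("Counter",)):
        with open(path) as f:
            events = json.load(f)
        # Drop unwanted keys and sort the remaining keys within each entry.
        events = [{k: v for k, v in sorted(ev.items()) if k not in drop}
                  for ev in events]
        # Re-order the entries themselves by event name.
        events.sort(key=lambda ev: ev["EventName"])
        with open(path, "w") as f:
            json.dump(events, f, indent=4)
            f.write("\n")  # the hunk also adds the missing trailing newline
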
diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv
index 25b06cf98747..66c37a3cbf43 100644
--- a/tools/perf/pmu-events/arch/x86/mapfile.csv
+++ b/tools/perf/pmu-events/arch/x86/mapfile.csv
@@ -1,40 +1,39 @@
Family-model,Version,Filename,EventType
-GenuineIntel-6-56,v5,broadwellde,core
-GenuineIntel-6-3D,v17,broadwell,core
-GenuineIntel-6-47,v17,broadwell,core
-GenuineIntel-6-4F,v10,broadwellx,core
-GenuineIntel-6-1C,v4,bonnell,core
-GenuineIntel-6-26,v4,bonnell,core
-GenuineIntel-6-27,v4,bonnell,core
-GenuineIntel-6-36,v4,bonnell,core
-GenuineIntel-6-35,v4,bonnell,core
-GenuineIntel-6-5C,v8,goldmont,core
-GenuineIntel-6-7A,v1,goldmontplus,core
-GenuineIntel-6-3C,v24,haswell,core
-GenuineIntel-6-45,v24,haswell,core
-GenuineIntel-6-46,v24,haswell,core
-GenuineIntel-6-3F,v17,haswellx,core
-GenuineIntel-6-3A,v18,ivybridge,core
-GenuineIntel-6-3E,v19,ivytown,core
-GenuineIntel-6-2D,v20,jaketown,core
-GenuineIntel-6-57,v9,knightslanding,core
-GenuineIntel-6-85,v9,knightslanding,core
-GenuineIntel-6-1E,v2,nehalemep,core
-GenuineIntel-6-1F,v2,nehalemep,core
-GenuineIntel-6-1A,v2,nehalemep,core
-GenuineIntel-6-2E,v2,nehalemex,core
-GenuineIntel-6-[4589]E,v24,skylake,core
-GenuineIntel-6-37,v13,silvermont,core
-GenuineIntel-6-4D,v13,silvermont,core
-GenuineIntel-6-4C,v13,silvermont,core
-GenuineIntel-6-2A,v15,sandybridge,core
-GenuineIntel-6-2C,v2,westmereep-dp,core
-GenuineIntel-6-25,v2,westmereep-sp,core
-GenuineIntel-6-2F,v2,westmereex,core
-GenuineIntel-6-55-[01234],v1,skylakex,core
-GenuineIntel-6-55-[56789ABCDEF],v1,cascadelakex,core
-GenuineIntel-6-7D,v1,icelake,core
-GenuineIntel-6-7E,v1,icelake,core
-GenuineIntel-6-86,v1,tremontx,core
+GenuineIntel-6-(97|9A|B7|BA|BF),v1.20,alderlake,core
+GenuineIntel-6-BE,v1.20,alderlaken,core
+GenuineIntel-6-(1C|26|27|35|36),v4,bonnell,core
+GenuineIntel-6-(3D|47),v27,broadwell,core
+GenuineIntel-6-56,v9,broadwellde,core
+GenuineIntel-6-4F,v20,broadwellx,core
+GenuineIntel-6-55-[56789ABCDEF],v1.17,cascadelakex,core
+GenuineIntel-6-9[6C],v1.03,elkhartlake,core
+GenuineIntel-6-5[CF],v13,goldmont,core
+GenuineIntel-6-7A,v1.01,goldmontplus,core
+GenuineIntel-6-B6,v1.00,grandridge,core
+GenuineIntel-6-A[DE],v1.01,graniterapids,core
+GenuineIntel-6-(3C|45|46),v33,haswell,core
+GenuineIntel-6-3F,v27,haswellx,core
+GenuineIntel-6-(7D|7E|A7),v1.17,icelake,core
+GenuineIntel-6-6[AC],v1.20,icelakex,core
+GenuineIntel-6-3A,v24,ivybridge,core
+GenuineIntel-6-3E,v23,ivytown,core
+GenuineIntel-6-2D,v23,jaketown,core
+GenuineIntel-6-(57|85),v10,knightslanding,core
+GenuineIntel-6-A[AC],v1.01,meteorlake,core
+GenuineIntel-6-1[AEF],v3,nehalemep,core
+GenuineIntel-6-2E,v3,nehalemex,core
+GenuineIntel-6-2A,v19,sandybridge,core
+GenuineIntel-6-(8F|CF),v1.12,sapphirerapids,core
+GenuineIntel-6-AF,v1.00,sierraforest,core
+GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core
+GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v55,skylake,core
+GenuineIntel-6-55-[01234],v1.29,skylakex,core
+GenuineIntel-6-86,v1.20,snowridgex,core
+GenuineIntel-6-8[CD],v1.10,tigerlake,core
+GenuineIntel-6-2C,v4,westmereep-dp,core
+GenuineIntel-6-25,v3,westmereep-sp,core
+GenuineIntel-6-2F,v3,westmereex,core
AuthenticAMD-23-([12][0-9A-F]|[0-9A-F]),v2,amdzen1,core
AuthenticAMD-23-[[:xdigit:]]+,v1,amdzen2,core
+AuthenticAMD-25-([245][[:xdigit:]]|[[:xdigit:]]),v1,amdzen3,core
+AuthenticAMD-25-[[:xdigit:]]+,v1,amdzen4,core
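
The mapfile rows above tie a CPUID vendor-family-model string to an event directory through an extended regular expression, so GenuineIntel-6-A[AC] selects the meteorlake files for both Meteor Lake model numbers. A minimal C sketch of that kind of matching follows, using POSIX regcomp()/regexec(); it anchors only the start of the string because the real CPUID string may carry a stepping suffix, and perf's own matching lives in the pmu-events/jevents code, so treat this as illustrative rather than the tool's implementation.

/* Illustrative only: match a CPUID string such as "GenuineIntel-6-AA-4"
 * against a mapfile.csv pattern such as "GenuineIntel-6-A[AC]".
 * perf has its own matching in pmu-events; this sketch just shows the
 * regex idea with the pattern anchored at the start of the string. */
#include <regex.h>
#include <stdio.h>

static int mapfile_match(const char *pattern, const char *cpuid)
{
	char anchored[256];
	regex_t re;
	int rc;

	snprintf(anchored, sizeof(anchored), "^%s", pattern);
	if (regcomp(&re, anchored, REG_EXTENDED | REG_NOSUB))
		return 0;
	rc = regexec(&re, cpuid, 0, NULL, 0);
	regfree(&re);
	return rc == 0;
}

int main(void)
{
	/* Model 0xAA matches the meteorlake pattern, model 0xAB does not. */
	printf("%d\n", mapfile_match("GenuineIntel-6-A[AC]", "GenuineIntel-6-AA-4"));
	printf("%d\n", mapfile_match("GenuineIntel-6-A[AC]", "GenuineIntel-6-AB-0"));
	return 0;
}
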
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/cache.json b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
new file mode 100644
index 000000000000..bf24d3f25a3d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/cache.json
@@ -0,0 +1,204 @@
+[
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of store ops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6",
+ "Unit": "cpu_atom"
+ }
+]
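
Each JSON entry above boils down to an event-select/umask pair programmed into a core PMU counter. As a hedged sketch (not the perf tool's own code), the snippet below opens LONGEST_LAT_CACHE.MISS from the table above (EventCode 0x2e, UMask 0x41) as a raw event, using the architectural Intel encoding of event code in config bits 0-7 and umask in bits 8-15; on a hybrid part like Meteor Lake the target PMU (cpu_core vs cpu_atom) would normally be selected explicitly, which is glossed over here.

/* Sketch: count LONGEST_LAT_CACHE.MISS (EventCode 0x2e, UMask 0x41 above)
 * on the calling thread via a raw perf event.  The event code lands in
 * bits 0-7 of attr.config and the umask in bits 8-15 (architectural Intel
 * PerfMon encoding); hybrid PMU selection and error handling are kept
 * minimal on purpose. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x41ULL << 8) | 0x2e;	/* UMask 0x41, EventCode 0x2e */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... the workload being measured would run here ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("LONGEST_LAT_CACHE.MISS: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
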
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
new file mode 100644
index 000000000000..66e5609699ea
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/frontend.json
@@ -0,0 +1,27 @@
+[
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_BUBBLES.CORE",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were no operation was delivered to the back-end pipeline due to instruction fetch limitations when the back-end could have accepted more operations. Common examples include instruction cache misses or x86 instruction decode limitations.\nThe count may be distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Frontend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
new file mode 100644
index 000000000000..20c2efe70eeb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/memory.json
@@ -0,0 +1,157 @@
+[
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PEBS": "2",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
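
All of the MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries above share EventCode 0xcd / UMask 0x1 and differ only in the threshold the JSON expresses through MSRIndex 0x3F6 and MSRValue. With the perf tool that threshold is normally passed as the ldlat parameter, which on Intel core PMUs is conventionally exposed as config1 bits 0-15 in sysfs; the sketch below assumes that mapping and omits the mmap'ed ring buffer needed to actually consume the PEBS samples, so it is an outline rather than a working profiler.

/* Sketch: request the PEBS load-latency facility for loads taking more
 * than 128 cycles, i.e. MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128 above
 * (EventCode 0xcd, UMask 0x1, MSRValue 0x80 == 128).  Assumes the usual
 * Intel "ldlat" format term maps onto config1; consuming the samples via
 * the mmap ring buffer is omitted. */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = (0x1ULL << 8) | 0xcd;	/* UMask 0x1, EventCode 0xcd */
	attr.config1 = 128;			/* latency threshold (ldlat), assumed to be config1 */
	attr.sample_period = 1009;		/* SampleAfterValue from the JSON above */
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR | PERF_SAMPLE_WEIGHT;
	attr.precise_ip = 2;			/* PEBS, matching the "PEBS": "2" field */
	attr.exclude_kernel = 1;
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open (load-latency sketch)");
	else
		close(fd);
	return 0;
}
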
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/other.json b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
new file mode 100644
index 000000000000..14e648bf11c5
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/other.json
@@ -0,0 +1,42 @@
+[
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts demand read for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
new file mode 100644
index 000000000000..639789478073
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/pipeline.json
@@ -0,0 +1,214 @@
+[
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "SampleAfterValue": "200003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that were not consumed by the back-end pipeline due to lack of back-end resources, as a result of memory subsystem delays, execution units limitations, or other conditions.\nThe count is distributed among unhalted logical processors (hyper-threads) who share the same physical core, in processors that support Intel Hyper-Threading Technology. Software can use this event as the numerator for the Backend Bound metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL",
+ "EventCode": "0x72",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "This event counts a subset of the Topdown Slots event that are utilized by operations that eventually get retired (committed) by the processor pipeline. Usually, this event positively correlates with higher performance for example, as measured by the instructions-per-cycle metric.\nSoftware can use this event as the numerator for the Retiring metric (or top-level category) of the Top-down Microarchitecture Analysis method.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2",
+ "Unit": "cpu_core"
+ }
+]
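
The descriptions above spell out the level-1 Top-down relations for the cpu_core PMU: TOPDOWN.SLOTS is the denominator, TOPDOWN.BACKEND_BOUND_SLOTS the Backend Bound numerator, UOPS_RETIRED.SLOTS the Retiring numerator, and (from frontend.json earlier in this patch) IDQ_BUBBLES.CORE the Frontend Bound numerator. A small sketch of those ratios follows, with Bad Speculation taken as the remainder per the usual TMA identity, which this patch does not state explicitly; the counter values are made up for illustration.

/* Level-1 Top-down ratios built from the events documented above.
 * Counter readings here are hypothetical placeholders. */
#include <stdio.h>

int main(void)
{
	double slots      = 1.0e9;	/* TOPDOWN.SLOTS */
	double fe_bubbles = 2.0e8;	/* IDQ_BUBBLES.CORE */
	double be_slots   = 3.0e8;	/* TOPDOWN.BACKEND_BOUND_SLOTS */
	double ret_slots  = 4.0e8;	/* UOPS_RETIRED.SLOTS */

	double frontend = fe_bubbles / slots;
	double backend  = be_slots / slots;
	double retiring = ret_slots / slots;
	double bad_spec = 1.0 - frontend - backend - retiring;	/* TMA remainder */

	printf("Frontend %.2f  Backend %.2f  Retiring %.2f  BadSpec %.2f\n",
	       frontend, backend, retiring, bad_spec);
	return 0;
}
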
diff --git a/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
new file mode 100644
index 000000000000..556e4292fcc8
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/meteorlake/virtual-memory.json
@@ -0,0 +1,38 @@
+[
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe",
+ "Unit": "cpu_atom"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe",
+ "Unit": "cpu_core"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
index a11029efda2f..1a132fcda964 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/cache.json
@@ -1,3229 +1,2705 @@
[
{
+ "BriefDescription": "Cycles L1D locked",
"EventCode": "0x63",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "CACHE_LOCK_CYCLES.L1D",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles L1D locked"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles L1D and L2 locked",
"EventCode": "0x63",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "CACHE_LOCK_CYCLES.L1D_L2",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles L1D and L2 locked"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1D cache lines replaced in M state",
"EventCode": "0x51",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D.M_EVICT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D cache lines replaced in M state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1D cache lines allocated in the M state",
"EventCode": "0x51",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D.M_REPL",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D cache lines allocated in the M state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1D snoop eviction of cache lines in M state",
"EventCode": "0x51",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "L1D.M_SNOOP_EVICT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D snoop eviction of cache lines in M state"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 data cache lines allocated",
"EventCode": "0x51",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D.REPL",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache lines allocated"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All references to the L1 data cache",
"EventCode": "0x43",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_ALL_REF.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All references to the L1 data cache"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1 data cacheable reads and writes",
"EventCode": "0x43",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_ALL_REF.CACHEABLE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cacheable reads and writes"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1 data cache read in E state",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D_CACHE_LD.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 data cache read in I state (misses)",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_CACHE_LD.I_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in I state (misses)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1 data cache reads",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "L1D_CACHE_LD.M_STATE",
+ "EventName": "L1D_CACHE_LD.MESI",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in M state"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L1 data cache read in M state",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0xf",
- "EventName": "L1D_CACHE_LD.MESI",
+ "EventName": "L1D_CACHE_LD.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache reads"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 data cache read in S state",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_CACHE_LD.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1 data cache load locks in E state",
"EventCode": "0x42",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D_CACHE_LOCK.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 data cache load lock hits",
"EventCode": "0x42",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_CACHE_LOCK.HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load lock hits"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1 data cache load locks in M state",
"EventCode": "0x42",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "L1D_CACHE_LOCK.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in M state"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 data cache load locks in S state",
"EventCode": "0x42",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_CACHE_LOCK.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1D load lock accepted in fill buffer",
"EventCode": "0x53",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_CACHE_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D load lock accepted in fill buffer"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1D prefetch load lock accepted in fill buffer",
"EventCode": "0x52",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D prefetch load lock accepted in fill buffer"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1 data cache stores in E state",
"EventCode": "0x41",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D_CACHE_ST.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 data cache stores in M state",
"EventCode": "0x41",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "L1D_CACHE_ST.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in M state"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 data cache stores in S state",
"EventCode": "0x41",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_CACHE_ST.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1D hardware prefetch misses",
"EventCode": "0x4E",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_PREFETCH.MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch misses"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1D hardware prefetch requests",
"EventCode": "0x4E",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_PREFETCH.REQUESTS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch requests"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1D hardware prefetch requests triggered",
"EventCode": "0x4E",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D_PREFETCH.TRIGGERS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch requests triggered"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 writebacks to L2 in E state",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L1D_WB_L2.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 writebacks to L2 in I state (misses)",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L1D_WB_L2.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in I state (misses)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All L1 writebacks to L2",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L1D_WB_L2.M_STATE",
+ "EventName": "L1D_WB_L2.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in M state"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L1 writebacks to L2 in M state",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L1D_WB_L2.MESI",
+ "EventName": "L1D_WB_L2.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All L1 writebacks to L2"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 writebacks to L2 in S state",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L1D_WB_L2.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "All L2 data requests",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
"EventName": "L2_DATA_RQSTS.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 data requests"
+ "UMask": "0xff"
},
{
+ "BriefDescription": "L2 data demand loads in E state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L2 data demand loads in I state (misses)",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in I state (misses)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 data demand requests",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
+ "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in M state"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L2 data demand loads in M state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
+ "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand requests"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 data demand loads in S state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 data prefetches in E state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in E state"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "L2 data prefetches in the I state (misses)",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in the I state (misses)"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "All L2 data prefetches",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in M state"
+ "UMask": "0xf0"
},
{
+ "BriefDescription": "L2 data prefetches in M state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 data prefetches"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "L2 data prefetches in the S state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in the S state"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "L2 lines alloacated",
"EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines alloacated"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "L2 lines allocated in the E state",
"EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_LINES_IN.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines allocated in the E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L2 lines allocated in the S state",
"EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_LINES_IN.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines allocated in the S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 lines evicted",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
"EventName": "L2_LINES_OUT.ANY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L2 lines evicted by a demand request",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted by a demand request"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 modified lines evicted by a demand request",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 modified lines evicted by a demand request"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 lines evicted by a prefetch request",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted by a prefetch request"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L2 modified lines evicted by a prefetch request",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 modified lines evicted by a prefetch request"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 instruction fetches",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_RQSTS.IFETCH_HIT",
+ "EventName": "L2_RQSTS.IFETCHES",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch hits"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "L2 instruction fetch hits",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_RQSTS.IFETCH_MISS",
+ "EventName": "L2_RQSTS.IFETCH_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch misses"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "L2 instruction fetch misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.IFETCHES",
+ "EventName": "L2_RQSTS.IFETCH_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetches"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "L2 load hits",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_RQSTS.LD_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 load hits"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 load misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_RQSTS.LD_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 load misses"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EventName": "L2_RQSTS.LOADS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 requests"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "All L2 misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xaa",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 misses"
+ "UMask": "0xaa"
},
{
+ "BriefDescription": "All L2 prefetches",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_RQSTS.PREFETCH_HIT",
+ "EventName": "L2_RQSTS.PREFETCHES",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch hits"
+ "UMask": "0xc0"
},
{
+ "BriefDescription": "L2 prefetch hits",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_RQSTS.PREFETCH_MISS",
+ "EventName": "L2_RQSTS.PREFETCH_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch misses"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "L2 prefetch misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.PREFETCHES",
+ "EventName": "L2_RQSTS.PREFETCH_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 prefetches"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "All L2 requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 requests"
+ "UMask": "0xff"
},
{
+ "BriefDescription": "L2 RFO requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.RFOS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO hits"
+ "UMask": "0xc"
},
{
+ "BriefDescription": "L2 RFO hits",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO misses"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L2 RFO misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.RFOS",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO requests"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "All L2 transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "L2_TRANSACTIONS.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 transactions"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "L2 fill transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_TRANSACTIONS.FILL",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 fill transactions"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "L2 instruction fetch transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_TRANSACTIONS.IFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch transactions"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1D writeback to L2 transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_TRANSACTIONS.L1D_WB",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D writeback to L2 transactions"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "L2 Load transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_TRANSACTIONS.LOAD",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 Load transactions"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 prefetch transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "L2_TRANSACTIONS.PREFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch transactions"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 RFO transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_TRANSACTIONS.RFO",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO transactions"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 writeback to LLC transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_TRANSACTIONS.WB",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 writeback to LLC transactions"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "L2 demand lock RFOs in E state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_WRITE.LOCK.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in E state"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "All demand L2 lock RFOs that hit the cache",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xe0",
"EventName": "L2_WRITE.LOCK.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "All demand L2 lock RFOs that hit the cache"
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "L2 demand lock RFOs in I state (misses)",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_WRITE.LOCK.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in I state (misses)"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "All demand L2 lock RFOs",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_WRITE.LOCK.M_STATE",
+ "EventName": "L2_WRITE.LOCK.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in M state"
+ "UMask": "0xf0"
},
{
+ "BriefDescription": "L2 demand lock RFOs in M state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "L2_WRITE.LOCK.MESI",
+ "EventName": "L2_WRITE.LOCK.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All demand L2 lock RFOs"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "L2 demand lock RFOs in S state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_WRITE.LOCK.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in S state"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "All L2 demand store RFOs that hit the cache",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
"EventName": "L2_WRITE.RFO.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "All L2 demand store RFOs that hit the cache"
+ "UMask": "0xe"
},
{
+ "BriefDescription": "L2 demand store RFOs in I state (misses)",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_WRITE.RFO.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in I state (misses)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All L2 demand store RFOs",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_WRITE.RFO.M_STATE",
+ "EventName": "L2_WRITE.RFO.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in M state"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L2 demand store RFOs in M state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_WRITE.RFO.MESI",
+ "EventName": "L2_WRITE.RFO.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All L2 demand store RFOs"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 demand store RFOs in S state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_WRITE.RFO.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Longest latency cache miss",
"EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100000",
- "BriefDescription": "Longest latency cache miss"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Longest latency cache reference",
"EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "200000",
- "BriefDescription": "Longest latency cache reference"
- },
- {
- "PEBS": "1",
- "EventCode": "0xB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_INST_RETIRED.LOADS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired which contains a load (Precise Event)"
+ "UMask": "0x4f"
},
{
- "PEBS": "1",
+ "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)",
"EventCode": "0xB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_INST_RETIRED.STORES",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired which contains a store (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
- "SampleAfterValue": "10000",
- "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
- "SampleAfterValue": "40000",
- "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
- "SampleAfterValue": "40000",
- "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xF",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MEM_UNCORE_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "10000",
- "BriefDescription": "Load instructions retired with a data source of local DRAM or locally homed remote hitm (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xF",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_UNCORE_RETIRED.OTHER_CORE_L2_HITM",
- "SampleAfterValue": "40000",
- "BriefDescription": "Load instructions retired that HIT modified data in sibling core (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xF",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT",
- "SampleAfterValue": "20000",
- "BriefDescription": "Load instructions retired remote cache HIT data source (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xF",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_UNCORE_RETIRED.REMOTE_DRAM",
- "SampleAfterValue": "10000",
- "BriefDescription": "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xF",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "MEM_UNCORE_RETIRED.UNCACHEABLE",
- "SampleAfterValue": "4000",
- "BriefDescription": "Load instructions retired IO (Precise Event)"
- },
- {
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
- "SampleAfterValue": "100000",
- "BriefDescription": "Offcore L1 data cache writebacks"
- },
- {
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_SQ_FULL",
- "SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests blocked due to Super Queue full"
- },
- {
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Super Queue lock splits across a cache line"
- },
- {
- "EventCode": "0x6",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "STORE_BLOCKS.AT_RET",
- "SampleAfterValue": "200000",
- "BriefDescription": "Loads delayed with at-Retirement block code"
- },
- {
- "EventCode": "0x6",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "STORE_BLOCKS.L1D_BLOCK",
- "SampleAfterValue": "200000",
- "BriefDescription": "Cacheable loads delayed with L1D block code"
- },
- {
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x0",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
"MSRIndex": "0x3F6",
+ "PEBS": "2",
"SampleAfterValue": "2000000",
- "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x400",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
"SampleAfterValue": "100",
- "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x80",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
"SampleAfterValue": "1000",
- "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x10",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
"SampleAfterValue": "10000",
- "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x4000",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x4000",
+ "PEBS": "2",
"SampleAfterValue": "5",
- "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x800",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
"SampleAfterValue": "50",
- "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x100",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
"SampleAfterValue": "500",
- "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x20",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
"SampleAfterValue": "5000",
- "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x8000",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8000",
+ "PEBS": "2",
"SampleAfterValue": "3",
- "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x4",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
"SampleAfterValue": "50000",
- "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x1000",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x1000",
+ "PEBS": "2",
"SampleAfterValue": "20",
- "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x200",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
"SampleAfterValue": "200",
- "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x40",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
"SampleAfterValue": "2000",
- "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x8",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
"SampleAfterValue": "20000",
- "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x2000",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x2000",
+ "PEBS": "2",
"SampleAfterValue": "10",
- "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instructions retired which contains a load (Precise Event)",
+ "EventCode": "0xB",
+ "EventName": "MEM_INST_RETIRED.LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instructions retired which contains a store (Precise Event)",
+ "EventCode": "0xB",
+ "EventName": "MEM_INST_RETIRED.STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
+ "PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "10000",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "40000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "40000",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Load instructions retired with a data source of local DRAM or locally homed remote hitm (Precise Event)",
+ "EventCode": "0xF",
+ "EventName": "MEM_UNCORE_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "10000",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Load instructions retired that HIT modified data in sibling core (Precise Event)",
+ "EventCode": "0xF",
+ "EventName": "MEM_UNCORE_RETIRED.OTHER_CORE_L2_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "40000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Load instructions retired remote cache HIT data source (Precise Event)",
+ "EventCode": "0xF",
+ "EventName": "MEM_UNCORE_RETIRED.REMOTE_CACHE_LOCAL_HOME_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "20000",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Load instructions retired remote DRAM and remote home-remote cache HITM (Precise Event)",
+ "EventCode": "0xF",
+ "EventName": "MEM_UNCORE_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "10000",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Load instructions retired IO (Precise Event)",
+ "EventCode": "0xF",
+ "EventName": "MEM_UNCORE_RETIRED.UNCACHEABLE",
+ "PEBS": "1",
+ "SampleAfterValue": "4000",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Offcore L1 data cache writebacks",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
+ "SampleAfterValue": "100000",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Offcore requests blocked due to Super Queue full",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_SQ_FULL",
+ "SampleAfterValue": "100000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F11",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F11",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore data reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF11",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF11",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x111",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x111",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x211",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x211",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x411",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x411",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x711",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x711",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4711",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4711",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1811",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1811",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3811",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3811",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x811",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x811",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F44",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F44",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore code reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF44",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF44",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x144",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x144",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x244",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x244",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x444",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x444",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x744",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x744",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4744",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4744",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1844",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1844",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3844",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3844",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x844",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x844",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7FFF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7FFF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore requests",
"EventCode": "0xB7",
- "MSRValue": "0xFFFF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFFFF",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x80FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x80FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x1FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x2FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x4FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x7FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x47FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x47FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x18FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x18FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x38FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x38FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x10FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x10FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x8FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F22",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F22",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore RFO requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF22",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF22",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x122",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x122",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x222",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x222",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x422",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x422",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x722",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x722",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4722",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4722",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1822",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1822",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3822",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3822",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x822",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x822",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F08",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F08",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore writebacks",
"EventCode": "0xB7",
- "MSRValue": "0xFF08",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF08",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore writebacks",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
"EventCode": "0xB7",
- "MSRValue": "0x8008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x108",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x108",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x408",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x408",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x708",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x708",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4708",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4708",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1808",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1808",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3808",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3808",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x808",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x808",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F77",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F77",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore code or data read requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF77",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF77",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore code or data read requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
"EventCode": "0xB7",
- "MSRValue": "0x8077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x177",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x177",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x277",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x277",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x477",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x477",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x777",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x777",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4777",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4777",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1877",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1877",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3877",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3877",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x877",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x877",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = any cache_dram",
"EventCode": "0xB7",
- "MSRValue": "0x7F33",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F33",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any cache_dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = any location",
"EventCode": "0xB7",
- "MSRValue": "0xFF33",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF33",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any location",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x133",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x133",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x233",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x233",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x433",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x433",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = local cache",
"EventCode": "0xB7",
- "MSRValue": "0x733",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x733",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = local cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = local cache or dram",
"EventCode": "0xB7",
- "MSRValue": "0x4733",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4733",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = local cache or dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1833",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1833",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = remote cache or dram",
"EventCode": "0xB7",
- "MSRValue": "0x3833",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3833",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = remote cache or dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache ",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x833",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x833",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F03",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F03",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore demand data requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF03",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF03",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand data requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
"EventCode": "0xB7",
- "MSRValue": "0x8003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x103",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x103",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x203",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x203",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x403",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x403",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x703",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x703",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4703",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4703",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1803",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1803",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3803",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3803",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x803",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x803",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F01",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F01",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore demand data reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF01",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF01",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x101",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x101",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x201",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x201",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x401",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x401",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x701",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x701",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4701",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4701",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1801",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1801",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3801",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3801",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x801",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x801",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F04",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F04",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore demand code reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF04",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF04",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x104",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x104",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x204",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x204",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x404",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x404",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x704",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x704",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4704",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4704",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1804",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1804",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3804",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3804",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x804",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x804",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F02",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F02",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore demand RFO requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF02",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF02",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x102",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x102",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x202",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x202",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x402",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x402",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x702",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x702",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4702",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4702",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1802",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1802",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3802",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3802",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x802",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x802",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F80",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F80",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore other requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF80",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF80",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore other requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8080",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8080",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x180",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x180",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x280",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x280",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x480",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x480",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x780",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x780",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4780",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4780",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1880",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1880",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3880",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3880",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1080",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1080",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x880",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x880",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F30",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F30",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch data requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF30",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF30",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch data requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
"EventCode": "0xB7",
- "MSRValue": "0x8030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x130",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x130",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x230",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x230",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x430",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x430",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x730",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x730",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4730",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4730",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1830",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1830",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3830",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3830",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x830",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x830",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F10",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F10",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch data reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF10",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF10",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x110",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x110",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x210",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x210",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x410",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x410",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x710",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x710",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4710",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4710",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1810",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1810",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3810",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3810",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x810",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x810",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F40",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F40",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch code reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF40",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF40",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x140",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x140",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x240",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x240",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x440",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x440",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x740",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x740",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4740",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4740",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1840",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1840",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3840",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3840",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x840",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x840",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F20",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F20",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch RFO requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF20",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF20",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x120",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x120",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x220",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x220",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x420",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x420",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x720",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x720",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4720",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4720",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1820",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1820",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3820",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3820",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x820",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x820",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F70",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F70",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF70",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF70",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x170",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x170",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x270",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x270",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x470",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x470",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x770",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x770",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4770",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4770",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1870",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1870",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3870",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3870",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x870",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x870",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Super Queue lock splits across a cache line",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Loads delayed with at-Retirement block code",
+ "EventCode": "0x6",
+ "EventName": "STORE_BLOCKS.AT_RET",
+ "SampleAfterValue": "200000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cacheable loads delayed with L1D block code",
+ "EventCode": "0x6",
+ "EventName": "STORE_BLOCKS.L1D_BLOCK",
+ "SampleAfterValue": "200000",
+ "UMask": "0x8"
}
-]
\ No newline at end of file
+]
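
(Editorial note: the hunks in this file, like those in the files below, apply a purely mechanical normalization: JSON fields are re-emitted in alphabetical order, fields that the hunks consistently drop as redundant ("Counter", "Offcore") are removed, and the missing trailing newline is added. A minimal sketch of that transformation, assuming Python and a hypothetical invocation on one file; this is not necessarily the tool used to produce this patch:

    #!/usr/bin/env python3
    # Sketch: normalize a pmu-events JSON file the way these hunks do --
    # drop keys the patch removes everywhere ("Counter", "Offcore"),
    # re-emit the remaining keys alphabetically, and end with a newline.
    import json

    DROPPED_KEYS = {"Counter", "Offcore"}  # assumed redundant, per the hunks

    def normalize(path):
        with open(path) as f:
            events = json.load(f)
        cleaned = [
            {k: v for k, v in ev.items() if k not in DROPPED_KEYS}
            for ev in events
        ]
        with open(path, "w") as f:
            # sort_keys reproduces the alphabetical field order in the diff;
            # the explicit final newline matches the "no newline" fix.
            json.dump(cleaned, f, indent=4, sort_keys=True)
            f.write("\n")

    if __name__ == "__main__":
        # hypothetical path, shown for illustration only
        normalize("tools/perf/pmu-events/arch/x86/nehalemep/cache.json")

End of editorial note.)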
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
index 7d2f71a9dee3..c03f8990fa82 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/floating-point.json
@@ -1,229 +1,201 @@
[
{
- "PEBS": "1",
+ "BriefDescription": "X87 Floating point assists (Precise Event)",
"EventCode": "0xF7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FP_ASSIST.ALL",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating point assists (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)",
"EventCode": "0xF7",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "FP_ASSIST.INPUT",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)",
"EventCode": "0xF7",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "FP_ASSIST.OUTPUT",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "MMX Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "FP_COMP_OPS_EXE.MMX",
"SampleAfterValue": "2000000",
- "BriefDescription": "MMX Uops"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "SSE2 integer Uops",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "SSE* FP double precision Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE* FP double precision Uops"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "SSE and SSE2 FP Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "FP_COMP_OPS_EXE.SSE_FP",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE and SSE2 FP Uops"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "SSE FP packed Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE FP packed Uops"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "SSE FP scalar Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE FP scalar Uops"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "SSE* FP single precision Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE* FP single precision Uops"
- },
- {
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
- "SampleAfterValue": "2000000",
- "BriefDescription": "SSE2 integer Uops"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Computational floating-point operations executed",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000000",
- "BriefDescription": "Computational floating-point operations executed"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All Floating Point to and from MMX transitions",
"EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EventName": "FP_MMX_TRANS.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All Floating Point to and from MMX transitions"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Transitions from MMX to Floating Point instructions",
"EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FP_MMX_TRANS.TO_FP",
"SampleAfterValue": "2000000",
- "BriefDescription": "Transitions from MMX to Floating Point instructions"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Transitions from Floating Point to MMX instructions",
"EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "FP_MMX_TRANS.TO_MMX",
"SampleAfterValue": "2000000",
- "BriefDescription": "Transitions from Floating Point to MMX instructions"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "128 bit SIMD integer pack operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "SIMD_INT_128.PACK",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer pack operations"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "128 bit SIMD integer arithmetic operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "SIMD_INT_128.PACKED_ARITH",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer arithmetic operations"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "128 bit SIMD integer logical operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "SIMD_INT_128.PACKED_LOGICAL",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer logical operations"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "128 bit SIMD integer multiply operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SIMD_INT_128.PACKED_MPY",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer multiply operations"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "128 bit SIMD integer shift operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SIMD_INT_128.PACKED_SHIFT",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer shift operations"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "128 bit SIMD integer shuffle/move operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "SIMD_INT_128.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer shuffle/move operations"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "128 bit SIMD integer unpack operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "SIMD_INT_128.UNPACK",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer unpack operations"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "SIMD integer 64 bit pack operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "SIMD_INT_64.PACK",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit pack operations"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "SIMD integer 64 bit arithmetic operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "SIMD_INT_64.PACKED_ARITH",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit arithmetic operations"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "SIMD integer 64 bit logical operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "SIMD_INT_64.PACKED_LOGICAL",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit logical operations"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "SIMD integer 64 bit packed multiply operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SIMD_INT_64.PACKED_MPY",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit packed multiply operations"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "SIMD integer 64 bit shift operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SIMD_INT_64.PACKED_SHIFT",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit shift operations"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "SIMD integer 64 bit shuffle/move operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "SIMD_INT_64.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit shuffle/move operations"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "SIMD integer 64 bit unpack operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "SIMD_INT_64.UNPACK",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit unpack operations"
+ "UMask": "0x8"
}
-]
\ No newline at end of file
+]
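The bulk of the change in every file touched by this patch is the same mechanical normalization visible in the hunks above and below: the per-event JSON keys are re-emitted in alphabetical order, the redundant "Counter" field (and, for the offcore events, "Offcore") is dropped along with zero-valued defaults such as "UMask": "0x0", the events themselves stay sorted by "EventName", and each file now ends with a trailing newline. A minimal Python sketch of that normalization follows; it is an illustration only, not the in-tree tooling, the file path is just an example, and it does not model the handful of events that also move between category files or are newly added.

#!/usr/bin/env python3
# Illustrative re-normalization of one pmu-events JSON file (not the kernel's tooling).
import json
import sys

DROP_ALWAYS = {"Counter", "Offcore"}    # fields the patch removes everywhere
DROP_IF_ZERO = {"UMask", "EventCode"}   # dropped when they are just "0x0"

def normalize(events):
    kept = []
    for ev in events:
        ev = {k: v for k, v in ev.items()
              if k not in DROP_ALWAYS
              and not (k in DROP_IF_ZERO and v in ("0x0", "0"))}
        kept.append(ev)
    # Keep events sorted by name so regenerated files diff cleanly.
    return sorted(kept, key=lambda e: e.get("EventName", ""))

if __name__ == "__main__":
    path = sys.argv[1]  # e.g. tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
    with open(path) as f:
        events = json.load(f)
    with open(path, "w") as f:
        # sort_keys gives the alphabetical key order seen in the hunks;
        # the exact indentation of the in-tree files may differ from indent=4.
        json.dump(normalize(events), f, indent=4, sort_keys=True)
        f.write("\n")   # the old files lacked a trailing newline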
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json b/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
index e5e21e03444d..f7f28510e3ae 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/frontend.json
@@ -1,26 +1,23 @@
[
{
+ "BriefDescription": "Instructions decoded",
"EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MACRO_INSTS.DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions decoded"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Macro-fused instructions decoded",
"EventCode": "0xA6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MACRO_INSTS.FUSIONS_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Macro-fused instructions decoded"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Two Uop instructions decoded",
"EventCode": "0x19",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TWO_UOP_INSTS_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Two Uop instructions decoded"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
index f914a4525b65..f810880a295e 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/memory.json
@@ -1,739 +1,605 @@
[
{
+ "BriefDescription": "Offcore data reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF811",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF811",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF844",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF844",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x60FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x60FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF8FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF8FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x40FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x40FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x20FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x20FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF822",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF822",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF808",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF808",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF877",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF877",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = any LLC miss",
"EventCode": "0xB7",
- "MSRValue": "0xF833",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF833",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any LLC miss",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x4033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF803",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF803",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF801",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF801",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF804",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF804",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF802",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF802",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6080",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6080",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF880",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF880",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2080",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2080",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF830",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF830",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF810",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF810",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF840",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF840",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF820",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF820",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF870",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF870",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/other.json b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
index af0860622445..fb706cb51832 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/other.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/other.json
@@ -1,210 +1,128 @@
[
{
- "EventCode": "0xE8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BPU_CLEARS.EARLY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Early Branch Prediciton Unit clears"
- },
- {
- "EventCode": "0xE8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BPU_CLEARS.LATE",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Late Branch Prediction Unit clears"
- },
- {
- "EventCode": "0xE5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BPU_MISSED_CALL_RET",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Branch prediction unit missed call or return"
- },
- {
+ "BriefDescription": "ES segment renames",
"EventCode": "0xD5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ES_REG_RENAMES",
"SampleAfterValue": "2000000",
- "BriefDescription": "ES segment renames"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "I/O transactions",
"EventCode": "0x6C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IO_TRANSACTIONS",
"SampleAfterValue": "2000000",
- "BriefDescription": "I/O transactions"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1I instruction fetch stall cycles",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L1I.CYCLES_STALLED",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch stall cycles"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1I instruction fetch hits",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L1I.HITS",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch hits"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1I instruction fetch misses",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L1I.MISSES",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch misses"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1I Instruction fetches",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EventName": "L1I.READS",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I Instruction fetches"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Large ITLB hit",
"EventCode": "0x82",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LARGE_ITLB.HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "Large ITLB hit"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All loads dispatched",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "LOAD_DISPATCH.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All loads dispatched"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Loads dispatched from the MOB",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "LOAD_DISPATCH.MOB",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched from the MOB"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Loads dispatched that bypass the MOB",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LOAD_DISPATCH.RS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched that bypass the MOB"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads dispatched from stage 305",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "LOAD_DISPATCH.RS_DELAYED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched from stage 305"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "False dependencies due to partial address aliasing",
"EventCode": "0x7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "PARTIAL_ADDRESS_ALIAS",
"SampleAfterValue": "200000",
- "BriefDescription": "False dependencies due to partial address aliasing"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "RAT_STALLS.ANY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "All RAT stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RAT_STALLS.FLAGS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Flag stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RAT_STALLS.REGISTERS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Partial register stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RAT_STALLS.ROB_READ_PORT",
- "SampleAfterValue": "2000000",
- "BriefDescription": "ROB read port stalls cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RAT_STALLS.SCOREBOARD",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Scoreboard stall cycles"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All Store buffer stall cycles",
"EventCode": "0x4",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "SB_DRAIN.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All Store buffer stall cycles"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Segment rename stall cycles",
"EventCode": "0xD4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SEG_RENAME_STALLS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Segment rename stall cycles"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Thread responded HIT to snoop",
"EventCode": "0xB8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SNOOP_RESPONSE.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HIT to snoop"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Thread responded HITE to snoop",
"EventCode": "0xB8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SNOOP_RESPONSE.HITE",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HITE to snoop"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Thread responded HITM to snoop",
"EventCode": "0xB8",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "SNOOP_RESPONSE.HITM",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HITM to snoop"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Super Queue full stall cycles",
"EventCode": "0xF6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SQ_FULL_STALL_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Super Queue full stall cycles"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json b/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
index 41006ddcd893..c45f2ffa861e 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/pipeline.json
@@ -1,881 +1,828 @@
[
{
+ "BriefDescription": "Cycles the divider is busy",
"EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ARITH.CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles the divider is busy"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Divide Operations executed",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0x14",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ARITH.DIV",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Divide Operations executed",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Multiply operations executed",
"EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "ARITH.MUL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Multiply operations executed"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "BACLEAR asserted with bad target address",
"EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BACLEAR.BAD_TARGET",
"SampleAfterValue": "2000000",
- "BriefDescription": "BACLEAR asserted with bad target address"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "BACLEAR asserted, regardless of cause",
"EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BACLEAR.CLEAR",
"SampleAfterValue": "2000000",
- "BriefDescription": "BACLEAR asserted, regardless of cause "
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Instruction queue forced BACLEAR",
"EventCode": "0xA7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BACLEAR_FORCE_IQ",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instruction queue forced BACLEAR"
+ "UMask": "0x1"
+ },
+ {
+    "BriefDescription": "Early Branch Prediction Unit clears",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Late Branch Prediction Unit clears",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Branch prediction unit missed call or return",
+ "EventCode": "0xE5",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Branch instructions decoded",
"EventCode": "0xE0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Branch instructions decoded"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Branch instructions executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x7f",
"EventName": "BR_INST_EXEC.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "Branch instructions executed"
+ "UMask": "0x7f"
},
{
+ "BriefDescription": "Conditional branch instructions executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BR_INST_EXEC.COND",
"SampleAfterValue": "200000",
- "BriefDescription": "Conditional branch instructions executed"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Unconditional branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BR_INST_EXEC.DIRECT",
"SampleAfterValue": "200000",
- "BriefDescription": "Unconditional branches executed"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Unconditional call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Unconditional call branches executed"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Indirect call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect call branches executed"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Indirect non call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect non call branches executed"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
"EventName": "BR_INST_EXEC.NEAR_CALLS",
"SampleAfterValue": "20000",
- "BriefDescription": "Call branches executed"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "All non call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "BR_INST_EXEC.NON_CALLS",
"SampleAfterValue": "200000",
- "BriefDescription": "All non call branches executed"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Indirect return branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "BR_INST_EXEC.RETURN_NEAR",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect return branches executed"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Taken branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "BR_INST_EXEC.TAKEN",
"SampleAfterValue": "200000",
- "BriefDescription": "Taken branches executed"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired branch instructions (Precise Event)",
"EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired branch instructions (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired conditional branch instructions (Precise Event)",
"EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired conditional branch instructions (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired near call instructions (Precise Event)",
"EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "Retired near call instructions (Precise Event)"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Mispredicted branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x7f",
"EventName": "BR_MISP_EXEC.ANY",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted branches executed"
+ "UMask": "0x7f"
},
{
+ "BriefDescription": "Mispredicted conditional branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BR_MISP_EXEC.COND",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted conditional branches executed"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Mispredicted unconditional branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BR_MISP_EXEC.DIRECT",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted unconditional branches executed"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Mispredicted non call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted non call branches executed"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Mispredicted indirect call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted indirect call branches executed"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Mispredicted indirect non call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted indirect non call branches executed"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Mispredicted call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
"EventName": "BR_MISP_EXEC.NEAR_CALLS",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted call branches executed"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Mispredicted non call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "BR_MISP_EXEC.NON_CALLS",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted non call branches executed"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Mispredicted return branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "BR_MISP_EXEC.RETURN_NEAR",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted return branches executed"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Mispredicted taken branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "BR_MISP_EXEC.TAKEN",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted taken branches executed"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
+ "BriefDescription": "Mispredicted near retired calls (Precise Event)",
"EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted near retired calls (Precise Event)"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0",
- "Counter": "Fixed counter 3",
- "UMask": "0x0",
+ "BriefDescription": "Reference cycles when thread is not halted (fixed counter)",
"EventName": "CPU_CLK_UNHALTED.REF",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Reference cycles when thread is not halted (fixed counter)"
+ "SampleAfterValue": "2000000"
},
{
+    "BriefDescription": "Reference base clock (133 MHz) cycles when thread is not halted (programmable counter)",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPU_CLK_UNHALTED.REF_P",
"SampleAfterValue": "100000",
- "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)"
+ "UMask": "0x1"
},
{
- "EventCode": "0x0",
- "Counter": "Fixed counter 2",
- "UMask": "0x0",
+ "BriefDescription": "Cycles when thread is not halted (fixed counter)",
"EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when thread is not halted (fixed counter)"
+ "SampleAfterValue": "2000000"
},
{
+ "BriefDescription": "Cycles when thread is not halted (programmable counter)",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when thread is not halted (programmable counter)"
+ "SampleAfterValue": "2000000"
},
{
+ "BriefDescription": "Total CPU cycles",
+ "CounterMask": "2",
"EventCode": "0x3C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
"EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Total CPU cycles",
- "CounterMask": "2"
+ "Invert": "1",
+ "SampleAfterValue": "2000000"
},
{
+ "BriefDescription": "Any Instruction Length Decoder stall cycles",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
"EventName": "ILD_STALL.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Any Instruction Length Decoder stall cycles"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "Instruction Queue full stall cycles",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instruction Queue full stall cycles"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Length Change Prefix stall cycles",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000000",
- "BriefDescription": "Length Change Prefix stall cycles"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Stall cycles due to BPU MRU bypass",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "ILD_STALL.MRU",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stall cycles due to BPU MRU bypass"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Regen stall cycles",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "ILD_STALL.REGEN",
"SampleAfterValue": "2000000",
- "BriefDescription": "Regen stall cycles"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Instructions that must be decoded by decoder 0",
"EventCode": "0x18",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "INST_DECODED.DEC0",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions that must be decoded by decoder 0"
+ "UMask": "0x1"
},
{
- "EventCode": "0x1E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_QUEUE_WRITE_CYCLES",
+ "BriefDescription": "Instructions written to instruction queue.",
+ "EventCode": "0x17",
+ "EventName": "INST_QUEUE_WRITES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles instructions are written to the instruction queue"
+ "UMask": "0x1"
},
{
- "EventCode": "0x17",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_QUEUE_WRITES",
+ "BriefDescription": "Cycles instructions are written to the instruction queue",
+ "EventCode": "0x1E",
+ "EventName": "INST_QUEUE_WRITE_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions written to instruction queue."
+ "UMask": "0x1"
},
{
- "EventCode": "0x0",
- "Counter": "Fixed counter 1",
- "UMask": "0x0",
+ "BriefDescription": "Instructions retired (fixed counter)",
"EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired (fixed counter)"
+ "SampleAfterValue": "2000000"
},
{
- "PEBS": "1",
+ "BriefDescription": "Instructions retired (Programmable counter and Precise Event)",
"EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired (Programmable counter and Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired MMX instructions (Precise Event)",
"EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "INST_RETIRED.MMX",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired MMX instructions (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16",
"EventCode": "0xC0",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "INST_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Total cycles (Precise Event)",
- "CounterMask": "16"
+ "CounterMask": "16",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "Invert": "1",
+ "PEBS": "2",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired floating-point operations (Precise Event)",
"EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "INST_RETIRED.X87",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired floating-point operations (Precise Event)"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Load operations conflicting with software prefetches",
"EventCode": "0x4C",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "LOAD_HIT_PRE",
"SampleAfterValue": "200000",
- "BriefDescription": "Load operations conflicting with software prefetches"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when uops were delivered by the LSD",
+ "CounterMask": "1",
"EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LSD.ACTIVE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when uops were delivered by the LSD",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles no uops were delivered by the LSD",
+ "CounterMask": "1",
"EventCode": "0xA8",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LSD.INACTIVE",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no uops were delivered by the LSD",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loops that can't stream from the instruction queue",
"EventCode": "0x20",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LSD_OVERFLOW",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loops that can't stream from the instruction queue"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles machine clear asserted",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "20000",
- "BriefDescription": "Cycles machine clear asserted"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MACHINE_CLEARS.MEM_ORDER",
"SampleAfterValue": "20000",
- "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Self-Modifying Code detected",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20000",
- "BriefDescription": "Self-Modifying Code detected"
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "All RAT stall cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xf"
+ },
+ {
+ "BriefDescription": "Flag stall cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Partial register stall cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ROB read port stalls cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Scoreboard stall cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Resource related stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Resource related stall cycles"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "FPU control word write stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "RESOURCE_STALLS.FPCW",
"SampleAfterValue": "2000000",
- "BriefDescription": "FPU control word write stall cycles"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Load buffer stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "RESOURCE_STALLS.LOAD",
"SampleAfterValue": "2000000",
- "BriefDescription": "Load buffer stall cycles"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "MXCSR rename stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "RESOURCE_STALLS.MXCSR",
"SampleAfterValue": "2000000",
- "BriefDescription": "MXCSR rename stall cycles"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Other Resource related stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "RESOURCE_STALLS.OTHER",
"SampleAfterValue": "2000000",
- "BriefDescription": "Other Resource related stall cycles"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "ROB full stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "RESOURCE_STALLS.ROB_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "ROB full stall cycles"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Reservation Station full stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "RESOURCE_STALLS.RS_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Reservation Station full stall cycles"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Store buffer stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "RESOURCE_STALLS.STORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Store buffer stall cycles"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)"
- },
- {
- "EventCode": "0xDB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOP_UNFUSION",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Uop unfusions due to FP exceptions"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Stack pointer instructions decoded",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "UOPS_DECODED.ESP_FOLDING",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stack pointer instructions decoded"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Stack pointer sync operations",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "UOPS_DECODED.ESP_SYNC",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stack pointer sync operations"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Uops decoded by Microcode Sequencer",
+ "CounterMask": "1",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops decoded by Microcode Sequencer",
- "CounterMask": "1"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles no Uops are decoded",
+ "CounterMask": "1",
"EventCode": "0xD1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_DECODED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops are decoded",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops executed on any port (core count)",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops executed on any port (core count)",
- "CounterMask": "1"
+ "UMask": "0x3f"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
- "SampleAfterValue": "2000000",
"BriefDescription": "Uops executed on any port (core count)",
"CounterMask": "1",
- "EdgeDetect": "1"
- },
- {
+ "EdgeDetect": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on ports 0-4 (core count)",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "UMask": "0x3f"
},
{
+ "AnyThread": "1",
+ "BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on any port (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
+ "AnyThread": "1",
+ "BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "CounterMask": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3f"
+ },
+ {
"AnyThread": "1",
+ "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
+ "BriefDescription": "Uops executed on port 0",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_EXECUTED.PORT0",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 0"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Uops issued on ports 0, 1 or 5",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "UOPS_EXECUTED.PORT015",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued on ports 0, 1 or 5"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "CounterMask": "1",
"EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
- "CounterMask": "1"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Uops executed on port 1",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.PORT1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 1"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.PORT2_CORE",
+ "BriefDescription": "Uops issued on ports 2, 3 or 4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.PORT234_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 2 (core count)"
+ "UMask": "0x80"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.PORT234_CORE",
+ "BriefDescription": "Uops executed on port 2 (core count)",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.PORT2_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued on ports 2, 3 or 4"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"AnyThread": "1",
+ "BriefDescription": "Uops executed on port 3 (core count)",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT3_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 3 (core count)"
+ "UMask": "0x8"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"AnyThread": "1",
+ "BriefDescription": "Uops executed on port 4 (core count)",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT4_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 4 (core count)"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Uops executed on port 5",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "UOPS_EXECUTED.PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 5"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Uops issued",
"EventCode": "0xE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
+ "BriefDescription": "Cycles no Uops were issued on any thread",
+ "CounterMask": "1",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops were issued on any thread",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops were issued on either thread",
+ "CounterMask": "1",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops were issued on either thread",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Fused Uops issued",
"EventCode": "0xE",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_ISSUED.FUSED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Fused Uops issued"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles no Uops were issued",
+ "CounterMask": "1",
"EventCode": "0xE",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops were issued",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles Uops are being retired",
+ "CounterMask": "1",
"EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops are being retired",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Uops retired (Precise Event)",
"EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_RETIRED.ANY",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops retired (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Macro-fused Uops retired (Precise Event)",
"EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Macro-fused Uops retired (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retirement slots used (Precise Event)",
"EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retirement slots used (Precise Event)"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "CounterMask": "1",
"EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "CounterMask": "16",
"EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
- "CounterMask": "16"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "BriefDescription": "Uop unfusions due to FP exceptions",
+ "EventCode": "0xDB",
+ "EventName": "UOP_UNFUSION",
"SampleAfterValue": "2000000",
- "BriefDescription": "Total cycles (Precise Event)",
- "CounterMask": "16"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
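
The hunks above all apply the same mechanical rewrite: each event object keeps its fields but loses "Counter" (and, for the OFFCORE_RESPONSE entries further below, "Offcore"), the remaining keys come out in alphabetical order, the events themselves are ordered by EventName, and the file gains the final newline it was missing. A minimal sketch of that kind of normalization pass, assuming Python 3 and the standard json module only; this is not the converter actually used to regenerate these files, and the exact indentation of the real JSON may differ:

import json
import sys

# Fields that the removed lines above show being dropped from every event.
DROPPED_FIELDS = {"Counter", "Offcore"}

def normalize(path):
    with open(path) as f:
        events = json.load(f)
    cleaned = [{k: v for k, v in ev.items() if k not in DROPPED_FIELDS}
               for ev in events]
    # The new hunks list events alphabetically by EventName.
    cleaned.sort(key=lambda ev: ev.get("EventName", ""))
    with open(path, "w") as f:
        # sort_keys gives the alphabetical key order visible in the '+' lines;
        # the indent value is a guess, the real files' formatting may differ.
        json.dump(cleaned, f, indent=4, sort_keys=True)
        f.write("\n")  # the patch also adds the previously missing final newline

if __name__ == "__main__":
    for p in sys.argv[1:]:
        normalize(p)
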
diff --git a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
index 0596094e0ee9..c434cd4ef4f1 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemep/virtual-memory.json
@@ -1,109 +1,96 @@
[
{
+ "BriefDescription": "DTLB load misses",
"EventCode": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_LOAD_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load misses"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "DTLB load miss caused by low part of address",
"EventCode": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "DTLB_LOAD_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load miss caused by low part of address"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "DTLB second level hit",
"EventCode": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "DTLB second level hit"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "DTLB load miss page walks complete",
"EventCode": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load miss page walks complete"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "DTLB misses",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB misses"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "DTLB first level misses but second level hit",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "DTLB_MISSES.STLB_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB first level misses but second level hit"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "DTLB miss page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB miss page walks"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "ITLB flushes",
"EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ITLB_FLUSH",
"SampleAfterValue": "2000000",
- "BriefDescription": "ITLB flushes"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISS_RETIRED",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "ITLB miss",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ITLB_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "ITLB miss"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "ITLB miss page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "ITLB miss page walks"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)",
+ "EventCode": "0xC8",
+ "EventName": "ITLB_MISS_RETIRED",
"PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
"EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that miss the DTLB (Precise Event)"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired stores that miss the DTLB (Precise Event)",
"EventCode": "0xC",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MEM_STORE_RETIRED.DTLB_MISS",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired stores that miss the DTLB (Precise Event)"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
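
The records above carry the same schema as the rest of the pmu-events tree (EventCode, UMask, plus optional CounterMask, Invert, EdgeDetect, AnyThread), and those fields map onto the x86 core PMU's format terms, so any single entry can also be tried as a raw perf event. A small sketch of that mapping, assuming Python 3; the helper name is made up for illustration, and the mapping shown is the conventional event=/umask=/cmask=/inv=/edge=/any= correspondence rather than anything defined in this patch:

# Hypothetical helper, not part of perf: turn one JSON record into the usual
# raw-event syntax accepted by the x86 core PMU (event=, umask=, cmask=, inv=,
# edge=, any=).
def to_raw_event(ev):
    terms = ["event=" + ev["EventCode"], "umask=" + ev["UMask"]]
    if "CounterMask" in ev:
        terms.append("cmask=" + ev["CounterMask"])
    if ev.get("Invert") == "1":
        terms.append("inv")
    if ev.get("EdgeDetect") == "1":
        terms.append("edge")
    if ev.get("AnyThread") == "1":
        terms.append("any")
    return "cpu/" + ",".join(terms) + "/"

# DTLB_LOAD_MISSES.ANY from the hunk above (EventCode 0x8, UMask 0x1):
print(to_raw_event({"EventCode": "0x8", "UMask": "0x1"}))
# prints cpu/event=0x8,umask=0x1/ which can be used as
#   perf stat -e cpu/event=0x8,umask=0x1/ -a sleep 1
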
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
index 21a0f8fd057e..a4142cd2ca86 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/cache.json
@@ -1,3184 +1,2665 @@
[
{
+ "BriefDescription": "Cycles L1D locked",
"EventCode": "0x63",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "CACHE_LOCK_CYCLES.L1D",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles L1D locked"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles L1D and L2 locked",
"EventCode": "0x63",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "CACHE_LOCK_CYCLES.L1D_L2",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles L1D and L2 locked"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1D cache lines replaced in M state",
"EventCode": "0x51",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D.M_EVICT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D cache lines replaced in M state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1D cache lines allocated in the M state",
"EventCode": "0x51",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D.M_REPL",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D cache lines allocated in the M state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1D snoop eviction of cache lines in M state",
"EventCode": "0x51",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "L1D.M_SNOOP_EVICT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D snoop eviction of cache lines in M state"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 data cache lines allocated",
"EventCode": "0x51",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D.REPL",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache lines allocated"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All references to the L1 data cache",
"EventCode": "0x43",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_ALL_REF.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All references to the L1 data cache"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1 data cacheable reads and writes",
"EventCode": "0x43",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_ALL_REF.CACHEABLE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cacheable reads and writes"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1 data cache read in E state",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D_CACHE_LD.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 data cache read in I state (misses)",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_CACHE_LD.I_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in I state (misses)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1 data cache reads",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "L1D_CACHE_LD.M_STATE",
+ "EventName": "L1D_CACHE_LD.MESI",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in M state"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L1 data cache read in M state",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0xf",
- "EventName": "L1D_CACHE_LD.MESI",
+ "EventName": "L1D_CACHE_LD.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache reads"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 data cache read in S state",
"EventCode": "0x40",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_CACHE_LD.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache read in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1 data cache load locks in E state",
"EventCode": "0x42",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D_CACHE_LOCK.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 data cache load lock hits",
"EventCode": "0x42",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_CACHE_LOCK.HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load lock hits"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1 data cache load locks in M state",
"EventCode": "0x42",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "L1D_CACHE_LOCK.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in M state"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 data cache load locks in S state",
"EventCode": "0x42",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_CACHE_LOCK.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache load locks in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1D load lock accepted in fill buffer",
"EventCode": "0x53",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_CACHE_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D load lock accepted in fill buffer"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1D prefetch load lock accepted in fill buffer",
"EventCode": "0x52",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_CACHE_PREFETCH_LOCK_FB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1D prefetch load lock accepted in fill buffer"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1 data cache stores in E state",
"EventCode": "0x41",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D_CACHE_ST.E_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 data cache stores in M state",
"EventCode": "0x41",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "L1D_CACHE_ST.M_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in M state"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 data cache stores in S state",
"EventCode": "0x41",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_CACHE_ST.S_STATE",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1 data cache stores in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1D hardware prefetch misses",
"EventCode": "0x4E",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "L1D_PREFETCH.MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch misses"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1D hardware prefetch requests",
"EventCode": "0x4E",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "L1D_PREFETCH.REQUESTS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch requests"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1D hardware prefetch requests triggered",
"EventCode": "0x4E",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "L1D_PREFETCH.TRIGGERS",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D hardware prefetch requests triggered"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 writebacks to L2 in E state",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L1D_WB_L2.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1 writebacks to L2 in I state (misses)",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L1D_WB_L2.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in I state (misses)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All L1 writebacks to L2",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L1D_WB_L2.M_STATE",
+ "EventName": "L1D_WB_L2.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in M state"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L1 writebacks to L2 in M state",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L1D_WB_L2.MESI",
+ "EventName": "L1D_WB_L2.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All L1 writebacks to L2"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L1 writebacks to L2 in S state",
"EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L1D_WB_L2.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L1 writebacks to L2 in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "All L2 data requests",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
"EventName": "L2_DATA_RQSTS.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 data requests"
+ "UMask": "0xff"
},
{
+ "BriefDescription": "L2 data demand loads in E state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_DATA_RQSTS.DEMAND.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L2 data demand loads in I state (misses)",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_DATA_RQSTS.DEMAND.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in I state (misses)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 data demand requests",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
+ "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in M state"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L2 data demand loads in M state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_DATA_RQSTS.DEMAND.MESI",
+ "EventName": "L2_DATA_RQSTS.DEMAND.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand requests"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 data demand loads in S state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_DATA_RQSTS.DEMAND.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data demand loads in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 data prefetches in E state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_DATA_RQSTS.PREFETCH.E_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in E state"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "L2 data prefetches in the I state (misses)",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_DATA_RQSTS.PREFETCH.I_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in the I state (misses)"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "All L2 data prefetches",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in M state"
+ "UMask": "0xf0"
},
{
+ "BriefDescription": "L2 data prefetches in M state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "L2_DATA_RQSTS.PREFETCH.MESI",
+ "EventName": "L2_DATA_RQSTS.PREFETCH.M_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 data prefetches"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "L2 data prefetches in the S state",
"EventCode": "0x26",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_DATA_RQSTS.PREFETCH.S_STATE",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 data prefetches in the S state"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "L2 lines alloacated",
"EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "L2_LINES_IN.ANY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines alloacated"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "L2 lines allocated in the E state",
"EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_LINES_IN.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines allocated in the E state"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L2 lines allocated in the S state",
"EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_LINES_IN.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines allocated in the S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 lines evicted",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
"EventName": "L2_LINES_OUT.ANY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L2 lines evicted by a demand request",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_LINES_OUT.DEMAND_CLEAN",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted by a demand request"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 modified lines evicted by a demand request",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_LINES_OUT.DEMAND_DIRTY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 modified lines evicted by a demand request"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 lines evicted by a prefetch request",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_LINES_OUT.PREFETCH_CLEAN",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 lines evicted by a prefetch request"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L2 modified lines evicted by a prefetch request",
"EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "L2_LINES_OUT.PREFETCH_DIRTY",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 modified lines evicted by a prefetch request"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 instruction fetches",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_RQSTS.IFETCH_HIT",
+ "EventName": "L2_RQSTS.IFETCHES",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch hits"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "L2 instruction fetch hits",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_RQSTS.IFETCH_MISS",
+ "EventName": "L2_RQSTS.IFETCH_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch misses"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "L2 instruction fetch misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.IFETCHES",
+ "EventName": "L2_RQSTS.IFETCH_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetches"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "L2 load hits",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_RQSTS.LD_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 load hits"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 load misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_RQSTS.LD_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 load misses"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EventName": "L2_RQSTS.LOADS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 requests"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "All L2 misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xaa",
"EventName": "L2_RQSTS.MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 misses"
+ "UMask": "0xaa"
},
{
+ "BriefDescription": "All L2 prefetches",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_RQSTS.PREFETCH_HIT",
+ "EventName": "L2_RQSTS.PREFETCHES",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch hits"
+ "UMask": "0xc0"
},
{
+ "BriefDescription": "L2 prefetch hits",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_RQSTS.PREFETCH_MISS",
+ "EventName": "L2_RQSTS.PREFETCH_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch misses"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "L2 prefetch misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.PREFETCHES",
+ "EventName": "L2_RQSTS.PREFETCH_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 prefetches"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "All L2 requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
"EventName": "L2_RQSTS.REFERENCES",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 requests"
+ "UMask": "0xff"
},
{
+ "BriefDescription": "L2 RFO requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "EventName": "L2_RQSTS.RFOS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO hits"
+ "UMask": "0xc"
},
{
+ "BriefDescription": "L2 RFO hits",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_RQSTS.RFO_MISS",
+ "EventName": "L2_RQSTS.RFO_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO misses"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L2 RFO misses",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.RFOS",
+ "EventName": "L2_RQSTS.RFO_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO requests"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "All L2 transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "L2_TRANSACTIONS.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All L2 transactions"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "L2 fill transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_TRANSACTIONS.FILL",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 fill transactions"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "L2 instruction fetch transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L2_TRANSACTIONS.IFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 instruction fetch transactions"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1D writeback to L2 transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_TRANSACTIONS.L1D_WB",
"SampleAfterValue": "200000",
- "BriefDescription": "L1D writeback to L2 transactions"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "L2 Load transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_TRANSACTIONS.LOAD",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 Load transactions"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L2 prefetch transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "L2_TRANSACTIONS.PREFETCH",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 prefetch transactions"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 RFO transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_TRANSACTIONS.RFO",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 RFO transactions"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L2 writeback to LLC transactions",
"EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_TRANSACTIONS.WB",
"SampleAfterValue": "200000",
- "BriefDescription": "L2 writeback to LLC transactions"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "L2 demand lock RFOs in E state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "L2_WRITE.LOCK.E_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in E state"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "All demand L2 lock RFOs that hit the cache",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xe0",
"EventName": "L2_WRITE.LOCK.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "All demand L2 lock RFOs that hit the cache"
+ "UMask": "0xe0"
},
{
+ "BriefDescription": "L2 demand lock RFOs in I state (misses)",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "L2_WRITE.LOCK.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in I state (misses)"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "All demand L2 lock RFOs",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_WRITE.LOCK.M_STATE",
+ "EventName": "L2_WRITE.LOCK.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in M state"
+ "UMask": "0xf0"
},
{
+ "BriefDescription": "L2 demand lock RFOs in M state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "L2_WRITE.LOCK.MESI",
+ "EventName": "L2_WRITE.LOCK.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All demand L2 lock RFOs"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "L2 demand lock RFOs in S state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "L2_WRITE.LOCK.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand lock RFOs in S state"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "All L2 demand store RFOs that hit the cache",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
"EventName": "L2_WRITE.RFO.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "All L2 demand store RFOs that hit the cache"
+ "UMask": "0xe"
},
{
+ "BriefDescription": "L2 demand store RFOs in I state (misses)",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L2_WRITE.RFO.I_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in I state (misses)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All L2 demand store RFOs",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_WRITE.RFO.M_STATE",
+ "EventName": "L2_WRITE.RFO.MESI",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in M state"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "L2 demand store RFOs in M state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_WRITE.RFO.MESI",
+ "EventName": "L2_WRITE.RFO.M_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "All L2 demand store RFOs"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "L2 demand store RFOs in S state",
"EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L2_WRITE.RFO.S_STATE",
"SampleAfterValue": "100000",
- "BriefDescription": "L2 demand store RFOs in S state"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Longest latency cache miss",
"EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
"EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100000",
- "BriefDescription": "Longest latency cache miss"
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Longest latency cache reference",
"EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
"EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "200000",
- "BriefDescription": "Longest latency cache reference"
+ "UMask": "0x4f"
},
{
- "PEBS": "1",
- "EventCode": "0xB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_INST_RETIRED.LOADS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired which contains a load (Precise Event)"
- },
- {
- "PEBS": "1",
+ "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)",
"EventCode": "0xB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_INST_RETIRED.STORES",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired which contains a store (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
- "SampleAfterValue": "10000",
- "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
- "SampleAfterValue": "40000",
- "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)"
- },
- {
- "PEBS": "1",
- "EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
- "SampleAfterValue": "40000",
- "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)"
- },
- {
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
- "SampleAfterValue": "100000",
- "BriefDescription": "Offcore L1 data cache writebacks"
- },
- {
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_SQ_FULL",
- "SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests blocked due to Super Queue full"
- },
- {
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Super Queue lock splits across a cache line"
- },
- {
- "EventCode": "0x6",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "STORE_BLOCKS.AT_RET",
- "SampleAfterValue": "200000",
- "BriefDescription": "Loads delayed with at-Retirement block code"
- },
- {
- "EventCode": "0x6",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "STORE_BLOCKS.L1D_BLOCK",
- "SampleAfterValue": "200000",
- "BriefDescription": "Cacheable loads delayed with L1D block code"
- },
- {
- "PEBS": "2",
- "EventCode": "0xB",
- "MSRValue": "0x0",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_0",
"MSRIndex": "0x3F6",
+ "PEBS": "2",
"SampleAfterValue": "2000000",
- "BriefDescription": "Memory instructions retired above 0 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x400",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_1024",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
"SampleAfterValue": "100",
- "BriefDescription": "Memory instructions retired above 1024 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x80",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_128",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
"SampleAfterValue": "1000",
- "BriefDescription": "Memory instructions retired above 128 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x10",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
"SampleAfterValue": "10000",
- "BriefDescription": "Memory instructions retired above 16 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x4000",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_16384",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x4000",
+ "PEBS": "2",
"SampleAfterValue": "5",
- "BriefDescription": "Memory instructions retired above 16384 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x800",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_2048",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
"SampleAfterValue": "50",
- "BriefDescription": "Memory instructions retired above 2048 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x100",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_256",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
"SampleAfterValue": "500",
- "BriefDescription": "Memory instructions retired above 256 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x20",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
"SampleAfterValue": "5000",
- "BriefDescription": "Memory instructions retired above 32 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x8000",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_32768",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8000",
+ "PEBS": "2",
"SampleAfterValue": "3",
- "BriefDescription": "Memory instructions retired above 32768 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x4",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
"SampleAfterValue": "50000",
- "BriefDescription": "Memory instructions retired above 4 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x1000",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_4096",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x1000",
+ "PEBS": "2",
"SampleAfterValue": "20",
- "BriefDescription": "Memory instructions retired above 4096 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x200",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_512",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
"SampleAfterValue": "200",
- "BriefDescription": "Memory instructions retired above 512 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x40",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_64",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
"SampleAfterValue": "2000",
- "BriefDescription": "Memory instructions retired above 64 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x8",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
"SampleAfterValue": "20000",
- "BriefDescription": "Memory instructions retired above 8 clocks (Precise Event)"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
+ "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)",
"EventCode": "0xB",
- "MSRValue": "0x2000",
- "Counter": "3",
- "UMask": "0x10",
"EventName": "MEM_INST_RETIRED.LATENCY_ABOVE_THRESHOLD_8192",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x2000",
+ "PEBS": "2",
"SampleAfterValue": "10",
- "BriefDescription": "Memory instructions retired above 8192 clocks (Precise Event)"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instructions retired which contains a load (Precise Event)",
+ "EventCode": "0xB",
+ "EventName": "MEM_INST_RETIRED.LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instructions retired which contains a store (Precise Event)",
+ "EventCode": "0xB",
+ "EventName": "MEM_INST_RETIRED.STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired loads that miss L1D and hit an previously allocated LFB (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.HIT_LFB",
+ "PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired loads that hit the L1 data cache (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.L1D_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired loads that hit the L2 cache (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired loads that miss the LLC cache (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.LLC_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "10000",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired loads that hit valid versions in the LLC cache (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.LLC_UNSHARED_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "40000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired loads that hit sibling core's L2 in modified or unmodified states (Precise Event)",
+ "EventCode": "0xCB",
+ "EventName": "MEM_LOAD_RETIRED.OTHER_CORE_L2_HIT_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "40000",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Offcore L1 data cache writebacks",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.L1D_WRITEBACK",
+ "SampleAfterValue": "100000",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Offcore requests blocked due to Super Queue full",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_SQ_FULL",
+ "SampleAfterValue": "100000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F11",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F11",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore data reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF11",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF11",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x111",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x111",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x211",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x211",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x411",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x411",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x711",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x711",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4711",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4711",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1811",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1811",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3811",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3811",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x811",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x811",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F44",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F44",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore code reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF44",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF44",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x144",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x144",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x244",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x244",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x444",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x444",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x744",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x744",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4744",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4744",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1844",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1844",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3844",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3844",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x844",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x844",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7FFF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7FFF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore requests",
"EventCode": "0xB7",
- "MSRValue": "0xFFFF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFFFF",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x80FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x80FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x1FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x2FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x4FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x7FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x47FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x47FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x18FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x18FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x38FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x38FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x10FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x10FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x8FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F22",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F22",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore RFO requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF22",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF22",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x122",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x122",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x222",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x222",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x422",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x422",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x722",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x722",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4722",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4722",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1822",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1822",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3822",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3822",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x822",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x822",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F08",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F08",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore writebacks",
"EventCode": "0xB7",
- "MSRValue": "0xFF08",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF08",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore writebacks",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
"EventCode": "0xB7",
- "MSRValue": "0x8008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x108",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x108",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x408",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x408",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x708",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x708",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4708",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4708",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1808",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1808",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3808",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3808",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x808",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x808",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F77",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F77",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore code or data read requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF77",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF77",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore code or data read requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
"EventCode": "0xB7",
- "MSRValue": "0x8077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x177",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x177",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x277",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x277",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x477",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x477",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x777",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x777",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4777",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4777",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1877",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1877",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3877",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3877",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x877",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x877",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = any cache_dram",
"EventCode": "0xB7",
- "MSRValue": "0x7F33",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F33",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any cache_dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = any location",
"EventCode": "0xB7",
- "MSRValue": "0xFF33",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF33",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any location",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x133",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x133",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x233",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x233",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x433",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x433",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = local cache",
"EventCode": "0xB7",
- "MSRValue": "0x733",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x733",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = local cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = local cache or dram",
"EventCode": "0xB7",
- "MSRValue": "0x4733",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4733",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = local cache or dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1833",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1833",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = remote cache or dram",
"EventCode": "0xB7",
- "MSRValue": "0x3833",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3833",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = remote cache or dram",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HIT in a remote cache ",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x833",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x833",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F03",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F03",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore demand data requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF03",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF03",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand data requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
"EventCode": "0xB7",
- "MSRValue": "0x8003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x103",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x103",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x203",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x203",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x403",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x403",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x703",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x703",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4703",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4703",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1803",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1803",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3803",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3803",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x803",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x803",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F01",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F01",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore demand data reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF01",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF01",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x101",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x101",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x201",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x201",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x401",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x401",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x701",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x701",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4701",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4701",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1801",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1801",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3801",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3801",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x801",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x801",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F04",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F04",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore demand code reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF04",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF04",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x104",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x104",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x204",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x204",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x404",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x404",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x704",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x704",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4704",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4704",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1804",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1804",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3804",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3804",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x804",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x804",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F02",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F02",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore demand RFO requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF02",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF02",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore demand RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x102",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x102",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x202",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x202",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x402",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x402",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x702",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x702",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4702",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4702",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1802",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1802",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3802",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3802",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x802",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x802",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F80",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F80",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore other requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF80",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF80",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore other requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8080",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8080",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x180",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x180",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x280",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x280",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x480",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x480",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x780",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x780",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4780",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4780",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1880",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1880",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3880",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3880",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1080",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1080",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x880",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x880",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x7F30",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F30",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by any cache or DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch data requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF30",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF30",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch data requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
"EventCode": "0xB7",
- "MSRValue": "0x8030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the IO, CSR, MMIO unit.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x130",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x130",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x230",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x230",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x430",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x430",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x730",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x730",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4730",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4730",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1830",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1830",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3830",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3830",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x830",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x830",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F10",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F10",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch data reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF10",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF10",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch data reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x110",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x110",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x210",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x210",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x410",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x410",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x710",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x710",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4710",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4710",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1810",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1810",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3810",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3810",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x810",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x810",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F40",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F40",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch code reads",
"EventCode": "0xB7",
- "MSRValue": "0xFF40",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF40",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch code reads",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x140",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x140",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x240",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x240",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x440",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x440",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x740",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x740",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4740",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4740",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1840",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1840",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3840",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3840",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x840",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x840",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F20",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F20",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch RFO requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF20",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF20",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch RFO requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x120",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x120",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x220",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x220",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x420",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x420",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x720",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x720",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4720",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4720",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1820",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1820",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3820",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3820",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x820",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x820",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x7F70",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x7F70",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by any cache or DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All offcore prefetch requests",
"EventCode": "0xB7",
- "MSRValue": "0xFF70",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LOCATION",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xFF70",
"SampleAfterValue": "100000",
- "BriefDescription": "All offcore prefetch requests",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
"EventCode": "0xB7",
- "MSRValue": "0x8070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.IO_CSR_MMIO",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x8070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the IO, CSR, MMIO unit",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x170",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_NO_OTHER_CORE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x170",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and not found in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x270",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x270",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HIT in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
"EventCode": "0xB7",
- "MSRValue": "0x470",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LLC_HIT_OTHER_CORE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x470",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC and HITM in a sibling core",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
"EventCode": "0xB7",
- "MSRValue": "0x770",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x770",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4770",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4770",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the LLC or local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1870",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1870",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x3870",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x3870",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote cache or remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x1070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HIT",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x1070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that HIT in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
"EventCode": "0xB7",
- "MSRValue": "0x870",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_CACHE_HITM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x870",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that HITM in a remote cache",
- "Offcore": "1"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Super Queue lock splits across a cache line",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Loads delayed with at-Retirement block code",
+ "EventCode": "0x6",
+ "EventName": "STORE_BLOCKS.AT_RET",
+ "SampleAfterValue": "200000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cacheable loads delayed with L1D block code",
+ "EventCode": "0x6",
+ "EventName": "STORE_BLOCKS.L1D_BLOCK",
+ "SampleAfterValue": "200000",
+ "UMask": "0x8"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
index 7d2f71a9dee3..c03f8990fa82 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/floating-point.json
@@ -1,229 +1,201 @@
[
{
- "PEBS": "1",
+ "BriefDescription": "X87 Floating point assists (Precise Event)",
"EventCode": "0xF7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FP_ASSIST.ALL",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating point assists (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)",
"EventCode": "0xF7",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "FP_ASSIST.INPUT",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating poiint assists for invalid input value (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)",
"EventCode": "0xF7",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "FP_ASSIST.OUTPUT",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "X87 Floating point assists for invalid output value (Precise Event)"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "MMX Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "FP_COMP_OPS_EXE.MMX",
"SampleAfterValue": "2000000",
- "BriefDescription": "MMX Uops"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "SSE2 integer Uops",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "SSE* FP double precision Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "FP_COMP_OPS_EXE.SSE_DOUBLE_PRECISION",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE* FP double precision Uops"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "SSE and SSE2 FP Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "FP_COMP_OPS_EXE.SSE_FP",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE and SSE2 FP Uops"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "SSE FP packed Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_PACKED",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE FP packed Uops"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "SSE FP scalar Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "FP_COMP_OPS_EXE.SSE_FP_SCALAR",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE FP scalar Uops"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "SSE* FP single precision Uops",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "FP_COMP_OPS_EXE.SSE_SINGLE_PRECISION",
"SampleAfterValue": "2000000",
- "BriefDescription": "SSE* FP single precision Uops"
- },
- {
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_COMP_OPS_EXE.SSE2_INTEGER",
- "SampleAfterValue": "2000000",
- "BriefDescription": "SSE2 integer Uops"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Computational floating-point operations executed",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000000",
- "BriefDescription": "Computational floating-point operations executed"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All Floating Point to and from MMX transitions",
"EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EventName": "FP_MMX_TRANS.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All Floating Point to and from MMX transitions"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Transitions from MMX to Floating Point instructions",
"EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FP_MMX_TRANS.TO_FP",
"SampleAfterValue": "2000000",
- "BriefDescription": "Transitions from MMX to Floating Point instructions"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Transitions from Floating Point to MMX instructions",
"EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "FP_MMX_TRANS.TO_MMX",
"SampleAfterValue": "2000000",
- "BriefDescription": "Transitions from Floating Point to MMX instructions"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "128 bit SIMD integer pack operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "SIMD_INT_128.PACK",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer pack operations"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "128 bit SIMD integer arithmetic operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "SIMD_INT_128.PACKED_ARITH",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer arithmetic operations"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "128 bit SIMD integer logical operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "SIMD_INT_128.PACKED_LOGICAL",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer logical operations"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "128 bit SIMD integer multiply operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SIMD_INT_128.PACKED_MPY",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer multiply operations"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "128 bit SIMD integer shift operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SIMD_INT_128.PACKED_SHIFT",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer shift operations"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "128 bit SIMD integer shuffle/move operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "SIMD_INT_128.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer shuffle/move operations"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "128 bit SIMD integer unpack operations",
"EventCode": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "SIMD_INT_128.UNPACK",
"SampleAfterValue": "200000",
- "BriefDescription": "128 bit SIMD integer unpack operations"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "SIMD integer 64 bit pack operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "SIMD_INT_64.PACK",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit pack operations"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "SIMD integer 64 bit arithmetic operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "SIMD_INT_64.PACKED_ARITH",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit arithmetic operations"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "SIMD integer 64 bit logical operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "SIMD_INT_64.PACKED_LOGICAL",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit logical operations"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "SIMD integer 64 bit packed multiply operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SIMD_INT_64.PACKED_MPY",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit packed multiply operations"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "SIMD integer 64 bit shift operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SIMD_INT_64.PACKED_SHIFT",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit shift operations"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "SIMD integer 64 bit shuffle/move operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "SIMD_INT_64.SHUFFLE_MOVE",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit shuffle/move operations"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "SIMD integer 64 bit unpack operations",
"EventCode": "0xFD",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "SIMD_INT_64.UNPACK",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD integer 64 bit unpack operations"
+ "UMask": "0x8"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json b/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
index e5e21e03444d..f7f28510e3ae 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/frontend.json
@@ -1,26 +1,23 @@
[
{
+ "BriefDescription": "Instructions decoded",
"EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MACRO_INSTS.DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions decoded"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Macro-fused instructions decoded",
"EventCode": "0xA6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MACRO_INSTS.FUSIONS_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Macro-fused instructions decoded"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Two Uop instructions decoded",
"EventCode": "0x19",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TWO_UOP_INSTS_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Two Uop instructions decoded"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
index f914a4525b65..f810880a295e 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/memory.json
@@ -1,739 +1,605 @@
[
{
+ "BriefDescription": "Offcore data reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF811",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF811",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2011",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2011",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF844",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF844",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2044",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2044",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x60FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x60FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF8FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF8FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x40FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x40FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x20FF",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_REQUEST.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x20FF",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF822",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF822",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2022",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2022",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF808",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF808",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore writebacks to a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2008",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.COREWB.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2008",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore writebacks to a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF877",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF877",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2077",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2077",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore code or data read requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore request = all data, response = any LLC miss",
"EventCode": "0xB7",
- "MSRValue": "0xF833",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF833",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore request = all data, response = any LLC miss",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the local DRAM.",
"EventCode": "0xB7",
- "MSRValue": "0x4033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the local DRAM.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore data reads, RFOs, and prefetches satisfied by the remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2033",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DATA_IN.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2033",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore data reads, RFO's and prefetches statisfied by the remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF803",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF803",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2003",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2003",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF801",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF801",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2001",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2001",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF804",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF804",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2004",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2004",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF802",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF802",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2002",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2002",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore demand RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6080",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6080",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF880",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF880",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2080",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.OTHER.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2080",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore other requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF830",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF830",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2030",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2030",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF810",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF810",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2010",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_DATA_RD.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2010",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch data reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF840",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF840",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2040",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_IFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2040",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch code reads satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF820",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF820",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2020",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PF_RFO.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2020",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch RFO requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x6070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x6070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by any DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests that missed the LLC",
"EventCode": "0xB7",
- "MSRValue": "0xF870",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS",
"MSRIndex": "0x1A6",
+ "MSRValue": "0xF870",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests that missed the LLC",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x4070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.LOCAL_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x4070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by the local DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
"EventCode": "0xB7",
- "MSRValue": "0x2070",
- "Counter": "2",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.PREFETCH.REMOTE_DRAM",
"MSRIndex": "0x1A6",
+ "MSRValue": "0x2070",
"SampleAfterValue": "100000",
- "BriefDescription": "Offcore prefetch requests satisfied by a remote DRAM",
- "Offcore": "1"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/other.json b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
index af0860622445..fb706cb51832 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/other.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/other.json
@@ -1,210 +1,128 @@
[
{
- "EventCode": "0xE8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BPU_CLEARS.EARLY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Early Branch Prediciton Unit clears"
- },
- {
- "EventCode": "0xE8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BPU_CLEARS.LATE",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Late Branch Prediction Unit clears"
- },
- {
- "EventCode": "0xE5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BPU_MISSED_CALL_RET",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Branch prediction unit missed call or return"
- },
- {
+ "BriefDescription": "ES segment renames",
"EventCode": "0xD5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ES_REG_RENAMES",
"SampleAfterValue": "2000000",
- "BriefDescription": "ES segment renames"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "I/O transactions",
"EventCode": "0x6C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "IO_TRANSACTIONS",
"SampleAfterValue": "2000000",
- "BriefDescription": "I/O transactions"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1I instruction fetch stall cycles",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "L1I.CYCLES_STALLED",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch stall cycles"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "L1I instruction fetch hits",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "L1I.HITS",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch hits"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "L1I instruction fetch misses",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "L1I.MISSES",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I instruction fetch misses"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "L1I Instruction fetches",
"EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
"EventName": "L1I.READS",
"SampleAfterValue": "2000000",
- "BriefDescription": "L1I Instruction fetches"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Large ITLB hit",
"EventCode": "0x82",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LARGE_ITLB.HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "Large ITLB hit"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All loads dispatched",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "LOAD_DISPATCH.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "All loads dispatched"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Loads dispatched from the MOB",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "LOAD_DISPATCH.MOB",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched from the MOB"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Loads dispatched that bypass the MOB",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LOAD_DISPATCH.RS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched that bypass the MOB"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads dispatched from stage 305",
"EventCode": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "LOAD_DISPATCH.RS_DELAYED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loads dispatched from stage 305"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "False dependencies due to partial address aliasing",
"EventCode": "0x7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "PARTIAL_ADDRESS_ALIAS",
"SampleAfterValue": "200000",
- "BriefDescription": "False dependencies due to partial address aliasing"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "RAT_STALLS.ANY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "All RAT stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RAT_STALLS.FLAGS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Flag stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RAT_STALLS.REGISTERS",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Partial register stall cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RAT_STALLS.ROB_READ_PORT",
- "SampleAfterValue": "2000000",
- "BriefDescription": "ROB read port stalls cycles"
- },
- {
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RAT_STALLS.SCOREBOARD",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Scoreboard stall cycles"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "All Store buffer stall cycles",
"EventCode": "0x4",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "SB_DRAIN.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "All Store buffer stall cycles"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Segment rename stall cycles",
"EventCode": "0xD4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SEG_RENAME_STALLS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Segment rename stall cycles"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Thread responded HIT to snoop",
"EventCode": "0xB8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SNOOP_RESPONSE.HIT",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HIT to snoop"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Thread responded HITE to snoop",
"EventCode": "0xB8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SNOOP_RESPONSE.HITE",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HITE to snoop"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Thread responded HITM to snoop",
"EventCode": "0xB8",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "SNOOP_RESPONSE.HITM",
"SampleAfterValue": "100000",
- "BriefDescription": "Thread responded HITM to snoop"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Super Queue full stall cycles",
"EventCode": "0xF6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SQ_FULL_STALL_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Super Queue full stall cycles"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json b/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
index 41006ddcd893..c45f2ffa861e 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/pipeline.json
@@ -1,881 +1,828 @@
[
{
+ "BriefDescription": "Cycles the divider is busy",
"EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ARITH.CYCLES_DIV_BUSY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles the divider is busy"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Divide Operations executed",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0x14",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ARITH.DIV",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Divide Operations executed",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Multiply operations executed",
"EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "ARITH.MUL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Multiply operations executed"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "BACLEAR asserted with bad target address",
"EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BACLEAR.BAD_TARGET",
"SampleAfterValue": "2000000",
- "BriefDescription": "BACLEAR asserted with bad target address"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "BACLEAR asserted, regardless of cause",
"EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BACLEAR.CLEAR",
"SampleAfterValue": "2000000",
- "BriefDescription": "BACLEAR asserted, regardless of cause "
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Instruction queue forced BACLEAR",
"EventCode": "0xA7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BACLEAR_FORCE_IQ",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instruction queue forced BACLEAR"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Early Branch Prediciton Unit clears",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.EARLY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Late Branch Prediction Unit clears",
+ "EventCode": "0xE8",
+ "EventName": "BPU_CLEARS.LATE",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Branch prediction unit missed call or return",
+ "EventCode": "0xE5",
+ "EventName": "BPU_MISSED_CALL_RET",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Branch instructions decoded",
"EventCode": "0xE0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BR_INST_DECODED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Branch instructions decoded"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Branch instructions executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x7f",
"EventName": "BR_INST_EXEC.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "Branch instructions executed"
+ "UMask": "0x7f"
},
{
+ "BriefDescription": "Conditional branch instructions executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BR_INST_EXEC.COND",
"SampleAfterValue": "200000",
- "BriefDescription": "Conditional branch instructions executed"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Unconditional branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BR_INST_EXEC.DIRECT",
"SampleAfterValue": "200000",
- "BriefDescription": "Unconditional branches executed"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Unconditional call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "BR_INST_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Unconditional call branches executed"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Indirect call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "BR_INST_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect call branches executed"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Indirect non call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "BR_INST_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect non call branches executed"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
"EventName": "BR_INST_EXEC.NEAR_CALLS",
"SampleAfterValue": "20000",
- "BriefDescription": "Call branches executed"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "All non call branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "BR_INST_EXEC.NON_CALLS",
"SampleAfterValue": "200000",
- "BriefDescription": "All non call branches executed"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Indirect return branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "BR_INST_EXEC.RETURN_NEAR",
"SampleAfterValue": "20000",
- "BriefDescription": "Indirect return branches executed"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Taken branches executed",
"EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "BR_INST_EXEC.TAKEN",
"SampleAfterValue": "200000",
- "BriefDescription": "Taken branches executed"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired branch instructions (Precise Event)",
"EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired branch instructions (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired conditional branch instructions (Precise Event)",
"EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired conditional branch instructions (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired near call instructions (Precise Event)",
"EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"SampleAfterValue": "20000",
- "BriefDescription": "Retired near call instructions (Precise Event)"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Mispredicted branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x7f",
"EventName": "BR_MISP_EXEC.ANY",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted branches executed"
+ "UMask": "0x7f"
},
{
+ "BriefDescription": "Mispredicted conditional branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "BR_MISP_EXEC.COND",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted conditional branches executed"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Mispredicted unconditional branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BR_MISP_EXEC.DIRECT",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted unconditional branches executed"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Mispredicted non call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "BR_MISP_EXEC.DIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted non call branches executed"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Mispredicted indirect call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "BR_MISP_EXEC.INDIRECT_NEAR_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted indirect call branches executed"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Mispredicted indirect non call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "BR_MISP_EXEC.INDIRECT_NON_CALL",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted indirect non call branches executed"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Mispredicted call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
"EventName": "BR_MISP_EXEC.NEAR_CALLS",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted call branches executed"
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Mispredicted non call branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
"EventName": "BR_MISP_EXEC.NON_CALLS",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted non call branches executed"
+ "UMask": "0x7"
},
{
+ "BriefDescription": "Mispredicted return branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "BR_MISP_EXEC.RETURN_NEAR",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted return branches executed"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Mispredicted taken branches executed",
"EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "BR_MISP_EXEC.TAKEN",
"SampleAfterValue": "20000",
- "BriefDescription": "Mispredicted taken branches executed"
+ "UMask": "0x40"
},
{
- "PEBS": "1",
+ "BriefDescription": "Mispredicted near retired calls (Precise Event)",
"EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
"SampleAfterValue": "2000",
- "BriefDescription": "Mispredicted near retired calls (Precise Event)"
+ "UMask": "0x2"
},
{
- "EventCode": "0x0",
- "Counter": "Fixed counter 3",
- "UMask": "0x0",
+ "BriefDescription": "Reference cycles when thread is not halted (fixed counter)",
"EventName": "CPU_CLK_UNHALTED.REF",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Reference cycles when thread is not halted (fixed counter)"
+ "SampleAfterValue": "2000000"
},
{
+ "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPU_CLK_UNHALTED.REF_P",
"SampleAfterValue": "100000",
- "BriefDescription": "Reference base clock (133 Mhz) cycles when thread is not halted (programmable counter)"
+ "UMask": "0x1"
},
{
- "EventCode": "0x0",
- "Counter": "Fixed counter 2",
- "UMask": "0x0",
+ "BriefDescription": "Cycles when thread is not halted (fixed counter)",
"EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when thread is not halted (fixed counter)"
+ "SampleAfterValue": "2000000"
},
{
+ "BriefDescription": "Cycles when thread is not halted (programmable counter)",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
"EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when thread is not halted (programmable counter)"
+ "SampleAfterValue": "2000000"
},
{
+ "BriefDescription": "Total CPU cycles",
+ "CounterMask": "2",
"EventCode": "0x3C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
"EventName": "CPU_CLK_UNHALTED.TOTAL_CYCLES",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Total CPU cycles",
- "CounterMask": "2"
+ "Invert": "1",
+ "SampleAfterValue": "2000000"
},
{
+ "BriefDescription": "Any Instruction Length Decoder stall cycles",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
"EventName": "ILD_STALL.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Any Instruction Length Decoder stall cycles"
+ "UMask": "0xf"
},
{
+ "BriefDescription": "Instruction Queue full stall cycles",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "ILD_STALL.IQ_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instruction Queue full stall cycles"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Length Change Prefix stall cycles",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ILD_STALL.LCP",
"SampleAfterValue": "2000000",
- "BriefDescription": "Length Change Prefix stall cycles"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Stall cycles due to BPU MRU bypass",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "ILD_STALL.MRU",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stall cycles due to BPU MRU bypass"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Regen stall cycles",
"EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "ILD_STALL.REGEN",
"SampleAfterValue": "2000000",
- "BriefDescription": "Regen stall cycles"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Instructions that must be decoded by decoder 0",
"EventCode": "0x18",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "INST_DECODED.DEC0",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions that must be decoded by decoder 0"
+ "UMask": "0x1"
},
{
- "EventCode": "0x1E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_QUEUE_WRITE_CYCLES",
+ "BriefDescription": "Instructions written to instruction queue.",
+ "EventCode": "0x17",
+ "EventName": "INST_QUEUE_WRITES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles instructions are written to the instruction queue"
+ "UMask": "0x1"
},
{
- "EventCode": "0x17",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_QUEUE_WRITES",
+ "BriefDescription": "Cycles instructions are written to the instruction queue",
+ "EventCode": "0x1E",
+ "EventName": "INST_QUEUE_WRITE_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions written to instruction queue."
+ "UMask": "0x1"
},
{
- "EventCode": "0x0",
- "Counter": "Fixed counter 1",
- "UMask": "0x0",
+ "BriefDescription": "Instructions retired (fixed counter)",
"EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired (fixed counter)"
+ "SampleAfterValue": "2000000"
},
{
- "PEBS": "1",
+ "BriefDescription": "Instructions retired (Programmable counter and Precise Event)",
"EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Instructions retired (Programmable counter and Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired MMX instructions (Precise Event)",
"EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "INST_RETIRED.MMX",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired MMX instructions (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Total cycles (Precise Event)",
+ "CounterMask": "16",
"EventCode": "0xC0",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "INST_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Total cycles (Precise Event)",
- "CounterMask": "16"
+ "CounterMask": "16",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "Invert": "1",
+ "PEBS": "2",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired floating-point operations (Precise Event)",
"EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "INST_RETIRED.X87",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retired floating-point operations (Precise Event)"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Load operations conflicting with software prefetches",
"EventCode": "0x4C",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "LOAD_HIT_PRE",
"SampleAfterValue": "200000",
- "BriefDescription": "Load operations conflicting with software prefetches"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles when uops were delivered by the LSD",
+ "CounterMask": "1",
"EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LSD.ACTIVE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles when uops were delivered by the LSD",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles no uops were delivered by the LSD",
+ "CounterMask": "1",
"EventCode": "0xA8",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LSD.INACTIVE",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no uops were delivered by the LSD",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loops that can't stream from the instruction queue",
"EventCode": "0x20",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LSD_OVERFLOW",
"SampleAfterValue": "2000000",
- "BriefDescription": "Loops that can't stream from the instruction queue"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles machine clear asserted",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MACHINE_CLEARS.CYCLES",
"SampleAfterValue": "20000",
- "BriefDescription": "Cycles machine clear asserted"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MACHINE_CLEARS.MEM_ORDER",
"SampleAfterValue": "20000",
- "BriefDescription": "Execution pipeline restart due to Memory ordering conflicts"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Self-Modifying Code detected",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "MACHINE_CLEARS.SMC",
"SampleAfterValue": "20000",
- "BriefDescription": "Self-Modifying Code detected"
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "All RAT stall cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ANY",
+ "SampleAfterValue": "2000000",
+ "UMask": "0xf"
+ },
+ {
+ "BriefDescription": "Flag stall cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.FLAGS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Partial register stall cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.REGISTERS",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ROB read port stalls cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.ROB_READ_PORT",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Scoreboard stall cycles",
+ "EventCode": "0xD2",
+ "EventName": "RAT_STALLS.SCOREBOARD",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Resource related stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Resource related stall cycles"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "FPU control word write stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "RESOURCE_STALLS.FPCW",
"SampleAfterValue": "2000000",
- "BriefDescription": "FPU control word write stall cycles"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Load buffer stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "RESOURCE_STALLS.LOAD",
"SampleAfterValue": "2000000",
- "BriefDescription": "Load buffer stall cycles"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "MXCSR rename stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "RESOURCE_STALLS.MXCSR",
"SampleAfterValue": "2000000",
- "BriefDescription": "MXCSR rename stall cycles"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Other Resource related stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "RESOURCE_STALLS.OTHER",
"SampleAfterValue": "2000000",
- "BriefDescription": "Other Resource related stall cycles"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "ROB full stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "RESOURCE_STALLS.ROB_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "ROB full stall cycles"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Reservation Station full stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "RESOURCE_STALLS.RS_FULL",
"SampleAfterValue": "2000000",
- "BriefDescription": "Reservation Station full stall cycles"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Store buffer stall cycles",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "RESOURCE_STALLS.STORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Store buffer stall cycles"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "SSEX_UOPS_RETIRED.PACKED_DOUBLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Packed-Double Uops retired (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "SSEX_UOPS_RETIRED.PACKED_SINGLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Packed-Single Uops retired (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_DOUBLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Scalar-Double Uops retired (Precise Event)"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "SSEX_UOPS_RETIRED.SCALAR_SINGLE",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Scalar-Single Uops retired (Precise Event)"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "SSEX_UOPS_RETIRED.VECTOR_INTEGER",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "SIMD Vector Integer Uops retired (Precise Event)"
- },
- {
- "EventCode": "0xDB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOP_UNFUSION",
- "SampleAfterValue": "2000000",
- "BriefDescription": "Uop unfusions due to FP exceptions"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Stack pointer instructions decoded",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "UOPS_DECODED.ESP_FOLDING",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stack pointer instructions decoded"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Stack pointer sync operations",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "UOPS_DECODED.ESP_SYNC",
"SampleAfterValue": "2000000",
- "BriefDescription": "Stack pointer sync operations"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Uops decoded by Microcode Sequencer",
+ "CounterMask": "1",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_DECODED.MS_CYCLES_ACTIVE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops decoded by Microcode Sequencer",
- "CounterMask": "1"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles no Uops are decoded",
+ "CounterMask": "1",
"EventCode": "0xD1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_DECODED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops are decoded",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops executed on any port (core count)",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops executed on any port (core count)",
- "CounterMask": "1"
+ "UMask": "0x3f"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_ACTIVE_CYCLES_NO_PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops executed on ports 0-4 (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
- "SampleAfterValue": "2000000",
"BriefDescription": "Uops executed on any port (core count)",
"CounterMask": "1",
- "EdgeDetect": "1"
- },
- {
+ "EdgeDetect": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on ports 0-4 (core count)",
- "CounterMask": "1",
- "EdgeDetect": "1"
+ "UMask": "0x3f"
},
{
+ "AnyThread": "1",
+ "BriefDescription": "Uops executed on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_COUNT_NO_PORT5",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "AnyThread": "1",
- "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on any port (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
+ "AnyThread": "1",
+ "BriefDescription": "Cycles no Uops issued on any port (core count)",
+ "CounterMask": "1",
"EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES",
"Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
+ "SampleAfterValue": "2000000",
+ "UMask": "0x3f"
+ },
+ {
"AnyThread": "1",
+ "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.CORE_STALL_CYCLES_NO_PORT5",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on ports 0-4 (core count)",
- "CounterMask": "1"
+ "UMask": "0x1f"
},
{
+ "BriefDescription": "Uops executed on port 0",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_EXECUTED.PORT0",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 0"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Uops issued on ports 0, 1 or 5",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "UOPS_EXECUTED.PORT015",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued on ports 0, 1 or 5"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
+ "CounterMask": "1",
"EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "UOPS_EXECUTED.PORT015_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops issued on ports 0, 1 or 5",
- "CounterMask": "1"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Uops executed on port 1",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_EXECUTED.PORT1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 1"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.PORT2_CORE",
+ "BriefDescription": "Uops issued on ports 2, 3 or 4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.PORT234_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 2 (core count)"
+ "UMask": "0x80"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"AnyThread": "1",
- "EventName": "UOPS_EXECUTED.PORT234_CORE",
+ "BriefDescription": "Uops executed on port 2 (core count)",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.PORT2_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued on ports 2, 3 or 4"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"AnyThread": "1",
+ "BriefDescription": "Uops executed on port 3 (core count)",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT3_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 3 (core count)"
+ "UMask": "0x8"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"AnyThread": "1",
+ "BriefDescription": "Uops executed on port 4 (core count)",
+ "EventCode": "0xB1",
"EventName": "UOPS_EXECUTED.PORT4_CORE",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 4 (core count)"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Uops executed on port 5",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "UOPS_EXECUTED.PORT5",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops executed on port 5"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Uops issued",
"EventCode": "0xE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_ISSUED.ANY",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops issued"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
+ "BriefDescription": "Cycles no Uops were issued on any thread",
+ "CounterMask": "1",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops were issued on any thread",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
+ "BriefDescription": "Cycles Uops were issued on either thread",
+ "CounterMask": "1",
+ "EventCode": "0xE",
"EventName": "UOPS_ISSUED.CYCLES_ALL_THREADS",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops were issued on either thread",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Fused Uops issued",
"EventCode": "0xE",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_ISSUED.FUSED",
"SampleAfterValue": "2000000",
- "BriefDescription": "Fused Uops issued"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles no Uops were issued",
+ "CounterMask": "1",
"EventCode": "0xE",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles no Uops were issued",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles Uops are being retired",
+ "CounterMask": "1",
"EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_RETIRED.ACTIVE_CYCLES",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops are being retired",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Uops retired (Precise Event)",
"EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_RETIRED.ANY",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Uops retired (Precise Event)"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Macro-fused Uops retired (Precise Event)",
"EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Macro-fused Uops retired (Precise Event)"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retirement slots used (Precise Event)",
"EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Retirement slots used (Precise Event)"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
+ "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
+ "CounterMask": "1",
"EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Cycles Uops are not retiring (Precise Event)",
- "CounterMask": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
+ "CounterMask": "16",
"EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PEBS": "1",
"SampleAfterValue": "2000000",
- "BriefDescription": "Total cycles using precise uop retired event (Precise Event)",
- "CounterMask": "16"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "BriefDescription": "Uop unfusions due to FP exceptions",
+ "EventCode": "0xDB",
+ "EventName": "UOP_UNFUSION",
"SampleAfterValue": "2000000",
- "BriefDescription": "Total cycles (Precise Event)",
- "CounterMask": "16"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
index 0596094e0ee9..c434cd4ef4f1 100644
--- a/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/nehalemex/virtual-memory.json
@@ -1,109 +1,96 @@
[
{
+ "BriefDescription": "DTLB load misses",
"EventCode": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_LOAD_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load misses"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "DTLB load miss caused by low part of address",
"EventCode": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "DTLB_LOAD_MISSES.PDE_MISS",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load miss caused by low part of address"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "DTLB second level hit",
"EventCode": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "DTLB_LOAD_MISSES.STLB_HIT",
"SampleAfterValue": "2000000",
- "BriefDescription": "DTLB second level hit"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "DTLB load miss page walks complete",
"EventCode": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB load miss page walks complete"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "DTLB misses",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB misses"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "DTLB first level misses but second level hit",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "DTLB_MISSES.STLB_HIT",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB first level misses but second level hit"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "DTLB miss page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "DTLB miss page walks"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "ITLB flushes",
"EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ITLB_FLUSH",
"SampleAfterValue": "2000000",
- "BriefDescription": "ITLB flushes"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISS_RETIRED",
- "SampleAfterValue": "200000",
- "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "ITLB miss",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "ITLB_MISSES.ANY",
"SampleAfterValue": "200000",
- "BriefDescription": "ITLB miss"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "ITLB miss page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "200000",
- "BriefDescription": "ITLB miss page walks"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired instructions that missed the ITLB (Precise Event)",
+ "EventCode": "0xC8",
+ "EventName": "ITLB_MISS_RETIRED",
"PEBS": "1",
+ "SampleAfterValue": "200000",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Retired loads that miss the DTLB (Precise Event)",
"EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "MEM_LOAD_RETIRED.DTLB_MISS",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired loads that miss the DTLB (Precise Event)"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired stores that miss the DTLB (Precise Event)",
"EventCode": "0xC",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MEM_STORE_RETIRED.DTLB_MISS",
+ "PEBS": "1",
"SampleAfterValue": "200000",
- "BriefDescription": "Retired stores that miss the DTLB (Precise Event)"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
index bb79e89c2049..4e5572ee7dfe 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/cache.json
@@ -1,1879 +1,1438 @@
[
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Allocated L1D data cache lines in M state.",
+ "EventCode": "0x51",
+ "EventName": "L1D.ALLOCATED_IN_M",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
+ "EventCode": "0x51",
+ "EventName": "L1D.ALL_M_REPLACEMENT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_RQSTS.RFO_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
+ "EventCode": "0x51",
+ "EventName": "L1D.EVICTION",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D data line replacements.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "L2_RQSTS.ALL_RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
+ "CounterMask": "1",
+ "EventCode": "0xBF",
+ "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
+ "SampleAfterValue": "100003",
+ "UMask": "0x5"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers unavailability.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D miss outstanding duration in cycles.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_RQSTS.PF_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_RQSTS.PF_MISS",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf"
},
{
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc0",
- "EventName": "L2_RQSTS.ALL_PF",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_E",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from L2 hardware prefetchers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_M",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that miss cache lines.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+ "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.HIT_S",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
+ "EventCode": "0x28",
+ "EventName": "L2_L1D_WB_RQSTS.MISS",
"SampleAfterValue": "200003",
- "BriefDescription": "RFOs that hit cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x27",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_STORE_LOCK_RQSTS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFOs that access cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines filling L2.",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_L1D_WB_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Count the number of modified Lines evicted from L1 and missed L2. (Non-rejected WBs from the DCU.).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines in E state filling L2.",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.E",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_L1D_WB_RQSTS.HIT_S",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in S state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines in I state filling L2.",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.I",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_L1D_WB_RQSTS.HIT_E",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in E state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines in S state filling L2.",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.S",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_L1D_WB_RQSTS.HIT_M",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Clean L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x28",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "L2_L1D_WB_RQSTS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not rejected writebacks from L1D to L2 cache lines in any state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Dirty L2 cache lines evicted by demand.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
+ "BriefDescription": "Dirty L2 cache lines filling the L2.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.DIRTY_ALL",
"SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa"
},
{
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_CLEAN",
"SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss oustandings duration in cycles.",
- "CounterHTOff": "2"
+ "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.PF_DIRTY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "2"
+ "BriefDescription": "L2 code requests.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30"
},
{
- "EventCode": "0x48",
- "Counter": "2",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "2"
+ "BriefDescription": "Demand Data Read requests.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
},
{
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles a demand request was blocked due to Fill Buffers inavailability.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Requests from L2 hardware prefetchers.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc0"
},
{
- "PublicDescription": "This event counts L1D data line replacements. Replacements occur when a new line is brought into the cache, causing eviction of a line loaded earlier.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests to L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc"
},
{
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D.ALLOCATED_IN_M",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Allocated L1D data cache lines in M state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L1D.EVICTION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data cache lines in M state evicted due to replacement.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache misses when fetching instructions.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
},
{
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L1D.ALL_M_REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cache lines in M state evicted out of L1D due to Snoop HitM or dirty line replacement.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Requests from the L2 hardware prefetchers that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Requests from the L2 hardware prefetchers that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFOs that access cache lines in any state.",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.ALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFOs that hit cache lines in E state.",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_E",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFOs that hit cache lines in M state.",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.HIT_M",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1D is locked.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFOs that miss cache lines.",
+ "EventCode": "0x27",
+ "EventName": "L2_STORE_LOCK_RQSTS.MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_PF",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Transactions accessing L2 pipe.",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.ALL_REQUESTS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache accesses when fetching instructions.",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.CODE_RD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.DEMAND_DATA_RD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "BriefDescription": "L1D writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L1D_WB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "L2 fill requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_FILL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache.",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "RFO requests that access L2 cache.",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.RFO",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when L1D is locked.",
+ "EventCode": "0x63",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xBF",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "L1D_BLOCKS.BANK_CONFLICT_CYCLES",
+ "BriefDescription": "Core-originated cacheable demand requests missed LLC.",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when dispatched loads are cancelled due to L1D bank conflicts with other load ports.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
+ "BriefDescription": "Core-originated cacheable demand requests that refer to LLC.",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4f"
},
{
+ "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS).",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
- "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state. (Precise Event - PEBS)",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. (Precise Event - PEBS).",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2. (Precise Event - PEBS)",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS).",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. (Precise Event - PEBS).",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load. (Precise Event - PEBS).",
+ "EventCode": "0xD4",
+ "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
"PEBS": "1",
- "PublicDescription": "This event counts the number of load uops retired (Precise Event)",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load uops. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops. (Precise Event - PEBS)",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS).",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
"PEBS": "1",
- "PublicDescription": "This event counts the number of store uops retired. (Precise Event - PEBS)",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store uops. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS).",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load uops with L1 cache hits as data sources. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS).",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops with L2 cache hits as data sources. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required. (Precise Event - PEBS)",
+ "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. (Precise Event - PEBS).",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "MEM_LOAD_UOPS_RETIRED.LLC_HIT",
+ "PEBS": "1",
+ "PublicDescription": "This event counts retired load uops that hit in the last-level (L3) cache without snoops required. (Precise Event - PEBS)",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load uops which data sources were data hits in LLC without snoops required. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "All retired load uops. (Precise Event - PEBS).",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
"PEBS": "1",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_UOPS_RETIRED.HIT_LFB",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were load uops missed L1 but hit FB due to preceding miss to the same cache line with data not ready. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts the number of load uops retired (Precise Event)",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x81"
},
{
+ "BriefDescription": "All retired store uops. (Precise Event - PEBS).",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC hit and cross-core snoop missed in on-pkg core cache. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts the number of store uops retired. (Precise Event - PEBS)",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x82"
},
{
+ "BriefDescription": "Retired load uops with locked access. (Precise Event - PEBS).",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
"PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a non-modified state. (Precise Event - PEBS)",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were LLC and cross-core snoop hits in on-pkg core cache. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
{
+ "BriefDescription": "Retired load uops that split across a cacheline boundary. (Precise Event - PEBS).",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
"PEBS": "1",
- "PublicDescription": "This event counts retired load uops that hit in the last-level cache (L3) and were found in a non-modified state in a neighboring core's private cache (same package). Since the last level cache is inclusive, hits to the L3 may require snooping the private L2 caches of any cores on the same socket that have the line. In this case, a snoop was required, and another L2 had the line in a modified state, so the line had to be invalidated in that L2 cache and transferred to the requesting L2. (Precise Event - PEBS)",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load uops which data sources were HitM responses from shared LLC. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts line-splitted load uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
+ "BriefDescription": "Retired store uops that split across a cacheline boundary. (Precise Event - PEBS).",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
"PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_UOPS_LLC_HIT_RETIRED.XSNP_NONE",
+ "PublicDescription": "This event counts line-splitted store uops retired to the architected path. A line split is across 64B cache-line which includes a page split (4K). (Precise Event - PEBS)",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load uops which data sources were hits in LLC without snoops required. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x42"
},
{
+ "BriefDescription": "Retired load uops that miss the STLB. (Precise Event - PEBS).",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_LOADS",
"PEBS": "1",
- "PublicDescription": "This event counts retired demand loads that missed the last-level (L3) cache. This means that the load is usually satisfied from memory in a client system or possibly from the remote socket in a server. Demand loads are non speculative load uops. (Precise Event - PEBS)",
- "EventCode": "0xD4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load uops with unknown information as data source in cache serviced the load. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
- },
- {
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_TRANS.DEMAND_DATA_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_TRANS.RFO",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_TRANS.CODE_RD",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache accesses when fetching instructions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_TRANS.ALL_PF",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 or LLC HW prefetches that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "L2_TRANS.L1D_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L1D writebacks that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
},
{
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "L2_TRANS.L2_FILL",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 fill requests that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired store uops that miss the STLB. (Precise Event - PEBS).",
+ "EventCode": "0xD0",
+ "EventName": "MEM_UOPS_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
},
{
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand and prefetch data reads.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "L2_TRANS.ALL_REQUESTS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Transactions accessing L2 pipe.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cacheable and noncacheable code read requests.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_IN.I",
+ "BriefDescription": "Demand Data Read requests sent to uncore.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in I state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_IN.S",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
"SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in S state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_IN.E",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines in E state filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cases when offcore requests buffer cannot take more entries for core.",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of L2 cache lines brought into the L2 cache. Lines are filled into the L2 cache when there was an L2 miss.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x7",
- "EventName": "L2_LINES_IN.ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.DEMAND_CLEAN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by demand.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.DEMAND_DIRTY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by demand.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.PF_CLEAN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Clean L2 cache lines evicted by L2 prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "L2_LINES_OUT.PF_DIRTY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines evicted by L2 prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "L2_LINES_OUT.DIRTY_ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "Dirty L2 cache lines filling the L2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_C6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Split locks in SQ.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding RFO store transactions in SuperQueue (SQ), queue to uncore.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch data reads.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x000105B3",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0091",
"SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0091",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch code reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch data reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch RFOs that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch RFOs that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo references (demand & prefetch) .",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x000107F7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c03f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts data/code/rfo reads (demand & prefetch) that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c03f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch prefetch RFOs .",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10008",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0122",
"SampleAfterValue": "100003",
- "BriefDescription": "COREWB & ANY_RESPONSE",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand & prefetch RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.COREWB.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10008",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10433",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand code reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data reads .",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand rfo's .",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x18000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x00010002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses. It also includes L2 hints sent to LLC to keep a line from being evicted out of the core caches.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x803c8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2380408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data writes (RFOs) that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_M.HITM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses. It also includes L2 hints sent to LLC to keep a line from being evicted out of the core caches.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts L2 hints sent to LLC to keep a line from being evicted out of the core caches.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.OTHER.LRU_HINTS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x803c8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) data reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts miscellaneous accesses that include port i/o, MMIO and uncacheable memory accesses.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.OTHER.PORTIO_MMIO_UC",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2380408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) data reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to L2) RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0020",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3f803c0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the LLC.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x4003c0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) code reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10003c0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0200",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1003c0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2003c0100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10400",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10800",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts non-temporal stores.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) data reads that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0080",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data reads .",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the LLC.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3f803c0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand rfo's .",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003c0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand code reads.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x000105B3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003c0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00010122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003c0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch prefetch RFOs .",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts prefetch (that bring data to LLC only) RFOs that hit in the LLC and the snoops sent to sibling cores return clean response.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x000107F7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ALL_READS.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_HIT.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003c0100",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo references (demand & prefetch) .",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10433",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DATA_IN.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10080",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = DATA_INTO_CORE and RESPONSE = ANY_RESPONSE",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_HIT_M.HITM",
+ "EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10200",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = DEMAND_RFO and RESPONSE = LLC_HIT_M and SNOOP = HITM",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts requests where the address of an atomic lock instruction spans a cache line boundary or the lock instruction is executed on uncacheable address.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_IFETCH.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.SPLIT_LOCK_UC_LOCK.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = PF_RFO and RESPONSE = ANY_RESPONSE",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts non-temporal stores.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = ANY_RESPONSE",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "BriefDescription": "Split locks in SQ.",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = ANY_RESPONSE",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
}
-]
\ No newline at end of file
+]
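The cache.json hunks above only re-sort and trim fields, but they show the shape of the records that perf's pmu-events build consumes: each event is a flat JSON object, and the OFFCORE_RESPONSE.* entries additionally carry MSRIndex/MSRValue pairs for programming MSRs 0x1a6/0x1a7. As a rough, illustrative sketch (not part of the kernel tooling; the script and the assumed checkout path are for demonstration only), such a file can be inspected like this:

#!/usr/bin/env python3
# Illustrative helper, not kernel code: list the OFFCORE_RESPONSE events from a
# pmu-events JSON file such as the sandybridge cache.json touched above.
import json

def list_offcore_events(path):
    with open(path) as f:
        events = json.load(f)  # the file is a JSON array of flat event dicts
    for ev in events:
        name = ev.get("EventName", "")
        if not name.startswith("OFFCORE_RESPONSE."):
            continue
        # OFFCORE_RESPONSE events carry the extra MSR programming fields.
        print(f'{name:55s} EventCode={ev["EventCode"]}  '
              f'UMask={ev["UMask"]}  MSRValue={ev.get("MSRValue", "-")}')

if __name__ == "__main__":
    # Path assumes a kernel source checkout; adjust to your tree.
    list_offcore_events("tools/perf/pmu-events/arch/x86/sandybridge/cache.json")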
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
index ce26537c7d47..79e8f403c426 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/floating-point.json
@@ -1,138 +1,108 @@
[
{
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FP_COMP_OPS_EXE.X87",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULsand IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with any input/output SSE or FP assist.",
+ "CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1e"
},
{
- "EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of SIMD FP assists due to input values.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.SIMD_INPUT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of SIMD FP assists due to Output values.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.SIMD_OUTPUT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of X87 assists due to input value.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.X87_INPUT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of X87 assists due to output value.",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.X87_OUTPUT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed double-precision uops issued this cycle.",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
+ "EventName": "FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
"EventName": "FP_COMP_OPS_EXE.SSE_PACKED_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational packed single-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
"EventCode": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
"EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar double-precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "BriefDescription": "Number of SSE* or AVX-128 FP Computational scalar single-precision uops issued this cycle.",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "BriefDescription": "Number of FP Computational Uops Executed this cycle. The number of FADD, FSUB, FCOM, FMULs, integer MULs and IMULs, FDIVs, FPREMs, FSQRTS, integer DIVs, and IDIVs. This event does not distinguish an FADD used in the middle of a transcendental flow from a s.",
+ "EventCode": "0x10",
+ "EventName": "FP_COMP_OPS_EXE.X87",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
"EventName": "OTHER_ASSISTS.AVX_STORE",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of GSSE memory assist for stores. GSSE microcode assist is being invoked whenever the hardware is unable to properly handle GSSE-256b operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "OTHER_ASSISTS.AVX_TO_SSE",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from AVX-256 to legacy SSE when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
"EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "OTHER_ASSISTS.SSE_TO_AVX",
"SampleAfterValue": "100003",
- "BriefDescription": "Number of transitions from SSE to AVX-256 when penalty applicable.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ASSIST.X87_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to output value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ASSIST.X87_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of X87 assists due to input value.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ASSIST.SIMD_OUTPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to Output values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ASSIST.SIMD_INPUT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of SIMD FP assists due to input values.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of AVX-256 Computational FP double precision uops issued this cycle.",
+ "EventCode": "0x11",
+ "EventName": "SIMD_FP_256.PACKED_DOUBLE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1e",
- "EventName": "FP_ASSIST.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of GSSE-256 Computational FP single precision uops issued this cycle.",
+ "EventCode": "0x11",
+ "EventName": "SIMD_FP_256.PACKED_SINGLE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
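The floating-point.json events above (FP_COMP_OPS_EXE.* and SIMD_FP_256.*) are the counters commonly combined into a floating-point operation estimate on Sandy Bridge. A minimal sketch of that arithmetic, assuming one FLOP per scalar-double uop, two per 128-bit packed-double uop and four per 256-bit packed-double uop (the counts below are placeholders, not measured data):

# Rough FLOP arithmetic from the counters above; the multipliers follow from the
# vector width (scalar, 128-bit SSE, 256-bit AVX) and are an assumption here.
def dp_flops(scalar_double, sse_packed_double, avx256_packed_double):
    return (scalar_double * 1            # FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE
            + sse_packed_double * 2      # FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE
            + avx256_packed_double * 4)  # SIMD_FP_256.PACKED_DOUBLE

print(dp_flops(scalar_double=1_000_000,
               sse_packed_double=250_000,
               avx256_packed_double=500_000))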
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
index e58ed14a204c..700716b42f1a 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/frontend.json
@@ -1,305 +1,250 @@
[
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "IDQ.EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
+ "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.ALL_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
+ "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.EXCEED_DSB_LINES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_UOPS",
+ "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
+ "EventCode": "0xAC",
+ "EventName": "DSB_FILL.OTHER_CANCEL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
+ "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_DSB_OCCUR",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
"EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "SampleAfterValue": "2000003",
"BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
+ "BriefDescription": "Cycles MITE is delivering 4 Uops.",
+ "CounterMask": "4",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
"EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Cycles MITE is delivering any Uop.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
"EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path.",
+ "CounterMask": "1",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
+ "EventName": "IDQ.DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more information.",
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
+ "EventName": "IDQ.DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Instruction Decode Queue (IDQ) empty cycles.",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
+ "EventName": "IDQ.EMPTY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
"EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x3c",
"EventName": "IDQ.MITE_ALL_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3c"
},
{
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE.HIT",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Instruction Cache, Streaming Buffer and Victim Cache Reads. both cacheable and noncacheable, including UC fetches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of instruction cache, streaming buffer and victim cache misses. Counting includes unchacheable accesses.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE.MISSES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction cache, streaming buffer and victim cache misses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "This event counts cycles during which the microcode sequencer assisted the front-end in delivering uops. Microcode assists are used for complex instructions or scenarios that can't be handled by the standard decoder. Using other instructions, if possible, will usually improve performance. See the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual for more information.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x30"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "BriefDescription": "Deliveries to Instruction Decode Queue (IDQ) initiated by Decode Stream Buffer (DSB) while Microcode Sequencer (MS) is busy.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_OCCUR",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "BriefDescription": "Uops initiated by Decode Stream Buffer (DSB) that are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy.",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled .",
"EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "This event counts the number of uops not delivered to the back-end per cycle, per thread, when the back-end was not stalled. In the ideal case 4 uops can be delivered each cycle. The event counts the undelivered uops - so if 3 were delivered in one cycle, the counter would be incremented by 1 for that cycle (4 - 3). If the back-end is stalled, the count for this event is not incremented even when uops were not delivered, because the back-end would not have been able to accept them. This event is used in determining the front-end bound category of the top-down pipeline slots characterization.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "4",
"EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the cycles attributed to a switch from the Decoded Stream Buffer (DSB), which holds decoded instructions, to the legacy decode pipeline. It excludes cycles when the back-end cannot accept new micro-ops. The penalty for these switches is potentially several cycles of instruction starvation, where no micro-ops are delivered to the back-end.",
- "EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "BriefDescription": "Cycles when 1 or more uops were delivered to the by the front end.",
+ "CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_GE_1_UOP_DELIV.CORE",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xAC",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DSB_FILL.OTHER_CANCEL",
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled.",
+ "CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid DSB fill not because of exceeding way limit.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xAC",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DSB_FILL.EXCEED_DSB_LINES",
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Decode Stream Buffer (DSB) fill encounter more than 3 Decode Stream Buffer (DSB) lines.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xAC",
- "Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "DSB_FILL.ALL_CANCEL",
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cases of cancelling valid Decode Stream Buffer (DSB) fill not because of exceeding way limit.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
index 78c1a987f9a2..0a6fc0136f4a 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/memory.json
@@ -1,445 +1,337 @@
[
{
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MISALIGN_MEM_REF.LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x05",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MISALIGN_MEM_REF.STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xBE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.LLC_MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
"EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from memory disambiguation, external snoops, or cross SMT-HW-thread snoop (stores) hitting load buffers. Machine clears can have a significant performance impact if they are happening frequently.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 128.",
"EventCode": "0xCD",
- "MSRValue": "0x4",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads with latency value being above 4 .",
- "TakenAlone": "1",
- "CounterHTOff": "3"
- },
- {
+ "MSRValue": "0x80",
"PEBS": "2",
- "EventCode": "0xCD",
- "MSRValue": "0x8",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "50021",
- "BriefDescription": "Loads with latency value being above 8.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 16.",
"EventCode": "0xCD",
- "MSRValue": "0x10",
- "Counter": "3",
- "UMask": "0x1",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
"SampleAfterValue": "20011",
- "BriefDescription": "Loads with latency value being above 16.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads with latency value being above 256.",
+ "EventCode": "0xCD",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
"PEBS": "2",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Loads with latency value being above 32.",
"EventCode": "0xCD",
- "MSRValue": "0x20",
- "Counter": "3",
- "UMask": "0x1",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
"SampleAfterValue": "100007",
- "BriefDescription": "Loads with latency value being above 32.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 4 .",
"EventCode": "0xCD",
- "MSRValue": "0x40",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Loads with latency value being above 64.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 512.",
"EventCode": "0xCD",
- "MSRValue": "0x80",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Loads with latency value being above 128.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 64.",
"EventCode": "0xCD",
- "MSRValue": "0x100",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Loads with latency value being above 256.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Loads with latency value being above 8.",
"EventCode": "0xCD",
- "MSRValue": "0x200",
- "Counter": "3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Loads with latency value being above 512.",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
+ "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
"EventCode": "0xCD",
- "Counter": "3",
- "UMask": "0x2",
"EventName": "MEM_TRANS_RETIRED.PRECISE_STORE",
+ "PEBS": "2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Sample stores and collect precise store operation via PEBS record. PMC3 only. (Precise Event - PEBS).",
- "PRECISE_STORE": "1",
- "TakenAlone": "1",
- "CounterHTOff": "3"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Speculative cache line split load uops dispatched to L1 cache.",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.LOADS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculative cache line split STA uops dispatched to L1 cache.",
+ "EventCode": "0x05",
+ "EventName": "MISALIGN_MEM_REF.STORES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400244",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400244",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch code reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400091",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400091",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch data reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch code reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400240",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400240",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch code reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch data reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400090",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400090",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch data reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch RFOs that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400120",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400120",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch RFOs that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3004003f7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_READS.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3004003f7",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all data/code/rfo reads (demand & prefetch) that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400122",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.ALL_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400122",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand & prefetch RFOs that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1f80408fff",
+ "PublicDescription": "This event counts any requests that miss the LLC where the data was returned from local DRAM",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand code reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts LLC replacements.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS.LOCAL_DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x6004001b3",
+ "PublicDescription": "This event counts all data requests (demand/prefetch data reads and demand data writes (RFOs) that miss the LLC where the data is returned from local DRAM",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS_LOCAL.ANY_LLC_HIT",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x17004001b3",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data writes (RFOs) that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand code reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400020",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1f80400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand data writes (RFOs) that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1f80400010",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x300400100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1f80400040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the LLC and the data returned from dram.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts all data requests (demand/prefetch data reads and demand data writes (RFOs) that miss the LLC where the data is returned from local DRAM",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) code reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x6004001b3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS.LOCAL_DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400040",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts LLC replacements.",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts any requests that miss the LLC where the data was returned from local DRAM",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1f80408fff",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.LLC_MISS_LOCAL.DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400010",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = ANY_REQUEST and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x17004001b3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DATA_IN_SOCKET.LLC_MISS_LOCAL.ANY_LLC_HIT",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400020",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = DATA_IN_SOCKET and RESPONSE = LLC_MISS_LOCAL and SNOOP = ANY_LLC_HIT",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) code reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1f80400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_IFETCH.LLC_MISS_LOCAL.DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_CODE_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400200",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = DEMAND_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1f80400010",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_DATA_RD.LLC_MISS_LOCAL.DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_DATA_RD.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400080",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = PF_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the LLC and the data returned from dram.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1f80400040",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
- "EventName": "OFFCORE_RESPONSE.PF_IFETCH.LLC_MISS_LOCAL.DRAM",
+ "EventName": "OFFCORE_RESPONSE.PF_LLC_RFO.LLC_MISS.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x300400100",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = PF_RFO and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1f80400080",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L_DATA_RD.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1f80400080",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = PF_LLC_DATA_RD and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1f80400200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Offcore": "1",
"EventName": "OFFCORE_RESPONSE.PF_L_IFETCH.LLC_MISS_LOCAL.DRAM",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1f80400200",
"SampleAfterValue": "100003",
- "BriefDescription": "REQUEST = PF_LLC_IFETCH and RESPONSE = LLC_MISS_LOCAL and SNOOP = DRAM",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of any page walk that had a miss in LLC. Does not necessary cause a SUSPEND.",
+ "EventCode": "0xBE",
+ "EventName": "PAGE_WALKS.LLC_MISS",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/other.json b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
index 874eb40a2e0f..9f96121baef8 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/other.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/other.json
@@ -1,58 +1,46 @@
[
{
- "EventCode": "0x17",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Valid instructions written to IQ per cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x4E",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "HW_PRE_REQ.DL1_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPL_CYCLES.RING0",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when the thread is in ring 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
+ "CounterMask": "1",
"EdgeDetect": "1",
+ "EventCode": "0x5C",
"EventName": "CPL_CYCLES.RING0_TRANS",
"SampleAfterValue": "100007",
- "BriefDescription": "Number of intervals between processor halts while thread is in ring 0.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
"EventCode": "0x5C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CPL_CYCLES.RING123",
"SampleAfterValue": "2000003",
- "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Hardware Prefetch requests that miss the L1D cache. This accounts for both L1 streamer and IP-based (IPP) HW prefetchers. A request is being counted each time it access the cache & miss it, including if a block is applicable or if hit the Fill Buffer for .",
+ "EventCode": "0x4E",
+ "EventName": "HW_PRE_REQ.DL1_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Valid instructions written to IQ per cycle.",
+ "EventCode": "0x17",
+ "EventName": "INSTS_WRITTEN_TO_IQ.INSTS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
"EventCode": "0x63",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
index b7150f65f16d..ecaf94ccc9c7 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/pipeline.json
@@ -1,1226 +1,963 @@
[
{
- "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "Fixed counter 2",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
+ "EventCode": "0xB6",
+ "EventName": "AGU_BYPASS_CANCEL.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
- "Counter": "Fixed counter 0",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 0"
+ "BriefDescription": "Divide operations executed.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV",
+ "PublicDescription": "This event counts the number of the divide operations executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "BriefDescription": "Cycles when divider is busy executing divide operations.",
+ "EventCode": "0x14",
+ "EventName": "ARITH.FPU_DIV_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state.",
- "CounterHTOff": "Fixed counter 1"
+ "UMask": "0x1"
},
{
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 1"
+ "BriefDescription": "Speculative and retired branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS.DATA_UNKNOWN",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceeding smaller uncompleted store. See the table of not supported store forwards in the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd0"
},
{
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "LD_BLOCKS.ALL_BLOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired indirect return branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
},
{
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired macro-conditional branches.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "EdgeDetect": "1",
- "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of occurences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired direct near calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "INT_MISC.RAT_STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0x84"
},
{
- "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired indirect calls.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
+ "EventCode": "0x88",
+ "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x88"
},
{
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.FPU_DIV_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divider is busy executing divide operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of the divide operations executed.",
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "ARITH.FPU_DIV",
- "SampleAfterValue": "100003",
- "BriefDescription": "Divide operations executed.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Return instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "PublicDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate)",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired mispredicted direct near calls.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd0"
},
{
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LOAD_HIT_PRE.HW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative mispredicted indirect branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
},
{
- "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual.",
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
},
{
- "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel\u00ae 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
},
{
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
+ "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x90"
+ },
+ {
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
+ "SampleAfterValue": "200003",
+ "UMask": "0x84"
+ },
+ {
+ "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0xa0"
+ },
+ {
+ "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
+ "SampleAfterValue": "200003",
+ "UMask": "0x88"
+ },
+ {
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Direct and indirect mispredicted near call instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Mispredicted not taken branch instructions retired.(Precise Event - PEBS).",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Mispredicted taken branch instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Multiply packed/scalar single precision uops allocated.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x5B",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with either free list is empty.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5B",
- "Counter": "0,1,2,3",
- "UMask": "0xf",
- "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls2 control structures full for physical registers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5B",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "RESOURCE_STALLS2.BOB_FULL",
+ "BriefDescription": "Count XClk pulses when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x5B",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "EventName": "RESOURCE_STALLS2.OOO_RSRC",
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "This event counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls out of order resources full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
- "EventCode": "0x5E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "BriefDescription": "Reference cycles when the thread is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "AnyThread": "1",
+ "BriefDescription": "Reference cycles when the at least one thread on the physical core is unhalted (counts at 100 MHz rate).",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
+ "BriefDescription": "Core cycles when the thread is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "This event counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ILD_STALL.IQ_FULL",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Stall cycles because IQ is full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_INST_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Thread cycles when thread is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_INST_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_JUMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired macro-conditional branch instructions excluding calls and indirects.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect branches with return mnemonic.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_INST_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
+ "CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x6"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_INST_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x5"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_INST_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Stall cycles because IQ is full.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.IQ_FULL",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_JMP",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired macro-unconditional branches excluding calls and indirects.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xc8",
- "EventName": "BR_INST_EXEC.ALL_INDIRECT_NEAR_RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired indirect return branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_INST_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x88",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_INST_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) external stall is sent to Instruction Decode Queue (IDQ) for the thread.",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RAT_STALL_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "BR_MISP_EXEC.NONTAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Not taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of cycles waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "BR_MISP_EXEC.TAKEN_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x84",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of occurrences waiting for the checkpoints in Resource Allocation Table (RAT) to be recovered after Nuke due to all other cases except JEClear (e.g. whenever a ucode assist is needed like SSE exception, memory disambiguation, etc...).",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_STALLS_COUNT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x88",
- "EventName": "BR_MISP_EXEC.TAKEN_RETURN_NEAR",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect branches with return mnemonic.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of cases where any load ends up with a valid block-code written to the load buffer (including blocks due to Memory Order Buffer (MOB), Data Cache Unit (DCU), TLB, but load has no DCU miss).",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL_BLOCK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0x90",
- "EventName": "BR_MISP_EXEC.TAKEN_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Loads delayed due to SB blocks, preceding store operations with known addresses but unknown data.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xa0",
- "EventName": "BR_MISP_EXEC.TAKEN_INDIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Taken speculative and retired mispredicted indirect calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "This event counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "BR_MISP_EXEC.ALL_CONDITIONAL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cases when loads get true Block-on-Store blocking code preventing store forwarding.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "This event counts loads that followed a store to the same address, where the data could not be forwarded inside the pipeline from the store to the load. The most common reason why store forwarding would be blocked is when a load's address range overlaps with a preceding smaller uncompleted store. See the table of not supported store forwards in the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual. The penalty for blocked store forwarding is that the load must wait for the store to complete before it can be issued.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "BR_MISP_EXEC.ALL_INDIRECT_JUMP_NON_CALL_RET",
- "SampleAfterValue": "200003",
- "BriefDescription": "Mispredicted indirect branches excluding calls and returns.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "False dependencies in MOB due to partial compare.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Aliasing occurs when a load is issued after a store and their memory addresses are offset by 4K. This event counts the number of loads that aliased with a preceding store, resulting in an extended address check in the pipeline. The enhanced address check typically has a performance penalty of 5 cycles.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xd0",
- "EventName": "BR_MISP_EXEC.ALL_DIRECT_NEAR_CALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted direct near calls.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "This event counts the number of times that load operations are temporarily blocked because of older stores, with addresses that are not yet known. A load operation may incur more than one block of this type.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ALL_STA_BLOCK",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0x89",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Speculative and retired mispredicted macro conditional branches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for hardware prefetch.",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.HW_PF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not software-prefetch load dispatches that hit FB allocated for software prefetch.",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MASKMOV",
+ "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired instructions experiencing ITLB misses.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "BriefDescription": "Increments the number of flags-merge uops in flight each cycle.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
+ "BriefDescription": "Performance sensitive flags-merging uops added by Sandy Bridge u-arch.",
+ "CounterMask": "1",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.FLAGS_MERGE_UOP_CYCLES",
+ "PublicDescription": "This event counts the number of cycles spent executing performance-sensitive flags-merging uops. For example, shift CL (merge_arith_flags). For more details, See the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "BriefDescription": "Multiply packed/scalar single precision uops allocated.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.MUL_SINGLE_UOP",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "AnyThread": "1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
+ "BriefDescription": "Cycles with at least one slow LEA uop being allocated.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.SLOW_LEA_WINDOW",
+ "PublicDescription": "This event counts the number of cycles with at least one slow LEA uop being allocated. A uop is generally considered as slow LEA if it has three sources (for example, two sources and immediate) regardless of whether it is a result of LEA instruction or not. Examples of the slow LEA uop are or uops with base, index, and offset source operands using base and index reqisters, where base is EBR/RBP/R13, using RIP relative or 16-bit addressing modes. See the Intel(R) 64 and IA-32 Architectures Optimization Reference Manual for more details about slow LEA instructions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Resource-related stall cycles.",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "RESOURCE_STALLS.ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "RESOURCE_STALLS.LB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the cycles of stall due to lack of load buffers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RESOURCE_STALLS.RS",
+ "EventName": "RESOURCE_STALLS.LB_SB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xa"
},
{
+ "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "EventName": "RESOURCE_STALLS.MEM_RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0xa",
- "EventName": "RESOURCE_STALLS.LB_SB",
+ "EventName": "RESOURCE_STALLS.OOO_RSRC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to load or store buffers all being in use.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf0"
},
{
+ "BriefDescription": "Cycles stalled due to re-order buffer full.",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "RESOURCE_STALLS.MEM_RS",
+ "EventName": "RESOURCE_STALLS.ROB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to memory buffers or Reservation Station (RS) being fully utilized.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Cycles stalled due to no eligible RS entry available.",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RESOURCE_STALLS.ROB",
+ "EventName": "RESOURCE_STALLS.RS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to re-order buffer full.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
"EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0xf0",
- "EventName": "RESOURCE_STALLS.OOO_RSRC",
+ "EventName": "RESOURCE_STALLS.SB",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource stalls due to Rob being full, FCSW, MXCSR and OTHER.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_PENDING",
+ "BriefDescription": "Cycles with either free list is empty.",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.ALL_FL_EMPTY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x2",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_PENDING",
+ "BriefDescription": "Resource stalls2 control structures full for physical registers.",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.ALL_PRF_CONTROL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread, increment by 1. Note this is in DCU and connected to Umask 1. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "2",
- "CounterHTOff": "2"
+ "UMask": "0xf"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.CYCLES_NO_DISPATCH",
+ "BriefDescription": "Cycles when Allocator is stalled if BOB is full and new branch needs it.",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.BOB_FULL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was no dispatch for this thread, increment by 1. Note this is connect to Umask 2. No dispatch can be deduced from the UOPS_EXECUTED event.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_PENDING",
+ "BriefDescription": "Resource stalls out of order resources full.",
+ "EventCode": "0x5B",
+ "EventName": "RESOURCE_STALLS2.OOO_RSRC",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a MLC-miss pending demand load and no uops dispatched on this thread (i.e. Non-completed valid SQ entry allocated for demand load and waiting for Uncore), increment by 1. Note this is in MLC and connected to Umask 0 and 2.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x4f"
},
{
- "EventCode": "0xA3",
- "Counter": "2",
- "UMask": "0x6",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_PENDING",
+ "BriefDescription": "Count cases of saving new LBR.",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Each cycle there was a miss-pending demand load this thread and no uops dispatched, increment by 1. Note this is in DCU and connected to Umask 1 and 2. Miss Pending demand load should be deduced by OR-ing increment bits of DCACHE_MISS_PEND.PENDING.",
- "CounterMask": "6",
- "CounterHTOff": "2"
+ "UMask": "0x20"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched per thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Uops dispatched from any thread.",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "UOPS_DISPATCHED.CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Uops dispatched from any thread.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Uops dispatched per thread.",
"EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "EventName": "UOPS_DISPATCHED.THREAD",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 0.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 1.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "AGU_BYPASS_CANCEL.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts executed load operations with all the following traits: 1. addressing of the format [base + offset], 2. the offset is between 1 and 2047, 3. the address specified in the base register is in one page and the address [base+offset] is in an.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "PEBS": "2",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 2.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired. (Precise Event - PEBS).",
- "TakenAlone": "1",
- "CounterHTOff": "1"
+ "UMask": "0xc"
},
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OTHER_ASSISTS.ITLB_MISS_RETIRED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired instructions experiencing ITLB misses.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when load or STA uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of micro-ops retired. (Precise Event)",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.ALL",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when load or STA uops are dispatched to port 3.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Actually retired uops. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x30"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 4.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "BriefDescription": "Cycles per thread when uops are dispatched to port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization. (Precise Event - PEBS)",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "AnyThread": "1",
+ "BriefDescription": "Cycles per core when uops are dispatched to port 5.",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5_CORE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0xc3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "This event is incremented when self-modifying code (SMC) is detected, which causes a machine clear. Machine clears can have a significant performance impact if they are happening frequently.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Maskmov false fault - counts number of time ucode passes through Maskmov flow due to instruction's mask being 0 while the flow was completed without raising a fault.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "MACHINE_CLEARS.MASKMOV",
- "SampleAfterValue": "100003",
- "BriefDescription": "This event counts the number of executed Intel AVX masked load operations that refer to an illegal address range with the mask bits set to 0.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_INST_RETIRED.NEAR_CALL_R3",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect macro near call instructions retired (captured in ring 3). (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "2",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Not taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Far branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS).",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "This event counts the number of Uops issued by the front-end of the pipeilne to the back-end.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for all threads.",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread.",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Actually retired uops. (Precise Event - PEBS).",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
"PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect mispredicted near call instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PEBS": "2",
- "PublicDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS)",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "This event counts the number of micro-ops retired. (Precise Event)",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "BR_MISP_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted not taken branch instructions retired.(Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.CORE_STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retirement slots used. (Precise Event - PEBS).",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
"PEBS": "1",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted taken branch instructions retired. (Precise Event - PEBS).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "PublicDescription": "This event counts the number of retirement slots used each cycle. There are potentially 4 slots that can be used each cycle - meaning, 4 micro-ops or 4 instructions could retire each cycle. This event is used in determining the 'Retiring' category of the Top-Down pipeline slots characterization. (Precise Event - PEBS)",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Count cases of saving new LBR.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
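Not part of the patch: a minimal sketch of how the fields in the event entries above (EventCode, UMask, CounterMask, Invert, EdgeDetect, AnyThread) translate into a raw perf event string, assuming the standard Intel core-PMU format terms (event, umask, cmask, inv, edge, any) exposed under /sys/bus/event_source/devices/cpu/format. The helper name is illustrative; the sample entry is copied from the RS_EVENTS.EMPTY_END record in this file.

#!/usr/bin/env python3
# Sketch only: build a 'perf stat -e' string from one pmu-events JSON entry,
# assuming the usual Intel core-PMU format fields (event, umask, cmask, inv,
# edge, any). Adjust the term names for PMUs with a different format layout.

def to_perf_event(entry: dict, pmu: str = "cpu") -> str:
    terms = []
    if "EventCode" in entry:
        terms.append(f"event={entry['EventCode']}")
    if "UMask" in entry:
        terms.append(f"umask={entry['UMask']}")
    if "CounterMask" in entry:
        terms.append(f"cmask={entry['CounterMask']}")
    if entry.get("Invert") == "1":
        terms.append("inv")
    if entry.get("EdgeDetect") == "1":
        terms.append("edge")
    if entry.get("AnyThread") == "1":
        terms.append("any")
    return f"{pmu}/{','.join(terms)},name={entry['EventName']}/"

if __name__ == "__main__":
    rs_empty_end = {
        "EventCode": "0x5E",
        "EventName": "RS_EVENTS.EMPTY_END",
        "UMask": "0x1",
        "CounterMask": "1",
        "EdgeDetect": "1",
        "Invert": "1",
    }
    # Prints: cpu/event=0x5E,umask=0x1,cmask=1,inv,edge,name=RS_EVENTS.EMPTY_END/
    print(to_perf_event(rs_empty_end))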
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
index fb2d7b8875f8..4a99fe515f4b 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
@@ -1,226 +1,510 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 32 * ( ICACHE.HIT + ICACHE.MISSES ) / 4 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "1 - (tma_frontend_bound + tma_bad_speculation + tma_retiring)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (( IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS ) )",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "12 * (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT + BACLEARS.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.FPU_DIV_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_UOPS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "(1 - MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS)) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "(7 * DTLB_LOAD_MISSES.STLB_HIT + DTLB_LOAD_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_UOPS_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * min(CPU_CLK_UNHALTED.THREAD, IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE) / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_512b, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
},
{
- "BriefDescription": "Total number of retired Instructions",
- "MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_mem_bandwidth"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + LSD.UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_lcp"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
},
{
"BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_DISPATCHED.THREAD / (( cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 ) if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * (FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE) + 8 * SIMD_FP_256.PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_DISPATCHED.THREAD / (cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@ / 2 if #SMT_on else cpu@UOPS_DISPATCHED.CORE\\,cmask\\=1@)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_COMP_OPS_EXE.SSE_SCALAR_SINGLE + FP_COMP_OPS_EXE.SSE_SCALAR_DOUBLE ) + 2 * FP_COMP_OPS_EXE.SSE_PACKED_DOUBLE + 4 * ( FP_COMP_OPS_EXE.SSE_PACKED_SINGLE + SIMD_FP_256.PACKED_DOUBLE ) + 8 * SIMD_FP_256.PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Average number of parallel requests to external memory",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_parallel_requests",
+ "PublicDescription": "Average number of parallel requests to external memory. Accounts for all requests"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_request_latency"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "(12 * ITLB_MISSES.STLB_HIT + ITLB_MISSES.WALK_DURATION) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: ITLB_MISSES.WALK_COMPLETED",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricConstraint": "NO_GROUP_EVENTS_SMT",
+ "MetricExpr": "MEM_LOAD_UOPS_RETIRED.LLC_HIT / (MEM_LOAD_UOPS_RETIRED.LLC_HIT + 7 * MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS) * CYCLE_ACTIVITY.STALLS_L2_PENDING / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_UOPS_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=6@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_info_dram_bw_use",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: ",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING) + RESOURCE_STALLS.SB) / (min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - (cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.CYCLES_NO_DISPATCH) + cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=1@ - (cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=3@ if tma_info_ipc > 1.8 else cpu@UOPS_DISPATCHED.THREAD\\,cmask\\=2@) - (RS_EVENTS.EMPTY_CYCLES if tma_fetch_latency > 0.1 else 0) + RESOURCE_STALLS.SB - RESOURCE_STALLS.SB - min(CPU_CLK_UNHALTED.THREAD, CYCLE_ACTIVITY.STALLS_L1D_PENDING)) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "RESOURCE_STALLS.SB / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_UOPS_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS * FP_COMP_OPS_EXE.X87 / UOPS_DISPATCHED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
}
]
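
The level-1 top-down buckets above are defined so that tma_frontend_bound, tma_bad_speculation, tma_retiring and tma_backend_bound partition the issue slots (tma_backend_bound is simply 1 minus the other three, with tma_info_slots = 4 * tma_info_core_clks). A minimal Python sketch of the non-SMT arithmetic follows; the counter values are hypothetical and this is only an illustration of the MetricExpr formulas, not how perf's metric engine is implemented.

  # Sketch of the level-1 TMA arithmetic from the MetricExpr fields above
  # (non-SMT case, pipeline width 4). Counter values below are made up.

  def topdown_l1(core_clks, idq_uops_not_delivered_core,
                 uops_issued_any, uops_retired_retire_slots,
                 int_misc_recovery_cycles):
      slots = 4 * core_clks                      # tma_info_slots
      frontend_bound = idq_uops_not_delivered_core / slots
      bad_speculation = (uops_issued_any - uops_retired_retire_slots
                         + 4 * int_misc_recovery_cycles) / slots
      retiring = uops_retired_retire_slots / slots
      backend_bound = 1 - (frontend_bound + bad_speculation + retiring)
      return frontend_bound, bad_speculation, retiring, backend_bound

  if __name__ == "__main__":
      # Hypothetical sample: 1e9 core clocks, i.e. 4e9 slots.
      print(topdown_l1(1e9, 6e8, 3.0e9, 2.8e9, 2.5e7))
      # -> (0.15, 0.075, 0.7, 0.075)
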
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json
new file mode 100644
index 000000000000..be9a3ed1a940
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-cache.json
@@ -0,0 +1,202 @@
+[
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
+ "PerPkg": "1",
+ "UMask": "0x86",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
+ "PerPkg": "1",
+ "UMask": "0x8f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_ES",
+ "PerPkg": "1",
+ "UMask": "0x46",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_I",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_M",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup external snoop request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_MESI",
+ "PerPkg": "1",
+ "UMask": "0x4f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
+ "PerPkg": "1",
+ "UMask": "0x16",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_M",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
+ "PerPkg": "1",
+ "UMask": "0x1f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in I-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_I",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
+ "PerPkg": "1",
+ "UMask": "0x2f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "An external snoop misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EXTERNAL",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ }
+]
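
Reading the new UNC_CBO_XSNP_RESPONSE umasks against the single-bit definitions in the removed uncore.json further down, each combined event appears to OR one response bit (MISS/HIT/HITM) with one origin-filter bit (XCORE/EXTERNAL/EVICTION); the UNC_CBO_CACHE_LOOKUP umasks compose the same way from line-state bits and a request filter. A quick self-check of that reading (an assumption drawn from the two files, not something the patch states):

  # Hypothetical self-check: combined XSNP_RESPONSE umasks are the OR of
  # a response selector and a filter bit from the old single-bit events.
  RESPONSE = {"MISS": 0x01, "HIT": 0x04, "HITM": 0x08}
  FILTER = {"EXTERNAL": 0x20, "XCORE": 0x40, "EVICTION": 0x80}

  COMBINED = {
      "HITM_EVICTION": 0x88, "HITM_EXTERNAL": 0x28, "HITM_XCORE": 0x48,
      "HIT_EVICTION": 0x84, "HIT_EXTERNAL": 0x24, "HIT_XCORE": 0x44,
      "MISS_EVICTION": 0x81, "MISS_EXTERNAL": 0x21, "MISS_XCORE": 0x41,
  }

  for name, umask in COMBINED.items():
      resp, filt = name.split("_", 1)
      assert umask == RESPONSE[resp] | FILTER[filt], name
  print("all combined UNC_CBO_XSNP_RESPONSE umasks = response | filter")
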
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json
new file mode 100644
index 000000000000..c3252c094a9c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/uncore-interconnect.json
@@ -0,0 +1,75 @@
+[
+ {
+ "BriefDescription": "Cycles weighted by number of requests pending in Coherency Tracker.",
+ "EventCode": "0x83",
+ "EventName": "UNC_ARB_COH_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of requests allocated in Coherency Tracker.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts cycles weighted by the number of requests waiting for data returning from the memory controller. Accounts for coherent and non-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least half of the requests outstanding are waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "CounterMask": "10",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_OVER_HALF_FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "CounterMask": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts the number of LLC evictions allocated.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.EVICTIONS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Counts the number of allocated write entries, include full, partial, and LLC evictions.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "ARB"
+ }
+]
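
These ARB events are what the tma_info_mem_request_latency and tma_info_mem_parallel_requests metrics in sandybridge.json above divide: occupancy over requests gives the average request latency in uncore clocks, and occupancy over cycles-with-any-request gives the average number of requests in flight. A small sketch of that arithmetic with hypothetical counts:

  # Sketch of the derived metrics built on the ARB events above; the
  # counts are invented for illustration only.

  def arb_request_stats(trk_occupancy, trk_requests, cycles_with_any_request):
      avg_latency_uclk = trk_occupancy / trk_requests           # uncore clocks
      avg_parallel_reqs = trk_occupancy / cycles_with_any_request
      return avg_latency_uclk, avg_parallel_reqs

  if __name__ == "__main__":
      # e.g. 6e8 occupancy-cycles, 1e7 requests, 1.5e8 busy cycles
      print(arb_request_stats(6e8, 1e7, 1.5e8))
      # -> (60.0, 4.0)
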
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/uncore.json b/tools/perf/pmu-events/arch/x86/sandybridge/uncore.json
deleted file mode 100644
index 42c70eed05a2..000000000000
--- a/tools/perf/pmu-events/arch/x86/sandybridge/uncore.json
+++ /dev/null
@@ -1,314 +0,0 @@
-[
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x01",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS",
- "BriefDescription": "A snoop misses in some processor core.",
- "PublicDescription": "A snoop misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x02",
- "EventName": "UNC_CBO_XSNP_RESPONSE.INVAL",
- "BriefDescription": "A snoop invalidates a non-modified line in some processor core.",
- "PublicDescription": "A snoop invalidates a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x04",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT",
- "BriefDescription": "A snoop hits a non-modified line in some processor core.",
- "PublicDescription": "A snoop hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x08",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM",
- "BriefDescription": "A snoop hits a modified line in some processor core.",
- "PublicDescription": "A snoop hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x10",
- "EventName": "UNC_CBO_XSNP_RESPONSE.INVAL_M",
- "BriefDescription": "A snoop invalidates a modified line in some processor core.",
- "PublicDescription": "A snoop invalidates a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x20",
- "EventName": "UNC_CBO_XSNP_RESPONSE.EXTERNAL_FILTER",
- "BriefDescription": "Filter on cross-core snoops initiated by this Cbox due to external snoop request.",
- "PublicDescription": "Filter on cross-core snoops initiated by this Cbox due to external snoop request.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x40",
- "EventName": "UNC_CBO_XSNP_RESPONSE.XCORE_FILTER",
- "BriefDescription": "Filter on cross-core snoops initiated by this Cbox due to processor core memory request.",
- "PublicDescription": "Filter on cross-core snoops initiated by this Cbox due to processor core memory request.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x80",
- "EventName": "UNC_CBO_XSNP_RESPONSE.EVICTION_FILTER",
- "BriefDescription": "Filter on cross-core snoops initiated by this Cbox due to LLC eviction.",
- "PublicDescription": "Filter on cross-core snoops initiated by this Cbox due to LLC eviction.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x01",
- "EventName": "UNC_CBO_CACHE_LOOKUP.M",
- "BriefDescription": "LLC lookup request that access cache and found line in M-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x02",
- "EventName": "UNC_CBO_CACHE_LOOKUP.E",
- "BriefDescription": "LLC lookup request that access cache and found line in E-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in E-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x04",
- "EventName": "UNC_CBO_CACHE_LOOKUP.S",
- "BriefDescription": "LLC lookup request that access cache and found line in S-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x08",
- "EventName": "UNC_CBO_CACHE_LOOKUP.I",
- "BriefDescription": "LLC lookup request that access cache and found line in I-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x10",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_FILTER",
- "BriefDescription": "Filter on processor core initiated cacheable read requests.",
- "PublicDescription": "Filter on processor core initiated cacheable read requests.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x20",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_FILTER",
- "BriefDescription": "Filter on processor core initiated cacheable write requests.",
- "PublicDescription": "Filter on processor core initiated cacheable write requests.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x40",
- "EventName": "UNC_CBO_CACHE_LOOKUP.EXTSNP_FILTER",
- "BriefDescription": "Filter on external snoop requests.",
- "PublicDescription": "Filter on external snoop requests.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x80",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_REQUEST_FILTER",
- "BriefDescription": "Filter on any IRQ or IPQ initiated requests including uncacheable, non-coherent requests.",
- "PublicDescription": "Filter on any IRQ or IPQ initiated requests including uncacheable, non-coherent requests.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Counts cycles weighted by the number of requests waiting for data returning from the memory controller. Accounts for coherent and non-coherent requests initiated by IA cores, processor graphic units, or LLC.",
- "PublicDescription": "Counts cycles weighted by the number of requests waiting for data returning from the memory controller. Accounts for coherent and non-coherent requests initiated by IA cores, processor graphic units, or LLC.",
- "Counter": "0",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x81",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
- "BriefDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
- "PublicDescription": "Counts the number of coherent and in-coherent requests initiated by IA cores, processor graphic units, or LLC.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x81",
- "UMask": "0x20",
- "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
- "BriefDescription": "Counts the number of allocated write entries, include full, partial, and LLC evictions.",
- "PublicDescription": "Counts the number of allocated write entries, include full, partial, and LLC evictions.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x81",
- "UMask": "0x80",
- "EventName": "UNC_ARB_TRK_REQUESTS.EVICTIONS",
- "BriefDescription": "Counts the number of LLC evictions allocated.",
- "PublicDescription": "Counts the number of LLC evictions allocated.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x83",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Cycles weighted by number of requests pending in Coherency Tracker.",
- "PublicDescription": "Cycles weighted by number of requests pending in Coherency Tracker.",
- "Counter": "0",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x84",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
- "BriefDescription": "Number of requests allocated in Coherency Tracker.",
- "PublicDescription": "Number of requests allocated in Coherency Tracker.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
- "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "Counter": "0,1",
- "CounterMask": "1",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_OVER_HALF_FULL",
- "BriefDescription": "Cycles with at least half of the requests outstanding are waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "PublicDescription": "Cycles with at least half of the requests outstanding are waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "Counter": "0,1",
- "CounterMask": "10",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "ARB",
- "EventCode": "0x0",
- "UMask": "0x01",
- "EventName": "UNC_CLOCK.SOCKET",
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Counter": "Fixed",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x06",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ES",
- "BriefDescription": "LLC lookup request that access cache and found line in E-state or S-state.",
- "PublicDescription": "LLC lookup request that access cache and found line in E-state or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
index b8eccce5d75d..fa08d355b97e 100644
--- a/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/sandybridge/virtual-memory.json
@@ -1,149 +1,117 @@
[
{
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
"SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
"BriefDescription": "Load misses at all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "DTLB_LOAD_MISSES.WALK_DURATION",
+ "PublicDescription": "This event counts cycles when the page miss handler (PMH) is servicing page walks caused by DTLB load misses.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts load operations that miss the first DTLB level but hit the second and do not cause any page walks. The penalty in this case is approximately 7 cycles.",
- "EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Load operations that miss the first DTLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
"EventName": "DTLB_STORE_MISSES.WALK_DURATION",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Store operations that miss the first TLB level but hit the second and do not cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
"EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "EPT.WALK_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycle count for an Extended Page table walk. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Misses at all ITLB levels that cause page walks.",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"SampleAfterValue": "100003",
- "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+ "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_DURATION",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when PMH is busy with page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Misses in all ITLB levels that cause completed page walks.",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
"SampleAfterValue": "100003",
- "BriefDescription": "Operations that miss the first ITLB level but hit the second and do not cause any page walks.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when PMH is busy with page walks.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_DURATION",
+ "PublicDescription": "This event count cycles when Page Miss Handler (PMH) is servicing page walks caused by ITLB misses.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries.",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "STLB flush attempts.",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "TLB_FLUSH.STLB_ANY",
"SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-] \ No newline at end of file
+]
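
The *_MISSES.WALK_DURATION and *_MISSES.WALK_COMPLETED events in the hunk above count PMH-busy cycles and completed page walks respectively, so their ratio gives an average page-walk cost. A minimal sketch under that assumption follows; the counter values are placeholders, not measured data.

    # Mean page-miss-handler cycles per completed DTLB load walk.

    def avg_walk_cycles(walk_duration: int, walk_completed: int) -> float:
        return walk_duration / walk_completed if walk_completed else 0.0

    dtlb_load = {
        "DTLB_LOAD_MISSES.WALK_DURATION": 900_000,   # event 0x08, umask 0x4
        "DTLB_LOAD_MISSES.WALK_COMPLETED": 30_000,   # event 0x08, umask 0x2
    }
    print(avg_walk_cycles(dtlb_load["DTLB_LOAD_MISSES.WALK_DURATION"],
                          dtlb_load["DTLB_LOAD_MISSES.WALK_COMPLETED"]))  # ~30 cycles/walk
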
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
new file mode 100644
index 000000000000..9606e76b98d6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json
@@ -0,0 +1,906 @@
+[
+ {
+ "BriefDescription": "L1D.HWPF_MISS",
+ "EventCode": "0x51",
+ "EventName": "L1D.HWPF_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event L1D_PEND_MISS.L2_STALLS",
+ "Deprecated": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALLS",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0x25",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "L2_LINES_OUT.NON_SILENT",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cache lines that have been L2 hardware prefetched but not used by demand accesses",
+ "EventCode": "0x26",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
+ "PublicDescription": "Counts the number of cache lines that have been prefetched by the L2 hardware prefetcher but not used by demand access when evicted from the L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_RQSTS.REFERENCES]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.REFERENCES]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_RQSTS.MISS]",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_RQSTS.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "Demand requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Counts demand requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x27"
+ },
+ {
+ "BriefDescription": "Demand requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Counts demand requests to L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe7"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.ALL_HWPF",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_HWPF",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf0"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "L2_RQSTS.HWPF_MISS",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.HWPF_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache. [This event is alias to L2_REQUEST.MISS]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.MISS]",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache [This event is alias to L2_REQUEST.ALL]",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. [This event is alias to L2_REQUEST.ALL]",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that refer to L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Completed demand load uops that miss the L1 d-cache.",
+ "EventCode": "0x43",
+ "EventName": "MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "PublicDescription": "Number of completed demand load requests that missed the L1 data cache including shadow misses (FB hits, merge to an ongoing L1D miss)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "Data_LA": "1",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "EventCode": "0xd3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM",
+ "PublicDescription": "Counts retired load instructions with remote Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Data_LA": "1",
+ "EventCode": "0xd4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions with at least one load to uncacheable memory-type, or at least one cache-line split locked access (Bus Lock).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source where the data request missed all caches.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.LOCAL_PMM",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with local Intel(R) Optane(TM) DC persistent memory as the data source and the data request missed L3.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "MEM_STORE_RETIRED.L2_HIT",
+ "EventCode": "0x44",
+ "EventName": "MEM_STORE_RETIRED.L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired memory uops for any access",
+ "EventCode": "0xe5",
+ "EventName": "MEM_UOP_RETIRED.ANY",
+ "PublicDescription": "Number of retired micro-operations (uops) for load or store memory accesses",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop that hit in another core, which did not forward the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1030000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x830000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80082380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop that hit in another core, which did not forward the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that resulted in a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop was sent and data was returned (Modified or Not Modified).",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1830004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit a modified line in another core's caches which forwarded the data.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1030004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by a cache on a remote socket where a snoop hit in another core's caches which forwarded the unmodified data to the requesting core.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_CACHE.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x830004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that hit a modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that either hit a non-modified line in a distant L3 Cache or were snooped from a distant core's L1/L2 caches on this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_CACHE.HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x808004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO), hardware prefetch RFOs (which bring data to L2), and software prefetches for exclusive ownership (PREFETCHW) that hit to a (M)odified cacheline in the L3 or snoop filter.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.RFO_TO_CORE.L3_HIT_M",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F80040022",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that hit in the L3 or were snooped from another core's caches on the same socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 outstanding demand data read request is pending.",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "CounterMask": "1",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DATA_RD",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of outstanding demand data read requests pending.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of outstanding demand data read requests pending. Requests are considered outstanding from the time they miss the core's L2 cache until the transaction completion message is sent to the requestor.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts bus locks, accounts for cache line split locks and UC locks.",
+ "EventCode": "0x2c",
+ "EventName": "SQ_MISC.BUS_LOCK",
+ "PublicDescription": "Counts the more expensive bus lock needed to enforce cache coherency for certain memory accesses that need to be done atomically. Can be created by issuing an atomic instruction (via the LOCK prefix) which causes a cache line split or accesses uncacheable memory.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x40",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ }
+]
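
Each of the per-arch files added here is a flat JSON array of event objects keyed by fields such as EventName, EventCode and UMask; perf's jevents generator turns them into C tables at build time. As a hedged illustration (the path and the lookup below are assumptions, not part of the patch), a script could load one of these files and build a name-to-encoding map like this:

    import json

    def load_events(path: str) -> dict[str, tuple[str, str]]:
        """Map EventName -> (EventCode, UMask) for one pmu-events JSON file."""
        with open(path) as f:
            events = json.load(f)          # each file is a flat JSON array
        return {e["EventName"]: (e["EventCode"], e.get("UMask", "0x0"))
                for e in events}

    # ev = load_events("tools/perf/pmu-events/arch/x86/sapphirerapids/cache.json")
    # print(ev["MEM_LOAD_RETIRED.L3_MISS"])   # ('0xd1', '0x20')
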
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
new file mode 100644
index 000000000000..4a9d211e9d4f
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json
@@ -0,0 +1,193 @@
+[
+ {
+ "BriefDescription": "ARITH.FPDIV_ACTIVE",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FPDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "ASSISTS.SSE_AVX_MIX",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.SSE_AVX_MIX",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5",
+ "EventCode": "0xb3",
+ "EventName": "FP_ARITH_DISPATCHED.PORT_5",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x60"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "PublicDescription": "Number of any Vector retired FP arithmetic instructions. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.128B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.256B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.512B_PACKED_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of all Scalar Half-Precision FP arithmetic instructions(1) retired - regular and complex.",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.SCALAR",
+ "PublicDescription": "FP_ARITH_INST_RETIRED2.SCALAR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.SCALAR_HALF",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of all Vector (also called packed) Half-Precision FP arithmetic instructions(1) retired.",
+ "EventCode": "0xcf",
+ "EventName": "FP_ARITH_INST_RETIRED2.VECTOR",
+ "PublicDescription": "FP_ARITH_INST_RETIRED2.VECTOR",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1c"
+ }
+]
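
Aside (not part of the patch): the FP_ARITH_INST_RETIRED descriptions above state the per-count element widths explicitly (1 FLOP per scalar count, 2 per 128-bit packed double, 4 per 128-bit packed single or 256-bit packed double, 8 per 256-bit packed single or 512-bit packed double, 16 per 512-bit packed single) and note that the FMA/DPP double counting is already reflected in the hardware counts. A minimal C sketch of turning those counts into a FLOP total under that assumption; the counts used are made up.

#include <stdio.h>
#include <stdint.h>

/* Multipliers come straight from the event descriptions above. */
static uint64_t flops_from_counts(uint64_t scalar,
                                  uint64_t pd128, uint64_t ps128,
                                  uint64_t pd256, uint64_t ps256,
                                  uint64_t pd512, uint64_t ps512)
{
	return scalar * 1 +
	       pd128 * 2 + ps128 * 4 +
	       pd256 * 4 + ps256 * 8 +
	       pd512 * 8 + ps512 * 16;
}

int main(void)
{
	/* Illustrative counts only. */
	printf("%llu FLOPs\n", (unsigned long long)
	       flops_from_counts(1000, 0, 0, 500, 0, 250, 0));
	return 0;
}
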
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
new file mode 100644
index 000000000000..860a415e5e79
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/frontend.json
@@ -0,0 +1,362 @@
+[
+ {
+ "BriefDescription": "Clears due to Unknown Branches.",
+ "EventCode": "0x60",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Number of times the front-end is resteered when it finds a branch instruction in a fetch line. This is called Unknown Branch which occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "DECODE.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the Microcode Sequencer is busy.",
+ "EventCode": "0x87",
+ "EventName": "DECODE.MS_BUSY",
+ "SampleAfterValue": "500009",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "EventCode": "0x61",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x608006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x601006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x610006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x602006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x620006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x604006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x600806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.MS_FLOWS",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.MS_FLOWS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x17",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_DATA.STALLS",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The decode pipeline works at a 32 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_TAG.STALLS",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CounterMask": "6",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CounterMask": "6",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
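
Aside (not part of the patch): IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK above is a good illustration of how CounterMask and Invert turn a per-cycle uop count into a cycle count: with CounterMask 1 the counter would increment once per cycle in which at least one issue slot went unfilled, and Invert flips the comparison so it instead increments on cycles where fewer than one slot was lost, i.e. the front end kept the back end fed. A minimal C sketch of the resulting raw encoding, assuming the standard x86 PERFEVTSEL bit positions:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK (entry above):
	 * EventCode 0x9c, UMask 0x1, CounterMask 1, Invert 1.
	 * cmask=1 alone: count cycles with >= 1 undelivered slot;
	 * inv=1: invert the comparison, so count cycles with zero
	 * undelivered slots instead.
	 */
	uint64_t config = 0x9c            /* event select     */
	                | (0x1ULL << 8)   /* unit mask        */
	                | (1ULL  << 23)   /* invert           */
	                | (1ULL  << 24);  /* counter mask = 1 */
	printf("raw config: 0x%llx\n", (unsigned long long)config);
	return 0;
}
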
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
new file mode 100644
index 000000000000..b72a36999930
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/memory.json
@@ -0,0 +1,341 @@
+[
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "2",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "3",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "CounterMask": "5",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "CounterMask": "9",
+ "EventCode": "0x47",
+ "EventName": "MEMORY_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired memory store access operations. A PDist event for PEBS Store Latency Facility.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.STORE_SAMPLE",
+ "PEBS": "2",
+ "PublicDescription": "Counts Retired memory accesses with at least 1 store operation. This PEBS event is the precisely-distributed (PDist) trigger covering all stores uops for sampling by the PEBS Store Latency Facility. The facility is described in Intel SDM Volume 3 section 19.9.8",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that missed the local socket's L1, L2, and L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x94002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FC04477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F04C04477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that missed the L3 Cache and were supplied by the local socket (DRAM or PMM), whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM or DRAM accesses that are controlled by the close or distant SNC Cluster. It does not count misses to the L3 which go to Local CXL Type 2 Memory or Local Non DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL_SOCKET",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70CC04477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that missed the local socket's L1, L2, and L3 caches.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x94000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline is homed locally.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data read requests that miss the L3 cache.",
+ "EventCode": "0x21",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache.",
+ "EventCode": "0x20",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "For every cycle, increments by the number of demand data read requests pending that are known to have missed the L3 cache. Note that this does not capture all elapsed cycles while requests are outstanding - only cycles from when the requests were known by the requesting core to have missed the L3 cache.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
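
Aside (not part of the patch): each MEM_TRANS_RETIRED.LOAD_LATENCY_GT_N entry above counts sampled loads whose latency exceeded N cycles, with N supplied as the MSRValue for MSR 0x3F6, so adjacent thresholds can be differenced into a latency histogram once the per-event totals have been scaled to a common sampling period. A minimal C sketch under that scaling assumption; the totals are made up.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* thr[i] = threshold in cycles, gt[i] = scaled count of loads
	 * with latency > thr[i]; gt[i] - gt[i+1] gives the number of
	 * loads whose latency fell in (thr[i], thr[i+1]].
	 */
	const unsigned thr[] = { 4, 8, 16, 32, 64, 128, 256, 512 };
	const uint64_t gt[]  = { 9000, 4000, 1500, 600, 200, 80, 20, 5 };
	const int n = sizeof(thr) / sizeof(thr[0]);

	for (int i = 0; i + 1 < n; i++)
		printf("latency in (%u, %u] cycles: %llu loads\n",
		       thr[i], thr[i + 1],
		       (unsigned long long)(gt[i] - gt[i + 1]));
	printf("latency > %u cycles: %llu loads\n",
	       thr[n - 1], (unsigned long long)gt[n - 1]);
	return 0;
}
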
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
new file mode 100644
index 000000000000..31b6be9fb8c7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/other.json
@@ -0,0 +1,342 @@
+[
+ {
+ "BriefDescription": "ASSISTS.PAGE_FAULT",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.PAGE_FAULT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the cycles where the AMX (Advance Matrix Extension) unit is busy performing an operation.",
+ "EventCode": "0xb7",
+ "EventName": "EXE.AMX_BUSY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_CODE_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM attached to another socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by PMM attached to another socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_DATA_RD.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.DEMAND_RFO.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts data load hardware prefetch requests to the L1 data cache that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L1D.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches (which bring data to L2) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L2.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10070",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x12380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts hardware prefetches to the L3 only that were not supplied by the local socket's L1, L2, or L3 caches and the cacheline was homed in a remote socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.HWPF_L3.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x90002380",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts writebacks of modified cachelines and streaming stores that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.MODIFIED_WRITE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10808",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F3FFC4477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x73C004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, unless in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts only those DRAM accesses that are controlled by the close SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts DRAM accesses that are controlled by the close or distant SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x70C004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to this socket, whether or not in Sub NUMA Cluster(SNC) Mode. In SNC Mode counts PMM accesses that are controlled by the close or distant SNC Cluster.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_SOCKET_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x700C04477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the local socket's L1, L2, or L3 caches and were supplied by a remote socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F33004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM attached to another socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x730004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM or PMM attached to another socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x733004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by PMM attached to another socket.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.REMOTE_PMM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x703004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all (cacheable) data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM on a distant memory controller of this socket when the system is in SNC (sub-NUMA cluster) mode.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.READS_TO_CORE.SNC_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x708004477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts Demand RFOs, ItoM's, PREFECTHW's, Hardware RFO Prefetches to the L1/L2 and Streaming stores that likely resulted in a store to Memory (DRAM or PMM)",
+ "EventCode": "0x2A,0x2B",
+ "EventName": "OCR.WRITE_ESTIMATE.MEMORY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xFBFF80822",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread.",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS.EMPTY_COUNT",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY_COUNT",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.COUNT",
+ "Invert": "1",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event RS.EMPTY",
+ "Deprecated": "1",
+ "EventCode": "0xa5",
+ "EventName": "RS_EMPTY.CYCLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Cycles the uncore cannot take further requests",
+ "CounterMask": "1",
+ "EventCode": "0x2d",
+ "EventName": "XQ.FULL_CYCLES",
+ "PublicDescription": "number of cycles when the thread is active and the uncore cannot take any further requests (for example prefetches, loads or stores initiated by the Core that miss the L2 cache).",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
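A note on the MSRValue encodings above: under the usual off-core response (OCR) scheme, the low 16 bits of MSRValue select the request type and the upper bits select the response/supplier/snoop qualifiers, so the OCR.READS_TO_CORE.* variants share one request mask and differ only in the upper bits. The following is a minimal Python sketch, for illustration only and not part of the patch; the exact bit-field split is an assumption to be checked against Intel's OCR documentation.

    # MSRValues copied verbatim from the OCR.READS_TO_CORE.* entries above.
    READS_TO_CORE = {
        "ANY_RESPONSE": 0x3F3FFC4477,
        "DRAM": 0x73C004477,
        "LOCAL_DRAM": 0x104004477,
        "REMOTE_DRAM": 0x730004477,
        "SNC_DRAM": 0x708004477,
    }

    for name, msr_value in READS_TO_CORE.items():
        request = msr_value & 0xFFFF   # assumed: low 16 bits select the request type
        response = msr_value >> 16     # assumed: upper bits are response/supplier/snoop qualifiers
        print(f"{name:13} request={request:#x} response={response:#x}")
    # Every variant prints request=0x4477; only the response part differs.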
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
new file mode 100644
index 000000000000..72e9bdfa9f80
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json
@@ -0,0 +1,975 @@
+[
+ {
+ "BriefDescription": "AMX retired arithmetic BF16 operations.",
+ "EventCode": "0xce",
+ "EventName": "AMX_OPS_RETIRED.BF16",
+ "PublicDescription": "Number of AMX-based retired arithmetic bfloat16 (BF16) floating-point operations. Counts TDPBF16PS FP instructions. SW to use operation multiplier of 4",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "AMX retired arithmetic integer 8-bit operations.",
+ "EventCode": "0xce",
+ "EventName": "AMX_OPS_RETIRED.INT8",
+ "PublicDescription": "Number of AMX-based retired arithmetic integer operations of 8-bit width source operands. Counts TDPB[SS,UU,US,SU]D instructions. SW should use operation multiplier of 8.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.DIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.DIV_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.FPDIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.FP_DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event counts the cycles the integer divider is busy.",
+ "CounterMask": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.IDIV_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event ARITH.IDIV_ACTIVE",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb0",
+ "EventName": "ARITH.INT_DIVIDER_ACTIVE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware. Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1b"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Miss-predicted near indirect branch instructions retired (excluding returns)",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts miss-predicted near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C01",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 light-weight slower wakeup time but more power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C02",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.2 light-weight faster wakeup time but less power saving optimized state. This state can be entered via the TPAUSE or UMWAIT instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Core clocks when the thread is in the C0.1 or C0.2 or running a PAUSE in C0 ACPI state.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.C0_WAIT",
+ "PublicDescription": "Counts core clocks when the thread is in the C0.1 or C0.2 power saving optimized states (TPAUSE or UMWAIT instructions) or running the PAUSE instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x70"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.PAUSE_INST",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "5",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CounterMask": "2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x75",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "INST_RETIRED.MACRO_FUSED",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.MACRO_FUSED",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired NOP instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 instructions",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Precise instruction retired with PEBS precise-distribution",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a precise distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR++) feature to fix bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Iterations of Repeat string retired instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.REP_ITERATION",
+ "PublicDescription": "Number of iterations of Repeat (REP) string retired instructions such as MOVS, CMPS, and SCAS. Each has a byte, word, and doubleword version and string instructions can be repeated using a repetition prefix, REP, that allows their architectural execution to be repeated a number of times as specified by the RCX register. Note the number of iterations is implementation-dependent.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "INT_MISC.MBA_STALLS",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.MBA_STALLS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x7",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "EventCode": "0xad",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.128BIT",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.128BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x13"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.256BIT",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.256BIT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xac"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 128-bit vector instructions.",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_128",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 128-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "integer ADD, SUB, SAD 256-bit vector instructions.",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.ADD_256",
+ "PublicDescription": "Number of retired integer ADD/SUB (regular or horizontal), SAD 256-bit vector instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.MUL_256",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.MUL_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.SHUFFLES",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.SHUFFLES",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_128",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_128",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "INT_VEC_RETIRED.VNNI_256",
+ "EventCode": "0xe7",
+ "EventName": "INT_VEC_RETIRED.VNNI_256",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x88"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CounterMask": "6",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "LFENCE instructions retired",
+ "EventCode": "0xe0",
+ "EventName": "MISC2_RETIRED.LFENCE",
+ "PublicDescription": "number of LFENCE retired instructions",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "Number of slots in TMA method where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculations.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BAD_SPEC_SLOTS",
+ "PublicDescription": "Number of slots of TMA method that were wasted due to incorrect speculation. It covers all types of control-flow or data-related mis-speculations.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by (any type of) branch mispredictions. This event estimates number of specualtive operations that were issued but not retired as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.MEMORY_BOUND_SLOTS",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "UOPS_DECODED.DEC0_UOPS",
+ "EventCode": "0x76",
+ "EventName": "UOPS_DECODED.DEC0_UOPS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops executed on port 0",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Number of uops dispatch to execution port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops executed on port 1",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Number of uops dispatch to execution port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 2, 3 and 10",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3_10",
+ "PublicDescription": "Number of uops dispatch to execution ports 2, 3 and 10",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 4 and 9",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Number of uops dispatch to execution ports 4 and 9",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 5 and 11",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_5_11",
+ "PublicDescription": "Number of uops dispatch to execution ports 5 and 11",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Uops executed on port 6",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Number of uops dispatch to execution port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Uops executed on ports 7 and 8",
+ "EventCode": "0xb2",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Number of uops dispatch to execution ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UOPS_EXECUTED.STALLS",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "EventCode": "0xae",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with retired uop(s).",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.CYCLES",
+ "PublicDescription": "Counts cycles where at least one uop has retired.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired uops except the last uop of each instruction.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.HEAVY",
+ "PublicDescription": "Counts the number of retired micro-operations (uops) except the last uop of each instruction. An instruction that is decoded into less than two uops does not contribute to the count.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "UOPS_RETIRED.MS",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x8",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALLS",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UOPS_RETIRED.STALLS",
+ "CounterMask": "1",
+ "Deprecated": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ }
+]
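A note on the attribute fields used throughout pipeline.json above: EventCode and UMask select the event, while CounterMask, Invert and EdgeDetect correspond to the cmask, inv and edge terms of perf's raw x86 event syntax (as exposed under /sys/bus/event_source/devices/cpu/format). Below is a minimal Python sketch of that mapping, using the UOPS_EXECUTED.STALLS entry above; it is an illustration only, not part of the patch, and the exact correspondence of field names to format terms should be treated as an assumption.

    # Field values copied from the UOPS_EXECUTED.STALLS entry above.
    uops_executed_stalls = {"EventCode": 0xB1, "UMask": 0x1,
                            "CounterMask": 1, "Invert": 1}

    def to_raw_perf_event(event):
        # Assemble a cpu/.../ raw-event string from the JSON attributes;
        # term names follow /sys/bus/event_source/devices/cpu/format (assumed mapping).
        terms = [f"event={event['EventCode']:#x}", f"umask={event['UMask']:#x}"]
        if event.get("CounterMask"):
            terms.append(f"cmask={event['CounterMask']}")
        if event.get("Invert"):
            terms.append("inv=1")
        if event.get("EdgeDetect"):
            terms.append("edge=1")
        return "cpu/" + ",".join(terms) + "/"

    print(to_raw_perf_event(uops_executed_stalls))
    # -> cpu/event=0xb1,umask=0x1,cmask=1,inv=1/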
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
new file mode 100644
index 000000000000..126300b7ae77
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
@@ -0,0 +1,1675 @@
+[
+ {
+ "BriefDescription": "C1 residency percent per core",
+ "MetricExpr": "cstate_core@c1\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C1_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5_11 + UOPS_DISPATCHED.PORT_6) / (5 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the Advanced Matrix Extensions (AMX) execution engine was busy with tile (arithmetic) operations",
+ "MetricExpr": "EXE.AMX_BUSY / tma_info_core_clks",
+ "MetricGroup": "Compute;HPC;Server;TopdownL5;tma_L5_group;tma_ports_utilized_0_group",
+ "MetricName": "tma_amx_busy",
+ "MetricThreshold": "tma_amx_busy > 0.5 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * cpu@ASSISTS.ANY\\,umask\\=0x1B@ / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops as a result of handing SSE to AVX* or AVX* to SSE transition Assists.",
+ "MetricExpr": "63 * ASSISTS.SSE_AVX_MIX / tma_info_slots",
+ "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_avx_assists",
+ "MetricThreshold": "tma_avx_assists > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricExpr": "topdown\\-br\\-mispredict / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources. Sample with: FRONTEND_RETIRED.MS_FLOWS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - tma_branch_mispredicts / tma_bad_speculation) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(76 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 75.5 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "75.5 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIV_ACTIVE / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_clks - tma_pmm_bound if #has_pmem > 0 else MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_clks)",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - MEMORY_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "80 * tma_info_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "topdown\\-fetch\\-lat / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "max(0, tma_heavy_operations - tma_microcode_sequencer)",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) matrix uops fraction the CPU has retired (aggregated across all supported FP datatypes in AMX engine)",
+ "MetricExpr": "cpu@AMX_OPS_RETIRED.BF16\\,cmask\\=1@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;HPC;Pipeline;Server;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_fp_amx",
+ "MetricThreshold": "tma_fp_amx > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) matrix uops fraction the CPU has retired (aggregated across all supported FP datatypes in AMX engine). Refer to AMX_Busy and GFLOPs metrics for actual AMX utilization and FP performance, resp.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector + tma_fp_amx",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists",
+ "MetricExpr": "30 * ASSISTS.FP / tma_info_slots",
+ "MetricGroup": "HPC;TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_fp_assists",
+ "MetricThreshold": "tma_fp_assists > 0.1",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Floating Point (FP) Assists. FP Assist may apply when working with very small floating point values (so-called Denormals).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + FP_ARITH_INST_RETIRED2.SCALAR) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ + FP_ARITH_INST_RETIRED2.VECTOR) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_512b",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.MACRO_FUSED / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fused_instructions",
+ "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "topdown\\-heavy\\-ops / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. Sample with: UOPS_RETIRED.HEAVY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "ICACHE_DATA.STALLS / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_big_code",
+ "MetricThreshold": "tma_info_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_branching_overhead"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_mispredictions, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_branching_overhead",
+ "MetricThreshold": "tma_info_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_big_code"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_callret"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_code_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_nt"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_tk"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_core_bound_likely",
+ "MetricThreshold": "tma_info_core_bound_likely > 0.5"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 6 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_misses, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_dsb_misses",
+ "MetricThreshold": "tma_info_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_fb_hpki"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_fetch_upc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + FP_ARITH_INST_RETIRED2.SCALAR_HALF + 2 * (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED2.COMPLEX_SCALAR_HALF) + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * (FP_ARITH_INST_RETIRED2.128B_PACKED_HALF + cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@) + 16 * (FP_ARITH_INST_RETIRED2.256B_PACKED_HALF + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) + 32 * FP_ARITH_INST_RETIRED2.512B_PACKED_HALF + 4 * AMX_OPS_RETIRED.BF16",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_DISPATCHED.PORT_0 + FP_ARITH_DISPATCHED.PORT_1 + FP_ARITH_DISPATCHED.PORT_5) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "tma_info_flopc / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_ic_misses",
+ "MetricThreshold": "tma_info_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_DATA.STALLS / cpu@ICACHE_DATA.STALLS\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_instruction_fetch_bw > 20"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
+ },
+ {
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR * 64 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;Mem;Server;SoC",
+ "MetricName": "tma_info_io_write_bw"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + FP_ARITH_INST_RETIRED2.SCALAR + (cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ + FP_ARITH_INST_RETIRED2.VECTOR))",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.BF16",
+ "MetricGroup": "Flops;FpVector;InsType;Server",
+ "MetricName": "tma_info_iparith_amx_f16",
+ "MetricThreshold": "tma_info_iparith_amx_f16 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions."
+ },
+ {
+ "BriefDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / AMX_OPS_RETIRED.INT8",
+ "MetricGroup": "InsType;IntVector;Server",
+ "MetricName": "tma_info_iparith_amx_int8",
+ "MetricThreshold": "tma_info_iparith_amx_int8 < 10",
+ "PublicDescription": "Instructions per Integer Arithmetic AMX operation (lower number means higher occurrence rate). Operations factored per matrices' sizes of the AMX instructions."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.128B_PACKED_HALF)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.256B_PACKED_HALF)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE + FP_ARITH_INST_RETIRED2.512B_PACKED_HALF)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx512",
+ "MetricThreshold": "tma_info_iparith_avx512 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per a microcode Assist invocation",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@ASSISTS.ANY\\,umask\\=0x1B@",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_ipassist",
+ "MetricThreshold": "tma_info_ipassist < 100e3",
+ "PublicDescription": "Instructions per a microcode Assist invocation. See Assists tree node for details (lower number means higher occurrence rate)"
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_flopc",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_ntaken",
+ "MetricThreshold": "tma_info_ipmisp_cond_ntaken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_taken",
+ "MetricThreshold": "tma_info_ipmisp_cond_taken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_ret",
+ "MetricThreshold": "tma_info_ipmisp_ret < 500"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_ipswpf",
+ "MetricThreshold": "tma_info_ipswpf < 100"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 13",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_jump"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / tma_info_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_l2_evictions_nonsilent_pki"
+ },
+ {
+ "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / tma_info_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_l2_evictions_silent_pki"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l3_miss_latency"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / MEM_LOAD_COMPLETED.L1_MISS_ANY",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_load_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR) / uncore_cha_0@event\\=0x1@",
+ "MetricGroup": "Mem;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_mem_dram_read_latency",
+ "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]",
+ "MetricExpr": "(1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM / UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM) / uncore_cha_0@event\\=0x1@ if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_mem_pmm_read_latency",
+ "PublicDescription": "Average latency of data read request to external 3D X-Point memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_fb_full / (tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_memory_bandwidth",
+ "MetricThreshold": "tma_info_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_memory_data_tlbs",
+ "MetricThreshold": "tma_info_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_pmm_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_memory_latency",
+ "MetricThreshold": "tma_info_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_mispredictions",
+ "MetricThreshold": "tma_info_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - (tma_info_cond_nt + tma_info_cond_tk + tma_info_callret + tma_info_jump)",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_other_branches"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (4 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for reads [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_RPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_pmm_read_bw"
+ },
+ {
+ "BriefDescription": "Average 3DXP Memory Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(64 * UNC_M_PMM_WPQ_INSERTS / 1e9 / duration_time if #has_pmem > 0 else 0)",
+ "MetricGroup": "Mem;MemoryBW;Server;SoC",
+ "MetricName": "tma_info_pmm_write_bw"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "(tma_info_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
+ "MetricGroup": "SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots_utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "uncore_cha_0@event\\=0x1@",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_store_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Estimated fraction of retirement-cycles dealing with repeat instructions",
+ "MetricExpr": "INST_RETIRED.REP_ITERATION / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_strings_cycles",
+ "MetricThreshold": "tma_info_strings_cycles > 0.1"
+ },
+ {
+ "BriefDescription": "Tera Integer (matrix) Operations Per Second",
+ "MetricExpr": "8 * AMX_OPS_RETIRED.INT8 / 1e12 / duration_time",
+ "MetricGroup": "Cor;HPC;IntVector;Server",
+ "MetricName": "tma_info_tiops"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "tma_retiring * tma_info_slots / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Cross-socket Ultra Path Interconnect (UPI) data transmit bandwidth for data only [MB / sec]",
+ "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 64 / 9 / 1e6",
+ "MetricGroup": "Server;SoC",
+ "MetricName": "tma_info_upi_data_transmit_bw"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_slots / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 9"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic Integer (Int) matrix uops fraction the CPU has retired (aggregated across all supported Int datatypes in AMX engine)",
+ "MetricExpr": "cpu@AMX_OPS_RETIRED.INT8\\,cmask\\=1@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;HPC;IntVector;Pipeline;Server;TopdownL4;tma_L4_group;tma_int_operations_group",
+ "MetricName": "tma_int_amx",
+ "MetricThreshold": "tma_int_amx > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic Integer (Int) matrix uops fraction the CPU has retired (aggregated across all supported Int datatypes in AMX engine). Refer to AMX_Busy and TIOPs metrics for actual AMX utilization and Int performance, resp.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_int_vector_128b + tma_int_vector_256b + tma_shuffles + tma_int_amx",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_int_operations",
+ "MetricThreshold": "tma_int_operations > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall Integer (Int) select operations fraction the CPU has executed (retired). Vector/Matrix Int operations and shuffles are counted. Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired",
+ "MetricExpr": "(INT_VEC_RETIRED.ADD_128 + INT_VEC_RETIRED.VNNI_128) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
+ "MetricName": "tma_int_vector_128b",
+ "MetricThreshold": "tma_int_vector_128b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 128-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired",
+ "MetricExpr": "(INT_VEC_RETIRED.ADD_256 + INT_VEC_RETIRED.MUL_256 + INT_VEC_RETIRED.VNNI_256) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;IntVector;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group;tma_issue2P",
+ "MetricName": "tma_int_vector_256b",
+ "MetricThreshold": "tma_int_vector_256b > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric represents 256-bit vector Integer ADD/SUB/SAD or VNNI (Vector Neural Network Instructions) uops fraction the CPU has retired. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_TAG.STALLS / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((EXE_ACTIVITY.BOUND_ON_LOADS - MEMORY_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L1D_MISS - MEMORY_ACTIVITY.STALLS_L2_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(MEMORY_ACTIVITY.STALLS_L2_MISS - MEMORY_ACTIVITY.STALLS_L3_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "33 * tma_info_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "DECODE.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_2_3_10 / (3 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3_10",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricExpr": "71 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_local_dram",
+ "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to memory bandwidth Allocation feature (RDT's memory bandwidth throttling).",
+ "MetricExpr": "INT_MISC.MBA_STALLS / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;Server;TopdownL5;tma_L5_group;tma_mem_bandwidth_group",
+ "MetricName": "tma_mba_stalls",
+ "MetricThreshold": "tma_mba_stalls > 0.1 & (tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricExpr": "topdown\\-mem\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to LFENCE Instructions.",
+ "MetricExpr": "13 * MISC2_RETIRED.LFENCE / tma_info_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_memory_fence",
+ "MetricThreshold": "tma_memory_fence > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_UOP_RETIRED.ANY / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.MS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: UOPS_RETIRED.MS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "tma_branch_mispredicts / tma_bad_speculation * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_info_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "160 * ASSISTS.SSE_AVX_MIX / tma_info_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * cpu@UOPS_RETIRED.MS\\,cmask\\=1\\,edge@ / (tma_retiring * tma_info_slots / UOPS_ISSUED.ANY) / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: FRONTEND_RETIRED.MS_FLOWS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
+ "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - INST_RETIRED.MACRO_FUSED) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_non_fused_branches",
+ "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_int_operations + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults",
+ "MetricExpr": "99 * ASSISTS.PAGE_FAULT / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_assists_group",
+ "MetricName": "tma_page_faults",
+ "MetricThreshold": "tma_page_faults > 0.05",
+ "PublicDescription": "This metric roughly estimates fraction of slots the CPU retired uops as a result of handing Page Faults. A Page Fault may apply on first application access to a memory page. Note operating system handling of page faults accounts for the majority of its cost.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a",
+ "MetricExpr": "(((1 - ((19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS))) / (19 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + 10 * (MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS)) + (25 * (MEM_LOAD_RETIRED.LOCAL_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0) + 33 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) if #has_pmem > 0 else 0))) if #has_pmem > 0 else 0)) * (MEMORY_ACTIVITY.STALLS_L3_MISS / tma_info_clks) if 1e6 * (MEM_LOAD_L3_MISS_RETIRED.REMOTE_PMM + MEM_LOAD_RETIRED.LOCAL_PMM) > MEM_LOAD_RETIRED.L1_MISS else 0) if #has_pmem > 0 else 0)",
+ "MetricGroup": "MemoryBound;Server;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_pmm_bound",
+ "MetricThreshold": "tma_pmm_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric roughly estimates (based on idle latencies) how often the CPU was stalled on accesses to external 3D-Xpoint (Crystal Ridge, a.k.a. IXP) memory by loads, PMM stands for Persistent Memory Module.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@)) / tma_info_clks if ARITH.DIV_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * cpu@EXE_ACTIVITY.2_PORTS_UTIL\\,umask\\=0xc@) / tma_info_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - EXE_ACTIVITY.BOUND_ON_LOADS) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_int_vector_128b, tma_int_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
+ "MetricExpr": "(135.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 135.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
+ "MetricName": "tma_remote_cache",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricExpr": "149 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_remote_dram",
+ "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Shuffle (cross \"vector lane\" data transfers) uops fraction the CPU has retired.",
+ "MetricExpr": "INT_VEC_RETIRED.SHUFFLES / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "HPC;Pipeline;TopdownL4;tma_L4_group;tma_int_operations_group",
+ "MetricName": "tma_shuffles",
+ "MetricThreshold": "tma_shuffles > 0.1 & (tma_int_operations > 0.1 & tma_light_operations > 0.6)",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricExpr": "CPU_CLK_UNHALTED.PAUSE / tma_info_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: CPU_CLK_UNHALTED.PAUSE_INST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(XQ.FULL_CYCLES + L1D_PEND_MISS.L2_STALLS) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricExpr": "(MEM_STORE_RETIRED.L2_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_4_9 + UOPS_DISPATCHED.PORT_7_8) / (4 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+        "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming stores optimize out a read request required by RFO stores",
+ "MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
+ "MetricName": "tma_streaming_stores",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+        "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming stores optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are a few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "INT_MISC.UNKNOWN_BRANCH_CYCLES / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: FRONTEND_RETIRED.UNKNOWN_BRANCH",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
+ }
+]
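
The metric entries above are turned into C tables by perf's jevents step at build time, and perf stat -M <MetricName> then programs the events named in each MetricExpr and evaluates the expression (the cpu@event@ syntax refers to raw PMU events such as cycles-t and tx-start). As a rough, hypothetical sketch only, and not perf's actual evaluator, the Python below reads one of these JSON files and recomputes the simple tsx_cycles_per_transaction ratio from made-up raw counts; the file path and the counts dictionary are assumptions for illustration.

import json

# Hypothetical inputs for illustration only; substitute the metrics file
# added by this patch and real counts from a perf stat run.
METRICS_JSON = "path/to/sapphirerapids-metrics.json"
counts = {"cycles-t": 1_200_000, "tx-start": 3_000}

with open(METRICS_JSON) as f:
    metrics = [m for m in json.load(f) if "MetricName" in m]

# Print the transaction-group metric definitions shown above.
for m in metrics:
    if "transaction" in m.get("MetricGroup", ""):
        print(m["MetricName"], "=", m["MetricExpr"], m.get("ScaleUnit", ""))

# tsx_cycles_per_transaction is cpu@cycles\-t@ / cpu@tx\-start@, i.e. a plain ratio:
print("tsx_cycles_per_transaction =", counts["cycles-t"] / counts["tx-start"], "cycles/transaction")

In practice there is no need to evaluate these by hand: perf stat -M tsx_cycles_per_transaction (or any other MetricName above) selects the required events and applies the expression and ScaleUnit automatically.
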
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
new file mode 100644
index 000000000000..b91cebf81f50
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cache.json
@@ -0,0 +1,5644 @@
+[
+ {
+ "BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+        "PublicDescription": "CHA to iMC Bypass : Intermediate bypass Taken : Counts the number of times when the CHA was able to bypass the HA pipe on the way to iMC. This is a latency optimization for situations when there is light loading on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the intermediate bypass.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+        "PublicDescription": "CHA to iMC Bypass : Not Taken : Counts the number of times when the CHA was able to bypass the HA pipe on the way to iMC. This is a latency optimization for situations when there is light loading on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that could not take the bypass and issued a read to memory. Note that transactions that did not take the bypass but did not issue a read to memory will not be counted.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+        "PublicDescription": "CHA to iMC Bypass : Taken : Counts the number of times when the CHA was able to bypass the HA pipe on the way to iMC. This is a latency optimization for situations when there is light loading on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the full bypass.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CHA clock cycles while the event is enabled",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xf2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Any Single Snoop : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xf1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Multiple Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Single Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Multiple Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Single Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Multiple External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Single External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.REMOTE_GTONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Multiple Snoop Targets from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x12",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.REMOTE_ONE",
+ "PerPkg": "1",
+        "PublicDescription": "Core Cross Snoops Issued : Single Snoop Target from Remote : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set, the core may have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6e",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6e",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6e",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6d",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "EventCode": "0x5f",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "EventCode": "0x5f",
+ "EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : op is WbMtoE",
+ "EventCode": "0x5f",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache : op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "EventCode": "0x5f",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed : op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "EventCode": "0x5e",
+ "EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed : op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "EventCode": "0x5e",
+ "EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : No SF/LLC HitS/F and op is RdInvOwn",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache : SF/LLC HitS/F and op is RdInvOwn",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Deallocate HitME$ on Reads without RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a local request : Received RspFwdI* for a local request, but converted HitME$ to SF entry",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Allocate/Update to HitMe Cache : op is RspIFwd or RspIFwdWb for a remote request : Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache : Update HitMe Cache to SHARed",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "HA to iMC Reads Issued : ISOCH : Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+        "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to any of the memory controller channels.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+        "PublicDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of partial line writes issued from the HA into the memory controller.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "EventCode": "0x5b",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+        "PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of partial line writes issued from the HA into the memory controller.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x1fffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All transactions from Remote Agents",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All transactions from Remote Agents : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x17e0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local or remote transaction to the LLC, including prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1bd0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local non-prefetch requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Local non-prefetch requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local transaction to the LLC, not including prefetch",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1bc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Reads",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1fc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Request : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Read transactions.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Demand Data Reads, Core and LLC prefetches : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x841ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1fc101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : E State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Exclusive State",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : F State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : F State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Forward State",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1a44ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : I State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : I State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Miss",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local LLC prefetch requests (from LLC)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LLCPREF_LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Local LLC prefetch requests (from LLC) : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Any local LLC prefetch to the LLC",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCALLY_HOMED_ADDRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
+ "UMask": "0xbdfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x19d0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x19c1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Demand CRd Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1850ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Demand Data Reads that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1841ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Demand RFO Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_DMND_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1848ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed locally : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_FLUSH_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1844ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Prefetch requests to the LLC that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_LLC_PF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x189dff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Prefetches that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x199dff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Prefetches that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1910ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Prefetches that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1981ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Prefetches that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_PF_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1908ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests that come from the local socket (usually the core)",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x19c8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : M State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : M State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Modified State",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1fe001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Write Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote non-snoop requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.PREF_OR_DMND_REMOTE_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remote non-snoop requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Remote non-snoop transactions to the LLC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTELY_HOMED_ADDRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
+ "UMask": "0x15dfff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_CODE",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "UMask": "0x1a10ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Requests that come from a Remote socket",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_DATA_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1a01ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed remotely",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed remotely : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Transaction whose address resides in a remote MC",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_FLUSH_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1a04ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache that come from a remote socket",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x1a02ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests that come from a Remote socket.",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1a08ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote snoop requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remote snoop requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Remote snoop transactions to the LLC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Snoop Requests from a Remote Socket",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x1c19ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1bc8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally HOMed RFOs - Demand and Prefetches",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x9c8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : S State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Hit Shared State",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit Exclusive State",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - H State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit HitMe State",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : SF Hit Shared State",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Writes",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Requests that install or change a line in the LLC. Examples: Writebacks from Core L2's and UPI. Prefetches into the LLC.",
+ "UMask": "0x842ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remote Writes",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x17c2ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in E state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : IA traffic",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IA",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : IA traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : IO traffic",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : IO traffic : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in E state that are victimized on a fill from an IO device",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x12",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in F or S state that are victimized on a fill from an IO device",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO_FS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1c",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in M state that are victimized on a fill from an IO device",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in any state that are victimized on a fill from an IO device",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.IO_MESF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x200f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local Only",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in M state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in M state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x800f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Remote Only",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Remote Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in E state that are victimized on a fill",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in M state that are victimized on a fill",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "All LLC lines in S state that are victimized on a fill",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : CV0 Prefetch Miss : Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : CV0 Prefetch Victim : Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state.",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : Silent Snoop Eviction : Miscellaneous events in the Cbo. : Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : Write Combining Aliasing : Miscellaneous events in the Cbo. : Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local InvItoE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_INVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Local InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Local Rd",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.LOCAL_READ",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Local Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Off",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.OFF_PWRHEURISTIC",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Off : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READ",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Remote Rd : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : Remote Rd InvItoE",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.REMOTE_READINVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : Remote Rd InvItoE : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB.RFO_HITS_SNP_BCAST",
+ "PerPkg": "1",
+ "PublicDescription": "OSB Snoop Broadcast : RFO HitS Snoop Broadcast : Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "EventCode": "0x65",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_INVITOX.SETCONFLICT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near Memory set conflict in SF/LLC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "Near Memory evictions due to another read to the same Near Memory set in the LLC.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near memory set conflict in SF/LLC",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.SF",
+ "PerPkg": "1",
+ "PublicDescription": "Near Memory evictions due to another read to the same Near Memory set in the SF",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Memory Mode related events; Counts the number of times CHA saw a Near Memory set conflict in TOR",
+ "EventCode": "0x64",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "No Reject in the CHA due to a pending read to the same Near Memory set in the TOR.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.IODC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "EventCode": "0x70",
+ "EventName": "UNC_CHA_PMM_MEMMODE_NM_SETCONFLICTS2.MEMWRNI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.DDR4_FAST_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.REJ_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.SLOWTORQ_SKIP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.SLOW_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE_IRQ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "EventCode": "0x66",
+ "EventName": "UNC_CHA_PMM_QOS.THROTTLE_PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "EventCode": "0x67",
+ "EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_FAST_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": ": count # of FAST TOR Request inserted to ha_tor_req_fifo",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of SLOW TOR Request inserted to ha_pmm_tor_req_fifo",
+ "EventCode": "0x67",
+ "EventName": "UNC_CHA_PMM_QOS_OCCUPANCY.DDR_SLOW_FIFO",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC0 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC1 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC2 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC3 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC4 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC5 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Remote requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests made into the CHA",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a unit on this socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a remote socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write requests made into the CHA",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write Requests from a unit on this socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read and Write Requests; Writes Remote",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of read requests made into the Home Agent. Reads include all read opcodes (including RFO). Writes include all writes (streaming, evictions, HitM, etc).",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IPQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IRQ Rejected : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : RRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : RRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : WBQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : WBQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IPQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "IPQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IPQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2c",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "EventCode": "0x2d",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "EventCode": "0x2d",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : IPQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : RRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : RRQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : WBQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : WBQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : AD REQ on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : AD RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : Non UPI AK Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL NCB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL NCS on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL WB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2e",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : Non UPI IV Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : Allow Snoop : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : ANY0",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : ANY0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Any condition listed in the Other0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : HA",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : HA : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : LLC OR SF Way : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : LLC Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : PhyAddr Match : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : SF Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Victim",
+ "EventCode": "0x2f",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the PRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : AD REQ on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : AD RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : Non UPI AK Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL NCB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL NCS on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL WB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2a",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : Non UPI IV Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : Allow Snoop : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : ANY0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Any condition listed in the WBQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : HA : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : LLC OR SF Way : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : LLC Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : PhyAddr Match : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : SF Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "EventCode": "0x2b",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : Allow Snoop",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Any condition listed in the RRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : HA",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : HA : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : LLC OR SF Way",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : LLC Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : PhyAddr Match",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : SF Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry. : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects - Set 1 : Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "RRQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : Allow Snoop",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : Allow Snoop : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Any condition listed in the WBQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : HA",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : HA : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : LLC OR SF Way",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : LLC OR SF Way : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : LLC Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : LLC Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : PhyAddr Match",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : PhyAddr Match : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : SF Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : SF Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry. : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects - Set 1 : Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "WBQ Rejects - Set 1 : Victim : Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : All",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : All : Counts the number of snoops issued by the HA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoop for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast snoop for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from local sockets.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA.This filter includes only requests coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Directed snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA. This filter includes only requests coming from local sockets.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Directed snoops for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA. This filter includes only requests coming from remote sockets.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast or directed Snoops sent for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast or directed Snoops sent for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the local socket.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast or directed Snoops sent for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast or directed Snoops sent for Remote Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the remote socket.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RSPCNFLCT*",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : RSPCNFLCT* : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoops responses of RspConflict. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspFwd",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : RspFwd : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : Rsp*Fwd*WB",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWDWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : Rsp*Fwd*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of Rsp*Fwd*WB. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspI Snoop Responses Received",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspIFwd Snoop Responses Received",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspS Snoop Responses Received",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type RspS Snoop Response was received which indicates when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspSFwd Snoop Responses Received",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : Rsp*WB",
+ "EventCode": "0x5c",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received : Rsp*WB : Counts the total number of RspI snoop responses received. Whenever a snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for a snoop response of RspIWB or RspSWB. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspCnflct : Number of snoop responses received for a Local request : Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : Rsp*FWD*WB : Number of snoop responses received for a Local request : Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspI",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspI : Number of snoop responses received for a Local request : Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspIFwd : Number of snoop responses received for a Local request : Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspS",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspS : Number of snoop responses received for a Local request : Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspSFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "EventCode": "0x5d",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : Rsp*WB : Number of snoop responses received for a Local request : Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "EventCode": "0x6b",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0xc001ffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DDR Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DDR Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : SF/LLC Evictions : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Hits",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Hits : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally initiated requests from IA Cores",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;CLFlush from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; CLFlush events that are initiated from the Core",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;CLFlushOpt from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; CLFlushOpt events that are initiated from the Core",
+ "UMask": "0xc8d7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read from local IA that misses in the snoop filter",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc817ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt from local IA that misses in the snoop filter",
+ "UMask": "0xc827ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8a7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc897ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read from local IA that hits in the snoop filter",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that hits in the snoop filter",
+ "UMask": "0xc817fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to page walks that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt from local IA that hits in the snoop filter",
+ "UMask": "0xc827fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc8a7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Pref hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc897fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Hit LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA that hits in the snoop filter",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch data read from local IA that hits in the snoop filter",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch read for ownership from local IA that hits in the snoop filter",
+ "UMask": "0xccc7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership from local IA that hits in the snoop filter",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO Pref hits from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;ItoM from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; ItoM events that are initiated from the Core",
+ "UMask": "0xcc47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA.",
+ "UMask": "0xcccfff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch data read from local IA.",
+ "UMask": "0xccd7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xccc7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; misses from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for CRd misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode CRd",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRDMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c80b8201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd",
+ "UMask": "0xc817fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8138201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRds issued by IA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target DDR memory",
+ "UMask": "0xc8178601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA targeting local memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target local memory",
+ "UMask": "0xc816fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt from local IA that misses in the snoop filter",
+ "UMask": "0xc827fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; DRd Opt Pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read opt prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8a7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target PMM memory",
+ "UMask": "0xc8178a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF",
+ "UMask": "0xc897fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting local memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF, and target local memory",
+ "UMask": "0xc896fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd Pref misses from local IA targeting remote memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRD_PREF, and target remote memory",
+ "UMask": "0xc8977e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for DRd misses from local IA targeting remote memory",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and target remote memory",
+ "UMask": "0xc8177e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefCode misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch code read from local IA that misses in the snoop filter",
+ "UMask": "0xcccffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC Prefetch Code transactions issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFCODE_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10cccf8201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefData misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch data read from local IA that misses in the snoop filter",
+ "UMask": "0xccd7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; LLCPrefRFO misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Last level cache prefetch read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xccc7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RFO and L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFOMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8038201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts RFO misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc806fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO pref misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc886fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO prefetch misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8877e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc8077e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts;SpecItoM from Local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; SpecItoM events that are initiated from the Core",
+ "UMask": "0xcc57ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc3fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc37ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc2fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbMtoIs issued by iA Cores . (Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc67ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM hits from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur and FsRdCur hits from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO hits from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for ItoM from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IO with the opcode ItoM",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for ItoMCacheNears from IO devices.",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IO devices with the opcode ItoMCacheNears. This event indicates a partial write request.",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM misses from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur and FsRdCur misses from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts for RdCur from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "Inserts into the TOR from local IO with the opcode RdCur",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO from local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IPQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IPQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IRQ - iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : From an iA Core",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IRQ - Non iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just ISOC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just ISOC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Local Targets",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Local Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA and IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally initiated requests",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally initiated requests from iA Cores",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All locally generated IO traffic",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Misses",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Misses : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMCFG Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : MMCFG Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMIO Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMIO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : MMIO Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NearMem",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NonCoherent",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NonCoherent : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NotNearMem",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NotNearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PMM Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PM Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PRQ - IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : From a PCIe Device",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PRQ - Non IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Remote Targets",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REMOTE_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Remote Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Remote",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Remote : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All remote requests (e.g. snoops, writebacks) that came from remote sockets",
+ "UMask": "0xc001ffc8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All Snoops from Remote",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_SNPS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All Snoops from Remote : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. : All snoops to this LLC that came from remote sockets",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RRQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All Snoops from Remote",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.SNPS_FROM_REM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. All snoops to this LLC that came from remote sockets.",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WBQ : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0xc001ffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DDR Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DDR Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : SF/LLC Evictions : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Hits",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Hits : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8d7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read from local IA that misses in the snoop filter",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc817ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt from local IA that misses in the snoop filter",
+ "UMask": "0xc827ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8a7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc897ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read from local IA that hits in the snoop filter",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read from local IA that hits in the snoop filter",
+ "UMask": "0xc817fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt from local IA that hits in the snoop filter",
+ "UMask": "0xc827fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc8a7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc897fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Hit LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch code read from local IA that hits in the snoop filter",
+ "UMask": "0xcccffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA that hits in the snoop filter",
+ "UMask": "0xccd7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch read for ownership from local IA that hits in the snoop filter",
+ "UMask": "0xccc7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that hits in the snoop filter",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO Pref hits from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that hits in the snoop filter",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd47ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA.",
+ "UMask": "0xcccfff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA that misses in the snoop filter",
+ "UMask": "0xccd7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xccc7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read from local IA that misses in the snoop filter",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for CRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRDMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c80b8201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88efe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd issued by iA Cores that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80f7e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd",
+ "UMask": "0xc817fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds and equivalent opcodes issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8138201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target DDR memory",
+ "UMask": "0xc8178601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA targeting local memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target local memory",
+ "UMask": "0xc816fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8168a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt from local IA that misses in the snoop filter",
+ "UMask": "0xc827fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Opt Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read opt prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8a7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRds issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target PMM memory",
+ "UMask": "0xc8178a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc897fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc896fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_LOCAL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8968a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8978a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; DRd Pref misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8977e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_PREF_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Prefs issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8970a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for DRd misses from local IA targeting remote memory",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles for elements in the TOR from local IA cores which miss the LLC and snoop filter with the opcode DRd, and which target remote memory",
+ "UMask": "0xc8177e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting DDR Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_REMOTE_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores targeting PMM Mem that Missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8170a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc47fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefCode misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch code read from local IA that misses in the snoop filter",
+ "UMask": "0xcccffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for LLC Prefetch Code transactions issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFCODE_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10cccf8201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefData misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFDATA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch data read from local IA that misses in the snoop filter",
+ "UMask": "0xccd7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; LLCPrefRFO misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LLCPREFRFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Last level cache prefetch read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xccc7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8668a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LOCAL_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed locally : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86e8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8670a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_REMOTE_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC - HOMed remotely : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f0a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy for RFO and L2 RFO prefetches issued from an IA core which miss the L3 and target memory in a CXL type 2 accelerator.",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFOMORPH_CXL_ACC",
+ "PerPkg": "1",
+ "UMask": "0x10c8038201",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc806fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc886fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc8877e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc8077e01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLFs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8678a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting DDR that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8601",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores targeting PMM that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86f8a01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership from local IA that misses in the snoop filter",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO prefetch from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Read for ownership prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SpecItoMs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_SPECITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : SpecItoMs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc57ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM hits from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur hits from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO hits from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM misses from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur misses from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RdCur and FsRdCur from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ItoM from local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IPQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IRQ - iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : From an iA Core",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IRQ - Non iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just ISOC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just ISOC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Local Targets",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Local Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA and IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally initiated requests",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally initiated requests from iA Cores",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All locally generated IO traffic",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Misses",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Misses : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMCFG Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : MMCFG Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMIO Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMIO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : MMIO Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NearMem",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NonCoherent : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NotNearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PMM Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PMM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PMM Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PRQ - IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : From a PCIe Device",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PRQ - Non IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Remote Targets",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REMOTE_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Remote Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Remote",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Remote : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All remote requests (e.g. snoops, writebacks) that came from remote sockets",
+ "UMask": "0xc001ffc8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All Snoops from Remote",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.REM_SNPS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All Snoops from Remote : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T : All snoops to this LLC that came from remote sockets",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RRQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All Snoops from Remote",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.SNPS_FROM_REM",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. All snoops to this LLC that came from remote sockets.",
+ "UMask": "0xc001ff08",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WBQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WBQ : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "WbPushMtoI : Pushed to LLC : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was able to push WbPushMToI to LLC",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "WbPushMtoI : Pushed to Memory : Counts the number of times when the CHA was received WbPushMtoI : Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC0 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC1 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC2 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC3 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC4 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "EventCode": "0x5a",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC5 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 0?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 0?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 1?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 1?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Sent (on 0?) : Number of XPT prefetches sent",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Sent (on 1?) : Number of XPT prefetches sent",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ }
+]
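The JSON files added above and below are consumed by perf's pmu-events infrastructure, which turns each entry into a named event alias under the corresponding uncore PMU. As a minimal sketch (not part of this patch), the Python snippet below parses one of these files and lists the CHA TOR occupancy subevents together with their event codes and umasks; the file path and the name prefix are illustrative assumptions only.

import json

# Hypothetical path into a kernel source tree; adjust as needed.
PATH = "tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cha.json"

def list_events(path=PATH, prefix="UNC_CHA_TOR_OCCUPANCY."):
    """Print name, event code, umask and unit for matching entries."""
    with open(path) as f:
        events = json.load(f)  # each file is a flat JSON array of event dicts
    for ev in events:
        name = ev.get("EventName", "")
        if name.startswith(prefix):
            print(f"{name:55s} code={ev.get('EventCode')} "
                  f"umask={ev.get('UMask')} unit={ev.get('Unit')}")

if __name__ == "__main__":
    list_events()

In a perf build that includes these files, the generated aliases can typically be used directly (for example counting UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD alongside the corresponding TOR inserts event to estimate average LLC-miss residency); that pairing is a common usage pattern, not something defined by this patch.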
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json
new file mode 100644
index 000000000000..f3e84fd88de3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-cxl.json
@@ -0,0 +1,450 @@
+[
+ {
+ "BriefDescription": "Counts the number of lfclk ticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_CXLCM_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Rxx AGF 0",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req AGF0",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_REQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp AGF",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_REQ1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Data AGF",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_RSP0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp AGF",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.CACHE_RSP1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req AGF 1",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Data AGF",
+ "EventCode": "0x43",
+ "EventName": "UNC_CXLCM_RxC_AGF_INSERTS.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with AK set",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.AK_HDR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with BE set",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.BE_HDR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of control flits received",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.CTRL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Headerless flits received",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.NO_HDR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of protocol flits received",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.PROT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with SZ set",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.SZ_HDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of flits received",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.VALID",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of valid messages in the flit",
+ "EventCode": "0x4b",
+ "EventName": "UNC_CXLCM_RxC_FLITS.VALID_MSG",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of CRC errors detected",
+ "EventCode": "0x40",
+ "EventName": "UNC_CXLCM_RxC_MISC.CRC_ERRORS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Init flits sent",
+ "EventCode": "0x40",
+ "EventName": "UNC_CXLCM_RxC_MISC.INIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of LLCRD flits sent",
+ "EventCode": "0x40",
+ "EventName": "UNC_CXLCM_RxC_MISC.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Retry flits sent",
+ "EventCode": "0x40",
+ "EventName": "UNC_CXLCM_RxC_MISC.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.CACHE_RSP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles the Packing Buffer is Full",
+ "EventCode": "0x52",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_FULL.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Data Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.CACHE_RSP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Rxx Packing buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_INSERTS.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Cache Data Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Cache Req Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Cache Rsp Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.CACHE_RSP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Mem Data Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of cycles of Not Empty for Mem Rxx Packing buffer",
+ "EventCode": "0x42",
+ "EventName": "UNC_CXLCM_RxC_PACK_BUF_NE.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with AK set",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.AK_HDR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with BE set",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.BE_HDR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of control flits packed",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.CTRL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Headerless flits packed",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.NO_HDR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of protocol flits packed",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.PROT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of Flits with SZ set",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.SZ_HDR",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Count the number of flits packed",
+ "EventCode": "0x05",
+ "EventName": "UNC_CXLCM_TxC_FLITS.VALID",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Data Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_DATA",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_REQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp1 Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_REQ1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Rsp0 Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_RSP0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Cache Req Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.CACHE_RSP1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Data Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.MEM_DATA",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Number of Allocation to Mem Rxx Packing buffer",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLCM_TxC_PACK_BUF_INSERTS.MEM_REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLCM"
+ },
+ {
+ "BriefDescription": "Counts the number of uclk ticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_CXLDP_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to M2S Data AGF",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_DATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to M2S Req AGF",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.M2S_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to U2C Data AGF",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to U2C Req AGF",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to U2C Rsp AGF 0",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_RSP0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CXLDP"
+ },
+ {
+ "BriefDescription": "Number of Allocation to U2C Rsp AGF 1",
+ "EventCode": "0x02",
+ "EventName": "UNC_CXLDP_TxC_AGF_INSERTS.U2C_RSP1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CXLDP"
+ }
+]
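For orientation: every entry in these pmu-events files follows the same flat schema — "EventName", "EventCode", "PerPkg" and "Unit" on each event, with "UMask", "PublicDescription", "FCMask" and "PortMask" present only where relevant. Below is a minimal sketch, assuming a kernel checkout with this series applied, of how one of these tables can be inspected; the path used is the uncore-interconnect.json file introduced by the next hunk.

    import json

    # File added by this series; adjust the path to your checkout as needed.
    path = "tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json"
    with open(path) as f:
        events = json.load(f)  # each file is a flat JSON array of event objects

    for ev in events:
        # UMask is optional -- e.g. the IRP egress queue events omit it.
        print(f'{ev["Unit"]:6} code={ev["EventCode"]:>6} '
              f'umask={ev.get("UMask", "-"):>6} {ev["EventName"]}')

Once perf is built with these tables, the aliases should be usable directly on a Sapphire Rapids system with the matching uncore drivers loaded, e.g. "perf stat -e UNC_M2M_CLOCKTICKS -a sleep 1".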
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
new file mode 100644
index 000000000000..08faf38115d9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json
@@ -0,0 +1,6199 @@
+[
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
+ "EventCode": "0x0f",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "Total IRP occupancy of inbound read and write requests to coherent memory. This is effectively the sum of read occupancy and write occupancy.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "IRP Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of IRP clock cycles while the event is enabled",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF - request insert from TC.",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF occupancy",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Received Valid : Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of E Line : Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of I Line : Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of M Line : Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of S Line : Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit E or S",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit I",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit M",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Miss",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpCode",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpData",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpInv",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "EventCode": "0x0b",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "EventCode": "0x05",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "EventCode": "0x02",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "EventCode": "0x08",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "EventCode": "0x06",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "EventCode": "0x03",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "EventCode": "0x09",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "EventCode": "0x07",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "EventCode": "0x04",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "EventCode": "0x0a",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "EventCode": "0x1c",
+ "EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": ": Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0AD1 both. Stalls on both AD0 and AD1 will count as 2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD0 Egress Credits Stalls",
+ "EventCode": "0x1a",
+ "EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No AD0 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD1 Egress Credits Stalls",
+ "EventCode": "0x1b",
+ "EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No AD1 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x1d",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No BL Egress Credit Stalls : Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0x0d",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0x0e",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0x0c",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Request Queue Occupancy : Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "M2M Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the mesh to memory (M2M)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress : Counts the number of time non cisgress D2C was not honoured by egress due to directory state constraints",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Counts the time when FM didn? do d2c for fill reads (cross tile case)",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden : Cisgress",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE.CISGRESS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden : 2LM Hit?",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE.PMM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_DIRECT2UPITXN_OVERRIDE.PMM_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a direct to UPI transaction was overridden. : Counts the number of times D2K wasn't honored even though the incoming request had d2k set",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.CISGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored : Counts cisgress d2K that was not honored due to directory constraints",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U : Counts the number of time D2K was not honoured by egress due to directory state constraints",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored : Counts non cisgress d2K that was not honored due to directory constraints",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to the Intel UPI",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times egress did D2K (Direct to KTI)",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE.CISGRESS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with any directory to non persistent memory",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory A to non persistent memory",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory I to non persistent memory",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory S to non persistent memory",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "UMask": "0x320",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_I_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to I to non persistent memory (DRAM or HBM)",
+ "UMask": "0x120",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_I_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to I to non persistent memory (DRAM or HBM)",
+ "UMask": "0x220",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_S_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to S to non persistent memory (DRAM or HBM)",
+ "UMask": "0x140",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A_TO_S_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to S to non persistent memory (DRAM or HBM)",
+ "UMask": "0x240",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory (DRAM or HBM)",
+ "UMask": "0x101",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_A_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to A to non persistent memory (DRAM or HBM)",
+ "UMask": "0x104",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_A_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to A to non persistent memory (DRAM or HBM)",
+ "UMask": "0x204",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_S_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to S to non persistent memory (DRAM or HBM)",
+ "UMask": "0x102",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I_TO_S_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to S to non persistent memory (DRAM or HBM)",
+ "UMask": "0x202",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts any 2lm miss data return that would result in directory update to non persistent memory (DRAM or HBM)",
+ "UMask": "0x201",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_A_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to A to non persistent memory (DRAM or HBM)",
+ "UMask": "0x110",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_A_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to A to non persistent memory (DRAM or HBM)",
+ "UMask": "0x210",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_I_HIT_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to I to non persistent memory (DRAM or HBM)",
+ "UMask": "0x108",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S_TO_I_MISS_NON_PMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to I to non persistent memory (DRAM or HBM)",
+ "UMask": "0x208",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x80000004",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x80000001",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Count when Starve Glocab counter is at 7",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_IGR_STARVE_WINNER.MASK7",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0.TO_NM1LM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0.TO_NM1LM",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0.TO_NMCache",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0.TO_NMCache",
+ "PerPkg": "1",
+ "UMask": "0x110",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x104",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x140",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x102",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x101",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x110",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x108",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH0_TO_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x120",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1.TO_NM1LM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1.TO_NM1LM",
+ "PerPkg": "1",
+ "UMask": "0x208",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1.TO_NMCache",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1.TO_NMCache",
+ "PerPkg": "1",
+ "UMask": "0x210",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x204",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x240",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x202",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x201",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x210",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x208",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.CH1_TO_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x220",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.FROM_TGR",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.ISOCH",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_NM1LM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_NM1LM",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_NMCACHE",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_NMCACHE",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_READS.TO_PMM",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_IMC_READS.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x320",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "All Writes - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1810",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0.NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x810",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "From TGR - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x801",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x804",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x802",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x808",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR, acting as Cache - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x840",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x820",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "PMM - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_TO_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "PMM - Ch0 : Counts all PMM dimm writes requests(full line and partial) sent from M2M to iMC",
+ "UMask": "0x880",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1.NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "All Writes - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1010",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "From TGR - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1001",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1004",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1002",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1008",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR, acting as Cache - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1040",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x1020",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "PMM - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_TO_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "PMM - Ch1 : Counts all PMM dimm writes requests(full line and partial) sent from M2M to iMC",
+ "UMask": "0x1080",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "From TGR - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Full Non-ISOCH - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1801",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1804",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1802",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1808",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR, acting as Cache - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1840",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "DDR - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_DDR_AS_MEM",
+ "PerPkg": "1",
+ "UMask": "0x1820",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "PMM - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_IMC_WRITES.TO_PMM",
+ "PerPkg": "1",
+ "UMask": "0x1880",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": UPI - All Channels",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": XPT - All Channels",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.RD_MERGED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.WR_MERGED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.WR_SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "PublicDescription": "Prefetch CAM Inserts : XPT -All Channels",
+ "UMask": "0x5",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "All Channels",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 0",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 1",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.PMM_MEMMODE_ACCEPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clean NearMem Read Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clean full line read hits (reads and RFOs).",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Dirty NearMem Read Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TAG_HIT.NM_RD_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts dirty full line read hits (reads and RFOs).",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Clean NearMem Underfill Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_CLEAN",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts clean underfill hits due to a partial write",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tag Hit : Dirty NearMem Underfill Hit",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TAG_HIT.NM_UFILL_HIT_DIRTY",
+ "PerPkg": "1",
+ "PublicDescription": "Tag Hit indicates when a request sent to the iMC hit in Near Memory. : Counts dirty underfill read hits due to a partial write",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_TAG_MISS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2M_TAG_MISS",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "EventCode": "0x2f",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x204",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 0",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 1",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "EventCode": "0x35",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Requests",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Requests : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Snoops",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Snoops : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : VNA Messages",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : VNA Messages : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty : Writebacks",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "CBox AD Credits Empty : Writebacks : No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M3UPI Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of M2UPI clock cycles while the event is enabled",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M3UPI CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2C Sent",
+ "EventCode": "0x2b",
+ "EventName": "UNC_M3UPI_D2C_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "D2C Sent : Count cases BL sends direct to core",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2U Sent",
+ "EventCode": "0x2a",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "D2U Sent : Cases where SMI3 sends D2U command",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO0 and IIO1 share the same ring destination. (1 VN0 credit only) : No vn0 and vna credits available to send to M2",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO2 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO3 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO4",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO4 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO5",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : All IIO targets for NCS are in single mask. ORs them together : No vn0 and vna credits available to send to M2",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : Selected M2p BL NCS credits : No vn0 and vna credits available to send to M2",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty : IIO5",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.UBOX_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2 BL Credits Empty : IIO5 : No vn0 and vna credits available to send to M2",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 1",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 1 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AD - Slot 2",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AD - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AK - Slot 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AK - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : AK - Slot 2",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : AK - Slot 2 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received : BL - Slot 0",
+ "EventCode": "0x3e",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi Slot Flit Received : BL - Slot 0 : Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : REQ on AD",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : REQ on AD : VN0 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : RSP on AD",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : RSP on AD : VN0 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : SNP on AD",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : SNP on AD : VN0 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : NCB on BL",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : NCB on BL : VN0 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : NCS on BL",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : NCS on BL : VN0 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : RSP on BL",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : RSP on BL : VN0 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0 : WB on BL",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN0 : WB on BL : VN0 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : REQ on AD",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : REQ on AD : VN1 message requested but lost arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : RSP on AD",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : RSP on AD : VN1 message requested but lost arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : SNP on AD",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : SNP on AD : VN1 message requested but lost arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : NCB on BL",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : NCB on BL : VN1 message requested but lost arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : NCS on BL",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : NCS on BL : VN1 message requested but lost arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : RSP on BL",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : RSP on BL : VN1 message requested but lost arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1 : WB on BL",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Lost Arb for VN1 : WB on BL : VN1 message requested but lost arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN0 : AD and BL messages won arbitration concurrently / in parallel",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : AD, BL Parallel Win VN1 : AD and BL messages won arbitration concurrently / in parallel",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : Max Parallel Win",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ALL_PARALLEL_WIN",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : Max Parallel Win : VN0 and VN1 arbitration sub-pipelines both produced AD and BL winners (maximum possible parallel winners)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN0 : Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending AD VN1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending AD VN1 : Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN0 : Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : No Progress on Pending BL VN1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : No Progress on Pending BL VN1 : Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.VN01_PARALLEL_WIN",
+ "PerPkg": "1",
+ "PublicDescription": "Arb Miscellaneous : VN0, VN1 Parallel Win : VN0 and VN1 arbitration sub-pipelines had parallel winners (at least one AD or BL on each side)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : REQ on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : REQ on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : RSP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : RSP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : SNP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : SNP on AD : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : NCB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : NCB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : NCS on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : NCS on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : RSP on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : RSP on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0 : WB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN0 : WB on BL : VN0 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : REQ on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : REQ on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : RSP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : RSP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : SNP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : SNP on AD : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : NCB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : NCB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : NCS on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : NCS on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : RSP on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : RSP on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1 : WB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRD_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No Credits to Arb for VN1 : WB on BL : VN1 message is blocked from requesting arbitration due to lack of remote UPI credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : REQ on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : REQ on AD : VN0 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : RSP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : RSP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : SNP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : SNP on AD : VN0 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : NCB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : NCB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : NCS on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : NCS on BL : VN0 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : RSP on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : RSP on BL : VN0 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0 : WB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN0 : WB on BL : VN0 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : REQ on AD",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : REQ on AD : VN1 message was not able to request arbitration while some other message won arbitration : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : RSP on AD",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : RSP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : SNP on AD",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : SNP on AD : VN1 message was not able to request arbitration while some other message won arbitration : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : NCB on BL",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : NCB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : NCS on BL",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : NCS on BL : VN1 message was not able to request arbitration while some other message won arbitration : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : RSP on BL",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : RSP on BL : VN1 message was not able to request arbitration while some other message won arbitration : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1 : WB on BL",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOREQ_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Can't Arb for VN1 : WB on BL : VN1 message was not able to request arbitration while some other message won arbitration : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on BL Arb : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD to Slot 0 on Idle : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to slot 0 of independent flit while pipeline is idle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 1 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 1 while merging with bl message in same flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses : AD + BL to Slot 2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress Queue Bypasses : AD + BL to Slot 2 : Number of times message is bypassed around the Ingress Queue : AD is taking bypass to flit slot 2 while merging with bl message in same flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : Any In BGF FIFO",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : Any In BGF FIFO : Indication that at least one packet (flit) is in the bgf (fifo only)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : Any in BGF Path",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : Any in BGF Path : Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.LT1_FOR_D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.LT2_FOR_D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : d2k credit count is less than 2",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events : No D2K For Arb",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.VN0_NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : No D2K For Arb : VN0 BL RSP message was blocked from arbitration request due to lack of D2K CMP credit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.VN1_NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous Credit Events : VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Credits Consumed",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.CONSUMED",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Credits Consumed : number of remote vna credits consumed per cycle",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : D2K Credits",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : D2K Credits : D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Packets in BGF FIFO",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Packets in BGF FIFO : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Packets in BGF Path",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Packets in BGF Path : Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in completion fifo only",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : count of bl messages in pump-1-pending state, in marker table and in fifo",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : Transmit Credits",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : Transmit Credits : Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy : VNA In Use",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "Credit Occupancy : VNA In Use : Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : REQ on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : SNP on AD : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : NCS on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : RSP on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty : WB on BL : Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : All",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : All : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but could not be sent for any reason, e.g. low credits, low tsv, stall injection",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : No BGF Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_BGF",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : No BGF Credits : Data flit is ready for transmission but could not be sent",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : No TxQ Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.NO_TXQ",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : No TxQ Credits : Data flit is ready for transmission but could not be sent",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : TSV High",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.TSV_HI",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : TSV High : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while tsv high",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent : Cycle valid for Flit",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_DATA_FLITS_NOT_SENT.VALID_FOR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Data Flit Not Sent : Cycle valid for Flit : Data flit is ready for transmission but could not be sent : data flit is ready for transmission but was not sent while cycle is valid for flit transmission",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 0",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 0 : generating bl data flit sequence; waiting for data pump 0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is tracking at least one message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending completion fifo is full",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : a bl message finished but is in limbo and moved to pump-1-pending logic",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence : Wait on Pump 1",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Generating BL Data Flit Sequence : Wait on Pump 1 : generating bl data flit sequence; waiting for data pump 1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_HOLDOFF",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request naturally serviced during hold-off period",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_IN_SERVICE",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request forcibly serviced during service window",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_RECEIVED",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request received from link layer while idle (with no slot 2 request active immediately prior)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC.S2REQ_WITHDRAWN",
+ "PerPkg": "1",
+ "PublicDescription": ": slot 2 request withdrawn during hold-off period or service window",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : All",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Needs Data Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Needs Data Flit : BL message requires data flit sequence",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 0 : Waiting for header pump 0",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 : Header pump 1 is not required for flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Bubble : Header pump 1 is not required for flit but flit transmission delayed",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Don't Need Pump 1 - Not Avail : Header pump 1 is not required for flit and not available",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Slotting BL Message Into Header Flit : Wait on Pump 1 : Waiting for header pump 1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate : Events related to Header Flit Generation - Set 1 : Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate Ready",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate Ready : Events related to Header Flit Generation - Set 1 : header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Accumulate Wasted",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Accumulate Wasted : Events related to Header Flit Generation - Set 1 : Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Blocked : Events related to Header Flit Generation - Set 1 : Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_AFTER",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: message was slotted only after run-ahead was over; run-ahead mode definitely wasted",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1 : Run-Ahead - Message",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG1_DURING",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Run-Ahead - Message : Events related to Header Flit Generation - Set 1 : run-ahead mode: one message slotted during run-ahead",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_AFTER",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: second message slotted immediately after run-ahead; potential run-ahead success",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG2_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 1 : Events related to Header Flit Generation - Set 1 : run-ahead mode: two (or three) message flit sent immediately after run-ahead; complete run-ahead success",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Ok",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Ok : Events related to Header Flit Generation - Set 2 : new header flit construction may proceed in parallel with data flit sequence",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Flit Finished",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Flit Finished : Events related to Header Flit Generation - Set 2 : header flit finished assembly in parallel with data flit sequence",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Parallel Message",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.PAR_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Parallel Message : Events related to Header Flit Generation - Set 2 : message is slotted into header flit in parallel with data flit sequence",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall : Events related to Header Flit Generation - Set 2 : Rate-matching stall injected",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "PerPkg": "1",
+ "PublicDescription": "Flit Gen - Header 2 : Rate-matching Stall - No Message : Events related to Header Flit Generation - Set 2 : Rate matching stall injected, but no additional message slotted during stall cycle",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Message",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : One Message : One message in flit; VNA or non-VNA flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Message in non-VNA",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.1_MSG_VNX",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : One Message in non-VNA : One message in flit; non-VNA flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Two Messages",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.2_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : Two Messages : Two messages in flit; VNA flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Three Messages",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.3_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Sent Header Flit : Three Messages : Three messages in flit; VNA flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : One Slot Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : Two Slots Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit : All Slots Taken",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLITS_SENT.SLOTS_3",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : All",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : All : header flit is ready for transmission but could not be sent : header flit is ready for transmission but could not be sent for any reason, e.g. no credits, low tsv, stall injection",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No BGF Credits",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No BGF Credits : header flit is ready for transmission but could not be sent : No BGF credits available",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No BGF Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No BGF credits available; no additional message slotted into flit",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No TxQ Credits",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No TxQ Credits : header flit is ready for transmission but could not be sent : No TxQ credits available",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : No TxQ Credits + No Extra Message Slotted : header flit is ready for transmission but could not be sent : No TxQ credits available; no additional message slotted into flit",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : TSV High",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.TSV_HI",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : TSV High : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while tsv high",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent : Cycle valid for Flit",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_HDR_FLIT_NOT_SENT.VALID_FOR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Header Not Sent : Cycle valid for Flit : header flit is ready for transmission but could not be sent : header flit is ready for transmission but was not sent while cycle is valid for flit transmission",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Can't Slot AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Can't Slot AD : some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Can't Slot BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Can't Slot BL : some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Parallel Attempt",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Parallel Attempt : ad and bl messages attempted to slot into the same flit in parallel",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : Parallel Success",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : Parallel Success : ad and bl messages were actually slotted into the same flit in paralle",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : VN0",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : VN0 : vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held : VN1",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Message Held : VN1 : vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : REQ on AD",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : RSP on AD",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : SNP on AD",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : NCB on BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : NCS on BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : RSP on BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit : WB on BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : REQ on AD",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : REQ on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : RSP on AD",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : RSP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : SNP on AD",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : SNP on AD : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : NCB on BL",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : NCB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : NCS on BL",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : NCS on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : RSP on BL",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : RSP on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit : WB on BL",
+ "EventCode": "0x4f",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message can't slot into flit : WB on BL : Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Any In Use",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Any In Use : At least one remote vna credit is in use",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Corrected",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Corrected : Number of remote vna credits corrected (local return) per cycle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 1",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 1 : Remote vna credit level is less than 1 (i.e. no vna credits available)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 10",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT10",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 10 : remote vna credit level is less than 10; parallel vn0/vn1 arb not possible",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 4",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 4 : Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits : Level < 5",
+ "EventCode": "0x5a",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "PerPkg": "1",
+ "PublicDescription": "Remote VNA Credits : Level < 5 : Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_ADBL_ALLOC_L5",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credit count was less than 5 and allocation to ad or bl messages was required",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.REQ_VN01_ALLOC_LT10",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credit count was less than 10 and allocation to vn0 or vn1 was required",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_AD",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn0, remote vna credits were allocated only to ad messages, not to bl",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_JUST_BL",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn0, remote vna credits were allocated only to bl messages, not to ad",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN0_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credits were allocated only to vn0, not to vn1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_AD",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn1, remote vna credits were allocated only to ad messages, not to bl",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_JUST_BL",
+ "PerPkg": "1",
+ "PublicDescription": ": on vn1, remote vna credits were allocated only to bl messages, not to ad",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD_MISC.VN1_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": ": remote vna credits were allocated only to vn1, not to vn0",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 REQ Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 RSP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 SNP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN0 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN0 WB Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 REQ Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 RSP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 SNP Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD : VN1 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for AD : VN1 WB Messages : AD arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD FlowQ Bypass : Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN0 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN0 WB Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 REQ Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 RSP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 SNP Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty : VN1 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Not Empty : VN1 WB Messages : Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 REQ Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 RSP Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 SNP Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN0 WB Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 REQ Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 REQ Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 RSP Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts : VN1 SNP Messages",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD Flow Q Inserts : VN1 SNP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 REQ Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 SNP Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 REQ Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy : VN1 SNP Messages",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Inserts",
+ "EventCode": "0x2f",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Occupancy",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 NCB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 NCS Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 RSP Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN0 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN0 WB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 NCS Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 NCB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 RSP Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL : VN1 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Failed ARB for BL : VN1 WB Messages : BL arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN0 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN0 WB Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 REQ Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 RSP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 SNP Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty : VN1 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Not Empty : VN1 WB Messages : Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 RSP Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 WB Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 NCS Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN0 NCB Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN0 NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1 RSP Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1 RSP Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1 WB Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1 WB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1_NCB Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1_NCB Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts : VN1_NCS Messages",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL Flow Q Inserts : VN1_NCS Messages : Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCS Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCB Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 RSP Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 WB Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_THROUGH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN0 NCB Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN0_WRPULL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 RSP Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1 WB Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_THROUGH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy : VN1_NCS Messages",
+ "EventCode": "0x1f",
+ "EventName": "UNC_M3UPI_TxC_BL_WB_FLQ_OCCUPANCY.VN1_WRPULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 REQ Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 RSP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN0 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN0 SNP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 REQ Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 RSP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VN1 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VN1 SNP Messages : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty : VNA",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 AD Credits Empty : VNA : No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN0 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN0 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 RSP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 REQ Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VN1 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VN1 SNP Messages : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty : VNA",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "UPI0 BL Credits Empty : VNA : No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FlowQ Generated Prefetch",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "PublicDescription": "FlowQ Generated Prefetch : Count cases where FlowQ causes spawn of Prefetch to iMC/SMI3 target",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : WB on BL",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : WB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : NCB on BL",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : NCB on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : REQ on AD",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : REQ on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : RSP on AD",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : RSP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : SNP on AD",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : SNP on AD : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used : RSP on BL",
+ "EventCode": "0x5b",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Used : RSP on BL : Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : WB on BL",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : WB on BL : Number of Cycles there were no VN0 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : NCB on BL",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : NCB on BL : Number of Cycles there were no VN0 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : REQ on AD",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : REQ on AD : Number of Cycles there were no VN0 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : RSP on AD",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : RSP on AD : Number of Cycles there were no VN0 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : SNP on AD",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : SNP on AD : Number of Cycles there were no VN0 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits : RSP on BL",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 No Credits : RSP on BL : Number of Cycles there were no VN0 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : WB on BL",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : WB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : NCB on BL",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : NCB on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : REQ on AD",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : REQ on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : RSP on AD",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : RSP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : SNP on AD",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : SNP on AD : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used : RSP on BL",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Used : RSP on BL : Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers. : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : WB on BL",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : WB on BL : Number of Cycles there were no VN1 Credits : Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : NCB on BL",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : NCB on BL : Number of Cycles there were no VN1 Credits : Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : REQ on AD",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : REQ on AD : Number of Cycles there were no VN1 Credits : Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : RSP on AD",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : RSP on AD : Number of Cycles there were no VN1 Credits : Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : SNP on AD",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : SNP on AD : Number of Cycles there were no VN1 Credits : Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits : RSP on BL",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 No Credits : RSP on BL : Number of Cycles there were no VN1 Credits : Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_EQ_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_GT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.BOTHNONZERO_RT_LT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0xc0",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_EQ_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_GT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "EventCode": "0x7e",
+ "EventName": "UNC_M3UPI_WB_OCC_COMPARE.RT_LT_LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCALDEST_VN1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.LOCAL_AND_RT_VN1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.ROUTETHRU_VN1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "EventCode": "0x7d",
+ "EventName": "UNC_M3UPI_WB_PENDING.WAITING4PULL_VN1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.ARB",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message is making arbitration request",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.ARRIVED",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message arrived in ingress pipeline",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.BYPASS",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message took bypass path",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.FLITTED",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was slotted into flit (non bypass)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_ARB",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message lost arbitration",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_OLD",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was dropped because it became too old",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_XPT_PFTCH.LOST_QFULL",
+ "PerPkg": "1",
+ "PublicDescription": ": xpt prefetch message was dropped because it was overwritten by new message while prefetch queue was full",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bouncable)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "AD Bouncable : Number of allocations into the CRS Egress",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD credited)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "AD credited : Number of allocations into the CRS Egress",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AK)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK : Number of allocations into the CRS Egress",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AKC)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "AKC : Number of allocations into the CRS Egress",
+ "UMask": "0x40",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bouncable)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "BL Bouncable : Number of allocations into the CRS Egress",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL credited)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "BL credited : Number of allocations into the CRS Egress",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (IV)",
+ "EventCode": "0x47",
+ "EventName": "UNC_MDF_CRS_TxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "IV : Number of allocations into the CRS Egress",
+ "UMask": "0x20",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AD)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AD",
+ "PerPkg": "1",
+ "PublicDescription": "AD : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AK)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AK",
+ "PerPkg": "1",
+ "PublicDescription": "AK : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x4",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (AKC)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "AKC : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x10",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (BL)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.BL",
+ "PerPkg": "1",
+ "PublicDescription": "BL : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Number of cycles incoming messages from the vertical ring that are bounced at the SBO\r\nIngress (V-EMIB) (IV)",
+ "EventCode": "0x4B",
+ "EventName": "UNC_MDF_CRS_TxR_V_BOUNCES.IV",
+ "PerPkg": "1",
+ "PublicDescription": "IV : Number of cycles incoming messages from the vertical ring that are bounced at the SBO",
+ "UMask": "0x8",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "EventCode": "0x15",
+ "EventName": "UNC_MDF_FAST_ASSERTED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "AD bnc : Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "UMask": "0x1",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "EventCode": "0x15",
+ "EventName": "UNC_MDF_FAST_ASSERTED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "BL bnc : Counts the number of cycles when the distress signals are asserted based on SBO Ingress threshold",
+ "UMask": "0x2",
+ "Unit": "MDF"
+ },
+ {
+ "BriefDescription": "UPI Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of UPI LL clock cycles while the event is enabled",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2C",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "PublicDescription": "Direct packet attempts : D2C : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Direct packet attempts : D2K",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "PerPkg": "1",
+ "PublicDescription": "Direct packet attempts : D2K : Counts the number of DRS packets that we attempted to do direct2core/direct2UPI on. There are 4 mutually exclusive filters. Filter [0] can be used to get successful spawns, while [1:3] provide the different failure cases. Note that this does not count packets that are not candidates for Direct2Core. The only candidates for Direct2Core are DRS packets destined for Cbos.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L1",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L1 : Number of UPI qfclk cycles spent in L1 power mode. L1 is a mode that totally shuts down a UPI link. Use edge detect to count the number of instances when the UPI link entered L1. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. Because L1 totally shuts down the link, it takes a good amount of time to exit this mode.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "EventCode": "0x16",
+ "EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "EventCode": "0x20",
+ "EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req Nack",
+ "EventCode": "0x23",
+ "EventName": "UNC_UPI_POWER_L1_NACK",
+ "PerPkg": "1",
+ "PublicDescription": "L1 Req Nack : Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req (same as L1 Ack).",
+ "EventCode": "0x22",
+ "EventName": "UNC_UPI_POWER_L1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "L1 Req (same as L1 Ack). : Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0x24",
+ "EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.DATA",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.LLCRD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.LLCTRL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.NULL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.PROTHDR",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT0",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT1",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_ANY_FLITS.SLOT2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_UPI_RxL_ANY_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "EventCode": "0x05",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Receive path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Receive path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 0",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 0 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 1",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 1 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Bypassed : Slot 2",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Bypassed : Slot 2 : Counts the number of times that an incoming flit was able to bypass the flit buffer and pass directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of flits transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected",
+ "EventCode": "0x0b",
+ "EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "CRC Errors Detected : Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "LLR Requests Sent",
+ "EventCode": "0x08",
+ "EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
+ "PerPkg": "1",
+ "PublicDescription": "LLR Requests Sent : Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs..",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed",
+ "EventCode": "0x39",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 Credit Consumed : Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed",
+ "EventCode": "0x3a",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 Credit Consumed : Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x38",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : All Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Idle",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCRD Not Empty",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : LLCTRL",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : All Non Data",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot NULL or LLCRD Empty",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Protocol Header",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 0",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 1",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received : Slot 2",
+ "EventCode": "0x03",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Received : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 0",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 0 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 1",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 1 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations : Slot 2",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Flit Buffer Allocations : Slot 2 : Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 0 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 1 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets : Slot 2",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "RxQ Occupancy - All Packets : Slot 2 : Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "EventCode": "0x2a",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0p",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0p : Number of UPI qfclk cycles spent in L0p power mode. L0p is a mode where we disable 1/2 of the UPI lanes, decreasing our bandwidth in order to save power. It increases snoop and data transfer latencies and decreases overall bandwidth. This mode can be very useful in NUMA optimized workloads that largely only utilize UPI for snoops and their responses. Use edge detect to count the number of instances when the UPI link entered L0p. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "EventCode": "0x28",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "EventCode": "0x29",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0",
+ "EventCode": "0x26",
+ "EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles in L0 : Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.DATA",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.DATA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.LLCRD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.LLCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.LLCTRL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.NULL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.PROTHDR",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT0",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT1",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL_ANY_FLITS.SLOT2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_UPI_TxL_ANY_FLITS.SLOT2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Bypass, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode",
+ "EventCode": "0x04",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Matches on Transmit path of a UPI Port : Non-Coherent Standard, Match Opcode : Matches on Transmit path of a UPI port.\r\nMatch based on UMask specific bits:\r\nZ: Message Class (3-bit)\r\nY: Message Class Enable\r\nW: Opcode (4-bit)\r\nV: Opcode Enable\r\nU: Local Enable\r\nT: Remote Enable\r\nS: Data Hdr Enable\r\nR: Non-Data Hdr Enable\r\nQ: Dual Slot Hdr Enable\r\nP: Single Slot Hdr Enable\r\nLink Layer control types are excluded (LL CTRL, slot NULL, LLCRD) even under specific opcode match_en cases.\r\nNote: If Message Class is disabled, we expect opcode to also be disabled.",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Bypassed",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Bypassed : Counts the number of times that an incoming flit was able to bypass the Tx flit buffer and pass directly out the UPI Link. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All Data : Counts number of data flits across this UPI link.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All LLCRD Not Empty",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x17",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All LLCTRL",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All LLCTRL : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "All Null Flits",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Protocol Header",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All ProtDDR : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x87",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Data : Shows legal flit time (hides impact of L0p and L0c). : Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Idle",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Idle : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCRD Not Empty",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : LLCRD Not Empty : Shows legal flit time (hides impact of L0p and L0c). : Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : LLCTRL",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : LLCTRL : Shows legal flit time (hides impact of L0p and L0c). : Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : All Non Data",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : All Non Data : Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot NULL or LLCRD Empty : Shows legal flit time (hides impact of L0p and L0c). : LLCRD with all zeros is treated as NULL. Slot 1 is not treated as NULL if slot 0 is a dual slot. This can apply to slot 0,1, or 2.",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Protocol Header",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Protocol Header : Shows legal flit time (hides impact of L0p and L0c). : Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 0",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 0 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 1",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 1 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent : Slot 2",
+ "EventCode": "0x02",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Valid Flits Sent : Slot 2 : Shows legal flit time (hides impact of L0p and L0c). : Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Allocations : Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Tx Flit Buffer Occupancy : Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "EventCode": "0x45",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x44",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "VNA Credits Pending Return - Occupancy : Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Message Received : Doorbell",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Interrupt",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : Interrupt : Interrupts",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : IPI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : VLW",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_UPI_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "EventCode": "0x4e",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "EventCode": "0x4f",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "EventCode": "0x4f",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles PHOLD Assert to Ack : Assert to ACK : PHOLD cycles.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "EventCode": "0x4c",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "EventCode": "0x4c",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "EventCode": "0x4c",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "RACU Request : Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ }
+]
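
As an aside to the event descriptions above (illustration only, not part of the patch): the UNC_UPI_TxL_INSERTS and UNC_UPI_TxL_OCCUPANCY descriptions note that the two counts can be combined to derive the average lifetime of a flit in the Tx flit buffer, and that occupancy divided by a cycles-not-empty count gives the average buffer depth. A minimal sketch of that arithmetic in Python, assuming the three counts were already collected over the same interval (the values below are placeholders, not measurements):

    # Sketch only: Little's-law style ratios built from the UPI TxQ events
    # documented above. All counter values here are hypothetical placeholders.
    def txq_metrics(inserts: int, occupancy: int, cycles_not_empty: int):
        """Return (average TxQ depth while non-empty, average flit lifetime in cycles)."""
        avg_depth = occupancy / cycles_not_empty if cycles_not_empty else 0.0
        avg_lifetime = occupancy / inserts if inserts else 0.0
        return avg_depth, avg_lifetime

    if __name__ == "__main__":
        # Hypothetical counts for UNC_UPI_TxL_INSERTS, UNC_UPI_TxL_OCCUPANCY
        # and a TxQ cycles-not-empty event from the same measurement window.
        depth, lifetime = txq_metrics(inserts=12000, occupancy=480000,
                                      cycles_not_empty=90000)
        print(f"average TxQ depth:     {depth:.2f} flits")
        print(f"average flit lifetime: {lifetime:.2f} qfclk cycles")
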
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
new file mode 100644
index 000000000000..8b5f54fed103
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json
@@ -0,0 +1,3651 @@
+[
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x23",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x25",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x26",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x27",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART0_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART1_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART2_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x32",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART3_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x33",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART4_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART5_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x35",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART6_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x36",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_OUT.PART7_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "IIO Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "Number of IIO clock cycles while the event is enabled",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for IIO clocktick",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0-7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 2",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 3",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 4",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 5",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 6",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 7",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 0",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7000001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 1",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x4 card is plugged in to slot 1",
+ "UMask": "0x7000002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 2",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7000004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 3",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x4 card is plugged in to slot 3",
+ "UMask": "0x7000008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 4",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7000010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 5",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x4 card is plugged in to slot 1",
+ "UMask": "0x7000020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 6",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7000040",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy : Part 7",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "x4 card is plugged in to slot 3",
+ "UMask": "0x7000080",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0-7",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00ff",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7002004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7004004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7008004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested by the CPU : Core reading from Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7010004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested by the CPU : Core reading from Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7020004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested by the CPU : Core reading from Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7040004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested by the CPU : Core reading from Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7080004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0-7 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00ff",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0100",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0200",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested by the CPU : Core writing to Cards MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7002008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7004008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7008008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7010008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7020008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7040008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7080008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7002002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7004002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7008002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7010002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7020002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x7040002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7080002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part0-7 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00ff",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part0 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part1 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part2 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by IIO Part3 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part0-7 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.ALL_PARTS",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00ff",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part0 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part1 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part2 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made by IIO Part3 to Memory",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Passing data to be written : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Issuing final read or write of line : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Processing response from IOMMU : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Issuing to IOMMU : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Request Ownership : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Writing line",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests : Writing line : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Passing data to be written : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Issuing final read or write of line : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Processing response from IOMMU : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x70ff002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Issuing to IOMMU : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x70ff001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Request Ownership : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Incoming arbitration requests granted : Writing line : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache hits",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": Context cache hits : Counts each time a first look up of the transaction hits the RCC.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache lookups",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": Context cache lookups : Counts each time a transaction looks up root context cache.",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups first",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": IOTLB lookups first : Some transactions have to look up IOTLB multiple times. Counts the first time a request looks up IOTLB.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "IOTLB Fills (same as IOTLB miss)",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.MISSES",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "IOTLB Fills (same as IOTLB miss) : When a transaction misses IOTLB, it does a page walk to look up memory and bring in the relevant page translation. Counts when this page translation is written to IOTLB.",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOMMU memory access",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "PerPkg": "1",
+ "PublicDescription": ": IOMMU memory access : IOMMU sends out memory fetches when it misses the cache look up which is indicated by this signal. M2IOSF only uses low priority channel",
+ "UMask": "0xc0",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWT Hit to a 256T page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_256T_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWT Hit to a 256T page : Counts each time a transaction's first look up hits the SLPWC at the 512G level",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 4K page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 4K page : Counts each time a transaction's first look up hits the SLPWC at the 4K level",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 1G page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache fill",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "PerPkg": "1",
+ "PublicDescription": ": PageWalk cache fill : When a transaction misses SLPWC, it does a page walk to look up memory and bring in the relevant page translation. When this page translation is written to SLPWC, ObsPwcFillValid_nnnH is asserted.",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache lookup",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": PageWalk cache lookup : Counts each time a transaction looks up second level page walk cache.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_1G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_256T_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 1G page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.SLPWC_512G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Global IOTLB invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.PWT_OCCUPANCY_MSB",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": ": Global IOTLB invalidation cycles : Indicates that IOMMU is doing global invalidation.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if all bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if all bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if any bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if any bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : All",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0FFF",
+ "PublicDescription": "Number requests PCIe makes of the main die : All : Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "EventCode": "0x8e",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "ITC address map 1",
+ "EventCode": "0x8f",
+ "EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "EventCode": "0xd0",
+ "EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Outbound cacheline requests issued : 64B requests issued to device : Each outbound cacheline granular request may need to make multiple passes through the pipeline. Each time a cacheline completes all its passes it advances line",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "EventCode": "0xd1",
+ "EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Outbound TLP (transaction layer packet) requests issued : To device : Each time an outbound completes all its passes it advances the pointer",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PWT occupancy. Does not include 9th bit of occupancy (will undercount if PWT is greater than 255 per cycle).",
+ "EventCode": "0x42",
+ "EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "PerPkg": "1",
+ "PortMask": "0x0000",
+ "PublicDescription": "PWT occupancy : Indicates how many page walks are outstanding at any point in time.",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership : PCIe Request complete",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Request Ownership : PCIe Request complete : Only for posted requests : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership : Writing line",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Request Ownership : Writing line : Only for posted requests : Only for posted requests",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership : Issuing final read or write of line",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Request Ownership : Issuing final read or write of line : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Request Ownership : Passing data to be written",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Request Ownership : Passing data to be written : Only for posted requests : Only for posted requests",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU : Passing data to be written",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Processing response from IOMMU : Passing data to be written : Only for posted requests",
+ "UMask": "0x70ff008",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU : Issuing final read or write of line",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "UMask": "0x70ff002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU : Request Ownership",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Processing response from IOMMU : Request Ownership : Only for posted requests",
+ "UMask": "0x70ff001",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Processing response from IOMMU : Writing line",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "Processing response from IOMMU : Writing line : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "PCIe Request - pass complete : Passing data to be written : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x70ff020",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "PCIe Request - pass complete : Issuing final read or write of line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "PCIe Request - pass complete : Request Ownership : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x70ff004",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Writing line",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x00FF",
+ "PublicDescription": "PCIe Request - pass complete : Writing line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x70ff010",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Cards MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x7001002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x7002002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x7004002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x7008002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x7010002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x7020002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x7040002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x7080002",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0001",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0002",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0004",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0008",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0010",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0020",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0040",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x0080",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "M2P Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of M2P clock cycles while the event is enabled",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : All",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : All",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCS",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : All",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - DRS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCB",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI0 - NCS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - DRS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCB",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 0 : UPI1 - NCS",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - DRS",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCB",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Dedicated P2P Credit Taken - 1 : UPI2 - NCS",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2P_REMOTE_DED_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI0 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI1 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCB",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Dedicated Credits Returned : UPI2 - NCS",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2P_REMOTE_P2P_DED_RETURNED.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - DRS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCB",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI0 - NCS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - DRS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCB",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 0 : UPI1 - NCS",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - DRS",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCB",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Taken - 1 : UPI2 - NCS",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_TAKEN_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - DRS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCB",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI0 - NCS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - DRS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_DRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCB",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 0 : UPI1 - NCS",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_0.UPI1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - DRS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_DRS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCB",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Remote Shared P2P Credit - 1 : UPI2 - NCS",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_WAIT_1.UPI2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.UPI_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.UPI_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.UPI_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PMM",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M2P_TxC_CREDITS.PMM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_0",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.PMM_BLOCK_1",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_0",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.PMM_DISTRESS_1",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ }
+]
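
Several of the queue events in the file above (and in the uncore-memory.json file that follows) note that an INSERTS count can be combined with the matching occupancy accumulator to derive an average queue latency. A minimal sketch of that calculation, assuming the two raw counts have already been collected with perf stat using the event names defined in these files, is shown below; the sample figures are placeholders, not measured data.

# Average queue latency from an occupancy accumulator and an inserts count,
# as described in the event descriptions above: the accumulator sums the
# number of queued entries every cycle, so dividing it by the number of
# inserts gives the average number of cycles each entry spent in the queue.
def avg_queue_latency_cycles(occupancy_sum: int, inserts: int) -> float:
    """Total entry-cycles divided by number of entries (0 if the queue was unused)."""
    return occupancy_sum / inserts if inserts else 0.0

if __name__ == "__main__":
    # Placeholder counts: an occupancy accumulator (the "Ingress Occupancy
    # Accumulator" referenced in the descriptions) and the sum of the
    # UNC_M2P_RxC_INSERTS.* counts over the same measurement interval.
    print(avg_queue_latency_cycles(occupancy_sum=1_200_000, inserts=150_000))
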
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
new file mode 100644
index 000000000000..225333561295
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-memory.json
@@ -0,0 +1,3308 @@
+[
+ {
+ "BriefDescription": "Cycles - at UCLK",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2HBM_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2HBM_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled : Non Cisgress",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of time non cisgress D2C was not honoured by egress due to directory state constraints",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Counts the time when FM didn't do d2c for fill reads (cross tile case)",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden : Cisgress",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2HBM_DIRECT2CORE_TXN_OVERRIDE.CISGRESS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
+ "EventCode": "0x1b",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel UPI was disabled",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Cisgress D2U Ignored",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.CISGRESS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts cisgress d2K that was not honored due to directory constraints",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Egress Ignored D2U",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.EGRESS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of time D2K was not honoured by egress due to directory state constraints",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Cycles when Direct2UPI was Disabled : Non Cisgress D2U Ignored",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_NOT_TAKEN_DIRSTATE.NON_CISGRESS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts non cisgress d2K that was not honored due to directory constraints",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number of times a direct to UPI transaction was overridden.",
+ "EventCode": "0x1c",
+ "EventName": "UNC_M2HBM_DIRECT2UPI_TXN_OVERRIDE.CISGRESS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in A State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in I State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in L State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On NonDirty Line in S State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in A State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in I State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in L State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Hit : On Dirty Line in S State",
+ "EventCode": "0x1d",
+ "EventName": "UNC_M2HBM_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with any directory to non persistent memory",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory A to non persistent memory",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory I to non persistent memory",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2HBM_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of hit data returns to egress with directory S to non persistent memory",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in A State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in I State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in L State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On NonDirty Line in S State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in A State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in I State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in L State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Directory Miss : On Dirty Line in S State",
+ "EventCode": "0x1e",
+ "EventName": "UNC_M2HBM_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "UMask": "0x320",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_I_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to I to non persistent memory",
+ "UMask": "0x120",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_I_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to I to non persistent memory",
+ "UMask": "0x220",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_S_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from A to S to non persistent memory",
+ "UMask": "0x140",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.A_TO_S_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from A to S to non persistent memory",
+ "UMask": "0x240",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts any 1lm or 2lm hit data return that would result in directory update to non persistent memory",
+ "UMask": "0x101",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_A_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to A to non persistent memory",
+ "UMask": "0x104",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_A_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to A to non persistent memory",
+ "UMask": "0x204",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_S_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from I to S to non persistent memory",
+ "UMask": "0x102",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.I_TO_S_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from I to S to non persistent memory",
+ "UMask": "0x202",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts any 2lm miss data return that would result in directory update to non persistent memory",
+ "UMask": "0x201",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "UMask": "0x310",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "UMask": "0x308",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_A_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to A to non persistent memory",
+ "UMask": "0x110",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_A_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to A to non persistent memory",
+ "UMask": "0x210",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_I_HIT_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 1lm or 2lm hit data returns that would result in directory update from S to I to non persistent memory",
+ "UMask": "0x108",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory Updates",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2HBM_DIRECTORY_UPDATE.S_TO_I_MISS_NON_PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts 2lm miss data returns that would result in directory update from S to I to non persistent memory",
+ "UMask": "0x208",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on AkAd cmp message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.AD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on any packet type",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on Bl Cmp message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.BL_CMP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on NM fill write message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.CROSSTILE_NMWR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on D2Cha message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.D2CHA",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on D2c message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.D2CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count distress signalled on D2k message",
+ "EventCode": "0x67",
+ "EventName": "UNC_M2HBM_DISTRESS.D2UPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2HBM_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x80000004",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2HBM_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x80000001",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Count when Starve Glocab counter is at 7",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2HBM_IGR_STARVE_WINNER.MASK7",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x80",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x304",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0.ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0.ALL",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0.NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x101",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0_ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x104",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0_FROM_TGR",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x140",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Critical Priority - Ch0",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x102",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH0_NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH0_NORMAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x101",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH1.ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1.ALL",
+ "PerPkg": "1",
+ "UMask": "0x204",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH1.NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x201",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH1_ALL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x204",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - Ch1",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x240",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Critical Priority - Ch1",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x202",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.CH1_NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.CH1_NORMAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x201",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - All Channels",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x340",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Critical Priority - All Channels",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x302",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_READS.NORMAL",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2HBM_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x301",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "All Writes - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1810",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.ALL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0.ALL",
+ "PerPkg": "1",
+ "UMask": "0x810",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.FULL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0.FULL",
+ "PerPkg": "1",
+ "UMask": "0x801",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0.PARTIAL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x802",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_ALL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x810",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_FULL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_FULL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x801",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x804",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_NI",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_NI_MISS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x802",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - Ch0",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x808",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "All Writes - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1010",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1001",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1002",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "All Writes - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_ALL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1010",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Full Line Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_FULL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1001",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1004",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_NI",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_NI_MISS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_PARTIAL",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1002",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - Ch1",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1008",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "From TGR - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Full Non-ISOCH - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1801",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Full Line - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1804",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.NI",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Non-Inclusive Miss - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.NI_MISS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Partial Non-ISOCH - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1802",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "ISOCH Partial - All Channels",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2HBM_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1808",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_CIS_DROPS",
+ "EventCode": "0x5c",
+ "EventName": "UNC_M2HBM_PREFCAM_CIS_DROPS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : UPI - All Channels",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": ": UPI - All Channels",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_MERGE.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": ": XPT - All Channels",
+ "EventCode": "0x5d",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.RD_MERGED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.WR_MERGED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches",
+ "EventCode": "0x5e",
+ "EventName": "UNC_M2HBM_PREFCAM_DEMAND_NO_MERGE.WR_SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH0_UPI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - Ch 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH1_UPI",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : UPI - All Channels",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.UPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2HBM_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "PublicDescription": "Prefetch CAM Inserts : XPT -All Channels",
+ "UMask": "0x5",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2HBM_PREFCAM_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "All Channels",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": ": Channel 0",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": ": Channel 1",
+ "EventCode": "0x5f",
+ "EventName": "UNC_M2HBM_PREFCAM_RESP_MISS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.CIS",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.CIS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2HBM_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "UNC_M2HBM_PREFCAM_RxC_OCCUPANCY",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2HBM_PREFCAM_RxC_OCCUPANCY",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2HBM_RxC_AD.INSERTS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) : AD Ingress (from CMS) Allocations",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2HBM_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2HBM_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) : BL Ingress (from CMS) Allocations",
+ "EventCode": "0x04",
+ "EventName": "UNC_M2HBM_RxC_BL.INSERTS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts anytime a BL packet is added to Ingress",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) : BL Ingress (from CMS) Allocations",
+ "EventCode": "0x04",
+ "EventName": "UNC_M2HBM_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts anytime a BL packet is added to Ingress",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "EventCode": "0x05",
+ "EventName": "UNC_M2HBM_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "EventCode": "0x2e",
+ "EventName": "UNC_M2HBM_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "EventCode": "0x2f",
+ "EventName": "UNC_M2HBM_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2HBM_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2HBM_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x204",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2HBM_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2HBM_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) : AD Egress (to CMS) Allocations",
+ "EventCode": "0x06",
+ "EventName": "UNC_M2HBM_TxC_AD.INSERTS",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts anytime a AD packet is added to Egress",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) : AD Egress (to CMS) Allocations",
+ "EventCode": "0x06",
+ "EventName": "UNC_M2HBM_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts anytime a AD packet is added to Egress",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "EventCode": "0x07",
+ "EventName": "UNC_M2HBM_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) : Inserts - CMS0 - Near Side",
+ "EventCode": "0x0E",
+ "EventName": "UNC_M2HBM_TxC_BL.INSERTS_CMS0",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of BL transactions to CMS add port 0",
+ "UMask": "0x101",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) : Inserts - CMS1 - Far Side",
+ "EventCode": "0x0E",
+ "EventName": "UNC_M2HBM_TxC_BL.INSERTS_CMS1",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of BL transactions to CMS add port 1",
+ "UMask": "0x201",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy : All",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2HBM_TxC_BL_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 0",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2HBM_WPQ_FLUSH.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 1",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2HBM_WPQ_FLUSH.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2HBM_WPQ_NO_REG_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2HBM_WPQ_NO_REG_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2HBM_WPQ_NO_SPEC_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "M2M and iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2HBM_WPQ_NO_SPEC_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2HBM_WR_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2HBM_WR_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "EventCode": "0x4d",
+ "EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "EventCode": "0x4c",
+ "EventName": "UNC_M2HBM_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2HBM_WR_TRACKER_POSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2HBM_WR_TRACKER_POSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2HBM_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2HBM_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2HBM"
+ },
+ {
+ "BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0xff",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Activate due to read",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x11",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Activate due to Read in PCH0",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.RD_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Activate due to Read in PCH1",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.RD_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x10",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Underfill Read transaction on Page Empty or Page Miss",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.UFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x44",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.UFILL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x4",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.UFILL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x40",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Activate due to write",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x22",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Activate due to Write in PCH0",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.WR_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Activate Count : Activate due to Write in PCH1",
+ "EventCode": "0x02",
+ "EventName": "UNC_MCHBM_ACT_COUNT.WR_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Activate commands sent on this channel. Activate commands are issued to open up a page on the HBM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x20",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "All CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "UMask": "0xff",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 0",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "HBM RD_CAS and WR_CAS Commands",
+ "UMask": "0x40",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 1",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "HBM RD_CAS and WR_CAS Commands",
+ "UMask": "0x80",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read CAS commands issued (regular and underfill)",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "UMask": "0xcf",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Regular read CAS commands with precharge",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Underfill read CAS commands with precharge",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Regular read CAS commands issued (does not include underfills)",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "UMask": "0xc1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Underfill read CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "UMask": "0xc4",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "UMask": "0xf0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM RD_CAS and WR_CAS Commands. : HBM WR_CAS commands w/o auto-pre",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "UMask": "0xd0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write CAS commands with precharge",
+ "EventCode": "0x05",
+ "EventName": "UNC_MCHBM_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "UMask": "0xe0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 1",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_32B",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_64B",
+ "PerPkg": "1",
+ "UMask": "0xc1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Underfill Read CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_UFILL_32B",
+ "PerPkg": "1",
+ "UMask": "0xd0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Underfill Read CAS Command in Regular Mode (64B) in Pseudochannel 1",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.RD_UFILL_64B",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.WR_32B",
+ "PerPkg": "1",
+ "UMask": "0xe0",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_MCHBM_CAS_ISSUED_REQ_LEN.WR_64B",
+ "PerPkg": "1",
+ "UMask": "0xc4",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at DCLK frequency",
+ "EventCode": "0x01",
+ "EventName": "UNC_MCHBM_CLOCKTICKS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge All Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_MCHBM_HBM_PREALL.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge All Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_MCHBM_HBM_PREALL.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "All Precharge Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_MCHBM_HBM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Precharge All Commands: Counts the number of times that the precharge all command was sent.",
+ "UMask": "0x3",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at HCLK frequency",
+ "EventCode": "0x01",
+ "EventName": "UNC_MCHBM_HCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "All precharge events",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0xff",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Precharge from MC page table",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x88",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands. : Precharges from Page Table",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.PGT_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Equivalent to PAGE_EMPTY",
+ "UMask": "0x8",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.PGT_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x80",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Precharge due to read on page miss",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x11",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands. : Precharge due to read",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.RD_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Precharge from read bank scheduler",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.RD_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.UFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x44",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.UFILL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x4",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.UFILL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x40",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Precharge due to write on page miss",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x22",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands. : Precharge due to write",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.WR_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel. : Precharge from write bank scheduler",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "HBM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_MCHBM_PRE_COUNT.WR_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of HBM Precharge commands sent on this channel.",
+ "UMask": "0x20",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles where the read buffer has greater than UMASK elements. NOTE: Umask must be set to the maximum number of elements in the queue (24 entries for SPR).",
+ "EventCode": "0x19",
+ "EventName": "UNC_MCHBM_RDB_FULL",
+ "PerPkg": "1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Counts the number of inserts into the read buffer.",
+ "EventCode": "0x17",
+ "EventName": "UNC_MCHBM_RDB_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_MCHBM_RDB_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_MCHBM_RDB_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Counts the number of elements in the read buffer per cycle.",
+ "EventCode": "0x1a",
+ "EventName": "UNC_MCHBM_RDB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_MCHBM_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations: Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_MCHBM_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations: Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x80",
+ "EventName": "UNC_MCHBM_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy: Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x81",
+ "EventName": "UNC_MCHBM_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy: Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_MCHBM_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations: Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_MCHBM_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations: Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x82",
+ "EventName": "UNC_MCHBM_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy: Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to memory. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x83",
+ "EventName": "UNC_MCHBM_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy: Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to memory. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_MCHBM_WPQ_READ_HIT",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_MCHBM_WPQ_READ_HIT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_MCHBM_WPQ_READ_HIT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_MCHBM_WPQ_WRITE_HIT",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_MCHBM_WPQ_WRITE_HIT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x1",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_MCHBM_WPQ_WRITE_HIT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match: Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x2",
+ "Unit": "MCHBM"
+ },
+ {
+ "BriefDescription": "Activate due to read, write, underfill, or bypass",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Activate Count : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0xff",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : All DRAM Read and Write actions : DRAM RD_CAS and WR_CAS Commands : Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0xff",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 0 : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Pseudo Channel 1 : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands : Counts the total number of DRAM Read CAS commands issued on this channel. This includes underfills.",
+ "UMask": "0xcf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xc2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xc8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/out auto-pre : DRAM RD_CAS and WR_CAS Commands : Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0xc1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM underfill read CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : Underfill Read Issued : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xc4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM write CAS commands issued",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands : Counts the total number of DRAM Write CAS commands issued on this channel.",
+ "UMask": "0xf0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xd0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "EventCode": "0x05",
+ "EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0xe0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pseudo Channel 1",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_32B",
+ "PerPkg": "1",
+ "UMask": "0xc8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_64B",
+ "PerPkg": "1",
+ "UMask": "0xc1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill Read CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_UFILL_32B",
+ "PerPkg": "1",
+ "UMask": "0xd0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Underfill Read CAS Command in Regular Mode (64B) in Pseudochannel 1",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.RD_UFILL_64B",
+ "PerPkg": "1",
+ "UMask": "0xc2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write CAS Command in Interleaved Mode (32B)",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.WR_32B",
+ "PerPkg": "1",
+ "UMask": "0xe0",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write CAS Command in Regular Mode (64B) in Pseudochannel 0",
+ "EventCode": "0x06",
+ "EventName": "UNC_M_CAS_ISSUED_REQ_LEN.WR_64B",
+ "PerPkg": "1",
+ "UMask": "0xc4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at DCLK frequency",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM DCLK clock cycles while the event is enabled",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge All Commands : Counts the number of times that the precharge all command was sent.",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "IMC Clockticks at HCLK frequency",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM HCLK clock cycles while the event is enabled",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.RD",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_PCLS.RD",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.TOTAL",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_PCLS.TOTAL",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.WR",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M_PCLS.WR",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue inserts",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M_PMM_RPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of read requests allocated in the PMM Read Pending Queue.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue occupancy",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue occupancy",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.ALL_SCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH0",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.GNT_WAIT_SCH1",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Read Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Read Pending Queue Occupancy",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M_PMM_RPQ_OCCUPANCY.NO_GNT_SCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the per cycle occupancy of the PMM Read Pending Queue.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM (for IXP) Write Queue Cycles Not Empty",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M_PMM_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue inserts",
+ "EventCode": "0xe7",
+ "EventName": "UNC_M_PMM_WPQ_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of write requests allocated in the PMM Write Pending Queue.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the PMM DIMM.",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH0",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM Write Pending Queue Occupancy",
+ "EventCode": "0xE4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.ALL_SCH1",
+ "PerPkg": "1",
+ "PublicDescription": "PMM Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the PMM Write Pending Queue.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM (for IXP) Write Pending Queue Occupancy",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.CAS",
+ "PerPkg": "1",
+ "PublicDescription": "PMM (for IXP) Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the IXP DIMM.",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PMM (for IXP) Write Pending Queue Occupancy",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M_PMM_WPQ_OCCUPANCY.PWR",
+ "PerPkg": "1",
+ "PublicDescription": "PMM (for IXP) Write Pending Queue Occupancy : Accumulates the per cycle occupancy of the Write Pending Queue to the IXP DIMM.",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel PPD Cycles",
+ "EventCode": "0x85",
+ "EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "PerPkg": "1",
+ "PublicDescription": "Channel PPD Cycles : Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clock-Enabled Self-Refresh",
+ "EventCode": "0x43",
+ "EventName": "UNC_M_POWER_SELF_REFRESH",
+ "PerPkg": "1",
+ "PublicDescription": "Clock-Enabled Self-Refresh : Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to read, write, underfill, or PGT.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0xff",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to (?)",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to (?) : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x88",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharges from Page Table",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharges from Page Table : Counts the number of DRAM Precharge commands sent on this channel. : Equivalent to PAGE_EMPTY",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.PGT_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to read on page miss",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to read",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to read : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from read bank scheduler",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.RD_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x44",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.UFILL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Precharge due to write on page miss",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x22",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to write",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to write : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from write bank scheduler",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x03",
+ "EventName": "UNC_M_PRE_COUNT.WR_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles where the read buffer has greater than UMASK elements. This includes reads to both DDR and PMEM. NOTE: Umask must be set to the maximum number of elements in the queue (24 entries for SPR).",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_RDB_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of inserts into the read buffer destined for DDR. Does not count reads destined for PMEM.",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles where there's at least one element in the read buffer. This includes reads to both DDR and PMEM.",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NE",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Not Empty",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NE.PCH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Not Empty",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NE.PCH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles where there's at least one element in the read buffer. This includes reads to both DDR and PMEM.",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NOT_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Counts the number of elements in the read buffer, including reads to both DDR and PMEM.",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M_RDB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard accepts",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Write Accepts",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Write Rejects",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.FM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : FM read completions",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_RD_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : FM write completions",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.NM_WR_CMPS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Read Accepts",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : Read Rejects",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.RD_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard rejects",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.REJECTS",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : NM read completions",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_ACCEPTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Accesses : NM write completions",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M_SB_ACCESSES.WR_REJECTS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Alloc",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.ALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Dealloc",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write Starved",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FM_RD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write Starved",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FM_TGR_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read Starved",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.FM_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Valid",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.NM_RD_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read Starved",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.NM_WR_STARVED",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Reject",
+ "EventCode": "0xd9",
+ "EventName": "UNC_M_SB_CANARY.VLD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Full",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M_SB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Cycles Not-Empty",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M_SB_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Block region reads",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Block region writes",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Persistent Mem reads",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Persistent Mem writes",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Reads",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Inserts : Writes",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M_SB_INSERTS.WRS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Block region reads",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_RDS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Block region writes",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.BLOCK_WRS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Persistent Mem reads",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_RDS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Persistent Mem writes",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.PMM_WRS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Occupancy : Reads",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M_SB_OCCUPANCY.RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : All",
+ "EventCode": "0xda",
+ "EventName": "UNC_M_SB_PREF_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : DDR4",
+ "EventCode": "0xda",
+ "EventName": "UNC_M_SB_PREF_INSERTS.DDR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Inserts : PMM",
+ "EventCode": "0xda",
+ "EventName": "UNC_M_SB_PREF_INSERTS.PMM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : All",
+ "EventCode": "0xdb",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : DDR4",
+ "EventCode": "0xdb",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Scoreboard Prefetch Occupancy : Persistent Mem",
+ "EventCode": "0xDB",
+ "EventName": "UNC_M_SB_PREF_OCCUPANCY.PMM",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.CANARY",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.DDR_EARLY_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : FM requests rejected due to full address conflict",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.FM_ADDR_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : NM requests rejected due to set conflict",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.NM_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of Scoreboard Requests Rejected : Patrol requests rejected due to set conflict",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M_SB_REJECT.PATROL_SET_CNFLT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read - Set",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write - Set",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Set",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write - Set",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M_SB_STRV_ALLOC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read - Set",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write - Set",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Set",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write - Set",
+ "EventCode": "0xde",
+ "EventName": "UNC_M_SB_STRV_DEALLOC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Read",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read - Clear",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Far Mem Write",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.FM_WR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Read",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.NM_RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": ": Near Mem Write",
+ "EventCode": "0xd8",
+ "EventName": "UNC_M_SB_STRV_OCC.NM_WR",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.DDR4_CMP",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.NEW",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.NEW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.OCC",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.OCC",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.PMM0_CMP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.PMM1_CMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.PMM2_CMP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_HIT",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_SB_TAGGED.RD_MISS",
+ "EventCode": "0xdd",
+ "EventName": "UNC_M_SB_TAGGED.RD_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check hit in near memory cache (DDR4)",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check miss, no data at this line",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.MISS_CLEAN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check miss, existing data may be evicted to PMM",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.MISS_DIRTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check hit due to memory read (bug?)",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.NM_RD_HIT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "2LM Tag check hit due to memory write (bug?)",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M_TAGCHK.NM_WR_HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "FCMask": "0x00000000",
+ "PerPkg": "1",
+ "PortMask": "0x00000000",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ }
+]
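
The WPQ occupancy descriptions above spell out two derived metrics: average queue occupancy is the accumulated occupancy divided by the cycles the queue was not empty, and average write latency is the accumulated occupancy divided by the number of allocations. Below is a minimal sketch of that arithmetic in Python, assuming the raw counts were already collected for one channel; the not-empty and insert counts are placeholders for companion events, not names defined in this file.

def wpq_averages(occupancy_sum, cycles_not_empty, inserts):
    # occupancy_sum: accumulated UNC_M_WPQ_OCCUPANCY_PCH0 value over the interval
    # cycles_not_empty: cycles the WPQ held at least one entry (assumed companion count)
    # inserts: number of WPQ allocations over the same interval (assumed companion count)
    avg_occupancy = occupancy_sum / cycles_not_empty if cycles_not_empty else 0.0
    avg_latency_cycles = occupancy_sum / inserts if inserts else 0.0
    return avg_occupancy, avg_latency_cycles

# Invented counter values for illustration:
print(wpq_averages(occupancy_sum=1_200_000, cycles_not_empty=300_000, inserts=40_000))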
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
new file mode 100644
index 000000000000..8948e85074f0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-power.json
@@ -0,0 +1,197 @@
+[
+ {
+ "BriefDescription": "PCU PCLK Clockticks",
+ "EventCode": "0x01",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of PCU PCLK Clock cycles while the event is enabled",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 0 Cycles : Cycles spent in phase-shedding power state 0",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 1 Cycles : Cycles spent in phase-shedding power state 1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 2 Cycles : Cycles spent in phase-shedding power state 2",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 3 Cycles : Cycles spent in phase-shedding power state 3",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX256 Frequency Clipping",
+ "EventCode": "0x49",
+ "EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX512 Frequency Clipping",
+ "EventCode": "0x4a",
+ "EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "EventCode": "0x04",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "EventCode": "0x05",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "IO P Limit Strongest Lower Limit Cycles : Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2f",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Memory Phase Shedding Cycles : Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2a",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C0 : Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2b",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2d",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "EventCode": "0x06",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C0",
+ "EventCode": "0x35",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C0 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C3",
+ "EventCode": "0x36",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C6",
+ "EventCode": "0x37",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY_CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C6 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0x0a",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x09",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Total Core C State Transition Cycles : Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
+ "Unit": "PCU"
+ }
+]
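
The package C-state residency events above are plain cycle counts, so converting them into residency percentages only needs the PCU clocktick count from the same window. A small sketch, assuming both values were read over one measurement interval; the numbers below are invented.

def pkg_residency_pct(residency_cycles, pclk_cycles):
    # residency_cycles: a UNC_P_PKG_RESIDENCY_*_CYCLES count for the interval
    # pclk_cycles: UNC_P_CLOCKTICKS over the same interval
    return 100.0 * residency_cycles / pclk_cycles if pclk_cycles else 0.0

c0 = pkg_residency_pct(residency_cycles=700_000_000, pclk_cycles=1_000_000_000)
c6 = pkg_residency_pct(residency_cycles=250_000_000, pclk_cycles=1_000_000_000)
print(f"C0 {c0:.1f}%  C6 {c6:.1f}%")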
diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
new file mode 100644
index 000000000000..a1e3b8d2ebe7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/virtual-memory.json
@@ -0,0 +1,165 @@
+[
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CounterMask": "1",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 1G page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "EventCode": "0x12",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CounterMask": "1",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 1G page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "EventCode": "0x13",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CounterMask": "1",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "EventCode": "0x11",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ }
+]
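
The WALK_PENDING and WALK_COMPLETED pairs above support a common derived metric: dividing the outstanding-walk cycles by the number of completed walks gives an approximate average page-walk duration. A minimal sketch of that ratio for demand loads, assuming both counts come from the same run; the values are invented.

def avg_walk_cycles(walk_pending, walk_completed):
    # walk_pending: DTLB_LOAD_MISSES.WALK_PENDING summed over the run
    # walk_completed: DTLB_LOAD_MISSES.WALK_COMPLETED over the same run
    return walk_pending / walk_completed if walk_completed else 0.0

print(avg_walk_cycles(walk_pending=5_400_000, walk_completed=120_000))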
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/cache.json b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
new file mode 100644
index 000000000000..7f0dc65a55d2
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/cache.json
@@ -0,0 +1,155 @@
+[
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Counts the number of load ops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Counts the number of store ops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_1024",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x400",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_2048",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x800",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of tagged load uops retired that exceed the latency threshold defined in MEC_CR_PEBS_LD_LAT_THRESHOLD - Only counts with PEBS enabled.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Counts the number of stores uops retired same as MEM_UOPS_RETIRED.ALL_STORES",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.STORE_LATENCY",
+ "PEBS": "2",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ }
+]
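
Since each LOAD_LATENCY_GT_N event above counts loads whose latency exceeds N cycles, the thresholds are cumulative, and a latency histogram falls out of differencing adjacent thresholds. A sketch of that subtraction, assuming one suitably scaled count was collected per threshold; the numbers are invented.

# Cumulative counts keyed by threshold, e.g. from MEM_UOPS_RETIRED.LOAD_LATENCY_GT_*.
cumulative = {4: 900_000, 8: 400_000, 16: 150_000, 32: 60_000, 64: 20_000,
              128: 7_000, 256: 2_500, 512: 800, 1024: 200, 2048: 40}

thresholds = sorted(cumulative)
for lo, hi in zip(thresholds, thresholds[1:]):
    bucket = cumulative[lo] - cumulative[hi]  # loads with latency in (lo, hi]
    print(f">{lo:>4} and <={hi:>4} cycles: {bucket}")
print(f">{thresholds[-1]:>4} cycles: {cumulative[thresholds[-1]]}")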
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json b/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json
new file mode 100644
index 000000000000..be8f1c7e195c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/frontend.json
@@ -0,0 +1,16 @@
+[
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts every time the code stream enters into a new cache line by walking sequential from the previous line or being redirected by a jump and the instruction cache registers bytes are not present. -",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
new file mode 100644
index 000000000000..79d8af45100c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/memory.json
@@ -0,0 +1,20 @@
+[
+ {
+ "BriefDescription": "Counts demand data reads that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBFC00002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/other.json b/tools/perf/pmu-events/arch/x86/sierraforest/other.json
new file mode 100644
index 000000000000..2414f6ff53b0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/other.json
@@ -0,0 +1,20 @@
+[
+ {
+ "BriefDescription": "Counts demand data reads that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0xB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
new file mode 100644
index 000000000000..41212957ef21
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/pipeline.json
@@ -0,0 +1,96 @@
+[
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.THREAD_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles [This event is alias to CPU_CLK_UNHALTED.CORE_P]",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of instructions retired",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window, including relevant microcode flows, and while uops are not yet available in the instruction queue (IQ) or until an FE_BOUND event occurs besides OTHER and CISC. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to backend stalls",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of retirement slots not consumed due to front end stalls",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of consumed retirement slots. Similar to UOPS_RETIRED.ALL",
+ "EventCode": "0x72",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003"
+ }
+]
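
The four TOPDOWN_*.ALL events above partition slots into front-end bound, back-end bound, bad speculation and retiring, so a level-1 top-down breakdown can be approximated by normalizing each count by their sum. That normalization is an assumption of this sketch (the file itself does not define a total-slots denominator); the counts are invented.

def topdown_level1(fe_bound, be_bound, bad_spec, retiring):
    # Raw TOPDOWN_FE_BOUND.ALL, TOPDOWN_BE_BOUND.ALL,
    # TOPDOWN_BAD_SPECULATION.ALL and TOPDOWN_RETIRING.ALL counts from one run.
    total = fe_bound + be_bound + bad_spec + retiring
    if not total:
        return {}
    return {
        "frontend_bound": fe_bound / total,
        "backend_bound": be_bound / total,
        "bad_speculation": bad_spec / total,
        "retiring": retiring / total,
    }

print(topdown_level1(fe_bound=2.0e9, be_bound=3.5e9, bad_spec=0.5e9, retiring=4.0e9))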
diff --git a/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json b/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json
new file mode 100644
index 000000000000..bd5f2b634c98
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/sierraforest/virtual-memory.json
@@ -0,0 +1,24 @@
+[
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/cache.json b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
index 805ef1436539..818e0664a3a6 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/cache.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/cache.json
@@ -1,812 +1,677 @@
[
{
- "PublicDescription": "This event counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBS (L2 misses) and WOB (L2 write-back victims).",
- "EventCode": "0x30",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "L2_REJECT_XQ.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of request from the L2 that were not accepted into the XQ"
- },
- {
- "PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2Q due to a full or nearly full w condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core?s dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event.)",
+ "BriefDescription": "Counts the number of request that were not accepted into the L2Q because the L2Q is FULL.",
"EventCode": "0x31",
- "Counter": "0,1",
- "UMask": "0x0",
"EventName": "CORE_REJECT_L2Q.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of request that were not accepted into the L2Q because the L2Q is FULL."
- },
- {
- "PublicDescription": "This event counts requests originating from the core that references a cache line in the L2 cache.",
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x4f",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache requests from this core"
- },
- {
- "PublicDescription": "This event counts the total number of L2 cache references and the number of L2 cache misses respectively.",
- "EventCode": "0x2E",
- "Counter": "0,1",
- "UMask": "0x41",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache request misses"
+ "PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2Q due to a full or nearly full w condition which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the XQ, but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to insure fairness between cores, or to delay a core?s dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event.)",
+ "SampleAfterValue": "200003"
},
{
- "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss.",
"EventCode": "0x86",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "FETCH_STALL.ICACHE_FILL_PENDING_CYCLES",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ICache miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ICache miss. Note: this event is not the same as the total number of cycles spent retrieving instruction cache lines from the memory hierarchy.\r\nCounts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles code-fetch stalled due to an outstanding ICache miss."
- },
- {
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retired loads that were prohibited from receiving forwarded data from the store because of address mismatch.",
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "REHABQ.LD_BLOCK_ST_FORWARD",
- "SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store forward restriction"
- },
- {
- "PublicDescription": "This event counts the cases where a forward was technically possible, but did not occur because the store data was not available at the right time.",
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "REHABQ.LD_BLOCK_STD_NOTREADY",
- "SampleAfterValue": "200003",
- "BriefDescription": "Loads blocked due to store data not ready"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of retire stores that experienced cache line boundary splits.",
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "REHABQ.ST_SPLITS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Store uops that split cache line boundary"
+ "BriefDescription": "Counts the number of request from the L2 that were not accepted into the XQ",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_XQ.ALL",
+ "PublicDescription": "This event counts the number of demand and prefetch transactions that the L2 XQ rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBS (L2 misses) and WOB (L2 write-back victims).",
+ "SampleAfterValue": "200003"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of retire loads that experienced cache line boundary splits.",
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "REHABQ.LD_SPLITS",
+ "BriefDescription": "L2 cache request misses",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "This event counts the total number of L2 cache references and the number of L2 cache misses respectively.",
"SampleAfterValue": "200003",
- "BriefDescription": "Load uops that split cache line boundary"
+ "UMask": "0x41"
},
{
- "PublicDescription": "This event counts the number of retired memory operations with lock semantics. These are either implicit locked instructions such as the XCHG instruction or instructions with an explicit LOCK prefix (0xF0).",
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "REHABQ.LOCK",
+ "BriefDescription": "L2 cache requests from this core",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "This event counts requests originating from the core that references a cache line in the L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Uops with lock semantics"
+ "UMask": "0x4f"
},
{
- "PublicDescription": "This event counts the number of retired stores that are delayed because there is not a store address buffer available.",
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "REHABQ.STA_FULL",
+ "BriefDescription": "All Loads",
+ "EventCode": "0x04",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PublicDescription": "This event counts the number of load ops retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Store address buffer full"
+ "UMask": "0x40"
},
{
- "PublicDescription": "This event counts the number of load uops reissued from Rehabq.",
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "REHABQ.ANY_LD",
+ "BriefDescription": "All Stores",
+ "EventCode": "0x04",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PublicDescription": "This event counts the number of store ops retired.",
"SampleAfterValue": "200003",
- "BriefDescription": "Any reissued load uops"
+ "UMask": "0x80"
},
{
- "PublicDescription": "This event counts the number of store uops reissued from Rehabq.",
- "EventCode": "0x03",
- "Counter": "0,1",
- "UMask": "0x80",
- "EventName": "REHABQ.ANY_ST",
+ "BriefDescription": "Cross core or cross module hitm",
+ "EventCode": "0x04",
+ "EventName": "MEM_UOPS_RETIRED.HITM",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load ops retired that got data from the other core or from the other module.",
"SampleAfterValue": "200003",
- "BriefDescription": "Any reissued store uops"
+ "UMask": "0x20"
},
{
- "PublicDescription": "This event counts the number of load ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
+ "BriefDescription": "Loads missed L1",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "MEM_UOPS_RETIRED.L1_MISS_LOADS",
+ "PublicDescription": "This event counts the number of load ops retired that miss in L1 Data cache. Note that prefetch misses will not be counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads missed L1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of load ops retired that hit in the L2.",
+ "BriefDescription": "Loads hit L2",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "MEM_UOPS_RETIRED.L2_HIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load ops retired that hit in the L2.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads hit L2"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of load ops retired that miss in the L2.",
+ "BriefDescription": "Loads missed L2",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x4",
"EventName": "MEM_UOPS_RETIRED.L2_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load ops retired that miss in the L2.",
"SampleAfterValue": "100007",
- "BriefDescription": "Loads missed L2"
+ "UMask": "0x4"
},
{
- "PublicDescription": "This event counts the number of load ops retired that had UTLB miss.",
+ "BriefDescription": "Loads missed UTLB",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x10",
"EventName": "MEM_UOPS_RETIRED.UTLB_MISS",
+ "PublicDescription": "This event counts the number of load ops retired that had UTLB miss.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads missed UTLB"
- },
- {
- "PEBS": "1",
- "PublicDescription": "This event counts the number of load ops retired that got data from the other core or from the other module.",
- "EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "MEM_UOPS_RETIRED.HITM",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cross core or cross module hitm"
- },
- {
- "PublicDescription": "This event counts the number of load ops retired.",
- "EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x40",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "200003",
- "BriefDescription": "All Loads"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts the number of store ops retired.",
- "EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x80",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "SampleAfterValue": "200003",
- "BriefDescription": "All Stores"
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
+ "EventCode": "0xB7",
+ "EventName": "OFFCORE_RESPONSE",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
+ "BriefDescription": "Counts any code reads (demand & prefetch) that have any response type.",
"EventCode": "0xB7",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010044",
"SampleAfterValue": "100007",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000044",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any code reads (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000044",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any code reads (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000044",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000044",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000044",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any code reads (demand & prefetch) that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any data read (demand & prefetch) that have any response type.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010044",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000013091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any code reads (demand & prefetch) that have any response type.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any data read (demand & prefetch) that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any data read (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any rfo reads (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any data read (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any data read (demand & prefetch) that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200003091",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that have any response type.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010022",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000018008",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any rfo reads (demand & prefetch) that have any response type.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1680003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000008008",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data read (demand & prefetch) that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x1000003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400008008",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data read (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any request that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0400003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200008008",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data read (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that have any response type.",
"EventCode": "0xB7",
- "MSRValue": "0x0200003091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data read (demand & prefetch) that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x0000013091",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_DATA_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any data read (demand & prefetch) that have any response type.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1680004800",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts streaming store that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x1000008008",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts any rfo reads (demand & prefetch) that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0400008008",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.ANY_RFO.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000022",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts writeback (modified to exclusive) that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x0200008008",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000008",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts writeback (modified to exclusive) that miss L2 with no details on snoop-related information.",
"EventCode": "0xB7",
- "MSRValue": "0x0000018008",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.ANY_REQUEST.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NO_SNOOP_NEEDED",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0080000008",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts any request that have any response type.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that have any response type.",
"EventCode": "0xB7",
- "MSRValue": "0x1680002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x1000002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts DCU hardware prefetcher data read that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x0400002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0200002000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x0000012000",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000004",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts DCU hardware prefetcher data read that have any response type.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch data read that have any response type.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000100",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000010001",
"SampleAfterValue": "100007",
- "BriefDescription": "Countsof demand RFO requests to write to partial cache lines that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch data read that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000080",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand reads of partial cache lines (including UC and WC) that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch data read that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch data read that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch data read that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000040",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch data read that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000001",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts RFO requests generated by L2 prefetchers that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000020",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000010",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand and DCU prefetch RFOs that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000010",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6,0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x4000000002",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts demand reads of partial cache lines (including UC and WC) that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000010",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_READS.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000080",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Countsof demand RFO requests to write to partial cache lines that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000010",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PARTIAL_WRITES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000100",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts DCU hardware prefetcher data read that have any response type.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000008",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.ANY_RESPONSE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0000012000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts writeback (modified to exclusive) that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x0080000008",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.COREWB.L2_MISS.NO_SNOOP_NEEDED",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts writeback (modified to exclusive) that miss L2 with no details on snoop-related information.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts DCU hardware prefetcher data read that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts DCU hardware prefetcher data read that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1_DATA_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200002000",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x0000010004",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch instruction cacheline that have any response type.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts code reads generated by L2 prefetchers that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_CODE_RD.L2_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000040",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch RFOs that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000010",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000010",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch RFOs that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000010",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts data cacheline reads generated by L2 prefetchers that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000002",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000010",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch RFOs that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x4000000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.OUTSTANDING",
- "MSRIndex": "0x1a6",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.ANY",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch data read that are are outstanding, per cycle, from the time of the L2 miss to when any response is received.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts RFO requests generated by L2 prefetchers that hit in the other module where modified copies were found in other core's L1 cache.",
"EventCode": "0xB7",
- "MSRValue": "0x1680000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HITM_OTHER_CORE",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch data read that miss L2.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
"EventCode": "0xB7",
- "MSRValue": "0x1000000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HITM_OTHER_CORE",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.HIT_OTHER_CORE_NO_FWD",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0400000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch data read that hit in the other module where modified copies were found in other core's L1 cache.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts RFO requests generated by L2 prefetchers that miss L2 with a snoop miss response.",
"EventCode": "0xB7",
- "MSRValue": "0x0400000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.HIT_OTHER_CORE_NO_FWD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L2_MISS.SNOOP_MISS",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x0200000020",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch data read that miss L2 and the snoops to sibling cores hit in either E/S state and the line is not forwarded.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts streaming store that miss L2.",
"EventCode": "0xB7",
- "MSRValue": "0x0200000001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.SNOOP_MISS",
+ "EventName": "OFFCORE_RESPONSE.STREAMING_STORES.L2_MISS.ANY",
"MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1680004800",
"SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch data read that miss L2 with a snoop miss response.",
- "Offcore": "1"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB7",
- "MSRValue": "0x0000010001",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts demand and DCU prefetch data read that have any response type.",
- "Offcore": "1"
+ "BriefDescription": "Any reissued load uops",
+ "EventCode": "0x03",
+ "EventName": "REHABQ.ANY_LD",
+ "PublicDescription": "This event counts the number of load uops reissued from Rehabq.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Any reissued store uops",
+ "EventCode": "0x03",
+ "EventName": "REHABQ.ANY_ST",
+ "PublicDescription": "This event counts the number of store uops reissued from Rehabq.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Loads blocked due to store data not ready",
+ "EventCode": "0x03",
+ "EventName": "REHABQ.LD_BLOCK_STD_NOTREADY",
+ "PublicDescription": "This event counts the cases where a forward was technically possible, but did not occur because the store data was not available at the right time.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Loads blocked due to store forward restriction",
+ "EventCode": "0x03",
+ "EventName": "REHABQ.LD_BLOCK_ST_FORWARD",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retired loads that were prohibited from receiving forwarded data from the store because of address mismatch.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Load uops that split cache line boundary",
+ "EventCode": "0x03",
+ "EventName": "REHABQ.LD_SPLITS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of retire loads that experienced cache line boundary splits.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops with lock semantics",
+ "EventCode": "0x03",
+ "EventName": "REHABQ.LOCK",
+ "PublicDescription": "This event counts the number of retired memory operations with lock semantics. These are either implicit locked instructions such as the XCHG instruction or instructions with an explicit LOCK prefix (0xF0).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store address buffer full",
+ "EventCode": "0x03",
+ "EventName": "REHABQ.STA_FULL",
+ "PublicDescription": "This event counts the number of retired stores that are delayed because there is not a store address buffer available.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Store uops that split cache line boundary",
+ "EventCode": "0x03",
+ "EventName": "REHABQ.ST_SPLITS",
+ "PublicDescription": "This event counts the number of retire stores that experienced cache line boundary splits.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
}
-] \ No newline at end of file
+]
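
The OFFCORE_RESPONSE entries above program the off-core response MSRs (MSRIndex "0x1a6,0x1a7") with an MSRValue that selects a request type in its low bits and response/snoop attributes in its upper bits. The following Python sketch is a rough illustration only (the exact bit assignments are model-specific and are not defined by this patch); it splits one MSRValue taken from the table above, 0x1680000001 for OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY, at the conventional 16-bit boundary:

    # Illustrative sketch, not part of the patch: split an offcore-response
    # MSRValue into a request-type field (low 16 bits, by convention) and the
    # remaining response/snoop selection bits. The boundary is an assumption.
    def split_offcore_msr_value(msr_value: int) -> tuple[int, int]:
        request_type = msr_value & 0xFFFF    # e.g. bit 0 ~ demand data read
        response_info = msr_value >> 16      # supplier/snoop selection bits
        return request_type, response_info

    # Value from OFFCORE_RESPONSE.DEMAND_DATA_RD.L2_MISS.ANY above.
    req, resp = split_offcore_msr_value(0x1680000001)
    print(f"request_type={req:#x} response_info={resp:#x}")
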
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json b/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json
new file mode 100644
index 000000000000..f2b1e8f08d68
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/silvermont/floating-point.json
@@ -0,0 +1,10 @@
+[
+ {
+ "BriefDescription": "Stalls due to FP assists",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "This event counts the number of times that pipeline stalled due to FP operations needing assists.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ }
+]
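
In the new layout each per-topic file is a flat JSON array whose keys are sorted alphabetically (BriefDescription, EventCode, EventName, MSRIndex, MSRValue, PEBS, PublicDescription, SampleAfterValue, UMask), and the files end with a trailing newline. A minimal Python sketch for inspecting one of these files, assuming a local kernel checkout (the path is only an example):

    import json

    # Load one of the per-topic event files touched by this patch and list
    # each event with fields that every entry above carries.
    path = "tools/perf/pmu-events/arch/x86/silvermont/floating-point.json"
    with open(path) as f:
        events = json.load(f)      # flat JSON array of event objects

    for ev in events:
        print(ev["EventName"], ev.get("EventCode"), ev.get("UMask"))
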
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/frontend.json b/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
index 204473badf5a..cd6ed3f59e26 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/frontend.json
@@ -1,47 +1,66 @@
[
{
- "PublicDescription": "This event counts all instruction fetches, not including most uncacheable\r\nfetches.",
+ "BriefDescription": "Counts the number of baclears",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ALL",
+ "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.ANY event counts the number of baclears for any type of branch.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of JCC baclears",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.COND",
+ "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.COND event counts the number of JCC (Jump on Conditional Code) baclears.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of RETURN baclears",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.RETURN",
+ "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.RETURN event counts the number of RETURN baclears.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of times a decode restriction reduced the decode throughput due to wrong instruction length prediction",
+ "EventCode": "0xE9",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "PublicDescription": "Counts the number of times a decode restriction reduced the decode throughput due to wrong instruction length prediction.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetches",
"EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x3",
"EventName": "ICACHE.ACCESSES",
+ "PublicDescription": "This event counts all instruction fetches, not including most uncacheable\r\nfetches.",
"SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetches"
+ "UMask": "0x3"
},
{
- "PublicDescription": "This event counts all instruction fetches from the instruction cache.",
+ "BriefDescription": "Instruction fetches from Icache",
"EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "ICACHE.HIT",
+ "PublicDescription": "This event counts all instruction fetches from the instruction cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetches from Icache"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts all instruction fetches that miss the Instruction cache or produce memory requests. This includes uncacheable fetches. An instruction fetch miss is counted only once and not once for every cycle it is outstanding.",
+ "BriefDescription": "Icache miss",
"EventCode": "0x80",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "ICACHE.MISSES",
+ "PublicDescription": "This event counts all instruction fetches that miss the Instruction cache or produce memory requests. This includes uncacheable fetches. An instruction fetch miss is counted only once and not once for every cycle it is outstanding.",
"SampleAfterValue": "200003",
- "BriefDescription": "Icache miss"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of times the MSROM starts a flow of UOPS. It does not count every time a UOP is read from the microcode ROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort. The event will count MSROM startups for UOPS that are speculative, and subsequently cleared by branch mispredict or machine clear. Background: UOPS are produced by two mechanisms. Either they are generated by hardware that decodes instructions into UOPS, or they are delivered by a ROM (called the MSROM) that holds UOPS associated with a specific instruction. MSROM UOPS might also be delivered in response to some condition such as a fault or other exceptional condition. This event is an excellent mechanism for detecting instructions that require the use of MSROM instructions.",
+ "BriefDescription": "Counts the number of times entered into a ucode flow in the FEC. Includes inserted flows due to front-end detected faults or assists. Speculative count.",
"EventCode": "0xE7",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "MS_DECODED.MS_ENTRY",
+ "PublicDescription": "Counts the number of times the MSROM starts a flow of UOPS. It does not count every time a UOP is read from the microcode ROM. The most common case that this counts is when a micro-coded instruction is encountered by the front end of the machine. Other cases include when an instruction encounters a fault, trap, or microcode assist of any sort. The event will count MSROM startups for UOPS that are speculative, and subsequently cleared by branch mispredict or machine clear. Background: UOPS are produced by two mechanisms. Either they are generated by hardware that decodes instructions into UOPS, or they are delivered by a ROM (called the MSROM) that holds UOPS associated with a specific instruction. MSROM UOPS might also be delivered in response to some condition such as a fault or other exceptional condition. This event is an excellent mechanism for detecting instructions that require the use of MSROM instructions.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of times entered into a ucode flow in the FEC. Includes inserted flows due to front-end detected faults or assists. Speculative count."
- },
- {
- "PublicDescription": "Counts the number of times a decode restriction reduced the decode throughput due to wrong instruction length prediction.",
- "EventCode": "0xE9",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of times a decode restriction reduced the decode throughput due to wrong instruction length prediction"
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/memory.json b/tools/perf/pmu-events/arch/x86/silvermont/memory.json
index d72e09a5f929..15ea45187210 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/memory.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/memory.json
@@ -1,11 +1,10 @@
[
{
- "PublicDescription": "This event counts the number of times that pipeline was cleared due to memory ordering issues.",
+ "BriefDescription": "Stalls due to Memory ordering",
"EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "This event counts the number of times that pipeline was cleared due to memory ordering issues.",
"SampleAfterValue": "200003",
- "BriefDescription": "Stalls due to Memory ordering"
+ "UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/other.json b/tools/perf/pmu-events/arch/x86/silvermont/other.json
index 47814046fa9d..cff113adb823 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/other.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/other.json
@@ -1,20 +1,18 @@
[
{
- "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
+ "BriefDescription": "Cycles code-fetch stalled due to any reason.",
"EventCode": "0x86",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
+ "EventName": "FETCH_STALL.ALL",
+ "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss."
+ "UMask": "0x3f"
},
{
- "PublicDescription": "Counts cycles that fetch is stalled due to any reason. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes. This will include cycles due to an ITLB miss, ICache miss and other events.",
+ "BriefDescription": "Cycles code-fetch stalled due to an outstanding ITLB miss.",
"EventCode": "0x86",
- "Counter": "0,1",
- "UMask": "0x3f",
- "EventName": "FETCH_STALL.ALL",
+ "EventName": "FETCH_STALL.ITLB_FILL_PENDING_CYCLES",
+ "PublicDescription": "Counts cycles that fetch is stalled due to an outstanding ITLB miss. That is, the decoder queue is able to accept bytes, but the fetch unit is unable to provide bytes due to an ITLB miss. Note: this event is not the same as page walk cycles to retrieve an instruction translation.",
"SampleAfterValue": "200003",
- "BriefDescription": "Cycles code-fetch stalled due to any reason."
+ "UMask": "0x2"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
index 1ed62ad4cf77..2d4214bf9e39 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/pipeline.json
@@ -1,356 +1,281 @@
[
{
- "PEBS": "1",
- "PublicDescription": "ALL_BRANCHES counts the number of any branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "BriefDescription": "Counts the number of branch instructions retired...",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x0",
"EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of branch instructions retired..."
- },
- {
"PEBS": "1",
- "PublicDescription": "JCC counts the number of conditional branch (JCC) instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x7e",
- "EventName": "BR_INST_RETIRED.JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of JCC branch instructions retired"
+ "PublicDescription": "ALL_BRANCHES counts the number of any branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "SampleAfterValue": "200003"
},
{
- "PEBS": "1",
- "PublicDescription": "TAKEN_JCC counts the number of taken conditional branch (JCC) instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "BriefDescription": "Counts the number of taken branch instructions retired",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xfe",
- "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
+ "PEBS": "2",
+ "PublicDescription": "ALL_TAKEN_BRANCHES counts the number of all taken branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of taken JCC branch instructions retired"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "PublicDescription": "CALL counts the number of near CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "BriefDescription": "Counts the number of near CALL branch instructions retired",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xf9",
"EventName": "BR_INST_RETIRED.CALL",
+ "PEBS": "1",
+ "PublicDescription": "CALL counts the number of near CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near CALL branch instructions retired"
+ "UMask": "0xf9"
},
{
- "PEBS": "1",
- "PublicDescription": "REL_CALL counts the number of near relative CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "BriefDescription": "Counts the number of far branch instructions retired",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xfd",
- "EventName": "BR_INST_RETIRED.REL_CALL",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "FAR counts the number of far branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near relative CALL branch instructions retired"
+ "UMask": "0xbf"
},
{
- "PEBS": "1",
- "PublicDescription": "IND_CALL counts the number of near indirect CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xfb",
"EventName": "BR_INST_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "PublicDescription": "IND_CALL counts the number of near indirect CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near indirect CALL branch instructions retired"
+ "UMask": "0xfb"
},
{
- "PEBS": "1",
- "PublicDescription": "RETURN counts the number of near RET branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "BriefDescription": "Counts the number of JCC branch instructions retired",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xf7",
- "EventName": "BR_INST_RETIRED.RETURN",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "PEBS": "1",
+ "PublicDescription": "JCC counts the number of conditional branch (JCC) instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near RET branch instructions retired"
+ "UMask": "0x7e"
},
{
- "PEBS": "1",
- "PublicDescription": "NON_RETURN_IND counts the number of near indirect JMP and near indirect CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xeb",
"EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "PublicDescription": "NON_RETURN_IND counts the number of near indirect JMP and near indirect CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired"
+ "UMask": "0xeb"
},
{
- "PEBS": "1",
- "PublicDescription": "FAR counts the number of far branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired",
"EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0xbf",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PEBS": "1",
+ "PublicDescription": "REL_CALL counts the number of near relative CALL branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of far branch instructions retired"
+ "UMask": "0xfd"
},
{
+ "BriefDescription": "Counts the number of near RET branch instructions retired",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.RETURN",
"PEBS": "1",
- "PublicDescription": "ALL_BRANCHES counts the number of any mispredicted branch instructions retired. This umask is an architecturally defined event. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "RETURN counts the number of near RET branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted branch instructions retired"
+ "UMask": "0xf7"
},
{
+ "BriefDescription": "Counts the number of taken JCC branch instructions retired",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
"PEBS": "1",
- "PublicDescription": "JCC counts the number of mispredicted conditional branches (JCC) instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0x7e",
- "EventName": "BR_MISP_RETIRED.JCC",
+ "PublicDescription": "TAKEN_JCC counts the number of taken conditional branch (JCC) instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted JCC branch instructions retired"
+ "UMask": "0xfe"
},
{
- "PEBS": "1",
- "PublicDescription": "TAKEN_JCC counts the number of mispredicted taken conditional branch (JCC) instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "BriefDescription": "Counts the number of mispredicted branch instructions retired",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xfe",
- "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted taken JCC branch instructions retired"
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "ALL_BRANCHES counts the number of any mispredicted branch instructions retired. This umask is an architecturally defined event. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "200003"
},
{
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
"PEBS": "1",
"PublicDescription": "IND_CALL counts the number of mispredicted near indirect CALL branch instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted JCC branch instructions retired",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xfb",
- "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "PEBS": "1",
+ "PublicDescription": "JCC counts the number of mispredicted conditional branches (JCC) instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired"
+ "UMask": "0x7e"
},
{
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
"PEBS": "1",
- "PublicDescription": "RETURN counts the number of mispredicted near RET branch instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "PublicDescription": "NON_RETURN_IND counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xf7",
"EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "1",
+ "PublicDescription": "RETURN counts the number of mispredicted near RET branch instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired"
+ "UMask": "0xf7"
},
{
- "PEBS": "1",
- "PublicDescription": "NON_RETURN_IND counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "BriefDescription": "Counts the number of mispredicted taken JCC branch instructions retired",
"EventCode": "0xC5",
- "Counter": "0,1",
- "UMask": "0xeb",
- "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "PublicDescription": "TAKEN_JCC counts the number of mispredicted taken conditional branch (JCC) instructions retired. This event counts the number of retired branch instructions that were mispredicted by the processor, categorized by type. A branch misprediction occurs when the processor predicts that the branch would be taken, but it is not, or vice-versa. When the misprediction is discovered, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired"
+ "UMask": "0xfe"
},
{
- "PublicDescription": "This event counts the number of micro-ops retired that were supplied from MSROM.",
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "UOPS_RETIRED.MS",
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. In systems with a constant core frequency, this event can give you a measurement of the elapsed time while the core was not in halt state by dividing the event count by the core frequency. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
"SampleAfterValue": "2000003",
- "BriefDescription": "MSROM micro-ops retired"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts the number of micro-ops retired. The processor decodes complex macro instructions into a sequence of simpler micro-ops. Most instructions are composed of one or two micro-ops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. In some cases micro-op sequences are fused or whole instructions are fused into one micro-op. See other UOPS_RETIRED events for differentiating retired fused and non-fused micro-ops.",
- "EventCode": "0xC2",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "UOPS_RETIRED.ALL",
+ "BriefDescription": "Core cycles when core is not halted",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "This event counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Reference cycles when core is not halted",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "PublicDescription": "This event counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Micro-ops retired"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of times that a program writes to a code section. Self-modifying code causes a severe penalty in all Intel? architecture processors.",
- "EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "200003",
- "BriefDescription": "Self-Modifying Code detected"
+ "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "PublicDescription": "This event counts the number of times that pipeline stalled due to FP operations needing assists.",
- "EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.FP_ASSIST",
- "SampleAfterValue": "200003",
- "BriefDescription": "Stalls due to FP assists"
+ "BriefDescription": "Cycles the divider is busy. Does not imply a stall waiting for the divider.",
+ "EventCode": "0xCD",
+ "EventName": "CYCLES_DIV_BUSY.ALL",
+ "PublicDescription": "Cycles the divider is busy.This event counts the cycles when the divide unit is unable to accept a new divide UOP because it is busy processing a previously dispatched UOP. The cycles will be counted irrespective of whether or not another divide UOP is waiting to enter the divide unit (from the RS). This event might count cycles while a divide is in progress even if the RS is empty. The divide instruction is one of the longest latency instructions in the machine. Hence, it has a special event associated with it to help determine if divides are delaying the retirement of instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Machine clears happen when something happens in the machine that causes the hardware to need to take special care to get the right answer. When such a condition is signaled on an instruction, the front end of the machine is notified that it must restart, so no more instructions will be decoded from the current path. All instructions \"older\" than this one will be allowed to finish. This instruction and all \"younger\" instructions must be cleared, since they must not be allowed to complete. Essentially, the hardware waits until the problematic instruction is the oldest instruction in the machine. This means all older instructions are retired, and all pending stores (from older instructions) are completed. Then the new path of instructions from the front end are allowed to start into the machine. There are many conditions that might cause a machine clear (including the receipt of an interrupt, or a trap or a fault). All those conditions (including but not limited to MACHINE_CLEARS.MEMORY_ORDERING, MACHINE_CLEARS.SMC, and MACHINE_CLEARS.FP_ASSIST) are captured in the ANY event. In addition, some conditions can be specifically counted (i.e. SMC, MEMORY_ORDERING, FP_ASSIST). However, the sum of SMC, MEMORY_ORDERING, and FP_ASSIST machine clears will not necessarily equal the number of ANY.",
+ "BriefDescription": "Fixed Counter: Counts the number of instructions retired",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps. Background: Modern microprocessors employ extensive pipelining and speculative techniques. Since sometimes an instruction is started but never completed, the notion of \"retirement\" is introduced. A retired instruction is one that commits its states. Or stated differently, an instruction might be abandoned at some point. No instruction is truly finished until it retires. This counter measures the number of completed instructions. The fixed event is INST_RETIRED.ANY and the programmable event is INST_RETIRED.ANY_P.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instructions retired",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "This event counts the number of instructions that retire execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts all machine clears",
"EventCode": "0xC3",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "MACHINE_CLEARS.ALL",
+ "PublicDescription": "Machine clears happen when something happens in the machine that causes the hardware to need to take special care to get the right answer. When such a condition is signaled on an instruction, the front end of the machine is notified that it must restart, so no more instructions will be decoded from the current path. All instructions \"older\" than this one will be allowed to finish. This instruction and all \"younger\" instructions must be cleared, since they must not be allowed to complete. Essentially, the hardware waits until the problematic instruction is the oldest instruction in the machine. This means all older instructions are retired, and all pending stores (from older instructions) are completed. Then the new path of instructions from the front end are allowed to start into the machine. There are many conditions that might cause a machine clear (including the receipt of an interrupt, or a trap or a fault). All those conditions (including but not limited to MACHINE_CLEARS.MEMORY_ORDERING, MACHINE_CLEARS.SMC, and MACHINE_CLEARS.FP_ASSIST) are captured in the ANY event. In addition, some conditions can be specifically counted (i.e. SMC, MEMORY_ORDERING, FP_ASSIST). However, the sum of SMC, MEMORY_ORDERING, and FP_ASSIST machine clears will not necessarily equal the number of ANY.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts all machine clears"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts the number of cycles when no uops are allocated and the ROB is full (less than 2 entries available).",
- "EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "NO_ALLOC_CYCLES.ROB_FULL",
+ "BriefDescription": "Self-Modifying Code detected",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "This event counts the number of times that a program writes to a code section. Self-modifying code causes a severe penalty in all Intel? architecture processors.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles when no uops are allocated and the ROB is full (less than 2 entries available)"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted.",
+ "BriefDescription": "Counts the number of cycles when no uops are allocated for any reason.",
"EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x4",
- "EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
+ "EventName": "NO_ALLOC_CYCLES.ALL",
+ "PublicDescription": "The NO_ALLOC_CYCLES.ALL event counts the number of cycles when the front-end does not provide any instructions to be allocated for any reason. This event indicates the cycles where an allocation stalls occurs, and no UOPS are allocated in that cycle.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted"
+ "UMask": "0x3f"
},
{
+ "BriefDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted",
"EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x20",
- "EventName": "NO_ALLOC_CYCLES.RAT_STALL",
+ "EventName": "NO_ALLOC_CYCLES.MISPREDICTS",
+ "PublicDescription": "Counts the number of cycles when no uops are allocated and the alloc pipe is stalled waiting for a mispredicted jump to retire. After the misprediction is detected, the front end will start immediately but the allocate pipe stalls until the mispredicted.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles when no uops are allocated and a RATstall is asserted."
+ "UMask": "0x4"
},
{
- "PublicDescription": "The NO_ALLOC_CYCLES.NOT_DELIVERED event is used to measure front-end inefficiencies, i.e. when front-end of the machine is not delivering micro-ops to the back-end and the back-end is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into micro-ops (uops) in machine understandable format and putting them into a micro-op queue to be consumed by back end. The back-end then takes these micro-ops, allocates the required resources. When all resources are ready, micro-ops are executed. If the back-end is not ready to accept micro-ops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more UOPS. This event counts the cycles only when back-end is requesting more uops and front-end is not able to provide them. Some examples of conditions that cause front-end efficiencies are: Icache misses, ITLB misses, and decoder restrictions that limit the the front-end bandwidth.",
+ "BriefDescription": "Counts the number of cycles when no uops are allocated, the IQ is empty, and no other condition is blocking allocation.",
"EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x50",
"EventName": "NO_ALLOC_CYCLES.NOT_DELIVERED",
+ "PublicDescription": "The NO_ALLOC_CYCLES.NOT_DELIVERED event is used to measure front-end inefficiencies, i.e. when front-end of the machine is not delivering micro-ops to the back-end and the back-end is not stalled. This event can be used to identify if the machine is truly front-end bound. When this event occurs, it is an indication that the front-end of the machine is operating at less than its theoretical peak performance. Background: We can think of the processor pipeline as being divided into 2 broader parts: Front-end and Back-end. Front-end is responsible for fetching the instruction, decoding into micro-ops (uops) in machine understandable format and putting them into a micro-op queue to be consumed by back end. The back-end then takes these micro-ops, allocates the required resources. When all resources are ready, micro-ops are executed. If the back-end is not ready to accept micro-ops from the front-end, then we do not want to count these as front-end bottlenecks. However, whenever we have bottlenecks in the back-end, we will have allocation unit stalls and eventually forcing the front-end to wait until the back-end is ready to receive more UOPS. This event counts the cycles only when back-end is requesting more uops and front-end is not able to provide them. Some examples of conditions that cause front-end efficiencies are: Icache misses, ITLB misses, and decoder restrictions that limit the front-end bandwidth.",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles when no uops are allocated, the IQ is empty, and no other condition is blocking allocation."
+ "UMask": "0x50"
},
{
- "PublicDescription": "The NO_ALLOC_CYCLES.ALL event counts the number of cycles when the front-end does not provide any instructions to be allocated for any reason. This event indicates the cycles where an allocation stalls occurs, and no UOPS are allocated in that cycle.",
+ "BriefDescription": "Counts the number of cycles when no uops are allocated and a RATstall is asserted.",
"EventCode": "0xCA",
- "Counter": "0,1",
- "UMask": "0x3f",
- "EventName": "NO_ALLOC_CYCLES.ALL",
+ "EventName": "NO_ALLOC_CYCLES.RAT_STALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles when no uops are allocated for any reason."
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts the number of cycles and allocation pipeline is stalled and is waiting for a free MEC reservation station entry. The cycles should be appropriately counted in case of the cracked ops e.g. In case of a cracked load-op, the load portion is sent to M.",
- "EventCode": "0xCB",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "RS_FULL_STALL.MEC",
+ "BriefDescription": "Counts the number of cycles when no uops are allocated and the ROB is full (less than 2 entries available)",
+ "EventCode": "0xCA",
+ "EventName": "NO_ALLOC_CYCLES.ROB_FULL",
+ "PublicDescription": "Counts the number of cycles when no uops are allocated and the ROB is full (less than 2 entries available).",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles and allocation pipeline is stalled and is waiting for a free MEC reservation station entry. The cycles should be appropriately counted in case of the cracked ops e.g. In case of a cracked load-op, the load portion is sent to M"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts the number of cycles the Alloc pipeline is stalled when any one of the RSs (IEC, FPC and MEC) is full. This event is a superset of all the individual RS stall event counts.",
"EventCode": "0xCB",
- "Counter": "0,1",
- "UMask": "0x1f",
"EventName": "RS_FULL_STALL.ALL",
"SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of cycles the Alloc pipeline is stalled when any one of the RSs (IEC, FPC and MEC) is full. This event is a superset of all the individual RS stall event counts."
- },
- {
- "PublicDescription": "This event counts the number of instructions that retire execution. For instructions that consist of multiple micro-ops, this event counts the retirement of the last micro-op of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers.",
- "EventCode": "0xC0",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired"
- },
- {
- "PublicDescription": "Cycles the divider is busy.This event counts the cycles when the divide unit is unable to accept a new divide UOP because it is busy processing a previously dispatched UOP. The cycles will be counted irrespective of whether or not another divide UOP is waiting to enter the divide unit (from the RS). This event might count cycles while a divide is in progress even if the RS is empty. The divide instruction is one of the longest latency instructions in the machine. Hence, it has a special event associated with it to help determine if divides are delaying the retirement of instructions.",
- "EventCode": "0xCD",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "CYCLES_DIV_BUSY.ALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles the divider is busy. Does not imply a stall waiting for the divider."
- },
- {
- "PublicDescription": "This event counts the number of instructions that retire. For instructions that consist of multiple micro-ops, this event counts exactly once, as the last micro-op of the instruction retires. The event continues counting while instructions retire, including during interrupt service routines caused by hardware interrupts, faults or traps. Background: Modern microprocessors employ extensive pipelining and speculative techniques. Since sometimes an instruction is started but never completed, the notion of \"retirement\" is introduced. A retired instruction is one that commits its states. Or stated differently, an instruction might be abandoned at some point. No instruction is truly finished until it retires. This counter measures the number of completed instructions. The fixed event is INST_RETIRED.ANY and the programmable event is INST_RETIRED.ANY_P.",
- "Counter": "Fixed counter 1",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Fixed Counter: Counts the number of instructions retired"
- },
- {
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. In systems with a constant core frequency, this event can give you a measurement of the elapsed time while the core was not in halt state by dividing the event count by the core frequency. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
- "Counter": "Fixed counter 2",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Fixed Counter: Counts the number of unhalted core clock cycles"
+ "UMask": "0x1f"
},
{
- "PublicDescription": "Counts the number of reference cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. Divide this event count by core frequency to determine the elapsed time while the core was not in halt state. This event is architecturally defined and is a designated fixed counter. CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.CORE_P use the core frequency which may change from time to time. CPU_CLK_UNHALTE.REF_TSC and CPU_CLK_UNHALTED.REF are not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time. The fixed events are CPU_CLK_UNHALTED.CORE and CPU_CLK_UNHALTED.REF_TSC and the programmable events are CPU_CLK_UNHALTED.CORE_P and CPU_CLK_UNHALTED.REF.",
- "Counter": "Fixed counter 3",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Fixed Counter: Counts the number of unhalted reference clock cycles"
+ "BriefDescription": "Counts the number of cycles and allocation pipeline is stalled and is waiting for a free MEC reservation station entry. The cycles should be appropriately counted in case of the cracked ops e.g. In case of a cracked load-op, the load portion is sent to M",
+ "EventCode": "0xCB",
+ "EventName": "RS_FULL_STALL.MEC",
+ "PublicDescription": "Counts the number of cycles and allocation pipeline is stalled and is waiting for a free MEC reservation station entry. The cycles should be appropriately counted in case of the cracked ops e.g. In case of a cracked load-op, the load portion is sent to M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time.",
- "EventCode": "0x3C",
- "Counter": "0,1",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "BriefDescription": "Micro-ops retired",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PublicDescription": "This event counts the number of micro-ops retired. The processor decodes complex macro instructions into a sequence of simpler micro-ops. Most instructions are composed of one or two micro-ops. Some instructions are decoded into longer sequences such as repeat instructions, floating point transcendental instructions, and assists. In some cases micro-op sequences are fused or whole instructions are fused into one micro-op. See other UOPS_RETIRED events for differentiating retired fused and non-fused micro-ops.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when core is not halted"
+ "UMask": "0x10"
},
{
- "PublicDescription": "This event counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. In mobile systems the core frequency may change from time. This event is not affected by core frequency changes but counts as if the core is running at the maximum frequency all the time.",
- "EventCode": "0x3C",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF",
+ "BriefDescription": "MSROM micro-ops retired",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PublicDescription": "This event counts the number of micro-ops retired that were supplied from MSROM.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when core is not halted"
- },
- {
- "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.ANY event counts the number of baclears for any type of branch.",
- "EventCode": "0xE6",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "BACLEARS.ALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of baclears"
- },
- {
- "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.RETURN event counts the number of RETURN baclears.",
- "EventCode": "0xE6",
- "Counter": "0,1",
- "UMask": "0x8",
- "EventName": "BACLEARS.RETURN",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of RETURN baclears"
- },
- {
- "PublicDescription": "The BACLEARS event counts the number of times the front end is resteered, mainly when the Branch Prediction Unit cannot provide a correct prediction and this is corrected by the Branch Address Calculator at the front end. The BACLEARS.COND event counts the number of JCC (Jump on Condtional Code) baclears.",
- "EventCode": "0xE6",
- "Counter": "0,1",
- "UMask": "0x10",
- "EventName": "BACLEARS.COND",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of JCC baclears"
- },
- {
- "PEBS": "2",
- "PublicDescription": "ALL_TAKEN_BRANCHES counts the number of all taken branch instructions retired. Branch prediction predicts the branch target and enables the processor to begin executing instructions long before the branch true execution path is known. All branches utilize the branch prediction unit (BPU) for prediction. This unit predicts the target address not only based on the EIP of the branch but also based on the execution path through which execution reached this EIP. The BPU can efficiently predict the following branch types: conditional branches, direct calls and jumps, indirect calls and jumps, returns.",
- "EventCode": "0xC4",
- "Counter": "0,1",
- "UMask": "0x80",
- "PEBScounters": "0,1",
- "EventName": "BR_INST_RETIRED.ALL_TAKEN_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of taken branch instructions retired"
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
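Aside (not part of the patch; the counter values and frequency below are made-up example inputs): the CPU_CLK_UNHALTED.* and INST_RETIRED.* descriptions above spell out how the fixed counters are meant to be combined, e.g. dividing unhalted core cycles by the core frequency to estimate elapsed non-halted time, and forming ratios such as instructions retired per cycle. A minimal Python sketch of those two derivations, assuming a constant core clock:

    # Hypothetical counter readings; real values would come from a perf measurement.
    core_cycles  = 4_800_000_000   # CPU_CLK_UNHALTED.CORE (fixed counter)
    instructions = 6_000_000_000   # INST_RETIRED.ANY (fixed counter)
    core_freq_hz = 2_400_000_000   # assumed constant core frequency

    # "Divide the event count by the core frequency" -> time the core was not halted.
    elapsed_unhalted_s = core_cycles / core_freq_hz
    # Instructions retired per unhalted core cycle (IPC), a common event ratio.
    ipc = instructions / core_cycles

    print(f"unhalted time: {elapsed_unhalted_s:.2f} s, IPC: {ipc:.2f}")

When the core frequency scales, the descriptions instead point at CPU_CLK_UNHALTED.REF_TSC and CPU_CLK_UNHALTED.REF, which count as if the core always ran at its maximum frequency and are therefore better suited to time-based measurements.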
diff --git a/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json b/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
index ad31479f8f60..1be3fa5c4ad3 100644
--- a/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/silvermont/virtual-memory.json
@@ -1,69 +1,62 @@
[
{
- "PEBS": "1",
- "PublicDescription": "This event counts the number of load ops retired that had DTLB miss.",
+ "BriefDescription": "Loads missed DTLB",
"EventCode": "0x04",
- "Counter": "0,1",
- "UMask": "0x8",
"EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "This event counts the number of load ops retired that had DTLB miss.",
"SampleAfterValue": "200003",
- "BriefDescription": "Loads missed DTLB"
+ "UMask": "0x8"
},
{
- "PublicDescription": "This event counts when a data (D) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
+ "BriefDescription": "Total cycles for all the page walks. (I-side and D-side)",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x1",
- "EventName": "PAGE_WALKS.D_SIDE_WALKS",
- "SampleAfterValue": "100003",
- "BriefDescription": "D-side page-walks",
- "EdgeDetect": "1"
+ "EventName": "PAGE_WALKS.CYCLES",
+ "PublicDescription": "This event counts every cycle when a data (D) page walk or instruction (I) page walk is in progress. Since a pagewalk implies a TLB miss, the approximate cost of a TLB miss can be determined from this event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
},
{
- "PublicDescription": "This event counts every cycle when a D-side (walks due to a load) page walk is in progress. Page walk duration divided by number of page walks is the average duration of page-walks.",
+ "BriefDescription": "Duration of D-side page-walks in core cycles",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x1",
"EventName": "PAGE_WALKS.D_SIDE_CYCLES",
+ "PublicDescription": "This event counts every cycle when a D-side (walks due to a load) page walk is in progress. Page walk duration divided by number of page walks is the average duration of page-walks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Duration of D-side page-walks in core cycles"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts when an instruction (I) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
+ "BriefDescription": "D-side page-walks",
+ "EdgeDetect": "1",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x2",
- "EventName": "PAGE_WALKS.I_SIDE_WALKS",
+ "EventName": "PAGE_WALKS.D_SIDE_WALKS",
+ "PublicDescription": "This event counts when a data (D) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
"SampleAfterValue": "100003",
- "BriefDescription": "I-side page-walks",
- "EdgeDetect": "1"
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event counts every cycle when a I-side (walks due to an instruction fetch) page walk is in progress. Page walk duration divided by number of page walks is the average duration of page-walks.",
+ "BriefDescription": "Duration of I-side page-walks in core cycles",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x2",
"EventName": "PAGE_WALKS.I_SIDE_CYCLES",
+ "PublicDescription": "This event counts every cycle when a I-side (walks due to an instruction fetch) page walk is in progress. Page walk duration divided by number of page walks is the average duration of page-walks.",
"SampleAfterValue": "200003",
- "BriefDescription": "Duration of I-side page-walks in core cycles"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts when a data (D) page walk or an instruction (I) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
+ "BriefDescription": "I-side page-walks",
+ "EdgeDetect": "1",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.WALKS",
+ "EventName": "PAGE_WALKS.I_SIDE_WALKS",
+ "PublicDescription": "This event counts when an instruction (I) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
"SampleAfterValue": "100003",
- "BriefDescription": "Total page walks that are completed (I-side and D-side)",
- "EdgeDetect": "1"
+ "UMask": "0x2"
},
{
- "PublicDescription": "This event counts every cycle when a data (D) page walk or instruction (I) page walk is in progress. Since a pagewalk implies a TLB miss, the approximate cost of a TLB miss can be determined from this event.",
+ "BriefDescription": "Total page walks that are completed (I-side and D-side)",
+ "EdgeDetect": "1",
"EventCode": "0x05",
- "Counter": "0,1",
- "UMask": "0x3",
- "EventName": "PAGE_WALKS.CYCLES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Total cycles for all the page walks. (I-side and D-side)"
+ "EventName": "PAGE_WALKS.WALKS",
+ "PublicDescription": "This event counts when a data (D) page walk or an instruction (I) page walk is completed or started. Since a page walk implies a TLB miss, the number of TLB misses can be counted by counting the number of pagewalks.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3"
}
-]
\ No newline at end of file
+]
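Aside (not part of the patch; the counter values below are made-up example inputs): the PAGE_WALKS.*_CYCLES descriptions above note that walk duration divided by the number of walks gives the average page-walk cost. A small Python sketch of that ratio for the D-side pair:

    # Hypothetical counter readings; real values would come from a perf measurement.
    d_side_cycles = 1_200_000   # PAGE_WALKS.D_SIDE_CYCLES (event 0x05, umask 0x1)
    d_side_walks  = 30_000      # PAGE_WALKS.D_SIDE_WALKS (edge-detected, umask 0x1)

    # Average duration of a D-side page walk, in core cycles.
    avg_walk_cycles = d_side_cycles / d_side_walks
    print(f"average D-side page walk: {avg_walk_cycles:.1f} cycles")

The same ratio applies to the I-side pair, or to PAGE_WALKS.CYCLES over PAGE_WALKS.WALKS for a combined I-side plus D-side average.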
diff --git a/tools/perf/pmu-events/arch/x86/skylake/cache.json b/tools/perf/pmu-events/arch/x86/skylake/cache.json
index 720458139049..ce592d871949 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/cache.json
@@ -1,2928 +1,2148 @@
[
{
- "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x22",
- "EventName": "L2_RQSTS.RFO_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts L2 cache misses when fetching instructions.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L1D miss outstandings duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Demand requests that miss L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x27",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Demand requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x38",
- "EventName": "L2_RQSTS.PF_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "All requests that miss L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "L2_RQSTS.MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "All requests that miss L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
},
{
- "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc1",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc2",
- "EventName": "L2_RQSTS.RFO_HIT",
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xc4",
- "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
- "EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xd8",
- "EventName": "L2_RQSTS.PF_HIT",
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Deprecated": "1",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe1",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand Data Read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
- "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "BriefDescription": "Demand Data Read requests",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe2",
- "EventName": "L2_RQSTS.ALL_RFO",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"SampleAfterValue": "200003",
- "BriefDescription": "RFO requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe1"
},
{
- "PublicDescription": "Counts the total number of L2 code requests.",
+ "BriefDescription": "Demand requests that miss L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe4",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "L2 code requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x27"
},
{
- "PublicDescription": "Demand requests to L2 cache.",
+ "BriefDescription": "Demand requests to L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xe7",
"EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Demand requests to L2 cache.",
"SampleAfterValue": "200003",
- "BriefDescription": "Demand requests to L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe7"
},
{
- "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xf8",
"EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
"SampleAfterValue": "200003",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf8"
},
{
- "PublicDescription": "All L2 requests.",
+ "BriefDescription": "RFO requests to L2 cache",
"EventCode": "0x24",
- "Counter": "0,1,2,3",
- "UMask": "0xff",
- "EventName": "L2_RQSTS.REFERENCES",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
"SampleAfterValue": "200003",
- "BriefDescription": "All L2 requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe2"
},
{
- "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "Errata": "SKL057",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests missed L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
},
{
- "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
- "EventCode": "0x2E",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "Errata": "SKL057",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
},
{
- "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D miss outstandings duration in cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
},
{
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
},
{
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "All requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
},
{
- "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
- "EventCode": "0x48",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xd8"
},
{
- "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "EventCode": "0x51",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L1D.REPLACEMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "L1D data line replacements",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x38"
},
{
- "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All L2 requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "All L2 requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
},
{
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
},
{
- "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core-originated cacheable demand requests missed L3",
+ "Errata": "SKL057",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
},
{
- "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core-originated cacheable demand requests that refer to L3",
+ "Errata": "SKL057",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4f"
},
{
- "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x83"
},
{
- "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand and prefetch data reads",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
{
- "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
- "BriefDescription": "Any memory transaction that reached the SQ.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
- "EventCode": "0xB2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "EventCode": "0xB7, 0xBB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x42"
},
{
- "PEBS": "1",
- "PublicDescription": "Retired load instructions that miss the STLB.",
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
"EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x11",
"EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that miss the STLB. (Precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x11"
},
{
- "PEBS": "1",
- "PublicDescription": "Retired store instructions that miss the STLB.",
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
"EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x12",
"EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that miss the STLB. (Precise Event)",
- "CounterHTOff": "0,1,2,3",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x21",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions with locked access. (Precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary. (Precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x42",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary. (Precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
+ "Data_LA": "1",
+ "EventCode": "0xD4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
"PEBS": "1",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired load instructions. (Precise Event)",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "PublicDescription": "All retired store instructions.",
- "EventCode": "0xD0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "All retired store instructions. (Precise Event)",
- "CounterHTOff": "0,1,2,3",
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
"Data_LA": "1",
- "L1_Hit_Indication": "1"
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
- },
- {
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
"PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions missed L2 cache as data sources.",
"SampleAfterValue": "50021",
- "BriefDescription": "Retired load instructions missed L2 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
"PEBS": "1",
- "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
"EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
- "EventCode": "0xD1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PEBS": "1",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "BriefDescription": "Cacheable and non-cacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "SampleAfterValue": "20011",
- "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
- "EventCode": "0xD2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
"SampleAfterValue": "100003",
- "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "UMask": "0x4"
},
{
- "PEBS": "1",
- "EventCode": "0xD4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
- "CounterHTOff": "0,1,2,3",
- "Data_LA": "1"
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts L2 writebacks that access L2 cache.",
- "EventCode": "0xF0",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "L2_TRANS.L2_WB",
- "SampleAfterValue": "200003",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "EventCode": "0xF1",
- "Counter": "0,1,2,3",
- "UMask": "0x1f",
- "EventName": "L2_LINES_IN.ALL",
- "SampleAfterValue": "100003",
- "BriefDescription": "L2 cache lines filling L2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "L2_LINES_OUT.SILENT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "L2_LINES_OUT.NON_SILENT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.USELESS_PREF",
- "SampleAfterValue": "200003",
- "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "L2_LINES_OUT.USELESS_HWPF",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
- "EventCode": "0xF4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of cache line split locks sent to uncore.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC01C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4001C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x801C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC01C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x401C0004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0080004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0108000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000108000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40080004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400108000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0040004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200108000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100108000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080108000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040108000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0088000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000088000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40040004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400088000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0100004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200088000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100088000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080088000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040088000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40100004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests have any response type.",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000018000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC01C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4001C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x801C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC01C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x401C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0080001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0100004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40080001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0040001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0080004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40040001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0100001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40100001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC01C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4001C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x801C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC01C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x401C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0080002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000080002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400080002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200080002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80080002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0100002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100080002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40080002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0080002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0100002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000100002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400100002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200100002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80100002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100100002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40100002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200400002",
"SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100400002",
"SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0020002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0020002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000020002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400020002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200020002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80020002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100020002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40020002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "BriefDescription": "Counts any other requests have any response type.",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC01C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4001C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x801C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x401C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC01C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0088000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x10001C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000088000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x04001C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400088000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x02001C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200088000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x01001C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80088000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00801C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100088000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00401C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40088000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0100001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0048000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000100001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000048000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400100001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400048000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200100001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200048000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100100001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80048000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080100001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100048000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040100001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40048000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0080001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0108000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000080001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000108000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400080001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400108000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200080001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200108000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100080001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80108000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080080001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100108000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040080001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40108000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC0020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC0028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1000020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0400020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0200020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0100020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x80028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0080020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0040020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x40028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads have any response type.",
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0000010001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads have any response type.",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
index 213dd6230cf2..4d494a5cabbf 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/floating-point.json
@@ -1,67 +1,74 @@
[
{
+ "BriefDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instruction retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Counts once for most SIMD scalar computational floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Counts once for most SIMD scalar computational single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
+ "BriefDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
+ "EventCode": "0xC7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xC7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "SampleAfterValue": "2000003",
+ "UMask": "0xfc"
+ },
+ {
+ "BriefDescription": "Cycles with any input/output SSE or FP assist",
+ "CounterMask": "1",
"EventCode": "0xCA",
- "Counter": "0,1,2,3",
- "UMask": "0x1e",
"EventName": "FP_ASSIST.ANY",
+ "PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles with any input/output SSE or FP assist",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1e"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/frontend.json b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
index 7fa95a35e3ca..04f08e4d2402 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/frontend.json
@@ -1,482 +1,421 @@
[
{
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "IDQ.MITE_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "IDQ.DSB_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "IDQ.MS_DSB_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x18",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "IDQ.MS_MITE_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x24",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EdgeDetect": "1",
- "EventName": "IDQ.MS_SWITCHES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
- "EventCode": "0x79",
- "Counter": "0,1,2,3",
- "UMask": "0x30",
- "EventName": "IDQ.MS_UOPS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE_16B.IFDATA_STALL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x83",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x83",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x83",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
- "EventCode": "0x9C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x9C",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses.\nNote: Invoking MITE requires two or three cycles delay.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
"EventCode": "0xAB",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
"PEBS": "1",
"PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
"EventCode": "0xC6",
- "MSRValue": "0x11",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
"PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
"EventCode": "0xC6",
- "MSRValue": "0x12",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
"EventCode": "0xC6",
- "MSRValue": "0x13",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "FRONTEND_RETIRED.L2_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
- "EventCode": "0xC6",
- "MSRValue": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x400106",
+ "PEBS": "2",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x15",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x408006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x400206",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x401006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x200206",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x400206",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x400406",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x410006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x400806",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x401006",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x200206",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x402006",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x300206",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x404006",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x402006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x408006",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x400406",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x410006",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x420006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x420006",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x404006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "MSRValue": "0x100206",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x400806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "1",
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
"EventCode": "0xC6",
- "MSRValue": "0x300206",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
"SampleAfterValue": "100007",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/memory.json b/tools/perf/pmu-events/arch/x86/skylake/memory.json
index f197b4c7695b..588ad6059a13 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/memory.json
@@ -1,1604 +1,1165 @@
[
{
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_MEM.ABORT_CAPACITY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "EventCode": "0x54",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "TX_EXEC.MISC2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "TX_EXEC.MISC3",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "RTM region detected inside HLE.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "TX_EXEC.MISC4",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "EventCode": "0x5d",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "TX_EXEC.MISC5",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
+ "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
+ "CounterMask": "2",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
"EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
"EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x6",
"EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "CounterMask": "6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
- "EventCode": "0xB0",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand Data Read requests who miss L3 cache",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
- "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "SKL089",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "HLE_RETIRED.START",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times HLE abort was triggered.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution started.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Number of times HLE commit succeeded.",
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
"EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "HLE_RETIRED.COMMIT",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution successfully committed",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PEBS": "1",
- "PublicDescription": "Number of times HLE abort was triggered. (PEBS)",
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
"EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "HLE_RETIRED.ABORTED",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
"EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
"EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
"EventName": "HLE_RETIRED.ABORTED_TIMER",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
+ "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "BriefDescription": "Number of times an HLE execution successfully committed",
"EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an HLE execution started.",
"EventCode": "0xC8",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "HLE_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RTM_RETIRED.START",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution started.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of times RTM commit succeeded.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "RTM_RETIRED.COMMIT",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution successfully committed",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Errata": "SKL089",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "Number of times RTM abort was triggered. (PEBS)",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "RTM_RETIRED.ABORTED",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RTM_RETIRED.ABORTED_MEM",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "RTM_RETIRED.ABORTED_TIMER",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "EventCode": "0xC9",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "RTM_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xCD",
- "MSRValue": "0x4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xCD",
- "MSRValue": "0x8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
"SampleAfterValue": "50021",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xCD",
- "MSRValue": "0x10",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "20011",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xCD",
- "MSRValue": "0x20",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xCD",
- "MSRValue": "0x40",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "2003",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xCD",
- "MSRValue": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "1009",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xCD",
- "MSRValue": "0x100",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "503",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20001C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
- "EventCode": "0xCD",
- "MSRValue": "0x200",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
- "MSRIndex": "0x3F6",
- "SampleAfterValue": "101",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
- "TakenAlone": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Counts all demand code reads",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000080004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFC408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000040004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203C408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000100004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103C408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFC400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007C408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x203C400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC4008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7C400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC4000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044008000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000408000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001C8000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x44000004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000108000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000400004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts all demand code reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000088000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020004",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000048000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20001C0001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts any other requests",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000028000",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000080001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts any other requests",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFC400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000040001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203C400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000100001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103C400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFC400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007C400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x203C400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC4000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7C400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC4000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001C0004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x44000001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000400001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts demand data reads",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020001",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20001C0002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020004",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000080002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFC400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000040002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203C400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000100002",
"SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFC400002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103C400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x203C400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007C400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC4000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7C400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC4000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004000002",
"SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
"BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x44000002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000400002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts all demand data writes (RFOs)",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001C0002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000020002",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x20001C8000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_E.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000088000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_M.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000048000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_HIT_S.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000108000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FFC400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FFC408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x203C400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103C408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x103C400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x43C408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x043C400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x23C408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x023C400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0xBC408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x013C400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x203C408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x00BC400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x13C408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x007C400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x7C408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x3FC4000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FC4008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2004000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x1004000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x404008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0404000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_HIT_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x204008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0204000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x84008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0104000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2004008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0084000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_NONE",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x104008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x0044000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SPL_HIT",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L3_MISS_LOCAL_DRAM.SPL_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x44008000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000400001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.L4_HIT_LOCAL_L4.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000408000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
+ "BriefDescription": "Counts any other requests",
"EventCode": "0xB7, 0xBB",
- "MSRValue": "0x20001C0001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
+ "EventName": "OFFCORE_RESPONSE.OTHER.SUPPLIER_NONE.SNOOP_NON_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000028000",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts demand data reads",
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000100001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_S.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts demand data reads",
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000080001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_E.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "PublicDescription": "Counts demand data reads",
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000040001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT_M.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts demand data reads",
- "EventCode": "0xB7, 0xBB",
- "MSRValue": "0x2000020001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.SUPPLIER_NONE.SNOOP_NON_DRAM",
- "MSRIndex": "0x1a6, 0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads",
- "Offcore": "1",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/other.json b/tools/perf/pmu-events/arch/x86/skylake/other.json
index 84a316d380ac..d75d53279b4e 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/other.json
@@ -1,48 +1,17 @@
[
{
- "EventCode": "0x32",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "SW_PREFETCH_ACCESS.NTA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHNTA instructions executed.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "SW_PREFETCH_ACCESS.T0",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHT0 instructions executed.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "SW_PREFETCH_ACCESS.T1_T2",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of PREFETCHW instructions executed.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
"EventCode": "0xCB",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
"SampleAfterValue": "203",
- "BriefDescription": "Number of hardware interrupts received by the processor.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
+ "EventCode": "0x09",
+ "EventName": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
index 4a891fbbc4bb..2dfc3af08eff 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/pipeline.json
@@ -1,967 +1,824 @@
[
{
- "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
- "Counter": "Fixed counter 0",
- "UMask": "0x1",
- "EventName": "INST_RETIRED.ANY",
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
"SampleAfterValue": "2000003",
- "BriefDescription": "Instructions retired from execution.",
- "CounterHTOff": "Fixed counter 0"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when the thread is not in halt state",
- "CounterHTOff": "Fixed counter 1"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "Counter": "Fixed counter 1",
- "UMask": "0x2",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "Fixed counter 1"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "Counter": "Fixed counter 2",
- "UMask": "0x3",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "CounterHTOff": "Fixed counter 2"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations,c. preceding lock RMW operations are not forwarded,d. store has the no-forward bit set (uncacheable/page-split/masked stores),e. all-blocking stores are used (mostly, fences and port I/O), and others.The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "SampleAfterValue": "100003",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "EventCode": "0x03",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "LD_BLOCKS.NO_SR",
- "SampleAfterValue": "100003",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Far branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "This event counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "EventCode": "0x07",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "SampleAfterValue": "100003",
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Return instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "AnyThread": "1",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0x0D",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "EventCode": "0x0E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative mispredicted indirect branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
},
{
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x0E",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x14",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ARITH.DIVIDER_ACTIVE",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EdgeDetect": "1",
- "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"AnyThread": "1",
- "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2503",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
+ },
+ {
"AnyThread": "1",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "SampleAfterValue": "2503",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "SampleAfterValue": "100007"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Thread cycles when thread is not in halt state",
"EventCode": "0x3C",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2503",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
- "EventCode": "0x4C",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "SampleAfterValue": "100003",
- "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
- "EventCode": "0x59",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "EventCode": "0x5E",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
- "EventCode": "0x5E",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "RS_EVENTS.EMPTY_END",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
- "EventCode": "0x87",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ILD_STALL.LCP",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x14"
},
{
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
- "EventCode": "0xA1",
- "Counter": "0,1,2,3",
- "UMask": "0x80",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts resource-related stall cycles.",
- "EventCode": "0xa2",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "RESOURCE_STALLS.ANY",
+ "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Resource-related stall cycles",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
- "EventCode": "0xA2",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "RESOURCE_STALLS.SB",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Total execution stalls.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x5",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "CounterMask": "5",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "BriefDescription": "Number of all retired NOP instructions.",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "CounterMask": "8",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0xc",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "CounterMask": "12",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "CounterMask": "10",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "Invert": "1",
+ "PEBS": "2",
+ "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "CounterMask": "16",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "Counter": "0,1,2,3",
- "UMask": "0x14",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "AnyThread": "1",
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
"SampleAfterValue": "2000003",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "CounterMask": "20",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xA6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xA6",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xA6",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xA6",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
- "EventCode": "0xA6",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xA6",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "4",
"EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.UOPS",
+ "EventName": "LSD.CYCLES_4_UOPS",
+ "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
"EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
"EventCode": "0xA8",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "LSD.CYCLES_4_UOPS",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.THREAD",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3f"
},
{
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "BriefDescription": "Resource-related stall cycles",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "Counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Number of uops executed from any thread.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of uops executed on the core.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "CounterMask": "2",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "CounterMask": "3",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "CounterMask": "4",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of x87 uops executed.",
- "EventCode": "0xB1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "UOPS_EXECUTED.X87",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of x87 uops dispatched.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
- "EventCode": "0xC0",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.ANY_P",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PEBS": "2",
- "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
- "EventCode": "0xC0",
- "Counter": "1",
- "UMask": "0x1",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.PREC_DIST",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "CounterHTOff": "1"
+ "UMask": "0x10"
},
{
- "PEBS": "2",
- "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
- "EventCode": "0xC0",
- "Invert": "1",
- "Counter": "0,2,3",
- "UMask": "0x1",
- "Errata": "SKL091, SKL044",
- "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
- "CounterMask": "10",
- "CounterHTOff": "0,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xC1",
- "Counter": "0,1,2,3",
- "UMask": "0x3f",
- "EventName": "OTHER_ASSISTS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
},
{
- "PublicDescription": "Counts the retirement slots used.",
- "EventCode": "0xC2",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Retirement slots used.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "PublicDescription": "This event counts cycles without actually retired uops.",
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Number of uops executed from any thread.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles without actually retired uops.",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
- "EventCode": "0xC2",
- "Invert": "1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "CounterMask": "10",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Number of machine clears (nukes) of any type.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EdgeDetect": "1",
- "EventName": "MACHINE_CLEARS.COUNT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
- "EventCode": "0xC3",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "MACHINE_CLEARS.SMC",
- "SampleAfterValue": "100003",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "SampleAfterValue": "100007",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "All (macro) branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "SampleAfterValue": "100007",
- "BriefDescription": "Return instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Counts all not taken macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Taken branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
- "EventCode": "0xC4",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "Errata": "SKL091",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Counts the number of far branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x0",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "400009",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "1",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "PEBS": "1",
- "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "PEBS": "2",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "SampleAfterValue": "400009",
- "BriefDescription": "Mispredicted macro branch instructions retired.",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PEBS": "1",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "EventCode": "0xC5",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "SampleAfterValue": "400009",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of macro-fused uops retired. (non precise)",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "PublicDescription": "Counts the number of macro-fused uops retired. (non precise)",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "Counts the retirement slots used.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Increments whenever there is an update to the LBR array.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "Counter": "0,1,2,3",
- "UMask": "0x40",
- "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
- "EventCode": "0xE6",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "BACLEARS.ANY",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
}
-]
\ No newline at end of file
+]
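
The pipeline.json entries above describe each event only by its programming fields (EventCode, UMask, CounterMask, Invert); perf folds those into a raw PERFEVTSEL-style encoding when the event is programmed. A minimal sketch of that encoding in Python, assuming the standard Intel bit layout (event select in bits 0-7, unit mask in bits 8-15, invert in bit 23, counter mask in bits 24-31) and using UOPS_EXECUTED.STALL_CYCLES from the hunk above as input; this is an illustration, not the parser perf actually uses:

# Minimal sketch: fold a pmu-events JSON entry into the raw PERFEVTSEL-style
# value used by "perf stat -e cpu/...". Bit layout (Intel SDM): [7:0] event
# select, [15:8] unit mask, [23] invert, [31:24] counter mask; other control
# bits (USR/OS/edge/...) are left out here.

def encode(event_code: str, umask: str, cmask: str = "0", inv: str = "0") -> int:
    return (int(event_code, 16)
            | (int(umask, 16) << 8)
            | (int(inv) << 23)
            | (int(cmask) << 24))

# UOPS_EXECUTED.STALL_CYCLES as defined above:
# EventCode 0xB1, UMask 0x1, CounterMask 1, Invert 1.
print(hex(encode("0xB1", "0x1", cmask="1", inv="1")))  # 0x18001b1

The same counter can also be requested symbolically on a live system, e.g. perf stat -e cpu/event=0xb1,umask=0x1,cmask=1,inv=1/ -a sleep 1.
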
diff --git a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
index 8704efeb8d31..a6d212b349f5 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
@@ -1,371 +1,1484 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
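
The smi_cycles expression just above is a guarded ratio: only when the SMI MSR counted at least one System Management Interrupt is the gap between APERF and unhalted core cycles attributed to SMM time. A small sketch of how such a conditional MetricExpr evaluates, with made-up counter values rather than measurements:

# Sketch of the smi_cycles MetricExpr:
#   ((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)
# ScaleUnit "100%" means the result is reported as a percentage.

def smi_cycles(aperf: int, cycles: int, smi: int) -> float:
    frac = (aperf - cycles) / aperf if smi > 0 else 0.0
    return 100.0 * frac  # apply ScaleUnit

# Hypothetical sample: 2% of APERF cycles are unaccounted for and one SMI fired.
print(smi_cycles(aperf=1_000_000, cycles=980_000, smi=1))  # 2.0
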
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "MetricExpr": "1 - tma_frontend_bound - (UOPS_ISSUED.ANY + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
},
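
tma_frontend_bound, tma_bad_speculation, tma_backend_bound and tma_retiring (the latter defined further down in this file) together split the issue slots into the four level-1 top-down buckets. Below is a sketch of that accounting for the non-SMT case (#SMT_on = 0, so tma_info_slots is 4 * CPU_CLK_UNHALTED.THREAD), following the expressions shown in this hunk; the counter values are illustrative only:

# Sketch of the level-1 top-down buckets for the non-SMT case
# (#SMT_on = 0), with tma_info_slots taken as 4 * CPU_CLK_UNHALTED.THREAD.
# Counter values below are illustrative, not measured data.

clks          = 1_000_000
slots         = 4 * clks
idq_not_deliv = 400_000        # IDQ_UOPS_NOT_DELIVERED.CORE
uops_issued   = 3_200_000      # UOPS_ISSUED.ANY
uops_retired  = 3_000_000      # UOPS_RETIRED.RETIRE_SLOTS
recovery_cyc  = 50_000         # INT_MISC.RECOVERY_CYCLES

frontend_bound  = idq_not_deliv / slots
bad_speculation = (uops_issued - uops_retired + 4 * recovery_cyc) / slots
retiring        = uops_retired / slots
backend_bound   = 1 - frontend_bound - (uops_issued + 4 * recovery_cyc) / slots

# The four buckets sum to 1 by construction.
for name, val in [("frontend", frontend_bound), ("bad_spec", bad_speculation),
                  ("backend", backend_bound), ("retiring", retiring)]:
    print(f"{name:9s} {val:.3f}")
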
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(18.5 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM + 16.5 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "16.5 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "22 * tma_info_average_frequency * OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
+ "MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fused_instructions",
+ "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "(UOPS_RETIRED.RETIRE_SLOTS + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY) / tma_info_slots",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_big_code",
+ "MetricThreshold": "tma_info_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_branching_overhead"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_mispredictions, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.CONDITIONAL + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_branching_overhead",
+ "MetricThreshold": "tma_info_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_big_code"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_callret"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_code_stlb_mpki"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.NOT_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_nt"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "(BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_tk"
},
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_core_bound_likely",
+ "MetricThreshold": "tma_info_core_bound_likely > 0.5"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_ARB_TRK_REQUESTS.ALL + UNC_ARB_COH_TRK_REQUESTS.ALL) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_misses, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_dsb_misses",
+ "MetricThreshold": "tma_info_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_fb_hpki"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_fetch_upc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_ic_misses",
+ "MetricThreshold": "tma_info_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ + 2",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0x3c@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_ipswpf",
+ "MetricThreshold": "tma_info_ipswpf < 100"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_lcp"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization",
- "MetricConstraint": "NO_NMI_WATCHDOG"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_jump"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Access_BW"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
},
{
- "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_load_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.DATA_READ / UNC_ARB_TRK_OCCUPANCY.DATA_READ@thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
+ },
+ {
+ "BriefDescription": "Average number of parallel requests to external memory",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_parallel_requests",
+ "PublicDescription": "Average number of parallel requests to external memory. Accounts for all requests"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_ARB_TRK_OCCUPANCY.DATA_READ / UNC_ARB_TRK_REQUESTS.DATA_READ) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
+ },
+ {
+ "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricExpr": "UNC_ARB_TRK_OCCUPANCY.ALL / UNC_ARB_TRK_REQUESTS.ALL",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_request_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_memory_bandwidth",
+ "MetricThreshold": "tma_info_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_memory_data_tlbs",
+ "MetricThreshold": "tma_info_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_memory_latency",
+ "MetricThreshold": "tma_info_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_mispredictions",
+ "MetricThreshold": "tma_info_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "UNC_CLOCK.SOCKET",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_store_stlb_mpki"
},
{
"BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "MetricName": "tma_info_turbo_utilization"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "64 * ( arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@ ) / 1000000 / duration_time / 1000",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "arb@event\\=0x80\\,umask\\=0x2@ / arb@event\\=0x80\\,umask\\=0x2\\,thresh\\=1@",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_Parallel_Reads"
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions. )",
- "MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
- "MetricGroup": "",
- "MetricName": "IpFarBranch"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "6.5 * tma_info_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricExpr": "(12 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (9 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_info_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
+ "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_non_fused_branches",
+ "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
+ "MetricName": "tma_port_7",
+ "MetricThreshold": "tma_port_7 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address). Sample with: UOPS_DISPATCHED_PORT.PORT_7",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_NONE / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_1 - UOPS_EXECUTED.CORE_CYCLES_GE_2) / 2 if #SMT_on else EXE_ACTIVITY.1_PORTS_UTIL) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_2 - UOPS_EXECUTED.CORE_CYCLES_GE_3) / 2 if #SMT_on else EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 9 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "9 * BACLEARS.ANY / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
}
]
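
A minimal sketch of how one of the metric entries above resolves to a number; the counter values are hypothetical, tma_info_slots is assumed to be the usual pipeline-width-times-clocks helper metric defined earlier in the file, and this is illustrative Python rather than perf's metric evaluator:

# Evaluate tma_retiring ("UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots")
# from raw counter values.  All numbers below are hypothetical.
PIPELINE_WIDTH = 4  # assumed issue width behind the slots helper

counts = {
    "UOPS_RETIRED.RETIRE_SLOTS": 3_200_000_000,  # hypothetical sample
    "CPU_CLK_UNHALTED.THREAD": 1_000_000_000,    # hypothetical sample
}

tma_info_slots = PIPELINE_WIDTH * counts["CPU_CLK_UNHALTED.THREAD"]
tma_retiring = counts["UOPS_RETIRED.RETIRE_SLOTS"] / tma_info_slots

# "ScaleUnit": "100%" reports the fraction as a percentage.
print(f"tma_retiring = {tma_retiring * 100:.1f}%")

# First clause of the "MetricThreshold" ("tma_retiring > 0.7 | ...").
print("above threshold:", tma_retiring > 0.7)
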
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json b/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json
new file mode 100644
index 000000000000..b4e061477c1a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/uncore-cache.json
@@ -0,0 +1,124 @@
+[
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
+ "UMask": "0x86",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
+ "UMask": "0x88",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
+ "UMask": "0x8f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
+ "UMask": "0x16",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
+ "UMask": "0x18",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
+ "UMask": "0x1f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
+ "UMask": "0x26",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
+ "UMask": "0x21",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
+ "EventCode": "0x34",
+ "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
+ "PerPkg": "1",
+ "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
+ "UMask": "0x2f",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CBOX"
+ },
+ {
+ "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
+ "EventCode": "0x22",
+ "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CBOX"
+ }
+]
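
A minimal sketch of one way the CBo lookup events above are commonly combined; it assumes, as the umask encodings suggest, that READ_MESI counts every read lookup (line found in any of M/E/S/I) while READ_I counts the lookups that found the line invalid, i.e. the misses. The per-CBo counts are hypothetical:

# Approximate L3 read hit ratio from per-CBo lookup counts (hypothetical).
per_cbo = [
    # (UNC_CBO_CACHE_LOOKUP.READ_MESI, UNC_CBO_CACHE_LOOKUP.READ_I)
    (1_000_000, 150_000),
    (980_000, 140_000),
]

reads = sum(mesi for mesi, _ in per_cbo)
misses = sum(inv for _, inv in per_cbo)
print(f"approx. L3 read hit ratio: {1 - misses / reads:.2%}")
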
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json
new file mode 100644
index 000000000000..fe7e19717371
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/uncore-interconnect.json
@@ -0,0 +1,67 @@
+[
+ {
+ "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all Core entries outstanding for the memory controller. The outstanding interval starts after LLC miss till return of first data chunk. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
+ "CounterMask": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Core Data Read entries outstanding for the memory controller. The outstanding interval starts after LLC miss till return of first data chunk.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "UNC_ARB_TRK_REQUESTS.ALL",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Core coherent Data Read requests sent to memory controller whose data is returned directly to requesting agent.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Core coherent Data Read requests sent to memory controller whose data is returned directly to requesting agent.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "ARB"
+ }
+]
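
The ARB tracker events above lend themselves to a Little's-law estimate: accumulated occupancy divided by the number of requests gives the average time a data read stays outstanding, in uncore clocks, over the window from LLC miss until the first data chunk returns. A minimal sketch with hypothetical counts:

# Average LLC-miss read latency (in uncore clocks) via Little's law.
counts = {
    "UNC_ARB_TRK_OCCUPANCY.DATA_READ": 60_000_000,  # hypothetical
    "UNC_ARB_TRK_REQUESTS.DATA_READ": 300_000,      # hypothetical
}

avg_latency_uclk = (counts["UNC_ARB_TRK_OCCUPANCY.DATA_READ"]
                    / counts["UNC_ARB_TRK_REQUESTS.DATA_READ"])
print(f"average LLC-miss read latency ~= {avg_latency_uclk:.0f} uncore clocks")
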
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json
new file mode 100644
index 000000000000..58be90d7cc93
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylake/uncore-other.json
@@ -0,0 +1,10 @@
+[
+ {
+ "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylake/uncore.json b/tools/perf/pmu-events/arch/x86/skylake/uncore.json
deleted file mode 100644
index dbc193252fb3..000000000000
--- a/tools/perf/pmu-events/arch/x86/skylake/uncore.json
+++ /dev/null
@@ -1,254 +0,0 @@
-[
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x41",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x81",
- "EventName": "UNC_CBO_XSNP_RESPONSE.MISS_EVICTION",
- "BriefDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "PublicDescription": "A cross-core snoop resulted from L3 Eviction which misses in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x44",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HIT_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a non-modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x22",
- "UMask": "0x48",
- "EventName": "UNC_CBO_XSNP_RESPONSE.HITM_XCORE",
- "BriefDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "PublicDescription": "A cross-core snoop initiated by this Cbox due to processor core memory request which hits a modified line in some processor core.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x21",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_M",
- "BriefDescription": "L3 Lookup write request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x81",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_M",
- "BriefDescription": "L3 Lookup any request that access cache and found line in M-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in M-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x18",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_I",
- "BriefDescription": "L3 Lookup read request that access cache and found line in I-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x88",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_I",
- "BriefDescription": "L3 Lookup any request that access cache and found line in I-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in I-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x1f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_MESI",
- "BriefDescription": "L3 Lookup read request that access cache and found line in any MESI-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in any MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x2f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_MESI",
- "BriefDescription": "L3 Lookup write request that access cache and found line in MESI-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x8f",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_MESI",
- "BriefDescription": "L3 Lookup any request that access cache and found line in MESI-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in MESI-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x86",
- "EventName": "UNC_CBO_CACHE_LOOKUP.ANY_ES",
- "BriefDescription": "L3 Lookup any request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup any request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x16",
- "EventName": "UNC_CBO_CACHE_LOOKUP.READ_ES",
- "BriefDescription": "L3 Lookup read request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup read request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "CBO",
- "EventCode": "0x34",
- "UMask": "0x26",
- "EventName": "UNC_CBO_CACHE_LOOKUP.WRITE_ES",
- "BriefDescription": "L3 Lookup write request that access cache and found line in E or S-state",
- "PublicDescription": "L3 Lookup write request that access cache and found line in E or S-state.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
- "BriefDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Each cycle count number of all Core outgoing valid entries. Such entry is defined as valid from its allocation till first of IDI0 or DRS0 messages is sent out. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
- "BriefDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "PublicDescription": "Total number of Core outgoing entries allocated. Accounts for Coherent and non-coherent traffic.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x02",
- "EventName": "UNC_ARB_TRK_REQUESTS.DRD_DIRECT",
- "BriefDescription": "Number of Core coherent Data Read entries allocated in DirectData mode",
- "PublicDescription": "Number of Core coherent Data Read entries allocated in DirectData mode.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x81",
- "UMask": "0x20",
- "EventName": "UNC_ARB_TRK_REQUESTS.WRITES",
- "BriefDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "PublicDescription": "Number of Writes allocated - any write transactions: full/partials writes and evictions.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x84",
- "UMask": "0x01",
- "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
- "BriefDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "PublicDescription": "Number of entries allocated. Account for Any type: e.g. Snoop, Core aperture, etc.",
- "Counter": "0,1",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "iMPH-U",
- "EventCode": "0x80",
- "UMask": "0x01",
- "EventName": "UNC_ARB_TRK_OCCUPANCY.CYCLES_WITH_ANY_REQUEST",
- "BriefDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.;",
- "PublicDescription": "Cycles with at least one request outstanding is waiting for data return from memory controller. Account for coherent and non-coherent requests initiated by IA Cores, Processor Graphics Unit, or LLC.",
- "Counter": "0",
- "CounterMask": "1",
- "Invert": "0",
- "EdgeDetect": "0"
- },
- {
- "Unit": "NCU",
- "EventCode": "0x0",
- "UMask": "0x01",
- "EventName": "UNC_CLOCK.SOCKET",
- "BriefDescription": "This 48-bit fixed counter counts the UCLK cycles",
- "PublicDescription": "This 48-bit fixed counter counts the UCLK cycles.",
- "Counter": "FIXED",
- "CounterMask": "0",
- "Invert": "0",
- "EdgeDetect": "0"
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
index 2bcba7daca14..f59405877ae8 100644
--- a/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylake/virtual-memory.json
@@ -1,284 +1,228 @@
[
{
- "PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
+ "BriefDescription": "Load misses in all DTLB levels that cause page walks",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "BriefDescription": "Load misses in all DTLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
"SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
},
{
- "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "SampleAfterValue": "100003",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
- "SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"EventCode": "0x08",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
+ "BriefDescription": "Store misses in all DTLB levels that cause page walks",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"EventCode": "0x49",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "SampleAfterValue": "100003",
- "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
- "EventCode": "0x4F",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
+ "EventCode": "0x4f",
"EventName": "EPT.WALK_PENDING",
+ "PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
"SampleAfterValue": "2000003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
- "EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
- "SampleAfterValue": "100003",
- "BriefDescription": "Misses at all ITLB levels that cause page walks",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "BriefDescription": "Misses at all ITLB levels that cause page walks",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
+ "PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0xe",
"EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
- "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.WALK_PENDING",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
- "CounterMask": "1",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"EventCode": "0x85",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "EventCode": "0xAE",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "ITLB.ITLB_FLUSH",
- "SampleAfterValue": "100007",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
- "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
"EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "BriefDescription": "STLB flush attempts",
"EventCode": "0xBD",
- "Counter": "0,1,2,3",
- "UMask": "0x20",
"EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
"SampleAfterValue": "100007",
- "BriefDescription": "STLB flush attempts",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/cache.json b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
index 24df183693fa..d28d8822a51a 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/cache.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/cache.json
@@ -1,1663 +1,1288 @@
[
{
- "EventCode": "0x24",
- "UMask": "0x21",
- "BriefDescription": "Demand Data Read miss L2, no rejects",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
- "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
+ "BriefDescription": "L1D data line replacements",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "L1D miss outstandings duration in cycles",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "AnyThread": "1",
+ "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xF1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0x24",
- "UMask": "0x22",
- "BriefDescription": "RFO requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_MISS",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.SILENT",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0x24",
- "UMask": "0x24",
- "BriefDescription": "L2 cache misses when fetching instructions",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.CODE_RD_MISS",
- "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.USELESS_HWPF",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
+ "Deprecated": "1",
+ "EventCode": "0xF2",
+ "EventName": "L2_LINES_OUT.USELESS_PREF",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "L2 code requests",
"EventCode": "0x24",
- "UMask": "0x27",
- "BriefDescription": "Demand requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
- "PublicDescription": "Demand requests that miss L2 cache.",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe4"
},
{
+ "BriefDescription": "Demand Data Read requests",
"EventCode": "0x24",
- "UMask": "0x38",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.PF_MISS",
- "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe1"
},
{
+ "BriefDescription": "Demand requests that miss L2 cache",
"EventCode": "0x24",
- "UMask": "0x3f",
- "BriefDescription": "All requests that miss L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.MISS",
- "PublicDescription": "All requests that miss L2 cache.",
+ "EventName": "L2_RQSTS.ALL_DEMAND_MISS",
+ "PublicDescription": "Demand requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x27"
},
{
+ "BriefDescription": "Demand requests to L2 cache",
"EventCode": "0x24",
- "UMask": "0xc1",
- "BriefDescription": "Demand Data Read requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
- "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
+ "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
+ "PublicDescription": "Demand requests to L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe7"
},
{
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
"EventCode": "0x24",
- "UMask": "0xc2",
- "BriefDescription": "RFO requests that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.RFO_HIT",
- "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "EventName": "L2_RQSTS.ALL_PF",
+ "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xf8"
},
{
+ "BriefDescription": "RFO requests to L2 cache",
"EventCode": "0x24",
- "UMask": "0xc4",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
"BriefDescription": "L2 cache hits when fetching instructions, code reads.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.CODE_RD_HIT",
"PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc4"
},
{
+ "BriefDescription": "L2 cache misses when fetching instructions",
"EventCode": "0x24",
- "UMask": "0xd8",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.PF_HIT",
- "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x24"
},
{
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
"EventCode": "0x24",
- "UMask": "0xe1",
- "BriefDescription": "Demand Data Read requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
- "PublicDescription": "Counts the number of demand Data Read requests (including requests from L1D hardware prefetchers). These loads may hit or miss L2 cache. Only non rejected loads are counted.",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests, initiated by load instructions, that hit L2 cache",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc1"
},
{
+ "BriefDescription": "Demand Data Read miss L2, no rejects",
"EventCode": "0x24",
- "UMask": "0xe2",
- "BriefDescription": "RFO requests to L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_RFO",
- "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts the number of demand Data Read requests that miss L2 cache. Only not rejected loads are counted.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x21"
},
{
+ "BriefDescription": "All requests that miss L2 cache",
"EventCode": "0x24",
- "UMask": "0xe4",
- "BriefDescription": "L2 code requests",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_CODE_RD",
- "PublicDescription": "Counts the total number of L2 code requests.",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "All requests that miss L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x3f"
},
{
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache",
"EventCode": "0x24",
- "UMask": "0xe7",
- "BriefDescription": "Demand requests to L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_DEMAND_REFERENCES",
- "PublicDescription": "Demand requests to L2 cache.",
+ "EventName": "L2_RQSTS.PF_HIT",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that hit L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xd8"
},
{
+ "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache",
"EventCode": "0x24",
- "UMask": "0xf8",
- "BriefDescription": "Requests from the L1/L2/L3 hardware prefetchers or Load software prefetches",
- "Counter": "0,1,2,3",
- "EventName": "L2_RQSTS.ALL_PF",
- "PublicDescription": "Counts the total number of requests from the L2 hardware prefetchers.",
+ "EventName": "L2_RQSTS.PF_MISS",
+ "PublicDescription": "Counts requests from the L1/L2/L3 hardware prefetchers or Load software prefetches that miss L2 cache.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x38"
},
{
- "EventCode": "0x24",
- "UMask": "0xff",
"BriefDescription": "All L2 requests",
- "Counter": "0,1,2,3",
+ "EventCode": "0x24",
"EventName": "L2_RQSTS.REFERENCES",
"PublicDescription": "All L2 requests.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xF0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
},
{
- "EventCode": "0x2E",
- "UMask": "0x41",
"BriefDescription": "Core-originated cacheable demand requests missed L3",
- "Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.MISS",
"Errata": "SKL057",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
"PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all misses to the L3.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0x2E",
- "UMask": "0x4f",
"BriefDescription": "Core-originated cacheable demand requests that refer to L3",
- "Counter": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
"Errata": "SKL057",
+ "EventCode": "0x2E",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
"PublicDescription": "Counts core-originated cacheable requests to the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches from L1 and L2. It does not include all accesses to the L3.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "L1D miss outstandings duration in cycles",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING",
- "PublicDescription": "Counts duration of L1D miss outstanding, that is each cycle number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch.Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x1",
- "BriefDescription": "Cycles with L1D load Misses outstanding from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.PENDING_CYCLES_ANY",
- "AnyThread": "1",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x48",
- "UMask": "0x2",
- "BriefDescription": "Number of times a request needed a FB entry but there was no entry available for it. That is the FB unavailability was dominant reason for blocking the request. A request includes cacheable/uncacheable demands that is load, store or SW prefetch.",
- "Counter": "0,1,2,3",
- "EventName": "L1D_PEND_MISS.FB_FULL",
- "PublicDescription": "Number of times a request needed a FB (Fill Buffer) entry but there was no entry available for it. A request includes cacheable/uncacheable demands that are load, store or SW prefetch instructions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x51",
- "UMask": "0x1",
- "BriefDescription": "L1D data line replacements",
- "Counter": "0,1,2,3",
- "EventName": "L1D.REPLACEMENT",
- "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x1",
- "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x2",
- "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
- "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x2",
- "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4f"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
- "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x81"
},
{
- "EventCode": "0x60",
- "UMask": "0x4",
- "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x82"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x83"
},
{
- "EventCode": "0x60",
- "UMask": "0x8",
- "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
- "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
},
{
- "EventCode": "0xB0",
- "UMask": "0x1",
- "BriefDescription": "Demand Data Read requests sent to uncore",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
- "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x41"
},
{
- "EventCode": "0xB0",
- "UMask": "0x2",
- "BriefDescription": "Cacheable and noncachaeble code read requests",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
- "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x42"
},
{
- "EventCode": "0xB0",
- "UMask": "0x4",
- "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
- "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x11"
},
{
- "EventCode": "0xB0",
- "UMask": "0x8",
- "BriefDescription": "Demand and prefetch data reads",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
- "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xD0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x12"
},
{
- "EventCode": "0xB0",
- "UMask": "0x80",
- "BriefDescription": "Any memory transaction that reached the SQ.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
- "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
},
{
- "EventCode": "0xB2",
- "UMask": "0x1",
- "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
- "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
+ "PEBS": "1",
+ "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
},
{
- "EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE",
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x11",
- "BriefDescription": "Retired load instructions that miss the STLB. (Precise Event)",
+ "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
"Data_LA": "1",
+ "EventCode": "0xD2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
- "PublicDescription": "Retired load instructions that miss the STLB.",
+ "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD0",
- "UMask": "0x12",
- "BriefDescription": "Retired store instructions that miss the STLB. (Precise Event)",
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
"Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
- "PublicDescription": "Retired store instructions that miss the STLB.",
- "SampleAfterValue": "100003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Retired load instructions which data sources missed L3 but serviced from local DRAM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD0",
- "UMask": "0x21",
- "BriefDescription": "Retired load instructions with locked access. (Precise Event)",
+ "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
"Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xD0",
- "UMask": "0x41",
- "BriefDescription": "Retired load instructions that split across a cacheline boundary. (Precise Event)",
+ "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
"Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Retired load instructions whose data sources was forwarded from a remote cache.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0xD0",
- "UMask": "0x42",
- "BriefDescription": "Retired store instructions that split across a cacheline boundary. (Precise Event)",
+ "BriefDescription": "Retired load instructions whose data sources was remote HITM",
"Data_LA": "1",
+ "EventCode": "0xD3",
+ "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
- "SampleAfterValue": "100003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Retired load instructions whose data sources was remote HITM.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD0",
- "UMask": "0x81",
- "BriefDescription": "All retired load instructions. (Precise Event)",
+ "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
"Data_LA": "1",
+ "EventCode": "0xD4",
+ "EventName": "MEM_LOAD_MISC_RETIRED.UC",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_LOADS",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "SampleAfterValue": "100007",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD0",
- "UMask": "0x82",
- "BriefDescription": "All retired store instructions. (Precise Event)",
+ "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_INST_RETIRED.ALL_STORES",
- "PublicDescription": "All retired store instructions.",
- "SampleAfterValue": "2000003",
- "L1_Hit_Indication": "1",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "EventCode": "0xD1",
- "UMask": "0x1",
"BriefDescription": "Retired load instructions with L1 cache hits as data sources",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
"PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD1",
- "UMask": "0x2",
- "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L2_HIT",
- "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x8"
},
{
- "EventCode": "0xD1",
- "UMask": "0x4",
- "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_HIT",
- "PublicDescription": "Retired load instructions with L3 cache hits as data sources.",
- "SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
- },
- {
"EventCode": "0xD1",
- "UMask": "0x8",
- "BriefDescription": "Retired load instructions missed L1 cache as data sources",
- "Data_LA": "1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L1_MISS",
- "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "PublicDescription": "Retired load instructions with L2 cache hits as data sources.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xD1",
- "UMask": "0x10",
"BriefDescription": "Retired load instructions missed L2 cache as data sources",
"Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
+ "EventCode": "0xD1",
"EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
"PublicDescription": "Retired load instructions missed L2 cache as data sources.",
"SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
},
{
- "EventCode": "0xD1",
- "UMask": "0x20",
- "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.L3_MISS",
- "PublicDescription": "Retired load instructions missed L3 cache as data sources.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD1",
- "UMask": "0x40",
- "BriefDescription": "Retired load instructions which data sources were load missed L1 but hit FB due to preceding miss to the same cache line with data not ready",
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
"Data_LA": "1",
+ "EventCode": "0xD1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
"PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_RETIRED.FB_HIT",
- "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x20"
},
{
- "EventCode": "0xD2",
- "UMask": "0x1",
- "BriefDescription": "Retired load instructions which data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xD2",
- "UMask": "0x2",
- "BriefDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT",
- "PublicDescription": "Retired load instructions which data sources were L3 and cross-core snoop hits in on-pkg core cache.",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
},
{
- "EventCode": "0xD2",
- "UMask": "0x4",
- "BriefDescription": "Retired load instructions which data sources were HitM responses from shared L3",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM",
- "PublicDescription": "Retired load instructions which data sources were HitM responses from shared L3.",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cacheable and non-cacheable code read requests",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_CODE_RD",
+ "PublicDescription": "Counts both cacheable and non-cacheable code read requests.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xD2",
- "UMask": "0x8",
- "BriefDescription": "Retired load instructions which data sources were hits in L3 without snoops required",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
- "PublicDescription": "Retired load instructions which data sources were hits in L3 without snoops required.",
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xD3",
- "UMask": "0x1",
- "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from local dram",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xD3",
- "UMask": "0x2",
- "BriefDescription": "Retired load instructions which data sources missed L3 but serviced from remote dram",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Offcore requests buffer cannot take more entries for this thread core.",
+ "EventCode": "0xB2",
+ "EventName": "OFFCORE_REQUESTS_BUFFER.SQ_FULL",
+ "PublicDescription": "Counts the number of cases when the offcore requests buffer cannot take more entries for the core. This can happen when the superqueue does not contain eligible entries, or when L1D writeback pending FIFO requests is full.Note: Writeback pending FIFO has six entries.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xD3",
- "UMask": "0x4",
- "BriefDescription": "Retired load instructions whose data sources was remote HITM",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xD3",
- "UMask": "0x8",
- "BriefDescription": "Retired load instructions whose data sources was forwarded from a remote cache",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xD4",
- "UMask": "0x4",
- "BriefDescription": "Retired instructions with at least 1 uncacheable load or lock.",
- "Data_LA": "1",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "MEM_LOAD_MISC_RETIRED.UC",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles with offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF0",
- "UMask": "0x40",
- "BriefDescription": "L2 writebacks that access L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_TRANS.L2_WB",
- "PublicDescription": "Counts L2 writebacks that access L2 cache.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF1",
- "UMask": "0x1f",
- "BriefDescription": "L2 cache lines filling L2",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_IN.ALL",
- "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF2",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared state. A non-threaded event.",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.SILENT",
- "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Code Reads transactions in the SuperQueue (SQ), queue to uncore, every cycle.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_CODE_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Code Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xF2",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines can be either in modified state or clean state. Modified lines may either be written back to L3 or directly written to memory and not allocated in L3. Clean lines may either be allocated in L3 or dropped",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.NON_SILENT",
- "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding Demand Data Read transactions in uncore queue.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding Demand Data Read transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor. See the corresponding Umask under OFFCORE_REQUESTS.Note: A prefetch promoted to Demand is counted from the promotion point.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF2",
- "UMask": "0x4",
- "BriefDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
- "Deprecated": "1",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.USELESS_PREF",
- "PublicDescription": "This event is deprecated. Refer to new event L2_LINES_OUT.USELESS_HWPF",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xF2",
- "UMask": "0x4",
- "BriefDescription": "Counts the number of lines that have been hardware prefetched but not used and now evicted by L2 cache",
- "Counter": "0,1,2,3",
- "EventName": "L2_LINES_OUT.USELESS_HWPF",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore, every cycle",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding RFO (store) transactions in the super queue (SQ) every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xF4",
- "UMask": "0x10",
- "BriefDescription": "Number of cache line split locks sent to uncore.",
- "Counter": "0,1,2,3",
- "EventName": "SQ_MISC.SPLIT_LOCK",
- "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
+ "BriefDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OFFCORE_RESPONSE",
+ "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads have any response type.",
- "MSRValue": "0x0000010001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads have any response type.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x01003C0001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x04003C0001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x10003C0001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x3F803C0001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) have any response type.",
- "MSRValue": "0x0000010002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) have any response type.",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x01003C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x04003C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x10003C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x3F803C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
- "MSRValue": "0x0000010004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any response type.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
- "MSRValue": "0x01003C0004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
- "MSRValue": "0x04003C0004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
- "MSRValue": "0x10003C0004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
- "MSRValue": "0x3F803C0004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
- "MSRValue": "0x0000010010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads have any response type.",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x01003C0010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x04003C0010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x10003C0010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x3F803C0010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
- "MSRValue": "0x0000010020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs have any response type.",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x01003C0020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x04003C0020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x10003C0020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x3F803C0020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
- "MSRValue": "0x0000010080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads have any response type.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x01003C0080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x04003C0080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x10003C0080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x3F803C0080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
- "MSRValue": "0x0000010100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs have any response type.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x01003C0100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x04003C0100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x10003C0100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x3F803C0100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
- "MSRValue": "0x0000010400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests have any response type.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x01003C0400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x04003C0400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x10003C0400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x3F803C0400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x0000010490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD have any response type.",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C0490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C0490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C0490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C0490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x0000010120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD have any response type.",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C0120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C0120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C0120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C0120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x0000010491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD have any response type.",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C0491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C0491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C0491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C0491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD have any response type.",
- "MSRValue": "0x0000010122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD have any response type.",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x01003C0122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.NO_SNOOP_NEEDED",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x04003C0122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x10003C0122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.HITM_OTHER_CORE",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3F803C0122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads",
- "MSRValue": "0x08007C0001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "Counts demand data reads",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs)",
- "MSRValue": "0x08007C0002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "Counts all demand data writes (RFOs)",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
- "MSRValue": "0x08007C0004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads",
- "MSRValue": "0x08007C0010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs",
- "MSRValue": "0x08007C0020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads",
- "MSRValue": "0x08007C0080",
- "Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that have any response type.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
- "MSRValue": "0x08007C0100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
- "MSRValue": "0x08007C0400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3F803C0100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C0490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HITM_OTHER_CORE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and the snoop to one of the sibling cores hits the line in M state and the line is forwarded.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C0120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.HIT_OTHER_CORE_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that hit in the L3 and sibling core snoops are not needed as either the core-valid bit is not set or the shared line is present in multiple cores.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C0491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.NO_SNOOP_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD",
- "MSRValue": "0x08007C0122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
- "PublicDescription": "TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0100",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cache line split locks sent to uncore.",
+ "EventCode": "0xF4",
+ "EventName": "SQ_MISC.SPLIT_LOCK",
+ "PublicDescription": "Counts the number of cache line split locks sent to the uncore.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
}
-]
\ No newline at end of file
+]
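As a minimal sketch of how the EventCode/UMask pairs above map to a raw perf config on Intel x86 (config = (UMask << 8) | EventCode, assuming the core PMU and no cmask/edge/inv bits), the C example below opens OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD (EventCode 0x60, UMask 0x1) with perf_event_open and reads its count around a dummy workload. The OFFCORE_RESPONSE entries additionally require the MSR 0x1a6/0x1a7 value to be supplied via attr.config1 and are not covered here; the event choice and workload are illustrative assumptions, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper: glibc does not export perf_event_open() directly. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
                            int group_fd, unsigned long flags)
{
    return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
    struct perf_event_attr attr;
    long long count = 0;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_RAW;
    /* EventCode 0x60, UMask 0x1 from the JSON above:
     * OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD.
     * Raw encoding (assumed): umask in bits 15:8, event select in bits 7:0. */
    attr.config = (0x01ULL << 8) | 0x60;
    attr.disabled = 1;

    fd = perf_event_open(&attr, 0 /* current process */, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

    /* Dummy workload standing in for the code being measured. */
    for (volatile long i = 0; i < 10000000; i++)
        ;

    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    if (read(fd, &count, sizeof(count)) != sizeof(count)) {
        perror("read");
        close(fd);
        return 1;
    }
    printf("raw event 0x%llx count: %lld\n",
           (unsigned long long)attr.config, count);
    close(fd);
    return 0;
}

Once definitions like these are built into perf, the same event can normally be requested by its JSON alias instead of the raw encoding.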
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
index c5d0babe89fc..64dd36387209 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/floating-point.json
@@ -1,85 +1,75 @@
[
{
+ "BriefDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "UMask": "0x1",
- "BriefDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD 128-bit packed computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instruction retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "UMask": "0x2",
- "BriefDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired. Each count represents 1 computation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Counts once for most SIMD 128-bit packed computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "UMask": "0x4",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired. Each count represents 2 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD 256-bit packed double computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "UMask": "0x8",
- "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Counts once for most SIMD 256-bit packed single computational precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xC7",
- "UMask": "0x10",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired. Each count represents 4 computations. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
"EventCode": "0xC7",
- "UMask": "0x20",
- "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired. Each count represents 8 computations. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP RSQRT SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "UMask": "0x40",
- "BriefDescription": "Number of Packed Double-Precision FP arithmetic instructions (Use operation multiplier of 8)",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Counts once for most SIMD scalar computational double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired. Counts twice for DPP and FM(N)ADD/SUB instructions retired.",
"EventCode": "0xC7",
- "UMask": "0x80",
- "BriefDescription": "Number of Packed Single-Precision FP arithmetic instructions (Use operation multiplier of 16)",
- "Counter": "0,1,2,3",
- "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Counts once for most SIMD scalar computational single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SIMD scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCA",
- "UMask": "0x1e",
"BriefDescription": "Cycles with any input/output SSE or FP assist",
- "Counter": "0,1,2,3",
- "EventName": "FP_ASSIST.ANY",
"CounterMask": "1",
+ "EventCode": "0xCA",
+ "EventName": "FP_ASSIST.ANY",
"PublicDescription": "Counts cycles with any input and output SSE or x87 FP assist. If an input and output assist are detected on the same cycle the event increments by 1.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1e"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
index 4dc583cfb545..04f08e4d2402 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/frontend.json
@@ -1,482 +1,421 @@
[
{
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x4",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MITE_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x8",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.DSB_UOPS",
- "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x10",
- "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_DSB_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x18",
- "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x20",
- "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_MITE_UOPS",
- "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xE6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering any Uop",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
- "CounterMask": "1",
- "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x24",
- "BriefDescription": "Cycles MITE is delivering 4 Uops",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
- "CounterMask": "4",
- "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequenser (MS) is busy",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_UOPS",
- "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EdgeDetect": "1",
- "EventCode": "0x79",
- "UMask": "0x30",
- "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
- "Counter": "0,1,2,3",
- "EventName": "IDQ.MS_SWITCHES",
- "CounterMask": "1",
- "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x80",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE_16B.IFDATA_STALL",
- "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x83",
- "UMask": "0x1",
- "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_HIT",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x83",
- "UMask": "0x2",
- "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_MISS",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x83",
- "UMask": "0x4",
- "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
- "Counter": "0,1,2,3",
- "EventName": "ICACHE_64B.IFTAG_STALL",
- "SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "Invert": "1",
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
- "CounterMask": "1",
- "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
- "CounterMask": "2",
- "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
- "CounterMask": "3",
- "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
- "CounterMask": "4",
- "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x9C",
- "UMask": "0x1",
- "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
- "Counter": "0,1,2,3",
- "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
- "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding \u201c4 \u2013 x\u201d when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switches",
+ "EventCode": "0xAB",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "This event counts the number of the Decode Stream Buffer (DSB)-to-MITE switches including all misses because of missing Decode Stream Buffer (DSB) cache and u-arch forced misses.\nNote: Invoking MITE requires two or three cycles delay.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xAB",
- "UMask": "0x2",
"BriefDescription": "Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xAB",
"EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
- "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 0\u20132 cycles.",
+ "PublicDescription": "Counts Decode Stream Buffer (DSB)-to-MITE switch true penalty cycles. These cycles do not include uops routed through because of the switch itself, for example, when Instruction Decode Queue (IDQ) pre-allocation is unavailable, or Instruction Decode Queue (IDQ) is full. SBD-to-MITE switch true penalty cycles happen after the merge mux (MM) receives Decode Stream Buffer (DSB) Sync-indication until receiving the first MITE uop. MM is placed before Instruction Decode Queue (IDQ) to merge uops being fed from the MITE and Decode Stream Buffer (DSB) paths. Decode Stream Buffer (DSB) inserts the Sync-indication whenever a Decode Stream Buffer (DSB)-to-MITE switch occurs.Penalty: A Decode Stream Buffer (DSB) hit followed by a Decode Stream Buffer (DSB) miss can cost up to six cycles in which no uops are delivered to the IDQ. Most often, such switches from the Decode Stream Buffer (DSB) to the legacy pipeline cost 02 cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x400406",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x200206",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x400206",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x15",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
- "TakenAlone": "1",
+ "MSRValue": "0x12",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced iTLB true miss. Precise Event.",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
"PEBS": "1",
- "MSRValue": "0x14",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
- "TakenAlone": "1",
+ "MSRValue": "0x400106",
+ "PEBS": "2",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x13",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x408006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x12",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x401006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired Instructions who experienced decode stream buffer (DSB - the decoded instruction-cache) miss. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x11",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
- "TakenAlone": "1",
+ "MSRValue": "0x400206",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x300206",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x410006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x100206",
- "Counter": "0,1,2,3",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
- "TakenAlone": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 2 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x420006",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_2",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x200206",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 3 bubble-slots for a period of 2 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x410006",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_3",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x300206",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x408006",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x402006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x404006",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
"MSRIndex": "0x3F7",
- "TakenAlone": "1",
+ "MSRValue": "0x400406",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x402006",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
- "TakenAlone": "1",
+ "MSRValue": "0x420006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
"EventCode": "0xC6",
- "UMask": "0x1",
- "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall. Precise Event.",
- "PEBS": "1",
- "MSRValue": "0x401006",
- "Counter": "0,1,2,3",
- "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
"MSRIndex": "0x3F7",
- "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
- "TakenAlone": "1",
+ "MSRValue": "0x404006",
+ "PEBS": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC6",
- "UMask": "0x1",
"BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
- "PEBS": "1",
- "MSRValue": "0x400806",
- "Counter": "0,1,2,3",
+ "EventCode": "0xC6",
"EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
"MSRIndex": "0x3F7",
+ "MSRValue": "0x400806",
+ "PEBS": "1",
"PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
- "TakenAlone": "1",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xC6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PublicDescription": "Cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering 4 Uops",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_4_UOPS",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_DSB_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Count includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering 4 Uops",
+ "CounterMask": "4",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_4_UOPS",
+ "PublicDescription": "Counts the number of cycles 4 uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.ALL_MITE_CYCLES_ANY_UOPS",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. Counting includes uops that may 'bypass' the IDQ. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from Decode Stream Buffer (DSB) path",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. Counting includes uops that may 'bypass' the IDQ. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Cycles when uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_DSB_CYCLES",
+ "PublicDescription": "Counts cycles during which uops initiated by Decode Stream Buffer (DSB) are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_MITE_UOPS",
+ "PublicDescription": "Counts the number of uops initiated by MITE and delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Counting includes uops that may 'bypass' the IDQ.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) while Microcode Sequencer (MS) is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops not delivered to Resource Allocation Table (RAT) per thread when backend of the machine is not stalled",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to Resource Allocation Table (RAT) per thread adding 4 x when Resource Allocation Table (RAT) is not stalled and Instruction Decode Queue (IDQ) delivers x uops to Resource Allocation Table (RAT) (where x belongs to {0,1,2,3}). Counting does not cover cases when: a. IDQ-Resource Allocation Table (RAT) pipe serves the other thread. b. Resource Allocation Table (RAT) is stalled for the thread (including uop drops and clear BE conditions). c. Instruction Decode Queue (IDQ) delivers four uops.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when 4 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "4",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when no uops are delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core =4.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cycles FE delivered 4 uops or Resource Allocation Table (RAT) was stalling FE.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles per thread when 3 or more uops are not delivered to Resource Allocation Table (RAT) when backend of the machine is not stalled",
+ "CounterMask": "3",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_1_UOP_DELIV.CORE",
+ "PublicDescription": "Counts, on the per-thread basis, cycles when less than 1 uop is delivered to Resource Allocation Table (RAT). IDQ_Uops_Not_Delivered.core >= 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with less than 2 uops delivered by the front end.",
+ "CounterMask": "2",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_2_UOP_DELIV.CORE",
+ "PublicDescription": "Cycles with less than 2 uops delivered by the front-end.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with less than 3 uops delivered by the front end.",
+ "CounterMask": "1",
+ "EventCode": "0x9C",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_LE_3_UOP_DELIV.CORE",
+ "PublicDescription": "Cycles with less than 3 uops delivered by the front-end.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-] \ No newline at end of file
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/memory.json b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
index 48a9cdf81307..2b797dbc75fe 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/memory.json
@@ -1,1396 +1,1021 @@
[
{
- "EventCode": "0x54",
- "UMask": "0x1",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CONFLICT",
- "PublicDescription": "Number of times a TSX line had a cache conflict.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x2",
- "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_CAPACITY",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x4",
- "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
- "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x8",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
- "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x10",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
- "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x20",
- "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
- "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x54",
- "UMask": "0x40",
- "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
- "Counter": "0,1,2,3",
- "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
- "PublicDescription": "Number of times we could not allocate Lock Buffer.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC2",
- "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x4",
- "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC3",
- "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x8",
- "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC4",
- "PublicDescription": "RTM region detected inside HLE.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x5d",
- "UMask": "0x10",
- "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
- "Counter": "0,1,2,3",
- "EventName": "TX_EXEC.MISC5",
- "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x10",
- "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x10",
- "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
- "CounterMask": "6",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x60",
- "UMask": "0x10",
- "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xA3",
- "UMask": "0x2",
"BriefDescription": "Cycles while L3 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
"CounterMask": "2",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L3_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA3",
- "UMask": "0x6",
"BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
"CounterMask": "6",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xB0",
- "UMask": "0x10",
- "BriefDescription": "Demand Data Read requests who miss L3 cache",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
- "PublicDescription": "Demand Data Read requests who miss L3 cache.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC3",
- "UMask": "0x2",
- "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
- "Errata": "SKL089",
- "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x6"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
"EventCode": "0xC8",
- "UMask": "0x1",
- "BriefDescription": "Number of times an HLE execution started.",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.START",
- "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
+ "EventName": "HLE_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times HLE abort was triggered.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
"EventCode": "0xC8",
- "UMask": "0x2",
- "BriefDescription": "Number of times an HLE execution successfully committed",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.COMMIT",
- "PublicDescription": "Number of times HLE commit succeeded.",
+ "EventName": "HLE_RETIRED.ABORTED_EVENTS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
"EventCode": "0xC8",
- "UMask": "0x4",
- "BriefDescription": "Number of times an HLE execution aborted due to any reasons (multiple categories may count as one).",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED",
- "PublicDescription": "Number of times HLE abort was triggered. (PEBS)",
+ "EventName": "HLE_RETIRED.ABORTED_MEM",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
"EventCode": "0xC8",
- "UMask": "0x8",
- "BriefDescription": "Number of times an HLE execution aborted due to various memory events (e.g., read/write capacity and conflicts).",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MEM",
+ "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xC8",
- "UMask": "0x10",
"BriefDescription": "Number of times an HLE execution aborted due to hardware timer expiration.",
- "Counter": "0,1,2,3",
+ "EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_TIMER",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC8",
- "UMask": "0x20",
"BriefDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
- "Counter": "0,1,2,3",
+ "EventCode": "0xC8",
"EventName": "HLE_RETIRED.ABORTED_UNFRIENDLY",
- "PublicDescription": "Number of times an HLE execution aborted due to HLE-unfriendly instructions and certain unfriendly events (such as AD assists etc.).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Number of times an HLE execution successfully committed",
"EventCode": "0xC8",
- "UMask": "0x40",
- "BriefDescription": "Number of times an HLE execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_MEMTYPE",
- "PublicDescription": "Number of times an HLE execution aborted due to incompatible memory type.",
+ "EventName": "HLE_RETIRED.COMMIT",
+ "PublicDescription": "Number of times HLE commit succeeded.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Number of times an HLE execution started.",
"EventCode": "0xC8",
- "UMask": "0x80",
- "BriefDescription": "Number of times an HLE execution aborted due to unfriendly events (such as interrupts).",
- "Counter": "0,1,2,3",
- "EventName": "HLE_RETIRED.ABORTED_EVENTS",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x1",
- "BriefDescription": "Number of times an RTM execution started.",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.START",
- "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x2",
- "BriefDescription": "Number of times an RTM execution successfully committed",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.COMMIT",
- "PublicDescription": "Number of times RTM commit succeeded.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x4",
- "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED",
- "PublicDescription": "Number of times RTM abort was triggered. (PEBS)",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x8",
- "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MEM",
- "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x10",
- "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_TIMER",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xC9",
- "UMask": "0x20",
- "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
- "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "EventName": "HLE_RETIRED.START",
+ "PublicDescription": "Number of times we entered an HLE region. Does not count nested transactions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xC9",
- "UMask": "0x40",
- "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
- "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of machine clears due to memory order conflicts.",
+ "Errata": "SKL089",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of memory ordering Machine Clears detected. Memory Ordering Machine Clears can result from one of the following:a. memory disambiguation,b. external snoop, orc. cross SMT-HW-thread snoop (stores) hitting load buffer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC9",
- "UMask": "0x80",
- "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
- "Counter": "0,1,2,3",
- "EventName": "RTM_RETIRED.ABORTED_EVENTS",
- "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
- "PEBS": "2",
- "MSRValue": "0x200",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "101",
- "CounterHTOff": "0,1,2,3"
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
- "PEBS": "2",
- "MSRValue": "0x100",
- "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
"PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
"SampleAfterValue": "503",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
- "PEBS": "2",
- "MSRValue": "0x80",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "1009",
- "CounterHTOff": "0,1,2,3"
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
- "PEBS": "2",
- "MSRValue": "0x40",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "2003",
- "CounterHTOff": "0,1,2,3"
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
- "PEBS": "2",
- "MSRValue": "0x20",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3"
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
- "PEBS": "2",
- "MSRValue": "0x10",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
"MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
- "SampleAfterValue": "20011",
- "CounterHTOff": "0,1,2,3"
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
"BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
- "PEBS": "2",
- "MSRValue": "0x8",
- "Counter": "0,1,2,3",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
"EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
"MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
"PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
"SampleAfterValue": "50021",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "EventCode": "0xCD",
- "UMask": "0x1",
- "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
- "PEBS": "2",
- "MSRValue": "0x4",
- "Counter": "0,1,2,3",
- "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
- "MSRIndex": "0x3F6",
- "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
- "TakenAlone": "1",
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "EventCode": "0xB0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles with at least 1 Demand Data Read requests who miss L3 cache in the superQ.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of Offcore outstanding Demand Data Read requests that miss L3 cache in the superQ every cycle.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles with at least 6 Demand Data Read requests that miss L3 cache in the superQ.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.L3_MISS_DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD TBD",
- "MSRValue": "0x3FBC000001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x083FC00001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x103FC00001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x063FC00001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x063B800001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch data reads that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand data reads TBD",
- "MSRValue": "0x0604000001",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800491",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD TBD",
- "MSRValue": "0x3FBC000002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x083FC00002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x103FC00002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x063FC00002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x063B800002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch data reads that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all demand data writes (RFOs) TBD",
- "MSRValue": "0x0604000002",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all demand data writes (RFOs) TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800490",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
- "MSRValue": "0x3FBC000004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
- "MSRValue": "0x083FC00004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
- "MSRValue": "0x103FC00004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
- "MSRValue": "0x063FC00004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
- "MSRValue": "0x063B800004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch RFOs that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
- "MSRValue": "0x0604000004",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800120",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
- "MSRValue": "0x3FBC000010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x083FC00010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x103FC00010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x063FC00010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x063B800010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand & prefetch RFOs that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts prefetch (that bring data to L2) data reads TBD",
- "MSRValue": "0x0604000010",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts prefetch (that bring data to L2) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800122",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
- "MSRValue": "0x3FBC000020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x083FC00020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x103FC00020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x063FC00020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x063B800020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand code reads that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
- "MSRValue": "0x0604000020",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to L2) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_CODE_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800004",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
- "MSRValue": "0x3FBC000080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x083FC00080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x103FC00080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x063FC00080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x063B800080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts demand data reads that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
- "MSRValue": "0x0604000080",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) data reads TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800001",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
- "MSRValue": "0x3FBC000100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x083FC00100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x103FC00100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x063FC00100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x063B800100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all demand data writes (RFOs) that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
- "MSRValue": "0x0604000100",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts all prefetch (that bring data to LLC only) RFOs TBD",
+ "EventName": "OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800002",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
- "MSRValue": "0x3FBC000400",
- "Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD TBD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x083FC00400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x103FC00400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x063FC00400",
- "Counter": "0,1,2,3",
"EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x063B800400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
- "MSRValue": "0x0604000400",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "Counts L1 data cache hardware prefetch requests and software prefetch requests TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L1D_AND_SW.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800400",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC000490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC00490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC00490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063FC00490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B800490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts prefetch (that bring data to L2) data reads that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0604000490",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800010",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC000120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC00120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC00120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063FC00120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B800120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to L2) RFOs that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0604000120",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_PF_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800020",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC000491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC00491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC00491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063FC00491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B800491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) data reads that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0604000491",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_DATA_RD.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_DATA_RD.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800080",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss in the L3.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD TBD",
- "MSRValue": "0x3FBC000122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.ANY_SNOOP",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.ANY_SNOOP",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3FBC000100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the modified data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x083FC00122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HIT_FORWARD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x103FC00100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and clean or shared data is transferred from remote cache.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x103FC00122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.REMOTE_HITM",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.REMOTE_HIT_FORWARD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x83FC00100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local or remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063FC00122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63FC00100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from local dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x063B800122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x604000100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
},
{
- "Offcore": "1",
+ "BriefDescription": "Counts all prefetch (that bring data to LLC only) RFOs that miss the L3 and the data is returned from remote dram.",
"EventCode": "0xB7, 0xBB",
- "UMask": "0x1",
- "BriefDescription": "TBD TBD",
- "MSRValue": "0x0604000122",
- "Counter": "0,1,2,3",
- "EventName": "OFFCORE_RESPONSE.ALL_RFO.L3_MISS_LOCAL_DRAM.SNOOP_MISS_OR_NO_FWD",
- "MSRIndex": "0x1a6, 0x1a7",
- "PublicDescription": "TBD TBD",
+ "EventName": "OFFCORE_RESPONSE.PF_L3_RFO.L3_MISS_REMOTE_DRAM.SNOOP_MISS_OR_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x63B800100",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to any reasons (multiple categories may count as one).",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PEBS": "1",
+ "PublicDescription": "Number of times RTM abort was triggered.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to uncommon conditions.",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_TIMER",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Number of times RTM commit succeeded.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xC9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed. Since this is the count of execution, it may not always cause a transactional abort.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions (e.g., vzeroupper) that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a XBEGIN instruction was executed inside an HLE transactional region.",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC4",
+ "PublicDescription": "RTM region detected inside HLE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC5",
+ "PublicDescription": "Counts the number of times an HLE XACQUIRE instruction was executed inside an RTM transactional region.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data capacity limitation for transactional reads or writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to XRELEASE lock not satisfying the address and value requirements in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_MISMATCH",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to release/commit but data and address mismatch.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to NoAllocatedElisionBuffer being non-zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_NOT_EMPTY",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to commit but Lock Buffer not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an HLE transactional execution aborted due to an unsupported read alignment from the elision buffer.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_ELISION_BUFFER_UNSUPPORTED_ALIGNMENT",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to attempting an unsupported alignment from Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times a HLE transactional region aborted due to a non XRELEASE prefixed instruction writing to an elided lock in the elision buffer",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_HLE_STORE_TO_ELIDED_LOCK",
+ "PublicDescription": "Number of times a TSX Abort was triggered due to a non-release/commit store to lock.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times HLE lock could not be elided due to ElisionBufferAvailable being zero.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.HLE_ELISION_BUFFER_FULL",
+ "PublicDescription": "Number of times we could not allocate Lock Buffer.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
}
-]
\ No newline at end of file
+]
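As a minimal sketch of how the renamed OFFCORE_RESPONSE entries above are consumed, the EventCode (0xB7/0xBB), UMask and MSRValue triplet maps onto a perf event spec, assuming the core PMU exposes the usual event=, umask= and offcore_rsp= format terms (verify under /sys/bus/event_source/devices/cpu/format/ on the target machine); the helper below is illustrative only, not part of the pmu-events tooling:

# Illustrative only: build a perf event spec from one JSON entry shown above.
# offcore_rsp= is assumed to be the format term that loads MSR 0x1A6/0x1A7
# (MSRIndex) with MSRValue; either of the two event codes (0xB7 or 0xBB) works.
def offcore_event_spec(entry):
    event_code = entry["EventCode"].split(",")[0].strip()
    terms = ["event=" + event_code, "umask=" + entry["UMask"]]
    if "MSRValue" in entry:
        terms.append("offcore_rsp=" + entry["MSRValue"])
    return ("cpu/" + ",".join(terms) + "/").lower()

print(offcore_event_spec({
    "EventCode": "0xB7, 0xBB",
    "EventName": "OFFCORE_RESPONSE.PF_L2_DATA_RD.L3_MISS.ANY_SNOOP",
    "MSRIndex": "0x1a6,0x1a7",
    "MSRValue": "0x3FBC000010",
    "UMask": "0x1",
}))
# -> cpu/event=0xb7,umask=0x1,offcore_rsp=0x3fbc000010/  (usable with perf stat -e)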
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/other.json b/tools/perf/pmu-events/arch/x86/skylakex/other.json
index 778a541463eb..cda8a7a45f0c 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/other.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/other.json
@@ -1,164 +1,114 @@
[
{
- "EventCode": "0x28",
- "UMask": "0x7",
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x28",
"EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x7"
},
{
- "EventCode": "0x28",
- "UMask": "0x18",
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x28",
"EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x18"
},
{
- "EventCode": "0x28",
- "UMask": "0x20",
"BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x28",
"EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
"PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server michroarchtecture). This includes high current AVX 512-bit instructions.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0x28",
- "UMask": "0x40",
"BriefDescription": "Core cycles the core was throttled due to a pending power level request.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x28",
"EventName": "CORE_POWER.THROTTLE",
"PublicDescription": "Core cycles the out-of-order engine was throttled due to a pending power level request.",
"SampleAfterValue": "200003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0x32",
- "UMask": "0x1",
- "BriefDescription": "Number of PREFETCHNTA instructions executed.",
- "Counter": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.NTA",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "UMask": "0x2",
- "BriefDescription": "Number of PREFETCHT0 instructions executed.",
- "Counter": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T0",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "UMask": "0x4",
- "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
- "Counter": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.T1_T2",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0x32",
- "UMask": "0x8",
- "BriefDescription": "Number of PREFETCHW instructions executed.",
- "Counter": "0,1,2,3",
- "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "EventCode": "0xEF",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xCB",
- "UMask": "0x1",
- "BriefDescription": "Number of hardware interrupts received by the processor.",
- "Counter": "0,1,2,3",
- "EventName": "HW_INTERRUPTS.RECEIVED",
- "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
- "SampleAfterValue": "203",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
"EventCode": "0xEF",
- "UMask": "0x1",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
"EventCode": "0xEF",
- "UMask": "0x2",
- "Counter": "0,1,2,3",
"EventName": "CORE_SNOOP_RESPONSE.RSP_IHITFSE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_IHITI",
"EventCode": "0xEF",
- "UMask": "0x4",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_IHITI",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
"EventCode": "0xEF",
- "UMask": "0x8",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
"EventCode": "0xEF",
- "UMask": "0x10",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDM",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDM",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
"EventCode": "0xEF",
- "UMask": "0x20",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_IFWDFE",
+ "EventName": "CORE_SNOOP_RESPONSE.RSP_SHITFSE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xEF",
- "UMask": "0x40",
- "Counter": "0,1,2,3",
- "EventName": "CORE_SNOOP_RESPONSE.RSP_SFWDFE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of hardware interrupts received by the processor.",
+ "EventCode": "0xCB",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "PublicDescription": "Counts the number of hardware interruptions received by the processor.",
+ "SampleAfterValue": "203",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
"EventCode": "0xFE",
- "UMask": "0x2",
+ "EventName": "IDI_MISC.WB_DOWNGRADE",
+ "PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
"BriefDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly",
- "Counter": "0,1,2,3",
+ "EventCode": "0xFE",
"EventName": "IDI_MISC.WB_UPGRADE",
"PublicDescription": "Counts number of cache lines that are allocated and written back to L3 with the intention that they are more likely to be reused shortly.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xFE",
- "UMask": "0x4",
- "BriefDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly",
- "Counter": "0,1,2,3",
- "EventName": "IDI_MISC.WB_DOWNGRADE",
- "PublicDescription": "Counts number of cache lines that are dropped and not written back to L3 as they are deemed to be less likely to be reused shortly.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
+ "EventCode": "0x09",
+ "EventName": "MEMORY_DISAMBIGUATION.HISTORY_RESET",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
}
-]
\ No newline at end of file
+]
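The other.json hunk above mostly reorders keys and fills in BriefDescription strings; the EventCode/UMask pairs themselves are unchanged. As a sketch of what those two fields encode, assuming the standard x86 PERFEVTSEL layout (event select in bits 0-7, unit mask in bits 8-15), an entry with no extra MSRs reduces to a perf raw event:

# Sketch only, assuming the standard PERFEVTSEL bit layout; no kernel interfaces used.
def raw_event(event_code, umask):
    config = int(event_code, 16) | (int(umask, 16) << 8)
    return "r%x" % config

# IDI_MISC.WB_UPGRADE from the hunk above: EventCode 0xFE, UMask 0x2.
assert raw_event("0xFE", "0x2") == "r2fe"
# e.g. perf stat -e r2fe ..., or by symbolic name once this JSON is built
# into perf through the pmu-events tables.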
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
index 369f56c1d1b5..0f06e314fe36 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/pipeline.json
@@ -1,967 +1,823 @@
[
{
- "UMask": "0x1",
- "BriefDescription": "Instructions retired from execution.",
- "Counter": "Fixed counter 0",
- "EventName": "INST_RETIRED.ANY",
- "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
"SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 0"
+ "UMask": "0x1"
},
{
- "UMask": "0x2",
- "BriefDescription": "Core cycles when the thread is not in halt state",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD",
- "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009"
},
{
- "UMask": "0x2",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "Fixed counter 1",
- "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 1"
+ "BriefDescription": "All (macro) branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "UMask": "0x3",
- "BriefDescription": "Reference cycles when the core is not in halt state.",
- "Counter": "Fixed counter 2",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "Fixed counter 2"
+ "BriefDescription": "Conditional branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x03",
- "UMask": "0x2",
- "BriefDescription": "Loads blocked by overlapping with store buffer that cannot be forwarded .",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.STORE_FORWARD",
- "PublicDescription": "Counts how many times the load operation got the true Block-on-Store blocking code preventing store forwarding. This includes cases when:a. preceding store conflicts with the load (incomplete overlap),b. store forwarding is impossible due to u-arch limitations,c. preceding lock RMW operations are not forwarded,d. store has the no-forward bit set (uncacheable/page-split/masked stores),e. all-blocking stores are used (mostly, fences and port I/O), and others.The most common case is a load blocked due to its address range overlapping with a preceding smaller uncompleted store. Note: This event does not take into account cases of out-of-SW-control (for example, SbTailHit), unknown physical STA, and cases of blocking loads on store due to being non-WB memory type or a lock. These cases are covered by other events. See the table of not supported store forwards in the Optimization Guide.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "EventCode": "0x03",
- "UMask": "0x8",
- "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS.NO_SR",
- "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Far branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "This event counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
},
{
- "EventCode": "0x07",
- "UMask": "0x1",
- "BriefDescription": "False dependencies in MOB due to partial compare on address.",
- "Counter": "0,1,2,3",
- "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
- "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
},
{
- "EventCode": "0x0D",
- "UMask": "0x1",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES",
- "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Return instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
},
{
- "EventCode": "0x0D",
- "UMask": "0x1",
- "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "This event counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EventCode": "0x0D",
- "UMask": "0x80",
- "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
- "Counter": "0,1,2,3",
- "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Not taken branch instructions retired.",
+ "Errata": "SKL091",
+ "EventCode": "0xC4",
+ "EventName": "BR_INST_RETIRED.NOT_TAKEN",
+ "PublicDescription": "This event counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
},
{
- "Invert": "1",
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative and retired mispredicted macro conditional branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.ALL_BRANCHES",
+ "PublicDescription": "This event counts both taken and not taken speculative and retired mispredicted branch instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
},
{
- "EventCode": "0x0E",
- "UMask": "0x1",
- "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.ANY",
- "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Speculative mispredicted indirect branches",
+ "EventCode": "0x89",
+ "EventName": "BR_MISP_EXEC.INDIRECT",
+ "PublicDescription": "Counts speculatively miss-predicted indirect branches at execution time. Counts for indirect near CALL or JMP instructions (RET excluded).",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
},
{
- "EventCode": "0x0E",
- "UMask": "0x2",
- "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
- "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to \u201cMixing Intel AVX and Intel SSE Code\u201d section of the Optimization Guide.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "All mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "400009"
},
{
- "EventCode": "0x0E",
- "UMask": "0x20",
- "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_ISSUED.SLOW_LEA",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted macro branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
+ "PEBS": "2",
+ "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x4"
},
{
- "EventCode": "0x14",
- "UMask": "0x1",
- "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
- "Counter": "0,1,2,3",
- "EventName": "ARITH.DIVIDER_ACTIVE",
- "CounterMask": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.CONDITIONAL",
+ "PEBS": "1",
+ "PublicDescription": "This event counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Thread cycles when thread is not in halt state",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P",
- "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x2"
},
{
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
},
{
- "EdgeDetect": "1",
- "EventCode": "0x3C",
- "UMask": "0x0",
- "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
- "CounterMask": "1",
- "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xC5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"EventCode": "0x3C",
- "UMask": "0x1",
+ "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
+ "AnyThread": "1",
"BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
"EventName": "CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2503",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
"EventCode": "0x3C",
- "UMask": "0x1",
- "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
- "AnyThread": "1",
- "SampleAfterValue": "2503",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
},
{
- "EventCode": "0x3C",
- "UMask": "0x1",
"BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x3C",
"EventName": "CPU_CLK_UNHALTED.REF_XCLK",
- "SampleAfterValue": "2503",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
+ "AnyThread": "1",
+ "BriefDescription": "Core crystal clock cycles when at least one thread on the physical core is unhalted.",
"EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK_ANY",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Counts when there is a transition from ring 1, 2 or 3 to ring 0.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
"EventCode": "0x3C",
- "UMask": "0x2",
- "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
- "Counter": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
- "SampleAfterValue": "2503",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "CPU_CLK_UNHALTED.RING0_TRANS",
+ "PublicDescription": "Counts when the Current Privilege Level (CPL) transitions from ring 1, 2 or 3 to ring 0 (Kernel).",
+ "SampleAfterValue": "100007"
},
{
- "EventCode": "0x4C",
- "UMask": "0x1",
- "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
- "Counter": "0,1,2,3",
- "EventName": "LOAD_HIT_PRE.SW_PF",
- "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0x59",
- "UMask": "0x1",
- "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
- "Counter": "0,1,2,3",
- "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
- "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EdgeDetect": "1",
- "Invert": "1",
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_END",
- "CounterMask": "1",
- "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0x5E",
- "UMask": "0x1",
- "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
- "Counter": "0,1,2,3",
- "EventName": "RS_EVENTS.EMPTY_CYCLES",
- "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles when at least one thread on the physical core is not in halt state.",
+ "EventCode": "0x3C",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P_ANY",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0x87",
- "UMask": "0x1",
- "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
- "Counter": "0,1,2,3",
- "EventName": "ILD_STALL.LCP",
- "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA1",
- "UMask": "0x1",
- "BriefDescription": "Cycles per thread when uops are executed in port 0",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA1",
- "UMask": "0x2",
- "BriefDescription": "Cycles per thread when uops are executed in port 1",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xA1",
- "UMask": "0x4",
- "BriefDescription": "Cycles per thread when uops are executed in port 2",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xc"
},
{
- "EventCode": "0xA1",
- "UMask": "0x8",
- "BriefDescription": "Cycles per thread when uops are executed in port 3",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x5"
},
{
- "EventCode": "0xA1",
- "UMask": "0x10",
- "BriefDescription": "Cycles per thread when uops are executed in port 4",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x14"
},
{
- "EventCode": "0xA1",
- "UMask": "0x20",
- "BriefDescription": "Cycles per thread when uops are executed in port 5",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xA3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xA1",
- "UMask": "0x40",
- "BriefDescription": "Cycles per thread when uops are executed in port 6",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA1",
- "UMask": "0x80",
- "BriefDescription": "Cycles per thread when uops are executed in port 7",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
- "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xa2",
- "UMask": "0x1",
- "BriefDescription": "Resource-related stall cycles",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.ANY",
- "PublicDescription": "Counts resource-related stall cycles.",
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xA2",
- "UMask": "0x8",
- "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
- "Counter": "0,1,2,3",
- "EventName": "RESOURCE_STALLS.SB",
- "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xA3",
- "UMask": "0x1",
- "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
- "CounterMask": "1",
+ "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "EventCode": "0xA3",
- "UMask": "0x4",
- "BriefDescription": "Total execution stalls.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
- "CounterMask": "4",
+ "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
+ "EventCode": "0xA6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x5",
- "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
- "CounterMask": "5",
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x8",
- "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
- "CounterMask": "8",
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0xc",
- "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
- "CounterMask": "12",
+ "BriefDescription": "Instructions retired from execution.",
+ "EventName": "INST_RETIRED.ANY",
+ "PublicDescription": "Counts the number of instructions retired from execution. For instructions that consist of multiple micro-ops, Counts the retirement of the last micro-op of the instruction. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter, leaving the four (eight when Hyperthreading is disabled) programmable counters available for other events. INST_RETIRED.ANY_P is counted by a programmable counter and it is an architectural performance event. Counting: Faulting executions of GETSEC/VM entry/VM Exit/MWait will not count as retired instructions.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA3",
- "UMask": "0x10",
- "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
- "CounterMask": "16",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "SampleAfterValue": "2000003"
},
{
- "EventCode": "0xA3",
- "UMask": "0x14",
- "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
- "CounterMask": "20",
+ "BriefDescription": "Number of all retired NOP instructions.",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBS": "1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3"
+ "UMask": "0x2"
},
{
- "EventCode": "0xA6",
- "UMask": "0x1",
- "BriefDescription": "Cycles where no uops were executed, the Reservation Station was not empty, the Store Buffer was full and there was no outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
- "PublicDescription": "Counts cycles during which no uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "2",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA6",
- "UMask": "0x2",
- "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
- "Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
- "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
+ "CounterMask": "10",
+ "Errata": "SKL091, SKL044",
+ "EventCode": "0xC0",
+ "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
+ "Invert": "1",
+ "PEBS": "2",
+ "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA6",
- "UMask": "0x4",
- "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
- "Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
- "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "BriefDescription": "Cycles the issue-stage is waiting for front-end to fetch from resteered path following branch misprediction or machine clear events.",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x80"
},
{
- "EventCode": "0xA6",
- "UMask": "0x8",
- "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
- "Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
- "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread (e.g. misprediction or memory nuke)",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Core cycles the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA6",
- "UMask": "0x10",
- "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
- "Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
- "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "AnyThread": "1",
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for any thread running on the physical core (e.g. misprediction or memory nuke).",
+ "EventCode": "0x0D",
+ "EventName": "INT_MISC.RECOVERY_CYCLES_ANY",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA6",
- "UMask": "0x40",
- "BriefDescription": "Cycles where the Store Buffer was full and no outstanding load.",
- "Counter": "0,1,2,3",
- "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
- "BriefDescription": "Number of Uops delivered by the LSD.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.UOPS",
- "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts false dependencies in MOB when the partial comparison upon loose net check and dependency was resolved by the Enhanced Loose net mechanism. This may not result in high performance penalties. Loose net checks can fail when loads and stores are 4k aliased.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4C",
+ "EventName": "LOAD_HIT_PRE.SW_PF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
"BriefDescription": "Cycles 4 Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_4_UOPS",
"CounterMask": "4",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_4_UOPS",
"PublicDescription": "Counts the cycles when 4 uops are delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xA8",
- "UMask": "0x1",
"BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
- "Counter": "0,1,2,3",
- "EventName": "LSD.CYCLES_ACTIVE",
"CounterMask": "1",
+ "EventCode": "0xA8",
+ "EventName": "LSD.CYCLES_ACTIVE",
"PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
- "CounterMask": "4",
- "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xA8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
- "CounterMask": "3",
- "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
- "CounterMask": "2",
- "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xC3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
+ "EventCode": "0xC1",
+ "EventName": "OTHER_ASSISTS.ANY",
+ "SampleAfterValue": "100003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "Cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0x59",
+ "EventName": "PARTIAL_RAT_STALLS.SCOREBOARD",
+ "PublicDescription": "This event counts cycles during which the microcode scoreboard stalls happen.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
- "CounterMask": "1",
- "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "BriefDescription": "Resource-related stall cycles",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.ANY",
+ "PublicDescription": "Counts resource-related stall cycles.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.STALL_CYCLES",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xA2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xB1",
- "UMask": "0x1",
- "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.THREAD",
- "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Number of uops executed on the core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE",
- "PublicDescription": "Number of uops executed from any thread.",
+ "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
+ "EventCode": "0xCC",
+ "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x40"
},
{
- "Invert": "1",
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
- "CounterMask": "1",
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for the thread.; Note: In ST-mode, not active thread should drive 0. This is usually caused by severely costly branch mispredictions, or allocator/FE issues.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
- "CounterMask": "4",
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate Frontend Latency Bound issues.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5E",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to precisely locate front-end Latency Bound issues.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
- "CounterMask": "3",
+ "BriefDescription": "Cycles per thread when uops are executed in port 0",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_0",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
- "CounterMask": "2",
+ "BriefDescription": "Cycles per thread when uops are executed in port 1",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_1",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xB1",
- "UMask": "0x2",
- "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
- "CounterMask": "1",
+ "BriefDescription": "Cycles per thread when uops are executed in port 2",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_2",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 2.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
- "EventCode": "0xB1",
- "UMask": "0x10",
- "BriefDescription": "Counts the number of x87 uops dispatched.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_EXECUTED.X87",
- "PublicDescription": "Counts the number of x87 uops executed.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 3",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 3.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
- "EventCode": "0xC0",
- "UMask": "0x0",
- "BriefDescription": "Number of instructions retired. General Counter - architectural event",
- "Counter": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "Errata": "SKL091, SKL044",
- "PublicDescription": "Counts the number of instructions (EOMs) retired. Counting covers macro-fused instructions individually (that is, increments by two).",
+ "BriefDescription": "Cycles per thread when uops are executed in port 4",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_4",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 4.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xC0",
- "UMask": "0x1",
- "BriefDescription": "Precise instruction retired event with HW to reduce effect of PEBS shadow in IP distribution",
- "PEBS": "2",
- "Counter": "1",
- "EventName": "INST_RETIRED.PREC_DIST",
- "Errata": "SKL091, SKL044",
- "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled.",
+ "BriefDescription": "Cycles per thread when uops are executed in port 5",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "1"
+ "UMask": "0x20"
},
{
- "Invert": "1",
- "EventCode": "0xC0",
- "UMask": "0x1",
- "BriefDescription": "Number of cycles using always true condition applied to PEBS instructions retired event.",
- "PEBS": "2",
- "Counter": "0,2,3",
- "EventName": "INST_RETIRED.TOTAL_CYCLES_PS",
- "CounterMask": "10",
- "Errata": "SKL091, SKL044",
- "PublicDescription": "Number of cycles using an always true condition applied to PEBS instructions retired event. (inst_ret< 16)",
+ "BriefDescription": "Cycles per thread when uops are executed in port 6",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_6",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,2,3"
+ "UMask": "0x40"
},
{
- "EventCode": "0xC1",
- "UMask": "0x3f",
- "BriefDescription": "Number of times a microcode assist is invoked by HW other than FP-assist. Examples include AD (page Access Dirty) and AVX* related assists.",
- "Counter": "0,1,2,3",
- "EventName": "OTHER_ASSISTS.ANY",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles per thread when uops are executed in port 7",
+ "EventCode": "0xA1",
+ "EventName": "UOPS_DISPATCHED_PORT.PORT_7",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 7.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Cycles with less than 10 actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
- "CounterMask": "10",
- "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Number of uops executed from any thread.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "Invert": "1",
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Cycles without actually retired uops.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
"CounterMask": "1",
- "PublicDescription": "This event counts cycles without actually retired uops.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xC2",
- "UMask": "0x2",
- "BriefDescription": "Retirement slots used.",
- "Counter": "0,1,2,3",
- "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
- "PublicDescription": "Counts the retirement slots used.",
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EdgeDetect": "1",
- "EventCode": "0xC3",
- "UMask": "0x1",
- "BriefDescription": "Number of machine clears (nukes) of any type.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.COUNT",
- "CounterMask": "1",
- "PublicDescription": "Number of machine clears (nukes) of any type.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC3",
- "UMask": "0x4",
- "BriefDescription": "Self-modifying code (SMC) detected.",
- "Counter": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.SMC",
- "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC4",
- "UMask": "0x0",
- "BriefDescription": "All (macro) branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "Errata": "SKL091",
- "PublicDescription": "Counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with no micro-ops executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_NONE",
+ "Invert": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC4",
- "UMask": "0x1",
- "BriefDescription": "Conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.CONDITIONAL",
- "Errata": "SKL091",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1_UOP_EXEC",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x2",
- "BriefDescription": "Direct and indirect near call instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_CALL",
- "Errata": "SKL091",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts both direct and indirect near call instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x4",
- "BriefDescription": "All (macro) branch instructions retired.",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES_PEBS",
- "Errata": "SKL091",
- "PublicDescription": "This is a precise version of BR_INST_RETIRED.ALL_BRANCHES that counts all (macro) branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x8",
- "BriefDescription": "Return instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_RETURN",
- "Errata": "SKL091",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts return instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4_UOPS_EXEC",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x10",
- "BriefDescription": "Counts all not taken macro branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NOT_TAKEN",
- "Errata": "SKL091",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts not taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x20",
- "BriefDescription": "Taken branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
- "Errata": "SKL091",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts taken branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "PublicDescription": "Number of uops to be executed per-thread each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC4",
- "UMask": "0x40",
- "BriefDescription": "Counts the number of far branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.FAR_BRANCH",
- "Errata": "SKL091",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts far branch instructions retired.",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xB1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0xC5",
- "UMask": "0x0",
- "BriefDescription": "All mispredicted macro branch instructions retired.",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Uops that Resource Allocation Table (RAT) issues to Reservation Station (RS)",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x1",
- "BriefDescription": "Mispredicted conditional branch instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.CONDITIONAL",
- "PublicDescription": "This is a precise version (that is, uses PEBS) of the event that counts mispredicted conditional branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of slow LEA uops being allocated. A uop is generally considered SlowLea if it has 3 sources (e.g. 2 sources + immediate) regardless if as a result of LEA instruction or not.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.SLOW_LEA",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
},
{
- "EventCode": "0xC5",
- "UMask": "0x2",
- "BriefDescription": "Mispredicted direct and indirect near call instructions retired.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.NEAR_CALL",
- "PublicDescription": "This event counts both taken and not taken retired mispredicted direct and indirect near calls, including both register and memory indirect.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles when Resource Allocation Table (RAT) does not issue Uops to Reservation Station (RS) for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
},
{
- "EventCode": "0xC5",
- "UMask": "0x4",
- "BriefDescription": "Mispredicted macro branch instructions retired.",
- "PEBS": "2",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES_PEBS",
- "PublicDescription": "This is a precise version of BR_MISP_RETIRED.ALL_BRANCHES that counts all mispredicted macro branch instructions retired.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3"
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "EventCode": "0x0E",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
- "EventCode": "0xC5",
- "UMask": "0x20",
- "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "PEBS": "1",
- "Counter": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
- "PublicDescription": "Number of near branch instructions retired that were mispredicted and taken.",
- "SampleAfterValue": "400009",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Number of macro-fused uops retired. (non precise)",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MACRO_FUSED",
+ "PublicDescription": "Counts the number of macro-fused uops retired. (non precise)",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
},
{
- "EventCode": "0xCC",
- "UMask": "0x20",
- "BriefDescription": "Increments whenever there is an update to the LBR array.",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.LBR_INSERTS",
- "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.RETIRE_SLOTS",
+ "PublicDescription": "Counts the retirement slots used.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xCC",
- "UMask": "0x40",
- "BriefDescription": "Number of retired PAUSE instructions (that do not end up with a VMExit to the VMM; TSX aborted Instructions may be counted). This event is not supported on first SKL and KBL products.",
- "Counter": "0,1,2,3",
- "EventName": "ROB_MISC_EVENTS.PAUSE_INST",
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
- "EventCode": "0xE6",
- "UMask": "0x1",
- "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
- "Counter": "0,1,2,3",
- "EventName": "BACLEARS.ANY",
- "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "16",
+ "EventCode": "0xC2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
}
-]
\ No newline at end of file
+]
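
A minimal illustrative sketch (not part of the commit; the helper name and standalone usage are assumptions) of how a pmu-events entry such as CYCLE_ACTIVITY.CYCLES_L2_MISS above (EventCode 0xA3, UMask 0x1, CounterMask 1) maps onto a raw Intel IA32_PERFEVTSELx-style encoding, assuming the documented layout: event select in bits 7:0, unit mask in bits 15:8, edge detect at bit 18, invert at bit 23, counter mask in bits 31:24:

# build_raw_config is a hypothetical helper, not part of the perf tool
def build_raw_config(event):
    """Encode a pmu-events style dict into a raw IA32_PERFEVTSELx-style config value."""
    cfg = int(event.get("EventCode", "0"), 16) & 0xff          # event select, bits 7:0
    cfg |= (int(event.get("UMask", "0"), 16) & 0xff) << 8      # unit mask, bits 15:8
    cfg |= (int(event.get("EdgeDetect", "0")) & 0x1) << 18     # edge detect, bit 18
    cfg |= (int(event.get("Invert", "0")) & 0x1) << 23         # invert, bit 23
    cfg |= (int(event.get("CounterMask", "0")) & 0xff) << 24   # counter mask, bits 31:24
    return cfg

cycles_l2_miss = {"EventCode": "0xA3", "UMask": "0x1", "CounterMask": "1"}
print(hex(build_raw_config(cycles_l2_miss)))                   # 0x10001a3

Once these JSON files are built into perf, the same event can be requested by name (e.g. perf stat -e cycle_activity.cycles_l2_miss) or spelled out through the cpu PMU format specifiers, e.g. perf stat -e cpu/event=0xa3,umask=0x1,cmask=1/.
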
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
index b4f91137f40c..fa2f7f126a30 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
@@ -1,395 +1,1570 @@
[
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Frontend_Bound",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound."
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Frontend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-ops (uops). Ideally the Frontend can issue 4 uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "C3 residency percent per core",
+ "MetricExpr": "cstate_core@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Core_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Bad_Speculation",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example."
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Bad_Speculation_SMT",
- "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "Uncore frequency per die [GHZ]",
+ "MetricExpr": "tma_info_socket_clks / #num_dies / duration_time / 1e9",
+ "MetricGroup": "SoC",
+ "MetricName": "UNCORE_FREQ"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_0 + UOPS_DISPATCHED_PORT.PORT_1 + UOPS_DISPATCHED_PORT.PORT_5 + UOPS_DISPATCHED_PORT.PORT_6) / tma_info_slots",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * (FP_ASSIST.ANY + OTHER_ASSISTS.ANY) / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: OTHER_ASSISTS.ANY",
+ "ScaleUnit": "100%"
},
{
"BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * cycles)) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles)) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)) )",
- "MetricGroup": "TopdownL1",
- "MetricName": "Backend_Bound",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound."
+ "MetricExpr": "1 - tma_frontend_bound - (UOPS_ISSUED.ANY + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "1 - ( (IDQ_UOPS_NOT_DELIVERED.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) + (UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) )",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Backend_Bound_SMT",
- "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "(UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (INT_MISC.RECOVERY_CYCLES_ANY / 2 if #SMT_on else INT_MISC.RECOVERY_CYCLES)) / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * cycles)",
- "MetricGroup": "TopdownL1",
- "MetricName": "Retiring",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. "
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. SMT version; use when SMT is enabled and measuring per logical CPU.",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))",
- "MetricGroup": "TopdownL1_SMT",
- "MetricName": "Retiring_SMT",
- "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum 4 uops retired per cycle has been achieved. Maximizing Retiring typically increases the Instruction-Per-Cycle metric. Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Microcode assists are categorized under Retiring. They hurt performance and can often be avoided. SMT version; use when SMT is enabled and measuring per logical CPU."
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
- "MetricExpr": "INST_RETIRED.ANY / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "TopDownL1",
- "MetricName": "IPC"
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Uops Per Instruction",
- "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
- "MetricGroup": "Pipeline;Retire",
- "MetricName": "UPI"
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Instruction per taken branch",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;Fetch_BW;PGO",
- "MetricName": "IpTB"
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(44 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 44 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS_PS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_backend_bound - tma_memory_bound",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "44 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT + MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM * (1 - OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE / (OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HIT_PS. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
},
{
- "BriefDescription": "Branch instructions per taken branch. ",
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.ALL_DSB_CYCLES_ANY_UOPS - IDQ.ALL_DSB_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "min(9 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(9 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(110 * tma_info_average_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_MISS.REMOTE_HITM + OFFCORE_RESPONSE.PF_L2_RFO.L3_MISS.REMOTE_HITM) + 47.5 * tma_info_average_frequency * (OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.HITM_OTHER_CORE + OFFCORE_RESPONSE.PF_L2_RFO.L3_HIT.HITM_OTHER_CORE)) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_HITM_PS;OFFCORE_RESPONSE.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_info_load_miss_real_latency * cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@ / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "tma_frontend_bound - tma_fetch_latency",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_512b",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "IDQ_UOPS_NOT_DELIVERED.CORE / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions",
+ "MetricExpr": "tma_light_operations * UOPS_RETIRED.MACRO_FUSED / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fused_instructions",
+ "MetricThreshold": "tma_fused_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring fused instructions -- where one uop can represent multiple contiguous instructions. The instruction pairs of CMP+JCC or DEC+JCC are commonly used examples.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "(UOPS_RETIRED.RETIRE_SLOTS + UOPS_RETIRED.MACRO_FUSED - INST_RETIRED.ANY) / tma_info_slots",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "(ICACHE_16B.IFDATA_STALL + 2 * cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@) / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_big_code",
+ "MetricThreshold": "tma_info_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_branching_overhead"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
"MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
- "MetricGroup": "Branches;PGO",
- "MetricName": "BpTB"
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
},
{
- "BriefDescription": "Rough Estimation of fraction of fetched lines bytes that were likely (includes speculatively fetches) consumed by program instructions",
- "MetricExpr": "min( 1 , UOPS_ISSUED.ANY / ( (UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY) * 64 * ( ICACHE_64B.IFTAG_HIT + ICACHE_64B.IFTAG_MISS ) / 4.1 ) )",
- "MetricGroup": "PGO;IcMiss",
- "MetricName": "IFetch_Line_Utilization"
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_mispredictions, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
- "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
- "MetricGroup": "DSB;Fetch_BW",
- "MetricName": "DSB_Coverage"
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.CONDITIONAL + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_branching_overhead",
+ "MetricThreshold": "tma_info_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_big_code"
},
{
- "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
- "MetricExpr": "1 / (INST_RETIRED.ANY / cycles)",
- "MetricGroup": "Pipeline;Summary",
- "MetricName": "CPI"
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_callret"
},
{
"BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
"MetricExpr": "CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "CLKS"
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * cycles",
- "MetricGroup": "TopDownL1",
- "MetricName": "SLOTS"
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_code_stlb_mpki"
},
{
- "BriefDescription": "Total issue-pipeline slots (per-Physical Core)",
- "MetricExpr": "4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "TopDownL1_SMT",
- "MetricName": "SLOTS_SMT"
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.NOT_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_nt"
},
{
- "BriefDescription": "Instructions per Load (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpL"
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "(BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_tk"
},
{
- "BriefDescription": "Instructions per Store (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
- "MetricGroup": "Instruction_Type",
- "MetricName": "IpS"
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_core_bound_likely",
+ "MetricThreshold": "tma_info_core_bound_likely > 0.5"
},
{
- "BriefDescription": "Instructions per Branch (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
- "MetricGroup": "Branches;Instruction_Type",
- "MetricName": "IpB"
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "(CPU_CLK_UNHALTED.THREAD / 2 * (1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK) if #core_wide < 1 else (CPU_CLK_UNHALTED.THREAD_ANY / 2 if #SMT_on else tma_info_clks))",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
},
{
- "BriefDescription": "Instruction per (near) call (lower number means higher occurance rate)",
- "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
- "MetricGroup": "Branches",
- "MetricName": "IpCall"
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (UNC_M_CAS_COUNT.RD + UNC_M_CAS_COUNT.WR) / 1e9 / duration_time",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
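The tma_info_dram_bw_use expression above is plain bytes-per-second arithmetic: every UNC_M_CAS_COUNT.RD/WR event corresponds to one 64-byte cache line transferred. A minimal sketch of that calculation in Python, with hypothetical counter values and a one-second interval standing in for perf's duration_time:

# Sketch of the tma_info_dram_bw_use arithmetic shown above.
# Counter values and the measurement interval are hypothetical placeholders.
cas_rd = 40_000_000   # UNC_M_CAS_COUNT.RD, summed across memory channels
cas_wr = 15_000_000   # UNC_M_CAS_COUNT.WR
duration_time = 1.0   # seconds
dram_bw_gbs = 64 * (cas_rd + cas_wr) / 1e9 / duration_time
print(f"tma_info_dram_bw_use = {dram_bw_gbs:.2f} GB/s")  # 3.52 GB/s with these numbers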
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / (IDQ.DSB_UOPS + IDQ.MITE_UOPS + IDQ.MS_UOPS)",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 4 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_misses, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_dsb_misses",
+ "MetricThreshold": "tma_info_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / DSB2MITE_SWITCHES.COUNT",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_fb_hpki"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_fetch_upc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
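The weights in the tma_info_flopc and tma_info_gflops expressions above follow from vector width: one FLOP per scalar op, two per 128-bit packed-double op, and so on up to sixteen per 512-bit packed-single op. A small Python sketch of that weighting, again with hypothetical event counts rather than measured data:

# Sketch of the FLOP weighting used by tma_info_gflops above.
# FP_ARITH_INST_RETIRED.* counts and the interval are hypothetical placeholders.
fp = {
    "SCALAR_SINGLE": 1_000_000, "SCALAR_DOUBLE": 500_000,
    "128B_PACKED_DOUBLE": 200_000, "128B_PACKED_SINGLE": 100_000,
    "256B_PACKED_DOUBLE": 300_000, "256B_PACKED_SINGLE": 250_000,
    "512B_PACKED_DOUBLE": 50_000, "512B_PACKED_SINGLE": 25_000,
}
duration_time = 1.0   # seconds
flops = (fp["SCALAR_SINGLE"] + fp["SCALAR_DOUBLE"]
         + 2 * fp["128B_PACKED_DOUBLE"]
         + 4 * (fp["128B_PACKED_SINGLE"] + fp["256B_PACKED_DOUBLE"])
         + 8 * (fp["256B_PACKED_SINGLE"] + fp["512B_PACKED_DOUBLE"])
         + 16 * fp["512B_PACKED_SINGLE"])
print(f"tma_info_gflops = {flops / 1e9 / duration_time:.4f}")  # 0.0063 with these toy counts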
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_ic_misses",
+ "MetricThreshold": "tma_info_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@ + 2",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_instruction_fetch_bw > 20"
},
{
"BriefDescription": "Total number of retired Instructions",
"MetricExpr": "INST_RETIRED.ANY",
- "MetricGroup": "Summary",
- "MetricName": "Instructions"
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / cycles",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC"
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Reads [GB / sec]",
+ "MetricExpr": "(UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3) * 4 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;Mem;Server;SoC",
+ "MetricName": "tma_info_io_read_bw"
},
{
- "BriefDescription": "Instructions Per Cycle (per physical core)",
- "MetricExpr": "INST_RETIRED.ANY / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "SMT",
- "MetricName": "CoreIPC_SMT"
+ "BriefDescription": "Average IO (network or disk) Bandwidth Use for Writes [GB / sec]",
+ "MetricExpr": "(UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3) * 4 / 1e9 / duration_time",
+ "MetricGroup": "IoBW;Mem;Server;SoC",
+ "MetricName": "tma_info_io_write_bw"
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / cycles",
- "MetricGroup": "FLOPS",
- "MetricName": "FLOPc"
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
},
{
- "BriefDescription": "Floating Point Operations Per Cycle",
- "MetricExpr": "(( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))",
- "MetricGroup": "FLOPS_SMT",
- "MetricName": "FLOPc_SMT"
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
},
{
- "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is at least 1 uop executed)",
- "MetricExpr": "UOPS_EXECUTED.THREAD / (( UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 ) if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
- "MetricGroup": "Pipeline",
- "MetricName": "ILP"
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx512",
+ "MetricThreshold": "tma_info_iparith_avx512 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * INT_MISC.RECOVERY_CYCLES ) / (4 * cycles))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * cycles)) ) * (4 * cycles) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "Branch_Misprediction_Cost"
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * (FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE) + 8 * (FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
},
{
- "BriefDescription": "Branch Misprediction Cost: Fraction of TopDown slots wasted per non-speculative branch misprediction (jeclear)",
- "MetricExpr": "( ((BR_MISP_RETIRED.ALL_BRANCHES / ( BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT )) * (( UOPS_ISSUED.ANY - UOPS_RETIRED.RETIRE_SLOTS + 4 * (( INT_MISC.RECOVERY_CYCLES_ANY / 2 )) ) / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))))) + (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) * (( INT_MISC.CLEAR_RESTEER_CYCLES + 9 * BACLEARS.ANY ) / cycles) / (4 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE / (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )))) ) * (4 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) ))) / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts_SMT",
- "MetricName": "Branch_Misprediction_Cost_SMT"
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "tma_info_instructions / (UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * cpu@BR_MISP_EXEC.ALL_BRANCHES\\,umask\\=0xE4@)",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
},
{
- "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear)",
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
"MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
- "MetricGroup": "BrMispredicts",
- "MetricName": "IpMispredict"
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
},
{
- "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
- "MetricExpr": "( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )",
- "MetricGroup": "SMT",
- "MetricName": "CORE_CLKS"
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
},
{
- "BriefDescription": "Actual Average Latency for L1 data-cache miss demand loads (in core cycles)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / ( MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT )",
- "MetricGroup": "Memory_Bound;Memory_Lat",
- "MetricName": "Load_Miss_Real_Latency"
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_ipswpf",
+ "MetricThreshold": "tma_info_ipswpf < 100"
},
{
- "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)",
- "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
- "MetricGroup": "Memory_Bound;Memory_BW",
- "MetricName": "MLP"
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 9",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_lcp"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * cycles )",
- "MetricGroup": "TLB",
- "MetricName": "Page_Walks_Utilization",
- "MetricConstraint": "NO_NMI_WATCHDOG"
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
},
{
- "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
- "MetricExpr": "( ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING ) / ( 2 * (( ( CPU_CLK_UNHALTED.THREAD / 2 ) * ( 1 + CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_XCLK ) )) )",
- "MetricGroup": "TLB_SMT",
- "MetricName": "Page_Walks_Utilization_SMT"
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - (BR_INST_RETIRED.CONDITIONAL - BR_INST_RETIRED.NOT_TAKEN) - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_jump"
},
{
- "BriefDescription": "Average data fill bandwidth to the L1 data cache [GB / sec]",
- "MetricExpr": "64 * L1D.REPLACEMENT / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L1D_Cache_Fill_BW"
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
},
{
- "BriefDescription": "Average data fill bandwidth to the L2 cache [GB / sec]",
- "MetricExpr": "64 * L2_LINES_IN.ALL / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L2_Cache_Fill_BW"
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Fill_BW"
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
},
{
- "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
- "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1000000000 / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "L3_Cache_Access_BW"
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
},
{
"BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L1MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
},
{
- "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI"
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki_load"
},
{
- "BriefDescription": "L2 cache misses per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * L2_RQSTS.MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2MPKI_All"
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
+ "MetricExpr": "1e3 * L2_LINES_OUT.NON_SILENT / tma_info_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_l2_evictions_nonsilent_pki"
+ },
+ {
+ "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
+ "MetricExpr": "1e3 * L2_LINES_OUT.SILENT / tma_info_instructions",
+ "MetricGroup": "L2Evicts;Mem;Server",
+ "MetricName": "tma_info_l2_evictions_silent_pki"
},
{
"BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
- "MetricExpr": "1000 * ( L2_RQSTS.REFERENCES - L2_RQSTS.MISS ) / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L2HPKI_All"
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
},
{
"BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
- "MetricExpr": "1000 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
- "MetricGroup": "Cache_Misses",
- "MetricName": "L3MPKI"
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
},
{
- "BriefDescription": "Rate of silent evictions from the L2 cache per Kilo instruction where the evicted lines are dropped (no writeback to L3 or memory)",
- "MetricExpr": "1000 * L2_LINES_OUT.SILENT / INST_RETIRED.ANY",
- "MetricGroup": "",
- "MetricName": "L2_Evictions_Silent_PKI"
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
},
{
- "BriefDescription": "Rate of non silent evictions from the L2 cache per Kilo instruction",
- "MetricExpr": "1000 * L2_LINES_OUT.NON_SILENT / INST_RETIRED.ANY",
- "MetricGroup": "",
- "MetricName": "L2_Evictions_NonSilent_PKI"
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
},
{
- "BriefDescription": "Average CPU Utilization",
- "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / msr@tsc@",
- "MetricGroup": "Summary",
- "MetricName": "CPU_Utilization"
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
},
{
- "BriefDescription": "Giga Floating Point Operations Per Second",
- "MetricExpr": "( (( 1 * ( FP_ARITH_INST_RETIRED.SCALAR_SINGLE + FP_ARITH_INST_RETIRED.SCALAR_DOUBLE ) + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * ( FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE ) + 8 * ( FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE + FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE ) + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE )) / 1000000000 ) / duration_time",
- "MetricGroup": "FLOPS;Summary",
- "MetricName": "GFLOPs"
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_load_stlb_mpki"
},
{
- "BriefDescription": "Average Frequency Utilization relative nominal frequency",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD / CPU_CLK_UNHALTED.REF_TSC",
- "MetricGroup": "Power",
- "MetricName": "Turbo_Utilization"
+ "BriefDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]",
+ "MetricExpr": "1e9 * (UNC_M_RPQ_OCCUPANCY / UNC_M_RPQ_INSERTS) / imc_0@event\\=0x0@",
+ "MetricGroup": "Mem;MemoryLat;Server;SoC",
+ "MetricName": "tma_info_mem_dram_read_latency",
+ "PublicDescription": "Average latency of data read request to external DRAM memory [in nanoseconds]. Accounts for demand loads and L1/L2 data-read prefetches"
},
{
- "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
- "MetricExpr": "1 - CPU_CLK_THREAD_UNHALTED.ONE_THREAD_ACTIVE / ( CPU_CLK_THREAD_UNHALTED.REF_XCLK_ANY / 2 ) if #SMT_on else 0",
- "MetricGroup": "SMT;Summary",
- "MetricName": "SMT_2T_Utilization"
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD@thresh\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
},
{
- "BriefDescription": "Fraction of cycles spent in Kernel mode",
- "MetricExpr": "CPU_CLK_UNHALTED.THREAD:k / CPU_CLK_UNHALTED.THREAD",
- "MetricGroup": "Summary",
- "MetricName": "Kernel_Utilization"
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "1e9 * (UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD / UNC_CHA_TOR_INSERTS.IA_MISS_DRD) / (tma_info_socket_clks / duration_time)",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
},
{
- "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
- "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_BW_Use"
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_memory_bandwidth",
+ "MetricThreshold": "tma_info_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
},
{
- "BriefDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "1000000000 * ( cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x35\\\\\\,umask\\=0x21@ ) / ( cha_0@event\\=0x0@ / duration_time )",
- "MetricGroup": "Memory_Lat",
- "MetricName": "DRAM_Read_Latency"
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_memory_data_tlbs",
+ "MetricThreshold": "tma_info_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store"
},
{
- "BriefDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches",
- "MetricExpr": "cha@event\\=0x36\\\\\\,umask\\=0x21@ / cha@event\\=0x36\\\\\\,umask\\=0x21\\\\\\,thresh\\=1@",
- "MetricGroup": "Memory_BW",
- "MetricName": "DRAM_Parallel_Reads"
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_memory_latency",
+ "MetricThreshold": "tma_info_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency"
},
{
- "BriefDescription": "Socket actual clocks when any core is active on that socket",
- "MetricExpr": "cha_0@event\\=0x0@",
- "MetricGroup": "",
- "MetricName": "Socket_CLKS"
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_mispredictions",
+ "MetricThreshold": "tma_info_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_mispredicts_resteers"
},
{
- "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions. )",
- "MetricExpr": "INST_RETIRED.ANY / ( BR_INST_RETIRED.FAR_BRANCH / 2 )",
- "MetricGroup": "",
- "MetricName": "IpFarBranch"
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
},
{
- "BriefDescription": "C3 residency percent per core",
- "MetricExpr": "(cstate_core@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Core_Residency"
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING + EPT.WALK_PENDING) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
},
{
- "BriefDescription": "C6 residency percent per core",
- "MetricExpr": "(cstate_core@c6\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0",
+ "MetricExpr": "(CORE_POWER.LVL0_TURBO_LICENSE / 2 / tma_info_core_clks if #SMT_on else CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_clks)",
"MetricGroup": "Power",
- "MetricName": "C6_Core_Residency"
+ "MetricName": "tma_info_power_license0_utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
},
{
- "BriefDescription": "C7 residency percent per core",
- "MetricExpr": "(cstate_core@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
+ "MetricExpr": "(CORE_POWER.LVL1_TURBO_LICENSE / 2 / tma_info_core_clks if #SMT_on else CORE_POWER.LVL1_TURBO_LICENSE / tma_info_core_clks)",
"MetricGroup": "Power",
- "MetricName": "C7_Core_Residency"
+ "MetricName": "tma_info_power_license1_utilization",
+ "MetricThreshold": "tma_info_power_license1_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
},
{
- "BriefDescription": "C2 residency percent per package",
- "MetricExpr": "(cstate_pkg@c2\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
+ "MetricExpr": "(CORE_POWER.LVL2_TURBO_LICENSE / 2 / tma_info_core_clks if #SMT_on else CORE_POWER.LVL2_TURBO_LICENSE / tma_info_core_clks)",
"MetricGroup": "Power",
- "MetricName": "C2_Pkg_Residency"
+ "MetricName": "tma_info_power_license2_utilization",
+ "MetricThreshold": "tma_info_power_license2_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
},
{
- "BriefDescription": "C3 residency percent per package",
- "MetricExpr": "(cstate_pkg@c3\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C3_Pkg_Residency"
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / cpu@UOPS_RETIRED.RETIRE_SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
},
{
- "BriefDescription": "C6 residency percent per package",
- "MetricExpr": "(cstate_pkg@c6\\-residency@ / msr@tsc@) * 100",
- "MetricGroup": "Power",
- "MetricName": "C6_Pkg_Residency"
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "4 * tma_info_core_clks",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
},
{
- "BriefDescription": "C7 residency percent per package",
- "MetricExpr": "(cstate_pkg@c7\\-residency@ / msr@tsc@) * 100",
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / (CPU_CLK_UNHALTED.REF_XCLK_ANY / 2) if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "Socket actual clocks when any core is active on that socket",
+ "MetricExpr": "cha_0@event\\=0x0@",
+ "MetricGroup": "SoC",
+ "MetricName": "tma_info_socket_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_store_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
"MetricGroup": "Power",
- "MetricName": "C7_Pkg_Residency"
+ "MetricName": "tma_info_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 6"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + cpu@L1D_PEND_MISS.FB_FULL\\,cmask\\=1@) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "17 * tma_info_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "tma_retiring - tma_heavy_operations",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "(UOPS_DISPATCHED_PORT.PORT_2 + UOPS_DISPATCHED_PORT.PORT_3 + UOPS_DISPATCHED_PORT.PORT_7 - UOPS_DISPATCHED_PORT.PORT_4) / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory",
+ "MetricExpr": "59.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Server;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_local_dram",
+ "MetricThreshold": "tma_local_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from local memory. Caching will improve the latency and increase performance. Sample with: MEM_LOAD_L3_MISS_RETIRED.LOCAL_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricExpr": "(12 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (11 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "tma_bad_speculation - tma_branch_mispredicts",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_info_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.ALL_MITE_CYCLES_ANY_UOPS - IDQ.ALL_MITE_CYCLES_4_UOPS) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "2 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused",
+ "MetricExpr": "tma_light_operations * (BR_INST_RETIRED.ALL_BRANCHES - UOPS_RETIRED.MACRO_FUSED) / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_non_fused_branches",
+ "MetricThreshold": "tma_non_fused_branches > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions that were not fused. Non-conditional branches like direct JMP or CALL would count here. Can be used to examine fusible conditional jumps that were not fused.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / UOPS_RETIRED.RETIRE_SLOTS",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_fused_instructions + tma_non_fused_branches + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED_PORT.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_2 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_2",
+ "MetricThreshold": "tma_port_2 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 2 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_3 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_load_op_utilization_group",
+ "MetricName": "tma_port_3",
+ "MetricThreshold": "tma_port_3 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 3 ([SNB+]Loads and Store-address; [ICL+] Loads). Sample with: UOPS_DISPATCHED_PORT.PORT_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data)",
+ "MetricExpr": "tma_store_op_utilization",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_issueSpSt;tma_store_op_utilization_group",
+ "MetricName": "tma_port_4",
+ "MetricThreshold": "tma_port_4 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 4 (Store-data). Sample with: UOPS_DISPATCHED_PORT.PORT_4. Related metrics: tma_split_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED_PORT.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address)",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_7 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_store_op_utilization_group",
+ "MetricName": "tma_port_7",
+ "MetricThreshold": "tma_port_7 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 7 ([HSW+]simple Store-address). Sample with: UOPS_DISPATCHED_PORT.PORT_7",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((EXE_ACTIVITY.EXE_BOUND_0_PORTS + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_NONE / 2 if #SMT_on else CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_1 - UOPS_EXECUTED.CORE_CYCLES_GE_2) / 2 if #SMT_on else EXE_ACTIVITY.1_PORTS_UTIL) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "((UOPS_EXECUTED.CORE_CYCLES_GE_2 - UOPS_EXECUTED.CORE_CYCLES_GE_3) / 2 if #SMT_on else EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise).",
+ "MetricExpr": "(UOPS_EXECUTED.CORE_CYCLES_GE_3 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_3) / tma_info_core_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(89.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM + 89.5 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Server;Snoop;TopdownL5;tma_L5_group;tma_issueSyncxn;tma_mem_latency_group",
+ "MetricName": "tma_remote_cache",
+ "MetricThreshold": "tma_remote_cache > 0.05 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote cache in other sockets including synchronizations issues. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_HITM_PS;MEM_LOAD_L3_MISS_RETIRED.REMOTE_FWD_PS. Related metrics: tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_machine_clears",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory",
+ "MetricExpr": "127 * tma_info_average_frequency * MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Server;Snoop;TopdownL5;tma_L5_group;tma_mem_latency_group",
+ "MetricName": "tma_remote_dram",
+ "MetricThreshold": "tma_remote_dram > 0.1 & (tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling loads from remote memory. This is caused often due to non-optimal NUMA allocations. #link to NUMA article. Sample with: MEM_LOAD_L3_MISS_RETIRED.REMOTE_DRAM_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "UOPS_RETIRED.RETIRE_SLOTS / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "PARTIAL_RAT_STALLS.SCOREBOARD / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: PARTIAL_RAT_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "(OFFCORE_REQUESTS_BUFFER.SQ_FULL / 2 if #SMT_on else OFFCORE_REQUESTS_BUFFER.SQ_FULL) / tma_info_core_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricConstraint": "NO_GROUP_EVENTS_NMI",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 11 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "UOPS_DISPATCHED_PORT.PORT_4 / tma_info_core_clks",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "9 * BACLEARS.ANY / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
}
]
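The metric entries above encode their formulas as MetricExpr strings that perf's metric engine evaluates from raw event counts, including the "<expr> if #SMT_on else <expr>" conditional form. As a minimal illustrative sketch only (not part of the patch), the following Python mirrors how the tma_ports_utilized_0 expression above would be computed; the event counts and the smt_on flag are hypothetical values chosen for the example.

# Illustrative evaluation of the tma_ports_utilized_0 MetricExpr:
#   (UOPS_EXECUTED.CORE_CYCLES_NONE / 2 if #SMT_on
#    else CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_core_clks
def tma_ports_utilized_0(counts, smt_on):
    if smt_on:
        # With SMT enabled, the core-level count is halved to a per-thread value.
        numer = counts["UOPS_EXECUTED.CORE_CYCLES_NONE"] / 2
    else:
        numer = counts["CYCLE_ACTIVITY.STALLS_TOTAL"] - counts["CYCLE_ACTIVITY.STALLS_MEM_ANY"]
    return numer / counts["tma_info_core_clks"]

# Hypothetical sample counts: roughly 18% of core clocks executed no uops on any port.
sample = {
    "UOPS_EXECUTED.CORE_CYCLES_NONE": 360000,
    "CYCLE_ACTIVITY.STALLS_TOTAL": 250000,
    "CYCLE_ACTIVITY.STALLS_MEM_ANY": 70000,
    "tma_info_core_clks": 1000000,
}
print(tma_ports_utilized_0(sample, smt_on=True))  # 0.18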
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json
new file mode 100644
index 000000000000..543dfc1e5ad7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-cache.json
@@ -0,0 +1,10649 @@
+[
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Intermediate bypass Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the intermediate bypass.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Not Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass; Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not.; Filter for transactions that succeeded in taking the full bypass.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Clockticks of the uncore caching & home agent (CHA)",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the clock controlling the uncore caching and home agent (CHA).",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xC0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C1 State",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C1_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C1 Transition",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C6 State",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C6_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; C6 Transition",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core PMA Events; GV",
+ "EventCode": "0x17",
+ "EventName": "UNC_CHA_CORE_PMA.GV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Cycle with Multiple Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xe2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Single Snoop",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xe1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Any Snoop to Remote Node",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xe4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Single Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Core Request to Remote Node",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Single Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Eviction to Remote Node",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Multiple External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; Single External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued; External Snoop to Remote Node",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1F",
+ "EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
+ "EventCode": "0x53",
+ "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
+ "EventCode": "0x54",
+ "EventName": "UNC_CHA_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "EventCode": "0xAE",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "EventCode": "0xAE",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; Shared hit and op is RdInvOwn, RdInv, Inv*",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoE",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Hits in HitMe Cache; op is WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "EventCode": "0x5F",
+ "EventName": "UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is RdCode, RdData, RdDataMigratory, RdCur, RdInvOwn, RdInv, Inv*",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times HitMe Cache is accessed; op is WbMtoE, WbMtoI, WbPushMtoI, WbFlush, or WbMtoS",
+ "EventCode": "0x5E",
+ "EventName": "UNC_CHA_HITME_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; No SF/LLC HitS/F and op is RdInvOwn",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; op is RdCode, RdData, RdDataMigratory, RdCur, RdInv, Inv*",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of Misses in HitMe Cache; SF/LLC HitS/F and op is RdInvOwn",
+ "EventCode": "0x60",
+ "EventName": "UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Deallocate HitME$ on Reads without RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a local request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "PerPkg": "1",
+ "PublicDescription": "Received RspFwdI* for a local request, but converted HitME$ to SF entry",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache on RdInvOwn even if not RspFwdI*",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; op is RspIFwd or RspIFwdWb for a remote request",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "PublicDescription": "Updated HitME$ on RspFwdI* or local HitM/E received for a remote request",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts the number of Allocate/Update to HitMe Cache; Update HitMe Cache to SHARed",
+ "EventCode": "0x61",
+ "EventName": "UNC_CHA_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued; ISOCH",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+        "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to any of the memory controller channels.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Full Line MIG",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Full Line",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Partial Non-ISOCH",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; Partial MIG",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Writes Issued to the iMC by the HA; ISOCH Partial",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of writes issued from the HA into the memory controller. This counts for all four channels. It can be filtered by full/partial and ISOCH/non-ISOCH.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.INVITOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations dropped due to IODC Full",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.IODCFULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Counts Number of times IODC entry allocation is attempted; Number of IODC allocations dropped due to OSB gate",
+ "EventCode": "0x62",
+ "EventName": "UNC_CHA_IODC_ALLOC.OSBGATED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to any reason",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to conflicting transaction",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.SNPOUT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoE",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbMtoI",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counts number of IODC deallocations; IODC deallocated due to WbPushMtoI",
+ "EventCode": "0x63",
+ "EventName": "UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Moved to Cbo section",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that look up the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that look up the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Read transactions",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Local",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that look up the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Remote",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that look up the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.",
+ "UMask": "0x91",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; External Snoop Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that look up the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for only snoop requests coming from the remote socket(s) through the IPQ.",
+ "UMask": "0x9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Write Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that look up the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Writeback transactions from L2 to the LLC. This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x5",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in E State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in F State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in M State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Local - Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in E State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in F State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in M State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Remote - Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.REMOTE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in F State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in M state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized; Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; CV0 Prefetch Miss",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; CV0 Prefetch Victim",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state.",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+        "PublicDescription": "Counts when an RFO (the Read for Ownership issued before a write) request hits a cacheline in the S (Shared) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc; Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Miscellaneous events in the Cbo.; Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "OSB Snoop Broadcast",
+ "EventCode": "0x55",
+ "EventName": "UNC_CHA_OSB",
+ "PerPkg": "1",
+ "PublicDescription": "Count of OSB snoop broadcasts. Counts by 1 per request causing OSB snoops to be broadcast. Does not count all the snoops generated by OSB.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC0_SMI2",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC1_SMI3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC2_SMI4",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; EDC3_SMI5",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; MC0_SMI0",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty; MC1_SMI1",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue.; Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Remote requests for exclusive ownership of a cache line without receiving data",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+        "PublicDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a unit on this socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a unit on this socket made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Read requests from a remote socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests coming from a remote socket made into the CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write).",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write requests",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Write Requests from a unit on this socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests coming from a unit on this socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Write requests from a remote socket",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+        "PublicDescription": "Counts write requests coming from a remote socket made into this CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IPQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Ingress (from CMS) Allocations; PRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; RRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations; WBQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; AD REQ on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; AD RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Non UPI AK Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL NCB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL NCS on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL RSP on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; BL WB on VN0",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Non UPI IV Request",
+ "EventCode": "0x22",
+ "EventName": "UNC_CHA_RxC_IPQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Allow Snoop",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; ANY0",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; HA",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; LLC Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; PhyAddr Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; SF Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress Probe Queue Rejects; Victim",
+ "EventCode": "0x23",
+ "EventName": "UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; AD REQ on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; AD RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; Non UPI AK Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL NCB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL NCS on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; BL WB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; Non UPI IV Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; AD REQ on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; AD RSP on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Non UPI AK Request",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL NCB on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL NCS on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL RSP on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; BL WB on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; Non UPI IV Request",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; ANY0",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects; HA",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; ANY0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries; HA",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+        "PublicDescription": "Number of times a transaction flowing through the ISMQ had to retry. Transactions pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IPQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; RRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy; WBQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; AD REQ on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; AD RSP on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Non UPI AK Request",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL NCB on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL NCS on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL RSP on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; BL WB on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Non UPI IV Request",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Allow Snoop",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; ANY0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; HA",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Merging these two together to make room for ANY_REJECT_*0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; LLC Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; PhyAddr Match",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; SF Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries; Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD REQ on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; AD RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI AK Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL NCS on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; BL WB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Non UPI IV Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Allow Snoop",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; ANY0",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; HA",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC OR SF Way",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; LLC Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; SF Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; AD REQ on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; AD RSP on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Non UPI AK Request",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL NCB on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL NCS on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL RSP on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; BL WB on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Non UPI IV Request",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Allow Snoop",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; ANY0",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; HA",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "Request Queue Retries; LLC OR SF Way",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; LLC Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; PhyAddr Match",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; SF Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries; Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; AD REQ on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; AD RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Non UPI AK Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL NCB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL NCS on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL RSP on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; BL WB on VN0",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Non UPI IV Request",
+ "EventCode": "0x26",
+ "EventName": "UNC_CHA_RxC_RRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Allow Snoop",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; ANY0",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; HA",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "RRQ Rejects; LLC OR SF Way",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; LLC Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; PhyAddr Match",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; SF Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RRQ Rejects; Victim",
+ "EventCode": "0x27",
+ "EventName": "UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the RRQ (Remote Response Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; AD REQ on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; AD RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Non UPI AK Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL NCB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL NCS on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL RSP on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; BL WB on VN0",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Non UPI IV Request",
+ "EventCode": "0x28",
+ "EventName": "UNC_CHA_RxC_WBQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Allow Snoop",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; ANY0",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; HA",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+        "BriefDescription": "WBQ Rejects; LLC OR SF Way",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; LLC Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; PhyAddr Match",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; SF Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WBQ Rejects; Victim",
+ "EventCode": "0x29",
+ "EventName": "UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a transaction flowing through the WBQ (Writeback Queue) had to retry.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, it is because a message from the other queue has higher priority.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+        "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, it is because a message from the other queue has higher priority.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, it is because a message from the other queue has higher priority.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+        "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, it is because a message from the other queue has higher priority.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the CMS Ingress. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the CMS Ingress. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the CMS Ingress. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the CMS Ingress. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the CMS Ingress. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Number of allocations into the CMS Ingress. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Occupancy event for the Ingress buffers in the CMS. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+        "PublicDescription": "Occupancy event for the Ingress buffers in the CMS. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Occupancy event for the Ingress buffers in the CMS. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Occupancy event for the Ingress buffers in the CMS. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+        "PublicDescription": "Occupancy event for the Ingress buffers in the CMS. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+        "PublicDescription": "Occupancy event for the Ingress buffers in the CMS. The Ingress is used to queue up requests received from the mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+        "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+        "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+        "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the core's cache. Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry. Does not count clean evictions such as when a core's cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; All",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast snoop for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from local sockets.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast snoops issued by the HA. This filter includes only requests coming from remote sockets.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Directed snoops for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of directed snoops issued by the HA. This filter includes only requests coming from local sockets.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Directed snoops for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of directed snoops issued by the HA. This filter includes only requests coming from remote sockets.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the local socket.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent; Broadcast or directed Snoops sent for Remote Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of snoops issued by the HA.; Counts the number of broadcast or directed snoops issued by the HA per request. This filter includes only requests coming from the remote socket.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspCnflct* Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "PerPkg": "1",
+        "PublicDescription": "Counts when a transaction with the opcode type RspCnflct* Snoop Response was received. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent. This triggers conflict resolution hardware. This covers both the opcode RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received; RspFwd",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1.; Filters for a snoop response of RspFwd to a CA request. This snoop response is only possible for RdCur when a snoop HITM/E's in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspI Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+        "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO, the Read for Ownership issued before a write, hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspIFwd Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+        "PublicDescription": "Counts when a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (Modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M, E, or F (Modified, Exclusive or Forward) states.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received : RspS",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+        "PublicDescription": "Snoop Responses Received : RspS : Counts the total number of RspI snoop responses received. Whenever snoops are issued, one or more snoop responses will be returned depending on the topology of the system. In systems larger than 2s, when multiple snoops are returned this will count all the snoops that are received. For example, if 3 snoops were issued and returned RspI, RspS, and RspSFwd; then each of these sub-events would increment by 1. : Filters for snoop responses of RspS. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "RspSFwd Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+        "PublicDescription": "Counts when a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to its home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to its home socket to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Rsp*WB Snoop Responses Received",
+ "EventCode": "0x5C",
+ "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "PerPkg": "1",
+        "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates the data was written back to its home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to an S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspCnflct",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+        "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+        "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E's in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspI",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+        "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspIFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspS",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+        "PublicDescription": "Number of snoop responses received for a Local request; Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; RspSFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its current copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*FWD*WB",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local; Rsp*WB",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snoop responses received for a Local request; Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x15",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local iA and IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_IO_IA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally initiated requests",
+ "UMask": "0x35",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from Local",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x25",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; SF/LLC Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hit (Not a Miss)",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+        "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; HITs (a hit is defined as not a miss [see below]; as a result, for any request allocated into the TOR, one of either HIT or MISS must be true)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally initiated requests from iA Cores",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : LLCPrefRFO issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; All from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; All locally generated IO traffic",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Hits from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Misses from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; ItoM misses from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "Filter": "config1=0x49033",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM request is used by IIO to request a data write without first reading the data for ownership.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RdCur misses from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RDCUR",
+ "Filter": "config1=0x43C33",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RdCur requests and miss the LLC. A RdCur request is used by IIO to read data without changing state.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; RFO misses from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests a cache line to be cached in E state with the intent to modify.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; IPQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; IRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; Miss",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.; Misses. (a miss is defined to be any transaction from the IRQ, PRQ, RRQ, IPQ or (in the victim case) the ISMQ, that required the CHA to spawn a new UPI/SMI3 request on the UPI fabric (including UPI snoops and/or any RD/WR to a local memory controller, in the event that the CHA is the home node)). Basically, if the LLC/SF/MLC complex were not able to service the request without involving another agent...it is a miss. If only IDI snoops were required, it is not a miss (that means the SF/MLC com",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; PRQ",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.REM_ALL",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.RRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x60",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.WBQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. There are a number of subevent 'filters' but only a subset of the subevent combinations are valid. Subevents that require an opcode or NID match require the Cn_MSR_PMON_BOX_FILTER.{opc, nid} field to be set. If, for example, one wanted to count DRD Local Misses, one should select MISS_OPC_MATCH and set Cn_MSR_PMON_BOX_FILTER.opc to DRD (0x182); All remotely generated requests",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x17",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x27",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hit (Not a Miss)",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; HITs (hit is defined to be not a miss [see below], as a result for any request allocated into the TOR, one of either HIT or MISS must be true)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; All locally initiated requests from iA Cores",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "Filter": "config1=0x40233",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD",
+ "Filter": "config1=0x40433",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefCRD",
+ "Filter": "config1=0x4b233",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefDRD",
+ "Filter": "config1=0x4b433",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_LlcPrefRFO",
+ "Filter": "config1=0x4b033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : LLCPrefRFO issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; All from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; All locally generated IO traffic",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Hits from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; ITOM Misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "Filter": "config1=0x49033",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO ItoM requests that miss the LLC. An ItoM is used by IIO to request a data write without first reading the data for ownership.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RDCUR misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RDCUR",
+ "Filter": "config1=0x43C33",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RdCur requests that miss the LLC. A RdCur request is used by IIO to read data without changing state.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; RFO misses from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "Filter": "config1=0x40033",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that are generated from local IO RFO requests that miss the LLC. A read for ownership (RFO) requests data to be cached in E state with the intent to modify.",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; IPQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; IRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.ALL_FROM_LOC",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; Miss",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T; Misses. (a miss is defined to be any transaction from the IRQ, PRQ, RRQ, IPQ or (in the victim case) the ISMQ, that required the CHA to spawn a new UPI/SMI3 request on the UPI fabric (including UPI snoops and/or any RD/WR to a local memory controller, in the event that the CHA is the home node)). Basically, if the LLC/SF/MLC complex were not able to service the request without involving another agent...it is a miss. If only IDI snoops were required, it is not a miss (that means the SF/MLC com",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; PRQ",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. T",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; AD REQ Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; AD RSP VN0 Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL NCB Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL NCS Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL RSP Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; BL DRS Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; VN0 Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credit Allocations; VNA Credits",
+ "EventCode": "0x38",
+ "EventName": "UNC_CHA_UPI_CREDITS_ACQUIRED.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of UPI credits acquired for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This can be used with the Credit Occupancy event in order to calculate average credit lifetime. This event supports filtering to cover the VNA/VN0 credits and the different message classes. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD REQ VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD RSP VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCB VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL NCS VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL RSP VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL DRS VN0 Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VN0_BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; AD VNA Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_AD",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "UPI Ingress Credits In Use Cycles; BL VNA Credits",
+ "EventCode": "0x3B",
+ "EventName": "UNC_CHA_UPI_CREDIT_OCCUPANCY.VNA_BL",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of UPI credits available in each cycle for either the AD or BL ring. In order to send snoops, snoop responses, requests, data, etc to the UPI agent on the ring, it is necessary to first acquire a credit for the UPI ingress buffer. This stat increments by the number of credits that are available each cycle. This can be used in conjunction with the Credit Acquired event in order to calculate average credit lifetime. This event supports filtering for the different types of credits that are available. Note that you must select the link that you would like to monitor using the link select register, and you can only monitor 1 link at a time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI; Pushed to LLC",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was received WbPushMtoI; Counts the number of times when the CHA was able to push WbPushMToI to LLC",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI; Pushed to Memory",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when the CHA was received WbPushMtoI; Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC0_SMI2",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC1_SMI3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC2_SMI4",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; EDC3_SMI5",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC0_SMI0",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty; MC1_SMI1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue.; Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspIFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response I to Fwd F/E",
+ "UMask": "0xe4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response I to Fwd M",
+ "UMask": "0xf0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspSFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response S to Fwd F/E",
+ "UMask": "0xe2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspSFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response S to Fwd M",
+ "UMask": "0xe8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Any RspHitFSE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Any Request - Response any to Hit F/S/E",
+ "UMask": "0xe1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspIFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response I to Fwd F/E",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspIFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response I to Fwd M",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspSFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response S to Fwd F/E",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspSFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response S to Fwd M",
+ "UMask": "0x48",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Core RspHitFSE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Core Request - Response any to Hit F/S/E",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response I to Fwd F/E",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspIFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response I to Fwd M",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response S to Fwd F/E",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspSFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response S to Fwd M",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; Evict RspHitFSE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; Eviction Request - Response any to Hit F/S/E",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspIFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response I to Fwd F/E",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspIFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response I to Fwd M",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspSFwdFE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response S to Fwd F/E",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspSFwdM",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response S to Fwd M",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoop Responses; External RspHitFSE",
+ "EventCode": "0x32",
+ "EventName": "UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of core cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type. This event can be filtered based on who triggered the initial snoop(s): from Evictions, Core or External (i.e. from a remote node) Requests. And the event can be filtered based on the responses: RspX_Fwd/HitY where Y is the state prior to the snoop response and X is the state following.; External Request - Response any to Hit F/S/E",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CLOCKTICKS",
+ "Deprecated": "1",
+ "EventName": "UNC_C_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_FAST_ASSERTED.HORZ",
+ "Deprecated": "1",
+ "EventCode": "0xA5",
+ "EventName": "UNC_C_FAST_ASSERTED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.ANY",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.ANY",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x91",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.REMOTE_SNOOP",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.REMOTE_SNOOP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.WRITE",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_C_LLC_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_E",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_F",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.F_STATE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x2f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_M",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.REMOTE_ALL",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_VICTIMS.TOTAL_S",
+ "Deprecated": "1",
+ "EventCode": "0x37",
+ "EventName": "UNC_C_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SRC_THRTL",
+ "Deprecated": "1",
+ "EventCode": "0xA4",
+ "EventName": "UNC_C_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.EVICT",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.HIT",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.IRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IA",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.MISS",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.PRQ",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.IO_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.REM_ALL",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.RRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.RRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x60",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WBQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_C_TOR_INSERTS.WBQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x18",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IPQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.IRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "UMask": "0x37",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IA",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "UMask": "0x31",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "UMask": "0x34",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ_HIT",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x36",
+ "EventName": "UNC_C_TOR_OCCUPANCY.PRQ_MISS",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_ACQUIRED.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x80",
+ "EventName": "UNC_H_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x82",
+ "EventName": "UNC_H_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_ACQUIRED.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x88",
+ "EventName": "UNC_H_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_H_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_ACQUIRED.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_H_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x86",
+ "EventName": "UNC_H_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_H_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_H_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "Deprecated": "1",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "Deprecated": "1",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "Deprecated": "1",
+ "EventCode": "0x57",
+ "EventName": "UNC_H_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CMS_CLOCKTICKS",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_H_CLOCK",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C1_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.C1_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C1_TRANSITION",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.C1_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C6_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.C6_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.C6_TRANSITION",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.C6_TRANSITION",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_PMA.GV",
+ "Deprecated": "1",
+ "EventCode": "0x17",
+ "EventName": "UNC_H_CORE_PMA.GV",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_GTONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "UMask": "0xe2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_ONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "UMask": "0xe1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.ANY_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.ANY_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0xe4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_GTONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_ONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.CORE_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.CORE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_ONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EVICT_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EVICT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_GTONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_ONE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_CORE_SNP.EXT_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x33",
+ "EventName": "UNC_H_CORE_SNP.EXT_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_COUNTER0_OCCUPANCY",
+ "Deprecated": "1",
+ "EventCode": "0x1F",
+ "EventName": "UNC_H_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.NO_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_DIR_LOOKUP.NO_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_LOOKUP.SNP",
+ "Deprecated": "1",
+ "EventCode": "0x53",
+ "EventName": "UNC_H_DIR_LOOKUP.SNP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.HA",
+ "Deprecated": "1",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_DIR_UPDATE.HA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_DIR_UPDATE.TOR",
+ "Deprecated": "1",
+ "EventCode": "0x54",
+ "EventName": "UNC_H_DIR_UPDATE.TOR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "Deprecated": "1",
+ "EventCode": "0xAE",
+ "EventName": "UNC_H_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "Deprecated": "1",
+ "EventCode": "0xAE",
+ "EventName": "UNC_H_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.EX_RDS",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.EX_RDS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.SHARED_OWNREQ",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.SHARED_OWNREQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.WBMTOE",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_HIT.WBMTOI_OR_S",
+ "Deprecated": "1",
+ "EventCode": "0x5F",
+ "EventName": "UNC_H_HITME_HIT.WBMTOI_OR_S",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_LOOKUP.READ",
+ "Deprecated": "1",
+ "EventCode": "0x5E",
+ "EventName": "UNC_H_HITME_LOOKUP.READ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_LOOKUP.WRITE",
+ "Deprecated": "1",
+ "EventCode": "0x5E",
+ "EventName": "UNC_H_HITME_LOOKUP.WRITE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.NOTSHARED_RDINVOWN",
+ "Deprecated": "1",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_HITME_MISS.NOTSHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.READ_OR_INV",
+ "Deprecated": "1",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_HITME_MISS.READ_OR_INV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_MISS.SHARED_RDINVOWN",
+ "Deprecated": "1",
+ "EventCode": "0x60",
+ "EventName": "UNC_H_HITME_MISS.SHARED_RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.DEALLOCATE",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.DEALLOCATE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.DEALLOCATE_RSPFWDI_LOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.RDINVOWN",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.RDINVOWN",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.RSPFWDI_REM",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.RSPFWDI_REM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HITME_UPDATE.SHARED",
+ "Deprecated": "1",
+ "EventCode": "0x61",
+ "EventName": "UNC_H_HITME_UPDATE.SHARED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA7",
+ "EventName": "UNC_H_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA9",
+ "EventName": "UNC_H_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xAB",
+ "EventName": "UNC_H_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "Deprecated": "1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_H_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "Deprecated": "1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_H_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "Deprecated": "1",
+ "EventCode": "0x59",
+ "EventName": "UNC_H_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "Deprecated": "1",
+ "EventCode": "0x59",
+ "EventName": "UNC_H_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL_MIG",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.FULL_MIG",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL_MIG",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "Deprecated": "1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_H_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.INVITOM",
+ "Deprecated": "1",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IODC_ALLOC.INVITOM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.IODCFULL",
+ "Deprecated": "1",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IODC_ALLOC.IODCFULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_ALLOC.OSBGATED",
+ "Deprecated": "1",
+ "EventCode": "0x62",
+ "EventName": "UNC_H_IODC_ALLOC.OSBGATED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.ALL",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.SNPOUT",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.SNPOUT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBMTOE",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.WBMTOE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBMTOI",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.WBMTOI",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_IODC_DEALLOC.WBPUSHMTOI",
+ "Deprecated": "1",
+ "EventCode": "0x63",
+ "EventName": "UNC_H_IODC_DEALLOC.WBPUSHMTOI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.CV0_PREF_MISS",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.CV0_PREF_VIC",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RFO_HIT_S",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.RSPI_WAS_FSE",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_MISC.WC_ALIASING",
+ "Deprecated": "1",
+ "EventCode": "0x39",
+ "EventName": "UNC_H_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_OSB",
+ "Deprecated": "1",
+ "EventCode": "0x55",
+ "EventName": "UNC_H_OSB",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC0_SMI2",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC1_SMI3",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC2_SMI4",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.EDC3_SMI5",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.MC0_SMI0",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_READ_NO_CREDITS.MC1_SMI1",
+ "Deprecated": "1",
+ "EventCode": "0x58",
+ "EventName": "UNC_H_READ_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.INVITOE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_REQUESTS.INVITOE_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.INVITOE_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from local home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "read requests from remote home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.READS_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from local home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "write requests from remote home agent",
+ "Deprecated": "1",
+ "EventCode": "0x50",
+ "EventName": "UNC_H_REQUESTS.WRITES_REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "Deprecated": "1",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "Deprecated": "1",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "Deprecated": "1",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "Deprecated": "1",
+ "EventCode": "0xA1",
+ "EventName": "UNC_H_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.AD",
+ "Deprecated": "1",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.AK",
+ "Deprecated": "1",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.BL",
+ "Deprecated": "1",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_BOUNCES_VERT.IV",
+ "Deprecated": "1",
+ "EventCode": "0xA0",
+ "EventName": "UNC_H_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "Deprecated": "1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_H_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "Deprecated": "1",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "Deprecated": "1",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "Deprecated": "1",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "Deprecated": "1",
+ "EventCode": "0xA2",
+ "EventName": "UNC_H_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.PRQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.RRQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_INSERTS.WBQ",
+ "Deprecated": "1",
+ "EventCode": "0x13",
+ "EventName": "UNC_H_RxC_INSERTS.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x22",
+ "EventName": "UNC_H_RxC_IPQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.ANY_IPQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IPQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x23",
+ "EventName": "UNC_H_RxC_IPQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x18",
+ "EventName": "UNC_H_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.ANY_REJECT_IRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x19",
+ "EventName": "UNC_H_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x24",
+ "EventName": "UNC_H_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2C",
+ "EventName": "UNC_H_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x25",
+ "EventName": "UNC_H_RxC_ISMQ1_REJECT.ANY_ISMQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x25",
+ "EventName": "UNC_H_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_H_RxC_ISMQ1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "Deprecated": "1",
+ "EventCode": "0x2D",
+ "EventName": "UNC_H_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IPQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.IPQ",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.RRQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.RRQ",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OCCUPANCY.WBQ",
+ "Deprecated": "1",
+ "EventCode": "0x11",
+ "EventName": "UNC_H_RxC_OCCUPANCY.WBQ",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2E",
+ "EventName": "UNC_H_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2F",
+ "EventName": "UNC_H_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x20",
+ "EventName": "UNC_H_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.ANY_PRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x21",
+ "EventName": "UNC_H_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x2A",
+ "EventName": "UNC_H_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.ANY",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x2B",
+ "EventName": "UNC_H_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x26",
+ "EventName": "UNC_H_RxC_RRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.ANY_RRQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_RRQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x27",
+ "EventName": "UNC_H_RxC_RRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "Deprecated": "1",
+ "EventCode": "0x28",
+ "EventName": "UNC_H_RxC_WBQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.ANY0",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.ANY_WBQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.HA",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.PA_MATCH",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.SF_VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxC_WBQ1_REJECT.VICTIM",
+ "Deprecated": "1",
+ "EventCode": "0x29",
+ "EventName": "UNC_H_RxC_WBQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_H_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_BYPASS.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_H_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.IFV",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_CRD_STARVED.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_H_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_INSERTS.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_H_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_RxR_OCCUPANCY.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_H_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.E_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x3D",
+ "EventName": "UNC_H_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.M_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x3D",
+ "EventName": "UNC_H_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SF_EVICTION.S_STATE",
+ "Deprecated": "1",
+ "EventCode": "0x3D",
+ "EventName": "UNC_H_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.ALL",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.BCST_LOC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.BCST_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.BCST_REM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.DIRECT_LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.DIRECT_REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.DIRECT_REM",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOPS_SENT.REMOTE",
+ "Deprecated": "1",
+ "EventCode": "0x51",
+ "EventName": "UNC_H_SNOOPS_SENT.REMOTE",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPI",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPIFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPS",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSPSFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP.RSP_WBWB",
+ "Deprecated": "1",
+ "EventCode": "0x5C",
+ "EventName": "UNC_H_SNOOP_RESP.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_FWD_WB",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_FWD_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_SNOOP_RESP_LOCAL.RSP_WB",
+ "Deprecated": "1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_H_SNP_RSP_RCV_LOCAL.RSP_WB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "Deprecated": "1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_H_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_H_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_BYPASS.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9F",
+ "EventName": "UNC_H_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x96",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x97",
+ "EventName": "UNC_H_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_INSERTS.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x95",
+ "EventName": "UNC_H_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_NACK.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x99",
+ "EventName": "UNC_H_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x94",
+ "EventName": "UNC_H_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.AD_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.AK_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.BL_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_HORZ_STARVED.IV_BNC",
+ "Deprecated": "1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_H_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_H_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_BYPASS.IV",
+ "Deprecated": "1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_H_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_FULL.IV",
+ "Deprecated": "1",
+ "EventCode": "0x92",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_FULL.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_CYCLES_NE.IV",
+ "Deprecated": "1",
+ "EventCode": "0x93",
+ "EventName": "UNC_H_TxR_VERT_CYCLES_NE.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_INSERTS.IV",
+ "Deprecated": "1",
+ "EventCode": "0x91",
+ "EventName": "UNC_H_TxR_VERT_INSERTS.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_NACK.IV",
+ "Deprecated": "1",
+ "EventCode": "0x98",
+ "EventName": "UNC_H_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_OCCUPANCY.IV",
+ "Deprecated": "1",
+ "EventCode": "0x90",
+ "EventName": "UNC_H_TxR_VERT_OCCUPANCY.IV_AG0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AD_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AD_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AK_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.AK_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.BL_AG0",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.BL_AG1",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TxR_VERT_STARVED.IV",
+ "Deprecated": "1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_H_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA6",
+ "EventName": "UNC_H_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xA8",
+ "EventName": "UNC_H_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "Deprecated": "1",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "Deprecated": "1",
+ "EventCode": "0xAA",
+ "EventName": "UNC_H_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "Deprecated": "1",
+ "EventCode": "0xAC",
+ "EventName": "UNC_H_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "Deprecated": "1",
+ "EventCode": "0xAC",
+ "EventName": "UNC_H_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WB_PUSH_MTOI.LLC",
+ "Deprecated": "1",
+ "EventCode": "0x56",
+ "EventName": "UNC_H_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WB_PUSH_MTOI.MEM",
+ "Deprecated": "1",
+ "EventCode": "0x56",
+ "EventName": "UNC_H_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC0_SMI2",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.EDC0_SMI2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC1_SMI3",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.EDC1_SMI3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC2_SMI4",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.EDC2_SMI4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.EDC3_SMI5",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.EDC3_SMI5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.MC0_SMI0",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.MC0_SMI0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_WRITE_NO_CREDITS.MC1_SMI1",
+ "Deprecated": "1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_H_WRITE_NO_CREDITS.MC1_SMI1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPI_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0xe4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPI_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0xf0",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPS_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0xe2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSPS_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0xe8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.ANY_RSP_HITFSE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.ANY_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0xe1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPI_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPI_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x50",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPS_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSPS_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x48",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.CORE_RSP_HITFSE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.CORE_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x84",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPI_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSPS_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EVICT_RSP_HITFSE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EVICT_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPI_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSPI_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x24",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPI_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSPI_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPS_FWDFE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSPS_FWDFE",
+ "PerPkg": "1",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSPS_FWDM",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSPS_FWDM",
+ "PerPkg": "1",
+ "UMask": "0x28",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_XSNP_RESP.EXT_RSP_HITFSE",
+ "Deprecated": "1",
+ "EventCode": "0x32",
+ "EventName": "UNC_H_XSNP_RESP.EXT_RSP_HITFSE",
+ "PerPkg": "1",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ }
+]
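
The hunks in this patch add flat JSON arrays of uncore event descriptions (EventName, EventCode, UMask, Unit, plus an optional Deprecated flag whose BriefDescription names the replacement event). A minimal Python sketch for inspecting one of these files offline follows; the file path and the parsing of the "Refer to new event" phrasing are assumptions based only on the entries visible in this patch, not on the perf tooling itself.

#!/usr/bin/env python3
# Minimal sketch: inspect a perf pmu-events JSON file like the ones added by
# this patch. The path is an assumption -- point it at any of the skylakex
# JSON files in a checked-out kernel tree.
import json
from collections import Counter

PATH = "tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json"

with open(PATH) as f:
    events = json.load(f)  # each file is a flat JSON array of event objects

# Count events per uncore unit (IRP, M2M, CHA, ...).
per_unit = Counter(e.get("Unit", "?") for e in events)
for unit, n in sorted(per_unit.items()):
    print(f"{unit}: {n} events")

# List deprecated events together with the replacement named in the description.
for e in events:
    if e.get("Deprecated") == "1":
        brief = e.get("BriefDescription", "")
        # Deprecated entries follow the pattern "... Refer to new event <NAME>".
        replacement = brief.rsplit(" ", 1)[-1] if "Refer to new event" in brief else "?"
        print(f"{e['EventName']} -> {replacement}")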
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json
new file mode 100644
index 000000000000..26a5a20bf37a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-interconnect.json
@@ -0,0 +1,11248 @@
+[
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Any Source",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.; Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy; Snoops",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests.",
+ "EventCode": "0xF",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "Total IRP occupancy of inbound read and write requests. This is effectively the sum of read occupancy and write occupancy.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "IRP Clocks",
+ "EventCode": "0x1",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CLFlush",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; CRd",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; DRd",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.DRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIDCAHin5t",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCIDCAHINT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; PCIRdCur",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "PublicDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline to coherent memory, without a RFO. PCIITOM is a speculative Invalidate to Modified command that requests ownership of the cacheline and does not move data from the mesh to IRP cache.",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline to coherent memory. RFO is a Read For Ownership command that requests ownership of the cacheline and moves data from the mesh to IRP cache.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops; WbMtoI",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of coherency related operations servied by the IRP",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound read requests to coherent memory, received by the IRP and inserted into the Fire and Forget queue (FAF), a queue used for processing inbound reads in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue.",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy of the IRP Fire and Forget (FAF) queue, a queue used for processing inbound reads in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "All Inserts Inbound (p2p + faf + cset)",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Rejects",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Requests",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0; Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 0",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_MISC0.UNKNOWN",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Lost Forward",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Invalid",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Received Valid",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of E Line",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of I Line",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of M Line",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1; Slow Transfer of S Line",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Requests",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_P2P_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "P2P requests from the ITC",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Occupancy",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_P2P_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "P2P B & S Queue Occupancy",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P completions",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if local only",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if local and target matches",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P Message",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P reads",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; Match if remote only",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; match if remote and target matches",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions; P2P Writes",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit E or S",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit I",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Hit M",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; Miss",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpCode",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpData",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses; SnpInv",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Atomic",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of atomic transactions",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Other",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of 'other' kinds of transactions.",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Read Prefetches",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.RD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks the number of read prefetches.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Reads",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Tracks only read requests (not including read prefetches).",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count; Writes",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID.; Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "EventCode": "0xB",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "EventCode": "0x5",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "EventCode": "0x2",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "EventCode": "0x8",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "EventCode": "0x6",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "EventCode": "0x3",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "EventCode": "0x9",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "EventCode": "0x7",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "EventCode": "0x4",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "EventCode": "0xA",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD Egress Credit Stalls",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_TxR2_AD_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue a request to the R2PCIe because there are no AD Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x1B",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xD",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0xE",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0xC",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjunction with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts traffic in which the M2M (Mesh to Memory) to iMC (Memory Controller) bypass was not taken",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Taken",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_Egress.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Not Taken",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass; Taken",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles - at UCLK",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when direct to core mode (which bypasses the CHA) was disabled",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when messages were sent direct to core (bypassing the CHA)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction were overridden",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts reads in which direct to core transactions (which would have bypassed the CHA) were overridden",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to Intel(R) UPI transactions were overridden",
+ "EventCode": "0x28",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts reads in which direct to Intel(R) Ultra Path Interconnect (UPI) transactions (which would have bypassed the CHA) were overridden",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to Intel(R) UPI was disabled",
+ "EventCode": "0x27",
+ "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the ability to send messages direct to the Intel(R) Ultra Path Interconnect (bypassing the CHA) was disabled",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages sent direct to the Intel(R) UPI",
+ "EventCode": "0x26",
+ "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when messages were sent direct to the Intel(R) Ultra Path Interconnect (bypassing the CHA)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads that a message sent direct2 Intel(R) UPI was overridden",
+ "EventCode": "0x29",
+ "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a read message that was sent direct to the Intel(R) Ultra Path Interconnect (bypassing the CHA) was overridden",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in A State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in I State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in L State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On NonDirty Line in S State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in A State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in I State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in L State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Hit; On Dirty Line in S State",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M2M_DIRECTORY_HIT.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in Any State (A, I, S or unused)",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state)",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in the A (SnoopAll) state, indicating the cacheline is stored in another socket in any state, and we must snoop the other sockets to make sure we get the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state)",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the I (Invalid) state indicating the cacheline is not stored in another socket, and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state)",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the S (Shared) state indicating the cacheline is either stored in another socket in the S(hared) state , and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in A State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_A",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in I State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_I",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in L State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_P",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On NonDirty Line in S State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.CLEAN_S",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in A State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_A",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in I State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_I",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in L State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_P",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Directory Miss; On Dirty Line in S State",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M2M_DIRECTORY_MISS.DIRTY_S",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to I",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from A (SnoopAll) to I (Invalid)",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from A to S",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from A (SnoopAll) to S (Shared)",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from/to Any state",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory to a new state",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to A",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from I (Invalid) to A (SnoopAll)",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from I to S",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from I (Invalid) to S (Shared)",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to A",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from S (Shared) to A (SnoopAll)",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Multi-socket cacheline Directory update from S to I",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from S (Shared) to I (Invalid)",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller).",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC; All, regardless of priority.",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TRANSGRESS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC; Critical Priority",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller). It only counts normal priority non-isochronous reads.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Writes to iMC issued",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues writes to the iMC (Memory Controller).",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TRANSGRESS",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; Full Line Non-ISOCH",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; ISOCH Full Line",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; All, regardless of priority.",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Partial Non-Isochronous writes to the iMC",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) issues partial writes to the iMC (Memory Controller). It only counts normal priority non-isochronous writes.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC; ISOCH Partial",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches; MC Match",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches; Mesh Match",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full",
+ "EventCode": "0x53",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty",
+ "EventCode": "0x54",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch requests that got turn into a demand request",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) promotes a outstanding request in the prefetch queue due to a subsequent demand read request that entered the M2M with the same address. Explanatory Side Note: The Prefetch queue is made of CAM (Content Addressable Memory)",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
+ "EventCode": "0x57",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the M2M (Mesh to Memory) receives a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "Deprecated": "1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_NO_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 0",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 1",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular; Channel 2",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 0",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special; Channel 2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Full",
+ "EventCode": "0x4",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "EventCode": "0x3",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x1",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the a new entry is Received(RxC) and then added to the AD (Address Ring) Ingress Queue from the CMS (Common Mesh Stop). This is generally used for reads, and",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "EventCode": "0x2",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Full",
+ "EventCode": "0x8",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "EventCode": "0x7",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "EventCode": "0x5",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "EventCode": "0x6",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 0",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 1",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full; Channel 2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 0",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 1",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty; Channel 2",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_CYCLES_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 0",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 1",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts; Channel 2",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 0",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 1",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy; Channel 2",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Pending Occupancy",
+ "EventCode": "0x48",
+ "EventName": "UNC_M2M_TRACKER_PENDING_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "EventCode": "0xD",
+ "EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "EventCode": "0xE",
+ "EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Full",
+ "EventCode": "0xC",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Not Empty",
+ "EventCode": "0xB",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "EventCode": "0x9",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "EventCode": "0xF",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "EventCode": "0xA",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK; CRD Transactions to Cbo",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK; NDR Transactions",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.NDR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M2M_TxC_AK_CREDIT_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; All",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Read Credit Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Compare Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full; Write Credit Request",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; All",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Read Credit Request",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Write Compare Request",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty; Write Credit Request",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; All",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Prefetch Read Cam Hit",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Read Credit Request",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Write Compare Request",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations; Write Credit Request",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; All",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Read Credit Request",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Write Compare Request",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy; Write Credit Request",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Sideband",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_TxC_AK_SIDEBAND.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Sideband",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_TxC_AK_SIDEBAND.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Cache",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to Core",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache; Data to QPI",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Near Side",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired; Common Mesh Stop - Far Side",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Near Side",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credits Occupancy; Common Mesh Stop - Far Side",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M2M_TxC_BL_CREDIT_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; All",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Near Side",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full; Common Mesh Stop - Far Side",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; All",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Near Side",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty; Common Mesh Stop - Far Side",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; All",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Near Side",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations; Common Mesh Stop - Far Side",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Near Side",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits; Common Mesh Stop - Far Side",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; All",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Near Side",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Occupancy; Common Mesh Stop - Far Side",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2M_TxC_BL_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "Deprecated": "1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_NO_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Regular; Channel 2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_CYCLES_REG_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 0",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 1",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M->iMC WPQ Cycles w/Credits - Special; Channel 2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_CYCLES_SPEC_CREDITS.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 0",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 1",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full; Channel 2",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_FULL.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 0",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 1",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty; Channel 2",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WRITE_TRACKER_CYCLES_NE.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 0",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 1",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts; Channel 2",
+ "EventCode": "0x61",
+ "EventName": "UNC_M2M_WRITE_TRACKER_INSERTS.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 0",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 1",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy; Channel 2",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_WRITE_TRACKER_OCCUPANCY.CH2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M3UPI_AG0_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M3UPI_AG0_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired; For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy; For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M3UPI_AG1_AD_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy; For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M3UPI_AG1_BL_CRD_OCCUPANCY.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired; For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M3UPI_AG1_BL_CREDITS_ACQUIRED.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Requests",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Snoops",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; VNA Messages",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CBox AD Credits Empty; Writebacks",
+ "EventCode": "0x22",
+ "EventName": "UNC_M3UPI_CHA_AD_CREDITS_EMPTY.WB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to Cbox on the AD Ring (covers higher CBoxes)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of uclks in domain",
+ "EventCode": "0x1",
+ "EventName": "UNC_M3UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of uclks in the M3 uclk domain. This could be slightly different than the count in the Ubox because of enable/freeze delays. However, because the M3 is close to the Ubox, they generally should not diverge by more than a handful of cycles.",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M3UPI_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2C Sent",
+ "EventCode": "0x2B",
+ "EventName": "UNC_M3UPI_D2C_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases BL sends direct to core",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "D2U Sent",
+ "EventCode": "0x2A",
+ "EventName": "UNC_M3UPI_D2U_SENT",
+ "PerPkg": "1",
+ "PublicDescription": "Cases where SMI3 sends D2U command",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Down",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements; Up",
+ "EventCode": "0xAE",
+ "EventName": "UNC_M3UPI_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Horizontal",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_FAST_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "FaST wire asserted; Vertical",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M3UPI_FAST_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles either the local or incoming distress signals are asserted. Incoming distress includes up, dn and across.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Left and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Even",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use; Right and Odd",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M3UPI_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Left and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Even",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use; Right and Odd",
+ "EventCode": "0xA9",
+ "EventName": "UNC_M3UPI_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Left and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Even",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use; Right and Odd",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M3UPI_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Left",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use; Right",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M3UPI_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO0 and IIO1 share the same ring destination. (1 VN0 credit only)",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO0_IIO1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO2",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO2_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO3",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO3_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO4",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO4_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; IIO5",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.IIO5_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; All IIO targets for NCS are in single mask. ORs them together",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "M2 BL Credits Empty; Selected M2p BL NCS credits",
+ "EventCode": "0x23",
+ "EventName": "UNC_M3UPI_M2_BL_CREDITS_EMPTY.NCS_SEL",
+ "PerPkg": "1",
+ "PublicDescription": "No vn0 and vna credits available to send to M2",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 1",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AD - Slot 2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AK - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; AK - Slot 2",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.AK_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Multi Slot Flit Received; BL - Slot 0",
+ "EventCode": "0x3E",
+ "EventName": "UNC_M3UPI_MULTI_SLOT_RCVD.BL_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Multi slot flit received - S0, S1 and/or S2 populated (can use AK S0/S1 masks for AK allocations)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AD",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; BL",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring.; IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; AD",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Acknowledgements to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Data Responses to core",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.; Snoops of processor's cache.",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M3UPI_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AD",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; Acknowledgements to Agent 1",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; BL",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring; IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; AD",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Acknowledgements to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Data Responses to core",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring; Snoops of processor's cache.",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M3UPI_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M3UPI_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; REQ on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; RSP on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; SNP on AD",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; NCB on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; NCS on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; RSP on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN0; WB on BL",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message requested but lost arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; REQ on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; RSP on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; SNP on AD",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; NCB on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; NCS on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; RSP on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Lost Arb for VN1; WB on BL",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M3UPI_RxC_ARB_LOST_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message requested but lost arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; AD, BL Parallel Win",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.ADBL_PARALLEL_WIN",
+ "PerPkg": "1",
+ "PublicDescription": "AD and BL messages won arbitration concurrently / in parallel",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arbitration stage made no progress on pending ad vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending AD VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_AD_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arbitration stage made no progress on pending ad vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Arbitration stage made no progress on pending bl vn0 messages because slotting stage cannot accept new message",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; No Progress on Pending BL VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.NO_PROG_BL_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Arbitration stage made no progress on pending bl vn1 messages because slotting stage cannot accept new message",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; Parallel Bias to VN0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "VN0/VN1 arbiter gave second, consecutive win to vn0, delaying vn1 win, because vn0 offered parallel ad/bl",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Arb Miscellaneous; Parallel Bias to VN1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M3UPI_RxC_ARB_MISC.PAR_BIAS_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "VN0/VN1 arbiter gave second, consecutive win to vn1, delaying vn0 win, because vn1 offered parallel ad/bl",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; REQ on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; RSP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; SNP on AD",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; NCB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; NCS on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; RSP on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN0; WB on BL",
+ "EventCode": "0x49",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message was not able to request arbitration while some other message won arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; REQ on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; RSP on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; SNP on AD",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; NCB on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; NCS on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; RSP on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Can't Arb for VN1; WB on BL",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOAD_REQ_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message was not able to request arbitration while some other message won arbitration; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; REQ on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; RSP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; SNP on AD",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; NCB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; NCS on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; RSP on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN0; WB on BL",
+ "EventCode": "0x47",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 message is blocked from requesting arbitration due to lack of remote UPI credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; REQ on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; RSP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; SNP on AD",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; NCB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; NCS on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; RSP on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "No Credits to Arb for VN1; WB on BL",
+ "EventCode": "0x48",
+ "EventName": "UNC_M3UPI_RxC_ARB_NOCRED_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "VN1 message is blocked from requesting arbitration due to lack of remote UPI credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on BL Arb",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_BL_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to slot 0 of independent flit while bl message is in arbitration",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD to Slot 0 on Idle",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S0_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to slot 0 of independent flit while pipeline is idle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 1",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S1_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to flit slot 1 while merging with bl message in same flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Ingress Queue Bypasses; AD + BL to Slot 2",
+ "EventCode": "0x40",
+ "EventName": "UNC_M3UPI_RxC_BYPASSED.AD_S2_BL_SLOT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times message is bypassed around the Ingress Queue; AD is taking bypass to flit slot 2 while merging with bl message in same flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; REQ on AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; RSP on AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; SNP on AD",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; NCB on BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; NCS on BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; RSP on BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message lost contest for flit; WB on BL",
+ "EventCode": "0x50",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN0 packets lost the contest for Flit Slot 0.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; REQ on AD",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; RSP on AD",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; SNP on AD",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; NCB on BL",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; NCS on BL",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; RSP on BL",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message lost contest for flit; WB on BL",
+ "EventCode": "0x51",
+ "EventName": "UNC_M3UPI_RxC_COLLISION_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress VN1 packets lost the contest for Flit Slot 0.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; Any In BGF FIFO",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Indication that at least one packet (flit) is in the bgf (fifo only)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; Any in BGF Path",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.ANY_BGF_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Indication that at least one packet (flit) is in the bgf path (i.e. pipe to fifo)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Miscellaneous Credit Events; No D2K For Arb",
+ "EventCode": "0x60",
+ "EventName": "UNC_M3UPI_RxC_CRD_MISC.NO_D2K_FOR_ARB",
+ "PerPkg": "1",
+ "PublicDescription": "VN0 or VN1 BL RSP message was blocked from arbitration request due to lack of D2K CMP credits",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; D2K Credits",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.D2K_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "D2K completion fifo credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Packets in BGF FIFO",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in fifo",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Packets in BGF Path",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.FLITS_IN_PATH",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy of m3upi ingress -> upi link layer bgf; packets (flits) in path (i.e. pipe to fifo or fifo)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_FIFO",
+ "PerPkg": "1",
+ "PublicDescription": "count of bl messages in pump-1-pending state, in completion fifo only",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.P1P_TOTAL",
+ "PerPkg": "1",
+ "PublicDescription": "count of bl messages in pump-1-pending state, in marker table and in fifo",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; Transmit Credits",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.TxQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Link layer transmit queue credit occupancy (credits in use), accumulated across all cycles",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Credit Occupancy; VNA In Use",
+ "EventCode": "0x61",
+ "EventName": "UNC_M3UPI_RxC_CRD_OCC.VNA_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "Remote UPI VNA credit occupancy (number of credits in use), accumulated across all cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "EventCode": "0x43",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the UPI Ingress is not empty. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; REQ on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; SNP on AD",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCB on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; NCS on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; RSP on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Cycles Not Empty; WB on BL",
+ "EventCode": "0x44",
+ "EventName": "UNC_M3UPI_RxC_CYCLES_NE_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; All",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Data flit is ready for transmission but could not be sent",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; No BGF Credits",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_BGF",
+ "PerPkg": "1",
+ "PublicDescription": "Data flit is ready for transmission but could not be sent",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Data Flit Not Sent; No TxQ Credits",
+ "EventCode": "0x57",
+ "EventName": "UNC_M3UPI_RxC_FLITS_DATA_NOT_SENT.NO_TXQ",
+ "PerPkg": "1",
+ "PublicDescription": "Data flit is ready for transmission but could not be sent",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 0",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "generating bl data flit sequence; waiting for data pump 0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_AT_LIMIT",
+ "PerPkg": "1",
+ "PublicDescription": "pump-1-pending logic is at capacity (pending table plus completion fifo at limit)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_BUSY",
+ "PerPkg": "1",
+ "PublicDescription": "pump-1-pending logic is tracking at least one message",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_FIFO_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "pump-1-pending completion fifo is full",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_HOLD_P0",
+ "PerPkg": "1",
+ "PublicDescription": "pump-1-pending logic is at or near capacity, such that pump-0-only bl messages are getting stalled in slotting stage",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1P_TO_LIMBO",
+ "PerPkg": "1",
+ "PublicDescription": "a bl message finished but is in limbo and moved to pump-1-pending logic",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Generating BL Data Flit Sequence; Wait on Pump 1",
+ "EventCode": "0x59",
+ "EventName": "UNC_M3UPI_RxC_FLITS_GEN_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "generating bl data flit sequence; waiting for data pump 1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UNC_M3UPI_RxC_FLITS_MISC",
+ "EventCode": "0x5A",
+ "EventName": "UNC_M3UPI_RxC_FLITS_MISC",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; One Message",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "One message in flit; VNA or non-VNA flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; One Message in non-VNA",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.1_MSG_VNX",
+ "PerPkg": "1",
+ "PublicDescription": "One message in flit; non-VNA flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; Two Messages",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.2_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Two messages in flit; VNA flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit; Three Messages",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.3_MSGS",
+ "PerPkg": "1",
+ "PublicDescription": "Three messages in flit; VNA flit",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_1",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_2",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Sent Header Flit",
+ "EventCode": "0x56",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SENT.SLOTS_3",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; All",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Needs Data Flit",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.NEED_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "BL message requires data flit sequence",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 0",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P0_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Waiting for header pump 0",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Header pump 1 is not required for flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Bubble",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_BUT_BUBBLE",
+ "PerPkg": "1",
+ "PublicDescription": "Header pump 1 is not required for flit but flit transmission delayed",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Don't Need Pump 1 - Not Avail",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_NOT_REQ_NOT_AVAIL",
+ "PerPkg": "1",
+ "PublicDescription": "Header pump 1 is not required for flit and not available",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Slotting BL Message Into Header Flit; Wait on Pump 1",
+ "EventCode": "0x58",
+ "EventName": "UNC_M3UPI_RxC_FLITS_SLOT_BL.P1_WAIT",
+ "PerPkg": "1",
+ "PublicDescription": "Waiting for header pump 1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting control state machine is in any accumulate state; multi-message flit may be assembled over multiple cycles",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate Ready",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; header flit slotting control state machine is in accum_ready state; flit is ready to send but transmission is blocked; more messages may be slotted into flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Accumulate Wasted",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.ACCUM_WASTED",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Flit is being assembled over multiple cycles, but no additional message is being slotted into flit in current cycle; accumulate cycle is wasted",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Run-Ahead - Blocked",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_BLOCKED",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting entered run-ahead state; new header flit is started while transmission of prior, fully assembled flit is blocked",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Run-Ahead - Message",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.AHEAD_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit slotting is in run-ahead to start new flit, and message is actually slotted into new flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Ok",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; New header flit construction may proceed in parallel with data flit sequence",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Flit Finished",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_FLIT",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Header flit finished assembly in parallel with data flit sequence",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 1; Parallel Message",
+ "EventCode": "0x53",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR1.PAR_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 1; Message is slotted into header flit in parallel with data flit sequence",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2; Rate-matching Stall",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 2; Rate-matching stall injected",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Flit Gen - Header 2; Rate-matching Stall - No Message",
+ "EventCode": "0x54",
+ "EventName": "UNC_M3UPI_RxC_FLIT_GEN_HDR2.RMSTALL_NOMSG",
+ "PerPkg": "1",
+ "PublicDescription": "Events related to Header Flit Generation - Set 2; Rate matching stall injected, but no additional message slotted during stall cycle",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; All",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No BGF Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; No BGF credits available",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No BGF Credits + No Extra Message Slotted",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_BGF_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; No BGF credits available; no additional message slotted into flit",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No TxQ Credits",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; No TxQ credits available",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; No TxQ Credits + No Extra Message Slotted",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.NO_TXQ_NO_MSG",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; No TxQ credits available; no additional message slotted into flit",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - One Slot Taken",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.ONE_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with only one slot taken (two slots free)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - Three Slots Taken",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.THREE_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with three slots taken (no slots free)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Header Not Sent; Sent - Two Slots Taken",
+ "EventCode": "0x55",
+ "EventName": "UNC_M3UPI_RxC_FLIT_NOT_SENT.TWO_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "header flit is ready for transmission but could not be sent; sending header flit with only two slots taken (one slots free)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Can't Slot AD",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_AD",
+ "PerPkg": "1",
+ "PublicDescription": "some AD message could not be slotted (logical OR of all AD events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Can't Slot BL",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.CANT_SLOT_BL",
+ "PerPkg": "1",
+ "PublicDescription": "some BL message could not be slotted (logical OR of all BL events under INGR_SLOT_CANT_MC_VN{0,1})",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel AD Lost",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_AD_LOST",
+ "PerPkg": "1",
+ "PublicDescription": "some AD message lost contest for slot 0 (logical OR of all AD events under INGR_SLOT_LOST_MC_VN{0,1})",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel Attempt",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_ATTEMPT",
+ "PerPkg": "1",
+ "PublicDescription": "ad and bl messages attempted to slot into the same flit in parallel",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel BL Lost",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_BL_LOST",
+ "PerPkg": "1",
+ "PublicDescription": "some BL message lost contest for slot 0 (logical OR of all BL events under INGR_SLOT_LOST_MC_VN{0,1})",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; Parallel Success",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.PARALLEL_SUCCESS",
+ "PerPkg": "1",
+ "PublicDescription": "ad and bl messages were actually slotted into the same flit in paralle",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; VN0",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN0",
+ "PerPkg": "1",
+ "PublicDescription": "vn0 message(s) that couldn't be slotted into last vn0 flit are held in slotting stage while processing vn1 flit",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Message Held; VN1",
+ "EventCode": "0x52",
+ "EventName": "UNC_M3UPI_RxC_HELD.VN1",
+ "PerPkg": "1",
+ "PublicDescription": "vn1 message(s) that couldn't be slotted into last vn1 flit are held in slotting stage while processing vn0 flit",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "EventCode": "0x41",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; REQ on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; SNP on AD",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCB on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; NCS on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; RSP on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Inserts; WB on BL",
+ "EventCode": "0x42",
+ "EventName": "UNC_M3UPI_RxC_INSERTS_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the UPI VN1 Ingress. This tracks one of the three rings that are used by the UPI agent. This can be used in conjunction with the UPI VN1 Ingress Occupancy Accumulator event in order to calculate average queue latency. Multiple ingress buffers can be tracked at a given time using multiple counters.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "EventCode": "0x45",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; REQ on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; SNP on AD",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCB on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; NCS on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; RSP on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Ingress (from CMS) Queue - Occupancy; WB on BL",
+ "EventCode": "0x46",
+ "EventName": "UNC_M3UPI_RxC_OCCUPANCY_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the occupancy of a given UPI VN1 Ingress queue in each cycle. This tracks one of the three ring Ingress buffers. This can be used with the UPI VN1 Ingress Not Empty event to calculate average occupancy or the UPI VN1 Ingress Allocations event in order to calculate average queuing latency.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; REQ on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; RSP on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; SNP on AD",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; NCB on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; NCS on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; RSP on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 message can't slot into flit; WB on BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN0.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; REQ on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; RSP on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; SNP on AD",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.AD_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; NCB on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; NCS on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Non-Coherent Standard (NCS) messages on BL.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; RSP on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 message can't slot into flit; WB on BL",
+ "EventCode": "0x4F",
+ "EventName": "UNC_M3UPI_RxC_PACKING_MISS_VN1.BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where Ingress has packets to send but did not have time to pack into flit before sending to Agent so slot was left NULL which could have been used.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Lost Arbitration",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARB_LOST",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Arrived",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.ARRIVED",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Dropped - Old",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_OLD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Dropped - Wrap",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.DROP_WRAP",
+ "PerPkg": "1",
+ "PublicDescription": "Dropped because it was overwritten by new message while prefetch queue was full",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "SMI3 Prefetch Messages; Slotted",
+ "EventCode": "0x62",
+ "EventName": "UNC_M3UPI_RxC_SMI3_PFTCH.SLOTTED",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Any In Use",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.ANY_IN_USE",
+ "PerPkg": "1",
+ "PublicDescription": "At least one remote vna credit is in use",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Corrected",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.CORRECTED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of remote vna credits corrected (local return) per cycle",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 1",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT1",
+ "PerPkg": "1",
+ "PublicDescription": "Remote vna credit level is less than 1 (i.e. no vna credits available)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 4",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT4",
+ "PerPkg": "1",
+ "PublicDescription": "Remote vna credit level is less than 4; bl (or ad requiring 4 vna) cannot arb on vna",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Level < 5",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.LT5",
+ "PerPkg": "1",
+ "PublicDescription": "Remote vna credit level is less than 5; parallel ad/bl arb on vna not possible",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Remote VNA Credits; Used",
+ "EventCode": "0x5B",
+ "EventName": "UNC_M3UPI_RxC_VNA_CRD.USED",
+ "PerPkg": "1",
+ "PublicDescription": "Number of remote vna credits consumed per cycle",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M3UPI_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AD - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; AK - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; BL - Credit",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass; IV - Bounce",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M3UPI_RxR_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AD - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; AK - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; BL - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IFV - Credit",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation; IV - Bounce",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M3UPI_RxR_CRD_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AD - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; AK - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; BL - Credit",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations; IV - Bounce",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M3UPI_RxR_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AD - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; AK - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; BL - Credit",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy; IV - Bounce",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M3UPI_RxR_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits; For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M3UPI_STALL_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN0 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 REQ Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 RSP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 SNP Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for AD; VN1 WB Messages",
+ "EventCode": "0x30",
+ "EventName": "UNC_M3UPI_TxC_AD_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.AD_SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD FlowQ Bypass",
+ "EventCode": "0x2C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_BYPASS.BL_EARLY_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cases when the AD flowQ is bypassed (S0, S1 and S2 indicate which slot was bypassed with S0 having the highest priority and S2 the least)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN0 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 REQ Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 RSP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 SNP Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Not Empty; VN1 WB Messages",
+ "EventCode": "0x27",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the AD Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 REQ Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 RSP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 SNP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN0 WB Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 REQ Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 RSP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Inserts; VN1 SNP Messages",
+ "EventCode": "0x2D",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_INSERTS.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_SNP",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN0 WB Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 REQ Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_REQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 RSP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AD Flow Q Occupancy; VN1 SNP Messages",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M3UPI_TxC_AD_FLQ_OCCUPANCY.VN1_SNP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; CHA on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_CHA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to CHA",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_NON_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of non-idle cycles in issuing Vn0 Snpf",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to peer UPI0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN0",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN0_PEER_UPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN0 Snpf to peer UPI1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; CHA on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_CHA",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to CHA",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Non Idle cycles on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_NON_IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of non-idle cycles in issuing Vn1 Snpf",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI0 on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to peer UPI0",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Number of Snoop Targets; Peer UPI1 on VN1",
+ "EventCode": "0x3C",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP1_VN1.VN1_PEER_UPI1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of snpfanout targets and non-idle cycles can be used to calculate average snpfanout latency; Number of VN1 Snpf to peer UPI1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_NONSNP",
+ "PerPkg": "1",
+ "PublicDescription": "Outcome of SnpF pending arbitration; FlowQ txn issued when SnpF pending on Vn0",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN0_SNPFP_VN2SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Outcome of SnpF pending arbitration; FlowQ Vn0 SnpF issued when SnpF pending on Vn1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ Won",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_NONSNP",
+ "PerPkg": "1",
+ "PublicDescription": "Outcome of SnpF pending arbitration; FlowQ txn issued when SnpF pending on Vn1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Snoop Arbitration; FlowQ SnpF Won",
+ "EventCode": "0x3D",
+ "EventName": "UNC_M3UPI_TxC_AD_SNPF_GRP2_VN1.VN1_SNPFP_VN0SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Outcome of SnpF pending arbitration; FlowQ Vn1 SnpF issued when SnpF pending on Vn0",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 REQ Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 SNP Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN0 WB Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 REQ Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 SNP Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - Credit Available; VN1 WB Messages",
+ "EventCode": "0x34",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_CRD_AVAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request with prior cycle credit check complete and credit avail",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 REQ Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 SNP Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN0 WB Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 REQ Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 SNP Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - New Message; VN1 WB Messages",
+ "EventCode": "0x33",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NEW_MSG.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 REQ Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 RSP Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 SNP Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN0 WB Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 REQ Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 RSP Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 SNP Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD - No Credit; VN1 WB Messages",
+ "EventCode": "0x32",
+ "EventName": "UNC_M3UPI_TxC_AD_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "AD speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Inserts",
+ "EventCode": "0x2F",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "AK Flow Q Occupancy",
+ "EventCode": "0x1E",
+ "EventName": "UNC_M3UPI_TxC_AK_FLQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN0 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 NCS Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 NCB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 RSP Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Failed ARB for BL; VN1 WB Messages",
+ "EventCode": "0x35",
+ "EventName": "UNC_M3UPI_TxC_BL_ARB_FAIL.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL arb but no win; arb request asserted but not won",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN0 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 REQ Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 RSP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 SNP Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Not Empty; VN1 WB Messages",
+ "EventCode": "0x28",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_CYCLES_NE.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the BL Egress queue is Not Empty",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 RSP Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 WB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 NCS Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN0 NCB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1 RSP Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1 WB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1_NCB Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Inserts; VN1_NCS Messages",
+ "EventCode": "0x2E",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_INSERTS.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of allocations into the QPI FlowQ. This can be used in conjunction with the QPI FlowQ Occupancy Accumulator event in order to calculate average queue latency. Only a single FlowQ queue can be tracked at any given time. It is not possible to filter based on direction or polarity.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_RSP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN0 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN0_WB",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1_NCS Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1_NCB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1 RSP Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_RSP",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "BL Flow Q Occupancy; VN1 WB Messages",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M3UPI_TxC_BL_FLQ_OCCUPANCY.VN1_WB",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 NCS Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN0 WB Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 WB Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 NCB Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for BL - New Message; VN1 RSP Messages",
+ "EventCode": "0x38",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NEW_MSG.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request due to new message arriving on a specific channel (MC/VN)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCB Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 NCS Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 RSP Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN0 WB Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCS Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 NCB Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x80",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 RSP Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Speculative ARB for AD Failed - No Credit; VN1 WB Messages",
+ "EventCode": "0x37",
+ "EventName": "UNC_M3UPI_TxC_BL_SPEC_ARB_NO_OTHER_PEND.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "BL speculative arb request asserted due to no other channel being active (have a valid entry but don't have credits to send)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AD - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; AK - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Bounce",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used; BL - Credit",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M3UPI_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AD - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; AK - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; BL - Credit",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used; IV - Bounce",
+ "EventCode": "0x9F",
+ "EventName": "UNC_M3UPI_TxR_HORZ_BYPASS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AD - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; AK - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; BL - Credit",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full; IV - Bounce",
+ "EventCode": "0x96",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_FULL.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AD - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; AK - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; BL - Credit",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty; IV - Bounce",
+ "EventCode": "0x97",
+ "EventName": "UNC_M3UPI_TxR_HORZ_CYCLES_NE.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AD - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; AK - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; BL - Credit",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts; IV - Bounce",
+ "EventCode": "0x95",
+ "EventName": "UNC_M3UPI_TxR_HORZ_INSERTS.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AD - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; AK - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; BL - Credit",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs; IV - Bounce",
+ "EventCode": "0x99",
+ "EventName": "UNC_M3UPI_TxR_HORZ_NACK.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AD - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; AK - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; BL - Credit",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy; IV - Bounce",
+ "EventCode": "0x94",
+ "EventName": "UNC_M3UPI_TxR_HORZ_OCCUPANCY.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AD - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AD_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; AK - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.AK_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; BL - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.BL_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation; IV - Bounce",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M3UPI_TxR_HORZ_STARVED.IV_BNC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M3UPI_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AD - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; AK - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; BL - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used; IV",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M3UPI_TxR_VERT_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full; IV",
+ "EventCode": "0x92",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AD - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; AK - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; BL - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty; IV",
+ "EventCode": "0x93",
+ "EventName": "UNC_M3UPI_TxR_VERT_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AD - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; AK - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; BL - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations; IV",
+ "EventCode": "0x91",
+ "EventName": "UNC_M3UPI_TxR_VERT_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs; IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M3UPI_TxR_VERT_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy; IV",
+ "EventCode": "0x90",
+ "EventName": "UNC_M3UPI_TxR_VERT_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh.; Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation; IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M3UPI_TxR_VERT_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN0 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN0_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 REQ Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 RSP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VN1 SNP Messages",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VN1_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 AD Credits Empty; VNA",
+ "EventCode": "0x20",
+ "EventName": "UNC_M3UPI_UPI_PEER_AD_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPIs on the AD Ring",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN0 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN0_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 RSP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_NCS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 REQ Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VN1 SNP Messages",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VN1_WB",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x40",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "UPI0 BL Credits Empty; VNA",
+ "EventCode": "0x21",
+ "EventName": "UNC_M3UPI_UPI_PEER_BL_CREDITS_EMPTY.VNA",
+ "PerPkg": "1",
+ "PublicDescription": "No credits available to send to UPI on the BL Ring (diff between non-SMI and SMI mode)",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
+ "EventCode": "0x29",
+ "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
+ "PerPkg": "1",
+ "PublicDescription": "Count cases where flow control queue that sits between the Intel(R) Ultra Path Interconnect (UPI) and the mesh spawns a prefetch to the iMC (Memory Controller)",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Down and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Even",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use; Up and Odd",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M3UPI_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Down and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Even",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use; Up and Odd",
+ "EventCode": "0xA8",
+ "EventName": "UNC_M3UPI_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Down and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Even",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use; Up and Odd",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M3UPI_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Down",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use; Up",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M3UPI_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; WB on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; NCB on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; REQ on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; RSP on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; SNP on AD",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Used; RSP on BL",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M3UPI_VN0_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN0 credit was used on the DRS message channel. In order for a request to be transferred across UPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN0. VNA is a shared pool used to achieve high performance. The VN0 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN0 if they fail. This counts the number of times a VN0 credit was used. Note that a single VN0 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN0 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; WB on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; NCB on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; REQ on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; RSP on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; SNP on AD",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN0 No Credits; RSP on BL",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M3UPI_VN0_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN0 Credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; WB on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; NCB on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; REQ on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; RSP on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; SNP on AD",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Used; RSP on BL",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M3UPI_VN1_CREDITS_USED.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times a VN1 credit was used on the WB message channel. In order for a request to be transferred across QPI, it must be guaranteed to have a flit buffer on the remote socket to sink into. There are two credit pools, VNA and VN1. VNA is a shared pool used to achieve high performance. The VN1 pool has reserved entries for each message class and is used to prevent deadlock. Requests first attempt to acquire a VNA credit, and then fall back to VN1 if they fail. This counts the number of times a VN1 credit was used. Note that a single VN1 credit holds access to potentially multiple flit buffers. For example, a transfer that uses VNA could use 9 flit buffers and in that case uses 9 credits. A transfer on VN1 will only count a single credit even though it may use multiple buffers.; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; WB on BL",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Data Response (WB) messages on BL. WB is generally used to transmit data with coherency. For example, remote reads and writes, or cache to cache transfers will transmit their data using WB.",
+ "UMask": "0x10",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; NCB on BL",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Non-Coherent Broadcast (NCB) messages on BL. NCB is generally used to transmit data without coherency. For example, non-coherent read data returns.",
+ "UMask": "0x20",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; REQ on AD",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Home (REQ) messages on AD. REQ is generally used to send requests, request responses, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; RSP on AD",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Response (RSP) messages on AD. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x4",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; SNP on AD",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Snoops (SNP) messages on AD. SNP is used for outgoing snoops.",
+ "UMask": "0x2",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "VN1 No Credits; RSP on BL",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M3UPI_VN1_NO_CREDITS.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Number of Cycles there were no VN1 Credits; Response (RSP) messages on BL. RSP packets are used to transmit a variety of protocol flits including grants and completions (CMP).",
+ "UMask": "0x8",
+ "Unit": "M3UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_M2M_TxC_BL.DRS_UPI",
+ "Deprecated": "1",
+ "EventCode": "0x40",
+ "EventName": "UNC_NoUnit_TxC_BL.DRS_UPI",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clocks of the Intel(R) Ultra Path Interconnect (UPI)",
+ "EventCode": "0x1",
+ "EventName": "UNC_UPI_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the fixed frequency clock controlling the Intel(R) Ultra Path Interconnect (UPI). This clock runs at1/8th the 'GT/s' speed of the UPI link. For example, a 9.6GT/s link will have a fixed Frequency of 1.2 Ghz.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to core",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to core bypassing the CHA.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "Deprecated": "1",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2K",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Data Response packets that go direct to Intel(R) UPI",
+ "EventCode": "0x12",
+ "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
+ "PerPkg": "1",
+ "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to Intel(R) Ultra Path Interconnect (UPI) bypassing the CHA .",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AD_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ2",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.AK_VNA_EQ3",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "EventCode": "0x18",
+ "EventName": "UNC_UPI_FLOWQ_NO_VNA_CRD.BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles Intel(R) UPI is in L1 power mode (shutdown)",
+ "EventCode": "0x21",
+ "EventName": "UNC_UPI_L1_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the Intel(R) Ultra Path Interconnect (UPI) is in L1 power mode. L1 is a mode that totally shuts down the UPI link. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another, this event only coutns when both links are shutdown.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "EventCode": "0x14",
+ "EventName": "UNC_UPI_M3_BYP_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "EventCode": "0x16",
+ "EventName": "UNC_UPI_M3_CRD_RETURN_BLOCKED",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.BGF_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_BTW_2_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AD_VNA_LE2",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_AK_VNA_LE3",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_BTW_0_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.FLOWQ_BL_VNA_EQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "EventCode": "0x15",
+ "EventName": "UNC_UPI_M3_RXQ_BLOCKED.GV_BLOCK",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles where phy is not in L0, L0c, L0p, L1",
+ "EventCode": "0x20",
+ "EventName": "UNC_UPI_PHY_INIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req Nack",
+ "EventCode": "0x23",
+ "EventName": "UNC_UPI_POWER_L1_NACK",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a link sends/receives a LinkReqNAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqNAck refers to receiving an NAck (meaning this agent's Tx originally requested the power change). A Tx LinkReqNAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "L1 Req (same as L1 Ack).",
+ "EventCode": "0x22",
+ "EventName": "UNC_UPI_POWER_L1_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a link sends/receives a LinkReqAck. When the UPI links would like to change power state, the Tx side initiates a request to the Rx side requesting to change states. This requests can either be accepted or denied. If the Rx side replies with an Ack, the power mode will change. If it replies with NAck, no change will take place. This can be filtered based on Rx and Tx. An Rx LinkReqAck refers to receiving an Ack (meaning this agent's Tx originally requested the power change). A Tx LinkReqAck refers to sending this command (meaning the peer agent's Tx originally requested the power change and this agent accepted it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.ACK",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN0",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VN1",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "EventCode": "0x46",
+ "EventName": "UNC_UPI_REQ_SLOT2_FROM_M3.VNA",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles the Rx of the Intel(R) UPI is in L0p power mode",
+ "EventCode": "0x25",
+ "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the receive side (Rx) of the Intel(R) Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0. Receive side.",
+ "EventCode": "0x24",
+ "EventName": "UNC_UPI_RxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCB",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Bypass",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCB",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCS",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Non-Coherent Standard",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCS",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Request",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "REQ Message Class",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Request Opcode",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match REQ Opcodes - Specified in Umask[7:4]",
+ "UMask": "0x108",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Conflict",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1aa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Invalid",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x12a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - Data",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0x10c",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - RSP",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Response - No Data",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - RSP",
+ "UMask": "0x10a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Snoop",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "SNP Message Class",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Snoop Opcode",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match SNP Opcodes - Specified in Umask[7:4]",
+ "UMask": "0x109",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Receive path of a UPI Port; Writeback",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0x10d",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
+ "EventCode": "0x31",
+ "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "CRC Errors Detected",
+ "EventCode": "0xB",
+ "EventName": "UNC_UPI_RxL_CRC_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of CRC errors detected in the UPI Agent. Each UPI flit incorporates 8 bits of CRC for error detection. This counts the number of flits where the CRC was able to detect an error. After an error has been detected, the UPI agent will send a request to the transmitting socket to resend the flit (as well as any flits that came after it).",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "LLR Requests Sent",
+ "EventCode": "0x8",
+ "EventName": "UNC_UPI_RxL_CRC_LLR_REQ_TRANSMIT",
+ "PerPkg": "1",
+ "PublicDescription": "Number of LLR Requests were transmitted. This should generally be <= the number of CRC errors detected. If multiple errors are detected before the Rx side receives a LLC_REQ_ACK from the Tx side, there is no need to send more LLR_REQ_NACKs.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN0 Credit Consumed",
+ "EventCode": "0x39",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN0 credit was consumed (i.e. message uses a VN0 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VN1 Credit Consumed",
+ "EventCode": "0x3A",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VN1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VN1 credit was consumed (i.e. message uses a VN1 credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credit Consumed",
+ "EventCode": "0x38",
+ "EventName": "UNC_UPI_RxL_CREDITS_CONSUMED_VNA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that an RxQ VNA credit was consumed (i.e. message uses a VNA credit for the Rx Buffer). This includes packets that went through the RxQ and those that were bypasssed.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid data FLITs received from any slot",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) received from any of the 3 Intel(R) Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Null FLITs received from any slot",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) received from any of the 3 Intel(R) Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Data",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Idle",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; LLCRD Not Empty",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; LLCTRL",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs received from any slot",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) received from any of the 3 UPI slots on this UPI unit.",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.ALL_NULL",
+ "Deprecated": "1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Protocol Header",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_FLITS.PROTHDR",
+ "Deprecated": "1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.PROT_HDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 0",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 1",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Received; Slot 2",
+ "EventCode": "0x3",
+ "EventName": "UNC_UPI_RxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCB",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.NCS",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.REQ",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.RSP",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.SNP",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_RxL_BASIC_HDR_MATCH.WB",
+ "Deprecated": "1",
+ "EventCode": "0x5",
+ "EventName": "UNC_UPI_RxL_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 0",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 1",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Flit Buffer Allocations; Slot 2",
+ "EventCode": "0x30",
+ "EventName": "UNC_UPI_RxL_INSERTS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the UPI Rx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "RxQ Occupancy - All Packets; Slot 2",
+ "EventCode": "0x32",
+ "EventName": "UNC_UPI_RxL_OCCUPANCY.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of elements in the UPI RxQ in each cycle. Generally, when data is transmitted across UPI, it will bypass the RxQ and pass directly to the ring interface. If things back up getting transmitted onto the ring, however, it may need to allocate into this buffer, thus increasing the latency. This event can be used in conjunction with the Flit Buffer Not Empty event to calculate average occupancy, or with the Flit Buffer Allocations event to track average lifetime.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S0_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S1_RXQ2",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "EventCode": "0x33",
+ "EventName": "UNC_UPI_RxL_SLOT_BYPASS.S2_RXQ1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.CFG_CTL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.DFX",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RETRY",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_BYPASS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.RXQ_CRED",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.SPARE",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "EventCode": "0x2A",
+ "EventName": "UNC_UPI_TxL0P_CLK_ACTIVE.TXQ",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in which the Tx of the Intel(R) Ultra Path Interconnect (UPI) is in L0p power mode",
+ "EventCode": "0x27",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when the transmit side (Tx) of the Intel(R) Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "EventCode": "0x28",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_LL_ENTER",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "EventCode": "0x29",
+ "EventName": "UNC_UPI_TxL0P_POWER_CYCLES_M3_EXIT",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Cycles in L0. Transmit side.",
+ "EventCode": "0x26",
+ "EventName": "UNC_UPI_TxL0_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of UPI qfclk cycles spent in L0 power mode in the Link Layer. L0 is the default mode which provides the highest performance with the most power. Use edge detect to count the number of instances that the link entered L0. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another. The phy layer sometimes leaves L0 for training, which will not be captured by this event.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCB",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Bypass",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCB",
+ "UMask": "0x10e",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCS",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Non-Coherent Standard",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.NCS_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - NCS",
+ "UMask": "0x10f",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Request",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "PublicDescription": "REQ Message Class",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Request Opcode",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.REQ_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match REQ Opcodes - Specified in Umask[7:4]",
+ "UMask": "0x108",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Conflict",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPCNFLT",
+ "PerPkg": "1",
+ "UMask": "0x1aa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Invalid",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSPI",
+ "PerPkg": "1",
+ "UMask": "0x12a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - Data",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0x10c",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - RSP",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Response - No Data",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class - RSP",
+ "UMask": "0x10a",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Snoop",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "PublicDescription": "SNP Message Class",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Snoop Opcode",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.SNP_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match SNP Opcodes - Specified in Umask[7:4]",
+ "UMask": "0x109",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0xd",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Matches on Transmit path of a UPI Port; Writeback",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_BASIC_HDR_MATCH.WB_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "Match Message Class -WB",
+ "UMask": "0x10d",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "FLITs that bypassed the TxL Buffer",
+ "EventCode": "0x41",
+ "EventName": "UNC_UPI_TxL_BYPASSED",
+ "PerPkg": "1",
+ "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the TxL(transmit) FLIT buffer and pass directly out the UPI Link. Generally, when data is transmitted across the Intel(R) Ultra Path Interconnect (UPI), it will bypass the TxQ and pass directly to the link. However, the TxQ will be used in L0p (Low Power) mode and (Link Layer Retry) LLR mode, increasing latency to transfer out to the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid data FLITs transmitted via any slot",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) transmitted (TxL) via any of the 3 Intel(R) Ultra Path Interconnect (UPI) slots on this UPI unit.",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Null FLITs transmitted from any slot",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) transmitted via any of the 3 Intel(R) Ulra Path Interconnect (UPI) slots on this UPI unit.",
+ "UMask": "0x27",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Data",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Data Flits (which consume all slots), but how much to count is based on Slot0-2 mask, so count can be 0-3 depending on which slots are enabled for counting..",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Idle FLITs transmitted",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.IDLE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when the Intel Ultra Path Interconnect(UPI) transmits an idle FLIT(80 bit FLow control unITs). Every UPI cycle must be sending either data FLITs, protocol/credit FLITs or idle FLITs.",
+ "UMask": "0x47",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; LLCRD Not Empty",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables counting of LLCRD (with non-zero payload). This only applies to slot 2 since LLCRD is only allowed in slot 2",
+ "UMask": "0x10",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; LLCTRL",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.LLCTRL",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Equivalent to an idle packet. Enables counting of slot 0 LLCTRL messages.",
+ "UMask": "0x40",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
+ "PerPkg": "1",
+ "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) transmitted across any of the 3 UPI (Ultra Path Interconnect) slots on this UPI unit.",
+ "UMask": "0x97",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.ALL_NULL",
+ "Deprecated": "1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.NULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Protocol Header",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.PROTHDR",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Enables count of protocol headers in slot 0,1,2 (depending on slot uMask bits)",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_FLITS.PROTHDR",
+ "Deprecated": "1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.PROT_HDR",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 0",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 0 - Other mask bits determine types of headers to count.",
+ "UMask": "0x1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 1",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 1 - Other mask bits determine types of headers to count.",
+ "UMask": "0x2",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Valid Flits Sent; Slot 2",
+ "EventCode": "0x2",
+ "EventName": "UNC_UPI_TxL_FLITS.SLOT2",
+ "PerPkg": "1",
+ "PublicDescription": "Shows legal flit time (hides impact of L0p and L0c).; Count Slot 2 - Other mask bits determine types of headers to count.",
+ "UMask": "0x4",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.DATA_HDR",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.DUAL_SLOT_HDR",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.LOC",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCB",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NCB",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.NCS",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NCS",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.NON_DATA_HDR",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.REM",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.REQ",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.REQ",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_DATA",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_DATA",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.RSP_NODATA",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.RSP_NODATA",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.SGL_SLOT_HDR",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.SNP",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.SNP",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_UPI_TxL_BASIC_HDR_MATCH.WB",
+ "Deprecated": "1",
+ "EventCode": "0x4",
+ "EventName": "UNC_UPI_TxL_HDR_MATCH.WB",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Allocations",
+ "EventCode": "0x40",
+ "EventName": "UNC_UPI_TxL_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number of allocations into the UPI Tx Flit Buffer. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This event can be used in conjunction with the Flit Buffer Occupancy event in order to calculate the average flit buffer lifetime.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Tx Flit Buffer Occupancy",
+ "EventCode": "0x42",
+ "EventName": "UNC_UPI_TxL_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Accumulates the number of flits in the TxQ. Generally, when data is transmitted across UPI, it will bypass the TxQ and pass directly to the link. However, the TxQ will be used with L0p and when LLR occurs, increasing latency to transfer out to the link. This can be used with the cycles not empty event to track average occupancy, or the allocations event to track average lifetime in the TxQ.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "EventCode": "0x45",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_BLOCKED_VN01",
+ "PerPkg": "1",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "VNA Credits Pending Return - Occupancy",
+ "EventCode": "0x44",
+ "EventName": "UNC_UPI_VNA_CREDIT_RETURN_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Number of VNA credits in the Rx side that are waitng to be returned back across the link.",
+ "Unit": "UPI"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; IPI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.; Inter Processor Interrupts",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; MSI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.; Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received; VLW",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack; Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "PHOLD cycles.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
+ "EventCode": "0x2",
+ "EventName": "UPI_DATA_BANDWIDTH_TX",
+ "PerPkg": "1",
+ "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) transmitted (TxL) via any of the 3 Intel(R) Ultra Path Interconnect (UPI) slots on this UPI unit.",
+ "ScaleUnit": "7.11E-06Bytes",
+ "UMask": "0xf",
+ "Unit": "UPI"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json
new file mode 100644
index 000000000000..2a3a709018bb
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-io.json
@@ -0,0 +1,4250 @@
+[
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_READ",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_WRITE",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Clockticks of the IIO Traffic Controller",
+ "EventCode": "0x1",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the 1GHz trafiic controller clock in the IIO unit.",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x0f",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x4",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 0",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 1",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 2",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts; Port 3",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.PORT3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0-3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 0",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 1",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 2",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer occupancy of completions with data: Part 3",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core reading from Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part0",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part2",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by a different IIO unit to IIO Part3",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts ever peer to peer read request for 4 bytes of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part1 by a different IIO unit",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part2 by a different IIO unit",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made to IIO Part3 by a different IIO unit",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests initiated by the main die to the attached device.; VTd - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for 4 bytes of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of 4 bytes of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part1 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part2 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for 4 bytes made by IIO Part3 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request for 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of 4 bytes made by IIO Part0 to an IIO target",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of 4 bytes of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU; Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number of double word (4 bytes) requests the attached device made of the main die.; VTd - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num Link Correctable Errors",
+ "EventCode": "0xF",
+ "EventName": "UNC_IIO_LINK_NUM_CORR_ERR",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num Link Retries",
+ "EventCode": "0xE",
+ "EventName": "UNC_IIO_LINK_NUM_RETRIES",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number packets that passed the Mask/Match Filter",
+ "EventCode": "0x21",
+ "EventName": "UNC_IIO_MASK_MATCH",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; PCIE bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus",
+ "EventCode": "0x2",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if all bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and PCIE bus",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; PCIE bus",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus; !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x3",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "Asserted if any bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Counting disabled",
+ "EventName": "UNC_IIO_NOTHING",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMIC.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.ATOMICCMP.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.ATOMICCMP.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.MSG.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.MSG.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_IN.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.CFG_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.IO_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_PAYLOAD_BYTES_OUT.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Symbol Times on Link",
+ "EventCode": "0x82",
+ "EventName": "UNC_IIO_SYMBOL_TIMES",
+ "PerPkg": "1",
+ "PublicDescription": "Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMIC.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.ATOMICCMP.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.MSG.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_IN.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.CFG_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.IO_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.MEM_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_READ.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART2",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.PART3",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x8",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD0",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "Deprecated": "1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_OUT.PEER_WRITE.VTD1",
+ "FCMask": "0x7",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part0. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) or by another IIO unit to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core reading from Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core) or by another IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Core writing to Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part0",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part0. Does not include requests made by the same IIO unit. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part1",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part1. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part2",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part2. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request for up to a 64 byte transaction is made by a different IIO unit to IIO Part3",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request for up to a 64 byte transaction of data made by a different IIO unit to the MMIO space of a card on IIO Part3. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part0 by a different IIO unit",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part1 by a different IIO unit",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part2 by a different IIO unit",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made to IIO Part3 by a different IIO unit",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a different IIO unit. Does not include requests made by the same IIO unit. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU; Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Outbound. Number of requests, to the attached device, initiated by the main die.; VTd - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Completion of atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMICCMP.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x16 card plugged in to stack, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part1 to the MMIO space of an IIO target. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer read request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer read request of up to a 64 byte transaction made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part0 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part0 to the MMIO space of an IIO target. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part1 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part1 to the MMIO space of an IIO target.In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part2 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part2 to the MMIO space of an IIO target. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Peer to peer write request of up to a 64 byte transaction is made by IIO Part3 to an IIO target",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Counts every peer to peer write request of up to a 64 byte transaction of data made by IIO Part3 to the MMIO space of an IIO target. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU; Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.VTD1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Also known as Inbound. Number of 64 byte cache line requests initiated by the attached device.; VTd - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; context cache miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.CTXT_MISS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L1 miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L1_MISS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L2 miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L2_MISS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; L3 miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L3_MISS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; Vtd hit",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.L4_PAGE_HIT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB1_MISS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB is full",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Access; TLB miss",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_VTD_ACCESS.TLB_MISS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "VTd Occupancy",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_VTD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ }
+]
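The inbound UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PARTx and MEM_WRITE.PARTx events above count transactions of up to 64 bytes each, so multiplying a counter delta by 64 gives an upper bound on the traffic between one IIO part and memory. A minimal Python sketch of that conversion, assuming hypothetical counter deltas gathered over a one-second measurement window (the sample numbers are illustrative, not the output of any particular tool):

    # Rough upper bound on inbound PCIe traffic for one IIO part, based on the
    # "up to a 64 byte transaction" semantics of the events defined above.
    def iio_inbound_bytes(txn_count: int) -> int:
        """Each counted transaction moves at most one 64-byte cache line."""
        return txn_count * 64

    # Hypothetical deltas for UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0 and
    # .MEM_WRITE.PART0 over a 1-second window (e.g. read via perf stat).
    rd_txns = 30_000_000
    wr_txns = 12_000_000
    interval_s = 1.0

    print(f"inbound read  <= {iio_inbound_bytes(rd_txns) / interval_s / 1e6:.1f} MB/s")
    print(f"inbound write <= {iio_inbound_bytes(wr_txns) / interval_s / 1e6:.1f} MB/s")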
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
index 9c7e5f8beee2..6f8ff2262ce7 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-memory.json
@@ -1,150 +1,1898 @@
[
{
"BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_READ",
"PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Access Select) read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every read. This event includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"ScaleUnit": "64Bytes",
"UMask": "0x3",
"Unit": "iMC"
},
{
"BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
"EventCode": "0x4",
"EventName": "LLC_MISSES.MEM_WRITE",
"PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"ScaleUnit": "64Bytes",
- "UMask": "0xC",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Bypass",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count; Activate due to Read",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Page Activate commands sent due to a write request",
+ "EventCode": "0x1",
+ "EventName": "UNC_M_ACT_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts DRAM Page Activate commands sent on this channel due to a write request to the iMC (Memory Controller). Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS (Column Access Select) command.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ACT command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.ACT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CAS command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.CAS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "PRE command issued by 2 cycle bypass",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M_BYP_CMDS.PRE",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS Commands issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, so this event increments for every read and write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Read CAS Commands issued (including underfills)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Access Select) read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every read. This event includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Read ISOCH Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills)",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "Counts CAS (Column Access Select) regular read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every regular read. This event only counts regular reads and does not includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in RMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_RMM",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Underfill Read CAS Commands issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts CAS (Column Access Select) underfill read commands issued to DRAM due to a partial write, on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this command counts underfill reads. Partial writes must be completed by first reading in the underfill from DRAM and then merging in the partial write data before writing the full line back to DRAM. This event will generally count about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ (due to a previous write request).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in WMM",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.RD_WMM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM Write CAS commands issued",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; Read CAS issued in Write ISOCH Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Read Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_RMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of Opportunistic DRAM Write CAS commands issued on this channel while in Read-Major-Mode.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM CAS (Column Address Strobe) Commands.; DRAM WR_CAS (w/ and w/out auto-pre) in Write Major Mode",
+ "EventCode": "0x4",
+ "EventName": "UNC_M_CAS_COUNT.WR_WMM",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number or DRAM Write CAS commands issued on this channel while in Write-Major-Mode.",
+ "UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Memory controller clock ticks",
- "Counter": "0,1,2,3",
"EventName": "UNC_M_CLOCKTICKS",
"PerPkg": "1",
+ "PublicDescription": "Counts clockticks of the fixed frequency clock of the memory controller using one of the programmable counters.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Clockticks in the Memory Controller using a dedicated 48-bit Fixed Counter",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_CLOCKTICKS_F",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x6",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x5",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "ECC Correctable Errors",
+ "EventCode": "0x9",
+ "EventName": "UNC_M_ECC_CORRECTABLE_ERRORS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of ECC errors detected and corrected by the iMC on this channel. This counter is only useful with ECC DRAM devices. This count will increment one time for each correction regardless of the number of bits corrected. The iMC can correct up to 4 bit errors in independent channel mode and 8 bit errors in lockstep mode.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Isoch Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.ISOCH",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; We group these two modes together so that we can use four counters to track each of the major modes at one time. These major modes are used whenever there is an ISOCH txn in the memory controller. In these mode, only ISOCH transactions are processed.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Partial Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This major mode is used to drain starved underfill reads. Regular reads and writes are blocked and only underfill reads will be processed.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Read Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; Read Major Mode is the default mode for the iMC, as reads are generally more critical to forward progress than writes.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles in a Major Mode; Write Major Mode",
+ "EventCode": "0x7",
+ "EventName": "UNC_M_MAJOR_MODES.WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of cycles spent in a major mode (selected by a filter) on the given channel. Major modea are channel-wide, and not a per-rank (or dimm or bank) mode.; This mode is triggered when the WPQ hits high occupancy and causes writes to be higher priority than reads. This can cause blips in the available read bandwidth in the system and temporarily increase read latencies in order to achieve better bus utilizations and higher bandwidth.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Channel DLLOFF Cycles",
+ "EventCode": "0x84",
+ "EventName": "UNC_M_POWER_CHANNEL_DLLOFF",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles when all the ranks in the channel are in CKE Slow (DLLOFF) mode.",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
- "Counter": "0,1,2,3",
"EventCode": "0x85",
"EventName": "UNC_M_POWER_CHANNEL_PPD",
- "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_channel_ppd %",
+ "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100",
+ "MetricName": "power_channel_ppd",
+ "PerPkg": "1",
+ "PublicDescription": "Counts cycles when all the ranks in the channel are in PPD (PreCharge Power Down) mode. If IBT (Input Buffer Terminators)=off is enabled, then this event counts the cycles in PPD mode. If IBT=off is not enabled, then this event counts the number of cycles when being in PPD mode could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK4",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank; DIMM ID",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Critical Throttle Cycles",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRITICAL_THROTTLE_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC is in critical thermal throttling. When this happens, all traffic is blocked. This should be rare unless something bad is going on in the platform. There is no filtering by rank for this event.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_POWER_PCU_THROTTLING",
+ "EventCode": "0x42",
+ "EventName": "UNC_M_POWER_PCU_THROTTLING",
"PerPkg": "1",
"Unit": "iMC"
},
{
"BriefDescription": "Cycles Memory is in self refresh power mode",
- "Counter": "0,1,2,3",
"EventCode": "0x43",
"EventName": "UNC_M_POWER_SELF_REFRESH",
- "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100.",
- "MetricName": "power_self_refresh %",
+ "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100",
+ "MetricName": "power_self_refresh",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the iMC (memory controller) is in self-refresh and has a clock. This happens in some ACPI CPU package C-states for the sleep levels. For example, the PCU (Power Control Unit) may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Intel? Dynamic Power Technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK0",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.; Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK1",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK2",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK3",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK4",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK5",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK6",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x40",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0; DIMM ID",
+ "EventCode": "0x41",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.RANK7",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x80",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Read Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts another read.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Preemption Count; Read over Write Preemption",
+ "EventCode": "0x8",
+ "EventName": "UNC_M_PREEMPTION.RD_PREEMPT_WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a read in the iMC preempts another read or write. Generally reads to an open page are issued ahead of requests to closed pages. This improves the page hit rate of the system. However, high priority requests can cause pages of active requests to be closed in order to get them out. This will reduce the latency of the high-priority request at the expense of lower bandwidth and increased overall average latency.; Filter for when a read preempts a write.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to bypass",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.; Precharge due to timer expiration",
+ "EventCode": "0x2",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_CLOSE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.; Counts the number of DRAM Precharge commands sent on this channel as a result of the page close counter expiring. This does not include implicit precharge commands sent in auto-precharge mode.",
+ "UMask": "0x2",
"Unit": "iMC"
},
{
"BriefDescription": "Pre-charges due to page misses",
- "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of explicit DRAM Precharge commands sent on this channel as a result of a DRAM page miss. This does not include the implicit precharge commands sent with CAS commands in Auto-Precharge mode. This does not include Precharge commands sent as a result of a page close counter expiration.",
"UMask": "0x1",
"Unit": "iMC"
},
{
"BriefDescription": "Pre-charge for reads",
- "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.RD",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of explicit DRAM Precharge commands issued on a per channel basis due to a read, so as to close the previous DRAM page, before opening the requested page.",
"UMask": "0x4",
"Unit": "iMC"
},
{
"BriefDescription": "Pre-charge for writes",
- "Counter": "0,1,2,3",
"EventCode": "0x2",
"EventName": "UNC_M_PRE_COUNT.WR",
"PerPkg": "1",
+ "PublicDescription": "Counts the number of DRAM Precharge commands sent on this channel.",
"UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM Page Activate commands sent due to a write request",
- "Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_M_ACT_COUNT.WR",
+ "BriefDescription": "Read CAS issued with HIGH priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.HIGH",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with LOW priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.LOW",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read CAS issued with MEDIUM priority",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.MED",
"PerPkg": "1",
- "PublicDescription": "Counts DRAM Page Activate commands sent on this channel due to a write request to the iMC (Memory Controller). Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS (Column Access Select) command.",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "All DRAM CAS Commands issued",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.ALL",
+ "BriefDescription": "Read CAS issued with PANIC NON ISOCH priority (starved)",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_RD_CAS_PRIO.PANIC",
"PerPkg": "1",
- "PublicDescription": "Counts all CAS (Column Address Select) commands issued to DRAM per memory channel. CAS commands are issued to specify the address to read or write on DRAM, so this event increments for every read and write. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
- "UMask": "0xF",
+ "UMask": "0x8",
"Unit": "iMC"
},
{
- "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_READ",
+ "BriefDescription": "RD_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK3",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
"UMask": "0x3",
"Unit": "iMC"
},
{
- "BriefDescription": "All DRAM Read CAS Commands issued (does not include underfills) ",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M_RD_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK1",
"PerPkg": "1",
- "PublicDescription": "Counts CAS (Column Access Select) regular read commands issued to DRAM on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this event increments for every regular read. This event only counts regular reads and does not includes underfill reads due to partial write requests. This event counts whether AutoPrecharge (which closes the DRAM Page automatically after a read/write) is enabled or not.",
"UMask": "0x1",
"Unit": "iMC"
},
{
- "BriefDescription": "DRAM Underfill Read CAS Commands issued",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK2",
"PerPkg": "1",
- "PublicDescription": "Counts CAS (Column Access Select) underfill read commands issued to DRAM due to a partial write, on a per channel basis. CAS commands are issued to specify the address to read or write on DRAM, and this command counts underfill reads. Partial writes must be completed by first reading in the underfill from DRAM and then merging in the partial write data before writing the full line back to DRAM. This event will generally count about the same as the number of partial writes, but may be slightly less because of partials hitting in the WPQ (due to a previous write request). ",
"UMask": "0x2",
"Unit": "iMC"
},
{
- "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
- "EventCode": "0x4",
- "EventName": "LLC_MISSES.MEM_WRITE",
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK3",
"PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M_RD_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; All Banks",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 1",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 10",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 11",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 12",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 13",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 14",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 15",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 2",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 3",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 4",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 5",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 6",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 7",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 8",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank 9",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M_RD_CAS_RANK2.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; All Banks",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 0",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 1",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 10",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 11",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 12",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 13",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 14",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 15",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 2",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 3",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 4",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 5",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 6",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 7",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 8",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank 9",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M_RD_CAS_RANK3.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M_RD_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M_RD_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M_RD_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "RD_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M_RD_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
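
The per-rank CAS events above follow a regular encoding: RD_CAS uses event codes 0xB0-0xB7 for ranks 0-7 (WR_CAS starts at 0xB8), UMask equals the bank number for banks 1-15 (bank 0 carries no UMask bits), 0x10 selects all banks, and 0x11-0x14 select bank groups 0-3. A minimal Python sketch of that mapping, derived only from the entries listed here; the helper name and constants are invented for illustration and are not part of the JSON:

# Illustrative helper for the iMC RD_CAS/WR_CAS rank/bank event encoding above.
RD_CAS_BASE = 0xB0   # UNC_M_RD_CAS_RANK0..RANK7 -> event codes 0xB0..0xB7
WR_CAS_BASE = 0xB8   # UNC_M_WR_CAS_RANK0..      -> event codes from 0xB8 upward

def cas_event(rank, bank=None, bank_group=None, all_banks=False, base=RD_CAS_BASE):
    """Return (event_code, umask) for a per-rank CAS event.

    Banks 0..15 map directly to UMask 0x0..0xF, all banks is 0x10,
    and bank groups 0..3 map to 0x11..0x14, as in the JSON entries above.
    """
    event = base + rank
    if all_banks:
        umask = 0x10
    elif bank_group is not None:
        umask = 0x11 + bank_group
    elif bank is not None:
        umask = bank
    else:
        raise ValueError("select a bank, a bank group, or all banks")
    return event, umask

# Example: RD_CAS to rank 3, bank group 2 (banks 8-11) -> ('0xb3', '0x13'),
# matching UNC_M_RD_CAS_RANK3.BANKG2 above.
print(tuple(hex(x) for x in cas_event(3, bank_group=2)))
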
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Allocations",
- "Counter": "0,1,2,3",
"EventCode": "0x10",
"EventName": "UNC_M_RPQ_INSERTS",
"PerPkg": "1",
- "PublicDescription": "Counts the number of read requests allocated into the Read Pending Queue (RPQ). This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. The requests deallocate after the read CAS command has been issued to DRAM. This event counts both Isochronous and non-Isochronous requests which were issued to the RPQ. ",
+ "PublicDescription": "Counts the number of read requests allocated into the Read Pending Queue (RPQ). This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. The requests deallocate after the read CAS command has been issued to DRAM. This event counts both Isochronous and non-Isochronous requests which were issued to the RPQ.",
"Unit": "iMC"
},
{
"BriefDescription": "Read Pending Queue Occupancy",
- "Counter": "0,1,2,3",
"EventCode": "0x80",
"EventName": "UNC_M_RPQ_OCCUPANCY",
"PerPkg": "1",
@@ -152,8 +1900,47 @@
"Unit": "iMC"
},
{
+ "BriefDescription": "Transition from WMM to RMM because of low threshold; Transition from WMM to RMM because of starve counter",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.LOW_THRESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.STARVE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Transition from WMM to RMM because of low threshold",
+ "EventCode": "0xC0",
+ "EventName": "UNC_M_WMM_TO_RMM.VMSE_RETRY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "Unit": "iMC"
+ },
+ {
"BriefDescription": "Write Pending Queue Allocations",
- "Counter": "0,1,2,3",
"EventCode": "0x20",
"EventName": "UNC_M_WPQ_INSERTS",
"PerPkg": "1",
@@ -162,11 +1949,1369 @@
},
{
"BriefDescription": "Write Pending Queue Occupancy",
- "Counter": "0,1,2,3",
"EventCode": "0x81",
"EventName": "UNC_M_WPQ_OCCUPANCY",
"PerPkg": "1",
- "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests.",
+ "PublicDescription": "Counts the number of entries in the Write Pending Queue (WPQ) at each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule writes out to the memory controller and to track the requests. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC (memory controller). They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have 'posted' to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the 'not posted' filter, we can track how long writes spent in the iMC before completions were sent to the HA. The 'posted' filter, on the other hand, provides information about how much queueing is actually happening in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts. Is there a filter of sorts?",
+ "Unit": "iMC"
+ },
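
The RPQ and WPQ descriptions above spell out the usual queueing arithmetic: average occupancy is the occupancy accumulation divided by the cycles the queue was not empty, and average residency is the accumulation divided by the number of allocations (with the caveat that, for the WPQ, 'posted' completion means this is not a true DRAM write latency). A small sketch with made-up counter readings, purely to make the ratios explicit; the numbers are illustrative, not measured:

# Hypothetical raw counts for one iMC channel (illustrative numbers only).
wpq_occupancy_sum = 1_200_000   # UNC_M_WPQ_OCCUPANCY, summed per cycle
wpq_cycles_ne     = 300_000     # UNC_M_WPQ_CYCLES_NE
wpq_inserts       = 50_000      # UNC_M_WPQ_INSERTS

avg_occupancy_when_busy = wpq_occupancy_sum / wpq_cycles_ne   # entries, while non-empty
avg_residency_cycles    = wpq_occupancy_sum / wpq_inserts     # cycles per allocation

print(f"avg WPQ occupancy while not empty: {avg_occupancy_when_busy:.1f} entries")
print(f"avg cycles a write spends in the WPQ: {avg_residency_cycles:.1f}")
# The same ratios apply to the RPQ counters (UNC_M_RPQ_OCCUPANCY,
# UNC_M_RPQ_CYCLES_NE, UNC_M_RPQ_INSERTS).
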
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "Unit": "iMC"
+ },
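
The two WPQ CAM-match events above describe the same check from the read and write side: before a request is issued, the iMC CAMs the WPQ for a pending write to the same address; read hits pull data straight from the WPQ, write hits overwrite the pending data, and partial-write hits skip the underfill read. A toy Python sketch that restates that decision, with invented data structures purely for illustration:

# Toy model of the WPQ CAM check behind UNC_M_WPQ_READ_HIT / UNC_M_WPQ_WRITE_HIT.
# 'wpq' maps a line address to its pending write data (illustrative only).
def issue_request(wpq, addr, kind, data=None):
    hit = addr in wpq
    if kind == "read":
        # Read hit: data is forwarded from the WPQ instead of going to DRAM.
        return wpq[addr] if hit else "read from DRAM"
    if kind == "write":
        # Write hit: the pending entry is simply overwritten with the new data.
        wpq[addr] = data
        return "overwrote pending WPQ entry" if hit else "new WPQ entry"
    if kind == "partial_write":
        wpq[addr] = data
        return ("partial merge in WPQ, no underfill read" if hit
                else "underfill read from DRAM, then merge")

wpq = {0x1000: b"old line"}
print(issue_request(wpq, 0x1000, "read"))                  # forwarded from the WPQ
print(issue_request(wpq, 0x2000, "partial_write", b"x"))   # miss: needs underfill read
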
+ {
+ "BriefDescription": "Not getting the requested Major Mode",
+ "EventCode": "0xC1",
+ "EventName": "UNC_M_WRONG_MM",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; All Banks",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 0",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 1",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 10",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 11",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 12",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 13",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 14",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 15",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 2",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 3",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 4",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 5",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 6",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 7",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 8",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank 9",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 0; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M_WR_CAS_RANK0.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; All Banks",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 0",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 1",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 10",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 11",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 12",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 13",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 14",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 15",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 2",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 3",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 4",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 5",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 6",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 7",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 8",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank 9",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 1; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M_WR_CAS_RANK1.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; All Banks",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 0",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 1",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 10",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 11",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 12",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 13",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 14",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 15",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 2",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 3",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 4",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 5",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 6",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 7",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 8",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank 9",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 2; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M_WR_CAS_RANK2.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; All Banks",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 0",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 1",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 10",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 11",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 12",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 13",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 14",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 15",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 2",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 3",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 4",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 5",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 6",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 7",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 8",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank 9",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 3; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M_WR_CAS_RANK3.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; All Banks",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 0",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 1",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 10",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 11",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 12",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 13",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 14",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 15",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 2",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 3",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 4",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 5",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 6",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 7",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 8",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank 9",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 4; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBC",
+ "EventName": "UNC_M_WR_CAS_RANK4.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; All Banks",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 0",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 1",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 10",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 11",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 12",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 13",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 14",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 15",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 2",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 3",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 4",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 5",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 6",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 7",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 8",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank 9",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 5; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBD",
+ "EventName": "UNC_M_WR_CAS_RANK5.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; All Banks",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 0",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 1",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 10",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 11",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 12",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 13",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 14",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 15",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 2",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 3",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 4",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 5",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 6",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 7",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 8",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank 9",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 6; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBE",
+ "EventName": "UNC_M_WR_CAS_RANK6.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; All Banks",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.ALLBANKS",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 0",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK0",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 1",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK1",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 10",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK10",
+ "PerPkg": "1",
+ "UMask": "0xa",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 11",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK11",
+ "PerPkg": "1",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 12",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK12",
+ "PerPkg": "1",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 13",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK13",
+ "PerPkg": "1",
+ "UMask": "0xd",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 14",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK14",
+ "PerPkg": "1",
+ "UMask": "0xe",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 15",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK15",
+ "PerPkg": "1",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 2",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK2",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 3",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK3",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 4",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK4",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 5",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK5",
+ "PerPkg": "1",
+ "UMask": "0x5",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 6",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK6",
+ "PerPkg": "1",
+ "UMask": "0x6",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 7",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK7",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 8",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK8",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank 9",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANK9",
+ "PerPkg": "1",
+ "UMask": "0x9",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 0 (Banks 0-3)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG0",
+ "PerPkg": "1",
+ "UMask": "0x11",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 1 (Banks 4-7)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG1",
+ "PerPkg": "1",
+ "UMask": "0x12",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 2 (Banks 8-11)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG2",
+ "PerPkg": "1",
+ "UMask": "0x13",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "WR_CAS Access to Rank 7; Bank Group 3 (Banks 12-15)",
+ "EventCode": "0xBF",
+ "EventName": "UNC_M_WR_CAS_RANK7.BANKG3",
+ "PerPkg": "1",
+ "UMask": "0x14",
"Unit": "iMC"
}
]
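As a rough illustration of how the EventCode/UMask pairs in the tables above are consumed, the sketch below combines them into a raw perf config value. It assumes the common Intel uncore register layout (event code in bits 0-7, umask in bits 8-15) and a hypothetical local copy of the event table saved as uncore-memory.json; both assumptions should be checked against /sys/bus/event_source/devices/uncore_imc_*/format on the target system.

import json

def raw_config(event):
    # Combine EventCode and UMask into a raw config value, assuming the
    # usual Intel uncore layout: event code in bits 0-7, umask in bits 8-15.
    code = int(event["EventCode"], 16)
    umask = int(event.get("UMask", "0x0"), 16)
    return code | (umask << 8)

# "uncore-memory.json" is a hypothetical local copy of the event table above.
with open("uncore-memory.json") as f:
    events = json.load(f)

for ev in events:
    if ev.get("EventName", "").startswith("UNC_M_WR_CAS_RANK7."):
        print("%s: config=0x%x (%s)" % (ev["EventName"], raw_config(ev), ev["BriefDescription"]))

The resulting values correspond to raw uncore events such as uncore_imc/event=0xbf,umask=0x11/ in perf stat, subject to the event formats the kernel actually exposes on the machine.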
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
deleted file mode 100644
index adb42c72f5c8..000000000000
--- a/tools/perf/pmu-events/arch/x86/skylakex/uncore-other.json
+++ /dev/null
@@ -1,1156 +0,0 @@
-[
- {
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
- "EventName": "UNC_CHA_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "config1=0x40e33",
- "PerPkg": "1",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "config1=0x40040e33",
- "PerPkg": "1",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "config1=0x40041e33",
- "PerPkg": "1",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "config1=0x41833",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "config1=0x41a33",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x21",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "read requests from home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS",
- "PerPkg": "1",
- "UMask": "0x03",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "read requests from local home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS_LOCAL",
- "PerPkg": "1",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "read requests from remote home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.READS_REMOTE",
- "PerPkg": "1",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "write requests from home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES",
- "PerPkg": "1",
- "UMask": "0x0C",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "write requests from local home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES_LOCAL",
- "PerPkg": "1",
- "UMask": "0x04",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "write requests from remote home agent",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.WRITES_REMOTE",
- "PerPkg": "1",
- "UMask": "0x08",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UPI_DATA_BANDWIDTH_TX",
- "PerPkg": "1",
- "ScaleUnit": "7.11E-06Bytes",
- "UMask": "0x0F",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_READ",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
- "Counter": "0,1",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Core Cross Snoops Issued; Multiple Core Requests",
- "Counter": "0,1,2,3",
- "EventCode": "0x33",
- "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
- "UMask": "0x42",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Core Cross Snoops Issued; Multiple Eviction",
- "Counter": "0,1,2,3",
- "EventCode": "0x33",
- "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
- "PerPkg": "1",
- "PublicDescription": "Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
- "UMask": "0x82",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Not Needed",
- "Counter": "0,1,2,3",
- "EventCode": "0x53",
- "EventName": "UNC_CHA_DIR_LOOKUP.NO_SNP",
- "PerPkg": "1",
- "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and therefore did not send a snoop because the Directory indicated it was not needed",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory state lookups; Snoop Needed",
- "Counter": "0,1,2,3",
- "EventCode": "0x53",
- "EventName": "UNC_CHA_DIR_LOOKUP.SNP",
- "PerPkg": "1",
- "PublicDescription": "Counts transactions that looked into the multi-socket cacheline Directory state, and sent one or more snoops, because the Directory indicated it was needed",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from the HA pipe",
- "Counter": "0,1,2,3",
- "EventCode": "0x54",
- "EventName": "UNC_CHA_DIR_UPDATE.HA",
- "PerPkg": "1",
- "PublicDescription": "Counts only multi-socket cacheline Directory state updates memory writes issued from the HA pipe. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory state updates; Directory Updated memory write from TOR pipe",
- "Counter": "0,1,2,3",
- "EventCode": "0x54",
- "EventName": "UNC_CHA_DIR_UPDATE.TOR",
- "PerPkg": "1",
- "PublicDescription": "Counts only multi-socket cacheline Directory state updates due to memory writes issued from the TOR pipe which are the result of remote transaction hitting the SF/LLC and returning data Core2Core. This does not include memory write requests which are for I (Invalid) or E (Exclusive) cachelines.",
- "UMask": "0x02",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Read request from a remote socket which hit in the HitMe Cache to a line In the E state",
- "Counter": "0,1,2,3",
- "EventCode": "0x5F",
- "EventName": "UNC_CHA_HITME_HIT.EX_RDS",
- "PerPkg": "1",
- "PublicDescription": "Counts read requests from a remote socket which hit in the HitME cache (used to cache the multi-socket Directory state) to a line in the E(Exclusive) state. This includes the following read opcodes (RdCode, RdData, RdDataMigratory, RdCur, RdInv*, Inv*)",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
- "Counter": "0,1,2,3",
- "EventCode": "0x59",
- "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
- "PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "CHA to iMC Full Line Writes Issued; Full Line Non-ISOCH",
- "Counter": "0,1,2,3",
- "EventCode": "0x5B",
- "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
- "PerPkg": "1",
- "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to the any of the memory controller channels.",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Number of times that an RFO hit in S state.",
- "Counter": "0,1,2,3",
- "EventCode": "0x39",
- "EventName": "UNC_CHA_MISC.RFO_HIT_S",
- "PerPkg": "1",
- "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
- "UMask": "0x08",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.INVITOE_LOCAL",
- "PerPkg": "1",
- "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
- "UMask": "0x10",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Local requests for exclusive ownership of a cache line without receiving data",
- "Counter": "0,1,2,3",
- "EventCode": "0x50",
- "EventName": "UNC_CHA_REQUESTS.INVITOE_REMOTE",
- "PerPkg": "1",
- "PublicDescription": "Counts the total number of requests coming from a remote socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
- "UMask": "0x20",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "RspCnflct* Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPCNFLCTS",
- "PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspCnflct* Snoop Response was received. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent. This triggers conflict resolution hardware. This covers both the opcode RspCnflct and RspCnflctWbI.",
- "UMask": "0x40",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "RspI Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPI",
- "PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type RspI Snoop Response was received which indicates the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO: the Read for Ownership issued before a write hits non-modified data).",
- "UMask": "0x01",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "RspIFwd Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPIFWD",
- "PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspIFwd Snoop Response was received which indicates a remote caching agent forwarded the data and the requesting agent is able to acquire the data in E (Exclusive) or M (modified) states. This is commonly returned with RFO (the Read for Ownership issued before a write) transactions. The snoop could have either been to a cacheline in the M,E,F (Modified, Exclusive or Forward) states.",
- "UMask": "0x04",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "RspSFwd Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSPSFWD",
- "PerPkg": "1",
- "PublicDescription": "Counts when a a transaction with the opcode type RspSFwd Snoop Response was received which indicates a remote caching agent forwarded the data but held on to its current copy. This is common for data and code reads that hit in a remote socket in E (Exclusive) or F (Forward) state.",
- "UMask": "0x08",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Rsp*Fwd*WB Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSP_FWD_WB",
- "PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*Fwd*WB Snoop Response was received which indicates the data was written back to it's home socket, and the cacheline was forwarded to the requestor socket. This snoop response is only used in >= 4 socket systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to it's home socket to be written back to memory.",
- "UMask": "0x20",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Rsp*WB Snoop Responses Received",
- "Counter": "0,1,2,3",
- "EventCode": "0x5C",
- "EventName": "UNC_CHA_SNOOP_RESP.RSP_WBWB",
- "PerPkg": "1",
- "PublicDescription": "Counts when a transaction with the opcode type Rsp*WB Snoop Response was received which indicates which indicates the data was written back to it's home. This is returned when a non-RFO request hits a cacheline in the Modified state. The Cache can either downgrade the cacheline to a S (Shared) or I (Invalid) state depending on how the system has been configured. This response will also be sent when a cache requests E (Exclusive) ownership of a cache line without receiving data, because the cache must acquire ownership.",
- "UMask": "0x10",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Clockticks of the IIO Traffic Controller",
- "Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_IIO_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Counts clockticks of the 1GHz trafiic controller clock in the IIO unit.",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part0",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) to the MMIO space of a card on IIO Part0. In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part1",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part2",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for 4 bytes made by the CPU to IIO Part3",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every read request for 4 bytes of data made by a unit on the main die (generally a core) to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of 4 bytes made to IIO Part0 by the CPU",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of 4 bytes made to IIO Part1 by the CPU",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of 4 bytes made to IIO Part2 by the CPU ",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of 4 bytes made to IIO Part3 by the CPU ",
- "Counter": "2,3",
- "EventCode": "0xC0",
- "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every write request of 4 bytes of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part0",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) to the MMIO space of a card on IIO Part0. In the general case, part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part1",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) to the MMIO space of a card on IIO Part1. In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part2",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) to the MMIO space of a card on IIO Part2. In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by the CPU to IIO Part3",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by a unit on the main die (generally a core) to the MMIO space of a card on IIO Part3. In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part0 by the CPU",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part0 by a unit on the main die (generally a core). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part1 by the CPU",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part1 by a unit on the main die (generally a core). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part2 by the CPU ",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part2 by a unit on the main die (generally a core). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made to IIO Part3 by the CPU ",
- "Counter": "0,1,2,3",
- "EventCode": "0xC1",
- "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made to the MMIO space of a card on IIO Part3 by a unit on the main die (generally a core). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part0 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part1 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part2 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Read request for up to a 64 byte transaction is made by IIO Part3 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every read request for up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part0 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x01",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part0 to a unit on the main die (generally memory). In the general case, Part0 refers to a standard PCIe card of any size (x16,x8,x4) that is plugged directly into one of the PCIe slots. Part0 could also refer to any device plugged into the first slot of a PCIe riser card or to a device attached to the IIO unit which starts its use of the bus using lane 0 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part1 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part1 to a unit on the main die (generally memory). In the general case, Part1 refers to a x4 PCIe card plugged into the second slot of a PCIe riser card, but it could refer to any x4 device attached to the IIO unit using lanes starting at lane 4 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part2 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part2 to a unit on the main die (generally memory). In the general case, Part2 refers to a x4 or x8 PCIe card plugged into the third slot of a PCIe riser card, but it could refer to any x4 or x8 device attached to the IIO unit and using lanes starting at lane 8 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Write request of up to a 64 byte transaction is made by IIO Part3 to Memory",
- "Counter": "0,1,2,3",
- "EventCode": "0x84",
- "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "PublicDescription": "Counts every write request of up to a 64 byte transaction of data made by IIO Part3 to a unit on the main die (generally memory). In the general case, Part3 refers to a x4 PCIe card plugged into the fourth slot of a PCIe riser card, but it could brefer to any device attached to the IIO unit using the lanes starting at lane 12 of the 16 lanes supported by the bus.",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "Traffic in which the M2M to iMC Bypass was not taken",
- "Counter": "0,1,2,3",
- "EventCode": "0x22",
- "EventName": "UNC_M2M_BYPASS_M2M_Egress.NOT_TAKEN",
- "PerPkg": "1",
- "PublicDescription": "Counts traffic in which the M2M (Mesh to Memory) to iMC (Memory Controller) bypass was not taken",
- "UMask": "0x2",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Cycles when direct to core mode (which bypasses the CHA) was disabled",
- "Counter": "0,1,2,3",
- "EventCode": "0x24",
- "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when direct to core mode (which bypasses the CHA) was disabled",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Messages sent direct to core (bypassing the CHA)",
- "Counter": "0,1,2,3",
- "EventCode": "0x23",
- "EventName": "UNC_M2M_DIRECT2CORE_TAKEN",
- "PerPkg": "1",
- "PublicDescription": "Counts when messages were sent direct to core (bypassing the CHA)",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Number of reads in which direct to core transaction were overridden",
- "Counter": "0,1,2,3",
- "EventCode": "0x25",
- "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
- "PerPkg": "1",
- "PublicDescription": "Counts reads in which direct to core transactions (which would have bypassed the CHA) were overridden",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Number of reads in which direct to Intel UPI transactions were overridden",
- "Counter": "0,1,2,3",
- "EventCode": "0x28",
- "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_CREDITS",
- "PerPkg": "1",
- "PublicDescription": "Counts reads in which direct to Intel Ultra Path Interconnect (UPI) transactions (which would have bypassed the CHA) were overridden",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Cycles when direct to Intel UPI was disabled",
- "Counter": "0,1,2,3",
- "EventCode": "0x27",
- "EventName": "UNC_M2M_DIRECT2UPI_NOT_TAKEN_DIRSTATE",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when the ability to send messages direct to the Intel Ultra Path Interconnect (bypassing the CHA) was disabled",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Messages sent direct to the Intel UPI",
- "Counter": "0,1,2,3",
- "EventCode": "0x26",
- "EventName": "UNC_M2M_DIRECT2UPI_TAKEN",
- "PerPkg": "1",
- "PublicDescription": "Counts when messages were sent direct to the Intel Ultra Path Interconnect (bypassing the CHA)",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Number of reads that a message sent direct2 Intel UPI was overridden",
- "Counter": "0,1,2,3",
- "EventCode": "0x29",
- "EventName": "UNC_M2M_DIRECT2UPI_TXN_OVERRIDE",
- "PerPkg": "1",
- "PublicDescription": "Counts when a read message that was sent direct to the Intel Ultra Path Interconnect (bypassing the CHA) was overridden",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory lookups (any state found)",
- "Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.ANY",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in Any State (A, I, S or unused)",
- "UMask": "0x1",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory lookups (cacheline found in A state) ",
- "Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_A",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state, and found the cacheline marked in the A (SnoopAll) state, indicating the cacheline is stored in another socket in any state, and we must snoop the other sockets to make sure we get the latest data. The data may be stored in any state in the local socket.",
- "UMask": "0x8",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in I state) ",
- "Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_I",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the I (Invalid) state indicating the cacheline is not stored in another socket, and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
- "UMask": "0x2",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory lookup (cacheline found in S state) ",
- "Counter": "0,1,2,3",
- "EventCode": "0x2D",
- "EventName": "UNC_M2M_DIRECTORY_LOOKUP.STATE_S",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) looks into the multi-socket cacheline Directory state , and found the cacheline marked in the S (Shared) state indicating the cacheline is either stored in another socket in the S(hared) state , and so there is no need to snoop the other sockets for the latest data. The data may be stored in any state in the local socket.",
- "UMask": "0x4",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from A to I",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2I",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to I (Invalid)",
- "UMask": "0x20",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from A to S",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.A2S",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from A (SnoopAll) to S (Shared)",
- "UMask": "0x40",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from/to Any state ",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.ANY",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory to a new state",
- "UMask": "0x1",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from I to A",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2A",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to A (SnoopAll)",
- "UMask": "0x4",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from I to S",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.I2S",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from I (Invalid) to S (Shared)",
- "UMask": "0x2",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from S to A",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2A",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to A (SnoopAll)",
- "UMask": "0x10",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Multi-socket cacheline Directory update from S to I",
- "Counter": "0,1,2,3",
- "EventCode": "0x2E",
- "EventName": "UNC_M2M_DIRECTORY_UPDATE.S2I",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) updates the multi-socket cacheline Directory state from from S (Shared) to I (Invalid)",
- "UMask": "0x8",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Reads to iMC issued",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.ALL",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller). ",
- "UMask": "0x4",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Reads to iMC issued at Normal Priority (Non-Isochronous)",
- "Counter": "0,1,2,3",
- "EventCode": "0x37",
- "EventName": "UNC_M2M_IMC_READS.NORMAL",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues reads to the iMC (Memory Controller). It only counts normal priority non-isochronous reads.",
- "UMask": "0x1",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Writes to iMC issued",
- "Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.ALL",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues writes to the iMC (Memory Controller).",
- "UMask": "0x10",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Partial Non-Isochronous writes to the iMC",
- "Counter": "0,1,2,3",
- "EventCode": "0x38",
- "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) issues partial writes to the iMC (Memory Controller). It only counts normal priority non-isochronous writes.",
- "UMask": "0x2",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Prefecth requests that got turn into a demand request",
- "Counter": "0,1,2,3",
- "EventCode": "0x56",
- "EventName": "UNC_M2M_PREFCAM_DEMAND_PROMOTIONS",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) promotes a outstanding request in the prefetch queue due to a subsequent demand read request that entered the M2M with the same address. Explanatory Side Note: The Prefecth queue is made of CAM (Content Addressable Memory)",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Inserts into the Memory Controller Prefetch Queue",
- "Counter": "0,1,2,3",
- "EventCode": "0x57",
- "EventName": "UNC_M2M_PREFCAM_INSERTS",
- "PerPkg": "1",
- "PublicDescription": "Counts when the M2M (Mesh to Memory) receives a prefetch request and inserts it into its outstanding prefetch queue. Explanatory Side Note: the prefect queue is made from CAM: Content Addressable Memory",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "AD Ingress (from CMS) Queue Inserts",
- "Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_M2M_RxC_AD_INSERTS",
- "PerPkg": "1",
- "PublicDescription": "Counts when the a new entry is Received(RxC) and then added to the AD (Address Ring) Ingress Queue from the CMS (Common Mesh Stop). This is generally used for reads, and ",
- "Unit": "M2M"
- },
- {
- "BriefDescription": "Prefetches generated by the flow control queue of the M3UPI unit.",
- "Counter": "0,1,2,3",
- "EventCode": "0x29",
- "EventName": "UNC_M3UPI_UPI_PREFETCH_SPAWN",
- "PerPkg": "1",
- "PublicDescription": "Count cases where flow control queue that sits between the Intel Ultra Path Interconnect (UPI) and the mesh spawns a prefetch to the iMC (Memory Controller)",
- "Unit": "M3UPI"
- },
- {
- "BriefDescription": "Clocks of the Intel Ultra Path Interconnect (UPI)",
- "Counter": "0,1,2,3",
- "EventCode": "0x1",
- "EventName": "UNC_UPI_CLOCKTICKS",
- "PerPkg": "1",
- "PublicDescription": "Counts clockticks of the fixed frequency clock controlling the Intel Ultra Path Interconnect (UPI). This clock runs at1/8th the 'GT/s' speed of the UPI link. For example, a 9.6GT/s link will have a fixed Frequency of 1.2 Ghz.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Data Response packets that go direct to core",
- "Counter": "0,1,2,3",
- "EventCode": "0x12",
- "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2C",
- "PerPkg": "1",
- "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to core bypassing the CHA.",
- "UMask": "0x1",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Data Response packets that go direct to Intel UPI",
- "Counter": "0,1,2,3",
- "EventCode": "0x12",
- "EventName": "UNC_UPI_DIRECT_ATTEMPTS.D2U",
- "PerPkg": "1",
- "PublicDescription": "Counts Data Response (DRS) packets that attempted to go direct to Intel Ultra Path Interconnect (UPI) bypassing the CHA .",
- "UMask": "0x2",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Cycles Intel UPI is in L1 power mode (shutdown)",
- "Counter": "0,1,2,3",
- "EventCode": "0x21",
- "EventName": "UNC_UPI_L1_POWER_CYCLES",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when the Intel Ultra Path Interconnect (UPI) is in L1 power mode. L1 is a mode that totally shuts down the UPI link. Link power states are per link and per direction, so for example the Tx direction could be in one state while Rx was in another, this event only coutns when both links are shutdown.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Cycles the Rx of the Intel UPI is in L0p power mode",
- "Counter": "0,1,2,3",
- "EventCode": "0x25",
- "EventName": "UNC_UPI_RxL0P_POWER_CYCLES",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when the the receive side (Rx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
- "Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT0",
- "PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot0 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x1",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
- "Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT1",
- "PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot1 RxQ buffer (Receive Queue) and passed directly across the BGF and into the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x2",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "FLITs received which bypassed the Slot0 Receive Buffer",
- "Counter": "0,1,2,3",
- "EventCode": "0x31",
- "EventName": "UNC_UPI_RxL_BYPASSED.SLOT2",
- "PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the slot2 RxQ buffer (Receive Queue) and passed directly to the Egress. This is a latency optimization, and should generally be the common case. If this value is less than the number of FLITs transferred, it implies that there was queueing getting onto the ring, and thus the transactions saw higher latency.",
- "UMask": "0x4",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Valid data FLITs received from any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_DATA",
- "PerPkg": "1",
- "PublicDescription": "Counts valid data FLITs (80 bit FLow control unITs: 64bits of data) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
- "UMask": "0x0F",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Null FLITs received from any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.ALL_NULL",
- "PerPkg": "1",
- "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) received from any of the 3 Intel Ultra Path Interconnect (UPI) Receive Queue slots on this UPI unit.",
- "UMask": "0x27",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Protocol header and credit FLITs received from any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x3",
- "EventName": "UNC_UPI_RxL_FLITS.NON_DATA",
- "PerPkg": "1",
- "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) received from any of the 3 UPI slots on this UPI unit.",
- "UMask": "0x97",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Cycles in which the Tx of the Intel Ultra Path Interconnect (UPI) is in L0p power mode",
- "Counter": "0,1,2,3",
- "EventCode": "0x27",
- "EventName": "UNC_UPI_TxL0P_POWER_CYCLES",
- "PerPkg": "1",
- "PublicDescription": "Counts cycles when the transmit side (Tx) of the Intel Ultra Path Interconnect(UPI) is in L0p power mode. L0p is a mode where we disable 60% of the UPI lanes, decreasing our bandwidth in order to save power.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "FLITs that bypassed the TxL Buffer",
- "Counter": "0,1,2,3",
- "EventCode": "0x41",
- "EventName": "UNC_UPI_TxL_BYPASSED",
- "PerPkg": "1",
- "PublicDescription": "Counts incoming FLITs (FLow control unITs) which bypassed the TxL(transmit) FLIT buffer and pass directly out the UPI Link. Generally, when data is transmitted across the Intel Ultra Path Interconnect (UPI), it will bypass the TxQ and pass directly to the link. However, the TxQ will be used in L0p (Low Power) mode and (Link Layer Retry) LLR mode, increasing latency to transfer out to the link.",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "UPI interconnect send bandwidth for payload. Derived from unc_upi_txl_flits.all_data",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UPI_DATA_BANDWIDTH_TX",
- "PerPkg": "1",
- "ScaleUnit": "7.11E-06Bytes",
- "UMask": "0x0F",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Null FLITs transmitted from any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.ALL_NULL",
- "PerPkg": "1",
- "PublicDescription": "Counts null FLITs (80 bit FLow control unITs) transmitted via any of the 3 Intel Ulra Path Interconnect (UPI) slots on this UPI unit.",
- "UMask": "0x27",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Idle FLITs transmitted",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.IDLE",
- "PerPkg": "1",
- "PublicDescription": "Counts when the Intel Ultra Path Interconnect(UPI) transmits an idle FLIT(80 bit FLow control unITs). Every UPI cycle must be sending either data FLITs, protocol/credit FLITs or idle FLITs.",
- "UMask": "0x47",
- "Unit": "UPI LL"
- },
- {
- "BriefDescription": "Protocol header and credit FLITs transmitted across any slot",
- "Counter": "0,1,2,3",
- "EventCode": "0x2",
- "EventName": "UNC_UPI_TxL_FLITS.NON_DATA",
- "PerPkg": "1",
- "PublicDescription": "Counts protocol header and credit FLITs (80 bit FLow control unITs) transmitted across any of the 3 UPI (Ultra Path Interconnect) slots on this UPI unit.",
- "UMask": "0x97",
- "Unit": "UPI LL"
- }
-]
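The UNC_UPI_CLOCKTICKS description above fixes a simple relationship: the UPI link layer runs off a constant clock at 1/8th of the link's GT/s transfer rate, so a 9.6 GT/s link ticks at 1.2 GHz and a raw clocktick count converts directly into wall time. A minimal Python sketch of that arithmetic, assuming only the 1/8th ratio stated in the event description and a clocktick reading taken over one measurement interval (the function names here are illustrative, not part of perf):

# upi_clock.py - turn a UNC_UPI_CLOCKTICKS reading into elapsed wall time.
# Assumption from the event description: the fixed UPI clock is 1/8th of the
# link transfer rate, e.g. 9.6 GT/s -> 1.2 GHz.

def upi_fixed_clock_hz(link_gt_per_s: float) -> float:
    """Fixed-frequency UPI clock in Hz for a link running at link_gt_per_s GT/s."""
    return link_gt_per_s * 1e9 / 8.0

def upi_interval_seconds(clockticks: int, link_gt_per_s: float) -> float:
    """Wall time covered by a UNC_UPI_CLOCKTICKS count on that link."""
    return clockticks / upi_fixed_clock_hz(link_gt_per_s)

if __name__ == "__main__":
    print(upi_fixed_clock_hz(9.6))                    # 1.2e9 Hz
    print(upi_interval_seconds(1_200_000_000, 9.6))   # ~1.0 s

The resulting interval is the natural denominator when turning the RxL/TxL FLIT counts above into rates.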
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json b/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json
new file mode 100644
index 000000000000..c6254af7a468
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/skylakex/uncore-power.json
@@ -0,0 +1,196 @@
+[
+ {
+ "BriefDescription": "pclk Cycles",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "The PCU runs off a fixed 1 GHz clock. This event counts the number of pclk cycles measured while the counter was enabled. The pclk, like the Memory Controller's dclk, counts at a constant rate making it a good measure of actual wall time.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent in phase-shedding power state 0",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent in phase-shedding power state 1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent in phase-shedding power state 2",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent in phase-shedding power state 3",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "EventCode": "0x4",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when thermal conditions are the upper limit on frequency. This is related to the THERMAL_THROTTLE CYCLES_ABOVE_TEMP event, which always counts cycles when we are above the thermal temperature. This event (STRONGEST_UPPER_LIMIT) is sampled at the output of the algorithm that determines the actual frequency, while THERMAL_THROTTLE looks at the input.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "EventCode": "0x5",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when power is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_MCP_PROCHOT_CYCLES",
+ "EventCode": "0x6",
+ "EventName": "UNC_P_MCP_PROCHOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "EventCode": "0x7",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C0 and C1",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State; C6 and C7",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with thresholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0xA",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x9",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ }
+]
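UNC_P_CLOCKTICKS above is described as a fixed 1 GHz clock that tracks wall time, and the UNC_P_POWER_STATE_OCCUPANCY.* events are occupancy counters for cores in a chosen C-state; dividing one by the other gives the average number of cores resident in that state over the interval. A minimal sketch, assuming the occupancy event accumulates the per-cycle core count (the usual behaviour of uncore occupancy events) and that both counts cover the same interval; names are illustrative only:

# pcu_cstate.py - average core C-state residency from PCU counts.

def avg_cores_in_state(occupancy_count: int, pcu_clockticks: int) -> float:
    """Average number of cores in the chosen C-state over the interval.

    occupancy_count: UNC_P_POWER_STATE_OCCUPANCY.CORES_C0/_C3/_C6 reading,
                     assumed to add the current core count once per pclk cycle.
    pcu_clockticks:  UNC_P_CLOCKTICKS over the same interval (fixed 1 GHz pclk).
    """
    if pcu_clockticks == 0:
        raise ValueError("need a non-zero UNC_P_CLOCKTICKS sample")
    return occupancy_count / pcu_clockticks

if __name__ == "__main__":
    # Over a 1 s window (1e9 pclk cycles) the C0 occupancy counter reached
    # 12e9, i.e. on average 12 cores were in C0.
    print(avg_cores_in_state(12_000_000_000, 1_000_000_000))  # 12.0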
diff --git a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
index 7f466c97e485..f59405877ae8 100644
--- a/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
+++ b/tools/perf/pmu-events/arch/x86/skylakex/virtual-memory.json
@@ -1,284 +1,228 @@
[
{
- "EventCode": "0x08",
- "UMask": "0x1",
"BriefDescription": "Load misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
+ "EventCode": "0x08",
"EventName": "DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data loads that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
"EventCode": "0x08",
- "UMask": "0x2",
- "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"EventCode": "0x08",
- "UMask": "0x4",
- "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x08",
- "UMask": "0x8",
- "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts page walks completed due to demand data loads whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Page walk completed due to a demand data load to a 1G page",
"EventCode": "0x08",
- "UMask": "0xe",
- "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts demand data loads that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Page walk completed due to a demand data load to a 2M/4M page",
"EventCode": "0x08",
- "UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Page walk completed due to a demand data load to a 4K page",
"EventCode": "0x08",
- "UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a load.",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake.",
"EventCode": "0x08",
- "UMask": "0x20",
- "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
- "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a load. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0x49",
- "UMask": "0x1",
"BriefDescription": "Store misses in all DTLB levels that cause page walks",
- "Counter": "0,1,2,3",
+ "EventCode": "0x49",
"EventName": "DTLB_STORE_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts demand data stores that caused a page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
"EventCode": "0x49",
- "UMask": "0x2",
- "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"EventCode": "0x49",
- "UMask": "0x4",
- "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x49",
- "UMask": "0x8",
- "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 1G pages. The page walks can end with or without a page fault.",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Page walk completed due to a demand data store to a 1G page",
"EventCode": "0x49",
- "UMask": "0xe",
- "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts demand data stores that caused a completed page walk of any page size (4K/2M/4M/1G). This implies it missed in all TLB levels. The page walk can end with or without a fault.",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Page walk completed due to a demand data store to a 2M/4M page",
"EventCode": "0x49",
- "UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
- "SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Page walk completed due to a demand data store to a 4K page",
"EventCode": "0x49",
- "UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
- "CounterMask": "1",
- "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data stores. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake.",
"EventCode": "0x49",
- "UMask": "0x20",
- "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
- "Counter": "0,1,2,3",
- "EventName": "DTLB_STORE_MISSES.STLB_HIT",
- "PublicDescription": "Stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
- "SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for a store. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
},
{
- "EventCode": "0x4F",
- "UMask": "0x10",
"BriefDescription": "Counts 1 per cycle for each PMH that is busy with a EPT (Extended Page Table) walk for any request type.",
- "Counter": "0,1,2,3",
+ "EventCode": "0x4f",
"EventName": "EPT.WALK_PENDING",
"PublicDescription": "Counts cycles for each PMH (Page Miss Handler) that is busy with an EPT (Extended Page Table) walk for any request type.",
"SampleAfterValue": "2000003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
+ "EventCode": "0xAE",
+ "EventName": "ITLB.ITLB_FLUSH",
+ "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
},
{
- "EventCode": "0x85",
- "UMask": "0x1",
"BriefDescription": "Misses at all ITLB levels that cause page walks",
- "Counter": "0,1,2,3",
+ "EventCode": "0x85",
"EventName": "ITLB_MISSES.MISS_CAUSES_A_WALK",
"PublicDescription": "Counts page walks of any page size (4K/2M/4M/1G) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB, but the walk need not have completed.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
"EventCode": "0x85",
- "UMask": "0x2",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
- "PublicDescription": "Counts completed page walks (4K page size) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.STLB_HIT",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
},
{
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
+ "CounterMask": "1",
"EventCode": "0x85",
- "UMask": "0x4",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
- "PublicDescription": "Counts code misses in all ITLB levels that caused a completed page walk (2M and 4M page sizes). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
"EventCode": "0x85",
- "UMask": "0x8",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
- "PublicDescription": "Counts store misses in all DTLB levels that cause a completed page walk (1G page size). The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0xe"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (1G)",
"EventCode": "0x85",
- "UMask": "0xe",
- "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_COMPLETED",
- "PublicDescription": "Counts completed page walks (2M and 4M page sizes) caused by a code fetch. This implies it missed in the ITLB and further levels of TLB. The page walk can end with or without a fault.",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts completed page walks (1G page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x8"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
"EventCode": "0x85",
- "UMask": "0x10",
- "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_PENDING",
- "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x4"
},
{
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
"EventCode": "0x85",
- "UMask": "0x10",
- "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.WALK_ACTIVE",
- "CounterMask": "1",
- "PublicDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request. EPT page walk duration are excluded in Skylake microarchitecture.",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x2"
},
{
+ "BriefDescription": "Counts 1 per cycle for each PMH that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake.",
"EventCode": "0x85",
- "UMask": "0x20",
- "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB_MISSES.STLB_HIT",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts 1 per cycle for each PMH (Page Miss Handler) that is busy with a page walk for an instruction fetch request. EPT page walk duration are excluded in Skylake michroarchitecture.",
"SampleAfterValue": "100003",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x10"
},
{
- "EventCode": "0xAE",
- "UMask": "0x1",
- "BriefDescription": "Flushing of the Instruction TLB (ITLB) pages, includes 4k/2M/4M pages.",
- "Counter": "0,1,2,3",
- "EventName": "ITLB.ITLB_FLUSH",
- "PublicDescription": "Counts the number of flushes of the big or small ITLB pages. Counting include both TLB Flush (covering all sets) and TLB Set Clear (set-specific).",
- "SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
- },
- {
- "EventCode": "0xBD",
- "UMask": "0x1",
"BriefDescription": "DTLB flush attempts of the thread-specific entries",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.DTLB_THREAD",
"PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x1"
},
{
- "EventCode": "0xBD",
- "UMask": "0x20",
"BriefDescription": "STLB flush attempts",
- "Counter": "0,1,2,3",
+ "EventCode": "0xBD",
"EventName": "TLB_FLUSH.STLB_ANY",
"PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
"SampleAfterValue": "100007",
- "CounterHTOff": "0,1,2,3,4,5,6,7"
+ "UMask": "0x20"
}
-] \ No newline at end of file
+]
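The reshuffled TLB entries above expose three related views per request type: WALK_COMPLETED counts finished page walks, WALK_PENDING counts one per cycle for each busy page-miss handler, and WALK_ACTIVE (WALK_PENDING qualified with CounterMask 1) counts cycles with at least one walk in flight. A back-of-the-envelope Python sketch of the derived metrics this allows, assuming all counts come from the same run; the helper names are illustrative:

# tlb_walk_cost.py - derived page-walk metrics from the Skylake-X TLB events.

def avg_walk_cycles(walk_pending: int, walk_completed: int) -> float:
    """Average page-walk duration in cycles.

    walk_pending:   e.g. DTLB_LOAD_MISSES.WALK_PENDING (1/cycle per busy PMH).
    walk_completed: e.g. DTLB_LOAD_MISSES.WALK_COMPLETED (all page sizes).
    """
    return walk_pending / walk_completed if walk_completed else 0.0

def walk_active_fraction(walk_active: int, cycles: int) -> float:
    """Fraction of cycles with at least one walk of that type in flight.

    walk_active: e.g. DTLB_LOAD_MISSES.WALK_ACTIVE (WALK_PENDING, CounterMask=1).
    cycles:      unhalted core cycles over the same interval.
    """
    return walk_active / cycles if cycles else 0.0

if __name__ == "__main__":
    print(avg_walk_cycles(2_400_000, 80_000))            # 30.0 cycles per walk
    print(walk_active_fraction(1_500_000, 50_000_000))   # 0.03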
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
new file mode 100644
index 000000000000..0ab90e3bf76b
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/cache.json
@@ -0,0 +1,886 @@
+[
+ {
+ "BriefDescription": "Counts the number of core requests (demand and L1 prefetchers) rejected by the L2 queue (L2Q) due to a full condition.",
+ "EventCode": "0x31",
+ "EventName": "CORE_REJECT_L2Q.ANY",
+ "PublicDescription": "Counts the number of (demand and L1 prefetchers) core requests rejected by the L2 queue (L2Q) due to a full or nearly full condition, which likely indicates back pressure from L2Q. It also counts requests that would have gone directly to the External Queue (XQ), but are rejected due to a full or nearly full condition, indicating back pressure from the IDI link. The L2Q may also reject transactions from a core to ensure fairness between cores, or to delay a cores dirty eviction when the address conflicts incoming external snoops. (Note that L2 prefetcher requests that are dropped are not counted by this event). Counts on a per core basis.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches.",
+ "EventCode": "0x51",
+ "EventName": "DL1.DIRTY_EVICTION",
+ "PublicDescription": "Counts the number of L1D cacheline (dirty) evictions caused by load misses, stores, and prefetches. Does not count evictions or dirty writebacks caused by snoops. Does not count a replacement unless a (dirty) line was written back.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition.",
+ "EventCode": "0x30",
+ "EventName": "L2_REJECT_XQ.ANY",
+ "PublicDescription": "Counts the number of demand and prefetch transactions that the External Queue (XQ) rejects due to a full or near full condition which likely indicates back pressure from the IDI link. The XQ may reject transactions from the L2Q (non-cacheable requests), BBL (L2 misses) and WOB (L2 write-back victims).",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the total number of L2 Cache accesses. Counts on a per core basis.",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.ALL",
+ "PublicDescription": "Counts the total number of L2 Cache Accesses, includes hits, misses, rejects front door requests for CRd/DRd/RFO/ItoM/L2 Prefetches only. Counts on a per core basis.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache accesses that resulted in a hit. Counts on a per core basis.",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.HIT",
+ "PublicDescription": "Counts the number of L2 Cache accesses that resulted in a hit from a front door request only (does not include rejects or recycles), Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache accesses that resulted in a miss. Counts on a per core basis.",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.MISS",
+ "PublicDescription": "Counts the number of L2 Cache accesses that resulted in a miss from a front door request only (does not include rejects or recycles). Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of L2 Cache accesses that miss the L2 and get rejected. Counts on a per core basis.",
+ "EventCode": "0x24",
+ "EventName": "L2_REQUEST.REJECTS",
+ "PublicDescription": "Counts the number of L2 Cache accesses that miss the L2 and get BBL reject short and long rejects (includes those counted in L2_reject_XQ.any). Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that miss in the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts the number of cacheable memory requests that miss in the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of cacheable memory requests that access the LLC. Counts on a per core basis.",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.REFERENCE",
+ "PublicDescription": "Counts the number of cacheable memory requests that access the Last Level Cache (LLC). Requests include demand loads, reads for ownership (RFO), instruction fetches and L1 HW prefetches. If the platform has an L3 cache, the LLC is the L3 cache, otherwise it is the L2 cache. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4f"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH",
+ "SampleAfterValue": "200003",
+ "UMask": "0x38"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_DRAM_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or translation lookaside buffer (TLB) miss which hit in DRAM or MMIO (non-DRAM).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_L2_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to an instruction cache or TLB miss which hit in the LLC or other core with HITE/F/M.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.IFETCH_LLC_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to an instruction cache or Translation Lookaside Buffer (TLB) miss which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in the L2, LLC, DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load miss which hit in DRAM or MMIO (Non-DRAM).",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_DRAM_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the L2 cache.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the LLC or other core with HITE/F/M.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.LOAD_LLC_HIT",
+ "PublicDescription": "Counts the number of cycles the core is stalled due to a demand load which hit in the Last Level Cache (LLC) or other core with HITE/F/M.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the core is stalled due to a store buffer being full.",
+ "EventCode": "0x34",
+ "EventName": "MEM_BOUND_STALLS.STORE_BUFFER_FULL",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in DRAM.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.DRAM_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache, in which a snoop was required and modified data was forwarded from another core or module.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.HITM",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L1 data cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that miss in the L1 data cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L2 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that miss in the L2 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that hit in the L3 cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of memory uops retired. A single uop that performs both a load AND a store will be counted as 1, not 2 (e.g. ADD [mem], CONST)",
+ "SampleAfterValue": "200003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of load uops retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of store uops retired.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that performed one or more locks.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that were splits.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x43"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split load uops.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Counts the number of retired split store uops.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3001F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HITM",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_MISS",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x801F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2001F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x401F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x4003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1003C0477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1F803C0800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x101F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1010003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, but no data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_NO_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1004003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent, the snoop hit, and non-modified data was forwarded.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1008003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where a snoop was sent but the snoop missed.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by the L3 cache where no snoop was needed to satisfy the request.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_HIT.SNOOP_NOT_NEEDED",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1001003C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_HIT",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x201F803C0000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to instruction cache misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ICACHE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ }
+]
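
For orientation, the "EventCode" and "UMask" fields in the entries above map onto the low bits of a raw x86 perf event encoding (event code in bits 0-7, umask in bits 8-15). Below is a minimal Python sketch of that mapping; the raw_event helper is purely illustrative, and the entry used (MEM_UOPS_RETIRED.SPLIT, EventCode 0xd0, UMask 0x43) is taken from the list above.

# Sketch: derive a perf raw-event string from the EventCode/UMask fields above.
# Assumes the conventional x86 PMU layout: bits 0-7 = event code, bits 8-15 = umask.
def raw_event(event_code: str, umask: str = "0x0") -> str:
    config = (int(umask, 16) << 8) | int(event_code, 16)
    return f"r{config:x}"

# MEM_UOPS_RETIRED.SPLIT: EventCode 0xd0, UMask 0x43  ->  r43d0
print(raw_event("0xd0", "0x43"))  # usable as, e.g.: perf stat -e r43d0 -a sleep 1
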
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json b/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
new file mode 100644
index 000000000000..88522244b760
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/floating-point.json
@@ -0,0 +1,26 @@
+[
+ {
+ "BriefDescription": "Counts the number of cycles the floating point divider is busy.",
+ "EventCode": "0xcd",
+ "EventName": "CYCLES_DIV_BUSY.FPDIV",
+ "PublicDescription": "Counts the number of cycles the floating point divider is busy. Does not imply a stall waiting for the divider.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point operations retired that required microcode assist.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.FP_ASSIST",
+ "PublicDescription": "Counts the number of floating point operations retired that required microcode assist, which is not a reflection of the number of FP operations, instructions or uops.",
+ "SampleAfterValue": "20003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of floating point divide uops retired (x87 and SSE, including x87 sqrt).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.FPDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json b/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json
new file mode 100644
index 000000000000..5ba998e06592
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/frontend.json
@@ -0,0 +1,69 @@
+[
+ {
+ "BriefDescription": "Counts the total number of BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the total number of BACLEARS, which occur when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a conditional jump.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.COND",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to an indirect branch.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.INDIRECT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a return branch.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.RETURN",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of BACLEARS due to a direct, unconditional jump.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.UNCOND",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of times a decode restriction reduces the decode throughput due to wrong instruction length prediction.",
+ "EventCode": "0xe9",
+ "EventName": "DECODE_RESTRICTION.PREDECODE_WRONG",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of requests to the instruction cache for one or more bytes of a cache line.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.ACCESSES",
+ "PublicDescription": "Counts the total number of requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line or byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction cache hits.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.HIT",
+ "PublicDescription": "Counts the number of requests that hit in the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of instruction cache misses.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE.MISSES",
+ "PublicDescription": "Counts the number of missed requests to the instruction cache. The event only counts new cache line accesses, so that multiple back to back fetches to the exact same cache line and byte chunk count as one. Specifically, the event counts when accesses from sequential code crosses the cache line boundary, or when a branch target is moved to a new line or to a non-sequential byte chunk of the same line.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
new file mode 100644
index 000000000000..18621909d1a9
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/memory.json
@@ -0,0 +1,358 @@
+[
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering caused by a snoop from an external agent. Does not count internally generated machine clears such as those due to memory disambiguation.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "SampleAfterValue": "20003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of misaligned load uops that are 4K page splits.",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.LOAD_PAGE_SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of misaligned store uops that are 4K page splits.",
+ "EventCode": "0x13",
+ "EventName": "MISALIGN_MEM_REF.STORE_PAGE_SPLIT",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.L3_MISS_LOCAL",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x802184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x802184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2002184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184008000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x402184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x402184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all hardware and software prefetches that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PREFETCHES.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000470",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2184000800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x102184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x102184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_MISS",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x202184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that were not supplied by the L3 cache.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.L3_MISS_LOCAL",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x202184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
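
The OCR.* entries above additionally carry an "MSRValue" that selects the off-core response bits behind the MSRs named by "MSRIndex"; with perf such events are usually expressed through the cpu PMU's offcore_rsp format attribute rather than a plain raw encoding. The Python sketch below shows how one of these JSON objects could be turned into such an event string; the ocr_event_string helper is illustrative only, the offcore_rsp attribute name is an assumption to be checked under /sys/bus/event_source/devices/cpu/format, and the dictionary fields are copied from the OCR.UC_WR.L3_MISS entry above.

# Sketch: turn one OCR JSON entry into a perf event string.
# The offcore_rsp attribute name is an assumption; verify it in sysfs first.
def ocr_event_string(event: dict) -> str:
    parts = [f"event={event['EventCode'].lower()}", f"umask={event['UMask']}"]
    if "MSRValue" in event:
        parts.append(f"offcore_rsp={event['MSRValue']}")
    return "cpu/" + ",".join(parts) + "/"

ocr_uc_wr_l3_miss = {
    "EventCode": "0XB7",
    "UMask": "0x1",
    "MSRValue": "0x202184000000",
}
print(ocr_event_string(ocr_uc_wr_l3_miss))
# -> cpu/event=0xb7,umask=0x1,offcore_rsp=0x202184000000/
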
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/other.json b/tools/perf/pmu-events/arch/x86/snowridgex/other.json
new file mode 100644
index 000000000000..00ae180ded25
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/other.json
@@ -0,0 +1,532 @@
+[
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.SELF_LOCKS",
+ "EdgeDetect": "1",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.ALL",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock issued by other cores.",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.BLOCK_CYCLES",
+ "PublicDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock issued by other cores. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.BLOCK_CYCLES",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.CYCLES_OTHER_BLOCK",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event BUS_LOCK.LOCK_CYCLES",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.CYCLES_SELF_BLOCK",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock it issued.",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.LOCK_CYCLES",
+ "PublicDescription": "Counts the number of unhalted cycles a core is blocked due to an accepted lock it issued. Counts on a per core basis.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of bus locks a core issued its self (e.g. lock to UC or Split Lock) and does not include cache locks.",
+ "EdgeDetect": "1",
+ "EventCode": "0x63",
+ "EventName": "BUS_LOCK.SELF_LOCKS",
+ "PublicDescription": "Counts the number of bus locks a core issued its self (e.g. lock to UC or Split Lock) and does not include cache locks. Counts on a per core basis.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_DRAM_HIT",
+ "EventCode": "0x34",
+ "EventName": "C0_STALLS.LOAD_DRAM_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_L2_HIT",
+ "EventCode": "0x34",
+ "EventName": "C0_STALLS.LOAD_L2_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event MEM_BOUND_STALLS.LOAD_LLC_HIT",
+ "EventCode": "0x34",
+ "EventName": "C0_STALLS.LOAD_LLC_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of core cycles during which interrupts are masked (disabled).",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.MASKED",
+ "PublicDescription": "Counts the number of core cycles during which interrupts are masked (disabled). Increments by 1 each core cycle that EFLAGS.IF is 0, regardless of whether interrupts are pending or not.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled).",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.PENDING_AND_MASKED",
+ "PublicDescription": "Counts the number of core cycles during which there are pending interrupts while interrupts are masked (disabled). Increments by 1 each core cycle that both EFLAGS.IF is 0 and an INTR is pending (which means the APIC is telling the ROB to cause an INTR). This event does not increment if EFLAGS.IF is 0 but all interrupt in the APICs Interrupt Request Register (IRR) are inhibited by the PPR (thus either by ISRV or TPR) because in these cases the interrupts would be held up in the APIC and would not be pended to the ROB. This event does count when an interrupt is only inhibited by MOV/POP SS state machines or the STI state machine. These extra inhibits only last for a single instructions and would not be important.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of hardware interrupts received by the processor.",
+ "EventCode": "0xcb",
+ "EventName": "HW_INTERRUPTS.RECEIVED",
+ "SampleAfterValue": "203",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all code reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.ALL_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000044",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x3000000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache and L2 cache that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.COREWB_M.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8003000000000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand instruction fetches and L1 instruction cache prefetches that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000004",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cacheable demand data reads, L1 data cache hardware prefetches and software prefetches (except PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.ANY_RESPONSE",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.DRAM",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.LOCAL_DRAM",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event OCR.DEMAND_DATA_AND_L1PF_RD.OUTSTANDING",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_DATA_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) and software prefetches for exclusive ownership (PREFETCHW) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.DEMAND_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify a full 64 byte cacheline that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.FULL_STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x800000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L1 data cache hardware prefetches and software prefetches (except PREFETCHW and PFRFO) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L1D_AND_SWPF.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10400",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch code reads (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_CODE_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000040",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch data reads (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_DATA_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000010",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts L2 cache hardware prefetch RFOs (written to the L2 cache only) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.HWPF_L2_RFO.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000020",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writebacks from L1 cache that miss the L2 cache that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L1WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x1000000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts modified writeBacks from L2 cache that miss the L3 cache that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.L2WB_M.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x2000000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts miscellaneous requests, such as I/O accesses, that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.OTHER.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x18000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores which modify only part of a 64 byte cacheline that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PARTIAL_STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x400000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all hardware and software prefetches that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.PREFETCHES.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10470",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x184000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts all data read, code read and RFO requests including demands and prefetches to the core caches (L1 or L2) that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.READS_TO_CORE.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000000000000477",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that were supplied by DRAM.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.LOCAL_DRAM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x100184000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory reads that have an outstanding request. Returns the number of cycles until the response is received (i.e. XQ to XQ latency).",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_RD.OUTSTANDING",
+ "MSRIndex": "0x1a6",
+ "MSRValue": "0x8000100000000000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts uncached memory writes that have any type of response.",
+ "EventCode": "0XB7",
+ "EventName": "OCR.UC_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x200000010000",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
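
Aside: every OCR.* entry above uses EventCode 0xB7 and differs only in the off-core response bits (MSRValue) written to the MSRs listed in MSRIndex (0x1a6 and, for most entries, 0x1a7). The short Python sketch below is an illustration only, not part of this patch: it turns one entry, copied verbatim from the table above, into a perf raw event string, assuming the core PMU exposes the usual Intel "offcore_rsp" format attribute. A perf binary built with these tables can normally take the symbolic EventName directly instead.

    # Illustration only; this helper is hypothetical and not applied by the diff.
    def ocr_event_to_perf(entry):
        event = int(entry["EventCode"], 16)   # 0xB7 for every OCR.* entry above
        umask = int(entry["UMask"], 16)
        rsp = int(entry["MSRValue"], 16)      # response bits for MSR 0x1a6/0x1a7
        # Assumes the core PMU exposes an "offcore_rsp" format attribute,
        # as Intel parts generally do.
        return f"cpu/event={event:#x},umask={umask:#x},offcore_rsp={rsp:#x}/"

    entry = {
        "EventName": "OCR.READS_TO_CORE.ANY_RESPONSE",
        "EventCode": "0XB7",
        "UMask": "0x1",
        "MSRIndex": "0x1a6,0x1a7",
        "MSRValue": "0x10477",
    }
    print(ocr_event_to_perf(entry))  # cpu/event=0xb7,umask=0x1,offcore_rsp=0x10477/
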
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json b/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
new file mode 100644
index 000000000000..9dd8c909facc
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/pipeline.json
@@ -0,0 +1,450 @@
+[
+ {
+ "BriefDescription": "Counts the total number of branch instructions retired for all branch types.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions in which the instruction pointer (IP) of the processor is resteered due to a branch instruction and the branch instruction successfully retires. All branch type instructions are accounted for.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of near CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf9"
+ },
+ {
+ "BriefDescription": "Counts the number of far branch instructions retired, includes far jump, far call and return, and interrupt call and return.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xbf"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "Counts the number of retired JCC (Jump on Conditional Code) branch instructions retired, includes both taken and not taken branches.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
+ },
+ {
+ "BriefDescription": "Counts the number of near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of near relative CALL branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.REL_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfd"
+ },
+ {
+ "BriefDescription": "Counts the number of near RET branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7"
+ },
+ {
+ "BriefDescription": "Counts the number of taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
+ },
+ {
+ "BriefDescription": "Counts the total number of mispredicted branch instructions retired for all branch types.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of mispredicted branch instructions retired. All branch type instructions are accounted for. Prediction of the branch target address enables the processor to begin executing instructions before the non-speculative execution path is known. The branch prediction unit (BPU) predicts the target address based on the instruction pointer (IP) of the branch and on the execution path through which execution reached this IP. A branch misprediction occurs when the prediction is wrong, and results in discarding all instructions executed in the speculative path and re-fetching from the correct path.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.IND_CALL",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7e"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near indirect JMP and near indirect CALL branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NON_RETURN_IND",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xeb"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted near RET branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RETURN",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xf7"
+ },
+ {
+ "BriefDescription": "Counts the number of mispredicted taken JCC (Jump on Conditional Code) branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.TAKEN_JCC",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0xfe"
+ },
+ {
+ "BriefDescription": "Counts the total number of BTCLEARS.",
+ "EventCode": "0xe8",
+ "EventName": "BTCLEAR.ANY",
+ "PublicDescription": "Counts the total number of BTCLEARS which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.CORE",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted core clock cycles.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.CORE_P",
+ "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses fixed counter 2.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC_P",
+ "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. This event is not affected by core frequency changes and increments at a fixed frequency that is also used for the Time Stamp Counter (TSC). This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "EventCode": "0xcd",
+ "EventName": "CYCLES_DIV_BUSY.ANY",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of cycles the integer divider is busy.",
+ "EventCode": "0xcd",
+ "EventName": "CYCLES_DIV_BUSY.IDIV",
+ "PublicDescription": "Counts the number of cycles the integer divider is busy. Does not imply a stall waiting for the divider.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of instructions retired. (Fixed event)",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of instructions retired.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the total number of instructions that retired. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. This event continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses a programmable general purpose performance counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because it initially appears to be store forward blocked, but subsequently is shown not to be blocked based on 4K alias check.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.4K_ALIAS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked for any of the following reasons: DTLB miss, address alias, store forward or data unknown (includes memory disambiguation blocks and ESP consuming load blocks).",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address exactly matches an older store whose data is not ready.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DATA_UNKNOWN",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked because its address partially overlapped with an older store.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of machine clears for any reason including, but not limited to, memory ordering, memory disambiguation, SMC, and FP assist.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.ANY",
+ "SampleAfterValue": "20003"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to memory ordering in which an internal load passes an older store within the same CPU.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.DISAMBIGUATION",
+ "SampleAfterValue": "20003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to a page fault. Counts both I-Side and D-Side (Loads/Stores) page faults. A page fault occurs when either the page is not present, or an access violation occurs.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.PAGE_FAULT",
+ "SampleAfterValue": "20003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of machine clears due to program modifying data (self modifying code) within 1K of a recently fetched code page.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "SampleAfterValue": "20003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.ALL",
+ "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ) even if an FE_bound event occurs during this period. Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to fast nukes such as memory ordering and memory disambiguation machine clears.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a machine clear (nuke) of any kind including memory ordering and memory disambiguation.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MACHINE_CLEARS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to branch mispredicts.",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MISPREDICT",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event TOPDOWN_BAD_SPECULATION.FASTNUKE",
+ "EventCode": "0x73",
+ "EventName": "TOPDOWN_BAD_SPECULATION.MONUKE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to backend stalls.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to certain allocation restrictions.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.ALLOC_RESTRICTIONS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to memory reservation stalls in which a scheduler is not able to accept uops.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to IEC or FPC RAT stalls, which can be due to FIQ or IEC reservation stalls in which the integer, floating point or SIMD scheduler is not able to accept uops.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.NON_MEM_SCHEDULER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the physical register file unable to accept an entry (marble stalls).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REGISTER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to the reorder buffer being full (ROB stalls).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.REORDER_BUFFER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not consumed by the backend due to scoreboards from the instruction queue (IQ), jump execution unit (JEU), or microcode sequencer (MS).",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.SERIALIZATION",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "EventCode": "0x74",
+ "EventName": "TOPDOWN_BE_BOUND.STORE_BUFFER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the total number of issue slots every cycle that were not consumed by the backend due to frontend stalls.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ALL",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_DETECT",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BACLEARS, which occurs when the Branch Target Buffer (BTB) prediction or lack thereof, was corrected by a later branch predictor in the frontend. Includes BACLEARS due to all branch types including conditional and unconditional jumps, returns, and indirect branches.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.BRANCH_RESTEER",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to BTCLEARS, which occurs when the Branch Target Buffer (BTB) predicts a taken branch.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to the microcode sequencer (MS).",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.CISC",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to decode stalls.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.DECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to ITLB misses.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.ITLB",
+ "PublicDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to Instruction Table Lookaside Buffer (ITLB) misses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to other common frontend stalls not categorized.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.OTHER",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of issue slots every cycle that were not delivered by the frontend due to wrong predecodes.",
+ "EventCode": "0x71",
+ "EventName": "TOPDOWN_FE_BOUND.PREDECODE",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the total number of consumed retirement slots.",
+ "EventCode": "0xc2",
+ "EventName": "TOPDOWN_RETIRING.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003"
+ },
+ {
+ "BriefDescription": "Counts the number of uops issued by the front end every cycle.",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops issued by the front end every cycle. When 4-uops are requested and only 2-uops are delivered, the event counts 2. Uops_issued correlates to the number of ROB entries. If uop takes 2 ROB slots it counts as 2 uops_issued.",
+ "SampleAfterValue": "200003"
+ },
+ {
+ "BriefDescription": "Counts the total number of uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.ALL",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Counts the number of integer divide uops retired.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.IDIV",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of uops that are from complex flows issued by the micro-sequencer (MS).",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.MS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of uops that are from complex flows issued by the Microcode Sequencer (MS). This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops retired, includes those in MS flows.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.X87",
+ "PEBS": "1",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ }
+]
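
Aside: the pipeline events above are the raw inputs for simple derived ratios such as branch misprediction rate or machine clears per kilo-instruction. The sketch below is an illustration only, not part of this patch; it assumes the counts were collected with perf stat using the event names defined above, and the numbers in it are placeholders rather than measurements.

    # Illustration only. Counts would come from something like:
    #   perf stat -e BR_INST_RETIRED.ALL_BRANCHES,BR_MISP_RETIRED.ALL_BRANCHES,\
    #                MACHINE_CLEARS.ANY,INST_RETIRED.ANY -- <workload>
    counts = {
        "BR_INST_RETIRED.ALL_BRANCHES": 1_000_000,   # placeholder value
        "BR_MISP_RETIRED.ALL_BRANCHES": 12_000,      # placeholder value
        "MACHINE_CLEARS.ANY": 400,                   # placeholder value
        "INST_RETIRED.ANY": 8_000_000,               # placeholder value
    }

    mispredict_rate = counts["BR_MISP_RETIRED.ALL_BRANCHES"] / counts["BR_INST_RETIRED.ALL_BRANCHES"]
    clears_per_kilo_insn = 1000 * counts["MACHINE_CLEARS.ANY"] / counts["INST_RETIRED.ANY"]
    print(f"branch mispredict rate:      {mispredict_rate:.2%}")      # 1.20%
    print(f"machine clears per 1k insn:  {clears_per_kilo_insn:.3f}") # 0.050
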
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json
new file mode 100644
index 000000000000..a68a5bb05c22
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-cache.json
@@ -0,0 +1,7100 @@
+[
+ {
+ "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.MMIO_READ",
+ "Filter": "config1=0x40040e33",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.MMIO_WRITE",
+ "Filter": "config1=0x40041e33",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_MISSES.UNCACHEABLE",
+ "Filter": "config1=0x40e33",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_FULL",
+ "Filter": "config1=0x41833",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
+ "EventCode": "0x35",
+ "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
+ "Filter": "config1=0x41a33",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x80",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x81",
+ "EventName": "UNC_CHA_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x82",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x83",
+ "EventName": "UNC_CHA_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "EventCode": "0x88",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x89",
+ "EventName": "UNC_CHA_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8A",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8B",
+ "EventName": "UNC_CHA_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x84",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x85",
+ "EventName": "UNC_CHA_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x86",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x87",
+ "EventName": "UNC_CHA_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x8D",
+ "EventName": "UNC_CHA_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8E",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8F",
+ "EventName": "UNC_CHA_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Intermediate bypass Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.INTERMEDIATE",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Intermediate bypass Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the intermediate bypass.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Not Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.NOT_TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Not Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that could not take the bypass, and issues a read to memory. Note that transactions that did not take the bypass but did not issue read to memory will not be counted.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Bypass : Taken",
+ "EventCode": "0x57",
+ "EventName": "UNC_CHA_BYPASS_CHA_IMC.TAKEN",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Bypass : Taken : Counts the number of times when the CHA was able to bypass HA pipe on the way to iMC. This is a latency optimization for situations when there is light loadings on the memory subsystem. This can be filtered by when the bypass was taken and when it was not. : Filter for transactions that succeeded in taking the full bypass.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Uncore cache clock ticks",
+ "EventName": "UNC_CHA_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_CHA_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Any Cycle with Multiple Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xf2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Any Single Snoop",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.ANY_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Any Single Snoop : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0xf1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x42",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Core Requests",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.CORE_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single Core Requests : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x41",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x82",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single Eviction",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EVICT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single Eviction : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x81",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Multiple External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_GTONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Multiple External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x22",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Core Cross Snoops Issued : Single External Snoops",
+ "EventCode": "0x33",
+ "EventName": "UNC_CHA_CORE_SNP.EXT_ONE",
+ "PerPkg": "1",
+ "PublicDescription": "Core Cross Snoops Issued : Single External Snoops : Counts the number of transactions that trigger a configurable number of cross snoops. Cores are snooped if the transaction looks up the cache and determines that it is necessary based on the operation type and what CoreValid bits are set. For example, if 2 CV bits are set on a data read, the cores must have the data in S state so it is not necessary to snoop them. However, if only 1 CV bit is set the core my have modified the data. If the transaction was an RFO, it would need to invalidate the lines. This event can be filtered based on who triggered the initial snoop(s).",
+ "UMask": "0x21",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Counter 0 Occupancy",
+ "EventCode": "0x1F",
+ "EventName": "UNC_CHA_COUNTER0_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Counter 0 Occupancy : Since occupancy counts can only be captured in the Cbo's 0 counter, this event allows a user to capture occupancy related information by filtering the Cb0 occupancy count captured in Counter 0. The filtering available is found in the control register - threshold, invert and edge detect. E.g. setting threshold to 1 can effectively monitor how many cycles the monitored queue has an entry.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_DRD",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_SUPPRESS_NO_D2C",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6E",
+ "EventName": "UNC_CHA_DIRECT_GO.HA_TOR_DEALLOC",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.EXTCMP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.FAST_GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.GO_PULL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.IDLE_DUE_SUPPRESS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.NOP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Direct GO",
+ "EventCode": "0x6D",
+ "EventName": "UNC_CHA_DIRECT_GO_OPC.PULL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "EventCode": "0xAF",
+ "EventName": "UNC_CHA_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xBA",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xBA",
+ "EventName": "UNC_CHA_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_CHA_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_CHA_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_CHA_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_CHA_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "EventCode": "0xB9",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "EventCode": "0xB9",
+ "EventName": "UNC_CHA_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Normal priority reads issued to the memory controller from the CHA",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.NORMAL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) read is issued to any of the memory controller channels from the CHA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "HA to iMC Reads Issued : ISOCH",
+ "EventCode": "0x59",
+ "EventName": "UNC_CHA_IMC_READS_COUNT.PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "HA to iMC Reads Issued : ISOCH : Count of the number of reads issued to any of the memory controller channels. This can be filtered by the priority of the reads.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Full Line Non-ISOCH",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a normal (Non-Isochronous) full line write is issued from the CHA to any of the memory controller channels.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.FULL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Full Line : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : Partial Non-ISOCH : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial",
+ "EventCode": "0x5B",
+ "EventName": "UNC_CHA_IMC_WRITES_COUNT.PARTIAL_PRIORITY",
+ "PerPkg": "1",
+ "PublicDescription": "CHA to iMC Full Line Writes Issued : ISOCH Partial : Counts the total number of full line writes issued from the HA into the memory controller.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Any Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state.; Filters for any transaction originating from the IPQ or IRQ. This does not include lookups originating from the ISMQ.",
+ "UMask": "0x1fffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.ANY_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local or remote transaction to the LLC, including prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE",
+ "PerPkg": "1",
+ "UMask": "0x1bd0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Code Reads",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Code Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd0ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : CRd Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : CRd Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote CRd transactions to the LLC. This includes CRd prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Code Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.CODE_READ_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Code Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Local request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.COREPREF_OR_DMND_LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Local request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Any local transaction to the LLC, including prefetches from the Core",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_RD",
+ "PerPkg": "1",
+ "UMask": "0x1bc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache and Snoop Filter Lookups; Data Read Request",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. CHAFilter0[24:21,17] bits correspond to [FMESI] state. Read transactions",
+ "UMask": "0x1bc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_ALL",
+ "PerPkg": "1",
+ "UMask": "0x1fc1ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Read transactions.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Data Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DATA_READ_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Data Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bc101",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.DATA_READ_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.DMND_READ_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x841ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : E State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.E",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Exclusive State",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : F State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : F State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Forward State",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_INV",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1a44ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Flush or Invalidate Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.FLUSH_OR_INV_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Flush or Invalidate Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : I State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.I",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : I State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Miss",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Transactions homed locally Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.LOCAL_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Transactions homed locally Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Transaction whose address resides in the local MC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : M State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.M",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : M State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Modified State",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : All Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.MISS_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : All Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1fe001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Write Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.OTHER_REQ_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Write Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Writeback transactions to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Reads",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Reads : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd9ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_LOC_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Locally Requested Reads that are Locally HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x9d9ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_LOCAL_REM_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Locally Requested Reads that are Remotely HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x11d9ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd901",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Locally HOMed Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_LOC_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Locally HOMed Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0xbd901",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely HOMed Read Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_MISS_REM_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remotely HOMed Read Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x13d901",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_OR_SNOOP_REMOTE_MISS_REM_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remotely requested Read or Snoop Misses that are Remotely HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x161901",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_REMOTE_LOC_HOM",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Remotely Requested Reads that are Locally HOMed : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0xa19ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Reads that Hit the Snoop Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.READ_SF_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Reads that Hit the Snoop Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bd90e",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Requests",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "UMask": "0x1bc8ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Request Filter",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_F",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Request Filter : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Local or remote RFO transactions to the LLC. This includes RFO prefetch.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : RFO Misses",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : RFO Misses : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing.",
+ "UMask": "0x1bc801",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_LLC_LOOKUP.RFO_LOCAL",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.RFO_PREF_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x888ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : S State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.S",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : Hit Shared State",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - E State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_E",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - E State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit Exclusive State",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - H State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_H",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - H State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit HitMe State",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : SnoopFilter - S State",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.SF_S",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : SnoopFilter - S State : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS select a state or states (in the umask field) to match. Otherwise, the event will count nothing. : SF Hit Shared State",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cache Lookups : Filters Requests for those that write info into the cache",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITES_AND_OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Cache Lookups : Write Requests : Counts the number of times the LLC was accessed - this includes code, data, prefetches and hints coming from L2. This has numerous filters available. Note the non-standard filtering equation. This event will count requests that lookup the cache multiple times with multiple increments. One must ALWAYS set umask bit 0 and select a state or states to match. Otherwise, the event will count nothing. : Writeback transactions from L2 to the LLC This includes all write transactions -- both Cacheable and UC.",
+ "UMask": "0x1a42ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated.",
+ "Deprecated": "1",
+ "EventCode": "0x34",
+ "EventName": "UNC_CHA_LLC_LOOKUP.WRITE_LOCAL",
+ "PerPkg": "1",
+ "UMask": "0x842ff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : All Lines Victimized",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : All Lines Victimized : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0xf",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in E state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in E state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - All Lines",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - All Lines : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x200f",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in E State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_E",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in E State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2002",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in M State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_M",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in M State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2001",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local Only",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_ONLY",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local Only : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Local - Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.LOCAL_S",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Local - Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x2004",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in M state",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in M state : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Lines Victimized : Lines in S State",
+ "EventCode": "0x37",
+ "EventName": "UNC_CHA_LLC_VICTIMS.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Lines Victimized : Lines in S State : Counts the number of lines that were victimized on a fill. This can be filtered by the state that the line was in.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Miss",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : CV0 Prefetch Miss : Miscellaneous events in the Cbo.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : CV0 Prefetch Victim",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.CV0_PREF_VIC",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : CV0 Prefetch Victim : Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Number of times that an RFO hit in S state.",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RFO_HIT_S",
+ "PerPkg": "1",
+ "PublicDescription": "Counts when a RFO (the Read for Ownership issued before a write) request hit a cacheline in the S (Shared) state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Silent Snoop Eviction",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.RSPI_WAS_FSE",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : Silent Snoop Eviction : Miscellaneous events in the Cbo. : Counts the number of times when a Snoop hit in FSE states and triggered a silent eviction. This is useful because this information is lost in the PRE encodings.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cbo Misc : Write Combining Aliasing",
+ "EventCode": "0x39",
+ "EventName": "UNC_CHA_MISC.WC_ALIASING",
+ "PerPkg": "1",
+ "PublicDescription": "Cbo Misc : Write Combining Aliasing : Miscellaneous events in the Cbo. : Counts the number of times that a USWC write (WCIL(F)) transaction hit in the LLC in M state, triggering a WBMtoI followed by the USWC write. This occurs when there is WC aliasing.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "EventCode": "0xE6",
+ "EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "EventCode": "0xE6",
+ "EventName": "UNC_CHA_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ADEGRCREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.AKEGRCREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ALLRSFWAYS_RES",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.BLEGRCREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.FSF_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLOWSNP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_ALLWAYRSV",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_PAMATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.GOTRACK_WAYMATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.HACREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IDX_INPIPE",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IPQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IRQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ISMQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.IVEGRCREDIT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.LLC_WAYS_RES",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.NOTALLOWSNOOP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ONE_FSF_VIC",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.ONE_RSP_CON",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.PTL_INPIPE",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.RMW_SETMATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.RRQ_SETMATCH_VICP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.SETMATCHENTRYWSCT",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.SF_WAYS_RES",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.TOPA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.TORID_MATCH_GO_P",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_AD_REQ",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_AD_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_RSP",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.VN_BL_WB",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Pipe Rejects",
+ "EventCode": "0x42",
+ "EventName": "UNC_CHA_PIPE_REJECT.WAY_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Pipe Rejects : More Miscellaneous events in the Cbo.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC0",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC0 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC1",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC1 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC10",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC10",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC10 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 10 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC11",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC11",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC11 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 11 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC12",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC12",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC12 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 12 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC13",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC13",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC13 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 13 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC2",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC2 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC3",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC3 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC4",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC4 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC5",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC5 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC6",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC6",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC6 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 6 only.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC7",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC7",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC7 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 7 only.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC8",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC8",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC8 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 8 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx READ Credits Empty : MC9",
+ "EventCode": "0x58",
+ "EventName": "UNC_CHA_READ_NO_CREDITS.MC9",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx READ Credits Empty : MC9 : Counts the number of times when there are no credits available for sending reads from the CHA into the iMC. In order to send reads into the memory controller, the HA must first acquire a credit for the iMC's AD Ingress queue. : Filter for memory controller 9 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local INVITOE requests (exclusive ownership of a cache line without receiving data) that miss the SF/LLC and remote INVITOE requests sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.INVITOE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of requests coming from a unit on this socket for exclusive ownership of a cache line without receiving data (INVITOE) to the CHA.",
+ "UMask": "0x30",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local read requests that miss the SF/LLC and remote read requests sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.READS",
+ "PerPkg": "1",
+ "PublicDescription": "Counts read requests made into this CHA. Reads include all read opcodes (including RFO: the Read for Ownership issued before a write) .",
+ "UMask": "0x3",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Local write requests that miss the SF/LLC and remote write requests sent to the CHA's home agent",
+ "EventCode": "0x50",
+ "EventName": "UNC_CHA_REQUESTS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Counts write requests made into the CHA, including streaming, evictions, HitM (Reads from another core to a Modified cacheline), etc.",
+ "UMask": "0xc",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "EventCode": "0xAC",
+ "EventName": "UNC_CHA_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_CHA_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "EventCode": "0xAD",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "EventCode": "0xAB",
+ "EventName": "UNC_CHA_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xae",
+ "EventName": "UNC_CHA_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : IRQ Rejected",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.IRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : IRQ Rejected : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Allocations : PRQ",
+ "EventCode": "0x13",
+ "EventName": "UNC_CHA_RxC_INSERTS.PRQ_REJ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Allocations : PRQ : Counts number of allocations per cycle into the specified Ingress queue.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x18",
+ "EventName": "UNC_CHA_RxC_IRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the IRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC or SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Request Queue Rejects; PhyAddr Match",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "IRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "IRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x19",
+ "EventName": "UNC_CHA_RxC_IRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : BL WB on VN0",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request",
+ "EventCode": "0x24",
+ "EventName": "UNC_CHA_RxC_ISMQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : AD REQ on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : AD RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : Non UPI AK Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL NCB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL NCS on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL RSP on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : BL WB on VN0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2C",
+ "EventName": "UNC_CHA_RxC_ISMQ0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 0 : Non UPI IV Request : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : ANY0",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Rejects - Set 1 : HA",
+ "EventCode": "0x25",
+ "EventName": "UNC_CHA_RxC_ISMQ1_REJECT.HA",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Rejects - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : ANY0",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 1 : ANY0 : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores. : Any condition listed in the ISMQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "ISMQ Retries - Set 1 : HA",
+ "EventCode": "0x2D",
+ "EventName": "UNC_CHA_RxC_ISMQ1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "ISMQ Retries - Set 1 : HA : Number of times a transaction flowing through the ISMQ had to retry. Transaction pass through the ISMQ as responses for requests that already exist in the Cbo. Some examples include: when data is returned or when snoop responses come back from the cores.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Occupancy : IRQ",
+ "EventCode": "0x11",
+ "EventName": "UNC_CHA_RxC_OCCUPANCY.IRQ",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Occupancy : IRQ : Counts number of entries in the specified Ingress queue in each cycle.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : AD REQ on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : AD RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : Non UPI AK Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL NCB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL NCS on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL RSP on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : BL WB on VN0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2E",
+ "EventName": "UNC_CHA_RxC_OTHER0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 0 : Non UPI IV Request : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Allow Snoop",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : Allow Snoop : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : ANY0",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : ANY0 : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Any condition listed in the Other0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : HA",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : HA : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC OR SF Way",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : LLC OR SF Way : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : LLC Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : LLC Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : PhyAddr Match",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : PhyAddr Match : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : SF Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : SF Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject) : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Other Retries - Set 1 : Victim",
+ "EventCode": "0x2F",
+ "EventName": "UNC_CHA_RxC_OTHER1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Other Retries - Set 1 : Victim : Retry Queue Inserts of Transactions that were already in another Retry Q (sub-events encode the reason for the next reject)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD REQ on VN0 : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : AD RSP on VN0 : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI AK Request : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCB on VN0 : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL NCS on VN0 : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL RSP on VN0 : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : BL WB on VN0 : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request",
+ "EventCode": "0x20",
+ "EventName": "UNC_CHA_RxC_PRQ0_REJECT.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 0 : Non UPI IV Request : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Allow Snoop",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ALLOW_SNP",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : ANY0 : Any condition listed in the PRQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : HA",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.HA",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC OR SF Way : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : LLC Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.LLC_VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : PhyAddr Match : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "PRQ Requests (from CMS) Rejected - Set 1 : SF Victim : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "PRQ Requests (from CMS) Rejected - Set 1 : Victim",
+ "EventCode": "0x21",
+ "EventName": "UNC_CHA_RxC_PRQ1_REJECT.VICTIM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD REQ on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_REQ_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : AD REQ on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a request",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : AD RSP on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AD_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : AD RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No AD VN0 credit for generating a response",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI AK Request",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.AK_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : Non UPI AK Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject AK ring message",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCB on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL NCB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCB",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL NCS on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_NCS_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL NCS on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for NCS",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL RSP on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_RSP_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL RSP on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a response",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : BL WB on VN0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.BL_WB_VN0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : BL WB on VN0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : No BL VN0 credit for generating a writeback",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 0 : Non UPI IV Request",
+ "EventCode": "0x2A",
+ "EventName": "UNC_CHA_RxC_REQ_Q0_RETRY.IV_NON_UPI",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 0 : Non UPI IV Request : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Can't inject IV ring message",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Allow Snoop",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ALLOW_SNP",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : Allow Snoop : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : ANY0",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.ANY0",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : ANY0 : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Any condition listed in the WBQ0 Reject counter was true",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : HA",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.HA",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : HA : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC OR SF Way",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_OR_SF_WAY",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : LLC OR SF Way : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Way conflict with another request that caused the reject",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : LLC Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.LLC_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : LLC Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : PhyAddr Match",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.PA_MATCH",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : PhyAddr Match : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Address match with an outstanding request that was rejected.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : SF Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.SF_VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : SF Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ) : Requests did not generate Snoop filter victim",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Request Queue Retries - Set 1 : Victim",
+ "EventCode": "0x2B",
+ "EventName": "UNC_CHA_RxC_REQ_Q1_RETRY.VICTIM",
+ "PerPkg": "1",
+ "PublicDescription": "Request Queue Retries - Set 1 : Victim : REQUESTQ includes: IRQ, PRQ, IPQ, RRQ, WBQ (everything except for ISMQ)",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_CHA_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "EventCode": "0xE2",
+ "EventName": "UNC_CHA_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "EventCode": "0xE3",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "EventCode": "0xe4",
+ "EventName": "UNC_CHA_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "EventCode": "0xE1",
+ "EventName": "UNC_CHA_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "EventCode": "0xE0",
+ "EventName": "UNC_CHA_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for E-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.E_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking exclusive lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for M-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.M_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking modified lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop filter capacity evictions for S-state entries.",
+ "EventCode": "0x3D",
+ "EventName": "UNC_CHA_SF_EVICTION.S_STATE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts snoop filter capacity evictions for entries tracking shared lines in the cores? cache.? Snoop filter capacity evictions occur when the snoop filter is full and evicts an existing entry to track a new entry.? Does not count clean evictions such as when a core?s cache replaces a tracked cacheline with a new cacheline.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : All",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : All : Counts the number of snoops issued by the HA.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Broadcast snoops for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.BCST_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Broadcast snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast snoops issued by the HA responding to local requests",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Directed snoops for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.DIRECT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Directed snoops for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of directed snoops issued by the HA responding to local requests",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoops Sent : Snoops sent for Local Requests",
+ "EventCode": "0x51",
+ "EventName": "UNC_CHA_SNOOPS_SENT.LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Snoops Sent : Snoops sent for Local Requests : Counts the number of snoops issued by the HA. : Counts the number of broadcast or directed snoops issued by the HA responding to local requests",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspCnflct",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPCNFLCT",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspCnflct : Number of snoop responses received for a Local request : Filters for snoops responses of RspConflict to local CA requests. This is returned when a snoop finds an existing outstanding transaction in a remote caching agent when it CAMs that caching agent. This triggers conflict resolution hardware. This covers both RspCnflct and RspCnflctWbI.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspFwd to local CA requests. This snoop response is only possible for RdCur when a snoop HITM/E in a remote caching agent and it directly forwards data to a requestor without changing the requestor's cache line state.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*FWD*WB",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPFWDWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : Rsp*FWD*WB : Number of snoop responses received for a Local request : Filters for a snoop response of Rsp*Fwd*WB to local CA requests. This snoop response is only used in 4s systems. It is used when a snoop HITM's in a remote caching agent and it directly forwards data to a requestor, and simultaneously returns data to the home to be written back to memory.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspI",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPI",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspI : Number of snoop responses received for a Local request : Filters for snoops responses of RspI to local CA requests. RspI is returned when the remote cache does not have the data, or when the remote cache silently evicts data (such as when an RFO hits non-modified data).",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspIFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPIFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspIFwd : Number of snoop responses received for a Local request : Filters for snoop responses of RspIFwd to local CA requests. This is returned when a remote caching agent forwards data and the requesting agent is able to acquire the data in E or M states. This is commonly returned with RFO transactions. It can be either a HitM or a HitFE.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspS",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPS",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspS : Number of snoop responses received for a Local request : Filters for snoop responses of RspS to local CA requests. RspS is returned when a remote cache has data but is not forwarding it. It is a way to let the requesting socket know that it cannot allocate the data in E state. No data is sent with S RspS.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : RspSFwd",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPSFWD",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : RspSFwd : Number of snoop responses received for a Local request : Filters for a snoop response of RspSFwd to local CA requests. This is returned when a remote caching agent forwards data but holds on to its currently copy. This is common for data and code reads that hit in a remote socket in E or F state.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Snoop Responses Received Local : Rsp*WB",
+ "EventCode": "0x5D",
+ "EventName": "UNC_CHA_SNOOP_RESP_LOCAL.RSPWB",
+ "PerPkg": "1",
+ "PublicDescription": "Snoop Responses Received Local : Rsp*WB : Number of snoop responses received for a Local request : Filters for a snoop response of RspIWB or RspSWB to local CA requests. This is returned when a non-RFO request hits in M state. Data and Code Reads can return either RspIWB or RspSWB depending on how the system has been configured. InvItoE transactions will also return RspIWB because they must acquire ownership.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIDataM",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPDATAM",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : MtoI RspIFwdM",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.MTOI_RSPIFWDM",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit LLC",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : Pull Data Partial - Hit SF",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.PULLDATAPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit LLC",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITLLC",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Misc Snoop Responses Received : RspIFwdPtl Hit SF",
+ "EventCode": "0x6B",
+ "EventName": "UNC_CHA_SNOOP_RSP_MISC.RSPIFWDMPTL_HITSF",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD0",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD2",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD4",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD6",
+ "EventName": "UNC_CHA_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD1",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD3",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD5",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD7",
+ "EventName": "UNC_CHA_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ffff",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DDR4 Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DDR4 Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_CHA_TOR_INSERTS.DDR",
+ "Deprecated": "1",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.DDR4",
+ "PerPkg": "1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : SF/LLC Evictions",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : SF/LLC Evictions : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Hits",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Hits : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CLFlushes issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushOpts issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CLFlushOpts issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8d7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRDs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRDs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; CRd Pref from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to page walks that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opts issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRds issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CRd_Prefs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd PTEs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd PTEs issued by iA Cores due to a page walk that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opt issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : DRd_Opt_Prefs issued by iA Cores that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiLF misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts; WCiL misses from local IA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : UCRdFs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores that Missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WiLs issued by iA Cores that Missed LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFO_Prefs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFO_Prefs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOE",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc3fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBEFtoIs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBEFTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbEFtoIs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc37ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBMtoEs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOE",
+ "PerPkg": "1",
+ "PublicDescription": "WbMtoEs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc2fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by an iA Cores. Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbMtoIs issued by iA Cores . (Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WBStoIs issued by an IA Core. Non Modified Write Backs",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WBSTOI",
+ "PerPkg": "1",
+ "PublicDescription": "WbStoIs issued by iA Cores . (Non Modified Write Backs) :Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc67ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLs issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLs issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WCiLF issued by iA Cores",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WCiLF issued by iA Cores : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : CLFlushes issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : CLFlushes issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that Hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that hit the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All requests from IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All requests from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices that missed the LLC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PCIRdCurs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PCIRdCurs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : RFOs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : RFOs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : WbMtoIs issued by IO Devices",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : WbMtoIs issued by IO Devices : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IRQ - iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From an iA Core",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : IRQ - Non iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.IRQ_NON_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : IRQ - Non iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just ISOC",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.ISOC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just ISOC : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Local Targets",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOCAL_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Local Targets : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA and IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA and IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local iA",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local iA : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests from iA Cores",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : All from Local IO",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.LOC_IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : All from Local IO : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally generated IO traffic",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MATCH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Match the Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just Misses",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just Misses : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : MMCFG Access",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.MMCFG",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : MMCFG Access : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NearMem",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NonCoherent",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NONCOH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NonCoherent : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Just NotNearMem",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.NOT_NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Just NotNearMem : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PREMORPH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : Match the PreMorphed Opcode in b[29:19] of the extended umask field : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - IOSF",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PRQ - IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From a PCIe Device",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Inserts : PRQ - Non IOSF",
+ "EventCode": "0x35",
+ "EventName": "UNC_CHA_TOR_INSERTS.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Inserts : PRQ - Non IOSF : Counts the number of entries successfully inserted into the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DDR4 Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.DDR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DDR4 Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : SF/LLC Evictions",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.EVICT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : SF/LLC Evictions : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : TOR allocation occurred as a result of SF/LLC evictions (came from the ISMQ)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Hits",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Hits : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CLFLUSHOPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushOpts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8d7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRDs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRDs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; CRd Pref from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Code read prefetch from local IA that misses in the snoop filter",
+ "UMask": "0xc88fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88ffd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opts issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_HIT_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887fd01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRds issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc80ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_CRD_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CRd_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc88ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRDPTE",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRdPte issued by iA Cores due to a page walk that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc837fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opt issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc827fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_DRD_OPT_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : DRd_Opt_Prefs issued by iA Cores that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8a7fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiLF misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_FULL_STREAMING_WR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy; WCiL misses from local IA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_PARTIAL_STREAMING_WR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy; Data read from local IA that misses in the snoop filter",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_UCRDF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : UCRdFs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc877de01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86ffe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores that Missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867fe01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_MISS_WIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WiLs issued by iA Cores that Missed LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc87fde01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc807ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_RFO_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFO_Prefs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc887ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc27ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLs issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCIL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLs issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc86fff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WCiLF issued by iA Cores",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IA_WCILF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WCiLF issued by iA Cores : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc867ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : CLFlushes issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : CLFlushes issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8c3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that Hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_HIT_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that hit the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fd04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All requests from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc001fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_ITOMCACHENEAR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : ItoMCacheNears, indicating a partial write request, from IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcd43fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_MISS_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices that missed the LLC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803fe04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_PCIRDCUR",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PCIRdCurs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc8f3ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : RFOs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_RFO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : RFOs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xc803ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : WbMtoIs issued by IO Devices",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IO_WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : WbMtoIs issued by IO Devices : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0xcc23ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IRQ - iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From an iA Core",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : IRQ - Non iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.IRQ_NON_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : IRQ - Non iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just ISOC",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.ISOC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just ISOC : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Local Targets",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOCAL_TGT",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Local Targets : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA and IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA and IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests",
+ "UMask": "0xc000ff05",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local iA",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IA",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local iA : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally initiated requests from iA Cores",
+ "UMask": "0xc000ff01",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : All from Local IO",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.LOC_IO",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : All from Local IO : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : All locally generated IO traffic",
+ "UMask": "0xc000ff04",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MATCH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Match the Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just Misses",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MISS",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just Misses : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : MMCFG Access",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.MMCFG",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : MMCFG Access : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NearMem",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NonCoherent",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NONCOH",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NonCoherent : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Just NotNearMem",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.NOT_NEARMEM",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Just NotNearMem : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PREMORPH_OPC",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : Match the PreMorphed Opcode in b[29:19] of the extended umask field : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - IOSF",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PRQ - IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts. : From a PCIe Device",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "TOR Occupancy : PRQ - Non IOSF",
+ "EventCode": "0x36",
+ "EventName": "UNC_CHA_TOR_OCCUPANCY.PRQ_NON_IOSF",
+ "PerPkg": "1",
+ "PublicDescription": "TOR Occupancy : PRQ - Non IOSF : For each cycle, this event accumulates the number of valid entries in the TOR that match qualifications specified by the subevent. Does not include addressless requests such as locks and interrupts.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_CHA_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "EventCode": "0xA7",
+ "EventName": "UNC_CHA_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "EventCode": "0xA2",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_CHA_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_CHA_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "EventCode": "0xA4",
+ "EventName": "UNC_CHA_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "EventCode": "0xA0",
+ "EventName": "UNC_CHA_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "EventCode": "0xA5",
+ "EventName": "UNC_CHA_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_CHA_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_CHA_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "EventCode": "0x95",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "EventCode": "0x97",
+ "EventName": "UNC_CHA_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_CHA_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_CHA_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "EventCode": "0x99",
+ "EventName": "UNC_CHA_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_CHA_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : TGC",
+ "EventCode": "0x9B",
+ "EventName": "UNC_CHA_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : TGC : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_CHA_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_CHA_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_CHA_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_CHA_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "EventCode": "0xB3",
+ "EventName": "UNC_CHA_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_CHA_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to LLC",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.LLC",
+ "PerPkg": "1",
+ "PublicDescription": "WbPushMtoI : Pushed to LLC : Counts the number of times when the CHA received a WbPushMtoI : Counts the number of times when the CHA was able to push WbPushMToI to LLC",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "WbPushMtoI : Pushed to Memory",
+ "EventCode": "0x56",
+ "EventName": "UNC_CHA_WB_PUSH_MTOI.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "WbPushMtoI : Pushed to Memory : Counts the number of times when the CHA received a WbPushMtoI : Counts the number of times when the CHA was unable to push WbPushMToI to LLC (hence pushed it to MEM)",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC0",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC0",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC0 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 0 only.",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC1",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC1",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC1 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 1 only.",
+ "UMask": "0x2",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC10",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC10",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC10 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 10 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC11",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC11",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC11 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 11 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC12",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC12",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC12 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 12 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC13",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC13",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC13 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 13 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC2",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC2",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC2 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 2 only.",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC3",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC3",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC3 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 3 only.",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC4",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC4",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC4 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 4 only.",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC5",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC5",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC5 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 5 only.",
+ "UMask": "0x20",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC6",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC6",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC6 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 6 only.",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC7",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC7",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC7 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 7 only.",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC8",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC8",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC8 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 8 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "CHA iMC CHNx WRITE Credits Empty : MC9",
+ "EventCode": "0x5A",
+ "EventName": "UNC_CHA_WRITE_NO_CREDITS.MC9",
+ "PerPkg": "1",
+ "PublicDescription": "CHA iMC CHNx WRITE Credits Empty : MC9 : Counts the number of times when there are no credits available for sending WRITEs from the CHA into the iMC. In order to send WRITEs into the memory controller, the HA must first acquire a credit for the iMC's BL Ingress queue. : Filter for memory controller 9 only.",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - Conflict",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 0?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
+ "UMask": "0x8",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 0?) - No Credits",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP0_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 0?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
+ "UMask": "0x4",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - Conflict",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_CONFLICT",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 1?) - Conflict : Number of XPT prefetches dropped due to AD CMS write port contention",
+ "UMask": "0x80",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Dropped (on 1?) - No Credits",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.DROP1_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Dropped (on 1?) - No Credits : Number of XPT prefetches dropped due to lack of XPT AD egress credits",
+ "UMask": "0x40",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 0?)",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT0",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Sent (on 0?) : Number of XPT prefetches sent",
+ "UMask": "0x1",
+ "Unit": "CHA"
+ },
+ {
+ "BriefDescription": "XPT Prefetches : Sent (on 1?)",
+ "EventCode": "0x6f",
+ "EventName": "UNC_CHA_XPT_PREF.SENT1",
+ "PerPkg": "1",
+ "PublicDescription": "XPT Prefetches : Sent (on 1?) : Number of XPT prefetches sent",
+ "UMask": "0x10",
+ "Unit": "CHA"
+ }
+]
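A quick way to sanity-check the event tables above (and the interconnect file added next) is to load them as ordinary JSON, since each file is a flat array of event objects. The sketch below is illustrative only and not part of the patch: it assumes it is run from the top of the kernel tree, uses one of the paths introduced by this series, and relies on the keys seen throughout these tables (EventName, EventCode, UMask, Unit); note that some entries, such as UNC_CHA_WRITE_NO_CREDITS.MC8 through MC13, carry no UMask at all.

    # Illustrative sketch: list the event encodings from one of the JSON files added here.
    import json

    # Assumption: run from the kernel source tree; any of the new snowridgex files works.
    path = "tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json"

    with open(path) as f:
        events = json.load(f)  # each file is a flat JSON array of event objects

    for ev in events:
        # Some entries legitimately omit UMask, so fall back to a placeholder.
        print(ev["EventName"], ev["EventCode"], ev.get("UMask", "-"), ev["Unit"])
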
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json
new file mode 100644
index 000000000000..de3840078e21
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-interconnect.json
@@ -0,0 +1,6016 @@
+[
+ {
+ "BriefDescription": "Total Write Cache Occupancy : Any Source",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.ANY",
+ "PerPkg": "1",
+ "PublicDescription": "Total Write Cache Occupancy : Any Source : Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events. : Tracks all requests from any source port.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total Write Cache Occupancy : Snoops",
+ "EventCode": "0x0F",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.IV_Q",
+ "PerPkg": "1",
+ "PublicDescription": "Total Write Cache Occupancy : Snoops : Accumulates the number of reads and writes that are outstanding in the uncore in each cycle. This is effectively the sum of the READ_OCCUPANCY and WRITE_OCCUPANCY events.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Total IRP occupancy of inbound read and write requests to coherent memory.",
+ "EventCode": "0x0f",
+ "EventName": "UNC_I_CACHE_TOTAL_OCCUPANCY.MEM",
+ "PerPkg": "1",
+ "PublicDescription": "Total IRP occupancy of inbound read and write requests to coherent memory. This is effectively the sum of read occupancy and write occupancy.",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Clockticks of the IO coherency tracker (IRP)",
+ "EventCode": "0x01",
+ "EventName": "UNC_I_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : CLFlush",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.CLFLUSH",
+ "PerPkg": "1",
+ "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations serviced by the IRP",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline.",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.PCITOM",
+ "PerPkg": "1",
+ "PublicDescription": "PCIITOM request issued by the IRP unit to the mesh with the intention of writing a full cacheline to coherent memory, without a RFO. PCIITOM is a speculative Invalidate to Modified command that requests ownership of the cacheline and does not move data from the mesh to IRP cache.",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline.",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.RFO",
+ "PerPkg": "1",
+ "PublicDescription": "RFO request issued by the IRP unit to the mesh with the intention of writing a partial cacheline to coherent memory. RFO is a Read For Ownership command that requests ownership of the cacheline and moves data from the mesh to IRP cache.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Coherent Ops : WbMtoI",
+ "EventCode": "0x10",
+ "EventName": "UNC_I_COHERENT_OPS.WBMTOI",
+ "PerPkg": "1",
+ "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations serviced by the IRP",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF RF full",
+ "EventCode": "0x17",
+ "EventName": "UNC_I_FAF_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound read requests received by the IRP and inserted into the FAF queue.",
+ "EventCode": "0x18",
+ "EventName": "UNC_I_FAF_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound read requests to coherent memory, received by the IRP and inserted into the Fire and Forget queue (FAF), a queue used for processing inbound reads in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Occupancy of the IRP FAF queue.",
+ "EventCode": "0x19",
+ "EventName": "UNC_I_FAF_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Occupancy of the IRP Fire and Forget (FAF) queue, a queue used for processing inbound reads in the IRP.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "FAF allocation -- sent to ADQ",
+ "EventCode": "0x16",
+ "EventName": "UNC_I_FAF_TRANSACTIONS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.EVICTS",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Inbound (p2p + faf + cset)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.INBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": ": All Inserts Outbound (BL, AK, Snoops)",
+ "EventCode": "0x20",
+ "EventName": "UNC_I_IRP_ALL.OUTBOUND_INSERTS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Atomic Transactions as Secondary",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.2ND_ATOMIC_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Read Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_RD_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Cache Inserts of Write Transactions as Secondary",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.2ND_WR_INSERT",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Rejects",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.FAST_REJ",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Requests",
+ "EventCode": "0x1e",
+ "EventName": "UNC_I_MISC0.FAST_REQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Fastpath Transfers From Primary to Secondary",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.FAST_XFER",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Prefetch Ack Hints From Primary to Secondary",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.PF_ACK_HINT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Counts Timeouts - Set 0 : Slow path fwpf didn't find prefetch",
+ "EventCode": "0x1E",
+ "EventName": "UNC_I_MISC0.SLOWPATH_FWPF_NO_PRF",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Lost Forward",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.LOST_FWD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Lost Forward : Snoop pulled away ownership before a write was committed",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Invalid",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_INVLD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Received Invalid : Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Received Valid",
+ "EventCode": "0x1F",
+ "EventName": "UNC_I_MISC1.SEC_RCVD_VLD",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Received Valid : Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of E Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_E",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of E Line : Secondary received a transfer that did have sufficient MESI state",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of I Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_I",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of I Line : Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of M Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_M",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of M Line : Snoop took cacheline ownership before write from data was committed.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Misc Events - Set 1 : Slow Transfer of S Line",
+ "EventCode": "0x1f",
+ "EventName": "UNC_I_MISC1.SLOW_S",
+ "PerPkg": "1",
+ "PublicDescription": "Misc Events - Set 1 : Slow Transfer of S Line : Secondary received a transfer that did not have sufficient MESI state",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Requests",
+ "EventCode": "0x14",
+ "EventName": "UNC_I_P2P_INSERTS",
+ "PerPkg": "1",
+ "PublicDescription": "P2P Requests : P2P requests from the ITC",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Occupancy",
+ "EventCode": "0x15",
+ "EventName": "UNC_I_P2P_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "P2P Occupancy : P2P B & S Queue Occupancy",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P completions",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.CMPL",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if local only",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if local and target matches",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.LOC_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P Message",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.MSG",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P reads",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : Match if remote only",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : match if remote and target matches",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.REM_AND_TGT_MATCH",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "P2P Transactions : P2P Writes",
+ "EventCode": "0x13",
+ "EventName": "UNC_I_P2P_TRANSACTIONS.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M, E, S or I line in the IIO",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M, E, S or I line in the IIO",
+ "UMask": "0x7e",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit E or S line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_ES",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit E or S line in the IIO cache",
+ "UMask": "0x74",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit I line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_I",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit I line in the IIO cache",
+ "UMask": "0x72",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that hit M line in the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_HIT_M",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that hit M line in the IIO cache",
+ "UMask": "0x78",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Responses to snoops of any type that miss the IIO cache",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.ALL_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "Responses to snoops of any type (code, data, invalidate) that miss the IIO cache",
+ "UMask": "0x71",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit E or S",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_ES",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit I",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_I",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Hit M",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.HIT_M",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : Miss",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.MISS",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpCode",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPCODE",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpData",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPDATA",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Snoop Responses : SnpInv",
+ "EventCode": "0x12",
+ "EventName": "UNC_I_SNOOP_RESP.SNPINV",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "IRP"
+ },
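Note on the composite snoop-response encodings above: each ALL_* umask is simply the OR of the three snoop-type bits (SnpCode 0x10, SnpData 0x20, SnpInv 0x40) with the corresponding response bits. A minimal C illustration of that decomposition, derived only from the umask values listed in this file:

/* Illustration only, derived from the umask values listed above. */
#define IRP_SNP_TYPE_ALL (0x10 | 0x20 | 0x40)                 /* SnpCode | SnpData | SnpInv = 0x70 */
#define IRP_ALL_MISS     (IRP_SNP_TYPE_ALL | 0x1)             /* 0x71, UNC_I_SNOOP_RESP.ALL_MISS */
#define IRP_ALL_HIT_I    (IRP_SNP_TYPE_ALL | 0x2)             /* 0x72, UNC_I_SNOOP_RESP.ALL_HIT_I */
#define IRP_ALL_HIT_ES   (IRP_SNP_TYPE_ALL | 0x4)             /* 0x74, UNC_I_SNOOP_RESP.ALL_HIT_ES */
#define IRP_ALL_HIT_M    (IRP_SNP_TYPE_ALL | 0x8)             /* 0x78, UNC_I_SNOOP_RESP.ALL_HIT_M */
#define IRP_ALL_HIT      (IRP_SNP_TYPE_ALL | 0x2 | 0x4 | 0x8) /* 0x7e, UNC_I_SNOOP_RESP.ALL_HIT */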
+ {
+ "BriefDescription": "Inbound Transaction Count : Atomic",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.ATOMIC",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound Transaction Count : Atomic : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks the number of atomic transactions",
+ "UMask": "0x10",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Other",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.OTHER",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound Transaction Count : Other : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks the number of 'other' kinds of transactions.",
+ "UMask": "0x20",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound Transaction Count : Writes",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WRITES",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.",
+ "UMask": "0x2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Inbound write (fast path) requests received by the IRP.",
+ "EventCode": "0x11",
+ "EventName": "UNC_I_TRANSACTIONS.WR_PREF",
+ "PerPkg": "1",
+ "PublicDescription": "Inbound write (fast path) requests to coherent memory, received by the IRP resulting in write ownership requests issued by IRP to the mesh.",
+ "UMask": "0x8",
+ "Unit": "IRP"
+ },
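The EventCode/UMask pairs above translate directly into the raw config value programmed into an IRP PMU counter. A minimal sketch, assuming the box is exposed as /sys/bus/event_source/devices/uncore_irp_0 and uses the common event=config:0-7, umask=config:8-15 layout (both the path and the bit layout are assumptions to verify against the PMU's format/ directory, not something this patch states):

/*
 * Hedged sketch: count UNC_I_TRANSACTIONS.WRITES (EventCode 0x11,
 * UMask 0x2) on the first IRP box via perf_event_open(2). The sysfs
 * path and config bit layout are assumptions; check
 * /sys/bus/event_source/devices/uncore_irp_0/format/ on the target.
 * Uncore counting is system-wide, so pid = -1 and a fixed CPU are
 * used; this typically needs root or a relaxed perf_event_paranoid.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr = { 0 };
        unsigned int type;
        uint64_t count;
        FILE *f;
        int fd;

        /* PMU type id of the first IRP box (hypothetical path). */
        f = fopen("/sys/bus/event_source/devices/uncore_irp_0/type", "r");
        if (!f || fscanf(f, "%u", &type) != 1) {
                perror("read PMU type");
                return 1;
        }
        fclose(f);

        attr.size = sizeof(attr);
        attr.type = type;
        attr.config = 0x11 | (0x2ULL << 8);     /* event | (umask << 8), assumed layout */

        fd = perf_event_open(&attr, -1, 0, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        sleep(1);
        if (read(fd, &count, sizeof(count)) != (ssize_t)sizeof(count)) {
                perror("read");
                return 1;
        }
        printf("UNC_I_TRANSACTIONS.WRITES: %llu\n", (unsigned long long)count);
        close(fd);
        return 0;
}

In practice perf's own tooling consumes these JSON entries and exposes them as named events, so the raw syscall above is only meant to make the encoding explicit.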
+ {
+ "BriefDescription": "AK Egress Allocations",
+ "EventCode": "0x0B",
+ "EventName": "UNC_I_TxC_AK_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Cycles Full",
+ "EventCode": "0x05",
+ "EventName": "UNC_I_TxC_BL_DRS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Inserts",
+ "EventCode": "0x02",
+ "EventName": "UNC_I_TxC_BL_DRS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL DRS Egress Occupancy",
+ "EventCode": "0x08",
+ "EventName": "UNC_I_TxC_BL_DRS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Cycles Full",
+ "EventCode": "0x06",
+ "EventName": "UNC_I_TxC_BL_NCB_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Inserts",
+ "EventCode": "0x03",
+ "EventName": "UNC_I_TxC_BL_NCB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCB Egress Occupancy",
+ "EventCode": "0x09",
+ "EventName": "UNC_I_TxC_BL_NCB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Cycles Full",
+ "EventCode": "0x07",
+ "EventName": "UNC_I_TxC_BL_NCS_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Inserts",
+ "EventCode": "0x04",
+ "EventName": "UNC_I_TxC_BL_NCS_INSERTS",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "BL NCS Egress Occupancy",
+ "EventCode": "0x0A",
+ "EventName": "UNC_I_TxC_BL_NCS_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "EventCode": "0x1C",
+ "EventName": "UNC_I_TxR2_AD01_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": ": Counts the number times when it is not possible to issue a request to the M2PCIe because there are no Egress Credits available on AD0, A1 or AD0&AD1 both. Stalls on both AD0 and AD1 will count as 2",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD0 Egress Credits Stalls",
+ "EventCode": "0x1A",
+ "EventName": "UNC_I_TxR2_AD0_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No AD0 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD0 Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No AD1 Egress Credits Stalls",
+ "EventCode": "0x1B",
+ "EventName": "UNC_I_TxR2_AD1_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No AD1 Egress Credits Stalls : Counts the number times when it is not possible to issue a request to the M2PCIe because there are no AD1 Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "No BL Egress Credit Stalls",
+ "EventCode": "0x1D",
+ "EventName": "UNC_I_TxR2_BL_STALL_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "No BL Egress Credit Stalls : Counts the number times when it is not possible to issue data to the R2PCIe because there are no BL Egress Credits available.",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0x0D",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Read Requests",
+ "EventCode": "0x0E",
+ "EventName": "UNC_I_TxS_DATA_INSERTS_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Read Requests : Counts the number of requests issued to the switch (towards the devices).",
+ "Unit": "IRP"
+ },
+ {
+ "BriefDescription": "Outbound Request Queue Occupancy",
+ "EventCode": "0x0C",
+ "EventName": "UNC_I_TxS_REQUEST_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "Outbound Request Queue Occupancy : Accumultes the number of outstanding outbound requests from the IRP to the switch (towards the devices). This can be used in conjuection with the allocations event in order to calculate average latency of outbound requests.",
+ "Unit": "IRP"
+ },
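As the UNC_I_TxS_REQUEST_OCCUPANCY description notes, average outbound request latency falls out of dividing accumulated occupancy by the matching allocation count over the same interval. A small sketch of that arithmetic (the counter values are placeholders; reading them is outside this snippet):

/*
 * Sketch of the latency calculation the description above suggests:
 * average residency in IRP clocks = accumulated occupancy / inserts.
 * The two counter values are placeholders supplied by the caller.
 */
#include <stdint.h>

static double avg_outbound_latency_clocks(uint64_t occupancy_sum, uint64_t inserts)
{
        return inserts ? (double)occupancy_sum / (double)inserts : 0.0;
}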
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2M_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2M_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2M_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8A",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8B",
+ "EventName": "UNC_M2M_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2M_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2M_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8C",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x8D",
+ "EventName": "UNC_M2M_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8E",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8F",
+ "EventName": "UNC_M2M_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_EGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Taken",
+ "EventCode": "0x22",
+ "EventName": "UNC_M2M_BYPASS_M2M_EGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Not Taken",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.NOT_TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC Bypass : Taken",
+ "EventCode": "0x21",
+ "EventName": "UNC_M2M_BYPASS_M2M_INGRESS.TAKEN",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to memory (M2M)",
+ "EventName": "UNC_M2M_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2M_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles when direct to core mode, which bypasses the CHA, was disabled",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_DIRSTATE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "EventCode": "0x60",
+ "EventName": "UNC_M2M_DIRECT2CORE_NOT_TAKEN_NOTFORKED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number of reads in which direct to core transaction was overridden",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2M_DIRECT2CORE_TXN_OVERRIDE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "EventCode": "0xAF",
+ "EventName": "UNC_M2M_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xBA",
+ "EventName": "UNC_M2M_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "EventCode": "0xB6",
+ "EventName": "UNC_M2M_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xBB",
+ "EventName": "UNC_M2M_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xB7",
+ "EventName": "UNC_M2M_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "EventCode": "0xB8",
+ "EventName": "UNC_M2M_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "EventCode": "0xB9",
+ "EventName": "UNC_M2M_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x704",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ALL",
+ "PerPkg": "1",
+ "UMask": "0x104",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x140",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x102",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch0",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH0_NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x101",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : All, regardless of priority. - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ALL",
+ "PerPkg": "1",
+ "UMask": "0x204",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x240",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x202",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - Ch1",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.CH1_NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x201",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : From TGR - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.FROM_TGR",
+ "PerPkg": "1",
+ "UMask": "0x740",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Critical Priority - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x702",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Reads Issued to iMC : Normal Priority - All Channels",
+ "EventCode": "0x37",
+ "EventName": "UNC_M2M_IMC_READS.NORMAL",
+ "PerPkg": "1",
+ "UMask": "0x701",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1c10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_ALL",
+ "PerPkg": "1",
+ "UMask": "0x410",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL",
+ "PerPkg": "1",
+ "UMask": "0x401",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x404",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x402",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch0",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH0_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x408",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : All Writes - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_ALL",
+ "PerPkg": "1",
+ "UMask": "0x810",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL",
+ "PerPkg": "1",
+ "UMask": "0x801",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x804",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x802",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - Ch1",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.CH1_PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x808",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : From TGR - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FROM_TGR",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Full Line Non-ISOCH - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL",
+ "PerPkg": "1",
+ "UMask": "0x1c01",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Full Line - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.FULL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1c04",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Non-Inclusive Miss - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.NI_MISS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : Partial Non-ISOCH - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL",
+ "PerPkg": "1",
+ "UMask": "0x1c02",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M Writes Issued to iMC : ISOCH Partial - All Channels",
+ "EventCode": "0x38",
+ "EventName": "UNC_M2M_IMC_WRITES.PARTIAL_ISOCH",
+ "PerPkg": "1",
+ "UMask": "0x1c08",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts",
+ "EventCode": "0x64",
+ "EventName": "UNC_M2M_MIRR_WRQ_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "EventCode": "0x65",
+ "EventName": "UNC_M2M_MIRR_WRQ_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "EventCode": "0xE6",
+ "EventName": "UNC_M2M_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches : MC Match",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number Packet Header Matches : Mesh Match",
+ "EventCode": "0x4C",
+ "EventName": "UNC_M2M_PKT_MATCH.MESH",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "EventCode": "0x73",
+ "EventName": "UNC_M2M_PREFCAM_CIS_DROPS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : All Channels",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 0",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Full : Channel 1",
+ "EventCode": "0x6B",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : All Channels",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 0",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Cycles Not Empty : Channel 1",
+ "EventCode": "0x6C",
+ "EventName": "UNC_M2M_PREFCAM_CYCLES_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA0_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_MISS_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH0_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA0_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_HITA1_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_MISS_INVAL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Deallocs",
+ "EventCode": "0x6E",
+ "EventName": "UNC_M2M_PREFCAM_DEALLOCS.CH1_RSP_PDRESET",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 0",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - Ch 1",
+ "EventCode": "0x6F",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped : XPT - All Channels",
+ "EventCode": "0x6f",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_DROPS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT - Ch 0",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 0",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH0_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 0",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT - Ch 1",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH1_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- Ch 2",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.CH2_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - Ch 2",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT & UPI- All Channels",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPTUPI_ALLCH",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Merged with CAMed Prefetches : XPT & UPI - All Channels",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Merged with CAMed Prefetches : XPT - All Channels",
+ "EventCode": "0x74",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - Ch 0",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 0",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH0_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI- Ch 0",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - Ch 1",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 1",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH1_XPTUPI",
+ "PerPkg": "1",
+ "PublicDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI- Ch 1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - Ch 2",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.CH2_XPTUPI",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT & UPI - All Channels",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPTUPI_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Demands Not Merged with CAMed Prefetches : XPT - All Channels",
+ "EventCode": "0x75",
+ "EventName": "UNC_M2M_PREFCAM_DEMAND_NO_MERGE.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.RPQ_PROXY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch0 - Reasons",
+ "EventCode": "0x70",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH0.XPT_THRESH",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.ERRORBLK_RxC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.NOT_PF_SAD_REGION",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_AD_CRD",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_FULL",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.PF_SECURE_DROP",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.RPQ_PROXY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.STOP_B2B",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.WPQ_PROXY",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Data Prefetches Dropped Ch1 - Reasons",
+ "EventCode": "0x71",
+ "EventName": "UNC_M2M_PREFCAM_DROP_REASONS_CH1.XPT_THRESH",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 0",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH0_XPT",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - Ch 1",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.CH1_XPT",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Inserts : XPT - All Channels",
+ "EventCode": "0x6D",
+ "EventName": "UNC_M2M_PREFCAM_INSERTS.XPT_ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x15",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : All Channels",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 0",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Prefetch CAM Occupancy : Channel 1",
+ "EventCode": "0x6A",
+ "EventName": "UNC_M2M_PREFCAM_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": All Channels",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.ALLCH",
+ "PerPkg": "1",
+ "UMask": "0x7",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 0",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": ": Channel 1",
+ "EventCode": "0x76",
+ "EventName": "UNC_M2M_PREFCAM_RESP_MISS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "EventCode": "0x79",
+ "EventName": "UNC_M2M_PREFCAM_RxC_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.1LM_POSTED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.CIS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "EventCode": "0x7A",
+ "EventName": "UNC_M2M_PREFCAM_RxC_DEALLOCS.SQUASHED",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "EventCode": "0x78",
+ "EventName": "UNC_M2M_PREFCAM_RxC_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "EventCode": "0x77",
+ "EventName": "UNC_M2M_PREFCAM_RxC_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "EventCode": "0xAC",
+ "EventName": "UNC_M2M_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "EventCode": "0xAA",
+ "EventName": "UNC_M2M_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "EventCode": "0xAD",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "EventCode": "0xAB",
+ "EventName": "UNC_M2M_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xae",
+ "EventName": "UNC_M2M_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 0",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Regular : Channel 1",
+ "EventCode": "0x43",
+ "EventName": "UNC_M2M_RPQ_NO_REG_CRD.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 0",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M to iMC RPQ Cycles w/Credits - Special : Channel 1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2M_RPQ_NO_SPEC_CRD.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Full",
+ "EventCode": "0x04",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Not Empty",
+ "EventCode": "0x03",
+ "EventName": "UNC_M2M_RxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Allocations",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2M_RxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy",
+ "EventCode": "0x02",
+ "EventName": "UNC_M2M_RxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Ingress (from CMS) Occupancy - Prefetches",
+ "EventCode": "0x77",
+ "EventName": "UNC_M2M_RxC_AD_PREF_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x5C",
+ "EventName": "UNC_M2M_RxC_AK_WR_CMP",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Full",
+ "EventCode": "0x08",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Not Empty",
+ "EventCode": "0x07",
+ "EventName": "UNC_M2M_RxC_BL_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Allocations",
+ "EventCode": "0x05",
+ "EventName": "UNC_M2M_RxC_BL_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Ingress (from CMS) Occupancy",
+ "EventCode": "0x06",
+ "EventName": "UNC_M2M_RxC_BL_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE5",
+ "EventName": "UNC_M2M_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "EventCode": "0xE2",
+ "EventName": "UNC_M2M_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "EventCode": "0xE3",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M2M_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "EventCode": "0xE1",
+ "EventName": "UNC_M2M_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "EventCode": "0xE0",
+ "EventName": "UNC_M2M_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD0",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD2",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD4",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xD6",
+ "EventName": "UNC_M2M_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD1",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD3",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD5",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xD7",
+ "EventName": "UNC_M2M_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number AD Ingress Credits",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2M_TGR_AD_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Number BL Ingress Credits",
+ "EventCode": "0x42",
+ "EventName": "UNC_M2M_TGR_BL_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 0",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Full : Channel 1",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2M_TRACKER_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 0",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Inserts : Channel 1",
+ "EventCode": "0x49",
+ "EventName": "UNC_M2M_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 0",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Cycles Not Empty : Channel 1",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2M_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 0",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Tracker Occupancy : Channel 1",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2M_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credit Acquired",
+ "EventCode": "0x0d",
+ "EventName": "UNC_M2M_TxC_AD_CREDITS_ACQUIRED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Credits Occupancy",
+ "EventCode": "0x0e",
+ "EventName": "UNC_M2M_TxC_AD_CREDIT_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Full",
+ "EventCode": "0x0c",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_FULL",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Not Empty",
+ "EventCode": "0x0b",
+ "EventName": "UNC_M2M_TxC_AD_CYCLES_NE",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Allocations",
+ "EventCode": "0x09",
+ "EventName": "UNC_M2M_TxC_AD_INSERTS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AD Egress (to CMS) Credits",
+ "EventCode": "0x0f",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_CYCLES",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AD Egress (to CMS) Credits",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2M_TxC_AD_NO_CREDIT_STALLED",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AD Egress (to CMS) Occupancy",
+ "EventCode": "0x0A",
+ "EventName": "UNC_M2M_TxC_AD_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK : CRD Transactions to Cbo",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.CRD_CBO",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound Ring Transactions on AK : NDR Transactions",
+ "EventCode": "0x39",
+ "EventName": "UNC_M2M_TxC_AK.NDR",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AKC Credits",
+ "EventCode": "0x5F",
+ "EventName": "UNC_M2M_TxC_AKC_CREDITS",
+ "PerPkg": "1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "EventCode": "0x1D",
+ "EventName": "UNC_M2M_TxC_AK_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : All",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD0",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.RDCRD1",
+ "PerPkg": "1",
+ "UMask": "0x88",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP0",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCMP1",
+ "PerPkg": "1",
+ "UMask": "0xa0",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD0",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Full",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_FULL.WRCRD1",
+ "PerPkg": "1",
+ "UMask": "0x90",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : All",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Not Empty",
+ "EventCode": "0x13",
+ "EventName": "UNC_M2M_TxC_AK_CYCLES_NE.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : All",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.PREF_RD_CAM_HIT",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Allocations",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2M_TxC_AK_INSERTS.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "EventCode": "0x1F",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No AK Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "EventCode": "0x20",
+ "EventName": "UNC_M2M_TxC_AK_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : All",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Near Side",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy : Common Mesh Stop - Far Side",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.RDCRD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCMP",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "AK Egress (to CMS) Occupancy",
+ "EventCode": "0x12",
+ "EventName": "UNC_M2M_TxC_AK_OCCUPANCY.WRCRD",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Cache",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CACHE",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Outbound DRS Ring Transactions to Cache : Data to Core",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2M_TxC_BL.DRS_CORE",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Near Side",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Credit Acquired : Common Mesh Stop - Far Side",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2M_TxC_BL_CREDITS_ACQUIRED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : All",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Near Side",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Full : Common Mesh Stop - Far Side",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_FULL.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : All",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Near Side",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Not Empty : Common Mesh Stop - Far Side",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2M_TxC_BL_CYCLES_NE.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : All",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x3",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Near Side",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "BL Egress (to CMS) Allocations : Common Mesh Stop - Far Side",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2M_TxC_BL_INSERTS.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "EventCode": "0x1B",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_CYCLES.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Near Side",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles Stalled with No BL Egress (to CMS) Credits : Common Mesh Stop - Far Side",
+ "EventCode": "0x1C",
+ "EventName": "UNC_M2M_TxC_BL_NO_CREDIT_STALLED.CMS1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "EventCode": "0xA6",
+ "EventName": "UNC_M2M_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "EventCode": "0xA7",
+ "EventName": "UNC_M2M_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "EventCode": "0xA2",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "EventCode": "0xA3",
+ "EventName": "UNC_M2M_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "EventCode": "0xA1",
+ "EventName": "UNC_M2M_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "EventCode": "0xA4",
+ "EventName": "UNC_M2M_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M2M_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x80",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "EventCode": "0xA5",
+ "EventName": "UNC_M2M_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9C",
+ "EventName": "UNC_M2M_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "EventCode": "0x9D",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "EventCode": "0x9E",
+ "EventName": "UNC_M2M_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2M_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2M_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2M_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2M_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some examples include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2M_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop. The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "EventCode": "0x9A",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9B",
+ "EventName": "UNC_M2M_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "EventCode": "0xB0",
+ "EventName": "UNC_M2M_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "EventCode": "0xB4",
+ "EventName": "UNC_M2M_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "EventCode": "0xB1",
+ "EventName": "UNC_M2M_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "EventCode": "0xB2",
+ "EventName": "UNC_M2M_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "EventCode": "0xB3",
+ "EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
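As a quick aside on how tools consume these fields: below is a minimal Python sketch that turns the EventCode/UMask pairs of the two UNC_M2M_VERT_RING_IV_IN_USE entries above into raw counter encodings. It assumes the common Intel uncore config layout (event code in config bits 0-7, umask in bits 8-15), and the PMU instance name uncore_m2m_0 in the comment is only a placeholder; both should be checked against the format files under /sys/bus/event_source/devices/ on the target system.

# Sketch: compose raw uncore config values from the JSON fields above.
# Assumes event code in config bits 0-7 and umask in bits 8-15.
events = [
    {"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.UP", "EventCode": "0xB3", "UMask": "0x1"},
    {"EventName": "UNC_M2M_VERT_RING_IV_IN_USE.DN", "EventCode": "0xB3", "UMask": "0x4"},
]
for ev in events:
    config = int(ev["EventCode"], 16) | (int(ev["UMask"], 16) << 8)
    # e.g. UP -> 0x1b3, DN -> 0x4b3; PMU instance name is system-dependent
    print(f"perf stat -a -e uncore_m2m_0/config={config:#x}/   # {ev['EventName']}")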
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "EventCode": "0xB5",
+ "EventName": "UNC_M2M_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 0",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "WPQ Flush : Channel 1",
+ "EventCode": "0x58",
+ "EventName": "UNC_M2M_WPQ_FLUSH.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular : Channel 0",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular : Channel 1",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Regular : Channel 2",
+ "EventCode": "0x4D",
+ "EventName": "UNC_M2M_WPQ_NO_REG_CRD.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Special : Channel 0",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Special : Channel 1",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "M2M-&gt;iMC WPQ Cycles w/Credits - Special : Channel 2",
+ "EventCode": "0x4E",
+ "EventName": "UNC_M2M_WPQ_NO_SPEC_CRD.CHN2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 0",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Channel 1",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Full : Mirror",
+ "EventCode": "0x4A",
+ "EventName": "UNC_M2M_WR_TRACKER_FULL.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 0",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Inserts : Channel 1",
+ "EventCode": "0x56",
+ "EventName": "UNC_M2M_WR_TRACKER_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 0",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Channel 1",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty : Mirror",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Cycles Not Empty",
+ "EventCode": "0x4B",
+ "EventName": "UNC_M2M_WR_TRACKER_NE.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 0",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Inserts : Channel 1",
+ "EventCode": "0x63",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 0",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Non-Posted Occupancy : Channel 1",
+ "EventCode": "0x62",
+ "EventName": "UNC_M2M_WR_TRACKER_NONPOSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 0",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Channel 1",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy : Mirror",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_NONTGR",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Occupancy",
+ "EventCode": "0x55",
+ "EventName": "UNC_M2M_WR_TRACKER_OCCUPANCY.MIRR_PWR",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 0",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Inserts : Channel 1",
+ "EventCode": "0x5E",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_INSERTS.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 0",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Write Tracker Posted Occupancy : Channel 1",
+ "EventCode": "0x5D",
+ "EventName": "UNC_M2M_WR_TRACKER_POSTED_OCCUPANCY.CH1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2M"
+ },
+ {
+ "BriefDescription": "Clockticks in the UBOX using a dedicated 48-bit Fixed Counter",
+ "EventCode": "0xff",
+ "EventName": "UNC_U_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Doorbell",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.DOORBELL_RCVD",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : Interrupt",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.INT_PRIO",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : Interrupt : Interrupts",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : IPI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.IPI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : IPI : Inter Processor Interrupts",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : MSI",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.MSI_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : MSI : Message Signaled Interrupts - interrupts sent by devices (including PCIe via IOxAPIC) (Socket Mode only)",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Message Received : VLW",
+ "EventCode": "0x42",
+ "EventName": "UNC_U_EVENT_MSG.VLW_RCVD",
+ "PerPkg": "1",
+ "PublicDescription": "Message Received : VLW : Virtual Logical Wire (legacy) message were received from Uncore.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "IDI Lock/SplitLock Cycles",
+ "EventCode": "0x44",
+ "EventName": "UNC_U_LOCK_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "IDI Lock/SplitLock Cycles : Number of times an IDI Lock/SplitLock sequence was started",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.RxC_CYCLES_NE_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "EventCode": "0x4D",
+ "EventName": "UNC_U_M2U_MISC1.TxC_CYCLES_CRD_OVF_CBO_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.RxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_CRD_OVF_VN0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AK",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_AKC",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_EMPTY_BL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "EventCode": "0x4E",
+ "EventName": "UNC_U_M2U_MISC2.TxC_CYCLES_FULL_BL",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "EventCode": "0x4F",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AK",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "EventCode": "0x4F",
+ "EventName": "UNC_U_M2U_MISC3.TxC_CYCLES_FULL_AKC",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "Cycles PHOLD Assert to Ack : Assert to ACK",
+ "EventCode": "0x45",
+ "EventName": "UNC_U_PHOLD_CYCLES.ASSERT_TO_ACK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles PHOLD Assert to Ack : Assert to ACK : PHOLD cycles.",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.PFTCH_BUF_EMPTY",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDRAND",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDRAND",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "UNC_U_RACU_DRNG.RDSEED",
+ "EventCode": "0x4C",
+ "EventName": "UNC_U_RACU_DRNG.RDSEED",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "UBOX"
+ },
+ {
+ "BriefDescription": "RACU Request",
+ "EventCode": "0x46",
+ "EventName": "UNC_U_RACU_REQUESTS",
+ "PerPkg": "1",
+ "PublicDescription": "RACU Request : Number outstanding register requests within message channel tracker",
+ "Unit": "UBOX"
+ }
+]
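The event tables in this patch, including the uncore-io.json file added below, are plain JSON arrays consumed by perf's pmu-events build. Each entry carries at least an EventName and a Unit, usually hex-encoded EventCode and UMask fields, and optional fields such as PublicDescription, PortMask and FCMask. As a minimal sketch (not part of the patch itself), one way such a file could be sanity-checked before submission is shown below; the required-key set, the hypothetical helper name check_events.py, and the default path are illustrative assumptions, not rules taken from the perf build system.

    #!/usr/bin/env python3
    # Illustrative sketch only: sanity-check a pmu-events JSON file such as the
    # ones added by this patch.  The required-key set and the default path are
    # assumptions made for this example, not taken from the perf build system.
    import json
    import sys

    REQUIRED_KEYS = {"EventName", "Unit", "PerPkg"}  # assumed minimum per entry

    def check(path):
        with open(path) as f:
            events = json.load(f)          # each file is a single JSON array
        for i, ev in enumerate(events):
            missing = REQUIRED_KEYS - ev.keys()
            if missing:
                print(f"{path}: entry {i} ({ev.get('EventName', '?')}) is missing {sorted(missing)}")
            for key in ("EventCode", "UMask", "PortMask", "FCMask"):
                if key in ev:
                    int(ev[key], 16)       # hex-encoded fields must parse
        print(f"{path}: {len(events)} events checked")

    if __name__ == "__main__":
        paths = sys.argv[1:] or ["tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json"]
        for p in paths:
            check(p)

Saved as, say, check_events.py, it could be run from the kernel tree against one or more of the added files, e.g. python3 check_events.py tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json.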
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json
new file mode 100644
index 000000000000..996028071ee4
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-io.json
@@ -0,0 +1,8944 @@
+[
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_READ",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "MetricName": "LLC_MISSES.PCIE_READ",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
+ "EventCode": "0x83",
+ "EventName": "LLC_MISSES.PCIE_WRITE",
+ "FCMask": "0x07",
+ "Filter": "ch_mask=0x1f",
+ "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 + UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "MetricName": "LLC_MISSES.PCIE_WRITE",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "ScaleUnit": "4Bytes",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_IIO_BANDWIDTH_IN.PART0_FREERUN",
+ "UMask": "0x20",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_IIO_BANDWIDTH_IN.PART1_FREERUN",
+ "UMask": "0x21",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_IIO_BANDWIDTH_IN.PART2_FREERUN",
+ "UMask": "0x22",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_IIO_BANDWIDTH_IN.PART3_FREERUN",
+ "UMask": "0x23",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_IIO_BANDWIDTH_IN.PART4_FREERUN",
+ "UMask": "0x24",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_IIO_BANDWIDTH_IN.PART5_FREERUN",
+ "UMask": "0x25",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_IIO_BANDWIDTH_IN.PART6_FREERUN",
+ "UMask": "0x26",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for every 32 bytes of data sent from the IO agent to the SOC",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_IIO_BANDWIDTH_IN.PART7_FREERUN",
+ "UMask": "0x27",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "Clockticks of the integrated IO (IIO) traffic controller",
+ "EventCode": "0x01",
+ "EventName": "UNC_IIO_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for IIO clocktick",
+ "EventCode": "0xff",
+ "EventName": "UNC_IIO_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "Free running counter that increments for integrated IO (IIO) traffic controller clockticks",
+ "UMask": "0x10",
+ "Unit": "iio_free_running"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts : All Ports",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0-7",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0xff",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0-7",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 0",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 1",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 1",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 2",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 2",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 3",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 3",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 4",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 4",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 5",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 5",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 6",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 6",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Inserts of completions with data: Part 7",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_COMP_BUF_INSERTS.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "PCIe Completion Buffer Inserts of completions with data : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 7",
+ "UMask": "0x3",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "EventCode": "0xD5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 0-7",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0-7",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.ALL_PARTS",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 0-7",
+ "UMask": "0xff",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 0",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART0",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 0 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 1",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART1",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 1 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 2",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART2",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 2 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 3",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART3",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 3 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 4",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART4",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 4 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 5",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART5",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 5 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 5",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 6",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART6",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 6 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Completion Buffer Occupancy of completions with data : Part 7",
+ "EventCode": "0xd5",
+ "EventName": "UNC_IIO_COMP_BUF_OCCUPANCY.CMPD.PART7",
+ "FCMask": "0x04",
+ "PerPkg": "1",
+ "PublicDescription": "PCIe Completion Buffer Occupancy : Part 7 : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.CFG_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's PCICFG space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core reading from Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.IO_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's IO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM",
+ "EventCode": "0xc0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core reporting completion of Card read from Core DRAM : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Core writing to Card's MMIO space : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) reading from this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC0",
+ "EventName": "UNC_IIO_DATA_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested by the CPU : Another card (different IIO stack) writing to this card. : Number of DWs (4 bytes) requested by the main die. Includes all requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.ATOMIC.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Atomic requests targeting DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : CmpD - device sending completion to CPU request : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 0",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card reading from DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Card reading from DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 0",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Four byte data request of the CPU : Card writing to DRAM",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Card writing to DRAM : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Messages",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MSG.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Messages : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Card reading from another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Data requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x83",
+ "EventName": "UNC_IIO_DATA_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Data requested of the CPU : Card writing to another Card (same or different stack) : Number of DWs (4 bytes) the card requests of the main die. Includes all requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Passing data to be written",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Passing data to be written : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing final read or write of line",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Issuing final read or write of line : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Processing response from IOMMU",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Processing response from IOMMU : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Issuing to IOMMU",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Issuing to IOMMU : How often different queues (e.g. channel / fc) ask to send request into pipeline",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Request Ownership",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Request Ownership : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests : Writing line",
+ "EventCode": "0x86",
+ "EventName": "UNC_IIO_INBOUND_ARB_REQ.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests : Writing line : How often different queues (e.g. channel / fc) ask to send request into pipeline : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Passing data to be written",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Passing data to be written : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing final read or write of line",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Issuing final read or write of line : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Processing response from IOMMU",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Processing response from IOMMU : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Issuing to IOMMU",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Issuing to IOMMU : How often different queues (e.g. channel / fc) are allowed to send request into pipeline",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Request Ownership",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Request Ownership : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Incoming arbitration requests granted : Writing line",
+ "EventCode": "0x87",
+ "EventName": "UNC_IIO_INBOUND_ARB_WON.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Incoming arbitration requests granted : Writing line : How often different queues (e.g. channel / fc) are allowed to send request into pipeline : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 1G Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.1G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 2M Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.2M_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Hits to a 4K Page",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.4K_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups all",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.ALL_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB lookups all : Some transactions have to look up IOTLB multiple times. Counts every time a request looks up IOTLB.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache hits",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": Context cache hits : Counts each time a first look up of the transaction hits the RCC.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache lookups",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.CTXT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": Context cache lookups : Counts each time a transaction looks up root context cache.",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB lookups first",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.FIRST_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB lookups first : Some transactions have to look up IOTLB multiple times. Counts the first time a request looks up IOTLB.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOTLB Fills (same as IOTLB miss)",
+ "EventCode": "0x40",
+ "EventName": "UNC_IIO_IOMMU0.MISSES",
+ "PerPkg": "1",
+ "PublicDescription": ": IOTLB Fills (same as IOTLB miss) : When a transaction misses IOTLB, it does a page walk to look up memory and bring in the relevant page translation. Counts when this page translation is written to IOTLB.",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Cycles PWT full",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.CYC_PWT_FULL",
+ "PerPkg": "1",
+ "PublicDescription": ": Cycles PWT full : Counts cycles the IOMMU has reached its maximum limit for outstanding page walks.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": IOMMU memory access",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.NUM_MEM_ACCESSES",
+ "PerPkg": "1",
+ "PublicDescription": ": IOMMU memory access : IOMMU sends out memory fetches when it misses the cache look up which is indicated by this signal. M2IOSF only uses low priority channel",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 1G page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_1G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 1G page : Counts each time a transaction's first look up hits the SLPWC at the 1G level",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 2M page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_2M_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 2M page : Counts each time a transaction's first look up hits the SLPWC at the 2M level",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWC Hit to a 4K page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_4K_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWC Hit to a 4K page : Counts each time a transaction's first look up hits the SLPWC at the 4K level",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PWT Hit to a 256T page",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_512G_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": PWT Hit to a 256T page : Counts each time a transaction's first look up hits the SLPWC at the 512G level",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache fill",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWC_CACHE_FILLS",
+ "PerPkg": "1",
+ "PublicDescription": ": PageWalk cache fill : When a transaction misses SLPWC, it does a page walk to look up memory and bring in the relevant page translation. When this page translation is written to SLPWC, ObsPwcFillValid_nnnH is asserted.",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": PageWalk cache lookup",
+ "EventCode": "0x41",
+ "EventName": "UNC_IIO_IOMMU1.PWT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": PageWalk cache lookup : Counts each time a transaction looks up second level page walk cache.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Interrupt Entry cache hit",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_HITS",
+ "PerPkg": "1",
+ "PublicDescription": ": Interrupt Entry cache hit : Counts each time a transaction's first look up hits the IEC.",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Interrupt Entry cache lookup",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.INT_CACHE_LOOKUPS",
+ "PerPkg": "1",
+ "PublicDescription": ": Interrupt Entry cache lookup : Counts the number of transaction looks up that interrupt remapping cache.",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Device-selective Context cache invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DEVICE",
+ "PerPkg": "1",
+ "PublicDescription": ": Device-selective Context cache invalidation cycles : Counts number of Device selective context cache invalidation events",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Domain-selective Context cache invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_DOMAIN",
+ "PerPkg": "1",
+ "PublicDescription": ": Domain-selective Context cache invalidation cycles : Counts number of Domain selective context cache invalidation events",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Context cache global invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_CTXT_CACHE_INVAL_GBL",
+ "PerPkg": "1",
+ "PublicDescription": ": Context cache global invalidation cycles : Counts number of Context Cache global invalidation events",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Domain-selective IOTLB invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_DOMAIN",
+ "PerPkg": "1",
+ "PublicDescription": ": Domain-selective IOTLB invalidation cycles : Counts number of Domain selective invalidation events",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Global IOTLB invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_GBL",
+ "PerPkg": "1",
+ "PublicDescription": ": Global IOTLB invalidation cycles : Indicates that IOMMU is doing global invalidation.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Page-selective IOTLB invalidation cycles",
+ "EventCode": "0x43",
+ "EventName": "UNC_IIO_IOMMU3.NUM_INVAL_PAGE",
+ "PerPkg": "1",
+ "PublicDescription": ": Page-selective IOTLB invalidation cycles : Counts number of Page-selective within Domain Invalidation events",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if all bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if all bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x02",
+ "EventName": "UNC_IIO_MASK_MATCH_AND.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "AND Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if all bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus)",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : Non-PCIE bus and !(PCIE bus) : Asserted if any bits specified by mask match",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and PCIE bus : Asserted if any bits specified by mask match",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus)",
+ "EventCode": "0x03",
+ "EventName": "UNC_IIO_MASK_MATCH_OR.NOT_BUS0_NOT_BUS1",
+ "PerPkg": "1",
+ "PublicDescription": "OR Mask/match for debug bus : !(Non-PCIE bus) and !(PCIE bus) : Asserted if any bits specified by mask match",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Counting disabled",
+ "EventCode": "0x80",
+ "EventName": "UNC_IIO_NOTHING",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Occupancy of outbound request queue : To device",
+ "EventCode": "0xC5",
+ "EventName": "UNC_IIO_NUM_OUSTANDING_REQ_FROM_CPU.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Occupancy of outbound request queue : To device : Counts number of outbound requests/completions IIO is currently processing",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Passing data to be written",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": ": Passing data to be written : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Issuing final read or write of line",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Processing response from IOMMU",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Issuing to IOMMU",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Request Ownership",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": ": Request Ownership : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": ": Writing line",
+ "EventCode": "0x88",
+ "EventName": "UNC_IIO_NUM_OUTSTANDING_REQ_OF_CPU.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": ": Writing line : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : From IRP",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.IRP",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Number requests sent to PCIe from main die : From IRP : Captures Posted/Non-posted allocations from IRP. i.e. either non-confined P2P traffic or from the CPU",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : From ITC",
+ "EventCode": "0xC2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.ITC",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Number requests sent to PCIe from main die : From ITC : Confined P2P",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests sent to PCIe from main die : Completion allocations",
+ "EventCode": "0xc2",
+ "EventName": "UNC_IIO_NUM_REQ_FROM_CPU.PREALLOC",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : Drop request",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.ALL.DROP",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Number requests PCIe makes of the main die : Drop request : Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU. : Packet error detected, must be dropped",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number requests PCIe makes of the main die : All",
+ "EventCode": "0x85",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU.COMMIT.ALL",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Number requests PCIe makes of the main die : All : Counts full PCIe requests before they're broken into a series of cache-line size requests as measured by DATA_REQ_OF_CPU and TXN_REQ_OF_CPU.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Abort",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.ABORT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Confined P2P",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.CONFINED_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Local P2P",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.LOC_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Multi-cast",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MCAST",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Memory",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MEM",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : MsgB",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.MSGB",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Remote P2P",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.REM_P2P",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Num requests sent by PCIe - by target : Ubox",
+ "EventCode": "0x8E",
+ "EventName": "UNC_IIO_NUM_REQ_OF_CPU_BY_TGT.UBOX",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "ITC address map 1",
+ "EventCode": "0x8F",
+ "EventName": "UNC_IIO_NUM_TGT_MATCHED_REQ_OF_CPU",
+ "PerPkg": "1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound cacheline requests issued : 64B requests issued to device",
+ "EventCode": "0xD0",
+ "EventName": "UNC_IIO_OUTBOUND_CL_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Outbound cacheline requests issued : 64B requests issued to device : Each outbound cacheline granular request may need to make multiple passes through the pipeline. Each time a cacheline completes all its passes it advances line",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Outbound TLP (transaction layer packet) requests issued : To device",
+ "EventCode": "0xD1",
+ "EventName": "UNC_IIO_OUTBOUND_TLP_REQS_ISSUED.TO_IO",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "Outbound TLP (transaction layer packet) requests issued : To device : Each time an outbound completes all its passes it advances the pointer",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PWT occupancy",
+ "EventCode": "0x42",
+ "EventName": "UNC_IIO_PWT_OCCUPANCY",
+ "PerPkg": "1",
+ "PublicDescription": "PWT occupancy : Indicates how many page walks are outstanding at any point in time.",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Passing data to be written",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - cacheline complete : Passing data to be written : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Issuing final read or write of line",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - cacheline complete : Issuing final read or write of line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Request Ownership",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - cacheline complete : Request Ownership : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - cacheline complete : Writing line",
+ "EventCode": "0x91",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CL_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - cacheline complete : Writing line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes all its passes (e.g. finishes posting writes to all multi-cast targets) it advances line : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Passing data to be written",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Passing data to be written : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer. : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Issuing final read or write of line",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Issuing final read or write of line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Processing response from IOMMU",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_HIT",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Processing response from IOMMU : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Issuing to IOMMU",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.IOMMU_REQ",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Issuing to IOMMU : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer.",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Request Ownership",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Request Ownership : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer. : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request complete : Writing line",
+ "EventCode": "0x92",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request complete : Writing line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a single PCIe request completes all its cacheline granular requests, it advances pointer. : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Passing data to be written",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.DATA",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - pass complete : Passing data to be written : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Issuing final read or write of line",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.FINAL_RD_WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - pass complete : Issuing final read or write of line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Request Ownership",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.REQ_OWN",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - pass complete : Request Ownership : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "PCIe Request - pass complete : Writing line",
+ "EventCode": "0x90",
+ "EventName": "UNC_IIO_REQ_FROM_PCIE_PASS_CMPL.WR",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0xFF",
+ "PublicDescription": "PCIe Request - pass complete : Writing line : Each PCIe request is broken down into a series of cacheline granular requests and each cacheline size request may need to make multiple passes through the pipeline (e.g. for posted interrupts or multi-cast). Each time a cacheline completes a single pass (e.g. posts a write to single multi-cast target) it advances state : Only for posted requests",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Symbol Times on Link",
+ "EventCode": "0x82",
+ "EventName": "UNC_IIO_SYMBOL_TIMES",
+ "PerPkg": "1",
+ "PublicDescription": "Symbol Times on Link : Gen1 - increment once every 4nS, Gen2 - increment once every 2nS, Gen3 - increment once every 1nS",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.CFG_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's PCICFG space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.IO_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's IO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x20",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core reading from Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space",
+ "EventCode": "0xc1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Core writing to Card's MMIO space : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) reading from this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card.",
+ "EventCode": "0xC1",
+ "EventName": "UNC_IIO_TXN_REQ_BY_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested by the CPU : Another card (different IIO stack) writing to this card. : Also known as Outbound. Number of requests initiated by the main die, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.ATOMIC.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Atomic requests targeting DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x10",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.CMPD.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : CmpD - device sending completion to CPU request : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x80",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x4",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to DRAM",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MEM_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to DRAM : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x1",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Messages",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.MSG.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Messages : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x40",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_READ.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Card reading from another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x8",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x100",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.IOMMU1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x200",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : IOMMU - Type 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART0",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x01",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 0/1/2/3, Or x8 card plugged in to Lane 0/1, Or x4 card is plugged in to slot 0",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART1",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x02",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 1",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART2",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x04",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 2/3, Or x4 card is plugged in to slot 2",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART3",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x08",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 3",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART4",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x10",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x16 card plugged in to Lane 4/5/6/7, Or x8 card plugged in to Lane 4/5, Or x4 card is plugged in to slot 4",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART5",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x20",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 5",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART6",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x40",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x8 card plugged in to Lane 6/7, Or x4 card is plugged in to slot 6",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack)",
+ "EventCode": "0x84",
+ "EventName": "UNC_IIO_TXN_REQ_OF_CPU.PEER_WRITE.PART7",
+ "FCMask": "0x07",
+ "PerPkg": "1",
+ "PortMask": "0x80",
+ "PublicDescription": "Number Transactions requested of the CPU : Card writing to another Card (same or different stack) : Also known as Inbound. Number of 64B cache line requests initiated by the Card, including reads and writes. : x4 card is plugged in to slot 7",
+ "UMask": "0x2",
+ "Unit": "IIO"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x80",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x81",
+ "EventName": "UNC_M2P_AG0_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 0 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x82",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x83",
+ "EventName": "UNC_M2P_AG0_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 6 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7",
+ "EventCode": "0x88",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 7 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x89",
+ "EventName": "UNC_M2P_AG0_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 0 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8a",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8b",
+ "EventName": "UNC_M2P_AG0_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent0 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 0 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 0 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 1 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 2 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 3 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 4 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 5 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 6 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7",
+ "EventCode": "0x84",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 7 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 10 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 8 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9",
+ "EventCode": "0x85",
+ "EventName": "UNC_M2P_AG1_AD_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Acquired : For Transgress 9 : Number of CMS Agent 1 AD credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7",
+ "EventCode": "0x86",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9",
+ "EventCode": "0x87",
+ "EventName": "UNC_M2P_AG1_AD_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 AD Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 AD credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 0 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 1 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 2 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 3 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 4 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5",
+ "EventCode": "0x8c",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 5 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 10 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 8 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9",
+ "EventCode": "0x8d",
+ "EventName": "UNC_M2P_AG1_BL_CRD_ACQUIRED1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Acquired : For Transgress 9 : Number of CMS Agent 1 BL credits acquired in a given cycle, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 0 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 1 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 2 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 3 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 4 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 5 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 6 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7",
+ "EventCode": "0x8e",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 7 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 10 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 8 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9",
+ "EventCode": "0x8f",
+ "EventName": "UNC_M2P_AG1_BL_CRD_OCCUPANCY1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Agent1 BL Credits Occupancy : For Transgress 9 : Number of CMS Agent 1 BL credits in use in a given cycle, per transgress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Clockticks of the mesh to PCI (M2P)",
+ "EventCode": "0x01",
+ "EventName": "UNC_M2P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Clockticks",
+ "EventCode": "0xc0",
+ "EventName": "UNC_M2P_CMS_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Local",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_LOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Local : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle triggered by this tile",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Remote",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_NONLOCAL",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Remote : Counts the number of cycles either the local or incoming distress signals are asserted. : Dynamic Prefetch Throttle received by this tile",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - IV",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_IV",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - IV : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while regular IVs were received, causing DPT to be stalled",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : DPT Stalled - No Credit",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.DPT_STALL_NOCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : DPT Stalled - No Credit : Counts the number of cycles either the local or incoming distress signals are asserted. : DPT occurred while credit not available causing DPT to be stalled",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Horizontal",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.HORZ",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Horizontal : Counts the number of cycles either the local or incoming distress signals are asserted. : If TGR egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Distress signal asserted : Vertical",
+ "EventCode": "0xaf",
+ "EventName": "UNC_M2P_DISTRESS_ASSERTED.VERT",
+ "PerPkg": "1",
+ "PublicDescription": "Distress signal asserted : Vertical : Counts the number of cycles either the local or incoming distress signals are asserted. : If IRQ egress is full, then agents will throttle outgoing AD IDI transactions",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Down",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_DN",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Down : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress Blocking due to Ordering requirements : Up",
+ "EventCode": "0xba",
+ "EventName": "UNC_M2P_EGRESS_ORDERING.IV_SNOOPGO_UP",
+ "PerPkg": "1",
+ "PublicDescription": "Egress Blocking due to Ordering requirements : Up : Counts number of cycles IV was blocked in the TGR Egress due to SNP/GO Ordering requirements",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Even",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Left and Odd",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Even",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AD Ring In Use : Right and Odd",
+ "EventCode": "0xb6",
+ "EventName": "UNC_M2P_HORZ_RING_AD_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AD Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xbb",
+ "EventName": "UNC_M2P_HORZ_RING_AKC_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Even",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Left and Odd",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Left and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Even",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Even : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal AK Ring In Use : Right and Odd",
+ "EventCode": "0xb7",
+ "EventName": "UNC_M2P_HORZ_RING_AK_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal AK Ring In Use : Right and Odd : Counts the number of cycles that the Horizontal AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Even",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Left and Odd",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.LEFT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Left and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Even",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Even : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal BL Ring in Use : Right and Odd",
+ "EventCode": "0xb8",
+ "EventName": "UNC_M2P_HORZ_RING_BL_IN_USE.RIGHT_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal BL Ring in Use : Right and Odd : Counts the number of cycles that the Horizontal BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Left",
+ "EventCode": "0xb9",
+ "EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.LEFT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Left : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Horizontal IV Ring in Use : Right",
+ "EventCode": "0xb9",
+ "EventName": "UNC_M2P_HORZ_RING_IV_IN_USE.RIGHT",
+ "PerPkg": "1",
+ "PublicDescription": "Horizontal IV Ring in Use : Right : Counts the number of cycles that the Horizontal IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : DRS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.DRS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : DRS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCB",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCB_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCB : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credit Acquired : NCS",
+ "EventCode": "0x33",
+ "EventName": "UNC_M2P_IIO_CREDITS_ACQUIRED.NCS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credit Acquired : NCS : Counts the number of credits that are acquired in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.DRS",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : DRS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the DRS message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCB",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCB : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCB message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS",
+ "EventCode": "0x34",
+ "EventName": "UNC_M2P_IIO_CREDITS_REJECT.NCS",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Failed to Acquire a Credit : NCS : Counts the number of times that a request pending in the BL Ingress attempted to acquire either a NCB or NCS credit to transmit into the IIO, but was rejected because no credits were available. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.DRS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : DRS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the DRS message class.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCB_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCB to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCB message class.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_0",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 0 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credits for transfer through CMS Port 0 to the IIO for the NCS message class.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1",
+ "EventCode": "0x32",
+ "EventName": "UNC_M2P_IIO_CREDITS_USED.NCS_1",
+ "PerPkg": "1",
+ "PublicDescription": "M2PCIe IIO Credits in Use : NCS to CMS Port 1 : Counts the number of cycles when one or more credits in the M2PCIe agent for sending transactions into the IIO on either NCB or NCS are in use. Transactions from the BL ring going into the IIO Agent must first acquire a credit. These credits are for either the NCB or NCS message classes. NCB, or non-coherent bypass messages are used to transmit data without coherency (and are common). NCS is used for reads to PCIe (and should be used sparingly). : Credit for transfer through CMS Port 0s to the IIO for the NCS message class.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x46",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Dedicated P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x47",
+ "EventName": "UNC_M2P_LOCAL_DED_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x19",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_0.MS2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Dedicated Credits Returned - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x1a",
+ "EventName": "UNC_M2P_LOCAL_P2P_DED_RETURNED_1.MS2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent0",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent1",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local P2P Shared Credits Returned : Agent2",
+ "EventCode": "0x17",
+ "EventName": "UNC_M2P_LOCAL_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent0",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent1",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent2",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent3",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_3",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent4",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_4",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Returned to credit ring : Agent5",
+ "EventCode": "0x44",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_RETURNED.AGENT_5",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x40",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Local Shared P2P Credit Taken - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x41",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_TAKEN_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF0 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF0_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF1 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF1_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCB",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF2 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF2_NCS",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCB",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCB",
+ "PerPkg": "1",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 0 : M2IOSF3 - NCS",
+ "EventCode": "0x4a",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_0.M2IOSF3_NCS",
+ "PerPkg": "1",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCB",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF4 - NCS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF4_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCB",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Waiting on Local Shared P2P Credit - 1 : M2IOSF5 - NCS",
+ "EventCode": "0x4b",
+ "EventName": "UNC_M2P_LOCAL_SHAR_P2P_CRD_WAIT_1.M2IOSF5_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI0",
+ "EventCode": "0xe6",
+ "EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Miscellaneous Events (mostly from MS2IDI) : Number of cycles MBE is high for MS2IDI1",
+ "EventCode": "0xe6",
+ "EventName": "UNC_M2P_MISC_EXTERNAL.MBE_INST1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : All",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Local NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCB",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "P2P Credit Occupancy : Remote NCS",
+ "EventCode": "0x14",
+ "EventName": "UNC_M2P_P2P_CRD_OCCUPANCY.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : All",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Local NCS",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCB",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Dedicated Credits Received : Remote NCS",
+ "EventCode": "0x16",
+ "EventName": "UNC_M2P_P2P_DED_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : All",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.ALL",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCB",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Local NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.LOCAL_NCS",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCB",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCB",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Shared Credits Received : Remote NCS",
+ "EventCode": "0x15",
+ "EventName": "UNC_M2P_P2P_SHAR_RECEIVED.REMOTE_NCS",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent0",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent1",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote P2P Shared Credits Returned : Agent2",
+ "EventCode": "0x18",
+ "EventName": "UNC_M2P_REMOTE_P2P_SHAR_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent0",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_0",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent1",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_1",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Remote Shared P2P Credit Returned to credit ring : Agent2",
+ "EventCode": "0x45",
+ "EventName": "UNC_M2P_REMOTE_SHAR_P2P_CRD_RETURNED.AGENT_2",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AD",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AD : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : AK",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : AK : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : BL",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : BL : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Horizontal Ring. : IV",
+ "EventCode": "0xac",
+ "EventName": "UNC_M2P_RING_BOUNCES_HORZ.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Horizontal Ring. : IV : Number of cycles incoming messages from the Horizontal ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : AD",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AD",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : AD : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Acknowledgements to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring.",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.AKC",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.BL",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Data Responses to core : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache.",
+ "EventCode": "0xaa",
+ "EventName": "UNC_M2P_RING_BOUNCES_VERT.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Messages that bounced on the Vertical Ring. : Snoops of processor's cache. : Number of cycles incoming messages from the Vertical ring that were bounced, by ring type.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AD",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : AK",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : Acknowledgements to Agent 1",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.AK_AG1",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : BL",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Horizontal Ring : IV",
+ "EventCode": "0xad",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_HORZ.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : AD",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Acknowledgements to core",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AK",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.AKC",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Data Responses to core",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.BL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Sink Starvation on Vertical Ring : Snoops of processor's cache.",
+ "EventCode": "0xab",
+ "EventName": "UNC_M2P_RING_SINK_STARVED_VERT.IV",
+ "PerPkg": "1",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Source Throttle",
+ "EventCode": "0xae",
+ "EventName": "UNC_M2P_RING_SRC_THRTL",
+ "PerPkg": "1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_IDI",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.CHA_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Cycles Not Empty",
+ "EventCode": "0x10",
+ "EventName": "UNC_M2P_RxC_CYCLES_NE.IIO_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Cycles Not Empty : Counts the number of cycles when the M2PCIe Ingress is not empty.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_IDI",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.CHA_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCB",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Ingress (from CMS) Queue Inserts",
+ "EventCode": "0x11",
+ "EventName": "UNC_M2P_RxC_INSERTS.IIO_NCS",
+ "PerPkg": "1",
+ "PublicDescription": "Ingress (from CMS) Queue Inserts : Counts the number of entries inserted into the M2PCIe Ingress Queue. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue latency.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xe5",
+ "EventName": "UNC_M2P_RxR_BUSY_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, because a message from the other queue has higher priority",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - All",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Credited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AD - Uncredited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AD - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AK",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AK : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : AKC - Uncredited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : AKC - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - All",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - All : Number of packets bypassing the CMS Ingress : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Credited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Credited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : BL - Uncredited",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : BL - Uncredited : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Bypass : IV",
+ "EventCode": "0xe2",
+ "EventName": "UNC_M2P_RxR_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Bypass : IV : Number of packets bypassing the CMS Ingress",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - All",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Credited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AD - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : AK",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : AK : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - All",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - All : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Credited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : BL - Uncredited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IFV - Credited",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.IFV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IFV - Credited : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation : IV",
+ "EventCode": "0xe3",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : IV : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Injection Starvation",
+ "EventCode": "0xe4",
+ "EventName": "UNC_M2P_RxR_CRD_STARVED_1",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Injection Starvation : Counts cycles under injection starvation mode. This starvation is triggered when the CMS Ingress cannot send a transaction onto the mesh for a long period of time. In this case, the Ingress is unable to forward to the Egress due to a lack of credit.",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - All",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Credited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AD - Uncredited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AD - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AK",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AK : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : AKC - Uncredited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : AKC - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - All",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - All : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Credited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Credited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : BL - Uncredited",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : BL - Uncredited : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Allocations : IV",
+ "EventCode": "0xe1",
+ "EventName": "UNC_M2P_RxR_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Allocations : IV : Number of allocations into the CMS Ingress The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - All",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Credited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AD - Uncredited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AD - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AK",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AK : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : AKC - Uncredited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : AKC - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - All",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - All : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Credited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Credited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : BL - Uncredited",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : BL - Uncredited : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Transgress Ingress Occupancy : IV",
+ "EventCode": "0xe0",
+ "EventName": "UNC_M2P_RxR_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Transgress Ingress Occupancy : IV : Occupancy event for the Ingress buffers in the CMS The Ingress is used to queue up requests received from the mesh",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xd0",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 0 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 1 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 2 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 3 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 4 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 5 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 6 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xd2",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_AD_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 7 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7",
+ "EventCode": "0xd4",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG0.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR0",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 0 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR1",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 1 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR2",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 2 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR3",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 3 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR4",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 4 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR5",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 5 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR6",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 6 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7",
+ "EventCode": "0xd6",
+ "EventName": "UNC_M2P_STALL0_NO_TxR_HORZ_CRD_BL_AG1.TGR7",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 7 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xd1",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG0.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent0 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 10 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 8 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xd3",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_AD_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No AD Agent1 Transgress Credits : For Transgress 9 : Number of cycles the AD Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9",
+ "EventCode": "0xd5",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG0_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent0 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 0 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR10",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 10 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR8",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 8 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9",
+ "EventCode": "0xd7",
+ "EventName": "UNC_M2P_STALL1_NO_TxR_HORZ_CRD_BL_AG1_1.TGR9",
+ "PerPkg": "1",
+ "PublicDescription": "Stall on No BL Agent1 Transgress Credits : For Transgress 9 : Number of cycles the BL Agent 1 Egress Buffer is stalled waiting for a TGR credit to become available, per transgress.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "UNC_M2P_TxC_CREDITS.PRQ",
+ "EventCode": "0x2d",
+ "EventName": "UNC_M2P_TxC_CREDITS.PRQ",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AD_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.AK_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Full",
+ "EventCode": "0x25",
+ "EventName": "UNC_M2P_TxC_CYCLES_FULL.BL_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Full : Counts the number of cycles when the M2PCIe Egress is full. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AD_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AD_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AK_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.AK_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.BL_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Cycles Not Empty",
+ "EventCode": "0x23",
+ "EventName": "UNC_M2P_TxC_CYCLES_NE.BL_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Cycles Not Empty : Counts the number of cycles when the M2PCIe Egress is not empty. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy. Multiple egress buffers can be tracked at a given time using multiple counters.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AD_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AD_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.AK_CRD_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.BL_0",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Egress (to CMS) Ingress",
+ "EventCode": "0x24",
+ "EventName": "UNC_M2P_TxC_INSERTS.BL_1",
+ "PerPkg": "1",
+ "PublicDescription": "Egress (to CMS) Ingress : Counts the number of number of messages inserted into the the M2PCIe Egress queue. This tracks messages for one of the two CMS ports that are used by the M2PCIe agent. This can be used in conjunction with the M2PCIe Ingress Occupancy Accumulator event in order to calculate average queue occupancy.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - All",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Credited",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : AD - Uncredited",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : AD - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - All",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - All : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Credited",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Credited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal ADS Used : BL - Uncredited",
+ "EventCode": "0xa6",
+ "EventName": "UNC_M2P_TxR_HORZ_ADS_USED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal ADS Used : BL - Uncredited : Number of packets using the Horizontal Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - All",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Credited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AD - Uncredited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AD - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AK",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AK : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : AKC - Uncredited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : AKC - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - All",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - All : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Credited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Credited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : BL - Uncredited",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : BL - Uncredited : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Bypass Used : IV",
+ "EventCode": "0xa7",
+ "EventName": "UNC_M2P_TxR_HORZ_BYPASS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Bypass Used : IV : Number of packets bypassing the Horizontal Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AK",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AK : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Full : IV",
+ "EventCode": "0xa2",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_FULL.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Full : IV : Cycles the Transgress buffers in the Common Mesh Stop are Full. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AD - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AK",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AK : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : AKC - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - All : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Credited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : BL - Uncredited : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV",
+ "EventCode": "0xa3",
+ "EventName": "UNC_M2P_TxR_HORZ_CYCLES_NE.IV",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Horizontal Egress Queue is Not Empty : IV : Cycles the Transgress buffers in the Common Mesh Stop are Not-Empty. The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - All",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Credited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AD - Uncredited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AD - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AK",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AK : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : AKC - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - All",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - All : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Credited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Credited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : BL - Uncredited",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : BL - Uncredited : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Inserts : IV",
+ "EventCode": "0xa1",
+ "EventName": "UNC_M2P_TxR_HORZ_INSERTS.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Inserts : IV : Number of allocations into the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - All",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Credited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AD - Uncredited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AD - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AK",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AK : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : AKC - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - All",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - All : Counts number of Egress packets NACK'ed on to the Horizontal Ring : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Credited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Credited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : BL - Uncredited",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : BL - Uncredited : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress NACKs : IV",
+ "EventCode": "0xa4",
+ "EventName": "UNC_M2P_TxR_HORZ_NACK.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Horizontal Ring",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - All",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x11",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Credited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AD - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AK",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AK : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : AKC - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - All",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - All : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh. : All == Credited + Uncredited",
+ "UMask": "0x44",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Credited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_CRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Credited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : BL - Uncredited : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Occupancy : IV",
+ "EventCode": "0xa0",
+ "EventName": "UNC_M2P_TxR_HORZ_OCCUPANCY.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Occupancy : IV : Occupancy event for the Transgress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Horizontal Ring on the Mesh.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - All",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AD_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AD - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AK",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AK",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AK : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.AKC_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : AKC - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x80",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - All",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - All : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time. : All == Credited + Uncredited",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.BL_UNCRD",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : BL - Uncredited : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Horizontal Egress Injection Starvation : IV",
+ "EventCode": "0xa5",
+ "EventName": "UNC_M2P_TxR_HORZ_STARVED.IV",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Horizontal Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Transgress buffer cannot send a transaction onto the Horizontal ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9c",
+ "EventName": "UNC_M2P_TxR_VERT_ADS_USED.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets using the Vertical Anti-Deadlock Slot, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 0",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AD - Agent 1",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AD - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 0",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AK - Agent 1",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AK - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 0",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : BL - Agent 1",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : BL - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : IV - Agent 1",
+ "EventCode": "0x9d",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS.IV_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : IV - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 0",
+ "EventCode": "0x9e",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 0 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical ADS Used : AKC - Agent 1",
+ "EventCode": "0x9e",
+ "EventName": "UNC_M2P_TxR_VERT_BYPASS_1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical ADS Used : AKC - Agent 1 : Number of packets bypassing the Vertical Egress, broken down by ring type and CMS Agent.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0",
+ "EventCode": "0x94",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1",
+ "EventCode": "0x95",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_FULL1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Full : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Full. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AD - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AK - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : BL - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0",
+ "EventCode": "0x96",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : IV - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 0 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1",
+ "EventCode": "0x97",
+ "EventName": "UNC_M2P_TxR_VERT_CYCLES_NE1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles CMS Vertical Egress Queue Is Not Empty : AKC - Agent 1 : Number of cycles the Common Mesh Stop Egress was Not Empty. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AD - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AD - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AK - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AK - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : BL - Agent 1",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : BL - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : IV - Agent 0",
+ "EventCode": "0x92",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : IV - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 0",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 0 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Allocations : AKC - Agent 1",
+ "EventCode": "0x93",
+ "EventName": "UNC_M2P_TxR_VERT_INSERTS1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Allocations : AKC - Agent 1 : Number of allocations into the Common Mesh Stop Egress. The Egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AD - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AD - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AK - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AK - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 0",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : BL - Agent 1",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : BL - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : IV",
+ "EventCode": "0x98",
+ "EventName": "UNC_M2P_TxR_VERT_NACK0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : IV : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 0",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 0 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress NACKs : AKC - Agent 1",
+ "EventCode": "0x99",
+ "EventName": "UNC_M2P_TxR_VERT_NACK1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress NACKs : AKC - Agent 1 : Counts number of Egress packets NACK'ed on to the Vertical Ring",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AD - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AD - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AD ring. This is commonly used for outbound requests.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AK - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AK - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the AK ring.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the BL ring. This is commonly used to send data from the cache to various destinations.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : BL - Agent 1",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : BL - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 1 destined for the BL ring. This is commonly used for transferring writeback data to the cache.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : IV - Agent 0",
+ "EventCode": "0x90",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : IV - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the IV ring. This is commonly used for snoops to the cores.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 0",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 0 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AD ring. Some example include outbound requests, snoop requests, and snoop responses.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vert Egress Occupancy : AKC - Agent 1",
+ "EventCode": "0x91",
+ "EventName": "UNC_M2P_TxR_VERT_OCCUPANCY1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vert Egress Occupancy : AKC - Agent 1 : Occupancy event for the Egress buffers in the Common Mesh Stop The egress is used to queue up requests destined for the Vertical Ring on the Mesh. : Ring transactions from Agent 0 destined for the AK ring. This is commonly used for credit returns and GO responses.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AD_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AD - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x10",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.AK_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AK - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x20",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.BL_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : BL - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x40",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : IV",
+ "EventCode": "0x9a",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED0.IV_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : IV : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG0",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.AKC_AG1",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 1 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0",
+ "EventCode": "0x9b",
+ "EventName": "UNC_M2P_TxR_VERT_STARVED1.TGC",
+ "PerPkg": "1",
+ "PublicDescription": "CMS Vertical Egress Injection Starvation : AKC - Agent 0 : Counts injection starvation. This starvation is triggered when the CMS Egress cannot send a transaction onto the Vertical ring for a long period of time.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Even",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Down and Odd",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Even",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Even : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AD Ring In Use : Up and Odd",
+ "EventCode": "0xb0",
+ "EventName": "UNC_M2P_VERT_RING_AD_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AD Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AD ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Even",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Down and Odd",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Even",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Even : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AKC Ring In Use : Up and Odd",
+ "EventCode": "0xb4",
+ "EventName": "UNC_M2P_VERT_RING_AKC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AKC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AKC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Even",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Down and Odd",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Down and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Even",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Even : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical AK Ring In Use : Up and Odd",
+ "EventCode": "0xb1",
+ "EventName": "UNC_M2P_VERT_RING_AK_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical AK Ring In Use : Up and Odd : Counts the number of cycles that the Vertical AK ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Even",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Down and Odd",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Down and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Even",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Even : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical BL Ring in Use : Up and Odd",
+ "EventCode": "0xb2",
+ "EventName": "UNC_M2P_VERT_RING_BL_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical BL Ring in Use : Up and Odd : Counts the number of cycles that the Vertical BL ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Down",
+ "EventCode": "0xb3",
+ "EventName": "UNC_M2P_VERT_RING_IV_IN_USE.DN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Down : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical IV Ring in Use : Up",
+ "EventCode": "0xb3",
+ "EventName": "UNC_M2P_VERT_RING_IV_IN_USE.UP",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical IV Ring in Use : Up : Counts the number of cycles that the Vertical IV ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop. There is only 1 IV ring. Therefore, if one wants to monitor the Even ring, they should select both UP_EVEN and DN_EVEN. To monitor the Odd ring, they should select both UP_ODD and DN_ODD.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Even",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x4",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Down and Odd",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.DN_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Down and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x8",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Even",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_EVEN",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Even : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x1",
+ "Unit": "M2PCIe"
+ },
+ {
+ "BriefDescription": "Vertical TGC Ring In Use : Up and Odd",
+ "EventCode": "0xb5",
+ "EventName": "UNC_M2P_VERT_RING_TGC_IN_USE.UP_ODD",
+ "PerPkg": "1",
+ "PublicDescription": "Vertical TGC Ring In Use : Up and Odd : Counts the number of cycles that the Vertical TGC ring is being used at this ring stop. This includes when packets are passing by and when packets are being sunk, but does not include when packets are being sent from the ring stop.We really have two rings in JKT -- a clockwise ring and a counter-clockwise ring. On the left side of the ring, the UP direction is on the clockwise ring and DN is on the counter-clockwise ring. On the right side of the ring, this is reversed. The first half of the CBos are on the left side of the ring, and the 2nd half are on the right side of the ring. In other words (for example), in a 4c part, Cbo 0 UP AD is NOT the same ring as CBo 2 UP AD because they are on opposite sides of the ring.",
+ "UMask": "0x2",
+ "Unit": "M2PCIe"
+ }
+]
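These JSON tables are compiled by perf's pmu-events infrastructure into event aliases, so each EventName above becomes selectable by name, or by its raw EventCode/UMask pair, under the matching uncore PMU. As a rough, illustrative sketch that is not part of this patch, the Python snippet below counts one of the M2PCIe events defined above via perf stat; the PMU instance name "uncore_m2pcie_0" and system-wide counting with -a are assumptions about the target machine, so adjust them to whatever /sys/bus/event_source/devices actually exposes there.

#!/usr/bin/env python3
# Illustrative sketch only (not part of this patch): count an M2PCIe uncore
# event from the table above by passing its EventCode/UMask to `perf stat`.
# Assumption: a PMU instance named "uncore_m2pcie_0" exists on the target
# system; check /sys/bus/event_source/devices/ for the real name.
import subprocess

EVENT_CODE = 0x9a  # UNC_M2P_TxR_VERT_STARVED0 (EventCode from the JSON above)
UMASK = 0x1        # .AD_AG0 umask bit

def count_event(workload, pmu="uncore_m2pcie_0"):
    """Run `perf stat` system-wide around the workload; perf writes the
    counter values to stderr, which is returned to the caller."""
    event = f"{pmu}/event={EVENT_CODE:#x},umask={UMASK:#x}/"
    cmd = ["perf", "stat", "-a", "-e", event] + list(workload)
    result = subprocess.run(cmd, capture_output=True, text=True)
    return result.stderr

if __name__ == "__main__":
    print(count_event(["sleep", "1"]))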
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json
new file mode 100644
index 000000000000..530e9b71b92a
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-memory.json
@@ -0,0 +1,547 @@
+[
+ {
+ "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
+ "EventCode": "0x04",
+ "EventName": "LLC_MISSES.MEM_READ",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM Read CAS commands, w/ and w/o auto-pre, issued on this channel. This includes underfills.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
+ "EventCode": "0x04",
+ "EventName": "LLC_MISSES.MEM_WRITE",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM Write CAS commands issued, w/ and w/o auto-pre, on this channel.",
+ "ScaleUnit": "64Bytes",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : All Activates",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Activate Count : All Activates : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0xb",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Activate Count : Activate due to Bypass",
+ "EventCode": "0x01",
+ "EventName": "UNC_M_ACT_COUNT.BYP",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Activate Count : Activate due to Bypass : Counts the number of DRAM Activate commands sent on this channel. Activate commands are issued to open up a page on the DRAM devices so that it can be read or written to with a CAS. One can calculate the number of Page Misses by subtracting the number of Page Miss precharges from the number of Activates.",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM CAS commands issued",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM CAS commands issued on this channel.",
+ "UMask": "0x3f",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (including underfills)",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM Read CAS commands, w/ and w/o auto-pre, issued on this channel. This includes underfills.",
+ "UMask": "0xf",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_REG",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS commands w/auto-pre : DRAM RD_CAS and WR_CAS Commands : Counts the total number or DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with explicit Precharge. AutoPre is only used in systems that are using closed page policy. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands.",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_PRE_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM read CAS commands issued (does not include underfills)",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_REG",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM Read CAS commands issued on this channel. This includes both regular RD CAS commands as well as those with implicit Precharge. We do not filter based on major mode, as RD_CAS is not issued during WMM (with the exception of underfills).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM underfill read CAS commands issued",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.RD_UNDERFILL",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total of DRAM Read CAS commands issued due to an underfill",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "All DRAM write CAS commands issued",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "Counts the total number of DRAM Write CAS commands issued, w/ and w/o auto-pre, on this channel.",
+ "UMask": "0x30",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR_NONPRE",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/o auto-pre : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre",
+ "EventCode": "0x04",
+ "EventName": "UNC_M_CAS_COUNT.WR_PRE",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM RD_CAS and WR_CAS Commands. : DRAM WR_CAS commands w/ auto-pre : DRAM RD_CAS and WR_CAS Commands",
+ "UMask": "0x20",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Memory controller clock ticks",
+ "EventName": "UNC_M_CLOCKTICKS",
+ "PerPkg": "1",
+ "PublicDescription": "Clockticks of the integrated memory controller (IMC)",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Free running counter that increments for the Memory Controller",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_CLOCKTICKS_FREERUN",
+ "PerPkg": "1",
+ "PublicDescription": "UNC_M_CLOCKTICKS_FREERUN",
+ "UMask": "0x10",
+ "Unit": "imc_free_running"
+ },
+ {
+ "BriefDescription": "DRAM Precharge All Commands",
+ "EventCode": "0x44",
+ "EventName": "UNC_M_DRAM_PRE_ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge All Commands : Counts the number of times that the precharge all command was sent.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.HIGH",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.OPPORTUNISTIC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Number of DRAM Refreshes Issued",
+ "EventCode": "0x45",
+ "EventName": "UNC_M_DRAM_REFRESH.PANIC",
+ "PerPkg": "1",
+ "PublicDescription": "Number of DRAM Refreshes Issued : Counts the number of refreshes issued.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Half clockticks for IMC",
+ "EventCode": "0xff",
+ "EventName": "UNC_M_HCLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PARITY_ERRORS",
+ "EventCode": "0x2c",
+ "EventName": "UNC_M_PARITY_ERRORS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.RD",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.RD",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.TOTAL",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.TOTAL",
+ "PerPkg": "1",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "UNC_M_PCLS.WR",
+ "EventCode": "0xA0",
+ "EventName": "UNC_M_PCLS.WR",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles where DRAM ranks are in power down (CKE) mode",
+ "EventCode": "0x85",
+ "EventName": "UNC_M_POWER_CHANNEL_PPD",
+ "MetricExpr": "(UNC_M_POWER_CHANNEL_PPD / UNC_M_CLOCKTICKS) * 100",
+ "MetricName": "power_channel_ppd",
+ "PerPkg": "1",
+ "PublicDescription": "Channel PPD Cycles : Number of cycles when all the ranks in the channel are in PPD mode. If IBT=off is enabled, then this can be used to count those cycles. If it is not enabled, then this can count the number of cycles when that could have been taken advantage of.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_0",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_1",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_2",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "CKE_ON_CYCLES by Rank : DIMM ID",
+ "EventCode": "0x47",
+ "EventName": "UNC_M_POWER_CKE_CYCLES.LOW_3",
+ "PerPkg": "1",
+ "PublicDescription": "CKE_ON_CYCLES by Rank : DIMM ID : Number of cycles spent in CKE ON mode. The filter allows you to select a rank to monitor. If multiple ranks are in CKE ON mode at one time, the counter will ONLY increment by one rather than doing accumulation. Multiple counters will need to be used to track multiple ranks simultaneously. There is no distinction between the different CKE modes (APD, PPDS, PPDF). This can be determined based on the system programming. These events should commonly be used with Invert to get the number of cycles in power saving mode. Edge Detect is also useful here. Make sure that you do NOT use Invert with Edge Detect (this just confuses the system and is not necessary).",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x86",
+ "EventName": "UNC_M_POWER_CRIT_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Cycles Memory is in self refresh power mode",
+ "EventCode": "0x43",
+ "EventName": "UNC_M_POWER_SELF_REFRESH",
+ "MetricExpr": "(UNC_M_POWER_SELF_REFRESH / UNC_M_CLOCKTICKS) * 100",
+ "MetricName": "power_self_refresh",
+ "PerPkg": "1",
+ "PublicDescription": "Clock-Enabled Self-Refresh : Counts the number of cycles when the iMC is in self-refresh and the iMC still has a clock. This happens in some package C-states. For example, the PCU may ask the iMC to enter self-refresh even though some of the cores are still processing. One use of this is for Monroe technology. Self-refresh is required during package C3 and C6, but there is no clock in the iMC at this time, so it is not possible to count these cases.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT0",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1. : Thermal throttling is performed per DIMM. We support 3 DIMMs per channel. This ID allows us to filter by ID.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Throttle Cycles for Rank 0",
+ "EventCode": "0x46",
+ "EventName": "UNC_M_POWER_THROTTLE_CYCLES.SLOT1",
+ "PerPkg": "1",
+ "PublicDescription": "Throttle Cycles for Rank 0 : Counts the number of cycles while the iMC is being throttled by either thermal constraints or by the PCU throttling. It is not possible to distinguish between the two. This can be filtered by rank. If multiple ranks are selected and are being throttled at the same time, the counter will only increment by 1.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands.",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.ALL",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Counts the number of DRAM Precharge commands sent on this channel.",
+ "UMask": "0x1c",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charges due to page misses",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PAGE_MISS",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to page miss : Counts the number of DRAM Precharge commands sent on this channel. : Pages Misses are due to precharges from bank scheduler (rd/wr requests)",
+ "UMask": "0xc",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.PGT",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to page table : Counts the number of DRAM Precharge commands sent on this channel. : Prechages from Page Table",
+ "UMask": "0x10",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charge for reads",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.RD",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to read : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from read bank scheduler",
+ "UMask": "0x4",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Pre-charge for writes",
+ "EventCode": "0x02",
+ "EventName": "UNC_M_PRE_COUNT.WR",
+ "PerPkg": "1",
+ "PublicDescription": "DRAM Precharge commands. : Precharge due to write : Counts the number of DRAM Precharge commands sent on this channel. : Precharge from write bank scheduler",
+ "UMask": "0x8",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Full",
+ "EventCode": "0x19",
+ "EventName": "UNC_M_RDB_FULL",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Inserts",
+ "EventCode": "0x17",
+ "EventName": "UNC_M_RDB_INSERTS",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Not Empty",
+ "EventCode": "0x18",
+ "EventName": "UNC_M_RDB_NOT_EMPTY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Data Buffer Occupancy",
+ "EventCode": "0x1A",
+ "EventName": "UNC_M_RDB_OCCUPANCY",
+ "PerPkg": "1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "EventCode": "0x12",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Full Cycles",
+ "EventCode": "0x15",
+ "EventName": "UNC_M_RPQ_CYCLES_FULL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Full Cycles : Counts the number of cycles when the Read Pending Queue is full. When the RPQ is full, the HA will not be able to issue any additional read requests into the iMC. This count should be similar count in the HA which tracks the number of cycles that the HA has no RPQ credits, just somewhat smaller to account for the credit return overhead. We generally do not expect to see RPQ become full except for potentially during Write Major Mode or while running with slow DRAM. This event only tracks non-ISOC queue entries.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Not Empty",
+ "EventCode": "0x11",
+ "EventName": "UNC_M_RPQ_CYCLES_NE.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Not Empty : Counts the number of cycles that the Read Pending Queue is not empty. This can then be used to calculate the average occupancy (in conjunction with the Read Pending Queue Occupancy count). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This filter is to be used in conjunction with the occupancy filter so that one can correctly track the average occupancies for schedulable entries and scheduled requests.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Allocations",
+ "EventCode": "0x10",
+ "EventName": "UNC_M_RPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Allocations : Counts the number of allocations into the Read Pending Queue. This queue is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory. This includes both ISOCH and non-ISOCH requests.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x80",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Read Pending Queue Occupancy",
+ "EventCode": "0x81",
+ "EventName": "UNC_M_RPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Read Pending Queue Occupancy : Accumulates the occupancies of the Read Pending Queue each cycle. This can then be used to calculate both the average occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The RPQ is used to schedule reads out to the memory controller and to track the requests. Requests allocate into the RPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after the CAS command has been issued to memory.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x22",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Full Cycles : Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Full Cycles",
+ "EventCode": "0x16",
+ "EventName": "UNC_M_WPQ_CYCLES_FULL_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Full Cycles : Counts the number of cycles when the Write Pending Queue is full. When the WPQ is full, the HA will not be able to issue any additional write requests into the iMC. This count should be similar count in the CHA which tracks the number of cycles that the CHA has no WPQ credits, just somewhat smaller to account for the credit return overhead.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Not Empty : Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Not Empty",
+ "EventCode": "0x21",
+ "EventName": "UNC_M_WPQ_CYCLES_NE.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Not Empty : Counts the number of cycles that the Write Pending Queue is not empty. This can then be used to calculate the average queue occupancy (in conjunction with the WPQ Occupancy Accumulation count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Allocations",
+ "EventCode": "0x20",
+ "EventName": "UNC_M_WPQ_INSERTS.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Allocations : Counts the number of allocations into the Write Pending Queue. This can then be used to calculate the average queuing latency (in conjunction with the WPQ occupancy count). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the CHA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x82",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue Occupancy",
+ "EventCode": "0x83",
+ "EventName": "UNC_M_WPQ_OCCUPANCY_PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue Occupancy : Accumulates the occupancies of the Write Pending Queue each cycle. This can then be used to calculate both the average queue occupancy (in conjunction with the number of cycles not empty) and the average latency (in conjunction with the number of allocations). The WPQ is used to schedule write out to the memory controller and to track the writes. Requests allocate into the WPQ soon after they enter the memory controller, and need credits for an entry in this buffer before being sent from the HA to the iMC. They deallocate after being issued to DRAM. Write requests themselves are able to complete (from the perspective of the rest of the system) as soon they have posted to the iMC. This is not to be confused with actually performing the write to DRAM. Therefore, the average latency for this queue is actually not useful for deconstruction intermediate write latencies. So, we provide filtering based on if the request has posted or not. By using the not posted filter, we can track how long writes spent in the iMC before completions were sent to the HA. The posted filter, on the other hand, provides information about how much queueing is actually happenning in the iMC for writes before they are actually issued to memory. High average occupancies will generally coincide with high write major mode counts.",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x23",
+ "EventName": "UNC_M_WPQ_READ_HIT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT.PCH0",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x1",
+ "Unit": "iMC"
+ },
+ {
+ "BriefDescription": "Write Pending Queue CAM Match",
+ "EventCode": "0x24",
+ "EventName": "UNC_M_WPQ_WRITE_HIT.PCH1",
+ "PerPkg": "1",
+ "PublicDescription": "Write Pending Queue CAM Match : Counts the number of times a request hits in the WPQ (write-pending queue). The iMC allows writes and reads to pass up other writes to different addresses. Before a read or a write is issued, it will first CAM the WPQ to see if there is a write pending to that address. When reads hit, they are able to directly pull their data from the WPQ instead of going to memory. Writes that hit will overwrite the existing data. Partial writes that hit will not need to do underfill reads and will simply update their relevant sections.",
+ "UMask": "0x2",
+ "Unit": "iMC"
+ }
+]
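The iMC event descriptions above spell out how the raw counts are meant to be combined: CAS counts translate into bandwidth, and the RPQ/WPQ occupancy, not-empty and insert counts yield average queue occupancy and latency. The following is a minimal, illustrative Python sketch of that arithmetic (not part of the patch itself); the event names are the ones defined above, while the counter values, the one-second sampling interval and the 64-bytes-per-CAS figure are assumed placeholders.

# Derive DRAM bandwidth and average read-queue occupancy/latency from the
# snowridgex iMC events defined above. All counter values are hypothetical;
# in practice they would be read with perf over the same interval.
CACHE_LINE_BYTES = 64                 # assumed data transferred per CAS command
INTERVAL_S = 1.0                      # assumed sampling interval

counts = {
    "UNC_M_CAS_COUNT.RD": 150_000_000,
    "UNC_M_CAS_COUNT.WR": 50_000_000,
    "UNC_M_RPQ_OCCUPANCY_PCH0": 4_800_000_000,
    "UNC_M_RPQ_INSERTS.PCH0": 120_000_000,
    "UNC_M_RPQ_CYCLES_NE.PCH0": 800_000_000,
}

read_bw = counts["UNC_M_CAS_COUNT.RD"] * CACHE_LINE_BYTES / INTERVAL_S
write_bw = counts["UNC_M_CAS_COUNT.WR"] * CACHE_LINE_BYTES / INTERVAL_S

# Per the RPQ descriptions: occupancy / cycles-not-empty gives the average
# occupancy while busy, and occupancy / inserts gives the average latency.
avg_occ = counts["UNC_M_RPQ_OCCUPANCY_PCH0"] / counts["UNC_M_RPQ_CYCLES_NE.PCH0"]
avg_lat = counts["UNC_M_RPQ_OCCUPANCY_PCH0"] / counts["UNC_M_RPQ_INSERTS.PCH0"]

print(f"read bandwidth:  {read_bw / 1e9:.2f} GB/s")
print(f"write bandwidth: {write_bw / 1e9:.2f} GB/s")
print(f"avg RPQ occupancy (while not empty): {avg_occ:.2f} entries")
print(f"avg RPQ latency: {avg_lat:.1f} iMC cycles")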
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json
new file mode 100644
index 000000000000..27fc155f1223
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/uncore-power.json
@@ -0,0 +1,203 @@
+[
+ {
+ "BriefDescription": "Clockticks of the power control unit (PCU)",
+ "EventName": "UNC_P_CLOCKTICKS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_CORE_TRANSITION_CYCLES",
+ "EventCode": "0x60",
+ "EventName": "UNC_P_CORE_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_DEMOTIONS",
+ "EventCode": "0x30",
+ "EventName": "UNC_P_DEMOTIONS",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 0 Cycles",
+ "EventCode": "0x75",
+ "EventName": "UNC_P_FIVR_PS_PS0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 0 Cycles : Cycles spent in phase-shedding power state 0",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 1 Cycles",
+ "EventCode": "0x76",
+ "EventName": "UNC_P_FIVR_PS_PS1_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 1 Cycles : Cycles spent in phase-shedding power state 1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 2 Cycles",
+ "EventCode": "0x77",
+ "EventName": "UNC_P_FIVR_PS_PS2_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 2 Cycles : Cycles spent in phase-shedding power state 2",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Phase Shed 3 Cycles",
+ "EventCode": "0x78",
+ "EventName": "UNC_P_FIVR_PS_PS3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Phase Shed 3 Cycles : Cycles spent in phase-shedding power state 3",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX256 Frequency Clipping",
+ "EventCode": "0x49",
+ "EventName": "UNC_P_FREQ_CLIP_AVX256",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "AVX512 Frequency Clipping",
+ "EventCode": "0x4a",
+ "EventName": "UNC_P_FREQ_CLIP_AVX512",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Thermal Strongest Upper Limit Cycles",
+ "EventCode": "0x04",
+ "EventName": "UNC_P_FREQ_MAX_LIMIT_THERMAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Thermal Strongest Upper Limit Cycles : Number of cycles any frequency is reduced due to a thermal limit. Count only if throttling is occurring.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Power Strongest Upper Limit Cycles",
+ "EventCode": "0x05",
+ "EventName": "UNC_P_FREQ_MAX_POWER_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Power Strongest Upper Limit Cycles : Counts the number of cycles when power is the upper limit on frequency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "IO P Limit Strongest Lower Limit Cycles",
+ "EventCode": "0x73",
+ "EventName": "UNC_P_FREQ_MIN_IO_P_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "IO P Limit Strongest Lower Limit Cycles : Counts the number of cycles when IO P Limit is preventing us from dropping the frequency lower. This algorithm monitors the needs to the IO subsystem on both local and remote sockets and will maintain a frequency high enough to maintain good IO BW. This is necessary for when all the IA cores on a socket are idle but a user still would like to maintain high IO Bandwidth.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Cycles spent changing Frequency",
+ "EventCode": "0x74",
+ "EventName": "UNC_P_FREQ_TRANS_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Cycles spent changing Frequency : Counts the number of cycles when the system is changing frequency. This can not be filtered by thread ID. One can also use it with the occupancy counter that monitors number of threads in C0 to estimate the performance impact that frequency transitions had on the system.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Memory Phase Shedding Cycles",
+ "EventCode": "0x2F",
+ "EventName": "UNC_P_MEMORY_PHASE_SHEDDING_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Memory Phase Shedding Cycles : Counts the number of cycles that the PCU has triggered memory phase shedding. This is a mode that can be run in the iMC physicals that saves power at the expense of additional latency.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C0",
+ "EventCode": "0x2A",
+ "EventName": "UNC_P_PKG_RESIDENCY_C0_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C0 : Counts the number of cycles when the package was in C0. This event can be used in conjunction with edge detect to count C0 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C2E",
+ "EventCode": "0x2B",
+ "EventName": "UNC_P_PKG_RESIDENCY_C2E_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C2E : Counts the number of cycles when the package was in C2E. This event can be used in conjunction with edge detect to count C2E entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C3",
+ "EventCode": "0x2C",
+ "EventName": "UNC_P_PKG_RESIDENCY_C3_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C3 : Counts the number of cycles when the package was in C3. This event can be used in conjunction with edge detect to count C3 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Package C State Residency - C6",
+ "EventCode": "0x2D",
+ "EventName": "UNC_P_PKG_RESIDENCY_C6_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Package C State Residency - C6 : Counts the number of cycles when the package was in C6. This event can be used in conjunction with edge detect to count C6 entrances (or exits using invert). Residency events do not include transition times.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "EventCode": "0x06",
+ "EventName": "UNC_P_PMAX_THROTTLED_CYCLES",
+ "PerPkg": "1",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C0 and C1",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C0",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C-State : C0 and C1 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C3",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C3",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C-State : C3 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Number of cores in C-State : C6 and C7",
+ "EventCode": "0x80",
+ "EventName": "UNC_P_POWER_STATE_OCCUPANCY.CORES_C6",
+ "PerPkg": "1",
+ "PublicDescription": "Number of cores in C-State : C6 and C7 : This is an occupancy event that tracks the number of cores that are in the chosen C-State. It can be used by itself to get the average number of cores in that C-state with threshholding to generate histograms, or with other PCU events and occupancy triggering to capture other details.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "External Prochot",
+ "EventCode": "0x0A",
+ "EventName": "UNC_P_PROCHOT_EXTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "External Prochot : Counts the number of cycles that we are in external PROCHOT mode. This mode is triggered when a sensor off the die determines that something off-die (like DRAM) is too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Internal Prochot",
+ "EventCode": "0x09",
+ "EventName": "UNC_P_PROCHOT_INTERNAL_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Internal Prochot : Counts the number of cycles that we are in Internal PROCHOT mode. This mode is triggered when a sensor on the die determines that we are too hot and must throttle to avoid damaging the chip.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "Total Core C State Transition Cycles",
+ "EventCode": "0x72",
+ "EventName": "UNC_P_TOTAL_TRANSITION_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "Total Core C State Transition Cycles : Number of cycles spent performing core C state transitions across all cores.",
+ "Unit": "PCU"
+ },
+ {
+ "BriefDescription": "VR Hot",
+ "EventCode": "0x42",
+ "EventName": "UNC_P_VR_HOT_CYCLES",
+ "PerPkg": "1",
+ "PublicDescription": "VR Hot : Number of cycles that a CPU SVID VR is hot. Does not cover DRAM VRs",
+ "Unit": "PCU"
+ }
+]
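The PCU events above are almost all cycle counters, so they become meaningful as a fraction of UNC_P_CLOCKTICKS, in the same way the iMC MetricExpr entries divide by UNC_M_CLOCKTICKS. A small illustrative Python sketch follows (not part of the patch itself); the event names come from the file above and the counter values are assumed placeholders.

# Convert the snowridgex PCU cycle counters defined above into residency
# percentages (event / clockticks * 100). Counter values are hypothetical.
counts = {
    "UNC_P_CLOCKTICKS": 1_000_000_000,
    "UNC_P_PKG_RESIDENCY_C0_CYCLES": 650_000_000,
    "UNC_P_PKG_RESIDENCY_C6_CYCLES": 300_000_000,
    "UNC_P_PROCHOT_EXTERNAL_CYCLES": 5_000_000,
    "UNC_P_FREQ_TRANS_CYCLES": 2_000_000,
}

def residency_pct(event: str) -> float:
    """Cycles of `event` as a percentage of PCU clockticks."""
    return 100.0 * counts[event] / counts["UNC_P_CLOCKTICKS"]

for event in ("UNC_P_PKG_RESIDENCY_C0_CYCLES", "UNC_P_PKG_RESIDENCY_C6_CYCLES",
              "UNC_P_PROCHOT_EXTERNAL_CYCLES", "UNC_P_FREQ_TRANS_CYCLES"):
    print(f"{event}: {residency_pct(event):.2f}% of PCU cycles")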
diff --git a/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json b/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
new file mode 100644
index 000000000000..cabe29e70e79
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/snowridgex/virtual-memory.json
@@ -0,0 +1,247 @@
+[
+ {
+ "BriefDescription": "Counts the number of page walks due to loads that miss the PDE (Page Directory Entry) cache.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "200003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to a demand load that did not start a page walk. Account for all page sizes. Will result in a DTLB write from STLB.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to any page size.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 1G page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1GB pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 2M or 4M page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to load DTLB misses to a 4K page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to loads (including SW prefetches) whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for demand loads every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks due to stores that miss the PDE (Page Directory Entry) cache.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to stores that did not start a page walk. Account for all pages sizes. Will result in a DTLB write from STLB.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to any page size.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 1G page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 2M or 4M page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to store DTLB misses to a 4K page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to stores whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for stores every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk). Includes EPT-walk intervals.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of Extended Page Directory Entry hits.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.EPDE_HIT",
+ "PublicDescription": "Counts the number of Extended Page Directory Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of Extended Page Directory Entry misses.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.EPDE_MISS",
+ "PublicDescription": "Counts the number Extended Page Directory Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of Extended Page Directory Pointer Entry hits.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.EPDPE_HIT",
+ "PublicDescription": "Counts the number Extended Page Directory Pointer Entry hits. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of Extended Page Directory Pointer Entry misses.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.EPDPE_MISS",
+ "PublicDescription": "Counts the number Extended Page Directory Pointer Entry misses. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle.",
+ "EventCode": "0x4f",
+ "EventName": "EPT.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an Extended Page table walk including GTLB hits per cycle. The Extended Page Directory cache is used by Virtual Machine operating systems while the guest operating systems use the standard TLB caches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of times there was an ITLB miss and a new translation was filled into the ITLB.",
+ "EventCode": "0x81",
+ "EventName": "ITLB.FILLS",
+ "PublicDescription": "Counts the number of times the machine was unable to find a translation in the Instruction Translation Lookaside Buffer (ITLB) and a new translation was filled into the ITLB. The event is speculative in nature, but will not count translations (page walks) that are begun and not finished, or translations that are finished but not filled into the ITLB.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks due to an instruction fetch that miss the PDE (Page Directory Entry) cache.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.PDE_CACHE_MISS",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Counts the number of first level TLB misses but second level hits due to an instruction fetch that did not start a page walk. Account for all pages sizes. Will result in an ITLB write from STLB.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to any page size.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to any page size. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 1G page.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_1G",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 1G pages. Includes page walks that page fault.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 2M or 4M page.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 2M or 4M pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks completed due to instruction fetch misses to a 4K page.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts the number of page walks completed due to instruction fetches whose address translations missed in all Translation Lookaside Buffer (TLB) levels and were mapped to 4K pages. Includes page walks that page fault.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding in the page miss handler (PMH) for instruction fetches every cycle. A page walk is outstanding from start till PMH becomes idle again (ready to serve next walk).",
+ "SampleAfterValue": "200003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts the number of retired loads that are blocked due to a first level TLB miss.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.DTLB_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts the number of memory uops retired that missed in the second level TLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x13"
+ },
+ {
+ "BriefDescription": "Counts the number of load uops retired that miss in the second Level TLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_LOADS",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Counts the number of store uops retired that miss in the second level TLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_UOPS_RETIRED.DTLB_MISS_STORES",
+ "PEBS": "1",
+ "SampleAfterValue": "200003",
+ "UMask": "0x12"
+ }
+]
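The TLB descriptions above note that the WALK_PENDING events accumulate the number of outstanding page walks every cycle while WALK_COMPLETED counts finished walks, so their ratio approximates the average walk duration. An illustrative Python sketch follows (not part of the patch itself); the counter values are assumed placeholders, and INST_RETIRED.ANY is an assumed core event that is not defined in this file.

# Combine the snowridgex TLB events defined above into average page-walk
# duration and a DTLB-miss rate per kilo-instruction. All counter values are
# hypothetical.
counts = {
    "DTLB_LOAD_MISSES.WALK_PENDING": 90_000_000,
    "DTLB_LOAD_MISSES.WALK_COMPLETED": 3_000_000,
    "ITLB_MISSES.WALK_PENDING": 12_000_000,
    "ITLB_MISSES.WALK_COMPLETED": 400_000,
    "MEM_UOPS_RETIRED.DTLB_MISS": 5_000_000,
    "INST_RETIRED.ANY": 2_000_000_000,   # assumed core event, not part of this file
}

def avg_walk_cycles(prefix: str) -> float:
    """Average page-walk duration in cycles: pending accumulation / completions."""
    return counts[f"{prefix}.WALK_PENDING"] / counts[f"{prefix}.WALK_COMPLETED"]

print(f"avg load DTLB walk: {avg_walk_cycles('DTLB_LOAD_MISSES'):.1f} cycles")
print(f"avg ITLB walk:      {avg_walk_cycles('ITLB_MISSES'):.1f} cycles")

dtlb_mpki = 1000.0 * counts["MEM_UOPS_RETIRED.DTLB_MISS"] / counts["INST_RETIRED.ANY"]
print(f"DTLB-miss memory uops per kilo-instruction: {dtlb_mpki:.2f}")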
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/cache.json b/tools/perf/pmu-events/arch/x86/tigerlake/cache.json
new file mode 100644
index 000000000000..738249a6f488
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/cache.json
@@ -0,0 +1,553 @@
+[
+ {
+ "BriefDescription": "Counts the number of cache lines replaced in L1 data cache.",
+ "EventCode": "0x51",
+ "EventName": "L1D.REPLACEMENT",
+ "PublicDescription": "Counts L1D data line replacements including opportunistic replacements, and replacements that require stall-for-replace or block-for-replace.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.FB_FULL_PERIODS",
+ "PublicDescription": "Counts number of phases a demand request has waited due to L1D Fill Buffer (FB) unavailability. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of cycles a demand request has waited due to L1D due to lack of L2 resources.",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.L2_STALL",
+ "PublicDescription": "Counts number of cycles a demand request has waited due to L1D due to lack of L2 resources. Demand requests include cacheable/uncacheable demand load, store, lock or SW prefetch accesses.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of L1D misses that are outstanding",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING",
+ "PublicDescription": "Counts number of L1D misses that are outstanding in each cycle, that is each cycle the number of Fill Buffers (FB) outstanding required by Demand Reads. FB either is held by demand loads, or it is held by non-demand loads and gets hit at least once by demand. The valid outstanding interval is defined until the FB deallocation by one of the following ways: from FB allocation, if FB is allocated by demand from the demand Hit FB, if it is allocated by hardware or software prefetch. Note: In the L1D, a Demand Read contains cacheable or noncacheable demand loads, including ones causing cache-line splits and reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with L1D load Misses outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0x48",
+ "EventName": "L1D_PEND_MISS.PENDING_CYCLES",
+ "PublicDescription": "Counts duration of L1D miss outstanding in cycles.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 cache lines filling L2",
+ "EventCode": "0xf1",
+ "EventName": "L2_LINES_IN.ALL",
+ "PublicDescription": "Counts the number of L2 cache lines filling the L2. Counting does not cover rejects.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1f"
+ },
+ {
+ "BriefDescription": "Modified cache lines that are evicted by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0xf2",
+ "EventName": "L2_LINES_OUT.NON_SILENT",
+ "PublicDescription": "Counts the number of lines that are evicted by L2 cache when triggered by an L2 cache fill. Those lines are in Modified state. Modified lines are written back to L3",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Non-modified cache lines that are silently dropped by L2 cache when triggered by an L2 cache fill.",
+ "EventCode": "0xf2",
+ "EventName": "L2_LINES_OUT.SILENT",
+ "PublicDescription": "Counts the number of lines that are silently dropped by L2 cache when triggered by an L2 cache fill. These lines are typically in Shared or Exclusive state. A non-threaded event.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "L2 code requests",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_CODE_RD",
+ "PublicDescription": "Counts the total number of L2 code requests.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe4"
+ },
+ {
+ "BriefDescription": "Demand Data Read access L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_DEMAND_DATA_RD",
+ "PublicDescription": "Counts Demand Data Read requests accessing the L2 cache. These requests may hit or miss L2 cache. True-miss exclude misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe1"
+ },
+ {
+ "BriefDescription": "RFO requests to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.ALL_RFO",
+ "PublicDescription": "Counts the total number of RFO (read for ownership) requests to L2 cache. L2 RFO requests include both L1D demand RFO misses as well as L1D RFO prefetches.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xe2"
+ },
+ {
+ "BriefDescription": "L2 cache hits when fetching instructions, code reads.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_HIT",
+ "PublicDescription": "Counts L2 cache hits when fetching instructions, code reads.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc4"
+ },
+ {
+ "BriefDescription": "L2 cache misses when fetching instructions",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.CODE_RD_MISS",
+ "PublicDescription": "Counts L2 cache misses when fetching instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x24"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_HIT",
+ "PublicDescription": "Counts the number of demand Data Read requests initiated by load instructions that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc1"
+ },
+ {
+ "BriefDescription": "Demand Data Read miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.DEMAND_DATA_RD_MISS",
+ "PublicDescription": "Counts demand Data Read requests with true-miss in the L2 cache. True-miss excludes misses that were merged with ongoing L2 misses. An access is counted once.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Read requests with true-miss in L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.MISS",
+ "PublicDescription": "Counts read requests of any type with true-miss in the L2 cache. True-miss excludes L2 misses that were merged with ongoing L2 misses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x3f"
+ },
+ {
+ "BriefDescription": "All accesses to L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.REFERENCES",
+ "PublicDescription": "Counts all requests that were hit or true misses in L2 cache. True-miss excludes misses that were merged with ongoing L2 misses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xff"
+ },
+ {
+ "BriefDescription": "RFO requests that hit L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_HIT",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that hit L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc2"
+ },
+ {
+ "BriefDescription": "RFO requests that miss L2 cache",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.RFO_MISS",
+ "PublicDescription": "Counts the RFO (Read-for-Ownership) requests that miss L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x22"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that hit L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_HIT",
+ "PublicDescription": "Counts Software prefetch requests that hit the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0xc8"
+ },
+ {
+ "BriefDescription": "SW prefetch requests that miss L2 cache.",
+ "EventCode": "0x24",
+ "EventName": "L2_RQSTS.SWPF_MISS",
+ "PublicDescription": "Counts Software prefetch requests that miss the L2 cache. Accounts for PREFETCHNTA and PREFETCHT0/1/2 instructions when FB is not full.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x28"
+ },
+ {
+ "BriefDescription": "L2 writebacks that access L2 cache",
+ "EventCode": "0xf0",
+ "EventName": "L2_TRANS.L2_WB",
+ "PublicDescription": "Counts L2 writebacks that access L2 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles when L1D is locked",
+ "EventCode": "0x63",
+ "EventName": "LOCK_CYCLES.CACHE_LOCK_DURATION",
+ "PublicDescription": "This event counts the number of cycles when the L1D is locked. It is a superset of the 0x1 mask (BUS_LOCK_CLOCKS.BUS_LOCK_DURATION).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core-originated cacheable requests that missed L3 (Except hardware prefetches to the L3)",
+ "EventCode": "0x2e",
+ "EventName": "LONGEST_LAT_CACHE.MISS",
+ "PublicDescription": "Counts core-originated cacheable requests that miss the L3 cache (Longest Latency cache). Requests include data and code reads, Reads-for-Ownership (RFOs), speculative accesses and hardware prefetches to the L1 and L2. It does not include hardware prefetches to the L3, and may not count other types of requests to the L3.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired load instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired load instructions. This event accounts for SW prefetch instructions of PREFETCHNTA or PREFETCHT0/1/2 or PREFETCHW.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x81"
+ },
+ {
+ "BriefDescription": "Retired store instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ALL_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired store instructions.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x82"
+ },
+ {
+ "BriefDescription": "All retired memory instructions.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired memory instructions - loads and stores.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x83"
+ },
+ {
+ "BriefDescription": "Retired load instructions with locked access.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.LOCK_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with locked access.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Retired load instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x41"
+ },
+ {
+ "BriefDescription": "Retired store instructions that split across a cacheline boundary.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.SPLIT_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired store instructions that split across a cacheline boundary.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x42"
+ },
+ {
+ "BriefDescription": "Retired load instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_LOADS",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired load instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Retired store instructions that miss the STLB.",
+ "Data_LA": "1",
+ "EventCode": "0xd0",
+ "EventName": "MEM_INST_RETIRED.STLB_MISS_STORES",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired store instructions that (start a) miss in the 2nd-level TLB (STLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x12"
+ },
+ {
+ "BriefDescription": "Snoop hit a modified(HITM) or clean line(HIT_W_FWD) in another on-pkg core which forwarded the data back due to a retired load instruction.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions where a cross-core snoop hit in another cores caches on this socket, the data was forwarded back to the requesting core as the data was modified (SNOOP_HITM) or the L3 did not have the data(SNOOP_HIT_WITH_FWD).",
+ "SampleAfterValue": "20011",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts the retired load instructions whose data sources were L3 hit and cross-core snoop missed in on-pkg core cache.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions whose data sources were hits in L3 without snoops required",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NONE",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions whose data sources were hits in L3 without snoops required.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Snoop hit without forwarding in another on-pkg core due to a retired load instruction, data was supplied by the L3.",
+ "Data_LA": "1",
+ "EventCode": "0xd2",
+ "EventName": "MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions in which the L3 supplied the data and a cross-core snoop hit in another cores caches on this socket but that other core did not forward the data back (SNOOP_HIT_NO_FWD).",
+ "SampleAfterValue": "20011",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of completed demand load requests that missed the L1, but hit the FB(fill buffer), because a preceding miss to the same cacheline initiated the line to be brought into L1, but data is not yet ready in L1.",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.FB_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop was load missed in L1 but hit FB (Fill Buffers) due to preceding miss to the same cache line with data not ready.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L1 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L1 data cache. This event includes all SW prefetches and lock instructions regardless of the data source.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L1 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L1_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L1 cache.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L2 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with L2 cache hits as data sources.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L2 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L2_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions missed L2 cache as data sources.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Retired load instructions with L3 cache hits as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_HIT",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that hit in the L3 cache.",
+ "SampleAfterValue": "100021",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Retired load instructions missed L3 cache as data sources",
+ "Data_LA": "1",
+ "EventCode": "0xd1",
+ "EventName": "MEM_LOAD_RETIRED.L3_MISS",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired load instructions with at least one uop that missed in the L3 cache.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts demand data reads that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x8003C0001",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts demand reads for ownership (RFO) requests and software prefetches for exclusive ownership (PREFETCHW) that hit a cacheline in the L3 where a snoop hit in another cores caches, data forwarding is required as the data is modified.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10003C0002",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand and prefetch data reads",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.ALL_DATA_RD",
+ "PublicDescription": "Counts the demand and prefetch data reads. All Core Data Reads include cacheable 'Demands' and L2 prefetchers (not L3 prefetchers). Counting also covers reads due to page walks resulted from any request type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Any memory transaction that reached the SQ.",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.ALL_REQUESTS",
+ "PublicDescription": "Counts memory transactions reached the super queue including requests initiated by the core, all L3 prefetches, page walks, etc..",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests sent to uncore",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the Demand Data Read requests sent to uncore. Use it in conjunction with OFFCORE_REQUESTS_OUTSTANDING to determine average latency in the uncore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand RFO requests including regular RFOs, locks, ItoM",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.DEMAND_RFO",
+ "PublicDescription": "Counts the demand RFO (read for ownership) requests including regular RFOs, locks, ItoM.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Offcore outstanding cacheable Core Data Read transactions in SuperQueue (SQ), queue to uncore",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD",
+ "PublicDescription": "Counts the number of offcore outstanding cacheable Core Data Read transactions in the super queue every cycle. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles when offcore outstanding cacheable Core Data Read transactions are present in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding cacheable Core Data Read transactions are present in the super queue. A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation). See corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles when offcore outstanding Demand Data Read transactions are present in SuperQueue (SQ), queue to uncore",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_DATA_RD",
+ "PublicDescription": "Counts cycles when offcore outstanding Demand Data Read transactions are present in the super queue (SQ). A transaction is considered to be in the Offcore outstanding state between L2 miss and transaction completion sent to requestor (SQ de-allocation).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with offcore outstanding demand rfo reads transactions in SuperQueue (SQ), queue to uncore.",
+ "CounterMask": "1",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO",
+ "PublicDescription": "Counts the number of offcore outstanding demand rfo Reads transactions in the super queue every cycle. The 'Offcore outstanding' state of the transaction lasts from the L2 miss until the sending transaction completion to requestor (SQ deallocation). See the corresponding Umask under OFFCORE_REQUESTS.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Demand Data Read transactions pending for off-core. Highly correlated.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD",
+ "PublicDescription": "Counts the number of off-core outstanding Demand Data Read transactions every cycle. A transaction is considered to be in the Off-core outstanding state between L2 cache miss and data-return to the core.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles with at least 6 offcore outstanding Demand Data Read transactions in uncore queue.",
+ "CounterMask": "6",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD_GE_6",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Store Read transactions pending for off-core. Highly correlated.",
+ "EventCode": "0x60",
+ "EventName": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_RFO",
+ "PublicDescription": "Counts the number of off-core outstanding read-for-ownership (RFO) store transactions every cycle. An RFO transaction is considered to be in the Off-core outstanding state between L2 cache miss and transaction completion.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles the superQ cannot take any more entries.",
+ "EventCode": "0xf4",
+ "EventName": "SQ_MISC.SQ_FULL",
+ "PublicDescription": "Counts the cycles for which the thread is active and the superQ cannot take any more entries.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHNTA instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.NTA",
+ "PublicDescription": "Counts the number of PREFETCHNTA instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHW instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.PREFETCHW",
+ "PublicDescription": "Counts the number of PREFETCHW instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT0 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T0",
+ "PublicDescription": "Counts the number of PREFETCHT0 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "EventCode": "0x32",
+ "EventName": "SW_PREFETCH_ACCESS.T1_T2",
+ "PublicDescription": "Counts the number of PREFETCHT1 or PREFETCHT2 instructions executed.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ }
+]
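
The cache events above are each identified by an EventCode/UMask pair. As a rough illustration (not part of the patch), perf encodes such a pair for the x86 core PMU as config = event_select | (umask << 8), so a tiny script can turn these JSON entries into perf raw-event tokens. The file name cache.json and the helper name raw_event are assumptions of the sketch:

    import json

    def raw_event(entry):
        """Build a perf raw-event token (rUUEE) from an EventCode/UMask pair.

        On x86, the raw config is event_select | (umask << 8); perf's 'r<hex>'
        shorthand takes the same value. Entries listing several codes, such as
        "0xB7, 0xBB", use only the first code here for simplicity.
        """
        event = int(entry["EventCode"].split(",")[0], 16)
        umask = int(entry["UMask"], 16)
        return "r%04x" % ((umask << 8) | event)

    if __name__ == "__main__":
        with open("cache.json") as f:
            events = json.load(f)
        for e in events:
            if e["EventName"].startswith("L2_RQSTS."):
                print(e["EventName"], raw_event(e))

For example, the L2_RQSTS.MISS entry (EventCode 0x24, UMask 0x3f) maps to r3f24, which can be counted with 'perf stat -e r3f24'.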
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json b/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json
new file mode 100644
index 000000000000..63b5b56d1ed0
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/floating-point.json
@@ -0,0 +1,105 @@
+[
+ {
+ "BriefDescription": "Counts all microcode FP assists.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.FP",
+ "PublicDescription": "Counts all microcode Floating Point assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 4 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RCP DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 128-bit packed single and 256-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and packed double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.4_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 128-bit packed single precision and 256-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 or/and 4 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point and packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX RCP14 RSQRT14 SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational 512-bit packed single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 16 computation operations, one for each element. Applies to SSE* and AVX* packed single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT14 RCP14 FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision FP instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, 1 for each element. Applies to SSE* and AVX* packed single precision and double precision FP instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.8_FLOPS",
+ "PublicDescription": "Number of SSE/AVX computational 256-bit packed single precision and 512-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 8 computation operations, one for each element. Applies to SSE* and AVX* packed single precision and double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT RSQRT RSQRT14 RCP RCP14 DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x60"
+ },
+ {
+ "BriefDescription": "Number of SSE/AVX computational scalar floating-point instructions retired; some instructions will count twice as noted below. Applies to SSE* and AVX* scalar, double and single precision floating-point: ADD SUB MUL DIV MIN MAX RCP14 RSQRT14 RANGE SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform multiple calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision and double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar double precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "PublicDescription": "Number of SSE/AVX computational scalar single precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 1 computational operation. Applies to SSE* and AVX* scalar single precision floating-point instructions: ADD SUB MUL DIV MIN MAX SQRT RSQRT RCP FM(N)ADD/SUB. FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element. The DAZ and FTZ flags in the MXCSR register need to be set when using these events.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of any Vector retired FP arithmetic instructions",
+ "EventCode": "0xc7",
+ "EventName": "FP_ARITH_INST_RETIRED.VECTOR",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xfc"
+ }
+]
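
The FP_ARITH_INST_RETIRED descriptions above spell out how many floating-point operations each count represents (1 for scalar, 2 for 128-bit packed double, 4 for 128-bit packed single or 256-bit packed double, and so on). A minimal Python sketch of turning counter readings into a FLOP total; the counts dictionary below is invented for illustration:

    # FLOPs contributed per retired instruction for each FP_ARITH_INST_RETIRED
    # sub-event, as stated in the event descriptions above.
    FLOPS_PER_COUNT = {
        "SCALAR_SINGLE": 1,
        "SCALAR_DOUBLE": 1,
        "128B_PACKED_DOUBLE": 2,
        "128B_PACKED_SINGLE": 4,
        "256B_PACKED_DOUBLE": 4,
        "256B_PACKED_SINGLE": 8,
        "512B_PACKED_DOUBLE": 8,
        "512B_PACKED_SINGLE": 16,
    }

    def total_flops(counts):
        """Sum FLOPs from per-event counts; missing events count as zero."""
        return sum(FLOPS_PER_COUNT[name] * counts.get(name, 0)
                   for name in FLOPS_PER_COUNT)

    # Hypothetical readings, e.g. collected with 'perf stat' over a workload.
    counts = {"SCALAR_DOUBLE": 1_000_000, "256B_PACKED_DOUBLE": 250_000}
    print(total_flops(counts))  # 1_000_000*1 + 250_000*4 = 2_000_000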
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json b/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
new file mode 100644
index 000000000000..23b8528590b3
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/frontend.json
@@ -0,0 +1,353 @@
+[
+ {
+ "BriefDescription": "Counts the total number when the front end is resteered, mainly when the BPU cannot provide a correct prediction and this is corrected by other branch handling mechanisms at the front end.",
+ "EventCode": "0xe6",
+ "EventName": "BACLEARS.ANY",
+ "PublicDescription": "Counts the number of times the front-end is resteered when it finds a branch instruction in a fetch line. This occurs for the first time a branch instruction is fetched or when the branch is not tracked by the BPU (Branch Prediction Unit) anymore.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Decode Stream Buffer (DSB)-to-MITE transitions count.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.COUNT",
+ "PublicDescription": "Counts the number of Decode Stream Buffer (DSB a.k.a. Uop Cache)-to-MITE speculative transitions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "DSB-to-MITE switch true penalty cycles.",
+ "EventCode": "0xab",
+ "EventName": "DSB2MITE_SWITCHES.PENALTY_CYCLES",
+ "PublicDescription": "Decode Stream Buffer (DSB) is a Uop-cache that holds translations of previously fetched instructions that were decoded by the legacy x86 decode pipeline (MITE). This event counts fetch penalty cycles when a transition occurs from DSB to MITE.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x1",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced DSB (Decode stream buffer i.e. the decoded instruction-cache) miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced a critical DSB miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.DSB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x11",
+ "PEBS": "1",
+ "PublicDescription": "Number of retired Instructions that experienced a critical DSB (Decode stream buffer i.e. the decoded instruction-cache) miss. Critical means stalls were exposed to the back-end as a result of the DSB miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced iTLB true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.ITLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x14",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced iTLB (Instruction TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L1 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L1I_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x12",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L1 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced Instruction L2 Cache true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.L2_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x13",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions who experienced Instruction L2 Cache true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 1 cycle",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500106",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 1 cycle which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_128",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x508006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 128 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 16 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_16",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x501006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 16 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions after front-end starvation of at least 2 cycles",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500206",
+ "PEBS": "1",
+ "PublicDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of at least 2 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_256",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x510006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 256 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end had at least 1 bubble-slot for a period of 2 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x100206",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after the front-end had at least 1 bubble-slot for a period of 2 cycles. A bubble-slot is an empty issue-pipeline slot while there was no RAT stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 32 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_32",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x502006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 32 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_4",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500406",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 4 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_512",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x520006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 512 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_64",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x504006",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 64 cycles which was not interrupted by a back-end stall.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired instructions that are fetched after an interval where the front-end delivered no uops for a period of 8 cycles which was not interrupted by a back-end stall.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.LATENCY_GE_8",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x500806",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired instructions that are delivered to the back-end after a front-end stall of at least 8 cycles. During this period the front-end delivered no uops.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Retired Instructions who experienced STLB (2nd level TLB) true miss.",
+ "EventCode": "0xc6",
+ "EventName": "FRONTEND_RETIRED.STLB_MISS",
+ "MSRIndex": "0x3F7",
+ "MSRValue": "0x15",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired Instructions that experienced STLB (2nd level TLB) true miss.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache miss.",
+ "EventCode": "0x80",
+ "EventName": "ICACHE_16B.IFDATA_STALL",
+ "PublicDescription": "Counts cycles where a code line fetch is stalled due to an L1 instruction cache miss. The legacy decode pipeline works at a 16 Byte granularity.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_HIT",
+ "PublicDescription": "Counts instruction fetch tag lookups that hit in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_MISS",
+ "PublicDescription": "Counts instruction fetch tag lookups that miss in the instruction cache (L1I). Counts at 64-byte cache-line granularity. Accounts for both cacheable and uncacheable accesses.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "EventCode": "0x83",
+ "EventName": "ICACHE_64B.IFTAG_STALL",
+ "PublicDescription": "Counts cycles where a code fetch is stalled due to L1 instruction cache tag miss.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles Decode Stream Buffer (DSB) is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles DSB is delivering optimal number of Uops",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.DSB_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the Decode Stream Buffer (DSB) path.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering any Uop",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_ANY",
+ "PublicDescription": "Counts the number of cycles uops were delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles MITE is delivering optimal number of Uops",
+ "CounterMask": "5",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_CYCLES_OK",
+ "PublicDescription": "Counts the number of cycles where optimal number of uops was delivered to the Instruction Decode Queue (IDQ) from the MITE (legacy decode pipeline) path. During these cycles uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Uops delivered to Instruction Decode Queue (IDQ) from MITE path",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MITE_UOPS",
+ "PublicDescription": "Counts the number of uops delivered to Instruction Decode Queue (IDQ) from the MITE path. This also means that uops are not being delivered from the Decode Stream Buffer (DSB).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles when uops are being delivered to IDQ while MS is busy",
+ "CounterMask": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_CYCLES_ANY",
+ "PublicDescription": "Counts cycles during which uops are being delivered to Instruction Decode Queue (IDQ) while the Microcode Sequencer (MS) is busy. Uops maybe initiated by Decode Stream Buffer (DSB) or MITE.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Number of switches from DSB or MITE to the MS",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_SWITCHES",
+ "PublicDescription": "Number of switches from DSB (Decode Stream Buffer) or MITE (legacy decode pipeline) to the Microcode Sequencer.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops delivered to IDQ while MS is busy",
+ "EventCode": "0x79",
+ "EventName": "IDQ.MS_UOPS",
+ "PublicDescription": "Counts the total number of uops delivered by the Microcode Sequencer (MS). Any instruction over 4 uops will be delivered by the MS. Some instructions such as transcendentals may additionally generate uops from the MS.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x30"
+ },
+ {
+ "BriefDescription": "Uops not delivered by IDQ when backend of the machine is not stalled",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CORE",
+ "PublicDescription": "Counts the number of uops not delivered to by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when no uops are not delivered by the IDQ when backend of the machine is not stalled",
+ "CounterMask": "5",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE",
+ "PublicDescription": "Counts the number of cycles when no uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when optimal number of uops was delivered to the back-end when the back-end is not stalled",
+ "CounterMask": "1",
+ "EventCode": "0x9c",
+ "EventName": "IDQ_UOPS_NOT_DELIVERED.CYCLES_FE_WAS_OK",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles when the optimal number of uops were delivered by the Instruction Decode Queue (IDQ) to the back-end of the pipeline when there was no back-end stalls. This event counts for one SMT thread in a given cycle.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ }
+]
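
IDQ.DSB_UOPS, IDQ.MITE_UOPS and IDQ.MS_UOPS above split delivered uops by front-end source, so a derived "DSB coverage" ratio is often computed from them. A hedged Python sketch; the numbers are placeholders and the ratio is a common convention rather than something this file defines:

    def dsb_coverage(dsb_uops, mite_uops, ms_uops):
        """Fraction of IDQ uops delivered from the DSB (uop cache) path."""
        total = dsb_uops + mite_uops + ms_uops
        return dsb_uops / total if total else 0.0

    # Placeholder values standing in for readings of IDQ.DSB_UOPS,
    # IDQ.MITE_UOPS and IDQ.MS_UOPS from 'perf stat'.
    print(f"DSB coverage: {dsb_coverage(8_000_000, 1_500_000, 500_000):.1%}")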
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/memory.json b/tools/perf/pmu-events/arch/x86/tigerlake/memory.json
new file mode 100644
index 000000000000..8848fcbcc35c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/memory.json
@@ -0,0 +1,218 @@
+[
+ {
+ "BriefDescription": "Execution stalls while L3 cache miss demand load is outstanding.",
+ "CounterMask": "6",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L3_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x6"
+ },
+ {
+ "BriefDescription": "Number of machine clears due to memory ordering conflicts.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.MEMORY_ORDERING",
+ "PublicDescription": "Counts the number of Machine Clears detected dye to memory ordering. Memory Ordering Machine Clears may apply when a memory read may not conform to the memory ordering rules of the x86 architecture",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_128",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x80",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 128 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "1009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_16",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x10",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 16 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "20011",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_256",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x100",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 256 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "503",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_32",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x20",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 32 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_4",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x4",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 4 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_512",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x200",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 512 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "101",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_64",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x40",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 64 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "2003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles.",
+ "Data_LA": "1",
+ "EventCode": "0xcd",
+ "EventName": "MEM_TRANS_RETIRED.LOAD_LATENCY_GT_8",
+ "MSRIndex": "0x3F6",
+ "MSRValue": "0x8",
+ "PEBS": "2",
+ "PublicDescription": "Counts randomly selected loads when the latency from first dispatch to completion is greater than 8 cycles. Reported latency may be longer than just the memory latency.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Demand Data Read requests who miss L3 cache",
+ "EventCode": "0xb0",
+ "EventName": "OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "PublicDescription": "Demand Data Read requests who miss L3 cache.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED",
+ "PublicDescription": "Counts the number of times RTM abort was triggered.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_EVENTS",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to none of the previous 4 categories (e.g. interrupt).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts)",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEM",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to various memory events (e.g. read/write capacity and conflicts).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to incompatible memory type",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_MEMTYPE",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to incompatible memory type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution aborted due to HLE-unfriendly instructions",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.ABORTED_UNFRIENDLY",
+ "PublicDescription": "Counts the number of times an RTM execution aborted due to HLE-unfriendly instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution successfully committed",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.COMMIT",
+ "PublicDescription": "Counts the number of times RTM commit succeeded.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an RTM execution started.",
+ "EventCode": "0xc9",
+ "EventName": "RTM_RETIRED.START",
+ "PublicDescription": "Counts the number of times we entered an RTM region. Does not count nested transactions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of times a class of instructions that may cause a transactional abort was executed inside a transactional region",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC2",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a vzeroupper instruction.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times an instruction execution caused the transactional nest count supported to be exceeded",
+ "EventCode": "0x5d",
+ "EventName": "TX_EXEC.MISC3",
+ "PublicDescription": "Counts Unfriendly TSX abort triggered by a nest count that is too deep.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional reads",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_READ",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional reads",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Speculatively counts the number of TSX aborts due to a data capacity limitation for transactional writes.",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CAPACITY_WRITE",
+ "PublicDescription": "Speculatively counts the number of Transactional Synchronization Extensions (TSX) aborts due to a data capacity limitation for transactional writes.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of times a transactional abort was signaled due to a data conflict on a transactionally accessed address",
+ "EventCode": "0x54",
+ "EventName": "TX_MEM.ABORT_CONFLICT",
+ "PublicDescription": "Counts the number of times a TSX line had a cache conflict.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
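
(The load-latency events above are threshold events: the hex MSRValue programmed into MSR 0x3F6 is the minimum latency, in cycles, that a randomly selected load must exceed to be sampled. As a rough, hypothetical illustration -- not part of the patch -- the short Python sketch below parses such an event file and decodes those thresholds; the PATH constant is an assumption about the file name and may need adjusting to your tree.)

    import json
    import sys

    # Assumed location of the event file whose tail is shown above.
    PATH = "tools/perf/pmu-events/arch/x86/tigerlake/memory.json"

    def latency_threshold(event):
        # For MEM_TRANS_RETIRED.LOAD_LATENCY_GT_* entries the MSRValue field
        # holds the latency threshold (in cycles) as a hex string.
        return int(event["MSRValue"], 16)

    if __name__ == "__main__":
        path = sys.argv[1] if len(sys.argv) > 1 else PATH
        with open(path) as f:
            events = json.load(f)
        for ev in events:
            name = ev.get("EventName", "")
            if name.startswith("MEM_TRANS_RETIRED.LOAD_LATENCY_GT_"):
                print(f"{name}: latency > {latency_threshold(ev)} cycles, "
                      f"default sample period {ev.get('SampleAfterValue')}")
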
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/other.json b/tools/perf/pmu-events/arch/x86/tigerlake/other.json
new file mode 100644
index 000000000000..55f3048bcfa6
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/other.json
@@ -0,0 +1,35 @@
+[
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the Non-AVX turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL0_TURBO_LICENSE",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX2 turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL1_TURBO_LICENSE",
+ "PublicDescription": "Counts Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x18"
+ },
+ {
+ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.",
+ "EventCode": "0x28",
+ "EventName": "CORE_POWER.LVL2_TURBO_LICENSE",
+ "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.",
+ "SampleAfterValue": "200003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Counts streaming stores that have any type of response.",
+ "EventCode": "0xB7, 0xBB",
+ "EventName": "OCR.STREAMING_WR.ANY_RESPONSE",
+ "MSRIndex": "0x1a6,0x1a7",
+ "MSRValue": "0x10800",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ }
+]
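
(The OCR.STREAMING_WR.ANY_RESPONSE entry above is an off-core response event: its EventCode and MSRIndex fields are comma-separated hex lists, and MSRValue selects the request/response bits. Below is a minimal, hypothetical Python sketch -- not part of the patch -- that decodes those fields from the other.json file named in the diff header.)

    import json

    PATH = "tools/perf/pmu-events/arch/x86/tigerlake/other.json"

    def hex_list(field):
        # OCR-style entries carry comma-separated hex lists, e.g.
        # EventCode "0xB7, 0xBB" and MSRIndex "0x1a6,0x1a7".
        return [int(tok, 16) for tok in field.split(",")]

    with open(PATH) as f:
        for ev in json.load(f):
            if ev["EventName"].startswith("OCR."):
                print(ev["EventName"],
                      "event codes:", [hex(c) for c in hex_list(ev["EventCode"])],
                      "MSRs:", [hex(m) for m in hex_list(ev["MSRIndex"])],
                      "MSR value:", ev["MSRValue"])
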
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
new file mode 100644
index 000000000000..a0aeeb801fd7
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json
@@ -0,0 +1,808 @@
+[
+ {
+ "BriefDescription": "Cycles when divide unit is busy executing divide or square root operations.",
+ "CounterMask": "1",
+ "EventCode": "0x14",
+ "EventName": "ARITH.DIVIDER_ACTIVE",
+ "PublicDescription": "Counts cycles when divide unit is busy executing divide or square root operations. Accounts for integer and floating-point operations.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x9"
+ },
+ {
+ "BriefDescription": "Number of occurrences where a microcode assist is invoked by hardware.",
+ "EventCode": "0xc1",
+ "EventName": "ASSISTS.ANY",
+ "PublicDescription": "Counts the number of occurrences where a microcode assist is invoked by hardware Examples include AD (page Access Dirty), FP and AVX related assists.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x7"
+ },
+ {
+ "BriefDescription": "All branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all branch instructions retired.",
+ "SampleAfterValue": "400009"
+ },
+ {
+ "BriefDescription": "Conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Not taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts not taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Taken conditional branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Far branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.FAR_BRANCH",
+ "PEBS": "1",
+ "PublicDescription": "Counts far branch instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Indirect near branch instructions retired (excluding returns)",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts near indirect branch instructions retired excluding returns. TSX abort is an indirect branch.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Direct and indirect near call instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts both direct and indirect near call instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Return instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_RETURN",
+ "PEBS": "1",
+ "PublicDescription": "Counts return instructions retired.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Taken branch instructions retired.",
+ "EventCode": "0xc4",
+ "EventName": "BR_INST_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken branch instructions retired.",
+ "SampleAfterValue": "400009",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "All mispredicted branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
+ "PEBS": "1",
+ "PublicDescription": "Counts all the retired branch instructions that were mispredicted by the processor. A branch misprediction occurs when the processor incorrectly predicts the destination of the branch. When the misprediction is discovered at execution, all the instructions executed in the wrong (speculative) path must be discarded, and the processor must start fetching from the correct path.",
+ "SampleAfterValue": "50021"
+ },
+ {
+ "BriefDescription": "Mispredicted conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND",
+ "PEBS": "1",
+ "PublicDescription": "Counts mispredicted conditional branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x11"
+ },
+ {
+ "BriefDescription": "Mispredicted non-taken conditional branch instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_NTAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of conditional branch instructions retired that were mispredicted and the branch direction was not taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "number of branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.COND_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts taken conditional mispredicted branch instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "All miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT",
+ "PEBS": "1",
+ "PublicDescription": "Counts all miss-predicted indirect branch instructions retired (excluding RETs. TSX aborts is considered indirect branch).",
+ "SampleAfterValue": "50021",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Mispredicted indirect CALL instructions retired.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.INDIRECT_CALL",
+ "PEBS": "1",
+ "PublicDescription": "Counts retired mispredicted indirect (near taken) CALL instructions, including both register and memory indirect.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of near branch instructions retired that were mispredicted and taken.",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.NEAR_TAKEN",
+ "PEBS": "1",
+ "PublicDescription": "Counts number of near branch instructions retired that were mispredicted and taken.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "This event counts the number of mispredicted ret instructions retired. Non PEBS",
+ "EventCode": "0xc5",
+ "EventName": "BR_MISP_RETIRED.RET",
+ "PEBS": "1",
+ "PublicDescription": "This is a non-precise version (that is, does not use PEBS) of the event that counts mispredicted return instructions retired.",
+ "SampleAfterValue": "50021",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0xec",
+ "EventName": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "PublicDescription": "This event distributes cycle counts between active hyperthreads, i.e., those in C0. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If all other hyperthreads are inactive (or disabled or do not exist), all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when this thread is unhalted and the other thread is halted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE",
+ "PublicDescription": "Counts Core crystal clock cycles when current thread is unhalted and the other thread is halted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles. Cycle counts are evenly distributed between active threads in the Core.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_DISTRIBUTED",
+ "PublicDescription": "This event distributes Core crystal clock cycle counts between active hyperthreads, i.e., those in C0 sleep-state. A hyperthread becomes inactive when it executes the HLT or MWAIT instructions. If one thread is active in a core, all counts are attributed to this hyperthread. To obtain the full count when the Core is active, sum the counts from each hyperthread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Reference cycles when the core is not in halt state.",
+ "EventName": "CPU_CLK_UNHALTED.REF_TSC",
+ "PublicDescription": "Counts the number of reference cycles when the core is not in a halt state. The core enters the halt state when it is running the HLT instruction or the MWAIT instruction. This event is not affected by core frequency changes (for example, P states, TM2 transitions) but has the same incrementing frequency as the time stamp counter. This event can approximate elapsed time while the core was not in a halt state. This event has a constant ratio with the CPU_CLK_UNHALTED.REF_XCLK event. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events. Note: On all current platforms this event stops counting during 'throttling (TM)' states duty off periods the processor is 'halted'. The counter update is done at a lower clock rate then the core clock the overflow status bit for this counter may appear 'sticky'. After the counter has overflowed and software clears the overflow status bit and resets the counter to less than MAX. The reset value to the counter is not clocked immediately so the overflow status bit will flip 'high (1)' and generate another PMI (if enabled) after which the reset value gets clocked into the counter. Therefore, software will get the interrupt, read the overflow status bit '1 for bit 34 while the counter value is less than MAX. Software should ignore this case.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Core crystal clock cycles when the thread is unhalted.",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.REF_XCLK",
+ "PublicDescription": "Counts core crystal clock cycles when the thread is unhalted.",
+ "SampleAfterValue": "25003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Core cycles when the thread is not in halt state",
+ "EventName": "CPU_CLK_UNHALTED.THREAD",
+ "PublicDescription": "Counts the number of core cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. This event is a component in many key event ratios. The core frequency may change from time to time due to transitions associated with Enhanced Intel SpeedStep Technology or TM2. For this reason this event may have a changing ratio with regards to time. When the core frequency is constant, this event can approximate elapsed time while the core was not in the halt state. It is counted on a dedicated fixed counter, leaving the eight programmable counters available for other events.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Thread cycles when thread is not in halt state",
+ "EventCode": "0x3c",
+ "EventName": "CPU_CLK_UNHALTED.THREAD_P",
+ "PublicDescription": "This is an architectural event that counts the number of thread cycles while the thread is not in a halt state. The thread enters the halt state when it is running the HLT instruction. The core frequency may change from time to time due to power or thermal throttling. For this reason, this event may have a changing ratio with regards to wall clock time.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Cycles while L1 cache miss demand load is outstanding.",
+ "CounterMask": "8",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles while L2 cache miss demand load is outstanding.",
+ "CounterMask": "1",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles while memory subsystem has an outstanding load.",
+ "CounterMask": "16",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.CYCLES_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Execution stalls while L1 cache miss demand load is outstanding.",
+ "CounterMask": "12",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L1D_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0xc"
+ },
+ {
+ "BriefDescription": "Execution stalls while L2 cache miss demand load is outstanding.",
+ "CounterMask": "5",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_L2_MISS",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x5"
+ },
+ {
+ "BriefDescription": "Execution stalls while memory subsystem has an outstanding load.",
+ "CounterMask": "20",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_MEM_ANY",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x14"
+ },
+ {
+ "BriefDescription": "Total execution stalls.",
+ "CounterMask": "4",
+ "EventCode": "0xa3",
+ "EventName": "CYCLE_ACTIVITY.STALLS_TOTAL",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 1 uop is executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.1_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 1 uop was executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles total of 2 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.2_PORTS_UTIL",
+ "PublicDescription": "Counts cycles during which a total of 2 uops were executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.3_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 3 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station was not empty.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.4_PORTS_UTIL",
+ "PublicDescription": "Cycles total of 4 uops are executed on all ports and Reservation Station (RS) was not empty.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Cycles when the memory subsystem has an outstanding load. Increments by 4 for every such cycle.",
+ "CounterMask": "5",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_LOADS",
+ "PublicDescription": "Counts cycles when the memory subsystem has an outstanding load. Increments by 4 for every such cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x21"
+ },
+ {
+ "BriefDescription": "Cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "CounterMask": "2",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.BOUND_ON_STORES",
+ "PublicDescription": "Counts cycles where the Store Buffer was full and no loads caused an execution stall.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles no uop executed while RS was not empty, the SB was not full and there was no outstanding load.",
+ "EventCode": "0xa6",
+ "EventName": "EXE_ACTIVITY.EXE_BOUND_0_PORTS",
+ "PublicDescription": "Number of cycles total of 0 uops executed on all ports, Reservation Station (RS) was not empty, the Store Buffer (SB) was not full and there was no outstanding load.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Stalls caused by changing prefix length of the instruction.",
+ "EventCode": "0x87",
+ "EventName": "ILD_STALL.LCP",
+ "PublicDescription": "Counts cycles that the Instruction Length decoder (ILD) stalls occurred due to dynamically changing prefix length of the decoded instruction (by operand size prefix instruction 0x66, address size prefix instruction 0x67 or REX.W for Intel64). Count is proportional to the number of prefixes in a 16B-line. This may result in a three-cycle penalty for each LCP (Length changing prefix) in a 16-byte chunk.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Instruction decoders utilized in a cycle",
+ "EventCode": "0x55",
+ "EventName": "INST_DECODED.DECODERS",
+ "PublicDescription": "Number of decoders utilized in a cycle when the MITE (legacy decode pipeline) fetches instructions.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. Fixed Counter - architectural event",
+ "EventName": "INST_RETIRED.ANY",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of instructions retired. General Counter - architectural event",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.ANY_P",
+ "PEBS": "1",
+ "PublicDescription": "Counts the number of X86 instructions retired - an Architectural PerfMon event. Counting continues during hardware interrupts, traps, and inside interrupt handlers. Notes: INST_RETIRED.ANY is counted by a designated fixed counter freeing up programmable counters to count other events. INST_RETIRED.ANY_P is counted by a programmable counter.",
+ "SampleAfterValue": "2000003"
+ },
+ {
+ "BriefDescription": "Retired NOP instructions.",
+ "EventCode": "0xc0",
+ "EventName": "INST_RETIRED.NOP",
+ "PEBS": "1",
+ "PublicDescription": "Counts all retired NOP or ENDBR32/64 instructions",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Precise instruction retired event with a reduced effect of PEBS shadow in IP distribution",
+ "EventName": "INST_RETIRED.PREC_DIST",
+ "PEBS": "1",
+ "PublicDescription": "A version of INST_RETIRED that allows for a more unbiased distribution of samples across instructions retired. It utilizes the Precise Distribution of Instructions Retired (PDIR) feature to mitigate some bias in how retired instructions get sampled. Use on Fixed Counter 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "CounterMask": "1",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.ALL_RECOVERY_CYCLES",
+ "PublicDescription": "Counts cycles the Backend cluster is recovering after a miss-speculation or a Store Buffer or Load Buffer drain stall.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x3"
+ },
+ {
+ "BriefDescription": "Clears speculative count",
+ "CounterMask": "1",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEARS_COUNT",
+ "PublicDescription": "Counts the number of speculative clears due to any type of branch misprediction or machine clears",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.CLEAR_RESTEER_CYCLES",
+ "PublicDescription": "Cycles after recovery from a branch misprediction or machine clear till the first uop is issued from the resteered path.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Core cycles the allocator was stalled due to recovery from earlier clear event for this thread",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.RECOVERY_CYCLES",
+ "PublicDescription": "Counts core cycles when the Resource allocator was stalled due to recovery from an earlier branch misprediction or machine clear event.",
+ "SampleAfterValue": "500009",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where uops got dropped",
+ "EventCode": "0x0d",
+ "EventName": "INT_MISC.UOP_DROPPING",
+ "PublicDescription": "Estimated number of Top-down Microarchitecture Analysis slots that got dropped due to non front-end reasons",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "The number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.NO_SR",
+ "PublicDescription": "Counts the number of times that split load operations are temporarily blocked because all resources for handling the split accesses are in use.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Loads blocked due to overlapping with a preceding store that cannot be forwarded.",
+ "EventCode": "0x03",
+ "EventName": "LD_BLOCKS.STORE_FORWARD",
+ "PublicDescription": "Counts the number of times where store forwarding was prevented for a load operation. The most common case is a load blocked due to the address of memory access (partially) overlapping with a preceding uncompleted store. Note: See the table of not supported store forwards in the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "False dependencies in MOB due to partial compare on address.",
+ "EventCode": "0x07",
+ "EventName": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS",
+ "PublicDescription": "Counts the number of times a load got blocked due to false dependencies in MOB due to partial compare on address.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of demand load dispatches that hit L1D fill buffer (FB) allocated for software prefetch.",
+ "EventCode": "0x4c",
+ "EventName": "LOAD_HIT_PREFETCH.SWPF",
+ "PublicDescription": "Counts all not software-prefetch load dispatches that hit the fill buffer (FB) allocated for the software prefetch. It can also be incremented by some lock instructions. So it should only be used with profiling so that the locks can be excluded by ASM (Assembly File) inspection of the nearby instructions.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles Uops delivered by the LSD, but didn't come from the decoder.",
+ "CounterMask": "1",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_ACTIVE",
+ "PublicDescription": "Counts the cycles when at least one uop is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles optimal number of Uops delivered by the LSD, but did not come from the decoder.",
+ "CounterMask": "5",
+ "EventCode": "0xa8",
+ "EventName": "LSD.CYCLES_OK",
+ "PublicDescription": "Counts the cycles when optimal number of uops is delivered by the LSD (Loop-stream detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of Uops delivered by the LSD.",
+ "EventCode": "0xa8",
+ "EventName": "LSD.UOPS",
+ "PublicDescription": "Counts the number of uops delivered to the back-end by the LSD(Loop Stream Detector).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of machine clears (nukes) of any type.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.COUNT",
+ "PublicDescription": "Counts the number of machine clears (nukes) of any type.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Self-modifying code (SMC) detected.",
+ "EventCode": "0xc3",
+ "EventName": "MACHINE_CLEARS.SMC",
+ "PublicDescription": "Counts self-modifying code (SMC) detected, which causes a machine clear.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Increments whenever there is an update to the LBR array.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.LBR_INSERTS",
+ "PublicDescription": "Increments when an entry is added to the Last Branch Record (LBR) array (or removed from the array in case of RETURNs in call stack mode). The event requires LBR enable via IA32_DEBUGCTL MSR and branch type selection via MSR_LBR_SELECT.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "EventCode": "0xcc",
+ "EventName": "MISC_RETIRED.PAUSE_INST",
+ "PublicDescription": "Counts number of retired PAUSE instructions. This event is not supported on first SKL and KBL products.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Cycles stalled due to no store buffers available. (not including draining form sync).",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SB",
+ "PublicDescription": "Counts allocation stall cycles caused by the store buffer (SB) being full. This counts cycles that the pipeline back-end blocked uop delivery from the front-end.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "Counts cycles where the pipeline is stalled due to serializing operations.",
+ "EventCode": "0xa2",
+ "EventName": "RESOURCE_STALLS.SCOREBOARD",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_CYCLES",
+ "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts end of periods where the Reservation Station (RS) was empty.",
+ "CounterMask": "1",
+ "EdgeDetect": "1",
+ "EventCode": "0x5e",
+ "EventName": "RS_EVENTS.EMPTY_END",
+ "Invert": "1",
+ "PublicDescription": "Counts end of periods where the Reservation Station (RS) was empty. Could be useful to closely sample on front-end latency issues (see the FRONTEND_RETIRED event of designated precise events)",
+ "SampleAfterValue": "100003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "TMA slots where no uops were being issued due to lack of back-end resources.",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BACKEND_BOUND_SLOTS",
+ "PublicDescription": "Counts the number of Top-down Microarchitecture Analysis (TMA) method's slots where no micro-operations were being issued from front-end to back-end of the machine due to lack of back-end resources.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "TMA slots wasted due to incorrect speculation by branch mispredictions",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.BR_MISPREDICT_SLOTS",
+ "PublicDescription": "Number of TMA slots that were wasted due to incorrect speculation by branch mispredictions. This event estimates number of operations that were issued but not retired from the specualtive path as well as the out-of-order engine recovery past a branch misprediction.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x8"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. Fixed counter - architectural event",
+ "EventName": "TOPDOWN.SLOTS",
+ "PublicDescription": "Number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method (TMA). The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core. Software can use this event as the denominator for the top-level metrics of the TMA method. This architectural event is counted on a designated fixed counter (Fixed Counter 3).",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "TMA slots available for an unhalted logical processor. General counter - architectural event",
+ "EventCode": "0xa4",
+ "EventName": "TOPDOWN.SLOTS_P",
+ "PublicDescription": "Counts the number of available slots for an unhalted logical processor. The event increments by machine-width of the narrowest pipeline as employed by the Top-down Microarchitecture Analysis method. The count is distributed among unhalted logical processors (hyper-threads) who share the same physical core.",
+ "SampleAfterValue": "10000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops decoded out of instructions exclusively fetched by decoder 0",
+ "EventCode": "0x56",
+ "EventName": "UOPS_DECODED.DEC0",
+ "PublicDescription": "Uops exclusively fetched by decoder 0",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 0",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_0",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 0.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 1",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_1",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 1.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 2 and 3",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_2_3",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 2 and 3.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 4 and 9",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_4_9",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 5 and 9.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 5",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_5",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 5.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 6",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_6",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to port 6.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x40"
+ },
+ {
+ "BriefDescription": "Number of uops executed on port 7 and 8",
+ "EventCode": "0xa1",
+ "EventName": "UOPS_DISPATCHED.PORT_7_8",
+ "PublicDescription": "Counts, on the per-thread basis, cycles during which at least one uop is dispatched from the Reservation Station (RS) to ports 7 and 8.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x80"
+ },
+ {
+ "BriefDescription": "Number of uops executed on the core.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE",
+ "PublicDescription": "Counts the number of uops executed from any thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 1 micro-op is executed from any thread on physical core.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_1",
+ "PublicDescription": "Counts cycles when at least 1 micro-op is executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 2 micro-op is executed from any thread on physical core.",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_2",
+ "PublicDescription": "Counts cycles when at least 2 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 3 micro-op is executed from any thread on physical core.",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_3",
+ "PublicDescription": "Counts cycles when at least 3 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles at least 4 micro-op is executed from any thread on physical core.",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CORE_CYCLES_GE_4",
+ "PublicDescription": "Counts cycles when at least 4 micro-ops are executed from any thread on physical core.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles where at least 1 uop was executed per-thread",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_1",
+ "PublicDescription": "Cycles where at least 1 uop was executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 2 uops were executed per-thread",
+ "CounterMask": "2",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_2",
+ "PublicDescription": "Cycles where at least 2 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 3 uops were executed per-thread",
+ "CounterMask": "3",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_3",
+ "PublicDescription": "Cycles where at least 3 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles where at least 4 uops were executed per-thread",
+ "CounterMask": "4",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.CYCLES_GE_4",
+ "PublicDescription": "Cycles where at least 4 uops were executed per-thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts number of cycles no uops were dispatched to be executed on this thread.",
+ "CounterMask": "1",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which no uops were dispatched from the Reservation Station (RS) per thread.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of uops to be executed per-thread each cycle.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.THREAD",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Counts the number of x87 uops dispatched.",
+ "EventCode": "0xb1",
+ "EventName": "UOPS_EXECUTED.X87",
+ "PublicDescription": "Counts the number of x87 uops executed.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Uops that RAT issues to RS",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.ANY",
+ "PublicDescription": "Counts the number of uops that the Resource Allocation Table (RAT) issues to the Reservation Station (RS).",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Cycles when RAT does not issue Uops to RS for the thread",
+ "CounterMask": "1",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts cycles during which the Resource Allocation Table (RAT) does not issue any Uops to the reservation station (RS) for the current thread.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "Uops inserted at issue-stage in order to preserve upper bits of vector registers.",
+ "EventCode": "0x0e",
+ "EventName": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH",
+ "PublicDescription": "Counts the number of Blend Uops issued by the Resource Allocation Table (RAT) to the reservation station (RS) in order to preserve upper bits of vector registers. Starting with the Skylake microarchitecture, these Blend uops are needed since every Intel SSE instruction executed in Dirty Upper State needs to preserve bits 128-255 of the destination register. For more information, refer to Mixing Intel AVX and Intel SSE Code section of the Optimization Guide.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Retirement slots used.",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.SLOTS",
+ "PublicDescription": "Counts the retirement slots used each cycle.",
+ "SampleAfterValue": "2000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles without actually retired uops.",
+ "CounterMask": "1",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.STALL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "This event counts cycles without actually retired uops.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Cycles with less than 10 actually retired uops.",
+ "CounterMask": "10",
+ "EventCode": "0xc2",
+ "EventName": "UOPS_RETIRED.TOTAL_CYCLES",
+ "Invert": "1",
+ "PublicDescription": "Counts the number of cycles using always true condition (uops_ret < 16) applied to non PEBS uops retired event.",
+ "SampleAfterValue": "1000003",
+ "UMask": "0x2"
+ }
+]
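
(Several pipeline.json entries above rely on counter modifiers rather than distinct event/umask encodings: CounterMask is the JSON spelling of a cmask threshold (e.g. the CYCLE_ACTIVITY.* and UOPS_EXECUTED.CYCLES_GE_* events), Invert inverts that comparison, and EdgeDetect counts 0-to-1 transitions (e.g. MACHINE_CLEARS.COUNT). The small, hypothetical sketch below -- not part of the patch -- lists which events in the file use each modifier.)

    import json
    from collections import defaultdict

    PATH = "tools/perf/pmu-events/arch/x86/tigerlake/pipeline.json"

    with open(PATH) as f:
        events = json.load(f)

    # Group event names by the modifier fields they carry.
    by_modifier = defaultdict(list)
    for ev in events:
        for key in ("CounterMask", "EdgeDetect", "Invert"):
            if key in ev:
                by_modifier[key].append(ev["EventName"])

    for key, names in sorted(by_modifier.items()):
        print(f"{key}: {len(names)} events, e.g. {names[0]}")
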
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
new file mode 100644
index 000000000000..4c80d6be6cf1
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
@@ -0,0 +1,1532 @@
+[
+ {
+ "BriefDescription": "C10 residency percent per package",
+ "MetricExpr": "cstate_pkg@c10\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C10_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C2 residency percent per package",
+ "MetricExpr": "cstate_pkg@c2\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C2_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C3 residency percent per package",
+ "MetricExpr": "cstate_pkg@c3\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C3_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per core",
+ "MetricExpr": "cstate_core@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C6 residency percent per package",
+ "MetricExpr": "cstate_pkg@c6\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C6_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per core",
+ "MetricExpr": "cstate_core@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Core_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C7 residency percent per package",
+ "MetricExpr": "cstate_pkg@c7\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C7_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C8 residency percent per package",
+ "MetricExpr": "cstate_pkg@c8\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C8_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "C9 residency percent per package",
+ "MetricExpr": "cstate_pkg@c9\\-residency@ / TSC",
+ "MetricGroup": "Power",
+ "MetricName": "C9_Pkg_Residency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles spent in System Management Interrupts.",
+ "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)",
+ "MetricGroup": "smi",
+ "MetricName": "smi_cycles",
+ "MetricThreshold": "smi_cycles > 0.1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of SMI interrupts.",
+ "MetricExpr": "msr@smi@",
+ "MetricGroup": "smi",
+ "MetricName": "smi_num",
+ "ScaleUnit": "1SMI#"
+ },
+ {
+ "BriefDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset",
+ "MetricExpr": "LD_BLOCKS_PARTIAL.ADDRESS_ALIAS / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_4k_aliasing",
+ "MetricThreshold": "tma_4k_aliasing > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often memory load accesses were aliased by preceding stores (in program order) with a 4K address offset. False match is possible; which incur a few cycles load re-issue. However; the short re-issue duration is often hidden by the out-of-order core and HW optimizations; hence a user may safely ignore a high value of this metric unless it manages to propagate up into parent nodes of the hierarchy (e.g. to L1_Bound).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_0 + UOPS_DISPATCHED.PORT_1 + UOPS_DISPATCHED.PORT_5 + UOPS_DISPATCHED.PORT_6) / (4 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_alu_op_utilization",
+ "MetricThreshold": "tma_alu_op_utilization > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists",
+ "MetricExpr": "100 * ASSISTS.ANY / tma_info_slots",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_assists",
+ "MetricThreshold": "tma_assists > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of slots the CPU retired uops delivered by the Microcode_Sequencer as a result of Assists. Assists are long sequences of uops that are required in certain corner-cases for operations that cannot be handled natively by the execution pipeline. For example; when working with very small floating point values (so-called Denormals); the FP units are not set up to perform these operations natively. Instead; a sequence of instructions to perform the computation on the Denormals is injected into the pipeline. Since these microcode sequences might be dozens of uops long; Assists can be extremely deleterious to performance and they can be avoided in many cases. Sample with: ASSISTS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend",
+ "MetricExpr": "topdown\\-be\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 5 * cpu@INT_MISC.RECOVERY_CYCLES\\,cmask\\=1\\,edge@ / tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_backend_bound",
+ "MetricThreshold": "tma_backend_bound > 0.2",
+ "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots wasted due to incorrect speculations",
+ "MetricExpr": "max(1 - (tma_frontend_bound + tma_backend_bound + tma_retiring), 0)",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_bad_speculation",
+ "MetricThreshold": "tma_bad_speculation > 0.15",
+ "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring branch instructions.",
+ "MetricExpr": "tma_light_operations * BR_INST_RETIRED.ALL_BRANCHES / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_branch_instructions",
+ "MetricThreshold": "tma_branch_instructions > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * tma_bad_speculation",
+ "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
+ "MetricName": "tma_branch_mispredicts",
+ "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction. These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers",
+ "MetricExpr": "INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks + tma_unknown_branches",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_branch_resteers",
+ "MetricThreshold": "tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers. Branch Resteers estimates the Frontend delay in fetching operations from corrected path; following all sorts of miss-predicted branches. For example; branchy code with lots of miss-predictions might get categorized under Branch Resteers. Note the value of this node may overlap with its siblings. Sample with: BR_MISP_RETIRED.ALL_BRANCHES",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction",
+ "MetricExpr": "max(0, tma_microcode_sequencer - tma_assists)",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_microcode_sequencer_group",
+ "MetricName": "tma_cisc",
+ "MetricThreshold": "tma_cisc > 0.1 & (tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU retired uops originated from CISC (complex instruction set computer) instruction. A CISC instruction has multiple uops that are required to perform the instruction's functionality as in the case of read-modify-write as an example. Since these instructions require multiple uops they may or may not imply sub-optimal use of machine resources.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears",
+ "MetricExpr": "(1 - BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT)) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;MachineClears;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueMC",
+ "MetricName": "tma_clears_resteers",
+ "MetricThreshold": "tma_clears_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Machine Clears. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(49 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) + 48 * tma_info_average_frequency * MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_contested_accesses",
+ "MetricThreshold": "tma_contested_accesses > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to contested accesses. Contested accesses occur when data written by one Logical Processor are read by another Logical Processor on a different Physical Core. Examples of contested accesses include synchronizations such as locks; true data sharing such as modified locked variables; and false sharing. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD;MEM_LOAD_L3_HIT_RETIRED.XSNP_MISS. Related metrics: tma_data_sharing, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck",
+ "MetricExpr": "max(0, tma_backend_bound - tma_memory_bound)",
+ "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_core_bound",
+ "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck. Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "48 * tma_info_average_frequency * (MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD + MEM_LOAD_L3_HIT_RETIRED.XSNP_FWD * (1 - OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM / (OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HITM + OCR.DEMAND_DATA_RD.L3_HIT.SNOOP_HIT_WITH_FWD))) * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_l3_bound_group",
+ "MetricName": "tma_data_sharing",
+ "MetricThreshold": "tma_data_sharing > 0.05 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles while the memory subsystem was handling synchronizations due to data-sharing accesses. Data shared by multiple Logical Processors (even just read shared) may cause increased access latency due to cache coherency. Excessive data sharing can drastically harm multithreaded performance. Sample with: MEM_LOAD_L3_HIT_RETIRED.XSNP_NO_FWD. Related metrics: tma_contested_accesses, tma_false_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder",
+ "MetricExpr": "(cpu@INST_DECODED.DECODERS\\,cmask\\=1@ - cpu@INST_DECODED.DECODERS\\,cmask\\=2@) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_issueD0;tma_mite_group",
+ "MetricName": "tma_decoder0_alone",
+ "MetricThreshold": "tma_decoder0_alone > 0.1 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35))",
+ "PublicDescription": "This metric represents fraction of cycles where decoder-0 was the only active decoder. Related metrics: tma_few_uops_instructions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the Divider unit was active",
+ "MetricExpr": "ARITH.DIVIDER_ACTIVE / tma_info_clks",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_divider",
+ "MetricThreshold": "tma_divider > 0.2 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric represents fraction of cycles where the Divider unit was active. Divide and square root instructions are performed by the Divider unit and can take considerably longer latency than integer or Floating Point addition; subtraction; or multiplication. Sample with: ARITH.DIVIDER_ACTIVE",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "CYCLE_ACTIVITY.STALLS_L3_MISS / tma_info_clks + (CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks - tma_l2_bound",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_dram_bound",
+ "MetricThreshold": "tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled on accesses to external memory (DRAM) by loads. Better caching can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline",
+ "MetricExpr": "(IDQ.DSB_CYCLES_ANY - IDQ.DSB_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSB;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_dsb",
+ "MetricThreshold": "tma_dsb > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to DSB (decoded uop cache) fetch pipeline. For example; inefficient utilization of the DSB cache structure or bank conflict when reading from it; are categorized here.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_dsb_switches",
+ "MetricThreshold": "tma_dsb_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to switches from DSB to MITE pipelines. The DSB (decoded i-cache) is a Uop Cache where the front-end directly delivers Uops (micro operations) avoiding heavy x86 decoding. The DSB pipeline has shorter latency and delivered higher bandwidth than the MITE (legacy instruction decode pipeline). Switching between the two pipelines can cause penalties hence this metric measures the exposed penalty. Sample with: FRONTEND_RETIRED.DSB_MISS_PS. Related metrics: tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses",
+ "MetricExpr": "min(7 * cpu@DTLB_LOAD_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_LOAD_MISSES.WALK_ACTIVE, max(CYCLE_ACTIVITY.CYCLES_MEM_ANY - CYCLE_ACTIVITY.CYCLES_L1D_MISS, 0)) / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_l1_bound_group",
+ "MetricName": "tma_dtlb_load",
+ "MetricThreshold": "tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles where the Data TLB (DTLB) was missed by load accesses. TLBs (Translation Look-aside Buffers) are processor caches for recently used entries out of the Page Tables that are used to map virtual- to physical-addresses by the operating system. This metric approximates the potential delay of demand loads missing the first-level data TLB (assuming worst case scenario with back to back misses to different pages). This includes hitting in the second-level TLB (STLB) as well as performing a hardware page walk on an STLB miss. Sample with: MEM_INST_RETIRED.STLB_MISS_LOADS_PS. Related metrics: tma_dtlb_store, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses",
+ "MetricExpr": "(7 * cpu@DTLB_STORE_MISSES.STLB_HIT\\,cmask\\=1@ + DTLB_STORE_MISSES.WALK_ACTIVE) / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL4;tma_L4_group;tma_issueTLB;tma_store_bound_group",
+ "MetricName": "tma_dtlb_store",
+ "MetricThreshold": "tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates the fraction of cycles spent handling first-level data TLB store misses. As with ordinary data caching; focus on improving data locality and reducing working-set size to reduce DTLB overhead. Additionally; consider using profile-guided optimization (PGO) to collocate frequently-used data on the same page. Try using larger page sizes for large amounts of frequently-used data. Sample with: MEM_INST_RETIRED.STLB_MISS_STORES_PS. Related metrics: tma_dtlb_load, tma_info_memory_data_tlbs",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing",
+ "MetricExpr": "54 * tma_info_average_frequency * OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM / tma_info_clks",
+ "MetricGroup": "DataSharing;Offcore;Snoop;TopdownL4;tma_L4_group;tma_issueSyncxn;tma_store_bound_group",
+ "MetricName": "tma_false_sharing",
+ "MetricThreshold": "tma_false_sharing > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates how often CPU was handling synchronizations due to False Sharing. False Sharing is a multithreading hiccup; where multiple Logical Processors contend on different data-elements mapped into the same cache line. Sample with: OCR.DEMAND_RFO.L3_HIT.SNOOP_HITM. Related metrics: tma_contested_accesses, tma_data_sharing, tma_machine_clears, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed",
+ "MetricExpr": "L1D_PEND_MISS.FB_FULL / tma_info_clks",
+ "MetricGroup": "MemoryBW;TopdownL4;tma_L4_group;tma_issueBW;tma_issueSL;tma_issueSmSt;tma_l1_bound_group",
+ "MetricName": "tma_fb_full",
+ "MetricThreshold": "tma_fb_full > 0.3",
+ "PublicDescription": "This metric does a *rough estimation* of how often L1D Fill Buffer unavailability limited additional L1D miss memory access requests to proceed. The higher the metric value; the deeper the memory hierarchy level the misses are satisfied from (metric values >1 are valid). Often it hints on approaching bandwidth limits (to L2 cache; L3 cache or external memory). Related metrics: tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full, tma_store_latency, tma_streaming_stores",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues",
+ "MetricExpr": "max(0, tma_frontend_bound - tma_fetch_latency)",
+ "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
+ "MetricName": "tma_fetch_bandwidth",
+ "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues. For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues",
+ "MetricExpr": "(5 * IDQ_UOPS_NOT_DELIVERED.CYCLES_0_UOPS_DELIV.CORE - INT_MISC.UOP_DROPPING) / tma_info_slots",
+ "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
+ "MetricName": "tma_fetch_latency",
+ "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues. For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops",
+ "MetricExpr": "tma_heavy_operations - tma_microcode_sequencer",
+ "MetricGroup": "TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueD0",
+ "MetricName": "tma_few_uops_instructions",
+ "MetricThreshold": "tma_few_uops_instructions > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring instructions that that are decoder into two or up to ([SNB+] four; [ADL+] five) uops. This highly-correlates with the number of uops in such instructions. Related metrics: tma_decoder0_alone",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired)",
+ "MetricExpr": "tma_x87_use + tma_fp_scalar + tma_fp_vector",
+ "MetricGroup": "HPC;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_fp_arith",
+ "MetricThreshold": "tma_fp_arith > 0.2 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents overall arithmetic floating-point (FP) operations fraction the CPU has executed (retired). Note this metric's value may exceed its parent due to use of \"Uops\" CountDomain and FMA double-counting.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_scalar",
+ "MetricThreshold": "tma_fp_scalar > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) scalar uops fraction the CPU has retired. May overcount due to FMA double counting. Related metrics: tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths",
+ "MetricExpr": "cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@ / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL4;tma_L4_group;tma_fp_arith_group;tma_issue2P",
+ "MetricName": "tma_fp_vector",
+ "MetricThreshold": "tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric approximates arithmetic floating-point (FP) vector uops fraction the CPU has retired aggregated across all vector widths. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_128b",
+ "MetricThreshold": "tma_fp_vector_128b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 128-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_256b",
+ "MetricThreshold": "tma_fp_vector_256b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 256-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors",
+ "MetricExpr": "(FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Compute;Flops;TopdownL5;tma_L5_group;tma_fp_vector_group;tma_issue2P",
+ "MetricName": "tma_fp_vector_512b",
+ "MetricThreshold": "tma_fp_vector_512b > 0.1 & (tma_fp_vector > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6))",
+ "PublicDescription": "This metric approximates arithmetic FP vector uops fraction the CPU has retired for 512-bit wide vectors. May overcount due to FMA double counting. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_port_0, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend",
+ "MetricExpr": "topdown\\-fe\\-bound / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) - INT_MISC.UOP_DROPPING / tma_info_slots",
+ "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_frontend_bound",
+ "MetricThreshold": "tma_frontend_bound > 0.15",
+ "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences",
+ "MetricExpr": "tma_microcode_sequencer + tma_retiring * (UOPS_DECODED.DEC0 - cpu@UOPS_DECODED.DEC0\\,cmask\\=1@) / IDQ.MITE_UOPS",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_heavy_operations",
+ "MetricThreshold": "tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;IcMiss;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_icache_misses",
+ "MetricThreshold": "tma_icache_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to instruction cache misses. Sample with: FRONTEND_RETIRED.L2_MISS_PS;FRONTEND_RETIRED.L1I_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Measured Average Frequency for unhalted processors [GHz]",
+ "MetricExpr": "tma_info_turbo_utilization * TSC / 1e9 / duration_time",
+ "MetricGroup": "Power;Summary",
+ "MetricName": "tma_info_average_frequency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "100 * tma_fetch_latency * (tma_itlb_misses + tma_icache_misses + tma_unknown_branches) / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)",
+ "MetricGroup": "BigFoot;Fed;Frontend;IcMiss;MemoryTLB;tma_issueBC",
+ "MetricName": "tma_info_big_code",
+ "MetricThreshold": "tma_info_big_code > 20",
+ "PublicDescription": "Total pipeline cost of instruction fetch related bottlenecks by large code footprint programs (i-side cache; TLB and BTB misses). Related metrics: tma_info_branching_overhead"
+ },
+ {
+ "BriefDescription": "Branch instructions per taken branch.",
+ "MetricExpr": "BR_INST_RETIRED.ALL_BRANCHES / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_bptkbranch"
+ },
+ {
+ "BriefDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear)",
+ "MetricExpr": "(tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) * tma_info_slots / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_branch_misprediction_cost",
+ "PublicDescription": "Branch Misprediction Cost: Fraction of TMA slots wasted per non-speculative branch misprediction (retired JEClear). Related metrics: tma_branch_mispredicts, tma_info_mispredictions, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls)",
+ "MetricExpr": "100 * ((BR_INST_RETIRED.COND + 3 * BR_INST_RETIRED.NEAR_CALL + (BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL)) / tma_info_slots)",
+ "MetricGroup": "Ret;tma_issueBC",
+ "MetricName": "tma_info_branching_overhead",
+ "MetricThreshold": "tma_info_branching_overhead > 10",
+ "PublicDescription": "Total pipeline cost of branch related instructions (used for program control-flow including function calls). Related metrics: tma_info_big_code"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are CALL or RET",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_CALL + BR_INST_RETIRED.NEAR_RETURN) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_callret"
+ },
+ {
+ "BriefDescription": "Per-Logical Processor actual clocks when the Logical Processor is active.",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "Pipeline",
+ "MetricName": "tma_info_clks"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) code speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * ITLB_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Fed;MemoryTLB",
+ "MetricName": "tma_info_code_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are non-taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_NTAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_nt"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are taken conditionals",
+ "MetricExpr": "BR_INST_RETIRED.COND_TAKEN / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches;CodeGen;PGO",
+ "MetricName": "tma_info_cond_tk"
+ },
+ {
+ "BriefDescription": "Probability of Core Bound bottleneck hidden by SMT-profiling artifacts",
+ "MetricExpr": "(100 * (1 - tma_core_bound / tma_ports_utilization if tma_core_bound < tma_ports_utilization else 1) if tma_info_smt_2t_utilization > 0.5 else 0)",
+ "MetricGroup": "Cor;SMT",
+ "MetricName": "tma_info_core_bound_likely",
+ "MetricThreshold": "tma_info_core_bound_likely > 0.5"
+ },
+ {
+ "BriefDescription": "Core actual clocks when any Logical Processor is active on the Physical Core",
+ "MetricExpr": "CPU_CLK_UNHALTED.DISTRIBUTED",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_core_clks"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle across hyper-threads (per physical core)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_core_clks",
+ "MetricGroup": "Ret;SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_coreipc"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction (per Logical Processor)",
+ "MetricExpr": "1 / tma_info_ipc",
+ "MetricGroup": "Mem;Pipeline",
+ "MetricName": "tma_info_cpi"
+ },
+ {
+ "BriefDescription": "Average CPU Utilization",
+ "MetricExpr": "CPU_CLK_UNHALTED.REF_TSC / TSC",
+ "MetricGroup": "HPC;Summary",
+ "MetricName": "tma_info_cpu_utilization"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss data reads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD / OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_data_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]",
+ "MetricExpr": "64 * (arb@event\\=0x81\\,umask\\=0x1@ + arb@event\\=0x84\\,umask\\=0x1@) / 1e6 / duration_time / 1e3",
+ "MetricGroup": "HPC;Mem;MemoryBW;SoC;tma_issueBW",
+ "MetricName": "tma_info_dram_bw_use",
+ "PublicDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]. Related metrics: tma_fb_full, tma_info_memory_bandwidth, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache)",
+ "MetricExpr": "IDQ.DSB_UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "DSB;Fed;FetchBW;tma_issueFB",
+ "MetricName": "tma_info_dsb_coverage",
+ "MetricThreshold": "tma_info_dsb_coverage < 0.7 & tma_info_ipc / 5 > 0.35",
+ "PublicDescription": "Fraction of Uops delivered by the DSB (aka Decoded ICache; or Uop Cache). Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_misses, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_dsb_switches / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches) + tma_fetch_bandwidth * tma_mite / (tma_dsb + tma_lsd + tma_mite))",
+ "MetricGroup": "DSBmiss;Fed;tma_issueFB",
+ "MetricName": "tma_info_dsb_misses",
+ "MetricThreshold": "tma_info_dsb_misses > 10",
+ "PublicDescription": "Total pipeline cost of DSB (uop cache) misses - subset of the Instruction_Fetch_BW Bottleneck. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_iptb, tma_lcp"
+ },
+ {
+ "BriefDescription": "Average number of cycles of a switch from the DSB fetch-unit to MITE fetch unit - see DSB_Switches tree node for details.",
+ "MetricExpr": "DSB2MITE_SWITCHES.PENALTY_CYCLES / cpu@DSB2MITE_SWITCHES.PENALTY_CYCLES\\,cmask\\=1\\,edge@",
+ "MetricGroup": "DSBmiss",
+ "MetricName": "tma_info_dsb_switch_cost"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-thread",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / cpu@UOPS_EXECUTED.THREAD\\,cmask\\=1@",
+ "MetricGroup": "Cor;Pipeline;PortsUtil;SMT",
+ "MetricName": "tma_info_execute"
+ },
+ {
+ "BriefDescription": "The ratio of Executed- by Issued-Uops",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / UOPS_ISSUED.ANY",
+ "MetricGroup": "Cor;Pipeline",
+ "MetricName": "tma_info_execute_per_issue",
+ "PublicDescription": "The ratio of Executed- by Issued-Uops. Ratio > 1 suggests high rate of uop micro-fusions. Ratio < 1 suggest high rate of \"execute\" at rename stage."
+ },
+ {
+ "BriefDescription": "Fill Buffer (FB) hits per kilo instructions for retired demand loads (L1D misses that merge into ongoing miss-handling entries)",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.FB_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_fb_hpki"
+ },
+ {
+ "BriefDescription": "Average number of Uops issued by front-end when it issued something",
+ "MetricExpr": "UOPS_ISSUED.ANY / cpu@UOPS_ISSUED.ANY\\,cmask\\=1@",
+ "MetricGroup": "Fed;FetchBW",
+ "MetricName": "tma_info_fetch_upc"
+ },
+ {
+ "BriefDescription": "Floating Point Operations Per Cycle",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / tma_info_core_clks",
+ "MetricGroup": "Flops;Ret",
+ "MetricName": "tma_info_flopc"
+ },
+ {
+ "BriefDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width)",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_fp_arith_utilization",
+ "PublicDescription": "Actual per-core usage of the Floating Point non-X87 execution units (regardless of precision or vector-width). Values > 1 are possible due to ([BDW+] Fused-Multiply Add (FMA) counting - common; [ADL+] use all of ADD/MUL/FMA in Scalar or 128/256-bit vectors - less common)."
+ },
+ {
+ "BriefDescription": "Giga Floating Point Operations Per Second",
+ "MetricExpr": "(cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE) / 1e9 / duration_time",
+ "MetricGroup": "Cor;Flops;HPC",
+ "MetricName": "tma_info_gflops",
+ "PublicDescription": "Giga Floating Point Operations Per Second. Aggregate across all supported options of: FP precisions, scalar and vector instructions, vector-width and AMX engine."
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck",
+ "MetricExpr": "100 * (tma_fetch_latency * tma_icache_misses / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Fed;FetchLat;IcMiss;tma_issueFL",
+ "MetricName": "tma_info_ic_misses",
+ "MetricThreshold": "tma_info_ic_misses > 5",
+ "PublicDescription": "Total pipeline cost of Instruction Cache misses - subset of the Big_Code Bottleneck. Related metrics: "
+ },
+ {
+ "BriefDescription": "Average Latency for L1 instruction cache misses",
+ "MetricExpr": "ICACHE_16B.IFDATA_STALL / cpu@ICACHE_16B.IFDATA_STALL\\,cmask\\=1\\,edge@",
+ "MetricGroup": "Fed;FetchLat;IcMiss",
+ "MetricName": "tma_info_icache_miss_latency"
+ },
+ {
+ "BriefDescription": "Instruction-Level-Parallelism (average number of uops executed when there is execution) per-core",
+ "MetricExpr": "UOPS_EXECUTED.THREAD / (UOPS_EXECUTED.CORE_CYCLES_GE_1 / 2 if #SMT_on else UOPS_EXECUTED.CORE_CYCLES_GE_1)",
+ "MetricGroup": "Backend;Cor;Pipeline;PortsUtil",
+ "MetricName": "tma_info_ilp"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of instruction fetch bandwidth related bottlenecks",
+ "MetricExpr": "100 * (tma_frontend_bound - tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches)) - tma_info_big_code",
+ "MetricGroup": "Fed;FetchBW;Frontend",
+ "MetricName": "tma_info_instruction_fetch_bw",
+ "MetricThreshold": "tma_info_instruction_fetch_bw > 20"
+ },
+ {
+ "BriefDescription": "Total number of retired Instructions",
+ "MetricExpr": "INST_RETIRED.ANY",
+ "MetricGroup": "Summary;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_instructions",
+ "PublicDescription": "Total number of retired Instructions. Sample with: INST_RETIRED.PREC_DIST"
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + cpu@FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE\\,umask\\=0xfc@)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_iparith",
+ "MetricThreshold": "tma_info_iparith < 10",
+ "PublicDescription": "Instructions per FP Arithmetic instruction (lower number means higher occurrence rate). May undercount due to FMA double counting. Approximated prior to BDW."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx128",
+ "MetricThreshold": "tma_info_iparith_avx128 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX/SSE 128-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.256B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx256",
+ "MetricThreshold": "tma_info_iparith_avx256 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX* 256-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (FP_ARITH_INST_RETIRED.512B_PACKED_DOUBLE + FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;FpVector;InsType",
+ "MetricName": "tma_info_iparith_avx512",
+ "MetricThreshold": "tma_info_iparith_avx512 < 10",
+ "PublicDescription": "Instructions per FP Arithmetic AVX 512-bit instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_DOUBLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_dp",
+ "MetricThreshold": "tma_info_iparith_scalar_dp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Double-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FP_ARITH_INST_RETIRED.SCALAR_SINGLE",
+ "MetricGroup": "Flops;FpScalar;InsType",
+ "MetricName": "tma_info_iparith_scalar_sp",
+ "MetricThreshold": "tma_info_iparith_scalar_sp < 10",
+ "PublicDescription": "Instructions per FP Arithmetic Scalar Single-Precision instruction (lower number means higher occurrence rate). May undercount due to FMA double counting."
+ },
+ {
+ "BriefDescription": "Instructions per Branch (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Branches;Fed;InsType",
+ "MetricName": "tma_info_ipbranch",
+ "MetricThreshold": "tma_info_ipbranch < 8"
+ },
+ {
+ "BriefDescription": "Instructions Per Cycle (per Logical Processor)",
+ "MetricExpr": "INST_RETIRED.ANY / tma_info_clks",
+ "MetricGroup": "Ret;Summary",
+ "MetricName": "tma_info_ipc"
+ },
+ {
+ "BriefDescription": "Instructions per (near) call (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_CALL",
+ "MetricGroup": "Branches;Fed;PGO",
+ "MetricName": "tma_info_ipcall",
+ "MetricThreshold": "tma_info_ipcall < 200"
+ },
+ {
+ "BriefDescription": "Instructions per non-speculative DSB miss (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / FRONTEND_RETIRED.ANY_DSB_MISS",
+ "MetricGroup": "DSBmiss;Fed",
+ "MetricName": "tma_info_ipdsb_miss_ret",
+ "MetricThreshold": "tma_info_ipdsb_miss_ret < 50"
+ },
+ {
+ "BriefDescription": "Instructions per Far Branch ( Far Branches apply upon transition from application to operating system, handling interrupts, exceptions) [lower number means higher occurrence rate]",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.FAR_BRANCH:u",
+ "MetricGroup": "Branches;OS",
+ "MetricName": "tma_info_ipfarbranch",
+ "MetricThreshold": "tma_info_ipfarbranch < 1e6"
+ },
+ {
+ "BriefDescription": "Instructions per Floating Point (FP) Operation (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / (cpu@FP_ARITH_INST_RETIRED.SCALAR_SINGLE\\,umask\\=0x03@ + 2 * FP_ARITH_INST_RETIRED.128B_PACKED_DOUBLE + 4 * cpu@FP_ARITH_INST_RETIRED.128B_PACKED_SINGLE\\,umask\\=0x18@ + 8 * cpu@FP_ARITH_INST_RETIRED.256B_PACKED_SINGLE\\,umask\\=0x60@ + 16 * FP_ARITH_INST_RETIRED.512B_PACKED_SINGLE)",
+ "MetricGroup": "Flops;InsType",
+ "MetricName": "tma_info_ipflop",
+ "MetricThreshold": "tma_info_ipflop < 10"
+ },
+ {
+ "BriefDescription": "Instructions per Load (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_LOADS",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipload",
+ "MetricThreshold": "tma_info_ipload < 3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional non-taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_NTAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_ntaken",
+ "MetricThreshold": "tma_info_ipmisp_cond_ntaken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for conditional taken branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.COND_TAKEN",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_cond_taken",
+ "MetricThreshold": "tma_info_ipmisp_cond_taken < 200"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for indirect CALL or JMP branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.INDIRECT",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_indirect",
+ "MetricThreshold": "tma_info_ipmisp_indirect < 1e3"
+ },
+ {
+ "BriefDescription": "Instructions per retired mispredicts for return branches (lower number means higher occurrence rate).",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.RET",
+ "MetricGroup": "Bad;BrMispredicts",
+ "MetricName": "tma_info_ipmisp_ret",
+ "MetricThreshold": "tma_info_ipmisp_ret < 500"
+ },
+ {
+ "BriefDescription": "Number of Instructions per non-speculative Branch Misprediction (JEClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / BR_MISP_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts",
+ "MetricName": "tma_info_ipmispredict",
+ "MetricThreshold": "tma_info_ipmispredict < 200"
+ },
+ {
+ "BriefDescription": "Instructions per Store (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / MEM_INST_RETIRED.ALL_STORES",
+ "MetricGroup": "InsType",
+ "MetricName": "tma_info_ipstore",
+ "MetricThreshold": "tma_info_ipstore < 8"
+ },
+ {
+ "BriefDescription": "Instructions per Software prefetch instruction (of any type: NTA/T0/T1/T2/Prefetch) (lower number means higher occurrence rate)",
+ "MetricExpr": "INST_RETIRED.ANY / cpu@SW_PREFETCH_ACCESS.T0\\,umask\\=0xF@",
+ "MetricGroup": "Prefetches",
+ "MetricName": "tma_info_ipswpf",
+ "MetricThreshold": "tma_info_ipswpf < 100"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "INST_RETIRED.ANY / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW;Frontend;PGO;tma_issueFB",
+ "MetricName": "tma_info_iptb",
+ "MetricThreshold": "tma_info_iptb < 11",
+ "PublicDescription": "Instruction per taken branch. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_lcp"
+ },
+ {
+ "BriefDescription": "Instructions per speculative Unknown Branch Misprediction (BAClear) (lower number means higher occurrence rate)",
+ "MetricExpr": "tma_info_instructions / BACLEARS.ANY",
+ "MetricGroup": "Fed",
+ "MetricName": "tma_info_ipunknown_branch"
+ },
+ {
+ "BriefDescription": "Fraction of branches that are unconditional (direct or indirect) jumps",
+ "MetricExpr": "(BR_INST_RETIRED.NEAR_TAKEN - BR_INST_RETIRED.COND_TAKEN - 2 * BR_INST_RETIRED.NEAR_CALL) / BR_INST_RETIRED.ALL_BRANCHES",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_jump"
+ },
+ {
+ "BriefDescription": "Cycles Per Instruction for the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / INST_RETIRED.ANY_P:k",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_cpi"
+ },
+ {
+ "BriefDescription": "Fraction of cycles spent in the Operating System (OS) Kernel mode",
+ "MetricExpr": "CPU_CLK_UNHALTED.THREAD_P:k / CPU_CLK_UNHALTED.THREAD",
+ "MetricGroup": "OS",
+ "MetricName": "tma_info_kernel_utilization",
+ "MetricThreshold": "tma_info_kernel_utilization > 0.05"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "64 * L1D.REPLACEMENT / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L1 data cache [GB / sec]",
+ "MetricExpr": "tma_info_l1d_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l1d_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L1_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki"
+ },
+ {
+ "BriefDescription": "L1 cache true misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.ALL_DEMAND_DATA_RD / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l1mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "64 * L2_LINES_IN.ALL / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L2 cache [GB / sec]",
+ "MetricExpr": "tma_info_l2_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l2_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * (L2_RQSTS.REFERENCES - L2_RQSTS.MISS) / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache hits per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_HIT / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2hpki_load"
+ },
+ {
+ "BriefDescription": "L2 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "Backend;CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all request types (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem;Offcore",
+ "MetricName": "tma_info_l2mpki_all"
+ },
+ {
+ "BriefDescription": "L2 cache true code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * FRONTEND_RETIRED.L2_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code"
+ },
+ {
+ "BriefDescription": "L2 cache speculative code cacheline misses per kilo instruction",
+ "MetricExpr": "1e3 * L2_RQSTS.CODE_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "IcMiss",
+ "MetricName": "tma_info_l2mpki_code_all"
+ },
+ {
+ "BriefDescription": "L2 cache ([RKL+] true) misses per kilo instruction for all demand loads (including speculative)",
+ "MetricExpr": "1e3 * L2_RQSTS.DEMAND_DATA_RD_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l2mpki_load"
+ },
+ {
+ "BriefDescription": "Average per-core data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * OFFCORE_REQUESTS.ALL_REQUESTS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data access bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_access_bw",
+ "MetricGroup": "Mem;MemoryBW;Offcore",
+ "MetricName": "tma_info_l3_cache_access_bw_1t"
+ },
+ {
+ "BriefDescription": "Average per-core data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "64 * LONGEST_LAT_CACHE.MISS / 1e9 / duration_time",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw"
+ },
+ {
+ "BriefDescription": "Average per-thread data fill bandwidth to the L3 cache [GB / sec]",
+ "MetricExpr": "tma_info_l3_cache_fill_bw",
+ "MetricGroup": "Mem;MemoryBW",
+ "MetricName": "tma_info_l3_cache_fill_bw_1t"
+ },
+ {
+ "BriefDescription": "L3 cache true misses per kilo instruction for retired demand loads",
+ "MetricExpr": "1e3 * MEM_LOAD_RETIRED.L3_MISS / INST_RETIRED.ANY",
+ "MetricGroup": "CacheMisses;Mem",
+ "MetricName": "tma_info_l3mpki"
+ },
+ {
+ "BriefDescription": "Average Latency for L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / OFFCORE_REQUESTS.DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l2_miss_latency"
+ },
+ {
+ "BriefDescription": "Average Parallel L2 cache miss demand Loads",
+ "MetricExpr": "OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD / cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,cmask\\=1@",
+ "MetricGroup": "Memory_BW;Offcore",
+ "MetricName": "tma_info_load_l2_mlp"
+ },
+ {
+ "BriefDescription": "Average Latency for L3 cache miss demand Loads",
+ "MetricExpr": "cpu@OFFCORE_REQUESTS_OUTSTANDING.DEMAND_DATA_RD\\,umask\\=0x10@ / OFFCORE_REQUESTS.L3_MISS_DEMAND_DATA_RD",
+ "MetricGroup": "Memory_Lat;Offcore",
+ "MetricName": "tma_info_load_l3_miss_latency"
+ },
+ {
+ "BriefDescription": "Actual Average Latency for L1 data-cache miss demand load operations (in core cycles)",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / (MEM_LOAD_RETIRED.L1_MISS + MEM_LOAD_RETIRED.FB_HIT)",
+ "MetricGroup": "Mem;MemoryBound;MemoryLat",
+ "MetricName": "tma_info_load_miss_real_latency"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data load speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_LOAD_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_load_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Fraction of Uops delivered by the LSD (Loop Stream Detector; aka Loop Cache)",
+ "MetricExpr": "LSD.UOPS / UOPS_ISSUED.ANY",
+ "MetricGroup": "Fed;LSD",
+ "MetricName": "tma_info_lsd_coverage"
+ },
+ {
+ "BriefDescription": "Average number of parallel data read requests to external memory",
+ "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@",
+ "MetricGroup": "Mem;MemoryBW;SoC",
+ "MetricName": "tma_info_mem_parallel_reads",
+ "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches"
+ },
+ {
+ "BriefDescription": "Average latency of data read request to external memory (in nanoseconds)",
+ "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.RD + UNC_ARB_DAT_OCCUPANCY.RD) / UNC_ARB_TRK_REQUESTS.RD",
+ "MetricGroup": "Mem;MemoryLat;SoC",
+ "MetricName": "tma_info_mem_read_latency",
+ "PublicDescription": "Average latency of data read request to external memory (in nanoseconds). Accounts for demand loads and L1/L2 prefetches. ([RKL+]memory-controller only)"
+ },
+ {
+ "BriefDescription": "Average latency of all requests to external memory (in Uncore cycles)",
+ "MetricExpr": "(UNC_ARB_TRK_OCCUPANCY.ALL + UNC_ARB_DAT_OCCUPANCY.RD) / arb@event\\=0x81\\,umask\\=0x1@",
+ "MetricGroup": "Mem;SoC",
+ "MetricName": "tma_info_mem_request_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_bandwidth / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_sq_full / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full))) + tma_l1_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_fb_full / (tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk))",
+ "MetricGroup": "Mem;MemoryBW;Offcore;tma_issueBW",
+ "MetricName": "tma_info_memory_bandwidth",
+ "MetricThreshold": "tma_info_memory_bandwidth > 20",
+ "PublicDescription": "Total pipeline cost of (external) Memory Bandwidth related bottlenecks. Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_mem_bandwidth, tma_sq_full"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_l1_bound / max(tma_memory_bound, tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_load / max(tma_l1_bound, tma_4k_aliasing + tma_dtlb_load + tma_fb_full + tma_lock_latency + tma_split_loads + tma_store_fwd_blk)) + tma_store_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_dtlb_store / (tma_dtlb_store + tma_false_sharing + tma_split_stores + tma_store_latency + tma_streaming_stores)))",
+ "MetricGroup": "Mem;MemoryTLB;Offcore;tma_issueTLB",
+ "MetricName": "tma_info_memory_data_tlbs",
+ "MetricThreshold": "tma_info_memory_data_tlbs > 20",
+ "PublicDescription": "Total pipeline cost of Memory Address Translation related bottlenecks (data-side TLBs). Related metrics: tma_dtlb_load, tma_dtlb_store"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches)",
+ "MetricExpr": "100 * tma_memory_bound * (tma_dram_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_mem_latency / (tma_mem_bandwidth + tma_mem_latency)) + tma_l3_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound) * (tma_l3_hit_latency / (tma_contested_accesses + tma_data_sharing + tma_l3_hit_latency + tma_sq_full)) + tma_l2_bound / (tma_dram_bound + tma_l1_bound + tma_l2_bound + tma_l3_bound + tma_store_bound))",
+ "MetricGroup": "Mem;MemoryLat;Offcore;tma_issueLat",
+ "MetricName": "tma_info_memory_latency",
+ "MetricThreshold": "tma_info_memory_latency > 20",
+ "PublicDescription": "Total pipeline cost of Memory Latency related bottlenecks (external memory and off-core caches). Related metrics: tma_l3_hit_latency, tma_mem_latency"
+ },
+ {
+ "BriefDescription": "Total pipeline cost of Branch Misprediction related bottlenecks",
+ "MetricExpr": "100 * (tma_branch_mispredicts + tma_fetch_latency * tma_mispredicts_resteers / (tma_branch_resteers + tma_dsb_switches + tma_icache_misses + tma_itlb_misses + tma_lcp + tma_ms_switches))",
+ "MetricGroup": "Bad;BadSpec;BrMispredicts;tma_issueBM",
+ "MetricName": "tma_info_mispredictions",
+ "MetricThreshold": "tma_info_mispredictions > 20",
+ "PublicDescription": "Total pipeline cost of Branch Misprediction related bottlenecks. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_mispredicts_resteers"
+ },
+ {
+ "BriefDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss",
+ "MetricExpr": "L1D_PEND_MISS.PENDING / L1D_PEND_MISS.PENDING_CYCLES",
+ "MetricGroup": "Mem;MemoryBW;MemoryBound",
+ "MetricName": "tma_info_mlp",
+ "PublicDescription": "Memory-Level-Parallelism (average number of L1 miss demand load when there is at least one such miss. Per-Logical Processor)"
+ },
+ {
+ "BriefDescription": "Fraction of branches of other types (not individually covered by other metrics in Info.Branches group)",
+ "MetricExpr": "1 - (tma_info_cond_nt + tma_info_cond_tk + tma_info_callret + tma_info_jump)",
+ "MetricGroup": "Bad;Branches",
+ "MetricName": "tma_info_other_branches"
+ },
+ {
+ "BriefDescription": "Utilization of the core's Page Walker(s) serving STLB misses triggered by instruction/Load/Store accesses",
+ "MetricExpr": "(ITLB_MISSES.WALK_PENDING + DTLB_LOAD_MISSES.WALK_PENDING + DTLB_STORE_MISSES.WALK_PENDING) / (2 * tma_info_core_clks)",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_page_walks_utilization",
+ "MetricThreshold": "tma_info_page_walks_utilization > 0.5"
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0",
+ "MetricExpr": "CORE_POWER.LVL0_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license0_utilization",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for baseline license level 0. This includes non-AVX codes, SSE, AVX 128-bit, and low-current AVX 256-bit codes."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1",
+ "MetricExpr": "CORE_POWER.LVL1_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license1_utilization",
+ "MetricThreshold": "tma_info_power_license1_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 1. This includes high current AVX 256-bit instructions as well as low current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX)",
+ "MetricExpr": "CORE_POWER.LVL2_TURBO_LICENSE / tma_info_core_clks",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_power_license2_utilization",
+ "MetricThreshold": "tma_info_power_license2_utilization > 0.5",
+ "PublicDescription": "Fraction of Core cycles where the core was running with power-delivery for license level 2 (introduced in SKX). This includes high current AVX 512-bit instructions."
+ },
+ {
+ "BriefDescription": "Average number of Uops retired in cycles where at least one uop has retired.",
+ "MetricExpr": "tma_retiring * tma_info_slots / cpu@UOPS_RETIRED.SLOTS\\,cmask\\=1@",
+ "MetricGroup": "Pipeline;Ret",
+ "MetricName": "tma_info_retire"
+ },
+ {
+ "BriefDescription": "Total issue-pipeline slots (per-Physical Core till ICL; per-Logical Processor ICL onward)",
+ "MetricExpr": "TOPDOWN.SLOTS",
+ "MetricGroup": "TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots"
+ },
+ {
+ "BriefDescription": "Fraction of Physical Core issue-slots utilized by this Logical Processor",
+ "MetricExpr": "(tma_info_slots / (TOPDOWN.SLOTS / 2) if #SMT_on else 1)",
+ "MetricGroup": "SMT;TmaL1;tma_L1_group",
+ "MetricName": "tma_info_slots_utilization"
+ },
+ {
+ "BriefDescription": "Fraction of cycles where both hardware Logical Processors were active",
+ "MetricExpr": "(1 - CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE / CPU_CLK_UNHALTED.REF_DISTRIBUTED if #SMT_on else 0)",
+ "MetricGroup": "SMT",
+ "MetricName": "tma_info_smt_2t_utilization"
+ },
+ {
+ "BriefDescription": "STLB (2nd level TLB) data store speculative misses per kilo instruction (misses of any page-size that complete the page walk)",
+ "MetricExpr": "1e3 * DTLB_STORE_MISSES.WALK_COMPLETED / INST_RETIRED.ANY",
+ "MetricGroup": "Mem;MemoryTLB",
+ "MetricName": "tma_info_store_stlb_mpki"
+ },
+ {
+ "BriefDescription": "Average Frequency Utilization relative nominal frequency",
+ "MetricExpr": "tma_info_clks / CPU_CLK_UNHALTED.REF_TSC",
+ "MetricGroup": "Power",
+ "MetricName": "tma_info_turbo_utilization"
+ },
+ {
+ "BriefDescription": "Uops Per Instruction",
+ "MetricExpr": "tma_retiring * tma_info_slots / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;Ret;Retire",
+ "MetricName": "tma_info_uoppi",
+ "MetricThreshold": "tma_info_uoppi > 1.05"
+ },
+ {
+ "BriefDescription": "Instruction per taken branch",
+ "MetricExpr": "tma_retiring * tma_info_slots / BR_INST_RETIRED.NEAR_TAKEN",
+ "MetricGroup": "Branches;Fed;FetchBW",
+ "MetricName": "tma_info_uptb",
+ "MetricThreshold": "tma_info_uptb < 7.5"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses",
+ "MetricExpr": "ICACHE_64B.IFTAG_STALL / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;MemoryTLB;TopdownL3;tma_L3_group;tma_fetch_latency_group",
+ "MetricName": "tma_itlb_misses",
+ "MetricThreshold": "tma_itlb_misses > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Instruction TLB (ITLB) misses. Sample with: FRONTEND_RETIRED.STLB_MISS_PS;FRONTEND_RETIRED.ITLB_MISS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache",
+ "MetricExpr": "max((CYCLE_ACTIVITY.STALLS_MEM_ANY - CYCLE_ACTIVITY.STALLS_L1D_MISS) / tma_info_clks, 0)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_issueL1;tma_issueMC;tma_memory_bound_group",
+ "MetricName": "tma_l1_bound",
+ "MetricThreshold": "tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled without loads missing the L1 data cache. The L1 data cache typically has the shortest latency. However; in certain cases like loads blocked on older stores; a load might suffer due to high latency even though it is being satisfied by the L1. Another example is loads who miss in the TLB. These cases are characterized by execution unit stalls; while some non-completed demand load lives in the machine without having that demand load missing the L1 cache. Sample with: MEM_LOAD_RETIRED.L1_HIT_PS;MEM_LOAD_RETIRED.FB_HIT_PS. Related metrics: tma_clears_resteers, tma_machine_clears, tma_microcode_sequencer, tma_ms_switches, tma_ports_utilized_1",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) / (MEM_LOAD_RETIRED.L2_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS) + L1D_PEND_MISS.FB_FULL_PERIODS) * ((CYCLE_ACTIVITY.STALLS_L1D_MISS - CYCLE_ACTIVITY.STALLS_L2_MISS) / tma_info_clks)",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l2_bound",
+ "MetricThreshold": "tma_l2_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to L2 cache accesses by loads. Avoiding cache misses (i.e. L1 misses/L2 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L2_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_L2_MISS - CYCLE_ACTIVITY.STALLS_L3_MISS) / tma_info_clks",
+ "MetricGroup": "CacheMisses;MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_l3_bound",
+ "MetricThreshold": "tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often the CPU was stalled due to loads accesses to L3 cache or contended with a sibling Core. Avoiding cache misses (i.e. L2 misses/L3 hits) can improve the latency and increase performance. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited)",
+ "MetricExpr": "17.5 * tma_info_average_frequency * MEM_LOAD_RETIRED.L3_HIT * (1 + MEM_LOAD_RETIRED.FB_HIT / MEM_LOAD_RETIRED.L1_MISS / 2) / tma_info_clks",
+ "MetricGroup": "MemoryLat;TopdownL4;tma_L4_group;tma_issueLat;tma_l3_bound_group",
+ "MetricName": "tma_l3_hit_latency",
+ "MetricThreshold": "tma_l3_hit_latency > 0.1 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles with demand load accesses that hit the L3 cache under unloaded scenarios (possibly L3 latency limited). Avoiding private cache misses (i.e. L2 misses/L3 hits) will improve the latency; reduce contention with sibling physical cores and increase performance. Note the value of this node may overlap with its siblings. Sample with: MEM_LOAD_RETIRED.L3_HIT_PS. Related metrics: tma_info_memory_latency, tma_mem_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs)",
+ "MetricExpr": "ILD_STALL.LCP / tma_info_clks",
+ "MetricGroup": "FetchLat;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueFB",
+ "MetricName": "tma_lcp",
+ "MetricThreshold": "tma_lcp > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric represents fraction of cycles CPU was stalled due to Length Changing Prefixes (LCPs). Using proper compiler flags or Intel Compiler by default will certainly avoid this. #Link: Optimization Guide about LCP BKMs. Related metrics: tma_dsb_switches, tma_fetch_bandwidth, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation)",
+ "MetricExpr": "max(0, tma_retiring - tma_heavy_operations)",
+ "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
+ "MetricName": "tma_light_operations",
+ "MetricThreshold": "tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_2_3 / (2 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_load_op_utilization",
+ "MetricThreshold": "tma_load_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Load operations. Sample with: UOPS_DISPATCHED.PORT_2_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the (first level) DTLB was missed by load accesses, that later on hit in second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_load - tma_load_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_hit",
+ "MetricThreshold": "tma_load_stlb_hit > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the Second-level TLB (STLB) was missed by load accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_LOAD_MISSES.WALK_ACTIVE / tma_info_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_load_group",
+ "MetricName": "tma_load_stlb_miss",
+ "MetricThreshold": "tma_load_stlb_miss > 0.05 & (tma_dtlb_load > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations",
+ "MetricConstraint": "NO_GROUP_EVENTS",
+ "MetricExpr": "(16 * max(0, MEM_INST_RETIRED.LOCK_LOADS - L2_RQSTS.ALL_RFO) + MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES * (10 * L2_RQSTS.RFO_HIT + min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO))) / tma_info_clks",
+ "MetricGroup": "Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_l1_bound_group",
+ "MetricName": "tma_lock_latency",
+ "MetricThreshold": "tma_lock_latency > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU spent handling cache misses due to lock operations. Due to the microarchitecture handling of locks; they are classified as L1_Bound regardless of what memory source satisfied them. Sample with: MEM_INST_RETIRED.LOCK_LOADS_PS. Related metrics: tma_store_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit",
+ "MetricExpr": "(LSD.CYCLES_ACTIVE - LSD.CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "FetchBW;LSD;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_lsd",
+ "MetricThreshold": "tma_lsd > 0.15 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to LSD (Loop Stream Detector) unit. LSD typically does well sustaining Uop supply. However; in some rare cases; optimal uop-delivery could not be reached for small loops whose size (in terms of number of uops) does not suit well the LSD structure.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears",
+ "MetricExpr": "max(0, tma_bad_speculation - tma_branch_mispredicts)",
+ "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
+ "MetricName": "tma_machine_clears",
+ "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+ "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears. These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, cpu@OFFCORE_REQUESTS_OUTSTANDING.ALL_DATA_RD\\,cmask\\=4@) / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueBW",
+ "MetricName": "tma_mem_bandwidth",
+ "MetricThreshold": "tma_mem_bandwidth > 0.2 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the core's performance was likely hurt due to approaching bandwidth limits of external memory (DRAM). The underlying heuristic assumes that a similar off-core traffic is generated by all IA cores. This metric does not aggregate non-data-read requests by this logical processor; requests from other IA Logical Processors/Physical Cores/sockets; or other non-IA devices like GPU; hence the maximum external memory bandwidth limits may or may not be approached when this metric is flagged (see Uncore counters for that). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_sq_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM)",
+ "MetricExpr": "min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DATA_RD) / tma_info_clks - tma_mem_bandwidth",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_dram_bound_group;tma_issueLat",
+ "MetricName": "tma_mem_latency",
+ "MetricThreshold": "tma_mem_latency > 0.1 & (tma_dram_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles where the performance was likely hurt due to latency from external memory (DRAM). This metric does not aggregate requests from other Logical Processors/Physical Cores/sockets (see Uncore counters for that). Related metrics: tma_info_memory_latency, tma_l3_hit_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck",
+ "MetricExpr": "(CYCLE_ACTIVITY.STALLS_MEM_ANY + EXE_ACTIVITY.BOUND_ON_STORES) / (CYCLE_ACTIVITY.STALLS_TOTAL + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) + EXE_ACTIVITY.BOUND_ON_STORES) * tma_backend_bound",
+ "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
+ "MetricName": "tma_memory_bound",
+ "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+ "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck. Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring memory operations -- uops for memory load or store accesses.",
+ "MetricExpr": "tma_light_operations * MEM_INST_RETIRED.ANY / INST_RETIRED.ANY",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_memory_operations",
+ "MetricThreshold": "tma_memory_operations > 0.1 & tma_light_operations > 0.6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit",
+ "MetricExpr": "tma_retiring * tma_info_slots / UOPS_ISSUED.ANY * IDQ.MS_UOPS / tma_info_slots",
+ "MetricGroup": "MicroSeq;TopdownL3;tma_L3_group;tma_heavy_operations_group;tma_issueMC;tma_issueMS",
+ "MetricName": "tma_microcode_sequencer",
+ "MetricThreshold": "tma_microcode_sequencer > 0.05 & tma_heavy_operations > 0.1",
+ "PublicDescription": "This metric represents fraction of slots the CPU was retiring uops fetched by the Microcode Sequencer (MS) unit. The MS is used for CISC instructions not supported by the default decoders (like repeat move strings; or CPUID); or by microcode assists used to address some operation modes (like in Floating Point assists). These cases can often be avoided. Sample with: IDQ.MS_UOPS. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage",
+ "MetricExpr": "BR_MISP_RETIRED.ALL_BRANCHES / (BR_MISP_RETIRED.ALL_BRANCHES + MACHINE_CLEARS.COUNT) * INT_MISC.CLEAR_RESTEER_CYCLES / tma_info_clks",
+ "MetricGroup": "BadSpec;BrMispredicts;TopdownL4;tma_L4_group;tma_branch_resteers_group;tma_issueBM",
+ "MetricName": "tma_mispredicts_resteers",
+ "MetricThreshold": "tma_mispredicts_resteers > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to Branch Resteers as a result of Branch Misprediction at execution stage. Sample with: INT_MISC.CLEAR_RESTEER_CYCLES. Related metrics: tma_branch_mispredicts, tma_info_branch_misprediction_cost, tma_info_mispredictions",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline)",
+ "MetricExpr": "(IDQ.MITE_CYCLES_ANY - IDQ.MITE_CYCLES_OK) / tma_info_core_clks / 2",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL3;tma_L3_group;tma_fetch_bandwidth_group",
+ "MetricName": "tma_mite",
+ "MetricThreshold": "tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35)",
+ "PublicDescription": "This metric represents Core fraction of cycles in which CPU was likely limited due to the MITE pipeline (the legacy decode pipeline). This pipeline is used for code that was not pre-cached in the DSB or LSD. For example; inefficiencies due to asymmetric decoders; use of long immediate or LCP can manifest as MITE fetch bandwidth bottleneck. Sample with: FRONTEND_RETIRED.ANY_DSB_MISS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where (only) 4 uops were delivered by the MITE pipeline",
+ "MetricExpr": "(cpu@IDQ.MITE_UOPS\\,cmask\\=4@ - cpu@IDQ.MITE_UOPS\\,cmask\\=5@) / tma_info_clks",
+ "MetricGroup": "DSBmiss;FetchBW;TopdownL4;tma_L4_group;tma_mite_group",
+ "MetricName": "tma_mite_4wide",
+ "MetricThreshold": "tma_mite_4wide > 0.05 & (tma_mite > 0.1 & (tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued",
+ "MetricExpr": "UOPS_ISSUED.VECTOR_WIDTH_MISMATCH / UOPS_ISSUED.ANY",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_issueMV;tma_ports_utilized_0_group",
+ "MetricName": "tma_mixing_vectors",
+ "MetricThreshold": "tma_mixing_vectors > 0.05",
+ "PublicDescription": "The Mixing_Vectors metric gives the percentage of injected blend uops out of all uops issued. Usually a Mixing_Vectors over 5% is worth investigating. Read more in Appendix B1 of the Optimizations Guide for this topic. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS)",
+ "MetricExpr": "3 * IDQ.MS_SWITCHES / tma_info_clks",
+ "MetricGroup": "FetchLat;MicroSeq;TopdownL3;tma_L3_group;tma_fetch_latency_group;tma_issueMC;tma_issueMS;tma_issueMV;tma_issueSO",
+ "MetricName": "tma_ms_switches",
+ "MetricThreshold": "tma_ms_switches > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15)",
+ "PublicDescription": "This metric estimates the fraction of cycles when the CPU was stalled due to switches of uop delivery to the Microcode Sequencer (MS). Commonly used instructions are optimized for delivery by the DSB (decoded i-cache) or MITE (legacy instruction decode) pipelines. Certain operations cannot be handled natively by the execution pipeline; and must be performed by microcode (small programs injected into the execution stream). Switching to the MS too often can negatively impact performance. The MS is designated to deliver long uop flows required by CISC instructions like CPUID; or uncommon conditions like Floating Point Assists when dealing with Denormals. Sample with: IDQ.MS_SWITCHES. Related metrics: tma_clears_resteers, tma_l1_bound, tma_machine_clears, tma_microcode_sequencer, tma_mixing_vectors, tma_serializing_operation",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions",
+ "MetricExpr": "tma_light_operations * INST_RETIRED.NOP / (tma_retiring * tma_info_slots)",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_nop_instructions",
+ "MetricThreshold": "tma_nop_instructions > 0.1 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents fraction of slots where the CPU was retiring NOP (no op) instructions. Compilers often use NOPs for certain address alignments - e.g. start address of a function or loop body. Sample with: INST_RETIRED.NOP",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes",
+ "MetricExpr": "max(0, tma_light_operations - (tma_fp_arith + tma_memory_operations + tma_branch_instructions + tma_nop_instructions))",
+ "MetricGroup": "Pipeline;TopdownL3;tma_L3_group;tma_light_operations_group",
+ "MetricName": "tma_other_light_ops",
+ "MetricThreshold": "tma_other_light_ops > 0.3 & tma_light_operations > 0.6",
+ "PublicDescription": "This metric represents the remaining light uops fraction the CPU has executed - remaining means not covered by other sibling nodes. May undercount due to FMA double counting",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_0 / tma_info_core_clks",
+ "MetricGroup": "Compute;TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_0",
+ "MetricThreshold": "tma_port_0 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 0 ([SNB+] ALU; [HSW+] ALU and 2nd branch). Sample with: UOPS_DISPATCHED.PORT_0. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_1, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_1 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_1",
+ "MetricThreshold": "tma_port_1 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 1 (ALU). Sample with: UOPS_DISPATCHED.PORT_1. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_5, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_5 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_5",
+ "MetricThreshold": "tma_port_5 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 5 ([SNB+] Branches and ALU; [HSW+] ALU). Sample with: UOPS_DISPATCHED.PORT_5. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_6, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU)",
+ "MetricExpr": "UOPS_DISPATCHED.PORT_6 / tma_info_core_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_alu_op_utilization_group;tma_issue2P",
+ "MetricName": "tma_port_6",
+ "MetricThreshold": "tma_port_6 > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port 6 ([HSW+]Primary Branch and simple ALU). Sample with: UOPS_DISPATCHED.PORT_6. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_ports_utilized_2",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related)",
+ "MetricExpr": "((cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) + (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL)) / tma_info_clks if ARITH.DIVIDER_ACTIVE < CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY else (EXE_ACTIVITY.1_PORTS_UTIL + tma_retiring * EXE_ACTIVITY.2_PORTS_UTIL) / tma_info_clks)",
+ "MetricGroup": "PortsUtil;TopdownL3;tma_L3_group;tma_core_bound_group",
+ "MetricName": "tma_ports_utilization",
+ "MetricThreshold": "tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU performance was potentially limited due to Core computation issues (non divider-related). Two distinct categories can be attributed into this metric: (1) heavy data-dependency among contiguous instructions would manifest in this metric - such cases are often referred to as low Instruction Level Parallelism (ILP). (2) Contention on some hardware execution unit other than Divider. For example; when there are too many multiply operations.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "cpu@EXE_ACTIVITY.3_PORTS_UTIL\\,umask\\=0x80@ / tma_info_clks + tma_serializing_operation * (CYCLE_ACTIVITY.STALLS_TOTAL - CYCLE_ACTIVITY.STALLS_MEM_ANY) / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_0",
+ "MetricThreshold": "tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed no uops on any execution port (Logical Processor cycles since ICL, Physical Core cycles otherwise). Long-latency instructions like divides may contribute to this metric.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.1_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issueL1;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_1",
+ "MetricThreshold": "tma_ports_utilized_1 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles where the CPU executed total of 1 uop per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). This can be due to heavy data-dependency among software instructions; or over oversubscribing a particular hardware resource. In some other cases with high 1_Port_Utilized and L1_Bound; this metric can point to L1 data-cache latency bottleneck that may not necessarily manifest with complete execution starvation (due to the short L1 latency e.g. walking a linked list) - looking at the assembly can be helpful. Sample with: EXE_ACTIVITY.1_PORTS_UTIL. Related metrics: tma_l1_bound",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "EXE_ACTIVITY.2_PORTS_UTIL / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_issue2P;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_2",
+ "MetricThreshold": "tma_ports_utilized_2 > 0.15 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 2 uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Loop Vectorization -most compilers feature auto-Vectorization options today- reduces pressure on the execution ports as multiple elements are calculated with same uop. Sample with: EXE_ACTIVITY.2_PORTS_UTIL. Related metrics: tma_fp_scalar, tma_fp_vector, tma_fp_vector_128b, tma_fp_vector_256b, tma_fp_vector_512b, tma_port_0, tma_port_1, tma_port_5, tma_port_6",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise)",
+ "MetricExpr": "UOPS_EXECUTED.CYCLES_GE_3 / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL4;tma_L4_group;tma_ports_utilization_group",
+ "MetricName": "tma_ports_utilized_3m",
+ "MetricThreshold": "tma_ports_utilized_3m > 0.7 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents fraction of cycles CPU executed total of 3 or more uops per cycle on all execution ports (Logical Processor cycles since ICL, Physical Core cycles otherwise). Sample with: UOPS_EXECUTED.CYCLES_GE_3",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired",
+ "MetricExpr": "topdown\\-retiring / (topdown\\-fe\\-bound + topdown\\-bad\\-spec + topdown\\-retiring + topdown\\-be\\-bound) + 0 * tma_info_slots",
+ "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
+ "MetricName": "tma_retiring",
+ "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+ "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category. Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved. Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance. For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations",
+ "MetricExpr": "RESOURCE_STALLS.SCOREBOARD / tma_info_clks",
+ "MetricGroup": "PortsUtil;TopdownL5;tma_L5_group;tma_issueSO;tma_ports_utilized_0_group",
+ "MetricName": "tma_serializing_operation",
+ "MetricThreshold": "tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2)))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU issue-pipeline was stalled due to serializing operations. Instructions like CPUID; WRMSR or LFENCE serialize the out-of-order execution which may limit performance. Sample with: RESOURCE_STALLS.SCOREBOARD. Related metrics: tma_ms_switches",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions",
+ "MetricExpr": "140 * MISC_RETIRED.PAUSE_INST / tma_info_clks",
+ "MetricGroup": "TopdownL6;tma_L6_group;tma_serializing_operation_group",
+ "MetricName": "tma_slow_pause",
+ "MetricThreshold": "tma_slow_pause > 0.05 & (tma_serializing_operation > 0.1 & (tma_ports_utilized_0 > 0.2 & (tma_ports_utilization > 0.15 & (tma_core_bound > 0.1 & tma_backend_bound > 0.2))))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to PAUSE Instructions. Sample with: MISC_RETIRED.PAUSE_INST",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary",
+ "MetricExpr": "tma_info_load_miss_real_latency * LD_BLOCKS.NO_SR / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_split_loads",
+ "MetricThreshold": "tma_split_loads > 0.2 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles handling memory load split accesses - load that cross 64-byte cache line boundary. Sample with: MEM_INST_RETIRED.SPLIT_LOADS_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents rate of split store accesses",
+ "MetricExpr": "MEM_INST_RETIRED.SPLIT_STORES / tma_info_core_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_issueSpSt;tma_store_bound_group",
+ "MetricName": "tma_split_stores",
+ "MetricThreshold": "tma_split_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric represents rate of split store accesses. Consider aligning your data to the 64-byte cache line granularity. Sample with: MEM_INST_RETIRED.SPLIT_STORES_PS. Related metrics: tma_port_4",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors)",
+ "MetricExpr": "L1D_PEND_MISS.L2_STALL / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueBW;tma_l3_bound_group",
+ "MetricName": "tma_sq_full",
+ "MetricThreshold": "tma_sq_full > 0.3 & (tma_l3_bound > 0.05 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric measures fraction of cycles where the Super Queue (SQ) was full taking into account all request-types and both hardware SMT threads (Logical Processors). Related metrics: tma_fb_full, tma_info_dram_bw_use, tma_info_memory_bandwidth, tma_mem_bandwidth",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write",
+ "MetricExpr": "EXE_ACTIVITY.BOUND_ON_STORES / tma_info_clks",
+ "MetricGroup": "MemoryBound;TmaL3mem;TopdownL3;tma_L3_group;tma_memory_bound_group",
+ "MetricName": "tma_store_bound",
+ "MetricThreshold": "tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to RFO store memory accesses; RFO store issue a read-for-ownership request before the write. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should RFO stores be a bottleneck. Sample with: MEM_INST_RETIRED.ALL_STORES_PS",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores",
+ "MetricExpr": "13 * LD_BLOCKS.STORE_FORWARD / tma_info_clks",
+ "MetricGroup": "TopdownL4;tma_L4_group;tma_l1_bound_group",
+ "MetricName": "tma_store_fwd_blk",
+ "MetricThreshold": "tma_store_fwd_blk > 0.1 & (tma_l1_bound > 0.1 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric roughly estimates fraction of cycles when the memory subsystem had loads blocked since they could not forward data from earlier (in program order) overlapping stores. To streamline memory operations in the pipeline; a load can avoid waiting for memory if a prior in-flight store is writing the data that the load wants to read (store forwarding process). However; in some cases the load may be blocked for a significant time pending the store forward. For example; when the prior store is writing a smaller region than the load is reading.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses",
+ "MetricExpr": "(L2_RQSTS.RFO_HIT * 10 * (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) + (1 - MEM_INST_RETIRED.LOCK_LOADS / MEM_INST_RETIRED.ALL_STORES) * min(CPU_CLK_UNHALTED.THREAD, OFFCORE_REQUESTS_OUTSTANDING.CYCLES_WITH_DEMAND_RFO)) / tma_info_clks",
+ "MetricGroup": "MemoryLat;Offcore;TopdownL4;tma_L4_group;tma_issueRFO;tma_issueSL;tma_store_bound_group",
+ "MetricName": "tma_store_latency",
+ "MetricThreshold": "tma_store_latency > 0.1 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates fraction of cycles the CPU spent handling L1D store misses. Store accesses usually less impact out-of-order core performance; however; holding resources for longer time can lead into undesired implications (e.g. contention on L1D fill-buffer entries - see FB_Full). Related metrics: tma_fb_full, tma_lock_latency",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations",
+ "MetricExpr": "(UOPS_DISPATCHED.PORT_4_9 + UOPS_DISPATCHED.PORT_7_8) / (4 * tma_info_core_clks)",
+ "MetricGroup": "TopdownL5;tma_L5_group;tma_ports_utilized_3m_group",
+ "MetricName": "tma_store_op_utilization",
+ "MetricThreshold": "tma_store_op_utilization > 0.6",
+ "PublicDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution port for Store operations. Sample with: UOPS_DISPATCHED.PORT_7_8",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric roughly estimates the fraction of cycles where the TLB was missed by store accesses, hitting in the second-level TLB (STLB)",
+ "MetricExpr": "tma_dtlb_store - tma_store_stlb_miss",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_hit",
+ "MetricThreshold": "tma_store_stlb_hit > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates the fraction of cycles where the STLB was missed by store accesses, performing a hardware page walk",
+ "MetricExpr": "DTLB_STORE_MISSES.WALK_ACTIVE / tma_info_core_clks",
+ "MetricGroup": "MemoryTLB;TopdownL5;tma_L5_group;tma_dtlb_store_group",
+ "MetricName": "tma_store_stlb_miss",
+ "MetricThreshold": "tma_store_stlb_miss > 0.05 & (tma_dtlb_store > 0.05 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2)))",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores",
+ "MetricExpr": "9 * OCR.STREAMING_WR.ANY_RESPONSE / tma_info_clks",
+ "MetricGroup": "MemoryBW;Offcore;TopdownL4;tma_L4_group;tma_issueSmSt;tma_store_bound_group",
+ "MetricName": "tma_streaming_stores",
+ "MetricThreshold": "tma_streaming_stores > 0.2 & (tma_store_bound > 0.2 & (tma_memory_bound > 0.2 & tma_backend_bound > 0.2))",
+ "PublicDescription": "This metric estimates how often CPU was stalled due to Streaming store memory accesses; Streaming store optimize out a read request required by RFO stores. Even though store accesses do not typically stall out-of-order CPUs; there are few cases where stores can lead to actual stalls. This metric will be flagged should Streaming stores be a bottleneck. Sample with: OCR.STREAMING_WR.ANY_RESPONSE. Related metrics: tma_fb_full",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears",
+ "MetricExpr": "10 * BACLEARS.ANY / tma_info_clks",
+ "MetricGroup": "BigFoot;FetchLat;TopdownL4;tma_L4_group;tma_branch_resteers_group",
+ "MetricName": "tma_unknown_branches",
+ "MetricThreshold": "tma_unknown_branches > 0.05 & (tma_branch_resteers > 0.05 & (tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15))",
+ "PublicDescription": "This metric represents fraction of cycles the CPU was stalled due to new branch address clears. These are fetched branches the Branch Prediction Unit was unable to recognize (e.g. first time the branch is fetched or hitting BTB capacity limit). Sample with: BACLEARS.ANY",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "This metric serves as an approximation of legacy x87 usage",
+ "MetricExpr": "tma_retiring * UOPS_EXECUTED.X87 / UOPS_EXECUTED.THREAD",
+ "MetricGroup": "Compute;TopdownL4;tma_L4_group;tma_fp_arith_group",
+ "MetricName": "tma_x87_use",
+ "MetricThreshold": "tma_x87_use > 0.1 & (tma_fp_arith > 0.2 & tma_light_operations > 0.6)",
+ "PublicDescription": "This metric serves as an approximation of legacy x87 usage. It accounts for instructions beyond X87 FP arithmetic operations; hence may be used as a thermometer to avoid X87 high usage and preferably upgrade to modern ISA. See Tip under Tuning Hint.",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Percentage of cycles in aborted transactions.",
+ "MetricExpr": "max(cpu@cycles\\-t@ - cpu@cycles\\-ct@, 0) / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_aborted_cycles",
+ "ScaleUnit": "100%"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of elisions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@el\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_elision",
+ "ScaleUnit": "1cycles / elision"
+ },
+ {
+ "BriefDescription": "Number of cycles within a transaction divided by the number of transactions.",
+ "MetricExpr": "cpu@cycles\\-t@ / cpu@tx\\-start@",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_cycles_per_transaction",
+ "ScaleUnit": "1cycles / transaction"
+ },
+ {
+ "BriefDescription": "Percentage of cycles within a transaction region.",
+ "MetricExpr": "cpu@cycles\\-t@ / cycles",
+ "MetricGroup": "transaction",
+ "MetricName": "tsx_transactional_cycles",
+ "ScaleUnit": "100%"
+ }
+]
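The tma_retiring entry above reduces to simple slot arithmetic: the retiring slots divided by the sum of the four level-1 categories. A minimal Python sketch of that formula follows, using invented slot counts purely for illustration (none of the values or variable names below come from perf itself):

# Worked example of the tma_retiring MetricExpr above:
#   topdown-retiring / (topdown-fe-bound + topdown-bad-spec +
#                       topdown-retiring + topdown-be-bound)
# All slot counts are invented sample values.
slots = {
    "topdown-retiring": 410_000_000,
    "topdown-fe-bound": 220_000_000,
    "topdown-bad-spec": 35_000_000,
    "topdown-be-bound": 335_000_000,
}
tma_retiring = slots["topdown-retiring"] / sum(slots.values())
print(f"tma_retiring = {tma_retiring:.1%}")  # fraction of issue slots doing useful work
# The MetricThreshold above flags this node when tma_retiring > 0.7
# (or when tma_heavy_operations > 0.1).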
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json
new file mode 100644
index 000000000000..eed1b90a2779
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-interconnect.json
@@ -0,0 +1,90 @@
+[
+ {
+ "BriefDescription": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "EventCode": "0x84",
+ "EventName": "UNC_ARB_COH_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of any coherent request at memory controller that were issued by any core.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle counts number of coherent reads pending on data return from memory controller that were issued by any core.",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_DAT_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_DAT_REQUESTS.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "This event is deprecated. Refer to new event UNC_ARB_DAT_OCCUPANCY.ALL",
+ "EventCode": "0x85",
+ "EventName": "UNC_ARB_IFA_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_OCCUPANCY.RD]",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_REQ_TRK_OCCUPANCY.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_TRK_REQUESTS.RD]",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_REQ_TRK_REQUEST.DRD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of all outgoing valid entries in ReqTrk. Such entry is defined as valid from it's allocation in ReqTrk till deallocation. Accounts for Coherent and non-coherent traffic.",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Each cycle count number of 'valid' coherent Data Read entries . Such entry is defined as valid when it is allocated till deallocation. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_OCCUPANCY.DRD]",
+ "EventCode": "0x80",
+ "EventName": "UNC_ARB_TRK_OCCUPANCY.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "UNC_ARB_TRK_REQUESTS.ALL",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.ALL",
+ "PerPkg": "1",
+ "UMask": "0x1",
+ "Unit": "ARB"
+ },
+ {
+ "BriefDescription": "Number of all coherent Data Read entries. Doesn't include prefetches [This event is alias to UNC_ARB_REQ_TRK_REQUEST.DRD]",
+ "EventCode": "0x81",
+ "EventName": "UNC_ARB_TRK_REQUESTS.RD",
+ "PerPkg": "1",
+ "UMask": "0x2",
+ "Unit": "ARB"
+ }
+]
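The ARB events above come in occupancy/request pairs (e.g. UNC_ARB_TRK_OCCUPANCY.ALL and UNC_ARB_TRK_REQUESTS.ALL), so an average tracker residency can be approximated by dividing the per-cycle occupancy sum by the request count. A rough Python sketch under that assumption, with invented counter values:

# Approximate average residency of a request in the ARB tracker
# (Little's law style): per-cycle occupancy sum / number of requests.
# Both counter values below are invented.
unc_arb_trk_occupancy_all = 9_600_000   # UNC_ARB_TRK_OCCUPANCY.ALL
unc_arb_trk_requests_all = 1_200_000    # UNC_ARB_TRK_REQUESTS.ALL
avg_residency = unc_arb_trk_occupancy_all / unc_arb_trk_requests_all
print(f"average ReqTrk residency ~ {avg_residency:.1f} uncore cycles")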
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json
new file mode 100644
index 000000000000..99fb5259fd25
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-memory.json
@@ -0,0 +1,50 @@
+[
+ {
+ "BriefDescription": "Counts every read (RdCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every 64B read and write request entering the Memory Controller to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_TOTAL_REQCOUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every write (WrCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC0_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "imc_free_running_0"
+ },
+ {
+ "BriefDescription": "Counts every read (RdCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_RDCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x20",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "Counts every 64B read and write request entering the Memory Controller to DRAM (sum of all channels). Each write request counts as a new request incrementing this counter. However, same cache line write requests (both full and partial) are combined to a single 64 byte data transfer to DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_TOTAL_REQCOUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x10",
+ "Unit": "imc_free_running_1"
+ },
+ {
+ "BriefDescription": "Counts every write (WrCAS) issued by the Memory Controller to DRAM (sum of all channels). All requests result in 64 byte data transfers from DRAM.",
+ "EventCode": "0xff",
+ "EventName": "UNC_MC1_WRCAS_COUNT_FREERUN",
+ "PerPkg": "1",
+ "UMask": "0x30",
+ "Unit": "imc_free_running_1"
+ }
+]
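Each RdCAS/WrCAS counted by the free-running iMC events above corresponds to a 64-byte DRAM transfer, so counter deltas over a sampling interval give a back-of-the-envelope bandwidth estimate. A sketch with hypothetical deltas and interval:

# Back-of-the-envelope DRAM bandwidth from the free-running iMC counters.
# Each RdCAS/WrCAS is a 64-byte transfer; deltas and interval are invented.
interval_s = 1.0
rdcas_delta = 150_000_000   # UNC_MC0_RDCAS_COUNT_FREERUN + UNC_MC1_RDCAS_COUNT_FREERUN
wrcas_delta = 60_000_000    # UNC_MC0_WRCAS_COUNT_FREERUN + UNC_MC1_WRCAS_COUNT_FREERUN
read_gbs = rdcas_delta * 64 / interval_s / 1e9
write_gbs = wrcas_delta * 64 / interval_s / 1e9
print(f"read ~ {read_gbs:.1f} GB/s, write ~ {write_gbs:.1f} GB/s")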
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json
new file mode 100644
index 000000000000..c6596ba09195
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/uncore-other.json
@@ -0,0 +1,9 @@
+[
+ {
+ "BriefDescription": "UNC_CLOCK.SOCKET",
+ "EventCode": "0xff",
+ "EventName": "UNC_CLOCK.SOCKET",
+ "PerPkg": "1",
+ "Unit": "CLOCK"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json b/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json
new file mode 100644
index 000000000000..adb2f6b3e77c
--- /dev/null
+++ b/tools/perf/pmu-events/arch/x86/tigerlake/virtual-memory.json
@@ -0,0 +1,165 @@
+[
+ {
+ "BriefDescription": "Loads that miss the DTLB and hit the STLB.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.STLB_HIT",
+ "PublicDescription": "Counts loads that miss the DTLB (Data TLB) and hit the STLB (Second level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a demand load.",
+ "CounterMask": "1",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a demand load.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Load miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data loads. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 2M/4M page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data load to a 4K page.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K sizes) caused by demand data loads. This implies address translations missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a demand load in the PMH each cycle.",
+ "EventCode": "0x08",
+ "EventName": "DTLB_LOAD_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a demand load in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Stores that miss the DTLB and hit the STLB.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.STLB_HIT",
+ "PublicDescription": "Counts stores that miss the DTLB (Data TLB) and hit the STLB (2nd Level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for a store.",
+ "CounterMask": "1",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a store.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Store misses in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by demand data stores. This implies it missed in the DTLB and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 2M/4M page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 2M/4M pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Page walks completed due to a demand data store to a 4K page.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts page walks completed due to demand data stores whose address translations missed in the TLB and were mapped to 4K pages. The page walks can end with or without a page fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for a store in the PMH each cycle.",
+ "EventCode": "0x49",
+ "EventName": "DTLB_STORE_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for a store in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Instruction fetch requests that miss the ITLB and hit the STLB.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.STLB_HIT",
+ "PublicDescription": "Counts instruction fetch requests that miss the ITLB (Instruction TLB) and hit the STLB (Second-level TLB).",
+ "SampleAfterValue": "100003",
+ "UMask": "0x20"
+ },
+ {
+ "BriefDescription": "Cycles when at least one PMH is busy with a page walk for code (instruction fetch) request.",
+ "CounterMask": "1",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_ACTIVE",
+ "PublicDescription": "Counts cycles when at least one PMH (Page Miss Handler) is busy with a page walk for a code (instruction fetch) request.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (All page sizes)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED",
+ "PublicDescription": "Counts completed page walks (all page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0xe"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (2M/4M)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_2M_4M",
+ "PublicDescription": "Counts completed page walks (2M/4M page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x4"
+ },
+ {
+ "BriefDescription": "Code miss in all TLB levels causes a page walk that completes. (4K)",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_COMPLETED_4K",
+ "PublicDescription": "Counts completed page walks (4K page sizes) caused by a code fetch. This implies it missed in the ITLB (Instruction TLB) and further levels of TLB. The page walk can end with or without a fault.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x2"
+ },
+ {
+ "BriefDescription": "Number of page walks outstanding for an outstanding code request in the PMH each cycle.",
+ "EventCode": "0x85",
+ "EventName": "ITLB_MISSES.WALK_PENDING",
+ "PublicDescription": "Counts the number of page walks outstanding for an outstanding code (instruction fetch) request in the PMH (Page Miss Handler) each cycle.",
+ "SampleAfterValue": "100003",
+ "UMask": "0x10"
+ },
+ {
+ "BriefDescription": "DTLB flush attempts of the thread-specific entries",
+ "EventCode": "0xbd",
+ "EventName": "TLB_FLUSH.DTLB_THREAD",
+ "PublicDescription": "Counts the number of DTLB flush attempts of the thread-specific entries.",
+ "SampleAfterValue": "100007",
+ "UMask": "0x1"
+ },
+ {
+ "BriefDescription": "STLB flush attempts",
+ "EventCode": "0xbd",
+ "EventName": "TLB_FLUSH.STLB_ANY",
+ "PublicDescription": "Counts the number of any STLB flush attempts (such as entire, VPID, PCID, InvPage, CR3 write, etc.).",
+ "SampleAfterValue": "100007",
+ "UMask": "0x20"
+ }
+]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/cache.json b/tools/perf/pmu-events/arch/x86/tremontx/cache.json
deleted file mode 100644
index f88040171b4d..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/cache.json
+++ /dev/null
@@ -1,111 +0,0 @@
-[
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cacheable memory requests that miss in the the Last Level Cache. Requests include Demand Loads, Reads for Ownership(RFO), Instruction fetches and L1 HW prefetches. If the platform has an L3 cache, last level cache is the L3, otherwise it is the L2.",
- "EventCode": "0x2e",
- "Counter": "0,1,2,3",
- "UMask": "0x41",
- "PEBScounters": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.MISS",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts memory requests originating from the core that miss in the last level cache. If the platform has an L3 cache, last level cache is the L3, otherwise it is the L2."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts cacheable memory requests that access the Last Level Cache. Requests include Demand Loads, Reads for Ownership(RFO), Instruction fetches and L1 HW prefetches. If the platform has an L3 cache, last level cache is the L3, otherwise it is the L2.",
- "EventCode": "0x2e",
- "Counter": "0,1,2,3",
- "UMask": "0x4f",
- "PEBScounters": "0,1,2,3",
- "EventName": "LONGEST_LAT_CACHE.REFERENCE",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts memory requests originating from the core that reference a cache line in the last level cache. If the platform has an L3 cache, last level cache is the L3, otherwise it is the L2."
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of load uops retired. This event is Precise Event capable",
- "EventCode": "0xd0",
- "Counter": "0,1,2,3",
- "UMask": "0x81",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_LOADS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load uops retired.",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of store uops retired. This event is Precise Event capable",
- "EventCode": "0xd0",
- "Counter": "0,1,2,3",
- "UMask": "0x82",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_UOPS_RETIRED.ALL_STORES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of store uops retired.",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "EventCode": "0xd1",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load uops retired that hit the level 1 data cache",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "EventCode": "0xd1",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load uops retired that hit in the level 2 cache",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "EventCode": "0xd1",
- "Counter": "0,1,2,3",
- "UMask": "0x4",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L3_HIT",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load uops retired that miss in the level 3 cache"
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "EventCode": "0xd1",
- "Counter": "0,1,2,3",
- "UMask": "0x8",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L1_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load uops retired that miss in the level 1 data cache",
- "Data_LA": "1"
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "EventCode": "0xd1",
- "Counter": "0,1,2,3",
- "UMask": "0x10",
- "PEBScounters": "0,1,2,3",
- "EventName": "MEM_LOAD_UOPS_RETIRED.L2_MISS",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of load uops retired that miss in the level 2 cache",
- "Data_LA": "1"
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/frontend.json b/tools/perf/pmu-events/arch/x86/tremontx/frontend.json
deleted file mode 100644
index 73b0a1ed5756..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/frontend.json
+++ /dev/null
@@ -1,26 +0,0 @@
-[
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line and that cache line is not in the ICache (miss). The event strives to count on a cache line basis, so that multiple accesses which miss in a single cache line count as one ICACHE.MISS. Specifically, the event counts when straight line code crosses the cache line boundary, or when a branch target is to a new line, and that cache line is not in the ICache.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x2",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE.MISSES",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in a cache line and they do not hit in the ICache (miss)."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes in an ICache Line. The event strives to count on a cache line basis, so that multiple fetches to a single cache line count as one ICACHE.ACCESS. Specifically, the event counts when accesses from straight line code crosses the cache line boundary, or when a branch target is to a new line.",
- "EventCode": "0x80",
- "Counter": "0,1,2,3",
- "UMask": "0x3",
- "PEBScounters": "0,1,2,3",
- "EventName": "ICACHE.ACCESSES",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts requests to the Instruction Cache (ICache) for one or more bytes cache Line."
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/memory.json b/tools/perf/pmu-events/arch/x86/tremontx/memory.json
deleted file mode 100644
index 65469e84f35b..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/memory.json
+++ /dev/null
@@ -1,26 +0,0 @@
-[
- {
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "EventCode": "0XB7",
- "MSRValue": "0x000000003F04000001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OCR.DEMAND_DATA_RD.L3_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that was not supplied by the L3 cache.",
- "Offcore": "1"
- },
- {
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "EventCode": "0XB7",
- "MSRValue": "0x000000003F04000002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OCR.DEMAND_RFO.L3_MISS",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand reads for ownership (RFO) requests and software based prefetches for exclusive ownership (PREFETCHW) that was not supplied by the L3 cache.",
- "Offcore": "1"
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/other.json b/tools/perf/pmu-events/arch/x86/tremontx/other.json
deleted file mode 100644
index 85bf3c8f3914..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/other.json
+++ /dev/null
@@ -1,26 +0,0 @@
-[
- {
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "EventCode": "0XB7",
- "MSRValue": "0x000000000000010001",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OCR.DEMAND_DATA_RD.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts demand data reads that have any response type.",
- "Offcore": "1"
- },
- {
- "PublicDescription": "Offcore response can be programmed only with a specific pair of event select and counter MSR, and with specific event codes and predefine mask bit value in a dedicated MSR to specify attributes of the offcore transaction.",
- "EventCode": "0XB7",
- "MSRValue": "0x000000000000010002",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "EventName": "OCR.DEMAND_RFO.ANY_RESPONSE",
- "MSRIndex": "0x1a6,0x1a7",
- "SampleAfterValue": "100003",
- "BriefDescription": "Counts all demand reads for ownership (RFO) requests and software based prefetches for exclusive ownership (PREFETCHW) that have any response type.",
- "Offcore": "1"
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json b/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json
deleted file mode 100644
index 05a8f6a7d9c0..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/pipeline.json
+++ /dev/null
@@ -1,111 +0,0 @@
-[
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of instructions that retire. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The counter continues counting during hardware interrupts, traps, and inside interrupt handlers. This event uses fixed counter 0.",
- "Counter": "32",
- "UMask": "0x1",
- "PEBScounters": "32",
- "EventName": "INST_RETIRED.ANY",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of instructions retired. (Fixed event)"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses fixed counter 1.",
- "Counter": "33",
- "UMask": "0x2",
- "PEBScounters": "33",
- "EventName": "CPU_CLK_UNHALTED.CORE",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of unhalted core clock cycles. (Fixed event)"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of reference cycles that the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time. This event is not affected by core frequency changes and at a fixed frequency. This event uses fixed counter 2.",
- "Counter": "34",
- "UMask": "0x3",
- "PEBScounters": "34",
- "EventName": "CPU_CLK_UNHALTED.REF_TSC",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency. (Fixed event)"
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of core cycles while the core is not in a halt state. The core enters the halt state when it is running the HLT instruction. The core frequency may change from time to time. For this reason this event may have a changing ratio with regards to time. This event uses a programmable general purpose performance counter.",
- "EventCode": "0x3c",
- "Counter": "0,1,2,3",
- "PEBScounters": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.CORE_P",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of unhalted core clock cycles."
- },
- {
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts reference cycles (at TSC frequency) when core is not halted. This event uses a programmable general purpose perfmon counter.",
- "EventCode": "0x3c",
- "Counter": "0,1,2,3",
- "UMask": "0x1",
- "PEBScounters": "0,1,2,3",
- "EventName": "CPU_CLK_UNHALTED.REF",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of unhalted reference clock cycles at TSC frequency."
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts the number of instructions that retire execution. For instructions that consist of multiple uops, this event counts the retirement of the last uop of the instruction. The event continues counting during hardware interrupts, traps, and inside interrupt handlers. This is an architectural performance event. This event uses a Programmable general purpose perfmon counter. *This event is Precise Event capable: The EventingRIP field in the PEBS record is precise to the address of the instruction which caused the event.",
- "EventCode": "0xc0",
- "Counter": "0,1,2,3",
- "PEBScounters": "0,1,2,3",
- "EventName": "INST_RETIRED.ANY_P",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts the number of instructions retired."
- },
- {
- "CollectPEBSRecord": "2",
- "EventCode": "0xc3",
- "Counter": "0,1,2,3",
- "PEBScounters": "0,1,2,3",
- "EventName": "MACHINE_CLEARS.ANY",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "20003",
- "BriefDescription": "Counts all machine clears due to, but not limited to memory ordering, memory disambiguation, SMC, page faults and FP assist."
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts branch instructions retired for all branch types. This event is Precise Event capable. This is an architectural event.",
- "EventCode": "0xc4",
- "Counter": "0,1,2,3",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_INST_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of branch instructions retired for all branch types."
- },
- {
- "PEBS": "1",
- "CollectPEBSRecord": "2",
- "PublicDescription": "Counts mispredicted branch instructions retired for all branch types. This event is Precise Event capable. This is an architectural event.",
- "EventCode": "0xc5",
- "Counter": "0,1,2,3",
- "PEBScounters": "0,1,2,3",
- "EventName": "BR_MISP_RETIRED.ALL_BRANCHES",
- "SampleAfterValue": "200003",
- "BriefDescription": "Counts the number of mispredicted branch instructions retired."
- },
- {
- "CollectPEBSRecord": "2",
- "EventCode": "0xcd",
- "Counter": "0,1,2,3",
- "PEBScounters": "0,1,2,3",
- "EventName": "CYCLES_DIV_BUSY.ANY",
- "PDIR_COUNTER": "na",
- "SampleAfterValue": "2000003",
- "BriefDescription": "Counts cycles the floating point divider or integer divider or both are busy. Does not imply a stall waiting for either divider."
- }
-] \ No newline at end of file
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json b/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
deleted file mode 100644
index 15376f2cf052..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/uncore-memory.json
+++ /dev/null
@@ -1,73 +0,0 @@
-[
- {
- "BriefDescription": "read requests to memory controller. Derived from unc_m_cas_count.rd",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x04",
- "EventName": "LLC_MISSES.MEM_READ",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x0f",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "write requests to memory controller. Derived from unc_m_cas_count.wr",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x04",
- "EventName": "LLC_MISSES.MEM_WRITE",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0x30",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Memory controller clock ticks",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventName": "UNC_M_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Pre-charge for reads",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_M_PRE_COUNT.RD",
- "PerPkg": "1",
- "UMask": "0x04",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Pre-charge for writes",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_M_PRE_COUNT.WR",
- "PerPkg": "1",
- "UMask": "0x08",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "Precharge due to read on page miss, write on page miss or PGT",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_M_PRE_COUNT.ALL",
- "PerPkg": "1",
- "UMask": "0x1c",
- "Unit": "iMC"
- },
- {
- "BriefDescription": "DRAM Precharge commands. : Precharge due to page table",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x02",
- "EventName": "UNC_M_PRE_COUNT.PGT",
- "PerPkg": "1",
- "PublicDescription": "DRAM Precharge commands. : Precharge due to page table : Counts the number of DRAM Precharge commands sent on this channel.",
- "UMask": "0x10",
- "Unit": "iMC"
- }
-]
diff --git a/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json b/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
deleted file mode 100644
index 6deff1fe89e3..000000000000
--- a/tools/perf/pmu-events/arch/x86/tremontx/uncore-other.json
+++ /dev/null
@@ -1,431 +0,0 @@
-[
- {
- "BriefDescription": "Uncore cache clock ticks",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventName": "UNC_CHA_CLOCKTICKS",
- "PerPkg": "1",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "LLC misses - Uncacheable reads (from cpu) . Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.UNCACHEABLE",
- "Filter": "config1=0x40e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO reads. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_READ",
- "Filter": "config1=0x40040e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "MMIO writes. Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_MISSES.MMIO_WRITE",
- "Filter": "config1=0x40041e33",
- "PerPkg": "1",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (full cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_FULL",
- "Filter": "config1=0x41833",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "Streaming stores (partial cache line). Derived from unc_cha_tor_inserts.ia_miss",
- "Counter": "0,1,2,3",
- "CounterType": "PGMABLE",
- "EventCode": "0x35",
- "EventName": "LLC_REFERENCES.STREAMING_PARTIAL",
- "Filter": "config1=0x41a33",
- "PerPkg": "1",
- "ScaleUnit": "64Bytes",
- "UMask": "0xC001FE01",
- "UMaskExt": "0xC001FE",
- "Unit": "CHA"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO. Derived from unc_iio_data_req_of_cpu.mem_read.part0",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_READ",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "MetricName": "LLC_MISSES.PCIE_READ",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO. Derived from unc_iio_data_req_of_cpu.mem_write.part0",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "LLC_MISSES.PCIE_WRITE",
- "FCMask": "0x07",
- "Filter": "ch_mask=0x1f",
- "MetricExpr": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART0 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2 +UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "MetricName": "LLC_MISSES.PCIE_WRITE",
- "PerPkg": "1",
- "PortMask": "0x01",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 1",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 2",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth writing at IIO, part 3",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_WRITE.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "ScaleUnit": "4Bytes",
- "UMask": "0x01",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 1",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART1",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x02",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 2",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART2",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x04",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",
- "Unit": "IIO"
- },
- {
- "BriefDescription": "PCI Express bandwidth reading at IIO, part 3",
- "Counter": "0,1",
- "CounterType": "PGMABLE",
- "EventCode": "0x83",
- "EventName": "UNC_IIO_DATA_REQ_OF_CPU.MEM_READ.PART3",
- "FCMask": "0x07",
- "PerPkg": "1",
- "PortMask": "0x08",
- "ScaleUnit": "4Bytes",
- "UMask": "0x04",